path: root/compiler
Diffstat (limited to '')
-rw-r--r--compiler/rustc/src/main.rs4
-rw-r--r--compiler/rustc_apfloat/src/lib.rs2
-rw-r--r--compiler/rustc_arena/src/lib.rs8
-rw-r--r--compiler/rustc_ast/Cargo.toml10
-rw-r--r--compiler/rustc_ast/src/ast.rs305
-rw-r--r--compiler/rustc_ast/src/ast_traits.rs90
-rw-r--r--compiler/rustc_ast/src/attr/mod.rs153
-rw-r--r--compiler/rustc_ast/src/lib.rs6
-rw-r--r--compiler/rustc_ast/src/mut_visit.rs94
-rw-r--r--compiler/rustc_ast/src/node_id.rs2
-rw-r--r--compiler/rustc_ast/src/token.rs98
-rw-r--r--compiler/rustc_ast/src/tokenstream.rs273
-rw-r--r--compiler/rustc_ast/src/util/literal.rs32
-rw-r--r--compiler/rustc_ast/src/util/parser.rs72
-rw-r--r--compiler/rustc_ast/src/visit.rs90
-rw-r--r--compiler/rustc_ast_lowering/Cargo.toml14
-rw-r--r--compiler/rustc_ast_lowering/src/asm.rs214
-rw-r--r--compiler/rustc_ast_lowering/src/block.rs13
-rw-r--r--compiler/rustc_ast_lowering/src/errors.rs347
-rw-r--r--compiler/rustc_ast_lowering/src/expr.rs318
-rw-r--r--compiler/rustc_ast_lowering/src/index.rs67
-rw-r--r--compiler/rustc_ast_lowering/src/item.rs255
-rw-r--r--compiler/rustc_ast_lowering/src/lib.rs529
-rw-r--r--compiler/rustc_ast_lowering/src/lifetime_collector.rs15
-rw-r--r--compiler/rustc_ast_lowering/src/pat.rs114
-rw-r--r--compiler/rustc_ast_lowering/src/path.rs56
-rw-r--r--compiler/rustc_ast_passes/Cargo.toml1
-rw-r--r--compiler/rustc_ast_passes/src/ast_validation.rs421
-rw-r--r--compiler/rustc_ast_passes/src/errors.rs234
-rw-r--r--compiler/rustc_ast_passes/src/feature_gate.rs382
-rw-r--r--compiler/rustc_ast_passes/src/lib.rs5
-rw-r--r--compiler/rustc_ast_passes/src/node_count.rs22
-rw-r--r--compiler/rustc_ast_pretty/Cargo.toml1
-rw-r--r--compiler/rustc_ast_pretty/src/lib.rs2
-rw-r--r--compiler/rustc_ast_pretty/src/pprust/state.rs38
-rw-r--r--compiler/rustc_ast_pretty/src/pprust/state/expr.rs14
-rw-r--r--compiler/rustc_ast_pretty/src/pprust/state/item.rs8
-rw-r--r--compiler/rustc_attr/Cargo.toml1
-rw-r--r--compiler/rustc_attr/src/builtin.rs398
-rw-r--r--compiler/rustc_attr/src/lib.rs4
-rw-r--r--compiler/rustc_attr/src/session_diagnostics.rs393
-rw-r--r--compiler/rustc_borrowck/Cargo.toml1
-rw-r--r--compiler/rustc_borrowck/src/constraint_generation.rs14
-rw-r--r--compiler/rustc_borrowck/src/constraints/mod.rs11
-rw-r--r--compiler/rustc_borrowck/src/consumers.rs9
-rw-r--r--compiler/rustc_borrowck/src/dataflow.rs4
-rw-r--r--compiler/rustc_borrowck/src/diagnostics/bound_region_errors.rs150
-rw-r--r--compiler/rustc_borrowck/src/diagnostics/conflict_errors.rs182
-rw-r--r--compiler/rustc_borrowck/src/diagnostics/explain_borrow.rs218
-rw-r--r--compiler/rustc_borrowck/src/diagnostics/mod.rs30
-rw-r--r--compiler/rustc_borrowck/src/diagnostics/move_errors.rs6
-rw-r--r--compiler/rustc_borrowck/src/diagnostics/mutability_errors.rs163
-rw-r--r--compiler/rustc_borrowck/src/diagnostics/outlives_suggestion.rs1
-rw-r--r--compiler/rustc_borrowck/src/diagnostics/region_errors.rs168
-rw-r--r--compiler/rustc_borrowck/src/diagnostics/region_name.rs127
-rw-r--r--compiler/rustc_borrowck/src/invalidation.rs26
-rw-r--r--compiler/rustc_borrowck/src/lib.rs76
-rw-r--r--compiler/rustc_borrowck/src/location.rs5
-rw-r--r--compiler/rustc_borrowck/src/nll.rs19
-rw-r--r--compiler/rustc_borrowck/src/places_conflict.rs28
-rw-r--r--compiler/rustc_borrowck/src/prefixes.rs1
-rw-r--r--compiler/rustc_borrowck/src/region_infer/mod.rs233
-rw-r--r--compiler/rustc_borrowck/src/region_infer/opaque_types.rs393
-rw-r--r--compiler/rustc_borrowck/src/region_infer/values.rs4
-rw-r--r--compiler/rustc_borrowck/src/renumber.rs14
-rw-r--r--compiler/rustc_borrowck/src/session_diagnostics.rs130
-rw-r--r--compiler/rustc_borrowck/src/type_check/canonical.rs52
-rw-r--r--compiler/rustc_borrowck/src/type_check/constraint_conversion.rs73
-rw-r--r--compiler/rustc_borrowck/src/type_check/free_region_relations.rs81
-rw-r--r--compiler/rustc_borrowck/src/type_check/input_output.rs39
-rw-r--r--compiler/rustc_borrowck/src/type_check/mod.rs377
-rw-r--r--compiler/rustc_borrowck/src/type_check/relate_tys.rs19
-rw-r--r--compiler/rustc_borrowck/src/universal_regions.rs32
-rw-r--r--compiler/rustc_builtin_macros/Cargo.toml13
-rw-r--r--compiler/rustc_builtin_macros/src/asm.rs2
-rw-r--r--compiler/rustc_builtin_macros/src/assert.rs4
-rw-r--r--compiler/rustc_builtin_macros/src/assert/context.rs31
-rw-r--r--compiler/rustc_builtin_macros/src/cfg.rs10
-rw-r--r--compiler/rustc_builtin_macros/src/cfg_eval.rs79
-rw-r--r--compiler/rustc_builtin_macros/src/cmdline_attrs.rs8
-rw-r--r--compiler/rustc_builtin_macros/src/concat.rs2
-rw-r--r--compiler/rustc_builtin_macros/src/concat_bytes.rs5
-rw-r--r--compiler/rustc_builtin_macros/src/derive.rs9
-rw-r--r--compiler/rustc_builtin_macros/src/deriving/bounds.rs2
-rw-r--r--compiler/rustc_builtin_macros/src/deriving/clone.rs6
-rw-r--r--compiler/rustc_builtin_macros/src/deriving/cmp/eq.rs5
-rw-r--r--compiler/rustc_builtin_macros/src/deriving/cmp/ord.rs6
-rw-r--r--compiler/rustc_builtin_macros/src/deriving/cmp/partial_eq.rs70
-rw-r--r--compiler/rustc_builtin_macros/src/deriving/cmp/partial_ord.rs6
-rw-r--r--compiler/rustc_builtin_macros/src/deriving/debug.rs16
-rw-r--r--compiler/rustc_builtin_macros/src/deriving/decodable.rs4
-rw-r--r--compiler/rustc_builtin_macros/src/deriving/default.rs29
-rw-r--r--compiler/rustc_builtin_macros/src/deriving/encodable.rs6
-rw-r--r--compiler/rustc_builtin_macros/src/deriving/generic/mod.rs100
-rw-r--r--compiler/rustc_builtin_macros/src/deriving/generic/ty.rs5
-rw-r--r--compiler/rustc_builtin_macros/src/deriving/hash.rs6
-rw-r--r--compiler/rustc_builtin_macros/src/deriving/mod.rs18
-rw-r--r--compiler/rustc_builtin_macros/src/edition_panic.rs4
-rw-r--r--compiler/rustc_builtin_macros/src/format.rs1968
-rw-r--r--compiler/rustc_builtin_macros/src/format/ast.rs240
-rw-r--r--compiler/rustc_builtin_macros/src/format/expand.rs353
-rw-r--r--compiler/rustc_builtin_macros/src/global_allocator.rs7
-rw-r--r--compiler/rustc_builtin_macros/src/lib.rs5
-rw-r--r--compiler/rustc_builtin_macros/src/proc_macro_harness.rs2
-rw-r--r--compiler/rustc_builtin_macros/src/source_util.rs2
-rw-r--r--compiler/rustc_builtin_macros/src/standard_library_imports.rs5
-rw-r--r--compiler/rustc_builtin_macros/src/test.rs48
-rw-r--r--compiler/rustc_builtin_macros/src/test_harness.rs30
-rw-r--r--compiler/rustc_codegen_cranelift/.cirrus.yml2
-rw-r--r--compiler/rustc_codegen_cranelift/.github/workflows/main.yml101
-rw-r--r--compiler/rustc_codegen_cranelift/.vscode/settings.json6
-rw-r--r--compiler/rustc_codegen_cranelift/Cargo.lock169
-rw-r--r--compiler/rustc_codegen_cranelift/Cargo.toml16
-rw-r--r--compiler/rustc_codegen_cranelift/Readme.md4
-rw-r--r--compiler/rustc_codegen_cranelift/build_sysroot/Cargo.lock46
-rw-r--r--compiler/rustc_codegen_cranelift/build_system/abi_cafe.rs52
-rw-r--r--compiler/rustc_codegen_cranelift/build_system/build_backend.rs18
-rw-r--r--compiler/rustc_codegen_cranelift/build_system/build_sysroot.rs21
-rw-r--r--compiler/rustc_codegen_cranelift/build_system/config.rs3
-rw-r--r--compiler/rustc_codegen_cranelift/build_system/mod.rs67
-rw-r--r--compiler/rustc_codegen_cranelift/build_system/prepare.rs179
-rw-r--r--compiler/rustc_codegen_cranelift/build_system/rustc_info.rs9
-rw-r--r--compiler/rustc_codegen_cranelift/build_system/tests.rs610
-rw-r--r--compiler/rustc_codegen_cranelift/build_system/utils.rs75
-rwxr-xr-xcompiler/rustc_codegen_cranelift/clean_all.sh6
-rw-r--r--compiler/rustc_codegen_cranelift/config.txt35
-rw-r--r--compiler/rustc_codegen_cranelift/example/alloc_system.rs2
-rw-r--r--compiler/rustc_codegen_cranelift/example/issue-91827-extern-types.rs1
-rw-r--r--compiler/rustc_codegen_cranelift/example/mini_core.rs18
-rw-r--r--compiler/rustc_codegen_cranelift/example/mini_core_hello_world.rs118
-rw-r--r--compiler/rustc_codegen_cranelift/example/std_example.rs2
-rw-r--r--compiler/rustc_codegen_cranelift/patches/0001-abi-cafe-Disable-some-test-on-x86_64-pc-windows-gnu.patch29
-rw-r--r--compiler/rustc_codegen_cranelift/patches/0001-portable-simd-Disable-unsupported-tests.patch96
-rw-r--r--compiler/rustc_codegen_cranelift/patches/0003-rand-Disable-rand-tests-on-mingw.patch47
-rw-r--r--compiler/rustc_codegen_cranelift/patches/0023-sysroot-Ignore-failing-tests.patch12
-rw-r--r--compiler/rustc_codegen_cranelift/rust-toolchain2
-rw-r--r--compiler/rustc_codegen_cranelift/scripts/setup_rust_fork.sh6
-rwxr-xr-xcompiler/rustc_codegen_cranelift/scripts/test_rustc_tests.sh8
-rwxr-xr-xcompiler/rustc_codegen_cranelift/scripts/tests.sh203
-rw-r--r--compiler/rustc_codegen_cranelift/src/abi/comments.rs2
-rw-r--r--compiler/rustc_codegen_cranelift/src/abi/mod.rs55
-rw-r--r--compiler/rustc_codegen_cranelift/src/abi/pass_mode.rs31
-rw-r--r--compiler/rustc_codegen_cranelift/src/abi/returning.rs10
-rw-r--r--compiler/rustc_codegen_cranelift/src/allocator.rs4
-rw-r--r--compiler/rustc_codegen_cranelift/src/analyze.rs2
-rw-r--r--compiler/rustc_codegen_cranelift/src/archive.rs3
-rw-r--r--compiler/rustc_codegen_cranelift/src/base.rs299
-rw-r--r--compiler/rustc_codegen_cranelift/src/common.rs46
-rw-r--r--compiler/rustc_codegen_cranelift/src/concurrency_limiter.rs177
-rw-r--r--compiler/rustc_codegen_cranelift/src/constant.rs163
-rw-r--r--compiler/rustc_codegen_cranelift/src/debuginfo/emit.rs2
-rw-r--r--compiler/rustc_codegen_cranelift/src/debuginfo/line_info.rs213
-rw-r--r--compiler/rustc_codegen_cranelift/src/debuginfo/mod.rs282
-rw-r--r--compiler/rustc_codegen_cranelift/src/discriminant.rs31
-rw-r--r--compiler/rustc_codegen_cranelift/src/driver/aot.rs560
-rw-r--r--compiler/rustc_codegen_cranelift/src/driver/jit.rs100
-rw-r--r--compiler/rustc_codegen_cranelift/src/global_asm.rs114
-rw-r--r--compiler/rustc_codegen_cranelift/src/inline_asm.rs188
-rw-r--r--compiler/rustc_codegen_cranelift/src/intrinsics/cpuid.rs2
-rw-r--r--compiler/rustc_codegen_cranelift/src/intrinsics/llvm.rs8
-rw-r--r--compiler/rustc_codegen_cranelift/src/intrinsics/mod.rs130
-rw-r--r--compiler/rustc_codegen_cranelift/src/intrinsics/simd.rs166
-rw-r--r--compiler/rustc_codegen_cranelift/src/lib.rs69
-rw-r--r--compiler/rustc_codegen_cranelift/src/main_shim.rs17
-rw-r--r--compiler/rustc_codegen_cranelift/src/num.rs61
-rw-r--r--compiler/rustc_codegen_cranelift/src/optimize/mod.rs17
-rw-r--r--compiler/rustc_codegen_cranelift/src/pretty_clif.rs61
-rw-r--r--compiler/rustc_codegen_cranelift/src/toolchain.rs6
-rw-r--r--compiler/rustc_codegen_cranelift/src/trap.rs25
-rw-r--r--compiler/rustc_codegen_cranelift/src/unsize.rs41
-rw-r--r--compiler/rustc_codegen_cranelift/src/value_and_place.rs69
-rw-r--r--compiler/rustc_codegen_cranelift/src/vtable.rs26
-rwxr-xr-xcompiler/rustc_codegen_cranelift/test.sh13
-rw-r--r--compiler/rustc_codegen_gcc/example/alloc_system.rs2
-rw-r--r--compiler/rustc_codegen_gcc/patches/0024-core-Disable-portable-simd-test.patch1
-rw-r--r--compiler/rustc_codegen_gcc/src/abi.rs43
-rw-r--r--compiler/rustc_codegen_gcc/src/archive.rs5
-rw-r--r--compiler/rustc_codegen_gcc/src/asm.rs5
-rw-r--r--compiler/rustc_codegen_gcc/src/builder.rs211
-rw-r--r--compiler/rustc_codegen_gcc/src/common.rs4
-rw-r--r--compiler/rustc_codegen_gcc/src/consts.rs18
-rw-r--r--compiler/rustc_codegen_gcc/src/context.rs24
-rw-r--r--compiler/rustc_codegen_gcc/src/errors.rs242
-rw-r--r--compiler/rustc_codegen_gcc/src/intrinsic/mod.rs39
-rw-r--r--compiler/rustc_codegen_gcc/src/intrinsic/simd.rs204
-rw-r--r--compiler/rustc_codegen_gcc/src/lib.rs18
-rw-r--r--compiler/rustc_codegen_gcc/tests/run/asm.rs9
-rw-r--r--compiler/rustc_codegen_gcc/tests/run/int.rs2
-rw-r--r--compiler/rustc_codegen_llvm/Cargo.toml4
-rw-r--r--compiler/rustc_codegen_llvm/src/abi.rs109
-rw-r--r--compiler/rustc_codegen_llvm/src/asm.rs17
-rw-r--r--compiler/rustc_codegen_llvm/src/attributes.rs7
-rw-r--r--compiler/rustc_codegen_llvm/src/back/archive.rs116
-rw-r--r--compiler/rustc_codegen_llvm/src/back/lto.rs121
-rw-r--r--compiler/rustc_codegen_llvm/src/back/write.rs287
-rw-r--r--compiler/rustc_codegen_llvm/src/base.rs13
-rw-r--r--compiler/rustc_codegen_llvm/src/builder.rs145
-rw-r--r--compiler/rustc_codegen_llvm/src/callee.rs16
-rw-r--r--compiler/rustc_codegen_llvm/src/common.rs87
-rw-r--r--compiler/rustc_codegen_llvm/src/consts.rs42
-rw-r--r--compiler/rustc_codegen_llvm/src/context.rs40
-rw-r--r--compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs4
-rw-r--r--compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs1
-rw-r--r--compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs19
-rw-r--r--compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/cpp_like.rs740
-rw-r--r--compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/mod.rs96
-rw-r--r--compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/native.rs12
-rw-r--r--compiler/rustc_codegen_llvm/src/debuginfo/metadata/type_map.rs11
-rw-r--r--compiler/rustc_codegen_llvm/src/debuginfo/mod.rs1
-rw-r--r--compiler/rustc_codegen_llvm/src/debuginfo/utils.rs2
-rw-r--r--compiler/rustc_codegen_llvm/src/declare.rs13
-rw-r--r--compiler/rustc_codegen_llvm/src/intrinsic.rs148
-rw-r--r--compiler/rustc_codegen_llvm/src/lib.rs10
-rw-r--r--compiler/rustc_codegen_llvm/src/llvm/archive_ro.rs11
-rw-r--r--compiler/rustc_codegen_llvm/src/llvm/ffi.rs133
-rw-r--r--compiler/rustc_codegen_llvm/src/llvm_util.rs53
-rw-r--r--compiler/rustc_codegen_llvm/src/mono_item.rs1
-rw-r--r--compiler/rustc_codegen_llvm/src/type_of.rs1
-rw-r--r--compiler/rustc_codegen_ssa/Cargo.toml1
-rw-r--r--compiler/rustc_codegen_ssa/src/back/archive.rs73
-rw-r--r--compiler/rustc_codegen_ssa/src/back/link.rs629
-rw-r--r--compiler/rustc_codegen_ssa/src/back/linker.rs208
-rw-r--r--compiler/rustc_codegen_ssa/src/back/metadata.rs16
-rw-r--r--compiler/rustc_codegen_ssa/src/back/symbol_export.rs33
-rw-r--r--compiler/rustc_codegen_ssa/src/back/write.rs90
-rw-r--r--compiler/rustc_codegen_ssa/src/base.rs183
-rw-r--r--compiler/rustc_codegen_ssa/src/common.rs5
-rw-r--r--compiler/rustc_codegen_ssa/src/debuginfo/type_names.rs62
-rw-r--r--compiler/rustc_codegen_ssa/src/errors.rs356
-rw-r--r--compiler/rustc_codegen_ssa/src/lib.rs33
-rw-r--r--compiler/rustc_codegen_ssa/src/meth.rs21
-rw-r--r--compiler/rustc_codegen_ssa/src/mir/analyze.rs6
-rw-r--r--compiler/rustc_codegen_ssa/src/mir/block.rs325
-rw-r--r--compiler/rustc_codegen_ssa/src/mir/constant.rs36
-rw-r--r--compiler/rustc_codegen_ssa/src/mir/debuginfo.rs8
-rw-r--r--compiler/rustc_codegen_ssa/src/mir/intrinsic.rs16
-rw-r--r--compiler/rustc_codegen_ssa/src/mir/mod.rs34
-rw-r--r--compiler/rustc_codegen_ssa/src/mir/operand.rs11
-rw-r--r--compiler/rustc_codegen_ssa/src/mir/place.rs35
-rw-r--r--compiler/rustc_codegen_ssa/src/mir/rvalue.rs22
-rw-r--r--compiler/rustc_codegen_ssa/src/mir/statement.rs14
-rw-r--r--compiler/rustc_codegen_ssa/src/target_features.rs217
-rw-r--r--compiler/rustc_codegen_ssa/src/traits/abi.rs3
-rw-r--r--compiler/rustc_codegen_ssa/src/traits/backend.rs2
-rw-r--r--compiler/rustc_codegen_ssa/src/traits/builder.rs163
-rw-r--r--compiler/rustc_codegen_ssa/src/traits/consts.rs1
-rw-r--r--compiler/rustc_codegen_ssa/src/traits/intrinsic.rs2
-rw-r--r--compiler/rustc_codegen_ssa/src/traits/misc.rs4
-rw-r--r--compiler/rustc_codegen_ssa/src/traits/type_.rs7
-rw-r--r--compiler/rustc_codegen_ssa/src/traits/write.rs1
-rw-r--r--compiler/rustc_const_eval/Cargo.toml1
-rw-r--r--compiler/rustc_const_eval/src/const_eval/error.rs86
-rw-r--r--compiler/rustc_const_eval/src/const_eval/eval_queries.rs92
-rw-r--r--compiler/rustc_const_eval/src/const_eval/machine.rs133
-rw-r--r--compiler/rustc_const_eval/src/const_eval/mod.rs29
-rw-r--r--compiler/rustc_const_eval/src/const_eval/valtrees.rs39
-rw-r--r--compiler/rustc_const_eval/src/errors.rs155
-rw-r--r--compiler/rustc_const_eval/src/interpret/cast.rs143
-rw-r--r--compiler/rustc_const_eval/src/interpret/eval_context.rs38
-rw-r--r--compiler/rustc_const_eval/src/interpret/intern.rs30
-rw-r--r--compiler/rustc_const_eval/src/interpret/intrinsics.rs71
-rw-r--r--compiler/rustc_const_eval/src/interpret/intrinsics/caller_location.rs3
-rw-r--r--compiler/rustc_const_eval/src/interpret/intrinsics/type_name.rs8
-rw-r--r--compiler/rustc_const_eval/src/interpret/machine.rs68
-rw-r--r--compiler/rustc_const_eval/src/interpret/memory.rs111
-rw-r--r--compiler/rustc_const_eval/src/interpret/operand.rs277
-rw-r--r--compiler/rustc_const_eval/src/interpret/operator.rs18
-rw-r--r--compiler/rustc_const_eval/src/interpret/place.rs52
-rw-r--r--compiler/rustc_const_eval/src/interpret/projection.rs19
-rw-r--r--compiler/rustc_const_eval/src/interpret/step.rs14
-rw-r--r--compiler/rustc_const_eval/src/interpret/terminator.rs35
-rw-r--r--compiler/rustc_const_eval/src/interpret/traits.rs2
-rw-r--r--compiler/rustc_const_eval/src/interpret/validity.rs268
-rw-r--r--compiler/rustc_const_eval/src/lib.rs10
-rw-r--r--compiler/rustc_const_eval/src/might_permit_raw_init.rs40
-rw-r--r--compiler/rustc_const_eval/src/transform/check_consts/check.rs89
-rw-r--r--compiler/rustc_const_eval/src/transform/check_consts/ops.rs181
-rw-r--r--compiler/rustc_const_eval/src/transform/check_consts/post_drop_elaboration.rs12
-rw-r--r--compiler/rustc_const_eval/src/transform/check_consts/qualifs.rs104
-rw-r--r--compiler/rustc_const_eval/src/transform/check_consts/resolver.rs6
-rw-r--r--compiler/rustc_const_eval/src/transform/promote_consts.rs35
-rw-r--r--compiler/rustc_const_eval/src/transform/validate.rs132
-rw-r--r--compiler/rustc_const_eval/src/util/might_permit_raw_init.rs151
-rw-r--r--compiler/rustc_const_eval/src/util/mod.rs2
-rw-r--r--compiler/rustc_data_structures/Cargo.toml24
-rw-r--r--compiler/rustc_data_structures/src/fingerprint.rs4
-rw-r--r--compiler/rustc_data_structures/src/flock/linux.rs7
-rw-r--r--compiler/rustc_data_structures/src/fx.rs15
-rw-r--r--compiler/rustc_data_structures/src/graph/vec_graph/mod.rs4
-rw-r--r--compiler/rustc_data_structures/src/lib.rs6
-rw-r--r--compiler/rustc_data_structures/src/map_in_place.rs127
-rw-r--r--compiler/rustc_data_structures/src/obligation_forest/mod.rs41
-rw-r--r--compiler/rustc_data_structures/src/obligation_forest/tests.rs8
-rw-r--r--compiler/rustc_data_structures/src/profiling.rs64
-rw-r--r--compiler/rustc_data_structures/src/sorted_map.rs37
-rw-r--r--compiler/rustc_data_structures/src/sso/set.rs2
-rw-r--r--compiler/rustc_data_structures/src/sync.rs4
-rw-r--r--compiler/rustc_data_structures/src/thin_vec.rs135
-rw-r--r--compiler/rustc_data_structures/src/thin_vec/tests.rs42
-rw-r--r--compiler/rustc_data_structures/src/transitive_relation.rs133
-rw-r--r--compiler/rustc_data_structures/src/transitive_relation/tests.rs48
-rw-r--r--compiler/rustc_data_structures/src/unord.rs382
-rw-r--r--compiler/rustc_driver/Cargo.toml5
-rw-r--r--compiler/rustc_driver/src/lib.rs120
-rw-r--r--compiler/rustc_driver/src/pretty.rs20
-rw-r--r--compiler/rustc_driver/src/session_diagnostics.rs40
-rw-r--r--compiler/rustc_error_codes/src/error_codes.rs2
-rw-r--r--compiler/rustc_error_codes/src/error_codes/E0045.md4
-rw-r--r--compiler/rustc_error_codes/src/error_codes/E0092.md2
-rw-r--r--compiler/rustc_error_codes/src/error_codes/E0094.md2
-rw-r--r--compiler/rustc_error_codes/src/error_codes/E0161.md7
-rw-r--r--compiler/rustc_error_codes/src/error_codes/E0210.md2
-rw-r--r--compiler/rustc_error_codes/src/error_codes/E0211.md2
-rw-r--r--compiler/rustc_error_codes/src/error_codes/E0311.md42
-rw-r--r--compiler/rustc_error_codes/src/error_codes/E0579.md4
-rw-r--r--compiler/rustc_error_codes/src/error_codes/E0591.md4
-rw-r--r--compiler/rustc_error_codes/src/error_codes/E0622.md2
-rw-r--r--compiler/rustc_error_codes/src/error_codes/E0695.md3
-rw-r--r--compiler/rustc_error_codes/src/error_codes/E0732.md4
-rw-r--r--compiler/rustc_error_codes/src/error_codes/E0743.md2
-rw-r--r--compiler/rustc_error_codes/src/lib.rs2
-rw-r--r--compiler/rustc_error_messages/Cargo.toml1
-rw-r--r--compiler/rustc_error_messages/locales/en-US/ast_lowering.ftl141
-rw-r--r--compiler/rustc_error_messages/locales/en-US/ast_passes.ftl91
-rw-r--r--compiler/rustc_error_messages/locales/en-US/attr.ftl107
-rw-r--r--compiler/rustc_error_messages/locales/en-US/borrowck.ftl58
-rw-r--r--compiler/rustc_error_messages/locales/en-US/builtin_macros.ftl4
-rw-r--r--compiler/rustc_error_messages/locales/en-US/codegen_gcc.ftl68
-rw-r--r--compiler/rustc_error_messages/locales/en-US/codegen_ssa.ftl121
-rw-r--r--compiler/rustc_error_messages/locales/en-US/compiletest.ftl5
-rw-r--r--compiler/rustc_error_messages/locales/en-US/const_eval.ftl78
-rw-r--r--compiler/rustc_error_messages/locales/en-US/driver.ftl13
-rw-r--r--compiler/rustc_error_messages/locales/en-US/errors.ftl13
-rw-r--r--compiler/rustc_error_messages/locales/en-US/expand.ftl21
-rw-r--r--compiler/rustc_error_messages/locales/en-US/hir_analysis.ftl152
-rw-r--r--compiler/rustc_error_messages/locales/en-US/infer.ftl173
-rw-r--r--compiler/rustc_error_messages/locales/en-US/interface.ftl43
-rw-r--r--compiler/rustc_error_messages/locales/en-US/lint.ftl373
-rw-r--r--compiler/rustc_error_messages/locales/en-US/metadata.ftl277
-rw-r--r--compiler/rustc_error_messages/locales/en-US/middle.ftl29
-rw-r--r--compiler/rustc_error_messages/locales/en-US/mir_dataflow.ftl29
-rw-r--r--compiler/rustc_error_messages/locales/en-US/monomorphize.ftl26
-rw-r--r--compiler/rustc_error_messages/locales/en-US/parser.ftl361
-rw-r--r--compiler/rustc_error_messages/locales/en-US/passes.ftl633
-rw-r--r--compiler/rustc_error_messages/locales/en-US/plugin_impl.ftl4
-rw-r--r--compiler/rustc_error_messages/locales/en-US/privacy.ftl20
-rw-r--r--compiler/rustc_error_messages/locales/en-US/query_system.ftl30
-rw-r--r--compiler/rustc_error_messages/locales/en-US/save_analysis.ftl1
-rw-r--r--compiler/rustc_error_messages/locales/en-US/session.ftl60
-rw-r--r--compiler/rustc_error_messages/locales/en-US/symbol_mangling.ftl1
-rw-r--r--compiler/rustc_error_messages/locales/en-US/trait_selection.ftl26
-rw-r--r--compiler/rustc_error_messages/locales/en-US/ty_utils.ftl47
-rw-r--r--compiler/rustc_error_messages/locales/en-US/typeck.ftl125
-rw-r--r--compiler/rustc_error_messages/src/lib.rs88
-rw-r--r--compiler/rustc_errors/Cargo.toml10
-rw-r--r--compiler/rustc_errors/src/annotate_snippet_emitter_writer.rs27
-rw-r--r--compiler/rustc_errors/src/diagnostic.rs256
-rw-r--r--compiler/rustc_errors/src/diagnostic_builder.rs132
-rw-r--r--compiler/rustc_errors/src/diagnostic_impls.rs222
-rw-r--r--compiler/rustc_errors/src/emitter.rs142
-rw-r--r--compiler/rustc_errors/src/json.rs21
-rw-r--r--compiler/rustc_errors/src/lib.rs237
-rw-r--r--compiler/rustc_errors/src/translation.rs117
-rw-r--r--compiler/rustc_expand/src/base.rs117
-rw-r--r--compiler/rustc_expand/src/build.rs47
-rw-r--r--compiler/rustc_expand/src/config.rs84
-rw-r--r--compiler/rustc_expand/src/errors.rs48
-rw-r--r--compiler/rustc_expand/src/expand.rs112
-rw-r--r--compiler/rustc_expand/src/lib.rs5
-rw-r--r--compiler/rustc_expand/src/mbe/macro_parser.rs2
-rw-r--r--compiler/rustc_expand/src/mbe/macro_rules.rs35
-rw-r--r--compiler/rustc_expand/src/mbe/metavar_expr.rs2
-rw-r--r--compiler/rustc_expand/src/mbe/transcribe.rs34
-rw-r--r--compiler/rustc_expand/src/module.rs8
-rw-r--r--compiler/rustc_expand/src/placeholders.rs16
-rw-r--r--compiler/rustc_expand/src/proc_macro_server.rs215
-rw-r--r--compiler/rustc_expand/src/tokenstream/tests.rs18
-rw-r--r--compiler/rustc_feature/Cargo.toml1
-rw-r--r--compiler/rustc_feature/src/accepted.rs12
-rw-r--r--compiler/rustc_feature/src/active.rs32
-rw-r--r--compiler/rustc_feature/src/builtin_attrs.rs74
-rw-r--r--compiler/rustc_feature/src/lib.rs6
-rw-r--r--compiler/rustc_feature/src/removed.rs3
-rw-r--r--compiler/rustc_fs_util/src/lib.rs3
-rw-r--r--compiler/rustc_graphviz/src/lib.rs2
-rw-r--r--compiler/rustc_hir/Cargo.toml1
-rw-r--r--compiler/rustc_hir/src/def.rs144
-rw-r--r--compiler/rustc_hir/src/definitions.rs1
-rw-r--r--compiler/rustc_hir/src/errors.rs10
-rw-r--r--compiler/rustc_hir/src/hir.rs302
-rw-r--r--compiler/rustc_hir/src/hir_id.rs70
-rw-r--r--compiler/rustc_hir/src/intravisit.rs186
-rw-r--r--compiler/rustc_hir/src/lang_items.rs15
-rw-r--r--compiler/rustc_hir/src/lib.rs9
-rw-r--r--compiler/rustc_hir/src/pat_util.rs19
-rw-r--r--compiler/rustc_hir/src/stable_hash_impls.rs27
-rw-r--r--compiler/rustc_hir/src/target.rs15
-rw-r--r--compiler/rustc_hir/src/weak_lang_items.rs6
-rw-r--r--compiler/rustc_hir_analysis/Cargo.toml32
-rw-r--r--compiler/rustc_hir_analysis/README.md (renamed from compiler/rustc_typeck/README.md)0
-rw-r--r--compiler/rustc_hir_analysis/src/astconv/errors.rs411
-rw-r--r--compiler/rustc_hir_analysis/src/astconv/generics.rs662
-rw-r--r--compiler/rustc_hir_analysis/src/astconv/mod.rs3136
-rw-r--r--compiler/rustc_hir_analysis/src/bounds.rs (renamed from compiler/rustc_typeck/src/bounds.rs)0
-rw-r--r--compiler/rustc_hir_analysis/src/check/check.rs1443
-rw-r--r--compiler/rustc_hir_analysis/src/check/compare_method.rs1825
-rw-r--r--compiler/rustc_hir_analysis/src/check/dropck.rs (renamed from compiler/rustc_typeck/src/check/dropck.rs)14
-rw-r--r--compiler/rustc_hir_analysis/src/check/intrinsic.rs (renamed from compiler/rustc_typeck/src/check/intrinsic.rs)60
-rw-r--r--compiler/rustc_hir_analysis/src/check/intrinsicck.rs437
-rw-r--r--compiler/rustc_hir_analysis/src/check/mod.rs515
-rw-r--r--compiler/rustc_hir_analysis/src/check/region.rs (renamed from compiler/rustc_typeck/src/check/region.rs)51
-rw-r--r--compiler/rustc_hir_analysis/src/check/wfcheck.rs (renamed from compiler/rustc_typeck/src/check/wfcheck.rs)367
-rw-r--r--compiler/rustc_hir_analysis/src/check_unused.rs192
-rw-r--r--compiler/rustc_hir_analysis/src/coherence/builtin.rs572
-rw-r--r--compiler/rustc_hir_analysis/src/coherence/inherent_impls.rs (renamed from compiler/rustc_typeck/src/coherence/inherent_impls.rs)18
-rw-r--r--compiler/rustc_hir_analysis/src/coherence/inherent_impls_overlap.rs (renamed from compiler/rustc_typeck/src/coherence/inherent_impls_overlap.rs)58
-rw-r--r--compiler/rustc_hir_analysis/src/coherence/mod.rs (renamed from compiler/rustc_typeck/src/coherence/mod.rs)0
-rw-r--r--compiler/rustc_hir_analysis/src/coherence/orphan.rs (renamed from compiler/rustc_typeck/src/coherence/orphan.rs)38
-rw-r--r--compiler/rustc_hir_analysis/src/coherence/unsafety.rs96
-rw-r--r--compiler/rustc_hir_analysis/src/collect.rs2263
-rw-r--r--compiler/rustc_hir_analysis/src/collect/generics_of.rs481
-rw-r--r--compiler/rustc_hir_analysis/src/collect/item_bounds.rs (renamed from compiler/rustc_typeck/src/collect/item_bounds.rs)16
-rw-r--r--compiler/rustc_hir_analysis/src/collect/lifetimes.rs1888
-rw-r--r--compiler/rustc_hir_analysis/src/collect/predicates_of.rs707
-rw-r--r--compiler/rustc_hir_analysis/src/collect/type_of.rs (renamed from compiler/rustc_typeck/src/collect/type_of.rs)265
-rw-r--r--compiler/rustc_hir_analysis/src/constrained_generic_params.rs (renamed from compiler/rustc_typeck/src/constrained_generic_params.rs)10
-rw-r--r--compiler/rustc_hir_analysis/src/errors.rs282
-rw-r--r--compiler/rustc_hir_analysis/src/hir_wf_check.rs (renamed from compiler/rustc_typeck/src/hir_wf_check.rs)68
-rw-r--r--compiler/rustc_hir_analysis/src/impl_wf_check.rs (renamed from compiler/rustc_typeck/src/impl_wf_check.rs)43
-rw-r--r--compiler/rustc_hir_analysis/src/impl_wf_check/min_specialization.rs (renamed from compiler/rustc_typeck/src/impl_wf_check/min_specialization.rs)83
-rw-r--r--compiler/rustc_hir_analysis/src/lib.rs552
-rw-r--r--compiler/rustc_hir_analysis/src/outlives/explicit.rs (renamed from compiler/rustc_typeck/src/outlives/explicit.rs)0
-rw-r--r--compiler/rustc_hir_analysis/src/outlives/implicit_infer.rs (renamed from compiler/rustc_typeck/src/outlives/implicit_infer.rs)4
-rw-r--r--compiler/rustc_hir_analysis/src/outlives/mod.rs129
-rw-r--r--compiler/rustc_hir_analysis/src/outlives/test.rs21
-rw-r--r--compiler/rustc_hir_analysis/src/outlives/utils.rs186
-rw-r--r--compiler/rustc_hir_analysis/src/structured_errors.rs (renamed from compiler/rustc_typeck/src/structured_errors.rs)0
-rw-r--r--compiler/rustc_hir_analysis/src/structured_errors/missing_cast_for_variadic_arg.rs (renamed from compiler/rustc_typeck/src/structured_errors/missing_cast_for_variadic_arg.rs)0
-rw-r--r--compiler/rustc_hir_analysis/src/structured_errors/sized_unsized_cast.rs (renamed from compiler/rustc_typeck/src/structured_errors/sized_unsized_cast.rs)0
-rw-r--r--compiler/rustc_hir_analysis/src/structured_errors/wrong_number_of_generic_args.rs (renamed from compiler/rustc_typeck/src/structured_errors/wrong_number_of_generic_args.rs)255
-rw-r--r--compiler/rustc_hir_analysis/src/variance/constraints.rs (renamed from compiler/rustc_typeck/src/variance/constraints.rs)14
-rw-r--r--compiler/rustc_hir_analysis/src/variance/mod.rs (renamed from compiler/rustc_typeck/src/variance/mod.rs)0
-rw-r--r--compiler/rustc_hir_analysis/src/variance/solve.rs (renamed from compiler/rustc_typeck/src/variance/solve.rs)0
-rw-r--r--compiler/rustc_hir_analysis/src/variance/terms.rs (renamed from compiler/rustc_typeck/src/variance/terms.rs)0
-rw-r--r--compiler/rustc_hir_analysis/src/variance/test.rs15
-rw-r--r--compiler/rustc_hir_analysis/src/variance/xform.rs (renamed from compiler/rustc_typeck/src/variance/xform.rs)0
-rw-r--r--compiler/rustc_hir_pretty/Cargo.toml1
-rw-r--r--compiler/rustc_hir_pretty/src/lib.rs158
-rw-r--r--compiler/rustc_hir_typeck/Cargo.toml28
-rw-r--r--compiler/rustc_hir_typeck/src/_match.rs (renamed from compiler/rustc_typeck/src/check/_match.rs)131
-rw-r--r--compiler/rustc_hir_typeck/src/autoderef.rs (renamed from compiler/rustc_typeck/src/check/autoderef.rs)0
-rw-r--r--compiler/rustc_hir_typeck/src/callee.rs831
-rw-r--r--compiler/rustc_hir_typeck/src/cast.rs (renamed from compiler/rustc_typeck/src/check/cast.rs)139
-rw-r--r--compiler/rustc_hir_typeck/src/check.rs324
-rw-r--r--compiler/rustc_hir_typeck/src/closure.rs824
-rw-r--r--compiler/rustc_hir_typeck/src/coercion.rs (renamed from compiler/rustc_typeck/src/check/coercion.rs)172
-rw-r--r--compiler/rustc_hir_typeck/src/demand.rs (renamed from compiler/rustc_typeck/src/check/demand.rs)218
-rw-r--r--compiler/rustc_hir_typeck/src/diverges.rs (renamed from compiler/rustc_typeck/src/check/diverges.rs)0
-rw-r--r--compiler/rustc_hir_typeck/src/errors.rs126
-rw-r--r--compiler/rustc_hir_typeck/src/expectation.rs (renamed from compiler/rustc_typeck/src/check/expectation.rs)0
-rw-r--r--compiler/rustc_hir_typeck/src/expr.rs2896
-rw-r--r--compiler/rustc_hir_typeck/src/expr_use_visitor.rs (renamed from compiler/rustc_typeck/src/expr_use_visitor.rs)20
-rw-r--r--compiler/rustc_hir_typeck/src/fallback.rs (renamed from compiler/rustc_typeck/src/check/fallback.rs)4
-rw-r--r--compiler/rustc_hir_typeck/src/fn_ctxt/_impl.rs (renamed from compiler/rustc_typeck/src/check/fn_ctxt/_impl.rs)198
-rw-r--r--compiler/rustc_hir_typeck/src/fn_ctxt/arg_matrix.rs (renamed from compiler/rustc_typeck/src/check/fn_ctxt/arg_matrix.rs)25
-rw-r--r--compiler/rustc_hir_typeck/src/fn_ctxt/checks.rs2236
-rw-r--r--compiler/rustc_hir_typeck/src/fn_ctxt/mod.rs312
-rw-r--r--compiler/rustc_hir_typeck/src/fn_ctxt/suggestions.rs1250
-rw-r--r--compiler/rustc_hir_typeck/src/gather_locals.rs (renamed from compiler/rustc_typeck/src/check/gather_locals.rs)3
-rw-r--r--compiler/rustc_hir_typeck/src/generator_interior/drop_ranges/cfg_build.rs (renamed from compiler/rustc_typeck/src/check/generator_interior/drop_ranges/cfg_build.rs)7
-rw-r--r--compiler/rustc_hir_typeck/src/generator_interior/drop_ranges/cfg_propagate.rs (renamed from compiler/rustc_typeck/src/check/generator_interior/drop_ranges/cfg_propagate.rs)0
-rw-r--r--compiler/rustc_hir_typeck/src/generator_interior/drop_ranges/cfg_visualize.rs (renamed from compiler/rustc_typeck/src/check/generator_interior/drop_ranges/cfg_visualize.rs)0
-rw-r--r--compiler/rustc_hir_typeck/src/generator_interior/drop_ranges/mod.rs309
-rw-r--r--compiler/rustc_hir_typeck/src/generator_interior/drop_ranges/record_consumed_borrow.rs (renamed from compiler/rustc_typeck/src/check/generator_interior/drop_ranges/record_consumed_borrow.rs)19
-rw-r--r--compiler/rustc_hir_typeck/src/generator_interior/mod.rs647
-rw-r--r--compiler/rustc_hir_typeck/src/inherited.rs213
-rw-r--r--compiler/rustc_hir_typeck/src/intrinsicck.rs108
-rw-r--r--compiler/rustc_hir_typeck/src/lib.rs507
-rw-r--r--compiler/rustc_hir_typeck/src/mem_categorization.rs (renamed from compiler/rustc_typeck/src/mem_categorization.rs)24
-rw-r--r--compiler/rustc_hir_typeck/src/method/confirm.rs (renamed from compiler/rustc_typeck/src/check/method/confirm.rs)20
-rw-r--r--compiler/rustc_hir_typeck/src/method/mod.rs625
-rw-r--r--compiler/rustc_hir_typeck/src/method/prelude2021.rs (renamed from compiler/rustc_typeck/src/check/method/prelude2021.rs)122
-rw-r--r--compiler/rustc_hir_typeck/src/method/probe.rs (renamed from compiler/rustc_typeck/src/check/method/probe.rs)180
-rw-r--r--compiler/rustc_hir_typeck/src/method/suggest.rs (renamed from compiler/rustc_typeck/src/check/method/suggest.rs)663
-rw-r--r--compiler/rustc_hir_typeck/src/op.rs994
-rw-r--r--compiler/rustc_hir_typeck/src/pat.rs (renamed from compiler/rustc_typeck/src/check/pat.rs)77
-rw-r--r--compiler/rustc_hir_typeck/src/place_op.rs (renamed from compiler/rustc_typeck/src/check/place_op.rs)4
-rw-r--r--compiler/rustc_hir_typeck/src/rvalue_scopes.rs (renamed from compiler/rustc_typeck/src/check/rvalue_scopes.rs)0
-rw-r--r--compiler/rustc_hir_typeck/src/upvar.rs (renamed from compiler/rustc_typeck/src/check/upvar.rs)34
-rw-r--r--compiler/rustc_hir_typeck/src/writeback.rs (renamed from compiler/rustc_typeck/src/check/writeback.rs)118
-rw-r--r--compiler/rustc_incremental/Cargo.toml1
-rw-r--r--compiler/rustc_incremental/src/lib.rs1
-rw-r--r--compiler/rustc_incremental/src/persist/dirty_clean.rs10
-rw-r--r--compiler/rustc_index/Cargo.toml1
-rw-r--r--compiler/rustc_index/src/lib.rs4
-rw-r--r--compiler/rustc_index/src/vec.rs4
-rw-r--r--compiler/rustc_infer/Cargo.toml1
-rw-r--r--compiler/rustc_infer/src/errors/mod.rs505
-rw-r--r--compiler/rustc_infer/src/errors/note_and_explain.rs177
-rw-r--r--compiler/rustc_infer/src/infer/at.rs13
-rw-r--r--compiler/rustc_infer/src/infer/canonical/canonicalizer.rs19
-rw-r--r--compiler/rustc_infer/src/infer/canonical/mod.rs2
-rw-r--r--compiler/rustc_infer/src/infer/canonical/query_response.rs94
-rw-r--r--compiler/rustc_infer/src/infer/canonical/substitute.rs7
-rw-r--r--compiler/rustc_infer/src/infer/combine.rs99
-rw-r--r--compiler/rustc_infer/src/infer/equate.rs19
-rw-r--r--compiler/rustc_infer/src/infer/error_reporting/mod.rs643
-rw-r--r--compiler/rustc_infer/src/infer/error_reporting/need_type_info.rs421
-rw-r--r--compiler/rustc_infer/src/infer/error_reporting/nice_region_error/different_lifetimes.rs147
-rw-r--r--compiler/rustc_infer/src/infer/error_reporting/nice_region_error/find_anon_type.rs10
-rw-r--r--compiler/rustc_infer/src/infer/error_reporting/nice_region_error/mismatched_static_lifetime.rs60
-rw-r--r--compiler/rustc_infer/src/infer/error_reporting/nice_region_error/mod.rs18
-rw-r--r--compiler/rustc_infer/src/infer/error_reporting/nice_region_error/placeholder_error.rs9
-rw-r--r--compiler/rustc_infer/src/infer/error_reporting/nice_region_error/static_impl_trait.rs37
-rw-r--r--compiler/rustc_infer/src/infer/error_reporting/nice_region_error/trait_impl_difference.rs20
-rw-r--r--compiler/rustc_infer/src/infer/error_reporting/nice_region_error/util.rs2
-rw-r--r--compiler/rustc_infer/src/infer/error_reporting/note.rs151
-rw-r--r--compiler/rustc_infer/src/infer/free_regions.rs13
-rw-r--r--compiler/rustc_infer/src/infer/freshen.rs5
-rw-r--r--compiler/rustc_infer/src/infer/fudge.rs4
-rw-r--r--compiler/rustc_infer/src/infer/glb.rs2
-rw-r--r--compiler/rustc_infer/src/infer/higher_ranked/mod.rs17
-rw-r--r--compiler/rustc_infer/src/infer/lattice.rs2
-rw-r--r--compiler/rustc_infer/src/infer/lexical_region_resolve/mod.rs307
-rw-r--r--compiler/rustc_infer/src/infer/lub.rs2
-rw-r--r--compiler/rustc_infer/src/infer/mod.rs406
-rw-r--r--compiler/rustc_infer/src/infer/nll_relate/mod.rs87
-rw-r--r--compiler/rustc_infer/src/infer/opaque_types.rs87
-rw-r--r--compiler/rustc_infer/src/infer/opaque_types/table.rs2
-rw-r--r--compiler/rustc_infer/src/infer/outlives/components.rs17
-rw-r--r--compiler/rustc_infer/src/infer/outlives/env.rs66
-rw-r--r--compiler/rustc_infer/src/infer/outlives/mod.rs2
-rw-r--r--compiler/rustc_infer/src/infer/outlives/obligations.rs170
-rw-r--r--compiler/rustc_infer/src/infer/outlives/test_type_match.rs15
-rw-r--r--compiler/rustc_infer/src/infer/outlives/verify.rs117
-rw-r--r--compiler/rustc_infer/src/infer/projection.rs2
-rw-r--r--compiler/rustc_infer/src/infer/region_constraints/leak_check.rs2
-rw-r--r--compiler/rustc_infer/src/infer/region_constraints/mod.rs41
-rw-r--r--compiler/rustc_infer/src/infer/resolve.rs25
-rw-r--r--compiler/rustc_infer/src/infer/sub.rs48
-rw-r--r--compiler/rustc_infer/src/infer/type_variable.rs1
-rw-r--r--compiler/rustc_infer/src/infer/undo_log.rs2
-rw-r--r--compiler/rustc_infer/src/lib.rs6
-rw-r--r--compiler/rustc_infer/src/traits/engine.rs15
-rw-r--r--compiler/rustc_infer/src/traits/error_reporting/mod.rs2
-rw-r--r--compiler/rustc_infer/src/traits/mod.rs10
-rw-r--r--compiler/rustc_infer/src/traits/structural_impls.rs1
-rw-r--r--compiler/rustc_infer/src/traits/util.rs12
-rw-r--r--compiler/rustc_interface/Cargo.toml5
-rw-r--r--compiler/rustc_interface/src/errors.rs89
-rw-r--r--compiler/rustc_interface/src/interface.rs117
-rw-r--r--compiler/rustc_interface/src/lib.rs7
-rw-r--r--compiler/rustc_interface/src/passes.rs126
-rw-r--r--compiler/rustc_interface/src/proc_macro_decls.rs5
-rw-r--r--compiler/rustc_interface/src/queries.rs24
-rw-r--r--compiler/rustc_interface/src/tests.rs54
-rw-r--r--compiler/rustc_interface/src/util.rs126
-rw-r--r--compiler/rustc_lexer/Cargo.toml1
-rw-r--r--compiler/rustc_lexer/src/cursor.rs16
-rw-r--r--compiler/rustc_lexer/src/lib.rs80
-rw-r--r--compiler/rustc_lexer/src/unescape.rs27
-rw-r--r--compiler/rustc_lint/Cargo.toml2
-rw-r--r--compiler/rustc_lint/src/array_into_iter.rs65
-rw-r--r--compiler/rustc_lint/src/builtin.rs1049
-rw-r--r--compiler/rustc_lint/src/context.rs224
-rw-r--r--compiler/rustc_lint/src/early.rs36
-rw-r--r--compiler/rustc_lint/src/enum_intrinsics_non_enums.rs26
-rw-r--r--compiler/rustc_lint/src/errors.rs150
-rw-r--r--compiler/rustc_lint/src/expect.rs14
-rw-r--r--compiler/rustc_lint/src/for_loops_over_fallibles.rs183
-rw-r--r--compiler/rustc_lint/src/hidden_unicode_codepoints.rs98
-rw-r--r--compiler/rustc_lint/src/internal.rs233
-rw-r--r--compiler/rustc_lint/src/late.rs84
-rw-r--r--compiler/rustc_lint/src/let_underscore.rs168
-rw-r--r--compiler/rustc_lint/src/levels.rs1037
-rw-r--r--compiler/rustc_lint/src/lib.rs68
-rw-r--r--compiler/rustc_lint/src/methods.rs37
-rw-r--r--compiler/rustc_lint/src/non_ascii_idents.rs68
-rw-r--r--compiler/rustc_lint/src/non_fmt_panic.rs113
-rw-r--r--compiler/rustc_lint/src/nonstandard_style.rs136
-rw-r--r--compiler/rustc_lint/src/noop_method_call.rs30
-rw-r--r--compiler/rustc_lint/src/opaque_hidden_inferred_bound.rs163
-rw-r--r--compiler/rustc_lint/src/pass_by_value.rs18
-rw-r--r--compiler/rustc_lint/src/passes.rs14
-rw-r--r--compiler/rustc_lint/src/redundant_semicolon.rs19
-rw-r--r--compiler/rustc_lint/src/traits.rs36
-rw-r--r--compiler/rustc_lint/src/types.rs442
-rw-r--r--compiler/rustc_lint/src/unused.rs255
-rw-r--r--compiler/rustc_lint_defs/src/builtin.rs101
-rw-r--r--compiler/rustc_lint_defs/src/lib.rs61
-rw-r--r--compiler/rustc_llvm/build.rs11
-rw-r--r--compiler/rustc_llvm/llvm-wrapper/ArchiveWrapper.cpp13
-rw-r--r--compiler/rustc_llvm/llvm-wrapper/CoverageMappingWrapper.cpp11
-rw-r--r--compiler/rustc_llvm/llvm-wrapper/PassWrapper.cpp304
-rw-r--r--compiler/rustc_llvm/llvm-wrapper/RustWrapper.cpp114
-rw-r--r--compiler/rustc_llvm/src/lib.rs2
-rw-r--r--compiler/rustc_log/src/lib.rs3
-rw-r--r--compiler/rustc_macros/Cargo.toml2
-rw-r--r--compiler/rustc_macros/src/diagnostics/diagnostic.rs243
-rw-r--r--compiler/rustc_macros/src/diagnostics/diagnostic_builder.rs769
-rw-r--r--compiler/rustc_macros/src/diagnostics/fluent.rs91
-rw-r--r--compiler/rustc_macros/src/diagnostics/mod.rs52
-rw-r--r--compiler/rustc_macros/src/diagnostics/subdiagnostic.rs810
-rw-r--r--compiler/rustc_macros/src/diagnostics/utils.rs480
-rw-r--r--compiler/rustc_macros/src/lib.rs36
-rw-r--r--compiler/rustc_macros/src/query.rs536
-rw-r--r--compiler/rustc_macros/src/symbols.rs2
-rw-r--r--compiler/rustc_metadata/Cargo.toml1
-rw-r--r--compiler/rustc_metadata/src/creader.rs56
-rw-r--r--compiler/rustc_metadata/src/dependency_format.rs79
-rw-r--r--compiler/rustc_metadata/src/errors.rs713
-rw-r--r--compiler/rustc_metadata/src/foreign_modules.rs6
-rw-r--r--compiler/rustc_metadata/src/fs.rs25
-rw-r--r--compiler/rustc_metadata/src/lib.rs9
-rw-r--r--compiler/rustc_metadata/src/locator.rs329
-rw-r--r--compiler/rustc_metadata/src/native_libs.rs349
-rw-r--r--compiler/rustc_metadata/src/rmeta/decoder.rs479
-rw-r--r--compiler/rustc_metadata/src/rmeta/decoder/cstore_impl.rs74
-rw-r--r--compiler/rustc_metadata/src/rmeta/encoder.rs1193
-rw-r--r--compiler/rustc_metadata/src/rmeta/mod.rs64
-rw-r--r--compiler/rustc_metadata/src/rmeta/table.rs19
-rw-r--r--compiler/rustc_middle/Cargo.toml29
-rw-r--r--compiler/rustc_middle/benches/lib.rs54
-rw-r--r--compiler/rustc_middle/src/arena.rs9
-rw-r--r--compiler/rustc_middle/src/dep_graph/dep_node.rs189
-rw-r--r--compiler/rustc_middle/src/dep_graph/mod.rs52
-rw-r--r--compiler/rustc_middle/src/error.rs57
-rw-r--r--compiler/rustc_middle/src/hir/map/mod.rs220
-rw-r--r--compiler/rustc_middle/src/hir/mod.rs36
-rw-r--r--compiler/rustc_middle/src/infer/canonical.rs27
-rw-r--r--compiler/rustc_middle/src/infer/unify_key.rs2
-rw-r--r--compiler/rustc_middle/src/lib.rs9
-rw-r--r--compiler/rustc_middle/src/lint.rs329
-rw-r--r--compiler/rustc_middle/src/macros.rs13
-rw-r--r--compiler/rustc_middle/src/metadata.rs3
-rw-r--r--compiler/rustc_middle/src/middle/lang_items.rs6
-rw-r--r--compiler/rustc_middle/src/middle/limits.rs10
-rw-r--r--compiler/rustc_middle/src/middle/privacy.rs214
-rw-r--r--compiler/rustc_middle/src/middle/resolve_lifetime.rs25
-rw-r--r--compiler/rustc_middle/src/middle/stability.rs88
-rw-r--r--compiler/rustc_middle/src/mir/basic_blocks.rs2
-rw-r--r--compiler/rustc_middle/src/mir/generic_graph.rs4
-rw-r--r--compiler/rustc_middle/src/mir/interpret/allocation.rs331
-rw-r--r--compiler/rustc_middle/src/mir/interpret/error.rs32
-rw-r--r--compiler/rustc_middle/src/mir/interpret/mod.rs6
-rw-r--r--compiler/rustc_middle/src/mir/interpret/pointer.rs17
-rw-r--r--compiler/rustc_middle/src/mir/interpret/queries.rs44
-rw-r--r--compiler/rustc_middle/src/mir/interpret/value.rs163
-rw-r--r--compiler/rustc_middle/src/mir/mod.rs296
-rw-r--r--compiler/rustc_middle/src/mir/mono.rs6
-rw-r--r--compiler/rustc_middle/src/mir/patch.rs36
-rw-r--r--compiler/rustc_middle/src/mir/pretty.rs59
-rw-r--r--compiler/rustc_middle/src/mir/query.rs35
-rw-r--r--compiler/rustc_middle/src/mir/spanview.rs4
-rw-r--r--compiler/rustc_middle/src/mir/syntax.rs225
-rw-r--r--compiler/rustc_middle/src/mir/tcx.rs5
-rw-r--r--compiler/rustc_middle/src/mir/terminator.rs4
-rw-r--r--compiler/rustc_middle/src/mir/traversal.rs4
-rw-r--r--compiler/rustc_middle/src/mir/type_foldable.rs223
-rw-r--r--compiler/rustc_middle/src/mir/type_visitable.rs181
-rw-r--r--compiler/rustc_middle/src/mir/visit.rs193
-rw-r--r--compiler/rustc_middle/src/query/mod.rs466
-rw-r--r--compiler/rustc_middle/src/thir.rs237
-rw-r--r--compiler/rustc_middle/src/thir/visit.rs29
-rw-r--r--compiler/rustc_middle/src/traits/mod.rs58
-rw-r--r--compiler/rustc_middle/src/traits/query.rs16
-rw-r--r--compiler/rustc_middle/src/traits/select.rs9
-rw-r--r--compiler/rustc_middle/src/traits/specialization_graph.rs2
-rw-r--r--compiler/rustc_middle/src/ty/abstract_const.rs14
-rw-r--r--compiler/rustc_middle/src/ty/adjustment.rs11
-rw-r--r--compiler/rustc_middle/src/ty/adt.rs27
-rw-r--r--compiler/rustc_middle/src/ty/assoc.rs2
-rw-r--r--compiler/rustc_middle/src/ty/binding.rs14
-rw-r--r--compiler/rustc_middle/src/ty/cast.rs32
-rw-r--r--compiler/rustc_middle/src/ty/codec.rs1
-rw-r--r--compiler/rustc_middle/src/ty/consts.rs56
-rw-r--r--compiler/rustc_middle/src/ty/consts/kind.rs56
-rw-r--r--compiler/rustc_middle/src/ty/consts/valtree.rs5
-rw-r--r--compiler/rustc_middle/src/ty/context.rs224
-rw-r--r--compiler/rustc_middle/src/ty/diagnostics.rs26
-rw-r--r--compiler/rustc_middle/src/ty/erase_regions.rs5
-rw-r--r--compiler/rustc_middle/src/ty/error.rs53
-rw-r--r--compiler/rustc_middle/src/ty/fast_reject.rs11
-rw-r--r--compiler/rustc_middle/src/ty/flags.rs34
-rw-r--r--compiler/rustc_middle/src/ty/fold.rs83
-rw-r--r--compiler/rustc_middle/src/ty/generics.rs29
-rw-r--r--compiler/rustc_middle/src/ty/impls_ty.rs2
-rw-r--r--compiler/rustc_middle/src/ty/inhabitedness/def_id_forest.rs145
-rw-r--r--compiler/rustc_middle/src/ty/inhabitedness/inhabited_predicate.rs204
-rw-r--r--compiler/rustc_middle/src/ty/inhabitedness/mod.rs262
-rw-r--r--compiler/rustc_middle/src/ty/instance.rs10
-rw-r--r--compiler/rustc_middle/src/ty/layout.rs2421
-rw-r--r--compiler/rustc_middle/src/ty/list.rs4
-rw-r--r--compiler/rustc_middle/src/ty/mod.rs461
-rw-r--r--compiler/rustc_middle/src/ty/normalize_erasing_regions.rs28
-rw-r--r--compiler/rustc_middle/src/ty/opaque_types.rs218
-rw-r--r--compiler/rustc_middle/src/ty/parameterized.rs14
-rw-r--r--compiler/rustc_middle/src/ty/print/mod.rs13
-rw-r--r--compiler/rustc_middle/src/ty/print/pretty.rs338
-rw-r--r--compiler/rustc_middle/src/ty/query.rs60
-rw-r--r--compiler/rustc_middle/src/ty/relate.rs47
-rw-r--r--compiler/rustc_middle/src/ty/rvalue_scopes.rs2
-rw-r--r--compiler/rustc_middle/src/ty/structural_impls.rs549
-rw-r--r--compiler/rustc_middle/src/ty/sty.rs201
-rw-r--r--compiler/rustc_middle/src/ty/subst.rs142
-rw-r--r--compiler/rustc_middle/src/ty/trait_def.rs1
-rw-r--r--compiler/rustc_middle/src/ty/util.rs51
-rw-r--r--compiler/rustc_middle/src/ty/visit.rs44
-rw-r--r--compiler/rustc_middle/src/ty/vtable.rs8
-rw-r--r--compiler/rustc_middle/src/ty/walk.rs24
-rw-r--r--compiler/rustc_middle/src/values.rs202
-rw-r--r--compiler/rustc_mir_build/Cargo.toml1
-rw-r--r--compiler/rustc_mir_build/src/build/block.rs220
-rw-r--r--compiler/rustc_mir_build/src/build/expr/as_constant.rs40
-rw-r--r--compiler/rustc_mir_build/src/build/expr/as_operand.rs5
-rw-r--r--compiler/rustc_mir_build/src/build/expr/as_place.rs188
-rw-r--r--compiler/rustc_mir_build/src/build/expr/as_rvalue.rs45
-rw-r--r--compiler/rustc_mir_build/src/build/expr/as_temp.rs10
-rw-r--r--compiler/rustc_mir_build/src/build/expr/into.rs28
-rw-r--r--compiler/rustc_mir_build/src/build/expr/stmt.rs22
-rw-r--r--compiler/rustc_mir_build/src/build/matches/mod.rs233
-rw-r--r--compiler/rustc_mir_build/src/build/matches/simplify.rs51
-rw-r--r--compiler/rustc_mir_build/src/build/matches/test.rs71
-rw-r--r--compiler/rustc_mir_build/src/build/matches/util.rs61
-rw-r--r--compiler/rustc_mir_build/src/build/mod.rs500
-rw-r--r--compiler/rustc_mir_build/src/build/scope.rs42
-rw-r--r--compiler/rustc_mir_build/src/check_unsafety.rs49
-rw-r--r--compiler/rustc_mir_build/src/lib.rs2
-rw-r--r--compiler/rustc_mir_build/src/lints.rs24
-rw-r--r--compiler/rustc_mir_build/src/thir/constant.rs2
-rw-r--r--compiler/rustc_mir_build/src/thir/cx/block.rs18
-rw-r--r--compiler/rustc_mir_build/src/thir/cx/expr.rs79
-rw-r--r--compiler/rustc_mir_build/src/thir/cx/mod.rs114
-rw-r--r--compiler/rustc_mir_build/src/thir/pattern/check_match.rs149
-rw-r--r--compiler/rustc_mir_build/src/thir/pattern/const_to_pat.rs101
-rw-r--r--compiler/rustc_mir_build/src/thir/pattern/deconstruct_pat.rs67
-rw-r--r--compiler/rustc_mir_build/src/thir/pattern/mod.rs135
-rw-r--r--compiler/rustc_mir_build/src/thir/pattern/usefulness.rs47
-rw-r--r--compiler/rustc_mir_dataflow/Cargo.toml4
-rw-r--r--compiler/rustc_mir_dataflow/src/elaborate_drops.rs5
-rw-r--r--compiler/rustc_mir_dataflow/src/errors.rs71
-rw-r--r--compiler/rustc_mir_dataflow/src/framework/engine.rs22
-rw-r--r--compiler/rustc_mir_dataflow/src/framework/graphviz.rs4
-rw-r--r--compiler/rustc_mir_dataflow/src/framework/mod.rs3
-rw-r--r--compiler/rustc_mir_dataflow/src/framework/tests.rs10
-rw-r--r--compiler/rustc_mir_dataflow/src/impls/liveness.rs107
-rw-r--r--compiler/rustc_mir_dataflow/src/impls/storage_liveness.rs2
-rw-r--r--compiler/rustc_mir_dataflow/src/lib.rs4
-rw-r--r--compiler/rustc_mir_dataflow/src/move_paths/abs_domain.rs1
-rw-r--r--compiler/rustc_mir_dataflow/src/move_paths/builder.rs4
-rw-r--r--compiler/rustc_mir_dataflow/src/move_paths/mod.rs2
-rw-r--r--compiler/rustc_mir_dataflow/src/rustc_peek.rs30
-rw-r--r--compiler/rustc_mir_dataflow/src/storage.rs2
-rw-r--r--compiler/rustc_mir_transform/Cargo.toml1
-rw-r--r--compiler/rustc_mir_transform/src/abort_unwinding_calls.rs2
-rw-r--r--compiler/rustc_mir_transform/src/add_call_guards.rs2
-rw-r--r--compiler/rustc_mir_transform/src/add_moves_for_packed_drops.rs2
-rw-r--r--compiler/rustc_mir_transform/src/add_retag.rs19
-rw-r--r--compiler/rustc_mir_transform/src/check_const_item_mutation.rs18
-rw-r--r--compiler/rustc_mir_transform/src/check_packed_ref.rs40
-rw-r--r--compiler/rustc_mir_transform/src/check_unsafety.rs115
-rw-r--r--compiler/rustc_mir_transform/src/cleanup_post_borrowck.rs2
-rw-r--r--compiler/rustc_mir_transform/src/const_goto.rs4
-rw-r--r--compiler/rustc_mir_transform/src/const_prop.rs131
-rw-r--r--compiler/rustc_mir_transform/src/const_prop_lint.rs95
-rw-r--r--compiler/rustc_mir_transform/src/coverage/graph.rs4
-rw-r--r--compiler/rustc_mir_transform/src/coverage/mod.rs4
-rw-r--r--compiler/rustc_mir_transform/src/coverage/query.rs4
-rw-r--r--compiler/rustc_mir_transform/src/coverage/spans.rs2
-rw-r--r--compiler/rustc_mir_transform/src/coverage/test_macros/Cargo.toml1
-rw-r--r--compiler/rustc_mir_transform/src/coverage/tests.rs6
-rw-r--r--compiler/rustc_mir_transform/src/dead_store_elimination.rs2
-rw-r--r--compiler/rustc_mir_transform/src/deaggregator.rs4
-rw-r--r--compiler/rustc_mir_transform/src/deduce_param_attrs.rs248
-rw-r--r--compiler/rustc_mir_transform/src/deduplicate_blocks.rs5
-rw-r--r--compiler/rustc_mir_transform/src/deref_separator.rs23
-rw-r--r--compiler/rustc_mir_transform/src/dest_prop.rs8
-rw-r--r--compiler/rustc_mir_transform/src/early_otherwise_branch.rs6
-rw-r--r--compiler/rustc_mir_transform/src/elaborate_box_derefs.rs41
-rw-r--r--compiler/rustc_mir_transform/src/elaborate_drops.rs16
-rw-r--r--compiler/rustc_mir_transform/src/ffi_unwind_calls.rs16
-rw-r--r--compiler/rustc_mir_transform/src/function_item_references.rs22
-rw-r--r--compiler/rustc_mir_transform/src/generator.rs111
-rw-r--r--compiler/rustc_mir_transform/src/inline.rs416
-rw-r--r--compiler/rustc_mir_transform/src/inline/cycle.rs2
-rw-r--r--compiler/rustc_mir_transform/src/lib.rs180
-rw-r--r--compiler/rustc_mir_transform/src/lower_intrinsics.rs31
-rw-r--r--compiler/rustc_mir_transform/src/marker.rs20
-rw-r--r--compiler/rustc_mir_transform/src/multiple_return_terminators.rs2
-rw-r--r--compiler/rustc_mir_transform/src/normalize_array_len.rs4
-rw-r--r--compiler/rustc_mir_transform/src/nrvo.rs8
-rw-r--r--compiler/rustc_mir_transform/src/pass_manager.rs118
-rw-r--r--compiler/rustc_mir_transform/src/remove_noop_landing_pads.rs4
-rw-r--r--compiler/rustc_mir_transform/src/remove_uninit_drops.rs2
-rw-r--r--compiler/rustc_mir_transform/src/required_consts.rs11
-rw-r--r--compiler/rustc_mir_transform/src/reveal_all.rs2
-rw-r--r--compiler/rustc_mir_transform/src/separate_const_switch.rs8
-rw-r--r--compiler/rustc_mir_transform/src/shim.rs135
-rw-r--r--compiler/rustc_mir_transform/src/simplify.rs10
-rw-r--r--compiler/rustc_mir_transform/src/simplify_comparison_integral.rs2
-rw-r--r--compiler/rustc_mir_transform/src/simplify_try.rs4
-rw-r--r--compiler/rustc_mir_transform/src/uninhabited_enum_branching.rs6
-rw-r--r--compiler/rustc_mir_transform/src/unreachable_prop.rs76
-rw-r--r--compiler/rustc_monomorphize/Cargo.toml5
-rw-r--r--compiler/rustc_monomorphize/src/collector.rs202
-rw-r--r--compiler/rustc_monomorphize/src/errors.rs84
-rw-r--r--compiler/rustc_monomorphize/src/lib.rs6
-rw-r--r--compiler/rustc_monomorphize/src/partitioning/default.rs6
-rw-r--r--compiler/rustc_monomorphize/src/partitioning/mod.rs15
-rw-r--r--compiler/rustc_monomorphize/src/polymorphize.rs64
-rw-r--r--compiler/rustc_monomorphize/src/util.rs2
-rw-r--r--compiler/rustc_parse/Cargo.toml1
-rw-r--r--compiler/rustc_parse/src/errors.rs1237
-rw-r--r--compiler/rustc_parse/src/lexer/mod.rs399
-rw-r--r--compiler/rustc_parse/src/lexer/tokentrees.rs428
-rw-r--r--compiler/rustc_parse/src/lexer/unescape_error_reporting.rs27
-rw-r--r--compiler/rustc_parse/src/lib.rs10
-rw-r--r--compiler/rustc_parse/src/parser/attr.rs176
-rw-r--r--compiler/rustc_parse/src/parser/attr_wrapper.rs109
-rw-r--r--compiler/rustc_parse/src/parser/diagnostics.rs628
-rw-r--r--compiler/rustc_parse/src/parser/expr.rs1282
-rw-r--r--compiler/rustc_parse/src/parser/generics.rs45
-rw-r--r--compiler/rustc_parse/src/parser/item.rs353
-rw-r--r--compiler/rustc_parse/src/parser/mod.rs196
-rw-r--r--compiler/rustc_parse/src/parser/nonterminal.rs24
-rw-r--r--compiler/rustc_parse/src/parser/pat.rs83
-rw-r--r--compiler/rustc_parse/src/parser/path.rs22
-rw-r--r--compiler/rustc_parse/src/parser/stmt.rs211
-rw-r--r--compiler/rustc_parse/src/parser/ty.rs50
-rw-r--r--compiler/rustc_parse_format/src/lib.rs117
-rw-r--r--compiler/rustc_parse_format/src/tests.rs58
-rw-r--r--compiler/rustc_passes/src/check_attr.rs353
-rw-r--r--compiler/rustc_passes/src/check_const.rs17
-rw-r--r--compiler/rustc_passes/src/dead.rs158
-rw-r--r--compiler/rustc_passes/src/debugger_visualizer.rs15
-rw-r--r--compiler/rustc_passes/src/diagnostic_items.rs45
-rw-r--r--compiler/rustc_passes/src/entry.rs187
-rw-r--r--compiler/rustc_passes/src/errors.rs1120
-rw-r--r--compiler/rustc_passes/src/hir_id_validator.rs25
-rw-r--r--compiler/rustc_passes/src/hir_stats.rs540
-rw-r--r--compiler/rustc_passes/src/lang_items.rs211
-rw-r--r--compiler/rustc_passes/src/layout_test.rs77
-rw-r--r--compiler/rustc_passes/src/lib.rs3
-rw-r--r--compiler/rustc_passes/src/lib_features.rs59
-rw-r--r--compiler/rustc_passes/src/liveness.rs271
-rw-r--r--compiler/rustc_passes/src/loops.rs161
-rw-r--r--compiler/rustc_passes/src/naked_functions.rs178
-rw-r--r--compiler/rustc_passes/src/reachable.rs53
-rw-r--r--compiler/rustc_passes/src/stability.rs392
-rw-r--r--compiler/rustc_passes/src/weak_lang_items.rs31
-rw-r--r--compiler/rustc_plugin_impl/Cargo.toml2
-rw-r--r--compiler/rustc_plugin_impl/src/errors.rs20
-rw-r--r--compiler/rustc_plugin_impl/src/lib.rs3
-rw-r--r--compiler/rustc_plugin_impl/src/load.rs16
-rw-r--r--compiler/rustc_privacy/Cargo.toml2
-rw-r--r--compiler/rustc_privacy/src/errors.rs44
-rw-r--r--compiler/rustc_privacy/src/lib.rs462
-rw-r--r--compiler/rustc_query_impl/Cargo.toml4
-rw-r--r--compiler/rustc_query_impl/src/keys.rs42
-rw-r--r--compiler/rustc_query_impl/src/lib.rs23
-rw-r--r--compiler/rustc_query_impl/src/on_disk_cache.rs89
-rw-r--r--compiler/rustc_query_impl/src/plumbing.rs561
-rw-r--r--compiler/rustc_query_impl/src/profiling_support.rs25
-rw-r--r--compiler/rustc_query_impl/src/values.rs45
-rw-r--r--compiler/rustc_query_system/Cargo.toml9
-rw-r--r--compiler/rustc_query_system/src/dep_graph/dep_node.rs73
-rw-r--r--compiler/rustc_query_system/src/dep_graph/mod.rs45
-rw-r--r--compiler/rustc_query_system/src/error.rs93
-rw-r--r--compiler/rustc_query_system/src/ich/hcx.rs28
-rw-r--r--compiler/rustc_query_system/src/ich/impls_hir.rs24
-rw-r--r--compiler/rustc_query_system/src/ich/impls_syntax.rs8
-rw-r--r--compiler/rustc_query_system/src/lib.rs10
-rw-r--r--compiler/rustc_query_system/src/query/config.rs22
-rw-r--r--compiler/rustc_query_system/src/query/job.rs90
-rw-r--r--compiler/rustc_query_system/src/query/mod.rs15
-rw-r--r--compiler/rustc_query_system/src/query/plumbing.rs93
-rw-r--r--compiler/rustc_query_system/src/values.rs15
-rw-r--r--compiler/rustc_resolve/Cargo.toml1
-rw-r--r--compiler/rustc_resolve/src/access_levels.rs237
-rw-r--r--compiler/rustc_resolve/src/build_reduced_graph.rs63
-rw-r--r--compiler/rustc_resolve/src/check_unused.rs4
-rw-r--r--compiler/rustc_resolve/src/def_collector.rs30
-rw-r--r--compiler/rustc_resolve/src/diagnostics.rs200
-rw-r--r--compiler/rustc_resolve/src/effective_visibilities.rs188
-rw-r--r--compiler/rustc_resolve/src/ident.rs105
-rw-r--r--compiler/rustc_resolve/src/imports.rs141
-rw-r--r--compiler/rustc_resolve/src/late.rs594
-rw-r--r--compiler/rustc_resolve/src/late/diagnostics.rs984
-rw-r--r--compiler/rustc_resolve/src/late/lifetimes.rs2144
-rw-r--r--compiler/rustc_resolve/src/lib.rs167
-rw-r--r--compiler/rustc_resolve/src/macros.rs56
-rw-r--r--compiler/rustc_save_analysis/Cargo.toml2
-rw-r--r--compiler/rustc_save_analysis/src/dump_visitor.rs102
-rw-r--r--compiler/rustc_save_analysis/src/errors.rs10
-rw-r--r--compiler/rustc_save_analysis/src/lib.rs70
-rw-r--r--compiler/rustc_save_analysis/src/sig.rs22
-rw-r--r--compiler/rustc_serialize/Cargo.toml1
-rw-r--r--compiler/rustc_serialize/src/collection_impls.rs20
-rw-r--r--compiler/rustc_serialize/src/lib.rs4
-rw-r--r--compiler/rustc_serialize/src/opaque.rs4
-rw-r--r--compiler/rustc_serialize/src/serialize.rs28
-rw-r--r--compiler/rustc_session/Cargo.toml1
-rw-r--r--compiler/rustc_session/src/cgu_reuse_tracker.rs44
-rw-r--r--compiler/rustc_session/src/config.rs49
-rw-r--r--compiler/rustc_session/src/config/sigpipe.rs25
-rw-r--r--compiler/rustc_session/src/cstore.rs35
-rw-r--r--compiler/rustc_session/src/errors.rs193
-rw-r--r--compiler/rustc_session/src/filesearch.rs1
-rw-r--r--compiler/rustc_session/src/lib.rs7
-rw-r--r--compiler/rustc_session/src/options.rs207
-rw-r--r--compiler/rustc_session/src/output.rs35
-rw-r--r--compiler/rustc_session/src/parse.rs134
-rw-r--r--compiler/rustc_session/src/session.rs261
-rw-r--r--compiler/rustc_session/src/utils.rs11
-rw-r--r--compiler/rustc_smir/src/lib.rs2
-rw-r--r--compiler/rustc_smir/src/mir.rs12
-rw-r--r--compiler/rustc_span/Cargo.toml1
-rw-r--r--compiler/rustc_span/src/def_id.rs35
-rw-r--r--compiler/rustc_span/src/hygiene.rs29
-rw-r--r--compiler/rustc_span/src/lib.rs49
-rw-r--r--compiler/rustc_span/src/source_map.rs151
-rw-r--r--compiler/rustc_span/src/source_map/tests.rs47
-rw-r--r--compiler/rustc_span/src/span_encoding.rs42
-rw-r--r--compiler/rustc_span/src/symbol.rs84
-rw-r--r--compiler/rustc_symbol_mangling/Cargo.toml3
-rw-r--r--compiler/rustc_symbol_mangling/src/errors.rs34
-rw-r--r--compiler/rustc_symbol_mangling/src/legacy.rs2
-rw-r--r--compiler/rustc_symbol_mangling/src/lib.rs8
-rw-r--r--compiler/rustc_symbol_mangling/src/test.rs34
-rw-r--r--compiler/rustc_symbol_mangling/src/typeid/typeid_itanium_cxx_abi.rs26
-rw-r--r--compiler/rustc_symbol_mangling/src/v0.rs18
-rw-r--r--compiler/rustc_target/Cargo.toml3
-rw-r--r--compiler/rustc_target/src/abi/call/aarch64.rs43
-rw-r--r--compiler/rustc_target/src/abi/call/amdgpu.rs2
-rw-r--r--compiler/rustc_target/src/abi/call/arm.rs2
-rw-r--r--compiler/rustc_target/src/abi/call/avr.rs2
-rw-r--r--compiler/rustc_target/src/abi/call/bpf.rs2
-rw-r--r--compiler/rustc_target/src/abi/call/hexagon.rs2
-rw-r--r--compiler/rustc_target/src/abi/call/m68k.rs2
-rw-r--r--compiler/rustc_target/src/abi/call/mips.rs8
-rw-r--r--compiler/rustc_target/src/abi/call/mips64.rs2
-rw-r--r--compiler/rustc_target/src/abi/call/mod.rs49
-rw-r--r--compiler/rustc_target/src/abi/call/msp430.rs2
-rw-r--r--compiler/rustc_target/src/abi/call/nvptx.rs33
-rw-r--r--compiler/rustc_target/src/abi/call/nvptx64.rs4
-rw-r--r--compiler/rustc_target/src/abi/call/powerpc.rs2
-rw-r--r--compiler/rustc_target/src/abi/call/powerpc64.rs2
-rw-r--r--compiler/rustc_target/src/abi/call/riscv.rs2
-rw-r--r--compiler/rustc_target/src/abi/call/s390x.rs2
-rw-r--r--compiler/rustc_target/src/abi/call/sparc.rs8
-rw-r--r--compiler/rustc_target/src/abi/call/sparc64.rs2
-rw-r--r--compiler/rustc_target/src/abi/call/wasm.rs4
-rw-r--r--compiler/rustc_target/src/abi/call/x86.rs6
-rw-r--r--compiler/rustc_target/src/abi/call/x86_64.rs2
-rw-r--r--compiler/rustc_target/src/abi/call/x86_win64.rs2
-rw-r--r--compiler/rustc_target/src/abi/mod.rs137
-rw-r--r--compiler/rustc_target/src/lib.rs3
-rw-r--r--compiler/rustc_target/src/spec/aarch64_apple_darwin.rs8
-rw-r--r--compiler/rustc_target/src/spec/aarch64_apple_ios_macabi.rs4
-rw-r--r--compiler/rustc_target/src/spec/aarch64_nintendo_switch_freestanding.rs5
-rw-r--r--compiler/rustc_target/src/spec/aarch64_pc_windows_gnullvm.rs2
-rw-r--r--compiler/rustc_target/src/spec/aarch64_pc_windows_msvc.rs2
-rw-r--r--compiler/rustc_target/src/spec/aarch64_unknown_none.rs4
-rw-r--r--compiler/rustc_target/src/spec/aarch64_unknown_none_softfloat.rs4
-rw-r--r--compiler/rustc_target/src/spec/aarch64_unknown_uefi.rs6
-rw-r--r--compiler/rustc_target/src/spec/aarch64_uwp_windows_msvc.rs2
-rw-r--r--compiler/rustc_target/src/spec/abi.rs138
-rw-r--r--compiler/rustc_target/src/spec/android_base.rs7
-rw-r--r--compiler/rustc_target/src/spec/apple_base.rs60
-rw-r--r--compiler/rustc_target/src/spec/apple_sdk_base.rs45
-rw-r--r--compiler/rustc_target/src/spec/arm64_32_apple_watchos.rs2
-rw-r--r--compiler/rustc_target/src/spec/armeb_unknown_linux_gnueabi.rs19
-rw-r--r--compiler/rustc_target/src/spec/armebv7r_none_eabi.rs5
-rw-r--r--compiler/rustc_target/src/spec/armebv7r_none_eabihf.rs5
-rw-r--r--compiler/rustc_target/src/spec/armv4t_none_eabi.rs56
-rw-r--r--compiler/rustc_target/src/spec/armv5te_none_eabi.rs41
-rw-r--r--compiler/rustc_target/src/spec/armv6k_nintendo_3ds.rs5
-rw-r--r--compiler/rustc_target/src/spec/armv7_linux_androideabi.rs4
-rw-r--r--compiler/rustc_target/src/spec/armv7a_none_eabi.rs4
-rw-r--r--compiler/rustc_target/src/spec/armv7a_none_eabihf.rs4
-rw-r--r--compiler/rustc_target/src/spec/armv7r_none_eabi.rs5
-rw-r--r--compiler/rustc_target/src/spec/armv7r_none_eabihf.rs5
-rw-r--r--compiler/rustc_target/src/spec/asmjs_unknown_emscripten.rs2
-rw-r--r--compiler/rustc_target/src/spec/avr_gnu_base.rs10
-rw-r--r--compiler/rustc_target/src/spec/bpf_base.rs2
-rw-r--r--compiler/rustc_target/src/spec/crt_objects.rs40
-rw-r--r--compiler/rustc_target/src/spec/fuchsia_base.rs11
-rw-r--r--compiler/rustc_target/src/spec/hermit_base.rs6
-rw-r--r--compiler/rustc_target/src/spec/hexagon_unknown_linux_musl.rs5
-rw-r--r--compiler/rustc_target/src/spec/i386_apple_ios.rs3
-rw-r--r--compiler/rustc_target/src/spec/i686_apple_darwin.rs10
-rw-r--r--compiler/rustc_target/src/spec/i686_linux_android.rs3
-rw-r--r--compiler/rustc_target/src/spec/i686_pc_windows_gnu.rs9
-rw-r--r--compiler/rustc_target/src/spec/i686_pc_windows_msvc.rs4
-rw-r--r--compiler/rustc_target/src/spec/i686_unknown_freebsd.rs7
-rw-r--r--compiler/rustc_target/src/spec/i686_unknown_haiku.rs7
-rw-r--r--compiler/rustc_target/src/spec/i686_unknown_linux_gnu.rs8
-rw-r--r--compiler/rustc_target/src/spec/i686_unknown_linux_musl.rs7
-rw-r--r--compiler/rustc_target/src/spec/i686_unknown_netbsd.rs7
-rw-r--r--compiler/rustc_target/src/spec/i686_unknown_openbsd.rs7
-rw-r--r--compiler/rustc_target/src/spec/i686_uwp_windows_gnu.rs9
-rw-r--r--compiler/rustc_target/src/spec/i686_wrs_vxworks.rs7
-rw-r--r--compiler/rustc_target/src/spec/illumos_base.rs6
-rw-r--r--compiler/rustc_target/src/spec/l4re_base.rs6
-rw-r--r--compiler/rustc_target/src/spec/linux_base.rs8
-rw-r--r--compiler/rustc_target/src/spec/linux_kernel_base.rs3
-rw-r--r--compiler/rustc_target/src/spec/linux_musl_base.rs8
-rw-r--r--compiler/rustc_target/src/spec/mipsel_sony_psp.rs10
-rw-r--r--compiler/rustc_target/src/spec/mipsel_unknown_none.rs5
-rw-r--r--compiler/rustc_target/src/spec/mod.rs603
-rw-r--r--compiler/rustc_target/src/spec/msp430_none_elf.rs4
-rw-r--r--compiler/rustc_target/src/spec/msvc_base.rs11
-rw-r--r--compiler/rustc_target/src/spec/nvptx64_nvidia_cuda.rs3
-rw-r--r--compiler/rustc_target/src/spec/powerpc64_unknown_freebsd.rs5
-rw-r--r--compiler/rustc_target/src/spec/powerpc64_unknown_linux_gnu.rs9
-rw-r--r--compiler/rustc_target/src/spec/powerpc64_unknown_linux_musl.rs5
-rw-r--r--compiler/rustc_target/src/spec/powerpc64_unknown_openbsd.rs18
-rw-r--r--compiler/rustc_target/src/spec/powerpc64_wrs_vxworks.rs5
-rw-r--r--compiler/rustc_target/src/spec/powerpc64le_unknown_freebsd.rs5
-rw-r--r--compiler/rustc_target/src/spec/powerpc64le_unknown_linux_gnu.rs5
-rw-r--r--compiler/rustc_target/src/spec/powerpc64le_unknown_linux_musl.rs5
-rw-r--r--compiler/rustc_target/src/spec/powerpc_unknown_freebsd.rs9
-rw-r--r--compiler/rustc_target/src/spec/powerpc_unknown_linux_gnu.rs5
-rw-r--r--compiler/rustc_target/src/spec/powerpc_unknown_linux_gnuspe.rs5
-rw-r--r--compiler/rustc_target/src/spec/powerpc_unknown_linux_musl.rs5
-rw-r--r--compiler/rustc_target/src/spec/powerpc_unknown_netbsd.rs5
-rw-r--r--compiler/rustc_target/src/spec/powerpc_unknown_openbsd.rs3
-rw-r--r--compiler/rustc_target/src/spec/powerpc_wrs_vxworks.rs5
-rw-r--r--compiler/rustc_target/src/spec/powerpc_wrs_vxworks_spe.rs5
-rw-r--r--compiler/rustc_target/src/spec/riscv32i_unknown_none_elf.rs5
-rw-r--r--compiler/rustc_target/src/spec/riscv32im_unknown_none_elf.rs5
-rw-r--r--compiler/rustc_target/src/spec/riscv32imac_unknown_none_elf.rs5
-rw-r--r--compiler/rustc_target/src/spec/riscv32imac_unknown_xous_elf.rs5
-rw-r--r--compiler/rustc_target/src/spec/riscv32imc_esp_espidf.rs4
-rw-r--r--compiler/rustc_target/src/spec/riscv32imc_unknown_none_elf.rs5
-rw-r--r--compiler/rustc_target/src/spec/riscv64gc_unknown_none_elf.rs6
-rw-r--r--compiler/rustc_target/src/spec/riscv64gc_unknown_openbsd.rs18
-rw-r--r--compiler/rustc_target/src/spec/riscv64imac_unknown_none_elf.rs6
-rw-r--r--compiler/rustc_target/src/spec/s390x_unknown_linux_gnu.rs11
-rw-r--r--compiler/rustc_target/src/spec/s390x_unknown_linux_musl.rs11
-rw-r--r--compiler/rustc_target/src/spec/solaris_base.rs4
-rw-r--r--compiler/rustc_target/src/spec/sparc64_unknown_netbsd.rs4
-rw-r--r--compiler/rustc_target/src/spec/sparc64_unknown_openbsd.rs4
-rw-r--r--compiler/rustc_target/src/spec/sparc_unknown_linux_gnu.rs4
-rw-r--r--compiler/rustc_target/src/spec/sparcv9_sun_solaris.rs4
-rw-r--r--compiler/rustc_target/src/spec/tests/tests_impl.rs155
-rw-r--r--compiler/rustc_target/src/spec/thumb_base.rs5
-rw-r--r--compiler/rustc_target/src/spec/thumbv4t_none_eabi.rs11
-rw-r--r--compiler/rustc_target/src/spec/thumbv5te_none_eabi.rs41
-rw-r--r--compiler/rustc_target/src/spec/thumbv6m_none_eabi.rs4
-rw-r--r--compiler/rustc_target/src/spec/thumbv7a_pc_windows_msvc.rs4
-rw-r--r--compiler/rustc_target/src/spec/thumbv7neon_linux_androideabi.rs4
-rw-r--r--compiler/rustc_target/src/spec/uefi_msvc_base.rs6
-rw-r--r--compiler/rustc_target/src/spec/wasm32_unknown_emscripten.rs4
-rw-r--r--compiler/rustc_target/src/spec/wasm32_unknown_unknown.rs8
-rw-r--r--compiler/rustc_target/src/spec/wasm32_wasi.rs10
-rw-r--r--compiler/rustc_target/src/spec/wasm64_unknown_unknown.rs8
-rw-r--r--compiler/rustc_target/src/spec/wasm_base.rs14
-rw-r--r--compiler/rustc_target/src/spec/windows_gnu_base.rs41
-rw-r--r--compiler/rustc_target/src/spec/windows_gnullvm_base.rs12
-rw-r--r--compiler/rustc_target/src/spec/windows_uwp_gnu_base.rs7
-rw-r--r--compiler/rustc_target/src/spec/windows_uwp_msvc_base.rs4
-rw-r--r--compiler/rustc_target/src/spec/x86_64_apple_darwin.rs13
-rw-r--r--compiler/rustc_target/src/spec/x86_64_apple_ios.rs3
-rw-r--r--compiler/rustc_target/src/spec/x86_64_apple_ios_macabi.rs7
-rw-r--r--compiler/rustc_target/src/spec/x86_64_apple_tvos.rs3
-rw-r--r--compiler/rustc_target/src/spec/x86_64_apple_watchos_sim.rs3
-rw-r--r--compiler/rustc_target/src/spec/x86_64_fortanix_unknown_sgx.rs8
-rw-r--r--compiler/rustc_target/src/spec/x86_64_fuchsia.rs3
-rw-r--r--compiler/rustc_target/src/spec/x86_64_linux_android.rs7
-rw-r--r--compiler/rustc_target/src/spec/x86_64_pc_solaris.rs7
-rw-r--r--compiler/rustc_target/src/spec/x86_64_pc_windows_gnu.rs9
-rw-r--r--compiler/rustc_target/src/spec/x86_64_pc_windows_gnullvm.rs4
-rw-r--r--compiler/rustc_target/src/spec/x86_64_sun_solaris.rs7
-rw-r--r--compiler/rustc_target/src/spec/x86_64_unknown_dragonfly.rs7
-rw-r--r--compiler/rustc_target/src/spec/x86_64_unknown_freebsd.rs7
-rw-r--r--compiler/rustc_target/src/spec/x86_64_unknown_haiku.rs7
-rw-r--r--compiler/rustc_target/src/spec/x86_64_unknown_hermit.rs3
-rw-r--r--compiler/rustc_target/src/spec/x86_64_unknown_illumos.rs4
-rw-r--r--compiler/rustc_target/src/spec/x86_64_unknown_l4re_uclibc.rs2
-rw-r--r--compiler/rustc_target/src/spec/x86_64_unknown_linux_gnu.rs7
-rw-r--r--compiler/rustc_target/src/spec/x86_64_unknown_linux_gnux32.rs7
-rw-r--r--compiler/rustc_target/src/spec/x86_64_unknown_linux_musl.rs7
-rw-r--r--compiler/rustc_target/src/spec/x86_64_unknown_netbsd.rs7
-rw-r--r--compiler/rustc_target/src/spec/x86_64_unknown_none.rs12
-rw-r--r--compiler/rustc_target/src/spec/x86_64_unknown_none_linuxkernel.rs4
-rw-r--r--compiler/rustc_target/src/spec/x86_64_unknown_openbsd.rs7
-rw-r--r--compiler/rustc_target/src/spec/x86_64_unknown_redox.rs7
-rw-r--r--compiler/rustc_target/src/spec/x86_64_uwp_windows_gnu.rs9
-rw-r--r--compiler/rustc_target/src/spec/x86_64_wrs_vxworks.rs7
-rw-r--r--compiler/rustc_trait_selection/Cargo.toml1
-rw-r--r--compiler/rustc_trait_selection/src/autoderef.rs23
-rw-r--r--compiler/rustc_trait_selection/src/errors.rs102
-rw-r--r--compiler/rustc_trait_selection/src/infer.rs43
-rw-r--r--compiler/rustc_trait_selection/src/lib.rs4
-rw-r--r--compiler/rustc_trait_selection/src/traits/auto_trait.rs246
-rw-r--r--compiler/rustc_trait_selection/src/traits/chalk_fulfill.rs11
-rw-r--r--compiler/rustc_trait_selection/src/traits/codegen.rs93
-rw-r--r--compiler/rustc_trait_selection/src/traits/coherence.rs172
-rw-r--r--compiler/rustc_trait_selection/src/traits/const_evaluatable.rs67
-rw-r--r--compiler/rustc_trait_selection/src/traits/engine.rs51
-rw-r--r--compiler/rustc_trait_selection/src/traits/error_reporting/mod.rs921
-rw-r--r--compiler/rustc_trait_selection/src/traits/error_reporting/on_unimplemented.rs18
-rw-r--r--compiler/rustc_trait_selection/src/traits/error_reporting/suggestions.rs716
-rw-r--r--compiler/rustc_trait_selection/src/traits/fulfill.rs115
-rw-r--r--compiler/rustc_trait_selection/src/traits/misc.rs112
-rw-r--r--compiler/rustc_trait_selection/src/traits/mod.rs312
-rw-r--r--compiler/rustc_trait_selection/src/traits/object_safety.rs202
-rw-r--r--compiler/rustc_trait_selection/src/traits/on_unimplemented.rs71
-rw-r--r--compiler/rustc_trait_selection/src/traits/outlives_bounds.rs115
-rw-r--r--compiler/rustc_trait_selection/src/traits/project.rs301
-rw-r--r--compiler/rustc_trait_selection/src/traits/query/evaluate_obligation.rs2
-rw-r--r--compiler/rustc_trait_selection/src/traits/query/normalize.rs99
-rw-r--r--compiler/rustc_trait_selection/src/traits/query/type_op/custom.rs23
-rw-r--r--compiler/rustc_trait_selection/src/traits/query/type_op/mod.rs8
-rw-r--r--compiler/rustc_trait_selection/src/traits/relationships.rs2
-rw-r--r--compiler/rustc_trait_selection/src/traits/select/candidate_assembly.rs122
-rw-r--r--compiler/rustc_trait_selection/src/traits/select/confirmation.rs120
-rw-r--r--compiler/rustc_trait_selection/src/traits/select/mod.rs253
-rw-r--r--compiler/rustc_trait_selection/src/traits/specialize/mod.rs134
-rw-r--r--compiler/rustc_trait_selection/src/traits/specialize/specialization_graph.rs48
-rw-r--r--compiler/rustc_trait_selection/src/traits/structural_match.rs9
-rw-r--r--compiler/rustc_trait_selection/src/traits/util.rs39
-rw-r--r--compiler/rustc_trait_selection/src/traits/wf.rs81
-rw-r--r--compiler/rustc_traits/src/chalk/db.rs2
-rw-r--r--compiler/rustc_traits/src/chalk/lowering.rs13
-rw-r--r--compiler/rustc_traits/src/dropck_outlives.rs220
-rw-r--r--compiler/rustc_traits/src/evaluate_obligation.rs20
-rw-r--r--compiler/rustc_traits/src/implied_outlives_bounds.rs76
-rw-r--r--compiler/rustc_traits/src/lib.rs3
-rw-r--r--compiler/rustc_traits/src/normalize_erasing_regions.rs46
-rw-r--r--compiler/rustc_traits/src/type_op.rs68
-rw-r--r--compiler/rustc_transmute/Cargo.toml5
-rw-r--r--compiler/rustc_transmute/src/layout/dfa.rs1
-rw-r--r--compiler/rustc_transmute/src/layout/nfa.rs6
-rw-r--r--compiler/rustc_transmute/src/layout/tree.rs58
-rw-r--r--compiler/rustc_transmute/src/lib.rs82
-rw-r--r--compiler/rustc_transmute/src/maybe_transmutable/mod.rs6
-rw-r--r--compiler/rustc_transmute/src/maybe_transmutable/query_context.rs2
-rw-r--r--compiler/rustc_transmute/src/maybe_transmutable/tests.rs4
-rw-r--r--compiler/rustc_ty_utils/Cargo.toml3
-rw-r--r--compiler/rustc_ty_utils/src/abi.rs551
-rw-r--r--compiler/rustc_ty_utils/src/assoc.rs18
-rw-r--r--compiler/rustc_ty_utils/src/common_traits.rs11
-rw-r--r--compiler/rustc_ty_utils/src/consts.rs210
-rw-r--r--compiler/rustc_ty_utils/src/errors.rs69
-rw-r--r--compiler/rustc_ty_utils/src/implied_bounds.rs61
-rw-r--r--compiler/rustc_ty_utils/src/instance.rs192
-rw-r--r--compiler/rustc_ty_utils/src/layout.rs1803
-rw-r--r--compiler/rustc_ty_utils/src/layout_sanity_check.rs303
-rw-r--r--compiler/rustc_ty_utils/src/lib.rs11
-rw-r--r--compiler/rustc_ty_utils/src/needs_drop.rs12
-rw-r--r--compiler/rustc_ty_utils/src/representability.rs451
-rw-r--r--compiler/rustc_ty_utils/src/ty.rs26
-rw-r--r--compiler/rustc_type_ir/Cargo.toml1
-rw-r--r--compiler/rustc_type_ir/src/lib.rs57
-rw-r--r--compiler/rustc_type_ir/src/sty.rs109
-rw-r--r--compiler/rustc_typeck/Cargo.toml32
-rw-r--r--compiler/rustc_typeck/src/astconv/errors.rs410
-rw-r--r--compiler/rustc_typeck/src/astconv/generics.rs664
-rw-r--r--compiler/rustc_typeck/src/astconv/mod.rs3091
-rw-r--r--compiler/rustc_typeck/src/check/callee.rs675
-rw-r--r--compiler/rustc_typeck/src/check/check.rs1712
-rw-r--r--compiler/rustc_typeck/src/check/closure.rs805
-rw-r--r--compiler/rustc_typeck/src/check/compare_method.rs1547
-rw-r--r--compiler/rustc_typeck/src/check/expr.rs2824
-rw-r--r--compiler/rustc_typeck/src/check/fn_ctxt/checks.rs1900
-rw-r--r--compiler/rustc_typeck/src/check/fn_ctxt/mod.rs296
-rw-r--r--compiler/rustc_typeck/src/check/fn_ctxt/suggestions.rs912
-rw-r--r--compiler/rustc_typeck/src/check/generator_interior.rs632
-rw-r--r--compiler/rustc_typeck/src/check/generator_interior/drop_ranges.rs309
-rw-r--r--compiler/rustc_typeck/src/check/inherited.rs183
-rw-r--r--compiler/rustc_typeck/src/check/intrinsicck.rs530
-rw-r--r--compiler/rustc_typeck/src/check/method/mod.rs658
-rw-r--r--compiler/rustc_typeck/src/check/mod.rs970
-rw-r--r--compiler/rustc_typeck/src/check/op.rs1076
-rw-r--r--compiler/rustc_typeck/src/check/regionck.rs47
-rw-r--r--compiler/rustc_typeck/src/check_unused.rs196
-rw-r--r--compiler/rustc_typeck/src/coherence/builtin.rs603
-rw-r--r--compiler/rustc_typeck/src/coherence/unsafety.rs66
-rw-r--r--compiler/rustc_typeck/src/collect.rs3361
-rw-r--r--compiler/rustc_typeck/src/errors.rs326
-rw-r--r--compiler/rustc_typeck/src/lib.rs579
-rw-r--r--compiler/rustc_typeck/src/outlives/mod.rs130
-rw-r--r--compiler/rustc_typeck/src/outlives/outlives_bounds.rs90
-rw-r--r--compiler/rustc_typeck/src/outlives/test.rs21
-rw-r--r--compiler/rustc_typeck/src/outlives/utils.rs175
-rw-r--r--compiler/rustc_typeck/src/variance/test.rs14
1188 files changed, 83864 insertions, 65190 deletions
diff --git a/compiler/rustc/src/main.rs b/compiler/rustc/src/main.rs
index 0de1a7819..e21c9b660 100644
--- a/compiler/rustc/src/main.rs
+++ b/compiler/rustc/src/main.rs
@@ -1,3 +1,5 @@
+#![feature(unix_sigpipe)]
+
// A note about jemalloc: rustc uses jemalloc when built for CI and
// distribution. The obvious way to do this is with the `#[global_allocator]`
// mechanism. However, for complicated reasons (see
@@ -23,6 +25,7 @@
// libraries. So we must reference jemalloc symbols one way or another, because
// this file is the only object code in the rustc executable.
+#[unix_sigpipe = "sig_dfl"]
fn main() {
// See the comment at the top of this file for an explanation of this.
#[cfg(feature = "jemalloc-sys")]
@@ -58,6 +61,5 @@ fn main() {
}
}
- rustc_driver::set_sigpipe_handler();
rustc_driver::main()
}
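
The hunks above swap rustc's runtime call to `rustc_driver::set_sigpipe_handler()` for the (then-unstable) `unix_sigpipe` attribute. A minimal sketch of the same opt-in in a standalone nightly binary, assuming only what the diff itself shows (the `unix_sigpipe` feature gate plus the attribute on `main`):

```rust
// Minimal sketch (nightly-only at the time of this diff): restore the default
// SIGPIPE disposition declaratively instead of installing a handler at runtime.
#![feature(unix_sigpipe)]

#[unix_sigpipe = "sig_dfl"]
fn main() {
    // With "sig_dfl", writing to a closed pipe (e.g. `prog | head`) terminates
    // the process via SIGPIPE instead of surfacing a BrokenPipe error.
    println!("hello");
}
```
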
diff --git a/compiler/rustc_apfloat/src/lib.rs b/compiler/rustc_apfloat/src/lib.rs
index cfc3d5b15..dde368e7b 100644
--- a/compiler/rustc_apfloat/src/lib.rs
+++ b/compiler/rustc_apfloat/src/lib.rs
@@ -33,6 +33,8 @@
#![doc(html_root_url = "https://doc.rust-lang.org/nightly/nightly-rustc/")]
#![no_std]
#![forbid(unsafe_code)]
+#![deny(rustc::untranslatable_diagnostic)]
+#![deny(rustc::diagnostic_outside_of_impl)]
#[macro_use]
extern crate alloc;
diff --git a/compiler/rustc_arena/src/lib.rs b/compiler/rustc_arena/src/lib.rs
index a5f1cbc96..46dbbd83d 100644
--- a/compiler/rustc_arena/src/lib.rs
+++ b/compiler/rustc_arena/src/lib.rs
@@ -16,10 +16,12 @@
#![feature(maybe_uninit_slice)]
#![feature(min_specialization)]
#![feature(decl_macro)]
+#![feature(pointer_byte_offsets)]
#![feature(rustc_attrs)]
#![cfg_attr(test, feature(test))]
#![feature(strict_provenance)]
-#![feature(ptr_const_cast)]
+#![deny(rustc::untranslatable_diagnostic)]
+#![deny(rustc::diagnostic_outside_of_impl)]
use smallvec::SmallVec;
@@ -210,7 +212,7 @@ impl<T> TypedArena<T> {
unsafe {
if mem::size_of::<T>() == 0 {
- self.ptr.set((self.ptr.get() as *mut u8).wrapping_offset(1) as *mut T);
+ self.ptr.set(self.ptr.get().wrapping_byte_add(1));
let ptr = ptr::NonNull::<T>::dangling().as_ptr();
// Don't drop the object. This `write` is equivalent to `forget`.
ptr::write(ptr, object);
@@ -218,7 +220,7 @@ impl<T> TypedArena<T> {
} else {
let ptr = self.ptr.get();
// Advance the pointer.
- self.ptr.set(self.ptr.get().offset(1));
+ self.ptr.set(self.ptr.get().add(1));
// Write into uninitialized memory.
ptr::write(ptr, object);
&mut *ptr
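
The arena hunks above replace the cast-through-`*mut u8` byte bump with `wrapping_byte_add`, provided by the newly enabled `pointer_byte_offsets` feature gate (stabilized in later releases). A small sketch of the equivalence outside rustc; `bump_one_byte` is a hypothetical helper, not part of `rustc_arena`:

```rust
// Both forms advance a typed pointer by exactly one byte; the second is the
// spelling the diff switches to.
fn bump_one_byte<T>(p: *mut T) -> *mut T {
    let via_cast = (p as *mut u8).wrapping_offset(1) as *mut T;
    let via_byte_add = p.wrapping_byte_add(1);
    debug_assert_eq!(via_cast, via_byte_add);
    via_byte_add
}

fn main() {
    let mut x = 0u64;
    let p: *mut u64 = &mut x;
    assert_eq!(bump_one_byte(p) as usize, p as usize + 1);
}
```
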
diff --git a/compiler/rustc_ast/Cargo.toml b/compiler/rustc_ast/Cargo.toml
index 9822e9864..fcbf96818 100644
--- a/compiler/rustc_ast/Cargo.toml
+++ b/compiler/rustc_ast/Cargo.toml
@@ -4,15 +4,15 @@ version = "0.0.0"
edition = "2021"
[lib]
-doctest = false
[dependencies]
-rustc_serialize = { path = "../rustc_serialize" }
-tracing = "0.1"
-rustc_span = { path = "../rustc_span" }
+bitflags = "1.2.1"
rustc_data_structures = { path = "../rustc_data_structures" }
rustc_index = { path = "../rustc_index" }
rustc_lexer = { path = "../rustc_lexer" }
rustc_macros = { path = "../rustc_macros" }
+rustc_serialize = { path = "../rustc_serialize" }
+rustc_span = { path = "../rustc_span" }
smallvec = { version = "1.8.1", features = ["union", "may_dangle"] }
-bitflags = "1.2.1"
+thin-vec = "0.2.8"
+tracing = "0.1"
diff --git a/compiler/rustc_ast/src/ast.rs b/compiler/rustc_ast/src/ast.rs
index 870a7c0be..4ef43735a 100644
--- a/compiler/rustc_ast/src/ast.rs
+++ b/compiler/rustc_ast/src/ast.rs
@@ -24,22 +24,19 @@ pub use UnsafeSource::*;
use crate::ptr::P;
use crate::token::{self, CommentKind, Delimiter};
-use crate::tokenstream::{DelimSpan, LazyTokenStream, TokenStream};
-
+use crate::tokenstream::{DelimSpan, LazyAttrTokenStream, TokenStream};
use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
use rustc_data_structures::stack::ensure_sufficient_stack;
use rustc_data_structures::sync::Lrc;
-use rustc_data_structures::thin_vec::ThinVec;
use rustc_macros::HashStable_Generic;
use rustc_serialize::{Decodable, Decoder, Encodable, Encoder};
use rustc_span::source_map::{respan, Spanned};
use rustc_span::symbol::{kw, sym, Ident, Symbol};
use rustc_span::{Span, DUMMY_SP};
-
-use std::cmp::Ordering;
use std::convert::TryFrom;
use std::fmt;
use std::mem;
+use thin_vec::ThinVec;
/// A "Label" is an identifier of some point in sources,
/// e.g. in the following code:
@@ -94,7 +91,7 @@ pub struct Path {
/// The segments in the path: the things separated by `::`.
/// Global paths begin with `kw::PathRoot`.
pub segments: Vec<PathSegment>,
- pub tokens: Option<LazyTokenStream>,
+ pub tokens: Option<LazyAttrTokenStream>,
}
impl PartialEq<Symbol> for Path {
@@ -326,46 +323,17 @@ pub type GenericBounds = Vec<GenericBound>;
/// Specifies the enforced ordering for generic parameters. In the future,
/// if we wanted to relax this order, we could override `PartialEq` and
/// `PartialOrd`, to allow the kinds to be unordered.
-#[derive(Hash, Clone, Copy)]
+#[derive(Hash, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
pub enum ParamKindOrd {
Lifetime,
- Type,
- Const,
- // `Infer` is not actually constructed directly from the AST, but is implicitly constructed
- // during HIR lowering, and `ParamKindOrd` will implicitly order inferred variables last.
- Infer,
+ TypeOrConst,
}
-impl Ord for ParamKindOrd {
- fn cmp(&self, other: &Self) -> Ordering {
- use ParamKindOrd::*;
- let to_int = |v| match v {
- Lifetime => 0,
- Infer | Type | Const => 1,
- };
-
- to_int(*self).cmp(&to_int(*other))
- }
-}
-impl PartialOrd for ParamKindOrd {
- fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
- Some(self.cmp(other))
- }
-}
-impl PartialEq for ParamKindOrd {
- fn eq(&self, other: &Self) -> bool {
- self.cmp(other) == Ordering::Equal
- }
-}
-impl Eq for ParamKindOrd {}
-
impl fmt::Display for ParamKindOrd {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
ParamKindOrd::Lifetime => "lifetime".fmt(f),
- ParamKindOrd::Type => "type".fmt(f),
- ParamKindOrd::Const { .. } => "const".fmt(f),
- ParamKindOrd::Infer => "infer".fmt(f),
+ ParamKindOrd::TypeOrConst => "type and const".fmt(f),
}
}
}
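
Replacing the hand-rolled `Ord`/`PartialOrd`/`PartialEq`/`Eq` impls with derives works because the two remaining variants are declared in the order the old `to_int` mapping encoded (lifetimes first). A self-contained check, using a local stand-in rather than the rustc type:

```rust
// Local stand-in for ast::ParamKindOrd, only to show that the derived ordering
// matches the old `to_int` scheme (Lifetime = 0, everything else = 1).
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Debug)]
enum ParamKindOrd {
    Lifetime,
    TypeOrConst,
}

fn main() {
    assert!(ParamKindOrd::Lifetime < ParamKindOrd::TypeOrConst);
    assert_eq!(ParamKindOrd::Lifetime.min(ParamKindOrd::TypeOrConst), ParamKindOrd::Lifetime);
}
```
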
@@ -497,7 +465,6 @@ pub struct WhereRegionPredicate {
/// E.g., `T = int`.
#[derive(Clone, Encodable, Decodable, Debug)]
pub struct WhereEqPredicate {
- pub id: NodeId,
pub span: Span,
pub lhs_ty: P<Ty>,
pub rhs_ty: P<Ty>,
@@ -505,7 +472,7 @@ pub struct WhereEqPredicate {
#[derive(Clone, Encodable, Decodable, Debug)]
pub struct Crate {
- pub attrs: Vec<Attribute>,
+ pub attrs: AttrVec,
pub items: Vec<P<Item>>,
pub spans: ModSpans,
/// Must be equal to `CRATE_NODE_ID` after the crate root is expanded, but may hold
@@ -567,7 +534,7 @@ pub struct Block {
/// Distinguishes between `unsafe { ... }` and `{ ... }`.
pub rules: BlockCheckMode,
pub span: Span,
- pub tokens: Option<LazyTokenStream>,
+ pub tokens: Option<LazyAttrTokenStream>,
/// The following *isn't* a parse error, but will cause multiple errors in following stages.
/// ```compile_fail
/// let x = {
@@ -586,7 +553,7 @@ pub struct Pat {
pub id: NodeId,
pub kind: PatKind,
pub span: Span,
- pub tokens: Option<LazyTokenStream>,
+ pub tokens: Option<LazyAttrTokenStream>,
}
impl Pat {
@@ -597,7 +564,7 @@ impl Pat {
// In a type expression `_` is an inference variable.
PatKind::Wild => TyKind::Infer,
// An IDENT pattern with no binding mode would be valid as path to a type. E.g. `u32`.
- PatKind::Ident(BindingMode::ByValue(Mutability::Not), ident, None) => {
+ PatKind::Ident(BindingAnnotation::NONE, ident, None) => {
TyKind::Path(None, Path::from_ident(*ident))
}
PatKind::Path(qself, path) => TyKind::Path(qself.clone(), path.clone()),
@@ -684,10 +651,43 @@ pub struct PatField {
pub is_placeholder: bool,
}
-#[derive(Clone, PartialEq, Encodable, Decodable, Debug, Copy)]
-pub enum BindingMode {
- ByRef(Mutability),
- ByValue(Mutability),
+#[derive(Clone, Copy, Debug, Eq, PartialEq)]
+#[derive(Encodable, Decodable, HashStable_Generic)]
+pub enum ByRef {
+ Yes,
+ No,
+}
+
+impl From<bool> for ByRef {
+ fn from(b: bool) -> ByRef {
+ match b {
+ false => ByRef::No,
+ true => ByRef::Yes,
+ }
+ }
+}
+
+/// Explicit binding annotations given in the HIR for a binding. Note
+/// that this is not the final binding *mode* that we infer after type
+/// inference.
+#[derive(Clone, Copy, Debug, Eq, PartialEq)]
+#[derive(Encodable, Decodable, HashStable_Generic)]
+pub struct BindingAnnotation(pub ByRef, pub Mutability);
+
+impl BindingAnnotation {
+ pub const NONE: Self = Self(ByRef::No, Mutability::Not);
+ pub const REF: Self = Self(ByRef::Yes, Mutability::Not);
+ pub const MUT: Self = Self(ByRef::No, Mutability::Mut);
+ pub const REF_MUT: Self = Self(ByRef::Yes, Mutability::Mut);
+
+ pub fn prefix_str(self) -> &'static str {
+ match self {
+ Self::NONE => "",
+ Self::REF => "ref ",
+ Self::MUT => "mut ",
+ Self::REF_MUT => "ref mut ",
+ }
+ }
}
#[derive(Clone, Encodable, Decodable, Debug)]
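
The hunk above folds the four `BindingMode` variants into a `BindingAnnotation(ByRef, Mutability)` pair with named constants. A self-contained mini version of that mapping (local stand-in types, not the rustc definitions), with the old variant and the surface pattern syntax noted alongside each constant:

```rust
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
enum ByRef { Yes, No }

#[derive(Clone, Copy, PartialEq, Eq, Debug)]
enum Mutability { Not, Mut }

#[derive(Clone, Copy, PartialEq, Eq, Debug)]
struct BindingAnnotation(ByRef, Mutability);

#[allow(dead_code)]
impl BindingAnnotation {
    const NONE: Self = Self(ByRef::No, Mutability::Not);     // was ByValue(Not):  `x`
    const REF: Self = Self(ByRef::Yes, Mutability::Not);     // was ByRef(Not):    `ref x`
    const MUT: Self = Self(ByRef::No, Mutability::Mut);      // was ByValue(Mut):  `mut x`
    const REF_MUT: Self = Self(ByRef::Yes, Mutability::Mut); // was ByRef(Mut):    `ref mut x`
}

fn main() {
    assert_eq!(BindingAnnotation::REF_MUT, BindingAnnotation(ByRef::Yes, Mutability::Mut));
}
```
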
@@ -716,7 +716,7 @@ pub enum PatKind {
/// or a unit struct/variant pattern, or a const pattern (in the last two cases the third
/// field must be `None`). Disambiguation cannot be done with parser alone, so it happens
/// during name resolution.
- Ident(BindingMode, Ident, Option<P<Pat>>),
+ Ident(BindingAnnotation, Ident, Option<P<Pat>>),
/// A struct or struct variant pattern (e.g., `Variant {x, y, ..}`).
/// The `bool` is `true` in the presence of a `..`.
@@ -771,7 +771,7 @@ pub enum PatKind {
Paren(P<Pat>),
/// A macro pattern; pre-expansion.
- MacCall(MacCall),
+ MacCall(P<MacCall>),
}
#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, Copy)]
@@ -937,8 +937,8 @@ impl Stmt {
/// a trailing semicolon.
///
/// This only modifies the parsed AST struct, not the attached
- /// `LazyTokenStream`. The parser is responsible for calling
- /// `CreateTokenStream::add_trailing_semi` when there is actually
+ /// `LazyAttrTokenStream`. The parser is responsible for calling
+ /// `ToAttrTokenStream::add_trailing_semi` when there is actually
/// a semicolon in the tokenstream.
pub fn add_trailing_semicolon(mut self) -> Self {
self.kind = match self.kind {
@@ -981,10 +981,10 @@ pub enum StmtKind {
#[derive(Clone, Encodable, Decodable, Debug)]
pub struct MacCallStmt {
- pub mac: MacCall,
+ pub mac: P<MacCall>,
pub style: MacStmtStyle,
pub attrs: AttrVec,
- pub tokens: Option<LazyTokenStream>,
+ pub tokens: Option<LazyAttrTokenStream>,
}
#[derive(Clone, Copy, PartialEq, Encodable, Decodable, Debug)]
@@ -1009,7 +1009,7 @@ pub struct Local {
pub kind: LocalKind,
pub span: Span,
pub attrs: AttrVec,
- pub tokens: Option<LazyTokenStream>,
+ pub tokens: Option<LazyAttrTokenStream>,
}
#[derive(Clone, Encodable, Decodable, Debug)]
@@ -1108,28 +1108,10 @@ pub struct Expr {
pub kind: ExprKind,
pub span: Span,
pub attrs: AttrVec,
- pub tokens: Option<LazyTokenStream>,
+ pub tokens: Option<LazyAttrTokenStream>,
}
impl Expr {
- /// Returns `true` if this expression would be valid somewhere that expects a value;
- /// for example, an `if` condition.
- pub fn returns(&self) -> bool {
- if let ExprKind::Block(ref block, _) = self.kind {
- match block.stmts.last().map(|last_stmt| &last_stmt.kind) {
- // Implicit return
- Some(StmtKind::Expr(_)) => true,
- // Last statement is an explicit return?
- Some(StmtKind::Semi(expr)) => matches!(expr.kind, ExprKind::Ret(_)),
- // This is a block that doesn't end in either an implicit or explicit return.
- _ => false,
- }
- } else {
- // This is not a block, it is a value.
- true
- }
- }
-
/// Is this expr either `N`, or `{ N }`.
///
/// If this is not the case, name resolution does not resolve `N` when using
@@ -1269,7 +1251,7 @@ impl Expr {
id: DUMMY_NODE_ID,
kind: ExprKind::Err,
span: DUMMY_SP,
- attrs: ThinVec::new(),
+ attrs: AttrVec::new(),
tokens: None,
},
)
@@ -1338,14 +1320,13 @@ pub enum ExprKind {
///
/// The `PathSegment` represents the method name and its generic arguments
/// (within the angle brackets).
- /// The first element of the vector of an `Expr` is the expression that evaluates
- /// to the object on which the method is being called on (the receiver),
- /// and the remaining elements are the rest of the arguments.
- /// Thus, `x.foo::<Bar, Baz>(a, b, c, d)` is represented as
- /// `ExprKind::MethodCall(PathSegment { foo, [Bar, Baz] }, [x, a, b, c, d])`.
+ /// The standalone `Expr` is the receiver expression.
+ /// The vector of `Expr` is the arguments.
+ /// `x.foo::<Bar, Baz>(a, b, c, d)` is represented as
+ /// `ExprKind::MethodCall(PathSegment { foo, [Bar, Baz] }, x, [a, b, c, d])`.
/// This `Span` is the span of the function, without the dot and receiver
/// (e.g. `foo(a, b)` in `x.foo(a, b)`
- MethodCall(PathSegment, Vec<P<Expr>>, Span),
+ MethodCall(PathSegment, P<Expr>, Vec<P<Expr>>, Span),
/// A tuple (e.g., `(a, b, c, d)`).
Tup(Vec<P<Expr>>),
/// A binary operation (e.g., `a + b`, `a * b`).
@@ -1439,7 +1420,7 @@ pub enum ExprKind {
InlineAsm(P<InlineAsm>),
/// A macro invocation; pre-expansion.
- MacCall(MacCall),
+ MacCall(P<MacCall>),
/// A struct literal expression.
///
@@ -1691,7 +1672,7 @@ pub enum StrStyle {
#[derive(Clone, Encodable, Decodable, Debug, HashStable_Generic)]
pub struct Lit {
/// The original literal token as written in source code.
- pub token: token::Lit,
+ pub token_lit: token::Lit,
/// The "semantic" representation of the literal lowered from the original tokens.
/// Strings are unescaped, hexadecimal forms are eliminated, etc.
/// FIXME: Remove this and only create the semantic representation during lowering to HIR.
@@ -1719,7 +1700,7 @@ impl StrLit {
StrStyle::Raw(n) => token::StrRaw(n),
};
Lit {
- token: token::Lit::new(token_kind, self.symbol, self.suffix),
+ token_lit: token::Lit::new(token_kind, self.symbol, self.suffix),
span: self.span,
kind: LitKind::Str(self.symbol_unescaped, self.style),
}
@@ -1753,7 +1734,8 @@ pub enum LitFloatType {
/// E.g., `"foo"`, `42`, `12.34`, or `bool`.
#[derive(Clone, Encodable, Decodable, Debug, Hash, Eq, PartialEq, HashStable_Generic)]
pub enum LitKind {
- /// A string literal (`"foo"`).
+ /// A string literal (`"foo"`). The symbol is unescaped, and so may differ
+ /// from the original token's symbol.
Str(Symbol, StrStyle),
/// A byte string (`b"foo"`).
ByteStr(Lrc<[u8]>),
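
The new doc note on `LitKind::Str` points out that the stored symbol is the unescaped value, not the source text. A plain-Rust illustration of that distinction (no rustc APIs involved):

```rust
fn main() {
    // What the lexer sees for the literal `"\x41BC"` (quotes and escape included)...
    let as_written = r#""\x41BC""#;
    // ...versus the unescaped value that LitKind::Str conceptually stores.
    let unescaped = "\x41BC";
    assert_eq!(unescaped, "ABC");
    assert_ne!(as_written, unescaped);
}
```
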
@@ -1763,12 +1745,13 @@ pub enum LitKind {
Char(char),
/// An integer literal (`1`).
Int(u128, LitIntType),
- /// A float literal (`1f64` or `1E10f64`).
+ /// A float literal (`1f64` or `1E10f64`). Stored as a symbol rather than
+ /// `f64` so that `LitKind` can impl `Eq` and `Hash`.
Float(Symbol, LitFloatType),
/// A boolean literal.
Bool(bool),
/// Placeholder for a literal that wasn't well-formed in some way.
- Err(Symbol),
+ Err,
}
impl LitKind {
@@ -1807,7 +1790,7 @@ impl LitKind {
| LitKind::Int(_, LitIntType::Unsuffixed)
| LitKind::Float(_, LitFloatType::Unsuffixed)
| LitKind::Bool(..)
- | LitKind::Err(..) => false,
+ | LitKind::Err => false,
}
}
}
@@ -1966,7 +1949,7 @@ pub struct Ty {
pub id: NodeId,
pub kind: TyKind,
pub span: Span,
- pub tokens: Option<LazyTokenStream>,
+ pub tokens: Option<LazyAttrTokenStream>,
}
impl Clone for Ty {
@@ -2042,7 +2025,7 @@ pub enum TyKind {
/// Inferred type of a `self` or `&self` argument in a method.
ImplicitSelf,
/// A macro in the type position.
- MacCall(MacCall),
+ MacCall(P<MacCall>),
/// Placeholder for a kind that has failed to be defined.
Err,
/// Placeholder for a `va_list`.
@@ -2059,8 +2042,11 @@ impl TyKind {
}
pub fn is_simple_path(&self) -> Option<Symbol> {
- if let TyKind::Path(None, Path { segments, .. }) = &self && segments.len() == 1 {
- Some(segments[0].ident.name)
+ if let TyKind::Path(None, Path { segments, .. }) = &self
+ && let [segment] = &segments[..]
+ && segment.args.is_none()
+ {
+ Some(segment.ident.name)
} else {
None
}
@@ -2071,6 +2057,7 @@ impl TyKind {
#[derive(Clone, Copy, PartialEq, Encodable, Decodable, Debug, HashStable_Generic)]
pub enum TraitObjectSyntax {
Dyn,
+ DynStar,
None,
}
@@ -2086,15 +2073,15 @@ pub enum InlineAsmRegOrRegClass {
bitflags::bitflags! {
#[derive(Encodable, Decodable, HashStable_Generic)]
pub struct InlineAsmOptions: u16 {
- const PURE = 1 << 0;
- const NOMEM = 1 << 1;
- const READONLY = 1 << 2;
+ const PURE = 1 << 0;
+ const NOMEM = 1 << 1;
+ const READONLY = 1 << 2;
const PRESERVES_FLAGS = 1 << 3;
- const NORETURN = 1 << 4;
- const NOSTACK = 1 << 5;
- const ATT_SYNTAX = 1 << 6;
- const RAW = 1 << 7;
- const MAY_UNWIND = 1 << 8;
+ const NORETURN = 1 << 4;
+ const NOSTACK = 1 << 5;
+ const ATT_SYNTAX = 1 << 6;
+ const RAW = 1 << 7;
+ const MAY_UNWIND = 1 << 8;
}
}
@@ -2230,7 +2217,7 @@ pub type ExplicitSelf = Spanned<SelfKind>;
impl Param {
/// Attempts to cast parameter to `ExplicitSelf`.
pub fn to_self(&self) -> Option<ExplicitSelf> {
- if let PatKind::Ident(BindingMode::ByValue(mutbl), ident, _) = self.pat.kind {
+ if let PatKind::Ident(BindingAnnotation(ByRef::No, mutbl), ident, _) = self.pat.kind {
if ident.name == kw::SelfLower {
return match self.ty.kind {
TyKind::ImplicitSelf => Some(respan(self.pat.span, SelfKind::Value(mutbl))),
@@ -2260,11 +2247,24 @@ impl Param {
pub fn from_self(attrs: AttrVec, eself: ExplicitSelf, eself_ident: Ident) -> Param {
let span = eself.span.to(eself_ident.span);
let infer_ty = P(Ty { id: DUMMY_NODE_ID, kind: TyKind::ImplicitSelf, span, tokens: None });
- let param = |mutbl, ty| Param {
+ let (mutbl, ty) = match eself.node {
+ SelfKind::Explicit(ty, mutbl) => (mutbl, ty),
+ SelfKind::Value(mutbl) => (mutbl, infer_ty),
+ SelfKind::Region(lt, mutbl) => (
+ Mutability::Not,
+ P(Ty {
+ id: DUMMY_NODE_ID,
+ kind: TyKind::Rptr(lt, MutTy { ty: infer_ty, mutbl }),
+ span,
+ tokens: None,
+ }),
+ ),
+ };
+ Param {
attrs,
pat: P(Pat {
id: DUMMY_NODE_ID,
- kind: PatKind::Ident(BindingMode::ByValue(mutbl), eself_ident, None),
+ kind: PatKind::Ident(BindingAnnotation(ByRef::No, mutbl), eself_ident, None),
span,
tokens: None,
}),
@@ -2272,19 +2272,6 @@ impl Param {
ty,
id: DUMMY_NODE_ID,
is_placeholder: false,
- };
- match eself.node {
- SelfKind::Explicit(ty, mutbl) => param(mutbl, ty),
- SelfKind::Value(mutbl) => param(mutbl, infer_ty),
- SelfKind::Region(lt, mutbl) => param(
- Mutability::Not,
- P(Ty {
- id: DUMMY_NODE_ID,
- kind: TyKind::Rptr(lt, MutTy { ty: infer_ty, mutbl }),
- span,
- tokens: None,
- }),
- ),
}
}
}
@@ -2336,9 +2323,9 @@ impl Async {
}
/// In this case this is an `async` return, the `NodeId` for the generated `impl Trait` item.
- pub fn opt_return_id(self) -> Option<NodeId> {
+ pub fn opt_return_id(self) -> Option<(NodeId, Span)> {
match self {
- Async::Yes { return_impl_trait_id, .. } => Some(return_impl_trait_id),
+ Async::Yes { return_impl_trait_id, span, .. } => Some((return_impl_trait_id, span)),
Async::No => None,
}
}
@@ -2522,8 +2509,8 @@ impl<S: Encoder> Encodable<S> for AttrId {
}
impl<D: Decoder> Decodable<D> for AttrId {
- fn decode(_: &mut D) -> AttrId {
- crate::attr::mk_attr_id()
+ default fn decode(_: &mut D) -> AttrId {
+ panic!("cannot decode `AttrId` with `{}`", std::any::type_name::<D>());
}
}
@@ -2531,7 +2518,7 @@ impl<D: Decoder> Decodable<D> for AttrId {
pub struct AttrItem {
pub path: Path,
pub args: MacArgs,
- pub tokens: Option<LazyTokenStream>,
+ pub tokens: Option<LazyAttrTokenStream>,
}
/// A list of attributes.
@@ -2549,9 +2536,15 @@ pub struct Attribute {
}
#[derive(Clone, Encodable, Decodable, Debug)]
+pub struct NormalAttr {
+ pub item: AttrItem,
+ pub tokens: Option<LazyAttrTokenStream>,
+}
+
+#[derive(Clone, Encodable, Decodable, Debug)]
pub enum AttrKind {
/// A normal attribute.
- Normal(AttrItem, Option<LazyTokenStream>),
+ Normal(P<NormalAttr>),
/// A doc comment (e.g. `/// ...`, `//! ...`, `/** ... */`, `/*! ... */`).
/// Doc attributes (e.g. `#[doc="..."]`) are represented with the `Normal`
@@ -2596,13 +2589,13 @@ impl PolyTraitRef {
pub struct Visibility {
pub kind: VisibilityKind,
pub span: Span,
- pub tokens: Option<LazyTokenStream>,
+ pub tokens: Option<LazyAttrTokenStream>,
}
#[derive(Clone, Encodable, Decodable, Debug)]
pub enum VisibilityKind {
Public,
- Restricted { path: P<Path>, id: NodeId },
+ Restricted { path: P<Path>, id: NodeId, shorthand: bool },
Inherited,
}
@@ -2665,7 +2658,7 @@ impl VariantData {
/// An item definition.
#[derive(Clone, Encodable, Decodable, Debug)]
pub struct Item<K = ItemKind> {
- pub attrs: Vec<Attribute>,
+ pub attrs: AttrVec,
pub id: NodeId,
pub span: Span,
pub vis: Visibility,
@@ -2682,7 +2675,7 @@ pub struct Item<K = ItemKind> {
///
/// Note that the tokens here do not include the outer attributes, but will
/// include inner attributes.
- pub tokens: Option<LazyTokenStream>,
+ pub tokens: Option<LazyAttrTokenStream>,
}
impl Item {
@@ -2873,7 +2866,7 @@ pub enum ItemKind {
/// A macro invocation.
///
/// E.g., `foo!(..)`.
- MacCall(MacCall),
+ MacCall(P<MacCall>),
/// A macro definition.
MacroDef(MacroDef),
@@ -2945,9 +2938,9 @@ pub enum AssocItemKind {
/// An associated function.
Fn(Box<Fn>),
/// An associated type.
- TyAlias(Box<TyAlias>),
+ Type(Box<TyAlias>),
/// A macro expanding to associated items.
- MacCall(MacCall),
+ MacCall(P<MacCall>),
}
impl AssocItemKind {
@@ -2955,7 +2948,7 @@ impl AssocItemKind {
match *self {
Self::Const(defaultness, ..)
| Self::Fn(box Fn { defaultness, .. })
- | Self::TyAlias(box TyAlias { defaultness, .. }) => defaultness,
+ | Self::Type(box TyAlias { defaultness, .. }) => defaultness,
Self::MacCall(..) => Defaultness::Final,
}
}
@@ -2966,7 +2959,7 @@ impl From<AssocItemKind> for ItemKind {
match assoc_item_kind {
AssocItemKind::Const(a, b, c) => ItemKind::Const(a, b, c),
AssocItemKind::Fn(fn_kind) => ItemKind::Fn(fn_kind),
- AssocItemKind::TyAlias(ty_alias_kind) => ItemKind::TyAlias(ty_alias_kind),
+ AssocItemKind::Type(ty_alias_kind) => ItemKind::TyAlias(ty_alias_kind),
AssocItemKind::MacCall(a) => ItemKind::MacCall(a),
}
}
@@ -2979,7 +2972,7 @@ impl TryFrom<ItemKind> for AssocItemKind {
Ok(match item_kind {
ItemKind::Const(a, b, c) => AssocItemKind::Const(a, b, c),
ItemKind::Fn(fn_kind) => AssocItemKind::Fn(fn_kind),
- ItemKind::TyAlias(ty_alias_kind) => AssocItemKind::TyAlias(ty_alias_kind),
+ ItemKind::TyAlias(ty_kind) => AssocItemKind::Type(ty_kind),
ItemKind::MacCall(a) => AssocItemKind::MacCall(a),
_ => return Err(item_kind),
})
@@ -2996,7 +2989,7 @@ pub enum ForeignItemKind {
/// An foreign type.
TyAlias(Box<TyAlias>),
/// A macro expanding to foreign items.
- MacCall(MacCall),
+ MacCall(P<MacCall>),
}
impl From<ForeignItemKind> for ItemKind {
@@ -3030,22 +3023,34 @@ pub type ForeignItem = Item<ForeignItemKind>;
#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
mod size_asserts {
use super::*;
- // These are in alphabetical order, which is easy to maintain.
- rustc_data_structures::static_assert_size!(AssocItemKind, 72);
- rustc_data_structures::static_assert_size!(Attribute, 152);
- rustc_data_structures::static_assert_size!(Block, 48);
- rustc_data_structures::static_assert_size!(Expr, 104);
- rustc_data_structures::static_assert_size!(Fn, 192);
- rustc_data_structures::static_assert_size!(ForeignItemKind, 72);
- rustc_data_structures::static_assert_size!(GenericBound, 88);
- rustc_data_structures::static_assert_size!(Generics, 72);
- rustc_data_structures::static_assert_size!(Impl, 200);
- rustc_data_structures::static_assert_size!(Item, 200);
- rustc_data_structures::static_assert_size!(ItemKind, 112);
- rustc_data_structures::static_assert_size!(Lit, 48);
- rustc_data_structures::static_assert_size!(Pat, 120);
- rustc_data_structures::static_assert_size!(Path, 40);
- rustc_data_structures::static_assert_size!(PathSegment, 24);
- rustc_data_structures::static_assert_size!(Stmt, 32);
- rustc_data_structures::static_assert_size!(Ty, 96);
+ use rustc_data_structures::static_assert_size;
+ // tidy-alphabetical-start
+ static_assert_size!(AssocItem, 104);
+ static_assert_size!(AssocItemKind, 32);
+ static_assert_size!(Attribute, 32);
+ static_assert_size!(Block, 48);
+ static_assert_size!(Expr, 104);
+ static_assert_size!(ExprKind, 72);
+ static_assert_size!(Fn, 184);
+ static_assert_size!(ForeignItem, 96);
+ static_assert_size!(ForeignItemKind, 24);
+ static_assert_size!(GenericArg, 24);
+ static_assert_size!(GenericBound, 88);
+ static_assert_size!(Generics, 72);
+ static_assert_size!(Impl, 200);
+ static_assert_size!(Item, 184);
+ static_assert_size!(ItemKind, 112);
+ static_assert_size!(Lit, 48);
+ static_assert_size!(LitKind, 24);
+ static_assert_size!(Local, 72);
+ static_assert_size!(Param, 40);
+ static_assert_size!(Pat, 120);
+ static_assert_size!(Path, 40);
+ static_assert_size!(PathSegment, 24);
+ static_assert_size!(PatKind, 96);
+ static_assert_size!(Stmt, 32);
+ static_assert_size!(StmtKind, 16);
+ static_assert_size!(Ty, 96);
+ static_assert_size!(TyKind, 72);
+ // tidy-alphabetical-end
}
diff --git a/compiler/rustc_ast/src/ast_traits.rs b/compiler/rustc_ast/src/ast_traits.rs
index 5c30a75a1..1b31be07f 100644
--- a/compiler/rustc_ast/src/ast_traits.rs
+++ b/compiler/rustc_ast/src/ast_traits.rs
@@ -4,7 +4,7 @@
use crate::ptr::P;
use crate::token::Nonterminal;
-use crate::tokenstream::LazyTokenStream;
+use crate::tokenstream::LazyAttrTokenStream;
use crate::{Arm, Crate, ExprField, FieldDef, GenericParam, Param, PatField, Variant};
use crate::{AssocItem, Expr, ForeignItem, Item, NodeId};
use crate::{AttrItem, AttrKind, Block, Pat, Path, Ty, Visibility};
@@ -124,18 +124,18 @@ impl HasSpan for AttrItem {
/// A trait for AST nodes having (or not having) collected tokens.
pub trait HasTokens {
- fn tokens(&self) -> Option<&LazyTokenStream>;
- fn tokens_mut(&mut self) -> Option<&mut Option<LazyTokenStream>>;
+ fn tokens(&self) -> Option<&LazyAttrTokenStream>;
+ fn tokens_mut(&mut self) -> Option<&mut Option<LazyAttrTokenStream>>;
}
macro_rules! impl_has_tokens {
($($T:ty),+ $(,)?) => {
$(
impl HasTokens for $T {
- fn tokens(&self) -> Option<&LazyTokenStream> {
+ fn tokens(&self) -> Option<&LazyAttrTokenStream> {
self.tokens.as_ref()
}
- fn tokens_mut(&mut self) -> Option<&mut Option<LazyTokenStream>> {
+ fn tokens_mut(&mut self) -> Option<&mut Option<LazyAttrTokenStream>> {
Some(&mut self.tokens)
}
}
@@ -147,10 +147,10 @@ macro_rules! impl_has_tokens_none {
($($T:ty),+ $(,)?) => {
$(
impl HasTokens for $T {
- fn tokens(&self) -> Option<&LazyTokenStream> {
+ fn tokens(&self) -> Option<&LazyAttrTokenStream> {
None
}
- fn tokens_mut(&mut self) -> Option<&mut Option<LazyTokenStream>> {
+ fn tokens_mut(&mut self) -> Option<&mut Option<LazyAttrTokenStream>> {
None
}
}
@@ -162,25 +162,25 @@ impl_has_tokens!(AssocItem, AttrItem, Block, Expr, ForeignItem, Item, Pat, Path,
impl_has_tokens_none!(Arm, ExprField, FieldDef, GenericParam, Param, PatField, Variant);
impl<T: AstDeref<Target: HasTokens>> HasTokens for T {
- fn tokens(&self) -> Option<&LazyTokenStream> {
+ fn tokens(&self) -> Option<&LazyAttrTokenStream> {
self.ast_deref().tokens()
}
- fn tokens_mut(&mut self) -> Option<&mut Option<LazyTokenStream>> {
+ fn tokens_mut(&mut self) -> Option<&mut Option<LazyAttrTokenStream>> {
self.ast_deref_mut().tokens_mut()
}
}
impl<T: HasTokens> HasTokens for Option<T> {
- fn tokens(&self) -> Option<&LazyTokenStream> {
+ fn tokens(&self) -> Option<&LazyAttrTokenStream> {
self.as_ref().and_then(|inner| inner.tokens())
}
- fn tokens_mut(&mut self) -> Option<&mut Option<LazyTokenStream>> {
+ fn tokens_mut(&mut self) -> Option<&mut Option<LazyAttrTokenStream>> {
self.as_mut().and_then(|inner| inner.tokens_mut())
}
}
impl HasTokens for StmtKind {
- fn tokens(&self) -> Option<&LazyTokenStream> {
+ fn tokens(&self) -> Option<&LazyAttrTokenStream> {
match self {
StmtKind::Local(local) => local.tokens.as_ref(),
StmtKind::Item(item) => item.tokens(),
@@ -189,7 +189,7 @@ impl HasTokens for StmtKind {
StmtKind::MacCall(mac) => mac.tokens.as_ref(),
}
}
- fn tokens_mut(&mut self) -> Option<&mut Option<LazyTokenStream>> {
+ fn tokens_mut(&mut self) -> Option<&mut Option<LazyAttrTokenStream>> {
match self {
StmtKind::Local(local) => Some(&mut local.tokens),
StmtKind::Item(item) => item.tokens_mut(),
@@ -201,26 +201,26 @@ impl HasTokens for StmtKind {
}
impl HasTokens for Stmt {
- fn tokens(&self) -> Option<&LazyTokenStream> {
+ fn tokens(&self) -> Option<&LazyAttrTokenStream> {
self.kind.tokens()
}
- fn tokens_mut(&mut self) -> Option<&mut Option<LazyTokenStream>> {
+ fn tokens_mut(&mut self) -> Option<&mut Option<LazyAttrTokenStream>> {
self.kind.tokens_mut()
}
}
impl HasTokens for Attribute {
- fn tokens(&self) -> Option<&LazyTokenStream> {
+ fn tokens(&self) -> Option<&LazyAttrTokenStream> {
match &self.kind {
- AttrKind::Normal(_, tokens) => tokens.as_ref(),
+ AttrKind::Normal(normal) => normal.tokens.as_ref(),
kind @ AttrKind::DocComment(..) => {
panic!("Called tokens on doc comment attr {:?}", kind)
}
}
}
- fn tokens_mut(&mut self) -> Option<&mut Option<LazyTokenStream>> {
+ fn tokens_mut(&mut self) -> Option<&mut Option<LazyAttrTokenStream>> {
Some(match &mut self.kind {
- AttrKind::Normal(_, tokens) => tokens,
+ AttrKind::Normal(normal) => &mut normal.tokens,
kind @ AttrKind::DocComment(..) => {
panic!("Called tokens_mut on doc comment attr {:?}", kind)
}
@@ -229,7 +229,7 @@ impl HasTokens for Attribute {
}
impl HasTokens for Nonterminal {
- fn tokens(&self) -> Option<&LazyTokenStream> {
+ fn tokens(&self) -> Option<&LazyAttrTokenStream> {
match self {
Nonterminal::NtItem(item) => item.tokens(),
Nonterminal::NtStmt(stmt) => stmt.tokens(),
@@ -243,7 +243,7 @@ impl HasTokens for Nonterminal {
Nonterminal::NtIdent(..) | Nonterminal::NtLifetime(..) => None,
}
}
- fn tokens_mut(&mut self) -> Option<&mut Option<LazyTokenStream>> {
+ fn tokens_mut(&mut self) -> Option<&mut Option<LazyAttrTokenStream>> {
match self {
Nonterminal::NtItem(item) => item.tokens_mut(),
Nonterminal::NtStmt(stmt) => stmt.tokens_mut(),
@@ -270,7 +270,7 @@ pub trait HasAttrs {
/// during token collection.
const SUPPORTS_CUSTOM_INNER_ATTRS: bool;
fn attrs(&self) -> &[Attribute];
- fn visit_attrs(&mut self, f: impl FnOnce(&mut Vec<Attribute>));
+ fn visit_attrs(&mut self, f: impl FnOnce(&mut AttrVec));
}
macro_rules! impl_has_attrs {
@@ -279,12 +279,13 @@ macro_rules! impl_has_attrs {
impl HasAttrs for $T {
const SUPPORTS_CUSTOM_INNER_ATTRS: bool = $inner;
+ #[inline]
fn attrs(&self) -> &[Attribute] {
&self.attrs
}
- fn visit_attrs(&mut self, f: impl FnOnce(&mut Vec<Attribute>)) {
- VecOrAttrVec::visit(&mut self.attrs, f)
+ fn visit_attrs(&mut self, f: impl FnOnce(&mut AttrVec)) {
+ f(&mut self.attrs)
}
}
)+
@@ -299,7 +300,7 @@ macro_rules! impl_has_attrs_none {
fn attrs(&self) -> &[Attribute] {
&[]
}
- fn visit_attrs(&mut self, _f: impl FnOnce(&mut Vec<Attribute>)) {}
+ fn visit_attrs(&mut self, _f: impl FnOnce(&mut AttrVec)) {}
}
)+
};
@@ -330,7 +331,7 @@ impl<T: AstDeref<Target: HasAttrs>> HasAttrs for T {
fn attrs(&self) -> &[Attribute] {
self.ast_deref().attrs()
}
- fn visit_attrs(&mut self, f: impl FnOnce(&mut Vec<Attribute>)) {
+ fn visit_attrs(&mut self, f: impl FnOnce(&mut AttrVec)) {
self.ast_deref_mut().visit_attrs(f)
}
}
@@ -340,7 +341,7 @@ impl<T: HasAttrs> HasAttrs for Option<T> {
fn attrs(&self) -> &[Attribute] {
self.as_ref().map(|inner| inner.attrs()).unwrap_or(&[])
}
- fn visit_attrs(&mut self, f: impl FnOnce(&mut Vec<Attribute>)) {
+ fn visit_attrs(&mut self, f: impl FnOnce(&mut AttrVec)) {
if let Some(inner) = self.as_mut() {
inner.visit_attrs(f);
}
@@ -362,13 +363,13 @@ impl HasAttrs for StmtKind {
}
}
- fn visit_attrs(&mut self, f: impl FnOnce(&mut Vec<Attribute>)) {
+ fn visit_attrs(&mut self, f: impl FnOnce(&mut AttrVec)) {
match self {
- StmtKind::Local(local) => visit_attrvec(&mut local.attrs, f),
+ StmtKind::Local(local) => f(&mut local.attrs),
StmtKind::Expr(expr) | StmtKind::Semi(expr) => expr.visit_attrs(f),
StmtKind::Item(item) => item.visit_attrs(f),
StmtKind::Empty => {}
- StmtKind::MacCall(mac) => visit_attrvec(&mut mac.attrs, f),
+ StmtKind::MacCall(mac) => f(&mut mac.attrs),
}
}
}
@@ -378,38 +379,11 @@ impl HasAttrs for Stmt {
fn attrs(&self) -> &[Attribute] {
self.kind.attrs()
}
- fn visit_attrs(&mut self, f: impl FnOnce(&mut Vec<Attribute>)) {
+ fn visit_attrs(&mut self, f: impl FnOnce(&mut AttrVec)) {
self.kind.visit_attrs(f);
}
}
-/// Helper trait for the impls above. Abstracts over
-/// the two types of attribute fields that AST nodes
-/// may have (`Vec<Attribute>` or `AttrVec`).
-trait VecOrAttrVec {
- fn visit(&mut self, f: impl FnOnce(&mut Vec<Attribute>));
-}
-
-impl VecOrAttrVec for Vec<Attribute> {
- fn visit(&mut self, f: impl FnOnce(&mut Vec<Attribute>)) {
- f(self)
- }
-}
-
-impl VecOrAttrVec for AttrVec {
- fn visit(&mut self, f: impl FnOnce(&mut Vec<Attribute>)) {
- visit_attrvec(self, f)
- }
-}
-
-fn visit_attrvec(attrs: &mut AttrVec, f: impl FnOnce(&mut Vec<Attribute>)) {
- crate::mut_visit::visit_clobber(attrs, |attrs| {
- let mut vec = attrs.into();
- f(&mut vec);
- vec.into()
- });
-}
-
/// A newtype around an AST node that implements the traits above if the node implements them.
pub struct AstNodeWrapper<Wrapped, Tag> {
pub wrapped: Wrapped,
diff --git a/compiler/rustc_ast/src/attr/mod.rs b/compiler/rustc_ast/src/attr/mod.rs
index 86af7769d..990f4f8f1 100644
--- a/compiler/rustc_ast/src/attr/mod.rs
+++ b/compiler/rustc_ast/src/attr/mod.rs
@@ -7,18 +7,22 @@ use crate::ast::{MacArgs, MacArgsEq, MacDelimiter, MetaItem, MetaItemKind, Neste
use crate::ast::{Path, PathSegment};
use crate::ptr::P;
use crate::token::{self, CommentKind, Delimiter, Token};
-use crate::tokenstream::{AttrAnnotatedTokenStream, AttrAnnotatedTokenTree};
use crate::tokenstream::{DelimSpan, Spacing, TokenTree};
-use crate::tokenstream::{LazyTokenStream, TokenStream};
+use crate::tokenstream::{LazyAttrTokenStream, TokenStream};
use crate::util::comments;
-use rustc_data_structures::thin_vec::ThinVec;
+use rustc_data_structures::sync::WorkerLocal;
use rustc_index::bit_set::GrowableBitSet;
use rustc_span::source_map::BytePos;
use rustc_span::symbol::{sym, Ident, Symbol};
use rustc_span::Span;
+use std::cell::Cell;
use std::iter;
+#[cfg(debug_assertions)]
+use std::ops::BitXor;
+#[cfg(debug_assertions)]
+use std::sync::atomic::{AtomicU32, Ordering};
pub struct MarkedAttrs(GrowableBitSet<AttrId>);
@@ -114,7 +118,7 @@ impl Attribute {
#[inline]
pub fn has_name(&self, name: Symbol) -> bool {
match self.kind {
- AttrKind::Normal(ref item, _) => item.path == name,
+ AttrKind::Normal(ref normal) => normal.item.path == name,
AttrKind::DocComment(..) => false,
}
}
@@ -122,9 +126,9 @@ impl Attribute {
/// For a single-segment attribute, returns its name; otherwise, returns `None`.
pub fn ident(&self) -> Option<Ident> {
match self.kind {
- AttrKind::Normal(ref item, _) => {
- if item.path.segments.len() == 1 {
- Some(item.path.segments[0].ident)
+ AttrKind::Normal(ref normal) => {
+ if normal.item.path.segments.len() == 1 {
+ Some(normal.item.path.segments[0].ident)
} else {
None
}
@@ -138,14 +142,16 @@ impl Attribute {
pub fn value_str(&self) -> Option<Symbol> {
match self.kind {
- AttrKind::Normal(ref item, _) => item.meta_kind().and_then(|kind| kind.value_str()),
+ AttrKind::Normal(ref normal) => {
+ normal.item.meta_kind().and_then(|kind| kind.value_str())
+ }
AttrKind::DocComment(..) => None,
}
}
pub fn meta_item_list(&self) -> Option<Vec<NestedMetaItem>> {
match self.kind {
- AttrKind::Normal(ref item, _) => match item.meta_kind() {
+ AttrKind::Normal(ref normal) => match normal.item.meta_kind() {
Some(MetaItemKind::List(list)) => Some(list),
_ => None,
},
@@ -154,8 +160,8 @@ impl Attribute {
}
pub fn is_word(&self) -> bool {
- if let AttrKind::Normal(item, _) = &self.kind {
- matches!(item.args, MacArgs::Empty)
+ if let AttrKind::Normal(normal) = &self.kind {
+ matches!(normal.item.args, MacArgs::Empty)
} else {
false
}
@@ -182,13 +188,7 @@ impl MetaItem {
}
pub fn value_str(&self) -> Option<Symbol> {
- match self.kind {
- MetaItemKind::NameValue(ref v) => match v.kind {
- LitKind::Str(ref s, _) => Some(*s),
- _ => None,
- },
- _ => None,
- }
+ self.kind.value_str()
}
pub fn meta_item_list(&self) -> Option<&[NestedMetaItem]> {
@@ -237,6 +237,9 @@ impl AttrItem {
}
impl Attribute {
+ /// Returns `true` if it is a sugared doc comment (`///` or `//!` for example).
+ /// So `#[doc = "doc"]` (which is a doc comment) and `#[doc(...)]` (which is not
+ /// a doc comment) will return `false`.
pub fn is_doc_comment(&self) -> bool {
match self.kind {
AttrKind::Normal(..) => false,
@@ -244,10 +247,16 @@ impl Attribute {
}
}
+ /// Returns the documentation and its kind if this is a doc comment or a sugared doc comment.
+ /// * `///doc` returns `Some(("doc", CommentKind::Line))`.
+ /// * `/** doc */` returns `Some(("doc", CommentKind::Block))`.
+ /// * `#[doc = "doc"]` returns `Some(("doc", CommentKind::Line))`.
+ /// * `#[doc(...)]` returns `None`.
pub fn doc_str_and_comment_kind(&self) -> Option<(Symbol, CommentKind)> {
match self.kind {
AttrKind::DocComment(kind, data) => Some((data, kind)),
- AttrKind::Normal(ref item, _) if item.path == sym::doc => item
+ AttrKind::Normal(ref normal) if normal.item.path == sym::doc => normal
+ .item
.meta_kind()
.and_then(|kind| kind.value_str())
.map(|data| (data, CommentKind::Line)),
@@ -255,11 +264,15 @@ impl Attribute {
}
}
+ /// Returns the documentation if this is a doc comment or a sugared doc comment.
+ /// * `///doc` returns `Some("doc")`.
+ /// * `#[doc = "doc"]` returns `Some("doc")`.
+ /// * `#[doc(...)]` returns `None`.
pub fn doc_str(&self) -> Option<Symbol> {
match self.kind {
AttrKind::DocComment(.., data) => Some(data),
- AttrKind::Normal(ref item, _) if item.path == sym::doc => {
- item.meta_kind().and_then(|kind| kind.value_str())
+ AttrKind::Normal(ref normal) if normal.item.path == sym::doc => {
+ normal.item.meta_kind().and_then(|kind| kind.value_str())
}
_ => None,
}
@@ -271,14 +284,14 @@ impl Attribute {
pub fn get_normal_item(&self) -> &AttrItem {
match self.kind {
- AttrKind::Normal(ref item, _) => item,
+ AttrKind::Normal(ref normal) => &normal.item,
AttrKind::DocComment(..) => panic!("unexpected doc comment"),
}
}
pub fn unwrap_normal_item(self) -> AttrItem {
match self.kind {
- AttrKind::Normal(item, _) => item,
+ AttrKind::Normal(normal) => normal.into_inner().item,
AttrKind::DocComment(..) => panic!("unexpected doc comment"),
}
}
@@ -286,31 +299,30 @@ impl Attribute {
/// Extracts the MetaItem from inside this Attribute.
pub fn meta(&self) -> Option<MetaItem> {
match self.kind {
- AttrKind::Normal(ref item, _) => item.meta(self.span),
+ AttrKind::Normal(ref normal) => normal.item.meta(self.span),
AttrKind::DocComment(..) => None,
}
}
pub fn meta_kind(&self) -> Option<MetaItemKind> {
match self.kind {
- AttrKind::Normal(ref item, _) => item.meta_kind(),
+ AttrKind::Normal(ref normal) => normal.item.meta_kind(),
AttrKind::DocComment(..) => None,
}
}
- pub fn tokens(&self) -> AttrAnnotatedTokenStream {
+ pub fn tokens(&self) -> TokenStream {
match self.kind {
- AttrKind::Normal(_, ref tokens) => tokens
+ AttrKind::Normal(ref normal) => normal
+ .tokens
.as_ref()
.unwrap_or_else(|| panic!("attribute is missing tokens: {:?}", self))
- .create_token_stream(),
- AttrKind::DocComment(comment_kind, data) => AttrAnnotatedTokenStream::from((
- AttrAnnotatedTokenTree::Token(Token::new(
- token::DocComment(comment_kind, self.style, data),
- self.span,
- )),
+ .to_attr_token_stream()
+ .to_tokenstream(),
+ AttrKind::DocComment(comment_kind, data) => TokenStream::new(vec![TokenTree::Token(
+ Token::new(token::DocComment(comment_kind, self.style, data), self.span),
Spacing::Alone,
- )),
+ )]),
}
}
}
@@ -340,47 +352,86 @@ pub fn mk_nested_word_item(ident: Ident) -> NestedMetaItem {
NestedMetaItem::MetaItem(mk_word_item(ident))
}
-pub(crate) fn mk_attr_id() -> AttrId {
- use std::sync::atomic::AtomicU32;
- use std::sync::atomic::Ordering;
+pub struct AttrIdGenerator(WorkerLocal<Cell<u32>>);
- static NEXT_ATTR_ID: AtomicU32 = AtomicU32::new(0);
+#[cfg(debug_assertions)]
+static MAX_ATTR_ID: AtomicU32 = AtomicU32::new(u32::MAX);
- let id = NEXT_ATTR_ID.fetch_add(1, Ordering::SeqCst);
- assert!(id != u32::MAX);
- AttrId::from_u32(id)
+impl AttrIdGenerator {
+ pub fn new() -> Self {
+ // We use `(index as u32).reverse_bits()` to initialize the
+ // starting value of AttrId in each worker thread.
+ // The `index` is the index of the worker thread.
+ // This ensures that the AttrId generated in each thread is unique.
+ AttrIdGenerator(WorkerLocal::new(|index| {
+ let index: u32 = index.try_into().unwrap();
+
+ #[cfg(debug_assertions)]
+ {
+ let max_id = ((index + 1).next_power_of_two() - 1).bitxor(u32::MAX).reverse_bits();
+ MAX_ATTR_ID.fetch_min(max_id, Ordering::Release);
+ }
+
+ Cell::new(index.reverse_bits())
+ }))
+ }
+
+ pub fn mk_attr_id(&self) -> AttrId {
+ let id = self.0.get();
+
+ // Ensure the assigned attr_id does not overlap the bits
+ // representing the number of threads.
+ #[cfg(debug_assertions)]
+ assert!(id <= MAX_ATTR_ID.load(Ordering::Acquire));
+
+ self.0.set(id + 1);
+ AttrId::from_u32(id)
+ }
}
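
A standalone illustration (plain std, not rustc code) of the ID scheme used by `AttrIdGenerator` above: each worker thread starts counting from `index.reverse_bits()`, so thread 0 hands out IDs from the bottom of the `u32` range while thread 1 starts at `0x8000_0000`, and the streams never collide.

    fn main() {
        // First four IDs a worker with the given index would hand out.
        let ids = |index: u32| (0..4u32).map(move |i| index.reverse_bits() + i).collect::<Vec<_>>();
        println!("{:08x?}", ids(0)); // [00000000, 00000001, 00000002, 00000003]
        println!("{:08x?}", ids(1)); // [80000000, 80000001, 80000002, 80000003]
    }
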
-pub fn mk_attr(style: AttrStyle, path: Path, args: MacArgs, span: Span) -> Attribute {
- mk_attr_from_item(AttrItem { path, args, tokens: None }, None, style, span)
+pub fn mk_attr(
+ g: &AttrIdGenerator,
+ style: AttrStyle,
+ path: Path,
+ args: MacArgs,
+ span: Span,
+) -> Attribute {
+ mk_attr_from_item(g, AttrItem { path, args, tokens: None }, None, style, span)
}
pub fn mk_attr_from_item(
+ g: &AttrIdGenerator,
item: AttrItem,
- tokens: Option<LazyTokenStream>,
+ tokens: Option<LazyAttrTokenStream>,
style: AttrStyle,
span: Span,
) -> Attribute {
- Attribute { kind: AttrKind::Normal(item, tokens), id: mk_attr_id(), style, span }
+ Attribute {
+ kind: AttrKind::Normal(P(ast::NormalAttr { item, tokens })),
+ id: g.mk_attr_id(),
+ style,
+ span,
+ }
}
/// Returns an inner attribute with the given value and span.
-pub fn mk_attr_inner(item: MetaItem) -> Attribute {
- mk_attr(AttrStyle::Inner, item.path, item.kind.mac_args(item.span), item.span)
+pub fn mk_attr_inner(g: &AttrIdGenerator, item: MetaItem) -> Attribute {
+ mk_attr(g, AttrStyle::Inner, item.path, item.kind.mac_args(item.span), item.span)
}
/// Returns an outer attribute with the given value and span.
-pub fn mk_attr_outer(item: MetaItem) -> Attribute {
- mk_attr(AttrStyle::Outer, item.path, item.kind.mac_args(item.span), item.span)
+pub fn mk_attr_outer(g: &AttrIdGenerator, item: MetaItem) -> Attribute {
+ mk_attr(g, AttrStyle::Outer, item.path, item.kind.mac_args(item.span), item.span)
}
pub fn mk_doc_comment(
+ g: &AttrIdGenerator,
comment_kind: CommentKind,
style: AttrStyle,
data: Symbol,
span: Span,
) -> Attribute {
- Attribute { kind: AttrKind::DocComment(comment_kind, data), id: mk_attr_id(), style, span }
+ Attribute { kind: AttrKind::DocComment(comment_kind, data), id: g.mk_attr_id(), style, span }
}
pub fn list_contains_name(items: &[NestedMetaItem], name: Symbol) -> bool {
@@ -484,7 +535,7 @@ impl MetaItemKind {
id: ast::DUMMY_NODE_ID,
kind: ast::ExprKind::Lit(lit.clone()),
span: lit.span,
- attrs: ThinVec::new(),
+ attrs: ast::AttrVec::new(),
tokens: None,
});
MacArgs::Eq(span, MacArgsEq::Ast(expr))
diff --git a/compiler/rustc_ast/src/lib.rs b/compiler/rustc_ast/src/lib.rs
index 4b94ec0d6..eeb7e56e2 100644
--- a/compiler/rustc_ast/src/lib.rs
+++ b/compiler/rustc_ast/src/lib.rs
@@ -13,17 +13,21 @@
#![feature(const_default_impls)]
#![feature(const_trait_impl)]
#![feature(if_let_guard)]
-#![feature(label_break_value)]
#![feature(let_chains)]
#![feature(min_specialization)]
#![feature(negative_impls)]
#![feature(slice_internals)]
#![feature(stmt_expr_attributes)]
#![recursion_limit = "256"]
+#![deny(rustc::untranslatable_diagnostic)]
+#![deny(rustc::diagnostic_outside_of_impl)]
#[macro_use]
extern crate rustc_macros;
+#[macro_use]
+extern crate tracing;
+
pub mod util {
pub mod classify;
pub mod comments;
diff --git a/compiler/rustc_ast/src/mut_visit.rs b/compiler/rustc_ast/src/mut_visit.rs
index 01bd498b3..b970e57e0 100644
--- a/compiler/rustc_ast/src/mut_visit.rs
+++ b/compiler/rustc_ast/src/mut_visit.rs
@@ -14,7 +14,6 @@ use crate::tokenstream::*;
use rustc_data_structures::map_in_place::MapInPlace;
use rustc_data_structures::sync::Lrc;
-use rustc_data_structures::thin_vec::ThinVec;
use rustc_span::source_map::Spanned;
use rustc_span::symbol::Ident;
use rustc_span::Span;
@@ -153,6 +152,12 @@ pub trait MutVisitor: Sized {
noop_visit_expr(e, self);
}
+ /// This method is a hack to work around the unstable `stmt_expr_attributes` feature.
+ /// It can be removed once that feature is stabilized.
+ fn visit_method_receiver_expr(&mut self, ex: &mut P<Expr>) {
+ self.visit_expr(ex)
+ }
+
fn filter_map_expr(&mut self, e: P<Expr>) -> Option<P<Expr>> {
noop_filter_map_expr(e, self)
}
@@ -338,12 +343,7 @@ where
}
// No `noop_` prefix because there isn't a corresponding method in `MutVisitor`.
-pub fn visit_attrs<T: MutVisitor>(attrs: &mut Vec<Attribute>, vis: &mut T) {
- visit_vec(attrs, |attr| vis.visit_attribute(attr));
-}
-
-// No `noop_` prefix because there isn't a corresponding method in `MutVisitor`.
-pub fn visit_thin_attrs<T: MutVisitor>(attrs: &mut AttrVec, vis: &mut T) {
+pub fn visit_attrs<T: MutVisitor>(attrs: &mut AttrVec, vis: &mut T) {
for attr in attrs.iter_mut() {
vis.visit_attribute(attr);
}
@@ -398,7 +398,7 @@ pub fn noop_flat_map_pat_field<T: MutVisitor>(
vis.visit_ident(ident);
vis.visit_pat(pat);
vis.visit_span(span);
- visit_thin_attrs(attrs, vis);
+ visit_attrs(attrs, vis);
smallvec![fp]
}
@@ -424,7 +424,7 @@ pub fn noop_visit_use_tree<T: MutVisitor>(use_tree: &mut UseTree, vis: &mut T) {
pub fn noop_flat_map_arm<T: MutVisitor>(mut arm: Arm, vis: &mut T) -> SmallVec<[Arm; 1]> {
let Arm { attrs, pat, guard, body, span, id, is_placeholder: _ } = &mut arm;
- visit_thin_attrs(attrs, vis);
+ visit_attrs(attrs, vis);
vis.visit_id(id);
vis.visit_pat(pat);
visit_opt(guard, |guard| vis.visit_expr(guard));
@@ -507,7 +507,7 @@ pub fn noop_flat_map_variant<T: MutVisitor>(
let Variant { ident, vis, attrs, id, data, disr_expr, span, is_placeholder: _ } = &mut variant;
visitor.visit_ident(ident);
visitor.visit_vis(vis);
- visit_thin_attrs(attrs, visitor);
+ visit_attrs(attrs, visitor);
visitor.visit_id(id);
visitor.visit_variant_data(data);
visit_opt(disr_expr, |disr_expr| visitor.visit_anon_const(disr_expr));
@@ -589,14 +589,16 @@ pub fn noop_visit_local<T: MutVisitor>(local: &mut P<Local>, vis: &mut T) {
}
}
vis.visit_span(span);
- visit_thin_attrs(attrs, vis);
+ visit_attrs(attrs, vis);
visit_lazy_tts(tokens, vis);
}
pub fn noop_visit_attribute<T: MutVisitor>(attr: &mut Attribute, vis: &mut T) {
let Attribute { kind, id: _, style: _, span } = attr;
match kind {
- AttrKind::Normal(AttrItem { path, args, tokens }, attr_tokens) => {
+ AttrKind::Normal(normal) => {
+ let NormalAttr { item: AttrItem { path, args, tokens }, tokens: attr_tokens } =
+ &mut **normal;
vis.visit_path(path);
visit_mac_args(args, vis);
visit_lazy_tts(tokens, vis);
@@ -638,7 +640,7 @@ pub fn noop_visit_meta_item<T: MutVisitor>(mi: &mut MetaItem, vis: &mut T) {
pub fn noop_flat_map_param<T: MutVisitor>(mut param: Param, vis: &mut T) -> SmallVec<[Param; 1]> {
let Param { attrs, id, pat, span, ty, is_placeholder: _ } = &mut param;
vis.visit_id(id);
- visit_thin_attrs(attrs, vis);
+ visit_attrs(attrs, vis);
vis.visit_pat(pat);
vis.visit_span(span);
vis.visit_ty(ty);
@@ -646,21 +648,21 @@ pub fn noop_flat_map_param<T: MutVisitor>(mut param: Param, vis: &mut T) -> Smal
}
// No `noop_` prefix because there isn't a corresponding method in `MutVisitor`.
-pub fn visit_attr_annotated_tt<T: MutVisitor>(tt: &mut AttrAnnotatedTokenTree, vis: &mut T) {
+pub fn visit_attr_tt<T: MutVisitor>(tt: &mut AttrTokenTree, vis: &mut T) {
match tt {
- AttrAnnotatedTokenTree::Token(token) => {
+ AttrTokenTree::Token(token, _) => {
visit_token(token, vis);
}
- AttrAnnotatedTokenTree::Delimited(DelimSpan { open, close }, _delim, tts) => {
+ AttrTokenTree::Delimited(DelimSpan { open, close }, _delim, tts) => {
vis.visit_span(open);
vis.visit_span(close);
- visit_attr_annotated_tts(tts, vis);
+ visit_attr_tts(tts, vis);
}
- AttrAnnotatedTokenTree::Attributes(data) => {
+ AttrTokenTree::Attributes(data) => {
for attr in &mut *data.attrs {
match &mut attr.kind {
- AttrKind::Normal(_, attr_tokens) => {
- visit_lazy_tts(attr_tokens, vis);
+ AttrKind::Normal(normal) => {
+ visit_lazy_tts(&mut normal.tokens, vis);
}
AttrKind::DocComment(..) => {
vis.visit_span(&mut attr.span);
@@ -694,27 +696,27 @@ pub fn visit_tts<T: MutVisitor>(TokenStream(tts): &mut TokenStream, vis: &mut T)
}
}
-pub fn visit_attr_annotated_tts<T: MutVisitor>(
- AttrAnnotatedTokenStream(tts): &mut AttrAnnotatedTokenStream,
- vis: &mut T,
-) {
+pub fn visit_attr_tts<T: MutVisitor>(AttrTokenStream(tts): &mut AttrTokenStream, vis: &mut T) {
if T::VISIT_TOKENS && !tts.is_empty() {
let tts = Lrc::make_mut(tts);
- visit_vec(tts, |(tree, _is_joint)| visit_attr_annotated_tt(tree, vis));
+ visit_vec(tts, |tree| visit_attr_tt(tree, vis));
}
}
-pub fn visit_lazy_tts_opt_mut<T: MutVisitor>(lazy_tts: Option<&mut LazyTokenStream>, vis: &mut T) {
+pub fn visit_lazy_tts_opt_mut<T: MutVisitor>(
+ lazy_tts: Option<&mut LazyAttrTokenStream>,
+ vis: &mut T,
+) {
if T::VISIT_TOKENS {
if let Some(lazy_tts) = lazy_tts {
- let mut tts = lazy_tts.create_token_stream();
- visit_attr_annotated_tts(&mut tts, vis);
- *lazy_tts = LazyTokenStream::new(tts);
+ let mut tts = lazy_tts.to_attr_token_stream();
+ visit_attr_tts(&mut tts, vis);
+ *lazy_tts = LazyAttrTokenStream::new(tts);
}
}
}
-pub fn visit_lazy_tts<T: MutVisitor>(lazy_tts: &mut Option<LazyTokenStream>, vis: &mut T) {
+pub fn visit_lazy_tts<T: MutVisitor>(lazy_tts: &mut Option<LazyAttrTokenStream>, vis: &mut T) {
visit_lazy_tts_opt_mut(lazy_tts.as_mut(), vis);
}
@@ -880,7 +882,7 @@ pub fn noop_flat_map_generic_param<T: MutVisitor>(
if let Some(ref mut colon_span) = colon_span {
vis.visit_span(colon_span);
}
- visit_thin_attrs(attrs, vis);
+ visit_attrs(attrs, vis);
visit_vec(bounds, |bound| noop_visit_param_bound(bound, vis));
match kind {
GenericParamKind::Lifetime => {}
@@ -933,8 +935,7 @@ pub fn noop_visit_where_predicate<T: MutVisitor>(pred: &mut WherePredicate, vis:
visit_vec(bounds, |bound| noop_visit_param_bound(bound, vis));
}
WherePredicate::EqPredicate(ep) => {
- let WhereEqPredicate { id, span, lhs_ty, rhs_ty } = ep;
- vis.visit_id(id);
+ let WhereEqPredicate { span, lhs_ty, rhs_ty } = ep;
vis.visit_span(span);
vis.visit_ty(lhs_ty);
vis.visit_ty(rhs_ty);
@@ -977,7 +978,7 @@ pub fn noop_flat_map_field_def<T: MutVisitor>(
visitor.visit_vis(vis);
visitor.visit_id(id);
visitor.visit_ty(ty);
- visit_thin_attrs(attrs, visitor);
+ visit_attrs(attrs, visitor);
smallvec![fd]
}
@@ -990,7 +991,7 @@ pub fn noop_flat_map_expr_field<T: MutVisitor>(
vis.visit_expr(expr);
vis.visit_id(id);
vis.visit_span(span);
- visit_thin_attrs(attrs, vis);
+ visit_attrs(attrs, vis);
smallvec![f]
}
@@ -1111,7 +1112,7 @@ pub fn noop_flat_map_assoc_item<T: MutVisitor>(
visit_fn_sig(sig, visitor);
visit_opt(body, |body| visitor.visit_block(body));
}
- AssocItemKind::TyAlias(box TyAlias {
+ AssocItemKind::Type(box TyAlias {
defaultness,
generics,
where_clauses,
@@ -1302,10 +1303,11 @@ pub fn noop_visit_expr<T: MutVisitor>(
vis.visit_expr(f);
visit_exprs(args, vis);
}
- ExprKind::MethodCall(PathSegment { ident, id, args }, exprs, span) => {
+ ExprKind::MethodCall(PathSegment { ident, id, args }, receiver, exprs, span) => {
vis.visit_ident(ident);
vis.visit_id(id);
visit_opt(args, |args| vis.visit_generic_args(args));
+ vis.visit_method_receiver_expr(receiver);
visit_exprs(exprs, vis);
vis.visit_span(span);
}
@@ -1430,7 +1432,7 @@ pub fn noop_visit_expr<T: MutVisitor>(
}
vis.visit_id(id);
vis.visit_span(span);
- visit_thin_attrs(attrs, vis);
+ visit_attrs(attrs, vis);
visit_lazy_tts(tokens, vis);
}
@@ -1476,7 +1478,7 @@ pub fn noop_flat_map_stmt_kind<T: MutVisitor>(
StmtKind::MacCall(mut mac) => {
let MacCallStmt { mac: mac_, style: _, attrs, tokens } = mac.deref_mut();
vis.visit_mac_call(mac_);
- visit_thin_attrs(attrs, vis);
+ visit_attrs(attrs, vis);
visit_lazy_tts(tokens, vis);
smallvec![StmtKind::MacCall(mac)]
}
@@ -1486,7 +1488,7 @@ pub fn noop_flat_map_stmt_kind<T: MutVisitor>(
pub fn noop_visit_vis<T: MutVisitor>(visibility: &mut Visibility, vis: &mut T) {
match &mut visibility.kind {
VisibilityKind::Public | VisibilityKind::Inherited => {}
- VisibilityKind::Restricted { path, id } => {
+ VisibilityKind::Restricted { path, id, shorthand: _ } => {
vis.visit_path(path);
vis.visit_id(id);
}
@@ -1511,12 +1513,6 @@ impl<T: DummyAstNode + 'static> DummyAstNode for P<T> {
}
}
-impl<T> DummyAstNode for ThinVec<T> {
- fn dummy() -> Self {
- Default::default()
- }
-}
-
impl DummyAstNode for Item {
fn dummy() -> Self {
Item {
@@ -1599,3 +1595,9 @@ impl DummyAstNode for Crate {
}
}
}
+
+impl<N: DummyAstNode, T: DummyAstNode> DummyAstNode for crate::ast_traits::AstNodeWrapper<N, T> {
+ fn dummy() -> Self {
+ crate::ast_traits::AstNodeWrapper::new(N::dummy(), T::dummy())
+ }
+}
diff --git a/compiler/rustc_ast/src/node_id.rs b/compiler/rustc_ast/src/node_id.rs
index 7f928cb57..7b5acc3f4 100644
--- a/compiler/rustc_ast/src/node_id.rs
+++ b/compiler/rustc_ast/src/node_id.rs
@@ -13,7 +13,7 @@ rustc_index::newtype_index! {
}
}
-rustc_data_structures::define_id_collections!(NodeMap, NodeSet, NodeId);
+rustc_data_structures::define_id_collections!(NodeMap, NodeSet, NodeMapEntry, NodeId);
/// The [`NodeId`] used to represent the root of the crate.
pub const CRATE_NODE_ID: NodeId = NodeId::from_u32(0);
diff --git a/compiler/rustc_ast/src/token.rs b/compiler/rustc_ast/src/token.rs
index 85d9687c6..83b10d906 100644
--- a/compiler/rustc_ast/src/token.rs
+++ b/compiler/rustc_ast/src/token.rs
@@ -13,7 +13,7 @@ use rustc_span::symbol::{kw, sym};
use rustc_span::symbol::{Ident, Symbol};
use rustc_span::{self, edition::Edition, Span, DUMMY_SP};
use std::borrow::Cow;
-use std::{fmt, mem};
+use std::fmt;
#[derive(Clone, Copy, PartialEq, Encodable, Decodable, Debug, HashStable_Generic)]
pub enum CommentKind {
@@ -256,10 +256,6 @@ pub enum TokenKind {
Eof,
}
-// `TokenKind` is used a lot. Make sure it doesn't unintentionally get bigger.
-#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
-rustc_data_structures::static_assert_size!(TokenKind, 16);
-
#[derive(Clone, PartialEq, Encodable, Decodable, Debug, HashStable_Generic)]
pub struct Token {
pub kind: TokenKind,
@@ -335,11 +331,6 @@ impl Token {
Token::new(Ident(ident.name, ident.is_raw_guess()), ident.span)
}
- /// Return this token by value and leave a dummy token in its place.
- pub fn take(&mut self) -> Self {
- mem::replace(self, Token::dummy())
- }
-
/// For interpolated tokens, returns a span of the fragment to which the interpolated
/// token refers. For all other tokens this is just a regular span.
/// It is particularly important to use this for identifiers and lifetimes
@@ -354,17 +345,14 @@ impl Token {
}
pub fn is_op(&self) -> bool {
- !matches!(
- self.kind,
- OpenDelim(..)
- | CloseDelim(..)
- | Literal(..)
- | DocComment(..)
- | Ident(..)
- | Lifetime(..)
- | Interpolated(..)
- | Eof
- )
+ match self.kind {
+ Eq | Lt | Le | EqEq | Ne | Ge | Gt | AndAnd | OrOr | Not | Tilde | BinOp(_)
+ | BinOpEq(_) | At | Dot | DotDot | DotDotDot | DotDotEq | Comma | Semi | Colon
+ | ModSep | RArrow | LArrow | FatArrow | Pound | Dollar | Question | SingleQuote => true,
+
+ OpenDelim(..) | CloseDelim(..) | Literal(..) | DocComment(..) | Ident(..)
+ | Lifetime(..) | Interpolated(..) | Eof => false,
+ }
}
pub fn is_like_plus(&self) -> bool {
@@ -398,6 +386,30 @@ impl Token {
}
}
+ /// Returns `true` if the token can appear at the start of a pattern.
+ ///
+ /// Shamelessly borrowed from `can_begin_expr`, only used for diagnostics right now.
+ pub fn can_begin_pattern(&self) -> bool {
+ match self.uninterpolate().kind {
+ Ident(name, is_raw) =>
+ ident_can_begin_expr(name, self.span, is_raw), // value name or keyword
+ | OpenDelim(Delimiter::Bracket | Delimiter::Parenthesis) // tuple or array
+ | Literal(..) // literal
+ | BinOp(Minus) // unary minus
+ | BinOp(And) // reference
+ | AndAnd // double reference
+ // DotDotDot is no longer supported
+ | DotDot | DotDotDot | DotDotEq // ranges
+ | Lt | BinOp(Shl) // associated path
+ | ModSep => true, // global path
+ Interpolated(ref nt) => matches!(**nt, NtLiteral(..) |
+ NtPat(..) |
+ NtBlock(..) |
+ NtPath(..)),
+ _ => false,
+ }
+ }
+
/// Returns `true` if the token can appear at the start of a type.
pub fn can_begin_type(&self) -> bool {
match self.uninterpolate().kind {
@@ -436,6 +448,31 @@ impl Token {
|| self == &OpenDelim(Delimiter::Parenthesis)
}
+ /// Returns `true` if the token can appear at the start of an item.
+ pub fn can_begin_item(&self) -> bool {
+ match self.kind {
+ Ident(name, _) => [
+ kw::Fn,
+ kw::Use,
+ kw::Struct,
+ kw::Enum,
+ kw::Pub,
+ kw::Trait,
+ kw::Extern,
+ kw::Impl,
+ kw::Unsafe,
+ kw::Const,
+ kw::Static,
+ kw::Union,
+ kw::Macro,
+ kw::Mod,
+ kw::Type,
+ ]
+ .contains(&name),
+ _ => false,
+ }
+ }
+
/// Returns `true` if the token is any literal.
pub fn is_lit(&self) -> bool {
matches!(self.kind, Literal(..))
@@ -684,6 +721,7 @@ impl Token {
}
impl PartialEq<TokenKind> for Token {
+ #[inline]
fn eq(&self, rhs: &TokenKind) -> bool {
self.kind == *rhs
}
@@ -707,10 +745,6 @@ pub enum Nonterminal {
NtVis(P<ast::Visibility>),
}
-// `Nonterminal` is used a lot. Make sure it doesn't unintentionally get bigger.
-#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
-rustc_data_structures::static_assert_size!(Nonterminal, 16);
-
#[derive(Debug, Copy, Clone, PartialEq, Encodable, Decodable)]
pub enum NonterminalKind {
Item,
@@ -849,3 +883,17 @@ where
panic!("interpolated tokens should not be present in the HIR")
}
}
+
+// Some types are used a lot. Make sure they don't unintentionally get bigger.
+#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
+mod size_asserts {
+ use super::*;
+ use rustc_data_structures::static_assert_size;
+ // tidy-alphabetical-start
+ static_assert_size!(Lit, 12);
+ static_assert_size!(LitKind, 2);
+ static_assert_size!(Nonterminal, 16);
+ static_assert_size!(Token, 24);
+ static_assert_size!(TokenKind, 16);
+ // tidy-alphabetical-end
+}
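
The size assertions gathered into `size_asserts` above (and again in `tokenstream.rs` below) are compile-time checks; a rough standalone equivalent, not the actual `static_assert_size!` expansion, looks like this:

    struct Packed {
        kind: u8,
        data: u32,
    }

    // Compilation fails if `Packed` ever grows past 8 bytes.
    const _: () = assert!(std::mem::size_of::<Packed>() <= 8);

    fn main() {} // nothing to run; the check happens at compile time
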
diff --git a/compiler/rustc_ast/src/tokenstream.rs b/compiler/rustc_ast/src/tokenstream.rs
index 9e4a22e1f..015f5c1ee 100644
--- a/compiler/rustc_ast/src/tokenstream.rs
+++ b/compiler/rustc_ast/src/tokenstream.rs
@@ -47,10 +47,6 @@ pub enum TokenTree {
Delimited(DelimSpan, Delimiter, TokenStream),
}
-// This type is used a lot. Make sure it doesn't unintentionally get bigger.
-#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
-rustc_data_structures::static_assert_size!(TokenTree, 32);
-
// Ensure all fields of `TokenTree` are `Send` and `Sync`.
#[cfg(parallel_compiler)]
fn _dummy()
@@ -121,12 +117,12 @@ where
}
}
-pub trait CreateTokenStream: sync::Send + sync::Sync {
- fn create_token_stream(&self) -> AttrAnnotatedTokenStream;
+pub trait ToAttrTokenStream: sync::Send + sync::Sync {
+ fn to_attr_token_stream(&self) -> AttrTokenStream;
}
-impl CreateTokenStream for AttrAnnotatedTokenStream {
- fn create_token_stream(&self) -> AttrAnnotatedTokenStream {
+impl ToAttrTokenStream for AttrTokenStream {
+ fn to_attr_token_stream(&self) -> AttrTokenStream {
self.clone()
}
}
@@ -135,68 +131,68 @@ impl CreateTokenStream for AttrAnnotatedTokenStream {
/// of an actual `TokenStream` until it is needed.
/// `Box` is here only to reduce the structure size.
#[derive(Clone)]
-pub struct LazyTokenStream(Lrc<Box<dyn CreateTokenStream>>);
+pub struct LazyAttrTokenStream(Lrc<Box<dyn ToAttrTokenStream>>);
-impl LazyTokenStream {
- pub fn new(inner: impl CreateTokenStream + 'static) -> LazyTokenStream {
- LazyTokenStream(Lrc::new(Box::new(inner)))
+impl LazyAttrTokenStream {
+ pub fn new(inner: impl ToAttrTokenStream + 'static) -> LazyAttrTokenStream {
+ LazyAttrTokenStream(Lrc::new(Box::new(inner)))
}
- pub fn create_token_stream(&self) -> AttrAnnotatedTokenStream {
- self.0.create_token_stream()
+ pub fn to_attr_token_stream(&self) -> AttrTokenStream {
+ self.0.to_attr_token_stream()
}
}
-impl fmt::Debug for LazyTokenStream {
+impl fmt::Debug for LazyAttrTokenStream {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- write!(f, "LazyTokenStream({:?})", self.create_token_stream())
+ write!(f, "LazyAttrTokenStream({:?})", self.to_attr_token_stream())
}
}
-impl<S: Encoder> Encodable<S> for LazyTokenStream {
+impl<S: Encoder> Encodable<S> for LazyAttrTokenStream {
fn encode(&self, s: &mut S) {
// Used by AST json printing.
- Encodable::encode(&self.create_token_stream(), s);
+ Encodable::encode(&self.to_attr_token_stream(), s);
}
}
-impl<D: Decoder> Decodable<D> for LazyTokenStream {
+impl<D: Decoder> Decodable<D> for LazyAttrTokenStream {
fn decode(_d: &mut D) -> Self {
- panic!("Attempted to decode LazyTokenStream");
+ panic!("Attempted to decode LazyAttrTokenStream");
}
}
-impl<CTX> HashStable<CTX> for LazyTokenStream {
+impl<CTX> HashStable<CTX> for LazyAttrTokenStream {
fn hash_stable(&self, _hcx: &mut CTX, _hasher: &mut StableHasher) {
- panic!("Attempted to compute stable hash for LazyTokenStream");
+ panic!("Attempted to compute stable hash for LazyAttrTokenStream");
}
}
-/// A `AttrAnnotatedTokenStream` is similar to a `TokenStream`, but with extra
+/// An `AttrTokenStream` is similar to a `TokenStream`, but with extra
/// information about the tokens for attribute targets. This is used
/// during expansion to perform early cfg-expansion, and to process attributes
/// during proc-macro invocations.
#[derive(Clone, Debug, Default, Encodable, Decodable)]
-pub struct AttrAnnotatedTokenStream(pub Lrc<Vec<(AttrAnnotatedTokenTree, Spacing)>>);
+pub struct AttrTokenStream(pub Lrc<Vec<AttrTokenTree>>);
-/// Like `TokenTree`, but for `AttrAnnotatedTokenStream`
+/// Like `TokenTree`, but for `AttrTokenStream`.
#[derive(Clone, Debug, Encodable, Decodable)]
-pub enum AttrAnnotatedTokenTree {
- Token(Token),
- Delimited(DelimSpan, Delimiter, AttrAnnotatedTokenStream),
+pub enum AttrTokenTree {
+ Token(Token, Spacing),
+ Delimited(DelimSpan, Delimiter, AttrTokenStream),
/// Stores the attributes for an attribute target,
/// along with the tokens for that attribute target.
/// See `AttributesData` for more information
Attributes(AttributesData),
}
-impl AttrAnnotatedTokenStream {
- pub fn new(tokens: Vec<(AttrAnnotatedTokenTree, Spacing)>) -> AttrAnnotatedTokenStream {
- AttrAnnotatedTokenStream(Lrc::new(tokens))
+impl AttrTokenStream {
+ pub fn new(tokens: Vec<AttrTokenTree>) -> AttrTokenStream {
+ AttrTokenStream(Lrc::new(tokens))
}
- /// Converts this `AttrAnnotatedTokenStream` to a plain `TokenStream
- /// During conversion, `AttrAnnotatedTokenTree::Attributes` get 'flattened'
+ /// Converts this `AttrTokenStream` to a plain `TokenStream`.
+ /// During conversion, `AttrTokenTree::Attributes` get 'flattened'
/// back to a `TokenStream` of the form `outer_attr attr_target`.
/// If there are inner attributes, they are inserted into the proper
/// place in the attribute target tokens.
@@ -204,31 +200,27 @@ impl AttrAnnotatedTokenStream {
let trees: Vec<_> = self
.0
.iter()
- .flat_map(|tree| match &tree.0 {
- AttrAnnotatedTokenTree::Token(inner) => {
- smallvec![TokenTree::Token(inner.clone(), tree.1)].into_iter()
+ .flat_map(|tree| match &tree {
+ AttrTokenTree::Token(inner, spacing) => {
+ smallvec![TokenTree::Token(inner.clone(), *spacing)].into_iter()
}
- AttrAnnotatedTokenTree::Delimited(span, delim, stream) => {
+ AttrTokenTree::Delimited(span, delim, stream) => {
smallvec![TokenTree::Delimited(*span, *delim, stream.to_tokenstream()),]
.into_iter()
}
- AttrAnnotatedTokenTree::Attributes(data) => {
+ AttrTokenTree::Attributes(data) => {
let mut outer_attrs = Vec::new();
let mut inner_attrs = Vec::new();
for attr in &data.attrs {
match attr.style {
- crate::AttrStyle::Outer => {
- outer_attrs.push(attr);
- }
- crate::AttrStyle::Inner => {
- inner_attrs.push(attr);
- }
+ crate::AttrStyle::Outer => outer_attrs.push(attr),
+ crate::AttrStyle::Inner => inner_attrs.push(attr),
}
}
let mut target_tokens: Vec<_> = data
.tokens
- .create_token_stream()
+ .to_attr_token_stream()
.to_tokenstream()
.0
.iter()
@@ -239,9 +231,9 @@ impl AttrAnnotatedTokenStream {
// Check the last two trees (to account for a trailing semi)
for tree in target_tokens.iter_mut().rev().take(2) {
if let TokenTree::Delimited(span, delim, delim_tokens) = tree {
- // Inner attributes are only supported on extern blocks, functions, impls,
- // and modules. All of these have their inner attributes placed at
- // the beginning of the rightmost outermost braced group:
+ // Inner attributes are only supported on extern blocks, functions,
+ // impls, and modules. All of these have their inner attributes
+ // placed at the beginning of the rightmost outermost braced group:
// e.g. fn foo() { #![my_attr] }
//
// Therefore, we can insert them back into the right location
@@ -253,12 +245,12 @@ impl AttrAnnotatedTokenStream {
// properly implemented - we always synthesize fake tokens,
// so we never reach this code.
- let mut builder = TokenStreamBuilder::new();
+ let mut stream = TokenStream::default();
for inner_attr in inner_attrs {
- builder.push(inner_attr.tokens().to_tokenstream());
+ stream.push_stream(inner_attr.tokens());
}
- builder.push(delim_tokens.clone());
- *tree = TokenTree::Delimited(*span, *delim, builder.build());
+ stream.push_stream(delim_tokens.clone());
+ *tree = TokenTree::Delimited(*span, *delim, stream);
found = true;
break;
}
@@ -273,7 +265,7 @@ impl AttrAnnotatedTokenStream {
let mut flat: SmallVec<[_; 1]> = SmallVec::new();
for attr in outer_attrs {
// FIXME: Make this more efficient
- flat.extend(attr.tokens().to_tokenstream().0.clone().iter().cloned());
+ flat.extend(attr.tokens().0.clone().iter().cloned());
}
flat.extend(target_tokens);
flat.into_iter()
@@ -300,7 +292,7 @@ pub struct AttributesData {
pub attrs: AttrVec,
/// The underlying tokens for the attribute target that `attrs`
/// are applied to
- pub tokens: LazyTokenStream,
+ pub tokens: LazyAttrTokenStream,
}
/// A `TokenStream` is an abstract sequence of tokens, organized into [`TokenTree`]s.
@@ -312,13 +304,20 @@ pub struct AttributesData {
#[derive(Clone, Debug, Default, Encodable, Decodable)]
pub struct TokenStream(pub(crate) Lrc<Vec<TokenTree>>);
-// `TokenStream` is used a lot. Make sure it doesn't unintentionally get bigger.
-#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
-rustc_data_structures::static_assert_size!(TokenStream, 8);
-
+/// Similar to `proc_macro::Spacing`, but for tokens.
+///
+/// Note that all `ast::TokenTree::Token` instances have a `Spacing`, but when
+/// we convert to `proc_macro::TokenTree` for proc macros, only `Punct`
+/// `TokenTree`s have a `proc_macro::Spacing`.
#[derive(Clone, Copy, Debug, PartialEq, Encodable, Decodable, HashStable_Generic)]
pub enum Spacing {
+ /// The token is not immediately followed by an operator token (as
+ /// determined by `Token::is_op`). E.g. a `+` token is `Alone` in `+ =`,
+ /// `+/*foo*/=`, `+ident`, and `+()`.
Alone,
+
+ /// The token is immediately followed by an operator token. E.g. a `+`
+ /// token is `Joint` in `+=` and `++`.
Joint,
}
@@ -363,12 +362,6 @@ impl TokenStream {
}
}
-impl From<(AttrAnnotatedTokenTree, Spacing)> for AttrAnnotatedTokenStream {
- fn from((tree, spacing): (AttrAnnotatedTokenTree, Spacing)) -> AttrAnnotatedTokenStream {
- AttrAnnotatedTokenStream::new(vec![(tree, spacing)])
- }
-}
-
impl iter::FromIterator<TokenTree> for TokenStream {
fn from_iter<I: IntoIterator<Item = TokenTree>>(iter: I) -> Self {
TokenStream::new(iter.into_iter().collect::<Vec<TokenTree>>())
@@ -420,21 +413,6 @@ impl TokenStream {
TokenStream(Lrc::new(self.0.iter().enumerate().map(|(i, tree)| f(i, tree)).collect()))
}
- fn opt_from_ast(node: &(impl HasAttrs + HasTokens)) -> Option<TokenStream> {
- let tokens = node.tokens()?;
- let attrs = node.attrs();
- let attr_annotated = if attrs.is_empty() {
- tokens.create_token_stream()
- } else {
- let attr_data = AttributesData { attrs: attrs.to_vec().into(), tokens: tokens.clone() };
- AttrAnnotatedTokenStream::new(vec![(
- AttrAnnotatedTokenTree::Attributes(attr_data),
- Spacing::Alone,
- )])
- };
- Some(attr_annotated.to_tokenstream())
- }
-
// Create a token stream containing a single token with alone spacing.
pub fn token_alone(kind: TokenKind, span: Span) -> TokenStream {
TokenStream::new(vec![TokenTree::token_alone(kind, span)])
@@ -451,8 +429,18 @@ impl TokenStream {
}
pub fn from_ast(node: &(impl HasAttrs + HasSpan + HasTokens + fmt::Debug)) -> TokenStream {
- TokenStream::opt_from_ast(node)
- .unwrap_or_else(|| panic!("missing tokens for node at {:?}: {:?}", node.span(), node))
+ let Some(tokens) = node.tokens() else {
+ panic!("missing tokens for node at {:?}: {:?}", node.span(), node);
+ };
+ let attrs = node.attrs();
+ let attr_stream = if attrs.is_empty() {
+ tokens.to_attr_token_stream()
+ } else {
+ let attr_data =
+ AttributesData { attrs: attrs.iter().cloned().collect(), tokens: tokens.clone() };
+ AttrTokenStream::new(vec![AttrTokenTree::Attributes(attr_data)])
+ };
+ attr_stream.to_tokenstream()
}
pub fn from_nonterminal_ast(nt: &Nonterminal) -> TokenStream {
@@ -517,76 +505,49 @@ impl TokenStream {
self.trees().map(|tree| TokenStream::flatten_token_tree(tree)).collect()
}
-}
-// 99.5%+ of the time we have 1 or 2 elements in this vector.
-#[derive(Clone)]
-pub struct TokenStreamBuilder(SmallVec<[TokenStream; 2]>);
-
-impl TokenStreamBuilder {
- pub fn new() -> TokenStreamBuilder {
- TokenStreamBuilder(SmallVec::new())
- }
-
- pub fn push(&mut self, stream: TokenStream) {
- self.0.push(stream);
- }
-
- pub fn build(self) -> TokenStream {
- let mut streams = self.0;
- match streams.len() {
- 0 => TokenStream::default(),
- 1 => streams.pop().unwrap(),
- _ => {
- // We will extend the first stream in `streams` with the
- // elements from the subsequent streams. This requires using
- // `make_mut()` on the first stream, and in practice this
- // doesn't cause cloning 99.9% of the time.
- //
- // One very common use case is when `streams` has two elements,
- // where the first stream has any number of elements within
- // (often 1, but sometimes many more) and the second stream has
- // a single element within.
-
- // Determine how much the first stream will be extended.
- // Needed to avoid quadratic blow up from on-the-fly
- // reallocations (#57735).
- let num_appends = streams.iter().skip(1).map(|ts| ts.len()).sum();
-
- // Get the first stream, which will become the result stream.
- // If it's `None`, create an empty stream.
- let mut iter = streams.drain(..);
- let mut res_stream_lrc = iter.next().unwrap().0;
-
- // Append the subsequent elements to the result stream, after
- // reserving space for them.
- let res_vec_mut = Lrc::make_mut(&mut res_stream_lrc);
- res_vec_mut.reserve(num_appends);
- for stream in iter {
- let stream_iter = stream.0.iter().cloned();
-
- // If (a) `res_mut_vec` is not empty and the last tree
- // within it is a token tree marked with `Joint`, and (b)
- // `stream` is not empty and the first tree within it is a
- // token tree, and (c) the two tokens can be glued
- // together...
- if let Some(TokenTree::Token(last_tok, Spacing::Joint)) = res_vec_mut.last()
- && let Some(TokenTree::Token(tok, spacing)) = stream.0.first()
- && let Some(glued_tok) = last_tok.glue(&tok)
- {
- // ...then overwrite the last token tree in
- // `res_vec_mut` with the glued token, and skip the
- // first token tree from `stream`.
- *res_vec_mut.last_mut().unwrap() = TokenTree::Token(glued_tok, *spacing);
- res_vec_mut.extend(stream_iter.skip(1));
- } else {
- // Append all of `stream`.
- res_vec_mut.extend(stream_iter);
- }
- }
+ // If `vec` is not empty, try to glue `tt` onto its last token. The return
+ // value indicates if gluing took place.
+ fn try_glue_to_last(vec: &mut Vec<TokenTree>, tt: &TokenTree) -> bool {
+ if let Some(TokenTree::Token(last_tok, Spacing::Joint)) = vec.last()
+ && let TokenTree::Token(tok, spacing) = tt
+ && let Some(glued_tok) = last_tok.glue(&tok)
+ {
+ // ...then overwrite the last token tree in `vec` with the glued
+ // token; the caller then drops the tree that was glued in.
+ *vec.last_mut().unwrap() = TokenTree::Token(glued_tok, *spacing);
+ true
+ } else {
+ false
+ }
+ }
- TokenStream(res_stream_lrc)
- }
+ // Push `tt` onto the end of the stream, possibly gluing it to the last
+ // token. Uses `make_mut` to maximize efficiency.
+ pub fn push_tree(&mut self, tt: TokenTree) {
+ let vec_mut = Lrc::make_mut(&mut self.0);
+
+ if Self::try_glue_to_last(vec_mut, &tt) {
+ // nothing else to do
+ } else {
+ vec_mut.push(tt);
+ }
+ }
+
+ // Push `stream` onto the end of the stream, possibly gluing the first
+ // token tree to the last token. (No other token trees will be glued.)
+ // Uses `make_mut` to maximize efficiency.
+ pub fn push_stream(&mut self, stream: TokenStream) {
+ let vec_mut = Lrc::make_mut(&mut self.0);
+
+ let stream_iter = stream.0.iter().cloned();
+
+ if let Some(first) = stream.0.first() && Self::try_glue_to_last(vec_mut, first) {
+ // Now skip the first token tree from `stream`.
+ vec_mut.extend(stream_iter.skip(1));
+ } else {
+ // Append all of `stream`.
+ vec_mut.extend(stream_iter);
}
}
}
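
A sketch of the new `push_tree`/`push_stream` gluing behaviour, assuming the compiler-internal `rustc_ast` and `rustc_span` crates plus the `TokenTree::token_joint`/`token_alone` constructors from the same refactor: pushing a `+` marked `Joint` and then an `=` leaves a single `+=` token in the stream.

    use rustc_ast::token;
    use rustc_ast::tokenstream::{TokenStream, TokenTree};
    use rustc_span::DUMMY_SP;

    fn plus_eq() -> TokenStream {
        let mut stream = TokenStream::default();
        // `+` with Joint spacing: eligible for gluing with whatever follows.
        stream.push_tree(TokenTree::token_joint(token::BinOp(token::Plus), DUMMY_SP));
        // `push_tree` glues this `=` onto the previous token, yielding `+=`.
        stream.push_tree(TokenTree::token_alone(token::Eq, DUMMY_SP));
        stream
    }
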
@@ -679,3 +640,17 @@ impl DelimSpan {
self.open.with_hi(self.close.hi())
}
}
+
+// Some types are used a lot. Make sure they don't unintentionally get bigger.
+#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
+mod size_asserts {
+ use super::*;
+ use rustc_data_structures::static_assert_size;
+ // tidy-alphabetical-start
+ static_assert_size!(AttrTokenStream, 8);
+ static_assert_size!(AttrTokenTree, 32);
+ static_assert_size!(LazyAttrTokenStream, 8);
+ static_assert_size!(TokenStream, 8);
+ static_assert_size!(TokenTree, 32);
+ // tidy-alphabetical-end
+}
diff --git a/compiler/rustc_ast/src/util/literal.rs b/compiler/rustc_ast/src/util/literal.rs
index 9c18f55c0..536b38560 100644
--- a/compiler/rustc_ast/src/util/literal.rs
+++ b/compiler/rustc_ast/src/util/literal.rs
@@ -9,7 +9,6 @@ use rustc_span::symbol::{kw, sym, Symbol};
use rustc_span::Span;
use std::ascii;
-use tracing::debug;
pub enum LitError {
NotLiteral,
@@ -23,7 +22,7 @@ pub enum LitError {
impl LitKind {
/// Converts literal token into a semantic literal.
- pub fn from_lit_token(lit: token::Lit) -> Result<LitKind, LitError> {
+ pub fn from_token_lit(lit: token::Lit) -> Result<LitKind, LitError> {
let token::Lit { kind, symbol, suffix } = lit;
if suffix.is_some() && !kind.may_have_suffix() {
return Err(LitError::InvalidSuffix);
@@ -146,14 +145,14 @@ impl LitKind {
LitKind::ByteStr(bytes.into())
}
- token::Err => LitKind::Err(symbol),
+ token::Err => LitKind::Err,
})
}
/// Attempts to recover a token from semantic literal.
/// This function is used when the original token doesn't exist (e.g. the literal is created
/// by an AST-based macro) or unavailable (e.g. from HIR pretty-printing).
- pub fn to_lit_token(&self) -> token::Lit {
+ pub fn to_token_lit(&self) -> token::Lit {
let (kind, symbol, suffix) = match *self {
LitKind::Str(symbol, ast::StrStyle::Cooked) => {
// Don't re-intern unless the escaped string is different.
@@ -164,12 +163,7 @@ impl LitKind {
}
LitKind::Str(symbol, ast::StrStyle::Raw(n)) => (token::StrRaw(n), symbol, None),
LitKind::ByteStr(ref bytes) => {
- let string = bytes
- .iter()
- .cloned()
- .flat_map(ascii::escape_default)
- .map(Into::<char>::into)
- .collect::<String>();
+ let string = bytes.escape_ascii().to_string();
(token::ByteStr, Symbol::intern(&string), None)
}
LitKind::Byte(byte) => {
@@ -199,7 +193,9 @@ impl LitKind {
let symbol = if value { kw::True } else { kw::False };
(token::Bool, symbol, None)
}
- LitKind::Err(symbol) => (token::Err, symbol, None),
+ // This only shows up in places like `-Zunpretty=hir` output, so we
+ // don't bother to produce something useful.
+ LitKind::Err => (token::Err, Symbol::intern("<bad-literal>"), None),
};
token::Lit::new(kind, symbol, suffix)
@@ -208,8 +204,8 @@ impl LitKind {
impl Lit {
/// Converts literal token into an AST literal.
- pub fn from_lit_token(token: token::Lit, span: Span) -> Result<Lit, LitError> {
- Ok(Lit { token, kind: LitKind::from_lit_token(token)?, span })
+ pub fn from_token_lit(token_lit: token::Lit, span: Span) -> Result<Lit, LitError> {
+ Ok(Lit { token_lit, kind: LitKind::from_token_lit(token_lit)?, span })
}
/// Converts arbitrary token into an AST literal.
@@ -232,21 +228,21 @@ impl Lit {
_ => return Err(LitError::NotLiteral),
};
- Lit::from_lit_token(lit, token.span)
+ Lit::from_token_lit(lit, token.span)
}
/// Attempts to recover an AST literal from semantic literal.
/// This function is used when the original token doesn't exist (e.g. the literal is created
/// by an AST-based macro) or unavailable (e.g. from HIR pretty-printing).
pub fn from_lit_kind(kind: LitKind, span: Span) -> Lit {
- Lit { token: kind.to_lit_token(), kind, span }
+ Lit { token_lit: kind.to_token_lit(), kind, span }
}
/// Losslessly convert an AST literal into a token.
pub fn to_token(&self) -> Token {
- let kind = match self.token.kind {
- token::Bool => token::Ident(self.token.symbol, false),
- _ => token::Literal(self.token),
+ let kind = match self.token_lit.kind {
+ token::Bool => token::Ident(self.token_lit.symbol, false),
+ _ => token::Literal(self.token_lit),
};
Token::new(kind, self.span)
}
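
The `escape_ascii` change in `to_token_lit` above swaps a hand-rolled escaping chain for the std helper; a small standalone check (plain std, not rustc code) that the two spellings agree:

    fn main() {
        let bytes = b"a\0\xff";
        let new_way = bytes.escape_ascii().to_string();
        let old_way: String = bytes
            .iter()
            .cloned()
            .flat_map(std::ascii::escape_default)
            .map(char::from)
            .collect();
        assert_eq!(new_way, old_way);
        println!("{new_way}"); // a\x00\xff
    }
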
diff --git a/compiler/rustc_ast/src/util/parser.rs b/compiler/rustc_ast/src/util/parser.rs
index 74b7fe9e2..b40ad6f70 100644
--- a/compiler/rustc_ast/src/util/parser.rs
+++ b/compiler/rustc_ast/src/util/parser.rs
@@ -297,11 +297,11 @@ impl ExprPrecedence {
match self {
ExprPrecedence::Closure => PREC_CLOSURE,
- ExprPrecedence::Break |
- ExprPrecedence::Continue |
- ExprPrecedence::Ret |
- ExprPrecedence::Yield |
- ExprPrecedence::Yeet => PREC_JUMP,
+ ExprPrecedence::Break
+ | ExprPrecedence::Continue
+ | ExprPrecedence::Ret
+ | ExprPrecedence::Yield
+ | ExprPrecedence::Yeet => PREC_JUMP,
// `Range` claims to have higher precedence than `Assign`, but `x .. x = x` fails to
// parse, instead of parsing as `(x .. x) = x`. Giving `Range` a lower precedence
@@ -318,43 +318,43 @@ impl ExprPrecedence {
ExprPrecedence::AssignOp => AssocOp::Assign.precedence() as i8,
// Unary, prefix
- ExprPrecedence::Box |
- ExprPrecedence::AddrOf |
+ ExprPrecedence::Box
+ | ExprPrecedence::AddrOf
// Here `let pats = expr` has `let pats =` as a "unary" prefix of `expr`.
// However, this is not exactly right. When `let _ = a` is the LHS of a binop we
// need parens sometimes. E.g. we can print `(let _ = a) && b` as `let _ = a && b`
// but we need to print `(let _ = a) < b` as-is with parens.
- ExprPrecedence::Let |
- ExprPrecedence::Unary => PREC_PREFIX,
+ | ExprPrecedence::Let
+ | ExprPrecedence::Unary => PREC_PREFIX,
// Unary, postfix
- ExprPrecedence::Await |
- ExprPrecedence::Call |
- ExprPrecedence::MethodCall |
- ExprPrecedence::Field |
- ExprPrecedence::Index |
- ExprPrecedence::Try |
- ExprPrecedence::InlineAsm |
- ExprPrecedence::Mac => PREC_POSTFIX,
+ ExprPrecedence::Await
+ | ExprPrecedence::Call
+ | ExprPrecedence::MethodCall
+ | ExprPrecedence::Field
+ | ExprPrecedence::Index
+ | ExprPrecedence::Try
+ | ExprPrecedence::InlineAsm
+ | ExprPrecedence::Mac => PREC_POSTFIX,
// Never need parens
- ExprPrecedence::Array |
- ExprPrecedence::Repeat |
- ExprPrecedence::Tup |
- ExprPrecedence::Lit |
- ExprPrecedence::Path |
- ExprPrecedence::Paren |
- ExprPrecedence::If |
- ExprPrecedence::While |
- ExprPrecedence::ForLoop |
- ExprPrecedence::Loop |
- ExprPrecedence::Match |
- ExprPrecedence::ConstBlock |
- ExprPrecedence::Block |
- ExprPrecedence::TryBlock |
- ExprPrecedence::Async |
- ExprPrecedence::Struct |
- ExprPrecedence::Err => PREC_PAREN,
+ ExprPrecedence::Array
+ | ExprPrecedence::Repeat
+ | ExprPrecedence::Tup
+ | ExprPrecedence::Lit
+ | ExprPrecedence::Path
+ | ExprPrecedence::Paren
+ | ExprPrecedence::If
+ | ExprPrecedence::While
+ | ExprPrecedence::ForLoop
+ | ExprPrecedence::Loop
+ | ExprPrecedence::Match
+ | ExprPrecedence::ConstBlock
+ | ExprPrecedence::Block
+ | ExprPrecedence::TryBlock
+ | ExprPrecedence::Async
+ | ExprPrecedence::Struct
+ | ExprPrecedence::Err => PREC_PAREN,
}
}
}
@@ -396,9 +396,9 @@ pub fn contains_exterior_struct_lit(value: &ast::Expr) -> bool {
contains_exterior_struct_lit(&x)
}
- ast::ExprKind::MethodCall(.., ref exprs, _) => {
+ ast::ExprKind::MethodCall(_, ref receiver, _, _) => {
// X { y: 1 }.bar(...)
- contains_exterior_struct_lit(&exprs[0])
+ contains_exterior_struct_lit(&receiver)
}
_ => false,
diff --git a/compiler/rustc_ast/src/visit.rs b/compiler/rustc_ast/src/visit.rs
index d9594b323..6f56c1ef0 100644
--- a/compiler/rustc_ast/src/visit.rs
+++ b/compiler/rustc_ast/src/visit.rs
@@ -140,6 +140,11 @@ pub trait Visitor<'ast>: Sized {
fn visit_expr(&mut self, ex: &'ast Expr) {
walk_expr(self, ex)
}
+ /// This method is a hack to work around the unstable `stmt_expr_attributes` feature.
+ /// It can be removed once that feature is stabilized.
+ fn visit_method_receiver_expr(&mut self, ex: &'ast Expr) {
+ self.visit_expr(ex)
+ }
fn visit_expr_post(&mut self, _ex: &'ast Expr) {}
fn visit_ty(&mut self, t: &'ast Ty) {
walk_ty(self, t)
@@ -156,8 +161,8 @@ pub trait Visitor<'ast>: Sized {
fn visit_where_predicate(&mut self, p: &'ast WherePredicate) {
walk_where_predicate(self, p)
}
- fn visit_fn(&mut self, fk: FnKind<'ast>, s: Span, _: NodeId) {
- walk_fn(self, fk, s)
+ fn visit_fn(&mut self, fk: FnKind<'ast>, _: Span, _: NodeId) {
+ walk_fn(self, fk)
}
fn visit_assoc_item(&mut self, i: &'ast AssocItem, ctxt: AssocCtxt) {
walk_assoc_item(self, i, ctxt)
@@ -168,8 +173,8 @@ pub trait Visitor<'ast>: Sized {
fn visit_param_bound(&mut self, bounds: &'ast GenericBound, _ctxt: BoundKind) {
walk_param_bound(self, bounds)
}
- fn visit_poly_trait_ref(&mut self, t: &'ast PolyTraitRef, m: &'ast TraitBoundModifier) {
- walk_poly_trait_ref(self, t, m)
+ fn visit_poly_trait_ref(&mut self, t: &'ast PolyTraitRef) {
+ walk_poly_trait_ref(self, t)
}
fn visit_variant_data(&mut self, s: &'ast VariantData) {
walk_struct_def(self, s)
@@ -177,14 +182,8 @@ pub trait Visitor<'ast>: Sized {
fn visit_field_def(&mut self, s: &'ast FieldDef) {
walk_field_def(self, s)
}
- fn visit_enum_def(
- &mut self,
- enum_definition: &'ast EnumDef,
- generics: &'ast Generics,
- item_id: NodeId,
- _: Span,
- ) {
- walk_enum_def(self, enum_definition, generics, item_id)
+ fn visit_enum_def(&mut self, enum_definition: &'ast EnumDef) {
+ walk_enum_def(self, enum_definition)
}
fn visit_variant(&mut self, v: &'ast Variant) {
walk_variant(self, v)
@@ -207,11 +206,11 @@ pub trait Visitor<'ast>: Sized {
fn visit_use_tree(&mut self, use_tree: &'ast UseTree, id: NodeId, _nested: bool) {
walk_use_tree(self, use_tree, id)
}
- fn visit_path_segment(&mut self, path_span: Span, path_segment: &'ast PathSegment) {
- walk_path_segment(self, path_span, path_segment)
+ fn visit_path_segment(&mut self, path_segment: &'ast PathSegment) {
+ walk_path_segment(self, path_segment)
}
- fn visit_generic_args(&mut self, path_span: Span, generic_args: &'ast GenericArgs) {
- walk_generic_args(self, path_span, generic_args)
+ fn visit_generic_args(&mut self, generic_args: &'ast GenericArgs) {
+ walk_generic_args(self, generic_args)
}
fn visit_generic_arg(&mut self, generic_arg: &'ast GenericArg) {
walk_generic_arg(self, generic_arg)
@@ -250,14 +249,12 @@ pub trait Visitor<'ast>: Sized {
#[macro_export]
macro_rules! walk_list {
- ($visitor: expr, $method: ident, $list: expr) => {
- for elem in $list {
- $visitor.$method(elem)
- }
- };
- ($visitor: expr, $method: ident, $list: expr, $($extra_args: expr),*) => {
- for elem in $list {
- $visitor.$method(elem, $($extra_args,)*)
+ ($visitor: expr, $method: ident, $list: expr $(, $($extra_args: expr),* )?) => {
+ {
+ #[cfg_attr(not(bootstrap), allow(for_loops_over_fallibles))]
+ for elem in $list {
+ $visitor.$method(elem $(, $($extra_args,)* )?)
+ }
}
}
}
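
A standalone analogue (hypothetical `call_each!`, not the rustc macro) of the `walk_list!` change above: a single arm with an optional trailing group now covers both the plain call and the call with extra arguments.

    macro_rules! call_each {
        ($f: expr, $list: expr $(, $($extra: expr),* )?) => {
            for elem in $list {
                $f(elem $(, $($extra,)* )?)
            }
        };
    }

    fn main() {
        let xs = [1, 2, 3];
        call_each!(|x: &i32| println!("{x}"), &xs);
        call_each!(|x: &i32, scale: i32| println!("{}", x * scale), &xs, 10);
    }
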
@@ -287,11 +284,8 @@ pub fn walk_lifetime<'a, V: Visitor<'a>>(visitor: &mut V, lifetime: &'a Lifetime
visitor.visit_ident(lifetime.ident);
}
-pub fn walk_poly_trait_ref<'a, V>(
- visitor: &mut V,
- trait_ref: &'a PolyTraitRef,
- _: &TraitBoundModifier,
-) where
+pub fn walk_poly_trait_ref<'a, V>(visitor: &mut V, trait_ref: &'a PolyTraitRef)
+where
V: Visitor<'a>,
{
walk_list!(visitor, visit_generic_param, &trait_ref.bound_generic_params);
@@ -334,7 +328,7 @@ pub fn walk_item<'a, V: Visitor<'a>>(visitor: &mut V, item: &'a Item) {
}
ItemKind::Enum(ref enum_definition, ref generics) => {
visitor.visit_generics(generics);
- visitor.visit_enum_def(enum_definition, generics, item.id, item.span)
+ visitor.visit_enum_def(enum_definition)
}
ItemKind::Impl(box Impl {
defaultness: _,
@@ -377,12 +371,7 @@ pub fn walk_item<'a, V: Visitor<'a>>(visitor: &mut V, item: &'a Item) {
walk_list!(visitor, visit_attribute, &item.attrs);
}
-pub fn walk_enum_def<'a, V: Visitor<'a>>(
- visitor: &mut V,
- enum_definition: &'a EnumDef,
- _: &'a Generics,
- _: NodeId,
-) {
+pub fn walk_enum_def<'a, V: Visitor<'a>>(visitor: &mut V, enum_definition: &'a EnumDef) {
walk_list!(visitor, visit_variant, &enum_definition.variants);
}
@@ -449,7 +438,7 @@ pub fn walk_ty<'a, V: Visitor<'a>>(visitor: &mut V, typ: &'a Ty) {
pub fn walk_path<'a, V: Visitor<'a>>(visitor: &mut V, path: &'a Path) {
for segment in &path.segments {
- visitor.visit_path_segment(path.span, segment);
+ visitor.visit_path_segment(segment);
}
}
@@ -471,18 +460,14 @@ pub fn walk_use_tree<'a, V: Visitor<'a>>(visitor: &mut V, use_tree: &'a UseTree,
}
}
-pub fn walk_path_segment<'a, V: Visitor<'a>>(
- visitor: &mut V,
- path_span: Span,
- segment: &'a PathSegment,
-) {
+pub fn walk_path_segment<'a, V: Visitor<'a>>(visitor: &mut V, segment: &'a PathSegment) {
visitor.visit_ident(segment.ident);
if let Some(ref args) = segment.args {
- visitor.visit_generic_args(path_span, args);
+ visitor.visit_generic_args(args);
}
}
-pub fn walk_generic_args<'a, V>(visitor: &mut V, _path_span: Span, generic_args: &'a GenericArgs)
+pub fn walk_generic_args<'a, V>(visitor: &mut V, generic_args: &'a GenericArgs)
where
V: Visitor<'a>,
{
@@ -516,7 +501,7 @@ where
pub fn walk_assoc_constraint<'a, V: Visitor<'a>>(visitor: &mut V, constraint: &'a AssocConstraint) {
visitor.visit_ident(constraint.ident);
if let Some(ref gen_args) = constraint.gen_args {
- visitor.visit_generic_args(gen_args.span(), gen_args);
+ visitor.visit_generic_args(gen_args);
}
match constraint.kind {
AssocConstraintKind::Equality { ref term } => match term {
@@ -598,7 +583,7 @@ pub fn walk_foreign_item<'a, V: Visitor<'a>>(visitor: &mut V, item: &'a ForeignI
pub fn walk_param_bound<'a, V: Visitor<'a>>(visitor: &mut V, bound: &'a GenericBound) {
match *bound {
- GenericBound::Trait(ref typ, ref modifier) => visitor.visit_poly_trait_ref(typ, modifier),
+ GenericBound::Trait(ref typ, ref _modifier) => visitor.visit_poly_trait_ref(typ),
GenericBound::Outlives(ref lifetime) => {
visitor.visit_lifetime(lifetime, LifetimeCtxt::Bound)
}
@@ -673,7 +658,7 @@ pub fn walk_fn_decl<'a, V: Visitor<'a>>(visitor: &mut V, function_declaration: &
visitor.visit_fn_ret_ty(&function_declaration.output);
}
-pub fn walk_fn<'a, V: Visitor<'a>>(visitor: &mut V, kind: FnKind<'a>, _span: Span) {
+pub fn walk_fn<'a, V: Visitor<'a>>(visitor: &mut V, kind: FnKind<'a>) {
match kind {
FnKind::Fn(_, _, sig, _, generics, body) => {
visitor.visit_generics(generics);
@@ -703,7 +688,7 @@ pub fn walk_assoc_item<'a, V: Visitor<'a>>(visitor: &mut V, item: &'a AssocItem,
let kind = FnKind::Fn(FnCtxt::Assoc(ctxt), ident, sig, vis, generics, body.as_deref());
visitor.visit_fn(kind, span, id);
}
- AssocItemKind::TyAlias(box TyAlias { generics, bounds, ty, .. }) => {
+ AssocItemKind::Type(box TyAlias { generics, bounds, ty, .. }) => {
visitor.visit_generics(generics);
walk_list!(visitor, visit_param_bound, bounds, BoundKind::Bound);
walk_list!(visitor, visit_ty, ty);
@@ -813,8 +798,9 @@ pub fn walk_expr<'a, V: Visitor<'a>>(visitor: &mut V, expression: &'a Expr) {
visitor.visit_expr(callee_expression);
walk_list!(visitor, visit_expr, arguments);
}
- ExprKind::MethodCall(ref segment, ref arguments, _span) => {
- visitor.visit_path_segment(expression.span, segment);
+ ExprKind::MethodCall(ref segment, ref receiver, ref arguments, _span) => {
+ visitor.visit_path_segment(segment);
+ visitor.visit_expr(receiver);
walk_list!(visitor, visit_expr, arguments);
}
ExprKind::Binary(_, ref left_expression, ref right_expression) => {
@@ -935,14 +921,14 @@ pub fn walk_arm<'a, V: Visitor<'a>>(visitor: &mut V, arm: &'a Arm) {
}
pub fn walk_vis<'a, V: Visitor<'a>>(visitor: &mut V, vis: &'a Visibility) {
- if let VisibilityKind::Restricted { ref path, id } = vis.kind {
+ if let VisibilityKind::Restricted { ref path, id, shorthand: _ } = vis.kind {
visitor.visit_path(path, id);
}
}
pub fn walk_attribute<'a, V: Visitor<'a>>(visitor: &mut V, attr: &'a Attribute) {
match attr.kind {
- AttrKind::Normal(ref item, ref _tokens) => walk_mac_args(visitor, &item.args),
+ AttrKind::Normal(ref normal) => walk_mac_args(visitor, &normal.item.args),
AttrKind::DocComment(..) => {}
}
}
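
To show the `visit_method_receiver_expr` hook added earlier in this file, here is a hedged sketch of a read-only visitor that counts method receivers (the `a` in `a.b(c)`); it assumes the compiler-internal `rustc_ast` crate and uses only `Visitor` methods visible in this diff.

    use rustc_ast::visit::{self, Visitor};
    use rustc_ast::Expr;

    #[derive(Default)]
    struct ReceiverCounter {
        receivers: usize,
    }

    impl<'ast> Visitor<'ast> for ReceiverCounter {
        fn visit_method_receiver_expr(&mut self, ex: &'ast Expr) {
            self.receivers += 1;
            // Keep walking so nested receivers are counted too.
            visit::walk_expr(self, ex);
        }
    }
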
diff --git a/compiler/rustc_ast_lowering/Cargo.toml b/compiler/rustc_ast_lowering/Cargo.toml
index 39ba62ef2..ce1c8d499 100644
--- a/compiler/rustc_ast_lowering/Cargo.toml
+++ b/compiler/rustc_ast_lowering/Cargo.toml
@@ -8,16 +8,18 @@ doctest = false
[dependencies]
rustc_arena = { path = "../rustc_arena" }
-tracing = "0.1"
+rustc_ast = { path = "../rustc_ast" }
rustc_ast_pretty = { path = "../rustc_ast_pretty" }
-rustc_hir = { path = "../rustc_hir" }
-rustc_target = { path = "../rustc_target" }
rustc_data_structures = { path = "../rustc_data_structures" }
+rustc_errors = { path = "../rustc_errors" }
+rustc_hir = { path = "../rustc_hir" }
rustc_index = { path = "../rustc_index" }
rustc_middle = { path = "../rustc_middle" }
+rustc_macros = { path = "../rustc_macros" }
rustc_query_system = { path = "../rustc_query_system" }
-rustc_span = { path = "../rustc_span" }
-rustc_errors = { path = "../rustc_errors" }
rustc_session = { path = "../rustc_session" }
-rustc_ast = { path = "../rustc_ast" }
+rustc_span = { path = "../rustc_span" }
+rustc_target = { path = "../rustc_target" }
smallvec = { version = "1.8.1", features = ["union", "may_dangle"] }
+thin-vec = "0.2.8"
+tracing = "0.1"
diff --git a/compiler/rustc_ast_lowering/src/asm.rs b/compiler/rustc_ast_lowering/src/asm.rs
index 4166b4fc2..450cdf246 100644
--- a/compiler/rustc_ast_lowering/src/asm.rs
+++ b/compiler/rustc_ast_lowering/src/asm.rs
@@ -1,11 +1,17 @@
use crate::{ImplTraitContext, ImplTraitPosition, ParamMode, ResolverAstLoweringExt};
+use super::errors::{
+ AbiSpecifiedMultipleTimes, AttSyntaxOnlyX86, ClobberAbiNotSupported,
+ InlineAsmUnsupportedTarget, InvalidAbiClobberAbi, InvalidAsmTemplateModifierConst,
+ InvalidAsmTemplateModifierRegClass, InvalidAsmTemplateModifierRegClassSub,
+ InvalidAsmTemplateModifierSym, InvalidRegister, InvalidRegisterClass, RegisterClassOnlyClobber,
+ RegisterConflict,
+};
use super::LoweringContext;
use rustc_ast::ptr::P;
use rustc_ast::*;
use rustc_data_structures::fx::{FxHashMap, FxHashSet};
-use rustc_errors::struct_span_err;
use rustc_hir as hir;
use rustc_hir::def::{DefKind, Res};
use rustc_hir::definitions::DefPathData;
@@ -26,13 +32,7 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
let asm_arch =
if self.tcx.sess.opts.actually_rustdoc { None } else { self.tcx.sess.asm_arch };
if asm_arch.is_none() && !self.tcx.sess.opts.actually_rustdoc {
- struct_span_err!(
- self.tcx.sess,
- sp,
- E0472,
- "inline assembly is unsupported on this target"
- )
- .emit();
+ self.tcx.sess.emit_err(InlineAsmUnsupportedTarget { span: sp });
}
if let Some(asm_arch) = asm_arch {
// Inline assembly is currently only stable for these architectures.
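
The `struct_span_err!` call above becomes a typed struct (imported from the new `errors` module) emitted through `emit_err`. What follows is only a hedged sketch of that pattern; the derive name, slug syntax, and field attributes are assumptions and may differ from the real `rustc_ast_lowering/src/errors.rs` added in this patch.

    use rustc_macros::Diagnostic;
    use rustc_span::Span;

    // Sketch only: attribute/derive spellings are assumptions, not copied
    // from errors.rs.
    #[derive(Diagnostic)]
    #[diag(ast_lowering_inline_asm_unsupported_target, code = "E0472")]
    pub struct InlineAsmUnsupportedTarget {
        #[primary_span]
        pub span: Span,
    }
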
@@ -59,10 +59,7 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
&& !matches!(asm_arch, Some(asm::InlineAsmArch::X86 | asm::InlineAsmArch::X86_64))
&& !self.tcx.sess.opts.actually_rustdoc
{
- self.tcx
- .sess
- .struct_span_err(sp, "the `att_syntax` option is only supported on x86")
- .emit();
+ self.tcx.sess.emit_err(AttSyntaxOnlyX86 { span: sp });
}
if asm.options.contains(InlineAsmOptions::MAY_UNWIND) && !self.tcx.features().asm_unwind {
feature_err(
@@ -82,51 +79,37 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
// If the abi was already in the list, emit an error
match clobber_abis.get(&abi) {
Some((prev_name, prev_sp)) => {
- let mut err = self.tcx.sess.struct_span_err(
- *abi_span,
- &format!("`{}` ABI specified multiple times", prev_name),
- );
- err.span_label(*prev_sp, "previously specified here");
-
// Multiple different abi names may actually be the same ABI
// If the specified ABIs are not the same name, alert the user that they resolve to the same ABI
let source_map = self.tcx.sess.source_map();
- if source_map.span_to_snippet(*prev_sp)
- != source_map.span_to_snippet(*abi_span)
- {
- err.note("these ABIs are equivalent on the current target");
- }
+ let equivalent = (source_map.span_to_snippet(*prev_sp)
+ != source_map.span_to_snippet(*abi_span))
+ .then_some(());
- err.emit();
+ self.tcx.sess.emit_err(AbiSpecifiedMultipleTimes {
+ abi_span: *abi_span,
+ prev_name: *prev_name,
+ prev_span: *prev_sp,
+ equivalent,
+ });
}
None => {
- clobber_abis.insert(abi, (abi_name, *abi_span));
+ clobber_abis.insert(abi, (*abi_name, *abi_span));
}
}
}
Err(&[]) => {
- self.tcx
- .sess
- .struct_span_err(
- *abi_span,
- "`clobber_abi` is not supported on this target",
- )
- .emit();
+ self.tcx.sess.emit_err(ClobberAbiNotSupported { abi_span: *abi_span });
}
Err(supported_abis) => {
- let mut err = self
- .tcx
- .sess
- .struct_span_err(*abi_span, "invalid ABI for `clobber_abi`");
let mut abis = format!("`{}`", supported_abis[0]);
for m in &supported_abis[1..] {
let _ = write!(abis, ", `{}`", m);
}
- err.note(&format!(
- "the following ABIs are supported on this target: {}",
- abis
- ));
- err.emit();
+ self.tcx.sess.emit_err(InvalidAbiClobberAbi {
+ abi_span: *abi_span,
+ supported_abis: abis,
+ });
}
}
}
@@ -141,24 +124,28 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
.iter()
.map(|(op, op_sp)| {
let lower_reg = |reg| match reg {
- InlineAsmRegOrRegClass::Reg(s) => {
+ InlineAsmRegOrRegClass::Reg(reg) => {
asm::InlineAsmRegOrRegClass::Reg(if let Some(asm_arch) = asm_arch {
- asm::InlineAsmReg::parse(asm_arch, s).unwrap_or_else(|e| {
- let msg = format!("invalid register `{}`: {}", s, e);
- sess.struct_span_err(*op_sp, &msg).emit();
+ asm::InlineAsmReg::parse(asm_arch, reg).unwrap_or_else(|error| {
+ sess.emit_err(InvalidRegister { op_span: *op_sp, reg, error });
asm::InlineAsmReg::Err
})
} else {
asm::InlineAsmReg::Err
})
}
- InlineAsmRegOrRegClass::RegClass(s) => {
+ InlineAsmRegOrRegClass::RegClass(reg_class) => {
asm::InlineAsmRegOrRegClass::RegClass(if let Some(asm_arch) = asm_arch {
- asm::InlineAsmRegClass::parse(asm_arch, s).unwrap_or_else(|e| {
- let msg = format!("invalid register class `{}`: {}", s, e);
- sess.struct_span_err(*op_sp, &msg).emit();
- asm::InlineAsmRegClass::Err
- })
+ asm::InlineAsmRegClass::parse(asm_arch, reg_class).unwrap_or_else(
+ |error| {
+ sess.emit_err(InvalidRegisterClass {
+ op_span: *op_sp,
+ reg_class,
+ error,
+ });
+ asm::InlineAsmRegClass::Err
+ },
+ )
} else {
asm::InlineAsmRegClass::Err
})
@@ -168,26 +155,26 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
let op = match *op {
InlineAsmOperand::In { reg, ref expr } => hir::InlineAsmOperand::In {
reg: lower_reg(reg),
- expr: self.lower_expr_mut(expr),
+ expr: self.lower_expr(expr),
},
InlineAsmOperand::Out { reg, late, ref expr } => hir::InlineAsmOperand::Out {
reg: lower_reg(reg),
late,
- expr: expr.as_ref().map(|expr| self.lower_expr_mut(expr)),
+ expr: expr.as_ref().map(|expr| self.lower_expr(expr)),
},
InlineAsmOperand::InOut { reg, late, ref expr } => {
hir::InlineAsmOperand::InOut {
reg: lower_reg(reg),
late,
- expr: self.lower_expr_mut(expr),
+ expr: self.lower_expr(expr),
}
}
InlineAsmOperand::SplitInOut { reg, late, ref in_expr, ref out_expr } => {
hir::InlineAsmOperand::SplitInOut {
reg: lower_reg(reg),
late,
- in_expr: self.lower_expr_mut(in_expr),
- out_expr: out_expr.as_ref().map(|expr| self.lower_expr_mut(expr)),
+ in_expr: self.lower_expr(in_expr),
+ out_expr: out_expr.as_ref().map(|expr| self.lower_expr(expr)),
}
}
InlineAsmOperand::Const { ref anon_const } => {
@@ -205,26 +192,13 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
}
}
InlineAsmOperand::Sym { ref sym } => {
- if !self.tcx.features().asm_sym {
- feature_err(
- &sess.parse_sess,
- sym::asm_sym,
- *op_sp,
- "sym operands for inline assembly are unstable",
- )
- .emit();
- }
-
let static_def_id = self
.resolver
.get_partial_res(sym.id)
- .filter(|res| res.unresolved_segments() == 0)
- .and_then(|res| {
- if let Res::Def(DefKind::Static(_), def_id) = res.base_res() {
- Some(def_id)
- } else {
- None
- }
+ .and_then(|res| res.full_res())
+ .and_then(|res| match res {
+ Res::Def(DefKind::Static(_), def_id) => Some(def_id),
+ _ => None,
});
if let Some(def_id) = static_def_id {
@@ -233,7 +207,7 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
&sym.qself,
&sym.path,
ParamMode::Optional,
- ImplTraitContext::Disallowed(ImplTraitPosition::Path),
+ &ImplTraitContext::Disallowed(ImplTraitPosition::Path),
);
hir::InlineAsmOperand::SymStatic { path, def_id }
} else {
@@ -250,7 +224,7 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
// Wrap the expression in an AnonConst.
let parent_def_id = self.current_hir_id_owner;
let node_id = self.next_node_id();
- self.create_def(parent_def_id, node_id, DefPathData::AnonConst);
+ self.create_def(parent_def_id.def_id, node_id, DefPathData::AnonConst);
let anon_const = AnonConst { id: node_id, value: P(expr) };
hir::InlineAsmOperand::SymFn {
anon_const: self.lower_anon_const(&anon_const),
@@ -282,50 +256,39 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
}
let valid_modifiers = class.valid_modifiers(asm_arch.unwrap());
if !valid_modifiers.contains(&modifier) {
- let mut err = sess.struct_span_err(
- placeholder_span,
- "invalid asm template modifier for this register class",
- );
- err.span_label(placeholder_span, "template modifier");
- err.span_label(op_sp, "argument");
- if !valid_modifiers.is_empty() {
+ let sub = if !valid_modifiers.is_empty() {
let mut mods = format!("`{}`", valid_modifiers[0]);
for m in &valid_modifiers[1..] {
let _ = write!(mods, ", `{}`", m);
}
- err.note(&format!(
- "the `{}` register class supports \
- the following template modifiers: {}",
- class.name(),
- mods
- ));
+ InvalidAsmTemplateModifierRegClassSub::SupportModifier {
+ class_name: class.name(),
+ modifiers: mods,
+ }
} else {
- err.note(&format!(
- "the `{}` register class does not support template modifiers",
- class.name()
- ));
- }
- err.emit();
+ InvalidAsmTemplateModifierRegClassSub::DoesNotSupportModifier {
+ class_name: class.name(),
+ }
+ };
+ sess.emit_err(InvalidAsmTemplateModifierRegClass {
+ placeholder_span,
+ op_span: op_sp,
+ sub,
+ });
}
}
hir::InlineAsmOperand::Const { .. } => {
- let mut err = sess.struct_span_err(
+ sess.emit_err(InvalidAsmTemplateModifierConst {
placeholder_span,
- "asm template modifiers are not allowed for `const` arguments",
- );
- err.span_label(placeholder_span, "template modifier");
- err.span_label(op_sp, "argument");
- err.emit();
+ op_span: op_sp,
+ });
}
hir::InlineAsmOperand::SymFn { .. }
| hir::InlineAsmOperand::SymStatic { .. } => {
- let mut err = sess.struct_span_err(
+ sess.emit_err(InvalidAsmTemplateModifierSym {
placeholder_span,
- "asm template modifiers are not allowed for `sym` arguments",
- );
- err.span_label(placeholder_span, "template modifier");
- err.span_label(op_sp, "argument");
- err.emit();
+ op_span: op_sp,
+ });
}
}
}
@@ -346,12 +309,10 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
// require that the operand name an explicit register, not a
// register class.
if reg_class.is_clobber_only(asm_arch.unwrap()) && !op.is_clobber() {
- let msg = format!(
- "register class `{}` can only be used as a clobber, \
- not as an input or output",
- reg_class.name()
- );
- sess.struct_span_err(op_sp, &msg).emit();
+ sess.emit_err(RegisterClassOnlyClobber {
+ op_span: op_sp,
+ reg_class_name: reg_class.name(),
+ });
continue;
}
@@ -391,16 +352,7 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
unreachable!();
};
- let msg = format!(
- "register `{}` conflicts with register `{}`",
- reg.name(),
- reg2.name()
- );
- let mut err = sess.struct_span_err(op_sp, &msg);
- err.span_label(op_sp, &format!("register `{}`", reg.name()));
- err.span_label(op_sp2, &format!("register `{}`", reg2.name()));
-
- match (op, op2) {
+ let in_out = match (op, op2) {
(
hir::InlineAsmOperand::In { .. },
hir::InlineAsmOperand::Out { late, .. },
@@ -411,14 +363,18 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
) => {
assert!(!*late);
let out_op_sp = if input { op_sp2 } else { op_sp };
- let msg = "use `lateout` instead of \
- `out` to avoid conflict";
- err.span_help(out_op_sp, msg);
- }
- _ => {}
- }
+ Some(out_op_sp)
+ },
+ _ => None,
+ };
- err.emit();
+ sess.emit_err(RegisterConflict {
+ op_span1: op_sp,
+ op_span2: op_sp2,
+ reg1_name: reg.name(),
+ reg2_name: reg2.name(),
+ in_out
+ });
}
Entry::Vacant(v) => {
if r == reg {
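One detail of the clobber_abi hunk earlier in this file's diff is worth spelling out: the `equivalent` field passed to `AbiSpecifiedMultipleTimes` is an `Option<()>`, because the diagnostic derive attaches the optional `#[note]` only when the field is `Some`. The boolean "the two ABI spellings differ" check is therefore converted with `bool::then_some`. A minimal, dependency-free illustration of just that conversion (variable names here are invented for the example):

```rust
// Plain-Rust sketch of the `bool -> Option<()>` trick used for the optional
// `#[note]` field (`equivalent`) in the clobber_abi hunk above.
fn main() {
    // Spellings differ, but both resolve to the same ABI on this target:
    // the note should be attached.
    let spelled_differently = true;
    let equivalent: Option<()> = spelled_differently.then_some(());
    assert_eq!(equivalent, Some(()));

    // Identical spellings: no note.
    let equivalent: Option<()> = false.then_some(());
    assert_eq!(equivalent, None);
}
```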
diff --git a/compiler/rustc_ast_lowering/src/block.rs b/compiler/rustc_ast_lowering/src/block.rs
index 7cbfe143b..12a0cc0d2 100644
--- a/compiler/rustc_ast_lowering/src/block.rs
+++ b/compiler/rustc_ast_lowering/src/block.rs
@@ -1,8 +1,6 @@
use crate::{ImplTraitContext, ImplTraitPosition, LoweringContext};
use rustc_ast::{Block, BlockCheckMode, Local, LocalKind, Stmt, StmtKind};
use rustc_hir as hir;
-use rustc_session::parse::feature_err;
-use rustc_span::sym;
use smallvec::SmallVec;
@@ -87,20 +85,11 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
let ty = l
.ty
.as_ref()
- .map(|t| self.lower_ty(t, ImplTraitContext::Disallowed(ImplTraitPosition::Variable)));
+ .map(|t| self.lower_ty(t, &ImplTraitContext::Disallowed(ImplTraitPosition::Variable)));
let init = l.kind.init().map(|init| self.lower_expr(init));
let hir_id = self.lower_node_id(l.id);
let pat = self.lower_pat(&l.pat);
let els = if let LocalKind::InitElse(_, els) = &l.kind {
- if !self.tcx.features().let_else {
- feature_err(
- &self.tcx.sess.parse_sess,
- sym::let_else,
- l.span,
- "`let...else` statements are unstable",
- )
- .emit();
- }
Some(self.lower_block(els, false))
} else {
None
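The block.rs hunk above drops the `let_else` feature gate from lowering, consistent with `let ... else` being accepted without a feature flag; lowering simply keeps the `else` block as an ordinary diverging block. For reference, the surface construct the removed gate used to guard (ordinary stable Rust, not compiler internals):

```rust
// The `else` branch must diverge, and the bindings introduced by the pattern
// are in scope after the statement.
fn first_char(s: &str) -> char {
    let Some(c) = s.chars().next() else {
        // Diverging branch: return, panic, break, continue, ...
        return '?';
    };
    c
}

fn main() {
    assert_eq!(first_char("hir"), 'h');
    assert_eq!(first_char(""), '?');
}
```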
diff --git a/compiler/rustc_ast_lowering/src/errors.rs b/compiler/rustc_ast_lowering/src/errors.rs
new file mode 100644
index 000000000..157f46501
--- /dev/null
+++ b/compiler/rustc_ast_lowering/src/errors.rs
@@ -0,0 +1,347 @@
+use rustc_errors::DiagnosticArgFromDisplay;
+use rustc_macros::{Diagnostic, Subdiagnostic};
+use rustc_span::{symbol::Ident, Span, Symbol};
+
+#[derive(Diagnostic, Clone, Copy)]
+#[diag(ast_lowering_generic_type_with_parentheses, code = "E0214")]
+pub struct GenericTypeWithParentheses {
+ #[primary_span]
+ #[label]
+ pub span: Span,
+ #[subdiagnostic]
+ pub sub: Option<UseAngleBrackets>,
+}
+
+#[derive(Clone, Copy, Subdiagnostic)]
+#[multipart_suggestion(ast_lowering_use_angle_brackets, applicability = "maybe-incorrect")]
+pub struct UseAngleBrackets {
+ #[suggestion_part(code = "<")]
+ pub open_param: Span,
+ #[suggestion_part(code = ">")]
+ pub close_param: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(ast_lowering_invalid_abi, code = "E0703")]
+#[note]
+pub struct InvalidAbi {
+ #[primary_span]
+ #[label]
+ pub span: Span,
+ pub abi: Symbol,
+ pub command: String,
+ #[subdiagnostic]
+ pub suggestion: Option<InvalidAbiSuggestion>,
+}
+
+#[derive(Subdiagnostic)]
+#[suggestion(
+ ast_lowering_invalid_abi_suggestion,
+ code = "{suggestion}",
+ applicability = "maybe-incorrect"
+)]
+pub struct InvalidAbiSuggestion {
+ #[primary_span]
+ pub span: Span,
+ pub suggestion: String,
+}
+
+#[derive(Diagnostic, Clone, Copy)]
+#[diag(ast_lowering_assoc_ty_parentheses)]
+pub struct AssocTyParentheses {
+ #[primary_span]
+ pub span: Span,
+ #[subdiagnostic]
+ pub sub: AssocTyParenthesesSub,
+}
+
+#[derive(Clone, Copy, Subdiagnostic)]
+pub enum AssocTyParenthesesSub {
+ #[multipart_suggestion(ast_lowering_remove_parentheses)]
+ Empty {
+ #[suggestion_part(code = "")]
+ parentheses_span: Span,
+ },
+ #[multipart_suggestion(ast_lowering_use_angle_brackets)]
+ NotEmpty {
+ #[suggestion_part(code = "<")]
+ open_param: Span,
+ #[suggestion_part(code = ">")]
+ close_param: Span,
+ },
+}
+
+#[derive(Diagnostic)]
+#[diag(ast_lowering_misplaced_impl_trait, code = "E0562")]
+pub struct MisplacedImplTrait<'a> {
+ #[primary_span]
+ pub span: Span,
+ pub position: DiagnosticArgFromDisplay<'a>,
+}
+
+#[derive(Diagnostic, Clone, Copy)]
+#[diag(ast_lowering_rustc_box_attribute_error)]
+pub struct RustcBoxAttributeError {
+ #[primary_span]
+ pub span: Span,
+}
+
+#[derive(Diagnostic, Clone, Copy)]
+#[diag(ast_lowering_underscore_expr_lhs_assign)]
+pub struct UnderscoreExprLhsAssign {
+ #[primary_span]
+ #[label]
+ pub span: Span,
+}
+
+#[derive(Diagnostic, Clone, Copy)]
+#[diag(ast_lowering_base_expression_double_dot)]
+pub struct BaseExpressionDoubleDot {
+ #[primary_span]
+ #[label]
+ pub span: Span,
+}
+
+#[derive(Diagnostic, Clone, Copy)]
+#[diag(ast_lowering_await_only_in_async_fn_and_blocks, code = "E0728")]
+pub struct AwaitOnlyInAsyncFnAndBlocks {
+ #[primary_span]
+ #[label]
+ pub dot_await_span: Span,
+ #[label(ast_lowering_this_not_async)]
+ pub item_span: Option<Span>,
+}
+
+#[derive(Diagnostic, Clone, Copy)]
+#[diag(ast_lowering_generator_too_many_parameters, code = "E0628")]
+pub struct GeneratorTooManyParameters {
+ #[primary_span]
+ pub fn_decl_span: Span,
+}
+
+#[derive(Diagnostic, Clone, Copy)]
+#[diag(ast_lowering_closure_cannot_be_static, code = "E0697")]
+pub struct ClosureCannotBeStatic {
+ #[primary_span]
+ pub fn_decl_span: Span,
+}
+
+#[derive(Diagnostic, Clone, Copy)]
+#[help]
+#[diag(ast_lowering_async_non_move_closure_not_supported, code = "E0708")]
+pub struct AsyncNonMoveClosureNotSupported {
+ #[primary_span]
+ pub fn_decl_span: Span,
+}
+
+#[derive(Diagnostic, Clone, Copy)]
+#[diag(ast_lowering_functional_record_update_destructuring_assignment)]
+pub struct FunctionalRecordUpdateDestructuringAssignemnt {
+ #[primary_span]
+ #[suggestion(code = "", applicability = "machine-applicable")]
+ pub span: Span,
+}
+
+#[derive(Diagnostic, Clone, Copy)]
+#[diag(ast_lowering_async_generators_not_supported, code = "E0727")]
+pub struct AsyncGeneratorsNotSupported {
+ #[primary_span]
+ pub span: Span,
+}
+
+#[derive(Diagnostic, Clone, Copy)]
+#[diag(ast_lowering_inline_asm_unsupported_target, code = "E0472")]
+pub struct InlineAsmUnsupportedTarget {
+ #[primary_span]
+ pub span: Span,
+}
+
+#[derive(Diagnostic, Clone, Copy)]
+#[diag(ast_lowering_att_syntax_only_x86)]
+pub struct AttSyntaxOnlyX86 {
+ #[primary_span]
+ pub span: Span,
+}
+
+#[derive(Diagnostic, Clone, Copy)]
+#[diag(ast_lowering_abi_specified_multiple_times)]
+pub struct AbiSpecifiedMultipleTimes {
+ #[primary_span]
+ pub abi_span: Span,
+ pub prev_name: Symbol,
+ #[label]
+ pub prev_span: Span,
+ #[note]
+ pub equivalent: Option<()>,
+}
+
+#[derive(Diagnostic, Clone, Copy)]
+#[diag(ast_lowering_clobber_abi_not_supported)]
+pub struct ClobberAbiNotSupported {
+ #[primary_span]
+ pub abi_span: Span,
+}
+
+#[derive(Diagnostic)]
+#[note]
+#[diag(ast_lowering_invalid_abi_clobber_abi)]
+pub struct InvalidAbiClobberAbi {
+ #[primary_span]
+ pub abi_span: Span,
+ pub supported_abis: String,
+}
+
+#[derive(Diagnostic, Clone, Copy)]
+#[diag(ast_lowering_invalid_register)]
+pub struct InvalidRegister<'a> {
+ #[primary_span]
+ pub op_span: Span,
+ pub reg: Symbol,
+ pub error: &'a str,
+}
+
+#[derive(Diagnostic, Clone, Copy)]
+#[diag(ast_lowering_invalid_register_class)]
+pub struct InvalidRegisterClass<'a> {
+ #[primary_span]
+ pub op_span: Span,
+ pub reg_class: Symbol,
+ pub error: &'a str,
+}
+
+#[derive(Diagnostic)]
+#[diag(ast_lowering_invalid_asm_template_modifier_reg_class)]
+pub struct InvalidAsmTemplateModifierRegClass {
+ #[primary_span]
+ #[label(ast_lowering_template_modifier)]
+ pub placeholder_span: Span,
+ #[label(ast_lowering_argument)]
+ pub op_span: Span,
+ #[subdiagnostic]
+ pub sub: InvalidAsmTemplateModifierRegClassSub,
+}
+
+#[derive(Subdiagnostic)]
+pub enum InvalidAsmTemplateModifierRegClassSub {
+ #[note(ast_lowering_support_modifiers)]
+ SupportModifier { class_name: Symbol, modifiers: String },
+ #[note(ast_lowering_does_not_support_modifiers)]
+ DoesNotSupportModifier { class_name: Symbol },
+}
+
+#[derive(Diagnostic, Clone, Copy)]
+#[diag(ast_lowering_invalid_asm_template_modifier_const)]
+pub struct InvalidAsmTemplateModifierConst {
+ #[primary_span]
+ #[label(ast_lowering_template_modifier)]
+ pub placeholder_span: Span,
+ #[label(ast_lowering_argument)]
+ pub op_span: Span,
+}
+
+#[derive(Diagnostic, Clone, Copy)]
+#[diag(ast_lowering_invalid_asm_template_modifier_sym)]
+pub struct InvalidAsmTemplateModifierSym {
+ #[primary_span]
+ #[label(ast_lowering_template_modifier)]
+ pub placeholder_span: Span,
+ #[label(ast_lowering_argument)]
+ pub op_span: Span,
+}
+
+#[derive(Diagnostic, Clone, Copy)]
+#[diag(ast_lowering_register_class_only_clobber)]
+pub struct RegisterClassOnlyClobber {
+ #[primary_span]
+ pub op_span: Span,
+ pub reg_class_name: Symbol,
+}
+
+#[derive(Diagnostic, Clone, Copy)]
+#[diag(ast_lowering_register_conflict)]
+pub struct RegisterConflict<'a> {
+ #[primary_span]
+ #[label(ast_lowering_register1)]
+ pub op_span1: Span,
+ #[label(ast_lowering_register2)]
+ pub op_span2: Span,
+ pub reg1_name: &'a str,
+ pub reg2_name: &'a str,
+ #[help]
+ pub in_out: Option<Span>,
+}
+
+#[derive(Diagnostic, Clone, Copy)]
+#[help]
+#[diag(ast_lowering_sub_tuple_binding)]
+pub struct SubTupleBinding<'a> {
+ #[primary_span]
+ #[label]
+ #[suggestion_verbose(
+ ast_lowering_sub_tuple_binding_suggestion,
+ code = "..",
+ applicability = "maybe-incorrect"
+ )]
+ pub span: Span,
+ pub ident: Ident,
+ pub ident_name: Symbol,
+ pub ctx: &'a str,
+}
+
+#[derive(Diagnostic, Clone, Copy)]
+#[diag(ast_lowering_extra_double_dot)]
+pub struct ExtraDoubleDot<'a> {
+ #[primary_span]
+ #[label]
+ pub span: Span,
+ #[label(ast_lowering_previously_used_here)]
+ pub prev_span: Span,
+ pub ctx: &'a str,
+}
+
+#[derive(Diagnostic, Clone, Copy)]
+#[note]
+#[diag(ast_lowering_misplaced_double_dot)]
+pub struct MisplacedDoubleDot {
+ #[primary_span]
+ pub span: Span,
+}
+
+#[derive(Diagnostic, Clone, Copy)]
+#[diag(ast_lowering_misplaced_relax_trait_bound)]
+pub struct MisplacedRelaxTraitBound {
+ #[primary_span]
+ pub span: Span,
+}
+
+#[derive(Diagnostic, Clone, Copy)]
+#[diag(ast_lowering_not_supported_for_lifetime_binder_async_closure)]
+pub struct NotSupportedForLifetimeBinderAsyncClosure {
+ #[primary_span]
+ pub span: Span,
+}
+
+#[derive(Diagnostic, Clone, Copy)]
+#[diag(ast_lowering_arbitrary_expression_in_pattern)]
+pub struct ArbitraryExpressionInPattern {
+ #[primary_span]
+ pub span: Span,
+}
+
+#[derive(Diagnostic, Clone, Copy)]
+#[diag(ast_lowering_inclusive_range_with_no_end)]
+pub struct InclusiveRangeWithNoEnd {
+ #[primary_span]
+ pub span: Span,
+}
+
+#[derive(Diagnostic, Clone, Copy)]
+#[diag(ast_lowering_trait_fn_async, code = "E0706")]
+#[note]
+#[note(note2)]
+pub struct TraitFnAsync {
+ #[primary_span]
+ pub fn_span: Span,
+ #[label]
+ pub span: Span,
+}
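The structs in the new errors.rs are pure data: spans plus interpolated fields, with each `#[diag(...)]`/`#[label(...)]`/`#[note(...)]` slug naming a message in the crate's Fluent resources, and the derive generating the glue that renders them when passed to `emit_err`. As a rough mental model only (this is not the real macro expansion; the real message text lives in the Fluent files, and the compiler types differ), the idea reduces to:

```rust
// Deliberately simplified, dependency-free mock: a diagnostic struct carries
// data, and generated code (hand-written here) turns it into a rendered
// message. Field names mirror `InvalidRegister` above; everything else is
// illustrative.
#[derive(Debug, Clone, Copy)]
struct Span {
    lo: u32,
    hi: u32,
}

struct InvalidRegister<'a> {
    op_span: Span,  // #[primary_span]
    reg: &'a str,   // interpolated as {$reg}
    error: &'a str, // interpolated as {$error}
}

impl InvalidRegister<'_> {
    // Stand-in for what #[derive(Diagnostic)] produces from
    // #[diag(ast_lowering_invalid_register)].
    fn render(&self) -> String {
        // Wording taken from the hard-coded string this diff removes from
        // asm.rs: "invalid register `{}`: {}".
        format!(
            "error: invalid register `{}`: {} (at {}..{})",
            self.reg, self.error, self.op_span.lo, self.op_span.hi
        )
    }
}

fn main() {
    let diag = InvalidRegister {
        op_span: Span { lo: 10, hi: 18 },
        reg: "zax",
        error: "unknown register",
    };
    println!("{}", diag.render());
}
```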
diff --git a/compiler/rustc_ast_lowering/src/expr.rs b/compiler/rustc_ast_lowering/src/expr.rs
index fb6715ff1..ec9c39350 100644
--- a/compiler/rustc_ast_lowering/src/expr.rs
+++ b/compiler/rustc_ast_lowering/src/expr.rs
@@ -1,19 +1,23 @@
+use super::errors::{
+ AsyncGeneratorsNotSupported, AsyncNonMoveClosureNotSupported, AwaitOnlyInAsyncFnAndBlocks,
+ BaseExpressionDoubleDot, ClosureCannotBeStatic, FunctionalRecordUpdateDestructuringAssignemnt,
+ GeneratorTooManyParameters, InclusiveRangeWithNoEnd, NotSupportedForLifetimeBinderAsyncClosure,
+ RustcBoxAttributeError, UnderscoreExprLhsAssign,
+};
use super::ResolverAstLoweringExt;
use super::{ImplTraitContext, LoweringContext, ParamMode, ParenthesizedGenericArgs};
use crate::{FnDeclKind, ImplTraitPosition};
-
use rustc_ast::attr;
use rustc_ast::ptr::P as AstP;
use rustc_ast::*;
use rustc_data_structures::stack::ensure_sufficient_stack;
-use rustc_data_structures::thin_vec::ThinVec;
-use rustc_errors::struct_span_err;
use rustc_hir as hir;
use rustc_hir::def::Res;
use rustc_hir::definitions::DefPathData;
use rustc_span::source_map::{respan, DesugaringKind, Span, Spanned};
use rustc_span::symbol::{sym, Ident};
use rustc_span::DUMMY_SP;
+use thin_vec::thin_vec;
impl<'hir> LoweringContext<'_, 'hir> {
fn lower_exprs(&mut self, exprs: &[AstP<Expr>]) -> &'hir [hir::Expr<'hir>] {
@@ -46,13 +50,7 @@ impl<'hir> LoweringContext<'_, 'hir> {
let hir_id = self.lower_node_id(e.id);
return hir::Expr { hir_id, kind, span: self.lower_span(e.span) };
} else {
- self.tcx.sess
- .struct_span_err(
- e.span,
- "#[rustc_box] requires precisely one argument \
- and no other attributes are allowed",
- )
- .emit();
+ self.tcx.sess.emit_err(RustcBoxAttributeError { span: e.span });
hir::ExprKind::Err
}
} else if let Some(legacy_args) = self.resolver.legacy_const_generic_args(f) {
@@ -62,16 +60,18 @@ impl<'hir> LoweringContext<'_, 'hir> {
hir::ExprKind::Call(f, self.lower_exprs(args))
}
}
- ExprKind::MethodCall(ref seg, ref args, span) => {
+ ExprKind::MethodCall(ref seg, ref receiver, ref args, span) => {
let hir_seg = self.arena.alloc(self.lower_path_segment(
e.span,
seg,
ParamMode::Optional,
ParenthesizedGenericArgs::Err,
- ImplTraitContext::Disallowed(ImplTraitPosition::Path),
+ &ImplTraitContext::Disallowed(ImplTraitPosition::Path),
));
- let args = self.lower_exprs(args);
- hir::ExprKind::MethodCall(hir_seg, args, self.lower_span(span))
+ let receiver = self.lower_expr(receiver);
+ let args =
+ self.arena.alloc_from_iter(args.iter().map(|x| self.lower_expr_mut(x)));
+ hir::ExprKind::MethodCall(hir_seg, receiver, args, self.lower_span(span))
}
ExprKind::Binary(binop, ref lhs, ref rhs) => {
let binop = self.lower_binop(binop);
@@ -90,13 +90,13 @@ impl<'hir> LoweringContext<'_, 'hir> {
ExprKind::Cast(ref expr, ref ty) => {
let expr = self.lower_expr(expr);
let ty =
- self.lower_ty(ty, ImplTraitContext::Disallowed(ImplTraitPosition::Type));
+ self.lower_ty(ty, &ImplTraitContext::Disallowed(ImplTraitPosition::Type));
hir::ExprKind::Cast(expr, ty)
}
ExprKind::Type(ref expr, ref ty) => {
let expr = self.lower_expr(expr);
let ty =
- self.lower_ty(ty, ImplTraitContext::Disallowed(ImplTraitPosition::Type));
+ self.lower_ty(ty, &ImplTraitContext::Disallowed(ImplTraitPosition::Type));
hir::ExprKind::Type(expr, ty)
}
ExprKind::AddrOf(k, m, ref ohs) => {
@@ -146,13 +146,19 @@ impl<'hir> LoweringContext<'_, 'hir> {
|this| this.with_new_scopes(|this| this.lower_block_expr(block)),
),
ExprKind::Await(ref expr) => {
- let span = if expr.span.hi() < e.span.hi() {
- expr.span.shrink_to_hi().with_hi(e.span.hi())
+ let dot_await_span = if expr.span.hi() < e.span.hi() {
+ let span_with_whitespace = self
+ .tcx
+ .sess
+ .source_map()
+ .span_extend_while(expr.span, char::is_whitespace)
+ .unwrap_or(expr.span);
+ span_with_whitespace.shrink_to_hi().with_hi(e.span.hi())
} else {
// this is a recovered `await expr`
e.span
};
- self.lower_expr_await(span, expr)
+ self.lower_expr_await(dot_await_span, expr)
}
ExprKind::Closure(
ref binder,
@@ -210,13 +216,7 @@ impl<'hir> LoweringContext<'_, 'hir> {
self.lower_expr_range(e.span, e1.as_deref(), e2.as_deref(), lims)
}
ExprKind::Underscore => {
- self.tcx
- .sess.struct_span_err(
- e.span,
- "in expressions, `_` can only be used on the left-hand side of an assignment",
- )
- .span_label(e.span, "`_` not allowed here")
- .emit();
+ self.tcx.sess.emit_err(UnderscoreExprLhsAssign { span: e.span });
hir::ExprKind::Err
}
ExprKind::Path(ref qself, ref path) => {
@@ -225,7 +225,7 @@ impl<'hir> LoweringContext<'_, 'hir> {
qself,
path,
ParamMode::Optional,
- ImplTraitContext::Disallowed(ImplTraitPosition::Path),
+ &ImplTraitContext::Disallowed(ImplTraitPosition::Path),
);
hir::ExprKind::Path(qpath)
}
@@ -248,11 +248,7 @@ impl<'hir> LoweringContext<'_, 'hir> {
let rest = match &se.rest {
StructRest::Base(e) => Some(self.lower_expr(e)),
StructRest::Rest(sp) => {
- self.tcx
- .sess
- .struct_span_err(*sp, "base expression required after `..`")
- .span_label(*sp, "add a base expression here")
- .emit();
+ self.tcx.sess.emit_err(BaseExpressionDoubleDot { span: *sp });
Some(&*self.arena.alloc(self.expr_err(*sp)))
}
StructRest::None => None,
@@ -263,7 +259,7 @@ impl<'hir> LoweringContext<'_, 'hir> {
&se.qself,
&se.path,
ParamMode::Optional,
- ImplTraitContext::Disallowed(ImplTraitPosition::Path),
+ &ImplTraitContext::Disallowed(ImplTraitPosition::Path),
)),
self.arena
.alloc_from_iter(se.fields.iter().map(|x| self.lower_expr_field(x))),
@@ -363,7 +359,7 @@ impl<'hir> LoweringContext<'_, 'hir> {
let node_id = self.next_node_id();
// Add a definition for the in-band const def.
- self.create_def(parent_def_id, node_id, DefPathData::AnonConst);
+ self.create_def(parent_def_id.def_id, node_id, DefPathData::AnonConst);
let anon_const = AnonConst { id: node_id, value: arg };
generic_args.push(AngleBracketedArg::Arg(GenericArg::Const(anon_const)));
@@ -391,32 +387,58 @@ impl<'hir> LoweringContext<'_, 'hir> {
then: &Block,
else_opt: Option<&Expr>,
) -> hir::ExprKind<'hir> {
- let lowered_cond = self.lower_expr(cond);
- let new_cond = self.manage_let_cond(lowered_cond);
+ let lowered_cond = self.lower_cond(cond);
let then_expr = self.lower_block_expr(then);
if let Some(rslt) = else_opt {
- hir::ExprKind::If(new_cond, self.arena.alloc(then_expr), Some(self.lower_expr(rslt)))
+ hir::ExprKind::If(
+ lowered_cond,
+ self.arena.alloc(then_expr),
+ Some(self.lower_expr(rslt)),
+ )
} else {
- hir::ExprKind::If(new_cond, self.arena.alloc(then_expr), None)
+ hir::ExprKind::If(lowered_cond, self.arena.alloc(then_expr), None)
}
}
- // If `cond` kind is `let`, returns `let`. Otherwise, wraps and returns `cond`
- // in a temporary block.
- fn manage_let_cond(&mut self, cond: &'hir hir::Expr<'hir>) -> &'hir hir::Expr<'hir> {
- fn has_let_expr<'hir>(expr: &'hir hir::Expr<'hir>) -> bool {
- match expr.kind {
- hir::ExprKind::Binary(_, lhs, rhs) => has_let_expr(lhs) || has_let_expr(rhs),
- hir::ExprKind::Let(..) => true,
+ // Lowers a condition (i.e. `cond` in `if cond` or `while cond`), wrapping it in a terminating scope
+ // so that temporaries created in the condition don't live beyond it.
+ fn lower_cond(&mut self, cond: &Expr) -> &'hir hir::Expr<'hir> {
+ fn has_let_expr(expr: &Expr) -> bool {
+ match &expr.kind {
+ ExprKind::Binary(_, lhs, rhs) => has_let_expr(lhs) || has_let_expr(rhs),
+ ExprKind::Let(..) => true,
_ => false,
}
}
- if has_let_expr(cond) {
- cond
- } else {
- let reason = DesugaringKind::CondTemporary;
- let span_block = self.mark_span_with_reason(reason, cond.span, None);
- self.expr_drop_temps(span_block, cond, AttrVec::new())
+
+ // We have to take special care for `let` exprs in the condition, e.g. in
+ // `if let pat = val` or `if foo && let pat = val`, as we _do_ want `val` to live beyond the
+ // condition in this case.
+ //
+        // In order to maintain the drop behavior for the non `let` parts of the condition,
+ // we still wrap them in terminating scopes, e.g. `if foo && let pat = val` essentially
+ // gets transformed into `if { let _t = foo; _t } && let pat = val`
+ match &cond.kind {
+ ExprKind::Binary(op @ Spanned { node: ast::BinOpKind::And, .. }, lhs, rhs)
+ if has_let_expr(cond) =>
+ {
+ let op = self.lower_binop(*op);
+ let lhs = self.lower_cond(lhs);
+ let rhs = self.lower_cond(rhs);
+
+ self.arena.alloc(self.expr(
+ cond.span,
+ hir::ExprKind::Binary(op, lhs, rhs),
+ AttrVec::new(),
+ ))
+ }
+ ExprKind::Let(..) => self.lower_expr(cond),
+ _ => {
+ let cond = self.lower_expr(cond);
+ let reason = DesugaringKind::CondTemporary;
+ let span_block = self.mark_span_with_reason(reason, cond.span, None);
+ self.expr_drop_temps(span_block, cond, AttrVec::new())
+ }
}
}
@@ -443,15 +465,14 @@ impl<'hir> LoweringContext<'_, 'hir> {
body: &Block,
opt_label: Option<Label>,
) -> hir::ExprKind<'hir> {
- let lowered_cond = self.with_loop_condition_scope(|t| t.lower_expr(cond));
- let new_cond = self.manage_let_cond(lowered_cond);
+ let lowered_cond = self.with_loop_condition_scope(|t| t.lower_cond(cond));
let then = self.lower_block_expr(body);
- let expr_break = self.expr_break(span, ThinVec::new());
+ let expr_break = self.expr_break(span, AttrVec::new());
let stmt_break = self.stmt_expr(span, expr_break);
let else_blk = self.block_all(span, arena_vec![self; stmt_break], None);
- let else_expr = self.arena.alloc(self.expr_block(else_blk, ThinVec::new()));
- let if_kind = hir::ExprKind::If(new_cond, self.arena.alloc(then), Some(else_expr));
- let if_expr = self.expr(span, if_kind, ThinVec::new());
+ let else_expr = self.arena.alloc(self.expr_block(else_blk, AttrVec::new()));
+ let if_kind = hir::ExprKind::If(lowered_cond, self.arena.alloc(then), Some(else_expr));
+ let if_expr = self.expr(span, if_kind, AttrVec::new());
let block = self.block_expr(self.arena.alloc(if_expr));
let span = self.lower_span(span.with_hi(cond.span.hi()));
let opt_label = self.lower_label(opt_label);
@@ -510,7 +531,7 @@ impl<'hir> LoweringContext<'_, 'hir> {
let constructor = self.arena.alloc(self.expr_lang_item_path(
method_span,
lang_item,
- ThinVec::new(),
+ AttrVec::new(),
None,
));
self.expr_call(overall_span, constructor, std::slice::from_ref(expr))
@@ -562,7 +583,7 @@ impl<'hir> LoweringContext<'_, 'hir> {
) -> hir::ExprKind<'hir> {
let output = match ret_ty {
Some(ty) => hir::FnRetTy::Return(
- self.lower_ty(&ty, ImplTraitContext::Disallowed(ImplTraitPosition::AsyncBlock)),
+ self.lower_ty(&ty, &ImplTraitContext::Disallowed(ImplTraitPosition::AsyncBlock)),
),
None => hir::FnRetTy::DefaultReturn(self.lower_span(span)),
};
@@ -587,7 +608,7 @@ impl<'hir> LoweringContext<'_, 'hir> {
let (pat, task_context_hid) = self.pat_ident_binding_mode(
span,
Ident::with_dummy_span(sym::_task_context),
- hir::BindingAnnotation::Mutable,
+ hir::BindingAnnotation::MUT,
);
let param = hir::Param {
hir_id: self.next_id(),
@@ -633,7 +654,7 @@ impl<'hir> LoweringContext<'_, 'hir> {
let gen_future = self.expr_lang_item_path(
unstable_span,
hir::LangItem::FromGenerator,
- ThinVec::new(),
+ AttrVec::new(),
None,
);
@@ -661,17 +682,10 @@ impl<'hir> LoweringContext<'_, 'hir> {
match self.generator_kind {
Some(hir::GeneratorKind::Async(_)) => {}
Some(hir::GeneratorKind::Gen) | None => {
- let mut err = struct_span_err!(
- self.tcx.sess,
+ self.tcx.sess.emit_err(AwaitOnlyInAsyncFnAndBlocks {
dot_await_span,
- E0728,
- "`await` is only allowed inside `async` functions and blocks"
- );
- err.span_label(dot_await_span, "only allowed inside `async` functions and blocks");
- if let Some(item_sp) = self.current_item {
- err.span_label(item_sp, "this is not `async`");
- }
- err.emit();
+ item_span: self.current_item,
+ });
}
}
let span = self.mark_span_with_reason(DesugaringKind::Await, dot_await_span, None);
@@ -688,7 +702,7 @@ impl<'hir> LoweringContext<'_, 'hir> {
// this name to identify what is being awaited by a suspended async functions.
let awaitee_ident = Ident::with_dummy_span(sym::__awaitee);
let (awaitee_pat, awaitee_pat_hid) =
- self.pat_ident_binding_mode(span, awaitee_ident, hir::BindingAnnotation::Mutable);
+ self.pat_ident_binding_mode(span, awaitee_ident, hir::BindingAnnotation::MUT);
let task_context_ident = Ident::with_dummy_span(sym::_task_context);
@@ -745,7 +759,7 @@ impl<'hir> LoweringContext<'_, 'hir> {
let break_x = self.with_loop_scope(loop_node_id, move |this| {
let expr_break =
hir::ExprKind::Break(this.lower_loop_destination(None), Some(x_expr));
- this.arena.alloc(this.expr(gen_future_span, expr_break, ThinVec::new()))
+ this.arena.alloc(this.expr(gen_future_span, expr_break, AttrVec::new()))
});
self.arm(ready_pat, break_x)
};
@@ -778,7 +792,7 @@ impl<'hir> LoweringContext<'_, 'hir> {
let yield_expr = self.expr(
span,
hir::ExprKind::Yield(unit, hir::YieldSource::Await { expr: Some(expr_hir_id) }),
- ThinVec::new(),
+ AttrVec::new(),
);
let yield_expr = self.arena.alloc(yield_expr);
@@ -866,7 +880,7 @@ impl<'hir> LoweringContext<'_, 'hir> {
let bound_generic_params = self.lower_lifetime_binder(closure_id, generic_params);
// Lower outside new scope to preserve `is_in_loop_condition`.
- let fn_decl = self.lower_fn_decl(decl, None, FnDeclKind::Closure, None);
+ let fn_decl = self.lower_fn_decl(decl, None, fn_decl_span, FnDeclKind::Closure, None);
let c = self.arena.alloc(hir::Closure {
binder: binder_clause,
@@ -891,13 +905,7 @@ impl<'hir> LoweringContext<'_, 'hir> {
match generator_kind {
Some(hir::GeneratorKind::Gen) => {
if decl.inputs.len() > 1 {
- struct_span_err!(
- self.tcx.sess,
- fn_decl_span,
- E0628,
- "too many parameters for a generator (expected 0 or 1 parameters)"
- )
- .emit();
+ self.tcx.sess.emit_err(GeneratorTooManyParameters { fn_decl_span });
}
Some(movability)
}
@@ -906,13 +914,7 @@ impl<'hir> LoweringContext<'_, 'hir> {
}
None => {
if movability == Movability::Static {
- struct_span_err!(
- self.tcx.sess,
- fn_decl_span,
- E0697,
- "closures cannot be static"
- )
- .emit();
+ self.tcx.sess.emit_err(ClosureCannotBeStatic { fn_decl_span });
}
None
}
@@ -945,10 +947,7 @@ impl<'hir> LoweringContext<'_, 'hir> {
fn_decl_span: Span,
) -> hir::ExprKind<'hir> {
if let &ClosureBinder::For { span, .. } = binder {
- self.tcx.sess.span_err(
- span,
- "`for<...>` binders on `async` closures are not currently supported",
- );
+ self.tcx.sess.emit_err(NotSupportedForLifetimeBinderAsyncClosure { span });
}
let (binder_clause, generic_params) = self.lower_closure_binder(binder);
@@ -959,17 +958,7 @@ impl<'hir> LoweringContext<'_, 'hir> {
let body = self.with_new_scopes(|this| {
// FIXME(cramertj): allow `async` non-`move` closures with arguments.
if capture_clause == CaptureBy::Ref && !decl.inputs.is_empty() {
- struct_span_err!(
- this.tcx.sess,
- fn_decl_span,
- E0708,
- "`async` non-`move` closures with parameters are not currently supported",
- )
- .help(
- "consider using `let` statements to manually capture \
- variables by reference before entering an `async move` closure",
- )
- .emit();
+ this.tcx.sess.emit_err(AsyncNonMoveClosureNotSupported { fn_decl_span });
}
// Transform `async |x: u8| -> X { ... }` into
@@ -985,17 +974,17 @@ impl<'hir> LoweringContext<'_, 'hir> {
hir::AsyncGeneratorKind::Closure,
|this| this.with_new_scopes(|this| this.lower_expr_mut(body)),
);
- this.expr(fn_decl_span, async_body, ThinVec::new())
+ this.expr(fn_decl_span, async_body, AttrVec::new())
});
body_id
});
let bound_generic_params = self.lower_lifetime_binder(closure_id, generic_params);
-
// We need to lower the declaration outside the new scope, because we
// have to conserve the state of being inside a loop condition for the
// closure argument types.
- let fn_decl = self.lower_fn_decl(&outer_decl, None, FnDeclKind::Closure, None);
+ let fn_decl =
+ self.lower_fn_decl(&outer_decl, None, fn_decl_span, FnDeclKind::Closure, None);
let c = self.arena.alloc(hir::Closure {
binder: binder_clause,
@@ -1080,9 +1069,7 @@ impl<'hir> LoweringContext<'_, 'hir> {
if let ExprKind::Path(qself, path) = &expr.kind {
// Does the path resolve to something disallowed in a tuple struct/variant pattern?
if let Some(partial_res) = self.resolver.get_partial_res(expr.id) {
- if partial_res.unresolved_segments() == 0
- && !partial_res.base_res().expected_in_tuple_struct_pat()
- {
+ if let Some(res) = partial_res.full_res() && !res.expected_in_tuple_struct_pat() {
return None;
}
}
@@ -1102,9 +1089,7 @@ impl<'hir> LoweringContext<'_, 'hir> {
if let ExprKind::Path(qself, path) = &expr.kind {
// Does the path resolve to something disallowed in a unit struct/variant pattern?
if let Some(partial_res) = self.resolver.get_partial_res(expr.id) {
- if partial_res.unresolved_segments() == 0
- && !partial_res.base_res().expected_in_unit_struct_pat()
- {
+ if let Some(res) = partial_res.full_res() && !res.expected_in_unit_struct_pat() {
return None;
}
}
@@ -1165,11 +1150,14 @@ impl<'hir> LoweringContext<'_, 'hir> {
qself,
path,
ParamMode::Optional,
- ImplTraitContext::Disallowed(ImplTraitPosition::Path),
+ &ImplTraitContext::Disallowed(ImplTraitPosition::Path),
);
// Destructure like a tuple struct.
- let tuple_struct_pat =
- hir::PatKind::TupleStruct(qpath, pats, rest.map(|r| r.0));
+ let tuple_struct_pat = hir::PatKind::TupleStruct(
+ qpath,
+ pats,
+ hir::DotDotPos::new(rest.map(|r| r.0)),
+ );
return self.pat_without_dbm(lhs.span, tuple_struct_pat);
}
}
@@ -1181,7 +1169,7 @@ impl<'hir> LoweringContext<'_, 'hir> {
qself,
path,
ParamMode::Optional,
- ImplTraitContext::Disallowed(ImplTraitPosition::Path),
+ &ImplTraitContext::Disallowed(ImplTraitPosition::Path),
);
// Destructure like a unit struct.
let unit_struct_pat = hir::PatKind::Path(qpath);
@@ -1205,24 +1193,13 @@ impl<'hir> LoweringContext<'_, 'hir> {
&se.qself,
&se.path,
ParamMode::Optional,
- ImplTraitContext::Disallowed(ImplTraitPosition::Path),
+ &ImplTraitContext::Disallowed(ImplTraitPosition::Path),
);
let fields_omitted = match &se.rest {
StructRest::Base(e) => {
- self.tcx
- .sess
- .struct_span_err(
- e.span,
- "functional record updates are not allowed in destructuring \
- assignments",
- )
- .span_suggestion(
- e.span,
- "consider removing the trailing pattern",
- "",
- rustc_errors::Applicability::MachineApplicable,
- )
- .emit();
+ self.tcx.sess.emit_err(FunctionalRecordUpdateDestructuringAssignemnt {
+ span: e.span,
+ });
true
}
StructRest::Rest(_) => true,
@@ -1235,13 +1212,13 @@ impl<'hir> LoweringContext<'_, 'hir> {
ExprKind::Tup(elements) => {
let (pats, rest) =
self.destructure_sequence(elements, "tuple", eq_sign_span, assignments);
- let tuple_pat = hir::PatKind::Tuple(pats, rest.map(|r| r.0));
+ let tuple_pat = hir::PatKind::Tuple(pats, hir::DotDotPos::new(rest.map(|r| r.0)));
return self.pat_without_dbm(lhs.span, tuple_pat);
}
ExprKind::Paren(e) => {
// We special-case `(..)` for consistency with patterns.
if let ExprKind::Range(None, None, RangeLimits::HalfOpen) = e.kind {
- let tuple_pat = hir::PatKind::Tuple(&[], Some(0));
+ let tuple_pat = hir::PatKind::Tuple(&[], hir::DotDotPos::new(Some(0)));
return self.pat_without_dbm(lhs.span, tuple_pat);
} else {
return self.destructure_assign_mut(e, eq_sign_span, assignments);
@@ -1255,7 +1232,7 @@ impl<'hir> LoweringContext<'_, 'hir> {
let ident = self.expr_ident(lhs.span, ident, binding);
let assign =
hir::ExprKind::Assign(self.lower_expr(lhs), ident, self.lower_span(eq_sign_span));
- let expr = self.expr(lhs.span, assign, ThinVec::new());
+ let expr = self.expr(lhs.span, assign, AttrVec::new());
assignments.push(self.stmt_expr(lhs.span, expr));
pat
}
@@ -1297,7 +1274,7 @@ impl<'hir> LoweringContext<'_, 'hir> {
let fn_path =
hir::QPath::LangItem(hir::LangItem::RangeInclusiveNew, self.lower_span(span), None);
let fn_expr =
- self.arena.alloc(self.expr(span, hir::ExprKind::Path(fn_path), ThinVec::new()));
+ self.arena.alloc(self.expr(span, hir::ExprKind::Path(fn_path), AttrVec::new()));
hir::ExprKind::Call(fn_expr, arena_vec![self; e1, e2])
}
@@ -1317,7 +1294,13 @@ impl<'hir> LoweringContext<'_, 'hir> {
(Some(..), Some(..), HalfOpen) => hir::LangItem::Range,
(None, Some(..), Closed) => hir::LangItem::RangeToInclusive,
(Some(..), Some(..), Closed) => unreachable!(),
- (_, None, Closed) => self.diagnostic().span_fatal(span, "inclusive range with no end"),
+ (start, None, Closed) => {
+ self.tcx.sess.emit_err(InclusiveRangeWithNoEnd { span });
+ match start {
+ Some(..) => hir::LangItem::RangeFrom,
+ None => hir::LangItem::RangeFull,
+ }
+ }
};
let fields = self.arena.alloc_from_iter(
@@ -1404,8 +1387,10 @@ impl<'hir> LoweringContext<'_, 'hir> {
}
fn lower_expr_field(&mut self, f: &ExprField) -> hir::ExprField<'hir> {
+ let hir_id = self.lower_node_id(f.id);
+ self.lower_attrs(hir_id, &f.attrs);
hir::ExprField {
- hir_id: self.next_id(),
+ hir_id,
ident: self.lower_ident(f.ident),
expr: self.lower_expr(&f.expr),
span: self.lower_span(f.span),
@@ -1417,13 +1402,7 @@ impl<'hir> LoweringContext<'_, 'hir> {
match self.generator_kind {
Some(hir::GeneratorKind::Gen) => {}
Some(hir::GeneratorKind::Async(_)) => {
- struct_span_err!(
- self.tcx.sess,
- span,
- E0727,
- "`async` generators are not yet supported"
- )
- .emit();
+ self.tcx.sess.emit_err(AsyncGeneratorsNotSupported { span });
}
None => self.generator_kind = Some(hir::GeneratorKind::Gen),
}
@@ -1468,7 +1447,7 @@ impl<'hir> LoweringContext<'_, 'hir> {
// `None => break`
let none_arm = {
let break_expr =
- self.with_loop_scope(e.id, |this| this.expr_break_alloc(for_span, ThinVec::new()));
+ self.with_loop_scope(e.id, |this| this.expr_break_alloc(for_span, AttrVec::new()));
let pat = self.pat_none(for_span);
self.arm(pat, break_expr)
};
@@ -1477,14 +1456,14 @@ impl<'hir> LoweringContext<'_, 'hir> {
let some_arm = {
let some_pat = self.pat_some(pat_span, pat);
let body_block = self.with_loop_scope(e.id, |this| this.lower_block(body, false));
- let body_expr = self.arena.alloc(self.expr_block(body_block, ThinVec::new()));
+ let body_expr = self.arena.alloc(self.expr_block(body_block, AttrVec::new()));
self.arm(some_pat, body_expr)
};
// `mut iter`
let iter = Ident::with_dummy_span(sym::iter);
let (iter_pat, iter_pat_nid) =
- self.pat_ident_binding_mode(head_span, iter, hir::BindingAnnotation::Mutable);
+ self.pat_ident_binding_mode(head_span, iter, hir::BindingAnnotation::MUT);
// `match Iterator::next(&mut iter) { ... }`
let match_expr = {
@@ -1534,15 +1513,13 @@ impl<'hir> LoweringContext<'_, 'hir> {
hir::MatchSource::ForLoopDesugar,
));
- let attrs: Vec<_> = e.attrs.iter().map(|a| self.lower_attr(a)).collect();
-
// This is effectively `{ let _result = ...; _result }`.
// The construct was introduced in #21984 and is necessary to make sure that
// temporaries in the `head` expression are dropped and do not leak to the
// surrounding scope of the `match` since the `match` is not a terminating scope.
//
// Also, add the attributes to the outer returned expr node.
- self.expr_drop_temps_mut(for_span, match_expr, attrs.into())
+ self.expr_drop_temps_mut(for_span, match_expr, e.attrs.clone())
}
/// Desugar `ExprKind::Try` from: `<expr>?` into:
@@ -1592,9 +1569,9 @@ impl<'hir> LoweringContext<'_, 'hir> {
let uc_nested = attr::mk_nested_word_item(uc_ident);
attr::mk_list_item(allow_ident, vec![uc_nested])
};
- attr::mk_attr_outer(allow)
+ attr::mk_attr_outer(&self.tcx.sess.parse_sess.attr_id_generator, allow)
};
- let attrs = vec![attr];
+ let attrs: AttrVec = thin_vec![attr];
// `ControlFlow::Continue(val) => #[allow(unreachable_code)] val,`
let continue_arm = {
@@ -1604,7 +1581,7 @@ impl<'hir> LoweringContext<'_, 'hir> {
span,
val_ident,
val_pat_nid,
- ThinVec::from(attrs.clone()),
+ attrs.clone(),
));
let continue_pat = self.pat_cf_continue(unstable_span, val_pat);
self.arm(continue_pat, val_expr)
@@ -1623,7 +1600,6 @@ impl<'hir> LoweringContext<'_, 'hir> {
self.arena.alloc(residual_expr),
unstable_span,
);
- let thin_attrs = ThinVec::from(attrs);
let ret_expr = if let Some(catch_node) = self.catch_scope {
let target_id = Ok(self.lower_node_id(catch_node));
self.arena.alloc(self.expr(
@@ -1632,13 +1608,13 @@ impl<'hir> LoweringContext<'_, 'hir> {
hir::Destination { label: None, target_id },
Some(from_residual_expr),
),
- thin_attrs,
+ attrs,
))
} else {
self.arena.alloc(self.expr(
try_span,
hir::ExprKind::Ret(Some(from_residual_expr)),
- thin_attrs,
+ attrs,
))
};
@@ -1654,11 +1630,11 @@ impl<'hir> LoweringContext<'_, 'hir> {
}
/// Desugar `ExprKind::Yeet` from: `do yeet <expr>` into:
- /// ```rust
+ /// ```ignore(illustrative)
/// // If there is an enclosing `try {...}`:
- /// break 'catch_target FromResidual::from_residual(Yeet(residual)),
+ /// break 'catch_target FromResidual::from_residual(Yeet(residual));
/// // Otherwise:
- /// return FromResidual::from_residual(Yeet(residual)),
+ /// return FromResidual::from_residual(Yeet(residual));
/// ```
/// But to simplify this, there's a `from_yeet` lang item function which
/// handles the combined `FromResidual::from_residual(Yeet(residual))`.
@@ -1726,7 +1702,7 @@ impl<'hir> LoweringContext<'_, 'hir> {
arms: &'hir [hir::Arm<'hir>],
source: hir::MatchSource,
) -> hir::Expr<'hir> {
- self.expr(span, hir::ExprKind::Match(arg, arms, source), ThinVec::new())
+ self.expr(span, hir::ExprKind::Match(arg, arms, source), AttrVec::new())
}
fn expr_break(&mut self, span: Span, attrs: AttrVec) -> hir::Expr<'hir> {
@@ -1743,12 +1719,12 @@ impl<'hir> LoweringContext<'_, 'hir> {
self.expr(
span,
hir::ExprKind::AddrOf(hir::BorrowKind::Ref, hir::Mutability::Mut, e),
- ThinVec::new(),
+ AttrVec::new(),
)
}
fn expr_unit(&mut self, sp: Span) -> &'hir hir::Expr<'hir> {
- self.arena.alloc(self.expr(sp, hir::ExprKind::Tup(&[]), ThinVec::new()))
+ self.arena.alloc(self.expr(sp, hir::ExprKind::Tup(&[]), AttrVec::new()))
}
fn expr_call_mut(
@@ -1757,7 +1733,7 @@ impl<'hir> LoweringContext<'_, 'hir> {
e: &'hir hir::Expr<'hir>,
args: &'hir [hir::Expr<'hir>],
) -> hir::Expr<'hir> {
- self.expr(span, hir::ExprKind::Call(e, args), ThinVec::new())
+ self.expr(span, hir::ExprKind::Call(e, args), AttrVec::new())
}
fn expr_call(
@@ -1777,7 +1753,7 @@ impl<'hir> LoweringContext<'_, 'hir> {
hir_id: Option<hir::HirId>,
) -> hir::Expr<'hir> {
let path =
- self.arena.alloc(self.expr_lang_item_path(span, lang_item, ThinVec::new(), hir_id));
+ self.arena.alloc(self.expr_lang_item_path(span, lang_item, AttrVec::new(), hir_id));
self.expr_call_mut(span, path, args)
}
@@ -1820,7 +1796,7 @@ impl<'hir> LoweringContext<'_, 'hir> {
ident: Ident,
binding: hir::HirId,
) -> hir::Expr<'hir> {
- self.expr_ident_with_attrs(sp, ident, binding, ThinVec::new())
+ self.expr_ident_with_attrs(sp, ident, binding, AttrVec::new())
}
fn expr_ident_with_attrs(
@@ -1830,12 +1806,14 @@ impl<'hir> LoweringContext<'_, 'hir> {
binding: hir::HirId,
attrs: AttrVec,
) -> hir::Expr<'hir> {
+ let hir_id = self.next_id();
+ let res = Res::Local(binding);
let expr_path = hir::ExprKind::Path(hir::QPath::Resolved(
None,
self.arena.alloc(hir::Path {
span: self.lower_span(span),
- res: Res::Local(binding),
- segments: arena_vec![self; hir::PathSegment::from_ident(ident)],
+ res,
+ segments: arena_vec![self; hir::PathSegment::new(ident, hir_id, res)],
}),
));
@@ -1858,13 +1836,13 @@ impl<'hir> LoweringContext<'_, 'hir> {
}),
None,
),
- ThinVec::new(),
+ AttrVec::new(),
)
}
fn expr_block_empty(&mut self, span: Span) -> &'hir hir::Expr<'hir> {
let blk = self.block_all(span, &[], None);
- let expr = self.expr_block(blk, ThinVec::new());
+ let expr = self.expr_block(blk, AttrVec::new());
self.arena.alloc(expr)
}
diff --git a/compiler/rustc_ast_lowering/src/index.rs b/compiler/rustc_ast_lowering/src/index.rs
index d5af74d47..f1851d7b4 100644
--- a/compiler/rustc_ast_lowering/src/index.rs
+++ b/compiler/rustc_ast_lowering/src/index.rs
@@ -11,8 +11,6 @@ use rustc_session::Session;
use rustc_span::source_map::SourceMap;
use rustc_span::{Span, DUMMY_SP};
-use tracing::debug;
-
/// A visitor that walks over the HIR and collects `Node`s into a HIR map.
pub(super) struct NodeCollector<'a, 'hir> {
/// Source map
@@ -26,12 +24,12 @@ pub(super) struct NodeCollector<'a, 'hir> {
/// The parent of this node
parent_node: hir::ItemLocalId,
- owner: LocalDefId,
+ owner: OwnerId,
definitions: &'a definitions::Definitions,
}
-#[tracing::instrument(level = "debug", skip(sess, definitions, bodies))]
+#[instrument(level = "debug", skip(sess, definitions, bodies))]
pub(super) fn index_hir<'hir>(
sess: &Session,
definitions: &definitions::Definitions,
@@ -67,10 +65,11 @@ pub(super) fn index_hir<'hir>(
}
impl<'a, 'hir> NodeCollector<'a, 'hir> {
- #[tracing::instrument(level = "debug", skip(self))]
+ #[instrument(level = "debug", skip(self))]
fn insert(&mut self, span: Span, hir_id: HirId, node: Node<'hir>) {
debug_assert_eq!(self.owner, hir_id.owner);
debug_assert_ne!(hir_id.local_id.as_u32(), 0);
+ debug_assert_ne!(hir_id.local_id, self.parent_node);
// Make sure that the DepNode of some node coincides with the HirId
// owner of that node.
@@ -82,9 +81,9 @@ impl<'a, 'hir> NodeCollector<'a, 'hir> {
current_dep_node_owner={} ({:?}), hir_id.owner={} ({:?})",
self.source_map.span_to_diagnostic_string(span),
node,
- self.definitions.def_path(self.owner).to_string_no_crate_verbose(),
+ self.definitions.def_path(self.owner.def_id).to_string_no_crate_verbose(),
self.owner,
- self.definitions.def_path(hir_id.owner).to_string_no_crate_verbose(),
+ self.definitions.def_path(hir_id.owner.def_id).to_string_no_crate_verbose(),
hir_id.owner,
)
}
@@ -113,19 +112,19 @@ impl<'a, 'hir> Visitor<'hir> for NodeCollector<'a, 'hir> {
fn visit_nested_item(&mut self, item: ItemId) {
debug!("visit_nested_item: {:?}", item);
- self.insert_nested(item.def_id);
+ self.insert_nested(item.owner_id.def_id);
}
fn visit_nested_trait_item(&mut self, item_id: TraitItemId) {
- self.insert_nested(item_id.def_id);
+ self.insert_nested(item_id.owner_id.def_id);
}
fn visit_nested_impl_item(&mut self, item_id: ImplItemId) {
- self.insert_nested(item_id.def_id);
+ self.insert_nested(item_id.owner_id.def_id);
}
fn visit_nested_foreign_item(&mut self, foreign_id: ForeignItemId) {
- self.insert_nested(foreign_id.def_id);
+ self.insert_nested(foreign_id.owner_id.def_id);
}
fn visit_nested_body(&mut self, id: BodyId) {
@@ -142,9 +141,9 @@ impl<'a, 'hir> Visitor<'hir> for NodeCollector<'a, 'hir> {
});
}
- #[tracing::instrument(level = "debug", skip(self))]
+ #[instrument(level = "debug", skip(self))]
fn visit_item(&mut self, i: &'hir Item<'hir>) {
- debug_assert_eq!(i.def_id, self.owner);
+ debug_assert_eq!(i.owner_id, self.owner);
self.with_parent(i.hir_id(), |this| {
if let ItemKind::Struct(ref struct_def, _) = i.kind {
// If this is a tuple or unit-like struct, register the constructor.
@@ -156,9 +155,9 @@ impl<'a, 'hir> Visitor<'hir> for NodeCollector<'a, 'hir> {
});
}
- #[tracing::instrument(level = "debug", skip(self))]
+ #[instrument(level = "debug", skip(self))]
fn visit_foreign_item(&mut self, fi: &'hir ForeignItem<'hir>) {
- debug_assert_eq!(fi.def_id, self.owner);
+ debug_assert_eq!(fi.owner_id, self.owner);
self.with_parent(fi.hir_id(), |this| {
intravisit::walk_foreign_item(this, fi);
});
@@ -175,17 +174,17 @@ impl<'a, 'hir> Visitor<'hir> for NodeCollector<'a, 'hir> {
})
}
- #[tracing::instrument(level = "debug", skip(self))]
+ #[instrument(level = "debug", skip(self))]
fn visit_trait_item(&mut self, ti: &'hir TraitItem<'hir>) {
- debug_assert_eq!(ti.def_id, self.owner);
+ debug_assert_eq!(ti.owner_id, self.owner);
self.with_parent(ti.hir_id(), |this| {
intravisit::walk_trait_item(this, ti);
});
}
- #[tracing::instrument(level = "debug", skip(self))]
+ #[instrument(level = "debug", skip(self))]
fn visit_impl_item(&mut self, ii: &'hir ImplItem<'hir>) {
- debug_assert_eq!(ii.def_id, self.owner);
+ debug_assert_eq!(ii.owner_id, self.owner);
self.with_parent(ii.hir_id(), |this| {
intravisit::walk_impl_item(this, ii);
});
@@ -199,6 +198,13 @@ impl<'a, 'hir> Visitor<'hir> for NodeCollector<'a, 'hir> {
});
}
+ fn visit_pat_field(&mut self, field: &'hir PatField<'hir>) {
+ self.insert(field.span, field.hir_id, Node::PatField(field));
+ self.with_parent(field.hir_id, |this| {
+ intravisit::walk_pat_field(this, field);
+ });
+ }
+
fn visit_arm(&mut self, arm: &'hir Arm<'hir>) {
let node = Node::Arm(arm);
@@ -225,6 +231,13 @@ impl<'a, 'hir> Visitor<'hir> for NodeCollector<'a, 'hir> {
});
}
+ fn visit_expr_field(&mut self, field: &'hir ExprField<'hir>) {
+ self.insert(field.span, field.hir_id, Node::ExprField(field));
+ self.with_parent(field.hir_id, |this| {
+ intravisit::walk_expr_field(this, field);
+ });
+ }
+
fn visit_stmt(&mut self, stmt: &'hir Stmt<'hir>) {
self.insert(stmt.span, stmt.hir_id, Node::Stmt(stmt));
@@ -233,11 +246,9 @@ impl<'a, 'hir> Visitor<'hir> for NodeCollector<'a, 'hir> {
});
}
- fn visit_path_segment(&mut self, path_span: Span, path_segment: &'hir PathSegment<'hir>) {
- if let Some(hir_id) = path_segment.hir_id {
- self.insert(path_span, hir_id, Node::PathSegment(path_segment));
- }
- intravisit::walk_path_segment(self, path_span, path_segment);
+ fn visit_path_segment(&mut self, path_segment: &'hir PathSegment<'hir>) {
+ self.insert(path_segment.ident.span, path_segment.hir_id, Node::PathSegment(path_segment));
+ intravisit::walk_path_segment(self, path_segment);
}
fn visit_ty(&mut self, ty: &'hir Ty<'hir>) {
@@ -269,12 +280,12 @@ impl<'a, 'hir> Visitor<'hir> for NodeCollector<'a, 'hir> {
fk: intravisit::FnKind<'hir>,
fd: &'hir FnDecl<'hir>,
b: BodyId,
- s: Span,
+ _: Span,
id: HirId,
) {
assert_eq!(self.owner, id.owner);
assert_eq!(self.parent_node, id.local_id);
- intravisit::walk_fn(self, fk, fd, b, s, id);
+ intravisit::walk_fn(self, fk, fd, b, id);
}
fn visit_block(&mut self, block: &'hir Block<'hir>) {
@@ -295,14 +306,14 @@ impl<'a, 'hir> Visitor<'hir> for NodeCollector<'a, 'hir> {
self.insert(lifetime.span, lifetime.hir_id, Node::Lifetime(lifetime));
}
- fn visit_variant(&mut self, v: &'hir Variant<'hir>, g: &'hir Generics<'hir>, item_id: HirId) {
+ fn visit_variant(&mut self, v: &'hir Variant<'hir>) {
self.insert(v.span, v.id, Node::Variant(v));
self.with_parent(v.id, |this| {
// Register the constructor of this variant.
if let Some(ctor_hir_id) = v.data.ctor_hir_id() {
this.insert(v.span, ctor_hir_id, Node::Ctor(&v.data));
}
- intravisit::walk_variant(this, v, g, item_id);
+ intravisit::walk_variant(this, v);
});
}
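The index.rs hunks above are mostly mechanical fallout of HIR owners now being identified by an `OwnerId` newtype rather than a bare `LocalDefId`: the visitor compares `i.owner_id` against `self.owner: OwnerId` and projects `.def_id` wherever a raw `LocalDefId` is still needed (e.g. `self.definitions.def_path(self.owner.def_id)`). A standalone stub of that shape, inferred from the constructions in this diff and not the real rustc definitions:

```rust
// Minimal stand-ins showing why `def_id` -> `owner_id.def_id` projections
// appear throughout this diff.
#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
struct LocalDefId(u32);

#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
struct OwnerId {
    def_id: LocalDefId,
}

fn main() {
    let owner = OwnerId { def_id: LocalDefId(0) };
    // Code that used to take the owner's LocalDefId directly now unwraps it.
    let raw: LocalDefId = owner.def_id;
    assert_eq!(raw, LocalDefId(0));
}
```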
diff --git a/compiler/rustc_ast_lowering/src/item.rs b/compiler/rustc_ast_lowering/src/item.rs
index ee4c0036f..76316a574 100644
--- a/compiler/rustc_ast_lowering/src/item.rs
+++ b/compiler/rustc_ast_lowering/src/item.rs
@@ -1,5 +1,6 @@
+use super::errors::{InvalidAbi, InvalidAbiSuggestion, MisplacedRelaxTraitBound};
use super::ResolverAstLoweringExt;
-use super::{AstOwner, ImplTraitContext, ImplTraitPosition};
+use super::{Arena, AstOwner, ImplTraitContext, ImplTraitPosition};
use super::{FnDeclKind, LoweringContext, ParamMode};
use rustc_ast::ptr::P;
@@ -7,16 +8,16 @@ use rustc_ast::visit::AssocCtxt;
use rustc_ast::*;
use rustc_data_structures::fx::FxHashMap;
use rustc_data_structures::sorted_map::SortedMap;
-use rustc_errors::struct_span_err;
use rustc_hir as hir;
use rustc_hir::def::{DefKind, Res};
use rustc_hir::def_id::{LocalDefId, CRATE_DEF_ID};
use rustc_hir::PredicateOrigin;
use rustc_index::vec::{Idx, IndexVec};
use rustc_middle::ty::{DefIdTree, ResolverAstLowering, TyCtxt};
+use rustc_span::lev_distance::find_best_match_for_name;
use rustc_span::source_map::DesugaringKind;
use rustc_span::symbol::{kw, sym, Ident};
-use rustc_span::Span;
+use rustc_span::{Span, Symbol};
use rustc_target::spec::abi;
use smallvec::{smallvec, SmallVec};
@@ -25,6 +26,7 @@ use std::iter;
pub(super) struct ItemLowerer<'a, 'hir> {
pub(super) tcx: TyCtxt<'hir>,
pub(super) resolver: &'a mut ResolverAstLowering,
+ pub(super) ast_arena: &'a Arena<'static>,
pub(super) ast_index: &'a IndexVec<LocalDefId, AstOwner<'a>>,
pub(super) owners: &'a mut IndexVec<LocalDefId, hir::MaybeOwner<&'hir hir::OwnerInfo<'hir>>>,
}
@@ -60,12 +62,13 @@ impl<'a, 'hir> ItemLowerer<'a, 'hir> {
tcx: self.tcx,
resolver: self.resolver,
arena: self.tcx.hir_arena,
+ ast_arena: self.ast_arena,
// HirId handling.
bodies: Vec::new(),
attrs: SortedMap::default(),
children: FxHashMap::default(),
- current_hir_id_owner: CRATE_DEF_ID,
+ current_hir_id_owner: hir::CRATE_OWNER_ID,
item_local_id_counter: hir::ItemLocalId::new(0),
node_id_to_local_id: Default::default(),
local_id_to_def_id: SortedMap::new(),
@@ -85,6 +88,7 @@ impl<'a, 'hir> ItemLowerer<'a, 'hir> {
allow_try_trait: Some([sym::try_trait_v2, sym::yeet_desugar_details][..].into()),
allow_gen_future: Some([sym::gen_future][..].into()),
allow_into_future: Some([sym::into_future][..].into()),
+ generics_def_id_map: Default::default(),
};
lctx.with_hir_id_owner(owner, |lctx| f(lctx));
@@ -120,7 +124,7 @@ impl<'a, 'hir> ItemLowerer<'a, 'hir> {
self.with_lctx(CRATE_NODE_ID, |lctx| {
let module = lctx.lower_mod(&c.items, &c.spans);
lctx.lower_attrs(hir::CRATE_HIR_ID, &c.attrs);
- hir::OwnerNode::Crate(lctx.arena.alloc(module))
+ hir::OwnerNode::Crate(module)
})
}
@@ -158,18 +162,23 @@ impl<'a, 'hir> ItemLowerer<'a, 'hir> {
}
impl<'hir> LoweringContext<'_, 'hir> {
- pub(super) fn lower_mod(&mut self, items: &[P<Item>], spans: &ModSpans) -> hir::Mod<'hir> {
- hir::Mod {
+ pub(super) fn lower_mod(
+ &mut self,
+ items: &[P<Item>],
+ spans: &ModSpans,
+ ) -> &'hir hir::Mod<'hir> {
+ self.arena.alloc(hir::Mod {
spans: hir::ModSpans {
inner_span: self.lower_span(spans.inner_span),
inject_use_span: self.lower_span(spans.inject_use_span),
},
item_ids: self.arena.alloc_from_iter(items.iter().flat_map(|x| self.lower_item_ref(x))),
- }
+ })
}
pub(super) fn lower_item_ref(&mut self, i: &Item) -> SmallVec<[hir::ItemId; 1]> {
- let mut node_ids = smallvec![hir::ItemId { def_id: self.local_def_id(i.id) }];
+ let mut node_ids =
+ smallvec![hir::ItemId { owner_id: hir::OwnerId { def_id: self.local_def_id(i.id) } }];
if let ItemKind::Use(ref use_tree) = &i.kind {
self.lower_item_id_use_tree(use_tree, i.id, &mut node_ids);
}
@@ -185,7 +194,9 @@ impl<'hir> LoweringContext<'_, 'hir> {
match tree.kind {
UseTreeKind::Nested(ref nested_vec) => {
for &(ref nested, id) in nested_vec {
- vec.push(hir::ItemId { def_id: self.local_def_id(id) });
+ vec.push(hir::ItemId {
+ owner_id: hir::OwnerId { def_id: self.local_def_id(id) },
+ });
self.lower_item_id_use_tree(nested, id, vec);
}
}
@@ -194,7 +205,9 @@ impl<'hir> LoweringContext<'_, 'hir> {
for (_, &id) in
iter::zip(self.expect_full_res_from_use(base_id).skip(1), &[id1, id2])
{
- vec.push(hir::ItemId { def_id: self.local_def_id(id) });
+ vec.push(hir::ItemId {
+ owner_id: hir::OwnerId { def_id: self.local_def_id(id) },
+ });
}
}
}
@@ -207,7 +220,7 @@ impl<'hir> LoweringContext<'_, 'hir> {
let attrs = self.lower_attrs(hir_id, &i.attrs);
let kind = self.lower_item_kind(i.span, i.id, hir_id, &mut ident, attrs, vis_span, &i.kind);
let item = hir::Item {
- def_id: hir_id.expect_owner(),
+ owner_id: hir_id.expect_owner(),
ident: self.lower_ident(ident),
kind,
vis_span,
@@ -259,10 +272,10 @@ impl<'hir> LoweringContext<'_, 'hir> {
let body_id =
this.lower_maybe_async_body(span, &decl, asyncness, body.as_deref());
- let itctx = ImplTraitContext::Universal;
- let (generics, decl) = this.lower_generics(generics, id, itctx, |this| {
+ let mut itctx = ImplTraitContext::Universal;
+ let (generics, decl) = this.lower_generics(generics, id, &mut itctx, |this| {
let ret_id = asyncness.opt_return_id();
- this.lower_fn_decl(&decl, Some(id), FnDeclKind::Fn, ret_id)
+ this.lower_fn_decl(&decl, Some(id), fn_sig_span, FnDeclKind::Fn, ret_id)
});
let sig = hir::FnSig {
decl,
@@ -306,8 +319,8 @@ impl<'hir> LoweringContext<'_, 'hir> {
let (generics, ty) = self.lower_generics(
&generics,
id,
- ImplTraitContext::Disallowed(ImplTraitPosition::Generic),
- |this| this.lower_ty(ty, ImplTraitContext::TypeAliasesOpaqueTy),
+ &ImplTraitContext::Disallowed(ImplTraitPosition::Generic),
+ |this| this.lower_ty(ty, &ImplTraitContext::TypeAliasesOpaqueTy),
);
hir::ItemKind::TyAlias(ty, generics)
}
@@ -319,7 +332,7 @@ impl<'hir> LoweringContext<'_, 'hir> {
let (generics, ty) = self.lower_generics(
&generics,
id,
- ImplTraitContext::Disallowed(ImplTraitPosition::Generic),
+ &ImplTraitContext::Disallowed(ImplTraitPosition::Generic),
|this| this.arena.alloc(this.ty(span, hir::TyKind::Err)),
);
hir::ItemKind::TyAlias(ty, generics)
@@ -328,7 +341,7 @@ impl<'hir> LoweringContext<'_, 'hir> {
let (generics, variants) = self.lower_generics(
generics,
id,
- ImplTraitContext::Disallowed(ImplTraitPosition::Generic),
+ &ImplTraitContext::Disallowed(ImplTraitPosition::Generic),
|this| {
this.arena.alloc_from_iter(
enum_definition.variants.iter().map(|x| this.lower_variant(x)),
@@ -341,7 +354,7 @@ impl<'hir> LoweringContext<'_, 'hir> {
let (generics, struct_def) = self.lower_generics(
generics,
id,
- ImplTraitContext::Disallowed(ImplTraitPosition::Generic),
+ &ImplTraitContext::Disallowed(ImplTraitPosition::Generic),
|this| this.lower_variant_data(hir_id, struct_def),
);
hir::ItemKind::Struct(struct_def, generics)
@@ -350,7 +363,7 @@ impl<'hir> LoweringContext<'_, 'hir> {
let (generics, vdata) = self.lower_generics(
generics,
id,
- ImplTraitContext::Disallowed(ImplTraitPosition::Generic),
+ &ImplTraitContext::Disallowed(ImplTraitPosition::Generic),
|this| this.lower_variant_data(hir_id, vdata),
);
hir::ItemKind::Union(vdata, generics)
@@ -378,18 +391,18 @@ impl<'hir> LoweringContext<'_, 'hir> {
// method, it will not be considered an in-band
// lifetime to be added, but rather a reference to a
// parent lifetime.
- let itctx = ImplTraitContext::Universal;
+ let mut itctx = ImplTraitContext::Universal;
let (generics, (trait_ref, lowered_ty)) =
- self.lower_generics(ast_generics, id, itctx, |this| {
+ self.lower_generics(ast_generics, id, &mut itctx, |this| {
let trait_ref = trait_ref.as_ref().map(|trait_ref| {
this.lower_trait_ref(
trait_ref,
- ImplTraitContext::Disallowed(ImplTraitPosition::Trait),
+ &ImplTraitContext::Disallowed(ImplTraitPosition::Trait),
)
});
let lowered_ty = this
- .lower_ty(ty, ImplTraitContext::Disallowed(ImplTraitPosition::Type));
+ .lower_ty(ty, &ImplTraitContext::Disallowed(ImplTraitPosition::Type));
(trait_ref, lowered_ty)
});
@@ -428,11 +441,11 @@ impl<'hir> LoweringContext<'_, 'hir> {
let (generics, (unsafety, items, bounds)) = self.lower_generics(
generics,
id,
- ImplTraitContext::Disallowed(ImplTraitPosition::Generic),
+ &ImplTraitContext::Disallowed(ImplTraitPosition::Generic),
|this| {
let bounds = this.lower_param_bounds(
bounds,
- ImplTraitContext::Disallowed(ImplTraitPosition::Bound),
+ &ImplTraitContext::Disallowed(ImplTraitPosition::Bound),
);
let items = this.arena.alloc_from_iter(
items.iter().map(|item| this.lower_trait_item_ref(item)),
@@ -447,11 +460,11 @@ impl<'hir> LoweringContext<'_, 'hir> {
let (generics, bounds) = self.lower_generics(
generics,
id,
- ImplTraitContext::Disallowed(ImplTraitPosition::Generic),
+ &ImplTraitContext::Disallowed(ImplTraitPosition::Generic),
|this| {
this.lower_param_bounds(
bounds,
- ImplTraitContext::Disallowed(ImplTraitPosition::Bound),
+ &ImplTraitContext::Disallowed(ImplTraitPosition::Bound),
)
},
);
@@ -474,7 +487,7 @@ impl<'hir> LoweringContext<'_, 'hir> {
span: Span,
body: Option<&Expr>,
) -> (&'hir hir::Ty<'hir>, hir::BodyId) {
- let ty = self.lower_ty(ty, ImplTraitContext::Disallowed(ImplTraitPosition::Type));
+ let ty = self.lower_ty(ty, &ImplTraitContext::Disallowed(ImplTraitPosition::Type));
(ty, self.lower_const_body(span, body))
}
@@ -532,7 +545,11 @@ impl<'hir> LoweringContext<'_, 'hir> {
let ident = *ident;
let mut path = path.clone();
for seg in &mut path.segments {
- seg.id = self.next_node_id();
+ // Give the cloned segment the same resolution information
+ // as the old one (this is needed for stability checking).
+ let new_id = self.next_node_id();
+ self.resolver.clone_res(seg.id, new_id);
+ seg.id = new_id;
}
let span = path.span;
@@ -545,7 +562,7 @@ impl<'hir> LoweringContext<'_, 'hir> {
}
let item = hir::Item {
- def_id: new_id,
+ owner_id: hir::OwnerId { def_id: new_id },
ident: this.lower_ident(ident),
kind,
vis_span,
@@ -601,7 +618,11 @@ impl<'hir> LoweringContext<'_, 'hir> {
// Give the segments new node-ids since they are being cloned.
for seg in &mut prefix.segments {
- seg.id = self.next_node_id();
+ // Give the cloned segment the same resolution information
+ // as the old one (this is needed for stability checking).
+ let new_id = self.next_node_id();
+ self.resolver.clone_res(seg.id, new_id);
+ seg.id = new_id;
}
// Each `use` import is an item and thus an owner of the
@@ -619,7 +640,7 @@ impl<'hir> LoweringContext<'_, 'hir> {
}
let item = hir::Item {
- def_id: new_hir_id,
+ owner_id: hir::OwnerId { def_id: new_hir_id },
ident: this.lower_ident(ident),
kind,
vis_span,
@@ -639,20 +660,26 @@ impl<'hir> LoweringContext<'_, 'hir> {
fn lower_foreign_item(&mut self, i: &ForeignItem) -> &'hir hir::ForeignItem<'hir> {
let hir_id = self.lower_node_id(i.id);
- let def_id = hir_id.expect_owner();
+ let owner_id = hir_id.expect_owner();
self.lower_attrs(hir_id, &i.attrs);
let item = hir::ForeignItem {
- def_id,
+ owner_id,
ident: self.lower_ident(i.ident),
kind: match i.kind {
ForeignItemKind::Fn(box Fn { ref sig, ref generics, .. }) => {
let fdec = &sig.decl;
- let itctx = ImplTraitContext::Universal;
+ let mut itctx = ImplTraitContext::Universal;
let (generics, (fn_dec, fn_args)) =
- self.lower_generics(generics, i.id, itctx, |this| {
+ self.lower_generics(generics, i.id, &mut itctx, |this| {
(
// Disallow `impl Trait` in foreign items.
- this.lower_fn_decl(fdec, None, FnDeclKind::ExternFn, None),
+ this.lower_fn_decl(
+ fdec,
+ None,
+ sig.span,
+ FnDeclKind::ExternFn,
+ None,
+ ),
this.lower_fn_params_to_names(fdec),
)
});
@@ -661,7 +688,7 @@ impl<'hir> LoweringContext<'_, 'hir> {
}
ForeignItemKind::Static(ref t, m, _) => {
let ty =
- self.lower_ty(t, ImplTraitContext::Disallowed(ImplTraitPosition::Type));
+ self.lower_ty(t, &ImplTraitContext::Disallowed(ImplTraitPosition::Type));
hir::ForeignItemKind::Static(ty, m)
}
ForeignItemKind::TyAlias(..) => hir::ForeignItemKind::Type,
@@ -675,7 +702,7 @@ impl<'hir> LoweringContext<'_, 'hir> {
fn lower_foreign_item_ref(&mut self, i: &ForeignItem) -> hir::ForeignItemRef {
hir::ForeignItemRef {
- id: hir::ForeignItemId { def_id: self.local_def_id(i.id) },
+ id: hir::ForeignItemId { owner_id: hir::OwnerId { def_id: self.local_def_id(i.id) } },
ident: self.lower_ident(i.ident),
span: self.lower_span(i.span),
}
@@ -729,11 +756,11 @@ impl<'hir> LoweringContext<'_, 'hir> {
qself,
path,
ParamMode::ExplicitNamed, // no `'_` in declarations (Issue #61124)
- ImplTraitContext::Disallowed(ImplTraitPosition::Path),
+ &ImplTraitContext::Disallowed(ImplTraitPosition::Path),
);
self.arena.alloc(t)
} else {
- self.lower_ty(&f.ty, ImplTraitContext::Disallowed(ImplTraitPosition::Type))
+ self.lower_ty(&f.ty, &ImplTraitContext::Disallowed(ImplTraitPosition::Type))
};
let hir_id = self.lower_node_id(f.id);
self.lower_attrs(hir_id, &f.attrs);
@@ -756,14 +783,20 @@ impl<'hir> LoweringContext<'_, 'hir> {
let (generics, kind, has_default) = match i.kind {
AssocItemKind::Const(_, ref ty, ref default) => {
- let ty = self.lower_ty(ty, ImplTraitContext::Disallowed(ImplTraitPosition::Type));
+ let ty = self.lower_ty(ty, &ImplTraitContext::Disallowed(ImplTraitPosition::Type));
let body = default.as_ref().map(|x| self.lower_const_body(i.span, Some(x)));
(hir::Generics::empty(), hir::TraitItemKind::Const(ty, body), body.is_some())
}
AssocItemKind::Fn(box Fn { ref sig, ref generics, body: None, .. }) => {
+ let asyncness = sig.header.asyncness;
let names = self.lower_fn_params_to_names(&sig.decl);
- let (generics, sig) =
- self.lower_method_sig(generics, sig, i.id, FnDeclKind::Trait, None);
+ let (generics, sig) = self.lower_method_sig(
+ generics,
+ sig,
+ i.id,
+ FnDeclKind::Trait,
+ asyncness.opt_return_id(),
+ );
(generics, hir::TraitItemKind::Fn(sig, hir::TraitFn::Required(names)), false)
}
AssocItemKind::Fn(box Fn { ref sig, ref generics, body: Some(ref body), .. }) => {
@@ -779,7 +812,7 @@ impl<'hir> LoweringContext<'_, 'hir> {
);
(generics, hir::TraitItemKind::Fn(sig, hir::TraitFn::Provided(body_id)), true)
}
- AssocItemKind::TyAlias(box TyAlias {
+ AssocItemKind::Type(box TyAlias {
ref generics,
where_clauses,
ref bounds,
@@ -791,15 +824,15 @@ impl<'hir> LoweringContext<'_, 'hir> {
let (generics, kind) = self.lower_generics(
&generics,
i.id,
- ImplTraitContext::Disallowed(ImplTraitPosition::Generic),
+ &ImplTraitContext::Disallowed(ImplTraitPosition::Generic),
|this| {
let ty = ty.as_ref().map(|x| {
- this.lower_ty(x, ImplTraitContext::Disallowed(ImplTraitPosition::Type))
+ this.lower_ty(x, &ImplTraitContext::Disallowed(ImplTraitPosition::Type))
});
hir::TraitItemKind::Type(
this.lower_param_bounds(
bounds,
- ImplTraitContext::Disallowed(ImplTraitPosition::Generic),
+ &ImplTraitContext::Disallowed(ImplTraitPosition::Generic),
),
ty,
)
@@ -812,7 +845,7 @@ impl<'hir> LoweringContext<'_, 'hir> {
self.lower_attrs(hir_id, &i.attrs);
let item = hir::TraitItem {
- def_id: trait_item_def_id,
+ owner_id: trait_item_def_id,
ident: self.lower_ident(i.ident),
generics,
kind,
@@ -825,13 +858,13 @@ impl<'hir> LoweringContext<'_, 'hir> {
fn lower_trait_item_ref(&mut self, i: &AssocItem) -> hir::TraitItemRef {
let kind = match &i.kind {
AssocItemKind::Const(..) => hir::AssocItemKind::Const,
- AssocItemKind::TyAlias(..) => hir::AssocItemKind::Type,
+ AssocItemKind::Type(..) => hir::AssocItemKind::Type,
AssocItemKind::Fn(box Fn { sig, .. }) => {
hir::AssocItemKind::Fn { has_self: sig.decl.has_self() }
}
AssocItemKind::MacCall(..) => unimplemented!(),
};
- let id = hir::TraitItemId { def_id: self.local_def_id(i.id) };
+ let id = hir::TraitItemId { owner_id: hir::OwnerId { def_id: self.local_def_id(i.id) } };
hir::TraitItemRef {
id,
ident: self.lower_ident(i.ident),
@@ -852,7 +885,7 @@ impl<'hir> LoweringContext<'_, 'hir> {
let (generics, kind) = match &i.kind {
AssocItemKind::Const(_, ty, expr) => {
- let ty = self.lower_ty(ty, ImplTraitContext::Disallowed(ImplTraitPosition::Type));
+ let ty = self.lower_ty(ty, &ImplTraitContext::Disallowed(ImplTraitPosition::Type));
(
hir::Generics::empty(),
hir::ImplItemKind::Const(ty, self.lower_const_body(i.span, expr.as_deref())),
@@ -873,21 +906,21 @@ impl<'hir> LoweringContext<'_, 'hir> {
(generics, hir::ImplItemKind::Fn(sig, body_id))
}
- AssocItemKind::TyAlias(box TyAlias { generics, where_clauses, ty, .. }) => {
+ AssocItemKind::Type(box TyAlias { generics, where_clauses, ty, .. }) => {
let mut generics = generics.clone();
add_ty_alias_where_clause(&mut generics, *where_clauses, false);
self.lower_generics(
&generics,
i.id,
- ImplTraitContext::Disallowed(ImplTraitPosition::Generic),
+ &ImplTraitContext::Disallowed(ImplTraitPosition::Generic),
|this| match ty {
None => {
let ty = this.arena.alloc(this.ty(i.span, hir::TyKind::Err));
- hir::ImplItemKind::TyAlias(ty)
+ hir::ImplItemKind::Type(ty)
}
Some(ty) => {
- let ty = this.lower_ty(ty, ImplTraitContext::TypeAliasesOpaqueTy);
- hir::ImplItemKind::TyAlias(ty)
+ let ty = this.lower_ty(ty, &ImplTraitContext::TypeAliasesOpaqueTy);
+ hir::ImplItemKind::Type(ty)
}
},
)
@@ -898,7 +931,7 @@ impl<'hir> LoweringContext<'_, 'hir> {
let hir_id = self.lower_node_id(i.id);
self.lower_attrs(hir_id, &i.attrs);
let item = hir::ImplItem {
- def_id: hir_id.expect_owner(),
+ owner_id: hir_id.expect_owner(),
ident: self.lower_ident(i.ident),
generics,
kind,
@@ -911,18 +944,21 @@ impl<'hir> LoweringContext<'_, 'hir> {
fn lower_impl_item_ref(&mut self, i: &AssocItem) -> hir::ImplItemRef {
hir::ImplItemRef {
- id: hir::ImplItemId { def_id: self.local_def_id(i.id) },
+ id: hir::ImplItemId { owner_id: hir::OwnerId { def_id: self.local_def_id(i.id) } },
ident: self.lower_ident(i.ident),
span: self.lower_span(i.span),
kind: match &i.kind {
AssocItemKind::Const(..) => hir::AssocItemKind::Const,
- AssocItemKind::TyAlias(..) => hir::AssocItemKind::Type,
+ AssocItemKind::Type(..) => hir::AssocItemKind::Type,
AssocItemKind::Fn(box Fn { sig, .. }) => {
hir::AssocItemKind::Fn { has_self: sig.decl.has_self() }
}
AssocItemKind::MacCall(..) => unimplemented!(),
},
- trait_item_def_id: self.resolver.get_partial_res(i.id).map(|r| r.base_res().def_id()),
+ trait_item_def_id: self
+ .resolver
+ .get_partial_res(i.id)
+ .map(|r| r.expect_full_res().def_id()),
}
}
@@ -947,7 +983,11 @@ impl<'hir> LoweringContext<'_, 'hir> {
params: &'hir [hir::Param<'hir>],
value: hir::Expr<'hir>,
) -> hir::BodyId {
- let body = hir::Body { generator_kind: self.generator_kind, params, value };
+ let body = hir::Body {
+ generator_kind: self.generator_kind,
+ params,
+ value: self.arena.alloc(value),
+ };
let id = body.id();
debug_assert_eq!(id.hir_id.owner, self.current_hir_id_owner);
self.bodies.push((id.hir_id.local_id, self.arena.alloc(body)));
@@ -1026,9 +1066,9 @@ impl<'hir> LoweringContext<'_, 'hir> {
asyncness: Async,
body: Option<&Block>,
) -> hir::BodyId {
- let closure_id = match asyncness {
- Async::Yes { closure_id, .. } => closure_id,
- Async::No => return self.lower_fn_body_block(span, decl, body),
+ let (closure_id, body) = match (asyncness, body) {
+ (Async::Yes { closure_id, .. }, Some(body)) => (closure_id, body),
+ _ => return self.lower_fn_body_block(span, decl, body),
};
self.lower_body(|this| {
@@ -1074,12 +1114,9 @@ impl<'hir> LoweringContext<'_, 'hir> {
// Check if this is a binding pattern; if so, we can optimize and avoid adding a
// `let <pat> = __argN;` statement. In this case, we do not rename the parameter.
let (ident, is_simple_parameter) = match parameter.pat.kind {
- hir::PatKind::Binding(
- hir::BindingAnnotation::Unannotated | hir::BindingAnnotation::Mutable,
- _,
- ident,
- _,
- ) => (ident, true),
+ hir::PatKind::Binding(hir::BindingAnnotation(ByRef::No, _), _, ident, _) => {
+ (ident, true)
+ }
// For `ref mut` or wildcard arguments, we can't reuse the binding, but
// we can keep the same name for the parameter.
// This lets rustdoc render it correctly in documentation.
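To make the renaming/capture scheme described in the comment above concrete, here is a hedged, surface-syntax sketch of what the async-fn parameter lowering amounts to (the real transformation happens on HIR; `__arg1` and every other name below is illustrative only):

    use std::future::Future;

    struct Foo(u8);

    // `async fn foo(x: Foo, (a, b): (u8, u8)) -> u8 { .. }` is lowered roughly like this.
    fn foo(x: Foo, __arg1: (u8, u8)) -> impl Future<Output = u8> {
        async move {
            // `x: Foo` is a simple binding: the name is reused, no extra `let <pat> = __argN;`.
            let x = x;
            // The pattern argument is renamed to `__arg1` and destructured inside the
            // async block, so the whole value is captured (moved) before the first await.
            let (a, b) = __arg1;
            x.0 + a + b
        }
    }

    fn main() {
        // Type-checks as a demonstration; driving the future would need an executor.
        let _fut = foo(Foo(1), (2, 3));
    }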
@@ -1144,7 +1181,7 @@ impl<'hir> LoweringContext<'_, 'hir> {
let (move_pat, move_id) = this.pat_ident_binding_mode(
desugared_span,
ident,
- hir::BindingAnnotation::Mutable,
+ hir::BindingAnnotation::MUT,
);
let move_expr = this.expr_ident(desugared_span, ident, new_parameter_id);
let move_stmt = this.stmt_let_pat(
@@ -1173,16 +1210,15 @@ impl<'hir> LoweringContext<'_, 'hir> {
parameters.push(new_parameter);
}
- let body_span = body.map_or(span, |b| b.span);
let async_expr = this.make_async_expr(
CaptureBy::Value,
closure_id,
None,
- body_span,
+ body.span,
hir::AsyncGeneratorKind::Fn,
|this| {
// Create a block from the user's function body:
- let user_body = this.lower_block_expr_opt(body_span, body);
+ let user_body = this.lower_block_expr(body);
// Transform into `drop-temps { <user-body> }`, an expression:
let desugared_span =
@@ -1214,7 +1250,7 @@ impl<'hir> LoweringContext<'_, 'hir> {
(
this.arena.alloc_from_iter(parameters),
- this.expr(body_span, async_expr, AttrVec::new()),
+ this.expr(body.span, async_expr, AttrVec::new()),
)
})
}
@@ -1225,12 +1261,12 @@ impl<'hir> LoweringContext<'_, 'hir> {
sig: &FnSig,
id: NodeId,
kind: FnDeclKind,
- is_async: Option<NodeId>,
+ is_async: Option<(NodeId, Span)>,
) -> (&'hir hir::Generics<'hir>, hir::FnSig<'hir>) {
let header = self.lower_fn_header(sig.header);
- let itctx = ImplTraitContext::Universal;
- let (generics, decl) = self.lower_generics(generics, id, itctx, |this| {
- this.lower_fn_decl(&sig.decl, Some(id), kind, is_async)
+ let mut itctx = ImplTraitContext::Universal;
+ let (generics, decl) = self.lower_generics(generics, id, &mut itctx, |this| {
+ this.lower_fn_decl(&sig.decl, Some(id), sig.span, kind, is_async)
});
(generics, hir::FnSig { header, decl, span: self.lower_span(sig.span) })
}
@@ -1260,10 +1296,20 @@ impl<'hir> LoweringContext<'_, 'hir> {
}
fn error_on_invalid_abi(&self, abi: StrLit) {
- struct_span_err!(self.tcx.sess, abi.span, E0703, "invalid ABI: found `{}`", abi.symbol)
- .span_label(abi.span, "invalid ABI")
- .help(&format!("valid ABIs: {}", abi::all_names().join(", ")))
- .emit();
+ let abi_names = abi::enabled_names(self.tcx.features(), abi.span)
+ .iter()
+ .map(|s| Symbol::intern(s))
+ .collect::<Vec<_>>();
+ let suggested_name = find_best_match_for_name(&abi_names, abi.symbol_unescaped, None);
+ self.tcx.sess.emit_err(InvalidAbi {
+ abi: abi.symbol_unescaped,
+ span: abi.span,
+ suggestion: suggested_name.map(|suggested_name| InvalidAbiSuggestion {
+ span: abi.span,
+ suggestion: format!("\"{suggested_name}\""),
+ }),
+ command: "rustc --print=calling-conventions".to_string(),
+ });
}
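The rewritten diagnostic now suggests the closest enabled ABI name via `find_best_match_for_name`. As a rough standalone illustration of that idea (a plain edit-distance suggester over string slices; the helper names and the distance threshold are assumptions, not rustc API):

    // Illustrative only: a tiny edit-distance suggester in the spirit of
    // find_best_match_for_name; names and thresholds here are assumptions.
    fn edit_distance(a: &str, b: &str) -> usize {
        let (a, b): (Vec<char>, Vec<char>) = (a.chars().collect(), b.chars().collect());
        let mut prev: Vec<usize> = (0..=b.len()).collect();
        for (i, ca) in a.iter().enumerate() {
            let mut cur = vec![i + 1];
            for (j, cb) in b.iter().enumerate() {
                let cost = if ca == cb { 0 } else { 1 };
                cur.push((prev[j] + cost).min(prev[j + 1] + 1).min(cur[j] + 1));
            }
            prev = cur;
        }
        prev[b.len()]
    }

    fn suggest_abi<'a>(enabled: &[&'a str], found: &str) -> Option<&'a str> {
        let best = enabled.iter().copied().min_by_key(|&name| edit_distance(name, found))?;
        (edit_distance(best, found) <= 2).then_some(best)
    }

    fn main() {
        let enabled = ["C", "Rust", "system", "cdecl", "stdcall"];
        assert_eq!(suggest_abi(&enabled, "stdcal"), Some("stdcall")); // close miss gets a suggestion
        assert_eq!(suggest_abi(&enabled, "bogus"), None); // nothing close enough, no suggestion
    }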
fn lower_asyncness(&mut self, a: Async) -> hir::IsAsync {
@@ -1294,7 +1340,7 @@ impl<'hir> LoweringContext<'_, 'hir> {
&mut self,
generics: &Generics,
parent_node_id: NodeId,
- itctx: ImplTraitContext,
+ itctx: &ImplTraitContext,
f: impl FnOnce(&mut Self) -> T,
) -> (&'hir hir::Generics<'hir>, T) {
debug_assert!(self.impl_trait_defs.is_empty());
@@ -1314,9 +1360,9 @@ impl<'hir> LoweringContext<'_, 'hir> {
match self
.resolver
.get_partial_res(bound_pred.bounded_ty.id)
- .map(|d| (d.base_res(), d.unresolved_segments()))
+ .and_then(|r| r.full_res())
{
- Some((Res::Def(DefKind::TyParam, def_id), 0))
+ Some(Res::Def(DefKind::TyParam, def_id))
if bound_pred.bound_generic_params.is_empty() =>
{
generics
@@ -1338,11 +1384,7 @@ impl<'hir> LoweringContext<'_, 'hir> {
}
let is_param = *is_param.get_or_insert_with(compute_is_param);
if !is_param {
- self.diagnostic().span_err(
- bound.span(),
- "`?Trait` bounds are only permitted at the \
- point where a type parameter is declared",
- );
+ self.tcx.sess.emit_err(MisplacedRelaxTraitBound { span: bound.span() });
}
}
}
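For reference, the kind of code the new `MisplacedRelaxTraitBound` error covers; a `?Trait` relaxation is accepted only when it applies directly to a declared type parameter (illustrative sketch, with the rejected case left as a comment):

    // Accepted: the relaxation applies directly to a declared type parameter.
    fn ok<T: ?Sized>(x: &T) -> &T { x }
    fn also_ok<T>(x: &T) -> &T where T: ?Sized { x }

    // Rejected with MisplacedRelaxTraitBound: the bounded type is not a type
    // parameter declared on this item, e.g.
    // fn bad<T>(_x: Box<T>) where Box<T>: ?Sized {}

    fn main() {
        assert_eq!(ok("hi"), "hi");
        assert_eq!(also_ok(&5u8), &5u8);
    }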
@@ -1403,7 +1445,7 @@ impl<'hir> LoweringContext<'_, 'hir> {
id: NodeId,
kind: &GenericParamKind,
bounds: &[GenericBound],
- itctx: ImplTraitContext,
+ itctx: &ImplTraitContext,
origin: PredicateOrigin,
) -> Option<hir::WherePredicate<'hir>> {
// Do not create a clause if we do not have anything inside it.
@@ -1434,15 +1476,20 @@ impl<'hir> LoweringContext<'_, 'hir> {
GenericParamKind::Const { .. } => None,
GenericParamKind::Type { .. } => {
let def_id = self.local_def_id(id).to_def_id();
+ let hir_id = self.next_id();
+ let res = Res::Def(DefKind::TyParam, def_id);
let ty_path = self.arena.alloc(hir::Path {
span: param_span,
- res: Res::Def(DefKind::TyParam, def_id),
- segments: self.arena.alloc_from_iter([hir::PathSegment::from_ident(ident)]),
+ res,
+ segments: self
+ .arena
+ .alloc_from_iter([hir::PathSegment::new(ident, hir_id, res)]),
});
let ty_id = self.next_id();
let bounded_ty =
self.ty_path(ty_id, param_span, hir::QPath::Resolved(None, ty_path));
Some(hir::WherePredicate::BoundPredicate(hir::WhereBoundPredicate {
+ hir_id: self.next_id(),
bounded_ty: self.arena.alloc(bounded_ty),
bounds,
span,
@@ -1473,13 +1520,14 @@ impl<'hir> LoweringContext<'_, 'hir> {
ref bounds,
span,
}) => hir::WherePredicate::BoundPredicate(hir::WhereBoundPredicate {
+ hir_id: self.next_id(),
bound_generic_params: self.lower_generic_params(bound_generic_params),
bounded_ty: self
- .lower_ty(bounded_ty, ImplTraitContext::Disallowed(ImplTraitPosition::Type)),
+ .lower_ty(bounded_ty, &ImplTraitContext::Disallowed(ImplTraitPosition::Type)),
bounds: self.arena.alloc_from_iter(bounds.iter().map(|bound| {
self.lower_param_bound(
bound,
- ImplTraitContext::Disallowed(ImplTraitPosition::Bound),
+ &ImplTraitContext::Disallowed(ImplTraitPosition::Bound),
)
})),
span: self.lower_span(span),
@@ -1494,17 +1542,16 @@ impl<'hir> LoweringContext<'_, 'hir> {
lifetime: self.lower_lifetime(lifetime),
bounds: self.lower_param_bounds(
bounds,
- ImplTraitContext::Disallowed(ImplTraitPosition::Bound),
+ &ImplTraitContext::Disallowed(ImplTraitPosition::Bound),
),
in_where_clause: true,
}),
- WherePredicate::EqPredicate(WhereEqPredicate { id, ref lhs_ty, ref rhs_ty, span }) => {
+ WherePredicate::EqPredicate(WhereEqPredicate { ref lhs_ty, ref rhs_ty, span }) => {
hir::WherePredicate::EqPredicate(hir::WhereEqPredicate {
- hir_id: self.lower_node_id(id),
lhs_ty: self
- .lower_ty(lhs_ty, ImplTraitContext::Disallowed(ImplTraitPosition::Type)),
+ .lower_ty(lhs_ty, &ImplTraitContext::Disallowed(ImplTraitPosition::Type)),
rhs_ty: self
- .lower_ty(rhs_ty, ImplTraitContext::Disallowed(ImplTraitPosition::Type)),
+ .lower_ty(rhs_ty, &ImplTraitContext::Disallowed(ImplTraitPosition::Type)),
span: self.lower_span(span),
})
}
diff --git a/compiler/rustc_ast_lowering/src/lib.rs b/compiler/rustc_ast_lowering/src/lib.rs
index 224dc3c23..ff29d15f1 100644
--- a/compiler/rustc_ast_lowering/src/lib.rs
+++ b/compiler/rustc_ast_lowering/src/lib.rs
@@ -32,14 +32,19 @@
#![feature(box_patterns)]
#![feature(let_chains)]
-#![feature(let_else)]
#![feature(never_type)]
#![recursion_limit = "256"]
#![allow(rustc::potential_query_instability)]
+#![deny(rustc::untranslatable_diagnostic)]
+#![deny(rustc::diagnostic_outside_of_impl)]
#[macro_use]
extern crate tracing;
+use crate::errors::{AssocTyParentheses, AssocTyParenthesesSub, MisplacedImplTrait, TraitFnAsync};
+
+use rustc_arena::declare_arena;
+use rustc_ast::ptr::P;
use rustc_ast::visit;
use rustc_ast::{self as ast, *};
use rustc_ast_pretty::pprust;
@@ -49,15 +54,15 @@ use rustc_data_structures::fx::FxHashMap;
use rustc_data_structures::sorted_map::SortedMap;
use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
use rustc_data_structures::sync::Lrc;
-use rustc_errors::{struct_span_err, Applicability, Handler};
+use rustc_errors::{DiagnosticArgFromDisplay, Handler, StashKey};
use rustc_hir as hir;
use rustc_hir::def::{DefKind, LifetimeRes, Namespace, PartialRes, PerNS, Res};
use rustc_hir::def_id::{LocalDefId, CRATE_DEF_ID};
use rustc_hir::definitions::DefPathData;
use rustc_hir::{ConstArg, GenericArg, ItemLocalId, ParamName, TraitCandidate};
use rustc_index::vec::{Idx, IndexVec};
-use rustc_middle::span_bug;
use rustc_middle::ty::{ResolverAstLowering, TyCtxt};
+use rustc_middle::{bug, span_bug};
use rustc_session::parse::feature_err;
use rustc_span::hygiene::MacroKind;
use rustc_span::source_map::DesugaringKind;
@@ -75,6 +80,7 @@ macro_rules! arena_vec {
mod asm;
mod block;
+mod errors;
mod expr;
mod index;
mod item;
@@ -89,6 +95,13 @@ struct LoweringContext<'a, 'hir> {
/// Used to allocate HIR nodes.
arena: &'hir hir::Arena<'hir>,
+ /// Used to allocate temporary AST nodes for use during lowering.
+ /// This allows us to create "fake" AST -- these nodes can sometimes
+ /// be allocated on the stack, but other times we need them to live longer
+ /// than the current stack frame, so they can be collected into vectors
+ /// and similar longer-lived containers.
+ ast_arena: &'a Arena<'static>,
+
/// Bodies inside the owner being lowered.
bodies: Vec<(hir::ItemLocalId, &'hir hir::Body<'hir>)>,
/// Attributes inside the owner being lowered.
@@ -112,7 +125,7 @@ struct LoweringContext<'a, 'hir> {
is_in_trait_impl: bool,
is_in_dyn_type: bool,
- current_hir_id_owner: LocalDefId,
+ current_hir_id_owner: hir::OwnerId,
item_local_id_counter: hir::ItemLocalId,
local_id_to_def_id: SortedMap<ItemLocalId, LocalDefId>,
trait_map: FxHashMap<ItemLocalId, Box<[TraitCandidate]>>,
@@ -126,22 +139,35 @@ struct LoweringContext<'a, 'hir> {
allow_try_trait: Option<Lrc<[Symbol]>>,
allow_gen_future: Option<Lrc<[Symbol]>>,
allow_into_future: Option<Lrc<[Symbol]>>,
+
+ /// Mapping from generics `def_id`s to TAIT generics `def_id`s.
+ /// For each captured lifetime (e.g., `'a`), we create a new lifetime parameter that is a generic
+ /// defined on the TAIT, so we have `type Foo<'a1> = ...`, and we establish a mapping in this
+ /// field from the original parameter `'a` to the new parameter `'a1`.
+ generics_def_id_map: Vec<FxHashMap<LocalDefId, LocalDefId>>,
}
+declare_arena!([
+ [] tys: rustc_ast::Ty,
+ [] aba: rustc_ast::AngleBracketedArgs,
+ [] ptr: rustc_ast::PolyTraitRef,
+ // This `_marker` field is needed because `declare_arena` creates an `Arena<'tcx>`, so the
+ // `'tcx` lifetime must be used somewhere; without it we get a compile error.
+ [] _marker: std::marker::PhantomData<&'tcx ()>,
+]);
+
trait ResolverAstLoweringExt {
fn legacy_const_generic_args(&self, expr: &Expr) -> Option<Vec<usize>>;
fn get_partial_res(&self, id: NodeId) -> Option<PartialRes>;
fn get_import_res(&self, id: NodeId) -> PerNS<Option<Res<NodeId>>>;
+ // Clones the resolution (if any) on `source` and applies it
+ // to `target`. Used when desugaring a `UseTreeKind::Nested` to
+ // multiple `UseTreeKind::Simple`s.
+ fn clone_res(&mut self, source: NodeId, target: NodeId);
fn get_label_res(&self, id: NodeId) -> Option<NodeId>;
fn get_lifetime_res(&self, id: NodeId) -> Option<LifetimeRes>;
fn take_extra_lifetime_params(&mut self, id: NodeId) -> Vec<(Ident, NodeId, LifetimeRes)>;
fn decl_macro_kind(&self, def_id: LocalDefId) -> MacroKind;
- /// Record the map from `from` local def id to `to` local def id, on `generics_def_id_map`
- /// field.
- fn record_def_id_remap(&mut self, from: LocalDefId, to: LocalDefId);
- /// Get the previously recorded `to` local def id given the `from` local def id, obtained using
- /// `generics_def_id_map` field.
- fn get_remapped_def_id(&self, local_def_id: LocalDefId) -> LocalDefId;
}
impl ResolverAstLoweringExt for ResolverAstLowering {
@@ -153,12 +179,7 @@ impl ResolverAstLoweringExt for ResolverAstLowering {
return None;
}
- let partial_res = self.partial_res_map.get(&expr.id)?;
- if partial_res.unresolved_segments() != 0 {
- return None;
- }
-
- if let Res::Def(DefKind::Fn, def_id) = partial_res.base_res() {
+ if let Res::Def(DefKind::Fn, def_id) = self.partial_res_map.get(&expr.id)?.full_res()? {
// We only support cross-crate argument rewriting. Uses
// within the same crate should be updated to use the new
// const generics style.
@@ -175,6 +196,12 @@ impl ResolverAstLoweringExt for ResolverAstLowering {
None
}
+ fn clone_res(&mut self, source: NodeId, target: NodeId) {
+ if let Some(res) = self.partial_res_map.get(&source) {
+ self.partial_res_map.insert(target, *res);
+ }
+ }
+
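A minimal standalone sketch of what `clone_res` does, with a plain `HashMap<u32, &str>` and integer ids standing in for the real `partial_res_map`, `PartialRes`, and `NodeId` (all names and values here are illustrative):

    use std::collections::HashMap;

    // Hypothetical stand-ins: u32 for NodeId, &str for the recorded resolution.
    fn clone_res(partial_res_map: &mut HashMap<u32, &'static str>, source: u32, target: u32) {
        if let Some(res) = partial_res_map.get(&source).copied() {
            partial_res_map.insert(target, res);
        }
    }

    fn main() {
        let mut partial_res_map = HashMap::new();
        partial_res_map.insert(1, "Def(Mod, foo)"); // resolution of the original `foo` segment
        // Desugaring `use foo::{a, b};` clones the `foo` segment with a fresh id (7 here)
        // and copies the old resolution onto it so stability checking still sees it.
        clone_res(&mut partial_res_map, 1, 7);
        assert_eq!(partial_res_map[&7], "Def(Mod, foo)");
    }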
/// Obtains resolution for a `NodeId` with a single resolution.
fn get_partial_res(&self, id: NodeId) -> Option<PartialRes> {
self.partial_res_map.get(&id).copied()
@@ -209,41 +236,6 @@ impl ResolverAstLoweringExt for ResolverAstLowering {
fn decl_macro_kind(&self, def_id: LocalDefId) -> MacroKind {
self.builtin_macro_kinds.get(&def_id).copied().unwrap_or(MacroKind::Bang)
}
-
- /// Push a remapping into the top-most map.
- /// Panics if no map has been pushed.
- /// Remapping is used when creating lowering `-> impl Trait` return
- /// types to create the resulting opaque type.
- #[tracing::instrument(level = "debug", skip(self))]
- fn record_def_id_remap(&mut self, from: LocalDefId, to: LocalDefId) {
- self.generics_def_id_map.last_mut().expect("no map pushed").insert(from, to);
- }
-
- fn get_remapped_def_id(&self, mut local_def_id: LocalDefId) -> LocalDefId {
- // `generics_def_id_map` is a stack of mappings. As we go deeper in impl traits nesting we
- // push new mappings so we need to try first the latest mappings, hence `iter().rev()`.
- //
- // Consider:
- //
- // `fn test<'a, 'b>() -> impl Trait<&'a u8, Ty = impl Sized + 'b> {}`
- //
- // We would end with a generics_def_id_map like:
- //
- // `[[fn#'b -> impl_trait#'b], [fn#'b -> impl_sized#'b]]`
- //
- // for the opaque type generated on `impl Sized + 'b`, We want the result to be:
- // impl_sized#'b, so iterating forward is the wrong thing to do.
- for map in self.generics_def_id_map.iter().rev() {
- if let Some(r) = map.get(&local_def_id) {
- debug!("def_id_remapper: remapping from `{local_def_id:?}` to `{r:?}`");
- local_def_id = *r;
- } else {
- debug!("def_id_remapper: no remapping for `{local_def_id:?}` found in map");
- }
- }
-
- local_def_id
- }
}
/// Context of `impl Trait` in code, which determines whether it is allowed in an HIR subtree,
@@ -264,6 +256,7 @@ enum ImplTraitContext {
ReturnPositionOpaqueTy {
/// Origin: Either OpaqueTyOrigin::FnReturn or OpaqueTyOrigin::AsyncFn,
origin: hir::OpaqueTyOrigin,
+ in_trait: bool,
},
/// Impl trait in type aliases.
TypeAliasesOpaqueTy,
@@ -323,7 +316,7 @@ impl std::fmt::Display for ImplTraitPosition {
}
}
-#[derive(Debug)]
+#[derive(Debug, PartialEq, Eq)]
enum FnDeclKind {
Fn,
Inherent,
@@ -335,9 +328,20 @@ enum FnDeclKind {
}
impl FnDeclKind {
- fn impl_trait_return_allowed(&self) -> bool {
+ fn impl_trait_allowed(&self, tcx: TyCtxt<'_>) -> bool {
match self {
FnDeclKind::Fn | FnDeclKind::Inherent => true,
+ FnDeclKind::Impl if tcx.features().return_position_impl_trait_in_trait => true,
+ FnDeclKind::Trait if tcx.features().return_position_impl_trait_in_trait => true,
+ _ => false,
+ }
+ }
+
+ fn async_fn_allowed(&self, tcx: TyCtxt<'_>) -> bool {
+ match self {
+ FnDeclKind::Fn | FnDeclKind::Inherent => true,
+ FnDeclKind::Impl if tcx.features().async_fn_in_trait => true,
+ FnDeclKind::Trait if tcx.features().async_fn_in_trait => true,
_ => false,
}
}
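Both new predicates gate the capability on the declaration kind and on an enabled feature. A hedged sketch of that shape with plain booleans standing in for `tcx.features()` (types and names below are illustrative, not the real rustc ones):

    // Plain booleans stand in for tcx.features(); names here are illustrative.
    #[allow(dead_code)]
    enum DeclKind { Free, Inherent, Trait, Impl, Other }

    fn impl_trait_allowed(kind: &DeclKind, return_position_impl_trait_in_trait: bool) -> bool {
        match kind {
            DeclKind::Free | DeclKind::Inherent => true,
            DeclKind::Trait | DeclKind::Impl => return_position_impl_trait_in_trait,
            DeclKind::Other => false,
        }
    }

    fn main() {
        assert!(impl_trait_allowed(&DeclKind::Free, false)); // always allowed in free/inherent fns
        assert!(!impl_trait_allowed(&DeclKind::Trait, false)); // rejected without the feature gate
        assert!(impl_trait_allowed(&DeclKind::Trait, true)); // accepted once the gate is enabled
    }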
@@ -430,10 +434,13 @@ pub fn lower_to_hir<'hir>(tcx: TyCtxt<'hir>, (): ()) -> hir::Crate<'hir> {
tcx.definitions_untracked().def_index_count(),
);
+ let ast_arena = Arena::default();
+
for def_id in ast_index.indices() {
item::ItemLowerer {
tcx,
resolver: &mut resolver,
+ ast_arena: &ast_arena,
ast_index: &ast_index,
owners: &mut owners,
}
@@ -500,6 +507,17 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
}
/// Given the id of some node in the AST, finds the `LocalDefId` associated with it by the name
+ /// resolver (if any).
+ fn orig_opt_local_def_id(&self, node: NodeId) -> Option<LocalDefId> {
+ self.resolver.node_id_to_def_id.get(&node).map(|local_def_id| *local_def_id)
+ }
+
+ fn orig_local_def_id(&self, node: NodeId) -> LocalDefId {
+ self.orig_opt_local_def_id(node)
+ .unwrap_or_else(|| panic!("no entry for node id: `{:?}`", node))
+ }
+
+ /// Given the id of some node in the AST, finds the `LocalDefId` associated with it by the name
/// resolver (if any), after applying any remapping from `get_remapped_def_id`.
///
/// For example, in a function like `fn foo<'a>(x: &'a u32)`,
@@ -513,16 +531,36 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
/// we would create an opaque type `type FooReturn<'a1> = impl Debug + 'a1`.
/// When lowering the `Debug + 'a` bounds, we add a remapping to map `'a` to `'a1`.
fn opt_local_def_id(&self, node: NodeId) -> Option<LocalDefId> {
- self.resolver
- .node_id_to_def_id
- .get(&node)
- .map(|local_def_id| self.resolver.get_remapped_def_id(*local_def_id))
+ self.orig_opt_local_def_id(node).map(|local_def_id| self.get_remapped_def_id(local_def_id))
}
fn local_def_id(&self, node: NodeId) -> LocalDefId {
self.opt_local_def_id(node).unwrap_or_else(|| panic!("no entry for node id: `{:?}`", node))
}
+ /// Get the previously recorded `to` local def id given the `from` local def id, obtained using
+ /// `generics_def_id_map` field.
+ fn get_remapped_def_id(&self, local_def_id: LocalDefId) -> LocalDefId {
+ // `generics_def_id_map` is a stack of mappings. As we go deeper into nested impl traits, we
+ // push new mappings, so we must consult the latest (innermost) mapping first, hence `iter().rev()`.
+ //
+ // Consider:
+ //
+ // `fn test<'a, 'b>() -> impl Trait<&'a u8, Ty = impl Sized + 'b> {}`
+ //
+ // We would end with a generics_def_id_map like:
+ //
+ // `[[fn#'b -> impl_trait#'b], [fn#'b -> impl_sized#'b]]`
+ //
+ // for the opaque type generated on `impl Sized + 'b`, we want the result to be impl_sized#'b.
+ // So, searching from the start (outermost map) first would give the wrong result, impl_trait#'b.
+ self.generics_def_id_map
+ .iter()
+ .rev()
+ .find_map(|map| map.get(&local_def_id).map(|local_def_id| *local_def_id))
+ .unwrap_or(local_def_id)
+ }
+
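A standalone sketch of the stack lookup this performs, with plain integers in place of `LocalDefId` (ids and names are illustrative); it shows why the reverse iteration makes the innermost remapping win:

    use std::collections::HashMap;

    // Plain u32 ids stand in for LocalDefId; the stack mirrors generics_def_id_map.
    fn get_remapped(stack: &[HashMap<u32, u32>], id: u32) -> u32 {
        stack
            .iter()
            .rev() // innermost (most recently pushed) mapping must win
            .find_map(|map| map.get(&id).copied())
            .unwrap_or(id)
    }

    fn main() {
        // Outer map: fn#'b -> impl_trait#'b (10); inner map: fn#'b -> impl_sized#'b (20).
        let outer = HashMap::from([(1u32, 10u32)]);
        let inner = HashMap::from([(1u32, 20u32)]);
        assert_eq!(get_remapped(&[outer, inner], 1), 20); // innermost remapping is found first
        assert_eq!(get_remapped(&[], 1), 1); // no remapping: the id is returned unchanged
    }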
/// Freshen the `LoweringContext` and ready it to lower a nested item.
/// The lowered item is registered into `self.children`.
///
@@ -541,7 +579,8 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
let current_node_ids = std::mem::take(&mut self.node_id_to_local_id);
let current_id_to_def_id = std::mem::take(&mut self.local_id_to_def_id);
let current_trait_map = std::mem::take(&mut self.trait_map);
- let current_owner = std::mem::replace(&mut self.current_hir_id_owner, def_id);
+ let current_owner =
+ std::mem::replace(&mut self.current_hir_id_owner, hir::OwnerId { def_id });
let current_local_counter =
std::mem::replace(&mut self.item_local_id_counter, hir::ItemLocalId::new(1));
let current_impl_trait_defs = std::mem::take(&mut self.impl_trait_defs);
@@ -556,7 +595,7 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
debug_assert_eq!(_old, None);
let item = f(self);
- debug_assert_eq!(def_id, item.def_id());
+ debug_assert_eq!(def_id, item.def_id().def_id);
// `f` should have consumed all the elements in these vectors when constructing `item`.
debug_assert!(self.impl_trait_defs.is_empty());
debug_assert!(self.impl_trait_bounds.is_empty());
@@ -591,9 +630,9 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
remap: FxHashMap<LocalDefId, LocalDefId>,
f: impl FnOnce(&mut Self) -> R,
) -> R {
- self.resolver.generics_def_id_map.push(remap);
+ self.generics_def_id_map.push(remap);
let res = f(self);
- self.resolver.generics_def_id_map.pop();
+ self.generics_def_id_map.pop();
res
}
@@ -644,14 +683,12 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
) -> (Fingerprint, Fingerprint) {
self.tcx.with_stable_hashing_context(|mut hcx| {
let mut stable_hasher = StableHasher::new();
- hcx.with_hir_bodies(true, node.def_id(), bodies, |hcx| {
+ hcx.with_hir_bodies(node.def_id(), bodies, |hcx| {
node.hash_stable(hcx, &mut stable_hasher)
});
let hash_including_bodies = stable_hasher.finish();
let mut stable_hasher = StableHasher::new();
- hcx.with_hir_bodies(false, node.def_id(), bodies, |hcx| {
- node.hash_stable(hcx, &mut stable_hasher)
- });
+ hcx.without_hir_bodies(|hcx| node.hash_stable(hcx, &mut stable_hasher));
let hash_without_bodies = stable_hasher.finish();
(hash_including_bodies, hash_without_bodies)
})
@@ -663,6 +700,7 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
/// actually used in the HIR, as that would trigger an assertion in the
/// `HirIdValidator` later on, which makes sure that all `NodeId`s got mapped
/// properly. Calling the method twice with the same `NodeId` is fine though.
+ #[instrument(level = "debug", skip(self), ret)]
fn lower_node_id(&mut self, ast_node_id: NodeId) -> hir::HirId {
assert_ne!(ast_node_id, DUMMY_NODE_ID);
@@ -696,6 +734,7 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
}
/// Generate a new `HirId` without a backing `NodeId`.
+ #[instrument(level = "debug", skip(self), ret)]
fn next_id(&mut self) -> hir::HirId {
let owner = self.current_hir_id_owner;
let local_id = self.item_local_id_counter;
@@ -722,12 +761,7 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
}
fn expect_full_res(&mut self, id: NodeId) -> Res<NodeId> {
- self.resolver.get_partial_res(id).map_or(Res::Err, |pr| {
- if pr.unresolved_segments() != 0 {
- panic!("path not fully resolved: {:?}", pr);
- }
- pr.base_res()
- })
+ self.resolver.get_partial_res(id).map_or(Res::Err, |pr| pr.expect_full_res())
}
fn expect_full_res_from_use(&mut self, id: NodeId) -> impl Iterator<Item = Res<NodeId>> {
@@ -755,7 +789,7 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
/// Mark a span as relative to the current owning item.
fn lower_span(&self, span: Span) -> Span {
if self.tcx.sess.opts.unstable_opts.incremental_relative_spans {
- span.with_parent(Some(self.current_hir_id_owner))
+ span.with_parent(Some(self.current_hir_id_owner.def_id))
} else {
// Do not make spans relative when not using incremental compilation.
span
@@ -767,7 +801,7 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
}
/// Converts a lifetime into a new generic parameter.
- #[tracing::instrument(level = "debug", skip(self))]
+ #[instrument(level = "debug", skip(self))]
fn lifetime_res_to_generic_param(
&mut self,
ident: Ident,
@@ -781,7 +815,7 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
LifetimeRes::Fresh { param, .. } => {
// Late resolution delegates to us the creation of the `LocalDefId`.
let _def_id = self.create_def(
- self.current_hir_id_owner,
+ self.current_hir_id_owner.def_id,
param,
DefPathData::LifetimeNs(kw::UnderscoreLifetime),
);
@@ -811,7 +845,7 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
/// name resolver owing to lifetime elision; this also populates the resolver's node-id->def-id
/// map, so that later calls to `opt_node_id_to_def_id` that refer to these extra lifetime
/// parameters will be successful.
- #[tracing::instrument(level = "debug", skip(self))]
+ #[instrument(level = "debug", skip(self))]
#[inline]
fn lower_lifetime_binder(
&mut self,
@@ -874,14 +908,14 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
// the `HirId`s. We don't actually need the HIR version of attributes anyway.
// Tokens are also not needed after macro expansion and parsing.
let kind = match attr.kind {
- AttrKind::Normal(ref item, _) => AttrKind::Normal(
- AttrItem {
- path: item.path.clone(),
- args: self.lower_mac_args(&item.args),
+ AttrKind::Normal(ref normal) => AttrKind::Normal(P(NormalAttr {
+ item: AttrItem {
+ path: normal.item.path.clone(),
+ args: self.lower_mac_args(&normal.item.args),
tokens: None,
},
- None,
- ),
+ tokens: None,
+ })),
AttrKind::DocComment(comment_kind, data) => AttrKind::DocComment(comment_kind, data),
};
@@ -929,8 +963,8 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
lit.clone()
} else {
Lit {
- token: token::Lit::new(token::LitKind::Err, kw::Empty, None),
- kind: LitKind::Err(kw::Empty),
+ token_lit: token::Lit::new(token::LitKind::Err, kw::Empty, None),
+ kind: LitKind::Err,
span: DUMMY_SP,
}
};
@@ -956,7 +990,7 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
fn lower_assoc_ty_constraint(
&mut self,
constraint: &AssocConstraint,
- itctx: ImplTraitContext,
+ itctx: &ImplTraitContext,
) -> hir::TypeBinding<'hir> {
debug!("lower_assoc_ty_constraint(constraint={:?}, itctx={:?})", constraint, itctx);
// lower generic arguments of identifier in constraint
@@ -967,18 +1001,15 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
}
GenericArgs::Parenthesized(ref data) => {
self.emit_bad_parenthesized_trait_in_assoc_ty(data);
- self.lower_angle_bracketed_parameter_data(
- &data.as_angle_bracketed_args(),
- ParamMode::Explicit,
- itctx,
- )
- .0
+ let aba = self.ast_arena.aba.alloc(data.as_angle_bracketed_args());
+ self.lower_angle_bracketed_parameter_data(aba, ParamMode::Explicit, itctx).0
}
};
gen_args_ctor.into_generic_args(self)
} else {
self.arena.alloc(hir::GenericArgs::none())
};
+ let itctx_tait = &ImplTraitContext::TypeAliasesOpaqueTy;
let kind = match constraint.kind {
AssocConstraintKind::Equality { ref term } => {
@@ -1016,9 +1047,7 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
// then to an opaque type).
//
// FIXME: this is only needed until `impl Trait` is allowed in type aliases.
- ImplTraitContext::Disallowed(_) if self.is_in_dyn_type => {
- (true, ImplTraitContext::TypeAliasesOpaqueTy)
- }
+ ImplTraitContext::Disallowed(_) if self.is_in_dyn_type => (true, itctx_tait),
// We are in the parameter position, but not within a dyn type:
//
@@ -1034,21 +1063,17 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
// Desugar `AssocTy: Bounds` into `AssocTy = impl Bounds`. We do this by
// constructing the HIR for `impl bounds...` and then lowering that.
- let parent_def_id = self.current_hir_id_owner;
let impl_trait_node_id = self.next_node_id();
- self.create_def(parent_def_id, impl_trait_node_id, DefPathData::ImplTrait);
self.with_dyn_type_scope(false, |this| {
let node_id = this.next_node_id();
- let ty = this.lower_ty(
- &Ty {
- id: node_id,
- kind: TyKind::ImplTrait(impl_trait_node_id, bounds.clone()),
- span: this.lower_span(constraint.span),
- tokens: None,
- },
- itctx,
- );
+ let ty = this.ast_arena.tys.alloc(Ty {
+ id: node_id,
+ kind: TyKind::ImplTrait(impl_trait_node_id, bounds.clone()),
+ span: this.lower_span(constraint.span),
+ tokens: None,
+ });
+ let ty = this.lower_ty(ty, itctx);
hir::TypeBindingKind::Equality { term: ty.into() }
})
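To make the surrounding desugaring concrete: the gated `AssocTy: Bounds` shorthand is lowered as if it were an equality constraint to a nested `impl Bounds`. The desugared form is ordinary stable Rust, so it can be shown directly (illustrative example, not taken from the patch):

    // `fn f() -> impl Iterator<Item: Clone>` (associated_type_bounds, unstable) is handled
    // as if written with an equality constraint to a nested opaque type:
    fn f() -> impl Iterator<Item = impl Clone> {
        ["a".to_string(), "b".to_string()].into_iter()
    }

    fn main() {
        let collected: Vec<_> = f().collect();
        assert_eq!(collected.len(), 2);
    }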
@@ -1072,19 +1097,11 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
}
fn emit_bad_parenthesized_trait_in_assoc_ty(&self, data: &ParenthesizedArgs) {
- let mut err = self.tcx.sess.struct_span_err(
- data.span,
- "parenthesized generic arguments cannot be used in associated type constraints",
- );
// Suggest removing empty parentheses: "Trait()" -> "Trait"
- if data.inputs.is_empty() {
+ let sub = if data.inputs.is_empty() {
let parentheses_span =
data.inputs_span.shrink_to_lo().to(data.inputs_span.shrink_to_hi());
- err.multipart_suggestion(
- "remove these parentheses",
- vec![(parentheses_span, String::new())],
- Applicability::MaybeIncorrect,
- );
+ AssocTyParenthesesSub::Empty { parentheses_span }
}
// Suggest replacing parentheses with angle brackets `Trait(params...)` to `Trait<params...>`
else {
@@ -1098,20 +1115,16 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
// End of last argument to end of parameters
let close_param =
data.inputs.last().unwrap().span.shrink_to_hi().to(data.inputs_span.shrink_to_hi());
- err.multipart_suggestion(
- &format!("use angle brackets instead",),
- vec![(open_param, String::from("<")), (close_param, String::from(">"))],
- Applicability::MaybeIncorrect,
- );
- }
- err.emit();
+ AssocTyParenthesesSub::NotEmpty { open_param, close_param }
+ };
+ self.tcx.sess.emit_err(AssocTyParentheses { span: data.span, sub });
}
#[instrument(level = "debug", skip(self))]
fn lower_generic_arg(
&mut self,
arg: &ast::GenericArg,
- itctx: ImplTraitContext,
+ itctx: &ImplTraitContext,
) -> hir::GenericArg<'hir> {
match arg {
ast::GenericArg::Lifetime(lt) => GenericArg::Lifetime(self.lower_lifetime(&lt)),
@@ -1128,8 +1141,11 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
// type and value namespaces. If we resolved the path in the value namespace, we
// transform it into a generic const argument.
TyKind::Path(ref qself, ref path) => {
- if let Some(partial_res) = self.resolver.get_partial_res(ty.id) {
- let res = partial_res.base_res();
+ if let Some(res) = self
+ .resolver
+ .get_partial_res(ty.id)
+ .and_then(|partial_res| partial_res.full_res())
+ {
if !res.matches_ns(Namespace::TypeNS) {
debug!(
"lower_generic_arg: Lowering type argument as const argument: {:?}",
@@ -1142,7 +1158,11 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
let node_id = self.next_node_id();
// Add a definition for the in-band const def.
- self.create_def(parent_def_id, node_id, DefPathData::AnonConst);
+ self.create_def(
+ parent_def_id.def_id,
+ node_id,
+ DefPathData::AnonConst,
+ );
let span = self.lower_span(ty.span);
let path_expr = Expr {
@@ -1163,7 +1183,7 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
}
_ => {}
}
- GenericArg::Type(self.lower_ty_direct(&ty, itctx))
+ GenericArg::Type(self.lower_ty(&ty, itctx))
}
ast::GenericArg::Const(ct) => GenericArg::Const(ConstArg {
value: self.lower_anon_const(&ct),
@@ -1173,7 +1193,7 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
}
#[instrument(level = "debug", skip(self))]
- fn lower_ty(&mut self, t: &Ty, itctx: ImplTraitContext) -> &'hir hir::Ty<'hir> {
+ fn lower_ty(&mut self, t: &Ty, itctx: &ImplTraitContext) -> &'hir hir::Ty<'hir> {
self.arena.alloc(self.lower_ty_direct(t, itctx))
}
@@ -1183,32 +1203,32 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
qself: &Option<QSelf>,
path: &Path,
param_mode: ParamMode,
- itctx: ImplTraitContext,
+ itctx: &ImplTraitContext,
) -> hir::Ty<'hir> {
// Check whether we should interpret this as a bare trait object.
// This check mirrors the one in late resolution. We only introduce this special case in
- // the rare occurence we need to lower `Fresh` anonymous lifetimes.
+ // the rare occurrence we need to lower `Fresh` anonymous lifetimes.
// The other cases when a qpath should be opportunistically made a trait object are handled
// by `ty_path`.
if qself.is_none()
&& let Some(partial_res) = self.resolver.get_partial_res(t.id)
- && partial_res.unresolved_segments() == 0
- && let Res::Def(DefKind::Trait | DefKind::TraitAlias, _) = partial_res.base_res()
+ && let Some(Res::Def(DefKind::Trait | DefKind::TraitAlias, _)) = partial_res.full_res()
{
let (bounds, lifetime_bound) = self.with_dyn_type_scope(true, |this| {
+ let poly_trait_ref = this.ast_arena.ptr.alloc(PolyTraitRef {
+ bound_generic_params: vec![],
+ trait_ref: TraitRef { path: path.clone(), ref_id: t.id },
+ span: t.span
+ });
let bound = this.lower_poly_trait_ref(
- &PolyTraitRef {
- bound_generic_params: vec![],
- trait_ref: TraitRef { path: path.clone(), ref_id: t.id },
- span: t.span
- },
+ poly_trait_ref,
itctx,
);
let bounds = this.arena.alloc_from_iter([bound]);
let lifetime_bound = this.elided_dyn_bound(t.span);
(bounds, lifetime_bound)
});
- let kind = hir::TyKind::TraitObject(bounds, lifetime_bound, TraitObjectSyntax::None);
+ let kind = hir::TyKind::TraitObject(bounds, &lifetime_bound, TraitObjectSyntax::None);
return hir::Ty { kind, span: self.lower_span(t.span), hir_id: self.next_id() };
}
@@ -1225,7 +1245,7 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
self.ty(span, hir::TyKind::Tup(tys))
}
- fn lower_ty_direct(&mut self, t: &Ty, itctx: ImplTraitContext) -> hir::Ty<'hir> {
+ fn lower_ty_direct(&mut self, t: &Ty, itctx: &ImplTraitContext) -> hir::Ty<'hir> {
let kind = match t.kind {
TyKind::Infer => hir::TyKind::Infer,
TyKind::Err => hir::TyKind::Err,
@@ -1241,7 +1261,7 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
} else {
self.next_node_id()
};
- let span = self.tcx.sess.source_map().next_point(t.span.shrink_to_lo());
+ let span = self.tcx.sess.source_map().start_point(t.span);
Lifetime { ident: Ident::new(kw::UnderscoreLifetime, span), id }
});
let lifetime = self.lower_lifetime(&region);
@@ -1253,7 +1273,7 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
generic_params,
unsafety: self.lower_unsafety(f.unsafety),
abi: self.lower_extern(f.ext),
- decl: self.lower_fn_decl(&f.decl, None, FnDeclKind::Pointer, None),
+ decl: self.lower_fn_decl(&f.decl, None, t.span, FnDeclKind::Pointer, None),
param_names: self.lower_fn_params_to_names(&f.decl),
}))
}
@@ -1268,14 +1288,17 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
return self.lower_path_ty(t, qself, path, ParamMode::Explicit, itctx);
}
TyKind::ImplicitSelf => {
+ let hir_id = self.next_id();
let res = self.expect_full_res(t.id);
let res = self.lower_res(res);
hir::TyKind::Path(hir::QPath::Resolved(
None,
self.arena.alloc(hir::Path {
res,
- segments: arena_vec![self; hir::PathSegment::from_ident(
- Ident::with_dummy_span(kw::SelfUpper)
+ segments: arena_vec![self; hir::PathSegment::new(
+ Ident::with_dummy_span(kw::SelfUpper),
+ hir_id,
+ res
)],
span: self.lower_span(t.span),
}),
@@ -1318,20 +1341,29 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
TyKind::ImplTrait(def_node_id, ref bounds) => {
let span = t.span;
match itctx {
- ImplTraitContext::ReturnPositionOpaqueTy { origin } => {
- self.lower_opaque_impl_trait(span, origin, def_node_id, bounds, itctx)
- }
- ImplTraitContext::TypeAliasesOpaqueTy => {
- let nested_itctx = ImplTraitContext::TypeAliasesOpaqueTy;
- self.lower_opaque_impl_trait(
+ ImplTraitContext::ReturnPositionOpaqueTy { origin, in_trait } => self
+ .lower_opaque_impl_trait(
span,
- hir::OpaqueTyOrigin::TyAlias,
+ *origin,
def_node_id,
bounds,
- nested_itctx,
- )
- }
+ *in_trait,
+ itctx,
+ ),
+ ImplTraitContext::TypeAliasesOpaqueTy => self.lower_opaque_impl_trait(
+ span,
+ hir::OpaqueTyOrigin::TyAlias,
+ def_node_id,
+ bounds,
+ false,
+ itctx,
+ ),
ImplTraitContext::Universal => {
+ self.create_def(
+ self.current_hir_id_owner.def_id,
+ def_node_id,
+ DefPathData::ImplTrait,
+ );
let span = t.span;
let ident = Ident::from_str_and_span(&pprust::ty_to_string(t), span);
let (param, bounds, path) =
@@ -1342,15 +1374,26 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
}
path
}
+ ImplTraitContext::Disallowed(
+ position @ (ImplTraitPosition::TraitReturn | ImplTraitPosition::ImplReturn),
+ ) => {
+ self.tcx
+ .sess
+ .create_feature_err(
+ MisplacedImplTrait {
+ span: t.span,
+ position: DiagnosticArgFromDisplay(&position),
+ },
+ sym::return_position_impl_trait_in_trait,
+ )
+ .emit();
+ hir::TyKind::Err
+ }
ImplTraitContext::Disallowed(position) => {
- let mut err = struct_span_err!(
- self.tcx.sess,
- t.span,
- E0562,
- "`impl Trait` only allowed in function and inherent method return types, not in {}",
- position
- );
- err.emit();
+ self.tcx.sess.emit_err(MisplacedImplTrait {
+ span: t.span,
+ position: DiagnosticArgFromDisplay(&position),
+ });
hir::TyKind::Err
}
}
@@ -1397,14 +1440,15 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
/// added explicitly in the HIR). But this includes all the lifetimes, and we only want to
/// capture the lifetimes that are referenced in the bounds. Therefore, we add *extra* lifetime parameters
/// for the lifetimes that get captured (`'x`, in our example above) and reference those.
- #[tracing::instrument(level = "debug", skip(self))]
+ #[instrument(level = "debug", skip(self), ret)]
fn lower_opaque_impl_trait(
&mut self,
span: Span,
origin: hir::OpaqueTyOrigin,
opaque_ty_node_id: NodeId,
bounds: &GenericBounds,
- itctx: ImplTraitContext,
+ in_trait: bool,
+ itctx: &ImplTraitContext,
) -> hir::TyKind<'hir> {
// Make sure we know that some funky desugaring has been going on here.
// This is a first: there is code in other places like for loop
@@ -1413,7 +1457,17 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
// frequently opened issues show.
let opaque_ty_span = self.mark_span_with_reason(DesugaringKind::OpaqueTy, span, None);
- let opaque_ty_def_id = self.local_def_id(opaque_ty_node_id);
+ let opaque_ty_def_id = match origin {
+ hir::OpaqueTyOrigin::TyAlias => self.create_def(
+ self.current_hir_id_owner.def_id,
+ opaque_ty_node_id,
+ DefPathData::ImplTrait,
+ ),
+ hir::OpaqueTyOrigin::FnReturn(fn_def_id) => {
+ self.create_def(fn_def_id, opaque_ty_node_id, DefPathData::ImplTrait)
+ }
+ hir::OpaqueTyOrigin::AsyncFn(..) => bug!("unreachable"),
+ };
debug!(?opaque_ty_def_id);
// Contains the new lifetime definitions created for the TAIT (if any).
@@ -1492,6 +1546,7 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
}),
bounds: hir_bounds,
origin,
+ in_trait,
};
debug!(?opaque_ty_item);
@@ -1518,7 +1573,11 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
debug!(?lifetimes);
// `impl Trait` now just becomes `Foo<'a, 'b, ..>`.
- hir::TyKind::OpaqueDef(hir::ItemId { def_id: opaque_ty_def_id }, lifetimes)
+ hir::TyKind::OpaqueDef(
+ hir::ItemId { owner_id: hir::OwnerId { def_id: opaque_ty_def_id } },
+ lifetimes,
+ in_trait,
+ )
}
/// Registers a new opaque type with the proper `NodeId`s and
@@ -1534,7 +1593,7 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
// Generate a `type Foo = impl Trait;` declaration.
trace!("registering opaque type with id {:#?}", opaque_ty_id);
let opaque_ty_item = hir::Item {
- def_id: opaque_ty_id,
+ owner_id: hir::OwnerId { def_id: opaque_ty_id },
ident: Ident::empty(),
kind: opaque_ty_item_kind,
vis_span: self.lower_span(span.shrink_to_lo()),
@@ -1577,7 +1636,7 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
LifetimeRes::Fresh { param, binder: _ } => {
debug_assert_eq!(lifetime.ident.name, kw::UnderscoreLifetime);
- if let Some(old_def_id) = self.opt_local_def_id(param) && remapping.get(&old_def_id).is_none() {
+ if let Some(old_def_id) = self.orig_opt_local_def_id(param) && remapping.get(&old_def_id).is_none() {
let node_id = self.next_node_id();
let new_def_id = self.create_def(
@@ -1626,19 +1685,17 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
// `fn_def_id`: if `Some`, impl Trait arguments are lowered into generic parameters on the
// given DefId, otherwise impl Trait is disallowed. Must be `Some` if
// `make_ret_async` is also `Some`.
- // `impl_trait_return_allow`: determines whether `impl Trait` can be used in return position.
- // This guards against trait declarations and implementations where `impl Trait` is
- // disallowed.
// `make_ret_async`: if `Some`, converts `-> T` into `-> impl Future<Output = T>` in the
// return type. This is used for `async fn` declarations. The `NodeId` is the ID of the
- // return type `impl Trait` item.
- #[tracing::instrument(level = "debug", skip(self))]
+ // return type `impl Trait` item, and the `Span` points to the `async` keyword.
+ #[instrument(level = "debug", skip(self))]
fn lower_fn_decl(
&mut self,
decl: &FnDecl,
fn_node_id: Option<NodeId>,
+ fn_span: Span,
kind: FnDeclKind,
- make_ret_async: Option<NodeId>,
+ make_ret_async: Option<(NodeId, Span)>,
) -> &'hir hir::FnDecl<'hir> {
let c_variadic = decl.c_variadic();
@@ -1651,11 +1708,11 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
}
let inputs = self.arena.alloc_from_iter(inputs.iter().map(|param| {
if fn_node_id.is_some() {
- self.lower_ty_direct(&param.ty, ImplTraitContext::Universal)
+ self.lower_ty_direct(&param.ty, &ImplTraitContext::Universal)
} else {
self.lower_ty_direct(
&param.ty,
- ImplTraitContext::Disallowed(match kind {
+ &ImplTraitContext::Disallowed(match kind {
FnDeclKind::Fn | FnDeclKind::Inherent => {
unreachable!("fn should allow in-band lifetimes")
}
@@ -1669,20 +1726,39 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
}
}));
- let output = if let Some(ret_id) = make_ret_async {
+ let output = if let Some((ret_id, span)) = make_ret_async {
+ if !kind.async_fn_allowed(self.tcx) {
+ match kind {
+ FnDeclKind::Trait | FnDeclKind::Impl => {
+ self.tcx
+ .sess
+ .create_feature_err(
+ TraitFnAsync { fn_span, span },
+ sym::async_fn_in_trait,
+ )
+ .emit();
+ }
+ _ => {
+ self.tcx.sess.emit_err(TraitFnAsync { fn_span, span });
+ }
+ }
+ }
+
self.lower_async_fn_ret_ty(
&decl.output,
fn_node_id.expect("`make_ret_async` but no `fn_def_id`"),
ret_id,
+ matches!(kind, FnDeclKind::Trait),
)
} else {
match decl.output {
FnRetTy::Ty(ref ty) => {
- let context = match fn_node_id {
- Some(fn_node_id) if kind.impl_trait_return_allowed() => {
+ let mut context = match fn_node_id {
+ Some(fn_node_id) if kind.impl_trait_allowed(self.tcx) => {
let fn_def_id = self.local_def_id(fn_node_id);
ImplTraitContext::ReturnPositionOpaqueTy {
origin: hir::OpaqueTyOrigin::FnReturn(fn_def_id),
+ in_trait: matches!(kind, FnDeclKind::Trait),
}
}
_ => ImplTraitContext::Disallowed(match kind {
@@ -1696,7 +1772,7 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
FnDeclKind::Impl => ImplTraitPosition::ImplReturn,
}),
};
- hir::FnRetTy::Return(self.lower_ty(ty, context))
+ hir::FnRetTy::Return(self.lower_ty(ty, &mut context))
}
FnRetTy::Default(span) => hir::FnRetTy::DefaultReturn(self.lower_span(span)),
}
@@ -1707,10 +1783,9 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
output,
c_variadic,
implicit_self: decl.inputs.get(0).map_or(hir::ImplicitSelfKind::None, |arg| {
- use BindingMode::{ByRef, ByValue};
let is_mutable_pat = matches!(
arg.pat.kind,
- PatKind::Ident(ByValue(Mutability::Mut) | ByRef(Mutability::Mut), ..)
+ PatKind::Ident(hir::BindingAnnotation(_, Mutability::Mut), ..)
);
match arg.ty.kind {
@@ -1741,12 +1816,13 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
// `output`: unlowered output type (`T` in `-> T`)
// `fn_def_id`: `DefId` of the parent function (used to create child impl trait definition)
// `opaque_ty_node_id`: `NodeId` of the opaque `impl Trait` type that should be created
- #[tracing::instrument(level = "debug", skip(self))]
+ #[instrument(level = "debug", skip(self))]
fn lower_async_fn_ret_ty(
&mut self,
output: &FnRetTy,
fn_node_id: NodeId,
opaque_ty_node_id: NodeId,
+ in_trait: bool,
) -> hir::FnRetTy<'hir> {
let span = output.span();
@@ -1805,7 +1881,7 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
let extra_lifetime_params = self.resolver.take_extra_lifetime_params(opaque_ty_node_id);
debug!(?extra_lifetime_params);
for (ident, outer_node_id, outer_res) in extra_lifetime_params {
- let outer_def_id = self.local_def_id(outer_node_id);
+ let outer_def_id = self.orig_local_def_id(outer_node_id);
let inner_node_id = self.next_node_id();
// Add a definition for the in scope lifetime def.
@@ -1873,8 +1949,18 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
//
// Then, we will create `fn foo(..) -> Foo<'_, '_>`, and
// hence the elision takes place at the fn site.
- let future_bound =
- this.lower_async_fn_output_type_to_future_bound(output, fn_def_id, span);
+ let future_bound = this.lower_async_fn_output_type_to_future_bound(
+ output,
+ span,
+ if in_trait && !this.tcx.features().return_position_impl_trait_in_trait {
+ ImplTraitContext::Disallowed(ImplTraitPosition::TraitReturn)
+ } else {
+ ImplTraitContext::ReturnPositionOpaqueTy {
+ origin: hir::OpaqueTyOrigin::FnReturn(fn_def_id),
+ in_trait,
+ }
+ },
+ );
let generic_params = this.arena.alloc_from_iter(collected_lifetimes.iter().map(
|&(new_node_id, lifetime, _)| {
@@ -1912,6 +1998,7 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
}),
bounds: arena_vec![this; future_bound],
origin: hir::OpaqueTyOrigin::AsyncFn(fn_def_id),
+ in_trait,
};
trace!("exist ty from async fn def id: {:#?}", opaque_ty_def_id);
@@ -1948,8 +2035,7 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
let res = res.unwrap_or(
self.resolver.get_lifetime_res(lifetime.id).unwrap_or(LifetimeRes::Error),
);
- let l = self.new_named_lifetime_with_res(id, span, ident, res);
- hir::GenericArg::Lifetime(l)
+ hir::GenericArg::Lifetime(self.new_named_lifetime_with_res(id, span, ident, res))
},
));
@@ -1957,8 +2043,11 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
// Foo = impl Trait` is, internally, created as a child of the
// async fn, so the *type parameters* are inherited. It's
// only the lifetime parameters that we must supply.
- let opaque_ty_ref =
- hir::TyKind::OpaqueDef(hir::ItemId { def_id: opaque_ty_def_id }, generic_args);
+ let opaque_ty_ref = hir::TyKind::OpaqueDef(
+ hir::ItemId { owner_id: hir::OwnerId { def_id: opaque_ty_def_id } },
+ generic_args,
+ in_trait,
+ );
let opaque_ty = self.ty(opaque_ty_span, opaque_ty_ref);
hir::FnRetTy::Return(self.arena.alloc(opaque_ty))
}
@@ -1967,8 +2056,8 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
fn lower_async_fn_output_type_to_future_bound(
&mut self,
output: &FnRetTy,
- fn_def_id: LocalDefId,
span: Span,
+ mut nested_impl_trait_context: ImplTraitContext,
) -> hir::GenericBound<'hir> {
// Compute the `T` in `Future<Output = T>` from the return type.
let output_ty = match output {
@@ -1976,10 +2065,7 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
// Not `OpaqueTyOrigin::AsyncFn`: that's only used for the
// `impl Future` opaque type that `async fn` implicitly
// generates.
- let context = ImplTraitContext::ReturnPositionOpaqueTy {
- origin: hir::OpaqueTyOrigin::FnReturn(fn_def_id),
- };
- self.lower_ty(ty, context)
+ self.lower_ty(ty, &mut nested_impl_trait_context)
}
FnRetTy::Default(ret_ty_span) => self.arena.alloc(self.ty_tup(*ret_ty_span, &[])),
};
@@ -2005,7 +2091,7 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
fn lower_param_bound(
&mut self,
tpb: &GenericBound,
- itctx: ImplTraitContext,
+ itctx: &ImplTraitContext,
) -> hir::GenericBound<'hir> {
match tpb {
GenericBound::Trait(p, modifier) => hir::GenericBound::Trait(
@@ -2018,24 +2104,24 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
}
}
- fn lower_lifetime(&mut self, l: &Lifetime) -> hir::Lifetime {
+ fn lower_lifetime(&mut self, l: &Lifetime) -> &'hir hir::Lifetime {
let span = self.lower_span(l.ident.span);
let ident = self.lower_ident(l.ident);
self.new_named_lifetime(l.id, l.id, span, ident)
}
- #[tracing::instrument(level = "debug", skip(self))]
+ #[instrument(level = "debug", skip(self))]
fn new_named_lifetime_with_res(
&mut self,
id: NodeId,
span: Span,
ident: Ident,
res: LifetimeRes,
- ) -> hir::Lifetime {
+ ) -> &'hir hir::Lifetime {
let name = match res {
LifetimeRes::Param { param, .. } => {
let p_name = ParamName::Plain(ident);
- let param = self.resolver.get_remapped_def_id(param);
+ let param = self.get_remapped_def_id(param);
hir::LifetimeName::Param(param, p_name)
}
@@ -2052,17 +2138,21 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
};
debug!(?name);
- hir::Lifetime { hir_id: self.lower_node_id(id), span: self.lower_span(span), name }
+ self.arena.alloc(hir::Lifetime {
+ hir_id: self.lower_node_id(id),
+ span: self.lower_span(span),
+ name,
+ })
}
- #[tracing::instrument(level = "debug", skip(self))]
+ #[instrument(level = "debug", skip(self))]
fn new_named_lifetime(
&mut self,
id: NodeId,
new_id: NodeId,
span: Span,
ident: Ident,
- ) -> hir::Lifetime {
+ ) -> &'hir hir::Lifetime {
let res = self.resolver.get_lifetime_res(id).unwrap_or(LifetimeRes::Error);
self.new_named_lifetime_with_res(new_id, span, ident, res)
}
@@ -2117,7 +2207,7 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
GenericParamKind::Type { ref default, .. } => {
let kind = hir::GenericParamKind::Type {
default: default.as_ref().map(|x| {
- self.lower_ty(x, ImplTraitContext::Disallowed(ImplTraitPosition::Type))
+ self.lower_ty(x, &ImplTraitContext::Disallowed(ImplTraitPosition::Type))
}),
synthetic: false,
};
@@ -2125,7 +2215,7 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
(hir::ParamName::Plain(self.lower_ident(param.ident)), kind)
}
GenericParamKind::Const { ref ty, kw_span: _, ref default } => {
- let ty = self.lower_ty(&ty, ImplTraitContext::Disallowed(ImplTraitPosition::Type));
+ let ty = self.lower_ty(&ty, &ImplTraitContext::Disallowed(ImplTraitPosition::Type));
let default = default.as_ref().map(|def| self.lower_anon_const(def));
(
hir::ParamName::Plain(self.lower_ident(param.ident)),
@@ -2135,7 +2225,7 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
}
}
- fn lower_trait_ref(&mut self, p: &TraitRef, itctx: ImplTraitContext) -> hir::TraitRef<'hir> {
+ fn lower_trait_ref(&mut self, p: &TraitRef, itctx: &ImplTraitContext) -> hir::TraitRef<'hir> {
let path = match self.lower_qpath(p.ref_id, &None, &p.path, ParamMode::Explicit, itctx) {
hir::QPath::Resolved(None, path) => path,
qpath => panic!("lower_trait_ref: unexpected QPath `{:?}`", qpath),
@@ -2143,11 +2233,11 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
hir::TraitRef { path, hir_ref_id: self.lower_node_id(p.ref_id) }
}
- #[tracing::instrument(level = "debug", skip(self))]
+ #[instrument(level = "debug", skip(self))]
fn lower_poly_trait_ref(
&mut self,
p: &PolyTraitRef,
- itctx: ImplTraitContext,
+ itctx: &ImplTraitContext,
) -> hir::PolyTraitRef<'hir> {
let bound_generic_params =
self.lower_lifetime_binder(p.trait_ref.ref_id, &p.bound_generic_params);
@@ -2155,14 +2245,15 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
hir::PolyTraitRef { bound_generic_params, trait_ref, span: self.lower_span(p.span) }
}
- fn lower_mt(&mut self, mt: &MutTy, itctx: ImplTraitContext) -> hir::MutTy<'hir> {
+ fn lower_mt(&mut self, mt: &MutTy, itctx: &ImplTraitContext) -> hir::MutTy<'hir> {
hir::MutTy { ty: self.lower_ty(&mt.ty, itctx), mutbl: mt.mutbl }
}
+ #[instrument(level = "debug", skip(self), ret)]
fn lower_param_bounds(
&mut self,
bounds: &[GenericBound],
- itctx: ImplTraitContext,
+ itctx: &ImplTraitContext,
) -> hir::GenericBounds<'hir> {
self.arena.alloc_from_iter(self.lower_param_bounds_mut(bounds, itctx))
}
@@ -2170,11 +2261,12 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
fn lower_param_bounds_mut<'s>(
&'s mut self,
bounds: &'s [GenericBound],
- itctx: ImplTraitContext,
+ itctx: &'s ImplTraitContext,
) -> impl Iterator<Item = hir::GenericBound<'hir>> + Captures<'s> + Captures<'a> {
bounds.iter().map(move |bound| self.lower_param_bound(bound, itctx))
}
+ #[instrument(level = "debug", skip(self), ret)]
fn lower_generic_and_bounds(
&mut self,
node_id: NodeId,
@@ -2200,16 +2292,19 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
node_id,
&GenericParamKind::Type { default: None },
bounds,
- ImplTraitContext::Universal,
+ &ImplTraitContext::Universal,
hir::PredicateOrigin::ImplTrait,
);
+ let hir_id = self.next_id();
+ let res = Res::Def(DefKind::TyParam, def_id.to_def_id());
let ty = hir::TyKind::Path(hir::QPath::Resolved(
None,
self.arena.alloc(hir::Path {
span: self.lower_span(span),
- res: Res::Def(DefKind::TyParam, def_id.to_def_id()),
- segments: arena_vec![self; hir::PathSegment::from_ident(self.lower_ident(ident))],
+ res,
+ segments:
+ arena_vec![self; hir::PathSegment::new(self.lower_ident(ident), hir_id, res)],
}),
));
@@ -2235,7 +2330,7 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
c.value.span,
"using `_` for array lengths is unstable",
)
- .emit();
+ .stash(c.value.span, StashKey::UnderscoreForArrayLengths);
hir::ArrayLen::Body(self.lower_anon_const(c))
}
}
@@ -2372,11 +2467,11 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
}
fn pat_ident(&mut self, span: Span, ident: Ident) -> (&'hir hir::Pat<'hir>, hir::HirId) {
- self.pat_ident_binding_mode(span, ident, hir::BindingAnnotation::Unannotated)
+ self.pat_ident_binding_mode(span, ident, hir::BindingAnnotation::NONE)
}
fn pat_ident_mut(&mut self, span: Span, ident: Ident) -> (hir::Pat<'hir>, hir::HirId) {
- self.pat_ident_binding_mode_mut(span, ident, hir::BindingAnnotation::Unannotated)
+ self.pat_ident_binding_mode_mut(span, ident, hir::BindingAnnotation::NONE)
}
fn pat_ident_binding_mode(
@@ -2465,14 +2560,14 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
/// bound, like the bound in `Box<dyn Debug>`. This method is not invoked
/// when the bound is written, even if it is written with `'_` like in
/// `Box<dyn Debug + '_>`. In those cases, `lower_lifetime` is invoked.
- fn elided_dyn_bound(&mut self, span: Span) -> hir::Lifetime {
+ fn elided_dyn_bound(&mut self, span: Span) -> &'hir hir::Lifetime {
let r = hir::Lifetime {
hir_id: self.next_id(),
span: self.lower_span(span),
name: hir::LifetimeName::ImplicitObjectLifetimeDefault,
};
debug!("elided_dyn_bound: r={:?}", r);
- r
+ self.arena.alloc(r)
}
}
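
Two changes recur through this file: lowering helpers now take `&ImplTraitContext` instead of an owned `ImplTraitContext`, and lowered lifetimes are arena-allocated and handed out as `&'hir hir::Lifetime`. Below is a minimal, self-contained sketch of the by-reference threading; `LoweringContext`, `lower_ty`, and `lower_param_bounds` here are simplified stand-ins, not the real rustc_ast_lowering types.

    // Simplified stand-ins; the real LoweringContext and ImplTraitContext carry far more state.
    #[derive(Debug)]
    enum ImplTraitPosition { Type }

    #[derive(Debug)]
    enum ImplTraitContext {
        Universal,
        Disallowed(ImplTraitPosition),
    }

    struct LoweringContext;

    impl LoweringContext {
        // After the change, helpers borrow the context instead of consuming it,
        // so one value can be reused across many call sites.
        fn lower_ty(&mut self, ty: &str, itctx: &ImplTraitContext) -> String {
            format!("lowered `{ty}` under {itctx:?}")
        }

        fn lower_param_bounds(&mut self, bounds: &[&str], itctx: &ImplTraitContext) -> Vec<String> {
            // The same borrowed context is threaded through every bound.
            bounds.iter().map(|&b| self.lower_ty(b, itctx)).collect()
        }
    }

    fn main() {
        let mut cx = LoweringContext;
        let universal = ImplTraitContext::Universal;
        let disallowed = ImplTraitContext::Disallowed(ImplTraitPosition::Type);
        println!("{}", cx.lower_ty("u32", &universal));
        for lowered in cx.lower_param_bounds(&["Clone", "Send"], &disallowed) {
            println!("{lowered}");
        }
    }
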
diff --git a/compiler/rustc_ast_lowering/src/lifetime_collector.rs b/compiler/rustc_ast_lowering/src/lifetime_collector.rs
index 81006e00f..914fc5f58 100644
--- a/compiler/rustc_ast_lowering/src/lifetime_collector.rs
+++ b/compiler/rustc_ast_lowering/src/lifetime_collector.rs
@@ -1,9 +1,6 @@
use super::ResolverAstLoweringExt;
use rustc_ast::visit::{self, BoundKind, LifetimeCtxt, Visitor};
-use rustc_ast::{
- FnRetTy, GenericBounds, Lifetime, NodeId, PathSegment, PolyTraitRef, TraitBoundModifier, Ty,
- TyKind,
-};
+use rustc_ast::{FnRetTy, GenericBounds, Lifetime, NodeId, PathSegment, PolyTraitRef, Ty, TyKind};
use rustc_hir::def::LifetimeRes;
use rustc_middle::span_bug;
use rustc_middle::ty::ResolverAstLowering;
@@ -66,15 +63,15 @@ impl<'ast> Visitor<'ast> for LifetimeCollectVisitor<'ast> {
self.record_lifetime_use(*lifetime);
}
- fn visit_path_segment(&mut self, path_span: Span, path_segment: &'ast PathSegment) {
- self.record_elided_anchor(path_segment.id, path_span);
- visit::walk_path_segment(self, path_span, path_segment);
+ fn visit_path_segment(&mut self, path_segment: &'ast PathSegment) {
+ self.record_elided_anchor(path_segment.id, path_segment.ident.span);
+ visit::walk_path_segment(self, path_segment);
}
- fn visit_poly_trait_ref(&mut self, t: &'ast PolyTraitRef, m: &'ast TraitBoundModifier) {
+ fn visit_poly_trait_ref(&mut self, t: &'ast PolyTraitRef) {
self.current_binders.push(t.trait_ref.ref_id);
- visit::walk_poly_trait_ref(self, t, m);
+ visit::walk_poly_trait_ref(self, t);
self.current_binders.pop();
}
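
The visitor methods above drop parameters that the visited node already carries: `visit_path_segment` no longer takes the enclosing path span, and `visit_poly_trait_ref` no longer takes the bound modifier. A toy visitor with the same shape, assuming simplified `Span`/`PathSegment` stand-ins rather than the real rustc_ast types:

    // Stand-in types; the real visitor lives in rustc_ast::visit.
    #[derive(Clone, Copy, Debug)]
    struct Span { lo: u32, hi: u32 }

    struct PathSegment { id: u32, ident_span: Span }

    trait Visitor<'ast> {
        fn visit_path_segment(&mut self, segment: &'ast PathSegment) {
            walk_path_segment(self, segment);
        }
    }

    fn walk_path_segment<'ast, V: Visitor<'ast> + ?Sized>(_v: &mut V, _segment: &'ast PathSegment) {
        // The real default walk recurses into the segment's generic arguments.
    }

    struct LifetimeCollector { elided_anchors: Vec<(u32, Span)> }

    impl<'ast> Visitor<'ast> for LifetimeCollector {
        fn visit_path_segment(&mut self, segment: &'ast PathSegment) {
            // The anchor span now comes from the segment's own ident rather than
            // from a separate `path_span` argument.
            self.elided_anchors.push((segment.id, segment.ident_span));
            walk_path_segment(self, segment);
        }
    }

    fn main() {
        let seg = PathSegment { id: 7, ident_span: Span { lo: 3, hi: 9 } };
        let mut collector = LifetimeCollector { elided_anchors: Vec::new() };
        collector.visit_path_segment(&seg);
        println!("{:?}", collector.elided_anchors);
    }
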
diff --git a/compiler/rustc_ast_lowering/src/pat.rs b/compiler/rustc_ast_lowering/src/pat.rs
index bd2e76e55..1af1633b5 100644
--- a/compiler/rustc_ast_lowering/src/pat.rs
+++ b/compiler/rustc_ast_lowering/src/pat.rs
@@ -1,3 +1,6 @@
+use super::errors::{
+ ArbitraryExpressionInPattern, ExtraDoubleDot, MisplacedDoubleDot, SubTupleBinding,
+};
use super::ResolverAstLoweringExt;
use super::{ImplTraitContext, LoweringContext, ParamMode};
use crate::ImplTraitPosition;
@@ -5,7 +8,6 @@ use crate::ImplTraitPosition;
use rustc_ast::ptr::P;
use rustc_ast::*;
use rustc_data_structures::stack::ensure_sufficient_stack;
-use rustc_errors::Applicability;
use rustc_hir as hir;
use rustc_hir::def::Res;
use rustc_span::symbol::Ident;
@@ -22,7 +24,7 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
let node = loop {
match pattern.kind {
PatKind::Wild => break hir::PatKind::Wild,
- PatKind::Ident(ref binding_mode, ident, ref sub) => {
+ PatKind::Ident(binding_mode, ident, ref sub) => {
let lower_sub = |this: &mut Self| sub.as_ref().map(|s| this.lower_pat(&*s));
break self.lower_pat_ident(pattern, binding_mode, ident, lower_sub);
}
@@ -35,7 +37,7 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
qself,
path,
ParamMode::Optional,
- ImplTraitContext::Disallowed(ImplTraitPosition::Path),
+ &mut ImplTraitContext::Disallowed(ImplTraitPosition::Path),
);
let (pats, ddpos) = self.lower_pat_tuple(pats, "tuple struct");
break hir::PatKind::TupleStruct(qpath, pats, ddpos);
@@ -51,7 +53,7 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
qself,
path,
ParamMode::Optional,
- ImplTraitContext::Disallowed(ImplTraitPosition::Path),
+ &mut ImplTraitContext::Disallowed(ImplTraitPosition::Path),
);
break hir::PatKind::Path(qpath);
}
@@ -61,15 +63,20 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
qself,
path,
ParamMode::Optional,
- ImplTraitContext::Disallowed(ImplTraitPosition::Path),
+ &mut ImplTraitContext::Disallowed(ImplTraitPosition::Path),
);
- let fs = self.arena.alloc_from_iter(fields.iter().map(|f| hir::PatField {
- hir_id: self.next_id(),
- ident: self.lower_ident(f.ident),
- pat: self.lower_pat(&f.pat),
- is_shorthand: f.is_shorthand,
- span: self.lower_span(f.span),
+ let fs = self.arena.alloc_from_iter(fields.iter().map(|f| {
+ let hir_id = self.lower_node_id(f.id);
+ self.lower_attrs(hir_id, &f.attrs);
+
+ hir::PatField {
+ hir_id,
+ ident: self.lower_ident(f.ident),
+ pat: self.lower_pat(&f.pat),
+ is_shorthand: f.is_shorthand,
+ span: self.lower_span(f.span),
+ }
}));
break hir::PatKind::Struct(qpath, fs, etc);
}
@@ -109,7 +116,7 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
&mut self,
pats: &[P<Pat>],
ctx: &str,
- ) -> (&'hir [hir::Pat<'hir>], Option<usize>) {
+ ) -> (&'hir [hir::Pat<'hir>], hir::DotDotPos) {
let mut elems = Vec::with_capacity(pats.len());
let mut rest = None;
@@ -129,20 +136,12 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
// This is not allowed as a sub-tuple pattern
PatKind::Ident(ref _bm, ident, Some(ref sub)) if sub.is_rest() => {
let sp = pat.span;
- self.diagnostic()
- .struct_span_err(
- sp,
- &format!("`{} @` is not allowed in a {}", ident.name, ctx),
- )
- .span_label(sp, "this is only allowed in slice patterns")
- .help("remove this and bind each tuple field independently")
- .span_suggestion_verbose(
- sp,
- &format!("if you don't need to use the contents of {}, discard the tuple's remaining fields", ident),
- "..",
- Applicability::MaybeIncorrect,
- )
- .emit();
+ self.tcx.sess.emit_err(SubTupleBinding {
+ span: sp,
+ ident_name: ident.name,
+ ident,
+ ctx,
+ });
}
_ => {}
}
@@ -161,7 +160,7 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
}
}
- (self.arena.alloc_from_iter(elems), rest.map(|(ddpos, _)| ddpos))
+ (self.arena.alloc_from_iter(elems), hir::DotDotPos::new(rest.map(|(ddpos, _)| ddpos)))
}
/// Lower a slice pattern of form `[pat_0, ..., pat_n]` into
@@ -177,9 +176,9 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
let mut prev_rest_span = None;
// Lowers `$bm $ident @ ..` to `$bm $ident @ _`.
- let lower_rest_sub = |this: &mut Self, pat, bm, ident, sub| {
+ let lower_rest_sub = |this: &mut Self, pat, ann, ident, sub| {
let lower_sub = |this: &mut Self| Some(this.pat_wild_with_node_id_of(sub));
- let node = this.lower_pat_ident(pat, bm, ident, lower_sub);
+ let node = this.lower_pat_ident(pat, ann, ident, lower_sub);
this.pat_with_node_id_of(pat, node)
};
@@ -195,9 +194,9 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
}
// Found a sub-slice pattern `$binding_mode $ident @ ..`.
// Record, lower it to `$binding_mode $ident @ _`, and stop here.
- PatKind::Ident(ref bm, ident, Some(ref sub)) if sub.is_rest() => {
+ PatKind::Ident(ann, ident, Some(ref sub)) if sub.is_rest() => {
prev_rest_span = Some(sub.span);
- slice = Some(self.arena.alloc(lower_rest_sub(self, pat, bm, ident, sub)));
+ slice = Some(self.arena.alloc(lower_rest_sub(self, pat, ann, ident, sub)));
break;
}
// It was not a subslice pattern so lower it normally.
@@ -210,9 +209,9 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
// There was a previous subslice pattern; make sure we don't allow more.
let rest_span = match pat.kind {
PatKind::Rest => Some(pat.span),
- PatKind::Ident(ref bm, ident, Some(ref sub)) if sub.is_rest() => {
+ PatKind::Ident(ann, ident, Some(ref sub)) if sub.is_rest() => {
// #69103: Lower into `binding @ _` as above to avoid ICEs.
- after.push(lower_rest_sub(self, pat, bm, ident, sub));
+ after.push(lower_rest_sub(self, pat, ann, ident, sub));
Some(sub.span)
}
_ => None,
@@ -236,11 +235,11 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
fn lower_pat_ident(
&mut self,
p: &Pat,
- binding_mode: &BindingMode,
+ annotation: BindingAnnotation,
ident: Ident,
lower_sub: impl FnOnce(&mut Self) -> Option<&'hir hir::Pat<'hir>>,
) -> hir::PatKind<'hir> {
- match self.resolver.get_partial_res(p.id).map(|d| d.base_res()) {
+ match self.resolver.get_partial_res(p.id).map(|d| d.expect_full_res()) {
// `None` can occur in body-less function signatures
res @ (None | Some(Res::Local(_))) => {
let canonical_id = match res {
@@ -249,29 +248,24 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
};
hir::PatKind::Binding(
- self.lower_binding_mode(binding_mode),
+ annotation,
self.lower_node_id(canonical_id),
self.lower_ident(ident),
lower_sub(self),
)
}
- Some(res) => hir::PatKind::Path(hir::QPath::Resolved(
- None,
- self.arena.alloc(hir::Path {
- span: self.lower_span(ident.span),
- res: self.lower_res(res),
- segments: arena_vec![self; hir::PathSegment::from_ident(self.lower_ident(ident))],
- }),
- )),
- }
- }
-
- fn lower_binding_mode(&mut self, b: &BindingMode) -> hir::BindingAnnotation {
- match *b {
- BindingMode::ByValue(Mutability::Not) => hir::BindingAnnotation::Unannotated,
- BindingMode::ByRef(Mutability::Not) => hir::BindingAnnotation::Ref,
- BindingMode::ByValue(Mutability::Mut) => hir::BindingAnnotation::Mutable,
- BindingMode::ByRef(Mutability::Mut) => hir::BindingAnnotation::RefMut,
+ Some(res) => {
+ let hir_id = self.next_id();
+ let res = self.lower_res(res);
+ hir::PatKind::Path(hir::QPath::Resolved(
+ None,
+ self.arena.alloc(hir::Path {
+ span: self.lower_span(ident.span),
+ res,
+ segments: arena_vec![self; hir::PathSegment::new(self.lower_ident(ident), hir_id, res)],
+ }),
+ ))
+ }
}
}
@@ -291,19 +285,12 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
/// Emit a friendly error for extra `..` patterns in a tuple/tuple struct/slice pattern.
pub(crate) fn ban_extra_rest_pat(&self, sp: Span, prev_sp: Span, ctx: &str) {
- self.diagnostic()
- .struct_span_err(sp, &format!("`..` can only be used once per {} pattern", ctx))
- .span_label(sp, &format!("can only be used once per {} pattern", ctx))
- .span_label(prev_sp, "previously used here")
- .emit();
+ self.tcx.sess.emit_err(ExtraDoubleDot { span: sp, prev_span: prev_sp, ctx });
}
/// Used to ban the `..` pattern in places it shouldn't be semantically.
fn ban_illegal_rest_pat(&self, sp: Span) -> hir::PatKind<'hir> {
- self.diagnostic()
- .struct_span_err(sp, "`..` patterns are not allowed here")
- .note("only allowed in tuple, tuple struct, and slice patterns")
- .emit();
+ self.tcx.sess.emit_err(MisplacedDoubleDot { span: sp });
// We're not in a list context so `..` can be reasonably treated
// as `_` because it should always be valid and roughly matches the
@@ -340,8 +327,7 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
ExprKind::Path(..) if allow_paths => {}
ExprKind::Unary(UnOp::Neg, ref inner) if matches!(inner.kind, ExprKind::Lit(_)) => {}
_ => {
- self.diagnostic()
- .span_err(expr.span, "arbitrary expressions aren't allowed in patterns");
+ self.tcx.sess.emit_err(ArbitraryExpressionInPattern { span: expr.span });
return self.arena.alloc(self.expr_err(expr.span));
}
}
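
The pattern throughout this file is replacing hand-built `struct_span_err` chains with typed error structs handed to `emit_err` (`SubTupleBinding`, `ExtraDoubleDot`, `MisplacedDoubleDot`, `ArbitraryExpressionInPattern`). A simplified standalone model of that call-site shape follows; the `EmittableError` trait and `Session` below are illustrative stand-ins for rustc's diagnostic derive and session machinery, not the real API.

    struct Span { lo: u32, hi: u32 }

    // Stand-in for what `#[derive(Diagnostic)]` generates on an error struct.
    trait EmittableError {
        fn message(&self) -> String;
        fn primary_span(&self) -> &Span;
    }

    struct ExtraDoubleDot<'a> { span: Span, prev_span: Span, ctx: &'a str }

    impl EmittableError for ExtraDoubleDot<'_> {
        fn message(&self) -> String {
            format!(
                "`..` can only be used once per {} pattern (previously used at {}..{})",
                self.ctx, self.prev_span.lo, self.prev_span.hi
            )
        }
        fn primary_span(&self) -> &Span { &self.span }
    }

    struct Session;

    impl Session {
        // Call sites shrink to constructing a struct, as in `ban_extra_rest_pat` above.
        fn emit_err(&self, err: impl EmittableError) {
            let sp = err.primary_span();
            eprintln!("error ({}..{}): {}", sp.lo, sp.hi, err.message());
        }
    }

    fn main() {
        let sess = Session;
        sess.emit_err(ExtraDoubleDot {
            span: Span { lo: 10, hi: 12 },
            prev_span: Span { lo: 4, hi: 6 },
            ctx: "tuple",
        });
    }
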
diff --git a/compiler/rustc_ast_lowering/src/path.rs b/compiler/rustc_ast_lowering/src/path.rs
index 393be3b45..888776ccc 100644
--- a/compiler/rustc_ast_lowering/src/path.rs
+++ b/compiler/rustc_ast_lowering/src/path.rs
@@ -1,11 +1,11 @@
use crate::ImplTraitPosition;
+use super::errors::{GenericTypeWithParentheses, UseAngleBrackets};
use super::ResolverAstLoweringExt;
use super::{GenericArgsCtor, LifetimeRes, ParenthesizedGenericArgs};
use super::{ImplTraitContext, LoweringContext, ParamMode};
use rustc_ast::{self as ast, *};
-use rustc_errors::{struct_span_err, Applicability};
use rustc_hir as hir;
use rustc_hir::def::{DefKind, PartialRes, Res};
use rustc_hir::GenericArg;
@@ -13,7 +13,6 @@ use rustc_span::symbol::{kw, Ident};
use rustc_span::{BytePos, Span, DUMMY_SP};
use smallvec::smallvec;
-use tracing::debug;
impl<'a, 'hir> LoweringContext<'a, 'hir> {
#[instrument(level = "trace", skip(self))]
@@ -23,18 +22,20 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
qself: &Option<QSelf>,
p: &Path,
param_mode: ParamMode,
- itctx: ImplTraitContext,
+ itctx: &ImplTraitContext,
) -> hir::QPath<'hir> {
let qself_position = qself.as_ref().map(|q| q.position);
let qself = qself.as_ref().map(|q| self.lower_ty(&q.ty, itctx));
let partial_res =
self.resolver.get_partial_res(id).unwrap_or_else(|| PartialRes::new(Res::Err));
+ let base_res = partial_res.base_res();
+ let unresolved_segments = partial_res.unresolved_segments();
let path_span_lo = p.span.shrink_to_lo();
- let proj_start = p.segments.len() - partial_res.unresolved_segments();
+ let proj_start = p.segments.len() - unresolved_segments;
let path = self.arena.alloc(hir::Path {
- res: self.lower_res(partial_res.base_res()),
+ res: self.lower_res(base_res),
segments: self.arena.alloc_from_iter(p.segments[..proj_start].iter().enumerate().map(
|(i, segment)| {
let param_mode = match (qself_position, param_mode) {
@@ -47,7 +48,7 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
_ => param_mode,
};
- let parenthesized_generic_args = match partial_res.base_res() {
+ let parenthesized_generic_args = match base_res {
// `a::b::Trait(Args)`
Res::Def(DefKind::Trait, _) if i + 1 == proj_start => {
ParenthesizedGenericArgs::Ok
@@ -84,7 +85,7 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
// Simple case, either no projections, or only fully-qualified.
// E.g., `std::mem::size_of` or `<I as Iterator>::Item`.
- if partial_res.unresolved_segments() == 0 {
+ if unresolved_segments == 0 {
return hir::QPath::Resolved(qself, path);
}
@@ -157,7 +158,7 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
segment,
param_mode,
ParenthesizedGenericArgs::Err,
- ImplTraitContext::Disallowed(ImplTraitPosition::Path),
+ &ImplTraitContext::Disallowed(ImplTraitPosition::Path),
)
})),
span: self.lower_span(p.span),
@@ -181,11 +182,10 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
segment: &PathSegment,
param_mode: ParamMode,
parenthesized_generic_args: ParenthesizedGenericArgs,
- itctx: ImplTraitContext,
+ itctx: &ImplTraitContext,
) -> hir::PathSegment<'hir> {
debug!("path_span: {:?}, lower_path_segment(segment: {:?})", path_span, segment,);
let (mut generic_args, infer_args) = if let Some(ref generic_args) = segment.args {
- let msg = "parenthesized type parameters may only be used with a `Fn` trait";
match **generic_args {
GenericArgs::AngleBracketed(ref data) => {
self.lower_angle_bracketed_parameter_data(data, param_mode, itctx)
@@ -193,10 +193,8 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
GenericArgs::Parenthesized(ref data) => match parenthesized_generic_args {
ParenthesizedGenericArgs::Ok => self.lower_parenthesized_parameter_data(data),
ParenthesizedGenericArgs::Err => {
- let mut err = struct_span_err!(self.tcx.sess, data.span, E0214, "{}", msg);
- err.span_label(data.span, "only `Fn` traits may use parentheses");
// Suggest replacing parentheses with angle brackets `Trait(params...)` to `Trait<params...>`
- if !data.inputs.is_empty() {
+ let sub = if !data.inputs.is_empty() {
// Start of the span to the 1st character of 1st argument
let open_param = data.inputs_span.shrink_to_lo().to(data
.inputs
@@ -212,16 +210,12 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
.span
.shrink_to_hi()
.to(data.inputs_span.shrink_to_hi());
- err.multipart_suggestion(
- &format!("use angle brackets instead",),
- vec![
- (open_param, String::from("<")),
- (close_param, String::from(">")),
- ],
- Applicability::MaybeIncorrect,
- );
- }
- err.emit();
+
+ Some(UseAngleBrackets { open_param, close_param })
+ } else {
+ None
+ };
+ self.tcx.sess.emit_err(GenericTypeWithParentheses { span: data.span, sub });
(
self.lower_angle_bracketed_parameter_data(
&data.as_angle_bracketed_args(),
@@ -258,16 +252,16 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
}
let res = self.expect_full_res(segment.id);
- let id = self.lower_node_id(segment.id);
+ let hir_id = self.lower_node_id(segment.id);
debug!(
"lower_path_segment: ident={:?} original-id={:?} new-id={:?}",
- segment.ident, segment.id, id,
+ segment.ident, segment.id, hir_id,
);
hir::PathSegment {
ident: self.lower_ident(segment.ident),
- hir_id: Some(id),
- res: Some(self.lower_res(res)),
+ hir_id,
+ res: self.lower_res(res),
infer_args,
args: if generic_args.is_empty() && generic_args.span.is_empty() {
None
@@ -324,7 +318,7 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
&mut self,
data: &AngleBracketedArgs,
param_mode: ParamMode,
- itctx: ImplTraitContext,
+ itctx: &ImplTraitContext,
) -> (GenericArgsCtor<'hir>, bool) {
let has_non_lt_args = data.args.iter().any(|arg| match arg {
AngleBracketedArg::Arg(ast::GenericArg::Lifetime(_))
@@ -358,15 +352,15 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
// we generally don't permit such things (see #51008).
let ParenthesizedArgs { span, inputs, inputs_span, output } = data;
let inputs = self.arena.alloc_from_iter(inputs.iter().map(|ty| {
- self.lower_ty_direct(ty, ImplTraitContext::Disallowed(ImplTraitPosition::FnTraitParam))
+ self.lower_ty_direct(ty, &ImplTraitContext::Disallowed(ImplTraitPosition::FnTraitParam))
}));
let output_ty = match output {
FnRetTy::Ty(ty) => {
- self.lower_ty(&ty, ImplTraitContext::Disallowed(ImplTraitPosition::FnTraitReturn))
+ self.lower_ty(&ty, &ImplTraitContext::Disallowed(ImplTraitPosition::FnTraitReturn))
}
FnRetTy::Default(_) => self.arena.alloc(self.ty_tup(*span, &[])),
};
- let args = smallvec![GenericArg::Type(self.ty_tup(*inputs_span, inputs))];
+ let args = smallvec![GenericArg::Type(self.arena.alloc(self.ty_tup(*inputs_span, inputs)))];
let binding = self.output_ty_binding(output_ty.span, output_ty);
(
GenericArgsCtor {
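
For the E0214 case above, the suggestion becomes a typed, optional subdiagnostic (`UseAngleBrackets`) attached to `GenericTypeWithParentheses` only when there are inputs to rewrite. A rough standalone sketch of that optional-suggestion shape, with a toy `Span` and a plain reporting function standing in for the real diagnostic machinery:

    struct Span { lo: u32, hi: u32 }

    struct UseAngleBrackets { open_param: Span, close_param: Span }

    struct GenericTypeWithParentheses { span: Span, sub: Option<UseAngleBrackets> }

    fn report(err: GenericTypeWithParentheses) {
        eprintln!(
            "error[E0214] ({}..{}): parenthesized type parameters may only be used with a `Fn` trait",
            err.span.lo, err.span.hi
        );
        if let Some(sub) = err.sub {
            eprintln!(
                "help: use angle brackets instead: replace {}..{} with `<` and {}..{} with `>`",
                sub.open_param.lo, sub.open_param.hi, sub.close_param.lo, sub.close_param.hi
            );
        }
    }

    fn main() {
        // Only attach the suggestion when there is at least one input to rewrite,
        // mirroring the `if !data.inputs.is_empty()` check above.
        let inputs_empty = false;
        let sub = if inputs_empty {
            None
        } else {
            Some(UseAngleBrackets {
                open_param: Span { lo: 20, hi: 21 },
                close_param: Span { lo: 35, hi: 36 },
            })
        };
        report(GenericTypeWithParentheses { span: Span { lo: 20, hi: 36 }, sub });
    }
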
diff --git a/compiler/rustc_ast_passes/Cargo.toml b/compiler/rustc_ast_passes/Cargo.toml
index 22742b2ad..37eff9207 100644
--- a/compiler/rustc_ast_passes/Cargo.toml
+++ b/compiler/rustc_ast_passes/Cargo.toml
@@ -11,6 +11,7 @@ rustc_attr = { path = "../rustc_attr" }
rustc_data_structures = { path = "../rustc_data_structures" }
rustc_errors = { path = "../rustc_errors" }
rustc_feature = { path = "../rustc_feature" }
+rustc_macros = { path = "../rustc_macros" }
rustc_parse = { path = "../rustc_parse" }
rustc_session = { path = "../rustc_session" }
rustc_span = { path = "../rustc_span" }
diff --git a/compiler/rustc_ast_passes/src/ast_validation.rs b/compiler/rustc_ast_passes/src/ast_validation.rs
index 2d9d0073f..036643244 100644
--- a/compiler/rustc_ast_passes/src/ast_validation.rs
+++ b/compiler/rustc_ast_passes/src/ast_validation.rs
@@ -13,9 +13,8 @@ use rustc_ast::walk_list;
use rustc_ast::*;
use rustc_ast_pretty::pprust::{self, State};
use rustc_data_structures::fx::FxHashMap;
-use rustc_errors::{
- error_code, pluralize, struct_span_err, Applicability, DiagnosticBuilder, ErrorGuaranteed,
-};
+use rustc_errors::{error_code, fluent, pluralize, struct_span_err, Applicability};
+use rustc_macros::Subdiagnostic;
use rustc_parse::validate_attr;
use rustc_session::lint::builtin::{
DEPRECATED_WHERE_CLAUSE_LOCATION, MISSING_ABI, PATTERNS_IN_FNS_WITHOUT_BODY,
@@ -29,6 +28,8 @@ use rustc_target::spec::abi;
use std::mem;
use std::ops::{Deref, DerefMut};
+use crate::errors::*;
+
const MORE_EXTERN: &str =
"for more information, visit https://doc.rust-lang.org/std/keyword.extern.html";
@@ -38,6 +39,13 @@ enum SelfSemantic {
No,
}
+/// What is the context that prevents using `~const`?
+enum DisallowTildeConstContext<'a> {
+ TraitObject,
+ ImplTrait,
+ Fn(FnKind<'a>),
+}
+
struct AstValidator<'a> {
session: &'a Session,
@@ -56,7 +64,7 @@ struct AstValidator<'a> {
/// e.g., `impl Iterator<Item = impl Debug>`.
outer_impl_trait: Option<Span>,
- is_tilde_const_allowed: bool,
+ disallow_tilde_const: Option<DisallowTildeConstContext<'a>>,
/// Used to ban `impl Trait` in path projections like `<impl Iterator>::Item`
/// or `Foo::Bar<impl Trait>`
@@ -93,18 +101,26 @@ impl<'a> AstValidator<'a> {
self.is_impl_trait_banned = old;
}
- fn with_tilde_const(&mut self, allowed: bool, f: impl FnOnce(&mut Self)) {
- let old = mem::replace(&mut self.is_tilde_const_allowed, allowed);
+ fn with_tilde_const(
+ &mut self,
+ disallowed: Option<DisallowTildeConstContext<'a>>,
+ f: impl FnOnce(&mut Self),
+ ) {
+ let old = mem::replace(&mut self.disallow_tilde_const, disallowed);
f(self);
- self.is_tilde_const_allowed = old;
+ self.disallow_tilde_const = old;
}
fn with_tilde_const_allowed(&mut self, f: impl FnOnce(&mut Self)) {
- self.with_tilde_const(true, f)
+ self.with_tilde_const(None, f)
}
- fn with_banned_tilde_const(&mut self, f: impl FnOnce(&mut Self)) {
- self.with_tilde_const(false, f)
+ fn with_banned_tilde_const(
+ &mut self,
+ ctx: DisallowTildeConstContext<'a>,
+ f: impl FnOnce(&mut Self),
+ ) {
+ self.with_tilde_const(Some(ctx), f)
}
fn with_let_management(
@@ -121,30 +137,9 @@ impl<'a> AstValidator<'a> {
fn ban_let_expr(&self, expr: &'a Expr, forbidden_let_reason: ForbiddenLetReason) {
let sess = &self.session;
if sess.opts.unstable_features.is_nightly_build() {
- let err = "`let` expressions are not supported here";
- let mut diag = sess.struct_span_err(expr.span, err);
- diag.note("only supported directly in conditions of `if` and `while` expressions");
- match forbidden_let_reason {
- ForbiddenLetReason::GenericForbidden => {}
- ForbiddenLetReason::NotSupportedOr(span) => {
- diag.span_note(
- span,
- "`||` operators are not supported in let chain expressions",
- );
- }
- ForbiddenLetReason::NotSupportedParentheses(span) => {
- diag.span_note(
- span,
- "`let`s wrapped in parentheses are not supported in a context with let \
- chains",
- );
- }
- }
- diag.emit();
+ sess.emit_err(ForbiddenLet { span: expr.span, reason: forbidden_let_reason });
} else {
- sess.struct_span_err(expr.span, "expected expression, found statement (`let`)")
- .note("variable declaration using `let` is a statement")
- .emit();
+ sess.emit_err(ForbiddenLetStable { span: expr.span });
}
}
@@ -175,7 +170,7 @@ impl<'a> AstValidator<'a> {
DEPRECATED_WHERE_CLAUSE_LOCATION,
id,
where_clauses.0.1,
- "where clause not allowed here",
+ fluent::ast_passes_deprecated_where_clause_location,
BuiltinLintDiagnostics::DeprecatedWhereclauseLocation(
where_clauses.1.1.shrink_to_hi(),
suggestion,
@@ -193,7 +188,7 @@ impl<'a> AstValidator<'a> {
fn with_impl_trait(&mut self, outer: Option<Span>, f: impl FnOnce(&mut Self)) {
let old = mem::replace(&mut self.outer_impl_trait, outer);
if outer.is_some() {
- self.with_banned_tilde_const(f);
+ self.with_banned_tilde_const(DisallowTildeConstContext::ImplTrait, f);
} else {
f(self);
}
@@ -205,10 +200,7 @@ impl<'a> AstValidator<'a> {
AssocConstraintKind::Equality { .. } => {}
AssocConstraintKind::Bound { .. } => {
if self.is_assoc_ty_bound_banned {
- self.err_handler().span_err(
- constraint.span,
- "associated type bounds are not allowed within structs, enums, or unions",
- );
+ self.session.emit_err(ForbiddenAssocConstraint { span: constraint.span });
}
}
}
@@ -221,7 +213,10 @@ impl<'a> AstValidator<'a> {
TyKind::ImplTrait(..) => {
self.with_impl_trait(Some(t.span), |this| visit::walk_ty(this, t))
}
- TyKind::TraitObject(..) => self.with_banned_tilde_const(|this| visit::walk_ty(this, t)),
+ TyKind::TraitObject(..) => self
+ .with_banned_tilde_const(DisallowTildeConstContext::TraitObject, |this| {
+ visit::walk_ty(this, t)
+ }),
TyKind::Path(ref qself, ref path) => {
// We allow these:
// - `Option<impl Trait>`
@@ -247,11 +242,9 @@ impl<'a> AstValidator<'a> {
for (i, segment) in path.segments.iter().enumerate() {
// Allow `impl Trait` iff we're on the final path segment
if i == path.segments.len() - 1 {
- self.visit_path_segment(path.span, segment);
+ self.visit_path_segment(segment);
} else {
- self.with_banned_impl_trait(|this| {
- this.visit_path_segment(path.span, segment)
- });
+ self.with_banned_impl_trait(|this| this.visit_path_segment(segment));
}
}
}
@@ -259,20 +252,6 @@ impl<'a> AstValidator<'a> {
}
}
- fn visit_struct_field_def(&mut self, field: &'a FieldDef) {
- if let Some(ident) = field.ident {
- if ident.name == kw::Underscore {
- self.visit_vis(&field.vis);
- self.visit_ident(ident);
- self.visit_ty_common(&field.ty);
- self.walk_ty(&field.ty);
- walk_list!(self, visit_attribute, &field.attrs);
- return;
- }
- }
- self.visit_field_def(field);
- }
-
fn err_handler(&self) -> &rustc_errors::Handler {
&self.session.diagnostic()
}
@@ -280,38 +259,33 @@ impl<'a> AstValidator<'a> {
fn check_lifetime(&self, ident: Ident) {
let valid_names = [kw::UnderscoreLifetime, kw::StaticLifetime, kw::Empty];
if !valid_names.contains(&ident.name) && ident.without_first_quote().is_reserved() {
- self.err_handler().span_err(ident.span, "lifetimes cannot use keyword names");
+ self.session.emit_err(KeywordLifetime { span: ident.span });
}
}
fn check_label(&self, ident: Ident) {
if ident.without_first_quote().is_reserved() {
- self.err_handler()
- .span_err(ident.span, &format!("invalid label name `{}`", ident.name));
+ self.session.emit_err(InvalidLabel { span: ident.span, name: ident.name });
}
}
- fn invalid_visibility(&self, vis: &Visibility, note: Option<&str>) {
+ fn invalid_visibility(&self, vis: &Visibility, note: Option<InvalidVisibilityNote>) {
if let VisibilityKind::Inherited = vis.kind {
return;
}
- let mut err =
- struct_span_err!(self.session, vis.span, E0449, "unnecessary visibility qualifier");
- if vis.kind.is_pub() {
- err.span_label(vis.span, "`pub` not permitted here because it's implied");
- }
- if let Some(note) = note {
- err.note(note);
- }
- err.emit();
+ self.session.emit_err(InvalidVisibility {
+ span: vis.span,
+ implied: if vis.kind.is_pub() { Some(vis.span) } else { None },
+ note,
+ });
}
fn check_decl_no_pat(decl: &FnDecl, mut report_err: impl FnMut(Span, Option<Ident>, bool)) {
for Param { pat, .. } in &decl.inputs {
match pat.kind {
- PatKind::Ident(BindingMode::ByValue(Mutability::Not), _, None) | PatKind::Wild => {}
- PatKind::Ident(BindingMode::ByValue(Mutability::Mut), ident, None) => {
+ PatKind::Ident(BindingAnnotation::NONE, _, None) | PatKind::Wild => {}
+ PatKind::Ident(BindingAnnotation::MUT, ident, None) => {
report_err(pat.span, Some(ident), true)
}
_ => report_err(pat.span, None, false),
@@ -319,31 +293,9 @@ impl<'a> AstValidator<'a> {
}
}
- fn check_trait_fn_not_async(&self, fn_span: Span, asyncness: Async) {
- if let Async::Yes { span, .. } = asyncness {
- struct_span_err!(
- self.session,
- fn_span,
- E0706,
- "functions in traits cannot be declared `async`"
- )
- .span_label(span, "`async` because of this")
- .note("`async` trait functions are not currently supported")
- .note("consider using the `async-trait` crate: https://crates.io/crates/async-trait")
- .emit();
- }
- }
-
fn check_trait_fn_not_const(&self, constness: Const) {
if let Const::Yes(span) = constness {
- struct_span_err!(
- self.session,
- span,
- E0379,
- "functions in traits cannot be declared const"
- )
- .span_label(span, "functions in traits cannot be const")
- .emit();
+ self.session.emit_err(TraitFnConst { span });
}
}
@@ -356,8 +308,7 @@ impl<'a> AstValidator<'a> {
GenericParamKind::Lifetime { .. } => {
if !param.bounds.is_empty() {
let spans: Vec<_> = param.bounds.iter().map(|b| b.span()).collect();
- self.err_handler()
- .span_err(spans, "lifetime bounds cannot be used in this context");
+ self.session.emit_err(ForbiddenLifetimeBound { spans });
}
None
}
@@ -365,10 +316,7 @@ impl<'a> AstValidator<'a> {
})
.collect();
if !non_lt_param_spans.is_empty() {
- self.err_handler().span_err(
- non_lt_param_spans,
- "only lifetime parameters can be used in this context",
- );
+ self.session.emit_err(ForbiddenNonLifetimeParam { spans: non_lt_param_spans });
}
}
@@ -385,10 +333,7 @@ impl<'a> AstValidator<'a> {
let max_num_args: usize = u16::MAX.into();
if fn_decl.inputs.len() > max_num_args {
let Param { span, .. } = fn_decl.inputs[0];
- self.err_handler().span_fatal(
- span,
- &format!("function can not have more than {} arguments", max_num_args),
- );
+ self.session.emit_fatal(FnParamTooMany { span, max_num_args });
}
}
@@ -396,19 +341,13 @@ impl<'a> AstValidator<'a> {
match &*fn_decl.inputs {
[Param { ty, span, .. }] => {
if let TyKind::CVarArgs = ty.kind {
- self.err_handler().span_err(
- *span,
- "C-variadic function must be declared with at least one named argument",
- );
+ self.session.emit_err(FnParamCVarArgsOnly { span: *span });
}
}
[ps @ .., _] => {
for Param { ty, span, .. } in ps {
if let TyKind::CVarArgs = ty.kind {
- self.err_handler().span_err(
- *span,
- "`...` must be the last argument of a C-variadic function",
- );
+ self.session.emit_err(FnParamCVarArgsNotLast { span: *span });
}
}
}
@@ -435,19 +374,9 @@ impl<'a> AstValidator<'a> {
})
.for_each(|attr| {
if attr.is_doc_comment() {
- self.err_handler()
- .struct_span_err(
- attr.span,
- "documentation comments cannot be applied to function parameters",
- )
- .span_label(attr.span, "doc comments are not allowed here")
- .emit();
+ self.session.emit_err(FnParamDocComment { span: attr.span });
} else {
- self.err_handler().span_err(
- attr.span,
- "allow, cfg, cfg_attr, deny, expect, \
- forbid, and warn are the only allowed built-in attributes in function parameters",
- );
+ self.session.emit_err(FnParamForbiddenAttr { span: attr.span });
}
});
}
@@ -455,14 +384,7 @@ impl<'a> AstValidator<'a> {
fn check_decl_self_param(&self, fn_decl: &FnDecl, self_semantic: SelfSemantic) {
if let (SelfSemantic::No, [param, ..]) = (self_semantic, &*fn_decl.inputs) {
if param.is_self() {
- self.err_handler()
- .struct_span_err(
- param.span,
- "`self` parameter is only allowed in associated functions",
- )
- .span_label(param.span, "not semantically valid as function parameter")
- .note("associated functions are those in `impl` or `trait` definitions")
- .emit();
+ self.session.emit_err(FnParamForbiddenSelf { span: param.span });
}
}
}
@@ -470,47 +392,20 @@ impl<'a> AstValidator<'a> {
fn check_defaultness(&self, span: Span, defaultness: Defaultness) {
if let Defaultness::Default(def_span) = defaultness {
let span = self.session.source_map().guess_head_span(span);
- self.err_handler()
- .struct_span_err(span, "`default` is only allowed on items in trait impls")
- .span_label(def_span, "`default` because of this")
- .emit();
+ self.session.emit_err(ForbiddenDefault { span, def_span });
}
}
- fn error_item_without_body(&self, sp: Span, ctx: &str, msg: &str, sugg: &str) {
- self.error_item_without_body_with_help(sp, ctx, msg, sugg, |_| ());
- }
-
- fn error_item_without_body_with_help(
- &self,
- sp: Span,
- ctx: &str,
- msg: &str,
- sugg: &str,
- help: impl FnOnce(&mut DiagnosticBuilder<'_, ErrorGuaranteed>),
- ) {
+ /// If `sp` ends with a semicolon, returns it as a `Span`
+ /// Otherwise, returns `sp.shrink_to_hi()`
+ fn ending_semi_or_hi(&self, sp: Span) -> Span {
let source_map = self.session.source_map();
let end = source_map.end_point(sp);
- let replace_span = if source_map.span_to_snippet(end).map(|s| s == ";").unwrap_or(false) {
+
+ if source_map.span_to_snippet(end).map(|s| s == ";").unwrap_or(false) {
end
} else {
sp.shrink_to_hi()
- };
- let mut err = self.err_handler().struct_span_err(sp, msg);
- err.span_suggestion(
- replace_span,
- &format!("provide a definition for the {}", ctx),
- sugg,
- Applicability::HasPlaceholders,
- );
- help(&mut err);
- err.emit();
- }
-
- fn check_impl_item_provided<T>(&self, sp: Span, body: &Option<T>, ctx: &str, sugg: &str) {
- if body.is_none() {
- let msg = format!("associated {} in `impl` without body", ctx);
- self.error_item_without_body(sp, ctx, &msg, sugg);
}
}
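
The new `ending_semi_or_hi` helper returns the trailing semicolon's span when the item ends with one, and otherwise an empty span at the item's end, so the `= <expr>;` / `{ <body> }` suggestions land in the right place. A toy version where the "source map" is just a string and spans are byte ranges (an assumption for illustration; the real code asks `SourceMap` for the end point and snippet):

    #[derive(Clone, Copy, Debug)]
    struct Span { lo: usize, hi: usize }

    impl Span {
        fn shrink_to_hi(self) -> Span { Span { lo: self.hi, hi: self.hi } }
    }

    fn ending_semi_or_hi(src: &str, sp: Span) -> Span {
        // "End point": the last character of the span.
        let end = Span { lo: sp.hi.saturating_sub(1), hi: sp.hi };
        if src.get(end.lo..end.hi) == Some(";") { end } else { sp.shrink_to_hi() }
    }

    fn main() {
        let src = "fn foo();";
        let item = Span { lo: 0, hi: src.len() };
        // The suggestion replaces the trailing `;` (here 8..9) with ` { <body> }`.
        println!("{:?}", ending_semi_or_hi(src, item));
    }
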
@@ -947,10 +842,10 @@ fn validate_generic_param_order(
let (kind, bounds, span) = (&param.kind, &param.bounds, ident.span);
let (ord_kind, ident) = match &param.kind {
GenericParamKind::Lifetime => (ParamKindOrd::Lifetime, ident.to_string()),
- GenericParamKind::Type { default: _ } => (ParamKindOrd::Type, ident.to_string()),
+ GenericParamKind::Type { default: _ } => (ParamKindOrd::TypeOrConst, ident.to_string()),
GenericParamKind::Const { ref ty, kw_span: _, default: _ } => {
let ty = pprust::ty_to_string(ty);
- (ParamKindOrd::Const, format!("const {}: {}", ident, ty))
+ (ParamKindOrd::TypeOrConst, format!("const {}: {}", ident, ty))
}
};
param_idents.push((kind, ord_kind, bounds, idx, ident));
@@ -1097,8 +992,8 @@ impl<'a> Visitor<'a> for AstValidator<'a> {
visit::walk_lifetime(self, lifetime);
}
- fn visit_field_def(&mut self, s: &'a FieldDef) {
- visit::walk_field_def(self, s)
+ fn visit_field_def(&mut self, field: &'a FieldDef) {
+ visit::walk_field_def(self, field)
}
fn visit_item(&mut self, item: &'a Item) {
@@ -1180,7 +1075,7 @@ impl<'a> Visitor<'a> for AstValidator<'a> {
self.invalid_visibility(
&item.vis,
- Some("place qualifiers on individual impl items instead"),
+ Some(InvalidVisibilityNote::IndividualImplItems),
);
if let Unsafe::Yes(span) = unsafety {
error(span, "unsafe").code(error_code!(E0197)).emit();
@@ -1203,37 +1098,23 @@ impl<'a> Visitor<'a> for AstValidator<'a> {
self.check_defaultness(item.span, defaultness);
if body.is_none() {
- let msg = "free function without a body";
- let ext = sig.header.ext;
-
- let f = |e: &mut DiagnosticBuilder<'_, _>| {
- if let Extern::Implicit(start_span) | Extern::Explicit(_, start_span) = &ext
- {
- let start_suggestion = if let Extern::Explicit(abi, _) = ext {
- format!("extern \"{}\" {{", abi.symbol_unescaped)
- } else {
- "extern {".to_owned()
- };
-
- let end_suggestion = " }".to_owned();
- let end_span = item.span.shrink_to_hi();
-
- e
- .multipart_suggestion(
- "if you meant to declare an externally defined function, use an `extern` block",
- vec![(*start_span, start_suggestion), (end_span, end_suggestion)],
- Applicability::MaybeIncorrect,
- );
- }
- };
-
- self.error_item_without_body_with_help(
- item.span,
- "function",
- msg,
- " { <body> }",
- f,
- );
+ self.session.emit_err(FnWithoutBody {
+ span: item.span,
+ replace_span: self.ending_semi_or_hi(item.span),
+ extern_block_suggestion: match sig.header.ext {
+ Extern::None => None,
+ Extern::Implicit(start_span) => Some(ExternBlockSuggestion {
+ start_span,
+ end_span: item.span.shrink_to_hi(),
+ abi: None,
+ }),
+ Extern::Explicit(abi, start_span) => Some(ExternBlockSuggestion {
+ start_span,
+ end_span: item.span.shrink_to_hi(),
+ abi: Some(abi.symbol_unescaped),
+ }),
+ },
+ });
}
self.visit_vis(&item.vis);
@@ -1248,7 +1129,7 @@ impl<'a> Visitor<'a> for AstValidator<'a> {
let old_item = mem::replace(&mut self.extern_mod, Some(item));
self.invalid_visibility(
&item.vis,
- Some("place qualifiers on individual foreign items instead"),
+ Some(InvalidVisibilityNote::IndividualForeignItems),
);
if let Unsafe::Yes(span) = unsafety {
self.err_handler().span_err(span, "extern block cannot be declared unsafe");
@@ -1300,51 +1181,23 @@ impl<'a> Visitor<'a> for AstValidator<'a> {
self.check_mod_file_item_asciionly(item.ident);
}
}
- ItemKind::Struct(ref vdata, ref generics) => match vdata {
- // Duplicating the `Visitor` logic allows catching all cases
- // of `Anonymous(Struct, Union)` outside of a field struct or union.
- //
- // Inside `visit_ty` the validator catches every `Anonymous(Struct, Union)` it
- // encounters, and only on `ItemKind::Struct` and `ItemKind::Union`
- // it uses `visit_ty_common`, which doesn't contain that specific check.
- VariantData::Struct(ref fields, ..) => {
- self.visit_vis(&item.vis);
- self.visit_ident(item.ident);
- self.visit_generics(generics);
- self.with_banned_assoc_ty_bound(|this| {
- walk_list!(this, visit_struct_field_def, fields);
- });
- walk_list!(self, visit_attribute, &item.attrs);
- return;
- }
- _ => {}
- },
- ItemKind::Union(ref vdata, ref generics) => {
+ ItemKind::Union(ref vdata, ..) => {
if vdata.fields().is_empty() {
self.err_handler().span_err(item.span, "unions cannot have zero fields");
}
- match vdata {
- VariantData::Struct(ref fields, ..) => {
- self.visit_vis(&item.vis);
- self.visit_ident(item.ident);
- self.visit_generics(generics);
- self.with_banned_assoc_ty_bound(|this| {
- walk_list!(this, visit_struct_field_def, fields);
- });
- walk_list!(self, visit_attribute, &item.attrs);
- return;
- }
- _ => {}
- }
}
ItemKind::Const(def, .., None) => {
self.check_defaultness(item.span, def);
- let msg = "free constant item without body";
- self.error_item_without_body(item.span, "constant", msg, " = <expr>;");
+ self.session.emit_err(ConstWithoutBody {
+ span: item.span,
+ replace_span: self.ending_semi_or_hi(item.span),
+ });
}
ItemKind::Static(.., None) => {
- let msg = "free static item without body";
- self.error_item_without_body(item.span, "static", msg, " = <expr>;");
+ self.session.emit_err(StaticWithoutBody {
+ span: item.span,
+ replace_span: self.ending_semi_or_hi(item.span),
+ });
}
ItemKind::TyAlias(box TyAlias {
defaultness,
@@ -1355,8 +1208,10 @@ impl<'a> Visitor<'a> for AstValidator<'a> {
}) => {
self.check_defaultness(item.span, defaultness);
if ty.is_none() {
- let msg = "free type alias without body";
- self.error_item_without_body(item.span, "type", msg, " = <type>;");
+ self.session.emit_err(TyAliasWithoutBody {
+ span: item.span,
+ replace_span: self.ending_semi_or_hi(item.span),
+ });
}
self.check_type_no_bounds(bounds, "this context");
if where_clauses.1.0 {
@@ -1409,7 +1264,7 @@ impl<'a> Visitor<'a> for AstValidator<'a> {
}
// Mirrors `visit::walk_generic_args`, but tracks relevant state.
- fn visit_generic_args(&mut self, _: Span, generic_args: &'a GenericArgs) {
+ fn visit_generic_args(&mut self, generic_args: &'a GenericArgs) {
match *generic_args {
GenericArgs::AngleBracketed(ref data) => {
self.check_generic_args_before_constraints(data);
@@ -1529,13 +1384,15 @@ impl<'a> Visitor<'a> for AstValidator<'a> {
);
err.emit();
}
- (_, TraitBoundModifier::MaybeConst) => {
- if !self.is_tilde_const_allowed {
- self.err_handler()
- .struct_span_err(bound.span(), "`~const` is not allowed here")
- .note("only allowed on bounds on traits' associated types and functions, const fns, const impls and its associated functions")
- .emit();
- }
+ (_, TraitBoundModifier::MaybeConst) if let Some(reason) = &self.disallow_tilde_const => {
+ let mut err = self.err_handler().struct_span_err(bound.span(), "`~const` is not allowed here");
+ match reason {
+ DisallowTildeConstContext::TraitObject => err.note("trait objects cannot have `~const` trait bounds"),
+ DisallowTildeConstContext::ImplTrait => err.note("`impl Trait`s cannot have `~const` trait bounds"),
+ DisallowTildeConstContext::Fn(FnKind::Closure(..)) => err.note("closures cannot have `~const` trait bounds"),
+ DisallowTildeConstContext::Fn(FnKind::Fn(_, ident, ..)) => err.span_note(ident.span, "this function is not `const`, so it cannot have `~const` trait bounds"),
+ };
+ err.emit();
}
(_, TraitBoundModifier::MaybeConstMaybe) => {
self.err_handler()
@@ -1548,25 +1405,17 @@ impl<'a> Visitor<'a> for AstValidator<'a> {
visit::walk_param_bound(self, bound)
}
- fn visit_poly_trait_ref(&mut self, t: &'a PolyTraitRef, m: &'a TraitBoundModifier) {
+ fn visit_poly_trait_ref(&mut self, t: &'a PolyTraitRef) {
self.check_late_bound_lifetime_defs(&t.bound_generic_params);
- visit::walk_poly_trait_ref(self, t, m);
+ visit::walk_poly_trait_ref(self, t);
}
fn visit_variant_data(&mut self, s: &'a VariantData) {
self.with_banned_assoc_ty_bound(|this| visit::walk_struct_def(this, s))
}
- fn visit_enum_def(
- &mut self,
- enum_definition: &'a EnumDef,
- generics: &'a Generics,
- item_id: NodeId,
- _: Span,
- ) {
- self.with_banned_assoc_ty_bound(|this| {
- visit::walk_enum_def(this, enum_definition, generics, item_id)
- })
+ fn visit_enum_def(&mut self, enum_definition: &'a EnumDef) {
+ self.with_banned_assoc_ty_bound(|this| visit::walk_enum_def(this, enum_definition))
}
fn visit_fn(&mut self, fk: FnKind<'a>, span: Span, id: NodeId) {
@@ -1650,10 +1499,12 @@ impl<'a> Visitor<'a> for AstValidator<'a> {
}
let tilde_const_allowed =
- matches!(fk.header(), Some(FnHeader { constness: Const::Yes(_), .. }))
+ matches!(fk.header(), Some(FnHeader { constness: ast::Const::Yes(_), .. }))
|| matches!(fk.ctxt(), Some(FnCtxt::Assoc(_)));
- self.with_tilde_const(tilde_const_allowed, |this| visit::walk_fn(this, fk, span));
+ let disallowed = (!tilde_const_allowed).then(|| DisallowTildeConstContext::Fn(fk));
+
+ self.with_tilde_const(disallowed, |this| visit::walk_fn(this, fk));
}
fn visit_assoc_item(&mut self, item: &'a AssocItem, ctxt: AssocCtxt) {
@@ -1668,12 +1519,22 @@ impl<'a> Visitor<'a> for AstValidator<'a> {
if ctxt == AssocCtxt::Impl {
match &item.kind {
AssocItemKind::Const(_, _, body) => {
- self.check_impl_item_provided(item.span, body, "constant", " = <expr>;");
+ if body.is_none() {
+ self.session.emit_err(AssocConstWithoutBody {
+ span: item.span,
+ replace_span: self.ending_semi_or_hi(item.span),
+ });
+ }
}
AssocItemKind::Fn(box Fn { body, .. }) => {
- self.check_impl_item_provided(item.span, body, "function", " { <body> }");
+ if body.is_none() {
+ self.session.emit_err(AssocFnWithoutBody {
+ span: item.span,
+ replace_span: self.ending_semi_or_hi(item.span),
+ });
+ }
}
- AssocItemKind::TyAlias(box TyAlias {
+ AssocItemKind::Type(box TyAlias {
generics,
where_clauses,
where_predicates_split,
@@ -1681,7 +1542,12 @@ impl<'a> Visitor<'a> for AstValidator<'a> {
ty,
..
}) => {
- self.check_impl_item_provided(item.span, ty, "type", " = <type>;");
+ if ty.is_none() {
+ self.session.emit_err(AssocTypeWithoutBody {
+ span: item.span,
+ replace_span: self.ending_semi_or_hi(item.span),
+ });
+ }
self.check_type_no_bounds(bounds, "`impl`s");
if ty.is_some() {
self.check_gat_where(
@@ -1699,7 +1565,6 @@ impl<'a> Visitor<'a> for AstValidator<'a> {
self.invalid_visibility(&item.vis, None);
if let AssocItemKind::Fn(box Fn { sig, .. }) = &item.kind {
self.check_trait_fn_not_const(sig.header.constness);
- self.check_trait_fn_not_async(item.span, sig.header.asyncness);
}
}
@@ -1708,7 +1573,7 @@ impl<'a> Visitor<'a> for AstValidator<'a> {
}
match item.kind {
- AssocItemKind::TyAlias(box TyAlias { ref generics, ref bounds, ref ty, .. })
+ AssocItemKind::Type(box TyAlias { ref generics, ref bounds, ref ty, .. })
if ctxt == AssocCtxt::Trait =>
{
self.visit_vis(&item.vis);
@@ -1883,7 +1748,7 @@ pub fn check_crate(session: &Session, krate: &Crate, lints: &mut LintBuffer) ->
in_const_trait_impl: false,
has_proc_macro_decls: false,
outer_impl_trait: None,
- is_tilde_const_allowed: false,
+ disallow_tilde_const: None,
is_impl_trait_banned: false,
is_assoc_ty_bound_banned: false,
forbidden_let_reason: Some(ForbiddenLetReason::GenericForbidden),
@@ -1895,15 +1760,17 @@ pub fn check_crate(session: &Session, krate: &Crate, lints: &mut LintBuffer) ->
}
/// Used to forbid `let` expressions in certain syntactic locations.
-#[derive(Clone, Copy)]
-enum ForbiddenLetReason {
+#[derive(Clone, Copy, Subdiagnostic)]
+pub(crate) enum ForbiddenLetReason {
/// `let` is not valid and the source environment is not important
GenericForbidden,
/// A let chain with the `||` operator
- NotSupportedOr(Span),
+ #[note(not_supported_or)]
+ NotSupportedOr(#[primary_span] Span),
/// A let chain with invalid parentheses
///
- /// For exemple, `let 1 = 1 && (expr && expr)` is allowed
+ /// For example, `let 1 = 1 && (expr && expr)` is allowed
/// but `(let 1 = 1 && (let 1 = 1 && (let 1 = 1))) && let a = 1` is not
- NotSupportedParentheses(Span),
+ #[note(not_supported_parentheses)]
+ NotSupportedParentheses(#[primary_span] Span),
}
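
The `is_tilde_const_allowed: bool` flag becomes `disallow_tilde_const: Option<DisallowTildeConstContext>`, so the `~const` error can say which context forbids it. A minimal sketch of that save/restore pattern, using simplified stand-in types (`Validator` and `TildeConstReason` are not the real names):

    use std::mem;

    #[derive(Clone, Copy, Debug)]
    enum TildeConstReason { TraitObject, ImplTrait }

    struct Validator { disallow_tilde_const: Option<TildeConstReason> }

    impl Validator {
        // Save the old state, run `f`, then restore it -- the same shape as
        // `with_tilde_const` above.
        fn with_tilde_const(
            &mut self,
            disallowed: Option<TildeConstReason>,
            f: impl FnOnce(&mut Self),
        ) {
            let old = mem::replace(&mut self.disallow_tilde_const, disallowed);
            f(self);
            self.disallow_tilde_const = old;
        }

        fn check_maybe_const_bound(&self) {
            if let Some(reason) = self.disallow_tilde_const {
                // Carrying a reason (not just a bool) lets the error explain itself.
                eprintln!("error: `~const` is not allowed here (context: {reason:?})");
            }
        }
    }

    fn main() {
        let mut v = Validator { disallow_tilde_const: None };
        v.with_tilde_const(Some(TildeConstReason::TraitObject), |this| {
            this.check_maybe_const_bound(); // reported with the trait-object reason
            // Contexts nest: an inner `impl Trait` overrides the outer reason.
            this.with_tilde_const(Some(TildeConstReason::ImplTrait), |inner| {
                inner.check_maybe_const_bound();
            });
        });
        v.check_maybe_const_bound(); // nothing reported once the old state is restored
    }
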
diff --git a/compiler/rustc_ast_passes/src/errors.rs b/compiler/rustc_ast_passes/src/errors.rs
new file mode 100644
index 000000000..59f582f10
--- /dev/null
+++ b/compiler/rustc_ast_passes/src/errors.rs
@@ -0,0 +1,234 @@
+//! Errors emitted by ast_passes.
+
+use rustc_errors::{fluent, AddToDiagnostic, Applicability, Diagnostic, SubdiagnosticMessage};
+use rustc_macros::{Diagnostic, Subdiagnostic};
+use rustc_span::{Span, Symbol};
+
+use crate::ast_validation::ForbiddenLetReason;
+
+#[derive(Diagnostic)]
+#[diag(ast_passes_forbidden_let)]
+#[note]
+pub struct ForbiddenLet {
+ #[primary_span]
+ pub span: Span,
+ #[subdiagnostic]
+ pub(crate) reason: ForbiddenLetReason,
+}
+
+#[derive(Diagnostic)]
+#[diag(ast_passes_forbidden_let_stable)]
+#[note]
+pub struct ForbiddenLetStable {
+ #[primary_span]
+ pub span: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(ast_passes_forbidden_assoc_constraint)]
+pub struct ForbiddenAssocConstraint {
+ #[primary_span]
+ pub span: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(ast_passes_keyword_lifetime)]
+pub struct KeywordLifetime {
+ #[primary_span]
+ pub span: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(ast_passes_invalid_label)]
+pub struct InvalidLabel {
+ #[primary_span]
+ pub span: Span,
+ pub name: Symbol,
+}
+
+#[derive(Diagnostic)]
+#[diag(ast_passes_invalid_visibility, code = "E0449")]
+pub struct InvalidVisibility {
+ #[primary_span]
+ pub span: Span,
+ #[label(implied)]
+ pub implied: Option<Span>,
+ #[subdiagnostic]
+ pub note: Option<InvalidVisibilityNote>,
+}
+
+#[derive(Subdiagnostic)]
+pub enum InvalidVisibilityNote {
+ #[note(individual_impl_items)]
+ IndividualImplItems,
+ #[note(individual_foreign_items)]
+ IndividualForeignItems,
+}
+
+#[derive(Diagnostic)]
+#[diag(ast_passes_trait_fn_const, code = "E0379")]
+pub struct TraitFnConst {
+ #[primary_span]
+ #[label]
+ pub span: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(ast_passes_forbidden_lifetime_bound)]
+pub struct ForbiddenLifetimeBound {
+ #[primary_span]
+ pub spans: Vec<Span>,
+}
+
+#[derive(Diagnostic)]
+#[diag(ast_passes_forbidden_non_lifetime_param)]
+pub struct ForbiddenNonLifetimeParam {
+ #[primary_span]
+ pub spans: Vec<Span>,
+}
+
+#[derive(Diagnostic)]
+#[diag(ast_passes_fn_param_too_many)]
+pub struct FnParamTooMany {
+ #[primary_span]
+ pub span: Span,
+ pub max_num_args: usize,
+}
+
+#[derive(Diagnostic)]
+#[diag(ast_passes_fn_param_c_var_args_only)]
+pub struct FnParamCVarArgsOnly {
+ #[primary_span]
+ pub span: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(ast_passes_fn_param_c_var_args_not_last)]
+pub struct FnParamCVarArgsNotLast {
+ #[primary_span]
+ pub span: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(ast_passes_fn_param_doc_comment)]
+pub struct FnParamDocComment {
+ #[primary_span]
+ #[label]
+ pub span: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(ast_passes_fn_param_forbidden_attr)]
+pub struct FnParamForbiddenAttr {
+ #[primary_span]
+ pub span: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(ast_passes_fn_param_forbidden_self)]
+#[note]
+pub struct FnParamForbiddenSelf {
+ #[primary_span]
+ #[label]
+ pub span: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(ast_passes_forbidden_default)]
+pub struct ForbiddenDefault {
+ #[primary_span]
+ pub span: Span,
+ #[label]
+ pub def_span: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(ast_passes_assoc_const_without_body)]
+pub struct AssocConstWithoutBody {
+ #[primary_span]
+ pub span: Span,
+ #[suggestion(code = " = <expr>;", applicability = "has-placeholders")]
+ pub replace_span: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(ast_passes_assoc_fn_without_body)]
+pub struct AssocFnWithoutBody {
+ #[primary_span]
+ pub span: Span,
+ #[suggestion(code = " {{ <body> }}", applicability = "has-placeholders")]
+ pub replace_span: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(ast_passes_assoc_type_without_body)]
+pub struct AssocTypeWithoutBody {
+ #[primary_span]
+ pub span: Span,
+ #[suggestion(code = " = <type>;", applicability = "has-placeholders")]
+ pub replace_span: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(ast_passes_const_without_body)]
+pub struct ConstWithoutBody {
+ #[primary_span]
+ pub span: Span,
+ #[suggestion(code = " = <expr>;", applicability = "has-placeholders")]
+ pub replace_span: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(ast_passes_static_without_body)]
+pub struct StaticWithoutBody {
+ #[primary_span]
+ pub span: Span,
+ #[suggestion(code = " = <expr>;", applicability = "has-placeholders")]
+ pub replace_span: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(ast_passes_ty_alias_without_body)]
+pub struct TyAliasWithoutBody {
+ #[primary_span]
+ pub span: Span,
+ #[suggestion(code = " = <type>;", applicability = "has-placeholders")]
+ pub replace_span: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(ast_passes_fn_without_body)]
+pub struct FnWithoutBody {
+ #[primary_span]
+ pub span: Span,
+ #[suggestion(code = " {{ <body> }}", applicability = "has-placeholders")]
+ pub replace_span: Span,
+ #[subdiagnostic]
+ pub extern_block_suggestion: Option<ExternBlockSuggestion>,
+}
+
+pub struct ExternBlockSuggestion {
+ pub start_span: Span,
+ pub end_span: Span,
+ pub abi: Option<Symbol>,
+}
+
+impl AddToDiagnostic for ExternBlockSuggestion {
+ fn add_to_diagnostic_with<F>(self, diag: &mut Diagnostic, _: F)
+ where
+ F: Fn(&mut Diagnostic, SubdiagnosticMessage) -> SubdiagnosticMessage,
+ {
+ let start_suggestion = if let Some(abi) = self.abi {
+ format!("extern \"{}\" {{", abi)
+ } else {
+ "extern {".to_owned()
+ };
+ let end_suggestion = " }".to_owned();
+
+ diag.multipart_suggestion(
+ fluent::extern_block_suggestion,
+ vec![(self.start_span, start_suggestion), (self.end_span, end_suggestion)],
+ Applicability::MaybeIncorrect,
+ );
+ }
+}
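
`ExternBlockSuggestion` is the one subdiagnostic in this file that implements `AddToDiagnostic` by hand, because its replacement text depends on the optional ABI. A standalone model of what that impl produces, using plain structs instead of the rustc diagnostic types:

    struct Span { lo: u32, hi: u32 }

    struct ExternBlockSuggestion { start_span: Span, end_span: Span, abi: Option<&'static str> }

    impl ExternBlockSuggestion {
        // Two replacement parts: open an `extern` block before the item and close it after,
        // matching the multipart suggestion built above.
        fn parts(&self) -> Vec<(&Span, String)> {
            let start = match self.abi {
                Some(abi) => format!("extern \"{abi}\" {{"),
                None => "extern {".to_owned(),
            };
            vec![(&self.start_span, start), (&self.end_span, " }".to_owned())]
        }
    }

    fn main() {
        let sugg = ExternBlockSuggestion {
            start_span: Span { lo: 0, hi: 0 },
            end_span: Span { lo: 42, hi: 42 },
            abi: Some("C"),
        };
        for (span, text) in sugg.parts() {
            println!("replace {}..{} with {:?}", span.lo, span.hi, text);
        }
    }
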
diff --git a/compiler/rustc_ast_passes/src/feature_gate.rs b/compiler/rustc_ast_passes/src/feature_gate.rs
index 789eca1f0..546010135 100644
--- a/compiler/rustc_ast_passes/src/feature_gate.rs
+++ b/compiler/rustc_ast_passes/src/feature_gate.rs
@@ -1,17 +1,15 @@
use rustc_ast as ast;
use rustc_ast::visit::{self, AssocCtxt, FnCtxt, FnKind, Visitor};
use rustc_ast::{AssocConstraint, AssocConstraintKind, NodeId};
-use rustc_ast::{PatKind, RangeEnd, VariantData};
-use rustc_errors::{struct_span_err, Applicability};
-use rustc_feature::{AttributeGate, BuiltinAttribute, BUILTIN_ATTRIBUTE_MAP};
-use rustc_feature::{Features, GateIssue};
-use rustc_session::parse::{feature_err, feature_err_issue};
+use rustc_ast::{PatKind, RangeEnd};
+use rustc_errors::{struct_span_err, Applicability, StashKey};
+use rustc_feature::{AttributeGate, BuiltinAttribute, Features, GateIssue, BUILTIN_ATTRIBUTE_MAP};
+use rustc_session::parse::{feature_err, feature_err_issue, feature_warn};
use rustc_session::Session;
use rustc_span::source_map::Spanned;
use rustc_span::symbol::sym;
use rustc_span::Span;
-
-use tracing::debug;
+use rustc_target::spec::abi;
macro_rules! gate_feature_fn {
($visitor: expr, $has_feature: expr, $span: expr, $name: expr, $explain: expr, $help: expr) => {{
@@ -20,9 +18,7 @@ macro_rules! gate_feature_fn {
let has_feature: bool = has_feature(visitor.features);
debug!("gate_feature(feature = {:?}, span = {:?}); has? {}", name, span, has_feature);
if !has_feature && !span.allows_unstable($name) {
- feature_err_issue(&visitor.sess.parse_sess, name, span, GateIssue::Language, explain)
- .help(help)
- .emit();
+ feature_err(&visitor.sess.parse_sess, name, span, explain).help(help).emit();
}
}};
($visitor: expr, $has_feature: expr, $span: expr, $name: expr, $explain: expr) => {{
@@ -31,8 +27,19 @@ macro_rules! gate_feature_fn {
let has_feature: bool = has_feature(visitor.features);
debug!("gate_feature(feature = {:?}, span = {:?}); has? {}", name, span, has_feature);
if !has_feature && !span.allows_unstable($name) {
- feature_err_issue(&visitor.sess.parse_sess, name, span, GateIssue::Language, explain)
- .emit();
+ feature_err(&visitor.sess.parse_sess, name, span, explain).emit();
+ }
+ }};
+ (future_incompatible; $visitor: expr, $has_feature: expr, $span: expr, $name: expr, $explain: expr) => {{
+ let (visitor, has_feature, span, name, explain) =
+ (&*$visitor, $has_feature, $span, $name, $explain);
+ let has_feature: bool = has_feature(visitor.features);
+ debug!(
+ "gate_feature(feature = {:?}, span = {:?}); has? {} (future_incompatible)",
+ name, span, has_feature
+ );
+ if !has_feature && !span.allows_unstable($name) {
+ feature_warn(&visitor.sess.parse_sess, name, span, explain);
}
}};
}
@@ -44,6 +51,9 @@ macro_rules! gate_feature_post {
($visitor: expr, $feature: ident, $span: expr, $explain: expr) => {
gate_feature_fn!($visitor, |x: &Features| x.$feature, $span, sym::$feature, $explain)
};
+ (future_incompatible; $visitor: expr, $feature: ident, $span: expr, $explain: expr) => {
+ gate_feature_fn!(future_incompatible; $visitor, |x: &Features| x.$feature, $span, sym::$feature, $explain)
+ };
}
pub fn check_attribute(attr: &ast::Attribute, sess: &Session, features: &Features) {
@@ -74,210 +84,26 @@ impl<'a> PostExpansionVisitor<'a> {
}
}
- match symbol_unescaped.as_str() {
- // Stable
- "Rust" | "C" | "cdecl" | "stdcall" | "fastcall" | "aapcs" | "win64" | "sysv64"
- | "system" => {}
- "rust-intrinsic" => {
- gate_feature_post!(&self, intrinsics, span, "intrinsics are subject to change");
- }
- "platform-intrinsic" => {
- gate_feature_post!(
- &self,
- platform_intrinsics,
- span,
- "platform intrinsics are experimental and possibly buggy"
- );
- }
- "vectorcall" => {
- gate_feature_post!(
- &self,
- abi_vectorcall,
- span,
- "vectorcall is experimental and subject to change"
- );
- }
- "thiscall" => {
- gate_feature_post!(
- &self,
- abi_thiscall,
- span,
- "thiscall is experimental and subject to change"
- );
- }
- "rust-call" => {
- gate_feature_post!(
- &self,
- unboxed_closures,
- span,
- "rust-call ABI is subject to change"
- );
- }
- "rust-cold" => {
- gate_feature_post!(
- &self,
- rust_cold_cc,
- span,
- "rust-cold is experimental and subject to change"
- );
- }
- "ptx-kernel" => {
- gate_feature_post!(
- &self,
- abi_ptx,
- span,
- "PTX ABIs are experimental and subject to change"
- );
- }
- "unadjusted" => {
- gate_feature_post!(
- &self,
- abi_unadjusted,
- span,
- "unadjusted ABI is an implementation detail and perma-unstable"
- );
- }
- "msp430-interrupt" => {
- gate_feature_post!(
- &self,
- abi_msp430_interrupt,
- span,
- "msp430-interrupt ABI is experimental and subject to change"
- );
- }
- "x86-interrupt" => {
- gate_feature_post!(
- &self,
- abi_x86_interrupt,
- span,
- "x86-interrupt ABI is experimental and subject to change"
- );
- }
- "amdgpu-kernel" => {
- gate_feature_post!(
- &self,
- abi_amdgpu_kernel,
- span,
- "amdgpu-kernel ABI is experimental and subject to change"
- );
- }
- "avr-interrupt" | "avr-non-blocking-interrupt" => {
- gate_feature_post!(
- &self,
- abi_avr_interrupt,
- span,
- "avr-interrupt and avr-non-blocking-interrupt ABIs are experimental and subject to change"
- );
- }
- "efiapi" => {
- gate_feature_post!(
- &self,
- abi_efiapi,
- span,
- "efiapi ABI is experimental and subject to change"
- );
- }
- "C-cmse-nonsecure-call" => {
- gate_feature_post!(
- &self,
- abi_c_cmse_nonsecure_call,
- span,
- "C-cmse-nonsecure-call ABI is experimental and subject to change"
- );
- }
- "C-unwind" => {
- gate_feature_post!(
- &self,
- c_unwind,
- span,
- "C-unwind ABI is experimental and subject to change"
- );
- }
- "stdcall-unwind" => {
- gate_feature_post!(
- &self,
- c_unwind,
- span,
- "stdcall-unwind ABI is experimental and subject to change"
- );
- }
- "system-unwind" => {
- gate_feature_post!(
- &self,
- c_unwind,
- span,
- "system-unwind ABI is experimental and subject to change"
- );
- }
- "thiscall-unwind" => {
- gate_feature_post!(
- &self,
- c_unwind,
- span,
- "thiscall-unwind ABI is experimental and subject to change"
- );
- }
- "cdecl-unwind" => {
- gate_feature_post!(
- &self,
- c_unwind,
- span,
- "cdecl-unwind ABI is experimental and subject to change"
- );
- }
- "fastcall-unwind" => {
- gate_feature_post!(
- &self,
- c_unwind,
- span,
- "fastcall-unwind ABI is experimental and subject to change"
- );
- }
- "vectorcall-unwind" => {
- gate_feature_post!(
- &self,
- c_unwind,
+ match abi::is_enabled(&self.features, span, symbol_unescaped.as_str()) {
+ Ok(()) => (),
+ Err(abi::AbiDisabled::Unstable { feature, explain }) => {
+ feature_err_issue(
+ &self.sess.parse_sess,
+ feature,
span,
- "vectorcall-unwind ABI is experimental and subject to change"
- );
- }
- "aapcs-unwind" => {
- gate_feature_post!(
- &self,
- c_unwind,
- span,
- "aapcs-unwind ABI is experimental and subject to change"
- );
- }
- "win64-unwind" => {
- gate_feature_post!(
- &self,
- c_unwind,
- span,
- "win64-unwind ABI is experimental and subject to change"
- );
- }
- "sysv64-unwind" => {
- gate_feature_post!(
- &self,
- c_unwind,
- span,
- "sysv64-unwind ABI is experimental and subject to change"
- );
- }
- "wasm" => {
- gate_feature_post!(
- &self,
- wasm_abi,
- span,
- "wasm ABI is experimental and subject to change"
- );
+ GateIssue::Language,
+ explain,
+ )
+ .emit();
}
- abi => {
+ Err(abi::AbiDisabled::Unrecognized) => {
if self.sess.opts.pretty.map_or(true, |ppm| ppm.needs_hir()) {
self.sess.parse_sess.span_diagnostic.delay_span_bug(
span,
- &format!("unrecognized ABI not caught in lowering: {}", abi),
+ &format!(
+ "unrecognized ABI not caught in lowering: {}",
+ symbol_unescaped.as_str()
+ ),
);
}
}
@@ -290,65 +116,6 @@ impl<'a> PostExpansionVisitor<'a> {
}
}
- fn maybe_report_invalid_custom_discriminants(&self, variants: &[ast::Variant]) {
- let has_fields = variants.iter().any(|variant| match variant.data {
- VariantData::Tuple(..) | VariantData::Struct(..) => true,
- VariantData::Unit(..) => false,
- });
-
- let discriminant_spans = variants
- .iter()
- .filter(|variant| match variant.data {
- VariantData::Tuple(..) | VariantData::Struct(..) => false,
- VariantData::Unit(..) => true,
- })
- .filter_map(|variant| variant.disr_expr.as_ref().map(|c| c.value.span))
- .collect::<Vec<_>>();
-
- if !discriminant_spans.is_empty() && has_fields {
- let mut err = feature_err(
- &self.sess.parse_sess,
- sym::arbitrary_enum_discriminant,
- discriminant_spans.clone(),
- "custom discriminant values are not allowed in enums with tuple or struct variants",
- );
- for sp in discriminant_spans {
- err.span_label(sp, "disallowed custom discriminant");
- }
- for variant in variants.iter() {
- match &variant.data {
- VariantData::Struct(..) => {
- err.span_label(variant.span, "struct variant defined here");
- }
- VariantData::Tuple(..) => {
- err.span_label(variant.span, "tuple variant defined here");
- }
- VariantData::Unit(..) => {}
- }
- }
- err.emit();
- }
- }
-
- fn check_gat(&self, generics: &ast::Generics, span: Span) {
- if !generics.params.is_empty() {
- gate_feature_post!(
- &self,
- generic_associated_types,
- span,
- "generic associated types are unstable"
- );
- }
- if !generics.where_clause.predicates.is_empty() {
- gate_feature_post!(
- &self,
- generic_associated_types,
- span,
- "where clauses on associated types are unstable"
- );
- }
- }
-
/// Feature gate `impl Trait` inside `type Alias = $type_expr;`.
fn check_impl_trait(&self, ty: &ast::Ty) {
struct ImplTraitVisitor<'a> {
@@ -417,6 +184,7 @@ impl<'a> Visitor<'a> for PostExpansionVisitor<'a> {
|| attr.has_name(sym::stable)
|| attr.has_name(sym::rustc_const_unstable)
|| attr.has_name(sym::rustc_const_stable)
+ || attr.has_name(sym::rustc_default_body_unstable)
{
struct_span_err!(
self.sess,
@@ -465,26 +233,6 @@ impl<'a> Visitor<'a> for PostExpansionVisitor<'a> {
}
}
- ast::ItemKind::Enum(ast::EnumDef { ref variants, .. }, ..) => {
- for variant in variants {
- match (&variant.data, &variant.disr_expr) {
- (ast::VariantData::Unit(..), _) => {}
- (_, Some(disr_expr)) => gate_feature_post!(
- &self,
- arbitrary_enum_discriminant,
- disr_expr.value.span,
- "discriminants on non-unit variants are experimental"
- ),
- _ => {}
- }
- }
-
- let has_feature = self.features.arbitrary_enum_discriminant;
- if !has_feature && !i.span.allows_unstable(sym::arbitrary_enum_discriminant) {
- self.maybe_report_invalid_custom_discriminants(&variants);
- }
- }
-
ast::ItemKind::Impl(box ast::Impl { polarity, defaultness, ref of_trait, .. }) => {
if let ast::ImplPolarity::Negative(span) = polarity {
gate_feature_post!(
@@ -562,6 +310,9 @@ impl<'a> Visitor<'a> for PostExpansionVisitor<'a> {
ast::TyKind::Never => {
gate_feature_post!(&self, never_type, ty.span, "the `!` type is experimental");
}
+ ast::TyKind::TraitObject(_, ast::TraitObjectSyntax::DynStar, ..) => {
+ gate_feature_post!(&self, dyn_star, ty.span, "dyn* trait objects are unstable");
+ }
_ => {}
}
visit::walk_ty(self, ty)
@@ -587,11 +338,10 @@ impl<'a> Visitor<'a> for PostExpansionVisitor<'a> {
{
// When we encounter a statement of the form `foo: Ty = val;`, this will emit a type
// ascription error, but the likely intention was to write a `let` statement. (#78907).
- feature_err_issue(
+ feature_err(
&self.sess.parse_sess,
sym::type_ascription,
lhs.span,
- GateIssue::Language,
"type ascription is experimental",
).span_suggestion_verbose(
lhs.span.shrink_to_lo(),
@@ -614,28 +364,27 @@ impl<'a> Visitor<'a> for PostExpansionVisitor<'a> {
);
}
ast::ExprKind::Type(..) => {
- // To avoid noise about type ascription in common syntax errors, only emit if it
- // is the *only* error.
if self.sess.parse_sess.span_diagnostic.err_count() == 0 {
+ // To avoid noise about type ascription in common syntax errors,
+ // only emit if it is the *only* error.
gate_feature_post!(
&self,
type_ascription,
e.span,
"type ascription is experimental"
);
+ } else {
+ // And if it isn't, cancel the early-pass warning.
+ self.sess
+ .parse_sess
+ .span_diagnostic
+ .steal_diagnostic(e.span, StashKey::EarlySyntaxWarning)
+ .map(|err| err.cancel());
}
}
ast::ExprKind::TryBlock(_) => {
gate_feature_post!(&self, try_blocks, e.span, "`try` expression is experimental");
}
- ast::ExprKind::Block(_, Some(label)) => {
- gate_feature_post!(
- &self,
- label_break_value,
- label.ident.span,
- "labels on blocks are unstable"
- );
- }
_ => {}
}
visit::walk_expr(self, e)
@@ -652,7 +401,7 @@ impl<'a> Visitor<'a> for PostExpansionVisitor<'a> {
if let PatKind::Range(Some(_), None, Spanned { .. }) = inner_pat.kind {
gate_feature_post!(
&self,
- half_open_range_patterns,
+ half_open_range_patterns_in_slices,
pat.span,
"`X..` patterns in slices are experimental"
);
@@ -690,7 +439,7 @@ impl<'a> Visitor<'a> for PostExpansionVisitor<'a> {
gate_feature_post!(&self, c_variadic, span, "C-variadic functions are unstable");
}
- visit::walk_fn(self, fn_kind, span)
+ visit::walk_fn(self, fn_kind)
}
fn visit_assoc_constraint(&mut self, constraint: &'a AssocConstraint) {
@@ -708,7 +457,7 @@ impl<'a> Visitor<'a> for PostExpansionVisitor<'a> {
fn visit_assoc_item(&mut self, i: &'a ast::AssocItem, ctxt: AssocCtxt) {
let is_fn = match i.kind {
ast::AssocItemKind::Fn(_) => true,
- ast::AssocItemKind::TyAlias(box ast::TyAlias { ref generics, ref ty, .. }) => {
+ ast::AssocItemKind::Type(box ast::TyAlias { ref ty, .. }) => {
if let (Some(_), AssocCtxt::Trait) = (ty, ctxt) {
gate_feature_post!(
&self,
@@ -720,7 +469,6 @@ impl<'a> Visitor<'a> for PostExpansionVisitor<'a> {
if let Some(ty) = ty {
self.check_impl_trait(ty);
}
- self.check_gat(generics, i.span);
false
}
_ => false,
@@ -781,7 +529,10 @@ pub fn check_crate(krate: &ast::Crate, sess: &Session) {
gate_all!(generators, "yield syntax is experimental");
gate_all!(raw_ref_op, "raw address of syntax is experimental");
gate_all!(const_trait_impl, "const trait impls are experimental");
- gate_all!(half_open_range_patterns, "half-open range patterns are unstable");
+ gate_all!(
+ half_open_range_patterns_in_slices,
+ "half-open range patterns in slices are unstable"
+ );
gate_all!(inline_const, "inline-const is experimental");
gate_all!(inline_const_pat, "inline-const in pattern position is experimental");
gate_all!(associated_const_equality, "associated const equality is incomplete");
@@ -789,14 +540,12 @@ pub fn check_crate(krate: &ast::Crate, sess: &Session) {
// All uses of `gate_all!` below this point were added in #65742,
// and subsequently disabled (with the non-early gating readded).
+ // We emit an early future-incompatible warning for these.
+ // New syntax gates should go above here to get a hard error gate.
macro_rules! gate_all {
($gate:ident, $msg:literal) => {
- // FIXME(eddyb) do something more useful than always
- // disabling these uses of early feature-gatings.
- if false {
- for span in spans.get(&sym::$gate).unwrap_or(&vec![]) {
- gate_feature_post!(&visitor, $gate, *span, $msg);
- }
+ for span in spans.get(&sym::$gate).unwrap_or(&vec![]) {
+ gate_feature_post!(future_incompatible; &visitor, $gate, *span, $msg);
}
};
}
@@ -807,13 +556,8 @@ pub fn check_crate(krate: &ast::Crate, sess: &Session) {
gate_all!(box_patterns, "box pattern syntax is experimental");
gate_all!(exclusive_range_pattern, "exclusive range pattern syntax is experimental");
gate_all!(try_blocks, "`try` blocks are unstable");
- gate_all!(label_break_value, "labels on blocks are unstable");
gate_all!(box_syntax, "box expression syntax is experimental; you can call `Box::new` instead");
- // To avoid noise about type ascription in common syntax errors,
- // only emit if it is the *only* error. (Also check it last.)
- if sess.parse_sess.span_diagnostic.err_count() == 0 {
- gate_all!(type_ascription, "type ascription is experimental");
- }
+ gate_all!(type_ascription, "type ascription is experimental");
visit::walk_crate(&mut visitor, krate);
}
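
(Illustrative sketch, not part of this patch: the two gating behaviours the macros above expand to, assuming `sess: &Session` and a `span` in scope; both calls mirror ones visible in this hunk.)

    // Gates listed before the second `gate_all!` block still produce a hard error:
    feature_err(&sess.parse_sess, sym::dyn_star, span, "dyn* trait objects are unstable").emit();
    // The re-enabled early gates now only produce a future-incompatibility warning:
    feature_warn(&sess.parse_sess, sym::type_ascription, span, "type ascription is experimental");
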
diff --git a/compiler/rustc_ast_passes/src/lib.rs b/compiler/rustc_ast_passes/src/lib.rs
index 9d52c3288..f58fffc91 100644
--- a/compiler/rustc_ast_passes/src/lib.rs
+++ b/compiler/rustc_ast_passes/src/lib.rs
@@ -9,10 +9,13 @@
#![feature(if_let_guard)]
#![feature(iter_is_partitioned)]
#![feature(let_chains)]
-#![feature(let_else)]
#![recursion_limit = "256"]
+#[macro_use]
+extern crate tracing;
+
pub mod ast_validation;
+mod errors;
pub mod feature_gate;
pub mod node_count;
pub mod show_span;
diff --git a/compiler/rustc_ast_passes/src/node_count.rs b/compiler/rustc_ast_passes/src/node_count.rs
index 9c7369c83..fa42f8778 100644
--- a/compiler/rustc_ast_passes/src/node_count.rs
+++ b/compiler/rustc_ast_passes/src/node_count.rs
@@ -63,9 +63,9 @@ impl<'ast> Visitor<'ast> for NodeCounter {
self.count += 1;
walk_generics(self, g)
}
- fn visit_fn(&mut self, fk: visit::FnKind<'_>, s: Span, _: NodeId) {
+ fn visit_fn(&mut self, fk: visit::FnKind<'_>, _: Span, _: NodeId) {
self.count += 1;
- walk_fn(self, fk, s)
+ walk_fn(self, fk)
}
fn visit_assoc_item(&mut self, ti: &AssocItem, ctxt: AssocCtxt) {
self.count += 1;
@@ -79,9 +79,9 @@ impl<'ast> Visitor<'ast> for NodeCounter {
self.count += 1;
walk_param_bound(self, bounds)
}
- fn visit_poly_trait_ref(&mut self, t: &PolyTraitRef, m: &TraitBoundModifier) {
+ fn visit_poly_trait_ref(&mut self, t: &PolyTraitRef) {
self.count += 1;
- walk_poly_trait_ref(self, t, m)
+ walk_poly_trait_ref(self, t)
}
fn visit_variant_data(&mut self, s: &VariantData) {
self.count += 1;
@@ -91,15 +91,9 @@ impl<'ast> Visitor<'ast> for NodeCounter {
self.count += 1;
walk_field_def(self, s)
}
- fn visit_enum_def(
- &mut self,
- enum_definition: &EnumDef,
- generics: &Generics,
- item_id: NodeId,
- _: Span,
- ) {
+ fn visit_enum_def(&mut self, enum_definition: &EnumDef) {
self.count += 1;
- walk_enum_def(self, enum_definition, generics, item_id)
+ walk_enum_def(self, enum_definition)
}
fn visit_variant(&mut self, v: &Variant) {
self.count += 1;
@@ -121,9 +115,9 @@ impl<'ast> Visitor<'ast> for NodeCounter {
self.count += 1;
walk_use_tree(self, use_tree, id)
}
- fn visit_generic_args(&mut self, path_span: Span, generic_args: &GenericArgs) {
+ fn visit_generic_args(&mut self, generic_args: &GenericArgs) {
self.count += 1;
- walk_generic_args(self, path_span, generic_args)
+ walk_generic_args(self, generic_args)
}
fn visit_assoc_constraint(&mut self, constraint: &AssocConstraint) {
self.count += 1;
diff --git a/compiler/rustc_ast_pretty/Cargo.toml b/compiler/rustc_ast_pretty/Cargo.toml
index 5ad8714e9..a3e3e823b 100644
--- a/compiler/rustc_ast_pretty/Cargo.toml
+++ b/compiler/rustc_ast_pretty/Cargo.toml
@@ -4,7 +4,6 @@ version = "0.0.0"
edition = "2021"
[lib]
-doctest = false
[dependencies]
rustc_span = { path = "../rustc_span" }
diff --git a/compiler/rustc_ast_pretty/src/lib.rs b/compiler/rustc_ast_pretty/src/lib.rs
index 79178830b..bf094af5f 100644
--- a/compiler/rustc_ast_pretty/src/lib.rs
+++ b/compiler/rustc_ast_pretty/src/lib.rs
@@ -1,3 +1,5 @@
+#![deny(rustc::untranslatable_diagnostic)]
+#![deny(rustc::diagnostic_outside_of_impl)]
#![feature(associated_type_bounds)]
#![feature(box_patterns)]
#![feature(with_negative_coherence)]
diff --git a/compiler/rustc_ast_pretty/src/pprust/state.rs b/compiler/rustc_ast_pretty/src/pprust/state.rs
index 5eb7bf634..b87c6f78d 100644
--- a/compiler/rustc_ast_pretty/src/pprust/state.rs
+++ b/compiler/rustc_ast_pretty/src/pprust/state.rs
@@ -11,8 +11,8 @@ use rustc_ast::tokenstream::{TokenStream, TokenTree};
use rustc_ast::util::classify;
use rustc_ast::util::comments::{gather_comments, Comment, CommentStyle};
use rustc_ast::util::parser;
-use rustc_ast::{self as ast, BlockCheckMode, PatKind, RangeEnd, RangeSyntax};
-use rustc_ast::{attr, Term};
+use rustc_ast::{self as ast, BlockCheckMode, Mutability, PatKind, RangeEnd, RangeSyntax};
+use rustc_ast::{attr, BindingAnnotation, ByRef, Term};
use rustc_ast::{GenericArg, MacArgs, MacArgsEq};
use rustc_ast::{GenericBound, SelfKind, TraitBoundModifier};
use rustc_ast::{InlineAsmOperand, InlineAsmRegOrRegClass};
@@ -22,6 +22,7 @@ use rustc_span::source_map::{SourceMap, Spanned};
use rustc_span::symbol::{kw, sym, Ident, IdentPrinter, Symbol};
use rustc_span::{BytePos, FileName, Span};
+use rustc_ast::attr::AttrIdGenerator;
use std::borrow::Cow;
pub use self::delimited::IterDelimited;
@@ -107,6 +108,7 @@ pub fn print_crate<'a>(
ann: &'a dyn PpAnn,
is_expanded: bool,
edition: Edition,
+ g: &AttrIdGenerator,
) -> String {
let mut s =
State { s: pp::Printer::new(), comments: Some(Comments::new(sm, filename, input)), ann };
@@ -120,7 +122,7 @@ pub fn print_crate<'a>(
// `#![feature(prelude_import)]`
let pi_nested = attr::mk_nested_word_item(Ident::with_dummy_span(sym::prelude_import));
let list = attr::mk_list_item(Ident::with_dummy_span(sym::feature), vec![pi_nested]);
- let fake_attr = attr::mk_attr_inner(list);
+ let fake_attr = attr::mk_attr_inner(g, list);
s.print_attribute(&fake_attr);
// Currently, in Rust 2018 we don't have `extern crate std;` at the crate
@@ -128,7 +130,7 @@ pub fn print_crate<'a>(
if edition == Edition::Edition2015 {
// `#![no_std]`
let no_std_meta = attr::mk_word_item(Ident::with_dummy_span(sym::no_std));
- let fake_attr = attr::mk_attr_inner(no_std_meta);
+ let fake_attr = attr::mk_attr_inner(g, no_std_meta);
s.print_attribute(&fake_attr);
}
}
@@ -372,7 +374,7 @@ pub trait PrintState<'a>: std::ops::Deref<Target = pp::Printer> + std::ops::Dere
fn print_literal(&mut self, lit: &ast::Lit) {
self.maybe_print_comment(lit.span.lo());
- self.word(lit.token.to_string())
+ self.word(lit.token_lit.to_string())
}
fn print_string(&mut self, st: &str, style: ast::StrStyle) {
@@ -442,12 +444,12 @@ pub trait PrintState<'a>: std::ops::Deref<Target = pp::Printer> + std::ops::Dere
}
self.maybe_print_comment(attr.span.lo());
match attr.kind {
- ast::AttrKind::Normal(ref item, _) => {
+ ast::AttrKind::Normal(ref normal) => {
match attr.style {
ast::AttrStyle::Inner => self.word("#!["),
ast::AttrStyle::Outer => self.word("#["),
}
- self.print_attr_item(&item, attr.span);
+ self.print_attr_item(&normal.item, attr.span);
self.word("]");
}
ast::AttrKind::DocComment(comment_kind, data) => {
@@ -1399,16 +1401,12 @@ impl<'a> State<'a> {
is that it doesn't matter */
match pat.kind {
PatKind::Wild => self.word("_"),
- PatKind::Ident(binding_mode, ident, ref sub) => {
- match binding_mode {
- ast::BindingMode::ByRef(mutbl) => {
- self.word_nbsp("ref");
- self.print_mutability(mutbl, false);
- }
- ast::BindingMode::ByValue(ast::Mutability::Not) => {}
- ast::BindingMode::ByValue(ast::Mutability::Mut) => {
- self.word_nbsp("mut");
- }
+ PatKind::Ident(BindingAnnotation(by_ref, mutbl), ident, ref sub) => {
+ if by_ref == ByRef::Yes {
+ self.word_nbsp("ref");
+ }
+ if mutbl == Mutability::Mut {
+ self.word_nbsp("mut");
}
self.print_ident(ident);
if let Some(ref p) = *sub {
@@ -1487,12 +1485,10 @@ impl<'a> State<'a> {
}
PatKind::Ref(ref inner, mutbl) => {
self.word("&");
- if mutbl == ast::Mutability::Mut {
+ if mutbl == Mutability::Mut {
self.word("mut ");
}
- if let PatKind::Ident(ast::BindingMode::ByValue(ast::Mutability::Mut), ..) =
- inner.kind
- {
+ if let PatKind::Ident(ast::BindingAnnotation::MUT, ..) = inner.kind {
self.popen();
self.print_pat(inner);
self.pclose();
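
(Illustrative sketch, not part of this patch: the `BindingAnnotation(ByRef, Mutability)` pair that replaces the old `BindingMode`, as destructured by the printer above; the associated constant `MUT` is assumed from the `ast::BindingAnnotation::MUT` use in this hunk.)

    use rustc_ast::{BindingAnnotation, ByRef, Mutability};

    let BindingAnnotation(by_ref, mutbl) = BindingAnnotation::MUT;
    assert!(by_ref == ByRef::No);      // `mut x` binds by value, not by `ref`...
    assert!(mutbl == Mutability::Mut); // ...and mutably.
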
diff --git a/compiler/rustc_ast_pretty/src/pprust/state/expr.rs b/compiler/rustc_ast_pretty/src/pprust/state/expr.rs
index ead38caee..bcefa8ce0 100644
--- a/compiler/rustc_ast_pretty/src/pprust/state/expr.rs
+++ b/compiler/rustc_ast_pretty/src/pprust/state/expr.rs
@@ -193,9 +193,13 @@ impl<'a> State<'a> {
self.print_call_post(args)
}
- fn print_expr_method_call(&mut self, segment: &ast::PathSegment, args: &[P<ast::Expr>]) {
- let base_args = &args[1..];
- self.print_expr_maybe_paren(&args[0], parser::PREC_POSTFIX);
+ fn print_expr_method_call(
+ &mut self,
+ segment: &ast::PathSegment,
+ receiver: &ast::Expr,
+ base_args: &[P<ast::Expr>],
+ ) {
+ self.print_expr_maybe_paren(receiver, parser::PREC_POSTFIX);
self.word(".");
self.print_ident(segment.ident);
if let Some(ref args) = segment.args {
@@ -303,8 +307,8 @@ impl<'a> State<'a> {
ast::ExprKind::Call(ref func, ref args) => {
self.print_expr_call(func, &args);
}
- ast::ExprKind::MethodCall(ref segment, ref args, _) => {
- self.print_expr_method_call(segment, &args);
+ ast::ExprKind::MethodCall(ref segment, ref receiver, ref args, _) => {
+ self.print_expr_method_call(segment, &receiver, &args);
}
ast::ExprKind::Binary(op, ref lhs, ref rhs) => {
self.print_expr_binary(op, lhs, rhs);
diff --git a/compiler/rustc_ast_pretty/src/pprust/state/item.rs b/compiler/rustc_ast_pretty/src/pprust/state/item.rs
index f1caf22f3..159853c9e 100644
--- a/compiler/rustc_ast_pretty/src/pprust/state/item.rs
+++ b/compiler/rustc_ast_pretty/src/pprust/state/item.rs
@@ -218,6 +218,8 @@ impl<'a> State<'a> {
ast::ItemKind::GlobalAsm(ref asm) => {
self.head(visibility_qualified(&item.vis, "global_asm!"));
self.print_inline_asm(asm);
+ self.word(";");
+ self.end();
self.end();
}
ast::ItemKind::TyAlias(box ast::TyAlias {
@@ -412,9 +414,9 @@ impl<'a> State<'a> {
pub(crate) fn print_visibility(&mut self, vis: &ast::Visibility) {
match vis.kind {
ast::VisibilityKind::Public => self.word_nbsp("pub"),
- ast::VisibilityKind::Restricted { ref path, .. } => {
+ ast::VisibilityKind::Restricted { ref path, id: _, shorthand } => {
let path = Self::to_string(|s| s.print_path(path, false, 0));
- if path == "crate" || path == "self" || path == "super" {
+ if shorthand && (path == "crate" || path == "self" || path == "super") {
self.word_nbsp(format!("pub({})", path))
} else {
self.word_nbsp(format!("pub(in {})", path))
@@ -514,7 +516,7 @@ impl<'a> State<'a> {
ast::AssocItemKind::Const(def, ty, body) => {
self.print_item_const(ident, None, ty, body.as_deref(), vis, *def);
}
- ast::AssocItemKind::TyAlias(box ast::TyAlias {
+ ast::AssocItemKind::Type(box ast::TyAlias {
defaultness,
generics,
where_clauses,
diff --git a/compiler/rustc_attr/Cargo.toml b/compiler/rustc_attr/Cargo.toml
index ba310a686..6349ddf31 100644
--- a/compiler/rustc_attr/Cargo.toml
+++ b/compiler/rustc_attr/Cargo.toml
@@ -4,7 +4,6 @@ version = "0.0.0"
edition = "2021"
[lib]
-doctest = false
[dependencies]
rustc_ast_pretty = { path = "../rustc_ast_pretty" }
diff --git a/compiler/rustc_attr/src/builtin.rs b/compiler/rustc_attr/src/builtin.rs
index 10a9cfb62..753f62dd5 100644
--- a/compiler/rustc_attr/src/builtin.rs
+++ b/compiler/rustc_attr/src/builtin.rs
@@ -3,7 +3,6 @@
use rustc_ast as ast;
use rustc_ast::{Attribute, Lit, LitKind, MetaItem, MetaItemKind, NestedMetaItem, NodeId};
use rustc_ast_pretty::pprust;
-use rustc_errors::{struct_span_err, Applicability};
use rustc_feature::{find_gated_cfg, is_builtin_attr_name, Features, GatedCfg};
use rustc_macros::HashStable_Generic;
use rustc_session::lint::builtin::UNEXPECTED_CFGS;
@@ -14,6 +13,20 @@ use rustc_span::hygiene::Transparency;
use rustc_span::{symbol::sym, symbol::Symbol, Span};
use std::num::NonZeroU32;
+use crate::session_diagnostics::{self, IncorrectReprFormatGenericCause};
+
+/// The version placeholder that recently stabilized features contain inside the
+/// `since` field of the `#[stable]` attribute.
+///
+/// For more, see [this pull request](https://github.com/rust-lang/rust/pull/100591).
+pub const VERSION_PLACEHOLDER: &str = "CURRENT_RUSTC_VERSION";
+
+pub fn rust_version_symbol() -> Symbol {
+ let version = option_env!("CFG_VERSION").unwrap_or("<current>");
+ let version = version.split(' ').next().unwrap();
+ Symbol::intern(&version)
+}
+
pub fn is_builtin_attr(attr: &Attribute) -> bool {
attr.is_doc_comment() || attr.ident().filter(|ident| is_builtin_attr_name(ident.name)).is_some()
}
@@ -25,46 +38,43 @@ enum AttrError {
NonIdentFeature,
MissingFeature,
MultipleStabilityLevels,
- UnsupportedLiteral(&'static str, /* is_bytestr */ bool),
+ UnsupportedLiteral(UnsupportedLiteralReason, /* is_bytestr */ bool),
+}
+
+pub(crate) enum UnsupportedLiteralReason {
+ Generic,
+ CfgString,
+ DeprecatedString,
+ DeprecatedKvPair,
}
fn handle_errors(sess: &ParseSess, span: Span, error: AttrError) {
- let diag = &sess.span_diagnostic;
match error {
AttrError::MultipleItem(item) => {
- struct_span_err!(diag, span, E0538, "multiple '{}' items", item).emit();
+ sess.emit_err(session_diagnostics::MultipleItem { span, item });
}
AttrError::UnknownMetaItem(item, expected) => {
- let expected = expected.iter().map(|name| format!("`{}`", name)).collect::<Vec<_>>();
- struct_span_err!(diag, span, E0541, "unknown meta item '{}'", item)
- .span_label(span, format!("expected one of {}", expected.join(", ")))
- .emit();
+ sess.emit_err(session_diagnostics::UnknownMetaItem { span, item, expected });
}
AttrError::MissingSince => {
- struct_span_err!(diag, span, E0542, "missing 'since'").emit();
+ sess.emit_err(session_diagnostics::MissingSince { span });
}
AttrError::NonIdentFeature => {
- struct_span_err!(diag, span, E0546, "'feature' is not an identifier").emit();
+ sess.emit_err(session_diagnostics::NonIdentFeature { span });
}
AttrError::MissingFeature => {
- struct_span_err!(diag, span, E0546, "missing 'feature'").emit();
+ sess.emit_err(session_diagnostics::MissingFeature { span });
}
AttrError::MultipleStabilityLevels => {
- struct_span_err!(diag, span, E0544, "multiple stability levels").emit();
+ sess.emit_err(session_diagnostics::MultipleStabilityLevels { span });
}
- AttrError::UnsupportedLiteral(msg, is_bytestr) => {
- let mut err = struct_span_err!(diag, span, E0565, "{}", msg);
- if is_bytestr {
- if let Ok(lint_str) = sess.source_map().span_to_snippet(span) {
- err.span_suggestion(
- span,
- "consider removing the prefix",
- &lint_str[1..],
- Applicability::MaybeIncorrect,
- );
- }
- }
- err.emit();
+ AttrError::UnsupportedLiteral(reason, is_bytestr) => {
+ sess.emit_err(session_diagnostics::UnsupportedLiteral {
+ span,
+ reason,
+ is_bytestr,
+ start_point_span: sess.source_map().start_point(span),
+ });
}
}
}
@@ -131,6 +141,14 @@ impl ConstStability {
}
}
+/// Represents the `#[rustc_default_body_unstable]` attribute.
+#[derive(Encodable, Decodable, Copy, Clone, Debug, PartialEq, Eq, Hash)]
+#[derive(HashStable_Generic)]
+pub struct DefaultBodyStability {
+ pub level: StabilityLevel,
+ pub feature: Symbol,
+}
+
/// The available stability levels.
#[derive(Encodable, Decodable, PartialEq, Copy, Clone, Debug, Eq, Hash)]
#[derive(HashStable_Generic)]
@@ -214,7 +232,8 @@ pub fn find_stability(
sess: &Session,
attrs: &[Attribute],
item_sp: Span,
-) -> (Option<(Stability, Span)>, Option<(ConstStability, Span)>) {
+) -> (Option<(Stability, Span)>, Option<(ConstStability, Span)>, Option<(DefaultBodyStability, Span)>)
+{
find_stability_generic(sess, attrs.iter(), item_sp)
}
@@ -222,7 +241,7 @@ fn find_stability_generic<'a, I>(
sess: &Session,
attrs_iter: I,
item_sp: Span,
-) -> (Option<(Stability, Span)>, Option<(ConstStability, Span)>)
+) -> (Option<(Stability, Span)>, Option<(ConstStability, Span)>, Option<(DefaultBodyStability, Span)>)
where
I: Iterator<Item = &'a Attribute>,
{
@@ -230,11 +249,10 @@ where
let mut stab: Option<(Stability, Span)> = None;
let mut const_stab: Option<(ConstStability, Span)> = None;
+ let mut body_stab: Option<(DefaultBodyStability, Span)> = None;
let mut promotable = false;
let mut allowed_through_unstable_modules = false;
- let diagnostic = &sess.parse_sess.span_diagnostic;
-
'outer: for attr in attrs_iter {
if ![
sym::rustc_const_unstable,
@@ -243,6 +261,7 @@ where
sym::stable,
sym::rustc_promotable,
sym::rustc_allowed_through_unstable_modules,
+ sym::rustc_default_body_unstable,
]
.iter()
.any(|&s| attr.has_name(s))
@@ -273,14 +292,14 @@ where
*item = Some(v);
true
} else {
- struct_span_err!(diagnostic, meta.span, E0539, "incorrect meta item").emit();
+ sess.emit_err(session_diagnostics::IncorrectMetaItem { span: meta.span });
false
}
};
let meta_name = meta.name_or_empty();
match meta_name {
- sym::rustc_const_unstable | sym::unstable => {
+ sym::rustc_const_unstable | sym::rustc_default_body_unstable | sym::unstable => {
if meta_name == sym::unstable && stab.is_some() {
handle_errors(
&sess.parse_sess,
@@ -295,6 +314,13 @@ where
AttrError::MultipleStabilityLevels,
);
break;
+ } else if meta_name == sym::rustc_default_body_unstable && body_stab.is_some() {
+ handle_errors(
+ &sess.parse_sess,
+ attr.span,
+ AttrError::MultipleStabilityLevels,
+ );
+ break;
}
let mut feature = None;
@@ -308,7 +334,7 @@ where
handle_errors(
&sess.parse_sess,
meta.span(),
- AttrError::UnsupportedLiteral("unsupported literal", false),
+ AttrError::UnsupportedLiteral(UnsupportedLiteralReason::Generic, false),
);
continue 'outer;
};
@@ -332,39 +358,28 @@ where
// is a name/value pair string literal.
issue_num = match issue.unwrap().as_str() {
"none" => None,
- issue => {
- let emit_diag = |msg: &str| {
- struct_span_err!(
- diagnostic,
- mi.span,
- E0545,
- "`issue` must be a non-zero numeric string \
- or \"none\"",
- )
- .span_label(mi.name_value_literal_span().unwrap(), msg)
- .emit();
- };
- match issue.parse() {
- Ok(0) => {
- emit_diag(
- "`issue` must not be \"0\", \
- use \"none\" instead",
- );
- continue 'outer;
- }
- Ok(num) => NonZeroU32::new(num),
- Err(err) => {
- emit_diag(&err.to_string());
- continue 'outer;
- }
+ issue => match issue.parse::<NonZeroU32>() {
+ Ok(num) => Some(num),
+ Err(err) => {
+ sess.emit_err(
+ session_diagnostics::InvalidIssueString {
+ span: mi.span,
+ cause: session_diagnostics::InvalidIssueStringCause::from_int_error_kind(
+ mi.name_value_literal_span().unwrap(),
+ err.kind(),
+ ),
+ },
+ );
+ continue 'outer;
}
- }
+ },
};
}
sym::soft => {
if !mi.is_word() {
- let msg = "`soft` should not have any arguments";
- sess.parse_sess.span_diagnostic.span_err(mi.span, msg);
+ sess.emit_err(session_diagnostics::SoftNoArgs {
+ span: mi.span,
+ });
}
is_soft = true;
}
@@ -405,11 +420,16 @@ where
};
if sym::unstable == meta_name {
stab = Some((Stability { level, feature }, attr.span));
- } else {
+ } else if sym::rustc_const_unstable == meta_name {
const_stab = Some((
ConstStability { level, feature, promotable: false },
attr.span,
));
+ } else if sym::rustc_default_body_unstable == meta_name {
+ body_stab =
+ Some((DefaultBodyStability { level, feature }, attr.span));
+ } else {
+ unreachable!("Unknown stability attribute {meta_name}");
}
}
(None, _, _) => {
@@ -417,8 +437,7 @@ where
continue;
}
_ => {
- struct_span_err!(diagnostic, attr.span, E0547, "missing 'issue'")
- .emit();
+ sess.emit_err(session_diagnostics::MissingIssue { span: attr.span });
continue;
}
}
@@ -471,13 +490,20 @@ where
handle_errors(
&sess.parse_sess,
lit.span,
- AttrError::UnsupportedLiteral("unsupported literal", false),
+ AttrError::UnsupportedLiteral(
+ UnsupportedLiteralReason::Generic,
+ false,
+ ),
);
continue 'outer;
}
}
}
+ if let Some(s) = since && s.as_str() == VERSION_PLACEHOLDER {
+ since = Some(rust_version_symbol());
+ }
+
match (feature, since) {
(Some(feature), Some(since)) => {
let level = Stable { since, allowed_through_unstable_modules: false };
@@ -510,14 +536,7 @@ where
if let Some((ref mut stab, _)) = const_stab {
stab.promotable = promotable;
} else {
- struct_span_err!(
- diagnostic,
- item_sp,
- E0717,
- "`rustc_promotable` attribute must be paired with either a `rustc_const_unstable` \
- or a `rustc_const_stable` attribute"
- )
- .emit();
+ sess.emit_err(session_diagnostics::RustcPromotablePairing { span: item_sp });
}
}
@@ -532,17 +551,11 @@ where
{
*allowed_through_unstable_modules = true;
} else {
- struct_span_err!(
- diagnostic,
- item_sp,
- E0789,
- "`rustc_allowed_through_unstable_modules` attribute must be paired with a `stable` attribute"
- )
- .emit();
+ sess.emit_err(session_diagnostics::RustcAllowedUnstablePairing { span: item_sp });
}
}
- (stab, const_stab)
+ (stab, const_stab, body_stab)
}
pub fn find_crate_name(sess: &Session, attrs: &[Attribute]) -> Option<Symbol> {
@@ -652,25 +665,18 @@ pub fn eval_condition(
NestedMetaItem::Literal(Lit { span, .. })
| NestedMetaItem::MetaItem(MetaItem { span, .. }),
] => {
- sess.span_diagnostic
- .struct_span_err(*span, "expected a version literal")
- .emit();
+ sess.emit_err(session_diagnostics::ExpectedVersionLiteral { span: *span });
return false;
}
[..] => {
- sess.span_diagnostic
- .struct_span_err(cfg.span, "expected single version literal")
- .emit();
+ sess.emit_err(session_diagnostics::ExpectedSingleVersionLiteral {
+ span: cfg.span,
+ });
return false;
}
};
let Some(min_version) = parse_version(min_version.as_str(), false) else {
- sess.span_diagnostic
- .struct_span_warn(
- *span,
- "unknown version literal format, assuming it refers to a future version",
- )
- .emit();
+ sess.emit_warning(session_diagnostics::UnknownVersionLiteral { span: *span });
return false;
};
let rustc_version = parse_version(env!("CFG_RELEASE"), true).unwrap();
@@ -688,7 +694,7 @@ pub fn eval_condition(
handle_errors(
sess,
mi.span(),
- AttrError::UnsupportedLiteral("unsupported literal", false),
+ AttrError::UnsupportedLiteral(UnsupportedLiteralReason::Generic, false),
);
return false;
}
@@ -713,13 +719,9 @@ pub fn eval_condition(
}),
sym::not => {
if mis.len() != 1 {
- struct_span_err!(
- sess.span_diagnostic,
- cfg.span,
- E0536,
- "expected 1 cfg-pattern"
- )
- .emit();
+ sess.emit_err(session_diagnostics::ExpectedOneCfgPattern {
+ span: cfg.span,
+ });
return false;
}
@@ -745,21 +747,16 @@ pub fn eval_condition(
})
}
_ => {
- struct_span_err!(
- sess.span_diagnostic,
- cfg.span,
- E0537,
- "invalid predicate `{}`",
- pprust::path_to_string(&cfg.path)
- )
- .emit();
+ sess.emit_err(session_diagnostics::InvalidPredicate {
+ span: cfg.span,
+ predicate: pprust::path_to_string(&cfg.path),
+ });
false
}
}
}
ast::MetaItemKind::Word | MetaItemKind::NameValue(..) if cfg.path.segments.len() != 1 => {
- sess.span_diagnostic
- .span_err(cfg.path.span, "`cfg` predicate key must be an identifier");
+ sess.emit_err(session_diagnostics::CfgPredicateIdentifier { span: cfg.path.span });
true
}
MetaItemKind::NameValue(ref lit) if !lit.kind.is_str() => {
@@ -767,7 +764,7 @@ pub fn eval_condition(
sess,
lit.span,
AttrError::UnsupportedLiteral(
- "literal in `cfg` predicate value must be a string",
+ UnsupportedLiteralReason::CfgString,
lit.kind.is_bytestr(),
),
);
@@ -811,7 +808,6 @@ where
I: Iterator<Item = &'a Attribute>,
{
let mut depr: Option<(Deprecation, Span)> = None;
- let diagnostic = &sess.parse_sess.span_diagnostic;
let is_rustc = sess.features_untracked().staged_api;
'outer: for attr in attrs_iter {
@@ -847,14 +843,14 @@ where
&sess.parse_sess,
lit.span,
AttrError::UnsupportedLiteral(
- "literal in `deprecated` \
- value must be a string",
+ UnsupportedLiteralReason::DeprecatedString,
lit.kind.is_bytestr(),
),
);
} else {
- struct_span_err!(diagnostic, meta.span, E0551, "incorrect meta item")
- .emit();
+ sess.emit_err(session_diagnostics::IncorrectMetaItem2 {
+ span: meta.span,
+ });
}
false
@@ -876,14 +872,11 @@ where
}
sym::suggestion => {
if !sess.features_untracked().deprecated_suggestion {
- let mut diag = sess.struct_span_err(
- mi.span,
- "suggestions on deprecated items are unstable",
- );
- if sess.is_nightly_build() {
- diag.help("add `#![feature(deprecated_suggestion)]` to the crate root");
- }
- diag.note("see #94785 for more details").emit();
+ sess.emit_err(session_diagnostics::DeprecatedItemSuggestion {
+ span: mi.span,
+ is_nightly: sess.is_nightly_build().then_some(()),
+ details: (),
+ });
}
if !get(mi, &mut suggestion) {
@@ -911,7 +904,7 @@ where
&sess.parse_sess,
lit.span,
AttrError::UnsupportedLiteral(
- "item in `deprecated` must be a key/value pair",
+ UnsupportedLiteralReason::DeprecatedKvPair,
false,
),
);
@@ -929,7 +922,7 @@ where
}
if note.is_none() {
- struct_span_err!(diagnostic, attr.span, E0543, "missing 'note'").emit();
+ sess.emit_err(session_diagnostics::MissingNote { span: attr.span });
continue;
}
}
@@ -999,19 +992,9 @@ pub fn parse_repr_attr(sess: &Session, attr: &Attribute) -> Vec<ReprAttr> {
sym::simd => Some(ReprSimd),
sym::transparent => Some(ReprTransparent),
sym::align => {
- let mut err = struct_span_err!(
- diagnostic,
- item.span(),
- E0589,
- "invalid `repr(align)` attribute: `align` needs an argument"
- );
- err.span_suggestion(
- item.span(),
- "supply an argument here",
- "align(...)",
- Applicability::HasPlaceholders,
- );
- err.emit();
+ sess.emit_err(session_diagnostics::InvalidReprAlignNeedArg {
+ span: item.span(),
+ });
recognised = true;
None
}
@@ -1040,109 +1023,64 @@ pub fn parse_repr_attr(sess: &Session, attr: &Attribute) -> Vec<ReprAttr> {
|| int_type_of_word(name).is_some()
{
recognised = true;
- struct_span_err!(
- diagnostic,
- item.span(),
- E0552,
- "invalid representation hint: `{}` does not take a parenthesized argument list",
- name.to_ident_string(),
- ).emit();
+ sess.emit_err(session_diagnostics::InvalidReprHintNoParen {
+ span: item.span(),
+ name: name.to_ident_string(),
+ });
}
if let Some(literal_error) = literal_error {
- struct_span_err!(
- diagnostic,
- item.span(),
- E0589,
- "invalid `repr({})` attribute: {}",
- name.to_ident_string(),
- literal_error
- )
- .emit();
+ sess.emit_err(session_diagnostics::InvalidReprGeneric {
+ span: item.span(),
+ repr_arg: name.to_ident_string(),
+ error_part: literal_error,
+ });
}
} else if let Some(meta_item) = item.meta_item() {
if let MetaItemKind::NameValue(ref value) = meta_item.kind {
if meta_item.has_name(sym::align) || meta_item.has_name(sym::packed) {
let name = meta_item.name_or_empty().to_ident_string();
recognised = true;
- let mut err = struct_span_err!(
- diagnostic,
- item.span(),
- E0693,
- "incorrect `repr({})` attribute format",
- name,
- );
- match value.kind {
- ast::LitKind::Int(int, ast::LitIntType::Unsuffixed) => {
- err.span_suggestion(
- item.span(),
- "use parentheses instead",
- format!("{}({})", name, int),
- Applicability::MachineApplicable,
- );
- }
- ast::LitKind::Str(s, _) => {
- err.span_suggestion(
- item.span(),
- "use parentheses instead",
- format!("{}({})", name, s),
- Applicability::MachineApplicable,
- );
- }
- _ => {}
- }
- err.emit();
- } else {
- if matches!(
- meta_item.name_or_empty(),
- sym::C | sym::simd | sym::transparent
- ) || int_type_of_word(meta_item.name_or_empty()).is_some()
- {
- recognised = true;
- struct_span_err!(
- diagnostic,
- meta_item.span,
- E0552,
- "invalid representation hint: `{}` does not take a value",
- meta_item.name_or_empty().to_ident_string(),
- )
- .emit();
- }
+ sess.emit_err(session_diagnostics::IncorrectReprFormatGeneric {
+ span: item.span(),
+ repr_arg: &name,
+ cause: IncorrectReprFormatGenericCause::from_lit_kind(
+ item.span(),
+ &value.kind,
+ &name,
+ ),
+ });
+ } else if matches!(
+ meta_item.name_or_empty(),
+ sym::C | sym::simd | sym::transparent
+ ) || int_type_of_word(meta_item.name_or_empty()).is_some()
+ {
+ recognised = true;
+ sess.emit_err(session_diagnostics::InvalidReprHintNoValue {
+ span: meta_item.span,
+ name: meta_item.name_or_empty().to_ident_string(),
+ });
}
} else if let MetaItemKind::List(_) = meta_item.kind {
if meta_item.has_name(sym::align) {
recognised = true;
- struct_span_err!(
- diagnostic,
- meta_item.span,
- E0693,
- "incorrect `repr(align)` attribute format: \
- `align` takes exactly one argument in parentheses"
- )
- .emit();
+ sess.emit_err(session_diagnostics::IncorrectReprFormatAlignOneArg {
+ span: meta_item.span,
+ });
} else if meta_item.has_name(sym::packed) {
recognised = true;
- struct_span_err!(
- diagnostic,
- meta_item.span,
- E0552,
- "incorrect `repr(packed)` attribute format: \
- `packed` takes exactly one parenthesized argument, \
- or no parentheses at all"
- )
- .emit();
+ sess.emit_err(session_diagnostics::IncorrectReprFormatPackedOneOrZeroArg {
+ span: meta_item.span,
+ });
} else if matches!(
meta_item.name_or_empty(),
sym::C | sym::simd | sym::transparent
) || int_type_of_word(meta_item.name_or_empty()).is_some()
{
recognised = true;
- struct_span_err!(
- diagnostic,
- meta_item.span,
- E0552,
- "invalid representation hint: `{}` does not take a parenthesized argument list",
- meta_item.name_or_empty().to_ident_string(),
- ).emit();
+ sess.emit_err(session_diagnostics::InvalidReprHintNoParen {
+ span: meta_item.span,
+ name: meta_item.name_or_empty().to_ident_string(),
+ });
}
}
}
@@ -1239,10 +1177,10 @@ fn allow_unstable<'a>(
let list = attrs
.filter_map(move |attr| {
attr.meta_item_list().or_else(|| {
- sess.diagnostic().span_err(
- attr.span,
- &format!("`{}` expects a list of feature names", symbol.to_ident_string()),
- );
+ sess.emit_err(session_diagnostics::ExpectsFeatureList {
+ span: attr.span,
+ name: symbol.to_ident_string(),
+ });
None
})
})
@@ -1251,10 +1189,10 @@ fn allow_unstable<'a>(
list.into_iter().filter_map(move |it| {
let name = it.ident().map(|ident| ident.name);
if name.is_none() {
- sess.diagnostic().span_err(
- it.span(),
- &format!("`{}` expects feature names", symbol.to_ident_string()),
- );
+ sess.emit_err(session_diagnostics::ExpectsFeatures {
+ span: it.span(),
+ name: symbol.to_ident_string(),
+ });
}
name
})
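
(Illustrative sketch, not part of this patch: the intended use of `VERSION_PLACEHOLDER`; `find_stability_generic` above rewrites the placeholder to `rust_version_symbol()` when the attribute is parsed. The feature name is hypothetical.)

    #[stable(feature = "hypothetical_new_api", since = "CURRENT_RUSTC_VERSION")]
    pub fn hypothetical_new_api() {}
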
diff --git a/compiler/rustc_attr/src/lib.rs b/compiler/rustc_attr/src/lib.rs
index c3f9f0cf3..4580ffcc6 100644
--- a/compiler/rustc_attr/src/lib.rs
+++ b/compiler/rustc_attr/src/lib.rs
@@ -5,12 +5,14 @@
//! to this crate.
#![feature(let_chains)]
-#![feature(let_else)]
+#![deny(rustc::untranslatable_diagnostic)]
+#![deny(rustc::diagnostic_outside_of_impl)]
#[macro_use]
extern crate rustc_macros;
mod builtin;
+mod session_diagnostics;
pub use builtin::*;
pub use IntType::*;
diff --git a/compiler/rustc_attr/src/session_diagnostics.rs b/compiler/rustc_attr/src/session_diagnostics.rs
new file mode 100644
index 000000000..edccfa1c8
--- /dev/null
+++ b/compiler/rustc_attr/src/session_diagnostics.rs
@@ -0,0 +1,393 @@
+use std::num::IntErrorKind;
+
+use rustc_ast as ast;
+use rustc_errors::{
+ error_code, fluent, Applicability, DiagnosticBuilder, ErrorGuaranteed, Handler, IntoDiagnostic,
+};
+use rustc_macros::Diagnostic;
+use rustc_span::{Span, Symbol};
+
+use crate::UnsupportedLiteralReason;
+
+#[derive(Diagnostic)]
+#[diag(attr_expected_one_cfg_pattern, code = "E0536")]
+pub(crate) struct ExpectedOneCfgPattern {
+ #[primary_span]
+ pub span: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(attr_invalid_predicate, code = "E0537")]
+pub(crate) struct InvalidPredicate {
+ #[primary_span]
+ pub span: Span,
+
+ pub predicate: String,
+}
+
+#[derive(Diagnostic)]
+#[diag(attr_multiple_item, code = "E0538")]
+pub(crate) struct MultipleItem {
+ #[primary_span]
+ pub span: Span,
+
+ pub item: String,
+}
+
+#[derive(Diagnostic)]
+#[diag(attr_incorrect_meta_item, code = "E0539")]
+pub(crate) struct IncorrectMetaItem {
+ #[primary_span]
+ pub span: Span,
+}
+
+// Error code: E0541
+pub(crate) struct UnknownMetaItem<'a> {
+ pub span: Span,
+ pub item: String,
+ pub expected: &'a [&'a str],
+}
+
+// Manual implementation to be able to format `expected` items correctly.
+impl<'a> IntoDiagnostic<'a> for UnknownMetaItem<'_> {
+ fn into_diagnostic(self, handler: &'a Handler) -> DiagnosticBuilder<'a, ErrorGuaranteed> {
+ let expected = self.expected.iter().map(|name| format!("`{}`", name)).collect::<Vec<_>>();
+ let mut diag = handler.struct_span_err_with_code(
+ self.span,
+ fluent::attr_unknown_meta_item,
+ error_code!(E0541),
+ );
+ diag.set_arg("item", self.item);
+ diag.set_arg("expected", expected.join(", "));
+ diag.span_label(self.span, fluent::label);
+ diag
+ }
+}
+
+#[derive(Diagnostic)]
+#[diag(attr_missing_since, code = "E0542")]
+pub(crate) struct MissingSince {
+ #[primary_span]
+ pub span: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(attr_missing_note, code = "E0543")]
+pub(crate) struct MissingNote {
+ #[primary_span]
+ pub span: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(attr_multiple_stability_levels, code = "E0544")]
+pub(crate) struct MultipleStabilityLevels {
+ #[primary_span]
+ pub span: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(attr_invalid_issue_string, code = "E0545")]
+pub(crate) struct InvalidIssueString {
+ #[primary_span]
+ pub span: Span,
+
+ #[subdiagnostic]
+ pub cause: Option<InvalidIssueStringCause>,
+}
+
+// The error kinds of `IntErrorKind` are duplicated here in order to allow the messages to be
+// translatable.
+#[derive(Subdiagnostic)]
+pub(crate) enum InvalidIssueStringCause {
+ #[label(must_not_be_zero)]
+ MustNotBeZero {
+ #[primary_span]
+ span: Span,
+ },
+
+ #[label(empty)]
+ Empty {
+ #[primary_span]
+ span: Span,
+ },
+
+ #[label(invalid_digit)]
+ InvalidDigit {
+ #[primary_span]
+ span: Span,
+ },
+
+ #[label(pos_overflow)]
+ PosOverflow {
+ #[primary_span]
+ span: Span,
+ },
+
+ #[label(neg_overflow)]
+ NegOverflow {
+ #[primary_span]
+ span: Span,
+ },
+}
+
+impl InvalidIssueStringCause {
+ pub fn from_int_error_kind(span: Span, kind: &IntErrorKind) -> Option<Self> {
+ match kind {
+ IntErrorKind::Empty => Some(Self::Empty { span }),
+ IntErrorKind::InvalidDigit => Some(Self::InvalidDigit { span }),
+ IntErrorKind::PosOverflow => Some(Self::PosOverflow { span }),
+ IntErrorKind::NegOverflow => Some(Self::NegOverflow { span }),
+ IntErrorKind::Zero => Some(Self::MustNotBeZero { span }),
+ _ => None,
+ }
+ }
+}
+
+#[derive(Diagnostic)]
+#[diag(attr_missing_feature, code = "E0546")]
+pub(crate) struct MissingFeature {
+ #[primary_span]
+ pub span: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(attr_non_ident_feature, code = "E0546")]
+pub(crate) struct NonIdentFeature {
+ #[primary_span]
+ pub span: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(attr_missing_issue, code = "E0547")]
+pub(crate) struct MissingIssue {
+ #[primary_span]
+ pub span: Span,
+}
+
+// FIXME: This diagnostic is identical to `IncorrectMetaItem`, barring the error code. Consider
+// changing this to `IncorrectMetaItem`. See #51489.
+#[derive(Diagnostic)]
+#[diag(attr_incorrect_meta_item, code = "E0551")]
+pub(crate) struct IncorrectMetaItem2 {
+ #[primary_span]
+ pub span: Span,
+}
+
+// FIXME: Why is this the same error code as `InvalidReprHintNoParen` and `InvalidReprHintNoValue`?
+// It is more similar to `IncorrectReprFormatGeneric`.
+#[derive(Diagnostic)]
+#[diag(attr_incorrect_repr_format_packed_one_or_zero_arg, code = "E0552")]
+pub(crate) struct IncorrectReprFormatPackedOneOrZeroArg {
+ #[primary_span]
+ pub span: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(attr_invalid_repr_hint_no_paren, code = "E0552")]
+pub(crate) struct InvalidReprHintNoParen {
+ #[primary_span]
+ pub span: Span,
+
+ pub name: String,
+}
+
+#[derive(Diagnostic)]
+#[diag(attr_invalid_repr_hint_no_value, code = "E0552")]
+pub(crate) struct InvalidReprHintNoValue {
+ #[primary_span]
+ pub span: Span,
+
+ pub name: String,
+}
+
+// Error code: E0565
+pub(crate) struct UnsupportedLiteral {
+ pub span: Span,
+ pub reason: UnsupportedLiteralReason,
+ pub is_bytestr: bool,
+ pub start_point_span: Span,
+}
+
+impl<'a> IntoDiagnostic<'a> for UnsupportedLiteral {
+ fn into_diagnostic(self, handler: &'a Handler) -> DiagnosticBuilder<'a, ErrorGuaranteed> {
+ let mut diag = handler.struct_span_err_with_code(
+ self.span,
+ match self.reason {
+ UnsupportedLiteralReason::Generic => fluent::attr_unsupported_literal_generic,
+ UnsupportedLiteralReason::CfgString => fluent::attr_unsupported_literal_cfg_string,
+ UnsupportedLiteralReason::DeprecatedString => {
+ fluent::attr_unsupported_literal_deprecated_string
+ }
+ UnsupportedLiteralReason::DeprecatedKvPair => {
+ fluent::attr_unsupported_literal_deprecated_kv_pair
+ }
+ },
+ error_code!(E0565),
+ );
+ if self.is_bytestr {
+ diag.span_suggestion(
+ self.start_point_span,
+ fluent::attr_unsupported_literal_suggestion,
+ "",
+ Applicability::MaybeIncorrect,
+ );
+ }
+ diag
+ }
+}
+
+#[derive(Diagnostic)]
+#[diag(attr_invalid_repr_align_need_arg, code = "E0589")]
+pub(crate) struct InvalidReprAlignNeedArg {
+ #[primary_span]
+ #[suggestion(code = "align(...)", applicability = "has-placeholders")]
+ pub span: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(attr_invalid_repr_generic, code = "E0589")]
+pub(crate) struct InvalidReprGeneric<'a> {
+ #[primary_span]
+ pub span: Span,
+
+ pub repr_arg: String,
+ pub error_part: &'a str,
+}
+
+#[derive(Diagnostic)]
+#[diag(attr_incorrect_repr_format_align_one_arg, code = "E0693")]
+pub(crate) struct IncorrectReprFormatAlignOneArg {
+ #[primary_span]
+ pub span: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(attr_incorrect_repr_format_generic, code = "E0693")]
+pub(crate) struct IncorrectReprFormatGeneric<'a> {
+ #[primary_span]
+ pub span: Span,
+
+ pub repr_arg: &'a str,
+
+ #[subdiagnostic]
+ pub cause: Option<IncorrectReprFormatGenericCause<'a>>,
+}
+
+#[derive(Subdiagnostic)]
+pub(crate) enum IncorrectReprFormatGenericCause<'a> {
+ #[suggestion(suggestion, code = "{name}({int})", applicability = "machine-applicable")]
+ Int {
+ #[primary_span]
+ span: Span,
+
+ #[skip_arg]
+ name: &'a str,
+
+ #[skip_arg]
+ int: u128,
+ },
+
+ #[suggestion(suggestion, code = "{name}({symbol})", applicability = "machine-applicable")]
+ Symbol {
+ #[primary_span]
+ span: Span,
+
+ #[skip_arg]
+ name: &'a str,
+
+ #[skip_arg]
+ symbol: Symbol,
+ },
+}
+
+impl<'a> IncorrectReprFormatGenericCause<'a> {
+ pub fn from_lit_kind(span: Span, kind: &ast::LitKind, name: &'a str) -> Option<Self> {
+ match kind {
+ ast::LitKind::Int(int, ast::LitIntType::Unsuffixed) => {
+ Some(Self::Int { span, name, int: *int })
+ }
+ ast::LitKind::Str(symbol, _) => Some(Self::Symbol { span, name, symbol: *symbol }),
+ _ => None,
+ }
+ }
+}
+
+#[derive(Diagnostic)]
+#[diag(attr_rustc_promotable_pairing, code = "E0717")]
+pub(crate) struct RustcPromotablePairing {
+ #[primary_span]
+ pub span: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(attr_rustc_allowed_unstable_pairing, code = "E0789")]
+pub(crate) struct RustcAllowedUnstablePairing {
+ #[primary_span]
+ pub span: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(attr_cfg_predicate_identifier)]
+pub(crate) struct CfgPredicateIdentifier {
+ #[primary_span]
+ pub span: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(attr_deprecated_item_suggestion)]
+pub(crate) struct DeprecatedItemSuggestion {
+ #[primary_span]
+ pub span: Span,
+
+ #[help]
+ pub is_nightly: Option<()>,
+
+ #[note]
+ pub details: (),
+}
+
+#[derive(Diagnostic)]
+#[diag(attr_expected_single_version_literal)]
+pub(crate) struct ExpectedSingleVersionLiteral {
+ #[primary_span]
+ pub span: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(attr_expected_version_literal)]
+pub(crate) struct ExpectedVersionLiteral {
+ #[primary_span]
+ pub span: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(attr_expects_feature_list)]
+pub(crate) struct ExpectsFeatureList {
+ #[primary_span]
+ pub span: Span,
+
+ pub name: String,
+}
+
+#[derive(Diagnostic)]
+#[diag(attr_expects_features)]
+pub(crate) struct ExpectsFeatures {
+ #[primary_span]
+ pub span: Span,
+
+ pub name: String,
+}
+
+#[derive(Diagnostic)]
+#[diag(attr_soft_no_args)]
+pub(crate) struct SoftNoArgs {
+ #[primary_span]
+ pub span: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(attr_unknown_version_literal)]
+pub(crate) struct UnknownVersionLiteral {
+ #[primary_span]
+ pub span: Span,
+}
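
(Illustrative sketch, not part of this patch: the caller side of `InvalidIssueString`, mirroring the `builtin.rs` hunk earlier in this diff, assuming `sess: &ParseSess` and a meta item `mi` in scope.)

    if let Err(err) = "0".parse::<std::num::NonZeroU32>() {
        sess.emit_err(InvalidIssueString {
            span: mi.span,
            // `IntErrorKind::Zero` maps to the `must_not_be_zero` label above.
            cause: InvalidIssueStringCause::from_int_error_kind(
                mi.name_value_literal_span().unwrap(),
                err.kind(),
            ),
        });
    }
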
diff --git a/compiler/rustc_borrowck/Cargo.toml b/compiler/rustc_borrowck/Cargo.toml
index fbf628e86..87c113f3e 100644
--- a/compiler/rustc_borrowck/Cargo.toml
+++ b/compiler/rustc_borrowck/Cargo.toml
@@ -4,7 +4,6 @@ version = "0.0.0"
edition = "2021"
[lib]
-doctest = false
[dependencies]
either = "1.5.0"
diff --git a/compiler/rustc_borrowck/src/constraint_generation.rs b/compiler/rustc_borrowck/src/constraint_generation.rs
index 5e9cec5c3..f185e402f 100644
--- a/compiler/rustc_borrowck/src/constraint_generation.rs
+++ b/compiler/rustc_borrowck/src/constraint_generation.rs
@@ -14,8 +14,8 @@ use crate::{
places_conflict, region_infer::values::LivenessValues,
};
-pub(super) fn generate_constraints<'cx, 'tcx>(
- infcx: &InferCtxt<'cx, 'tcx>,
+pub(super) fn generate_constraints<'tcx>(
+ infcx: &InferCtxt<'tcx>,
liveness_constraints: &mut LivenessValues<RegionVid>,
all_facts: &mut Option<AllFacts>,
location_table: &LocationTable,
@@ -31,14 +31,14 @@ pub(super) fn generate_constraints<'cx, 'tcx>(
body,
};
- for (bb, data) in body.basic_blocks().iter_enumerated() {
+ for (bb, data) in body.basic_blocks.iter_enumerated() {
cg.visit_basic_block_data(bb, data);
}
}
/// 'cg = the duration of the constraint generation process itself.
-struct ConstraintGeneration<'cg, 'cx, 'tcx> {
- infcx: &'cg InferCtxt<'cx, 'tcx>,
+struct ConstraintGeneration<'cg, 'tcx> {
+ infcx: &'cg InferCtxt<'tcx>,
all_facts: &'cg mut Option<AllFacts>,
location_table: &'cg LocationTable,
liveness_constraints: &'cg mut LivenessValues<RegionVid>,
@@ -46,7 +46,7 @@ struct ConstraintGeneration<'cg, 'cx, 'tcx> {
body: &'cg Body<'tcx>,
}
-impl<'cg, 'cx, 'tcx> Visitor<'tcx> for ConstraintGeneration<'cg, 'cx, 'tcx> {
+impl<'cg, 'tcx> Visitor<'tcx> for ConstraintGeneration<'cg, 'tcx> {
fn visit_basic_block_data(&mut self, bb: BasicBlock, data: &BasicBlockData<'tcx>) {
self.super_basic_block_data(bb, data);
}
@@ -156,7 +156,7 @@ impl<'cg, 'cx, 'tcx> Visitor<'tcx> for ConstraintGeneration<'cg, 'cx, 'tcx> {
}
}
-impl<'cx, 'cg, 'tcx> ConstraintGeneration<'cx, 'cg, 'tcx> {
+impl<'cx, 'tcx> ConstraintGeneration<'cx, 'tcx> {
/// Some variable with type `live_ty` is "regular live" at
/// `location` -- i.e., it may be used later. This means that all
/// regions appearing in the type `live_ty` must be live at
diff --git a/compiler/rustc_borrowck/src/constraints/mod.rs b/compiler/rustc_borrowck/src/constraints/mod.rs
index a504d0c91..df0412813 100644
--- a/compiler/rustc_borrowck/src/constraints/mod.rs
+++ b/compiler/rustc_borrowck/src/constraints/mod.rs
@@ -21,10 +21,7 @@ pub(crate) struct OutlivesConstraintSet<'tcx> {
impl<'tcx> OutlivesConstraintSet<'tcx> {
pub(crate) fn push(&mut self, constraint: OutlivesConstraint<'tcx>) {
- debug!(
- "OutlivesConstraintSet::push({:?}: {:?} @ {:?}",
- constraint.sup, constraint.sub, constraint.locations
- );
+ debug!("OutlivesConstraintSet::push({:?})", constraint);
if constraint.sup == constraint.sub {
// 'a: 'a is pretty uninteresting
return;
@@ -73,7 +70,7 @@ impl<'tcx> Index<OutlivesConstraintIndex> for OutlivesConstraintSet<'tcx> {
}
}
-#[derive(Clone, PartialEq, Eq)]
+#[derive(Copy, Clone, PartialEq, Eq)]
pub struct OutlivesConstraint<'tcx> {
// NB. The ordering here is not significant for correctness, but
// it is for convenience. Before we dump the constraints in the
@@ -105,8 +102,8 @@ impl<'tcx> fmt::Debug for OutlivesConstraint<'tcx> {
fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(
formatter,
- "({:?}: {:?}) due to {:?} ({:?})",
- self.sup, self.sub, self.locations, self.variance_info
+ "({:?}: {:?}) due to {:?} ({:?}) ({:?})",
+ self.sup, self.sub, self.locations, self.variance_info, self.category,
)
}
}
diff --git a/compiler/rustc_borrowck/src/consumers.rs b/compiler/rustc_borrowck/src/consumers.rs
index efc17a173..b162095f8 100644
--- a/compiler/rustc_borrowck/src/consumers.rs
+++ b/compiler/rustc_borrowck/src/consumers.rs
@@ -31,9 +31,8 @@ pub fn get_body_with_borrowck_facts<'tcx>(
def: ty::WithOptConstParam<LocalDefId>,
) -> BodyWithBorrowckFacts<'tcx> {
let (input_body, promoted) = tcx.mir_promoted(def);
- tcx.infer_ctxt().with_opaque_type_inference(DefiningAnchor::Bind(def.did)).enter(|infcx| {
- let input_body: &Body<'_> = &input_body.borrow();
- let promoted: &IndexVec<_, _> = &promoted.borrow();
- *super::do_mir_borrowck(&infcx, input_body, promoted, true).1.unwrap()
- })
+ let infcx = tcx.infer_ctxt().with_opaque_type_inference(DefiningAnchor::Bind(def.did)).build();
+ let input_body: &Body<'_> = &input_body.borrow();
+ let promoted: &IndexVec<_, _> = &promoted.borrow();
+ *super::do_mir_borrowck(&infcx, input_body, promoted, true).1.unwrap()
}
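The consumers.rs hunk above is one instance of a migration that repeats throughout this patch: the closure-scoped inference-context API (`infer_ctxt().enter(..)`, `enter_with_canonical(..)`) is replaced by a builder that returns the `InferCtxt` as an ordinary value (`build()`, `build_with_canonical(..)`). Schematically, with the shapes taken from the hunks in this diff (rustc-internal API, not a standalone program):

    // Old pattern: the inference context only exists inside the closure.
    // tcx.infer_ctxt().enter(|infcx| {
    //     do_something(&infcx)
    // })
    //
    // New pattern: build the context once and use it like any other value.
    // let infcx = tcx.infer_ctxt().build();
    // do_something(&infcx)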
diff --git a/compiler/rustc_borrowck/src/dataflow.rs b/compiler/rustc_borrowck/src/dataflow.rs
index 97d5a8d15..9f7a4d499 100644
--- a/compiler/rustc_borrowck/src/dataflow.rs
+++ b/compiler/rustc_borrowck/src/dataflow.rs
@@ -143,7 +143,7 @@ struct OutOfScopePrecomputer<'a, 'tcx> {
impl<'a, 'tcx> OutOfScopePrecomputer<'a, 'tcx> {
fn new(body: &'a Body<'tcx>, regioncx: &'a RegionInferenceContext<'tcx>) -> Self {
OutOfScopePrecomputer {
- visited: BitSet::new_empty(body.basic_blocks().len()),
+ visited: BitSet::new_empty(body.basic_blocks.len()),
visit_stack: vec![],
body,
regioncx,
@@ -391,7 +391,7 @@ impl<'tcx> rustc_mir_dataflow::GenKillAnalysis<'tcx> for Borrows<'_, 'tcx> {
| mir::StatementKind::Retag { .. }
| mir::StatementKind::AscribeUserType(..)
| mir::StatementKind::Coverage(..)
- | mir::StatementKind::CopyNonOverlapping(..)
+ | mir::StatementKind::Intrinsic(..)
| mir::StatementKind::Nop => {}
}
}
diff --git a/compiler/rustc_borrowck/src/diagnostics/bound_region_errors.rs b/compiler/rustc_borrowck/src/diagnostics/bound_region_errors.rs
index 1ef2b0ae9..02071ed6b 100644
--- a/compiler/rustc_borrowck/src/diagnostics/bound_region_errors.rs
+++ b/compiler/rustc_borrowck/src/diagnostics/bound_region_errors.rs
@@ -56,7 +56,7 @@ impl<'tcx> UniverseInfo<'tcx> {
) {
match self.0 {
UniverseInfoInner::RelateTys { expected, found } => {
- let err = mbcx.infcx.report_mismatched_types(
+ let err = mbcx.infcx.err_ctxt().report_mismatched_types(
&cause,
expected,
found,
@@ -238,20 +238,11 @@ impl<'tcx> TypeOpInfo<'tcx> for PredicateQuery<'tcx> {
placeholder_region: ty::Region<'tcx>,
error_region: Option<ty::Region<'tcx>>,
) -> Option<DiagnosticBuilder<'tcx, ErrorGuaranteed>> {
- mbcx.infcx.tcx.infer_ctxt().enter_with_canonical(
- cause.span,
- &self.canonical_query,
- |ref infcx, key, _| {
- let mut fulfill_cx = <dyn TraitEngine<'_>>::new(infcx.tcx);
- type_op_prove_predicate_with_cause(infcx, &mut *fulfill_cx, key, cause);
- try_extract_error_from_fulfill_cx(
- fulfill_cx,
- infcx,
- placeholder_region,
- error_region,
- )
- },
- )
+ let (ref infcx, key, _) =
+ mbcx.infcx.tcx.infer_ctxt().build_with_canonical(cause.span, &self.canonical_query);
+ let mut fulfill_cx = <dyn TraitEngine<'_>>::new(infcx.tcx);
+ type_op_prove_predicate_with_cause(infcx, &mut *fulfill_cx, key, cause);
+ try_extract_error_from_fulfill_cx(fulfill_cx, infcx, placeholder_region, error_region)
}
}
@@ -288,37 +279,24 @@ where
placeholder_region: ty::Region<'tcx>,
error_region: Option<ty::Region<'tcx>>,
) -> Option<DiagnosticBuilder<'tcx, ErrorGuaranteed>> {
- mbcx.infcx.tcx.infer_ctxt().enter_with_canonical(
- cause.span,
- &self.canonical_query,
- |ref infcx, key, _| {
- let mut fulfill_cx = <dyn TraitEngine<'_>>::new(infcx.tcx);
-
- let mut selcx = SelectionContext::new(infcx);
-
- // FIXME(lqd): Unify and de-duplicate the following with the actual
- // `rustc_traits::type_op::type_op_normalize` query to allow the span we need in the
- // `ObligationCause`. The normalization results are currently different between
- // `AtExt::normalize` used in the query and `normalize` called below: the former fails
- // to normalize the `nll/relate_tys/impl-fn-ignore-binder-via-bottom.rs` test. Check
- // after #85499 lands to see if its fixes have erased this difference.
- let (param_env, value) = key.into_parts();
- let Normalized { value: _, obligations } = rustc_trait_selection::traits::normalize(
- &mut selcx,
- param_env,
- cause,
- value.value,
- );
- fulfill_cx.register_predicate_obligations(infcx, obligations);
-
- try_extract_error_from_fulfill_cx(
- fulfill_cx,
- infcx,
- placeholder_region,
- error_region,
- )
- },
- )
+ let (ref infcx, key, _) =
+ mbcx.infcx.tcx.infer_ctxt().build_with_canonical(cause.span, &self.canonical_query);
+ let mut fulfill_cx = <dyn TraitEngine<'_>>::new(infcx.tcx);
+
+ let mut selcx = SelectionContext::new(infcx);
+
+ // FIXME(lqd): Unify and de-duplicate the following with the actual
+ // `rustc_traits::type_op::type_op_normalize` query to allow the span we need in the
+ // `ObligationCause`. The normalization results are currently different between
+ // `AtExt::normalize` used in the query and `normalize` called below: the former fails
+ // to normalize the `nll/relate_tys/impl-fn-ignore-binder-via-bottom.rs` test. Check
+ // after #85499 lands to see if its fixes have erased this difference.
+ let (param_env, value) = key.into_parts();
+ let Normalized { value: _, obligations } =
+ rustc_trait_selection::traits::normalize(&mut selcx, param_env, cause, value.value);
+ fulfill_cx.register_predicate_obligations(infcx, obligations);
+
+ try_extract_error_from_fulfill_cx(fulfill_cx, infcx, placeholder_region, error_region)
}
}
@@ -349,21 +327,11 @@ impl<'tcx> TypeOpInfo<'tcx> for AscribeUserTypeQuery<'tcx> {
placeholder_region: ty::Region<'tcx>,
error_region: Option<ty::Region<'tcx>>,
) -> Option<DiagnosticBuilder<'tcx, ErrorGuaranteed>> {
- mbcx.infcx.tcx.infer_ctxt().enter_with_canonical(
- cause.span,
- &self.canonical_query,
- |ref infcx, key, _| {
- let mut fulfill_cx = <dyn TraitEngine<'_>>::new(infcx.tcx);
- type_op_ascribe_user_type_with_span(infcx, &mut *fulfill_cx, key, Some(cause.span))
- .ok()?;
- try_extract_error_from_fulfill_cx(
- fulfill_cx,
- infcx,
- placeholder_region,
- error_region,
- )
- },
- )
+ let (ref infcx, key, _) =
+ mbcx.infcx.tcx.infer_ctxt().build_with_canonical(cause.span, &self.canonical_query);
+ let mut fulfill_cx = <dyn TraitEngine<'_>>::new(infcx.tcx);
+ type_op_ascribe_user_type_with_span(infcx, &mut *fulfill_cx, key, Some(cause.span)).ok()?;
+ try_extract_error_from_fulfill_cx(fulfill_cx, infcx, placeholder_region, error_region)
}
}
@@ -407,7 +375,7 @@ impl<'tcx> TypeOpInfo<'tcx> for crate::type_check::InstantiateOpaqueType<'tcx> {
#[instrument(skip(fulfill_cx, infcx), level = "debug")]
fn try_extract_error_from_fulfill_cx<'tcx>(
mut fulfill_cx: Box<dyn TraitEngine<'tcx> + 'tcx>,
- infcx: &InferCtxt<'_, 'tcx>,
+ infcx: &InferCtxt<'tcx>,
placeholder_region: ty::Region<'tcx>,
error_region: Option<ty::Region<'tcx>>,
) -> Option<DiagnosticBuilder<'tcx, ErrorGuaranteed>> {
@@ -427,7 +395,7 @@ fn try_extract_error_from_fulfill_cx<'tcx>(
}
fn try_extract_error_from_region_constraints<'tcx>(
- infcx: &InferCtxt<'_, 'tcx>,
+ infcx: &InferCtxt<'tcx>,
placeholder_region: ty::Region<'tcx>,
error_region: Option<ty::Region<'tcx>>,
region_constraints: &RegionConstraintData<'tcx>,
@@ -449,43 +417,37 @@ fn try_extract_error_from_region_constraints<'tcx>(
})?;
debug!(?sub_region, "cause = {:#?}", cause);
- let nice_error = match (error_region, *sub_region) {
- (Some(error_region), ty::ReVar(vid)) => NiceRegionError::new(
- infcx,
- RegionResolutionError::SubSupConflict(
- vid,
- region_var_origin(vid),
- cause.clone(),
- error_region,
- cause.clone(),
- placeholder_region,
- vec![],
- ),
- ),
- (Some(error_region), _) => NiceRegionError::new(
- infcx,
- RegionResolutionError::ConcreteFailure(cause.clone(), error_region, placeholder_region),
+ let error = match (error_region, *sub_region) {
+ (Some(error_region), ty::ReVar(vid)) => RegionResolutionError::SubSupConflict(
+ vid,
+ region_var_origin(vid),
+ cause.clone(),
+ error_region,
+ cause.clone(),
+ placeholder_region,
+ vec![],
),
+ (Some(error_region), _) => {
+ RegionResolutionError::ConcreteFailure(cause.clone(), error_region, placeholder_region)
+ }
// Note universe here is wrong...
- (None, ty::ReVar(vid)) => NiceRegionError::new(
- infcx,
- RegionResolutionError::UpperBoundUniverseConflict(
- vid,
- region_var_origin(vid),
- universe_of_region(vid),
- cause.clone(),
- placeholder_region,
- ),
- ),
- (None, _) => NiceRegionError::new(
- infcx,
- RegionResolutionError::ConcreteFailure(cause.clone(), sub_region, placeholder_region),
+ (None, ty::ReVar(vid)) => RegionResolutionError::UpperBoundUniverseConflict(
+ vid,
+ region_var_origin(vid),
+ universe_of_region(vid),
+ cause.clone(),
+ placeholder_region,
),
+ (None, _) => {
+ RegionResolutionError::ConcreteFailure(cause.clone(), sub_region, placeholder_region)
+ }
};
- nice_error.try_report_from_nll().or_else(|| {
+ NiceRegionError::new(&infcx.err_ctxt(), error).try_report_from_nll().or_else(|| {
if let SubregionOrigin::Subtype(trace) = cause {
Some(
- infcx.report_and_explain_type_error(*trace, &TypeError::RegionsPlaceholderMismatch),
+ infcx
+ .err_ctxt()
+ .report_and_explain_type_error(*trace, TypeError::RegionsPlaceholderMismatch),
)
} else {
None
diff --git a/compiler/rustc_borrowck/src/diagnostics/conflict_errors.rs b/compiler/rustc_borrowck/src/diagnostics/conflict_errors.rs
index 8bc8964bb..583bc2e28 100644
--- a/compiler/rustc_borrowck/src/diagnostics/conflict_errors.rs
+++ b/compiler/rustc_borrowck/src/diagnostics/conflict_errors.rs
@@ -16,7 +16,7 @@ use rustc_middle::mir::{
FakeReadCause, LocalDecl, LocalInfo, LocalKind, Location, Operand, Place, PlaceRef,
ProjectionElem, Rvalue, Statement, StatementKind, Terminator, TerminatorKind, VarBindingForm,
};
-use rustc_middle::ty::{self, subst::Subst, suggest_constraining_type_params, PredicateKind, Ty};
+use rustc_middle::ty::{self, suggest_constraining_type_params, PredicateKind, Ty};
use rustc_mir_dataflow::move_paths::{InitKind, MoveOutIndex, MovePathIndex};
use rustc_span::def_id::LocalDefId;
use rustc_span::hygiene::DesugaringKind;
@@ -198,7 +198,6 @@ impl<'cx, 'tcx> MirBorrowckCtxt<'cx, 'tcx> {
move_span,
move_spans,
*moved_place,
- Some(used_place),
partially_str,
loop_message,
move_msg,
@@ -258,7 +257,8 @@ impl<'cx, 'tcx> MirBorrowckCtxt<'cx, 'tcx> {
let ty = place.ty(self.body, self.infcx.tcx).ty;
// If we're in pattern, we do nothing in favor of the previous suggestion (#80913).
- if is_loop_move & !in_pattern {
+ // Same for if we're in a loop, see #101119.
+ if is_loop_move & !in_pattern && !matches!(use_spans, UseSpans::ClosureUse { .. }) {
if let ty::Ref(_, _, hir::Mutability::Mut) = ty.kind() {
// We have a `&mut` ref, we need to reborrow on each iteration (#62112).
err.span_suggestion_verbose(
@@ -368,6 +368,7 @@ impl<'cx, 'tcx> MirBorrowckCtxt<'cx, 'tcx> {
let mut visitor = ConditionVisitor { spans: &spans, name: &name, errors: vec![] };
visitor.visit_body(&body);
+ let mut show_assign_sugg = false;
let isnt_initialized = if let InitializationRequiringAction::PartialAssignment
| InitializationRequiringAction::Assignment = desired_action
{
@@ -395,6 +396,7 @@ impl<'cx, 'tcx> MirBorrowckCtxt<'cx, 'tcx> {
.count()
== 0
{
+ show_assign_sugg = true;
"isn't initialized"
} else {
"is possibly-uninitialized"
@@ -445,13 +447,87 @@ impl<'cx, 'tcx> MirBorrowckCtxt<'cx, 'tcx> {
}
}
}
+
err.span_label(decl_span, "binding declared here but left uninitialized");
+ if show_assign_sugg {
+ struct LetVisitor {
+ decl_span: Span,
+ sugg_span: Option<Span>,
+ }
+
+ impl<'v> Visitor<'v> for LetVisitor {
+ fn visit_stmt(&mut self, ex: &'v hir::Stmt<'v>) {
+ if self.sugg_span.is_some() {
+ return;
+ }
+ if let hir::StmtKind::Local(hir::Local {
+ span, ty, init: None, ..
+ }) = &ex.kind && span.contains(self.decl_span) {
+ self.sugg_span = ty.map_or(Some(self.decl_span), |ty| Some(ty.span));
+ }
+ hir::intravisit::walk_stmt(self, ex);
+ }
+ }
+
+ let mut visitor = LetVisitor { decl_span, sugg_span: None };
+ visitor.visit_body(&body);
+ if let Some(span) = visitor.sugg_span {
+ self.suggest_assign_value(&mut err, moved_place, span);
+ }
+ }
err
}
+ fn suggest_assign_value(
+ &self,
+ err: &mut Diagnostic,
+ moved_place: PlaceRef<'tcx>,
+ sugg_span: Span,
+ ) {
+ let ty = moved_place.ty(self.body, self.infcx.tcx).ty;
+ debug!("ty: {:?}, kind: {:?}", ty, ty.kind());
+
+ let tcx = self.infcx.tcx;
+ let implements_default = |ty, param_env| {
+ let Some(default_trait) = tcx.get_diagnostic_item(sym::Default) else {
+ return false;
+ };
+ // Regions are already solved, so we must use a fresh InferCtxt,
+ // but the type has region variables, so erase those.
+ tcx.infer_ctxt()
+ .build()
+ .type_implements_trait(
+ default_trait,
+ tcx.erase_regions(ty),
+ ty::List::empty(),
+ param_env,
+ )
+ .must_apply_modulo_regions()
+ };
+
+ let assign_value = match ty.kind() {
+ ty::Bool => "false",
+ ty::Float(_) => "0.0",
+ ty::Int(_) | ty::Uint(_) => "0",
+ ty::Never | ty::Error(_) => "",
+ ty::Adt(def, _) if Some(def.did()) == tcx.get_diagnostic_item(sym::Vec) => "vec![]",
+ ty::Adt(_, _) if implements_default(ty, self.param_env) => "Default::default()",
+ _ => "todo!()",
+ };
+
+ if !assign_value.is_empty() {
+ err.span_suggestion_verbose(
+ sugg_span.shrink_to_hi(),
+ format!("consider assigning a value"),
+ format!(" = {}", assign_value),
+ Applicability::MaybeIncorrect,
+ );
+ }
+ }
+
fn suggest_borrow_fn_like(
&self,
- err: &mut DiagnosticBuilder<'tcx, ErrorGuaranteed>,
+ err: &mut Diagnostic,
ty: Ty<'tcx>,
move_sites: &[MoveSite],
value_name: &str,
@@ -526,12 +602,7 @@ impl<'cx, 'tcx> MirBorrowckCtxt<'cx, 'tcx> {
true
}
- fn suggest_adding_copy_bounds(
- &self,
- err: &mut DiagnosticBuilder<'tcx, ErrorGuaranteed>,
- ty: Ty<'tcx>,
- span: Span,
- ) {
+ fn suggest_adding_copy_bounds(&self, err: &mut Diagnostic, ty: Ty<'tcx>, span: Span) {
let tcx = self.infcx.tcx;
let generics = tcx.generics_of(self.mir_def_id());
@@ -541,41 +612,40 @@ impl<'cx, 'tcx> MirBorrowckCtxt<'cx, 'tcx> {
.and_then(|def_id| tcx.hir().get_generics(def_id))
else { return; };
// Try to find predicates on *generic params* that would allow copying `ty`
- let predicates: Result<Vec<_>, _> = tcx.infer_ctxt().enter(|infcx| {
- let mut fulfill_cx = <dyn rustc_infer::traits::TraitEngine<'_>>::new(infcx.tcx);
+ let infcx = tcx.infer_ctxt().build();
+ let mut fulfill_cx = <dyn rustc_infer::traits::TraitEngine<'_>>::new(infcx.tcx);
- let copy_did = infcx.tcx.lang_items().copy_trait().unwrap();
- let cause = ObligationCause::new(
- span,
- self.mir_hir_id(),
- rustc_infer::traits::ObligationCauseCode::MiscObligation,
- );
- fulfill_cx.register_bound(
- &infcx,
- self.param_env,
- // Erase any region vids from the type, which may not be resolved
- infcx.tcx.erase_regions(ty),
- copy_did,
- cause,
- );
- // Select all, including ambiguous predicates
- let errors = fulfill_cx.select_all_or_error(&infcx);
-
- // Only emit suggestion if all required predicates are on generic
- errors
- .into_iter()
- .map(|err| match err.obligation.predicate.kind().skip_binder() {
- PredicateKind::Trait(predicate) => match predicate.self_ty().kind() {
- ty::Param(param_ty) => Ok((
- generics.type_param(param_ty, tcx),
- predicate.trait_ref.print_only_trait_path().to_string(),
- )),
- _ => Err(()),
- },
+ let copy_did = infcx.tcx.lang_items().copy_trait().unwrap();
+ let cause = ObligationCause::new(
+ span,
+ self.mir_hir_id(),
+ rustc_infer::traits::ObligationCauseCode::MiscObligation,
+ );
+ fulfill_cx.register_bound(
+ &infcx,
+ self.param_env,
+ // Erase any region vids from the type, which may not be resolved
+ infcx.tcx.erase_regions(ty),
+ copy_did,
+ cause,
+ );
+ // Select all, including ambiguous predicates
+ let errors = fulfill_cx.select_all_or_error(&infcx);
+
+ // Only emit suggestion if all required predicates are on generic
+ let predicates: Result<Vec<_>, _> = errors
+ .into_iter()
+ .map(|err| match err.obligation.predicate.kind().skip_binder() {
+ PredicateKind::Trait(predicate) => match predicate.self_ty().kind() {
+ ty::Param(param_ty) => Ok((
+ generics.type_param(param_ty, tcx),
+ predicate.trait_ref.print_only_trait_path().to_string(),
+ )),
_ => Err(()),
- })
- .collect()
- });
+ },
+ _ => Err(()),
+ })
+ .collect();
if let Ok(predicates) = predicates {
suggest_constraining_type_params(
@@ -1124,6 +1194,7 @@ impl<'cx, 'tcx> MirBorrowckCtxt<'cx, 'tcx> {
/// short a lifetime. (But sometimes it is more useful to report
/// it as a more direct conflict between the execution of a
/// `Drop::drop` with an aliasing borrow.)
+ #[instrument(level = "debug", skip(self))]
pub(crate) fn report_borrowed_value_does_not_live_long_enough(
&mut self,
location: Location,
@@ -1131,13 +1202,6 @@ impl<'cx, 'tcx> MirBorrowckCtxt<'cx, 'tcx> {
place_span: (Place<'tcx>, Span),
kind: Option<WriteKind>,
) {
- debug!(
- "report_borrowed_value_does_not_live_long_enough(\
- {:?}, {:?}, {:?}, {:?}\
- )",
- location, borrow, place_span, kind
- );
-
let drop_span = place_span.1;
let root_place =
self.prefixes(borrow.borrowed_place.as_ref(), PrefixSet::All).last().unwrap();
@@ -1194,10 +1258,8 @@ impl<'cx, 'tcx> MirBorrowckCtxt<'cx, 'tcx> {
let kind_place = kind.filter(|_| place_desc.is_some()).map(|k| (k, place_span.0));
let explanation = self.explain_why_borrow_contains_point(location, &borrow, kind_place);
- debug!(
- "report_borrowed_value_does_not_live_long_enough(place_desc: {:?}, explanation: {:?})",
- place_desc, explanation
- );
+ debug!(?place_desc, ?explanation);
+
let err = match (place_desc, explanation) {
// If the outlives constraint comes from inside the closure,
// for example:
@@ -1469,6 +1531,7 @@ impl<'cx, 'tcx> MirBorrowckCtxt<'cx, 'tcx> {
err
}
+ #[instrument(level = "debug", skip(self))]
fn report_temporary_value_does_not_live_long_enough(
&mut self,
location: Location,
@@ -1478,13 +1541,6 @@ impl<'cx, 'tcx> MirBorrowckCtxt<'cx, 'tcx> {
proper_span: Span,
explanation: BorrowExplanation<'tcx>,
) -> DiagnosticBuilder<'cx, ErrorGuaranteed> {
- debug!(
- "report_temporary_value_does_not_live_long_enough(\
- {:?}, {:?}, {:?}, {:?}\
- )",
- location, borrow, drop_span, proper_span
- );
-
if let BorrowExplanation::MustBeValidFor { category, span, from_closure: false, .. } =
explanation
{
@@ -2164,7 +2220,9 @@ impl<'cx, 'tcx> MirBorrowckCtxt<'cx, 'tcx> {
}
StorageDeadOrDrop::Destructor(_) => kind,
},
- ProjectionElem::Field(..) | ProjectionElem::Downcast(..) => {
+ ProjectionElem::OpaqueCast { .. }
+ | ProjectionElem::Field(..)
+ | ProjectionElem::Downcast(..) => {
match place_ty.ty.kind() {
ty::Adt(def, _) if def.has_dtor(tcx) => {
// Report the outermost adt with a destructor
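The `suggest_assign_value` helper added earlier in this file's diff fires when a binding is declared but never initialized, and proposes a type-appropriate placeholder (`0` for integers, `0.0` for floats, `false` for `bool`, `vec![]` for `Vec`, `Default::default()` when the type implements `Default`, `todo!()` otherwise). A small user-level example of the scenario it targets (illustrative only; this snippet intentionally does not compile):

    fn main() {
        let x: i32;
        // error[E0381]: used binding `x` isn't initialized
        // new help: consider assigning a value, e.g. `let x: i32 = 0;`
        println!("{x}");
    }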
diff --git a/compiler/rustc_borrowck/src/diagnostics/explain_borrow.rs b/compiler/rustc_borrowck/src/diagnostics/explain_borrow.rs
index 72aee0267..582d683dd 100644
--- a/compiler/rustc_borrowck/src/diagnostics/explain_borrow.rs
+++ b/compiler/rustc_borrowck/src/diagnostics/explain_borrow.rs
@@ -1,8 +1,5 @@
//! Print diagnostics to explain why values are borrowed.
-use std::collections::VecDeque;
-
-use rustc_data_structures::fx::FxHashSet;
use rustc_errors::{Applicability, Diagnostic};
use rustc_index::vec::IndexVec;
use rustc_infer::infer::NllRegionVariableOrigin;
@@ -15,7 +12,7 @@ use rustc_middle::ty::{self, RegionVid, TyCtxt};
use rustc_span::symbol::{kw, Symbol};
use rustc_span::{sym, DesugaringKind, Span};
-use crate::region_infer::BlameConstraint;
+use crate::region_infer::{BlameConstraint, ExtraConstraintInfo};
use crate::{
borrow_set::BorrowData, nll::ConstraintDescription, region_infer::Cause, MirBorrowckCtxt,
WriteKind,
@@ -38,6 +35,7 @@ pub(crate) enum BorrowExplanation<'tcx> {
span: Span,
region_name: RegionName,
opt_place_desc: Option<String>,
+ extra_info: Vec<ExtraConstraintInfo>,
},
Unexplained,
}
@@ -243,6 +241,7 @@ impl<'tcx> BorrowExplanation<'tcx> {
ref region_name,
ref opt_place_desc,
from_closure: _,
+ ref extra_info,
} => {
region_name.highlight_region_name(err);
@@ -268,18 +267,30 @@ impl<'tcx> BorrowExplanation<'tcx> {
);
};
+ for extra in extra_info {
+ match extra {
+ ExtraConstraintInfo::PlaceholderFromPredicate(span) => {
+ err.span_note(*span, format!("due to current limitations in the borrow checker, this implies a `'static` lifetime"));
+ }
+ }
+ }
+
self.add_lifetime_bound_suggestion_to_diagnostic(err, &category, span, region_name);
}
_ => {}
}
}
- pub(crate) fn add_lifetime_bound_suggestion_to_diagnostic(
+
+ fn add_lifetime_bound_suggestion_to_diagnostic(
&self,
err: &mut Diagnostic,
category: &ConstraintCategory<'tcx>,
span: Span,
region_name: &RegionName,
) {
+ if !span.is_desugaring(DesugaringKind::OpaqueTy) {
+ return;
+ }
if let ConstraintCategory::OpaqueType = category {
let suggestable_name =
if region_name.was_named() { region_name.name } else { kw::UnderscoreLifetime };
@@ -305,18 +316,17 @@ impl<'cx, 'tcx> MirBorrowckCtxt<'cx, 'tcx> {
&self,
borrow_region: RegionVid,
outlived_region: RegionVid,
- ) -> (ConstraintCategory<'tcx>, bool, Span, Option<RegionName>) {
- let BlameConstraint { category, from_closure, cause, variance_info: _ } =
- self.regioncx.best_blame_constraint(
- &self.body,
- borrow_region,
- NllRegionVariableOrigin::FreeRegion,
- |r| self.regioncx.provides_universal_region(r, borrow_region, outlived_region),
- );
+ ) -> (ConstraintCategory<'tcx>, bool, Span, Option<RegionName>, Vec<ExtraConstraintInfo>) {
+ let (blame_constraint, extra_info) = self.regioncx.best_blame_constraint(
+ borrow_region,
+ NllRegionVariableOrigin::FreeRegion,
+ |r| self.regioncx.provides_universal_region(r, borrow_region, outlived_region),
+ );
+ let BlameConstraint { category, from_closure, cause, .. } = blame_constraint;
let outlived_fr_name = self.give_region_a_name(outlived_region);
- (category, from_closure, cause.span, outlived_fr_name)
+ (category, from_closure, cause.span, outlived_fr_name, extra_info)
}
/// Returns structured explanation for *why* the borrow contains the
@@ -332,37 +342,51 @@ impl<'cx, 'tcx> MirBorrowckCtxt<'cx, 'tcx> {
/// - second half is the place being accessed
///
/// [d]: https://rust-lang.github.io/rfcs/2094-nll.html#leveraging-intuition-framing-errors-in-terms-of-points
+ #[instrument(level = "debug", skip(self))]
pub(crate) fn explain_why_borrow_contains_point(
&self,
location: Location,
borrow: &BorrowData<'tcx>,
kind_place: Option<(WriteKind, Place<'tcx>)>,
) -> BorrowExplanation<'tcx> {
- debug!(
- "explain_why_borrow_contains_point(location={:?}, borrow={:?}, kind_place={:?})",
- location, borrow, kind_place
- );
-
let regioncx = &self.regioncx;
let body: &Body<'_> = &self.body;
let tcx = self.infcx.tcx;
let borrow_region_vid = borrow.region;
- debug!("explain_why_borrow_contains_point: borrow_region_vid={:?}", borrow_region_vid);
-
- let region_sub = self.regioncx.find_sub_region_live_at(borrow_region_vid, location);
- debug!("explain_why_borrow_contains_point: region_sub={:?}", region_sub);
+ debug!(?borrow_region_vid);
+
+ let mut region_sub = self.regioncx.find_sub_region_live_at(borrow_region_vid, location);
+ debug!(?region_sub);
+
+ let mut use_location = location;
+ let mut use_in_later_iteration_of_loop = false;
+
+ if region_sub == borrow_region_vid {
+ // When `region_sub` is the same as `borrow_region_vid` (the location where the borrow is
+ // issued is the same location that invalidates the reference), this is likely a loop iteration
+ // - in this case, try using the loop terminator location in `find_sub_region_live_at`.
+ if let Some(loop_terminator_location) =
+ regioncx.find_loop_terminator_location(borrow.region, body)
+ {
+ region_sub = self
+ .regioncx
+ .find_sub_region_live_at(borrow_region_vid, loop_terminator_location);
+ debug!("explain_why_borrow_contains_point: region_sub in loop={:?}", region_sub);
+ use_location = loop_terminator_location;
+ use_in_later_iteration_of_loop = true;
+ }
+ }
- match find_use::find(body, regioncx, tcx, region_sub, location) {
+ match find_use::find(body, regioncx, tcx, region_sub, use_location) {
Some(Cause::LiveVar(local, location)) => {
let span = body.source_info(location).span;
let spans = self
.move_spans(Place::from(local).as_ref(), location)
.or_else(|| self.borrow_spans(span, location));
- let borrow_location = location;
- if self.is_use_in_later_iteration_of_loop(borrow_location, location) {
- let later_use = self.later_use_kind(borrow, spans, location);
+ if use_in_later_iteration_of_loop {
+ let later_use = self.later_use_kind(borrow, spans, use_location);
BorrowExplanation::UsedLaterInLoop(later_use.0, later_use.1, later_use.2)
} else {
// Check if the location represents a `FakeRead`, and adapt the error
@@ -392,7 +416,7 @@ impl<'cx, 'tcx> MirBorrowckCtxt<'cx, 'tcx> {
None => {
if let Some(region) = self.to_error_region_vid(borrow_region_vid) {
- let (category, from_closure, span, region_name) =
+ let (category, from_closure, span, region_name, extra_info) =
self.free_region_constraint_info(borrow_region_vid, region);
if let Some(region_name) = region_name {
let opt_place_desc = self.describe_place(borrow.borrowed_place.as_ref());
@@ -402,150 +426,20 @@ impl<'cx, 'tcx> MirBorrowckCtxt<'cx, 'tcx> {
span,
region_name,
opt_place_desc,
+ extra_info,
}
} else {
- debug!(
- "explain_why_borrow_contains_point: \
- Could not generate a region name"
- );
+ debug!("Could not generate a region name");
BorrowExplanation::Unexplained
}
} else {
- debug!(
- "explain_why_borrow_contains_point: \
- Could not generate an error region vid"
- );
+ debug!("Could not generate an error region vid");
BorrowExplanation::Unexplained
}
}
}
}
- /// true if `borrow_location` can reach `use_location` by going through a loop and
- /// `use_location` is also inside of that loop
- fn is_use_in_later_iteration_of_loop(
- &self,
- borrow_location: Location,
- use_location: Location,
- ) -> bool {
- let back_edge = self.reach_through_backedge(borrow_location, use_location);
- back_edge.map_or(false, |back_edge| self.can_reach_head_of_loop(use_location, back_edge))
- }
-
- /// Returns the outmost back edge if `from` location can reach `to` location passing through
- /// that back edge
- fn reach_through_backedge(&self, from: Location, to: Location) -> Option<Location> {
- let mut visited_locations = FxHashSet::default();
- let mut pending_locations = VecDeque::new();
- visited_locations.insert(from);
- pending_locations.push_back(from);
- debug!("reach_through_backedge: from={:?} to={:?}", from, to,);
-
- let mut outmost_back_edge = None;
- while let Some(location) = pending_locations.pop_front() {
- debug!(
- "reach_through_backedge: location={:?} outmost_back_edge={:?}
- pending_locations={:?} visited_locations={:?}",
- location, outmost_back_edge, pending_locations, visited_locations
- );
-
- if location == to && outmost_back_edge.is_some() {
- // We've managed to reach the use location
- debug!("reach_through_backedge: found!");
- return outmost_back_edge;
- }
-
- let block = &self.body.basic_blocks()[location.block];
-
- if location.statement_index < block.statements.len() {
- let successor = location.successor_within_block();
- if visited_locations.insert(successor) {
- pending_locations.push_back(successor);
- }
- } else {
- pending_locations.extend(
- block
- .terminator()
- .successors()
- .map(|bb| Location { statement_index: 0, block: bb })
- .filter(|s| visited_locations.insert(*s))
- .map(|s| {
- if self.is_back_edge(location, s) {
- match outmost_back_edge {
- None => {
- outmost_back_edge = Some(location);
- }
-
- Some(back_edge)
- if location.dominates(back_edge, &self.dominators) =>
- {
- outmost_back_edge = Some(location);
- }
-
- Some(_) => {}
- }
- }
-
- s
- }),
- );
- }
- }
-
- None
- }
-
- /// true if `from` location can reach `loop_head` location and `loop_head` dominates all the
- /// intermediate nodes
- fn can_reach_head_of_loop(&self, from: Location, loop_head: Location) -> bool {
- self.find_loop_head_dfs(from, loop_head, &mut FxHashSet::default())
- }
-
- fn find_loop_head_dfs(
- &self,
- from: Location,
- loop_head: Location,
- visited_locations: &mut FxHashSet<Location>,
- ) -> bool {
- visited_locations.insert(from);
-
- if from == loop_head {
- return true;
- }
-
- if loop_head.dominates(from, &self.dominators) {
- let block = &self.body.basic_blocks()[from.block];
-
- if from.statement_index < block.statements.len() {
- let successor = from.successor_within_block();
-
- if !visited_locations.contains(&successor)
- && self.find_loop_head_dfs(successor, loop_head, visited_locations)
- {
- return true;
- }
- } else {
- for bb in block.terminator().successors() {
- let successor = Location { statement_index: 0, block: bb };
-
- if !visited_locations.contains(&successor)
- && self.find_loop_head_dfs(successor, loop_head, visited_locations)
- {
- return true;
- }
- }
- }
- }
-
- false
- }
-
- /// True if an edge `source -> target` is a backedge -- in other words, if the target
- /// dominates the source.
- fn is_back_edge(&self, source: Location, target: Location) -> bool {
- target.dominates(source, &self.dominators)
- }
-
/// Determine how the borrow was later used.
/// First span returned points to the location of the conflicting use
/// Second span if `Some` is returned in the case of closures and points
@@ -564,7 +458,7 @@ impl<'cx, 'tcx> MirBorrowckCtxt<'cx, 'tcx> {
UseSpans::PatUse(span)
| UseSpans::OtherUse(span)
| UseSpans::FnSelfUse { var_span: span, .. } => {
- let block = &self.body.basic_blocks()[location.block];
+ let block = &self.body.basic_blocks[location.block];
let kind = if let Some(&Statement {
kind: StatementKind::FakeRead(box (FakeReadCause::ForLet(_), _)),
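The hand-rolled CFG walk deleted above (`is_use_in_later_iteration_of_loop`, `reach_through_backedge`, `can_reach_head_of_loop`, `is_back_edge`) is replaced by asking region inference directly for a loop terminator location via `find_loop_terminator_location`. The property the old code was built around is still useful when reading the new logic: an edge `source -> target` is a back edge exactly when `target` dominates `source`. For reference, that is all the deleted `is_back_edge` did:

    // fn is_back_edge(&self, source: Location, target: Location) -> bool {
    //     // `target` dominating `source` means every path from the entry to
    //     // `source` already passed through `target`, i.e. the edge closes a loop.
    //     target.dominates(source, &self.dominators)
    // }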
diff --git a/compiler/rustc_borrowck/src/diagnostics/mod.rs b/compiler/rustc_borrowck/src/diagnostics/mod.rs
index 098e8de94..534d9ecae 100644
--- a/compiler/rustc_borrowck/src/diagnostics/mod.rs
+++ b/compiler/rustc_borrowck/src/diagnostics/mod.rs
@@ -237,6 +237,7 @@ impl<'cx, 'tcx> MirBorrowckCtxt<'cx, 'tcx> {
}
ProjectionElem::Downcast(..) if opt.including_downcast => return None,
ProjectionElem::Downcast(..) => (),
+ ProjectionElem::OpaqueCast(..) => (),
ProjectionElem::Field(field, _ty) => {
// FIXME(project-rfc_2229#36): print capture precisely here.
if let Some(field) = self.is_upvar_field_projection(PlaceRef {
@@ -317,6 +318,7 @@ impl<'cx, 'tcx> MirBorrowckCtxt<'cx, 'tcx> {
PlaceRef { local, projection: proj_base }.ty(self.body, self.infcx.tcx)
}
ProjectionElem::Downcast(..) => place.ty(self.body, self.infcx.tcx),
+ ProjectionElem::OpaqueCast(ty) => PlaceTy::from_ty(*ty),
ProjectionElem::Field(_, field_type) => PlaceTy::from_ty(*field_type),
},
};
@@ -970,7 +972,6 @@ impl<'cx, 'tcx> MirBorrowckCtxt<'cx, 'tcx> {
move_span: Span,
move_spans: UseSpans<'tcx>,
moved_place: Place<'tcx>,
- used_place: Option<PlaceRef<'tcx>>,
partially_str: &str,
loop_message: &str,
move_msg: &str,
@@ -1024,7 +1025,8 @@ impl<'cx, 'tcx> MirBorrowckCtxt<'cx, 'tcx> {
if let Some((CallDesugaringKind::ForLoopIntoIter, _)) = desugaring {
let ty = moved_place.ty(self.body, self.infcx.tcx).ty;
let suggest = match self.infcx.tcx.get_diagnostic_item(sym::IntoIterator) {
- Some(def_id) => self.infcx.tcx.infer_ctxt().enter(|infcx| {
+ Some(def_id) => {
+ let infcx = self.infcx.tcx.infer_ctxt().build();
type_known_to_meet_bound_modulo_regions(
&infcx,
self.param_env,
@@ -1035,7 +1037,7 @@ impl<'cx, 'tcx> MirBorrowckCtxt<'cx, 'tcx> {
def_id,
DUMMY_SP,
)
- }),
+ }
_ => false,
};
if suggest {
@@ -1058,9 +1060,11 @@ impl<'cx, 'tcx> MirBorrowckCtxt<'cx, 'tcx> {
place_name, partially_str, loop_message
),
);
- // If we have a `&mut` ref, we need to reborrow.
- if let Some(ty::Ref(_, _, hir::Mutability::Mut)) = used_place
- .map(|used_place| used_place.ty(self.body, self.infcx.tcx).ty.kind())
+ // If the moved place was a `&mut` ref, then we can
+ // suggest to reborrow it where it was moved, so it
+ // will still be valid by the time we get to the usage.
+ if let ty::Ref(_, _, hir::Mutability::Mut) =
+ moved_place.ty(self.body, self.infcx.tcx).ty.kind()
{
// If we are in a loop this will be suggested later.
if !is_loop_move {
@@ -1086,14 +1090,6 @@ impl<'cx, 'tcx> MirBorrowckCtxt<'cx, 'tcx> {
),
);
}
- if is_option_or_result && maybe_reinitialized_locations_is_empty {
- err.span_suggestion_verbose(
- fn_call_span.shrink_to_lo(),
- "consider calling `.as_ref()` to borrow the type's contents",
- "as_ref().",
- Applicability::MachineApplicable,
- );
- }
// Avoid pointing to the same function in multiple different
// error messages.
if span != DUMMY_SP && self.fn_self_span_reported.insert(self_arg.span) {
@@ -1102,6 +1098,12 @@ impl<'cx, 'tcx> MirBorrowckCtxt<'cx, 'tcx> {
&format!("this function takes ownership of the receiver `self`, which moves {}", place_name)
);
}
+ if is_option_or_result && maybe_reinitialized_locations_is_empty {
+ err.span_label(
+ var_span,
+ "help: consider calling `.as_ref()` or `.as_mut()` to borrow the type's contents",
+ );
+ }
}
// Other desugarings takes &self, which cannot cause a move
_ => {}
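The rewritten check above now looks at the type of the moved place itself: when the thing that was moved is a `&mut` reference, the error can suggest reborrowing at the move site so the reference remains usable afterwards. A self-contained example of the scenario (illustrative, intentionally fails to compile; the exact diagnostic wording may differ):

    fn consume<T>(_: T) {}

    fn main() {
        let mut v = vec![1, 2, 3];
        let r = &mut v;
        consume(r);     // the `&mut` reference is moved here
        consume(r);     // error[E0382]: use of moved value: `r`
        // Suggested fix: pass `&mut *r` at the first call site, so only a
        // temporary reborrow is moved and `r` stays valid for later uses.
    }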
diff --git a/compiler/rustc_borrowck/src/diagnostics/move_errors.rs b/compiler/rustc_borrowck/src/diagnostics/move_errors.rs
index cb3cd479a..5a47f4567 100644
--- a/compiler/rustc_borrowck/src/diagnostics/move_errors.rs
+++ b/compiler/rustc_borrowck/src/diagnostics/move_errors.rs
@@ -88,7 +88,7 @@ impl<'a, 'tcx> MirBorrowckCtxt<'a, 'tcx> {
if let Some(StatementKind::Assign(box (
place,
Rvalue::Use(Operand::Move(move_from)),
- ))) = self.body.basic_blocks()[location.block]
+ ))) = self.body.basic_blocks[location.block]
.statements
.get(location.statement_index)
.map(|stmt| &stmt.kind)
@@ -360,7 +360,7 @@ impl<'a, 'tcx> MirBorrowckCtxt<'a, 'tcx> {
diag.span_label(upvar_span, "captured outer variable");
diag.span_label(
- self.body.span,
+ self.infcx.tcx.def_span(def_id),
format!("captured by this `{closure_kind}` closure"),
);
@@ -401,7 +401,7 @@ impl<'a, 'tcx> MirBorrowckCtxt<'a, 'tcx> {
};
if let Some(use_spans) = use_spans {
self.explain_captures(
- &mut err, span, span, use_spans, move_place, None, "", "", "", false, true,
+ &mut err, span, span, use_spans, move_place, "", "", "", false, true,
);
}
err
diff --git a/compiler/rustc_borrowck/src/diagnostics/mutability_errors.rs b/compiler/rustc_borrowck/src/diagnostics/mutability_errors.rs
index 0ad4abbce..8ad40c0aa 100644
--- a/compiler/rustc_borrowck/src/diagnostics/mutability_errors.rs
+++ b/compiler/rustc_borrowck/src/diagnostics/mutability_errors.rs
@@ -1,23 +1,23 @@
+use rustc_errors::{
+ Applicability, Diagnostic, DiagnosticBuilder, EmissionGuarantee, ErrorGuaranteed,
+};
use rustc_hir as hir;
+use rustc_hir::intravisit::Visitor;
use rustc_hir::Node;
use rustc_middle::hir::map::Map;
use rustc_middle::mir::{Mutability, Place, PlaceRef, ProjectionElem};
use rustc_middle::ty::{self, Ty, TyCtxt};
use rustc_middle::{
hir::place::PlaceBase,
- mir::{
- self, BindingForm, ClearCrossCrate, ImplicitSelfKind, Local, LocalDecl, LocalInfo,
- LocalKind, Location,
- },
+ mir::{self, BindingForm, ClearCrossCrate, Local, LocalDecl, LocalInfo, LocalKind, Location},
};
use rustc_span::source_map::DesugaringKind;
use rustc_span::symbol::{kw, Symbol};
-use rustc_span::{BytePos, Span};
+use rustc_span::{sym, BytePos, Span};
use crate::diagnostics::BorrowedContentSource;
use crate::MirBorrowckCtxt;
use rustc_const_eval::util::collect_writes::FindAssignments;
-use rustc_errors::{Applicability, Diagnostic};
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub(crate) enum AccessKind {
@@ -169,6 +169,7 @@ impl<'a, 'tcx> MirBorrowckCtxt<'a, 'tcx> {
..,
ProjectionElem::Index(_)
| ProjectionElem::ConstantIndex { .. }
+ | ProjectionElem::OpaqueCast { .. }
| ProjectionElem::Subslice { .. }
| ProjectionElem::Downcast(..),
],
@@ -309,7 +310,7 @@ impl<'a, 'tcx> MirBorrowckCtxt<'a, 'tcx> {
&& !matches!(
decl.local_info,
Some(box LocalInfo::User(ClearCrossCrate::Set(BindingForm::ImplicitSelf(
- ImplicitSelfKind::MutRef
+ hir::ImplicitSelfKind::MutRef
))))
)
{
@@ -364,7 +365,7 @@ impl<'a, 'tcx> MirBorrowckCtxt<'a, 'tcx> {
if let Some(Node::Pat(pat)) = self.infcx.tcx.hir().find(upvar_hir_id)
&& let hir::PatKind::Binding(
- hir::BindingAnnotation::Unannotated,
+ hir::BindingAnnotation::NONE,
_,
upvar_ident,
_,
@@ -614,6 +615,7 @@ impl<'a, 'tcx> MirBorrowckCtxt<'a, 'tcx> {
"trait `IndexMut` is required to modify indexed content, \
but it is not implemented for `{ty}`",
));
+ self.suggest_map_index_mut_alternatives(ty, &mut err, span);
}
_ => (),
}
@@ -627,6 +629,127 @@ impl<'a, 'tcx> MirBorrowckCtxt<'a, 'tcx> {
self.buffer_error(err);
}
+ fn suggest_map_index_mut_alternatives(
+ &self,
+ ty: Ty<'_>,
+ err: &mut DiagnosticBuilder<'_, ErrorGuaranteed>,
+ span: Span,
+ ) {
+ let Some(adt) = ty.ty_adt_def() else { return };
+ let did = adt.did();
+ if self.infcx.tcx.is_diagnostic_item(sym::HashMap, did)
+ || self.infcx.tcx.is_diagnostic_item(sym::BTreeMap, did)
+ {
+ struct V<'a, 'b, 'tcx, G: EmissionGuarantee> {
+ assign_span: Span,
+ err: &'a mut DiagnosticBuilder<'b, G>,
+ ty: Ty<'tcx>,
+ suggested: bool,
+ }
+ impl<'a, 'b: 'a, 'hir, 'tcx, G: EmissionGuarantee> Visitor<'hir> for V<'a, 'b, 'tcx, G> {
+ fn visit_stmt(&mut self, stmt: &'hir hir::Stmt<'hir>) {
+ hir::intravisit::walk_stmt(self, stmt);
+ let expr = match stmt.kind {
+ hir::StmtKind::Semi(expr) | hir::StmtKind::Expr(expr) => expr,
+ hir::StmtKind::Local(hir::Local { init: Some(expr), .. }) => expr,
+ _ => {
+ return;
+ }
+ };
+ if let hir::ExprKind::Assign(place, rv, _sp) = expr.kind
+ && let hir::ExprKind::Index(val, index) = place.kind
+ && (expr.span == self.assign_span || place.span == self.assign_span)
+ {
+ // val[index] = rv;
+ // ---------- place
+ self.err.multipart_suggestions(
+ &format!(
+ "to modify a `{}`, use `.get_mut()`, `.insert()` or the entry API",
+ self.ty,
+ ),
+ vec![
+ vec![ // val.insert(index, rv);
+ (
+ val.span.shrink_to_hi().with_hi(index.span.lo()),
+ ".insert(".to_string(),
+ ),
+ (
+ index.span.shrink_to_hi().with_hi(rv.span.lo()),
+ ", ".to_string(),
+ ),
+ (rv.span.shrink_to_hi(), ")".to_string()),
+ ],
+ vec![ // val.get_mut(index).map(|v| { *v = rv; });
+ (
+ val.span.shrink_to_hi().with_hi(index.span.lo()),
+ ".get_mut(".to_string(),
+ ),
+ (
+ index.span.shrink_to_hi().with_hi(place.span.hi()),
+ ").map(|val| { *val".to_string(),
+ ),
+ (
+ rv.span.shrink_to_hi(),
+ "; })".to_string(),
+ ),
+ ],
+ vec![ // let x = val.entry(index).or_insert(rv);
+ (val.span.shrink_to_lo(), "let val = ".to_string()),
+ (
+ val.span.shrink_to_hi().with_hi(index.span.lo()),
+ ".entry(".to_string(),
+ ),
+ (
+ index.span.shrink_to_hi().with_hi(rv.span.lo()),
+ ").or_insert(".to_string(),
+ ),
+ (rv.span.shrink_to_hi(), ")".to_string()),
+ ],
+ ].into_iter(),
+ Applicability::MachineApplicable,
+ );
+ self.suggested = true;
+ } else if let hir::ExprKind::MethodCall(_path, receiver, _, sp) = expr.kind
+ && let hir::ExprKind::Index(val, index) = receiver.kind
+ && expr.span == self.assign_span
+ {
+ // val[index].path(args..);
+ self.err.multipart_suggestion(
+ &format!("to modify a `{}` use `.get_mut()`", self.ty),
+ vec![
+ (
+ val.span.shrink_to_hi().with_hi(index.span.lo()),
+ ".get_mut(".to_string(),
+ ),
+ (
+ index.span.shrink_to_hi().with_hi(receiver.span.hi()),
+ ").map(|val| val".to_string(),
+ ),
+ (sp.shrink_to_hi(), ")".to_string()),
+ ],
+ Applicability::MachineApplicable,
+ );
+ self.suggested = true;
+ }
+ }
+ }
+ let hir_map = self.infcx.tcx.hir();
+ let def_id = self.body.source.def_id();
+ let hir_id = hir_map.local_def_id_to_hir_id(def_id.as_local().unwrap());
+ let node = hir_map.find(hir_id);
+ let Some(hir::Node::Item(item)) = node else { return; };
+ let hir::ItemKind::Fn(.., body_id) = item.kind else { return; };
+ let body = self.infcx.tcx.hir().body(body_id);
+ let mut v = V { assign_span: span, err, ty, suggested: false };
+ v.visit_body(body);
+ if !v.suggested {
+ err.help(&format!(
+ "to modify a `{ty}`, use `.get_mut()`, `.insert()` or the entry API",
+ ));
+ }
+ }
+ }
+
/// User cannot make signature of a trait mutable without changing the
/// trait. So we find if this error belongs to a trait and if so we move
/// suggestion to the trait or disable it if it is out of scope of this crate
@@ -786,11 +909,7 @@ impl<'a, 'tcx> MirBorrowckCtxt<'a, 'tcx> {
[
Expr {
kind:
- MethodCall(
- path_segment,
- _args,
- span,
- ),
+ MethodCall(path_segment, _, _, span),
hir_id,
..
},
@@ -810,10 +929,11 @@ impl<'a, 'tcx> MirBorrowckCtxt<'a, 'tcx> {
_,
) = hir_map.body(fn_body_id).value.kind
{
- let opt_suggestions = path_segment
- .hir_id
- .map(|path_hir_id| self.infcx.tcx.typeck(path_hir_id.owner))
- .and_then(|typeck| typeck.type_dependent_def_id(*hir_id))
+ let opt_suggestions = self
+ .infcx
+ .tcx
+ .typeck(path_segment.hir_id.owner.def_id)
+ .type_dependent_def_id(*hir_id)
.and_then(|def_id| self.infcx.tcx.impl_of_method(def_id))
.map(|def_id| self.infcx.tcx.associated_items(def_id))
.map(|assoc_items| {
@@ -851,6 +971,7 @@ impl<'a, 'tcx> MirBorrowckCtxt<'a, 'tcx> {
let hir = self.infcx.tcx.hir();
let closure_id = self.mir_hir_id();
+ let closure_span = self.infcx.tcx.def_span(self.mir_def_id());
let fn_call_id = hir.get_parent_node(closure_id);
let node = hir.get(fn_call_id);
let def_id = hir.enclosing_body_owner(fn_call_id);
@@ -902,7 +1023,7 @@ impl<'a, 'tcx> MirBorrowckCtxt<'a, 'tcx> {
if let Some(span) = arg {
err.span_label(span, "change this to accept `FnMut` instead of `Fn`");
err.span_label(func.span, "expects `Fn` instead of `FnMut`");
- err.span_label(self.body.span, "in this closure");
+ err.span_label(closure_span, "in this closure");
look_at_return = false;
}
}
@@ -911,7 +1032,7 @@ impl<'a, 'tcx> MirBorrowckCtxt<'a, 'tcx> {
if look_at_return && hir.get_return_block(closure_id).is_some() {
// ...otherwise we are probably in the tail expression of the function, point at the
// return type.
- match hir.get_by_def_id(hir.get_parent_item(fn_call_id)) {
+ match hir.get_by_def_id(hir.get_parent_item(fn_call_id).def_id) {
hir::Node::Item(hir::Item { ident, kind: hir::ItemKind::Fn(sig, ..), .. })
| hir::Node::TraitItem(hir::TraitItem {
ident,
@@ -928,7 +1049,7 @@ impl<'a, 'tcx> MirBorrowckCtxt<'a, 'tcx> {
sig.decl.output.span(),
"change this to return `FnMut` instead of `Fn`",
);
- err.span_label(self.body.span, "in this closure");
+ err.span_label(closure_span, "in this closure");
}
_ => {}
}
@@ -952,7 +1073,7 @@ fn mut_borrow_of_mutable_ref(local_decl: &LocalDecl<'_>, local_name: Option<Symb
//
// Deliberately fall into this case for all implicit self types,
// so that we don't fall in to the next case with them.
- *kind == mir::ImplicitSelfKind::MutRef
+ *kind == hir::ImplicitSelfKind::MutRef
}
_ if Some(kw::SelfLower) == local_name => {
// Otherwise, check if the name is the `self` keyword - in which case
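The new `suggest_map_index_mut_alternatives` above targets assignments through `map[key]`, which fail because `HashMap` and `BTreeMap` implement `Index` but not `IndexMut`. A self-contained user-level example of the failing pattern and the three rewrites the suggestion offers (illustrative code, not compiler internals):

    use std::collections::HashMap;

    fn main() {
        let mut scores: HashMap<&str, u32> = HashMap::new();
        // scores["alice"] = 1;  // error: trait `IndexMut` is required to modify
        //                       // indexed content, but `HashMap` does not implement it

        // The three alternatives the suggestion can rewrite to:
        scores.insert("alice", 1);                         // plain insert
        scores.get_mut("alice").map(|val| { *val = 2; });  // modify only if present
        let val = scores.entry("alice").or_insert(3);      // entry API
        *val += 1;
    }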
diff --git a/compiler/rustc_borrowck/src/diagnostics/outlives_suggestion.rs b/compiler/rustc_borrowck/src/diagnostics/outlives_suggestion.rs
index d359d7efb..35c3df768 100644
--- a/compiler/rustc_borrowck/src/diagnostics/outlives_suggestion.rs
+++ b/compiler/rustc_borrowck/src/diagnostics/outlives_suggestion.rs
@@ -6,7 +6,6 @@ use rustc_errors::Diagnostic;
use rustc_middle::ty::RegionVid;
use smallvec::SmallVec;
use std::collections::BTreeMap;
-use tracing::debug;
use crate::MirBorrowckCtxt;
diff --git a/compiler/rustc_borrowck/src/diagnostics/region_errors.rs b/compiler/rustc_borrowck/src/diagnostics/region_errors.rs
index 176090c3b..15230718d 100644
--- a/compiler/rustc_borrowck/src/diagnostics/region_errors.rs
+++ b/compiler/rustc_borrowck/src/diagnostics/region_errors.rs
@@ -1,3 +1,5 @@
+#![deny(rustc::untranslatable_diagnostic)]
+#![deny(rustc::diagnostic_outside_of_impl)]
//! Error reporting machinery for lifetime errors.
use rustc_data_structures::fx::FxHashSet;
@@ -23,10 +25,13 @@ use rustc_span::symbol::{kw, sym, Ident};
use rustc_span::Span;
use crate::borrowck_errors;
-use crate::session_diagnostics::GenericDoesNotLiveLongEnough;
+use crate::session_diagnostics::{
+ FnMutError, FnMutReturnTypeErr, GenericDoesNotLiveLongEnough, LifetimeOutliveErr,
+ LifetimeReturnCategoryErr, RequireStaticErr, VarHereDenote,
+};
use super::{OutlivesSuggestionBuilder, RegionName};
-use crate::region_infer::BlameConstraint;
+use crate::region_infer::{BlameConstraint, ExtraConstraintInfo};
use crate::{
nll::ConstraintDescription,
region_infer::{values::RegionElement, TypeTest},
@@ -181,7 +186,7 @@ impl<'a, 'tcx> MirBorrowckCtxt<'a, 'tcx> {
if let Some(lower_bound_region) = lower_bound_region {
let generic_ty = type_test.generic_kind.to_ty(self.infcx.tcx);
let origin = RelateParamBound(type_test_span, generic_ty, None);
- self.buffer_error(self.infcx.construct_generic_bound_failure(
+ self.buffer_error(self.infcx.err_ctxt().construct_generic_bound_failure(
self.body.source.def_id().expect_local(),
type_test_span,
Some(origin),
@@ -229,7 +234,6 @@ impl<'a, 'tcx> MirBorrowckCtxt<'a, 'tcx> {
// Find the code to blame for the fact that `longer_fr` outlives `error_fr`.
let (_, cause) = self.regioncx.find_outlives_blame_span(
- &self.body,
longer_fr,
NllRegionVariableOrigin::Placeholder(placeholder),
error_vid,
@@ -277,7 +281,8 @@ impl<'a, 'tcx> MirBorrowckCtxt<'a, 'tcx> {
let tcx = self.infcx.tcx;
match tcx.hir().get_if_local(def_id) {
Some(Node::ImplItem(impl_item)) => {
- match tcx.hir().find_by_def_id(tcx.hir().get_parent_item(impl_item.hir_id())) {
+ match tcx.hir().find_by_def_id(tcx.hir().get_parent_item(impl_item.hir_id()).def_id)
+ {
Some(Node::Item(Item {
kind: ItemKind::Impl(hir::Impl { self_ty, .. }),
..
@@ -287,7 +292,7 @@ impl<'a, 'tcx> MirBorrowckCtxt<'a, 'tcx> {
}
Some(Node::TraitItem(trait_item)) => {
let trait_did = tcx.hir().get_parent_item(trait_item.hir_id());
- match tcx.hir().find_by_def_id(trait_did) {
+ match tcx.hir().find_by_def_id(trait_did.def_id) {
Some(Node::Item(Item { kind: ItemKind::Trait(..), .. })) => {
// The method being called is defined in the `trait`, but the `'static`
// obligation comes from the `impl`. Find that `impl` so that we can point
@@ -336,7 +341,7 @@ impl<'a, 'tcx> MirBorrowckCtxt<'a, 'tcx> {
/// Report an error because the universal region `fr` was required to outlive
/// `outlived_fr` but it is not known to do so. For example:
///
- /// ```compile_fail,E0312
+ /// ```compile_fail
/// fn foo<'a, 'b>(x: &'a u32) -> &'b u32 { x }
/// ```
///
@@ -350,16 +355,18 @@ impl<'a, 'tcx> MirBorrowckCtxt<'a, 'tcx> {
) {
debug!("report_region_error(fr={:?}, outlived_fr={:?})", fr, outlived_fr);
- let BlameConstraint { category, cause, variance_info, from_closure: _ } =
- self.regioncx.best_blame_constraint(&self.body, fr, fr_origin, |r| {
+ let (blame_constraint, extra_info) =
+ self.regioncx.best_blame_constraint(fr, fr_origin, |r| {
self.regioncx.provides_universal_region(r, fr, outlived_fr)
});
+ let BlameConstraint { category, cause, variance_info, .. } = blame_constraint;
debug!("report_region_error: category={:?} {:?} {:?}", category, cause, variance_info);
// Check if we can use one of the "nice region errors".
if let (Some(f), Some(o)) = (self.to_error_region(fr), self.to_error_region(outlived_fr)) {
- let nice = NiceRegionError::new_from_span(self.infcx, cause.span, o, f);
+ let infer_err = self.infcx.err_ctxt();
+ let nice = NiceRegionError::new_from_span(&infer_err, cause.span, o, f);
if let Some(diag) = nice.try_report_from_nll() {
self.buffer_error(diag);
return;
@@ -462,6 +469,14 @@ impl<'a, 'tcx> MirBorrowckCtxt<'a, 'tcx> {
}
}
+ for extra in extra_info {
+ match extra {
+ ExtraConstraintInfo::PlaceholderFromPredicate(span) => {
+ diag.span_note(span, format!("due to current limitations in the borrow checker, this implies a `'static` lifetime"));
+ }
+ }
+ }
+
self.buffer_error(diag);
}
@@ -488,12 +503,6 @@ impl<'a, 'tcx> MirBorrowckCtxt<'a, 'tcx> {
) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> {
let ErrorConstraintInfo { outlived_fr, span, .. } = errci;
- let mut diag = self
- .infcx
- .tcx
- .sess
- .struct_span_err(*span, "captured variable cannot escape `FnMut` closure body");
-
let mut output_ty = self.regioncx.universal_regions().unnormalized_output_ty;
if let ty::Opaque(def_id, _) = *output_ty.kind() {
output_ty = self.infcx.tcx.type_of(def_id)
@@ -501,19 +510,20 @@ impl<'a, 'tcx> MirBorrowckCtxt<'a, 'tcx> {
debug!("report_fnmut_error: output_ty={:?}", output_ty);
- let message = match output_ty.kind() {
- ty::Closure(_, _) => {
- "returns a closure that contains a reference to a captured variable, which then \
- escapes the closure body"
- }
- ty::Adt(def, _) if self.infcx.tcx.is_diagnostic_item(sym::gen_future, def.did()) => {
- "returns an `async` block that contains a reference to a captured variable, which then \
- escapes the closure body"
- }
- _ => "returns a reference to a captured variable which escapes the closure body",
+ let err = FnMutError {
+ span: *span,
+ ty_err: match output_ty.kind() {
+ ty::Closure(_, _) => FnMutReturnTypeErr::ReturnClosure { span: *span },
+ ty::Adt(def, _)
+ if self.infcx.tcx.is_diagnostic_item(sym::gen_future, def.did()) =>
+ {
+ FnMutReturnTypeErr::ReturnAsyncBlock { span: *span }
+ }
+ _ => FnMutReturnTypeErr::ReturnRef { span: *span },
+ },
};
- diag.span_label(*span, message);
+ let mut diag = self.infcx.tcx.sess.create_err(err);
if let ReturnConstraint::ClosureUpvar(upvar_field) = kind {
let def_id = match self.regioncx.universal_regions().defining_ty {
@@ -532,20 +542,16 @@ impl<'a, 'tcx> MirBorrowckCtxt<'a, 'tcx> {
let upvars_map = self.infcx.tcx.upvars_mentioned(def_id).unwrap();
let upvar_def_span = self.infcx.tcx.hir().span(def_hir);
let upvar_span = upvars_map.get(&def_hir).unwrap().span;
- diag.span_label(upvar_def_span, "variable defined here");
- diag.span_label(upvar_span, "variable captured here");
+ diag.subdiagnostic(VarHereDenote::Defined { span: upvar_def_span });
+ diag.subdiagnostic(VarHereDenote::Captured { span: upvar_span });
}
}
if let Some(fr_span) = self.give_region_a_name(*outlived_fr).unwrap().span() {
- diag.span_label(fr_span, "inferred to be a `FnMut` closure");
+ diag.subdiagnostic(VarHereDenote::FnMutInferred { span: fr_span });
}
- diag.note(
- "`FnMut` closures only have access to their captured variables while they are \
- executing...",
- );
- diag.note("...therefore, they cannot allow references to captured variables to escape");
+ self.suggest_move_on_borrowing_closure(&mut diag);
diag
}
@@ -562,6 +568,7 @@ impl<'a, 'tcx> MirBorrowckCtxt<'a, 'tcx> {
/// LL | ref_obj(x)
/// | ^^^^^^^^^^ `x` escapes the function body here
/// ```
+ #[instrument(level = "debug", skip(self))]
fn report_escaping_data_error(
&self,
errci: &ErrorConstraintInfo<'tcx>,
@@ -680,42 +687,37 @@ impl<'a, 'tcx> MirBorrowckCtxt<'a, 'tcx> {
..
} = errci;
- let mut diag =
- self.infcx.tcx.sess.struct_span_err(*span, "lifetime may not live long enough");
-
let (_, mir_def_name) =
self.infcx.tcx.article_and_description(self.mir_def_id().to_def_id());
+ let err = LifetimeOutliveErr { span: *span };
+ let mut diag = self.infcx.tcx.sess.create_err(err);
+
let fr_name = self.give_region_a_name(*fr).unwrap();
fr_name.highlight_region_name(&mut diag);
let outlived_fr_name = self.give_region_a_name(*outlived_fr).unwrap();
outlived_fr_name.highlight_region_name(&mut diag);
- match (category, outlived_fr_is_local, fr_is_local) {
- (ConstraintCategory::Return(_), true, _) => {
- diag.span_label(
- *span,
- format!(
- "{mir_def_name} was supposed to return data with lifetime `{outlived_fr_name}` but it is returning \
- data with lifetime `{fr_name}`",
- ),
- );
- }
- _ => {
- diag.span_label(
- *span,
- format!(
- "{}requires that `{}` must outlive `{}`",
- category.description(),
- fr_name,
- outlived_fr_name,
- ),
- );
- }
- }
+ let err_category = match (category, outlived_fr_is_local, fr_is_local) {
+ (ConstraintCategory::Return(_), true, _) => LifetimeReturnCategoryErr::WrongReturn {
+ span: *span,
+ mir_def_name,
+ outlived_fr_name,
+ fr_name: &fr_name,
+ },
+ _ => LifetimeReturnCategoryErr::ShortReturn {
+ span: *span,
+ category_desc: category.description(),
+ free_region_name: &fr_name,
+ outlived_fr_name,
+ },
+ };
+
+ diag.subdiagnostic(err_category);
self.add_static_impl_trait_suggestion(&mut diag, *fr, fr_name, *outlived_fr);
self.suggest_adding_lifetime_params(&mut diag, *fr, *outlived_fr);
+ self.suggest_move_on_borrowing_closure(&mut diag);
diag
}
@@ -783,7 +785,7 @@ impl<'a, 'tcx> MirBorrowckCtxt<'a, 'tcx> {
fn maybe_suggest_constrain_dyn_trait_impl(
&self,
- diag: &mut DiagnosticBuilder<'tcx, ErrorGuaranteed>,
+ diag: &mut Diagnostic,
f: Region<'tcx>,
o: Region<'tcx>,
category: &ConstraintCategory<'tcx>,
@@ -860,7 +862,7 @@ impl<'a, 'tcx> MirBorrowckCtxt<'a, 'tcx> {
ident.span,
"calling this method introduces the `impl`'s 'static` requirement",
);
- err.span_note(multi_span, "the used `impl` has a `'static` requirement");
+ err.subdiagnostic(RequireStaticErr::UsedImpl { multi_span });
err.span_suggestion_verbose(
span.shrink_to_hi(),
"consider relaxing the implicit `'static` requirement",
@@ -901,4 +903,46 @@ impl<'a, 'tcx> MirBorrowckCtxt<'a, 'tcx> {
suggest_adding_lifetime_params(self.infcx.tcx, sub, ty_sup, ty_sub, diag);
}
+
+ fn suggest_move_on_borrowing_closure(&self, diag: &mut Diagnostic) {
+ let map = self.infcx.tcx.hir();
+ let body_id = map.body_owned_by(self.mir_def_id());
+ let expr = &map.body(body_id).value;
+ let mut closure_span = None::<rustc_span::Span>;
+ match expr.kind {
+ hir::ExprKind::MethodCall(.., args, _) => {
+ for arg in args {
+ if let hir::ExprKind::Closure(hir::Closure {
+ capture_clause: hir::CaptureBy::Ref,
+ ..
+ }) = arg.kind
+ {
+ closure_span = Some(arg.span.shrink_to_lo());
+ break;
+ }
+ }
+ }
+ hir::ExprKind::Block(blk, _) => {
+ if let Some(ref expr) = blk.expr {
+ // only when the block is a closure
+ if let hir::ExprKind::Closure(hir::Closure {
+ capture_clause: hir::CaptureBy::Ref,
+ ..
+ }) = expr.kind
+ {
+ closure_span = Some(expr.span.shrink_to_lo());
+ }
+ }
+ }
+ _ => {}
+ }
+ if let Some(closure_span) = closure_span {
+ diag.span_suggestion_verbose(
+ closure_span,
+ "consider adding 'move' keyword before the nested closure",
+ "move ",
+ Applicability::MaybeIncorrect,
+ );
+ }
+ }
}
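The new `suggest_move_on_borrowing_closure` above looks for a nested closure that captures by reference (`CaptureBy::Ref`) either as a method-call argument or as the tail expression of the body, and suggests prefixing it with `move`. A small example of the underlying problem such code runs into (illustrative; whether the suggestion fires depends on the exact HIR shape matched by the visitor above):

    fn make_counters() -> Vec<Box<dyn Fn() -> i32>> {
        // Without `move`, the inner closure borrows `i`, which lives only for one
        // call of the outer closure, so the boxed closure could not be returned.
        (0..3).map(|i| Box::new(move || i) as Box<dyn Fn() -> i32>).collect()
    }

    fn main() {
        let counters = make_counters();
        assert_eq!(counters[2](), 2);
    }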
diff --git a/compiler/rustc_borrowck/src/diagnostics/region_name.rs b/compiler/rustc_borrowck/src/diagnostics/region_name.rs
index a87e8bd5b..c044dbaba 100644
--- a/compiler/rustc_borrowck/src/diagnostics/region_name.rs
+++ b/compiler/rustc_borrowck/src/diagnostics/region_name.rs
@@ -251,7 +251,8 @@ impl<'tcx> MirBorrowckCtxt<'_, 'tcx> {
.or_else(|| self.give_name_if_anonymous_region_appears_in_upvars(fr))
.or_else(|| self.give_name_if_anonymous_region_appears_in_output(fr))
.or_else(|| self.give_name_if_anonymous_region_appears_in_yield_ty(fr))
- .or_else(|| self.give_name_if_anonymous_region_appears_in_impl_signature(fr));
+ .or_else(|| self.give_name_if_anonymous_region_appears_in_impl_signature(fr))
+ .or_else(|| self.give_name_if_anonymous_region_appears_in_arg_position_impl_trait(fr));
if let Some(ref value) = value {
self.region_names.try_borrow_mut().unwrap().insert(fr, value.clone());
@@ -265,7 +266,7 @@ impl<'tcx> MirBorrowckCtxt<'_, 'tcx> {
/// *user* has a name for. In that case, we'll be able to map
/// `fr` to a `Region<'tcx>`, and that region will be one of
/// named variants.
- #[tracing::instrument(level = "trace", skip(self))]
+ #[instrument(level = "trace", skip(self))]
fn give_name_from_error_region(&self, fr: RegionVid) -> Option<RegionName> {
let error_region = self.to_error_region(fr)?;
@@ -357,11 +358,7 @@ impl<'tcx> MirBorrowckCtxt<'_, 'tcx> {
ty::BoundRegionKind::BrAnon(_) => None,
},
- ty::ReLateBound(..)
- | ty::ReVar(..)
- | ty::RePlaceholder(..)
- | ty::ReEmpty(_)
- | ty::ReErased => None,
+ ty::ReLateBound(..) | ty::ReVar(..) | ty::RePlaceholder(..) | ty::ReErased => None,
}
}
@@ -373,7 +370,7 @@ impl<'tcx> MirBorrowckCtxt<'_, 'tcx> {
/// | fn foo(x: &u32) { .. }
/// ------- fully elaborated type of `x` is `&'1 u32`
/// ```
- #[tracing::instrument(level = "trace", skip(self))]
+ #[instrument(level = "trace", skip(self))]
fn give_name_if_anonymous_region_appears_in_arguments(
&self,
fr: RegionVid,
@@ -662,7 +659,7 @@ impl<'tcx> MirBorrowckCtxt<'_, 'tcx> {
/// | let x = Some(&22);
/// - fully elaborated type of `x` is `Option<&'1 u32>`
/// ```
- #[tracing::instrument(level = "trace", skip(self))]
+ #[instrument(level = "trace", skip(self))]
fn give_name_if_anonymous_region_appears_in_upvars(&self, fr: RegionVid) -> Option<RegionName> {
let upvar_index = self.regioncx.get_upvar_index_for_region(self.infcx.tcx, fr)?;
let (upvar_name, upvar_span) = self.regioncx.get_upvar_name_and_span_for_region(
@@ -682,7 +679,7 @@ impl<'tcx> MirBorrowckCtxt<'_, 'tcx> {
/// must be a closure since, in a free fn, such an argument would
/// have to either also appear in an argument (if using elision)
/// or be early bound (named, not in argument).
- #[tracing::instrument(level = "trace", skip(self))]
+ #[instrument(level = "trace", skip(self))]
fn give_name_if_anonymous_region_appears_in_output(&self, fr: RegionVid) -> Option<RegionName> {
let tcx = self.infcx.tcx;
let hir = tcx.hir();
@@ -711,7 +708,8 @@ impl<'tcx> MirBorrowckCtxt<'_, 'tcx> {
hir::AsyncGeneratorKind::Block => " of async block",
hir::AsyncGeneratorKind::Closure => " of async closure",
hir::AsyncGeneratorKind::Fn => {
- let parent_item = hir.get_by_def_id(hir.get_parent_item(mir_hir_id));
+ let parent_item =
+ hir.get_by_def_id(hir.get_parent_item(mir_hir_id).def_id);
let output = &parent_item
.fn_decl()
.expect("generator lowered from async fn should be in fn")
@@ -772,7 +770,7 @@ impl<'tcx> MirBorrowckCtxt<'_, 'tcx> {
fn get_future_inner_return_ty(&self, hir_ty: &'tcx hir::Ty<'tcx>) -> &'tcx hir::Ty<'tcx> {
let hir = self.infcx.tcx.hir();
- let hir::TyKind::OpaqueDef(id, _) = hir_ty.kind else {
+ let hir::TyKind::OpaqueDef(id, _, _) = hir_ty.kind else {
span_bug!(
hir_ty.span,
"lowered return type of async fn is not OpaqueDef: {:?}",
@@ -814,7 +812,7 @@ impl<'tcx> MirBorrowckCtxt<'_, 'tcx> {
}
}
- #[tracing::instrument(level = "trace", skip(self))]
+ #[instrument(level = "trace", skip(self))]
fn give_name_if_anonymous_region_appears_in_yield_ty(
&self,
fr: RegionVid,
@@ -867,20 +865,13 @@ impl<'tcx> MirBorrowckCtxt<'_, 'tcx> {
};
let tcx = self.infcx.tcx;
- let body_parent_did = tcx.opt_parent(self.mir_def_id().to_def_id())?;
- if tcx.parent(region.def_id) != body_parent_did
- || tcx.def_kind(body_parent_did) != DefKind::Impl
- {
+ let region_parent = tcx.parent(region.def_id);
+ if tcx.def_kind(region_parent) != DefKind::Impl {
return None;
}
- let mut found = false;
- tcx.fold_regions(tcx.type_of(body_parent_did), |r: ty::Region<'tcx>, _| {
- if *r == ty::ReEarlyBound(region) {
- found = true;
- }
- r
- });
+ let found = tcx
+ .any_free_region_meets(&tcx.type_of(region_parent), |r| *r == ty::ReEarlyBound(region));
Some(RegionName {
name: self.synthesize_region_name(),
@@ -893,4 +884,92 @@ impl<'tcx> MirBorrowckCtxt<'_, 'tcx> {
),
})
}
+
+ fn give_name_if_anonymous_region_appears_in_arg_position_impl_trait(
+ &self,
+ fr: RegionVid,
+ ) -> Option<RegionName> {
+ let ty::ReEarlyBound(region) = *self.to_error_region(fr)? else {
+ return None;
+ };
+ if region.has_name() {
+ return None;
+ };
+
+ let predicates = self
+ .infcx
+ .tcx
+ .predicates_of(self.body.source.def_id())
+ .instantiate_identity(self.infcx.tcx)
+ .predicates;
+
+ if let Some(upvar_index) = self
+ .regioncx
+ .universal_regions()
+ .defining_ty
+ .upvar_tys()
+ .position(|ty| self.any_param_predicate_mentions(&predicates, ty, region))
+ {
+ let (upvar_name, upvar_span) = self.regioncx.get_upvar_name_and_span_for_region(
+ self.infcx.tcx,
+ &self.upvars,
+ upvar_index,
+ );
+ let region_name = self.synthesize_region_name();
+
+ Some(RegionName {
+ name: region_name,
+ source: RegionNameSource::AnonRegionFromUpvar(upvar_span, upvar_name),
+ })
+ } else if let Some(arg_index) = self
+ .regioncx
+ .universal_regions()
+ .unnormalized_input_tys
+ .iter()
+ .position(|ty| self.any_param_predicate_mentions(&predicates, *ty, region))
+ {
+ let (arg_name, arg_span) = self.regioncx.get_argument_name_and_span_for_region(
+ self.body,
+ &self.local_names,
+ arg_index,
+ );
+ let region_name = self.synthesize_region_name();
+
+ Some(RegionName {
+ name: region_name,
+ source: RegionNameSource::AnonRegionFromArgument(
+ RegionNameHighlight::CannotMatchHirTy(arg_span, arg_name?.to_string()),
+ ),
+ })
+ } else {
+ None
+ }
+ }
+
+ fn any_param_predicate_mentions(
+ &self,
+ predicates: &[ty::Predicate<'tcx>],
+ ty: Ty<'tcx>,
+ region: ty::EarlyBoundRegion,
+ ) -> bool {
+ let tcx = self.infcx.tcx;
+ ty.walk().any(|arg| {
+ if let ty::GenericArgKind::Type(ty) = arg.unpack()
+ && let ty::Param(_) = ty.kind()
+ {
+ predicates.iter().any(|pred| {
+ match pred.kind().skip_binder() {
+ ty::PredicateKind::Trait(data) if data.self_ty() == ty => {}
+ ty::PredicateKind::Projection(data) if data.projection_ty.self_ty() == ty => {}
+ _ => return false,
+ }
+ tcx.any_free_region_meets(pred, |r| {
+ *r == ty::ReEarlyBound(region)
+ })
+ })
+ } else {
+ false
+ }
+ })
+ }
}
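
A standalone sketch, not part of the patch: the new `give_name_if_anonymous_region_appears_in_arg_position_impl_trait` fallback synthesizes a name for regions that only occur in the bounds of argument-position `impl Trait`. Such regions have no user-written name because the `impl Trait` argument desugars to an anonymous generic parameter, roughly as below (function names here are illustrative):

```rust
use std::fmt::Display;

// What the user writes: the bound hangs off an unnamed parameter.
fn show_apit(x: impl Display + 'static) -> String {
    x.to_string()
}

// Roughly how it desugars: a fresh generic parameter the user never named,
// so a lifetime constrained only through its bounds has no source-level name
// to cite in diagnostics and one must be synthesized.
fn show_desugared<T: Display + 'static>(x: T) -> String {
    x.to_string()
}

fn main() {
    assert_eq!(show_apit(5), show_desugared(5));
}
```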
diff --git a/compiler/rustc_borrowck/src/invalidation.rs b/compiler/rustc_borrowck/src/invalidation.rs
index ec521b1cf..3157f861d 100644
--- a/compiler/rustc_borrowck/src/invalidation.rs
+++ b/compiler/rustc_borrowck/src/invalidation.rs
@@ -1,6 +1,6 @@
use rustc_data_structures::graph::dominators::Dominators;
use rustc_middle::mir::visit::Visitor;
-use rustc_middle::mir::{BasicBlock, Body, Location, Place, Rvalue};
+use rustc_middle::mir::{self, BasicBlock, Body, Location, NonDivergingIntrinsic, Place, Rvalue};
use rustc_middle::mir::{BorrowKind, Mutability, Operand};
use rustc_middle::mir::{InlineAsmOperand, Terminator, TerminatorKind};
use rustc_middle::mir::{Statement, StatementKind};
@@ -63,23 +63,24 @@ impl<'cx, 'tcx> Visitor<'tcx> for InvalidationGenerator<'cx, 'tcx> {
StatementKind::FakeRead(box (_, _)) => {
// Only relevant for initialized/liveness/safety checks.
}
- StatementKind::CopyNonOverlapping(box rustc_middle::mir::CopyNonOverlapping {
+ StatementKind::Intrinsic(box NonDivergingIntrinsic::Assume(op)) => {
+ self.consume_operand(location, op);
+ }
+ StatementKind::Intrinsic(box NonDivergingIntrinsic::CopyNonOverlapping(mir::CopyNonOverlapping {
ref src,
ref dst,
ref count,
- }) => {
+ })) => {
self.consume_operand(location, src);
self.consume_operand(location, dst);
self.consume_operand(location, count);
}
- StatementKind::Nop
+ // Only relevant for mir typeck
+ StatementKind::AscribeUserType(..)
+ // Doesn't have any language semantics
| StatementKind::Coverage(..)
- | StatementKind::AscribeUserType(..)
- | StatementKind::Retag { .. }
- | StatementKind::StorageLive(..) => {
- // `Nop`, `AscribeUserType`, `Retag`, and `StorageLive` are irrelevant
- // to borrow check.
- }
+ // Does not actually affect borrowck
+ | StatementKind::StorageLive(..) => {}
StatementKind::StorageDead(local) => {
self.access_place(
location,
@@ -88,7 +89,10 @@ impl<'cx, 'tcx> Visitor<'tcx> for InvalidationGenerator<'cx, 'tcx> {
LocalMutationIsAllowed::Yes,
);
}
- StatementKind::Deinit(..) | StatementKind::SetDiscriminant { .. } => {
+ StatementKind::Nop
+ | StatementKind::Retag { .. }
+ | StatementKind::Deinit(..)
+ | StatementKind::SetDiscriminant { .. } => {
bug!("Statement not allowed in this MIR phase")
}
}
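
A minimal sketch with simplified stand-in types (not rustc's real `mir` definitions): `CopyNonOverlapping` is no longer its own `StatementKind` but sits under `StatementKind::Intrinsic(NonDivergingIntrinsic)` next to `Assume`, so the visitor now matches through the box and handles both intrinsics in one arm:

```rust
enum NonDivergingIntrinsic {
    Assume(String), // operand, simplified to a String stand-in
    CopyNonOverlapping { src: String, dst: String, count: String },
}

enum StatementKind {
    Intrinsic(Box<NonDivergingIntrinsic>),
    StorageLive(u32),
}

fn consume_operand(op: &str) {
    println!("consume {op}");
}

fn visit(stmt: &StatementKind) {
    match stmt {
        // Match through the box, then on the intrinsic kind.
        StatementKind::Intrinsic(kind) => match &**kind {
            NonDivergingIntrinsic::Assume(op) => consume_operand(op),
            NonDivergingIntrinsic::CopyNonOverlapping { src, dst, count } => {
                consume_operand(src);
                consume_operand(dst);
                consume_operand(count);
            }
        },
        // Irrelevant to invalidation generation, like StorageLive in the patch.
        StatementKind::StorageLive(_) => {}
    }
}

fn main() {
    visit(&StatementKind::Intrinsic(Box::new(NonDivergingIntrinsic::Assume("_1".into()))));
    visit(&StatementKind::Intrinsic(Box::new(NonDivergingIntrinsic::CopyNonOverlapping {
        src: "_2".into(),
        dst: "_3".into(),
        count: "_4".into(),
    })));
    visit(&StatementKind::StorageLive(0));
}
```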
diff --git a/compiler/rustc_borrowck/src/lib.rs b/compiler/rustc_borrowck/src/lib.rs
index 3d8b07382..abfe253d4 100644
--- a/compiler/rustc_borrowck/src/lib.rs
+++ b/compiler/rustc_borrowck/src/lib.rs
@@ -3,7 +3,6 @@
#![allow(rustc::potential_query_instability)]
#![feature(box_patterns)]
#![feature(let_chains)]
-#![feature(let_else)]
#![feature(min_specialization)]
#![feature(never_type)]
#![feature(rustc_attrs)]
@@ -19,15 +18,15 @@ extern crate tracing;
use rustc_data_structures::fx::{FxHashMap, FxHashSet};
use rustc_data_structures::graph::dominators::Dominators;
-use rustc_errors::{Applicability, Diagnostic, DiagnosticBuilder, ErrorGuaranteed};
+use rustc_errors::{Diagnostic, DiagnosticBuilder, ErrorGuaranteed};
use rustc_hir as hir;
use rustc_hir::def_id::LocalDefId;
use rustc_index::bit_set::ChunkedBitSet;
use rustc_index::vec::IndexVec;
use rustc_infer::infer::{DefiningAnchor, InferCtxt, TyCtxtInferExt};
use rustc_middle::mir::{
- traversal, Body, ClearCrossCrate, Local, Location, Mutability, Operand, Place, PlaceElem,
- PlaceRef, VarDebugInfoContents,
+ traversal, Body, ClearCrossCrate, Local, Location, Mutability, NonDivergingIntrinsic, Operand,
+ Place, PlaceElem, PlaceRef, VarDebugInfoContents,
};
use rustc_middle::mir::{AggregateKind, BasicBlock, BorrowCheckResult, BorrowKind};
use rustc_middle::mir::{Field, ProjectionElem, Promoted, Rvalue, Statement, StatementKind};
@@ -51,6 +50,8 @@ use rustc_mir_dataflow::move_paths::{InitLocation, LookupResult, MoveData, MoveE
use rustc_mir_dataflow::Analysis;
use rustc_mir_dataflow::MoveDataParamEnv;
+use crate::session_diagnostics::VarNeedNotMut;
+
use self::diagnostics::{AccessKind, RegionName};
use self::location::LocationTable;
use self::prefixes::PrefixSet;
@@ -130,14 +131,11 @@ fn mir_borrowck<'tcx>(
debug!("run query mir_borrowck: {}", tcx.def_path_str(def.did.to_def_id()));
let hir_owner = tcx.hir().local_def_id_to_hir_id(def.did).owner;
- let opt_closure_req = tcx
- .infer_ctxt()
- .with_opaque_type_inference(DefiningAnchor::Bind(hir_owner))
- .enter(|infcx| {
- let input_body: &Body<'_> = &input_body.borrow();
- let promoted: &IndexVec<_, _> = &promoted.borrow();
- do_mir_borrowck(&infcx, input_body, promoted, false).0
- });
+ let infcx =
+ tcx.infer_ctxt().with_opaque_type_inference(DefiningAnchor::Bind(hir_owner.def_id)).build();
+ let input_body: &Body<'_> = &input_body.borrow();
+ let promoted: &IndexVec<_, _> = &promoted.borrow();
+ let opt_closure_req = do_mir_borrowck(&infcx, input_body, promoted, false).0;
debug!("mir_borrowck done");
tcx.arena.alloc(opt_closure_req)
@@ -149,8 +147,8 @@ fn mir_borrowck<'tcx>(
/// region ids on which the borrow checking was performed together with Polonius
/// facts.
#[instrument(skip(infcx, input_body, input_promoted), fields(id=?input_body.source.with_opt_param().as_local().unwrap()), level = "debug")]
-fn do_mir_borrowck<'a, 'tcx>(
- infcx: &InferCtxt<'a, 'tcx>,
+fn do_mir_borrowck<'tcx>(
+ infcx: &InferCtxt<'tcx>,
input_body: &Body<'tcx>,
input_promoted: &IndexVec<Promoted, Body<'tcx>>,
return_body_with_facts: bool,
@@ -425,17 +423,9 @@ fn do_mir_borrowck<'a, 'tcx>(
continue;
}
- tcx.struct_span_lint_hir(UNUSED_MUT, lint_root, span, |lint| {
- let mut_span = tcx.sess.source_map().span_until_non_whitespace(span);
- lint.build("variable does not need to be mutable")
- .span_suggestion_short(
- mut_span,
- "remove this `mut`",
- "",
- Applicability::MachineApplicable,
- )
- .emit();
- })
+ let mut_span = tcx.sess.source_map().span_until_non_whitespace(span);
+
+ tcx.emit_spanned_lint(UNUSED_MUT, lint_root, span, VarNeedNotMut { span: mut_span })
}
let tainted_by_errors = mbcx.emit_errors();
@@ -481,7 +471,7 @@ pub struct BodyWithBorrowckFacts<'tcx> {
}
struct MirBorrowckCtxt<'cx, 'tcx> {
- infcx: &'cx InferCtxt<'cx, 'tcx>,
+ infcx: &'cx InferCtxt<'tcx>,
param_env: ParamEnv<'tcx>,
body: &'cx Body<'tcx>,
move_data: &'cx MoveData<'tcx>,
@@ -597,22 +587,19 @@ impl<'cx, 'tcx> rustc_mir_dataflow::ResultsVisitor<'cx, 'tcx> for MirBorrowckCtx
flow_state,
);
}
- StatementKind::CopyNonOverlapping(box rustc_middle::mir::CopyNonOverlapping {
- ..
- }) => {
- span_bug!(
+ StatementKind::Intrinsic(box ref kind) => match kind {
+ NonDivergingIntrinsic::Assume(op) => self.consume_operand(location, (op, span), flow_state),
+ NonDivergingIntrinsic::CopyNonOverlapping(..) => span_bug!(
span,
"Unexpected CopyNonOverlapping, should only appear after lower_intrinsics",
)
}
- StatementKind::Nop
+ // Only relevant for mir typeck
+ StatementKind::AscribeUserType(..)
+ // Doesn't have any language semantics
| StatementKind::Coverage(..)
- | StatementKind::AscribeUserType(..)
- | StatementKind::Retag { .. }
- | StatementKind::StorageLive(..) => {
- // `Nop`, `AscribeUserType`, `Retag`, and `StorageLive` are irrelevant
- // to borrow check.
- }
+ // Does not actually affect borrowck
+ | StatementKind::StorageLive(..) => {}
StatementKind::StorageDead(local) => {
self.access_place(
location,
@@ -622,7 +609,10 @@ impl<'cx, 'tcx> rustc_mir_dataflow::ResultsVisitor<'cx, 'tcx> for MirBorrowckCtx
flow_state,
);
}
- StatementKind::Deinit(..) | StatementKind::SetDiscriminant { .. } => {
+ StatementKind::Nop
+ | StatementKind::Retag { .. }
+ | StatementKind::Deinit(..)
+ | StatementKind::SetDiscriminant { .. } => {
bug!("Statement not allowed in this MIR phase")
}
}
@@ -982,6 +972,7 @@ impl<'cx, 'tcx> MirBorrowckCtxt<'cx, 'tcx> {
}
}
+ #[instrument(level = "debug", skip(self, flow_state))]
fn check_access_for_conflict(
&mut self,
location: Location,
@@ -990,11 +981,6 @@ impl<'cx, 'tcx> MirBorrowckCtxt<'cx, 'tcx> {
rw: ReadOrWrite,
flow_state: &Flows<'cx, 'tcx>,
) -> bool {
- debug!(
- "check_access_for_conflict(location={:?}, place_span={:?}, sd={:?}, rw={:?})",
- location, place_span, sd, rw,
- );
-
let mut error_reported = false;
let tcx = self.infcx.tcx;
let body = self.body;
@@ -1458,13 +1444,13 @@ impl<'cx, 'tcx> MirBorrowckCtxt<'cx, 'tcx> {
/// Checks whether a borrow of this place is invalidated when the function
/// exits
+ #[instrument(level = "debug", skip(self))]
fn check_for_invalidation_at_exit(
&mut self,
location: Location,
borrow: &BorrowData<'tcx>,
span: Span,
) {
- debug!("check_for_invalidation_at_exit({:?})", borrow);
let place = borrow.borrowed_place;
let mut root_place = PlaceRef { local: place.local, projection: &[] };
@@ -1791,6 +1777,7 @@ impl<'cx, 'tcx> MirBorrowckCtxt<'cx, 'tcx> {
for (place_base, elem) in place.iter_projections().rev() {
match elem {
ProjectionElem::Index(_/*operand*/) |
+ ProjectionElem::OpaqueCast(_) |
ProjectionElem::ConstantIndex { .. } |
// assigning to P[i] requires P to be valid.
ProjectionElem::Downcast(_/*adt_def*/, _/*variant_idx*/) =>
@@ -2182,6 +2169,7 @@ impl<'cx, 'tcx> MirBorrowckCtxt<'cx, 'tcx> {
| ProjectionElem::Index(..)
| ProjectionElem::ConstantIndex { .. }
| ProjectionElem::Subslice { .. }
+ | ProjectionElem::OpaqueCast { .. }
| ProjectionElem::Downcast(..) => {
let upvar_field_projection = self.is_upvar_field_projection(place);
if let Some(field) = upvar_field_projection {
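
A freestanding sketch of the inference-context API shape change visible in `mir_borrowck` above (types and fields here are illustrative stand-ins, not rustc's): the closure-scoped `enter` style gives way to a builder whose `build()` hands the context back to the caller:

```rust
struct InferCtxt {
    opaque_type_inference: bool,
}

struct InferCtxtBuilder {
    opaque_type_inference: bool,
}

impl InferCtxtBuilder {
    fn with_opaque_type_inference(mut self, enabled: bool) -> Self {
        self.opaque_type_inference = enabled;
        self
    }

    // Old shape: the context only exists inside the closure passed to `enter`.
    fn enter<R>(self, f: impl FnOnce(&InferCtxt) -> R) -> R {
        f(&self.build())
    }

    // New shape: the caller owns the context and uses it directly.
    fn build(self) -> InferCtxt {
        InferCtxt { opaque_type_inference: self.opaque_type_inference }
    }
}

fn infer_ctxt() -> InferCtxtBuilder {
    InferCtxtBuilder { opaque_type_inference: false }
}

fn do_mir_borrowck(infcx: &InferCtxt) -> bool {
    infcx.opaque_type_inference
}

fn main() {
    let via_enter = infer_ctxt().with_opaque_type_inference(true).enter(do_mir_borrowck);
    let infcx = infer_ctxt().with_opaque_type_inference(true).build();
    assert_eq!(via_enter, do_mir_borrowck(&infcx));
}
```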
diff --git a/compiler/rustc_borrowck/src/location.rs b/compiler/rustc_borrowck/src/location.rs
index 70a311694..877944d3d 100644
--- a/compiler/rustc_borrowck/src/location.rs
+++ b/compiler/rustc_borrowck/src/location.rs
@@ -33,7 +33,7 @@ impl LocationTable {
pub(crate) fn new(body: &Body<'_>) -> Self {
let mut num_points = 0;
let statements_before_block = body
- .basic_blocks()
+ .basic_blocks
.iter()
.map(|block_data| {
let v = num_points;
@@ -86,8 +86,7 @@ impl LocationTable {
let (block, &first_index) = self
.statements_before_block
.iter_enumerated()
- .filter(|(_, first_index)| **first_index <= point_index)
- .last()
+ .rfind(|&(_, &first_index)| first_index <= point_index)
.unwrap();
let statement_index = (point_index - first_index) / 2;
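
A small self-contained sketch of the iterator change above: on a double-ended iterator, `rfind` returns the same element as `filter(..).last()` but searches from the back and stops at the first hit (the data below is made up):

```rust
fn main() {
    // statements_before_block-style data: first point index of each block.
    let starts = [0usize, 4, 9, 15];
    let point_index = 10;

    // Old shape: keep every candidate, then take the last one.
    let via_filter_last = starts.iter().enumerate().filter(|&(_, &s)| s <= point_index).last();

    // New shape: walk from the back and stop at the first match.
    let via_rfind = starts.iter().enumerate().rfind(|&(_, &s)| s <= point_index);

    assert_eq!(via_filter_last, via_rfind); // both yield (2, &9)
}
```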
diff --git a/compiler/rustc_borrowck/src/nll.rs b/compiler/rustc_borrowck/src/nll.rs
index 0961203d7..08fdd28eb 100644
--- a/compiler/rustc_borrowck/src/nll.rs
+++ b/compiler/rustc_borrowck/src/nll.rs
@@ -55,8 +55,8 @@ pub(crate) struct NllOutput<'tcx> {
/// regions (e.g., region parameters) declared on the function. That set will need to be given to
/// `compute_regions`.
#[instrument(skip(infcx, param_env, body, promoted), level = "debug")]
-pub(crate) fn replace_regions_in_mir<'cx, 'tcx>(
- infcx: &InferCtxt<'cx, 'tcx>,
+pub(crate) fn replace_regions_in_mir<'tcx>(
+ infcx: &InferCtxt<'tcx>,
param_env: ty::ParamEnv<'tcx>,
body: &mut Body<'tcx>,
promoted: &mut IndexVec<Promoted, Body<'tcx>>,
@@ -155,7 +155,7 @@ fn populate_polonius_move_facts(
///
/// This may result in errors being reported.
pub(crate) fn compute_regions<'cx, 'tcx>(
- infcx: &InferCtxt<'cx, 'tcx>,
+ infcx: &InferCtxt<'tcx>,
universal_regions: UniversalRegions<'tcx>,
body: &Body<'tcx>,
promoted: &IndexVec<Promoted, Body<'tcx>>,
@@ -318,8 +318,8 @@ pub(crate) fn compute_regions<'cx, 'tcx>(
}
}
-pub(super) fn dump_mir_results<'a, 'tcx>(
- infcx: &InferCtxt<'a, 'tcx>,
+pub(super) fn dump_mir_results<'tcx>(
+ infcx: &InferCtxt<'tcx>,
body: &Body<'tcx>,
regioncx: &RegionInferenceContext<'tcx>,
closure_region_requirements: &Option<ClosureRegionRequirements<'_>>,
@@ -368,8 +368,8 @@ pub(super) fn dump_mir_results<'a, 'tcx>(
};
}
-pub(super) fn dump_annotation<'a, 'tcx>(
- infcx: &InferCtxt<'a, 'tcx>,
+pub(super) fn dump_annotation<'tcx>(
+ infcx: &InferCtxt<'tcx>,
body: &Body<'tcx>,
regioncx: &RegionInferenceContext<'tcx>,
closure_region_requirements: &Option<ClosureRegionRequirements<'_>>,
@@ -389,8 +389,9 @@ pub(super) fn dump_annotation<'a, 'tcx>(
// viewing the intraprocedural state, the -Zdump-mir output is
// better.
+ let def_span = tcx.def_span(body.source.def_id());
let mut err = if let Some(closure_region_requirements) = closure_region_requirements {
- let mut err = tcx.sess.diagnostic().span_note_diag(body.span, "external requirements");
+ let mut err = tcx.sess.diagnostic().span_note_diag(def_span, "external requirements");
regioncx.annotate(tcx, &mut err);
@@ -409,7 +410,7 @@ pub(super) fn dump_annotation<'a, 'tcx>(
err
} else {
- let mut err = tcx.sess.diagnostic().span_note_diag(body.span, "no external requirements");
+ let mut err = tcx.sess.diagnostic().span_note_diag(def_span, "no external requirements");
regioncx.annotate(tcx, &mut err);
err
diff --git a/compiler/rustc_borrowck/src/places_conflict.rs b/compiler/rustc_borrowck/src/places_conflict.rs
index 97335fd0d..0e71efd6f 100644
--- a/compiler/rustc_borrowck/src/places_conflict.rs
+++ b/compiler/rustc_borrowck/src/places_conflict.rs
@@ -44,6 +44,7 @@ pub(crate) fn places_conflict<'tcx>(
/// access depth. The `bias` parameter is used to determine how the unknowable (comparing runtime
/// array indices, for example) should be interpreted - this depends on what the caller wants in
/// order to make the conservative choice and preserve soundness.
+#[instrument(level = "debug", skip(tcx, body))]
pub(super) fn borrow_conflicts_with_place<'tcx>(
tcx: TyCtxt<'tcx>,
body: &Body<'tcx>,
@@ -53,11 +54,6 @@ pub(super) fn borrow_conflicts_with_place<'tcx>(
access: AccessDepth,
bias: PlaceConflictBias,
) -> bool {
- debug!(
- "borrow_conflicts_with_place({:?}, {:?}, {:?}, {:?})",
- borrow_place, access_place, access, bias,
- );
-
// This Local/Local case is handled by the more general code below, but
// it's so common that it's a speed win to check for it first.
if let Some(l1) = borrow_place.as_local() && let Some(l2) = access_place.as_local() {
@@ -140,10 +136,9 @@ fn place_components_conflict<'tcx>(
for (i, (borrow_c, &access_c)) in
iter::zip(borrow_place.projection, access_place.projection).enumerate()
{
- debug!("borrow_conflicts_with_place: borrow_c = {:?}", borrow_c);
- let borrow_proj_base = &borrow_place.projection[..i];
+ debug!(?borrow_c, ?access_c);
- debug!("borrow_conflicts_with_place: access_c = {:?}", access_c);
+ let borrow_proj_base = &borrow_place.projection[..i];
// Borrow and access path both have more components.
//
@@ -180,7 +175,7 @@ fn place_components_conflict<'tcx>(
// idea, at least for now, so just give up and
// report a conflict. This is unsafe code anyway so
// the user could always use raw pointers.
- debug!("borrow_conflicts_with_place: arbitrary -> conflict");
+ debug!("arbitrary -> conflict");
return true;
}
Overlap::EqualOrDisjoint => {
@@ -189,7 +184,7 @@ fn place_components_conflict<'tcx>(
Overlap::Disjoint => {
// We have proven the borrow disjoint - further
// projections will remain disjoint.
- debug!("borrow_conflicts_with_place: disjoint");
+ debug!("disjoint");
return false;
}
}
@@ -255,6 +250,7 @@ fn place_components_conflict<'tcx>(
| (ProjectionElem::Index { .. }, _, _)
| (ProjectionElem::ConstantIndex { .. }, _, _)
| (ProjectionElem::Subslice { .. }, _, _)
+ | (ProjectionElem::OpaqueCast { .. }, _, _)
| (ProjectionElem::Downcast { .. }, _, _) => {
// Recursive case. This can still be disjoint on a
// further iteration if this a shallow access and
@@ -322,6 +318,17 @@ fn place_projection_conflict<'tcx>(
debug!("place_element_conflict: DISJOINT-OR-EQ-DEREF");
Overlap::EqualOrDisjoint
}
+ (ProjectionElem::OpaqueCast(v1), ProjectionElem::OpaqueCast(v2)) => {
+ if v1 == v2 {
+ // same type - recur.
+ debug!("place_element_conflict: DISJOINT-OR-EQ-OPAQUE");
+ Overlap::EqualOrDisjoint
+ } else {
+ // Different types. Disjoint!
+ debug!("place_element_conflict: DISJOINT-OPAQUE");
+ Overlap::Disjoint
+ }
+ }
(ProjectionElem::Field(f1, _), ProjectionElem::Field(f2, _)) => {
if f1 == f2 {
// same field (e.g., `a.y` vs. `a.y`) - recur.
@@ -525,6 +532,7 @@ fn place_projection_conflict<'tcx>(
| ProjectionElem::Field(..)
| ProjectionElem::Index(..)
| ProjectionElem::ConstantIndex { .. }
+ | ProjectionElem::OpaqueCast { .. }
| ProjectionElem::Subslice { .. }
| ProjectionElem::Downcast(..),
_,
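
A simplified stand-in (a `&str` plays the role of `Ty<'tcx>`, and only the two interesting `Overlap` outcomes are modeled) for the new `OpaqueCast` arm in `place_projection_conflict`: equal casts keep recursing as possibly-equal, different target types prove the places disjoint:

```rust
#[derive(Debug, PartialEq)]
enum Overlap {
    EqualOrDisjoint,
    Disjoint,
}

// Compare two OpaqueCast projection elements by their target type.
fn opaque_cast_conflict(t1: &str, t2: &str) -> Overlap {
    if t1 == t2 {
        // Same type - recur, as in DISJOINT-OR-EQ-OPAQUE.
        Overlap::EqualOrDisjoint
    } else {
        // Different types. Disjoint, as in DISJOINT-OPAQUE.
        Overlap::Disjoint
    }
}

fn main() {
    assert_eq!(opaque_cast_conflict("Opaque1", "Opaque1"), Overlap::EqualOrDisjoint);
    assert_eq!(opaque_cast_conflict("Opaque1", "Opaque2"), Overlap::Disjoint);
}
```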
diff --git a/compiler/rustc_borrowck/src/prefixes.rs b/compiler/rustc_borrowck/src/prefixes.rs
index bdf2becb7..2b50cbac9 100644
--- a/compiler/rustc_borrowck/src/prefixes.rs
+++ b/compiler/rustc_borrowck/src/prefixes.rs
@@ -81,6 +81,7 @@ impl<'cx, 'tcx> Iterator for Prefixes<'cx, 'tcx> {
}
ProjectionElem::Downcast(..)
| ProjectionElem::Subslice { .. }
+ | ProjectionElem::OpaqueCast { .. }
| ProjectionElem::ConstantIndex { .. }
| ProjectionElem::Index(_) => {
cursor = cursor_base;
diff --git a/compiler/rustc_borrowck/src/region_infer/mod.rs b/compiler/rustc_borrowck/src/region_infer/mod.rs
index 2894c6d29..8b63294fb 100644
--- a/compiler/rustc_borrowck/src/region_infer/mod.rs
+++ b/compiler/rustc_borrowck/src/region_infer/mod.rs
@@ -15,7 +15,7 @@ use rustc_infer::infer::region_constraints::{GenericKind, VarInfos, VerifyBound,
use rustc_infer::infer::{InferCtxt, NllRegionVariableOrigin, RegionVariableOrigin};
use rustc_middle::mir::{
Body, ClosureOutlivesRequirement, ClosureOutlivesSubject, ClosureRegionRequirements,
- ConstraintCategory, Local, Location, ReturnConstraint,
+ ConstraintCategory, Local, Location, ReturnConstraint, TerminatorKind,
};
use rustc_middle::traits::ObligationCause;
use rustc_middle::traits::ObligationCauseCode;
@@ -135,7 +135,6 @@ pub struct RegionInferenceContext<'tcx> {
/// adds a new lower bound to the SCC it is analyzing: so you wind up
/// with `'R: 'O` where `'R` is the pick-region and `'O` is the
/// minimal viable option.
-#[derive(Copy, Clone, Debug, Eq, PartialEq, Ord, PartialOrd)]
pub(crate) struct AppliedMemberConstraint {
/// The SCC that was affected. (The "member region".)
///
@@ -246,6 +245,11 @@ enum Trace<'tcx> {
NotVisited,
}
+#[derive(Clone, PartialEq, Eq, Debug)]
+pub enum ExtraConstraintInfo {
+ PlaceholderFromPredicate(Span),
+}
+
impl<'tcx> RegionInferenceContext<'tcx> {
/// Creates a new region inference context with a total of
/// `num_region_variables` valid inference variables; the first N
@@ -561,7 +565,7 @@ impl<'tcx> RegionInferenceContext<'tcx> {
#[instrument(skip(self, infcx, body, polonius_output), level = "debug")]
pub(super) fn solve(
&mut self,
- infcx: &InferCtxt<'_, 'tcx>,
+ infcx: &InferCtxt<'tcx>,
param_env: ty::ParamEnv<'tcx>,
body: &Body<'tcx>,
polonius_output: Option<Rc<PoloniusOutput>>,
@@ -591,13 +595,12 @@ impl<'tcx> RegionInferenceContext<'tcx> {
// constraints were too strong, and if so, emit or propagate those errors.
if infcx.tcx.sess.opts.unstable_opts.polonius {
self.check_polonius_subset_errors(
- body,
outlives_requirements.as_mut(),
&mut errors_buffer,
polonius_output.expect("Polonius output is unavailable despite `-Z polonius`"),
);
} else {
- self.check_universal_regions(body, outlives_requirements.as_mut(), &mut errors_buffer);
+ self.check_universal_regions(outlives_requirements.as_mut(), &mut errors_buffer);
}
if errors_buffer.is_empty() {
@@ -832,7 +835,7 @@ impl<'tcx> RegionInferenceContext<'tcx> {
/// 'a`. See `TypeTest` for more details.
fn check_type_tests(
&self,
- infcx: &InferCtxt<'_, 'tcx>,
+ infcx: &InferCtxt<'tcx>,
param_env: ty::ParamEnv<'tcx>,
body: &Body<'tcx>,
mut propagated_outlives_requirements: Option<&mut Vec<ClosureOutlivesRequirement<'tcx>>>,
@@ -920,7 +923,7 @@ impl<'tcx> RegionInferenceContext<'tcx> {
#[instrument(level = "debug", skip(self, infcx, propagated_outlives_requirements))]
fn try_promote_type_test(
&self,
- infcx: &InferCtxt<'_, 'tcx>,
+ infcx: &InferCtxt<'tcx>,
param_env: ty::ParamEnv<'tcx>,
body: &Body<'tcx>,
type_test: &TypeTest<'tcx>,
@@ -1033,7 +1036,7 @@ impl<'tcx> RegionInferenceContext<'tcx> {
#[instrument(level = "debug", skip(self, infcx))]
fn try_promote_type_test_subject(
&self,
- infcx: &InferCtxt<'_, 'tcx>,
+ infcx: &InferCtxt<'tcx>,
ty: Ty<'tcx>,
) -> Option<ClosureOutlivesSubject<'tcx>> {
let tcx = infcx.tcx;
@@ -1139,7 +1142,7 @@ impl<'tcx> RegionInferenceContext<'tcx> {
/// include the CFG anyhow.
/// - For each `end('x)` element in `'r`, compute the mutual LUB, yielding
/// a result `'y`.
- #[instrument(skip(self), level = "debug")]
+ #[instrument(skip(self), level = "debug", ret)]
pub(crate) fn universal_upper_bound(&self, r: RegionVid) -> RegionVid {
debug!(r = %self.region_value_str(r));
@@ -1151,8 +1154,6 @@ impl<'tcx> RegionInferenceContext<'tcx> {
lub = self.universal_region_relations.postdom_upper_bound(lub, ur);
}
- debug!(?lub);
-
lub
}
@@ -1167,8 +1168,9 @@ impl<'tcx> RegionInferenceContext<'tcx> {
/// Therefore, this method should only be used in diagnostic code,
/// where displaying *some* named universal region is better than
/// falling back to 'static.
+ #[instrument(level = "debug", skip(self))]
pub(crate) fn approx_universal_upper_bound(&self, r: RegionVid) -> RegionVid {
- debug!("approx_universal_upper_bound(r={:?}={})", r, self.region_value_str(r));
+ debug!("{}", self.region_value_str(r));
// Find the smallest universal region that contains all other
// universal regions within `region`.
@@ -1177,7 +1179,7 @@ impl<'tcx> RegionInferenceContext<'tcx> {
let static_r = self.universal_regions.fr_static;
for ur in self.scc_values.universal_regions_outlived_by(r_scc) {
let new_lub = self.universal_region_relations.postdom_upper_bound(lub, ur);
- debug!("approx_universal_upper_bound: ur={:?} lub={:?} new_lub={:?}", ur, lub, new_lub);
+ debug!(?ur, ?lub, ?new_lub);
// The upper bound of two non-static regions is static: this
// means we know nothing about the relationship between these
// two regions. Pick a 'better' one to use when constructing
@@ -1201,7 +1203,7 @@ impl<'tcx> RegionInferenceContext<'tcx> {
}
}
- debug!("approx_universal_upper_bound: r={:?} lub={:?}", r, lub);
+ debug!(?r, ?lub);
lub
}
@@ -1210,7 +1212,7 @@ impl<'tcx> RegionInferenceContext<'tcx> {
/// `point`.
fn eval_verify_bound(
&self,
- infcx: &InferCtxt<'_, 'tcx>,
+ infcx: &InferCtxt<'tcx>,
param_env: ty::ParamEnv<'tcx>,
body: &Body<'tcx>,
generic_ty: Ty<'tcx>,
@@ -1260,7 +1262,7 @@ impl<'tcx> RegionInferenceContext<'tcx> {
fn eval_if_eq(
&self,
- infcx: &InferCtxt<'_, 'tcx>,
+ infcx: &InferCtxt<'tcx>,
param_env: ty::ParamEnv<'tcx>,
generic_ty: Ty<'tcx>,
lower_bound: RegionVid,
@@ -1332,15 +1334,15 @@ impl<'tcx> RegionInferenceContext<'tcx> {
}
// Evaluate whether `sup_region: sub_region`.
- #[instrument(skip(self), level = "debug")]
+ #[instrument(skip(self), level = "debug", ret)]
fn eval_outlives(&self, sup_region: RegionVid, sub_region: RegionVid) -> bool {
debug!(
- "eval_outlives: sup_region's value = {:?} universal={:?}",
+ "sup_region's value = {:?} universal={:?}",
self.region_value_str(sup_region),
self.universal_regions.is_universal_region(sup_region),
);
debug!(
- "eval_outlives: sub_region's value = {:?} universal={:?}",
+ "sub_region's value = {:?} universal={:?}",
self.region_value_str(sub_region),
self.universal_regions.is_universal_region(sub_region),
);
@@ -1353,7 +1355,7 @@ impl<'tcx> RegionInferenceContext<'tcx> {
// true if `'sup` outlives static.
if !self.universe_compatible(sub_region_scc, sup_region_scc) {
debug!(
- "eval_outlives: sub universe `{sub_region_scc:?}` is not nameable \
+ "sub universe `{sub_region_scc:?}` is not nameable \
by super `{sup_region_scc:?}`, promoting to static",
);
@@ -1374,9 +1376,7 @@ impl<'tcx> RegionInferenceContext<'tcx> {
});
if !universal_outlives {
- debug!(
- "eval_outlives: returning false because sub region contains a universal region not present in super"
- );
+ debug!("sub region contains a universal region not present in super");
return false;
}
@@ -1385,22 +1385,20 @@ impl<'tcx> RegionInferenceContext<'tcx> {
if self.universal_regions.is_universal_region(sup_region) {
// Micro-opt: universal regions contain all points.
- debug!(
- "eval_outlives: returning true because super is universal and hence contains all points"
- );
+ debug!("super is universal and hence contains all points");
return true;
}
- let result = self.scc_values.contains_points(sup_region_scc, sub_region_scc);
- debug!("returning {} because of comparison between points in sup/sub", result);
- result
+ debug!("comparison between points in sup/sub");
+
+ self.scc_values.contains_points(sup_region_scc, sub_region_scc)
}
/// Once regions have been propagated, this method is used to see
/// whether any of the constraints were too strong. In particular,
/// we want to check for a case where a universally quantified
/// region exceeded its bounds. Consider:
- /// ```compile_fail,E0312
+ /// ```compile_fail
/// fn foo<'a, 'b>(x: &'a u32) -> &'b u32 { x }
/// ```
/// In this case, returning `x` requires `&'a u32 <: &'b u32`
@@ -1415,7 +1413,6 @@ impl<'tcx> RegionInferenceContext<'tcx> {
/// report them as errors.
fn check_universal_regions(
&self,
- body: &Body<'tcx>,
mut propagated_outlives_requirements: Option<&mut Vec<ClosureOutlivesRequirement<'tcx>>>,
errors_buffer: &mut RegionErrors<'tcx>,
) {
@@ -1426,7 +1423,6 @@ impl<'tcx> RegionInferenceContext<'tcx> {
// they did not grow too large, accumulating any requirements
// for our caller into the `outlives_requirements` vector.
self.check_universal_region(
- body,
fr,
&mut propagated_outlives_requirements,
errors_buffer,
@@ -1455,7 +1451,7 @@ impl<'tcx> RegionInferenceContext<'tcx> {
/// <https://smallcultfollowing.com/babysteps/blog/2019/01/17/polonius-and-region-errors/>
///
/// In the canonical example
- /// ```compile_fail,E0312
+ /// ```compile_fail
/// fn foo<'a, 'b>(x: &'a u32) -> &'b u32 { x }
/// ```
/// returning `x` requires `&'a u32 <: &'b u32` and hence we establish (transitively) a
@@ -1467,7 +1463,6 @@ impl<'tcx> RegionInferenceContext<'tcx> {
/// report them as errors.
fn check_polonius_subset_errors(
&self,
- body: &Body<'tcx>,
mut propagated_outlives_requirements: Option<&mut Vec<ClosureOutlivesRequirement<'tcx>>>,
errors_buffer: &mut RegionErrors<'tcx>,
polonius_output: Rc<PoloniusOutput>,
@@ -1514,7 +1509,6 @@ impl<'tcx> RegionInferenceContext<'tcx> {
let propagated = self.try_propagate_universal_region_error(
*longer_fr,
*shorter_fr,
- body,
&mut propagated_outlives_requirements,
);
if propagated == RegionRelationCheckResult::Error {
@@ -1554,13 +1548,9 @@ impl<'tcx> RegionInferenceContext<'tcx> {
///
/// Things that are to be propagated are accumulated into the
/// `outlives_requirements` vector.
- #[instrument(
- skip(self, body, propagated_outlives_requirements, errors_buffer),
- level = "debug"
- )]
+ #[instrument(skip(self, propagated_outlives_requirements, errors_buffer), level = "debug")]
fn check_universal_region(
&self,
- body: &Body<'tcx>,
longer_fr: RegionVid,
propagated_outlives_requirements: &mut Option<&mut Vec<ClosureOutlivesRequirement<'tcx>>>,
errors_buffer: &mut RegionErrors<'tcx>,
@@ -1583,7 +1573,6 @@ impl<'tcx> RegionInferenceContext<'tcx> {
if let RegionRelationCheckResult::Error = self.check_universal_region_relation(
longer_fr,
representative,
- body,
propagated_outlives_requirements,
) {
errors_buffer.push(RegionErrorKind::RegionError {
@@ -1603,7 +1592,6 @@ impl<'tcx> RegionInferenceContext<'tcx> {
if let RegionRelationCheckResult::Error = self.check_universal_region_relation(
longer_fr,
shorter_fr,
- body,
propagated_outlives_requirements,
) {
// We only report the first region error. Subsequent errors are hidden so as
@@ -1628,7 +1616,6 @@ impl<'tcx> RegionInferenceContext<'tcx> {
&self,
longer_fr: RegionVid,
shorter_fr: RegionVid,
- body: &Body<'tcx>,
propagated_outlives_requirements: &mut Option<&mut Vec<ClosureOutlivesRequirement<'tcx>>>,
) -> RegionRelationCheckResult {
// If it is known that `fr: o`, carry on.
@@ -1644,7 +1631,6 @@ impl<'tcx> RegionInferenceContext<'tcx> {
self.try_propagate_universal_region_error(
longer_fr,
shorter_fr,
- body,
propagated_outlives_requirements,
)
}
@@ -1656,7 +1642,6 @@ impl<'tcx> RegionInferenceContext<'tcx> {
&self,
longer_fr: RegionVid,
shorter_fr: RegionVid,
- body: &Body<'tcx>,
propagated_outlives_requirements: &mut Option<&mut Vec<ClosureOutlivesRequirement<'tcx>>>,
) -> RegionRelationCheckResult {
if let Some(propagated_outlives_requirements) = propagated_outlives_requirements {
@@ -1668,7 +1653,6 @@ impl<'tcx> RegionInferenceContext<'tcx> {
debug!("try_propagate_universal_region_error: fr_minus={:?}", fr_minus);
let blame_span_category = self.find_outlives_blame_span(
- body,
longer_fr,
NllRegionVariableOrigin::FreeRegion,
shorter_fr,
@@ -1734,7 +1718,7 @@ impl<'tcx> RegionInferenceContext<'tcx> {
fn check_member_constraints(
&self,
- infcx: &InferCtxt<'_, 'tcx>,
+ infcx: &InferCtxt<'tcx>,
errors_buffer: &mut RegionErrors<'tcx>,
) {
let member_constraints = self.member_constraints.clone();
@@ -1822,50 +1806,26 @@ impl<'tcx> RegionInferenceContext<'tcx> {
pub(crate) fn retrieve_closure_constraint_info(
&self,
- _body: &Body<'tcx>,
- constraint: &OutlivesConstraint<'tcx>,
- ) -> BlameConstraint<'tcx> {
- let loc = match constraint.locations {
- Locations::All(span) => {
- return BlameConstraint {
- category: constraint.category,
- from_closure: false,
- cause: ObligationCause::dummy_with_span(span),
- variance_info: constraint.variance_info,
- };
+ constraint: OutlivesConstraint<'tcx>,
+ ) -> Option<(ConstraintCategory<'tcx>, Span)> {
+ match constraint.locations {
+ Locations::All(_) => None,
+ Locations::Single(loc) => {
+ self.closure_bounds_mapping[&loc].get(&(constraint.sup, constraint.sub)).copied()
}
- Locations::Single(loc) => loc,
- };
-
- let opt_span_category =
- self.closure_bounds_mapping[&loc].get(&(constraint.sup, constraint.sub));
- opt_span_category
- .map(|&(category, span)| BlameConstraint {
- category,
- from_closure: true,
- cause: ObligationCause::dummy_with_span(span),
- variance_info: constraint.variance_info,
- })
- .unwrap_or(BlameConstraint {
- category: constraint.category,
- from_closure: false,
- cause: ObligationCause::dummy_with_span(constraint.span),
- variance_info: constraint.variance_info,
- })
+ }
}
/// Finds a good `ObligationCause` to blame for the fact that `fr1` outlives `fr2`.
pub(crate) fn find_outlives_blame_span(
&self,
- body: &Body<'tcx>,
fr1: RegionVid,
fr1_origin: NllRegionVariableOrigin,
fr2: RegionVid,
) -> (ConstraintCategory<'tcx>, ObligationCause<'tcx>) {
- let BlameConstraint { category, cause, .. } =
- self.best_blame_constraint(body, fr1, fr1_origin, |r| {
- self.provides_universal_region(r, fr1, fr2)
- });
+ let BlameConstraint { category, cause, .. } = self
+ .best_blame_constraint(fr1, fr1_origin, |r| self.provides_universal_region(r, fr1, fr2))
+ .0;
(category, cause)
}
@@ -1970,7 +1930,7 @@ impl<'tcx> RegionInferenceContext<'tcx> {
}
/// Finds some region R such that `fr1: R` and `R` is live at `elem`.
- #[instrument(skip(self), level = "trace")]
+ #[instrument(skip(self), level = "trace", ret)]
pub(crate) fn find_sub_region_live_at(&self, fr1: RegionVid, elem: Location) -> RegionVid {
trace!(scc = ?self.constraint_sccs.scc(fr1));
trace!(universe = ?self.scc_universes[self.constraint_sccs.scc(fr1)]);
@@ -2048,23 +2008,18 @@ impl<'tcx> RegionInferenceContext<'tcx> {
/// creating a constraint path that forces `R` to outlive
/// `from_region`, and then finding the best choices within that
/// path to blame.
+ #[instrument(level = "debug", skip(self, target_test))]
pub(crate) fn best_blame_constraint(
&self,
- body: &Body<'tcx>,
from_region: RegionVid,
from_region_origin: NllRegionVariableOrigin,
target_test: impl Fn(RegionVid) -> bool,
- ) -> BlameConstraint<'tcx> {
- debug!(
- "best_blame_constraint(from_region={:?}, from_region_origin={:?})",
- from_region, from_region_origin
- );
-
+ ) -> (BlameConstraint<'tcx>, Vec<ExtraConstraintInfo>) {
// Find all paths
let (path, target_region) =
self.find_constraint_paths_between_regions(from_region, target_test).unwrap();
debug!(
- "best_blame_constraint: path={:#?}",
+ "path={:#?}",
path.iter()
.map(|c| format!(
"{:?} ({:?}: {:?})",
@@ -2075,6 +2030,18 @@ impl<'tcx> RegionInferenceContext<'tcx> {
.collect::<Vec<_>>()
);
+ let mut extra_info = vec![];
+ for constraint in path.iter() {
+ let outlived = constraint.sub;
+ let Some(origin) = self.var_infos.get(outlived) else { continue; };
+ let RegionVariableOrigin::Nll(NllRegionVariableOrigin::Placeholder(p)) = origin.origin else { continue; };
+ debug!(?constraint, ?p);
+ let ConstraintCategory::Predicate(span) = constraint.category else { continue; };
+ extra_info.push(ExtraConstraintInfo::PlaceholderFromPredicate(span));
+ // We only want to point to one
+ break;
+ }
+
// We try to avoid reporting a `ConstraintCategory::Predicate` as our best constraint.
// Instead, we use it to produce an improved `ObligationCauseCode`.
// FIXME - determine what we should do if we encounter multiple `ConstraintCategory::Predicate`
@@ -2100,23 +2067,33 @@ impl<'tcx> RegionInferenceContext<'tcx> {
let mut categorized_path: Vec<BlameConstraint<'tcx>> = path
.iter()
.map(|constraint| {
- if constraint.category == ConstraintCategory::ClosureBounds {
- self.retrieve_closure_constraint_info(body, &constraint)
- } else {
- BlameConstraint {
- category: constraint.category,
- from_closure: false,
- cause: ObligationCause::new(
- constraint.span,
- CRATE_HIR_ID,
- cause_code.clone(),
- ),
- variance_info: constraint.variance_info,
- }
+ let (category, span, from_closure, cause_code) =
+ if constraint.category == ConstraintCategory::ClosureBounds {
+ if let Some((category, span)) =
+ self.retrieve_closure_constraint_info(*constraint)
+ {
+ (category, span, true, ObligationCauseCode::MiscObligation)
+ } else {
+ (
+ constraint.category,
+ constraint.span,
+ false,
+ ObligationCauseCode::MiscObligation,
+ )
+ }
+ } else {
+ (constraint.category, constraint.span, false, cause_code.clone())
+ };
+ BlameConstraint {
+ category,
+ from_closure,
+ cause: ObligationCause::new(span, CRATE_HIR_ID, cause_code),
+ variance_info: constraint.variance_info,
+ outlives_constraint: *constraint,
}
})
.collect();
- debug!("best_blame_constraint: categorized_path={:#?}", categorized_path);
+ debug!("categorized_path={:#?}", categorized_path);
// To find the best span to cite, we first try to look for the
// final constraint that is interesting and where the `sup` is
@@ -2214,10 +2191,7 @@ impl<'tcx> RegionInferenceContext<'tcx> {
let best_choice =
if blame_source { range.rev().find(find_region) } else { range.find(find_region) };
- debug!(
- "best_blame_constraint: best_choice={:?} blame_source={}",
- best_choice, blame_source
- );
+ debug!(?best_choice, ?blame_source, ?extra_info);
if let Some(i) = best_choice {
if let Some(next) = categorized_path.get(i + 1) {
@@ -2226,7 +2200,7 @@ impl<'tcx> RegionInferenceContext<'tcx> {
{
// The return expression is being influenced by the return type being
// impl Trait, point at the return type and not the return expr.
- return next.clone();
+ return (next.clone(), extra_info);
}
}
@@ -2246,7 +2220,7 @@ impl<'tcx> RegionInferenceContext<'tcx> {
}
}
- return categorized_path[i].clone();
+ return (categorized_path[i].clone(), extra_info);
}
// If that search fails, that is.. unusual. Maybe everything
@@ -2254,14 +2228,35 @@ impl<'tcx> RegionInferenceContext<'tcx> {
// appears to be the most interesting point to report to the
// user via an even more ad-hoc guess.
categorized_path.sort_by(|p0, p1| p0.category.cmp(&p1.category));
- debug!("best_blame_constraint: sorted_path={:#?}", categorized_path);
+ debug!("sorted_path={:#?}", categorized_path);
- categorized_path.remove(0)
+ (categorized_path.remove(0), extra_info)
}
pub(crate) fn universe_info(&self, universe: ty::UniverseIndex) -> UniverseInfo<'tcx> {
self.universe_causes[&universe].clone()
}
+
+ /// Tries to find the terminator of the loop in which the region 'r' resides.
+ /// Returns the location of the terminator if found.
+ pub(crate) fn find_loop_terminator_location(
+ &self,
+ r: RegionVid,
+ body: &Body<'_>,
+ ) -> Option<Location> {
+ let scc = self.constraint_sccs.scc(r.to_region_vid());
+ let locations = self.scc_values.locations_outlived_by(scc);
+ for location in locations {
+ let bb = &body[location.block];
+ if let Some(terminator) = &bb.terminator {
+ // terminator of a loop should be TerminatorKind::FalseUnwind
+ if let TerminatorKind::FalseUnwind { .. } = terminator.kind {
+ return Some(location);
+ }
+ }
+ }
+ None
+ }
}
impl<'tcx> RegionDefinition<'tcx> {
@@ -2338,7 +2333,13 @@ impl<'tcx> ClosureRegionRequirementsExt<'tcx> for ClosureRegionRequirements<'tcx
outlives_requirement={:?}",
region, outlived_region, outlives_requirement,
);
- ty::Binder::dummy(ty::OutlivesPredicate(region.into(), outlived_region))
+ (
+ ty::Binder::dummy(ty::OutlivesPredicate(
+ region.into(),
+ outlived_region,
+ )),
+ ConstraintCategory::BoringNoLocation,
+ )
}
ClosureOutlivesSubject::Ty(ty) => {
@@ -2348,7 +2349,10 @@ impl<'tcx> ClosureRegionRequirementsExt<'tcx> for ClosureRegionRequirements<'tcx
outlives_requirement={:?}",
ty, outlived_region, outlives_requirement,
);
- ty::Binder::dummy(ty::OutlivesPredicate(ty.into(), outlived_region))
+ (
+ ty::Binder::dummy(ty::OutlivesPredicate(ty.into(), outlived_region)),
+ ConstraintCategory::BoringNoLocation,
+ )
}
}
})
@@ -2362,4 +2366,5 @@ pub struct BlameConstraint<'tcx> {
pub from_closure: bool,
pub cause: ObligationCause<'tcx>,
pub variance_info: ty::VarianceDiagInfo<'tcx>,
+ pub outlives_constraint: OutlivesConstraint<'tcx>,
}
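
A runnable sketch, assuming the `tracing` and `tracing-subscriber` crates, of the logging idioms this file switches to: `#[instrument(..., ret)]` records arguments as span fields and logs the return value when the function exits, and `debug!(?x, ?y)` emits structured fields in place of the removed hand-formatted `debug!` strings (the function body below is a toy stand-in):

```rust
use tracing::{debug, instrument, Level};

#[instrument(level = "debug", ret)]
fn eval_outlives(sup_region: u32, sub_region: u32) -> bool {
    // Structured fields replace "eval_outlives: sup_region's value = {:?} ..." strings.
    debug!(?sup_region, ?sub_region, "comparison between points in sup/sub");
    sup_region >= sub_region
}

fn main() {
    tracing_subscriber::fmt().with_max_level(Level::DEBUG).init();
    eval_outlives(3, 1);
}
```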
diff --git a/compiler/rustc_borrowck/src/region_infer/opaque_types.rs b/compiler/rustc_borrowck/src/region_infer/opaque_types.rs
index d6712b6a4..465f353aa 100644
--- a/compiler/rustc_borrowck/src/region_infer/opaque_types.rs
+++ b/compiler/rustc_borrowck/src/region_infer/opaque_types.rs
@@ -2,18 +2,16 @@ use rustc_data_structures::fx::FxHashMap;
use rustc_data_structures::vec_map::VecMap;
use rustc_hir::def_id::LocalDefId;
use rustc_hir::OpaqueTyOrigin;
-use rustc_infer::infer::error_reporting::unexpected_hidden_region_diagnostic;
use rustc_infer::infer::TyCtxtInferExt as _;
use rustc_infer::infer::{DefiningAnchor, InferCtxt};
use rustc_infer::traits::{Obligation, ObligationCause, TraitEngine};
-use rustc_middle::ty::fold::{TypeFolder, TypeSuperFoldable};
-use rustc_middle::ty::subst::{GenericArg, GenericArgKind, InternalSubsts};
+use rustc_middle::ty::subst::{GenericArgKind, InternalSubsts};
use rustc_middle::ty::visit::TypeVisitable;
use rustc_middle::ty::{
self, OpaqueHiddenType, OpaqueTypeKey, ToPredicate, Ty, TyCtxt, TypeFoldable,
};
use rustc_span::Span;
-use rustc_trait_selection::traits::error_reporting::InferCtxtExt as _;
+use rustc_trait_selection::traits::error_reporting::TypeErrCtxtExt as _;
use rustc_trait_selection::traits::TraitEngineExt as _;
use super::RegionInferenceContext;
@@ -58,10 +56,10 @@ impl<'tcx> RegionInferenceContext<'tcx> {
/// Calling `universal_upper_bound` for such a region gives `fr_fn_body`,
/// which has no `external_name` in which case we use `'empty` as the
/// region to pass to `infer_opaque_definition_from_instantiation`.
- #[instrument(level = "debug", skip(self, infcx))]
+ #[instrument(level = "debug", skip(self, infcx), ret)]
pub(crate) fn infer_opaque_types(
&self,
- infcx: &InferCtxt<'_, 'tcx>,
+ infcx: &InferCtxt<'tcx>,
opaque_ty_decls: VecMap<OpaqueTypeKey<'tcx>, (OpaqueHiddenType<'tcx>, OpaqueTyOrigin)>,
) -> VecMap<LocalDefId, OpaqueHiddenType<'tcx>> {
let mut result: VecMap<LocalDefId, OpaqueHiddenType<'tcx>> = VecMap::new();
@@ -107,7 +105,7 @@ impl<'tcx> RegionInferenceContext<'tcx> {
.iter()
.find(|ur_vid| self.eval_equal(vid, **ur_vid))
.and_then(|ur_vid| self.definitions[*ur_vid].external_name)
- .unwrap_or(infcx.tcx.lifetimes.re_root_empty),
+ .unwrap_or(infcx.tcx.lifetimes.re_erased),
_ => region,
});
@@ -192,7 +190,7 @@ pub trait InferCtxtExt<'tcx> {
) -> Ty<'tcx>;
}
-impl<'a, 'tcx> InferCtxtExt<'tcx> for InferCtxt<'a, 'tcx> {
+impl<'tcx> InferCtxtExt<'tcx> for InferCtxt<'tcx> {
/// Given the fully resolved, instantiated type for an opaque
/// type, i.e., the value of an inference variable like C1 or C2
/// (*), computes the "definition type" for an opaque type
@@ -227,31 +225,9 @@ impl<'a, 'tcx> InferCtxtExt<'tcx> for InferCtxt<'a, 'tcx> {
return self.tcx.ty_error();
}
- let OpaqueTypeKey { def_id, substs } = opaque_type_key;
-
- // Use substs to build up a reverse map from regions to their
- // identity mappings. This is necessary because of `impl
- // Trait` lifetimes are computed by replacing existing
- // lifetimes with 'static and remapping only those used in the
- // `impl Trait` return type, resulting in the parameters
- // shifting.
- let id_substs = InternalSubsts::identity_for_item(self.tcx, def_id.to_def_id());
- debug!(?id_substs);
- let map: FxHashMap<GenericArg<'tcx>, GenericArg<'tcx>> =
- substs.iter().enumerate().map(|(index, subst)| (subst, id_substs[index])).collect();
- debug!("map = {:#?}", map);
-
- // Convert the type from the function into a type valid outside
- // the function, by replacing invalid regions with 'static,
- // after producing an error for each of them.
- let definition_ty = instantiated_ty.ty.fold_with(&mut ReverseMapper::new(
- self.tcx,
- opaque_type_key,
- map,
- instantiated_ty.ty,
- instantiated_ty.span,
- ));
- debug!(?definition_ty);
+ let definition_ty = instantiated_ty
+ .remap_generic_params_to_declaration_params(opaque_type_key, self.tcx, false, origin)
+ .ty;
if !check_opaque_type_parameter_valid(
self.tcx,
@@ -264,72 +240,70 @@ impl<'a, 'tcx> InferCtxtExt<'tcx> for InferCtxt<'a, 'tcx> {
// Only check this for TAIT. RPIT already supports `src/test/ui/impl-trait/nested-return-type2.rs`
// on stable and we'd break that.
- if let OpaqueTyOrigin::TyAlias = origin {
- // This logic duplicates most of `check_opaque_meets_bounds`.
- // FIXME(oli-obk): Also do region checks here and then consider removing `check_opaque_meets_bounds` entirely.
- let param_env = self.tcx.param_env(def_id);
- let body_id = self.tcx.local_def_id_to_hir_id(def_id);
- // HACK This bubble is required for this tests to pass:
- // type-alias-impl-trait/issue-67844-nested-opaque.rs
- self.tcx.infer_ctxt().with_opaque_type_inference(DefiningAnchor::Bubble).enter(
- move |infcx| {
- // Require the hidden type to be well-formed with only the generics of the opaque type.
- // Defining use functions may have more bounds than the opaque type, which is ok, as long as the
- // hidden type is well formed even without those bounds.
- let predicate =
- ty::Binder::dummy(ty::PredicateKind::WellFormed(definition_ty.into()))
- .to_predicate(infcx.tcx);
- let mut fulfillment_cx = <dyn TraitEngine<'tcx>>::new(infcx.tcx);
+ let OpaqueTyOrigin::TyAlias = origin else {
+ return definition_ty;
+ };
+ let def_id = opaque_type_key.def_id;
+ // This logic duplicates most of `check_opaque_meets_bounds`.
+ // FIXME(oli-obk): Also do region checks here and then consider removing `check_opaque_meets_bounds` entirely.
+ let param_env = self.tcx.param_env(def_id);
+ let body_id = self.tcx.local_def_id_to_hir_id(def_id);
+ // HACK This bubble is required for this tests to pass:
+ // type-alias-impl-trait/issue-67844-nested-opaque.rs
+ let infcx =
+ self.tcx.infer_ctxt().with_opaque_type_inference(DefiningAnchor::Bubble).build();
+ // Require the hidden type to be well-formed with only the generics of the opaque type.
+ // Defining use functions may have more bounds than the opaque type, which is ok, as long as the
+ // hidden type is well formed even without those bounds.
+ let predicate = ty::Binder::dummy(ty::PredicateKind::WellFormed(definition_ty.into()))
+ .to_predicate(infcx.tcx);
+ let mut fulfillment_cx = <dyn TraitEngine<'tcx>>::new(infcx.tcx);
+
+ let id_substs = InternalSubsts::identity_for_item(self.tcx, def_id.to_def_id());
- // Require that the hidden type actually fulfills all the bounds of the opaque type, even without
- // the bounds that the function supplies.
- match infcx.register_hidden_type(
- OpaqueTypeKey { def_id, substs: id_substs },
- ObligationCause::misc(instantiated_ty.span, body_id),
- param_env,
+ // Require that the hidden type actually fulfills all the bounds of the opaque type, even without
+ // the bounds that the function supplies.
+ let opaque_ty = self.tcx.mk_opaque(def_id.to_def_id(), id_substs);
+ match infcx
+ .at(&ObligationCause::misc(instantiated_ty.span, body_id), param_env)
+ .eq(opaque_ty, definition_ty)
+ {
+ Ok(infer_ok) => {
+ for obligation in infer_ok.obligations {
+ fulfillment_cx.register_predicate_obligation(&infcx, obligation);
+ }
+ }
+ Err(err) => {
+ infcx
+ .err_ctxt()
+ .report_mismatched_types(
+ &ObligationCause::misc(instantiated_ty.span, body_id),
+ opaque_ty,
definition_ty,
- origin,
- ) {
- Ok(infer_ok) => {
- for obligation in infer_ok.obligations {
- fulfillment_cx.register_predicate_obligation(&infcx, obligation);
- }
- }
- Err(err) => {
- infcx
- .report_mismatched_types(
- &ObligationCause::misc(instantiated_ty.span, body_id),
- self.tcx.mk_opaque(def_id.to_def_id(), id_substs),
- definition_ty,
- err,
- )
- .emit();
- }
- }
+ err,
+ )
+ .emit();
+ }
+ }
- fulfillment_cx.register_predicate_obligation(
- &infcx,
- Obligation::misc(instantiated_ty.span, body_id, param_env, predicate),
- );
+ fulfillment_cx.register_predicate_obligation(
+ &infcx,
+ Obligation::misc(instantiated_ty.span, body_id, param_env, predicate),
+ );
- // Check that all obligations are satisfied by the implementation's
- // version.
- let errors = fulfillment_cx.select_all_or_error(&infcx);
+ // Check that all obligations are satisfied by the implementation's
+ // version.
+ let errors = fulfillment_cx.select_all_or_error(&infcx);
- // This is still required for many(half of the tests in ui/type-alias-impl-trait)
- // tests to pass
- let _ = infcx.inner.borrow_mut().opaque_type_storage.take_opaque_types();
+ // This is still required for many(half of the tests in ui/type-alias-impl-trait)
+ // tests to pass
+ let _ = infcx.inner.borrow_mut().opaque_type_storage.take_opaque_types();
- if errors.is_empty() {
- definition_ty
- } else {
- infcx.report_fulfillment_errors(&errors, None, false);
- self.tcx.ty_error()
- }
- },
- )
- } else {
+ if errors.is_empty() {
definition_ty
+ } else {
+ infcx.err_ctxt().report_fulfillment_errors(&errors, None, false);
+ self.tcx.ty_error()
}
}
}
@@ -425,238 +399,3 @@ fn check_opaque_type_parameter_valid(
}
true
}
-
-struct ReverseMapper<'tcx> {
- tcx: TyCtxt<'tcx>,
-
- key: ty::OpaqueTypeKey<'tcx>,
- map: FxHashMap<GenericArg<'tcx>, GenericArg<'tcx>>,
- map_missing_regions_to_empty: bool,
-
- /// initially `Some`, set to `None` once error has been reported
- hidden_ty: Option<Ty<'tcx>>,
-
- /// Span of function being checked.
- span: Span,
-}
-
-impl<'tcx> ReverseMapper<'tcx> {
- fn new(
- tcx: TyCtxt<'tcx>,
- key: ty::OpaqueTypeKey<'tcx>,
- map: FxHashMap<GenericArg<'tcx>, GenericArg<'tcx>>,
- hidden_ty: Ty<'tcx>,
- span: Span,
- ) -> Self {
- Self {
- tcx,
- key,
- map,
- map_missing_regions_to_empty: false,
- hidden_ty: Some(hidden_ty),
- span,
- }
- }
-
- fn fold_kind_mapping_missing_regions_to_empty(
- &mut self,
- kind: GenericArg<'tcx>,
- ) -> GenericArg<'tcx> {
- assert!(!self.map_missing_regions_to_empty);
- self.map_missing_regions_to_empty = true;
- let kind = kind.fold_with(self);
- self.map_missing_regions_to_empty = false;
- kind
- }
-
- fn fold_kind_normally(&mut self, kind: GenericArg<'tcx>) -> GenericArg<'tcx> {
- assert!(!self.map_missing_regions_to_empty);
- kind.fold_with(self)
- }
-}
-
-impl<'tcx> TypeFolder<'tcx> for ReverseMapper<'tcx> {
- fn tcx(&self) -> TyCtxt<'tcx> {
- self.tcx
- }
-
- #[instrument(skip(self), level = "debug")]
- fn fold_region(&mut self, r: ty::Region<'tcx>) -> ty::Region<'tcx> {
- match *r {
- // Ignore bound regions and `'static` regions that appear in the
- // type, we only need to remap regions that reference lifetimes
- // from the function declaration.
- // This would ignore `'r` in a type like `for<'r> fn(&'r u32)`.
- ty::ReLateBound(..) | ty::ReStatic => return r,
-
- // If regions have been erased (by writeback), don't try to unerase
- // them.
- ty::ReErased => return r,
-
- // The regions that we expect from borrow checking.
- ty::ReEarlyBound(_) | ty::ReFree(_) | ty::ReEmpty(ty::UniverseIndex::ROOT) => {}
-
- ty::ReEmpty(_) | ty::RePlaceholder(_) | ty::ReVar(_) => {
- // All of the regions in the type should either have been
- // erased by writeback, or mapped back to named regions by
- // borrow checking.
- bug!("unexpected region kind in opaque type: {:?}", r);
- }
- }
-
- let generics = self.tcx().generics_of(self.key.def_id);
- match self.map.get(&r.into()).map(|k| k.unpack()) {
- Some(GenericArgKind::Lifetime(r1)) => r1,
- Some(u) => panic!("region mapped to unexpected kind: {:?}", u),
- None if self.map_missing_regions_to_empty => self.tcx.lifetimes.re_root_empty,
- None if generics.parent.is_some() => {
- if let Some(hidden_ty) = self.hidden_ty.take() {
- unexpected_hidden_region_diagnostic(
- self.tcx,
- self.tcx.def_span(self.key.def_id),
- hidden_ty,
- r,
- self.key,
- )
- .emit();
- }
- self.tcx.lifetimes.re_root_empty
- }
- None => {
- self.tcx
- .sess
- .struct_span_err(self.span, "non-defining opaque type use in defining scope")
- .span_label(
- self.span,
- format!(
- "lifetime `{}` is part of concrete type but not used in \
- parameter list of the `impl Trait` type alias",
- r
- ),
- )
- .emit();
-
- self.tcx().lifetimes.re_static
- }
- }
- }
-
- fn fold_ty(&mut self, ty: Ty<'tcx>) -> Ty<'tcx> {
- match *ty.kind() {
- ty::Closure(def_id, substs) => {
- // I am a horrible monster and I pray for death. When
- // we encounter a closure here, it is always a closure
- // from within the function that we are currently
- // type-checking -- one that is now being encapsulated
- // in an opaque type. Ideally, we would
- // go through the types/lifetimes that it references
- // and treat them just like we would any other type,
- // which means we would error out if we find any
- // reference to a type/region that is not in the
- // "reverse map".
- //
- // **However,** in the case of closures, there is a
- // somewhat subtle (read: hacky) consideration. The
- // problem is that our closure types currently include
- // all the lifetime parameters declared on the
- // enclosing function, even if they are unused by the
- // closure itself. We can't readily filter them out,
- // so here we replace those values with `'empty`. This
- // can't really make a difference to the rest of the
- // compiler; those regions are ignored for the
- // outlives relation, and hence don't affect trait
- // selection or auto traits, and they are erased
- // during codegen.
-
- let generics = self.tcx.generics_of(def_id);
- let substs = self.tcx.mk_substs(substs.iter().enumerate().map(|(index, kind)| {
- if index < generics.parent_count {
- // Accommodate missing regions in the parent kinds...
- self.fold_kind_mapping_missing_regions_to_empty(kind)
- } else {
- // ...but not elsewhere.
- self.fold_kind_normally(kind)
- }
- }));
-
- self.tcx.mk_closure(def_id, substs)
- }
-
- ty::Generator(def_id, substs, movability) => {
- let generics = self.tcx.generics_of(def_id);
- let substs = self.tcx.mk_substs(substs.iter().enumerate().map(|(index, kind)| {
- if index < generics.parent_count {
- // Accommodate missing regions in the parent kinds...
- self.fold_kind_mapping_missing_regions_to_empty(kind)
- } else {
- // ...but not elsewhere.
- self.fold_kind_normally(kind)
- }
- }));
-
- self.tcx.mk_generator(def_id, substs, movability)
- }
-
- ty::Param(param) => {
- // Look it up in the substitution list.
- match self.map.get(&ty.into()).map(|k| k.unpack()) {
- // Found it in the substitution list; replace with the parameter from the
- // opaque type.
- Some(GenericArgKind::Type(t1)) => t1,
- Some(u) => panic!("type mapped to unexpected kind: {:?}", u),
- None => {
- debug!(?param, ?self.map);
- self.tcx
- .sess
- .struct_span_err(
- self.span,
- &format!(
- "type parameter `{}` is part of concrete type but not \
- used in parameter list for the `impl Trait` type alias",
- ty
- ),
- )
- .emit();
-
- self.tcx().ty_error()
- }
- }
- }
-
- _ => ty.super_fold_with(self),
- }
- }
-
- fn fold_const(&mut self, ct: ty::Const<'tcx>) -> ty::Const<'tcx> {
- trace!("checking const {:?}", ct);
- // Find a const parameter
- match ct.kind() {
- ty::ConstKind::Param(..) => {
- // Look it up in the substitution list.
- match self.map.get(&ct.into()).map(|k| k.unpack()) {
- // Found it in the substitution list, replace with the parameter from the
- // opaque type.
- Some(GenericArgKind::Const(c1)) => c1,
- Some(u) => panic!("const mapped to unexpected kind: {:?}", u),
- None => {
- self.tcx
- .sess
- .struct_span_err(
- self.span,
- &format!(
- "const parameter `{}` is part of concrete type but not \
- used in parameter list for the `impl Trait` type alias",
- ct
- ),
- )
- .emit();
-
- self.tcx().const_error(ct.ty())
- }
- }
- }
-
- _ => ct,
- }
- }
-}
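
A stable-Rust illustration, not taken from the patch, of the opaque vs. hidden type relationship that `infer_opaque_definition_from_instantiation` resolves: the declared return type is the opaque `impl Iterator<Item = u32>`, while the inferred definition ("hidden") type behind it is `std::ops::Range<u32>`:

```rust
fn counter() -> impl Iterator<Item = u32> {
    // The hidden type inferred for the opaque return type is `Range<u32>`.
    0..3
}

fn main() {
    assert_eq!(counter().sum::<u32>(), 3);
}
```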
diff --git a/compiler/rustc_borrowck/src/region_infer/values.rs b/compiler/rustc_borrowck/src/region_infer/values.rs
index c81ef10f7..de20a4bb4 100644
--- a/compiler/rustc_borrowck/src/region_infer/values.rs
+++ b/compiler/rustc_borrowck/src/region_infer/values.rs
@@ -25,7 +25,7 @@ impl RegionValueElements {
pub(crate) fn new(body: &Body<'_>) -> Self {
let mut num_points = 0;
let statements_before_block: IndexVec<BasicBlock, usize> = body
- .basic_blocks()
+ .basic_blocks
.iter()
.map(|block_data| {
let v = num_points;
@@ -37,7 +37,7 @@ impl RegionValueElements {
debug!("RegionValueElements: num_points={:#?}", num_points);
let mut basic_blocks = IndexVec::with_capacity(num_points);
- for (bb, bb_data) in body.basic_blocks().iter_enumerated() {
+ for (bb, bb_data) in body.basic_blocks.iter_enumerated() {
basic_blocks.extend((0..=bb_data.statements.len()).map(|_| bb));
}
diff --git a/compiler/rustc_borrowck/src/renumber.rs b/compiler/rustc_borrowck/src/renumber.rs
index 7a8ce621c..f30237690 100644
--- a/compiler/rustc_borrowck/src/renumber.rs
+++ b/compiler/rustc_borrowck/src/renumber.rs
@@ -1,6 +1,7 @@
use rustc_index::vec::IndexVec;
use rustc_infer::infer::{InferCtxt, NllRegionVariableOrigin};
use rustc_middle::mir::visit::{MutVisitor, TyContext};
+use rustc_middle::mir::Constant;
use rustc_middle::mir::{Body, Location, Promoted};
use rustc_middle::ty::subst::SubstsRef;
use rustc_middle::ty::{self, Ty, TyCtxt, TypeFoldable};
@@ -9,7 +10,7 @@ use rustc_middle::ty::{self, Ty, TyCtxt, TypeFoldable};
/// inference variables, returning the number of variables created.
#[instrument(skip(infcx, body, promoted), level = "debug")]
pub fn renumber_mir<'tcx>(
- infcx: &InferCtxt<'_, 'tcx>,
+ infcx: &InferCtxt<'tcx>,
body: &mut Body<'tcx>,
promoted: &mut IndexVec<Promoted, Body<'tcx>>,
) {
@@ -27,7 +28,7 @@ pub fn renumber_mir<'tcx>(
/// Replaces all regions appearing in `value` with fresh inference
/// variables.
#[instrument(skip(infcx), level = "debug")]
-pub fn renumber_regions<'tcx, T>(infcx: &InferCtxt<'_, 'tcx>, value: T) -> T
+pub fn renumber_regions<'tcx, T>(infcx: &InferCtxt<'tcx>, value: T) -> T
where
T: TypeFoldable<'tcx>,
{
@@ -38,7 +39,7 @@ where
}
struct NllVisitor<'a, 'tcx> {
- infcx: &'a InferCtxt<'a, 'tcx>,
+ infcx: &'a InferCtxt<'tcx>,
}
impl<'a, 'tcx> NllVisitor<'a, 'tcx> {
@@ -77,7 +78,10 @@ impl<'a, 'tcx> MutVisitor<'tcx> for NllVisitor<'a, 'tcx> {
debug!(?region);
}
- fn visit_const(&mut self, constant: &mut ty::Const<'tcx>, _location: Location) {
- *constant = self.renumber_regions(*constant);
+ #[instrument(skip(self), level = "debug")]
+ fn visit_constant(&mut self, constant: &mut Constant<'tcx>, _location: Location) {
+ let literal = constant.literal;
+ constant.literal = self.renumber_regions(literal);
+ debug!("constant: {:#?}", constant);
}
}
diff --git a/compiler/rustc_borrowck/src/session_diagnostics.rs b/compiler/rustc_borrowck/src/session_diagnostics.rs
index 895723d44..cff3089c3 100644
--- a/compiler/rustc_borrowck/src/session_diagnostics.rs
+++ b/compiler/rustc_borrowck/src/session_diagnostics.rs
@@ -1,9 +1,12 @@
-use rustc_macros::{SessionDiagnostic, SessionSubdiagnostic};
+use rustc_errors::{IntoDiagnosticArg, MultiSpan};
+use rustc_macros::{Diagnostic, LintDiagnostic, Subdiagnostic};
use rustc_middle::ty::Ty;
use rustc_span::Span;
-#[derive(SessionDiagnostic)]
-#[error(borrowck::move_unsized, code = "E0161")]
+use crate::diagnostics::RegionName;
+
+#[derive(Diagnostic)]
+#[diag(borrowck_move_unsized, code = "E0161")]
pub(crate) struct MoveUnsized<'tcx> {
pub ty: Ty<'tcx>,
#[primary_span]
@@ -11,8 +14,8 @@ pub(crate) struct MoveUnsized<'tcx> {
pub span: Span,
}
-#[derive(SessionDiagnostic)]
-#[error(borrowck::higher_ranked_lifetime_error)]
+#[derive(Diagnostic)]
+#[diag(borrowck_higher_ranked_lifetime_error)]
pub(crate) struct HigherRankedLifetimeError {
#[subdiagnostic]
pub cause: Option<HigherRankedErrorCause>,
@@ -20,25 +23,128 @@ pub(crate) struct HigherRankedLifetimeError {
pub span: Span,
}
-#[derive(SessionSubdiagnostic)]
+#[derive(Subdiagnostic)]
pub(crate) enum HigherRankedErrorCause {
- #[note(borrowck::could_not_prove)]
+ #[note(borrowck_could_not_prove)]
CouldNotProve { predicate: String },
- #[note(borrowck::could_not_normalize)]
+ #[note(borrowck_could_not_normalize)]
CouldNotNormalize { value: String },
}
-#[derive(SessionDiagnostic)]
-#[error(borrowck::higher_ranked_subtype_error)]
+#[derive(Diagnostic)]
+#[diag(borrowck_higher_ranked_subtype_error)]
pub(crate) struct HigherRankedSubtypeError {
#[primary_span]
pub span: Span,
}
-#[derive(SessionDiagnostic)]
-#[error(borrowck::generic_does_not_live_long_enough)]
+#[derive(Diagnostic)]
+#[diag(borrowck_generic_does_not_live_long_enough)]
pub(crate) struct GenericDoesNotLiveLongEnough {
pub kind: String,
#[primary_span]
pub span: Span,
}
+
+#[derive(LintDiagnostic)]
+#[diag(borrowck_var_does_not_need_mut)]
+pub(crate) struct VarNeedNotMut {
+ #[suggestion_short(applicability = "machine-applicable", code = "")]
+ pub span: Span,
+}
+#[derive(Diagnostic)]
+#[diag(borrowck_var_cannot_escape_closure)]
+#[note]
+#[note(cannot_escape)]
+pub(crate) struct FnMutError {
+ #[primary_span]
+ pub span: Span,
+ #[subdiagnostic]
+ pub ty_err: FnMutReturnTypeErr,
+}
+
+#[derive(Subdiagnostic)]
+pub(crate) enum VarHereDenote {
+ #[label(borrowck_var_here_captured)]
+ Captured {
+ #[primary_span]
+ span: Span,
+ },
+ #[label(borrowck_var_here_defined)]
+ Defined {
+ #[primary_span]
+ span: Span,
+ },
+ #[label(borrowck_closure_inferred_mut)]
+ FnMutInferred {
+ #[primary_span]
+ span: Span,
+ },
+}
+
+#[derive(Subdiagnostic)]
+pub(crate) enum FnMutReturnTypeErr {
+ #[label(borrowck_returned_closure_escaped)]
+ ReturnClosure {
+ #[primary_span]
+ span: Span,
+ },
+ #[label(borrowck_returned_async_block_escaped)]
+ ReturnAsyncBlock {
+ #[primary_span]
+ span: Span,
+ },
+ #[label(borrowck_returned_ref_escaped)]
+ ReturnRef {
+ #[primary_span]
+ span: Span,
+ },
+}
+
+#[derive(Diagnostic)]
+#[diag(borrowck_lifetime_constraints_error)]
+pub(crate) struct LifetimeOutliveErr {
+ #[primary_span]
+ pub span: Span,
+}
+
+#[derive(Subdiagnostic)]
+pub(crate) enum LifetimeReturnCategoryErr<'a> {
+ #[label(borrowck_returned_lifetime_wrong)]
+ WrongReturn {
+ #[primary_span]
+ span: Span,
+ mir_def_name: &'a str,
+ outlived_fr_name: RegionName,
+ fr_name: &'a RegionName,
+ },
+ #[label(borrowck_returned_lifetime_short)]
+ ShortReturn {
+ #[primary_span]
+ span: Span,
+ category_desc: &'static str,
+ free_region_name: &'a RegionName,
+ outlived_fr_name: RegionName,
+ },
+}
+
+impl IntoDiagnosticArg for &RegionName {
+ fn into_diagnostic_arg(self) -> rustc_errors::DiagnosticArgValue<'static> {
+ format!("{}", self).into_diagnostic_arg()
+ }
+}
+
+impl IntoDiagnosticArg for RegionName {
+ fn into_diagnostic_arg(self) -> rustc_errors::DiagnosticArgValue<'static> {
+ format!("{}", self).into_diagnostic_arg()
+ }
+}
+
+#[derive(Subdiagnostic)]
+pub(crate) enum RequireStaticErr {
+ #[note(borrowck_used_impl_require_static)]
+ UsedImpl {
+ #[primary_span]
+ multi_span: MultiSpan,
+ },
+}
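
The two `IntoDiagnosticArg` impls above exist so that `RegionName` fields on the derived diagnostics can be rendered as message arguments; both simply format the name via `Display` and defer to `String`'s conversion. A minimal standalone sketch of that delegation pattern, using a made-up `IntoDiagArg` trait and plain `String` argument values instead of the real `rustc_errors` types:

```rust
use std::fmt;

// Stand-in for the borrowck `RegionName`; only `Display` is provided.
struct RegionName(&'static str);

impl fmt::Display for RegionName {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{}", self.0)
    }
}

// Made-up analogue of `rustc_errors::IntoDiagnosticArg`, reduced to
// "convert yourself into the string the message formatter will see".
trait IntoDiagArg {
    fn into_diag_arg(self) -> String;
}

impl IntoDiagArg for String {
    fn into_diag_arg(self) -> String {
        self
    }
}

// Both the owned and the borrowed form delegate through `format!`,
// mirroring how the two impls in the diff defer to `String`'s impl.
impl IntoDiagArg for RegionName {
    fn into_diag_arg(self) -> String {
        format!("{}", self).into_diag_arg()
    }
}

impl<'a> IntoDiagArg for &'a RegionName {
    fn into_diag_arg(self) -> String {
        format!("{}", self).into_diag_arg()
    }
}

fn main() {
    let fr_name = RegionName("'1");
    println!("fr_name = {}", (&fr_name).into_diag_arg());
    println!("outlived_fr_name = {}", fr_name.into_diag_arg());
}
```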
diff --git a/compiler/rustc_borrowck/src/type_check/canonical.rs b/compiler/rustc_borrowck/src/type_check/canonical.rs
index 6cfe5efb6..a581726a1 100644
--- a/compiler/rustc_borrowck/src/type_check/canonical.rs
+++ b/compiler/rustc_borrowck/src/type_check/canonical.rs
@@ -24,8 +24,8 @@ impl<'a, 'tcx> TypeChecker<'a, 'tcx> {
/// **Any `rustc_infer::infer` operations that might generate region
/// constraints should occur within this method so that those
/// constraints can be properly localized!**
- #[instrument(skip(self, category, op), level = "trace")]
- pub(super) fn fully_perform_op<R, Op>(
+ #[instrument(skip(self, op), level = "trace")]
+ pub(super) fn fully_perform_op<R: fmt::Debug, Op>(
&mut self,
locations: Locations,
category: ConstraintCategory<'tcx>,
@@ -39,6 +39,8 @@ impl<'a, 'tcx> TypeChecker<'a, 'tcx> {
let TypeOpOutput { output, constraints, error_info } = op.fully_perform(self.infcx)?;
+ debug!(?output, ?constraints);
+
if let Some(data) = constraints {
self.push_region_constraints(locations, category, data);
}
@@ -50,11 +52,8 @@ impl<'a, 'tcx> TypeChecker<'a, 'tcx> {
Some(error_info) => error_info.to_universe_info(old_universe),
None => UniverseInfo::other(),
};
- for u in old_universe..universe {
- self.borrowck_context
- .constraints
- .universe_causes
- .insert(u + 1, universe_info.clone());
+ for u in (old_universe + 1)..=universe {
+ self.borrowck_context.constraints.universe_causes.insert(u, universe_info.clone());
}
}
@@ -69,15 +68,13 @@ impl<'a, 'tcx> TypeChecker<'a, 'tcx> {
where
T: TypeFoldable<'tcx>,
{
+ let old_universe = self.infcx.universe();
+
let (instantiated, _) =
self.infcx.instantiate_canonical_with_fresh_inference_vars(span, canonical);
- for u in 0..canonical.max_universe.as_u32() {
- let info = UniverseInfo::other();
- self.borrowck_context
- .constraints
- .universe_causes
- .insert(ty::UniverseIndex::from_u32(u), info);
+ for u in (old_universe + 1)..=self.infcx.universe() {
+ self.borrowck_context.constraints.universe_causes.insert(u, UniverseInfo::other());
}
instantiated
@@ -90,17 +87,19 @@ impl<'a, 'tcx> TypeChecker<'a, 'tcx> {
locations: Locations,
category: ConstraintCategory<'tcx>,
) {
- self.prove_predicates(
- Some(ty::Binder::dummy(ty::PredicateKind::Trait(ty::TraitPredicate {
+ self.prove_predicate(
+ ty::Binder::dummy(ty::PredicateKind::Trait(ty::TraitPredicate {
trait_ref,
constness: ty::BoundConstness::NotConst,
polarity: ty::ImplPolarity::Positive,
- }))),
+ }))
+ .to_predicate(self.tcx()),
locations,
category,
);
}
+ #[instrument(level = "debug", skip(self))]
pub(super) fn normalize_and_prove_instantiated_predicates(
&mut self,
// Keep this parameter for now, in case we start using
@@ -115,8 +114,9 @@ impl<'a, 'tcx> TypeChecker<'a, 'tcx> {
.zip(instantiated_predicates.spans.into_iter())
{
debug!(?predicate);
- let predicate = self.normalize(predicate, locations);
- self.prove_predicate(predicate, locations, ConstraintCategory::Predicate(span));
+ let category = ConstraintCategory::Predicate(span);
+ let predicate = self.normalize_with_category(predicate, locations, category);
+ self.prove_predicate(predicate, locations, category);
}
}
@@ -152,15 +152,27 @@ impl<'a, 'tcx> TypeChecker<'a, 'tcx> {
})
}
- #[instrument(skip(self), level = "debug")]
pub(super) fn normalize<T>(&mut self, value: T, location: impl NormalizeLocation) -> T
where
T: type_op::normalize::Normalizable<'tcx> + fmt::Display + Copy + 'tcx,
{
+ self.normalize_with_category(value, location, ConstraintCategory::Boring)
+ }
+
+ #[instrument(skip(self), level = "debug")]
+ pub(super) fn normalize_with_category<T>(
+ &mut self,
+ value: T,
+ location: impl NormalizeLocation,
+ category: ConstraintCategory<'tcx>,
+ ) -> T
+ where
+ T: type_op::normalize::Normalizable<'tcx> + fmt::Display + Copy + 'tcx,
+ {
let param_env = self.param_env;
self.fully_perform_op(
location.to_locations(),
- ConstraintCategory::Boring,
+ category,
param_env.and(type_op::normalize::Normalize::new(value)),
)
.unwrap_or_else(|NoSolution| {
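
The first hunk above replaces `for u in old_universe..universe { ...insert(u + 1, ...) }` with `for u in (old_universe + 1)..=universe { ...insert(u, ...) }`, which records causes for exactly the same universes while reading the index directly. A quick sanity check of that equivalence, modelling universe indices as plain `u32` (the real code iterates `ty::UniverseIndex`):

```rust
// Old form: iterate the pre-existing range and shift by one when inserting.
fn old_form(old_universe: u32, universe: u32) -> Vec<u32> {
    (old_universe..universe).map(|u| u + 1).collect()
}

// New form: iterate the freshly created universes directly.
fn new_form(old_universe: u32, universe: u32) -> Vec<u32> {
    ((old_universe + 1)..=universe).collect()
}

fn main() {
    // (universe before the type op, universe after) pairs.
    for (old, new) in [(0, 0), (0, 3), (2, 5)] {
        assert_eq!(old_form(old, new), new_form(old, new));
    }
    println!("both loops record causes for the same universe indices");
}
```

The second hunk (the canonical instantiation wrapper) is not a pure rewrite: it keys the loop off the universes actually created by instantiation rather than `canonical.max_universe`.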
diff --git a/compiler/rustc_borrowck/src/type_check/constraint_conversion.rs b/compiler/rustc_borrowck/src/type_check/constraint_conversion.rs
index 167960918..d5bfc2f52 100644
--- a/compiler/rustc_borrowck/src/type_check/constraint_conversion.rs
+++ b/compiler/rustc_borrowck/src/type_check/constraint_conversion.rs
@@ -6,7 +6,7 @@ use rustc_infer::infer::region_constraints::{GenericKind, VerifyBound};
use rustc_infer::infer::{self, InferCtxt, SubregionOrigin};
use rustc_middle::mir::ConstraintCategory;
use rustc_middle::ty::subst::GenericArgKind;
-use rustc_middle::ty::TypeVisitable;
+use rustc_middle::ty::TypeFoldable;
use rustc_middle::ty::{self, TyCtxt};
use rustc_span::{Span, DUMMY_SP};
@@ -19,7 +19,7 @@ use crate::{
};
pub(crate) struct ConstraintConversion<'a, 'tcx> {
- infcx: &'a InferCtxt<'a, 'tcx>,
+ infcx: &'a InferCtxt<'tcx>,
tcx: TyCtxt<'tcx>,
universal_regions: &'a UniversalRegions<'tcx>,
/// Each RBP `GK: 'a` is assumed to be true. These encode
@@ -43,7 +43,7 @@ pub(crate) struct ConstraintConversion<'a, 'tcx> {
impl<'a, 'tcx> ConstraintConversion<'a, 'tcx> {
pub(crate) fn new(
- infcx: &'a InferCtxt<'a, 'tcx>,
+ infcx: &'a InferCtxt<'tcx>,
universal_regions: &'a UniversalRegions<'tcx>,
region_bound_pairs: &'a RegionBoundPairs<'tcx>,
implicit_region_bound: ty::Region<'tcx>,
@@ -86,7 +86,7 @@ impl<'a, 'tcx> ConstraintConversion<'a, 'tcx> {
}
}
- pub(super) fn convert(&mut self, query_constraint: &QueryOutlivesConstraint<'tcx>) {
+ fn convert(&mut self, query_constraint: &QueryOutlivesConstraint<'tcx>) {
debug!("generate: constraints at: {:#?}", self.locations);
// Extract out various useful fields we'll need below.
@@ -98,34 +98,25 @@ impl<'a, 'tcx> ConstraintConversion<'a, 'tcx> {
// region constraints like `for<'a> 'a: 'b`. At some point
// when we move to universes, we will, and this assertion
// will start to fail.
- let ty::OutlivesPredicate(k1, r2) = query_constraint.no_bound_vars().unwrap_or_else(|| {
- bug!("query_constraint {:?} contained bound vars", query_constraint,);
- });
+ let ty::OutlivesPredicate(k1, r2) =
+ query_constraint.0.no_bound_vars().unwrap_or_else(|| {
+ bug!("query_constraint {:?} contained bound vars", query_constraint,);
+ });
+
+ let constraint_category = query_constraint.1;
match k1.unpack() {
GenericArgKind::Lifetime(r1) => {
let r1_vid = self.to_region_vid(r1);
let r2_vid = self.to_region_vid(r2);
- self.add_outlives(r1_vid, r2_vid);
+ self.add_outlives(r1_vid, r2_vid, constraint_category);
}
- GenericArgKind::Type(mut t1) => {
+ GenericArgKind::Type(t1) => {
// we don't actually use this for anything, but
// the `TypeOutlives` code needs an origin.
let origin = infer::RelateParamBound(DUMMY_SP, t1, None);
- // Placeholder regions need to be converted now because it may
- // create new region variables, which can't be done later when
- // verifying these bounds.
- if t1.has_placeholders() {
- t1 = tcx.fold_regions(t1, |r, _| match *r {
- ty::RePlaceholder(placeholder) => {
- self.constraints.placeholder_region(self.infcx, placeholder)
- }
- _ => r,
- });
- }
-
TypeOutlives::new(
&mut *self,
tcx,
@@ -133,7 +124,7 @@ impl<'a, 'tcx> ConstraintConversion<'a, 'tcx> {
Some(implicit_region_bound),
param_env,
)
- .type_must_outlive(origin, t1, r2);
+ .type_must_outlive(origin, t1, r2, constraint_category);
}
GenericArgKind::Const(_) => {
@@ -143,6 +134,25 @@ impl<'a, 'tcx> ConstraintConversion<'a, 'tcx> {
}
}
+ /// Placeholder regions need to be converted eagerly because doing so may
+ /// create new region variables, which we must not do when verifying
+ /// our region bounds.
+ ///
+ /// FIXME: This should get removed once higher ranked region obligations
+ /// are dealt with during trait solving.
+ fn replace_placeholders_with_nll<T: TypeFoldable<'tcx>>(&mut self, value: T) -> T {
+ if value.has_placeholders() {
+ self.tcx.fold_regions(value, |r, _| match *r {
+ ty::RePlaceholder(placeholder) => {
+ self.constraints.placeholder_region(self.infcx, placeholder)
+ }
+ _ => r,
+ })
+ } else {
+ value
+ }
+ }
+
fn verify_to_type_test(
&mut self,
generic_kind: GenericKind<'tcx>,
@@ -150,7 +160,6 @@ impl<'a, 'tcx> ConstraintConversion<'a, 'tcx> {
verify_bound: VerifyBound<'tcx>,
) -> TypeTest<'tcx> {
let lower_bound = self.to_region_vid(region);
-
TypeTest { generic_kind, lower_bound, locations: self.locations, verify_bound }
}
@@ -162,10 +171,19 @@ impl<'a, 'tcx> ConstraintConversion<'a, 'tcx> {
}
}
- fn add_outlives(&mut self, sup: ty::RegionVid, sub: ty::RegionVid) {
+ fn add_outlives(
+ &mut self,
+ sup: ty::RegionVid,
+ sub: ty::RegionVid,
+ category: ConstraintCategory<'tcx>,
+ ) {
+ let category = match self.category {
+ ConstraintCategory::Boring | ConstraintCategory::BoringNoLocation => category,
+ _ => self.category,
+ };
self.constraints.outlives_constraints.push(OutlivesConstraint {
locations: self.locations,
- category: self.category,
+ category,
span: self.span,
sub,
sup,
@@ -185,10 +203,11 @@ impl<'a, 'b, 'tcx> TypeOutlivesDelegate<'tcx> for &'a mut ConstraintConversion<'
_origin: SubregionOrigin<'tcx>,
a: ty::Region<'tcx>,
b: ty::Region<'tcx>,
+ constraint_category: ConstraintCategory<'tcx>,
) {
let b = self.to_region_vid(b);
let a = self.to_region_vid(a);
- self.add_outlives(b, a);
+ self.add_outlives(b, a, constraint_category);
}
fn push_verify(
@@ -198,6 +217,8 @@ impl<'a, 'b, 'tcx> TypeOutlivesDelegate<'tcx> for &'a mut ConstraintConversion<'
a: ty::Region<'tcx>,
bound: VerifyBound<'tcx>,
) {
+ let kind = self.replace_placeholders_with_nll(kind);
+ let bound = self.replace_placeholders_with_nll(bound);
let type_test = self.verify_to_type_test(kind, a, bound);
self.add_type_test(type_test);
}
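
As the new doc comment explains, `replace_placeholders_with_nll` folds a value and rewrites every `ty::RePlaceholder` into an NLL region variable before the bound is verified. A rough, self-contained sketch of that fold, with toy `Region`/`Ty`/`Constraints` types standing in for the real `rustc_middle` ones (so none of this is compiler API):

```rust
use std::collections::HashMap;

#[derive(Clone, Debug)]
enum Region {
    Var(u32),         // an NLL inference variable
    Placeholder(u32), // a higher-ranked placeholder
    Static,
}

#[derive(Clone, Debug)]
enum Ty {
    Ref(Region, Box<Ty>),
    Tuple(Vec<Ty>),
    Unit,
}

#[derive(Default)]
struct Constraints {
    // Maps each placeholder to the variable that replaced it, so repeated
    // occurrences of one placeholder share a single fresh variable.
    placeholder_to_var: HashMap<u32, u32>,
    next_var: u32,
}

impl Constraints {
    // Analogue of `self.constraints.placeholder_region(self.infcx, placeholder)`.
    fn placeholder_region(&mut self, p: u32) -> Region {
        if let Some(&v) = self.placeholder_to_var.get(&p) {
            return Region::Var(v);
        }
        let v = self.next_var;
        self.next_var += 1;
        self.placeholder_to_var.insert(p, v);
        Region::Var(v)
    }
}

// Analogue of `tcx.fold_regions(value, |r, _| ...)`: walk the type and
// rewrite placeholder regions, leaving every other region untouched.
fn replace_placeholders(ty: &Ty, constraints: &mut Constraints) -> Ty {
    match ty {
        Ty::Ref(r, inner) => {
            let r = match r {
                Region::Placeholder(p) => constraints.placeholder_region(*p),
                other => other.clone(),
            };
            Ty::Ref(r, Box::new(replace_placeholders(inner, constraints)))
        }
        Ty::Tuple(tys) => {
            Ty::Tuple(tys.iter().map(|t| replace_placeholders(t, constraints)).collect())
        }
        Ty::Unit => Ty::Unit,
    }
}

fn main() {
    let mut constraints = Constraints::default();
    // Roughly `&'!1 (&'static (), &'!1 ())`: both uses of placeholder 1
    // must be replaced by the same fresh variable.
    let ty = Ty::Ref(
        Region::Placeholder(1),
        Box::new(Ty::Tuple(vec![
            Ty::Ref(Region::Static, Box::new(Ty::Unit)),
            Ty::Ref(Region::Placeholder(1), Box::new(Ty::Unit)),
        ])),
    );
    println!("{:?}", replace_placeholders(&ty, &mut constraints));
}
```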
diff --git a/compiler/rustc_borrowck/src/type_check/free_region_relations.rs b/compiler/rustc_borrowck/src/type_check/free_region_relations.rs
index cc0318ede..029095926 100644
--- a/compiler/rustc_borrowck/src/type_check/free_region_relations.rs
+++ b/compiler/rustc_borrowck/src/type_check/free_region_relations.rs
@@ -1,5 +1,5 @@
use rustc_data_structures::frozen::Frozen;
-use rustc_data_structures::transitive_relation::TransitiveRelation;
+use rustc_data_structures::transitive_relation::{TransitiveRelation, TransitiveRelationBuilder};
use rustc_infer::infer::canonical::QueryRegionConstraints;
use rustc_infer::infer::outlives;
use rustc_infer::infer::outlives::env::RegionBoundPairs;
@@ -8,7 +8,6 @@ use rustc_infer::infer::InferCtxt;
use rustc_middle::mir::ConstraintCategory;
use rustc_middle::traits::query::OutlivesBound;
use rustc_middle::ty::{self, RegionVid, Ty};
-use rustc_span::DUMMY_SP;
use rustc_trait_selection::traits::query::type_op::{self, TypeOp};
use std::rc::Rc;
use type_op::TypeOpOutput;
@@ -48,7 +47,7 @@ pub(crate) struct CreateResult<'tcx> {
}
pub(crate) fn create<'tcx>(
- infcx: &InferCtxt<'_, 'tcx>,
+ infcx: &InferCtxt<'tcx>,
param_env: ty::ParamEnv<'tcx>,
implicit_region_bound: ty::Region<'tcx>,
universal_regions: &Rc<UniversalRegions<'tcx>>,
@@ -61,25 +60,13 @@ pub(crate) fn create<'tcx>(
constraints,
universal_regions: universal_regions.clone(),
region_bound_pairs: Default::default(),
- relations: UniversalRegionRelations {
- universal_regions: universal_regions.clone(),
- outlives: Default::default(),
- inverse_outlives: Default::default(),
- },
+ outlives: Default::default(),
+ inverse_outlives: Default::default(),
}
.create()
}
impl UniversalRegionRelations<'_> {
- /// Records in the `outlives_relation` (and
- /// `inverse_outlives_relation`) that `fr_a: fr_b`. Invoked by the
- /// builder below.
- fn relate_universal_regions(&mut self, fr_a: RegionVid, fr_b: RegionVid) {
- debug!("relate_universal_regions: fr_a={:?} outlives fr_b={:?}", fr_a, fr_b);
- self.outlives.add(fr_a, fr_b);
- self.inverse_outlives.add(fr_b, fr_a);
- }
-
/// Given two universal regions, returns the postdominating
/// upper-bound (effectively the least upper bound).
///
@@ -209,19 +196,29 @@ impl UniversalRegionRelations<'_> {
}
struct UniversalRegionRelationsBuilder<'this, 'tcx> {
- infcx: &'this InferCtxt<'this, 'tcx>,
+ infcx: &'this InferCtxt<'tcx>,
param_env: ty::ParamEnv<'tcx>,
universal_regions: Rc<UniversalRegions<'tcx>>,
implicit_region_bound: ty::Region<'tcx>,
constraints: &'this mut MirTypeckRegionConstraints<'tcx>,
// outputs:
- relations: UniversalRegionRelations<'tcx>,
+ outlives: TransitiveRelationBuilder<RegionVid>,
+ inverse_outlives: TransitiveRelationBuilder<RegionVid>,
region_bound_pairs: RegionBoundPairs<'tcx>,
}
impl<'tcx> UniversalRegionRelationsBuilder<'_, 'tcx> {
+ /// Records in the `outlives_relation` (and
+ /// `inverse_outlives_relation`) that `fr_a: fr_b`.
+ fn relate_universal_regions(&mut self, fr_a: RegionVid, fr_b: RegionVid) {
+ debug!("relate_universal_regions: fr_a={:?} outlives fr_b={:?}", fr_a, fr_b);
+ self.outlives.add(fr_a, fr_b);
+ self.inverse_outlives.add(fr_b, fr_a);
+ }
+
pub(crate) fn create(mut self) -> CreateResult<'tcx> {
+ let span = self.infcx.tcx.def_span(self.universal_regions.defining_ty.def_id());
let unnormalized_input_output_tys = self
.universal_regions
.unnormalized_input_tys
@@ -242,10 +239,9 @@ impl<'tcx> UniversalRegionRelationsBuilder<'_, 'tcx> {
let constraint_sets: Vec<_> = unnormalized_input_output_tys
.flat_map(|ty| {
debug!("build: input_or_output={:?}", ty);
- // We only add implied bounds for the normalized type as the unnormalized
- // type may not actually get checked by the caller.
- //
- // Can otherwise be unsound, see #91068.
+ // We add implied bounds from both the unnormalized and normalized ty.
+ // See issue #87748
+ let constraints_implied1 = self.add_implied_bounds(ty);
let TypeOpOutput { output: norm_ty, constraints: constraints1, .. } = self
.param_env
.and(type_op::normalize::Normalize::new(ty))
@@ -254,7 +250,7 @@ impl<'tcx> UniversalRegionRelationsBuilder<'_, 'tcx> {
self.infcx
.tcx
.sess
- .delay_span_bug(DUMMY_SP, &format!("failed to normalize {:?}", ty));
+ .delay_span_bug(span, &format!("failed to normalize {:?}", ty));
TypeOpOutput {
output: self.infcx.tcx.ty_error(),
constraints: None,
@@ -269,13 +265,14 @@ impl<'tcx> UniversalRegionRelationsBuilder<'_, 'tcx> {
// }
// impl Foo for () {
// type Bar = ();
- // fn foo(&self) ->&() {}
+ // fn foo(&self) -> &() {}
// }
// ```
// Both &Self::Bar and &() are WF
- let constraints_implied = self.add_implied_bounds(norm_ty);
+ let constraints_implied2 =
+ if ty != norm_ty { self.add_implied_bounds(norm_ty) } else { None };
normalized_inputs_and_output.push(norm_ty);
- constraints1.into_iter().chain(constraints_implied)
+ constraints1.into_iter().chain(constraints_implied1).chain(constraints_implied2)
})
.collect();
@@ -292,9 +289,9 @@ impl<'tcx> UniversalRegionRelationsBuilder<'_, 'tcx> {
let fr_fn_body = self.universal_regions.fr_fn_body;
for fr in self.universal_regions.universal_regions() {
debug!("build: relating free region {:?} to itself and to 'static", fr);
- self.relations.relate_universal_regions(fr, fr);
- self.relations.relate_universal_regions(fr_static, fr);
- self.relations.relate_universal_regions(fr, fr_fn_body);
+ self.relate_universal_regions(fr, fr);
+ self.relate_universal_regions(fr_static, fr);
+ self.relate_universal_regions(fr, fr_fn_body);
}
for data in &constraint_sets {
@@ -304,8 +301,8 @@ impl<'tcx> UniversalRegionRelationsBuilder<'_, 'tcx> {
&self.region_bound_pairs,
self.implicit_region_bound,
self.param_env,
- Locations::All(DUMMY_SP),
- DUMMY_SP,
+ Locations::All(span),
+ span,
ConstraintCategory::Internal,
&mut self.constraints,
)
@@ -313,7 +310,11 @@ impl<'tcx> UniversalRegionRelationsBuilder<'_, 'tcx> {
}
CreateResult {
- universal_region_relations: Frozen::freeze(self.relations),
+ universal_region_relations: Frozen::freeze(UniversalRegionRelations {
+ universal_regions: self.universal_regions,
+ outlives: self.outlives.freeze(),
+ inverse_outlives: self.inverse_outlives.freeze(),
+ }),
region_bound_pairs: self.region_bound_pairs,
normalized_inputs_and_output,
}
@@ -346,17 +347,10 @@ impl<'tcx> UniversalRegionRelationsBuilder<'_, 'tcx> {
match outlives_bound {
OutlivesBound::RegionSubRegion(r1, r2) => {
- // `where Type:` is lowered to `where Type: 'empty` so that
- // we check `Type` is well formed, but there's no use for
- // this bound here.
- if r1.is_empty() {
- return;
- }
-
// The bound says that `r1 <= r2`; we store `r2: r1`.
let r1 = self.universal_regions.to_region_vid(r1);
let r2 = self.universal_regions.to_region_vid(r2);
- self.relations.relate_universal_regions(r2, r1);
+ self.relate_universal_regions(r2, r1);
}
OutlivesBound::RegionSubParam(r_a, param_b) => {
@@ -368,6 +362,11 @@ impl<'tcx> UniversalRegionRelationsBuilder<'_, 'tcx> {
self.region_bound_pairs
.insert(ty::OutlivesPredicate(GenericKind::Projection(projection_b), r_a));
}
+
+ OutlivesBound::RegionSubOpaque(r_a, def_id, substs) => {
+ self.region_bound_pairs
+ .insert(ty::OutlivesPredicate(GenericKind::Opaque(def_id, substs), r_a));
+ }
}
}
}
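
The commented example in the hunk above is what motivates registering implied bounds for both the unnormalized and the normalized type (issue #87748): the impl writes its return type as `&()`, while the trait signature mentions `&Self::Bar`, and both spellings must be treated as well formed. Filled out so it compiles on its own (the trait declaration is reconstructed from the impl, so treat its exact shape as an assumption):

```rust
trait Foo {
    type Bar;
    fn foo(&self) -> &Self::Bar;
}

impl Foo for () {
    type Bar = ();
    // Written as `-> &()` instead of `-> &Self::Bar`: the unnormalized
    // signature mentions the projection, the normalized one does not.
    fn foo(&self) -> &() {
        &()
    }
}

fn main() {
    let unit = ();
    // Uses the impl; `&Self::Bar` and `&()` name the same type here.
    let _returned: &() = unit.foo();
}
```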
diff --git a/compiler/rustc_borrowck/src/type_check/input_output.rs b/compiler/rustc_borrowck/src/type_check/input_output.rs
index 4431a2e8e..a66ddd27d 100644
--- a/compiler/rustc_borrowck/src/type_check/input_output.rs
+++ b/compiler/rustc_borrowck/src/type_check/input_output.rs
@@ -7,16 +7,11 @@
//! `RETURN_PLACE` the MIR arguments) are always fully normalized (and
//! contain revealed `impl Trait` values).
-use crate::type_check::constraint_conversion::ConstraintConversion;
use rustc_index::vec::Idx;
use rustc_infer::infer::LateBoundRegionConversionTime;
use rustc_middle::mir::*;
use rustc_middle::ty::Ty;
use rustc_span::Span;
-use rustc_span::DUMMY_SP;
-use rustc_trait_selection::traits::query::type_op::{self, TypeOp};
-use rustc_trait_selection::traits::query::Fallible;
-use type_op::TypeOpOutput;
use crate::universal_regions::UniversalRegions;
@@ -185,7 +180,7 @@ impl<'a, 'tcx> TypeChecker<'a, 'tcx> {
}
}
- #[instrument(skip(self, span), level = "debug")]
+ #[instrument(skip(self), level = "debug")]
fn equate_normalized_input_or_output(&mut self, a: Ty<'tcx>, b: Ty<'tcx>, span: Span) {
if let Err(_) =
self.eq_types(a, b, Locations::All(span), ConstraintCategory::BoringNoLocation)
@@ -194,13 +189,7 @@ impl<'a, 'tcx> TypeChecker<'a, 'tcx> {
// `rustc_traits::normalize_after_erasing_regions`. Ideally, we'd
// like to normalize *before* inserting into `local_decls`, but
// doing so ends up causing some other trouble.
- let b = match self.normalize_and_add_constraints(b) {
- Ok(n) => n,
- Err(_) => {
- debug!("equate_inputs_and_outputs: NoSolution");
- b
- }
- };
+ let b = self.normalize(b, Locations::All(span));
// Note: if we have to introduce new placeholders during normalization above, then we won't have
// added those universes to the universe info, which we would want in `relate_tys`.
@@ -218,28 +207,4 @@ impl<'a, 'tcx> TypeChecker<'a, 'tcx> {
}
}
}
-
- pub(crate) fn normalize_and_add_constraints(&mut self, t: Ty<'tcx>) -> Fallible<Ty<'tcx>> {
- let TypeOpOutput { output: norm_ty, constraints, .. } =
- self.param_env.and(type_op::normalize::Normalize::new(t)).fully_perform(self.infcx)?;
-
- debug!("{:?} normalized to {:?}", t, norm_ty);
-
- for data in constraints {
- ConstraintConversion::new(
- self.infcx,
- &self.borrowck_context.universal_regions,
- &self.region_bound_pairs,
- self.implicit_region_bound,
- self.param_env,
- Locations::All(DUMMY_SP),
- DUMMY_SP,
- ConstraintCategory::Internal,
- &mut self.borrowck_context.constraints,
- )
- .convert_all(&*data);
- }
-
- Ok(norm_ty)
- }
}
diff --git a/compiler/rustc_borrowck/src/type_check/mod.rs b/compiler/rustc_borrowck/src/type_check/mod.rs
index d32b1edcd..3c1c3ab45 100644
--- a/compiler/rustc_borrowck/src/type_check/mod.rs
+++ b/compiler/rustc_borrowck/src/type_check/mod.rs
@@ -30,8 +30,9 @@ use rustc_middle::ty::cast::CastTy;
use rustc_middle::ty::subst::{GenericArgKind, SubstsRef, UserSubsts};
use rustc_middle::ty::visit::TypeVisitable;
use rustc_middle::ty::{
- self, CanonicalUserTypeAnnotation, CanonicalUserTypeAnnotations, OpaqueHiddenType,
- OpaqueTypeKey, RegionVid, ToPredicate, Ty, TyCtxt, UserType, UserTypeAnnotationIndex,
+ self, Binder, CanonicalUserTypeAnnotation, CanonicalUserTypeAnnotations, Dynamic,
+ OpaqueHiddenType, OpaqueTypeKey, RegionVid, ToPredicate, Ty, TyCtxt, UserType,
+ UserTypeAnnotationIndex,
};
use rustc_span::def_id::CRATE_DEF_ID;
use rustc_span::{Span, DUMMY_SP};
@@ -122,7 +123,7 @@ mod relate_tys;
/// - `move_data` -- move-data constructed when performing the maybe-init dataflow analysis
/// - `elements` -- MIR region map
pub(crate) fn type_check<'mir, 'tcx>(
- infcx: &InferCtxt<'_, 'tcx>,
+ infcx: &InferCtxt<'tcx>,
param_env: ty::ParamEnv<'tcx>,
body: &Body<'tcx>,
promoted: &IndexVec<Promoted, Body<'tcx>>,
@@ -137,8 +138,6 @@ pub(crate) fn type_check<'mir, 'tcx>(
use_polonius: bool,
) -> MirTypeckResults<'tcx> {
let implicit_region_bound = infcx.tcx.mk_region(ty::ReVar(universal_regions.fr_fn_body));
- let mut universe_causes = FxHashMap::default();
- universe_causes.insert(ty::UniverseIndex::from_u32(0), UniverseInfo::other());
let mut constraints = MirTypeckRegionConstraints {
placeholder_indices: PlaceholderIndices::default(),
placeholder_index_to_region: IndexVec::default(),
@@ -147,7 +146,7 @@ pub(crate) fn type_check<'mir, 'tcx>(
member_constraints: MemberConstraintSet::default(),
closure_bounds_mapping: Default::default(),
type_tests: Vec::default(),
- universe_causes,
+ universe_causes: FxHashMap::default(),
};
let CreateResult {
@@ -164,9 +163,8 @@ pub(crate) fn type_check<'mir, 'tcx>(
debug!(?normalized_inputs_and_output);
- for u in ty::UniverseIndex::ROOT..infcx.universe() {
- let info = UniverseInfo::other();
- constraints.universe_causes.insert(u, info);
+ for u in ty::UniverseIndex::ROOT..=infcx.universe() {
+ constraints.universe_causes.insert(u, UniverseInfo::other());
}
let mut borrowck_context = BorrowCheckContext {
@@ -178,97 +176,15 @@ pub(crate) fn type_check<'mir, 'tcx>(
upvars,
};
- let opaque_type_values = type_check_internal(
+ let mut checker = TypeChecker::new(
infcx,
- param_env,
body,
- promoted,
+ param_env,
&region_bound_pairs,
implicit_region_bound,
&mut borrowck_context,
- |mut cx| {
- debug!("inside extra closure of type_check_internal");
- cx.equate_inputs_and_outputs(&body, universal_regions, &normalized_inputs_and_output);
- liveness::generate(
- &mut cx,
- body,
- elements,
- flow_inits,
- move_data,
- location_table,
- use_polonius,
- );
-
- translate_outlives_facts(&mut cx);
- let opaque_type_values =
- infcx.inner.borrow_mut().opaque_type_storage.take_opaque_types();
-
- opaque_type_values
- .into_iter()
- .map(|(opaque_type_key, decl)| {
- cx.fully_perform_op(
- Locations::All(body.span),
- ConstraintCategory::OpaqueType,
- CustomTypeOp::new(
- |infcx| {
- infcx.register_member_constraints(
- param_env,
- opaque_type_key,
- decl.hidden_type.ty,
- decl.hidden_type.span,
- );
- Ok(InferOk { value: (), obligations: vec![] })
- },
- || "opaque_type_map".to_string(),
- ),
- )
- .unwrap();
- let mut hidden_type = infcx.resolve_vars_if_possible(decl.hidden_type);
- trace!(
- "finalized opaque type {:?} to {:#?}",
- opaque_type_key,
- hidden_type.ty.kind()
- );
- if hidden_type.has_infer_types_or_consts() {
- infcx.tcx.sess.delay_span_bug(
- decl.hidden_type.span,
- &format!("could not resolve {:#?}", hidden_type.ty.kind()),
- );
- hidden_type.ty = infcx.tcx.ty_error();
- }
-
- (opaque_type_key, (hidden_type, decl.origin))
- })
- .collect()
- },
);
- MirTypeckResults { constraints, universal_region_relations, opaque_type_values }
-}
-
-#[instrument(
- skip(infcx, body, promoted, region_bound_pairs, borrowck_context, extra),
- level = "debug"
-)]
-fn type_check_internal<'a, 'tcx, R>(
- infcx: &'a InferCtxt<'a, 'tcx>,
- param_env: ty::ParamEnv<'tcx>,
- body: &'a Body<'tcx>,
- promoted: &'a IndexVec<Promoted, Body<'tcx>>,
- region_bound_pairs: &'a RegionBoundPairs<'tcx>,
- implicit_region_bound: ty::Region<'tcx>,
- borrowck_context: &'a mut BorrowCheckContext<'a, 'tcx>,
- extra: impl FnOnce(TypeChecker<'a, 'tcx>) -> R,
-) -> R {
- debug!("body: {:#?}", body);
- let mut checker = TypeChecker::new(
- infcx,
- body,
- param_env,
- region_bound_pairs,
- implicit_region_bound,
- borrowck_context,
- );
let errors_reported = {
let mut verifier = TypeVerifier::new(&mut checker, promoted);
verifier.visit_body(&body);
@@ -280,7 +196,56 @@ fn type_check_internal<'a, 'tcx, R>(
checker.typeck_mir(body);
}
- extra(checker)
+ checker.equate_inputs_and_outputs(&body, universal_regions, &normalized_inputs_and_output);
+ liveness::generate(
+ &mut checker,
+ body,
+ elements,
+ flow_inits,
+ move_data,
+ location_table,
+ use_polonius,
+ );
+
+ translate_outlives_facts(&mut checker);
+ let opaque_type_values = infcx.inner.borrow_mut().opaque_type_storage.take_opaque_types();
+
+ let opaque_type_values = opaque_type_values
+ .into_iter()
+ .map(|(opaque_type_key, decl)| {
+ checker
+ .fully_perform_op(
+ Locations::All(body.span),
+ ConstraintCategory::OpaqueType,
+ CustomTypeOp::new(
+ |infcx| {
+ infcx.register_member_constraints(
+ param_env,
+ opaque_type_key,
+ decl.hidden_type.ty,
+ decl.hidden_type.span,
+ );
+ Ok(InferOk { value: (), obligations: vec![] })
+ },
+ || "opaque_type_map".to_string(),
+ ),
+ )
+ .unwrap();
+ let mut hidden_type = infcx.resolve_vars_if_possible(decl.hidden_type);
+ trace!("finalized opaque type {:?} to {:#?}", opaque_type_key, hidden_type.ty.kind());
+ if hidden_type.has_non_region_infer() {
+ infcx.tcx.sess.delay_span_bug(
+ decl.hidden_type.span,
+ &format!("could not resolve {:#?}", hidden_type.ty.kind()),
+ );
+ hidden_type.ty = infcx.tcx.ty_error();
+ }
+
+ (opaque_type_key, (hidden_type, decl.origin))
+ })
+ .collect();
+
+ MirTypeckResults { constraints, universal_region_relations, opaque_type_values }
}
fn translate_outlives_facts(typeck: &mut TypeChecker<'_, '_>) {
@@ -344,6 +309,8 @@ impl<'a, 'b, 'tcx> Visitor<'tcx> for TypeVerifier<'a, 'b, 'tcx> {
}
fn visit_constant(&mut self, constant: &Constant<'tcx>, location: Location) {
+ debug!(?constant, ?location, "visit_constant");
+
self.super_constant(constant, location);
let ty = self.sanitize_type(constant, constant.literal.ty());
@@ -387,11 +354,15 @@ impl<'a, 'b, 'tcx> Visitor<'tcx> for TypeVerifier<'a, 'b, 'tcx> {
let tcx = self.tcx();
let maybe_uneval = match constant.literal {
ConstantKind::Ty(ct) => match ct.kind() {
- ty::ConstKind::Unevaluated(uv) => Some(uv),
+ ty::ConstKind::Unevaluated(_) => {
+ bug!("should not encounter unevaluated ConstantKind::Ty here, got {:?}", ct)
+ }
_ => None,
},
+ ConstantKind::Unevaluated(uv, _) => Some(uv),
_ => None,
};
+
if let Some(uv) = maybe_uneval {
if let Some(promoted) = uv.promoted {
let check_err = |verifier: &mut TypeVerifier<'a, 'b, 'tcx>,
@@ -454,12 +425,18 @@ impl<'a, 'b, 'tcx> Visitor<'tcx> for TypeVerifier<'a, 'b, 'tcx> {
}
if let ty::FnDef(def_id, substs) = *constant.literal.ty().kind() {
+ // const_trait_impl: use a non-const param env when checking that a FnDef type is well formed.
+ // This is because proving that the function is well formed does not require `const`
+ // impls for its trait bounds.
let instantiated_predicates = tcx.predicates_of(def_id).instantiate(tcx, substs);
+ let prev = self.cx.param_env;
+ self.cx.param_env = prev.without_const();
self.cx.normalize_and_prove_instantiated_predicates(
def_id,
instantiated_predicates,
locations,
);
+ self.cx.param_env = prev;
}
}
}
@@ -607,6 +584,7 @@ impl<'a, 'b, 'tcx> TypeVerifier<'a, 'b, 'tcx> {
// modify their locations.
let all_facts = &mut None;
let mut constraints = Default::default();
+ let mut type_tests = Default::default();
let mut closure_bounds = Default::default();
let mut liveness_constraints =
LivenessValues::new(Rc::new(RegionValueElements::new(&promoted_body)));
@@ -618,6 +596,7 @@ impl<'a, 'b, 'tcx> TypeVerifier<'a, 'b, 'tcx> {
&mut this.cx.borrowck_context.constraints.outlives_constraints,
&mut constraints,
);
+ mem::swap(&mut this.cx.borrowck_context.constraints.type_tests, &mut type_tests);
mem::swap(
&mut this.cx.borrowck_context.constraints.closure_bounds_mapping,
&mut closure_bounds,
@@ -642,6 +621,13 @@ impl<'a, 'b, 'tcx> TypeVerifier<'a, 'b, 'tcx> {
swap_constraints(self);
let locations = location.to_locations();
+
+ // Use location of promoted const in collected constraints
+ for type_test in type_tests.iter() {
+ let mut type_test = type_test.clone();
+ type_test.locations = locations;
+ self.cx.borrowck_context.constraints.type_tests.push(type_test)
+ }
for constraint in constraints.outlives().iter() {
let mut constraint = constraint.clone();
constraint.locations = locations;
@@ -790,6 +776,19 @@ impl<'a, 'b, 'tcx> TypeVerifier<'a, 'b, 'tcx> {
}
PlaceTy::from_ty(fty)
}
+ ProjectionElem::OpaqueCast(ty) => {
+ let ty = self.sanitize_type(place, ty);
+ let ty = self.cx.normalize(ty, location);
+ self.cx
+ .eq_types(
+ base.ty,
+ ty,
+ location.to_locations(),
+ ConstraintCategory::TypeAnnotation,
+ )
+ .unwrap();
+ PlaceTy::from_ty(ty)
+ }
}
}
@@ -883,7 +882,7 @@ impl<'a, 'b, 'tcx> TypeVerifier<'a, 'b, 'tcx> {
/// way, it accrues region constraints -- these can later be used by
/// NLL region checking.
struct TypeChecker<'a, 'tcx> {
- infcx: &'a InferCtxt<'a, 'tcx>,
+ infcx: &'a InferCtxt<'tcx>,
param_env: ty::ParamEnv<'tcx>,
last_span: Span,
body: &'a Body<'tcx>,
@@ -953,7 +952,7 @@ pub(crate) struct MirTypeckRegionConstraints<'tcx> {
impl<'tcx> MirTypeckRegionConstraints<'tcx> {
fn placeholder_region(
&mut self,
- infcx: &InferCtxt<'_, 'tcx>,
+ infcx: &InferCtxt<'tcx>,
placeholder: ty::PlaceholderRegion,
) -> ty::Region<'tcx> {
let placeholder_index = self.placeholder_indices.insert(placeholder);
@@ -1037,7 +1036,7 @@ impl Locations {
impl<'a, 'tcx> TypeChecker<'a, 'tcx> {
fn new(
- infcx: &'a InferCtxt<'a, 'tcx>,
+ infcx: &'a InferCtxt<'tcx>,
body: &'a Body<'tcx>,
param_env: ty::ParamEnv<'tcx>,
region_bound_pairs: &'a RegionBoundPairs<'tcx>,
@@ -1076,6 +1075,7 @@ impl<'a, 'tcx> TypeChecker<'a, 'tcx> {
let CanonicalUserTypeAnnotation { span, ref user_ty, inferred_ty } = *user_annotation;
let inferred_ty = self.normalize(inferred_ty, Locations::All(span));
let annotation = self.instantiate_canonical_with_fresh_inference_vars(span, user_ty);
+ debug!(?annotation);
match annotation {
UserType::Ty(mut ty) => {
ty = self.normalize(ty, Locations::All(span));
@@ -1195,10 +1195,11 @@ impl<'a, 'tcx> TypeChecker<'a, 'tcx> {
tcx,
self.param_env,
proj,
- |this, field, ()| {
+ |this, field, _| {
let ty = this.field_ty(tcx, field);
self.normalize(ty, locations)
},
+ |_, _| unreachable!(),
);
curr_projected_ty = projected_ty;
}
@@ -1334,12 +1335,13 @@ impl<'a, 'tcx> TypeChecker<'a, 'tcx> {
);
}
}
- StatementKind::CopyNonOverlapping(box rustc_middle::mir::CopyNonOverlapping {
- ..
- }) => span_bug!(
- stmt.source_info.span,
- "Unexpected StatementKind::CopyNonOverlapping, should only appear after lowering_intrinsics",
- ),
+ StatementKind::Intrinsic(box ref kind) => match kind {
+ NonDivergingIntrinsic::Assume(op) => self.check_operand(op, location),
+ NonDivergingIntrinsic::CopyNonOverlapping(..) => span_bug!(
+ stmt.source_info.span,
+ "Unexpected NonDivergingIntrinsic::CopyNonOverlapping, should only appear after lowering_intrinsics",
+ ),
+ },
StatementKind::FakeRead(..)
| StatementKind::StorageLive(..)
| StatementKind::StorageDead(..)
@@ -1448,9 +1450,14 @@ impl<'a, 'tcx> TypeChecker<'a, 'tcx> {
))
});
debug!(?sig);
- let sig = self.normalize(sig, term_location);
- self.check_call_dest(body, term, &sig, *destination, target, term_location);
-
+ // IMPORTANT: We have to prove well formed for the function signature before
+ // we normalize it, as otherwise types like `<&'a &'b () as Trait>::Assoc`
+ // get normalized away, causing us to ignore the `'b: 'a` bound used by the function.
+ //
+ // Normalization results in a well formed type if the input is well formed, so we
+ // don't have to check it twice.
+ //
+ // See #91068 for an example.
self.prove_predicates(
sig.inputs_and_output
.iter()
@@ -1458,6 +1465,8 @@ impl<'a, 'tcx> TypeChecker<'a, 'tcx> {
term_location.to_locations(),
ConstraintCategory::Boring,
);
+ let sig = self.normalize(sig, term_location);
+ self.check_call_dest(body, term, &sig, *destination, target, term_location);
// The ordinary liveness rules will ensure that all
// regions in the type of the callee are live here. We
@@ -1621,7 +1630,7 @@ impl<'a, 'tcx> TypeChecker<'a, 'tcx> {
let op_arg_ty = self.normalize(op_arg_ty, term_location);
let category = if from_hir_call {
- ConstraintCategory::CallArgument(func_ty)
+ ConstraintCategory::CallArgument(self.infcx.tcx.erase_regions(func_ty))
} else {
ConstraintCategory::Boring
};
@@ -1774,7 +1783,7 @@ impl<'a, 'tcx> TypeChecker<'a, 'tcx> {
// `Sized` bound in no way depends on precise regions, so this
// shouldn't affect `is_sized`.
let erased_ty = tcx.erase_regions(ty);
- if !erased_ty.is_sized(tcx.at(span), self.param_env) {
+ if !erased_ty.is_sized(tcx, self.param_env) {
// in current MIR construction, all non-control-flow rvalue
// expressions evaluate through `as_temp` or `into` a return
// slot or local, so to find all unsized rvalues it is enough
@@ -1834,14 +1843,14 @@ impl<'a, 'tcx> TypeChecker<'a, 'tcx> {
}
fn check_operand(&mut self, op: &Operand<'tcx>, location: Location) {
+ debug!(?op, ?location, "check_operand");
+
if let Operand::Constant(constant) = op {
let maybe_uneval = match constant.literal {
- ConstantKind::Ty(ct) => match ct.kind() {
- ty::ConstKind::Unevaluated(uv) => Some(uv),
- _ => None,
- },
- _ => None,
+ ConstantKind::Val(..) | ConstantKind::Ty(_) => None,
+ ConstantKind::Unevaluated(uv, _) => Some(uv),
};
+
if let Some(uv) = maybe_uneval {
if uv.promoted.is_none() {
let tcx = self.tcx();
@@ -1904,7 +1913,7 @@ impl<'a, 'tcx> TypeChecker<'a, 'tcx> {
}
}
- &Rvalue::NullaryOp(_, ty) => {
+ &Rvalue::NullaryOp(NullOp::SizeOf | NullOp::AlignOf, ty) => {
let trait_ref = ty::TraitRef {
def_id: tcx.require_lang_item(LangItem::Sized, Some(self.last_span)),
substs: tcx.mk_substs_trait(ty, &[]),
@@ -2033,6 +2042,36 @@ impl<'a, 'tcx> TypeChecker<'a, 'tcx> {
);
}
+ CastKind::DynStar => {
+ // Get the constraints from the target type (`dyn* Clone`) and
+ // apply them to prove that the source type `Foo` implements
+ // `Clone` etc.
+ let (existential_predicates, region) = match ty.kind() {
+ Dynamic(predicates, region, ty::DynStar) => (predicates, region),
+ _ => panic!("Invalid dyn* cast_ty"),
+ };
+
+ let self_ty = op.ty(body, tcx);
+
+ self.prove_predicates(
+ existential_predicates
+ .iter()
+ .map(|predicate| predicate.with_self_ty(tcx, self_ty)),
+ location.to_locations(),
+ ConstraintCategory::Cast,
+ );
+
+ let outlives_predicate =
+ tcx.mk_predicate(Binder::dummy(ty::PredicateKind::TypeOutlives(
+ ty::OutlivesPredicate(self_ty, *region),
+ )));
+ self.prove_predicate(
+ outlives_predicate,
+ location.to_locations(),
+ ConstraintCategory::Cast,
+ );
+ }
+
CastKind::Pointer(PointerCast::MutToConstPointer) => {
let ty::RawPtr(ty::TypeAndMut {
ty: ty_from,
@@ -2176,25 +2215,104 @@ impl<'a, 'tcx> TypeChecker<'a, 'tcx> {
}
}
}
-
- CastKind::Misc => {
+ CastKind::IntToInt => {
let ty_from = op.ty(body, tcx);
let cast_ty_from = CastTy::from_ty(ty_from);
let cast_ty_to = CastTy::from_ty(*ty);
- // Misc casts are either between floats and ints, or one ptr type to another.
match (cast_ty_from, cast_ty_to) {
- (
- Some(CastTy::Int(_) | CastTy::Float),
- Some(CastTy::Int(_) | CastTy::Float),
- )
- | (Some(CastTy::Ptr(_) | CastTy::FnPtr), Some(CastTy::Ptr(_))) => (),
+ (Some(CastTy::Int(_)), Some(CastTy::Int(_))) => (),
_ => {
span_mirbug!(
self,
rvalue,
- "Invalid Misc cast {:?} -> {:?}",
+ "Invalid IntToInt cast {:?} -> {:?}",
ty_from,
- ty,
+ ty
+ )
+ }
+ }
+ }
+ CastKind::IntToFloat => {
+ let ty_from = op.ty(body, tcx);
+ let cast_ty_from = CastTy::from_ty(ty_from);
+ let cast_ty_to = CastTy::from_ty(*ty);
+ match (cast_ty_from, cast_ty_to) {
+ (Some(CastTy::Int(_)), Some(CastTy::Float)) => (),
+ _ => {
+ span_mirbug!(
+ self,
+ rvalue,
+ "Invalid IntToFloat cast {:?} -> {:?}",
+ ty_from,
+ ty
+ )
+ }
+ }
+ }
+ CastKind::FloatToInt => {
+ let ty_from = op.ty(body, tcx);
+ let cast_ty_from = CastTy::from_ty(ty_from);
+ let cast_ty_to = CastTy::from_ty(*ty);
+ match (cast_ty_from, cast_ty_to) {
+ (Some(CastTy::Float), Some(CastTy::Int(_))) => (),
+ _ => {
+ span_mirbug!(
+ self,
+ rvalue,
+ "Invalid FloatToInt cast {:?} -> {:?}",
+ ty_from,
+ ty
+ )
+ }
+ }
+ }
+ CastKind::FloatToFloat => {
+ let ty_from = op.ty(body, tcx);
+ let cast_ty_from = CastTy::from_ty(ty_from);
+ let cast_ty_to = CastTy::from_ty(*ty);
+ match (cast_ty_from, cast_ty_to) {
+ (Some(CastTy::Float), Some(CastTy::Float)) => (),
+ _ => {
+ span_mirbug!(
+ self,
+ rvalue,
+ "Invalid FloatToFloat cast {:?} -> {:?}",
+ ty_from,
+ ty
+ )
+ }
+ }
+ }
+ CastKind::FnPtrToPtr => {
+ let ty_from = op.ty(body, tcx);
+ let cast_ty_from = CastTy::from_ty(ty_from);
+ let cast_ty_to = CastTy::from_ty(*ty);
+ match (cast_ty_from, cast_ty_to) {
+ (Some(CastTy::FnPtr), Some(CastTy::Ptr(_))) => (),
+ _ => {
+ span_mirbug!(
+ self,
+ rvalue,
+ "Invalid FnPtrToPtr cast {:?} -> {:?}",
+ ty_from,
+ ty
+ )
+ }
+ }
+ }
+ CastKind::PtrToPtr => {
+ let ty_from = op.ty(body, tcx);
+ let cast_ty_from = CastTy::from_ty(ty_from);
+ let cast_ty_to = CastTy::from_ty(*ty);
+ match (cast_ty_from, cast_ty_to) {
+ (Some(CastTy::Ptr(_)), Some(CastTy::Ptr(_))) => (),
+ _ => {
+ span_mirbug!(
+ self,
+ rvalue,
+ "Invalid PtrToPtr cast {:?} -> {:?}",
+ ty_from,
+ ty
)
}
}
@@ -2490,6 +2608,7 @@ impl<'a, 'tcx> TypeChecker<'a, 'tcx> {
}
ProjectionElem::Field(..)
| ProjectionElem::Downcast(..)
+ | ProjectionElem::OpaqueCast(..)
| ProjectionElem::Index(..)
| ProjectionElem::ConstantIndex { .. }
| ProjectionElem::Subslice { .. } => {
@@ -2584,7 +2703,7 @@ impl<'a, 'tcx> TypeChecker<'a, 'tcx> {
.enumerate()
.filter_map(|(idx, constraint)| {
let ty::OutlivesPredicate(k1, r2) =
- constraint.no_bound_vars().unwrap_or_else(|| {
+ constraint.0.no_bound_vars().unwrap_or_else(|| {
bug!("query_constraint {:?} contained bound vars", constraint,);
});
@@ -2659,7 +2778,7 @@ impl<'a, 'tcx> TypeChecker<'a, 'tcx> {
self.check_local(&body, local, local_decl);
}
- for (block, block_data) in body.basic_blocks().iter_enumerated() {
+ for (block, block_data) in body.basic_blocks.iter_enumerated() {
let mut location = Location { block, statement_index: 0 };
for stmt in &block_data.statements {
if !stmt.source_info.span.is_dummy() {
@@ -2710,7 +2829,7 @@ impl<'tcx> TypeOp<'tcx> for InstantiateOpaqueType<'tcx> {
/// constraints in our `InferCtxt`
type ErrorInfo = InstantiateOpaqueType<'tcx>;
- fn fully_perform(mut self, infcx: &InferCtxt<'_, 'tcx>) -> Fallible<TypeOpOutput<'tcx, Self>> {
+ fn fully_perform(mut self, infcx: &InferCtxt<'tcx>) -> Fallible<TypeOpOutput<'tcx, Self>> {
let (mut output, region_constraints) = scrape_region_constraints(infcx, || {
Ok(InferOk { value: (), obligations: self.obligations.clone() })
})?;
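
A large part of this file's diff replaces the old catch-all `CastKind::Misc` arm with one arm per cast kind (`IntToInt`, `IntToFloat`, `FloatToInt`, `FloatToFloat`, `FnPtrToPtr`, `PtrToPtr`), each of which accepts exactly one pair of operand categories. A toy model of that table, with simplified stand-ins for `CastKind` and `CastTy` (the real `CastTy::Int` and `CastTy::Ptr` carry extra data that is dropped here):

```rust
#[derive(Debug, Clone, Copy, PartialEq)]
enum CastTy {
    Int,
    Float,
    Ptr,
    FnPtr,
}

#[derive(Debug, Clone, Copy)]
enum CastKind {
    IntToInt,
    IntToFloat,
    FloatToInt,
    FloatToFloat,
    FnPtrToPtr,
    PtrToPtr,
}

// Mirrors the per-kind `match (cast_ty_from, cast_ty_to)` arms in
// `check_rvalue`: any pairing outside the single accepted one would be
// reported via `span_mirbug!` in the real code.
fn cast_is_valid(kind: CastKind, from: CastTy, to: CastTy) -> bool {
    matches!(
        (kind, from, to),
        (CastKind::IntToInt, CastTy::Int, CastTy::Int)
            | (CastKind::IntToFloat, CastTy::Int, CastTy::Float)
            | (CastKind::FloatToInt, CastTy::Float, CastTy::Int)
            | (CastKind::FloatToFloat, CastTy::Float, CastTy::Float)
            | (CastKind::FnPtrToPtr, CastTy::FnPtr, CastTy::Ptr)
            | (CastKind::PtrToPtr, CastTy::Ptr, CastTy::Ptr)
    )
}

fn main() {
    assert!(cast_is_valid(CastKind::IntToInt, CastTy::Int, CastTy::Int));
    assert!(cast_is_valid(CastKind::FnPtrToPtr, CastTy::FnPtr, CastTy::Ptr));
    // Under the old single `Misc` kind this pairing was fine; with the
    // split kinds it must be tagged `FnPtrToPtr`, not `PtrToPtr`.
    assert!(!cast_is_valid(CastKind::PtrToPtr, CastTy::FnPtr, CastTy::Ptr));
    println!("cast-kind table matches the split arms");
}
```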
diff --git a/compiler/rustc_borrowck/src/type_check/relate_tys.rs b/compiler/rustc_borrowck/src/type_check/relate_tys.rs
index c97a6a1a6..4f2dc263b 100644
--- a/compiler/rustc_borrowck/src/type_check/relate_tys.rs
+++ b/compiler/rustc_borrowck/src/type_check/relate_tys.rs
@@ -1,6 +1,6 @@
use rustc_infer::infer::nll_relate::{NormalizationStrategy, TypeRelating, TypeRelatingDelegate};
use rustc_infer::infer::NllRegionVariableOrigin;
-use rustc_infer::traits::ObligationCause;
+use rustc_infer::traits::PredicateObligations;
use rustc_middle::mir::ConstraintCategory;
use rustc_middle::ty::error::TypeError;
use rustc_middle::ty::relate::TypeRelation;
@@ -155,27 +155,16 @@ impl<'tcx> TypeRelatingDelegate<'tcx> for NllTypeRelatingDelegate<'_, '_, 'tcx>
true
}
- fn register_opaque_type(
+ fn register_opaque_type_obligations(
&mut self,
- a: Ty<'tcx>,
- b: Ty<'tcx>,
- a_is_expected: bool,
+ obligations: PredicateObligations<'tcx>,
) -> Result<(), TypeError<'tcx>> {
- let param_env = self.param_env();
- let span = self.span();
- let def_id = self.type_checker.body.source.def_id().expect_local();
- let body_id = self.type_checker.tcx().hir().local_def_id_to_hir_id(def_id);
- let cause = ObligationCause::misc(span, body_id);
self.type_checker
.fully_perform_op(
self.locations,
self.category,
InstantiateOpaqueType {
- obligations: self
- .type_checker
- .infcx
- .handle_opaque_type(a, b, a_is_expected, &cause, param_env)?
- .obligations,
+ obligations,
// These fields are filled in during execution of the operation
base_universe: None,
region_constraints: None,
diff --git a/compiler/rustc_borrowck/src/universal_regions.rs b/compiler/rustc_borrowck/src/universal_regions.rs
index 2a7713bc4..2beb5e0ab 100644
--- a/compiler/rustc_borrowck/src/universal_regions.rs
+++ b/compiler/rustc_borrowck/src/universal_regions.rs
@@ -22,8 +22,8 @@ use rustc_hir::{BodyOwnerKind, HirId};
use rustc_index::vec::{Idx, IndexVec};
use rustc_infer::infer::{InferCtxt, NllRegionVariableOrigin};
use rustc_middle::ty::fold::TypeFoldable;
-use rustc_middle::ty::subst::{InternalSubsts, Subst, SubstsRef};
use rustc_middle::ty::{self, InlineConstSubsts, InlineConstSubstsParts, RegionVid, Ty, TyCtxt};
+use rustc_middle::ty::{InternalSubsts, SubstsRef};
use std::iter;
use crate::nll::ToRegionVid;
@@ -54,13 +54,6 @@ pub struct UniversalRegions<'tcx> {
/// The total number of universal region variables instantiated.
num_universals: usize,
- /// A special region variable created for the `'empty(U0)` region.
- /// Note that this is **not** a "universal" region, as it doesn't
- /// represent a universally bound placeholder or any such thing.
- /// But we do create it here in this type because it's a useful region
- /// to have around in a few limited cases.
- pub root_empty: RegionVid,
-
/// The "defining" type for this function, with all universal
/// regions instantiated. For a closure or generator, this is the
/// closure type, but for a top-level function it's the `FnDef`.
@@ -226,7 +219,7 @@ impl<'tcx> UniversalRegions<'tcx> {
/// signature. This will also compute the relationships that are
/// known between those regions.
pub fn new(
- infcx: &InferCtxt<'_, 'tcx>,
+ infcx: &InferCtxt<'tcx>,
mir_def: ty::WithOptConstParam<LocalDefId>,
param_env: ty::ParamEnv<'tcx>,
) -> Self {
@@ -323,11 +316,7 @@ impl<'tcx> UniversalRegions<'tcx> {
/// See `UniversalRegionIndices::to_region_vid`.
pub fn to_region_vid(&self, r: ty::Region<'tcx>) -> RegionVid {
- if let ty::ReEmpty(ty::UniverseIndex::ROOT) = *r {
- self.root_empty
- } else {
- self.indices.to_region_vid(r)
- }
+ self.indices.to_region_vid(r)
}
/// As part of the NLL unit tests, you can annotate a function with
@@ -393,7 +382,7 @@ impl<'tcx> UniversalRegions<'tcx> {
}
struct UniversalRegionsBuilder<'cx, 'tcx> {
- infcx: &'cx InferCtxt<'cx, 'tcx>,
+ infcx: &'cx InferCtxt<'tcx>,
mir_def: ty::WithOptConstParam<LocalDefId>,
mir_hir_id: HirId,
param_env: ty::ParamEnv<'tcx>,
@@ -425,7 +414,7 @@ impl<'cx, 'tcx> UniversalRegionsBuilder<'cx, 'tcx> {
let typeck_root_def_id = self.infcx.tcx.typeck_root_def_id(self.mir_def.did.to_def_id());
- // If this is is a 'root' body (not a closure/generator/inline const), then
+ // If this is a 'root' body (not a closure/generator/inline const), then
// there are no extern regions, so the local regions start at the same
// position as the (empty) sub-list of extern regions
let first_local_index = if self.mir_def.did.to_def_id() == typeck_root_def_id {
@@ -501,16 +490,10 @@ impl<'cx, 'tcx> UniversalRegionsBuilder<'cx, 'tcx> {
_ => None,
};
- let root_empty = self
- .infcx
- .next_nll_region_var(NllRegionVariableOrigin::Existential { from_forall: true })
- .to_region_vid();
-
UniversalRegions {
indices,
fr_static,
fr_fn_body,
- root_empty,
first_extern_index,
first_local_index,
num_universals,
@@ -716,7 +699,7 @@ trait InferCtxtExt<'tcx> {
);
}
-impl<'cx, 'tcx> InferCtxtExt<'tcx> for InferCtxt<'cx, 'tcx> {
+impl<'tcx> InferCtxtExt<'tcx> for InferCtxt<'tcx> {
fn replace_free_regions_with_nll_infer_vars<T>(
&self,
origin: NllRegionVariableOrigin,
@@ -768,10 +751,9 @@ impl<'cx, 'tcx> InferCtxtExt<'tcx> for InferCtxt<'cx, 'tcx> {
mir_def_id: LocalDefId,
indices: &mut UniversalRegionIndices<'tcx>,
) {
- debug!("replace_late_bound_regions_with_nll_infer_vars(mir_def_id={:?})", mir_def_id);
let typeck_root_def_id = self.tcx.typeck_root_def_id(mir_def_id.to_def_id());
for_each_late_bound_region_defined_on(self.tcx, typeck_root_def_id, |r| {
- debug!("replace_late_bound_regions_with_nll_infer_vars: r={:?}", r);
+ debug!(?r);
if !indices.indices.contains_key(&r) {
let region_vid = self.next_nll_region_var(FR);
debug!(?region_vid);
diff --git a/compiler/rustc_builtin_macros/Cargo.toml b/compiler/rustc_builtin_macros/Cargo.toml
index 8d8e9d9b5..6469d0d7b 100644
--- a/compiler/rustc_builtin_macros/Cargo.toml
+++ b/compiler/rustc_builtin_macros/Cargo.toml
@@ -7,20 +7,21 @@ edition = "2021"
doctest = false
[dependencies]
-rustc_parse_format = { path = "../rustc_parse_format" }
-tracing = "0.1"
+rustc_ast = { path = "../rustc_ast" }
rustc_ast_pretty = { path = "../rustc_ast_pretty" }
rustc_attr = { path = "../rustc_attr" }
rustc_data_structures = { path = "../rustc_data_structures" }
rustc_errors = { path = "../rustc_errors" }
+rustc_expand = { path = "../rustc_expand" }
rustc_feature = { path = "../rustc_feature" }
rustc_lexer = { path = "../rustc_lexer" }
rustc_lint_defs = { path = "../rustc_lint_defs" }
rustc_macros = { path = "../rustc_macros" }
+rustc_parse_format = { path = "../rustc_parse_format" }
rustc_parse = { path = "../rustc_parse" }
-rustc_target = { path = "../rustc_target" }
rustc_session = { path = "../rustc_session" }
-smallvec = { version = "1.8.1", features = ["union", "may_dangle"] }
-rustc_ast = { path = "../rustc_ast" }
-rustc_expand = { path = "../rustc_expand" }
rustc_span = { path = "../rustc_span" }
+rustc_target = { path = "../rustc_target" }
+smallvec = { version = "1.8.1", features = ["union", "may_dangle"] }
+thin-vec = "0.2.8"
+tracing = "0.1"
diff --git a/compiler/rustc_builtin_macros/src/asm.rs b/compiler/rustc_builtin_macros/src/asm.rs
index 1a0ea8f41..a1051d990 100644
--- a/compiler/rustc_builtin_macros/src/asm.rs
+++ b/compiler/rustc_builtin_macros/src/asm.rs
@@ -852,7 +852,7 @@ pub(super) fn expand_global_asm<'cx>(
if let Some(inline_asm) = expand_preparsed_asm(ecx, args) {
MacEager::items(smallvec![P(ast::Item {
ident: Ident::empty(),
- attrs: Vec::new(),
+ attrs: ast::AttrVec::new(),
id: ast::DUMMY_NODE_ID,
kind: ast::ItemKind::GlobalAsm(Box::new(inline_asm)),
vis: ast::Visibility {
diff --git a/compiler/rustc_builtin_macros/src/assert.rs b/compiler/rustc_builtin_macros/src/assert.rs
index 925c36edb..119724b50 100644
--- a/compiler/rustc_builtin_macros/src/assert.rs
+++ b/compiler/rustc_builtin_macros/src/assert.rs
@@ -52,7 +52,7 @@ pub fn expand_assert<'cx>(
let expr = if let Some(tokens) = custom_message {
let then = cx.expr(
call_site_span,
- ExprKind::MacCall(MacCall {
+ ExprKind::MacCall(P(MacCall {
path: panic_path(),
args: P(MacArgs::Delimited(
DelimSpan::from_single(call_site_span),
@@ -60,7 +60,7 @@ pub fn expand_assert<'cx>(
tokens,
)),
prior_type_ascription: None,
- }),
+ })),
);
expr_if_not(cx, call_site_span, cond_expr, then, None)
}
diff --git a/compiler/rustc_builtin_macros/src/assert/context.rs b/compiler/rustc_builtin_macros/src/assert/context.rs
index dcea883a5..bb6839360 100644
--- a/compiler/rustc_builtin_macros/src/assert/context.rs
+++ b/compiler/rustc_builtin_macros/src/assert/context.rs
@@ -13,6 +13,7 @@ use rustc_span::{
symbol::{sym, Ident, Symbol},
Span,
};
+use thin_vec::thin_vec;
pub(super) struct Context<'cx, 'a> {
// An optimization.
@@ -57,6 +58,7 @@ impl<'cx, 'a> Context<'cx, 'a> {
/// Builds the whole `assert!` expression. For example, `let elem = 1; assert!(elem == 1);` expands to:
///
/// ```rust
+ /// #![feature(generic_assert_internals)]
/// let elem = 1;
/// {
/// #[allow(unused_imports)]
@@ -69,7 +71,7 @@ impl<'cx, 'a> Context<'cx, 'a> {
/// __local_bind0
/// } == 1
/// ) {
- /// panic!("Assertion failed: elem == 1\nWith captures:\n elem = {}", __capture0)
+ /// panic!("Assertion failed: elem == 1\nWith captures:\n elem = {:?}", __capture0)
/// }
/// }
/// ```
@@ -116,7 +118,7 @@ impl<'cx, 'a> Context<'cx, 'a> {
self.cx.item(
self.span,
Ident::empty(),
- vec![self.cx.attribute(attr::mk_list_item(
+ thin_vec![self.cx.attribute(attr::mk_list_item(
Ident::new(sym::allow, self.span),
vec![attr::mk_nested_word_item(Ident::new(sym::unused_imports, self.span))],
))],
@@ -177,7 +179,7 @@ impl<'cx, 'a> Context<'cx, 'a> {
});
self.cx.expr(
self.span,
- ExprKind::MacCall(MacCall {
+ ExprKind::MacCall(P(MacCall {
path: panic_path,
args: P(MacArgs::Delimited(
DelimSpan::from_single(self.span),
@@ -185,7 +187,7 @@ impl<'cx, 'a> Context<'cx, 'a> {
initial.into_iter().chain(captures).collect::<TokenStream>(),
)),
prior_type_ascription: None,
- }),
+ })),
)
}
@@ -240,8 +242,8 @@ impl<'cx, 'a> Context<'cx, 'a> {
self.manage_cond_expr(prefix);
self.manage_cond_expr(suffix);
}
- ExprKind::MethodCall(_, ref mut local_exprs, _) => {
- for local_expr in local_exprs.iter_mut().skip(1) {
+ ExprKind::MethodCall(_, _, ref mut local_exprs, _) => {
+ for local_expr in local_exprs.iter_mut() {
self.manage_cond_expr(local_expr);
}
}
@@ -377,14 +379,12 @@ impl<'cx, 'a> Context<'cx, 'a> {
id: DUMMY_NODE_ID,
ident: Ident::new(sym::try_capture, self.span),
},
- vec![
- expr_paren(self.cx, self.span, self.cx.expr_addr_of(self.span, wrapper)),
- expr_addr_of_mut(
- self.cx,
- self.span,
- self.cx.expr_path(Path::from_ident(capture)),
- ),
- ],
+ expr_paren(self.cx, self.span, self.cx.expr_addr_of(self.span, wrapper)),
+ vec![expr_addr_of_mut(
+ self.cx,
+ self.span,
+ self.cx.expr_path(Path::from_ident(capture)),
+ )],
self.span,
))
.add_trailing_semicolon();
@@ -442,10 +442,11 @@ fn expr_addr_of_mut(cx: &ExtCtxt<'_>, sp: Span, e: P<Expr>) -> P<Expr> {
fn expr_method_call(
cx: &ExtCtxt<'_>,
path: PathSegment,
+ receiver: P<Expr>,
args: Vec<P<Expr>>,
span: Span,
) -> P<Expr> {
- cx.expr(span, ExprKind::MethodCall(path, args, span))
+ cx.expr(span, ExprKind::MethodCall(path, receiver, args, span))
}
fn expr_paren(cx: &ExtCtxt<'_>, sp: Span, e: P<Expr>) -> P<Expr> {
diff --git a/compiler/rustc_builtin_macros/src/cfg.rs b/compiler/rustc_builtin_macros/src/cfg.rs
index aa355150b..5638c2f61 100644
--- a/compiler/rustc_builtin_macros/src/cfg.rs
+++ b/compiler/rustc_builtin_macros/src/cfg.rs
@@ -8,7 +8,7 @@ use rustc_ast::tokenstream::TokenStream;
use rustc_attr as attr;
use rustc_errors::PResult;
use rustc_expand::base::{self, *};
-use rustc_macros::SessionDiagnostic;
+use rustc_macros::Diagnostic;
use rustc_span::Span;
pub fn expand_cfg(
@@ -35,16 +35,16 @@ pub fn expand_cfg(
}
}
-#[derive(SessionDiagnostic)]
-#[error(builtin_macros::requires_cfg_pattern)]
+#[derive(Diagnostic)]
+#[diag(builtin_macros_requires_cfg_pattern)]
struct RequiresCfgPattern {
#[primary_span]
#[label]
span: Span,
}
-#[derive(SessionDiagnostic)]
-#[error(builtin_macros::expected_one_cfg_pattern)]
+#[derive(Diagnostic)]
+#[diag(builtin_macros_expected_one_cfg_pattern)]
struct OneCfgPattern {
#[primary_span]
span: Span,
diff --git a/compiler/rustc_builtin_macros/src/cfg_eval.rs b/compiler/rustc_builtin_macros/src/cfg_eval.rs
index 89b2c3292..750f1fe12 100644
--- a/compiler/rustc_builtin_macros/src/cfg_eval.rs
+++ b/compiler/rustc_builtin_macros/src/cfg_eval.rs
@@ -7,6 +7,7 @@ use rustc_ast::visit::Visitor;
use rustc_ast::NodeId;
use rustc_ast::{mut_visit, visit};
use rustc_ast::{Attribute, HasAttrs, HasTokens};
+use rustc_errors::PResult;
use rustc_expand::base::{Annotatable, ExtCtxt};
use rustc_expand::config::StripUnconfigured;
use rustc_expand::configure;
@@ -144,33 +145,34 @@ impl CfgEval<'_, '_> {
// the location of `#[cfg]` and `#[cfg_attr]` in the token stream. The tokenization
// process is lossless, so this process is invisible to proc-macros.
- let parse_annotatable_with: fn(&mut Parser<'_>) -> _ = match annotatable {
- Annotatable::Item(_) => {
- |parser| Annotatable::Item(parser.parse_item(ForceCollect::Yes).unwrap().unwrap())
- }
- Annotatable::TraitItem(_) => |parser| {
- Annotatable::TraitItem(
- parser.parse_trait_item(ForceCollect::Yes).unwrap().unwrap().unwrap(),
- )
- },
- Annotatable::ImplItem(_) => |parser| {
- Annotatable::ImplItem(
- parser.parse_impl_item(ForceCollect::Yes).unwrap().unwrap().unwrap(),
- )
- },
- Annotatable::ForeignItem(_) => |parser| {
- Annotatable::ForeignItem(
- parser.parse_foreign_item(ForceCollect::Yes).unwrap().unwrap().unwrap(),
- )
- },
- Annotatable::Stmt(_) => |parser| {
- Annotatable::Stmt(P(parser.parse_stmt(ForceCollect::Yes).unwrap().unwrap()))
- },
- Annotatable::Expr(_) => {
- |parser| Annotatable::Expr(parser.parse_expr_force_collect().unwrap())
- }
- _ => unreachable!(),
- };
+ let parse_annotatable_with: for<'a> fn(&mut Parser<'a>) -> PResult<'a, _> =
+ match annotatable {
+ Annotatable::Item(_) => {
+ |parser| Ok(Annotatable::Item(parser.parse_item(ForceCollect::Yes)?.unwrap()))
+ }
+ Annotatable::TraitItem(_) => |parser| {
+ Ok(Annotatable::TraitItem(
+ parser.parse_trait_item(ForceCollect::Yes)?.unwrap().unwrap(),
+ ))
+ },
+ Annotatable::ImplItem(_) => |parser| {
+ Ok(Annotatable::ImplItem(
+ parser.parse_impl_item(ForceCollect::Yes)?.unwrap().unwrap(),
+ ))
+ },
+ Annotatable::ForeignItem(_) => |parser| {
+ Ok(Annotatable::ForeignItem(
+ parser.parse_foreign_item(ForceCollect::Yes)?.unwrap().unwrap(),
+ ))
+ },
+ Annotatable::Stmt(_) => |parser| {
+ Ok(Annotatable::Stmt(P(parser.parse_stmt(ForceCollect::Yes)?.unwrap())))
+ },
+ Annotatable::Expr(_) => {
+ |parser| Ok(Annotatable::Expr(parser.parse_expr_force_collect()?))
+ }
+ _ => unreachable!(),
+ };
// 'Flatten' all nonterminals (i.e. `TokenKind::Interpolated`)
// to `None`-delimited groups containing the corresponding tokens. This
@@ -188,22 +190,35 @@ impl CfgEval<'_, '_> {
let orig_tokens = annotatable.to_tokens().flattened();
// Re-parse the tokens, setting the `capture_cfg` flag to save extra information
- // to the captured `AttrAnnotatedTokenStream` (specifically, we capture
- // `AttrAnnotatedTokenTree::AttributesData` for all occurrences of `#[cfg]` and `#[cfg_attr]`)
+ // to the captured `AttrTokenStream` (specifically, we capture
+ // `AttrTokenTree::AttributesData` for all occurrences of `#[cfg]` and `#[cfg_attr]`)
let mut parser =
rustc_parse::stream_to_parser(&self.cfg.sess.parse_sess, orig_tokens, None);
parser.capture_cfg = true;
- annotatable = parse_annotatable_with(&mut parser);
+ match parse_annotatable_with(&mut parser) {
+ Ok(a) => annotatable = a,
+ Err(mut err) => {
+ err.emit();
+ return Some(annotatable);
+ }
+ }
- // Now that we have our re-parsed `AttrAnnotatedTokenStream`, recursively configuring
+ // Now that we have our re-parsed `AttrTokenStream`, recursively configuring
// our attribute target will correctly configure the tokens as well.
flat_map_annotatable(self, annotatable)
}
}
impl MutVisitor for CfgEval<'_, '_> {
+ #[instrument(level = "trace", skip(self))]
fn visit_expr(&mut self, expr: &mut P<ast::Expr>) {
- self.cfg.configure_expr(expr);
+ self.cfg.configure_expr(expr, false);
+ mut_visit::noop_visit_expr(expr, self);
+ }
+
+ #[instrument(level = "trace", skip(self))]
+ fn visit_method_receiver_expr(&mut self, expr: &mut P<ast::Expr>) {
+ self.cfg.configure_expr(expr, true);
mut_visit::noop_visit_expr(expr, self);
}
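The cfg_eval hunk replaces the `.unwrap()`-heavy closures with a fallible function pointer typed `for<'a> fn(&mut Parser<'a>) -> PResult<'a, _>`, so a parse failure is emitted and the original annotatable is returned instead of panicking. The higher-ranked `for<'a>` is needed because the returned error borrows from whatever parser the caller hands in. A minimal sketch of the same pattern with invented toy types (not the real `rustc_parse` API):

```rust
// Toy stand-ins for rustc's Parser and PResult, to show the `for<'a>` fn-pointer pattern.
struct Parser<'a> {
    input: &'a str,
    pos: usize,
}

#[derive(Debug)]
struct ParseErr<'a> {
    msg: String,
    rest: &'a str, // the error borrows from the same input the parser borrows
}

type PResult<'a, T> = Result<T, ParseErr<'a>>;

fn parse_word<'a>(p: &mut Parser<'a>) -> PResult<'a, String> {
    let rest = &p.input[p.pos..];
    let word: String = rest.chars().take_while(|c| c.is_alphanumeric()).collect();
    if word.is_empty() {
        Err(ParseErr { msg: "expected a word".into(), rest })
    } else {
        p.pos += word.len();
        Ok(word)
    }
}

fn main() {
    // The annotation must be higher-ranked: the same function pointer works for
    // every lifetime the caller's parser happens to have.
    let parse: for<'a> fn(&mut Parser<'a>) -> PResult<'a, String> = parse_word;

    let mut ok = Parser { input: "hello world", pos: 0 };
    println!("parsed {:?}", parse(&mut ok));

    let mut bad = Parser { input: "!!", pos: 0 };
    // Instead of unwrapping, the caller recovers, mirroring the diff's
    // `match parse_annotatable_with(&mut parser) { Ok(..) => .., Err(err) => .. }`.
    if let Err(e) = parse(&mut bad) {
        println!("recovered from: {} at {:?}", e.msg, e.rest);
    }
}
```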
diff --git a/compiler/rustc_builtin_macros/src/cmdline_attrs.rs b/compiler/rustc_builtin_macros/src/cmdline_attrs.rs
index 747e48ece..db05c00d2 100644
--- a/compiler/rustc_builtin_macros/src/cmdline_attrs.rs
+++ b/compiler/rustc_builtin_macros/src/cmdline_attrs.rs
@@ -28,7 +28,13 @@ pub fn inject(mut krate: ast::Crate, parse_sess: &ParseSess, attrs: &[String]) -
continue;
}
- krate.attrs.push(mk_attr(AttrStyle::Inner, path, args, start_span.to(end_span)));
+ krate.attrs.push(mk_attr(
+ &parse_sess.attr_id_generator,
+ AttrStyle::Inner,
+ path,
+ args,
+ start_span.to(end_span),
+ ));
}
krate
diff --git a/compiler/rustc_builtin_macros/src/concat.rs b/compiler/rustc_builtin_macros/src/concat.rs
index a23dd1d12..41f4e8c23 100644
--- a/compiler/rustc_builtin_macros/src/concat.rs
+++ b/compiler/rustc_builtin_macros/src/concat.rs
@@ -39,7 +39,7 @@ pub fn expand_concat(
ast::LitKind::Byte(..) | ast::LitKind::ByteStr(..) => {
cx.span_err(e.span, "cannot concatenate a byte string literal");
}
- ast::LitKind::Err(_) => {
+ ast::LitKind::Err => {
has_errors = true;
}
},
diff --git a/compiler/rustc_builtin_macros/src/concat_bytes.rs b/compiler/rustc_builtin_macros/src/concat_bytes.rs
index a1afec410..66e86bf21 100644
--- a/compiler/rustc_builtin_macros/src/concat_bytes.rs
+++ b/compiler/rustc_builtin_macros/src/concat_bytes.rs
@@ -1,6 +1,5 @@
use rustc_ast as ast;
use rustc_ast::{ptr::P, tokenstream::TokenStream};
-use rustc_data_structures::sync::Lrc;
use rustc_errors::Applicability;
use rustc_expand::base::{self, DummyResult};
@@ -43,7 +42,7 @@ fn invalid_type_err(cx: &mut base::ExtCtxt<'_>, expr: &P<rustc_ast::Expr>, is_ne
ast::LitKind::Bool(_) => {
cx.span_err(expr.span, "cannot concatenate boolean literals");
}
- ast::LitKind::Err(_) => {}
+ ast::LitKind::Err => {}
ast::LitKind::Int(_, _) if !is_nested => {
let mut err = cx.struct_span_err(expr.span, "cannot concatenate numeric literals");
if let Ok(snippet) = cx.sess.source_map().span_to_snippet(expr.span) {
@@ -185,5 +184,5 @@ pub fn expand_concat_bytes(
return base::MacEager::expr(DummyResult::raw_expr(sp, true));
}
let sp = cx.with_def_site_ctxt(sp);
- base::MacEager::expr(cx.expr_lit(sp, ast::LitKind::ByteStr(Lrc::from(accumulator))))
+ base::MacEager::expr(cx.expr_byte_str(sp, accumulator))
}
diff --git a/compiler/rustc_builtin_macros/src/derive.rs b/compiler/rustc_builtin_macros/src/derive.rs
index d3de10ca4..e0fb7affb 100644
--- a/compiler/rustc_builtin_macros/src/derive.rs
+++ b/compiler/rustc_builtin_macros/src/derive.rs
@@ -32,7 +32,8 @@ impl MultiItemModifier for Expander {
ecx.resolver.resolve_derives(ecx.current_expansion.id, ecx.force_mode, &|| {
let template =
AttributeTemplate { list: Some("Trait1, Trait2, ..."), ..Default::default() };
- let attr = attr::mk_attr_outer(meta_item.clone());
+ let attr =
+ attr::mk_attr_outer(&sess.parse_sess.attr_id_generator, meta_item.clone());
validate_attr::check_builtin_attribute(
&sess.parse_sess,
&attr,
@@ -126,9 +127,9 @@ fn report_bad_target(sess: &Session, item: &Annotatable, span: Span) -> bool {
}
fn report_unexpected_literal(sess: &Session, lit: &ast::Lit) {
- let help_msg = match lit.token.kind {
- token::Str if rustc_lexer::is_ident(lit.token.symbol.as_str()) => {
- format!("try using `#[derive({})]`", lit.token.symbol)
+ let help_msg = match lit.token_lit.kind {
+ token::Str if rustc_lexer::is_ident(lit.token_lit.symbol.as_str()) => {
+ format!("try using `#[derive({})]`", lit.token_lit.symbol)
}
_ => "for example, write `#[derive(Debug)]` for `Debug`".to_string(),
};
diff --git a/compiler/rustc_builtin_macros/src/deriving/bounds.rs b/compiler/rustc_builtin_macros/src/deriving/bounds.rs
index 5ef68c6ae..7bd344467 100644
--- a/compiler/rustc_builtin_macros/src/deriving/bounds.rs
+++ b/compiler/rustc_builtin_macros/src/deriving/bounds.rs
@@ -15,8 +15,8 @@ pub fn expand_deriving_copy(
) {
let trait_def = TraitDef {
span,
- attributes: Vec::new(),
path: path_std!(marker::Copy),
+ skip_path_as_bound: false,
additional_bounds: Vec::new(),
generics: Bounds::empty(),
supports_unions: true,
diff --git a/compiler/rustc_builtin_macros/src/deriving/clone.rs b/compiler/rustc_builtin_macros/src/deriving/clone.rs
index 7755ff779..fa8685f5f 100644
--- a/compiler/rustc_builtin_macros/src/deriving/clone.rs
+++ b/compiler/rustc_builtin_macros/src/deriving/clone.rs
@@ -1,12 +1,12 @@
use crate::deriving::generic::ty::*;
use crate::deriving::generic::*;
use crate::deriving::path_std;
-
use rustc_ast::{self as ast, Generics, ItemKind, MetaItem, VariantData};
use rustc_data_structures::fx::FxHashSet;
use rustc_expand::base::{Annotatable, ExtCtxt};
use rustc_span::symbol::{kw, sym, Ident};
use rustc_span::Span;
+use thin_vec::thin_vec;
pub fn expand_deriving_clone(
cx: &mut ExtCtxt<'_>,
@@ -68,11 +68,11 @@ pub fn expand_deriving_clone(
}
let inline = cx.meta_word(span, sym::inline);
- let attrs = vec![cx.attribute(inline)];
+ let attrs = thin_vec![cx.attribute(inline)];
let trait_def = TraitDef {
span,
- attributes: Vec::new(),
path: path_std!(clone::Clone),
+ skip_path_as_bound: false,
additional_bounds: bounds,
generics: Bounds::empty(),
supports_unions: true,
diff --git a/compiler/rustc_builtin_macros/src/deriving/cmp/eq.rs b/compiler/rustc_builtin_macros/src/deriving/cmp/eq.rs
index 4e798bf6a..eab67b0d3 100644
--- a/compiler/rustc_builtin_macros/src/deriving/cmp/eq.rs
+++ b/compiler/rustc_builtin_macros/src/deriving/cmp/eq.rs
@@ -7,6 +7,7 @@ use rustc_data_structures::fx::FxHashSet;
use rustc_expand::base::{Annotatable, ExtCtxt};
use rustc_span::symbol::{sym, Ident};
use rustc_span::Span;
+use thin_vec::thin_vec;
pub fn expand_deriving_eq(
cx: &mut ExtCtxt<'_>,
@@ -20,11 +21,11 @@ pub fn expand_deriving_eq(
let hidden = rustc_ast::attr::mk_nested_word_item(Ident::new(sym::hidden, span));
let doc = rustc_ast::attr::mk_list_item(Ident::new(sym::doc, span), vec![hidden]);
let no_coverage = cx.meta_word(span, sym::no_coverage);
- let attrs = vec![cx.attribute(inline), cx.attribute(doc), cx.attribute(no_coverage)];
+ let attrs = thin_vec![cx.attribute(inline), cx.attribute(doc), cx.attribute(no_coverage)];
let trait_def = TraitDef {
span,
- attributes: Vec::new(),
path: path_std!(cmp::Eq),
+ skip_path_as_bound: false,
additional_bounds: Vec::new(),
generics: Bounds::empty(),
supports_unions: true,
diff --git a/compiler/rustc_builtin_macros/src/deriving/cmp/ord.rs b/compiler/rustc_builtin_macros/src/deriving/cmp/ord.rs
index 1612be862..7f117981a 100644
--- a/compiler/rustc_builtin_macros/src/deriving/cmp/ord.rs
+++ b/compiler/rustc_builtin_macros/src/deriving/cmp/ord.rs
@@ -1,11 +1,11 @@
use crate::deriving::generic::ty::*;
use crate::deriving::generic::*;
use crate::deriving::path_std;
-
use rustc_ast::MetaItem;
use rustc_expand::base::{Annotatable, ExtCtxt};
use rustc_span::symbol::{sym, Ident};
use rustc_span::Span;
+use thin_vec::thin_vec;
pub fn expand_deriving_ord(
cx: &mut ExtCtxt<'_>,
@@ -15,11 +15,11 @@ pub fn expand_deriving_ord(
push: &mut dyn FnMut(Annotatable),
) {
let inline = cx.meta_word(span, sym::inline);
- let attrs = vec![cx.attribute(inline)];
+ let attrs = thin_vec![cx.attribute(inline)];
let trait_def = TraitDef {
span,
- attributes: Vec::new(),
path: path_std!(cmp::Ord),
+ skip_path_as_bound: false,
additional_bounds: Vec::new(),
generics: Bounds::empty(),
supports_unions: false,
diff --git a/compiler/rustc_builtin_macros/src/deriving/cmp/partial_eq.rs b/compiler/rustc_builtin_macros/src/deriving/cmp/partial_eq.rs
index 0141b3377..236cbccaf 100644
--- a/compiler/rustc_builtin_macros/src/deriving/cmp/partial_eq.rs
+++ b/compiler/rustc_builtin_macros/src/deriving/cmp/partial_eq.rs
@@ -1,12 +1,12 @@
use crate::deriving::generic::ty::*;
use crate::deriving::generic::*;
use crate::deriving::{path_local, path_std};
-
use rustc_ast::ptr::P;
use rustc_ast::{BinOpKind, BorrowKind, Expr, ExprKind, MetaItem, Mutability};
use rustc_expand::base::{Annotatable, ExtCtxt};
use rustc_span::symbol::sym;
use rustc_span::Span;
+use thin_vec::thin_vec;
pub fn expand_deriving_partial_eq(
cx: &mut ExtCtxt<'_>,
@@ -15,14 +15,8 @@ pub fn expand_deriving_partial_eq(
item: &Annotatable,
push: &mut dyn FnMut(Annotatable),
) {
- fn cs_op(
- cx: &mut ExtCtxt<'_>,
- span: Span,
- substr: &Substructure<'_>,
- op: BinOpKind,
- combiner: BinOpKind,
- base: bool,
- ) -> BlockOrExpr {
+ fn cs_eq(cx: &mut ExtCtxt<'_>, span: Span, substr: &Substructure<'_>) -> BlockOrExpr {
+ let base = true;
let expr = cs_fold(
true, // use foldl
cx,
@@ -47,39 +41,22 @@ pub fn expand_deriving_partial_eq(
cx.expr_deref(field.span, expr.clone())
}
};
- cx.expr_binary(field.span, op, convert(&field.self_expr), convert(other_expr))
+ cx.expr_binary(
+ field.span,
+ BinOpKind::Eq,
+ convert(&field.self_expr),
+ convert(other_expr),
+ )
+ }
+ CsFold::Combine(span, expr1, expr2) => {
+ cx.expr_binary(span, BinOpKind::And, expr1, expr2)
}
- CsFold::Combine(span, expr1, expr2) => cx.expr_binary(span, combiner, expr1, expr2),
CsFold::Fieldless => cx.expr_bool(span, base),
},
);
BlockOrExpr::new_expr(expr)
}
- fn cs_eq(cx: &mut ExtCtxt<'_>, span: Span, substr: &Substructure<'_>) -> BlockOrExpr {
- cs_op(cx, span, substr, BinOpKind::Eq, BinOpKind::And, true)
- }
- fn cs_ne(cx: &mut ExtCtxt<'_>, span: Span, substr: &Substructure<'_>) -> BlockOrExpr {
- cs_op(cx, span, substr, BinOpKind::Ne, BinOpKind::Or, false)
- }
-
- macro_rules! md {
- ($name:expr, $f:ident) => {{
- let inline = cx.meta_word(span, sym::inline);
- let attrs = vec![cx.attribute(inline)];
- MethodDef {
- name: $name,
- generics: Bounds::empty(),
- explicit_self: true,
- nonself_args: vec![(self_ref(), sym::other)],
- ret_ty: Path(path_local!(bool)),
- attributes: attrs,
- unify_fieldless_variants: true,
- combine_substructure: combine_substructure(Box::new(|a, b, c| $f(a, b, c))),
- }
- }};
- }
-
super::inject_impl_of_structural_trait(
cx,
span,
@@ -88,18 +65,25 @@ pub fn expand_deriving_partial_eq(
push,
);
- // avoid defining `ne` if we can
- // c-like enums, enums without any fields and structs without fields
- // can safely define only `eq`.
- let mut methods = vec![md!(sym::eq, cs_eq)];
- if !is_type_without_fields(item) {
- methods.push(md!(sym::ne, cs_ne));
- }
+ // No need to generate `ne`, the default suffices, and not generating it is
+ // faster.
+ let inline = cx.meta_word(span, sym::inline);
+ let attrs = thin_vec![cx.attribute(inline)];
+ let methods = vec![MethodDef {
+ name: sym::eq,
+ generics: Bounds::empty(),
+ explicit_self: true,
+ nonself_args: vec![(self_ref(), sym::other)],
+ ret_ty: Path(path_local!(bool)),
+ attributes: attrs,
+ unify_fieldless_variants: true,
+ combine_substructure: combine_substructure(Box::new(|a, b, c| cs_eq(a, b, c))),
+ }];
let trait_def = TraitDef {
span,
- attributes: Vec::new(),
path: path_std!(cmp::PartialEq),
+ skip_path_as_bound: false,
additional_bounds: Vec::new(),
generics: Bounds::empty(),
supports_unions: false,
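The rewrite above stops generating `ne` entirely and leans on the trait's provided default, which `core` defines as the negation of `eq`; deriving only `eq` therefore preserves behavior while emitting less code. A plain user-level illustration:

```rust
// `PartialEq` only requires `eq`; `ne` has a provided default of `!self.eq(other)`.
struct Point {
    x: i32,
    y: i32,
}

impl PartialEq for Point {
    fn eq(&self, other: &Self) -> bool {
        self.x == other.x && self.y == other.y
    }
    // No `ne` here: the trait's default implementation is used.
}

fn main() {
    let a = Point { x: 1, y: 2 };
    let b = Point { x: 1, y: 3 };
    assert!(a != b); // `!=` calls the defaulted `ne`
    assert!(a == Point { x: 1, y: 2 });
    println!("ok");
}
```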
diff --git a/compiler/rustc_builtin_macros/src/deriving/cmp/partial_ord.rs b/compiler/rustc_builtin_macros/src/deriving/cmp/partial_ord.rs
index 2ebb01cc8..4173403a1 100644
--- a/compiler/rustc_builtin_macros/src/deriving/cmp/partial_ord.rs
+++ b/compiler/rustc_builtin_macros/src/deriving/cmp/partial_ord.rs
@@ -1,11 +1,11 @@
use crate::deriving::generic::ty::*;
use crate::deriving::generic::*;
use crate::deriving::{path_std, pathvec_std};
-
use rustc_ast::MetaItem;
use rustc_expand::base::{Annotatable, ExtCtxt};
use rustc_span::symbol::{sym, Ident};
use rustc_span::Span;
+use thin_vec::thin_vec;
pub fn expand_deriving_partial_ord(
cx: &mut ExtCtxt<'_>,
@@ -19,7 +19,7 @@ pub fn expand_deriving_partial_ord(
Path(Path::new_(pathvec_std!(option::Option), vec![Box::new(ordering_ty)], PathKind::Std));
let inline = cx.meta_word(span, sym::inline);
- let attrs = vec![cx.attribute(inline)];
+ let attrs = thin_vec![cx.attribute(inline)];
let partial_cmp_def = MethodDef {
name: sym::partial_cmp,
@@ -36,8 +36,8 @@ pub fn expand_deriving_partial_ord(
let trait_def = TraitDef {
span,
- attributes: vec![],
path: path_std!(cmp::PartialOrd),
+ skip_path_as_bound: false,
additional_bounds: vec![],
generics: Bounds::empty(),
supports_unions: false,
diff --git a/compiler/rustc_builtin_macros/src/deriving/debug.rs b/compiler/rustc_builtin_macros/src/deriving/debug.rs
index ceef893e8..2cf614ed9 100644
--- a/compiler/rustc_builtin_macros/src/deriving/debug.rs
+++ b/compiler/rustc_builtin_macros/src/deriving/debug.rs
@@ -19,8 +19,8 @@ pub fn expand_deriving_debug(
let trait_def = TraitDef {
span,
- attributes: Vec::new(),
path: path_std!(fmt::Debug),
+ skip_path_as_bound: false,
additional_bounds: Vec::new(),
generics: Bounds::empty(),
supports_unions: false,
@@ -30,7 +30,7 @@ pub fn expand_deriving_debug(
explicit_self: true,
nonself_args: vec![(fmtr, sym::f)],
ret_ty: Path(path_std!(fmt::Result)),
- attributes: Vec::new(),
+ attributes: ast::AttrVec::new(),
unify_fieldless_variants: false,
combine_substructure: combine_substructure(Box::new(|a, b, c| {
show_substructure(a, b, c)
@@ -52,7 +52,7 @@ fn show_substructure(cx: &mut ExtCtxt<'_>, span: Span, substr: &Substructure<'_>
// We want to make sure we have the ctxt set so that we can use unstable methods
let span = cx.with_def_site_ctxt(span);
- let name = cx.expr_lit(span, ast::LitKind::Str(ident.name, ast::StrStyle::Cooked));
+ let name = cx.expr_str(span, ident.name);
let fmt = substr.nonselflike_args[0].clone();
// Struct and tuples are similar enough that we use the same code for both,
@@ -89,10 +89,7 @@ fn show_substructure(cx: &mut ExtCtxt<'_>, span: Span, substr: &Substructure<'_>
for i in 0..fields.len() {
let field = &fields[i];
if is_struct {
- let name = cx.expr_lit(
- field.span,
- ast::LitKind::Str(field.name.unwrap().name, ast::StrStyle::Cooked),
- );
+ let name = cx.expr_str(field.span, field.name.unwrap().name);
args.push(name);
}
// Use an extra indirection to make sure this works for unsized types.
@@ -108,10 +105,7 @@ fn show_substructure(cx: &mut ExtCtxt<'_>, span: Span, substr: &Substructure<'_>
for field in fields {
if is_struct {
- name_exprs.push(cx.expr_lit(
- field.span,
- ast::LitKind::Str(field.name.unwrap().name, ast::StrStyle::Cooked),
- ));
+ name_exprs.push(cx.expr_str(field.span, field.name.unwrap().name));
}
// Use an extra indirection to make sure this works for unsized types.
diff --git a/compiler/rustc_builtin_macros/src/deriving/decodable.rs b/compiler/rustc_builtin_macros/src/deriving/decodable.rs
index d688143a2..d669f6168 100644
--- a/compiler/rustc_builtin_macros/src/deriving/decodable.rs
+++ b/compiler/rustc_builtin_macros/src/deriving/decodable.rs
@@ -22,8 +22,8 @@ pub fn expand_deriving_rustc_decodable(
let trait_def = TraitDef {
span,
- attributes: Vec::new(),
path: Path::new_(vec![krate, sym::Decodable], vec![], PathKind::Global),
+ skip_path_as_bound: false,
additional_bounds: Vec::new(),
generics: Bounds::empty(),
supports_unions: false,
@@ -48,7 +48,7 @@ pub fn expand_deriving_rustc_decodable(
],
PathKind::Std,
)),
- attributes: Vec::new(),
+ attributes: ast::AttrVec::new(),
unify_fieldless_variants: false,
combine_substructure: combine_substructure(Box::new(|a, b, c| {
decodable_substructure(a, b, c, krate)
diff --git a/compiler/rustc_builtin_macros/src/deriving/default.rs b/compiler/rustc_builtin_macros/src/deriving/default.rs
index 517769091..17df9fb27 100644
--- a/compiler/rustc_builtin_macros/src/deriving/default.rs
+++ b/compiler/rustc_builtin_macros/src/deriving/default.rs
@@ -1,16 +1,14 @@
use crate::deriving::generic::ty::*;
use crate::deriving::generic::*;
-
use rustc_ast as ast;
-use rustc_ast::walk_list;
-use rustc_ast::EnumDef;
-use rustc_ast::VariantData;
+use rustc_ast::{walk_list, EnumDef, VariantData};
use rustc_errors::Applicability;
use rustc_expand::base::{Annotatable, DummyResult, ExtCtxt};
use rustc_span::symbol::Ident;
use rustc_span::symbol::{kw, sym};
use rustc_span::Span;
use smallvec::SmallVec;
+use thin_vec::thin_vec;
pub fn expand_deriving_default(
cx: &mut ExtCtxt<'_>,
@@ -22,11 +20,11 @@ pub fn expand_deriving_default(
item.visit_with(&mut DetectNonVariantDefaultAttr { cx });
let inline = cx.meta_word(span, sym::inline);
- let attrs = vec![cx.attribute(inline)];
+ let attrs = thin_vec![cx.attribute(inline)];
let trait_def = TraitDef {
span,
- attributes: Vec::new(),
path: Path::new(vec![kw::Default, sym::Default]),
+ skip_path_as_bound: has_a_default_variant(item),
additional_bounds: Vec::new(),
generics: Bounds::empty(),
supports_unions: false,
@@ -265,3 +263,22 @@ impl<'a, 'b> rustc_ast::visit::Visitor<'a> for DetectNonVariantDefaultAttr<'a, '
}
}
}
+
+fn has_a_default_variant(item: &Annotatable) -> bool {
+ struct HasDefaultAttrOnVariant {
+ found: bool,
+ }
+
+ impl<'ast> rustc_ast::visit::Visitor<'ast> for HasDefaultAttrOnVariant {
+ fn visit_variant(&mut self, v: &'ast rustc_ast::Variant) {
+ if v.attrs.iter().any(|attr| attr.has_name(kw::Default)) {
+ self.found = true;
+ }
+ // no need to subrecurse.
+ }
+ }
+
+ let mut visitor = HasDefaultAttrOnVariant { found: false };
+ item.visit_with(&mut visitor);
+ visitor.found
+}
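`skip_path_as_bound` is set exactly when the annotated enum carries a `#[default]` variant, so the derive no longer forces a `Default` bound onto every type parameter. Assuming this is the behavior that shipped, the effect is observable in ordinary user code:

```rust
// With a `#[default]` unit variant, deriving Default does not require `T: Default`.
#[derive(Default)]
enum Cached<T> {
    #[default]
    Empty,
    Filled(T),
}

// Deliberately has no Default impl.
struct NoDefault;

fn main() {
    // Compiles because the generated impl does not add a `NoDefault: Default` bound.
    let slot: Cached<NoDefault> = Cached::default();
    assert!(matches!(slot, Cached::Empty));

    // The non-default variant is still usable as normal.
    let _filled: Cached<NoDefault> = Cached::Filled(NoDefault);
    println!("Cached::<NoDefault>::default() is the #[default] variant");
}
```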
diff --git a/compiler/rustc_builtin_macros/src/deriving/encodable.rs b/compiler/rustc_builtin_macros/src/deriving/encodable.rs
index 70167cac6..f83f58b97 100644
--- a/compiler/rustc_builtin_macros/src/deriving/encodable.rs
+++ b/compiler/rustc_builtin_macros/src/deriving/encodable.rs
@@ -89,7 +89,7 @@ use crate::deriving::generic::ty::*;
use crate::deriving::generic::*;
use crate::deriving::pathvec_std;
-use rustc_ast::{ExprKind, MetaItem, Mutability};
+use rustc_ast::{AttrVec, ExprKind, MetaItem, Mutability};
use rustc_expand::base::{Annotatable, ExtCtxt};
use rustc_span::symbol::{sym, Ident, Symbol};
use rustc_span::Span;
@@ -106,8 +106,8 @@ pub fn expand_deriving_rustc_encodable(
let trait_def = TraitDef {
span,
- attributes: Vec::new(),
path: Path::new_(vec![krate, sym::Encodable], vec![], PathKind::Global),
+ skip_path_as_bound: false,
additional_bounds: Vec::new(),
generics: Bounds::empty(),
supports_unions: false,
@@ -132,7 +132,7 @@ pub fn expand_deriving_rustc_encodable(
],
PathKind::Std,
)),
- attributes: Vec::new(),
+ attributes: AttrVec::new(),
unify_fieldless_variants: false,
combine_substructure: combine_substructure(Box::new(|a, b, c| {
encodable_substructure(a, b, c, krate)
diff --git a/compiler/rustc_builtin_macros/src/deriving/generic/mod.rs b/compiler/rustc_builtin_macros/src/deriving/generic/mod.rs
index 735017aa5..16ee3aa89 100644
--- a/compiler/rustc_builtin_macros/src/deriving/generic/mod.rs
+++ b/compiler/rustc_builtin_macros/src/deriving/generic/mod.rs
@@ -162,33 +162,35 @@
pub use StaticFields::*;
pub use SubstructureFields::*;
-use std::cell::RefCell;
-use std::iter;
-use std::vec;
-
+use crate::deriving;
use rustc_ast::ptr::P;
-use rustc_ast::{self as ast, EnumDef, Expr, Generics, PatKind};
+use rustc_ast::{
+ self as ast, BindingAnnotation, ByRef, EnumDef, Expr, Generics, Mutability, PatKind,
+};
use rustc_ast::{GenericArg, GenericParamKind, VariantData};
use rustc_attr as attr;
use rustc_expand::base::{Annotatable, ExtCtxt};
use rustc_span::symbol::{kw, sym, Ident, Symbol};
use rustc_span::Span;
-
+use std::cell::RefCell;
+use std::iter;
+use std::ops::Not;
+use std::vec;
+use thin_vec::thin_vec;
use ty::{Bounds, Path, Ref, Self_, Ty};
-use crate::deriving;
-
pub mod ty;
pub struct TraitDef<'a> {
/// The span for the current #[derive(Foo)] header.
pub span: Span,
- pub attributes: Vec<ast::Attribute>,
-
/// Path of the trait, including any type parameters
pub path: Path,
+ /// Whether to skip adding the current trait as a bound to the type parameters of the type.
+ pub skip_path_as_bound: bool,
+
/// Additional bounds required of any type parameters of the type,
/// other than the current trait
pub additional_bounds: Vec<Ty>,
@@ -219,7 +221,7 @@ pub struct MethodDef<'a> {
/// Returns type
pub ret_ty: Ty,
- pub attributes: Vec<ast::Attribute>,
+ pub attributes: ast::AttrVec,
/// Can we combine fieldless variants for enums into a single match arm?
/// If true, indicates that the trait operation uses the enum tag in some
@@ -383,16 +385,11 @@ fn find_type_parameters(
}
// Place bound generic params on a stack, to extract them when a type is encountered.
- fn visit_poly_trait_ref(
- &mut self,
- trait_ref: &'a ast::PolyTraitRef,
- modifier: &'a ast::TraitBoundModifier,
- ) {
+ fn visit_poly_trait_ref(&mut self, trait_ref: &'a ast::PolyTraitRef) {
let stack_len = self.bound_generic_params_stack.len();
- self.bound_generic_params_stack
- .extend(trait_ref.bound_generic_params.clone().into_iter());
+ self.bound_generic_params_stack.extend(trait_ref.bound_generic_params.iter().cloned());
- visit::walk_poly_trait_ref(self, trait_ref, modifier);
+ visit::walk_poly_trait_ref(self, trait_ref);
self.bound_generic_params_stack.truncate(stack_len);
}
@@ -568,8 +565,8 @@ impl<'a> TraitDef<'a> {
kind: ast::VisibilityKind::Inherited,
tokens: None,
},
- attrs: Vec::new(),
- kind: ast::AssocItemKind::TyAlias(Box::new(ast::TyAlias {
+ attrs: ast::AttrVec::new(),
+ kind: ast::AssocItemKind::Type(Box::new(ast::TyAlias {
defaultness: ast::Defaultness::Final,
generics: Generics::default(),
where_clauses: (
@@ -603,13 +600,13 @@ impl<'a> TraitDef<'a> {
cx.trait_bound(p.to_path(cx, self.span, type_ident, generics))
}).chain(
// require the current trait
- iter::once(cx.trait_bound(trait_path.clone()))
+ self.skip_path_as_bound.not().then(|| cx.trait_bound(trait_path.clone()))
).chain(
// also add in any bounds from the declaration
param.bounds.iter().cloned()
).collect();
- cx.typaram(param.ident.span.with_ctxt(ctxt), param.ident, vec![], bounds, None)
+ cx.typaram(param.ident.span.with_ctxt(ctxt), param.ident, bounds, None)
}
GenericParamKind::Const { ty, kw_span, .. } => {
let const_nodefault_kind = GenericParamKind::Const {
@@ -644,11 +641,7 @@ impl<'a> TraitDef<'a> {
}
ast::WherePredicate::EqPredicate(we) => {
let span = we.span.with_ctxt(ctxt);
- ast::WherePredicate::EqPredicate(ast::WhereEqPredicate {
- id: ast::DUMMY_NODE_ID,
- span,
- ..we.clone()
- })
+ ast::WherePredicate::EqPredicate(ast::WhereEqPredicate { span, ..we.clone() })
}
}
}));
@@ -726,15 +719,13 @@ impl<'a> TraitDef<'a> {
let self_type = cx.ty_path(path);
let attr = cx.attribute(cx.meta_word(self.span, sym::automatically_derived));
+ let attrs = thin_vec![attr];
let opt_trait_ref = Some(trait_ref);
- let mut a = vec![attr];
- a.extend(self.attributes.iter().cloned());
-
cx.item(
self.span,
Ident::empty(),
- a,
+ attrs,
ast::ItemKind::Impl(Box::new(ast::Impl {
unsafety: ast::Unsafe::No,
polarity: ast::ImplPolarity::Positive,
@@ -1078,9 +1069,9 @@ impl<'a> MethodDef<'a> {
let mut body = mk_body(cx, selflike_fields);
let struct_path = cx.path(span, vec![Ident::new(kw::SelfUpper, type_ident.span)]);
- let use_ref_pat = is_packed && !always_copy;
+ let by_ref = ByRef::from(is_packed && !always_copy);
let patterns =
- trait_.create_struct_patterns(cx, struct_path, struct_def, &prefixes, use_ref_pat);
+ trait_.create_struct_patterns(cx, struct_path, struct_def, &prefixes, by_ref);
// Do the let-destructuring.
let mut stmts: Vec<_> = iter::zip(selflike_args, patterns)
@@ -1123,6 +1114,11 @@ impl<'a> MethodDef<'a> {
/// ```
/// is equivalent to:
/// ```
+ /// #![feature(core_intrinsics)]
+ /// enum A {
+ /// A1,
+ /// A2(i32)
+ /// }
/// impl ::core::cmp::PartialEq for A {
/// #[inline]
/// fn eq(&self, other: &A) -> bool {
@@ -1262,13 +1258,13 @@ impl<'a> MethodDef<'a> {
let sp = variant.span.with_ctxt(trait_.span.ctxt());
let variant_path = cx.path(sp, vec![type_ident, variant.ident]);
- let use_ref_pat = false; // because enums can't be repr(packed)
+ let by_ref = ByRef::No; // because enums can't be repr(packed)
let mut subpats: Vec<_> = trait_.create_struct_patterns(
cx,
variant_path,
&variant.data,
&prefixes,
- use_ref_pat,
+ by_ref,
);
// `(VariantK, VariantK, ...)` or just `VariantK`.
@@ -1429,7 +1425,7 @@ impl<'a> TraitDef<'a> {
struct_path: ast::Path,
struct_def: &'a VariantData,
prefixes: &[String],
- use_ref_pat: bool,
+ by_ref: ByRef,
) -> Vec<P<ast::Pat>> {
prefixes
.iter()
@@ -1437,17 +1433,19 @@ impl<'a> TraitDef<'a> {
let pieces_iter =
struct_def.fields().iter().enumerate().map(|(i, struct_field)| {
let sp = struct_field.span.with_ctxt(self.span.ctxt());
- let binding_mode = if use_ref_pat {
- ast::BindingMode::ByRef(ast::Mutability::Not)
- } else {
- ast::BindingMode::ByValue(ast::Mutability::Not)
- };
let ident = self.mk_pattern_ident(prefix, i);
let path = ident.with_span_pos(sp);
(
sp,
struct_field.ident,
- cx.pat(path.span, PatKind::Ident(binding_mode, path, None)),
+ cx.pat(
+ path.span,
+ PatKind::Ident(
+ BindingAnnotation(by_ref, Mutability::Not),
+ path,
+ None,
+ ),
+ ),
)
});
@@ -1637,19 +1635,3 @@ where
StaticEnum(..) | StaticStruct(..) => cx.span_bug(trait_span, "static function in `derive`"),
}
}
-
-/// Returns `true` if the type has no value fields
-/// (for an enum, no variant has any fields)
-pub fn is_type_without_fields(item: &Annotatable) -> bool {
- if let Annotatable::Item(ref item) = *item {
- match item.kind {
- ast::ItemKind::Enum(ref enum_def, _) => {
- enum_def.variants.iter().all(|v| v.data.fields().is_empty())
- }
- ast::ItemKind::Struct(ref variant_data, _) => variant_data.fields().is_empty(),
- _ => false,
- }
- } else {
- false
- }
-}
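The bound-building hunk swaps an unconditional `iter::once(trait_bound)` for `self.skip_path_as_bound.not().then(|| ...)`, chaining an `Option` into the iterator so the current trait is added only when wanted. A standalone sketch of that idiom with plain `std` types and made-up bound names:

```rust
use std::ops::Not;

// Build a bound list, optionally including the "current trait", mirroring the
// `skip_path_as_bound.not().then(..)` chain in the derive code (toy strings here).
fn bounds(extra: &[&str], current_trait: &str, skip_current: bool) -> Vec<String> {
    extra
        .iter()
        .map(|b| b.to_string())
        // `Option<T>` is an iterator of zero or one items, so `.chain` simply
        // drops the bound when `skip_current` is true.
        .chain(skip_current.not().then(|| current_trait.to_string()))
        .collect()
}

fn main() {
    assert_eq!(bounds(&["Clone"], "Default", false), vec!["Clone", "Default"]);
    assert_eq!(bounds(&["Clone"], "Default", true), vec!["Clone"]);
    println!("ok");
}
```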
diff --git a/compiler/rustc_builtin_macros/src/deriving/generic/ty.rs b/compiler/rustc_builtin_macros/src/deriving/generic/ty.rs
index 4d46f7cd4..36e2e2930 100644
--- a/compiler/rustc_builtin_macros/src/deriving/generic/ty.rs
+++ b/compiler/rustc_builtin_macros/src/deriving/generic/ty.rs
@@ -146,7 +146,6 @@ fn mk_ty_param(
cx: &ExtCtxt<'_>,
span: Span,
name: Symbol,
- attrs: &[ast::Attribute],
bounds: &[Path],
self_ident: Ident,
self_generics: &Generics,
@@ -158,7 +157,7 @@ fn mk_ty_param(
cx.trait_bound(path)
})
.collect();
- cx.typaram(span, Ident::new(name, span), attrs.to_owned(), bounds, None)
+ cx.typaram(span, Ident::new(name, span), bounds, None)
}
/// Bounds on type parameters.
@@ -183,7 +182,7 @@ impl Bounds {
.iter()
.map(|t| {
let (name, ref bounds) = *t;
- mk_ty_param(cx, span, name, &[], &bounds, self_ty, self_generics)
+ mk_ty_param(cx, span, name, &bounds, self_ty, self_generics)
})
.collect();
diff --git a/compiler/rustc_builtin_macros/src/deriving/hash.rs b/compiler/rustc_builtin_macros/src/deriving/hash.rs
index 32ae3d344..6e9d5f08b 100644
--- a/compiler/rustc_builtin_macros/src/deriving/hash.rs
+++ b/compiler/rustc_builtin_macros/src/deriving/hash.rs
@@ -2,7 +2,7 @@ use crate::deriving::generic::ty::*;
use crate::deriving::generic::*;
use crate::deriving::{path_std, pathvec_std};
-use rustc_ast::{MetaItem, Mutability};
+use rustc_ast::{AttrVec, MetaItem, Mutability};
use rustc_expand::base::{Annotatable, ExtCtxt};
use rustc_span::symbol::sym;
use rustc_span::Span;
@@ -21,8 +21,8 @@ pub fn expand_deriving_hash(
let arg = Path::new_local(typaram);
let hash_trait_def = TraitDef {
span,
- attributes: Vec::new(),
path,
+ skip_path_as_bound: false,
additional_bounds: Vec::new(),
generics: Bounds::empty(),
supports_unions: false,
@@ -32,7 +32,7 @@ pub fn expand_deriving_hash(
explicit_self: true,
nonself_args: vec![(Ref(Box::new(Path(arg)), Mutability::Mut), sym::state)],
ret_ty: Unit,
- attributes: vec![],
+ attributes: AttrVec::new(),
unify_fieldless_variants: true,
combine_substructure: combine_substructure(Box::new(|a, b, c| {
hash_substructure(a, b, c)
diff --git a/compiler/rustc_builtin_macros/src/deriving/mod.rs b/compiler/rustc_builtin_macros/src/deriving/mod.rs
index c1ca089da..ee346047a 100644
--- a/compiler/rustc_builtin_macros/src/deriving/mod.rs
+++ b/compiler/rustc_builtin_macros/src/deriving/mod.rs
@@ -131,6 +131,8 @@ fn inject_impl_of_structural_trait(
// Create generics param list for where clauses and impl headers
let mut generics = generics.clone();
+ let ctxt = span.ctxt();
+
// Create the type of `self`.
//
// in addition, remove defaults from generic params (impls cannot have them).
@@ -138,16 +140,18 @@ fn inject_impl_of_structural_trait(
.params
.iter_mut()
.map(|param| match &mut param.kind {
- ast::GenericParamKind::Lifetime => {
- ast::GenericArg::Lifetime(cx.lifetime(span, param.ident))
- }
+ ast::GenericParamKind::Lifetime => ast::GenericArg::Lifetime(
+ cx.lifetime(param.ident.span.with_ctxt(ctxt), param.ident),
+ ),
ast::GenericParamKind::Type { default } => {
*default = None;
- ast::GenericArg::Type(cx.ty_ident(span, param.ident))
+ ast::GenericArg::Type(cx.ty_ident(param.ident.span.with_ctxt(ctxt), param.ident))
}
ast::GenericParamKind::Const { ty: _, kw_span: _, default } => {
*default = None;
- ast::GenericArg::Const(cx.const_ident(span, param.ident))
+ ast::GenericArg::Const(
+ cx.const_ident(param.ident.span.with_ctxt(ctxt), param.ident),
+ )
}
})
.collect();
@@ -164,7 +168,7 @@ fn inject_impl_of_structural_trait(
// Keep the lint and stability attributes of the original item, to control
// how the generated implementation is linted.
- let mut attrs = Vec::new();
+ let mut attrs = ast::AttrVec::new();
attrs.extend(
item.attrs
.iter()
@@ -174,6 +178,8 @@ fn inject_impl_of_structural_trait(
})
.cloned(),
);
+ // Mark as `automatically_derived` to avoid some silly lints.
+ attrs.push(cx.attribute(cx.meta_word(span, sym::automatically_derived)));
let newitem = cx.item(
span,
diff --git a/compiler/rustc_builtin_macros/src/edition_panic.rs b/compiler/rustc_builtin_macros/src/edition_panic.rs
index ea0e768a5..3f1a8b3bc 100644
--- a/compiler/rustc_builtin_macros/src/edition_panic.rs
+++ b/compiler/rustc_builtin_macros/src/edition_panic.rs
@@ -48,7 +48,7 @@ fn expand<'cx>(
MacEager::expr(
cx.expr(
sp,
- ExprKind::MacCall(MacCall {
+ ExprKind::MacCall(P(MacCall {
path: Path {
span: sp,
segments: cx
@@ -64,7 +64,7 @@ fn expand<'cx>(
tts,
)),
prior_type_ascription: None,
- }),
+ })),
),
)
}
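Several hunks in this patch (here and in the `assert!` context earlier) wrap the `MacCall` payload in `P<...>`, rustc_ast's owned pointer, so the large variant no longer inflates every `ExprKind` value. The sketch below illustrates the general size argument with invented types; the real `rustc_ast` sizes differ:

```rust
use std::mem::size_of;

// An enum is as large as its biggest variant, so boxing a bulky payload keeps
// every value of the enum small.
#[allow(dead_code)]
struct BigPayload {
    a: [u64; 8],
}

#[allow(dead_code)]
enum Inline {
    Small(u8),
    Big(BigPayload),
}

#[allow(dead_code)]
enum Boxed {
    Small(u8),
    Big(Box<BigPayload>),
}

fn main() {
    assert!(size_of::<Boxed>() < size_of::<Inline>());
    println!(
        "inline: {} bytes, boxed: {} bytes",
        size_of::<Inline>(),
        size_of::<Boxed>()
    );
}
```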
diff --git a/compiler/rustc_builtin_macros/src/format.rs b/compiler/rustc_builtin_macros/src/format.rs
index 9eb96ec76..8b07c1106 100644
--- a/compiler/rustc_builtin_macros/src/format.rs
+++ b/compiler/rustc_builtin_macros/src/format.rs
@@ -1,289 +1,42 @@
-use ArgumentType::*;
-use Position::*;
-
-use rustc_ast as ast;
use rustc_ast::ptr::P;
+use rustc_ast::token;
use rustc_ast::tokenstream::TokenStream;
-use rustc_ast::visit::{self, Visitor};
-use rustc_ast::{token, BlockCheckMode, UnsafeSource};
-use rustc_data_structures::fx::{FxHashMap, FxHashSet};
+use rustc_ast::Expr;
+use rustc_data_structures::fx::FxHashSet;
use rustc_errors::{pluralize, Applicability, MultiSpan, PResult};
use rustc_expand::base::{self, *};
use rustc_parse_format as parse;
-use rustc_span::symbol::{sym, Ident, Symbol};
+use rustc_span::symbol::{Ident, Symbol};
use rustc_span::{BytePos, InnerSpan, Span};
-use smallvec::SmallVec;
use rustc_lint_defs::builtin::NAMED_ARGUMENTS_USED_POSITIONALLY;
use rustc_lint_defs::{BufferedEarlyLint, BuiltinLintDiagnostics, LintId};
-use rustc_parse_format::Count;
-use std::borrow::Cow;
-use std::collections::hash_map::Entry;
-
-#[derive(PartialEq)]
-enum ArgumentType {
- Placeholder(&'static str),
- Count,
-}
-enum Position {
- Exact(usize),
- Capture(usize),
- Named(Symbol, InnerSpan),
-}
-
-/// Indicates how positional named argument (i.e. an named argument which is used by position
-/// instead of by name) is used in format string
-/// * `Arg` is the actual argument to print
-/// * `Width` is width format argument
-/// * `Precision` is precion format argument
-/// Example: `{Arg:Width$.Precision$}
-#[derive(Debug, Eq, PartialEq)]
-enum PositionalNamedArgType {
- Arg,
- Width,
- Precision,
-}
-
-/// Contains information necessary to create a lint for a positional named argument
-#[derive(Debug)]
-struct PositionalNamedArg {
- ty: PositionalNamedArgType,
- /// The piece of the using this argument (multiple pieces can use the same argument)
- cur_piece: usize,
- /// The InnerSpan for in the string to be replaced with the named argument
- /// This will be None when the position is implicit
- inner_span_to_replace: Option<rustc_parse_format::InnerSpan>,
- /// The name to use instead of the position
- replacement: Symbol,
- /// The span for the positional named argument (so the lint can point a message to it)
- positional_named_arg_span: Span,
- has_formatting: bool,
-}
-
-impl PositionalNamedArg {
- /// Determines:
- /// 1) span to be replaced with the name of the named argument and
- /// 2) span to be underlined for error messages
- fn get_positional_arg_spans(&self, cx: &Context<'_, '_>) -> (Option<Span>, Option<Span>) {
- if let Some(inner_span) = &self.inner_span_to_replace {
- let span =
- cx.fmtsp.from_inner(InnerSpan { start: inner_span.start, end: inner_span.end });
- (Some(span), Some(span))
- } else if self.ty == PositionalNamedArgType::Arg {
- // In the case of a named argument whose position is implicit, if the argument *has*
- // formatting, there will not be a span to replace. Instead, we insert the name after
- // the `{`, which will be the first character of arg_span. If the argument does *not*
- // have formatting, there may or may not be a span to replace. This is because
- // whitespace is allowed in arguments without formatting (such as `format!("{ }", 1);`)
- // but is not allowed in arguments with formatting (an error will be generated in cases
- // like `format!("{ :1.1}", 1.0f32);`.
- // For the message span, if there is formatting, we want to use the opening `{` and the
- // next character, which will the `:` indicating the start of formatting. If there is
- // not any formatting, we want to underline the entire span.
- cx.arg_spans.get(self.cur_piece).map_or((None, None), |arg_span| {
- if self.has_formatting {
- (
- Some(arg_span.with_lo(arg_span.lo() + BytePos(1)).shrink_to_lo()),
- Some(arg_span.with_hi(arg_span.lo() + BytePos(2))),
- )
- } else {
- let replace_start = arg_span.lo() + BytePos(1);
- let replace_end = arg_span.hi() - BytePos(1);
- let to_replace = arg_span.with_lo(replace_start).with_hi(replace_end);
- (Some(to_replace), Some(*arg_span))
- }
- })
- } else {
- (None, None)
- }
- }
-}
+mod ast;
+use ast::*;
-/// Encapsulates all the named arguments that have been used positionally
-#[derive(Debug)]
-struct PositionalNamedArgsLint {
- positional_named_args: Vec<PositionalNamedArg>,
-}
+mod expand;
+use expand::expand_parsed_format_args;
-impl PositionalNamedArgsLint {
- /// For a given positional argument, check if the index is for a named argument.
- ///
- /// Since positional arguments are required to come before named arguments, if the positional
- /// index is greater than or equal to the start of named arguments, we know it's a named
- /// argument used positionally.
- ///
- /// Example:
- /// println!("{} {} {2}", 0, a=1, b=2);
- ///
- /// In this case, the first piece (`{}`) would be ArgumentImplicitlyIs with an index of 0. The
- /// total number of arguments is 3 and the number of named arguments is 2, so the start of named
- /// arguments is index 1. Therefore, the index of 0 is okay.
- ///
- /// The second piece (`{}`) would be ArgumentImplicitlyIs with an index of 1, which is the start
- /// of named arguments, and so we should add a lint to use the named argument `a`.
- ///
- /// The third piece (`{2}`) would be ArgumentIs with an index of 2, which is greater than the
- /// start of named arguments, and so we should add a lint to use the named argument `b`.
- ///
- /// This same check also works for width and precision formatting when either or both are
- /// CountIsParam, which contains an index into the arguments.
- fn maybe_add_positional_named_arg(
- &mut self,
- current_positional_arg: usize,
- total_args_length: usize,
- format_argument_index: usize,
- ty: PositionalNamedArgType,
- cur_piece: usize,
- inner_span_to_replace: Option<rustc_parse_format::InnerSpan>,
- names: &FxHashMap<Symbol, (usize, Span)>,
- has_formatting: bool,
- ) {
- let start_of_named_args = total_args_length - names.len();
- if current_positional_arg >= start_of_named_args {
- self.maybe_push(
- format_argument_index,
- ty,
- cur_piece,
- inner_span_to_replace,
- names,
- has_formatting,
- )
- }
- }
+// The format_args!() macro is expanded in three steps:
+// 1. First, `parse_args` will parse the `(literal, arg, arg, name=arg, name=arg)` syntax,
+// but doesn't parse the template (the literal) itself.
+// 2. Second, `make_format_args` will parse the template, the format options, resolve argument references,
+// produce diagnostics, and turn the whole thing into a `FormatArgs` structure.
+// 3. Finally, `expand_parsed_format_args` will turn that `FormatArgs` structure
+// into the expression that the macro expands to.
- /// Try constructing a PositionalNamedArg struct and pushing it into the vec of positional
- /// named arguments. If a named arg associated with `format_argument_index` cannot be found,
- /// a new item will not be added as the lint cannot be emitted in this case.
- fn maybe_push(
- &mut self,
- format_argument_index: usize,
- ty: PositionalNamedArgType,
- cur_piece: usize,
- inner_span_to_replace: Option<rustc_parse_format::InnerSpan>,
- names: &FxHashMap<Symbol, (usize, Span)>,
- has_formatting: bool,
- ) {
- let named_arg = names
- .iter()
- .find(|&(_, &(index, _))| index == format_argument_index)
- .map(|found| found.clone());
+// See format/ast.rs for the FormatArgs structure and glossary.
- if let Some((&replacement, &(_, positional_named_arg_span))) = named_arg {
- // In FormatSpec, `precision_span` starts at the leading `.`, which we want to keep in
- // the lint suggestion, so increment `start` by 1 when `PositionalArgumentType` is
- // `Precision`.
- let inner_span_to_replace = if ty == PositionalNamedArgType::Precision {
- inner_span_to_replace
- .map(|is| rustc_parse_format::InnerSpan { start: is.start + 1, end: is.end })
- } else {
- inner_span_to_replace
- };
- self.positional_named_args.push(PositionalNamedArg {
- ty,
- cur_piece,
- inner_span_to_replace,
- replacement,
- positional_named_arg_span,
- has_formatting,
- });
- }
- }
-}
-
-struct Context<'a, 'b> {
- ecx: &'a mut ExtCtxt<'b>,
- /// The macro's call site. References to unstable formatting internals must
- /// use this span to pass the stability checker.
- macsp: Span,
- /// The span of the format string literal.
- fmtsp: Span,
-
- /// List of parsed argument expressions.
- /// Named expressions are resolved early, and are appended to the end of
- /// argument expressions.
- ///
- /// Example showing the various data structures in motion:
- ///
- /// * Original: `"{foo:o} {:o} {foo:x} {0:x} {1:o} {:x} {1:x} {0:o}"`
- /// * Implicit argument resolution: `"{foo:o} {0:o} {foo:x} {0:x} {1:o} {1:x} {1:x} {0:o}"`
- /// * Name resolution: `"{2:o} {0:o} {2:x} {0:x} {1:o} {1:x} {1:x} {0:o}"`
- /// * `arg_types` (in JSON): `[[0, 1, 0], [0, 1, 1], [0, 1]]`
- /// * `arg_unique_types` (in simplified JSON): `[["o", "x"], ["o", "x"], ["o", "x"]]`
- /// * `names` (in JSON): `{"foo": 2}`
- args: Vec<P<ast::Expr>>,
- /// The number of arguments that were added by implicit capturing.
- num_captured_args: usize,
- /// Placeholder slot numbers indexed by argument.
- arg_types: Vec<Vec<usize>>,
- /// Unique format specs seen for each argument.
- arg_unique_types: Vec<Vec<ArgumentType>>,
- /// Map from named arguments to their resolved indices.
- names: FxHashMap<Symbol, (usize, Span)>,
-
- /// The latest consecutive literal strings, or empty if there weren't any.
- literal: String,
-
- /// Collection of the compiled `rt::Argument` structures
- pieces: Vec<P<ast::Expr>>,
- /// Collection of string literals
- str_pieces: Vec<P<ast::Expr>>,
- /// Stays `true` if all formatting parameters are default (as in "{}{}").
- all_pieces_simple: bool,
-
- /// Mapping between positional argument references and indices into the
- /// final generated static argument array. We record the starting indices
- /// corresponding to each positional argument, and number of references
- /// consumed so far for each argument, to facilitate correct `Position`
- /// mapping in `build_piece`. In effect this can be seen as a "flattened"
- /// version of `arg_unique_types`.
- ///
- /// Again with the example described above in docstring for `args`:
- ///
- /// * `arg_index_map` (in JSON): `[[0, 1, 0], [2, 3, 3], [4, 5]]`
- arg_index_map: Vec<Vec<usize>>,
-
- /// Starting offset of count argument slots.
- count_args_index_offset: usize,
-
- /// Count argument slots and tracking data structures.
- /// Count arguments are separately tracked for de-duplication in case
- /// multiple references are made to one argument. For example, in this
- /// format string:
- ///
- /// * Original: `"{:.*} {:.foo$} {1:.*} {:.0$}"`
- /// * Implicit argument resolution: `"{1:.0$} {2:.foo$} {1:.3$} {4:.0$}"`
- /// * Name resolution: `"{1:.0$} {2:.5$} {1:.3$} {4:.0$}"`
- /// * `count_positions` (in JSON): `{0: 0, 5: 1, 3: 2}`
- /// * `count_args`: `vec![0, 5, 3]`
- count_args: Vec<usize>,
- /// Relative slot numbers for count arguments.
- count_positions: FxHashMap<usize, usize>,
- /// Number of count slots assigned.
- count_positions_count: usize,
-
- /// Current position of the implicit positional arg pointer, as if it
- /// still existed in this phase of processing.
- /// Used only for `all_pieces_simple` tracking in `build_piece`.
- curarg: usize,
- /// Current piece being evaluated, used for error reporting.
- curpiece: usize,
- /// Keep track of invalid references to positional arguments.
- invalid_refs: Vec<(usize, usize)>,
- /// Spans of all the formatting arguments, in order.
- arg_spans: Vec<Span>,
- /// All the formatting arguments that have formatting flags set, in order for diagnostics.
- arg_with_formatting: Vec<parse::FormatSpec<'a>>,
-
- /// Whether this format string came from a string literal, as opposed to a macro.
- is_literal: bool,
- unused_names_lint: PositionalNamedArgsLint,
-}
-
-pub struct FormatArg {
- expr: P<ast::Expr>,
- named: bool,
+// Only used in parse_args and report_invalid_references,
+// to indicate how a referred argument was used.
+#[derive(Clone, Copy, Debug, PartialEq, Eq)]
+enum PositionUsedAs {
+ Placeholder(Option<Span>),
+ Precision,
+ Width,
}
+use PositionUsedAs::*;
/// Parses the arguments from the given list of tokens, returning the diagnostic
/// if there's a parse error so we can continue parsing other format!
@@ -292,15 +45,14 @@ pub struct FormatArg {
/// If parsing succeeds, the return value is:
///
/// ```text
-/// Some((fmtstr, parsed arguments, index map for named arguments))
+/// Ok((fmtstr, parsed arguments))
/// ```
fn parse_args<'a>(
ecx: &mut ExtCtxt<'a>,
sp: Span,
tts: TokenStream,
-) -> PResult<'a, (P<ast::Expr>, Vec<FormatArg>, FxHashMap<Symbol, (usize, Span)>)> {
- let mut args = Vec::<FormatArg>::new();
- let mut names = FxHashMap::<Symbol, (usize, Span)>::default();
+) -> PResult<'a, (P<Expr>, FormatArguments)> {
+ let mut args = FormatArguments::new();
let mut p = ecx.new_parser_from_tts(tts);
@@ -329,7 +81,6 @@ fn parse_args<'a>(
};
let mut first = true;
- let mut named = false;
while p.token != token::Eof {
if !p.eat(&token::Comma) {
@@ -361,879 +112,54 @@ fn parse_args<'a>(
} // accept trailing commas
match p.token.ident() {
Some((ident, _)) if p.look_ahead(1, |t| *t == token::Eq) => {
- named = true;
p.bump();
p.expect(&token::Eq)?;
- let e = p.parse_expr()?;
- if let Some((prev, _)) = names.get(&ident.name) {
- ecx.struct_span_err(e.span, &format!("duplicate argument named `{}`", ident))
- .span_label(args[*prev].expr.span, "previously here")
- .span_label(e.span, "duplicate argument")
- .emit();
+ let expr = p.parse_expr()?;
+ if let Some((_, prev)) = args.by_name(ident.name) {
+ ecx.struct_span_err(
+ ident.span,
+ &format!("duplicate argument named `{}`", ident),
+ )
+ .span_label(prev.kind.ident().unwrap().span, "previously here")
+ .span_label(ident.span, "duplicate argument")
+ .emit();
continue;
}
-
- // Resolve names into slots early.
- // Since all the positional args are already seen at this point
- // if the input is valid, we can simply append to the positional
- // args. And remember the names.
- let slot = args.len();
- names.insert(ident.name, (slot, ident.span));
- args.push(FormatArg { expr: e, named: true });
+ args.add(FormatArgument { kind: FormatArgumentKind::Named(ident), expr });
}
_ => {
- let e = p.parse_expr()?;
- if named {
+ let expr = p.parse_expr()?;
+ if !args.named_args().is_empty() {
let mut err = ecx.struct_span_err(
- e.span,
+ expr.span,
"positional arguments cannot follow named arguments",
);
- err.span_label(e.span, "positional arguments must be before named arguments");
- for pos in names.values() {
- err.span_label(args[pos.0].expr.span, "named argument");
+ err.span_label(
+ expr.span,
+ "positional arguments must be before named arguments",
+ );
+ for arg in args.named_args() {
+ if let Some(name) = arg.kind.ident() {
+ err.span_label(name.span.to(arg.expr.span), "named argument");
+ }
}
err.emit();
}
- args.push(FormatArg { expr: e, named: false });
+ args.add(FormatArgument { kind: FormatArgumentKind::Normal, expr });
}
}
}
- Ok((fmtstr, args, names))
+ Ok((fmtstr, args))
}
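The rewritten `parse_args` reports two user-facing errors straight from the argument list: a duplicate named argument, and a positional argument following a named one. These are the ordinary `format!`/`println!` rules, shown here in plain user code (the rejected lines are left commented out since they would not compile):

```rust
fn main() {
    // Named arguments come after all positional ones, and each name is unique.
    println!("{} {a} {b}", 1, a = 2, b = 3);

    // Rejected by parse_args: duplicate argument named `a`.
    // println!("{a}", a = 1, a = 2);

    // Rejected: positional arguments cannot follow named arguments.
    // println!("{} {a}", a = 1, 2);
}
```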
-impl<'a, 'b> Context<'a, 'b> {
- /// The number of arguments that were explicitly given.
- fn num_args(&self) -> usize {
- self.args.len() - self.num_captured_args
- }
-
- fn resolve_name_inplace(&mut self, p: &mut parse::Piece<'_>) {
- // NOTE: the `unwrap_or` branch is needed in case of invalid format
- // arguments, e.g., `format_args!("{foo}")`.
- let lookup =
- |s: &str| self.names.get(&Symbol::intern(s)).unwrap_or(&(0, Span::default())).0;
-
- match *p {
- parse::String(_) => {}
- parse::NextArgument(ref mut arg) => {
- if let parse::ArgumentNamed(s) = arg.position {
- arg.position = parse::ArgumentIs(lookup(s));
- }
- if let parse::CountIsName(s, _) = arg.format.width {
- arg.format.width = parse::CountIsParam(lookup(s));
- }
- if let parse::CountIsName(s, _) = arg.format.precision {
- arg.format.precision = parse::CountIsParam(lookup(s));
- }
- }
- }
- }
-
- /// Verifies one piece of a parse string, and remembers it if valid.
- /// All errors are not emitted as fatal so we can continue giving errors
- /// about this and possibly other format strings.
- fn verify_piece(&mut self, p: &parse::Piece<'_>) {
- match *p {
- parse::String(..) => {}
- parse::NextArgument(ref arg) => {
- // width/precision first, if they have implicit positional
- // parameters it makes more sense to consume them first.
- self.verify_count(
- arg.format.width,
- &arg.format.width_span,
- PositionalNamedArgType::Width,
- );
- self.verify_count(
- arg.format.precision,
- &arg.format.precision_span,
- PositionalNamedArgType::Precision,
- );
-
- let has_precision = arg.format.precision != Count::CountImplied;
- let has_width = arg.format.width != Count::CountImplied;
-
- // argument second, if it's an implicit positional parameter
- // it's written second, so it should come after width/precision.
- let pos = match arg.position {
- parse::ArgumentIs(i) => {
- self.unused_names_lint.maybe_add_positional_named_arg(
- i,
- self.args.len(),
- i,
- PositionalNamedArgType::Arg,
- self.curpiece,
- Some(arg.position_span),
- &self.names,
- has_precision || has_width,
- );
-
- Exact(i)
- }
- parse::ArgumentImplicitlyIs(i) => {
- self.unused_names_lint.maybe_add_positional_named_arg(
- i,
- self.args.len(),
- i,
- PositionalNamedArgType::Arg,
- self.curpiece,
- None,
- &self.names,
- has_precision || has_width,
- );
- Exact(i)
- }
- parse::ArgumentNamed(s) => {
- let symbol = Symbol::intern(s);
- let span = arg.position_span;
- Named(symbol, InnerSpan::new(span.start, span.end))
- }
- };
-
- let ty = Placeholder(match arg.format.ty {
- "" => "Display",
- "?" => "Debug",
- "e" => "LowerExp",
- "E" => "UpperExp",
- "o" => "Octal",
- "p" => "Pointer",
- "b" => "Binary",
- "x" => "LowerHex",
- "X" => "UpperHex",
- _ => {
- let fmtsp = self.fmtsp;
- let sp = arg
- .format
- .ty_span
- .map(|sp| fmtsp.from_inner(InnerSpan::new(sp.start, sp.end)));
- let mut err = self.ecx.struct_span_err(
- sp.unwrap_or(fmtsp),
- &format!("unknown format trait `{}`", arg.format.ty),
- );
- err.note(
- "the only appropriate formatting traits are:\n\
- - ``, which uses the `Display` trait\n\
- - `?`, which uses the `Debug` trait\n\
- - `e`, which uses the `LowerExp` trait\n\
- - `E`, which uses the `UpperExp` trait\n\
- - `o`, which uses the `Octal` trait\n\
- - `p`, which uses the `Pointer` trait\n\
- - `b`, which uses the `Binary` trait\n\
- - `x`, which uses the `LowerHex` trait\n\
- - `X`, which uses the `UpperHex` trait",
- );
- if let Some(sp) = sp {
- for (fmt, name) in &[
- ("", "Display"),
- ("?", "Debug"),
- ("e", "LowerExp"),
- ("E", "UpperExp"),
- ("o", "Octal"),
- ("p", "Pointer"),
- ("b", "Binary"),
- ("x", "LowerHex"),
- ("X", "UpperHex"),
- ] {
- // FIXME: rustfix (`run-rustfix`) fails to apply suggestions.
- // > "Cannot replace slice of data that was already replaced"
- err.tool_only_span_suggestion(
- sp,
- &format!("use the `{}` trait", name),
- *fmt,
- Applicability::MaybeIncorrect,
- );
- }
- }
- err.emit();
- "<invalid>"
- }
- });
- self.verify_arg_type(pos, ty);
- self.curpiece += 1;
- }
- }
- }
-
- fn verify_count(
- &mut self,
- c: parse::Count<'_>,
- inner_span: &Option<rustc_parse_format::InnerSpan>,
- named_arg_type: PositionalNamedArgType,
- ) {
- match c {
- parse::CountImplied | parse::CountIs(..) => {}
- parse::CountIsParam(i) => {
- self.unused_names_lint.maybe_add_positional_named_arg(
- i,
- self.args.len(),
- i,
- named_arg_type,
- self.curpiece,
- *inner_span,
- &self.names,
- true,
- );
- self.verify_arg_type(Exact(i), Count);
- }
- parse::CountIsName(s, span) => {
- self.verify_arg_type(
- Named(Symbol::intern(s), InnerSpan::new(span.start, span.end)),
- Count,
- );
- }
- }
- }
-
- fn describe_num_args(&self) -> Cow<'_, str> {
- match self.num_args() {
- 0 => "no arguments were given".into(),
- 1 => "there is 1 argument".into(),
- x => format!("there are {} arguments", x).into(),
- }
- }
-
- /// Handle invalid references to positional arguments. Output different
- /// errors for the case where all arguments are positional and for when
- /// there are named arguments or numbered positional arguments in the
- /// format string.
- fn report_invalid_references(&self, numbered_position_args: bool) {
- let mut e;
- let sp = if !self.arg_spans.is_empty() {
- // Point at the formatting arguments.
- MultiSpan::from_spans(self.arg_spans.clone())
- } else {
- MultiSpan::from_span(self.fmtsp)
- };
- let refs =
- self.invalid_refs.iter().map(|(r, pos)| (r.to_string(), self.arg_spans.get(*pos)));
-
- let mut zero_based_note = false;
-
- let count = self.pieces.len()
- + self.arg_with_formatting.iter().filter(|fmt| fmt.precision_span.is_some()).count();
- if self.names.is_empty() && !numbered_position_args && count != self.num_args() {
- e = self.ecx.struct_span_err(
- sp,
- &format!(
- "{} positional argument{} in format string, but {}",
- count,
- pluralize!(count),
- self.describe_num_args(),
- ),
- );
- for arg in &self.args {
- // Point at the arguments that will be formatted.
- e.span_label(arg.span, "");
- }
- } else {
- let (mut refs, spans): (Vec<_>, Vec<_>) = refs.unzip();
- // Avoid `invalid reference to positional arguments 7 and 7 (there is 1 argument)`
- // for `println!("{7:7$}", 1);`
- refs.sort();
- refs.dedup();
- let spans: Vec<_> = spans.into_iter().filter_map(|sp| sp.copied()).collect();
- let sp = if self.arg_spans.is_empty() || spans.is_empty() {
- MultiSpan::from_span(self.fmtsp)
- } else {
- MultiSpan::from_spans(spans)
- };
- let arg_list = if refs.len() == 1 {
- format!("argument {}", refs[0])
- } else {
- let reg = refs.pop().unwrap();
- format!("arguments {head} and {tail}", head = refs.join(", "), tail = reg)
- };
-
- e = self.ecx.struct_span_err(
- sp,
- &format!(
- "invalid reference to positional {} ({})",
- arg_list,
- self.describe_num_args()
- ),
- );
- zero_based_note = true;
- };
-
- for fmt in &self.arg_with_formatting {
- if let Some(span) = fmt.precision_span {
- let span = self.fmtsp.from_inner(InnerSpan::new(span.start, span.end));
- match fmt.precision {
- parse::CountIsParam(pos) if pos > self.num_args() => {
- e.span_label(
- span,
- &format!(
- "this precision flag expects an `usize` argument at position {}, \
- but {}",
- pos,
- self.describe_num_args(),
- ),
- );
- zero_based_note = true;
- }
- parse::CountIsParam(pos) => {
- let count = self.pieces.len()
- + self
- .arg_with_formatting
- .iter()
- .filter(|fmt| fmt.precision_span.is_some())
- .count();
- e.span_label(
- span,
- &format!(
- "this precision flag adds an extra required argument at position {}, \
- which is why there {} expected",
- pos,
- if count == 1 {
- "is 1 argument".to_string()
- } else {
- format!("are {} arguments", count)
- },
- ),
- );
- if let Some(arg) = self.args.get(pos) {
- e.span_label(
- arg.span,
- "this parameter corresponds to the precision flag",
- );
- }
- zero_based_note = true;
- }
- _ => {}
- }
- }
- if let Some(span) = fmt.width_span {
- let span = self.fmtsp.from_inner(InnerSpan::new(span.start, span.end));
- match fmt.width {
- parse::CountIsParam(pos) if pos >= self.num_args() => {
- e.span_label(
- span,
- &format!(
- "this width flag expects an `usize` argument at position {}, \
- but {}",
- pos,
- self.describe_num_args(),
- ),
- );
- zero_based_note = true;
- }
- _ => {}
- }
- }
- }
- if zero_based_note {
- e.note("positional arguments are zero-based");
- }
- if !self.arg_with_formatting.is_empty() {
- e.note(
- "for information about formatting flags, visit \
- https://doc.rust-lang.org/std/fmt/index.html",
- );
- }
-
- e.emit();
- }
-
- /// Actually verifies and tracks a given format placeholder
- /// (a.k.a. argument).
- fn verify_arg_type(&mut self, arg: Position, ty: ArgumentType) {
- if let Exact(arg) = arg {
- if arg >= self.num_args() {
- self.invalid_refs.push((arg, self.curpiece));
- return;
- }
- }
-
- match arg {
- Exact(arg) | Capture(arg) => {
- match ty {
- Placeholder(_) => {
- // record every (position, type) combination only once
- let seen_ty = &mut self.arg_unique_types[arg];
- let i = seen_ty.iter().position(|x| *x == ty).unwrap_or_else(|| {
- let i = seen_ty.len();
- seen_ty.push(ty);
- i
- });
- self.arg_types[arg].push(i);
- }
- Count => {
- if let Entry::Vacant(e) = self.count_positions.entry(arg) {
- let i = self.count_positions_count;
- e.insert(i);
- self.count_args.push(arg);
- self.count_positions_count += 1;
- }
- }
- }
- }
-
- Named(name, span) => {
- match self.names.get(&name) {
- Some(&idx) => {
- // Treat as positional arg.
- self.verify_arg_type(Capture(idx.0), ty)
- }
- None => {
- // For the moment capturing variables from format strings expanded from macros is
- // disabled (see RFC #2795)
- if self.is_literal {
- // Treat this name as a variable to capture from the surrounding scope
- let idx = self.args.len();
- self.arg_types.push(Vec::new());
- self.arg_unique_types.push(Vec::new());
- let span = if self.is_literal {
- self.fmtsp.from_inner(span)
- } else {
- self.fmtsp
- };
- self.num_captured_args += 1;
- self.args.push(self.ecx.expr_ident(span, Ident::new(name, span)));
- self.names.insert(name, (idx, span));
- self.verify_arg_type(Capture(idx), ty)
- } else {
- let msg = format!("there is no argument named `{}`", name);
- let sp = if self.is_literal {
- self.fmtsp.from_inner(span)
- } else {
- self.fmtsp
- };
- let mut err = self.ecx.struct_span_err(sp, &msg);
-
- err.note(&format!(
- "did you intend to capture a variable `{}` from \
- the surrounding scope?",
- name
- ));
- err.note(
- "to avoid ambiguity, `format_args!` cannot capture variables \
- when the format string is expanded from a macro",
- );
-
- err.emit();
- }
- }
- }
- }
- }
- }
-
- /// Builds the mapping between format placeholders and argument objects.
- fn build_index_map(&mut self) {
- // NOTE: Keep the ordering the same as `into_expr`'s expansion would do!
- let args_len = self.args.len();
- self.arg_index_map.reserve(args_len);
-
- let mut sofar = 0usize;
-
- // Map the arguments
- for i in 0..args_len {
- let arg_types = &self.arg_types[i];
- let arg_offsets = arg_types.iter().map(|offset| sofar + *offset).collect::<Vec<_>>();
- self.arg_index_map.push(arg_offsets);
- sofar += self.arg_unique_types[i].len();
- }
-
- // Record starting index for counts, which appear just after arguments
- self.count_args_index_offset = sofar;
- }
-
- fn rtpath(ecx: &ExtCtxt<'_>, s: Symbol) -> Vec<Ident> {
- ecx.std_path(&[sym::fmt, sym::rt, sym::v1, s])
- }
-
- fn build_count(&self, c: parse::Count<'_>) -> P<ast::Expr> {
- let sp = self.macsp;
- let count = |c, arg| {
- let mut path = Context::rtpath(self.ecx, sym::Count);
- path.push(Ident::new(c, sp));
- match arg {
- Some(arg) => self.ecx.expr_call_global(sp, path, vec![arg]),
- None => self.ecx.expr_path(self.ecx.path_global(sp, path)),
- }
- };
- match c {
- parse::CountIs(i) => count(sym::Is, Some(self.ecx.expr_usize(sp, i))),
- parse::CountIsParam(i) => {
- // This needs mapping too, as `i` is referring to a macro
- // argument. If `i` is not found in `count_positions` then
- // the error had already been emitted elsewhere.
- let i = self.count_positions.get(&i).cloned().unwrap_or(0)
- + self.count_args_index_offset;
- count(sym::Param, Some(self.ecx.expr_usize(sp, i)))
- }
- parse::CountImplied => count(sym::Implied, None),
- // should never be the case, names are already resolved
- parse::CountIsName(..) => panic!("should never happen"),
- }
- }
-
- /// Build a literal expression from the accumulated string literals
- fn build_literal_string(&mut self) -> P<ast::Expr> {
- let sp = self.fmtsp;
- let s = Symbol::intern(&self.literal);
- self.literal.clear();
- self.ecx.expr_str(sp, s)
- }
-
- /// Builds a static `rt::Argument` from a `parse::Piece` or append
- /// to the `literal` string.
- fn build_piece(
- &mut self,
- piece: &parse::Piece<'a>,
- arg_index_consumed: &mut Vec<usize>,
- ) -> Option<P<ast::Expr>> {
- let sp = self.macsp;
- match *piece {
- parse::String(s) => {
- self.literal.push_str(s);
- None
- }
- parse::NextArgument(ref arg) => {
- // Build the position
- let pos = {
- match arg.position {
- parse::ArgumentIs(i, ..) | parse::ArgumentImplicitlyIs(i) => {
- // Map to index in final generated argument array
- // in case of multiple types specified
- let arg_idx = match arg_index_consumed.get_mut(i) {
- None => 0, // error already emitted elsewhere
- Some(offset) => {
- let idx_map = &self.arg_index_map[i];
- // unwrap_or branch: error already emitted elsewhere
- let arg_idx = *idx_map.get(*offset).unwrap_or(&0);
- *offset += 1;
- arg_idx
- }
- };
- self.ecx.expr_usize(sp, arg_idx)
- }
-
- // should never be the case, because names are already
- // resolved.
- parse::ArgumentNamed(..) => panic!("should never happen"),
- }
- };
-
- let simple_arg = parse::Argument {
- position: {
- // We don't have ArgumentNext any more, so we have to
- // track the current argument ourselves.
- let i = self.curarg;
- self.curarg += 1;
- parse::ArgumentIs(i)
- },
- position_span: arg.position_span,
- format: parse::FormatSpec {
- fill: arg.format.fill,
- align: parse::AlignUnknown,
- flags: 0,
- precision: parse::CountImplied,
- precision_span: None,
- width: parse::CountImplied,
- width_span: None,
- ty: arg.format.ty,
- ty_span: arg.format.ty_span,
- },
- };
-
- let fill = arg.format.fill.unwrap_or(' ');
-
- let pos_simple = arg.position.index() == simple_arg.position.index();
-
- if arg.format.precision_span.is_some() || arg.format.width_span.is_some() {
- self.arg_with_formatting.push(arg.format);
- }
- if !pos_simple || arg.format != simple_arg.format || fill != ' ' {
- self.all_pieces_simple = false;
- }
-
- // Build the format
- let fill = self.ecx.expr_lit(sp, ast::LitKind::Char(fill));
- let align = |name| {
- let mut p = Context::rtpath(self.ecx, sym::Alignment);
- p.push(Ident::new(name, sp));
- self.ecx.path_global(sp, p)
- };
- let align = match arg.format.align {
- parse::AlignLeft => align(sym::Left),
- parse::AlignRight => align(sym::Right),
- parse::AlignCenter => align(sym::Center),
- parse::AlignUnknown => align(sym::Unknown),
- };
- let align = self.ecx.expr_path(align);
- let flags = self.ecx.expr_u32(sp, arg.format.flags);
- let prec = self.build_count(arg.format.precision);
- let width = self.build_count(arg.format.width);
- let path = self.ecx.path_global(sp, Context::rtpath(self.ecx, sym::FormatSpec));
- let fmt = self.ecx.expr_struct(
- sp,
- path,
- vec![
- self.ecx.field_imm(sp, Ident::new(sym::fill, sp), fill),
- self.ecx.field_imm(sp, Ident::new(sym::align, sp), align),
- self.ecx.field_imm(sp, Ident::new(sym::flags, sp), flags),
- self.ecx.field_imm(sp, Ident::new(sym::precision, sp), prec),
- self.ecx.field_imm(sp, Ident::new(sym::width, sp), width),
- ],
- );
-
- let path = self.ecx.path_global(sp, Context::rtpath(self.ecx, sym::Argument));
- Some(self.ecx.expr_struct(
- sp,
- path,
- vec![
- self.ecx.field_imm(sp, Ident::new(sym::position, sp), pos),
- self.ecx.field_imm(sp, Ident::new(sym::format, sp), fmt),
- ],
- ))
- }
- }
- }
-
- /// Actually builds the expression which the format_args! block will be
- /// expanded to.
- fn into_expr(self) -> P<ast::Expr> {
- let mut original_args = self.args;
- let mut fmt_args = Vec::with_capacity(
- self.arg_unique_types.iter().map(|v| v.len()).sum::<usize>() + self.count_args.len(),
- );
-
- // First, build up the static array which will become our precompiled
- // format "string"
- let pieces = self.ecx.expr_array_ref(self.fmtsp, self.str_pieces);
-
- // We need to construct a &[ArgumentV1] to pass into the fmt::Arguments
- // constructor. In general the expressions in this slice might be
- // permuted from their order in original_args (such as in the case of
- // "{1} {0}"), or may have multiple entries referring to the same
- // element of original_args ("{0} {0}").
- //
- // The following vector has one item per element of our output slice,
- // identifying the index of which element of original_args it's passing,
- // and that argument's type.
- let mut fmt_arg_index_and_ty = SmallVec::<[(usize, &ArgumentType); 8]>::new();
- for (i, unique_types) in self.arg_unique_types.iter().enumerate() {
- fmt_arg_index_and_ty.extend(unique_types.iter().map(|ty| (i, ty)));
- }
- fmt_arg_index_and_ty.extend(self.count_args.iter().map(|&i| (i, &Count)));
-
- // Figure out whether there are permuted or repeated elements. If not,
- // we can generate simpler code.
- //
- // The sequence has no indices out of order or repeated if: for every
- // adjacent pair of elements, the first one's index is less than the
- // second one's index.
- let nicely_ordered =
- fmt_arg_index_and_ty.array_windows().all(|[(i, _i_ty), (j, _j_ty)]| i < j);
-
- // We want to emit:
- //
- // [ArgumentV1::new(&$arg0, …), ArgumentV1::new(&$arg1, …), …]
- //
- // However, it's only legal to do so if $arg0, $arg1, … were written in
- // exactly that order by the programmer. When arguments are permuted, we
- // want them evaluated in the order written by the programmer, not in
- // the order provided to fmt::Arguments. When arguments are repeated, we
- // want the expression evaluated only once.
- //
- // Further, if any arg _after the first one_ contains a yield point such
- // as `await` or `yield`, the above short form is inconvenient for the
- // caller because it would keep a temporary of type ArgumentV1 alive
- // across the yield point. ArgumentV1 can't implement Send since it
- // holds a type-erased arbitrary type.
- //
- // Thus in the not nicely ordered case, and in the yielding case, we
- // emit the following instead:
- //
- // match (&$arg0, &$arg1, …) {
- // args => [ArgumentV1::new(args.$i, …), ArgumentV1::new(args.$j, …), …]
- // }
- //
- // for the sequence of indices $i, $j, … governed by fmt_arg_index_and_ty.
- // This more verbose representation ensures that all arguments are
- // evaluated a single time each, in the order written by the programmer,
- // and that the surrounding future/generator (if any) is Send whenever
- // possible.
- let no_need_for_match =
- nicely_ordered && !original_args.iter().skip(1).any(|e| may_contain_yield_point(e));
-
- for (arg_index, arg_ty) in fmt_arg_index_and_ty {
- let e = &mut original_args[arg_index];
- let span = e.span;
- let arg = if no_need_for_match {
- let expansion_span = e.span.with_ctxt(self.macsp.ctxt());
- // The indices are strictly ordered so e has not been taken yet.
- self.ecx.expr_addr_of(expansion_span, P(e.take()))
- } else {
- let def_site = self.ecx.with_def_site_ctxt(span);
- let args_tuple = self.ecx.expr_ident(def_site, Ident::new(sym::args, def_site));
- let member = Ident::new(sym::integer(arg_index), def_site);
- self.ecx.expr(def_site, ast::ExprKind::Field(args_tuple, member))
- };
- fmt_args.push(Context::format_arg(self.ecx, self.macsp, span, arg_ty, arg));
- }
-
- let args_array = self.ecx.expr_array(self.macsp, fmt_args);
- let args_slice = self.ecx.expr_addr_of(
- self.macsp,
- if no_need_for_match {
- args_array
- } else {
- // In the !no_need_for_match case, none of the exprs were moved
- // away in the previous loop.
- //
- // This uses the arg span for `&arg` so that borrowck errors
- // point to the specific expression passed to the macro (the
- // span is otherwise unavailable in the MIR used by borrowck).
- let heads = original_args
- .into_iter()
- .map(|e| self.ecx.expr_addr_of(e.span.with_ctxt(self.macsp.ctxt()), e))
- .collect();
-
- let pat = self.ecx.pat_ident(self.macsp, Ident::new(sym::args, self.macsp));
- let arm = self.ecx.arm(self.macsp, pat, args_array);
- let head = self.ecx.expr(self.macsp, ast::ExprKind::Tup(heads));
- self.ecx.expr_match(self.macsp, head, vec![arm])
- },
- );
-
- // Now create the fmt::Arguments struct with all our locals we created.
- let (fn_name, fn_args) = if self.all_pieces_simple {
- ("new_v1", vec![pieces, args_slice])
- } else {
- // Build up the static array which will store our precompiled
- // nonstandard placeholders, if there are any.
- let fmt = self.ecx.expr_array_ref(self.macsp, self.pieces);
-
- let path = self.ecx.std_path(&[sym::fmt, sym::UnsafeArg, sym::new]);
- let unsafe_arg = self.ecx.expr_call_global(self.macsp, path, Vec::new());
- let unsafe_expr = self.ecx.expr_block(P(ast::Block {
- stmts: vec![self.ecx.stmt_expr(unsafe_arg)],
- id: ast::DUMMY_NODE_ID,
- rules: BlockCheckMode::Unsafe(UnsafeSource::CompilerGenerated),
- span: self.macsp,
- tokens: None,
- could_be_bare_literal: false,
- }));
-
- ("new_v1_formatted", vec![pieces, args_slice, fmt, unsafe_expr])
- };
-
- let path = self.ecx.std_path(&[sym::fmt, sym::Arguments, Symbol::intern(fn_name)]);
- self.ecx.expr_call_global(self.macsp, path, fn_args)
- }
-
- fn format_arg(
- ecx: &ExtCtxt<'_>,
- macsp: Span,
- mut sp: Span,
- ty: &ArgumentType,
- arg: P<ast::Expr>,
- ) -> P<ast::Expr> {
- sp = ecx.with_def_site_ctxt(sp);
- let trait_ = match *ty {
- Placeholder(trait_) if trait_ == "<invalid>" => return DummyResult::raw_expr(sp, true),
- Placeholder(trait_) => trait_,
- Count => {
- let path = ecx.std_path(&[sym::fmt, sym::ArgumentV1, sym::from_usize]);
- return ecx.expr_call_global(macsp, path, vec![arg]);
- }
- };
- let new_fn_name = match trait_ {
- "Display" => "new_display",
- "Debug" => "new_debug",
- "LowerExp" => "new_lower_exp",
- "UpperExp" => "new_upper_exp",
- "Octal" => "new_octal",
- "Pointer" => "new_pointer",
- "Binary" => "new_binary",
- "LowerHex" => "new_lower_hex",
- "UpperHex" => "new_upper_hex",
- _ => unreachable!(),
- };
-
- let path = ecx.std_path(&[sym::fmt, sym::ArgumentV1, Symbol::intern(new_fn_name)]);
- ecx.expr_call_global(sp, path, vec![arg])
- }
-}
-
-fn expand_format_args_impl<'cx>(
- ecx: &'cx mut ExtCtxt<'_>,
- mut sp: Span,
- tts: TokenStream,
- nl: bool,
-) -> Box<dyn base::MacResult + 'cx> {
- sp = ecx.with_def_site_ctxt(sp);
- match parse_args(ecx, sp, tts) {
- Ok((efmt, args, names)) => {
- MacEager::expr(expand_preparsed_format_args(ecx, sp, efmt, args, names, nl))
- }
- Err(mut err) => {
- err.emit();
- DummyResult::any(sp)
- }
- }
-}
-
-pub fn expand_format_args<'cx>(
- ecx: &'cx mut ExtCtxt<'_>,
- sp: Span,
- tts: TokenStream,
-) -> Box<dyn base::MacResult + 'cx> {
- expand_format_args_impl(ecx, sp, tts, false)
-}
-
-pub fn expand_format_args_nl<'cx>(
- ecx: &'cx mut ExtCtxt<'_>,
- sp: Span,
- tts: TokenStream,
-) -> Box<dyn base::MacResult + 'cx> {
- expand_format_args_impl(ecx, sp, tts, true)
-}
-
-fn create_lints_for_named_arguments_used_positionally(cx: &mut Context<'_, '_>) {
- for named_arg in &cx.unused_names_lint.positional_named_args {
- let (position_sp_to_replace, position_sp_for_msg) = named_arg.get_positional_arg_spans(cx);
-
- let msg = format!("named argument `{}` is not used by name", named_arg.replacement);
-
- cx.ecx.buffered_early_lint.push(BufferedEarlyLint {
- span: MultiSpan::from_span(named_arg.positional_named_arg_span),
- msg: msg.clone(),
- node_id: ast::CRATE_NODE_ID,
- lint_id: LintId::of(&NAMED_ARGUMENTS_USED_POSITIONALLY),
- diagnostic: BuiltinLintDiagnostics::NamedArgumentUsedPositionally {
- position_sp_to_replace,
- position_sp_for_msg,
- named_arg_sp: named_arg.positional_named_arg_span,
- named_arg_name: named_arg.replacement.to_string(),
- is_formatting_arg: named_arg.ty != PositionalNamedArgType::Arg,
- },
- });
- }
-}
-
-/// Take the various parts of `format_args!(efmt, args..., name=names...)`
-/// and construct the appropriate formatting expression.
-pub fn expand_preparsed_format_args(
+pub fn make_format_args(
ecx: &mut ExtCtxt<'_>,
- sp: Span,
- efmt: P<ast::Expr>,
- args: Vec<FormatArg>,
- names: FxHashMap<Symbol, (usize, Span)>,
+ efmt: P<Expr>,
+ mut args: FormatArguments,
append_newline: bool,
-) -> P<ast::Expr> {
- // NOTE: this verbose way of initializing `Vec<Vec<ArgumentType>>` is because
- // `ArgumentType` does not derive `Clone`.
- let arg_types: Vec<_> = (0..args.len()).map(|_| Vec::new()).collect();
- let arg_unique_types: Vec<_> = (0..args.len()).map(|_| Vec::new()).collect();
-
- let mut macsp = ecx.call_site();
- macsp = ecx.with_def_site_ctxt(macsp);
-
+) -> Result<FormatArgs, ()> {
let msg = "format argument must be a string literal";
- let fmt_sp = efmt.span;
- let efmt_kind_is_lit: bool = matches!(efmt.kind, ast::ExprKind::Lit(_));
+ let unexpanded_fmt_span = efmt.span;
let (fmt_str, fmt_style, fmt_span) = match expr_to_spanned_string(ecx, efmt, msg) {
Ok(mut fmt) if append_newline => {
fmt.0 = Symbol::intern(&format!("{}\n", fmt.0));
@@ -1242,13 +168,13 @@ pub fn expand_preparsed_format_args(
Ok(fmt) => fmt,
Err(err) => {
if let Some((mut err, suggested)) = err {
- let sugg_fmt = match args.len() {
+ let sugg_fmt = match args.explicit_args().len() {
0 => "{}".to_string(),
- _ => format!("{}{{}}", "{} ".repeat(args.len())),
+ _ => format!("{}{{}}", "{} ".repeat(args.explicit_args().len())),
};
if !suggested {
err.span_suggestion(
- fmt_sp.shrink_to_lo(),
+ unexpanded_fmt_span.shrink_to_lo(),
"you might be missing a string literal to format with",
format!("\"{}\", ", sugg_fmt),
Applicability::MaybeIncorrect,
@@ -1256,17 +182,17 @@ pub fn expand_preparsed_format_args(
}
err.emit();
}
- return DummyResult::raw_expr(sp, true);
+ return Err(());
}
};
let str_style = match fmt_style {
- ast::StrStyle::Cooked => None,
- ast::StrStyle::Raw(raw) => Some(raw as usize),
+ rustc_ast::StrStyle::Cooked => None,
+ rustc_ast::StrStyle::Raw(raw) => Some(raw as usize),
};
let fmt_str = fmt_str.as_str(); // for the suggestions below
- let fmt_snippet = ecx.source_map().span_to_snippet(fmt_sp).ok();
+ let fmt_snippet = ecx.source_map().span_to_snippet(unexpanded_fmt_span).ok();
let mut parser = parse::Parser::new(
fmt_str,
str_style,
@@ -1275,18 +201,20 @@ pub fn expand_preparsed_format_args(
parse::ParseMode::Format,
);
- let mut unverified_pieces = Vec::new();
+ let mut pieces = Vec::new();
while let Some(piece) = parser.next() {
if !parser.errors.is_empty() {
break;
} else {
- unverified_pieces.push(piece);
+ pieces.push(piece);
}
}
+ let is_literal = parser.is_literal;
+
if !parser.errors.is_empty() {
let err = parser.errors.remove(0);
- let sp = if efmt_kind_is_lit {
+ let sp = if is_literal {
fmt_span.from_inner(InnerSpan::new(err.span.start, err.span.end))
} else {
// The format string could be another macro invocation, e.g.:
@@ -1304,24 +232,21 @@ pub fn expand_preparsed_format_args(
if let Some(note) = err.note {
e.note(&note);
}
- if let Some((label, span)) = err.secondary_label {
- if efmt_kind_is_lit {
- e.span_label(fmt_span.from_inner(InnerSpan::new(span.start, span.end)), label);
- }
+ if let Some((label, span)) = err.secondary_label && is_literal {
+ e.span_label(fmt_span.from_inner(InnerSpan::new(span.start, span.end)), label);
}
if err.should_be_replaced_with_positional_argument {
let captured_arg_span =
fmt_span.from_inner(InnerSpan::new(err.span.start, err.span.end));
- let positional_args = args.iter().filter(|arg| !arg.named).collect::<Vec<_>>();
if let Ok(arg) = ecx.source_map().span_to_snippet(captured_arg_span) {
- let span = match positional_args.last() {
+ let span = match args.unnamed_args().last() {
Some(arg) => arg.expr.span,
- None => fmt_sp,
+ None => fmt_span,
};
e.multipart_suggestion_verbose(
"consider using a positional formatting argument instead",
vec![
- (captured_arg_span, positional_args.len().to_string()),
+ (captured_arg_span, args.unnamed_args().len().to_string()),
(span.shrink_to_hi(), format!(", {}", arg)),
],
Applicability::MachineApplicable,
@@ -1329,245 +254,626 @@ pub fn expand_preparsed_format_args(
}
}
e.emit();
- return DummyResult::raw_expr(sp, true);
+ return Err(());
}
- let arg_spans = parser
- .arg_places
- .iter()
- .map(|span| fmt_span.from_inner(InnerSpan::new(span.start, span.end)))
- .collect();
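+ // Map a span inside the format string back to the source. This only makes sense
+ // when the format string is a literal in the source code, so return `None` otherwise.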
+ let to_span = |inner_span: rustc_parse_format::InnerSpan| {
+ is_literal.then(|| {
+ fmt_span.from_inner(InnerSpan { start: inner_span.start, end: inner_span.end })
+ })
+ };
- let named_pos: FxHashSet<usize> = names.values().cloned().map(|(i, _)| i).collect();
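+ // Track which explicit arguments the template actually uses, plus any invalid
+ // positional references and any named arguments referenced by number, for diagnostics.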
+ let mut used = vec![false; args.explicit_args().len()];
+ let mut invalid_refs = Vec::new();
+ let mut numeric_references_to_named_arg = Vec::new();
- let mut cx = Context {
- ecx,
- args: args.into_iter().map(|arg| arg.expr).collect(),
- num_captured_args: 0,
- arg_types,
- arg_unique_types,
- names,
- curarg: 0,
- curpiece: 0,
- arg_index_map: Vec::new(),
- count_args: Vec::new(),
- count_positions: FxHashMap::default(),
- count_positions_count: 0,
- count_args_index_offset: 0,
- literal: String::new(),
- pieces: Vec::with_capacity(unverified_pieces.len()),
- str_pieces: Vec::with_capacity(unverified_pieces.len()),
- all_pieces_simple: true,
- macsp,
- fmtsp: fmt_span,
- invalid_refs: Vec::new(),
- arg_spans,
- arg_with_formatting: Vec::new(),
- is_literal: parser.is_literal,
- unused_names_lint: PositionalNamedArgsLint { positional_named_args: vec![] },
+ enum ArgRef<'a> {
+ Index(usize),
+ Name(&'a str, Option<Span>),
+ }
+ use ArgRef::*;
+
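+ // Resolve a template position (by index or by name) to an argument, recording
+ // usage, invalid references, and implicitly captured variables along the way.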
+ let mut lookup_arg = |arg: ArgRef<'_>,
+ span: Option<Span>,
+ used_as: PositionUsedAs,
+ kind: FormatArgPositionKind|
+ -> FormatArgPosition {
+ let index = match arg {
+ Index(index) => {
+ if let Some(arg) = args.by_index(index) {
+ used[index] = true;
+ if arg.kind.ident().is_some() {
+ // This was a named argument, but it was used as a positional argument.
+ numeric_references_to_named_arg.push((index, span, used_as));
+ }
+ Ok(index)
+ } else {
+ // Doesn't exist as an explicit argument.
+ invalid_refs.push((index, span, used_as, kind));
+ Err(index)
+ }
+ }
+ Name(name, span) => {
+ let name = Symbol::intern(name);
+ if let Some((index, _)) = args.by_name(name) {
+ // Name found in `args`, so we resolve it to its index.
+ if index < args.explicit_args().len() {
+ // Mark it as used, if it was an explicit argument.
+ used[index] = true;
+ }
+ Ok(index)
+ } else {
+ // Name not found in `args`, so we add it as an implicitly captured argument.
+ let span = span.unwrap_or(fmt_span);
+ let ident = Ident::new(name, span);
+ let expr = if is_literal {
+ ecx.expr_ident(span, ident)
+ } else {
+ // For the moment capturing variables from format strings expanded from macros is
+ // disabled (see RFC #2795)
+ ecx.struct_span_err(span, &format!("there is no argument named `{name}`"))
+ .note(format!("did you intend to capture a variable `{name}` from the surrounding scope?"))
+ .note("to avoid ambiguity, `format_args!` cannot capture variables when the format string is expanded from a macro")
+ .emit();
+ DummyResult::raw_expr(span, true)
+ };
+ Ok(args.add(FormatArgument { kind: FormatArgumentKind::Captured(ident), expr }))
+ }
+ }
+ };
+ FormatArgPosition { index, kind, span }
};
- // This needs to happen *after* the Parser has consumed all pieces to create all the spans
- let pieces = unverified_pieces
- .into_iter()
- .map(|mut piece| {
- cx.verify_piece(&piece);
- cx.resolve_name_inplace(&mut piece);
- piece
- })
- .collect::<Vec<_>>();
+ let mut template = Vec::new();
+ let mut unfinished_literal = String::new();
+ let mut placeholder_index = 0;
- let numbered_position_args = pieces.iter().any(|arg: &parse::Piece<'_>| match *arg {
- parse::String(_) => false,
- parse::NextArgument(arg) => matches!(arg.position, parse::Position::ArgumentIs(..)),
- });
+ for piece in pieces {
+ match piece {
+ parse::Piece::String(s) => {
+ unfinished_literal.push_str(s);
+ }
+ parse::Piece::NextArgument(parse::Argument { position, position_span, format }) => {
+ if !unfinished_literal.is_empty() {
+ template.push(FormatArgsPiece::Literal(Symbol::intern(&unfinished_literal)));
+ unfinished_literal.clear();
+ }
- cx.build_index_map();
+ let span = parser.arg_places.get(placeholder_index).and_then(|&s| to_span(s));
+ placeholder_index += 1;
+
+ let position_span = to_span(position_span);
+ let argument = match position {
+ parse::ArgumentImplicitlyIs(i) => lookup_arg(
+ Index(i),
+ position_span,
+ Placeholder(span),
+ FormatArgPositionKind::Implicit,
+ ),
+ parse::ArgumentIs(i) => lookup_arg(
+ Index(i),
+ position_span,
+ Placeholder(span),
+ FormatArgPositionKind::Number,
+ ),
+ parse::ArgumentNamed(name) => lookup_arg(
+ Name(name, position_span),
+ position_span,
+ Placeholder(span),
+ FormatArgPositionKind::Named,
+ ),
+ };
- let mut arg_index_consumed = vec![0usize; cx.arg_index_map.len()];
+ let alignment = match format.align {
+ parse::AlignUnknown => None,
+ parse::AlignLeft => Some(FormatAlignment::Left),
+ parse::AlignRight => Some(FormatAlignment::Right),
+ parse::AlignCenter => Some(FormatAlignment::Center),
+ };
- for piece in pieces {
- if let Some(piece) = cx.build_piece(&piece, &mut arg_index_consumed) {
- let s = cx.build_literal_string();
- cx.str_pieces.push(s);
- cx.pieces.push(piece);
+ let format_trait = match format.ty {
+ "" => FormatTrait::Display,
+ "?" => FormatTrait::Debug,
+ "e" => FormatTrait::LowerExp,
+ "E" => FormatTrait::UpperExp,
+ "o" => FormatTrait::Octal,
+ "p" => FormatTrait::Pointer,
+ "b" => FormatTrait::Binary,
+ "x" => FormatTrait::LowerHex,
+ "X" => FormatTrait::UpperHex,
+ _ => {
+ invalid_placeholder_type_error(ecx, format.ty, format.ty_span, fmt_span);
+ FormatTrait::Display
+ }
+ };
+
+ let precision_span = format.precision_span.and_then(to_span);
+ let precision = match format.precision {
+ parse::CountIs(n) => Some(FormatCount::Literal(n)),
+ parse::CountIsName(name, name_span) => Some(FormatCount::Argument(lookup_arg(
+ Name(name, to_span(name_span)),
+ precision_span,
+ Precision,
+ FormatArgPositionKind::Named,
+ ))),
+ parse::CountIsParam(i) => Some(FormatCount::Argument(lookup_arg(
+ Index(i),
+ precision_span,
+ Precision,
+ FormatArgPositionKind::Number,
+ ))),
+ parse::CountIsStar(i) => Some(FormatCount::Argument(lookup_arg(
+ Index(i),
+ precision_span,
+ Precision,
+ FormatArgPositionKind::Implicit,
+ ))),
+ parse::CountImplied => None,
+ };
+
+ let width_span = format.width_span.and_then(to_span);
+ let width = match format.width {
+ parse::CountIs(n) => Some(FormatCount::Literal(n)),
+ parse::CountIsName(name, name_span) => Some(FormatCount::Argument(lookup_arg(
+ Name(name, to_span(name_span)),
+ width_span,
+ Width,
+ FormatArgPositionKind::Named,
+ ))),
+ parse::CountIsParam(i) => Some(FormatCount::Argument(lookup_arg(
+ Index(i),
+ width_span,
+ Width,
+ FormatArgPositionKind::Number,
+ ))),
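+ // `*` is only valid as a precision (`{:.*}`), so the parser never produces it here.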
+ parse::CountIsStar(_) => unreachable!(),
+ parse::CountImplied => None,
+ };
+
+ template.push(FormatArgsPiece::Placeholder(FormatPlaceholder {
+ argument,
+ span,
+ format_trait,
+ format_options: FormatOptions {
+ fill: format.fill,
+ alignment,
+ flags: format.flags,
+ precision,
+ width,
+ },
+ }));
+ }
}
}
- if !cx.literal.is_empty() {
- let s = cx.build_literal_string();
- cx.str_pieces.push(s);
+ if !unfinished_literal.is_empty() {
+ template.push(FormatArgsPiece::Literal(Symbol::intern(&unfinished_literal)));
}
- if !cx.invalid_refs.is_empty() {
- cx.report_invalid_references(numbered_position_args);
+ if !invalid_refs.is_empty() {
+ report_invalid_references(ecx, &invalid_refs, &template, fmt_span, &args, parser);
}
- // Make sure that all arguments were used and all arguments have types.
- let errs = cx
- .arg_types
+ let unused = used
.iter()
.enumerate()
- .filter(|(i, ty)| ty.is_empty() && !cx.count_positions.contains_key(&i))
+ .filter(|&(_, used)| !used)
.map(|(i, _)| {
- let msg = if named_pos.contains(&i) {
- // named argument
+ let msg = if let FormatArgumentKind::Named(_) = args.explicit_args()[i].kind {
"named argument never used"
} else {
- // positional argument
"argument never used"
};
- (cx.args[i].span, msg)
+ (args.explicit_args()[i].expr.span, msg)
})
.collect::<Vec<_>>();
- let errs_len = errs.len();
- if !errs.is_empty() {
- let args_used = cx.arg_types.len() - errs_len;
- let args_unused = errs_len;
+ if !unused.is_empty() {
+ // If there are a lot of unused arguments,
+ // check whether the format string looks like another syntax (printf / shell).
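+ // For example, `println!("%d %s", 1, "two")` leaves both arguments unused and is
+ // likely printf-style syntax rather than a Rust format string.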
+ let detect_foreign_fmt = unused.len() > args.explicit_args().len() / 2;
+ report_missing_placeholders(ecx, unused, detect_foreign_fmt, str_style, fmt_str, fmt_span);
+ }
- let mut diag = {
- if let [(sp, msg)] = &errs[..] {
- let mut diag = cx.ecx.struct_span_err(*sp, *msg);
- diag.span_label(*sp, *msg);
- diag
- } else {
- let mut diag = cx.ecx.struct_span_err(
- errs.iter().map(|&(sp, _)| sp).collect::<Vec<Span>>(),
- "multiple unused formatting arguments",
- );
- diag.span_label(cx.fmtsp, "multiple missing formatting specifiers");
- for (sp, msg) in errs {
- diag.span_label(sp, msg);
+ // Only check for unused named argument names if there are no other errors, to avoid
+ // adding too much noise to the output, such as when a named argument is entirely unused.
+ if invalid_refs.is_empty() && ecx.sess.err_count() == 0 {
+ for &(index, span, used_as) in &numeric_references_to_named_arg {
+ let (position_sp_to_replace, position_sp_for_msg) = match used_as {
+ Placeholder(pspan) => (span, pspan),
+ Precision => {
+ // Strip the leading `.` for precision.
+ let span = span.map(|span| span.with_lo(span.lo() + BytePos(1)));
+ (span, span)
}
- diag
- }
- };
+ Width => (span, span),
+ };
+ let arg_name = args.explicit_args()[index].kind.ident().unwrap();
+ ecx.buffered_early_lint.push(BufferedEarlyLint {
+ span: arg_name.span.into(),
+ msg: format!("named argument `{}` is not used by name", arg_name.name).into(),
+ node_id: rustc_ast::CRATE_NODE_ID,
+ lint_id: LintId::of(&NAMED_ARGUMENTS_USED_POSITIONALLY),
+ diagnostic: BuiltinLintDiagnostics::NamedArgumentUsedPositionally {
+ position_sp_to_replace,
+ position_sp_for_msg,
+ named_arg_sp: arg_name.span,
+ named_arg_name: arg_name.name.to_string(),
+ is_formatting_arg: matches!(used_as, Width | Precision),
+ },
+ });
+ }
+ }
- // Used to ensure we only report translations for *one* kind of foreign format.
- let mut found_foreign = false;
- // Decide if we want to look for foreign formatting directives.
- if args_used < args_unused {
- use super::format_foreign as foreign;
+ Ok(FormatArgs { span: fmt_span, template, arguments: args })
+}
- // The set of foreign substitutions we've explained. This prevents spamming the user
- // with `%d should be written as {}` over and over again.
- let mut explained = FxHashSet::default();
+fn invalid_placeholder_type_error(
+ ecx: &ExtCtxt<'_>,
+ ty: &str,
+ ty_span: Option<rustc_parse_format::InnerSpan>,
+ fmt_span: Span,
+) {
+ let sp = ty_span.map(|sp| fmt_span.from_inner(InnerSpan::new(sp.start, sp.end)));
+ let mut err =
+ ecx.struct_span_err(sp.unwrap_or(fmt_span), &format!("unknown format trait `{}`", ty));
+ err.note(
+ "the only appropriate formatting traits are:\n\
+ - ``, which uses the `Display` trait\n\
+ - `?`, which uses the `Debug` trait\n\
+ - `e`, which uses the `LowerExp` trait\n\
+ - `E`, which uses the `UpperExp` trait\n\
+ - `o`, which uses the `Octal` trait\n\
+ - `p`, which uses the `Pointer` trait\n\
+ - `b`, which uses the `Binary` trait\n\
+ - `x`, which uses the `LowerHex` trait\n\
+ - `X`, which uses the `UpperHex` trait",
+ );
+ if let Some(sp) = sp {
+ for (fmt, name) in &[
+ ("", "Display"),
+ ("?", "Debug"),
+ ("e", "LowerExp"),
+ ("E", "UpperExp"),
+ ("o", "Octal"),
+ ("p", "Pointer"),
+ ("b", "Binary"),
+ ("x", "LowerHex"),
+ ("X", "UpperHex"),
+ ] {
+ err.tool_only_span_suggestion(
+ sp,
+ &format!("use the `{}` trait", name),
+ *fmt,
+ Applicability::MaybeIncorrect,
+ );
+ }
+ }
+ err.emit();
+}
- macro_rules! check_foreign {
- ($kind:ident) => {{
- let mut show_doc_note = false;
+fn report_missing_placeholders(
+ ecx: &mut ExtCtxt<'_>,
+ unused: Vec<(Span, &str)>,
+ detect_foreign_fmt: bool,
+ str_style: Option<usize>,
+ fmt_str: &str,
+ fmt_span: Span,
+) {
+ let mut diag = if let &[(span, msg)] = &unused[..] {
+ let mut diag = ecx.struct_span_err(span, msg);
+ diag.span_label(span, msg);
+ diag
+ } else {
+ let mut diag = ecx.struct_span_err(
+ unused.iter().map(|&(sp, _)| sp).collect::<Vec<Span>>(),
+ "multiple unused formatting arguments",
+ );
+ diag.span_label(fmt_span, "multiple missing formatting specifiers");
+ for &(span, msg) in &unused {
+ diag.span_label(span, msg);
+ }
+ diag
+ };
- let mut suggestions = vec![];
- // account for `"` and account for raw strings `r#`
- let padding = str_style.map(|i| i + 2).unwrap_or(1);
- for sub in foreign::$kind::iter_subs(fmt_str, padding) {
- let (trn, success) = match sub.translate() {
- Ok(trn) => (trn, true),
- Err(Some(msg)) => (msg, false),
+ // Used to ensure we only report translations for *one* kind of foreign format.
+ let mut found_foreign = false;
+
+ // Decide if we want to look for foreign formatting directives.
+ if detect_foreign_fmt {
+ use super::format_foreign as foreign;
+
+ // The set of foreign substitutions we've explained. This prevents spamming the user
+ // with `%d should be written as {}` over and over again.
+ let mut explained = FxHashSet::default();
+
+ macro_rules! check_foreign {
+ ($kind:ident) => {{
+ let mut show_doc_note = false;
+
+ let mut suggestions = vec![];
+ // account for `"` and account for raw strings `r#`
+ let padding = str_style.map(|i| i + 2).unwrap_or(1);
+ for sub in foreign::$kind::iter_subs(fmt_str, padding) {
+ let (trn, success) = match sub.translate() {
+ Ok(trn) => (trn, true),
+ Err(Some(msg)) => (msg, false),
+
+ // If it has no translation, don't call it out specifically.
+ _ => continue,
+ };
+
+ let pos = sub.position();
+ let sub = String::from(sub.as_str());
+ if explained.contains(&sub) {
+ continue;
+ }
+ explained.insert(sub.clone());
- // If it has no translation, don't call it out specifically.
- _ => continue,
- };
+ if !found_foreign {
+ found_foreign = true;
+ show_doc_note = true;
+ }
- let pos = sub.position();
- let sub = String::from(sub.as_str());
- if explained.contains(&sub) {
- continue;
- }
- explained.insert(sub.clone());
+ if let Some(inner_sp) = pos {
+ let sp = fmt_span.from_inner(inner_sp);
- if !found_foreign {
- found_foreign = true;
- show_doc_note = true;
+ if success {
+ suggestions.push((sp, trn));
+ } else {
+ diag.span_note(
+ sp,
+ &format!("format specifiers use curly braces, and {}", trn),
+ );
}
-
- if let Some(inner_sp) = pos {
- let sp = fmt_sp.from_inner(inner_sp);
-
- if success {
- suggestions.push((sp, trn));
- } else {
- diag.span_note(
- sp,
- &format!("format specifiers use curly braces, and {}", trn),
- );
- }
+ } else {
+ if success {
+ diag.help(&format!("`{}` should be written as `{}`", sub, trn));
} else {
- if success {
- diag.help(&format!("`{}` should be written as `{}`", sub, trn));
- } else {
- diag.note(&format!(
- "`{}` should use curly braces, and {}",
- sub, trn
- ));
- }
+ diag.note(&format!("`{}` should use curly braces, and {}", sub, trn));
}
}
+ }
- if show_doc_note {
- diag.note(concat!(
- stringify!($kind),
- " formatting not supported; see the documentation for `std::fmt`",
- ));
- }
- if suggestions.len() > 0 {
- diag.multipart_suggestion(
- "format specifiers use curly braces",
- suggestions,
- Applicability::MachineApplicable,
- );
- }
- }};
- }
-
- check_foreign!(printf);
- if !found_foreign {
- check_foreign!(shell);
- }
- }
- if !found_foreign && errs_len == 1 {
- diag.span_label(cx.fmtsp, "formatting specifier missing");
+ if show_doc_note {
+ diag.note(concat!(
+ stringify!($kind),
+ " formatting not supported; see the documentation for `std::fmt`",
+ ));
+ }
+ if suggestions.len() > 0 {
+ diag.multipart_suggestion(
+ "format specifiers use curly braces",
+ suggestions,
+ Applicability::MachineApplicable,
+ );
+ }
+ }};
}
- diag.emit();
- } else if cx.invalid_refs.is_empty() && cx.ecx.sess.err_count() == 0 {
- // Only check for unused named argument names if there are no other errors to avoid causing
- // too much noise in output errors, such as when a named argument is entirely unused.
- create_lints_for_named_arguments_used_positionally(&mut cx);
+ check_foreign!(printf);
+ if !found_foreign {
+ check_foreign!(shell);
+ }
+ }
+ if !found_foreign && unused.len() == 1 {
+ diag.span_label(fmt_span, "formatting specifier missing");
}
- cx.into_expr()
+ diag.emit();
}
-fn may_contain_yield_point(e: &ast::Expr) -> bool {
- struct MayContainYieldPoint(bool);
+/// Handle invalid references to positional arguments. Output different
+/// errors for the case where all arguments are positional and for when
+/// there are named arguments or numbered positional arguments in the
+/// format string.
+fn report_invalid_references(
+ ecx: &mut ExtCtxt<'_>,
+ invalid_refs: &[(usize, Option<Span>, PositionUsedAs, FormatArgPositionKind)],
+ template: &[FormatArgsPiece],
+ fmt_span: Span,
+ args: &FormatArguments,
+ parser: parse::Parser<'_>,
+) {
+ let num_args_desc = match args.explicit_args().len() {
+ 0 => "no arguments were given".to_string(),
+ 1 => "there is 1 argument".to_string(),
+ n => format!("there are {} arguments", n),
+ };
+
+ let mut e;
- impl Visitor<'_> for MayContainYieldPoint {
- fn visit_expr(&mut self, e: &ast::Expr) {
- if let ast::ExprKind::Await(_) | ast::ExprKind::Yield(_) = e.kind {
- self.0 = true;
- } else {
- visit::walk_expr(self, e);
+ if template.iter().all(|piece| match piece {
+ FormatArgsPiece::Placeholder(FormatPlaceholder {
+ argument: FormatArgPosition { kind: FormatArgPositionKind::Number, .. },
+ ..
+ }) => false,
+ FormatArgsPiece::Placeholder(FormatPlaceholder {
+ format_options:
+ FormatOptions {
+ precision:
+ Some(FormatCount::Argument(FormatArgPosition {
+ kind: FormatArgPositionKind::Number,
+ ..
+ })),
+ ..
+ }
+ | FormatOptions {
+ width:
+ Some(FormatCount::Argument(FormatArgPosition {
+ kind: FormatArgPositionKind::Number,
+ ..
+ })),
+ ..
+ },
+ ..
+ }) => false,
+ _ => true,
+ }) {
+ // There are no numeric positions.
+ // Collect all the implicit positions:
+ let mut spans = Vec::new();
+ let mut num_placeholders = 0;
+ for piece in template {
+ let mut placeholder = None;
+ // `{arg:.*}`
+ if let FormatArgsPiece::Placeholder(FormatPlaceholder {
+ format_options:
+ FormatOptions {
+ precision:
+ Some(FormatCount::Argument(FormatArgPosition {
+ span,
+ kind: FormatArgPositionKind::Implicit,
+ ..
+ })),
+ ..
+ },
+ ..
+ }) = piece
+ {
+ placeholder = *span;
+ num_placeholders += 1;
}
+ // `{}`
+ if let FormatArgsPiece::Placeholder(FormatPlaceholder {
+ argument: FormatArgPosition { kind: FormatArgPositionKind::Implicit, .. },
+ span,
+ ..
+ }) = piece
+ {
+ placeholder = *span;
+ num_placeholders += 1;
+ }
+ // For `{:.*}`, we only push one span.
+ spans.extend(placeholder);
}
-
- fn visit_mac_call(&mut self, _: &ast::MacCall) {
- self.0 = true;
+ let span = if spans.is_empty() {
+ MultiSpan::from_span(fmt_span)
+ } else {
+ MultiSpan::from_spans(spans)
+ };
+ e = ecx.struct_span_err(
+ span,
+ &format!(
+ "{} positional argument{} in format string, but {}",
+ num_placeholders,
+ pluralize!(num_placeholders),
+ num_args_desc,
+ ),
+ );
+ for arg in args.explicit_args() {
+ e.span_label(arg.expr.span, "");
+ }
+ // Point out `{:.*}` placeholders: those take an extra argument.
+ let mut has_precision_star = false;
+ for piece in template {
+ if let FormatArgsPiece::Placeholder(FormatPlaceholder {
+ format_options:
+ FormatOptions {
+ precision:
+ Some(FormatCount::Argument(FormatArgPosition {
+ index,
+ span: Some(span),
+ kind: FormatArgPositionKind::Implicit,
+ ..
+ })),
+ ..
+ },
+ ..
+ }) = piece
+ {
+ let (Ok(index) | Err(index)) = index;
+ has_precision_star = true;
+ e.span_label(
+ *span,
+ &format!(
+ "this precision flag adds an extra required argument at position {}, which is why there {} expected",
+ index,
+ if num_placeholders == 1 {
+ "is 1 argument".to_string()
+ } else {
+ format!("are {} arguments", num_placeholders)
+ },
+ ),
+ );
+ }
+ }
+ if has_precision_star {
+ e.note("positional arguments are zero-based");
}
+ } else {
+ let mut indexes: Vec<_> = invalid_refs.iter().map(|&(index, _, _, _)| index).collect();
+ // Avoid `invalid reference to positional arguments 7 and 7 (there is 1 argument)`
+ // for `println!("{7:7$}", 1);`
+ indexes.sort();
+ indexes.dedup();
+ let span: MultiSpan = if !parser.is_literal || parser.arg_places.is_empty() {
+ MultiSpan::from_span(fmt_span)
+ } else {
+ MultiSpan::from_spans(invalid_refs.iter().filter_map(|&(_, span, _, _)| span).collect())
+ };
+ let arg_list = if let &[index] = &indexes[..] {
+ format!("argument {index}")
+ } else {
+ let tail = indexes.pop().unwrap();
+ format!(
+ "arguments {head} and {tail}",
+ head = indexes.into_iter().map(|i| i.to_string()).collect::<Vec<_>>().join(", ")
+ )
+ };
+ e = ecx.struct_span_err(
+ span,
+ &format!("invalid reference to positional {} ({})", arg_list, num_args_desc),
+ );
+ e.note("positional arguments are zero-based");
+ }
- fn visit_attribute(&mut self, _: &ast::Attribute) {
- // Conservatively assume this may be a proc macro attribute in
- // expression position.
- self.0 = true;
+ if template.iter().any(|piece| match piece {
+ FormatArgsPiece::Placeholder(FormatPlaceholder { format_options: f, .. }) => {
+ *f != FormatOptions::default()
}
+ _ => false,
+ }) {
+ e.note("for information about formatting flags, visit https://doc.rust-lang.org/std/fmt/index.html");
+ }
- fn visit_item(&mut self, _: &ast::Item) {
- // Do not recurse into nested items.
+ e.emit();
+}
+
+fn expand_format_args_impl<'cx>(
+ ecx: &'cx mut ExtCtxt<'_>,
+ mut sp: Span,
+ tts: TokenStream,
+ nl: bool,
+) -> Box<dyn base::MacResult + 'cx> {
+ sp = ecx.with_def_site_ctxt(sp);
+ match parse_args(ecx, sp, tts) {
+ Ok((efmt, args)) => {
+ if let Ok(format_args) = make_format_args(ecx, efmt, args, nl) {
+ MacEager::expr(expand_parsed_format_args(ecx, format_args))
+ } else {
+ MacEager::expr(DummyResult::raw_expr(sp, true))
+ }
+ }
+ Err(mut err) => {
+ err.emit();
+ DummyResult::any(sp)
}
}
+}
- let mut visitor = MayContainYieldPoint(false);
- visitor.visit_expr(e);
- visitor.0
+pub fn expand_format_args<'cx>(
+ ecx: &'cx mut ExtCtxt<'_>,
+ sp: Span,
+ tts: TokenStream,
+) -> Box<dyn base::MacResult + 'cx> {
+ expand_format_args_impl(ecx, sp, tts, false)
+}
+
+pub fn expand_format_args_nl<'cx>(
+ ecx: &'cx mut ExtCtxt<'_>,
+ sp: Span,
+ tts: TokenStream,
+) -> Box<dyn base::MacResult + 'cx> {
+ expand_format_args_impl(ecx, sp, tts, true)
}
diff --git a/compiler/rustc_builtin_macros/src/format/ast.rs b/compiler/rustc_builtin_macros/src/format/ast.rs
new file mode 100644
index 000000000..01dbffa21
--- /dev/null
+++ b/compiler/rustc_builtin_macros/src/format/ast.rs
@@ -0,0 +1,240 @@
+use rustc_ast::ptr::P;
+use rustc_ast::Expr;
+use rustc_data_structures::fx::FxHashMap;
+use rustc_span::symbol::{Ident, Symbol};
+use rustc_span::Span;
+
+// Definitions:
+//
+// format_args!("hello {abc:.xyz$}!!", abc="world");
+// └──────────────────────────────────────────────┘
+// FormatArgs
+//
+// format_args!("hello {abc:.xyz$}!!", abc="world");
+// └─────────┘
+// argument
+//
+// format_args!("hello {abc:.xyz$}!!", abc="world");
+// └───────────────────┘
+// template
+//
+// format_args!("hello {abc:.xyz$}!!", abc="world");
+// └────┘└─────────┘└┘
+// pieces
+//
+// format_args!("hello {abc:.xyz$}!!", abc="world");
+// └────┘ └┘
+// literal pieces
+//
+// format_args!("hello {abc:.xyz$}!!", abc="world");
+// └─────────┘
+// placeholder
+//
+// format_args!("hello {abc:.xyz$}!!", abc="world");
+// └─┘ └─┘
+// positions (could be names, numbers, empty, or `*`)
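+//
+// As a rough sketch, the template of the example above corresponds to:
+//
+//     [
+//         FormatArgsPiece::Literal("hello "),
+//         FormatArgsPiece::Placeholder(FormatPlaceholder {
+//             argument: /* position of `abc` */,
+//             format_trait: FormatTrait::Display,
+//             format_options: FormatOptions {
+//                 precision: Some(FormatCount::Argument(/* position of `xyz` */)),
+//                 ..
+//             },
+//             ..
+//         }),
+//         FormatArgsPiece::Literal("!!"),
+//     ]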
+
+/// (Parsed) format args.
+///
+/// Basically the "AST" for a complete `format_args!()`.
+///
+/// E.g., `format_args!("hello {name}");`.
+#[derive(Clone, Debug)]
+pub struct FormatArgs {
+ pub span: Span,
+ pub template: Vec<FormatArgsPiece>,
+ pub arguments: FormatArguments,
+}
+
+/// A piece of a format template string.
+///
+/// E.g. "hello" or "{name}".
+#[derive(Clone, Debug)]
+pub enum FormatArgsPiece {
+ Literal(Symbol),
+ Placeholder(FormatPlaceholder),
+}
+
+/// The arguments to format_args!().
+///
+/// E.g. `1, 2, name="ferris", n=3`,
+/// but also implicit captured arguments like `x` in `format_args!("{x}")`.
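+///
+/// For example, in `format_args!("{x} {} {n}", 1, n = 2)` the arguments end up
+/// ordered as `[1, n = 2, x]`: unnamed first, then named, then implicitly captured.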
+#[derive(Clone, Debug)]
+pub struct FormatArguments {
+ arguments: Vec<FormatArgument>,
+ num_unnamed_args: usize,
+ num_explicit_args: usize,
+ names: FxHashMap<Symbol, usize>,
+}
+
+impl FormatArguments {
+ pub fn new() -> Self {
+ Self {
+ arguments: Vec::new(),
+ names: FxHashMap::default(),
+ num_unnamed_args: 0,
+ num_explicit_args: 0,
+ }
+ }
+
+ pub fn add(&mut self, arg: FormatArgument) -> usize {
+ let index = self.arguments.len();
+ if let Some(name) = arg.kind.ident() {
+ self.names.insert(name.name, index);
+ } else if self.names.is_empty() {
+ // Only count the unnamed args before the first named arg.
+ // (Any later ones are errors.)
+ self.num_unnamed_args += 1;
+ }
+ if !matches!(arg.kind, FormatArgumentKind::Captured(..)) {
+ // This is an explicit argument.
+ // Make sure that all arguments so far are explicit.
+ assert_eq!(
+ self.num_explicit_args,
+ self.arguments.len(),
+ "captured arguments must be added last"
+ );
+ self.num_explicit_args += 1;
+ }
+ self.arguments.push(arg);
+ index
+ }
+
+ pub fn by_name(&self, name: Symbol) -> Option<(usize, &FormatArgument)> {
+ let i = *self.names.get(&name)?;
+ Some((i, &self.arguments[i]))
+ }
+
+ pub fn by_index(&self, i: usize) -> Option<&FormatArgument> {
+ (i < self.num_explicit_args).then(|| &self.arguments[i])
+ }
+
+ pub fn unnamed_args(&self) -> &[FormatArgument] {
+ &self.arguments[..self.num_unnamed_args]
+ }
+
+ pub fn named_args(&self) -> &[FormatArgument] {
+ &self.arguments[self.num_unnamed_args..self.num_explicit_args]
+ }
+
+ pub fn explicit_args(&self) -> &[FormatArgument] {
+ &self.arguments[..self.num_explicit_args]
+ }
+
+ pub fn into_vec(self) -> Vec<FormatArgument> {
+ self.arguments
+ }
+}
+
+#[derive(Clone, Debug)]
+pub struct FormatArgument {
+ pub kind: FormatArgumentKind,
+ pub expr: P<Expr>,
+}
+
+#[derive(Clone, Debug)]
+pub enum FormatArgumentKind {
+ /// `format_args(…, arg)`
+ Normal,
+ /// `format_args(…, arg = 1)`
+ Named(Ident),
+ /// `format_args("… {arg} …")`
+ Captured(Ident),
+}
+
+impl FormatArgumentKind {
+ pub fn ident(&self) -> Option<Ident> {
+ match self {
+ &Self::Normal => None,
+ &Self::Named(id) => Some(id),
+ &Self::Captured(id) => Some(id),
+ }
+ }
+}
+
+#[derive(Clone, Debug, PartialEq, Eq)]
+pub struct FormatPlaceholder {
+ /// Index into [`FormatArgs::arguments`].
+ pub argument: FormatArgPosition,
+ /// The span inside the format string for the full `{…}` placeholder.
+ pub span: Option<Span>,
+ /// `{}`, `{:?}`, or `{:x}`, etc.
+ pub format_trait: FormatTrait,
+ /// `{}` or `{:.5}` or `{:-^20}`, etc.
+ pub format_options: FormatOptions,
+}
+
+#[derive(Clone, Debug, PartialEq, Eq)]
+pub struct FormatArgPosition {
+ /// Which argument this position refers to (Ok),
+ /// or would've referred to if it existed (Err).
+ pub index: Result<usize, usize>,
+ /// What kind of position this is. See [`FormatArgPositionKind`].
+ pub kind: FormatArgPositionKind,
+ /// The span of the name or number.
+ pub span: Option<Span>,
+}
+
+#[derive(Copy, Clone, Debug, PartialEq, Eq)]
+pub enum FormatArgPositionKind {
+ /// `{}` or `{:.*}`
+ Implicit,
+ /// `{1}` or `{:1$}` or `{:.1$}`
+ Number,
+ /// `{a}` or `{:a$}` or `{:.a$}`
+ Named,
+}
+
+#[derive(Copy, Clone, Debug, Hash, PartialEq, Eq)]
+pub enum FormatTrait {
+ /// `{}`
+ Display,
+ /// `{:?}`
+ Debug,
+ /// `{:e}`
+ LowerExp,
+ /// `{:E}`
+ UpperExp,
+ /// `{:o}`
+ Octal,
+ /// `{:p}`
+ Pointer,
+ /// `{:b}`
+ Binary,
+ /// `{:x}`
+ LowerHex,
+ /// `{:X}`
+ UpperHex,
+}
+
+#[derive(Clone, Debug, Default, PartialEq, Eq)]
+pub struct FormatOptions {
+ /// The width. E.g. `{:5}` or `{:width$}`.
+ pub width: Option<FormatCount>,
+ /// The precision. E.g. `{:.5}` or `{:.precision$}`.
+ pub precision: Option<FormatCount>,
+ /// The alignment. E.g. `{:>}` or `{:<}` or `{:^}`.
+ pub alignment: Option<FormatAlignment>,
+ /// The fill character. E.g. the `.` in `{:.>10}`.
+ pub fill: Option<char>,
+ /// The `+`, `-`, `0`, `#`, `x?` and `X?` flags.
+ pub flags: u32,
+}
+
+#[derive(Clone, Debug, PartialEq, Eq)]
+pub enum FormatAlignment {
+ /// `{:<}`
+ Left,
+ /// `{:>}`
+ Right,
+ /// `{:^}`
+ Center,
+}
+
+#[derive(Clone, Debug, PartialEq, Eq)]
+pub enum FormatCount {
+ /// `{:5}` or `{:.5}`
+ Literal(usize),
+ /// `{:.*}`, `{:.5$}`, or `{:a$}`, etc.
+ Argument(FormatArgPosition),
+}
diff --git a/compiler/rustc_builtin_macros/src/format/expand.rs b/compiler/rustc_builtin_macros/src/format/expand.rs
new file mode 100644
index 000000000..9dde5efcb
--- /dev/null
+++ b/compiler/rustc_builtin_macros/src/format/expand.rs
@@ -0,0 +1,353 @@
+use super::*;
+use rustc_ast as ast;
+use rustc_ast::visit::{self, Visitor};
+use rustc_ast::{BlockCheckMode, UnsafeSource};
+use rustc_data_structures::fx::FxIndexSet;
+use rustc_span::{sym, symbol::kw};
+
+#[derive(Copy, Clone, Debug, Hash, PartialEq, Eq)]
+enum ArgumentType {
+ Format(FormatTrait),
+ Usize,
+}
+
+fn make_argument(ecx: &ExtCtxt<'_>, sp: Span, arg: P<ast::Expr>, ty: ArgumentType) -> P<ast::Expr> {
+ // Generate:
+ // ::core::fmt::ArgumentV1::new_…(arg)
+ use ArgumentType::*;
+ use FormatTrait::*;
+ ecx.expr_call_global(
+ sp,
+ ecx.std_path(&[
+ sym::fmt,
+ sym::ArgumentV1,
+ match ty {
+ Format(Display) => sym::new_display,
+ Format(Debug) => sym::new_debug,
+ Format(LowerExp) => sym::new_lower_exp,
+ Format(UpperExp) => sym::new_upper_exp,
+ Format(Octal) => sym::new_octal,
+ Format(Pointer) => sym::new_pointer,
+ Format(Binary) => sym::new_binary,
+ Format(LowerHex) => sym::new_lower_hex,
+ Format(UpperHex) => sym::new_upper_hex,
+ Usize => sym::from_usize,
+ },
+ ]),
+ vec![arg],
+ )
+}
+
+fn make_count(
+ ecx: &ExtCtxt<'_>,
+ sp: Span,
+ count: &Option<FormatCount>,
+ argmap: &mut FxIndexSet<(usize, ArgumentType)>,
+) -> P<ast::Expr> {
+ // Generate:
+ // ::core::fmt::rt::v1::Count::…(…)
+ match count {
+ Some(FormatCount::Literal(n)) => ecx.expr_call_global(
+ sp,
+ ecx.std_path(&[sym::fmt, sym::rt, sym::v1, sym::Count, sym::Is]),
+ vec![ecx.expr_usize(sp, *n)],
+ ),
+ Some(FormatCount::Argument(arg)) => {
+ if let Ok(arg_index) = arg.index {
+ let (i, _) = argmap.insert_full((arg_index, ArgumentType::Usize));
+ ecx.expr_call_global(
+ sp,
+ ecx.std_path(&[sym::fmt, sym::rt, sym::v1, sym::Count, sym::Param]),
+ vec![ecx.expr_usize(sp, i)],
+ )
+ } else {
+ DummyResult::raw_expr(sp, true)
+ }
+ }
+ None => ecx.expr_path(ecx.path_global(
+ sp,
+ ecx.std_path(&[sym::fmt, sym::rt, sym::v1, sym::Count, sym::Implied]),
+ )),
+ }
+}
+
+fn make_format_spec(
+ ecx: &ExtCtxt<'_>,
+ sp: Span,
+ placeholder: &FormatPlaceholder,
+ argmap: &mut FxIndexSet<(usize, ArgumentType)>,
+) -> P<ast::Expr> {
+ // Generate:
+ // ::core::fmt::rt::v1::Argument {
+ // position: 0usize,
+ // format: ::core::fmt::rt::v1::FormatSpec {
+ // fill: ' ',
+ // align: ::core::fmt::rt::v1::Alignment::Unknown,
+ // flags: 0u32,
+ // precision: ::core::fmt::rt::v1::Count::Implied,
+ // width: ::core::fmt::rt::v1::Count::Implied,
+ // },
+ // }
+ let position = match placeholder.argument.index {
+ Ok(arg_index) => {
+ let (i, _) =
+ argmap.insert_full((arg_index, ArgumentType::Format(placeholder.format_trait)));
+ ecx.expr_usize(sp, i)
+ }
+ Err(_) => DummyResult::raw_expr(sp, true),
+ };
+ let fill = ecx.expr_char(sp, placeholder.format_options.fill.unwrap_or(' '));
+ let align = ecx.expr_path(ecx.path_global(
+ sp,
+ ecx.std_path(&[
+ sym::fmt,
+ sym::rt,
+ sym::v1,
+ sym::Alignment,
+ match placeholder.format_options.alignment {
+ Some(FormatAlignment::Left) => sym::Left,
+ Some(FormatAlignment::Right) => sym::Right,
+ Some(FormatAlignment::Center) => sym::Center,
+ None => sym::Unknown,
+ },
+ ]),
+ ));
+ let flags = ecx.expr_u32(sp, placeholder.format_options.flags);
+ let prec = make_count(ecx, sp, &placeholder.format_options.precision, argmap);
+ let width = make_count(ecx, sp, &placeholder.format_options.width, argmap);
+ ecx.expr_struct(
+ sp,
+ ecx.path_global(sp, ecx.std_path(&[sym::fmt, sym::rt, sym::v1, sym::Argument])),
+ vec![
+ ecx.field_imm(sp, Ident::new(sym::position, sp), position),
+ ecx.field_imm(
+ sp,
+ Ident::new(sym::format, sp),
+ ecx.expr_struct(
+ sp,
+ ecx.path_global(
+ sp,
+ ecx.std_path(&[sym::fmt, sym::rt, sym::v1, sym::FormatSpec]),
+ ),
+ vec![
+ ecx.field_imm(sp, Ident::new(sym::fill, sp), fill),
+ ecx.field_imm(sp, Ident::new(sym::align, sp), align),
+ ecx.field_imm(sp, Ident::new(sym::flags, sp), flags),
+ ecx.field_imm(sp, Ident::new(sym::precision, sp), prec),
+ ecx.field_imm(sp, Ident::new(sym::width, sp), width),
+ ],
+ ),
+ ),
+ ],
+ )
+}
+
+pub fn expand_parsed_format_args(ecx: &mut ExtCtxt<'_>, fmt: FormatArgs) -> P<ast::Expr> {
+ let macsp = ecx.with_def_site_ctxt(ecx.call_site());
+
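+ // Collect the literal pieces of the template; they become the `pieces` slice
+ // passed to `Arguments::new_v1…` below.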
+ let lit_pieces = ecx.expr_array_ref(
+ fmt.span,
+ fmt.template
+ .iter()
+ .enumerate()
+ .filter_map(|(i, piece)| match piece {
+ &FormatArgsPiece::Literal(s) => Some(ecx.expr_str(fmt.span, s)),
+ &FormatArgsPiece::Placeholder(_) => {
+ // Inject an empty string before a placeholder that is not already preceded by a literal piece.
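+ // E.g. for "{} and {}" this yields `["", " and "]`.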
+ if i == 0 || matches!(fmt.template[i - 1], FormatArgsPiece::Placeholder(_)) {
+ Some(ecx.expr_str(fmt.span, kw::Empty))
+ } else {
+ None
+ }
+ }
+ })
+ .collect(),
+ );
+
+ // Whether we'll use the `Arguments::new_v1_formatted` form (true),
+ // or the `Arguments::new_v1` form (false).
+ let mut use_format_options = false;
+
+ // Create a list of all _unique_ (argument, format trait) combinations.
+ // E.g. "{0} {0:x} {0} {1}" -> [(0, Display), (0, LowerHex), (1, Display)]
+ let mut argmap = FxIndexSet::default();
+ for piece in &fmt.template {
+ let FormatArgsPiece::Placeholder(placeholder) = piece else { continue };
+ if placeholder.format_options != Default::default() {
+ // Can't use the basic form if there are any formatting options.
+ use_format_options = true;
+ }
+ if let Ok(index) = placeholder.argument.index {
+ if !argmap.insert((index, ArgumentType::Format(placeholder.format_trait))) {
+ // Duplicate (argument, format trait) combination,
+ // which we'll only put once in the args array.
+ use_format_options = true;
+ }
+ }
+ }
+
+ let format_options = use_format_options.then(|| {
+ // Generate:
+ // &[format_spec_0, format_spec_1, format_spec_2]
+ ecx.expr_array_ref(
+ macsp,
+ fmt.template
+ .iter()
+ .filter_map(|piece| {
+ let FormatArgsPiece::Placeholder(placeholder) = piece else { return None };
+ Some(make_format_spec(ecx, macsp, placeholder, &mut argmap))
+ })
+ .collect(),
+ )
+ });
+
+ let arguments = fmt.arguments.into_vec();
+
+ // If the args array contains exactly all the original arguments once,
+ // in order, we can use a simple array instead of a `match` construction.
+ // However, if there's a yield point in any argument except the first one,
+ // we don't do this, because an ArgumentV1 cannot be kept across yield points.
+ let use_simple_array = argmap.len() == arguments.len()
+ && argmap.iter().enumerate().all(|(i, &(j, _))| i == j)
+ && arguments.iter().skip(1).all(|arg| !may_contain_yield_point(&arg.expr));
+
+ let args = if use_simple_array {
+ // Generate:
+ // &[
+ // ::core::fmt::ArgumentV1::new_display(&arg0),
+ // ::core::fmt::ArgumentV1::new_lower_hex(&arg1),
+ // ::core::fmt::ArgumentV1::new_debug(&arg2),
+ // ]
+ ecx.expr_array_ref(
+ macsp,
+ arguments
+ .into_iter()
+ .zip(argmap)
+ .map(|(arg, (_, ty))| {
+ let sp = arg.expr.span.with_ctxt(macsp.ctxt());
+ make_argument(ecx, sp, ecx.expr_addr_of(sp, arg.expr), ty)
+ })
+ .collect(),
+ )
+ } else {
+ // Generate:
+ // match (&arg0, &arg1, &arg2) {
+ // args => &[
+ // ::core::fmt::ArgumentV1::new_display(args.0),
+ // ::core::fmt::ArgumentV1::new_lower_hex(args.1),
+ // ::core::fmt::ArgumentV1::new_debug(args.0),
+ // ]
+ // }
+ let args_ident = Ident::new(sym::args, macsp);
+ let args = argmap
+ .iter()
+ .map(|&(arg_index, ty)| {
+ if let Some(arg) = arguments.get(arg_index) {
+ let sp = arg.expr.span.with_ctxt(macsp.ctxt());
+ make_argument(
+ ecx,
+ sp,
+ ecx.expr_field(
+ sp,
+ ecx.expr_ident(macsp, args_ident),
+ Ident::new(sym::integer(arg_index), macsp),
+ ),
+ ty,
+ )
+ } else {
+ DummyResult::raw_expr(macsp, true)
+ }
+ })
+ .collect();
+ ecx.expr_addr_of(
+ macsp,
+ ecx.expr_match(
+ macsp,
+ ecx.expr_tuple(
+ macsp,
+ arguments
+ .into_iter()
+ .map(|arg| {
+ ecx.expr_addr_of(arg.expr.span.with_ctxt(macsp.ctxt()), arg.expr)
+ })
+ .collect(),
+ ),
+ vec![ecx.arm(macsp, ecx.pat_ident(macsp, args_ident), ecx.expr_array(macsp, args))],
+ ),
+ )
+ };
+
+ if let Some(format_options) = format_options {
+ // Generate:
+ // ::core::fmt::Arguments::new_v1_formatted(
+ // lit_pieces,
+ // args,
+ // format_options,
+ // unsafe { ::core::fmt::UnsafeArg::new() }
+ // )
+ ecx.expr_call_global(
+ macsp,
+ ecx.std_path(&[sym::fmt, sym::Arguments, sym::new_v1_formatted]),
+ vec![
+ lit_pieces,
+ args,
+ format_options,
+ ecx.expr_block(P(ast::Block {
+ stmts: vec![ecx.stmt_expr(ecx.expr_call_global(
+ macsp,
+ ecx.std_path(&[sym::fmt, sym::UnsafeArg, sym::new]),
+ Vec::new(),
+ ))],
+ id: ast::DUMMY_NODE_ID,
+ rules: BlockCheckMode::Unsafe(UnsafeSource::CompilerGenerated),
+ span: macsp,
+ tokens: None,
+ could_be_bare_literal: false,
+ })),
+ ],
+ )
+ } else {
+ // Generate:
+ // ::core::fmt::Arguments::new_v1(
+ // lit_pieces,
+ // args,
+ // )
+ ecx.expr_call_global(
+ macsp,
+ ecx.std_path(&[sym::fmt, sym::Arguments, sym::new_v1]),
+ vec![lit_pieces, args],
+ )
+ }
+}
+
+fn may_contain_yield_point(e: &ast::Expr) -> bool {
+ struct MayContainYieldPoint(bool);
+
+ impl Visitor<'_> for MayContainYieldPoint {
+ fn visit_expr(&mut self, e: &ast::Expr) {
+ if let ast::ExprKind::Await(_) | ast::ExprKind::Yield(_) = e.kind {
+ self.0 = true;
+ } else {
+ visit::walk_expr(self, e);
+ }
+ }
+
+ fn visit_mac_call(&mut self, _: &ast::MacCall) {
+ self.0 = true;
+ }
+
+ fn visit_attribute(&mut self, _: &ast::Attribute) {
+ // Conservatively assume this may be a proc macro attribute in
+ // expression position.
+ self.0 = true;
+ }
+
+ fn visit_item(&mut self, _: &ast::Item) {
+ // Do not recurse into nested items.
+ }
+ }
+
+ let mut visitor = MayContainYieldPoint(false);
+ visitor.visit_expr(e);
+ visitor.0
+}
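
Taken together, `expand_parsed_format_args` splits the template into literal pieces plus one entry per (argument, format trait) pair, then picks `Arguments::new_v1` or `new_v1_formatted` depending on whether any placeholder carries format options or reuses an argument. A hedged, self-contained model of that split for `format_args!("{} = {:x}", value, value)`; this is illustrative only and deliberately avoids the unstable `ArgumentV1` internals the real expansion emits:

    fn main() {
        let value = 255u32;
        // Literal pieces as computed above: an empty string is injected before a
        // placeholder that is not preceded by a literal, so "{} = {:x}" yields
        // ["", " = "] and two (argument, trait) entries: Display and LowerHex.
        let lit_pieces = ["", " = "];
        let rendered_args = [format!("{}", value), format!("{:x}", value)];

        // The formatting machinery interleaves literals and rendered arguments.
        let mut out = String::new();
        for (lit, arg) in lit_pieces.iter().zip(rendered_args.iter()) {
            out.push_str(lit);
            out.push_str(arg);
        }
        assert_eq!(out, "255 = ff");
    }
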
diff --git a/compiler/rustc_builtin_macros/src/global_allocator.rs b/compiler/rustc_builtin_macros/src/global_allocator.rs
index 36cfbba45..45b9b8ab6 100644
--- a/compiler/rustc_builtin_macros/src/global_allocator.rs
+++ b/compiler/rustc_builtin_macros/src/global_allocator.rs
@@ -4,11 +4,12 @@ use rustc_ast::expand::allocator::{
AllocatorKind, AllocatorMethod, AllocatorTy, ALLOCATOR_METHODS,
};
use rustc_ast::ptr::P;
-use rustc_ast::{self as ast, Attribute, Expr, FnHeader, FnSig, Generics, Param, StmtKind};
+use rustc_ast::{self as ast, AttrVec, Expr, FnHeader, FnSig, Generics, Param, StmtKind};
use rustc_ast::{Fn, ItemKind, Mutability, Stmt, Ty, TyKind, Unsafe};
use rustc_expand::base::{Annotatable, ExtCtxt};
use rustc_span::symbol::{kw, sym, Ident, Symbol};
use rustc_span::Span;
+use thin_vec::thin_vec;
pub fn expand(
ecx: &mut ExtCtxt<'_>,
@@ -113,10 +114,10 @@ impl AllocFnFactory<'_, '_> {
self.cx.expr_call(self.ty_span, method, args)
}
- fn attrs(&self) -> Vec<Attribute> {
+ fn attrs(&self) -> AttrVec {
let special = sym::rustc_std_internal_symbol;
let special = self.cx.meta_word(self.span, special);
- vec![self.cx.attribute(special)]
+ thin_vec![self.cx.attribute(special)]
}
fn arg_ty(
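
The `Vec<Attribute>` to `AttrVec` switch in `attrs` recurs throughout this diff: attribute lists are now backed by `ThinVec`, which keeps the common empty case pointer-sized. A minimal sketch of the `thin_vec` crate API this relies on, using a plain element type since the point is the container, not rustc's `Attribute`:

    use thin_vec::{thin_vec, ThinVec};

    fn main() {
        // An empty ThinVec is a single word, unlike Vec's three, which matters
        // because most AST nodes carry no attributes at all.
        let empty: ThinVec<u32> = ThinVec::new();
        assert!(empty.is_empty());

        // The thin_vec! macro mirrors vec!; it only allocates when non-empty.
        let attrs = thin_vec![1, 2, 3];
        assert_eq!(attrs.len(), 3);
    }
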
diff --git a/compiler/rustc_builtin_macros/src/lib.rs b/compiler/rustc_builtin_macros/src/lib.rs
index 11565ba72..c7ea7de8f 100644
--- a/compiler/rustc_builtin_macros/src/lib.rs
+++ b/compiler/rustc_builtin_macros/src/lib.rs
@@ -7,15 +7,18 @@
#![feature(box_patterns)]
#![feature(decl_macro)]
#![feature(if_let_guard)]
+#![feature(is_some_and)]
#![feature(is_sorted)]
#![feature(let_chains)]
-#![feature(let_else)]
#![feature(proc_macro_internals)]
#![feature(proc_macro_quote)]
#![recursion_limit = "256"]
extern crate proc_macro;
+#[macro_use]
+extern crate tracing;
+
use crate::deriving::*;
use rustc_expand::base::{MacroExpanderFn, ResolverExpand, SyntaxExtensionKind};
diff --git a/compiler/rustc_builtin_macros/src/proc_macro_harness.rs b/compiler/rustc_builtin_macros/src/proc_macro_harness.rs
index 5cfda3349..ebe1c3663 100644
--- a/compiler/rustc_builtin_macros/src/proc_macro_harness.rs
+++ b/compiler/rustc_builtin_macros/src/proc_macro_harness.rs
@@ -281,7 +281,7 @@ fn mk_decls(cx: &mut ExtCtxt<'_>, macros: &[ProcMacro]) -> P<ast::Item> {
let span = DUMMY_SP.with_def_site_ctxt(expn_id.to_expn_id());
let proc_macro = Ident::new(sym::proc_macro, span);
- let krate = cx.item(span, proc_macro, Vec::new(), ast::ItemKind::ExternCrate(None));
+ let krate = cx.item(span, proc_macro, ast::AttrVec::new(), ast::ItemKind::ExternCrate(None));
let bridge = Ident::new(sym::bridge, span);
let client = Ident::new(sym::client, span);
diff --git a/compiler/rustc_builtin_macros/src/source_util.rs b/compiler/rustc_builtin_macros/src/source_util.rs
index 8bf3a0799..d78bbc3c9 100644
--- a/compiler/rustc_builtin_macros/src/source_util.rs
+++ b/compiler/rustc_builtin_macros/src/source_util.rs
@@ -216,7 +216,7 @@ pub fn expand_include_bytes(
}
};
match cx.source_map().load_binary_file(&file) {
- Ok(bytes) => base::MacEager::expr(cx.expr_lit(sp, ast::LitKind::ByteStr(bytes.into()))),
+ Ok(bytes) => base::MacEager::expr(cx.expr_byte_str(sp, bytes)),
Err(e) => {
cx.span_err(sp, &format!("couldn't read {}: {}", file.display(), e));
DummyResult::any(sp)
diff --git a/compiler/rustc_builtin_macros/src/standard_library_imports.rs b/compiler/rustc_builtin_macros/src/standard_library_imports.rs
index 09ad5f9b3..49ef538f0 100644
--- a/compiler/rustc_builtin_macros/src/standard_library_imports.rs
+++ b/compiler/rustc_builtin_macros/src/standard_library_imports.rs
@@ -6,6 +6,7 @@ use rustc_span::edition::Edition::*;
use rustc_span::hygiene::AstPass;
use rustc_span::symbol::{kw, sym, Ident, Symbol};
use rustc_span::DUMMY_SP;
+use thin_vec::thin_vec;
pub fn inject(
mut krate: ast::Crate,
@@ -51,7 +52,7 @@ pub fn inject(
cx.item(
span,
ident,
- vec![cx.attribute(cx.meta_word(span, sym::macro_use))],
+ thin_vec![cx.attribute(cx.meta_word(span, sym::macro_use))],
ast::ItemKind::ExternCrate(None),
),
);
@@ -78,7 +79,7 @@ pub fn inject(
let use_item = cx.item(
span,
Ident::empty(),
- vec![cx.attribute(cx.meta_word(span, sym::prelude_import))],
+ thin_vec![cx.attribute(cx.meta_word(span, sym::prelude_import))],
ast::ItemKind::Use(ast::UseTree {
prefix: cx.path(span, import_path),
kind: ast::UseTreeKind::Glob,
diff --git a/compiler/rustc_builtin_macros/src/test.rs b/compiler/rustc_builtin_macros/src/test.rs
index e20375689..fee5d04cd 100644
--- a/compiler/rustc_builtin_macros/src/test.rs
+++ b/compiler/rustc_builtin_macros/src/test.rs
@@ -1,7 +1,6 @@
/// The expansion from a test function to the appropriate test struct for libtest
/// Ideally, this code would be in libtest but for efficiency and error messages it lives here.
use crate::util::{check_builtin_macro_attribute, warn_on_duplicate_attribute};
-
use rustc_ast as ast;
use rustc_ast::attr;
use rustc_ast::ptr::P;
@@ -11,8 +10,8 @@ use rustc_expand::base::*;
use rustc_session::Session;
use rustc_span::symbol::{sym, Ident, Symbol};
use rustc_span::Span;
-
use std::iter;
+use thin_vec::thin_vec;
// #[test_case] is used by custom test authors to mark tests
// When building for test, it needs to make the item public and gensym the name
@@ -37,13 +36,22 @@ pub fn expand_test_case(
let sp = ecx.with_def_site_ctxt(attr_sp);
let mut item = anno_item.expect_item();
item = item.map(|mut item| {
+ let test_path_symbol = Symbol::intern(&item_path(
+ // skip the name of the root module
+ &ecx.current_expansion.module.mod_path[1..],
+ &item.ident,
+ ));
item.vis = ast::Visibility {
span: item.vis.span,
kind: ast::VisibilityKind::Public,
tokens: None,
};
item.ident.span = item.ident.span.with_ctxt(sp.ctxt());
- item.attrs.push(ecx.attribute(ecx.meta_word(sp, sym::rustc_test_marker)));
+ item.attrs.push(ecx.attribute(attr::mk_name_value_item_str(
+ Ident::new(sym::rustc_test_marker, sp),
+ test_path_symbol,
+ sp,
+ )));
item
});
@@ -116,7 +124,7 @@ pub fn expand_test_or_bench(
// reworked in the future to not need it, it'd be nice.
_ => diag.struct_span_err(attr_sp, msg).forget_guarantee(),
};
- err.span_label(attr_sp, "the `#[test]` macro causes a a function to be run on a test and has no effect on non-functions")
+ err.span_label(attr_sp, "the `#[test]` macro causes a function to be run on a test and has no effect on non-functions")
.span_label(item.span, format!("expected a non-associated function, found {} {}", item.kind.article(), item.kind.descr()))
.span_suggestion(attr_sp, "replace with conditional compilation to make the item only exist when tests are being run", "#[cfg(test)]", Applicability::MaybeIncorrect)
.emit();
@@ -216,18 +224,29 @@ pub fn expand_test_or_bench(
)
};
+ let test_path_symbol = Symbol::intern(&item_path(
+ // skip the name of the root module
+ &cx.current_expansion.module.mod_path[1..],
+ &item.ident,
+ ));
+
let mut test_const = cx.item(
sp,
Ident::new(item.ident.name, sp),
- vec![
+ thin_vec![
// #[cfg(test)]
cx.attribute(attr::mk_list_item(
Ident::new(sym::cfg, attr_sp),
vec![attr::mk_nested_word_item(Ident::new(sym::test, attr_sp))],
)),
- // #[rustc_test_marker]
- cx.attribute(cx.meta_word(attr_sp, sym::rustc_test_marker)),
- ],
+ // #[rustc_test_marker = "test_case_sort_key"]
+ cx.attribute(attr::mk_name_value_item_str(
+ Ident::new(sym::rustc_test_marker, attr_sp),
+ test_path_symbol,
+ attr_sp,
+ )),
+ ]
+ .into(),
// const $ident: test::TestDescAndFn =
ast::ItemKind::Const(
ast::Defaultness::Final,
@@ -251,14 +270,7 @@ pub fn expand_test_or_bench(
cx.expr_call(
sp,
cx.expr_path(test_path("StaticTestName")),
- vec![cx.expr_str(
- sp,
- Symbol::intern(&item_path(
- // skip the name of the root module
- &cx.current_expansion.module.mod_path[1..],
- &item.ident,
- )),
- )],
+ vec![cx.expr_str(sp, test_path_symbol)],
),
),
// ignore: true | false
@@ -334,9 +346,9 @@ pub fn expand_test_or_bench(
});
// extern crate test
- let test_extern = cx.item(sp, test_id, vec![], ast::ItemKind::ExternCrate(None));
+ let test_extern = cx.item(sp, test_id, ast::AttrVec::new(), ast::ItemKind::ExternCrate(None));
- tracing::debug!("synthetic test item:\n{}\n", pprust::item_to_string(&test_const));
+ debug!("synthetic test item:\n{}\n", pprust::item_to_string(&test_const));
if is_stmt {
vec![
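
The substantive change in this file is that `#[rustc_test_marker]` now carries the full test path as a string value, computed once via `item_path` and shared with `StaticTestName`, so the harness can read it back later. A hedged sketch of how such a path string is assembled; the real `item_path` works on `Ident`s, this stand-in uses plain strings:

    // Stand-in for item_path: join the module path (crate root already skipped)
    // with the item's own name, separated by "::".
    fn item_path(mod_path: &[&str], item: &str) -> String {
        let mut path = mod_path.join("::");
        if !path.is_empty() {
            path.push_str("::");
        }
        path.push_str(item);
        path
    }

    fn main() {
        // A test `fn works()` inside `mod parser` would be recorded roughly as
        // #[rustc_test_marker = "parser::works"].
        assert_eq!(item_path(&["parser"], "works"), "parser::works");
        assert_eq!(item_path(&[], "works"), "works");
    }
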
diff --git a/compiler/rustc_builtin_macros/src/test_harness.rs b/compiler/rustc_builtin_macros/src/test_harness.rs
index 0ebe29df9..b8b8351a3 100644
--- a/compiler/rustc_builtin_macros/src/test_harness.rs
+++ b/compiler/rustc_builtin_macros/src/test_harness.rs
@@ -14,13 +14,15 @@ use rustc_span::symbol::{sym, Ident, Symbol};
use rustc_span::{Span, DUMMY_SP};
use rustc_target::spec::PanicStrategy;
use smallvec::{smallvec, SmallVec};
-use tracing::debug;
+use thin_vec::thin_vec;
use std::{iter, mem};
+#[derive(Clone)]
struct Test {
span: Span,
ident: Ident,
+ name: Symbol,
}
struct TestCtxt<'a> {
@@ -120,10 +122,10 @@ impl<'a> MutVisitor for TestHarnessGenerator<'a> {
fn flat_map_item(&mut self, i: P<ast::Item>) -> SmallVec<[P<ast::Item>; 1]> {
let mut item = i.into_inner();
- if is_test_case(&self.cx.ext_cx.sess, &item) {
+ if let Some(name) = get_test_name(&self.cx.ext_cx.sess, &item) {
debug!("this is a test item");
- let test = Test { span: item.span, ident: item.ident };
+ let test = Test { span: item.span, ident: item.ident, name };
self.tests.push(test);
}
@@ -187,7 +189,10 @@ impl<'a> MutVisitor for EntryPointCleaner<'a> {
let dc_nested =
attr::mk_nested_word_item(Ident::new(sym::dead_code, self.def_site));
let allow_dead_code_item = attr::mk_list_item(allow_ident, vec![dc_nested]);
- let allow_dead_code = attr::mk_attr_outer(allow_dead_code_item);
+ let allow_dead_code = attr::mk_attr_outer(
+ &self.sess.parse_sess.attr_id_generator,
+ allow_dead_code_item,
+ );
let attrs = attrs
.into_iter()
.filter(|attr| {
@@ -298,8 +303,10 @@ fn mk_main(cx: &mut TestCtxt<'_>) -> P<ast::Item> {
let call_test_main = ecx.stmt_expr(call_test_main);
// extern crate test
- let test_extern_stmt =
- ecx.stmt_item(sp, ecx.item(sp, test_id, vec![], ast::ItemKind::ExternCrate(None)));
+ let test_extern_stmt = ecx.stmt_item(
+ sp,
+ ecx.item(sp, test_id, ast::AttrVec::new(), ast::ItemKind::ExternCrate(None)),
+ );
// #[rustc_main]
let main_meta = ecx.meta_word(sp, sym::rustc_main);
@@ -333,7 +340,7 @@ fn mk_main(cx: &mut TestCtxt<'_>) -> P<ast::Item> {
let main = P(ast::Item {
ident: main_id,
- attrs: vec![main_attr],
+ attrs: thin_vec![main_attr],
id: ast::DUMMY_NODE_ID,
kind: main,
vis: ast::Visibility { span: sp, kind: ast::VisibilityKind::Public, tokens: None },
@@ -352,9 +359,12 @@ fn mk_tests_slice(cx: &TestCtxt<'_>, sp: Span) -> P<ast::Expr> {
debug!("building test vector from {} tests", cx.test_cases.len());
let ecx = &cx.ext_cx;
+ let mut tests = cx.test_cases.clone();
+ tests.sort_by(|a, b| a.name.as_str().cmp(&b.name.as_str()));
+
ecx.expr_array_ref(
sp,
- cx.test_cases
+ tests
.iter()
.map(|test| {
ecx.expr_addr_of(test.span, ecx.expr_path(ecx.path(test.span, vec![test.ident])))
@@ -363,8 +373,8 @@ fn mk_tests_slice(cx: &TestCtxt<'_>, sp: Span) -> P<ast::Expr> {
)
}
-fn is_test_case(sess: &Session, i: &ast::Item) -> bool {
- sess.contains_name(&i.attrs, sym::rustc_test_marker)
+fn get_test_name(sess: &Session, i: &ast::Item) -> Option<Symbol> {
+ sess.first_attr_value_str_by_name(&i.attrs, sym::rustc_test_marker)
}
fn get_test_runner(
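
With the name now stored on each `Test`, `mk_tests_slice` clones the collected cases and sorts them by that name before emitting the `&[...]` slice, making the test order deterministic regardless of expansion order. A minimal sketch of that ordering step, assuming nothing beyond the standard library:

    #[derive(Clone)]
    struct Test {
        name: String,
    }

    fn main() {
        let test_cases = vec![
            Test { name: "zeta::last".to_string() },
            Test { name: "alpha::first".to_string() },
        ];
        // Mirror of the new sort in mk_tests_slice: clone, then order by name.
        let mut tests = test_cases.clone();
        tests.sort_by(|a, b| a.name.as_str().cmp(b.name.as_str()));
        assert_eq!(tests[0].name, "alpha::first");
    }
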
diff --git a/compiler/rustc_codegen_cranelift/.cirrus.yml b/compiler/rustc_codegen_cranelift/.cirrus.yml
index 61da6a249..732edd661 100644
--- a/compiler/rustc_codegen_cranelift/.cirrus.yml
+++ b/compiler/rustc_codegen_cranelift/.cirrus.yml
@@ -22,4 +22,4 @@ task:
- # Reduce amount of benchmark runs as they are slow
- export COMPILE_RUNS=2
- export RUN_RUNS=2
- - ./test.sh
+ - ./y.rs test
diff --git a/compiler/rustc_codegen_cranelift/.github/workflows/main.yml b/compiler/rustc_codegen_cranelift/.github/workflows/main.yml
index aa556a21b..5061010c8 100644
--- a/compiler/rustc_codegen_cranelift/.github/workflows/main.yml
+++ b/compiler/rustc_codegen_cranelift/.github/workflows/main.yml
@@ -29,7 +29,11 @@ jobs:
matrix:
include:
- os: ubuntu-latest
+ env:
+ TARGET_TRIPLE: x86_64-unknown-linux-gnu
- os: macos-latest
+ env:
+ TARGET_TRIPLE: x86_64-apple-darwin
# cross-compile from Linux to Windows using mingw
- os: ubuntu-latest
env:
@@ -103,7 +107,7 @@ jobs:
# Enable extra checks
export CG_CLIF_ENABLE_VERIFIER=1
- ./test.sh
+ ./y.rs test
- name: Package prebuilt cg_clif
run: tar cvfJ cg_clif.tar.xz build
@@ -112,7 +116,7 @@ jobs:
if: matrix.env.TARGET_TRIPLE != 'x86_64-pc-windows-gnu'
uses: actions/upload-artifact@v2
with:
- name: cg_clif-${{ runner.os }}
+ name: cg_clif-${{ matrix.env.TARGET_TRIPLE }}
path: cg_clif.tar.xz
- name: Upload prebuilt cg_clif (cross compile)
@@ -122,56 +126,89 @@ jobs:
name: cg_clif-${{ runner.os }}-cross-x86_64-mingw
path: cg_clif.tar.xz
- build_windows:
- runs-on: windows-latest
+ windows:
+ runs-on: ${{ matrix.os }}
timeout-minutes: 60
+ strategy:
+ fail-fast: false
+ matrix:
+ include:
+ # Native Windows build with MSVC
+ - os: windows-latest
+ env:
+ TARGET_TRIPLE: x86_64-pc-windows-msvc
+ # cross-compile from Windows to Windows MinGW
+ - os: windows-latest
+ env:
+ TARGET_TRIPLE: x86_64-pc-windows-gnu
+
steps:
- uses: actions/checkout@v3
- #- name: Cache cargo installed crates
- # uses: actions/cache@v2
- # with:
- # path: ~/.cargo/bin
- # key: ${{ runner.os }}-cargo-installed-crates
-
- #- name: Cache cargo registry and index
- # uses: actions/cache@v2
- # with:
- # path: |
- # ~/.cargo/registry
- # ~/.cargo/git
- # key: ${{ runner.os }}-cargo-registry-and-index-${{ hashFiles('**/Cargo.lock') }}
-
- #- name: Cache cargo target dir
- # uses: actions/cache@v2
- # with:
- # path: target
- # key: ${{ runner.os }}-cargo-build-target-${{ hashFiles('rust-toolchain', '**/Cargo.lock') }}
+ - name: Cache cargo installed crates
+ uses: actions/cache@v2
+ with:
+ path: ~/.cargo/bin
+ key: ${{ runner.os }}-${{ matrix.env.TARGET_TRIPLE }}-cargo-installed-crates
+
+ - name: Cache cargo registry and index
+ uses: actions/cache@v2
+ with:
+ path: |
+ ~/.cargo/registry
+ ~/.cargo/git
+ key: ${{ runner.os }}-${{ matrix.env.TARGET_TRIPLE }}-cargo-registry-and-index-${{ hashFiles('**/Cargo.lock') }}
+
+ - name: Cache cargo target dir
+ uses: actions/cache@v2
+ with:
+ path: target
+ key: ${{ runner.os }}-${{ matrix.env.TARGET_TRIPLE }}-cargo-build-target-${{ hashFiles('rust-toolchain', '**/Cargo.lock') }}
+
+ - name: Set MinGW as the default toolchain
+ if: matrix.env.TARGET_TRIPLE == 'x86_64-pc-windows-gnu'
+ run: rustup set default-host x86_64-pc-windows-gnu
- name: Prepare dependencies
run: |
git config --global user.email "user@example.com"
git config --global user.name "User"
git config --global core.autocrlf false
- rustup set default-host x86_64-pc-windows-gnu
rustc y.rs -o y.exe -g
./y.exe prepare
+ - name: Build without unstable features
+ env:
+ TARGET_TRIPLE: ${{ matrix.env.TARGET_TRIPLE }}
+ # This is the config rust-lang/rust uses for builds
+ run: ./y.rs build --no-unstable-features
+
- name: Build
- #name: Test
+ run: ./y.rs build --sysroot none
+
+ - name: Test
run: |
# Enable backtraces for easier debugging
- #export RUST_BACKTRACE=1
+ $Env:RUST_BACKTRACE=1
# Reduce amount of benchmark runs as they are slow
- #export COMPILE_RUNS=2
- #export RUN_RUNS=2
+ $Env:COMPILE_RUNS=2
+ $Env:RUN_RUNS=2
# Enable extra checks
- #export CG_CLIF_ENABLE_VERIFIER=1
-
- ./y.exe build
+ $Env:CG_CLIF_ENABLE_VERIFIER=1
+
+ # WIP Disable some tests
+
+ # This fails due to some weird argument handling by hyperfine, not an actual regression
+ # more of a build system issue
+ (Get-Content config.txt) -replace '(bench.simple-raytracer)', '# $1' | Out-File config.txt
+
+ # This fails with a different output than expected
+ (Get-Content config.txt) -replace '(test.regex-shootout-regex-dna)', '# $1' | Out-File config.txt
+
+ ./y.exe test
- name: Package prebuilt cg_clif
# don't use compression as xzip isn't supported by tar on windows and bzip2 hangs
@@ -180,5 +217,5 @@ jobs:
- name: Upload prebuilt cg_clif
uses: actions/upload-artifact@v2
with:
- name: cg_clif-${{ runner.os }}
+ name: cg_clif-${{ matrix.env.TARGET_TRIPLE }}
path: cg_clif.tar
diff --git a/compiler/rustc_codegen_cranelift/.vscode/settings.json b/compiler/rustc_codegen_cranelift/.vscode/settings.json
index d88309e41..13301bf20 100644
--- a/compiler/rustc_codegen_cranelift/.vscode/settings.json
+++ b/compiler/rustc_codegen_cranelift/.vscode/settings.json
@@ -7,7 +7,7 @@
"rust-analyzer.cargo.features": ["unstable-features"],
"rust-analyzer.linkedProjects": [
"./Cargo.toml",
- //"./build_sysroot/sysroot_src/src/libstd/Cargo.toml",
+ //"./build_sysroot/sysroot_src/library/std/Cargo.toml",
{
"roots": [
"./example/mini_core.rs",
@@ -36,10 +36,10 @@
]
},
{
- "roots": ["./scripts/filter_profile.rs"],
+ "roots": ["./example/std_example.rs"],
"crates": [
{
- "root_module": "./scripts/filter_profile.rs",
+ "root_module": "./example/std_example.rs",
"edition": "2018",
"deps": [{ "crate": 1, "name": "std" }],
"cfg": [],
diff --git a/compiler/rustc_codegen_cranelift/Cargo.lock b/compiler/rustc_codegen_cranelift/Cargo.lock
index 532049c85..3fa9d56cd 100644
--- a/compiler/rustc_codegen_cranelift/Cargo.lock
+++ b/compiler/rustc_codegen_cranelift/Cargo.lock
@@ -15,9 +15,9 @@ dependencies = [
[[package]]
name = "anyhow"
-version = "1.0.56"
+version = "1.0.60"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4361135be9122e0870de935d7c439aef945b9f9ddd4199a553b5270b49c82a27"
+checksum = "c794e162a5eff65c72ef524dfe393eb923c354e350bb78b9c7383df13f3bc142"
[[package]]
name = "ar"
@@ -25,6 +25,12 @@ version = "0.8.0"
source = "git+https://github.com/bjorn3/rust-ar.git?branch=do_not_remove_cg_clif_ranlib#de9ab0e56bf3a208381d342aa5b60f9ff2891648"
[[package]]
+name = "arrayvec"
+version = "0.7.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8da52d66c7071e2e3fa2a1e5c6d088fec47b593032b254f5e980de8ea54454d6"
+
+[[package]]
name = "autocfg"
version = "1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -37,6 +43,12 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a"
[[package]]
+name = "bumpalo"
+version = "3.11.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c1ad822118d20d2c234f427000d5acc36eabe1e29a348c89b63dd60b13f28e5d"
+
+[[package]]
name = "byteorder"
version = "1.4.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -50,19 +62,21 @@ checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
[[package]]
name = "cranelift-bforest"
-version = "0.85.3"
+version = "0.88.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "749d0d6022c9038dccf480bdde2a38d435937335bf2bb0f14e815d94517cdce8"
+checksum = "44409ccf2d0f663920cab563d2b79fcd6b2e9a2bcc6e929fef76c8f82ad6c17a"
dependencies = [
"cranelift-entity",
]
[[package]]
name = "cranelift-codegen"
-version = "0.85.3"
+version = "0.88.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e94370cc7b37bf652ccd8bb8f09bd900997f7ccf97520edfc75554bb5c4abbea"
+checksum = "98de2018ad96eb97f621f7d6b900a0cc661aec8d02ea4a50e56ecb48e5a2fcaf"
dependencies = [
+ "arrayvec",
+ "bumpalo",
"cranelift-bforest",
"cranelift-codegen-meta",
"cranelift-codegen-shared",
@@ -77,30 +91,30 @@ dependencies = [
[[package]]
name = "cranelift-codegen-meta"
-version = "0.85.3"
+version = "0.88.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e0a3cea8fdab90e44018c5b9a1dfd460d8ee265ac354337150222a354628bdb6"
+checksum = "5287ce36e6c4758fbaf298bd1a8697ad97a4f2375a3d1b61142ea538db4877e5"
dependencies = [
"cranelift-codegen-shared",
]
[[package]]
name = "cranelift-codegen-shared"
-version = "0.85.3"
+version = "0.88.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5ac72f76f2698598951ab26d8c96eaa854810e693e7dd52523958b5909fde6b2"
+checksum = "2855c24219e2f08827f3f4ffb2da92e134ae8d8ecc185b11ec8f9878cf5f588e"
[[package]]
name = "cranelift-entity"
-version = "0.85.3"
+version = "0.88.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "09eaeacfcd2356fe0e66b295e8f9d59fdd1ac3ace53ba50de14d628ec902f72d"
+checksum = "0b65673279d75d34bf11af9660ae2dbd1c22e6d28f163f5c72f4e1dc56d56103"
[[package]]
name = "cranelift-frontend"
-version = "0.85.3"
+version = "0.88.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "dba69c9980d5ffd62c18a2bde927855fcd7c8dc92f29feaf8636052662cbd99c"
+checksum = "3ed2b3d7a4751163f6c4a349205ab1b7d9c00eecf19dcea48592ef1f7688eefc"
dependencies = [
"cranelift-codegen",
"log",
@@ -110,15 +124,15 @@ dependencies = [
[[package]]
name = "cranelift-isle"
-version = "0.85.3"
+version = "0.88.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d2920dc1e05cac40304456ed3301fde2c09bd6a9b0210bcfa2f101398d628d5b"
+checksum = "3be64cecea9d90105fc6a2ba2d003e98c867c1d6c4c86cc878f97ad9fb916293"
[[package]]
name = "cranelift-jit"
-version = "0.85.3"
+version = "0.88.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1c3c5ed067f2c81577e431f3039148a9c187b33cc79e0d1731fede27d801ec56"
+checksum = "f98ed42a70a0c9c388e34ec9477f57fc7300f541b1e5136a0e2ea02b1fac6015"
dependencies = [
"anyhow",
"cranelift-codegen",
@@ -129,14 +143,14 @@ dependencies = [
"log",
"region",
"target-lexicon",
- "winapi",
+ "windows-sys",
]
[[package]]
name = "cranelift-module"
-version = "0.85.3"
+version = "0.88.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "eee6784303bf9af235237a4885f7417e09a35df896d38ea969a0081064b3ede4"
+checksum = "d658ac7f156708bfccb647216cc8b9387469f50d352ba4ad80150541e4ae2d49"
dependencies = [
"anyhow",
"cranelift-codegen",
@@ -144,9 +158,9 @@ dependencies = [
[[package]]
name = "cranelift-native"
-version = "0.85.3"
+version = "0.88.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f04dfa45f9b2a6f587c564d6b63388e00cd6589d2df6ea2758cf79e1a13285e6"
+checksum = "c4a03a6ac1b063e416ca4b93f6247978c991475e8271465340caa6f92f3c16a4"
dependencies = [
"cranelift-codegen",
"libc",
@@ -155,9 +169,9 @@ dependencies = [
[[package]]
name = "cranelift-object"
-version = "0.85.3"
+version = "0.88.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0bf38b2c505db749276793116c0cb30bd096206c7810e471677a453134881881"
+checksum = "eef0b4119b645b870a43a036d76c0ada3a076b1f82e8b8487659304c8b09049b"
dependencies = [
"anyhow",
"cranelift-codegen",
@@ -187,9 +201,9 @@ dependencies = [
[[package]]
name = "getrandom"
-version = "0.2.6"
+version = "0.2.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9be70c98951c83b8d2f8f60d7065fa6d5146873094452a1008da8c2f1e4205ad"
+checksum = "4eb1a864a501629691edf6c15a593b7a51eebaa1e8468e9ddc623de7c9b58ec6"
dependencies = [
"cfg-if",
"libc",
@@ -198,49 +212,43 @@ dependencies = [
[[package]]
name = "gimli"
-version = "0.26.1"
+version = "0.26.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "78cc372d058dcf6d5ecd98510e7fbc9e5aec4d21de70f65fea8fecebcd881bd4"
+checksum = "22030e2c5a68ec659fde1e949a745124b48e6fa8b045b7ed5bd1fe4ccc5c4e5d"
dependencies = [
"indexmap",
]
[[package]]
name = "hashbrown"
-version = "0.11.2"
+version = "0.12.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ab5ef0d4909ef3724cc8cce6ccc8572c5c817592e9285f5464f8e86f8bd3726e"
+checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888"
dependencies = [
"ahash",
]
[[package]]
-name = "hashbrown"
-version = "0.12.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888"
-
-[[package]]
name = "indexmap"
version = "1.9.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "10a35a97730320ffe8e2d410b5d3b69279b98d2c14bdb8b70ea89ecf7888d41e"
dependencies = [
"autocfg",
- "hashbrown 0.12.3",
+ "hashbrown",
]
[[package]]
name = "libc"
-version = "0.2.126"
+version = "0.2.127"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "349d5a591cd28b49e1d1037471617a32ddcda5731b99419008085f72d5a53836"
+checksum = "505e71a4706fa491e9b1b55f51b95d4037d0821ee40131190475f692b35b009b"
[[package]]
name = "libloading"
-version = "0.6.7"
+version = "0.7.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "351a32417a12d5f7e82c368a66781e307834dae04c6ce0cd4456d52989229883"
+checksum = "efbc0f03f9a775e9f6aed295c6a1ba2253c5757a9e03d55c6caa46a681abcddd"
dependencies = [
"cfg-if",
"winapi",
@@ -248,9 +256,9 @@ dependencies = [
[[package]]
name = "log"
-version = "0.4.14"
+version = "0.4.17"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "51b9bbe6c47d51fc3e1a9b945965946b4c44142ab8792c50835a980d362c2710"
+checksum = "abb12e687cfb44aa40f41fc3978ef76448f9b6038cad6aef4259d3c095a2382e"
dependencies = [
"cfg-if",
]
@@ -266,33 +274,33 @@ dependencies = [
[[package]]
name = "memchr"
-version = "2.4.1"
+version = "2.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "308cc39be01b73d0d18f82a0e7b2a3df85245f84af96fdddc5d202d27e47b86a"
+checksum = "2dffe52ecf27772e601905b7522cb4ef790d2cc203488bbd0e2fe85fcb74566d"
[[package]]
name = "object"
-version = "0.28.4"
+version = "0.29.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e42c982f2d955fac81dd7e1d0e1426a7d702acd9c98d19ab01083a6a0328c424"
+checksum = "21158b2c33aa6d4561f1c0a6ea283ca92bc54802a93b263e910746d679a7eb53"
dependencies = [
"crc32fast",
- "hashbrown 0.11.2",
+ "hashbrown",
"indexmap",
"memchr",
]
[[package]]
name = "once_cell"
-version = "1.10.0"
+version = "1.13.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "87f3e037eac156d1775da914196f0f37741a274155e34a0b7e427c35d2a2ecb9"
+checksum = "18a6dbe30758c9f83eb00cbea4ac95966305f5a7772f3f42ebfc7fc7eddbd8e1"
[[package]]
name = "regalloc2"
-version = "0.2.3"
+version = "0.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4a8d23b35d7177df3b9d31ed8a9ab4bf625c668be77a319d4f5efd4a5257701c"
+checksum = "d43a209257d978ef079f3d446331d0f1794f5e0fc19b306a199983857833a779"
dependencies = [
"fxhash",
"log",
@@ -340,15 +348,15 @@ checksum = "03b634d87b960ab1a38c4fe143b508576f075e7c978bfad18217645ebfdfa2ec"
[[package]]
name = "smallvec"
-version = "1.8.1"
+version = "1.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "cc88c725d61fc6c3132893370cac4a0200e3fedf5da8331c570664b1987f5ca2"
+checksum = "2fd0db749597d91ff862fd1d55ea87f7855a744a8425a64695b6fca237d1dad1"
[[package]]
name = "target-lexicon"
-version = "0.12.3"
+version = "0.12.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d7fa7e55043acb85fca6b3c01485a2eeb6b69c5d21002e273c79e465f43b7ac1"
+checksum = "c02424087780c9b71cc96799eaeddff35af2bc513278cda5c99fc1f5d026d3c1"
[[package]]
name = "version_check"
@@ -358,9 +366,9 @@ checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f"
[[package]]
name = "wasi"
-version = "0.10.2+wasi-snapshot-preview1"
+version = "0.11.0+wasi-snapshot-preview1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "fd6fbd9a79829dd1ad0cc20627bf1ed606756a7f77edff7b66b7064f9cb327c6"
+checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423"
[[package]]
name = "winapi"
@@ -383,3 +391,46 @@ name = "winapi-x86_64-pc-windows-gnu"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f"
+
+[[package]]
+name = "windows-sys"
+version = "0.36.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ea04155a16a59f9eab786fe12a4a450e75cdb175f9e0d80da1e17db09f55b8d2"
+dependencies = [
+ "windows_aarch64_msvc",
+ "windows_i686_gnu",
+ "windows_i686_msvc",
+ "windows_x86_64_gnu",
+ "windows_x86_64_msvc",
+]
+
+[[package]]
+name = "windows_aarch64_msvc"
+version = "0.36.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9bb8c3fd39ade2d67e9874ac4f3db21f0d710bee00fe7cab16949ec184eeaa47"
+
+[[package]]
+name = "windows_i686_gnu"
+version = "0.36.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "180e6ccf01daf4c426b846dfc66db1fc518f074baa793aa7d9b9aaeffad6a3b6"
+
+[[package]]
+name = "windows_i686_msvc"
+version = "0.36.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e2e7917148b2812d1eeafaeb22a97e4813dfa60a3f8f78ebe204bcc88f12f024"
+
+[[package]]
+name = "windows_x86_64_gnu"
+version = "0.36.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4dcd171b8776c41b97521e5da127a2d86ad280114807d0b2ab1e462bc764d9e1"
+
+[[package]]
+name = "windows_x86_64_msvc"
+version = "0.36.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c811ca4a8c853ef420abd8592ba53ddbbac90410fab6903b3e79972a631f7680"
diff --git a/compiler/rustc_codegen_cranelift/Cargo.toml b/compiler/rustc_codegen_cranelift/Cargo.toml
index 61e977e3e..09cf5b4a1 100644
--- a/compiler/rustc_codegen_cranelift/Cargo.toml
+++ b/compiler/rustc_codegen_cranelift/Cargo.toml
@@ -8,19 +8,19 @@ crate-type = ["dylib"]
[dependencies]
# These have to be in sync with each other
-cranelift-codegen = { version = "0.85.3", features = ["unwind", "all-arch"] }
-cranelift-frontend = "0.85.3"
-cranelift-module = "0.85.3"
-cranelift-native = "0.85.3"
-cranelift-jit = { version = "0.85.3", optional = true }
-cranelift-object = "0.85.3"
+cranelift-codegen = { version = "0.88.1", features = ["unwind", "all-arch"] }
+cranelift-frontend = "0.88.1"
+cranelift-module = "0.88.1"
+cranelift-native = "0.88.1"
+cranelift-jit = { version = "0.88.1", optional = true }
+cranelift-object = "0.88.1"
target-lexicon = "0.12.0"
gimli = { version = "0.26.0", default-features = false, features = ["write"]}
-object = { version = "0.28.0", default-features = false, features = ["std", "read_core", "write", "archive", "coff", "elf", "macho", "pe"] }
+object = { version = "0.29.0", default-features = false, features = ["std", "read_core", "write", "archive", "coff", "elf", "macho", "pe"] }
ar = { git = "https://github.com/bjorn3/rust-ar.git", branch = "do_not_remove_cg_clif_ranlib" }
indexmap = "1.9.1"
-libloading = { version = "0.6.0", optional = true }
+libloading = { version = "0.7.3", optional = true }
once_cell = "1.10.0"
smallvec = "1.8.1"
diff --git a/compiler/rustc_codegen_cranelift/Readme.md b/compiler/rustc_codegen_cranelift/Readme.md
index 8a2db5a43..1e84c7fa3 100644
--- a/compiler/rustc_codegen_cranelift/Readme.md
+++ b/compiler/rustc_codegen_cranelift/Readme.md
@@ -52,9 +52,7 @@ configuration options.
## Not yet supported
* Inline assembly ([no cranelift support](https://github.com/bytecodealliance/wasmtime/issues/1041))
- * On Linux there is support for invoking an external assembler for `global_asm!` and `asm!`.
- `llvm_asm!` will remain unimplemented forever. `asm!` doesn't yet support reg classes. You
- have to specify specific registers instead.
+ * On UNIX there is support for invoking an external assembler for `global_asm!` and `asm!`.
* SIMD ([tracked here](https://github.com/bjorn3/rustc_codegen_cranelift/issues/171), some basic things work)
## License
diff --git a/compiler/rustc_codegen_cranelift/build_sysroot/Cargo.lock b/compiler/rustc_codegen_cranelift/build_sysroot/Cargo.lock
index 7b2cdd273..f6a9cb672 100644
--- a/compiler/rustc_codegen_cranelift/build_sysroot/Cargo.lock
+++ b/compiler/rustc_codegen_cranelift/build_sysroot/Cargo.lock
@@ -55,10 +55,20 @@ dependencies = [
]
[[package]]
+name = "cfg-if"
+version = "1.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
+dependencies = [
+ "compiler_builtins",
+ "rustc-std-workspace-core",
+]
+
+[[package]]
name = "compiler_builtins"
-version = "0.1.75"
+version = "0.1.82"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c6e3183e88f659a862835db8f4b67dbeed3d93e44dd4927eef78edb1c149d784"
+checksum = "18cd7635fea7bb481ea543b392789844c1ad581299da70184c7175ce3af76603"
dependencies = [
"rustc-std-workspace-core",
]
@@ -69,9 +79,9 @@ version = "0.0.0"
[[package]]
name = "dlmalloc"
-version = "0.2.3"
+version = "0.2.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a6fe28e0bf9357092740362502f5cc7955d8dc125ebda71dec72336c2e15c62e"
+checksum = "203540e710bfadb90e5e29930baf5d10270cec1f43ab34f46f78b147b2de715a"
dependencies = [
"compiler_builtins",
"libc",
@@ -80,9 +90,9 @@ dependencies = [
[[package]]
name = "fortanix-sgx-abi"
-version = "0.3.3"
+version = "0.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c56c422ef86062869b2d57ae87270608dc5929969dd130a6e248979cf4fb6ca6"
+checksum = "57cafc2274c10fab234f176b25903ce17e690fca7597090d50880e047a0389c5"
dependencies = [
"compiler_builtins",
"rustc-std-workspace-core",
@@ -123,9 +133,9 @@ dependencies = [
[[package]]
name = "hermit-abi"
-version = "0.2.4"
+version = "0.2.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7668753748e445859e4e373c3d41117235d9feed578392f5a3a73efdc751ca4a"
+checksum = "ee512640fe35acbfb4bb779db6f0d80704c2cacfa2e39b601ef3e3f47d1ae4c7"
dependencies = [
"compiler_builtins",
"libc",
@@ -135,9 +145,9 @@ dependencies = [
[[package]]
name = "libc"
-version = "0.2.126"
+version = "0.2.135"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "349d5a591cd28b49e1d1037471617a32ddcda5731b99419008085f72d5a53836"
+checksum = "68783febc7782c6c5cb401fbda4de5a9898be1762314da0bb2c10ced61f18b0c"
dependencies = [
"rustc-std-workspace-core",
]
@@ -182,7 +192,7 @@ name = "panic_abort"
version = "0.0.0"
dependencies = [
"alloc",
- "cfg-if",
+ "cfg-if 0.1.10",
"compiler_builtins",
"core",
"libc",
@@ -193,7 +203,7 @@ name = "panic_unwind"
version = "0.0.0"
dependencies = [
"alloc",
- "cfg-if",
+ "cfg-if 0.1.10",
"compiler_builtins",
"core",
"libc",
@@ -245,7 +255,7 @@ version = "0.0.0"
dependencies = [
"addr2line",
"alloc",
- "cfg-if",
+ "cfg-if 1.0.0",
"compiler_builtins",
"core",
"dlmalloc",
@@ -267,7 +277,7 @@ dependencies = [
name = "std_detect"
version = "0.1.5"
dependencies = [
- "cfg-if",
+ "cfg-if 1.0.0",
"compiler_builtins",
"libc",
"rustc-std-workspace-alloc",
@@ -289,7 +299,7 @@ dependencies = [
name = "test"
version = "0.0.0"
dependencies = [
- "cfg-if",
+ "cfg-if 0.1.10",
"core",
"getopts",
"libc",
@@ -301,9 +311,9 @@ dependencies = [
[[package]]
name = "unicode-width"
-version = "0.1.9"
+version = "0.1.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3ed742d4ea2bd1176e236172c8429aaf54486e7ac098db29ffe6529e0ce50973"
+checksum = "c0edd1e5b14653f783770bce4a4dabb4a5108a5370a5f5d8cfe8710c361f6c8b"
dependencies = [
"compiler_builtins",
"rustc-std-workspace-core",
@@ -315,7 +325,7 @@ name = "unwind"
version = "0.0.0"
dependencies = [
"cc",
- "cfg-if",
+ "cfg-if 0.1.10",
"compiler_builtins",
"core",
"libc",
diff --git a/compiler/rustc_codegen_cranelift/build_system/abi_cafe.rs b/compiler/rustc_codegen_cranelift/build_system/abi_cafe.rs
new file mode 100644
index 000000000..fae5b2716
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/build_system/abi_cafe.rs
@@ -0,0 +1,52 @@
+use std::env;
+use std::path::Path;
+
+use super::build_sysroot;
+use super::config;
+use super::prepare;
+use super::utils::{cargo_command, spawn_and_wait};
+use super::SysrootKind;
+
+pub(crate) fn run(
+ channel: &str,
+ sysroot_kind: SysrootKind,
+ target_dir: &Path,
+ cg_clif_dylib: &Path,
+ host_triple: &str,
+ target_triple: &str,
+) {
+ if !config::get_bool("testsuite.abi-cafe") {
+ eprintln!("[SKIP] abi-cafe");
+ return;
+ }
+
+ if host_triple != target_triple {
+ eprintln!("[SKIP] abi-cafe (cross-compilation not supported)");
+ return;
+ }
+
+ eprintln!("Building sysroot for abi-cafe");
+ build_sysroot::build_sysroot(
+ channel,
+ sysroot_kind,
+ target_dir,
+ cg_clif_dylib,
+ host_triple,
+ target_triple,
+ );
+
+ eprintln!("Running abi-cafe");
+ let abi_cafe_path = prepare::ABI_CAFE.source_dir();
+ env::set_current_dir(abi_cafe_path.clone()).unwrap();
+
+ let pairs = ["rustc_calls_cgclif", "cgclif_calls_rustc", "cgclif_calls_cc", "cc_calls_cgclif"];
+
+ let mut cmd = cargo_command("cargo", "run", Some(target_triple), &abi_cafe_path);
+ cmd.arg("--");
+ cmd.arg("--pairs");
+ cmd.args(pairs);
+ cmd.arg("--add-rustc-codegen-backend");
+ cmd.arg(format!("cgclif:{}", cg_clif_dylib.display()));
+
+ spawn_and_wait(cmd);
+}
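
For reference, the `cargo_command` call plus the extra arguments amount to an invocation along these lines; a hedged reconstruction with `std::process::Command` that ignores whatever extra configuration `cargo_command` applies, with a hypothetical target triple and dylib path:

    use std::process::Command;

    fn main() {
        // Roughly the command abi_cafe::run assembles (triple and path are placeholders).
        let mut cmd = Command::new("cargo");
        cmd.arg("run")
            .args(["--target", "x86_64-unknown-linux-gnu"])
            .arg("--")
            .arg("--pairs")
            .args(["rustc_calls_cgclif", "cgclif_calls_rustc", "cgclif_calls_cc", "cc_calls_cgclif"])
            .arg("--add-rustc-codegen-backend")
            .arg("cgclif:/path/to/librustc_codegen_cranelift.so");
        println!("{:?}", cmd);
    }
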
diff --git a/compiler/rustc_codegen_cranelift/build_system/build_backend.rs b/compiler/rustc_codegen_cranelift/build_system/build_backend.rs
index 48faec8bc..cda468bcf 100644
--- a/compiler/rustc_codegen_cranelift/build_system/build_backend.rs
+++ b/compiler/rustc_codegen_cranelift/build_system/build_backend.rs
@@ -1,20 +1,22 @@
use std::env;
-use std::path::{Path, PathBuf};
-use std::process::Command;
+use std::path::PathBuf;
+
+use super::rustc_info::get_file_name;
+use super::utils::{cargo_command, is_ci};
pub(crate) fn build_backend(
channel: &str,
host_triple: &str,
use_unstable_features: bool,
) -> PathBuf {
- let mut cmd = Command::new("cargo");
- cmd.arg("build").arg("--target").arg(host_triple);
+ let source_dir = std::env::current_dir().unwrap();
+ let mut cmd = cargo_command("cargo", "build", Some(host_triple), &source_dir);
cmd.env("CARGO_BUILD_INCREMENTAL", "true"); // Force incr comp even in release mode
let mut rustflags = env::var("RUSTFLAGS").unwrap_or_default();
- if env::var("CI").as_ref().map(|val| &**val) == Ok("true") {
+ if is_ci() {
// Deny warnings on CI
rustflags += " -Dwarnings";
@@ -39,5 +41,9 @@ pub(crate) fn build_backend(
eprintln!("[BUILD] rustc_codegen_cranelift");
super::utils::spawn_and_wait(cmd);
- Path::new("target").join(host_triple).join(channel)
+ source_dir
+ .join("target")
+ .join(host_triple)
+ .join(channel)
+ .join(get_file_name("rustc_codegen_cranelift", "dylib"))
}
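
Returning the resolved dylib path from `build_backend` (rather than just the build directory) works because the library's file name is platform dependent. A small, self-contained illustration of why it cannot simply be hard-coded, using only the standard library:

    fn main() {
        // e.g. "librustc_codegen_cranelift.so" on Linux, "...dylib" on macOS,
        // "rustc_codegen_cranelift.dll" on Windows.
        let name = format!(
            "{}rustc_codegen_cranelift{}",
            std::env::consts::DLL_PREFIX,
            std::env::consts::DLL_SUFFIX
        );
        println!("{name}");
    }
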
diff --git a/compiler/rustc_codegen_cranelift/build_system/build_sysroot.rs b/compiler/rustc_codegen_cranelift/build_system/build_sysroot.rs
index 16cce83dd..856aecc49 100644
--- a/compiler/rustc_codegen_cranelift/build_system/build_sysroot.rs
+++ b/compiler/rustc_codegen_cranelift/build_system/build_sysroot.rs
@@ -2,18 +2,20 @@ use std::fs;
use std::path::{Path, PathBuf};
use std::process::{self, Command};
-use super::rustc_info::{get_file_name, get_rustc_version};
-use super::utils::{spawn_and_wait, try_hard_link};
+use super::rustc_info::{get_file_name, get_rustc_version, get_wrapper_file_name};
+use super::utils::{cargo_command, spawn_and_wait, try_hard_link};
use super::SysrootKind;
pub(crate) fn build_sysroot(
channel: &str,
sysroot_kind: SysrootKind,
target_dir: &Path,
- cg_clif_build_dir: PathBuf,
+ cg_clif_dylib_src: &Path,
host_triple: &str,
target_triple: &str,
) {
+ eprintln!("[BUILD] sysroot {:?}", sysroot_kind);
+
if target_dir.exists() {
fs::remove_dir_all(target_dir).unwrap();
}
@@ -21,7 +23,6 @@ pub(crate) fn build_sysroot(
fs::create_dir_all(target_dir.join("lib")).unwrap();
// Copy the backend
- let cg_clif_dylib = get_file_name("rustc_codegen_cranelift", "dylib");
let cg_clif_dylib_path = target_dir
.join(if cfg!(windows) {
// Windows doesn't have rpath support, so the cg_clif dylib needs to be next to the
@@ -30,16 +31,18 @@ pub(crate) fn build_sysroot(
} else {
"lib"
})
- .join(&cg_clif_dylib);
- try_hard_link(cg_clif_build_dir.join(cg_clif_dylib), &cg_clif_dylib_path);
+ .join(get_file_name("rustc_codegen_cranelift", "dylib"));
+ try_hard_link(cg_clif_dylib_src, &cg_clif_dylib_path);
// Build and copy rustc and cargo wrappers
for wrapper in ["rustc-clif", "cargo-clif"] {
+ let wrapper_name = get_wrapper_file_name(wrapper, "bin");
+
let mut build_cargo_wrapper_cmd = Command::new("rustc");
build_cargo_wrapper_cmd
.arg(PathBuf::from("scripts").join(format!("{wrapper}.rs")))
.arg("-o")
- .arg(target_dir.join(wrapper))
+ .arg(target_dir.join(wrapper_name))
.arg("-g");
spawn_and_wait(build_cargo_wrapper_cmd);
}
@@ -182,10 +185,10 @@ fn build_clif_sysroot_for_triple(
}
// Build sysroot
- let mut build_cmd = Command::new("cargo");
- build_cmd.arg("build").arg("--target").arg(triple).current_dir("build_sysroot");
+ let mut build_cmd = cargo_command("cargo", "build", Some(triple), Path::new("build_sysroot"));
let mut rustflags = "-Zforce-unstable-if-unmarked -Cpanic=abort".to_string();
rustflags.push_str(&format!(" -Zcodegen-backend={}", cg_clif_dylib_path.to_str().unwrap()));
+ rustflags.push_str(&format!(" --sysroot={}", target_dir.to_str().unwrap()));
if channel == "release" {
build_cmd.arg("--release");
rustflags.push_str(" -Zmir-opt-level=3");
diff --git a/compiler/rustc_codegen_cranelift/build_system/config.rs b/compiler/rustc_codegen_cranelift/build_system/config.rs
index ef540cf1f..c31784e10 100644
--- a/compiler/rustc_codegen_cranelift/build_system/config.rs
+++ b/compiler/rustc_codegen_cranelift/build_system/config.rs
@@ -1,4 +1,5 @@
-use std::{fs, process};
+use std::fs;
+use std::process;
fn load_config_file() -> Vec<(String, Option<String>)> {
fs::read_to_string("config.txt")
diff --git a/compiler/rustc_codegen_cranelift/build_system/mod.rs b/compiler/rustc_codegen_cranelift/build_system/mod.rs
index b897b7fba..b25270d83 100644
--- a/compiler/rustc_codegen_cranelift/build_system/mod.rs
+++ b/compiler/rustc_codegen_cranelift/build_system/mod.rs
@@ -2,11 +2,15 @@ use std::env;
use std::path::PathBuf;
use std::process;
+use self::utils::is_ci;
+
+mod abi_cafe;
mod build_backend;
mod build_sysroot;
mod config;
mod prepare;
mod rustc_info;
+mod tests;
mod utils;
fn usage() {
@@ -15,6 +19,9 @@ fn usage() {
eprintln!(
" ./y.rs build [--debug] [--sysroot none|clif|llvm] [--target-dir DIR] [--no-unstable-features]"
);
+ eprintln!(
+ " ./y.rs test [--debug] [--sysroot none|clif|llvm] [--target-dir DIR] [--no-unstable-features]"
+ );
}
macro_rules! arg_error {
@@ -25,11 +32,13 @@ macro_rules! arg_error {
}};
}
+#[derive(PartialEq, Debug)]
enum Command {
Build,
+ Test,
}
-#[derive(Copy, Clone)]
+#[derive(Copy, Clone, Debug)]
pub(crate) enum SysrootKind {
None,
Clif,
@@ -42,16 +51,22 @@ pub fn main() {
// The target dir is expected in the default location. Guard against the user changing it.
env::set_var("CARGO_TARGET_DIR", "target");
+ if is_ci() {
+ // Disabling incr comp reduces cache size and incr comp doesn't save as much on CI anyway
+ env::set_var("CARGO_BUILD_INCREMENTAL", "false");
+ }
+
let mut args = env::args().skip(1);
let command = match args.next().as_deref() {
Some("prepare") => {
if args.next().is_some() {
- arg_error!("./x.rs prepare doesn't expect arguments");
+ arg_error!("./y.rs prepare doesn't expect arguments");
}
prepare::prepare();
process::exit(0);
}
Some("build") => Command::Build,
+ Some("test") => Command::Test,
Some(flag) if flag.starts_with('-') => arg_error!("Expected command found flag {}", flag),
Some(command) => arg_error!("Unknown command {}", command),
None => {
@@ -107,22 +122,36 @@ pub fn main() {
host_triple.clone()
};
- if target_triple.ends_with("-msvc") {
- eprintln!("The MSVC toolchain is not yet supported by rustc_codegen_cranelift.");
- eprintln!("Switch to the MinGW toolchain for Windows support.");
- eprintln!("Hint: You can use `rustup set default-host x86_64-pc-windows-gnu` to");
- eprintln!("set the global default target to MinGW");
- process::exit(1);
- }
+ let cg_clif_dylib = build_backend::build_backend(channel, &host_triple, use_unstable_features);
+ match command {
+ Command::Test => {
+ tests::run_tests(
+ channel,
+ sysroot_kind,
+ &target_dir,
+ &cg_clif_dylib,
+ &host_triple,
+ &target_triple,
+ );
- let cg_clif_build_dir =
- build_backend::build_backend(channel, &host_triple, use_unstable_features);
- build_sysroot::build_sysroot(
- channel,
- sysroot_kind,
- &target_dir,
- cg_clif_build_dir,
- &host_triple,
- &target_triple,
- );
+ abi_cafe::run(
+ channel,
+ sysroot_kind,
+ &target_dir,
+ &cg_clif_dylib,
+ &host_triple,
+ &target_triple,
+ );
+ }
+ Command::Build => {
+ build_sysroot::build_sysroot(
+ channel,
+ sysroot_kind,
+ &target_dir,
+ &cg_clif_dylib,
+ &host_triple,
+ &target_triple,
+ );
+ }
+ }
}
diff --git a/compiler/rustc_codegen_cranelift/build_system/prepare.rs b/compiler/rustc_codegen_cranelift/build_system/prepare.rs
index 8bb00352d..3111f62f6 100644
--- a/compiler/rustc_codegen_cranelift/build_system/prepare.rs
+++ b/compiler/rustc_codegen_cranelift/build_system/prepare.rs
@@ -1,57 +1,63 @@
use std::env;
use std::ffi::OsStr;
-use std::ffi::OsString;
use std::fs;
-use std::path::Path;
+use std::path::{Path, PathBuf};
use std::process::Command;
use super::rustc_info::{get_file_name, get_rustc_path, get_rustc_version};
-use super::utils::{copy_dir_recursively, spawn_and_wait};
+use super::utils::{cargo_command, copy_dir_recursively, spawn_and_wait};
+
+pub(crate) const ABI_CAFE: GitRepo =
+ GitRepo::github("Gankra", "abi-cafe", "4c6dc8c9c687e2b3a760ff2176ce236872b37212", "abi-cafe");
+
+pub(crate) const RAND: GitRepo =
+ GitRepo::github("rust-random", "rand", "0f933f9c7176e53b2a3c7952ded484e1783f0bf1", "rand");
+
+pub(crate) const REGEX: GitRepo =
+ GitRepo::github("rust-lang", "regex", "341f207c1071f7290e3f228c710817c280c8dca1", "regex");
+
+pub(crate) const PORTABLE_SIMD: GitRepo = GitRepo::github(
+ "rust-lang",
+ "portable-simd",
+ "d5cd4a8112d958bd3a252327e0d069a6363249bd",
+ "portable-simd",
+);
+
+pub(crate) const SIMPLE_RAYTRACER: GitRepo = GitRepo::github(
+ "ebobby",
+ "simple-raytracer",
+ "804a7a21b9e673a482797aa289a18ed480e4d813",
+ "<none>",
+);
pub(crate) fn prepare() {
+ if Path::new("download").exists() {
+ std::fs::remove_dir_all(Path::new("download")).unwrap();
+ }
+ std::fs::create_dir_all(Path::new("download")).unwrap();
+
prepare_sysroot();
+ // FIXME maybe install this only locally?
eprintln!("[INSTALL] hyperfine");
Command::new("cargo").arg("install").arg("hyperfine").spawn().unwrap().wait().unwrap();
- clone_repo_shallow_github(
- "rand",
- "rust-random",
- "rand",
- "0f933f9c7176e53b2a3c7952ded484e1783f0bf1",
- );
- apply_patches("rand", Path::new("rand"));
-
- clone_repo_shallow_github(
- "regex",
- "rust-lang",
- "regex",
- "341f207c1071f7290e3f228c710817c280c8dca1",
- );
-
- clone_repo_shallow_github(
- "portable-simd",
- "rust-lang",
- "portable-simd",
- "b8d6b6844602f80af79cd96401339ec594d472d8",
- );
- apply_patches("portable-simd", Path::new("portable-simd"));
-
- clone_repo_shallow_github(
- "simple-raytracer",
- "ebobby",
- "simple-raytracer",
- "804a7a21b9e673a482797aa289a18ed480e4d813",
- );
+ ABI_CAFE.fetch();
+ RAND.fetch();
+ REGEX.fetch();
+ PORTABLE_SIMD.fetch();
+ SIMPLE_RAYTRACER.fetch();
eprintln!("[LLVM BUILD] simple-raytracer");
- let mut build_cmd = Command::new("cargo");
- build_cmd.arg("build").env_remove("CARGO_TARGET_DIR").current_dir("simple-raytracer");
+ let build_cmd = cargo_command("cargo", "build", None, &SIMPLE_RAYTRACER.source_dir());
spawn_and_wait(build_cmd);
fs::copy(
- Path::new("simple-raytracer/target/debug").join(get_file_name("main", "bin")),
- // FIXME use get_file_name here too once testing is migrated to rust
- "simple-raytracer/raytracer_cg_llvm",
+ SIMPLE_RAYTRACER
+ .source_dir()
+ .join("target")
+ .join("debug")
+ .join(get_file_name("main", "bin")),
+ SIMPLE_RAYTRACER.source_dir().join(get_file_name("raytracer_cg_llvm", "bin")),
)
.unwrap();
}
@@ -83,38 +89,78 @@ fn prepare_sysroot() {
apply_patches("sysroot", &sysroot_src);
}
+pub(crate) struct GitRepo {
+ url: GitRepoUrl,
+ rev: &'static str,
+ patch_name: &'static str,
+}
+
+enum GitRepoUrl {
+ Github { user: &'static str, repo: &'static str },
+}
+
+impl GitRepo {
+ const fn github(
+ user: &'static str,
+ repo: &'static str,
+ rev: &'static str,
+ patch_name: &'static str,
+ ) -> GitRepo {
+ GitRepo { url: GitRepoUrl::Github { user, repo }, rev, patch_name }
+ }
+
+ pub(crate) fn source_dir(&self) -> PathBuf {
+ match self.url {
+ GitRepoUrl::Github { user: _, repo } => {
+ std::env::current_dir().unwrap().join("download").join(repo)
+ }
+ }
+ }
+
+ fn fetch(&self) {
+ match self.url {
+ GitRepoUrl::Github { user, repo } => {
+ clone_repo_shallow_github(&self.source_dir(), user, repo, self.rev);
+ }
+ }
+ apply_patches(self.patch_name, &self.source_dir());
+ }
+}
+
#[allow(dead_code)]
-fn clone_repo(target_dir: &str, repo: &str, rev: &str) {
+fn clone_repo(download_dir: &Path, repo: &str, rev: &str) {
eprintln!("[CLONE] {}", repo);
// Ignore exit code as the repo may already have been checked out
- Command::new("git").arg("clone").arg(repo).arg(target_dir).spawn().unwrap().wait().unwrap();
+ Command::new("git").arg("clone").arg(repo).arg(&download_dir).spawn().unwrap().wait().unwrap();
let mut clean_cmd = Command::new("git");
- clean_cmd.arg("checkout").arg("--").arg(".").current_dir(target_dir);
+ clean_cmd.arg("checkout").arg("--").arg(".").current_dir(&download_dir);
spawn_and_wait(clean_cmd);
let mut checkout_cmd = Command::new("git");
- checkout_cmd.arg("checkout").arg("-q").arg(rev).current_dir(target_dir);
+ checkout_cmd.arg("checkout").arg("-q").arg(rev).current_dir(download_dir);
spawn_and_wait(checkout_cmd);
}
-fn clone_repo_shallow_github(target_dir: &str, username: &str, repo: &str, rev: &str) {
+fn clone_repo_shallow_github(download_dir: &Path, user: &str, repo: &str, rev: &str) {
if cfg!(windows) {
// Older windows doesn't have tar or curl by default. Fall back to using git.
- clone_repo(target_dir, &format!("https://github.com/{}/{}.git", username, repo), rev);
+ clone_repo(download_dir, &format!("https://github.com/{}/{}.git", user, repo), rev);
return;
}
- let archive_url = format!("https://github.com/{}/{}/archive/{}.tar.gz", username, repo, rev);
- let archive_file = format!("{}.tar.gz", rev);
- let archive_dir = format!("{}-{}", repo, rev);
+ let downloads_dir = std::env::current_dir().unwrap().join("download");
+
+ let archive_url = format!("https://github.com/{}/{}/archive/{}.tar.gz", user, repo, rev);
+ let archive_file = downloads_dir.join(format!("{}.tar.gz", rev));
+ let archive_dir = downloads_dir.join(format!("{}-{}", repo, rev));
- eprintln!("[DOWNLOAD] {}/{} from {}", username, repo, archive_url);
+ eprintln!("[DOWNLOAD] {}/{} from {}", user, repo, archive_url);
// Remove previous results if they exist
let _ = std::fs::remove_file(&archive_file);
let _ = std::fs::remove_dir_all(&archive_dir);
- let _ = std::fs::remove_dir_all(target_dir);
+ let _ = std::fs::remove_dir_all(&download_dir);
// Download zip archive
let mut download_cmd = Command::new("curl");
@@ -123,13 +169,13 @@ fn clone_repo_shallow_github(target_dir: &str, username: &str, repo: &str, rev:
// Unpack tar archive
let mut unpack_cmd = Command::new("tar");
- unpack_cmd.arg("xf").arg(&archive_file);
+ unpack_cmd.arg("xf").arg(&archive_file).current_dir(downloads_dir);
spawn_and_wait(unpack_cmd);
// Rename unpacked dir to the expected name
- std::fs::rename(archive_dir, target_dir).unwrap();
+ std::fs::rename(archive_dir, &download_dir).unwrap();
- init_git_repo(Path::new(target_dir));
+ init_git_repo(&download_dir);
// Cleanup
std::fs::remove_file(archive_file).unwrap();
@@ -149,14 +195,20 @@ fn init_git_repo(repo_dir: &Path) {
spawn_and_wait(git_commit_cmd);
}
-fn get_patches(crate_name: &str) -> Vec<OsString> {
- let mut patches: Vec<_> = fs::read_dir("patches")
+fn get_patches(source_dir: &Path, crate_name: &str) -> Vec<PathBuf> {
+ let mut patches: Vec<_> = fs::read_dir(source_dir.join("patches"))
.unwrap()
.map(|entry| entry.unwrap().path())
.filter(|path| path.extension() == Some(OsStr::new("patch")))
- .map(|path| path.file_name().unwrap().to_owned())
- .filter(|file_name| {
- file_name.to_str().unwrap().split_once("-").unwrap().1.starts_with(crate_name)
+ .filter(|path| {
+ path.file_name()
+ .unwrap()
+ .to_str()
+ .unwrap()
+ .split_once("-")
+ .unwrap()
+ .1
+ .starts_with(crate_name)
})
.collect();
patches.sort();
@@ -164,11 +216,18 @@ fn get_patches(crate_name: &str) -> Vec<OsString> {
}
fn apply_patches(crate_name: &str, target_dir: &Path) {
- for patch in get_patches(crate_name) {
- eprintln!("[PATCH] {:?} <- {:?}", target_dir.file_name().unwrap(), patch);
- let patch_arg = env::current_dir().unwrap().join("patches").join(patch);
+ if crate_name == "<none>" {
+ return;
+ }
+
+ for patch in get_patches(&std::env::current_dir().unwrap(), crate_name) {
+ eprintln!(
+ "[PATCH] {:?} <- {:?}",
+ target_dir.file_name().unwrap(),
+ patch.file_name().unwrap()
+ );
let mut apply_patch_cmd = Command::new("git");
- apply_patch_cmd.arg("am").arg(patch_arg).arg("-q").current_dir(target_dir);
+ apply_patch_cmd.arg("am").arg(patch).arg("-q").current_dir(target_dir);
spawn_and_wait(apply_patch_cmd);
}
}
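
The new `GitRepo` constants replace the ad-hoc clone calls: each dependency is described once, fetched into `download/<repo>`, and patched afterwards, with `"<none>"` as the patch name opting out of patching. A stand-in sketch of the `source_dir` path construction under that assumption:

    use std::path::PathBuf;

    // Stand-in for GitRepo::source_dir: every checkout lives under ./download/<repo>.
    fn source_dir(repo: &str) -> PathBuf {
        std::env::current_dir().unwrap().join("download").join(repo)
    }

    fn main() {
        // e.g. the rand checkout prepared above ends up in ./download/rand.
        println!("{}", source_dir("rand").display());
    }
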
diff --git a/compiler/rustc_codegen_cranelift/build_system/rustc_info.rs b/compiler/rustc_codegen_cranelift/build_system/rustc_info.rs
index 9206bb02b..3c08b6fa3 100644
--- a/compiler/rustc_codegen_cranelift/build_system/rustc_info.rs
+++ b/compiler/rustc_codegen_cranelift/build_system/rustc_info.rs
@@ -63,3 +63,12 @@ pub(crate) fn get_file_name(crate_name: &str, crate_type: &str) -> String {
assert!(file_name.contains(crate_name));
file_name
}
+
+/// Similar to `get_file_name`, but converts any dashes (`-`) in the `crate_name` to
+/// underscores (`_`). This is needed for the rustc and cargo wrappers,
+/// which have a dash in their names; dashes are not allowed in crate names.
+pub(crate) fn get_wrapper_file_name(crate_name: &str, crate_type: &str) -> String {
+ let crate_name = crate_name.replace('-', "_");
+ let wrapper_name = get_file_name(&crate_name, crate_type);
+ wrapper_name.replace('_', "-")
+}
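
A self-contained worked example of the dash/underscore round trip described in the doc comment, using a stand-in for `get_file_name` (the real one queries rustc for the target's file naming):

    // Stand-in: pretend the target produces plain "<crate_name>" binaries with no suffix.
    fn get_file_name_stub(crate_name: &str, _crate_type: &str) -> String {
        crate_name.to_string()
    }

    fn get_wrapper_file_name(crate_name: &str, crate_type: &str) -> String {
        // "rustc-clif" is not a legal crate name, so it is built as "rustc_clif"...
        let crate_name = crate_name.replace('-', "_");
        let wrapper_name = get_file_name_stub(&crate_name, crate_type);
        // ...and the produced file is renamed back to the dashed form users invoke.
        wrapper_name.replace('_', "-")
    }

    fn main() {
        assert_eq!(get_wrapper_file_name("rustc-clif", "bin"), "rustc-clif");
        assert_eq!(get_wrapper_file_name("cargo-clif", "bin"), "cargo-clif");
    }
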
diff --git a/compiler/rustc_codegen_cranelift/build_system/tests.rs b/compiler/rustc_codegen_cranelift/build_system/tests.rs
new file mode 100644
index 000000000..a414b60f4
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/build_system/tests.rs
@@ -0,0 +1,610 @@
+use super::build_sysroot;
+use super::config;
+use super::prepare;
+use super::rustc_info::get_wrapper_file_name;
+use super::utils::{cargo_command, hyperfine_command, spawn_and_wait, spawn_and_wait_with_input};
+use build_system::SysrootKind;
+use std::env;
+use std::ffi::OsStr;
+use std::fs;
+use std::path::{Path, PathBuf};
+use std::process::Command;
+
+struct TestCase {
+ config: &'static str,
+ func: &'static dyn Fn(&TestRunner),
+}
+
+impl TestCase {
+ const fn new(config: &'static str, func: &'static dyn Fn(&TestRunner)) -> Self {
+ Self { config, func }
+ }
+}
+
+const NO_SYSROOT_SUITE: &[TestCase] = &[
+ TestCase::new("build.mini_core", &|runner| {
+ runner.run_rustc([
+ "example/mini_core.rs",
+ "--crate-name",
+ "mini_core",
+ "--crate-type",
+ "lib,dylib",
+ "--target",
+ &runner.target_triple,
+ ]);
+ }),
+ TestCase::new("build.example", &|runner| {
+ runner.run_rustc([
+ "example/example.rs",
+ "--crate-type",
+ "lib",
+ "--target",
+ &runner.target_triple,
+ ]);
+ }),
+ TestCase::new("jit.mini_core_hello_world", &|runner| {
+ let mut jit_cmd = runner.rustc_command([
+ "-Zunstable-options",
+ "-Cllvm-args=mode=jit",
+ "-Cprefer-dynamic",
+ "example/mini_core_hello_world.rs",
+ "--cfg",
+ "jit",
+ "--target",
+ &runner.host_triple,
+ ]);
+ jit_cmd.env("CG_CLIF_JIT_ARGS", "abc bcd");
+ spawn_and_wait(jit_cmd);
+
+ eprintln!("[JIT-lazy] mini_core_hello_world");
+ let mut jit_cmd = runner.rustc_command([
+ "-Zunstable-options",
+ "-Cllvm-args=mode=jit-lazy",
+ "-Cprefer-dynamic",
+ "example/mini_core_hello_world.rs",
+ "--cfg",
+ "jit",
+ "--target",
+ &runner.host_triple,
+ ]);
+ jit_cmd.env("CG_CLIF_JIT_ARGS", "abc bcd");
+ spawn_and_wait(jit_cmd);
+ }),
+ TestCase::new("aot.mini_core_hello_world", &|runner| {
+ runner.run_rustc([
+ "example/mini_core_hello_world.rs",
+ "--crate-name",
+ "mini_core_hello_world",
+ "--crate-type",
+ "bin",
+ "-g",
+ "--target",
+ &runner.target_triple,
+ ]);
+ runner.run_out_command("mini_core_hello_world", ["abc", "bcd"]);
+ }),
+];
+
+const BASE_SYSROOT_SUITE: &[TestCase] = &[
+ TestCase::new("aot.arbitrary_self_types_pointers_and_wrappers", &|runner| {
+ runner.run_rustc([
+ "example/arbitrary_self_types_pointers_and_wrappers.rs",
+ "--crate-name",
+ "arbitrary_self_types_pointers_and_wrappers",
+ "--crate-type",
+ "bin",
+ "--target",
+ &runner.target_triple,
+ ]);
+ runner.run_out_command("arbitrary_self_types_pointers_and_wrappers", []);
+ }),
+ TestCase::new("aot.issue_91827_extern_types", &|runner| {
+ runner.run_rustc([
+ "example/issue-91827-extern-types.rs",
+ "--crate-name",
+ "issue_91827_extern_types",
+ "--crate-type",
+ "bin",
+ "--target",
+ &runner.target_triple,
+ ]);
+ runner.run_out_command("issue_91827_extern_types", []);
+ }),
+ TestCase::new("build.alloc_system", &|runner| {
+ runner.run_rustc([
+ "example/alloc_system.rs",
+ "--crate-type",
+ "lib",
+ "--target",
+ &runner.target_triple,
+ ]);
+ }),
+ TestCase::new("aot.alloc_example", &|runner| {
+ runner.run_rustc([
+ "example/alloc_example.rs",
+ "--crate-type",
+ "bin",
+ "--target",
+ &runner.target_triple,
+ ]);
+ runner.run_out_command("alloc_example", []);
+ }),
+ TestCase::new("jit.std_example", &|runner| {
+ runner.run_rustc([
+ "-Zunstable-options",
+ "-Cllvm-args=mode=jit",
+ "-Cprefer-dynamic",
+ "example/std_example.rs",
+ "--target",
+ &runner.host_triple,
+ ]);
+
+ eprintln!("[JIT-lazy] std_example");
+ runner.run_rustc([
+ "-Zunstable-options",
+ "-Cllvm-args=mode=jit-lazy",
+ "-Cprefer-dynamic",
+ "example/std_example.rs",
+ "--target",
+ &runner.host_triple,
+ ]);
+ }),
+ TestCase::new("aot.std_example", &|runner| {
+ runner.run_rustc([
+ "example/std_example.rs",
+ "--crate-type",
+ "bin",
+ "--target",
+ &runner.target_triple,
+ ]);
+ runner.run_out_command("std_example", ["arg"]);
+ }),
+ TestCase::new("aot.dst_field_align", &|runner| {
+ runner.run_rustc([
+ "example/dst-field-align.rs",
+ "--crate-name",
+ "dst_field_align",
+ "--crate-type",
+ "bin",
+ "--target",
+ &runner.target_triple,
+ ]);
+ runner.run_out_command("dst_field_align", []);
+ }),
+ TestCase::new("aot.subslice-patterns-const-eval", &|runner| {
+ runner.run_rustc([
+ "example/subslice-patterns-const-eval.rs",
+ "--crate-type",
+ "bin",
+ "-Cpanic=abort",
+ "--target",
+ &runner.target_triple,
+ ]);
+ runner.run_out_command("subslice-patterns-const-eval", []);
+ }),
+ TestCase::new("aot.track-caller-attribute", &|runner| {
+ runner.run_rustc([
+ "example/track-caller-attribute.rs",
+ "--crate-type",
+ "bin",
+ "-Cpanic=abort",
+ "--target",
+ &runner.target_triple,
+ ]);
+ runner.run_out_command("track-caller-attribute", []);
+ }),
+ TestCase::new("aot.float-minmax-pass", &|runner| {
+ runner.run_rustc([
+ "example/float-minmax-pass.rs",
+ "--crate-type",
+ "bin",
+ "-Cpanic=abort",
+ "--target",
+ &runner.target_triple,
+ ]);
+ runner.run_out_command("float-minmax-pass", []);
+ }),
+ TestCase::new("aot.mod_bench", &|runner| {
+ runner.run_rustc([
+ "example/mod_bench.rs",
+ "--crate-type",
+ "bin",
+ "--target",
+ &runner.target_triple,
+ ]);
+ runner.run_out_command("mod_bench", []);
+ }),
+];
+
+const EXTENDED_SYSROOT_SUITE: &[TestCase] = &[
+ TestCase::new("test.rust-random/rand", &|runner| {
+ runner.in_dir(prepare::RAND.source_dir(), |runner| {
+ runner.run_cargo("clean", []);
+
+ if runner.host_triple == runner.target_triple {
+ eprintln!("[TEST] rust-random/rand");
+ runner.run_cargo("test", ["--workspace"]);
+ } else {
+ eprintln!("[AOT] rust-random/rand");
+ runner.run_cargo("build", ["--workspace", "--tests"]);
+ }
+ });
+ }),
+ TestCase::new("bench.simple-raytracer", &|runner| {
+ runner.in_dir(prepare::SIMPLE_RAYTRACER.source_dir(), |runner| {
+ let run_runs = env::var("RUN_RUNS").unwrap_or("10".to_string()).parse().unwrap();
+
+ if runner.host_triple == runner.target_triple {
+ eprintln!("[BENCH COMPILE] ebobby/simple-raytracer");
+ let prepare = runner.cargo_command("clean", []);
+
+ let llvm_build_cmd = cargo_command("cargo", "build", None, Path::new("."));
+
+ let cargo_clif = runner
+ .root_dir
+ .clone()
+ .join("build")
+ .join(get_wrapper_file_name("cargo-clif", "bin"));
+ let clif_build_cmd = cargo_command(cargo_clif, "build", None, Path::new("."));
+
+ let bench_compile =
+ hyperfine_command(1, run_runs, Some(prepare), llvm_build_cmd, clif_build_cmd);
+
+ spawn_and_wait(bench_compile);
+
+ eprintln!("[BENCH RUN] ebobby/simple-raytracer");
+ fs::copy(PathBuf::from("./target/debug/main"), PathBuf::from("raytracer_cg_clif"))
+ .unwrap();
+
+ let bench_run = hyperfine_command(
+ 0,
+ run_runs,
+ None,
+ Command::new("./raytracer_cg_llvm"),
+ Command::new("./raytracer_cg_clif"),
+ );
+ spawn_and_wait(bench_run);
+ } else {
+ runner.run_cargo("clean", []);
+ eprintln!("[BENCH COMPILE] ebobby/simple-raytracer (skipped)");
+ eprintln!("[COMPILE] ebobby/simple-raytracer");
+ runner.run_cargo("build", []);
+ eprintln!("[BENCH RUN] ebobby/simple-raytracer (skipped)");
+ }
+ });
+ }),
+ TestCase::new("test.libcore", &|runner| {
+ runner.in_dir(
+ std::env::current_dir()
+ .unwrap()
+ .join("build_sysroot")
+ .join("sysroot_src")
+ .join("library")
+ .join("core")
+ .join("tests"),
+ |runner| {
+ runner.run_cargo("clean", []);
+
+ if runner.host_triple == runner.target_triple {
+ runner.run_cargo("test", []);
+ } else {
+ eprintln!("Cross-Compiling: Not running tests");
+ runner.run_cargo("build", ["--tests"]);
+ }
+ },
+ );
+ }),
+ TestCase::new("test.regex-shootout-regex-dna", &|runner| {
+ runner.in_dir(prepare::REGEX.source_dir(), |runner| {
+ runner.run_cargo("clean", []);
+
+ // newer aho_corasick versions throw a deprecation warning
+ let lint_rust_flags = format!("{} --cap-lints warn", runner.rust_flags);
+
+ let mut build_cmd = runner.cargo_command("build", ["--example", "shootout-regex-dna"]);
+ build_cmd.env("RUSTFLAGS", lint_rust_flags.clone());
+ spawn_and_wait(build_cmd);
+
+ if runner.host_triple == runner.target_triple {
+ let mut run_cmd = runner.cargo_command("run", ["--example", "shootout-regex-dna"]);
+ run_cmd.env("RUSTFLAGS", lint_rust_flags);
+
+ let input =
+ fs::read_to_string(PathBuf::from("examples/regexdna-input.txt")).unwrap();
+ let expected_path = PathBuf::from("examples/regexdna-output.txt");
+ let expected = fs::read_to_string(&expected_path).unwrap();
+
+ let output = spawn_and_wait_with_input(run_cmd, input);
+ // Make sure `[codegen mono items] start` doesn't poison the diff
+ let output = output
+ .lines()
+ .filter(|line| !line.contains("codegen mono items"))
+ .chain(Some("")) // This just adds the trailing newline
+ .collect::<Vec<&str>>()
+ .join("\r\n");
+
+ let output_matches = expected.lines().eq(output.lines());
+ if !output_matches {
+ let res_path = PathBuf::from("res.txt");
+ fs::write(&res_path, &output).unwrap();
+
+ if cfg!(windows) {
+ println!("Output files don't match!");
+ println!("Expected Output:\n{}", expected);
+ println!("Actual Output:\n{}", output);
+ } else {
+ let mut diff = Command::new("diff");
+ diff.arg("-u");
+ diff.arg(res_path);
+ diff.arg(expected_path);
+ spawn_and_wait(diff);
+ }
+
+ std::process::exit(1);
+ }
+ }
+ });
+ }),
+ TestCase::new("test.regex", &|runner| {
+ runner.in_dir(prepare::REGEX.source_dir(), |runner| {
+ runner.run_cargo("clean", []);
+
+ // newer aho_corasick versions throw a deprecation warning
+ let lint_rust_flags = format!("{} --cap-lints warn", runner.rust_flags);
+
+ if runner.host_triple == runner.target_triple {
+ let mut run_cmd = runner.cargo_command(
+ "test",
+ [
+ "--tests",
+ "--",
+ "--exclude-should-panic",
+ "--test-threads",
+ "1",
+ "-Zunstable-options",
+ "-q",
+ ],
+ );
+ run_cmd.env("RUSTFLAGS", lint_rust_flags);
+ spawn_and_wait(run_cmd);
+ } else {
+ eprintln!("Cross-Compiling: Not running tests");
+ let mut build_cmd =
+ runner.cargo_command("build", ["--tests", "--target", &runner.target_triple]);
+ build_cmd.env("RUSTFLAGS", lint_rust_flags.clone());
+ spawn_and_wait(build_cmd);
+ }
+ });
+ }),
+ TestCase::new("test.portable-simd", &|runner| {
+ runner.in_dir(prepare::PORTABLE_SIMD.source_dir(), |runner| {
+ runner.run_cargo("clean", []);
+ runner.run_cargo("build", ["--all-targets", "--target", &runner.target_triple]);
+
+ if runner.host_triple == runner.target_triple {
+ runner.run_cargo("test", ["-q"]);
+ }
+ });
+ }),
+];
+
+pub(crate) fn run_tests(
+ channel: &str,
+ sysroot_kind: SysrootKind,
+ target_dir: &Path,
+ cg_clif_dylib: &Path,
+ host_triple: &str,
+ target_triple: &str,
+) {
+ let runner = TestRunner::new(host_triple.to_string(), target_triple.to_string());
+
+ if config::get_bool("testsuite.no_sysroot") {
+ build_sysroot::build_sysroot(
+ channel,
+ SysrootKind::None,
+ &target_dir,
+ cg_clif_dylib,
+ &host_triple,
+ &target_triple,
+ );
+
+ let _ = fs::remove_dir_all(Path::new("target").join("out"));
+ runner.run_testsuite(NO_SYSROOT_SUITE);
+ } else {
+ eprintln!("[SKIP] no_sysroot tests");
+ }
+
+ let run_base_sysroot = config::get_bool("testsuite.base_sysroot");
+ let run_extended_sysroot = config::get_bool("testsuite.extended_sysroot");
+
+ if run_base_sysroot || run_extended_sysroot {
+ build_sysroot::build_sysroot(
+ channel,
+ sysroot_kind,
+ &target_dir,
+ cg_clif_dylib,
+ &host_triple,
+ &target_triple,
+ );
+ }
+
+ if run_base_sysroot {
+ runner.run_testsuite(BASE_SYSROOT_SUITE);
+ } else {
+ eprintln!("[SKIP] base_sysroot tests");
+ }
+
+ if run_extended_sysroot {
+ runner.run_testsuite(EXTENDED_SYSROOT_SUITE);
+ } else {
+ eprintln!("[SKIP] extended_sysroot tests");
+ }
+}
+
+struct TestRunner {
+ root_dir: PathBuf,
+ out_dir: PathBuf,
+ jit_supported: bool,
+ rust_flags: String,
+ run_wrapper: Vec<String>,
+ host_triple: String,
+ target_triple: String,
+}
+
+impl TestRunner {
+ pub fn new(host_triple: String, target_triple: String) -> Self {
+ let root_dir = env::current_dir().unwrap();
+
+ let mut out_dir = root_dir.clone();
+ out_dir.push("target");
+ out_dir.push("out");
+
+ let is_native = host_triple == target_triple;
+ let jit_supported =
+ target_triple.contains("x86_64") && is_native && !host_triple.contains("windows");
+
+ let mut rust_flags = env::var("RUSTFLAGS").ok().unwrap_or("".to_string());
+ let mut run_wrapper = Vec::new();
+
+ if !is_native {
+ match target_triple.as_str() {
+ "aarch64-unknown-linux-gnu" => {
+ // We are cross-compiling for aarch64. Use the correct linker and run tests in qemu.
+ rust_flags = format!("-Clinker=aarch64-linux-gnu-gcc{}", rust_flags);
+ run_wrapper = vec!["qemu-aarch64", "-L", "/usr/aarch64-linux-gnu"];
+ }
+ "x86_64-pc-windows-gnu" => {
+ // We are cross-compiling for Windows. Run tests in wine.
+ run_wrapper = vec!["wine"];
+ }
+ _ => {
+ println!("Unknown non-native platform");
+ }
+ }
+ }
+
+ // FIXME fix `#[linkage = "extern_weak"]` without this
+ if host_triple.contains("darwin") {
+ rust_flags = format!("{} -Clink-arg=-undefined -Clink-arg=dynamic_lookup", rust_flags);
+ }
+
+ Self {
+ root_dir,
+ out_dir,
+ jit_supported,
+ rust_flags,
+ run_wrapper: run_wrapper.iter().map(|s| s.to_string()).collect(),
+ host_triple,
+ target_triple,
+ }
+ }
+
+ pub fn run_testsuite(&self, tests: &[TestCase]) {
+ for &TestCase { config, func } in tests {
+ let (tag, testname) = config.split_once('.').unwrap();
+ let tag = tag.to_uppercase();
+ let is_jit_test = tag == "JIT";
+
+ if !config::get_bool(config) || (is_jit_test && !self.jit_supported) {
+ eprintln!("[{tag}] {testname} (skipped)");
+ continue;
+ } else {
+ eprintln!("[{tag}] {testname}");
+ }
+
+ func(self);
+ }
+ }
+
+ fn in_dir(&self, new: impl AsRef<Path>, callback: impl FnOnce(&TestRunner)) {
+ let current = env::current_dir().unwrap();
+
+ env::set_current_dir(new).unwrap();
+ callback(self);
+ env::set_current_dir(current).unwrap();
+ }
+
+ fn rustc_command<I, S>(&self, args: I) -> Command
+ where
+ I: IntoIterator<Item = S>,
+ S: AsRef<OsStr>,
+ {
+ let mut rustc_clif = self.root_dir.clone();
+ rustc_clif.push("build");
+ rustc_clif.push(get_wrapper_file_name("rustc-clif", "bin"));
+
+ let mut cmd = Command::new(rustc_clif);
+ cmd.args(self.rust_flags.split_whitespace());
+ cmd.arg("-L");
+ cmd.arg(format!("crate={}", self.out_dir.display()));
+ cmd.arg("--out-dir");
+ cmd.arg(format!("{}", self.out_dir.display()));
+ cmd.arg("-Cdebuginfo=2");
+ cmd.args(args);
+ cmd
+ }
+
+ fn run_rustc<I, S>(&self, args: I)
+ where
+ I: IntoIterator<Item = S>,
+ S: AsRef<OsStr>,
+ {
+ spawn_and_wait(self.rustc_command(args));
+ }
+
+ fn run_out_command<'a, I>(&self, name: &str, args: I)
+ where
+ I: IntoIterator<Item = &'a str>,
+ {
+ let mut full_cmd = vec![];
+
+ // Prepend the run wrapper arguments, if any
+ if !self.run_wrapper.is_empty() {
+ full_cmd.extend(self.run_wrapper.iter().cloned());
+ }
+
+ full_cmd.push({
+ let mut out_path = self.out_dir.clone();
+ out_path.push(name);
+ out_path.to_str().unwrap().to_string()
+ });
+
+ for arg in args.into_iter() {
+ full_cmd.push(arg.to_string());
+ }
+
+ let mut cmd_iter = full_cmd.into_iter();
+ let first = cmd_iter.next().unwrap();
+
+ let mut cmd = Command::new(first);
+ cmd.args(cmd_iter);
+
+ spawn_and_wait(cmd);
+ }
+
+ fn cargo_command<'a, I>(&self, subcommand: &str, args: I) -> Command
+ where
+ I: IntoIterator<Item = &'a str>,
+ {
+ let mut cargo_clif = self.root_dir.clone();
+ cargo_clif.push("build");
+ cargo_clif.push(get_wrapper_file_name("cargo-clif", "bin"));
+
+ let mut cmd = cargo_command(
+ cargo_clif,
+ subcommand,
+ if subcommand == "clean" { None } else { Some(&self.target_triple) },
+ Path::new("."),
+ );
+ cmd.args(args);
+ cmd.env("RUSTFLAGS", &self.rust_flags);
+ cmd
+ }
+
+ fn run_cargo<'a, I>(&self, subcommand: &str, args: I)
+ where
+ I: IntoIterator<Item = &'a str>,
+ {
+ spawn_and_wait(self.cargo_command(subcommand, args));
+ }
+}
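
As a small aside, the config-key handling in run_testsuite above can be summarised with a self-contained sketch; should_run, enabled and jit_supported are hypothetical stand-ins for config::get_bool and the TestRunner field:

// Illustrative only: split "tag.testname", uppercase the tag for logging, and skip
// the case when it is disabled in config.txt or when a JIT test lacks JIT support.
fn should_run(config_key: &str, enabled: bool, jit_supported: bool) -> (String, bool) {
    let (tag, _testname) = config_key.split_once('.').unwrap();
    let tag = tag.to_uppercase();
    let run = enabled && !(tag == "JIT" && !jit_supported);
    (tag, run)
}

fn main() {
    assert_eq!(should_run("jit.std_example", true, false), ("JIT".to_string(), false));
    assert_eq!(should_run("aot.mod_bench", true, true), ("AOT".to_string(), true));
}
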
diff --git a/compiler/rustc_codegen_cranelift/build_system/utils.rs b/compiler/rustc_codegen_cranelift/build_system/utils.rs
index 12b5d70fa..48da64906 100644
--- a/compiler/rustc_codegen_cranelift/build_system/utils.rs
+++ b/compiler/rustc_codegen_cranelift/build_system/utils.rs
@@ -1,6 +1,54 @@
+use std::env;
use std::fs;
+use std::io::Write;
use std::path::Path;
-use std::process::{self, Command};
+use std::process::{self, Command, Stdio};
+
+pub(crate) fn cargo_command(
+ cargo: impl AsRef<Path>,
+ subcommand: &str,
+ triple: Option<&str>,
+ source_dir: &Path,
+) -> Command {
+ let mut cmd = Command::new(cargo.as_ref());
+ cmd.arg(subcommand)
+ .arg("--manifest-path")
+ .arg(source_dir.join("Cargo.toml"))
+ .arg("--target-dir")
+ .arg(source_dir.join("target"));
+
+ if let Some(triple) = triple {
+ cmd.arg("--target").arg(triple);
+ }
+
+ cmd
+}
+
+pub(crate) fn hyperfine_command(
+ warmup: u64,
+ runs: u64,
+ prepare: Option<Command>,
+ a: Command,
+ b: Command,
+) -> Command {
+ let mut bench = Command::new("hyperfine");
+
+ if warmup != 0 {
+ bench.arg("--warmup").arg(warmup.to_string());
+ }
+
+ if runs != 0 {
+ bench.arg("--runs").arg(runs.to_string());
+ }
+
+ if let Some(prepare) = prepare {
+ bench.arg("--prepare").arg(format!("{:?}", prepare));
+ }
+
+ bench.arg(format!("{:?}", a)).arg(format!("{:?}", b));
+
+ bench
+}
#[track_caller]
pub(crate) fn try_hard_link(src: impl AsRef<Path>, dst: impl AsRef<Path>) {
@@ -18,6 +66,27 @@ pub(crate) fn spawn_and_wait(mut cmd: Command) {
}
}
+#[track_caller]
+pub(crate) fn spawn_and_wait_with_input(mut cmd: Command, input: String) -> String {
+ let mut child = cmd
+ .stdin(Stdio::piped())
+ .stdout(Stdio::piped())
+ .spawn()
+ .expect("Failed to spawn child process");
+
+ let mut stdin = child.stdin.take().expect("Failed to open stdin");
+ std::thread::spawn(move || {
+ stdin.write_all(input.as_bytes()).expect("Failed to write to stdin");
+ });
+
+ let output = child.wait_with_output().expect("Failed to read stdout");
+ if !output.status.success() {
+ process::exit(1);
+ }
+
+ String::from_utf8(output.stdout).unwrap()
+}
+
pub(crate) fn copy_dir_recursively(from: &Path, to: &Path) {
for entry in fs::read_dir(from).unwrap() {
let entry = entry.unwrap();
@@ -33,3 +102,7 @@ pub(crate) fn copy_dir_recursively(from: &Path, to: &Path) {
}
}
}
+
+pub(crate) fn is_ci() -> bool {
+ env::var("CI").as_ref().map(|val| &**val) == Ok("true")
+}
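
For illustration, a short runnable sketch of the trick hyperfine_command relies on: Command's Debug output is itself a quoted command line, so each benchmarked command can be handed to hyperfine as a single string argument (the cargo invocation below is arbitrary):

use std::process::Command;

fn main() {
    // Build the command to benchmark, then stringify it via its Debug representation.
    let mut build = Command::new("cargo");
    build.arg("build").arg("--release");

    let mut bench = Command::new("hyperfine");
    bench.arg("--warmup").arg("1");
    bench.arg("--runs").arg("10");
    bench.arg(format!("{:?}", build)); // e.g. "cargo" "build" "--release"

    // Printing instead of spawning keeps the sketch side-effect free.
    println!("{:?}", bench);
}
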
diff --git a/compiler/rustc_codegen_cranelift/clean_all.sh b/compiler/rustc_codegen_cranelift/clean_all.sh
index ea1f8c1e8..fedab2433 100755
--- a/compiler/rustc_codegen_cranelift/clean_all.sh
+++ b/compiler/rustc_codegen_cranelift/clean_all.sh
@@ -3,4 +3,8 @@ set -e
rm -rf build_sysroot/{sysroot_src/,target/,compiler-builtins/,rustc_version}
rm -rf target/ build/ perf.data{,.old} y.bin
-rm -rf rand/ regex/ simple-raytracer/ portable-simd/
+rm -rf download/
+
+# Kept for now in case someone updates their checkout of cg_clif before running clean_all.sh
+# FIXME remove at some point in the future
+rm -rf rand/ regex/ simple-raytracer/ portable-simd/ abi-checker/ abi-cafe/
diff --git a/compiler/rustc_codegen_cranelift/config.txt b/compiler/rustc_codegen_cranelift/config.txt
index b14db27d6..0d539191b 100644
--- a/compiler/rustc_codegen_cranelift/config.txt
+++ b/compiler/rustc_codegen_cranelift/config.txt
@@ -15,3 +15,38 @@
# This option can be changed while the build system is already running for as long as sysroot
# building hasn't started yet.
#keep_sysroot
+
+
+# Testsuite
+#
+# Each test suite item has a corresponding key here. The default is to run all tests.
+# Comment any of these lines to skip individual tests.
+
+testsuite.no_sysroot
+build.mini_core
+build.example
+jit.mini_core_hello_world
+aot.mini_core_hello_world
+
+testsuite.base_sysroot
+aot.arbitrary_self_types_pointers_and_wrappers
+aot.issue_91827_extern_types
+build.alloc_system
+aot.alloc_example
+jit.std_example
+aot.std_example
+aot.dst_field_align
+aot.subslice-patterns-const-eval
+aot.track-caller-attribute
+aot.float-minmax-pass
+aot.mod_bench
+
+testsuite.extended_sysroot
+test.rust-random/rand
+bench.simple-raytracer
+test.libcore
+test.regex-shootout-regex-dna
+test.regex
+test.portable-simd
+
+testsuite.abi-cafe
diff --git a/compiler/rustc_codegen_cranelift/example/alloc_system.rs b/compiler/rustc_codegen_cranelift/example/alloc_system.rs
index cf95c89bc..50261c193 100644
--- a/compiler/rustc_codegen_cranelift/example/alloc_system.rs
+++ b/compiler/rustc_codegen_cranelift/example/alloc_system.rs
@@ -94,7 +94,7 @@ mod platform {
struct Header(*mut u8);
const HEAP_ZERO_MEMORY: DWORD = 0x00000008;
unsafe fn get_header<'a>(ptr: *mut u8) -> &'a mut Header {
- &mut *(ptr as *mut Header).offset(-1)
+ &mut *(ptr as *mut Header).sub(1)
}
unsafe fn align_ptr(ptr: *mut u8, align: usize) -> *mut u8 {
let aligned = ptr.add(align - (ptr as usize & (align - 1)));
diff --git a/compiler/rustc_codegen_cranelift/example/issue-91827-extern-types.rs b/compiler/rustc_codegen_cranelift/example/issue-91827-extern-types.rs
index 2ecc8b823..039100696 100644
--- a/compiler/rustc_codegen_cranelift/example/issue-91827-extern-types.rs
+++ b/compiler/rustc_codegen_cranelift/example/issue-91827-extern-types.rs
@@ -5,7 +5,6 @@
// Test that we can handle unsized types with an extern type tail part.
// Regression test for issue #91827.
-#![feature(const_ptr_offset_from)]
#![feature(extern_types)]
use std::ptr::addr_of;
diff --git a/compiler/rustc_codegen_cranelift/example/mini_core.rs b/compiler/rustc_codegen_cranelift/example/mini_core.rs
index 8b6042a3d..7f85b52f0 100644
--- a/compiler/rustc_codegen_cranelift/example/mini_core.rs
+++ b/compiler/rustc_codegen_cranelift/example/mini_core.rs
@@ -535,7 +535,7 @@ unsafe fn allocate(size: usize, _align: usize) -> *mut u8 {
}
#[lang = "box_free"]
-unsafe fn box_free<T: ?Sized>(ptr: Unique<T>, alloc: ()) {
+unsafe fn box_free<T: ?Sized>(ptr: Unique<T>, _alloc: ()) {
libc::free(ptr.pointer.0 as *mut u8);
}
@@ -559,27 +559,41 @@ pub union MaybeUninit<T> {
pub mod intrinsics {
extern "rust-intrinsic" {
+ #[rustc_safe_intrinsic]
pub fn abort() -> !;
+ #[rustc_safe_intrinsic]
pub fn size_of<T>() -> usize;
pub fn size_of_val<T: ?::Sized>(val: *const T) -> usize;
+ #[rustc_safe_intrinsic]
pub fn min_align_of<T>() -> usize;
pub fn min_align_of_val<T: ?::Sized>(val: *const T) -> usize;
pub fn copy<T>(src: *const T, dst: *mut T, count: usize);
pub fn transmute<T, U>(e: T) -> U;
pub fn ctlz_nonzero<T>(x: T) -> T;
+ #[rustc_safe_intrinsic]
pub fn needs_drop<T: ?::Sized>() -> bool;
+ #[rustc_safe_intrinsic]
pub fn bitreverse<T>(x: T) -> T;
+ #[rustc_safe_intrinsic]
pub fn bswap<T>(x: T) -> T;
pub fn write_bytes<T>(dst: *mut T, val: u8, count: usize);
}
}
pub mod libc {
+ // With the new Universal CRT, msvc has switched to making all the printf functions inline wrapper
+ // functions. legacy_stdio_definitions.lib provides the printf wrapper functions as normal
+ // symbols to link against.
+ #[cfg_attr(unix, link(name = "c"))]
+ #[cfg_attr(target_env="msvc", link(name="legacy_stdio_definitions"))]
+ extern "C" {
+ pub fn printf(format: *const i8, ...) -> i32;
+ }
+
#[cfg_attr(unix, link(name = "c"))]
#[cfg_attr(target_env = "msvc", link(name = "msvcrt"))]
extern "C" {
pub fn puts(s: *const i8) -> i32;
- pub fn printf(format: *const i8, ...) -> i32;
pub fn malloc(size: usize) -> *mut u8;
pub fn free(ptr: *mut u8);
pub fn memcpy(dst: *mut u8, src: *const u8, size: usize);
diff --git a/compiler/rustc_codegen_cranelift/example/mini_core_hello_world.rs b/compiler/rustc_codegen_cranelift/example/mini_core_hello_world.rs
index aa1f239ba..215d3556a 100644
--- a/compiler/rustc_codegen_cranelift/example/mini_core_hello_world.rs
+++ b/compiler/rustc_codegen_cranelift/example/mini_core_hello_world.rs
@@ -93,6 +93,7 @@ fn start<T: Termination + 'static>(
main: fn() -> T,
argc: isize,
argv: *const *const u8,
+ _sigpipe: u8,
) -> isize {
if argc == 3 {
unsafe { puts(*argv as *const i8); }
@@ -139,7 +140,7 @@ pub struct bool_11 {
field10: bool,
}
-extern "C" fn bool_struct_in_11(arg0: bool_11) {}
+extern "C" fn bool_struct_in_11(_arg0: bool_11) {}
#[allow(unreachable_code)] // FIXME false positive
fn main() {
@@ -321,7 +322,7 @@ fn main() {
#[cfg(not(any(jit, windows)))]
test_tls();
- #[cfg(all(not(jit), target_arch = "x86_64", target_os = "linux"))]
+ #[cfg(all(not(jit), target_arch = "x86_64", any(target_os = "linux", target_os = "darwin")))]
unsafe {
global_asm_test();
}
@@ -343,7 +344,7 @@ fn main() {
}
}
-#[cfg(all(not(jit), target_arch = "x86_64", target_os = "linux"))]
+#[cfg(all(not(jit), target_arch = "x86_64", any(target_os = "linux", target_os = "darwin")))]
extern "C" {
fn global_asm_test();
}
@@ -358,6 +359,16 @@ global_asm! {
"
}
+#[cfg(all(not(jit), target_arch = "x86_64", target_os = "darwin"))]
+global_asm! {
+ "
+ .global _global_asm_test
+ _global_asm_test:
+ // comment that would normally be removed by LLVM
+ ret
+ "
+}
+
#[repr(C)]
enum c_void {
_1,
@@ -375,6 +386,7 @@ struct pthread_attr_t {
}
#[link(name = "pthread")]
+#[cfg(unix)]
extern "C" {
fn pthread_attr_init(attr: *mut pthread_attr_t) -> c_int;
@@ -391,6 +403,91 @@ extern "C" {
) -> c_int;
}
+type DWORD = u32;
+type LPDWORD = *mut u32;
+
+type LPVOID = *mut c_void;
+type HANDLE = *mut c_void;
+
+#[link(name = "msvcrt")]
+#[cfg(windows)]
+extern "C" {
+ fn WaitForSingleObject(
+ hHandle: LPVOID,
+ dwMilliseconds: DWORD
+ ) -> DWORD;
+
+ fn CreateThread(
+ lpThreadAttributes: LPVOID, // Technically LPSECURITY_ATTRIBUTES, but we don't use it anyway
+ dwStackSize: usize,
+ lpStartAddress: extern "C" fn(_: *mut c_void) -> *mut c_void,
+ lpParameter: LPVOID,
+ dwCreationFlags: DWORD,
+ lpThreadId: LPDWORD
+ ) -> HANDLE;
+}
+
+struct Thread {
+ #[cfg(windows)]
+ handle: HANDLE,
+ #[cfg(unix)]
+ handle: pthread_t,
+}
+
+impl Thread {
+ unsafe fn create(f: extern "C" fn(_: *mut c_void) -> *mut c_void) -> Self {
+ #[cfg(unix)]
+ {
+ let mut attr: pthread_attr_t = zeroed();
+ let mut thread: pthread_t = 0;
+
+ if pthread_attr_init(&mut attr) != 0 {
+ assert!(false);
+ }
+
+ if pthread_create(&mut thread, &attr, f, 0 as *mut c_void) != 0 {
+ assert!(false);
+ }
+
+ Thread {
+ handle: thread,
+ }
+ }
+
+ #[cfg(windows)]
+ {
+ let handle = CreateThread(0 as *mut c_void, 0, f, 0 as *mut c_void, 0, 0 as *mut u32);
+
+ if (handle as u64) == 0 {
+ assert!(false);
+ }
+
+ Thread {
+ handle,
+ }
+ }
+ }
+
+
+ unsafe fn join(self) {
+ #[cfg(unix)]
+ {
+ let mut res = 0 as *mut c_void;
+ pthread_join(self.handle, &mut res);
+ }
+
+ #[cfg(windows)]
+ {
+ // The INFINITE macro is used to signal operations that do not time out.
+ let infinite = 0xffffffff;
+ assert!(WaitForSingleObject(self.handle, infinite) == 0);
+ }
+ }
+}
+
+
+
+
#[thread_local]
#[cfg(not(jit))]
static mut TLS: u8 = 42;
@@ -404,21 +501,10 @@ extern "C" fn mutate_tls(_: *mut c_void) -> *mut c_void {
#[cfg(not(jit))]
fn test_tls() {
unsafe {
- let mut attr: pthread_attr_t = zeroed();
- let mut thread: pthread_t = 0;
-
assert_eq!(TLS, 42);
- if pthread_attr_init(&mut attr) != 0 {
- assert!(false);
- }
-
- if pthread_create(&mut thread, &attr, mutate_tls, 0 as *mut c_void) != 0 {
- assert!(false);
- }
-
- let mut res = 0 as *mut c_void;
- pthread_join(thread, &mut res);
+ let thread = Thread::create(mutate_tls);
+ thread.join();
// TLS of main thread must not have been changed by the other thread.
assert_eq!(TLS, 42);
diff --git a/compiler/rustc_codegen_cranelift/example/std_example.rs b/compiler/rustc_codegen_cranelift/example/std_example.rs
index 0b5b6cd55..ad108c349 100644
--- a/compiler/rustc_codegen_cranelift/example/std_example.rs
+++ b/compiler/rustc_codegen_cranelift/example/std_example.rs
@@ -1,4 +1,4 @@
-#![feature(core_intrinsics, generators, generator_trait, is_sorted, bench_black_box)]
+#![feature(core_intrinsics, generators, generator_trait, is_sorted)]
#[cfg(target_arch = "x86_64")]
use std::arch::x86_64::*;
diff --git a/compiler/rustc_codegen_cranelift/patches/0001-abi-cafe-Disable-some-test-on-x86_64-pc-windows-gnu.patch b/compiler/rustc_codegen_cranelift/patches/0001-abi-cafe-Disable-some-test-on-x86_64-pc-windows-gnu.patch
new file mode 100644
index 000000000..0e5e7cdfc
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/patches/0001-abi-cafe-Disable-some-test-on-x86_64-pc-windows-gnu.patch
@@ -0,0 +1,29 @@
+From 2b15fee2bb5fd14e34c7e17e44d99cb34f4c555d Mon Sep 17 00:00:00 2001
+From: Afonso Bordado <afonsobordado@az8.co>
+Date: Tue, 27 Sep 2022 07:55:17 +0100
+Subject: [PATCH] Disable some test on x86_64-pc-windows-gnu
+
+---
+ src/report.rs | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+diff --git a/src/report.rs b/src/report.rs
+index eeec614..f582867 100644
+--- a/src/report.rs
++++ b/src/report.rs
+@@ -48,6 +48,12 @@ pub fn get_test_rules(test: &TestKey, caller: &dyn AbiImpl, callee: &dyn AbiImpl
+ //
+ // THIS AREA RESERVED FOR VENDORS TO APPLY PATCHES
+
++ // x86_64-pc-windows-gnu has some broken i128 tests that aren't disabled by default
++ if cfg!(all(target_os = "windows", target_env = "gnu")) && test.test_name == "ui128" {
++ result.run = Link;
++ result.check = Pass(Link);
++ }
++
+ // END OF VENDOR RESERVED AREA
+ //
+ //
+--
+2.30.1.windows.1
+
diff --git a/compiler/rustc_codegen_cranelift/patches/0001-portable-simd-Disable-unsupported-tests.patch b/compiler/rustc_codegen_cranelift/patches/0001-portable-simd-Disable-unsupported-tests.patch
index 54e13b090..89e2b61c1 100644
--- a/compiler/rustc_codegen_cranelift/patches/0001-portable-simd-Disable-unsupported-tests.patch
+++ b/compiler/rustc_codegen_cranelift/patches/0001-portable-simd-Disable-unsupported-tests.patch
@@ -1,80 +1,29 @@
-From 97c473937382a5b5858d9cce3c947855d23b2dc5 Mon Sep 17 00:00:00 2001
+From b742f03694b920cc14400727d54424e8e1b60928 Mon Sep 17 00:00:00 2001
From: bjorn3 <bjorn3@users.noreply.github.com>
Date: Thu, 18 Nov 2021 19:28:40 +0100
Subject: [PATCH] Disable unsupported tests
---
- crates/core_simd/src/math.rs | 6 ++++++
- crates/core_simd/src/vector.rs | 2 ++
- crates/core_simd/tests/masks.rs | 2 ++
- crates/core_simd/tests/ops_macros.rs | 4 ++++
- 4 files changed, 14 insertions(+)
+ crates/core_simd/src/elements/int.rs | 8 ++++++++
+ crates/core_simd/src/elements/uint.rs | 4 ++++
+ crates/core_simd/src/masks/full_masks.rs | 6 ++++++
+ crates/core_simd/src/vector.rs | 2 ++
+ crates/core_simd/tests/masks.rs | 3 ---
+ 5 files changed, 20 insertions(+), 3 deletions(-)
-diff --git a/crates/core_simd/src/math.rs b/crates/core_simd/src/math.rs
-index 2bae414..2f87499 100644
---- a/crates/core_simd/src/math.rs
-+++ b/crates/core_simd/src/math.rs
-@@ -5,6 +5,7 @@ macro_rules! impl_uint_arith {
- ($($ty:ty),+) => {
- $( impl<const LANES: usize> Simd<$ty, LANES> where LaneCount<LANES>: SupportedLaneCount {
-
-+ /*
- /// Lanewise saturating add.
- ///
- /// # Examples
-@@ -43,6 +44,7 @@ macro_rules! impl_uint_arith {
- pub fn saturating_sub(self, second: Self) -> Self {
- unsafe { simd_saturating_sub(self, second) }
- }
-+ */
- })+
- }
- }
-@@ -51,6 +53,7 @@ macro_rules! impl_int_arith {
- ($($ty:ty),+) => {
- $( impl<const LANES: usize> Simd<$ty, LANES> where LaneCount<LANES>: SupportedLaneCount {
-
-+ /*
- /// Lanewise saturating add.
- ///
- /// # Examples
-@@ -89,6 +92,7 @@ macro_rules! impl_int_arith {
- pub fn saturating_sub(self, second: Self) -> Self {
- unsafe { simd_saturating_sub(self, second) }
- }
-+ */
-
- /// Lanewise absolute value, implemented in Rust.
- /// Every lane becomes its absolute value.
-@@ -109,6 +113,7 @@ macro_rules! impl_int_arith {
- (self^m) - m
- }
-
-+ /*
- /// Lanewise saturating absolute value, implemented in Rust.
- /// As abs(), except the MIN value becomes MAX instead of itself.
- ///
-@@ -151,6 +156,7 @@ macro_rules! impl_int_arith {
- pub fn saturating_neg(self) -> Self {
- Self::splat(0).saturating_sub(self)
- }
-+ */
- })+
- }
- }
diff --git a/crates/core_simd/src/vector.rs b/crates/core_simd/src/vector.rs
-index 7c5ec2b..c8631e8 100644
+index e8e8f68..7173c24 100644
--- a/crates/core_simd/src/vector.rs
+++ b/crates/core_simd/src/vector.rs
-@@ -75,6 +75,7 @@ where
- Self(array)
+@@ -250,6 +250,7 @@ where
+ unsafe { intrinsics::simd_cast(self) }
}
+ /*
/// Reads from potentially discontiguous indices in `slice` to construct a SIMD vector.
/// If an index is out-of-bounds, the lane is instead selected from the `or` vector.
///
-@@ -297,6 +298,7 @@ where
+@@ -473,6 +474,7 @@ where
// Cleared ☢️ *mut T Zone
}
}
@@ -82,26 +31,5 @@ index 7c5ec2b..c8631e8 100644
}
impl<T, const LANES: usize> Copy for Simd<T, LANES>
-diff --git a/crates/core_simd/tests/masks.rs b/crates/core_simd/tests/masks.rs
-index 6a8ecd3..68fcb49 100644
---- a/crates/core_simd/tests/masks.rs
-+++ b/crates/core_simd/tests/masks.rs
-@@ -68,6 +68,7 @@ macro_rules! test_mask_api {
- assert_eq!(core_simd::Mask::<$type, 8>::from_int(int), mask);
- }
-
-+ /*
- #[cfg(feature = "generic_const_exprs")]
- #[test]
- fn roundtrip_bitmask_conversion() {
-@@ -80,6 +81,7 @@ macro_rules! test_mask_api {
- assert_eq!(bitmask, [0b01001001, 0b10000011]);
- assert_eq!(core_simd::Mask::<$type, 16>::from_bitmask(bitmask), mask);
- }
-+ */
- }
- }
- }
--
-2.26.2.7.g19db9cfb68
-
+2.25.1
diff --git a/compiler/rustc_codegen_cranelift/patches/0003-rand-Disable-rand-tests-on-mingw.patch b/compiler/rustc_codegen_cranelift/patches/0003-rand-Disable-rand-tests-on-mingw.patch
new file mode 100644
index 000000000..d8775e2d0
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/patches/0003-rand-Disable-rand-tests-on-mingw.patch
@@ -0,0 +1,47 @@
+From eec874c889b8d24e5ad50faded24288150f057b1 Mon Sep 17 00:00:00 2001
+From: Afonso Bordado <afonsobordado@az8.co>
+Date: Tue, 27 Sep 2022 08:13:58 +0100
+Subject: [PATCH] Disable rand tests on mingw
+
+---
+ rand_distr/src/pareto.rs | 2 ++
+ rand_distr/tests/value_stability.rs | 4 ++++
+ 2 files changed, 6 insertions(+)
+
+diff --git a/rand_distr/src/pareto.rs b/rand_distr/src/pareto.rs
+index 217899e..9cedeb7 100644
+--- a/rand_distr/src/pareto.rs
++++ b/rand_distr/src/pareto.rs
+@@ -107,6 +107,8 @@ mod tests {
+ }
+
+ #[test]
++ // This is broken on x86_64-pc-windows-gnu presumably due to a broken powf implementation
++ #[cfg_attr(all(target_os = "windows", target_env = "gnu"), ignore)]
+ fn value_stability() {
+ fn test_samples<F: Float + core::fmt::Debug, D: Distribution<F>>(
+ distr: D, zero: F, expected: &[F],
+diff --git a/rand_distr/tests/value_stability.rs b/rand_distr/tests/value_stability.rs
+index 192ba74..0101ace 100644
+--- a/rand_distr/tests/value_stability.rs
++++ b/rand_distr/tests/value_stability.rs
+@@ -72,6 +72,8 @@ fn unit_disc_stability() {
+ }
+
+ #[test]
++// This is broken on x86_64-pc-windows-gnu
++#[cfg_attr(all(target_os = "windows", target_env = "gnu"), ignore)]
+ fn pareto_stability() {
+ test_samples(213, Pareto::new(1.0, 1.0).unwrap(), &[
+ 1.0423688f32, 2.1235929, 4.132709, 1.4679428,
+@@ -143,6 +145,8 @@ fn inverse_gaussian_stability() {
+ }
+
+ #[test]
++// This is broken on x86_64-pc-windows-gnu
++#[cfg_attr(all(target_os = "windows", target_env = "gnu"), ignore)]
+ fn gamma_stability() {
+ // Gamma has 3 cases: shape == 1, shape < 1, shape > 1
+ test_samples(223, Gamma::new(1.0, 5.0).unwrap(), &[
+--
+2.25.1
diff --git a/compiler/rustc_codegen_cranelift/patches/0023-sysroot-Ignore-failing-tests.patch b/compiler/rustc_codegen_cranelift/patches/0023-sysroot-Ignore-failing-tests.patch
index 50ef0bd94..f3cd7ee77 100644
--- a/compiler/rustc_codegen_cranelift/patches/0023-sysroot-Ignore-failing-tests.patch
+++ b/compiler/rustc_codegen_cranelift/patches/0023-sysroot-Ignore-failing-tests.patch
@@ -46,5 +46,17 @@ index 4bc44e9..8e3c7a4 100644
#[test]
fn cell_allows_array_cycle() {
+diff --git a/library/core/tests/atomic.rs b/library/core/tests/atomic.rs
+index 13b12db..96fe4b9 100644
+--- a/library/core/tests/atomic.rs
++++ b/library/core/tests/atomic.rs
+@@ -185,6 +185,7 @@ fn ptr_bitops() {
+ }
+
+ #[test]
++#[cfg_attr(target_arch = "s390x", ignore)] // s390x backend doesn't support stack alignment >8 bytes
+ #[cfg(any(not(target_arch = "arm"), target_os = "linux"))] // Missing intrinsic in compiler-builtins
+ fn ptr_bitops_tagging() {
+ #[repr(align(16))]
--
2.21.0 (Apple Git-122)
diff --git a/compiler/rustc_codegen_cranelift/rust-toolchain b/compiler/rustc_codegen_cranelift/rust-toolchain
index 3ab395d89..c0a2e7a78 100644
--- a/compiler/rustc_codegen_cranelift/rust-toolchain
+++ b/compiler/rustc_codegen_cranelift/rust-toolchain
@@ -1,3 +1,3 @@
[toolchain]
-channel = "nightly-2022-07-25"
+channel = "nightly-2022-10-23"
components = ["rust-src", "rustc-dev", "llvm-tools-preview"]
diff --git a/compiler/rustc_codegen_cranelift/scripts/setup_rust_fork.sh b/compiler/rustc_codegen_cranelift/scripts/setup_rust_fork.sh
index 091bfa1e9..d6a377895 100644
--- a/compiler/rustc_codegen_cranelift/scripts/setup_rust_fork.sh
+++ b/compiler/rustc_codegen_cranelift/scripts/setup_rust_fork.sh
@@ -10,6 +10,8 @@ git fetch
git checkout -- .
git checkout "$(rustc -V | cut -d' ' -f3 | tr -d '(')"
+git am ../patches/*-sysroot-*.patch
+
git apply - <<EOF
diff --git a/library/alloc/Cargo.toml b/library/alloc/Cargo.toml
index d95b5b7f17f..00b6f0e3635 100644
@@ -66,3 +68,7 @@ popd
# FIXME remove once inline asm is fully supported
export RUSTFLAGS="$RUSTFLAGS --cfg=rustix_use_libc"
+
+# Allow the testsuite to use llvm tools
+host_triple=$(rustc -vV | grep host | cut -d: -f2 | tr -d " ")
+export LLVM_BIN_DIR="$(rustc --print sysroot)/lib/rustlib/$host_triple/bin"
diff --git a/compiler/rustc_codegen_cranelift/scripts/test_rustc_tests.sh b/compiler/rustc_codegen_cranelift/scripts/test_rustc_tests.sh
index 944787612..9b5db3cf8 100755
--- a/compiler/rustc_codegen_cranelift/scripts/test_rustc_tests.sh
+++ b/compiler/rustc_codegen_cranelift/scripts/test_rustc_tests.sh
@@ -29,10 +29,6 @@ rm src/test/incremental/change_crate_dep_kind.rs
rm src/test/incremental/issue-80691-bad-eval-cache.rs # -Cpanic=abort causes abort instead of exit(101)
# requires compiling with -Cpanic=unwind
-rm src/test/ui/test-attrs/test-fn-signature-verification-for-explicit-return-type.rs # "Cannot run dynamic test fn out-of-process"
-rm src/test/ui/async-await/async-fn-size-moved-locals.rs # -Cpanic=abort shrinks some generator by one byte
-rm src/test/ui/async-await/async-fn-size-uninit-locals.rs # same
-rm src/test/ui/generator/size-moved-locals.rs # same
rm -r src/test/ui/macros/rfc-2011-nicer-assert-messages/
# vendor intrinsics
@@ -67,6 +63,7 @@ rm src/test/ui/target-feature/missing-plusminus.rs # error not implemented
rm src/test/ui/fn/dyn-fn-alignment.rs # wants a 256 byte alignment
rm -r src/test/run-make/emit-named-files # requires full --emit support
rm src/test/ui/abi/stack-probes.rs # stack probes not yet implemented
+rm src/test/ui/simd/intrinsic/ptr-cast.rs # simd_expose_addr intrinsic unimplemented
# optimization tests
# ==================
@@ -110,12 +107,13 @@ rm src/test/ui/simd/intrinsic/generic-reduction-pass.rs # simd_reduce_add_unorde
# bugs in the test suite
# ======================
rm src/test/ui/backtrace.rs # TODO warning
-rm src/test/ui/empty_global_asm.rs # TODO add needs-asm-support
rm src/test/ui/simple_global_asm.rs # TODO add needs-asm-support
rm src/test/ui/test-attrs/test-type.rs # TODO panic message on stderr. correct stdout
# not sure if this is actually a bug in the test suite, but the symbol list shows the function without leading _ for some reason
rm -r src/test/run-make/native-link-modifier-bundle
+rm src/test/ui/stdio-is-blocking.rs # really slow with unoptimized libstd
+
echo "[TEST] rustc test suite"
RUST_TEST_NOCAPTURE=1 COMPILETEST_FORCE_STAGE0=1 ./x.py test --stage 0 src/test/{codegen-units,run-make,run-pass-valgrind,ui,incremental}
popd
diff --git a/compiler/rustc_codegen_cranelift/scripts/tests.sh b/compiler/rustc_codegen_cranelift/scripts/tests.sh
deleted file mode 100755
index 9b5ffa409..000000000
--- a/compiler/rustc_codegen_cranelift/scripts/tests.sh
+++ /dev/null
@@ -1,203 +0,0 @@
-#!/usr/bin/env bash
-
-set -e
-
-export CG_CLIF_DISPLAY_CG_TIME=1
-export CG_CLIF_DISABLE_INCR_CACHE=1
-
-export HOST_TRIPLE=$(rustc -vV | grep host | cut -d: -f2 | tr -d " ")
-export TARGET_TRIPLE=${TARGET_TRIPLE:-$HOST_TRIPLE}
-
-export RUN_WRAPPER=''
-
-case "$TARGET_TRIPLE" in
- x86_64*)
- export JIT_SUPPORTED=1
- ;;
- *)
- export JIT_SUPPORTED=0
- ;;
-esac
-
-if [[ "$HOST_TRIPLE" != "$TARGET_TRIPLE" ]]; then
- export JIT_SUPPORTED=0
- if [[ "$TARGET_TRIPLE" == "aarch64-unknown-linux-gnu" ]]; then
- # We are cross-compiling for aarch64. Use the correct linker and run tests in qemu.
- export RUSTFLAGS='-Clinker=aarch64-linux-gnu-gcc '$RUSTFLAGS
- export RUN_WRAPPER='qemu-aarch64 -L /usr/aarch64-linux-gnu'
- elif [[ "$TARGET_TRIPLE" == "x86_64-pc-windows-gnu" ]]; then
- # We are cross-compiling for Windows. Run tests in wine.
- export RUN_WRAPPER='wine'
- else
- echo "Unknown non-native platform"
- fi
-fi
-
-# FIXME fix `#[linkage = "extern_weak"]` without this
-if [[ "$(uname)" == 'Darwin' ]]; then
- export RUSTFLAGS="$RUSTFLAGS -Clink-arg=-undefined -Clink-arg=dynamic_lookup"
-fi
-
-MY_RUSTC="$(pwd)/build/rustc-clif $RUSTFLAGS -L crate=target/out --out-dir target/out -Cdebuginfo=2"
-
-function no_sysroot_tests() {
- echo "[BUILD] mini_core"
- $MY_RUSTC example/mini_core.rs --crate-name mini_core --crate-type lib,dylib --target "$TARGET_TRIPLE"
-
- echo "[BUILD] example"
- $MY_RUSTC example/example.rs --crate-type lib --target "$TARGET_TRIPLE"
-
- if [[ "$JIT_SUPPORTED" = "1" ]]; then
- echo "[JIT] mini_core_hello_world"
- CG_CLIF_JIT_ARGS="abc bcd" $MY_RUSTC -Zunstable-options -Cllvm-args=mode=jit -Cprefer-dynamic example/mini_core_hello_world.rs --cfg jit --target "$HOST_TRIPLE"
-
- echo "[JIT-lazy] mini_core_hello_world"
- CG_CLIF_JIT_ARGS="abc bcd" $MY_RUSTC -Zunstable-options -Cllvm-args=mode=jit-lazy -Cprefer-dynamic example/mini_core_hello_world.rs --cfg jit --target "$HOST_TRIPLE"
- else
- echo "[JIT] mini_core_hello_world (skipped)"
- fi
-
- echo "[AOT] mini_core_hello_world"
- $MY_RUSTC example/mini_core_hello_world.rs --crate-name mini_core_hello_world --crate-type bin -g --target "$TARGET_TRIPLE"
- $RUN_WRAPPER ./target/out/mini_core_hello_world abc bcd
- # (echo "break set -n main"; echo "run"; sleep 1; echo "si -c 10"; sleep 1; echo "frame variable") | lldb -- ./target/out/mini_core_hello_world abc bcd
-}
-
-function base_sysroot_tests() {
- echo "[AOT] arbitrary_self_types_pointers_and_wrappers"
- $MY_RUSTC example/arbitrary_self_types_pointers_and_wrappers.rs --crate-name arbitrary_self_types_pointers_and_wrappers --crate-type bin --target "$TARGET_TRIPLE"
- $RUN_WRAPPER ./target/out/arbitrary_self_types_pointers_and_wrappers
-
- echo "[AOT] issue_91827_extern_types"
- $MY_RUSTC example/issue-91827-extern-types.rs --crate-name issue_91827_extern_types --crate-type bin --target "$TARGET_TRIPLE"
- $RUN_WRAPPER ./target/out/issue_91827_extern_types
-
- echo "[BUILD] alloc_system"
- $MY_RUSTC example/alloc_system.rs --crate-type lib --target "$TARGET_TRIPLE"
-
- echo "[AOT] alloc_example"
- $MY_RUSTC example/alloc_example.rs --crate-type bin --target "$TARGET_TRIPLE"
- $RUN_WRAPPER ./target/out/alloc_example
-
- if [[ "$JIT_SUPPORTED" = "1" ]]; then
- echo "[JIT] std_example"
- $MY_RUSTC -Zunstable-options -Cllvm-args=mode=jit -Cprefer-dynamic example/std_example.rs --target "$HOST_TRIPLE"
-
- echo "[JIT-lazy] std_example"
- $MY_RUSTC -Zunstable-options -Cllvm-args=mode=jit-lazy -Cprefer-dynamic example/std_example.rs --target "$HOST_TRIPLE"
- else
- echo "[JIT] std_example (skipped)"
- fi
-
- echo "[AOT] std_example"
- $MY_RUSTC example/std_example.rs --crate-type bin --target "$TARGET_TRIPLE"
- $RUN_WRAPPER ./target/out/std_example arg
-
- echo "[AOT] dst_field_align"
- $MY_RUSTC example/dst-field-align.rs --crate-name dst_field_align --crate-type bin --target "$TARGET_TRIPLE"
- $RUN_WRAPPER ./target/out/dst_field_align
-
- echo "[AOT] subslice-patterns-const-eval"
- $MY_RUSTC example/subslice-patterns-const-eval.rs --crate-type bin -Cpanic=abort --target "$TARGET_TRIPLE"
- $RUN_WRAPPER ./target/out/subslice-patterns-const-eval
-
- echo "[AOT] track-caller-attribute"
- $MY_RUSTC example/track-caller-attribute.rs --crate-type bin -Cpanic=abort --target "$TARGET_TRIPLE"
- $RUN_WRAPPER ./target/out/track-caller-attribute
-
- echo "[AOT] float-minmax-pass"
- $MY_RUSTC example/float-minmax-pass.rs --crate-type bin -Cpanic=abort --target "$TARGET_TRIPLE"
- $RUN_WRAPPER ./target/out/float-minmax-pass
-
- echo "[AOT] mod_bench"
- $MY_RUSTC example/mod_bench.rs --crate-type bin --target "$TARGET_TRIPLE"
- $RUN_WRAPPER ./target/out/mod_bench
-}
-
-function extended_sysroot_tests() {
- pushd rand
- ../build/cargo-clif clean
- if [[ "$HOST_TRIPLE" = "$TARGET_TRIPLE" ]]; then
- echo "[TEST] rust-random/rand"
- ../build/cargo-clif test --workspace
- else
- echo "[AOT] rust-random/rand"
- ../build/cargo-clif build --workspace --target $TARGET_TRIPLE --tests
- fi
- popd
-
- pushd simple-raytracer
- if [[ "$HOST_TRIPLE" = "$TARGET_TRIPLE" ]]; then
- echo "[BENCH COMPILE] ebobby/simple-raytracer"
- hyperfine --runs "${RUN_RUNS:-10}" --warmup 1 --prepare "../build/cargo-clif clean" \
- "RUSTFLAGS='' cargo build" \
- "../build/cargo-clif build"
-
- echo "[BENCH RUN] ebobby/simple-raytracer"
- cp ./target/debug/main ./raytracer_cg_clif
- hyperfine --runs "${RUN_RUNS:-10}" ./raytracer_cg_llvm ./raytracer_cg_clif
- else
- ../build/cargo-clif clean
- echo "[BENCH COMPILE] ebobby/simple-raytracer (skipped)"
- echo "[COMPILE] ebobby/simple-raytracer"
- ../build/cargo-clif build --target $TARGET_TRIPLE
- echo "[BENCH RUN] ebobby/simple-raytracer (skipped)"
- fi
- popd
-
- pushd build_sysroot/sysroot_src/library/core/tests
- echo "[TEST] libcore"
- ../../../../../build/cargo-clif clean
- if [[ "$HOST_TRIPLE" = "$TARGET_TRIPLE" ]]; then
- ../../../../../build/cargo-clif test
- else
- ../../../../../build/cargo-clif build --target $TARGET_TRIPLE --tests
- fi
- popd
-
- pushd regex
- echo "[TEST] rust-lang/regex example shootout-regex-dna"
- ../build/cargo-clif clean
- export RUSTFLAGS="$RUSTFLAGS --cap-lints warn" # newer aho_corasick versions throw a deprecation warning
- # Make sure `[codegen mono items] start` doesn't poison the diff
- ../build/cargo-clif build --example shootout-regex-dna --target $TARGET_TRIPLE
- if [[ "$HOST_TRIPLE" = "$TARGET_TRIPLE" ]]; then
- cat examples/regexdna-input.txt \
- | ../build/cargo-clif run --example shootout-regex-dna --target $TARGET_TRIPLE \
- | grep -v "Spawned thread" > res.txt
- diff -u res.txt examples/regexdna-output.txt
- fi
-
- if [[ "$HOST_TRIPLE" = "$TARGET_TRIPLE" ]]; then
- echo "[TEST] rust-lang/regex tests"
- ../build/cargo-clif test --tests -- --exclude-should-panic --test-threads 1 -Zunstable-options -q
- else
- echo "[AOT] rust-lang/regex tests"
- ../build/cargo-clif build --tests --target $TARGET_TRIPLE
- fi
- popd
-
- pushd portable-simd
- echo "[TEST] rust-lang/portable-simd"
- ../build/cargo-clif clean
- ../build/cargo-clif build --all-targets --target $TARGET_TRIPLE
- if [[ "$HOST_TRIPLE" = "$TARGET_TRIPLE" ]]; then
- ../build/cargo-clif test -q
- fi
- popd
-}
-
-case "$1" in
- "no_sysroot")
- no_sysroot_tests
- ;;
- "base_sysroot")
- base_sysroot_tests
- ;;
- "extended_sysroot")
- extended_sysroot_tests
- ;;
- *)
- echo "unknown test suite"
- ;;
-esac
diff --git a/compiler/rustc_codegen_cranelift/src/abi/comments.rs b/compiler/rustc_codegen_cranelift/src/abi/comments.rs
index 37d2679c1..7f4619b5c 100644
--- a/compiler/rustc_codegen_cranelift/src/abi/comments.rs
+++ b/compiler/rustc_codegen_cranelift/src/abi/comments.rs
@@ -24,7 +24,7 @@ pub(super) fn add_arg_comment<'tcx>(
local: Option<mir::Local>,
local_field: Option<usize>,
params: &[Value],
- arg_abi_mode: PassMode,
+ arg_abi_mode: &PassMode,
arg_layout: TyAndLayout<'tcx>,
) {
if !fx.clif_comments.enabled() {
diff --git a/compiler/rustc_codegen_cranelift/src/abi/mod.rs b/compiler/rustc_codegen_cranelift/src/abi/mod.rs
index 815450f68..99059e788 100644
--- a/compiler/rustc_codegen_cranelift/src/abi/mod.rs
+++ b/compiler/rustc_codegen_cranelift/src/abi/mod.rs
@@ -342,7 +342,7 @@ pub(crate) fn codegen_terminator_call<'tcx>(
let ret_place = codegen_place(fx, destination);
- // Handle special calls like instrinsics and empty drop glue.
+ // Handle special calls like intrinsics and empty drop glue.
let instance = if let ty::FnDef(def_id, substs) = *fn_ty.kind() {
let instance = ty::Instance::resolve(fx.tcx, ty::ParamEnv::reveal_all(), def_id, substs)
.unwrap()
@@ -465,7 +465,7 @@ pub(crate) fn codegen_terminator_call<'tcx>(
let sig = clif_sig_from_fn_abi(fx.tcx, fx.target_config.default_call_conv, &fn_abi);
let sig = fx.bcx.import_signature(sig);
- (CallTarget::Indirect(sig, method), Some(ptr))
+ (CallTarget::Indirect(sig, method), Some(ptr.get_addr(fx)))
}
// Normal call
@@ -560,7 +560,19 @@ pub(crate) fn codegen_drop<'tcx>(
// we don't actually need to drop anything
} else {
match ty.kind() {
- ty::Dynamic(..) => {
+ ty::Dynamic(_, _, ty::Dyn) => {
+ // IN THIS ARM, WE HAVE:
+ // ty = *mut (dyn Trait)
+ // which is: exists<T> ( *mut T, Vtable<T: Trait> )
+ // args[0] args[1]
+ //
+ // args = ( Data, Vtable )
+ // |
+ // v
+ // /-------\
+ // | ... |
+ // \-------/
+ //
let (ptr, vtable) = drop_place.to_ptr_maybe_unsized();
let ptr = ptr.get_addr(fx);
let drop_fn = crate::vtable::drop_fn_of_obj(fx, vtable.unwrap());
@@ -578,6 +590,43 @@ pub(crate) fn codegen_drop<'tcx>(
let sig = fx.bcx.import_signature(sig);
fx.bcx.ins().call_indirect(sig, drop_fn, &[ptr]);
}
+ ty::Dynamic(_, _, ty::DynStar) => {
+ // IN THIS ARM, WE HAVE:
+ // ty = *mut (dyn* Trait)
+ // which is: *mut exists<T: sizeof(T) == sizeof(usize)> (T, Vtable<T: Trait>)
+ //
+ // args = [ * ]
+ // |
+ // v
+ // ( Data, Vtable )
+ // |
+ // v
+ // /-------\
+ // | ... |
+ // \-------/
+ //
+ //
+ // WE CAN CONVERT THIS INTO THE ABOVE LOGIC BY DOING
+ //
+ // data = &(*args[0]).0 // gives a pointer to Data above (really the same pointer)
+ // vtable = (*args[0]).1 // loads the vtable out
+ // (data, vtable) // an equivalent Rust `*mut dyn Trait`
+ //
+ // SO THEN WE CAN USE THE ABOVE CODE.
+ let (data, vtable) = drop_place.to_cvalue(fx).dyn_star_force_data_on_stack(fx);
+ let drop_fn = crate::vtable::drop_fn_of_obj(fx, vtable);
+
+ let virtual_drop = Instance {
+ def: ty::InstanceDef::Virtual(drop_instance.def_id(), 0),
+ substs: drop_instance.substs,
+ };
+ let fn_abi =
+ RevealAllLayoutCx(fx.tcx).fn_abi_of_instance(virtual_drop, ty::List::empty());
+
+ let sig = clif_sig_from_fn_abi(fx.tcx, fx.target_config.default_call_conv, &fn_abi);
+ let sig = fx.bcx.import_signature(sig);
+ fx.bcx.ins().call_indirect(sig, drop_fn, &[data]);
+ }
_ => {
assert!(!matches!(drop_instance.def, InstanceDef::Virtual(_, _)));
diff --git a/compiler/rustc_codegen_cranelift/src/abi/pass_mode.rs b/compiler/rustc_codegen_cranelift/src/abi/pass_mode.rs
index 6c10baa53..e5ad31eb9 100644
--- a/compiler/rustc_codegen_cranelift/src/abi/pass_mode.rs
+++ b/compiler/rustc_codegen_cranelift/src/abi/pass_mode.rs
@@ -23,7 +23,7 @@ fn reg_to_abi_param(reg: Reg) -> AbiParam {
(RegKind::Integer, 9..=16) => types::I128,
(RegKind::Float, 4) => types::F32,
(RegKind::Float, 8) => types::F64,
- (RegKind::Vector, size) => types::I8.by(u16::try_from(size).unwrap()).unwrap(),
+ (RegKind::Vector, size) => types::I8.by(u32::try_from(size).unwrap()).unwrap(),
_ => unreachable!("{:?}", reg),
};
AbiParam::new(clif_ty)
@@ -38,7 +38,7 @@ fn apply_arg_attrs_to_abi_param(mut param: AbiParam, arg_attrs: ArgAttributes) -
param
}
-fn cast_target_to_abi_params(cast: CastTarget) -> SmallVec<[AbiParam; 2]> {
+fn cast_target_to_abi_params(cast: &CastTarget) -> SmallVec<[AbiParam; 2]> {
let (rest_count, rem_bytes) = if cast.rest.unit.size.bytes() == 0 {
(0, 0)
} else {
@@ -100,7 +100,10 @@ impl<'tcx> ArgAbiExt<'tcx> for ArgAbi<'tcx, Ty<'tcx>> {
}
_ => unreachable!("{:?}", self.layout.abi),
},
- PassMode::Cast(cast) => cast_target_to_abi_params(cast),
+ PassMode::Cast(ref cast, pad_i32) => {
+ assert!(!pad_i32, "padding support not yet implemented");
+ cast_target_to_abi_params(cast)
+ }
PassMode::Indirect { attrs, extra_attrs: None, on_stack } => {
if on_stack {
// Abi requires aligning struct size to pointer size
@@ -145,7 +148,9 @@ impl<'tcx> ArgAbiExt<'tcx> for ArgAbi<'tcx, Ty<'tcx>> {
}
_ => unreachable!("{:?}", self.layout.abi),
},
- PassMode::Cast(cast) => (None, cast_target_to_abi_params(cast).into_iter().collect()),
+ PassMode::Cast(ref cast, _) => {
+ (None, cast_target_to_abi_params(cast).into_iter().collect())
+ }
PassMode::Indirect { attrs: _, extra_attrs: None, on_stack } => {
assert!(!on_stack);
(Some(AbiParam::special(pointer_ty(tcx), ArgumentPurpose::StructReturn)), vec![])
@@ -160,7 +165,7 @@ impl<'tcx> ArgAbiExt<'tcx> for ArgAbi<'tcx, Ty<'tcx>> {
pub(super) fn to_casted_value<'tcx>(
fx: &mut FunctionCx<'_, '_, 'tcx>,
arg: CValue<'tcx>,
- cast: CastTarget,
+ cast: &CastTarget,
) -> SmallVec<[Value; 2]> {
let (ptr, meta) = arg.force_stack(fx);
assert!(meta.is_none());
@@ -179,21 +184,21 @@ pub(super) fn from_casted_value<'tcx>(
fx: &mut FunctionCx<'_, '_, 'tcx>,
block_params: &[Value],
layout: TyAndLayout<'tcx>,
- cast: CastTarget,
+ cast: &CastTarget,
) -> CValue<'tcx> {
let abi_params = cast_target_to_abi_params(cast);
let abi_param_size: u32 = abi_params.iter().map(|param| param.value_type.bytes()).sum();
let layout_size = u32::try_from(layout.size.bytes()).unwrap();
- let stack_slot = fx.bcx.create_stack_slot(StackSlotData {
+ let stack_slot = fx.bcx.create_sized_stack_slot(StackSlotData {
kind: StackSlotKind::ExplicitSlot,
// FIXME Don't force the size to a multiple of 16 bytes once Cranelift gets a way to
// specify stack slot alignment.
- // Stack slot size may be bigger for for example `[u8; 3]` which is packed into an `i32`.
+ // Stack slot size may be bigger for example `[u8; 3]` which is packed into an `i32`.
// It may also be smaller for example when the type is a wrapper around an integer with a
// larger alignment than the integer.
size: (std::cmp::max(abi_param_size, layout_size) + 15) / 16 * 16,
});
- let ptr = Pointer::new(fx.bcx.ins().stack_addr(pointer_ty(fx.tcx), stack_slot, 0));
+ let ptr = Pointer::stack_slot(stack_slot);
let mut offset = 0;
let mut block_params_iter = block_params.iter().copied();
for param in abi_params {
@@ -224,7 +229,7 @@ pub(super) fn adjust_arg_for_abi<'tcx>(
let (a, b) = arg.load_scalar_pair(fx);
smallvec![a, b]
}
- PassMode::Cast(cast) => to_casted_value(fx, arg, cast),
+ PassMode::Cast(ref cast, _) => to_casted_value(fx, arg, cast),
PassMode::Indirect { .. } => {
if is_owned {
match arg.force_stack(fx) {
@@ -268,7 +273,7 @@ pub(super) fn cvalue_for_param<'tcx>(
local,
local_field,
&block_params,
- arg_abi.mode,
+ &arg_abi.mode,
arg_abi.layout,
);
@@ -282,7 +287,9 @@ pub(super) fn cvalue_for_param<'tcx>(
assert_eq!(block_params.len(), 2, "{:?}", block_params);
Some(CValue::by_val_pair(block_params[0], block_params[1], arg_abi.layout))
}
- PassMode::Cast(cast) => Some(from_casted_value(fx, &block_params, arg_abi.layout, cast)),
+ PassMode::Cast(ref cast, _) => {
+ Some(from_casted_value(fx, &block_params, arg_abi.layout, cast))
+ }
PassMode::Indirect { attrs: _, extra_attrs: None, on_stack: _ } => {
assert_eq!(block_params.len(), 1, "{:?}", block_params);
Some(CValue::by_ref(Pointer::new(block_params[0]), arg_abi.layout))
diff --git a/compiler/rustc_codegen_cranelift/src/abi/returning.rs b/compiler/rustc_codegen_cranelift/src/abi/returning.rs
index ff3bb2dfd..aaa141876 100644
--- a/compiler/rustc_codegen_cranelift/src/abi/returning.rs
+++ b/compiler/rustc_codegen_cranelift/src/abi/returning.rs
@@ -13,7 +13,7 @@ pub(super) fn codegen_return_param<'tcx>(
block_params_iter: &mut impl Iterator<Item = Value>,
) -> CPlace<'tcx> {
let (ret_place, ret_param): (_, SmallVec<[_; 2]>) = match fx.fn_abi.as_ref().unwrap().ret.mode {
- PassMode::Ignore | PassMode::Direct(_) | PassMode::Pair(_, _) | PassMode::Cast(_) => {
+ PassMode::Ignore | PassMode::Direct(_) | PassMode::Pair(_, _) | PassMode::Cast(..) => {
let is_ssa = ssa_analyzed[RETURN_PLACE] == crate::analyze::SsaKind::Ssa;
(
super::make_local_place(
@@ -44,7 +44,7 @@ pub(super) fn codegen_return_param<'tcx>(
Some(RETURN_PLACE),
None,
&ret_param,
- fx.fn_abi.as_ref().unwrap().ret.mode,
+ &fx.fn_abi.as_ref().unwrap().ret.mode,
fx.fn_abi.as_ref().unwrap().ret.layout,
);
@@ -75,7 +75,7 @@ pub(super) fn codegen_with_call_return_arg<'tcx>(
PassMode::Indirect { attrs: _, extra_attrs: Some(_), on_stack: _ } => {
unreachable!("unsized return value")
}
- PassMode::Direct(_) | PassMode::Pair(_, _) | PassMode::Cast(_) => (None, None),
+ PassMode::Direct(_) | PassMode::Pair(_, _) | PassMode::Cast(..) => (None, None),
};
let call_inst = f(fx, return_ptr);
@@ -92,7 +92,7 @@ pub(super) fn codegen_with_call_return_arg<'tcx>(
ret_place
.write_cvalue(fx, CValue::by_val_pair(ret_val_a, ret_val_b, ret_arg_abi.layout));
}
- PassMode::Cast(cast) => {
+ PassMode::Cast(ref cast, _) => {
let results =
fx.bcx.inst_results(call_inst).iter().copied().collect::<SmallVec<[Value; 2]>>();
let result =
@@ -131,7 +131,7 @@ pub(crate) fn codegen_return(fx: &mut FunctionCx<'_, '_, '_>) {
let (ret_val_a, ret_val_b) = place.to_cvalue(fx).load_scalar_pair(fx);
fx.bcx.ins().return_(&[ret_val_a, ret_val_b]);
}
- PassMode::Cast(cast) => {
+ PassMode::Cast(ref cast, _) => {
let place = fx.get_local_place(RETURN_PLACE);
let ret_val = place.to_cvalue(fx);
let ret_vals = super::pass_mode::to_casted_value(fx, ret_val, cast);
diff --git a/compiler/rustc_codegen_cranelift/src/allocator.rs b/compiler/rustc_codegen_cranelift/src/allocator.rs
index 6d321c7b2..bad8a87b9 100644
--- a/compiler/rustc_codegen_cranelift/src/allocator.rs
+++ b/compiler/rustc_codegen_cranelift/src/allocator.rs
@@ -78,7 +78,7 @@ fn codegen_inner(
let callee_func_id = module.declare_function(&callee_name, Linkage::Import, &sig).unwrap();
let mut ctx = Context::new();
- ctx.func = Function::with_name_signature(ExternalName::user(0, 0), sig.clone());
+ ctx.func.signature = sig.clone();
{
let mut func_ctx = FunctionBuilderContext::new();
let mut bcx = FunctionBuilder::new(&mut ctx.func, &mut func_ctx);
@@ -116,7 +116,7 @@ fn codegen_inner(
let callee_func_id = module.declare_function(callee_name, Linkage::Import, &sig).unwrap();
let mut ctx = Context::new();
- ctx.func = Function::with_name_signature(ExternalName::user(0, 0), sig);
+ ctx.func.signature = sig;
{
let mut func_ctx = FunctionBuilderContext::new();
let mut bcx = FunctionBuilder::new(&mut ctx.func, &mut func_ctx);
diff --git a/compiler/rustc_codegen_cranelift/src/analyze.rs b/compiler/rustc_codegen_cranelift/src/analyze.rs
index 35b89358b..0cbb9f3ec 100644
--- a/compiler/rustc_codegen_cranelift/src/analyze.rs
+++ b/compiler/rustc_codegen_cranelift/src/analyze.rs
@@ -26,7 +26,7 @@ pub(crate) fn analyze(fx: &FunctionCx<'_, '_, '_>) -> IndexVec<Local, SsaKind> {
})
.collect::<IndexVec<Local, SsaKind>>();
- for bb in fx.mir.basic_blocks().iter() {
+ for bb in fx.mir.basic_blocks.iter() {
for stmt in bb.statements.iter() {
match &stmt.kind {
Assign(place_and_rval) => match &place_and_rval.1 {
diff --git a/compiler/rustc_codegen_cranelift/src/archive.rs b/compiler/rustc_codegen_cranelift/src/archive.rs
index b4c790961..f2e3bf16e 100644
--- a/compiler/rustc_codegen_cranelift/src/archive.rs
+++ b/compiler/rustc_codegen_cranelift/src/archive.rs
@@ -38,6 +38,7 @@ impl ArchiveBuilderBuilder for ArArchiveBuilderBuilder {
_lib_name: &str,
_dll_imports: &[rustc_session::cstore::DllImport],
_tmpdir: &Path,
+ _is_direct_dependency: bool,
) -> PathBuf {
bug!("creating dll imports is not supported");
}
@@ -159,6 +160,8 @@ impl<'a> ArchiveBuilder<'a> for ArArchiveBuilder<'a> {
let err = err.to_string();
if err == "Unknown file magic" {
// Not an object file; skip it.
+ } else if object::read::archive::ArchiveFile::parse(&*data).is_ok() {
+ // Nested archive file; skip it.
} else {
sess.fatal(&format!(
"error parsing `{}` during archive creation: {}",
diff --git a/compiler/rustc_codegen_cranelift/src/base.rs b/compiler/rustc_codegen_cranelift/src/base.rs
index 122e103ff..1db445027 100644
--- a/compiler/rustc_codegen_cranelift/src/base.rs
+++ b/compiler/rustc_codegen_cranelift/src/base.rs
@@ -6,21 +6,45 @@ use rustc_middle::ty::adjustment::PointerCast;
use rustc_middle::ty::layout::FnAbiOf;
use rustc_middle::ty::print::with_no_trimmed_paths;
-use indexmap::IndexSet;
+use cranelift_codegen::ir::UserFuncName;
use crate::constant::ConstantCx;
+use crate::debuginfo::FunctionDebugContext;
use crate::prelude::*;
use crate::pretty_clif::CommentWriter;
-pub(crate) fn codegen_fn<'tcx>(
- cx: &mut crate::CodegenCx<'tcx>,
+pub(crate) struct CodegenedFunction {
+ symbol_name: String,
+ func_id: FuncId,
+ func: Function,
+ clif_comments: CommentWriter,
+ func_debug_cx: Option<FunctionDebugContext>,
+}
+
+#[cfg_attr(not(feature = "jit"), allow(dead_code))]
+pub(crate) fn codegen_and_compile_fn<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ cx: &mut crate::CodegenCx,
+ cached_context: &mut Context,
module: &mut dyn Module,
instance: Instance<'tcx>,
) {
- let tcx = cx.tcx;
-
let _inst_guard =
crate::PrintOnPanic(|| format!("{:?} {}", instance, tcx.symbol_name(instance).name));
+
+ let cached_func = std::mem::replace(&mut cached_context.func, Function::new());
+ let codegened_func = codegen_fn(tcx, cx, cached_func, module, instance);
+
+ compile_fn(cx, cached_context, module, codegened_func);
+}
+
+pub(crate) fn codegen_fn<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ cx: &mut crate::CodegenCx,
+ cached_func: Function,
+ module: &mut dyn Module,
+ instance: Instance<'tcx>,
+) -> CodegenedFunction {
debug_assert!(!instance.substs.needs_infer());
let mir = tcx.instance_mir(instance.def);
@@ -34,16 +58,15 @@ pub(crate) fn codegen_fn<'tcx>(
});
// Declare function
- let symbol_name = tcx.symbol_name(instance);
+ let symbol_name = tcx.symbol_name(instance).name.to_string();
let sig = get_function_sig(tcx, module.isa().triple(), instance);
- let func_id = module.declare_function(symbol_name.name, Linkage::Local, &sig).unwrap();
-
- cx.cached_context.clear();
+ let func_id = module.declare_function(&symbol_name, Linkage::Local, &sig).unwrap();
// Make the FunctionBuilder
let mut func_ctx = FunctionBuilderContext::new();
- let mut func = std::mem::replace(&mut cx.cached_context.func, Function::new());
- func.name = ExternalName::user(0, func_id.as_u32());
+ let mut func = cached_func;
+ func.clear();
+ func.name = UserFuncName::user(0, func_id.as_u32());
func.signature = sig;
func.collect_debug_info();
@@ -52,13 +75,19 @@ pub(crate) fn codegen_fn<'tcx>(
// Predefine blocks
let start_block = bcx.create_block();
let block_map: IndexVec<BasicBlock, Block> =
- (0..mir.basic_blocks().len()).map(|_| bcx.create_block()).collect();
+ (0..mir.basic_blocks.len()).map(|_| bcx.create_block()).collect();
// Make FunctionCx
let target_config = module.target_config();
let pointer_type = target_config.pointer_type();
let clif_comments = crate::pretty_clif::CommentWriter::new(tcx, instance);
+ let func_debug_cx = if let Some(debug_context) = &mut cx.debug_context {
+ Some(debug_context.define_function(tcx, &symbol_name, mir.span))
+ } else {
+ None
+ };
+
let mut fx = FunctionCx {
cx,
module,
@@ -66,6 +95,7 @@ pub(crate) fn codegen_fn<'tcx>(
target_config,
pointer_type,
constants_cx: ConstantCx::new(),
+ func_debug_cx,
instance,
symbol_name,
@@ -78,81 +108,48 @@ pub(crate) fn codegen_fn<'tcx>(
caller_location: None, // set by `codegen_fn_prelude`
clif_comments,
- source_info_set: indexmap::IndexSet::new(),
+ last_source_file: None,
next_ssa_var: 0,
};
- let arg_uninhabited = fx
- .mir
- .args_iter()
- .any(|arg| fx.layout_of(fx.monomorphize(fx.mir.local_decls[arg].ty)).abi.is_uninhabited());
-
- if !crate::constant::check_constants(&mut fx) {
- fx.bcx.append_block_params_for_function_params(fx.block_map[START_BLOCK]);
- fx.bcx.switch_to_block(fx.block_map[START_BLOCK]);
- crate::trap::trap_unreachable(&mut fx, "compilation should have been aborted");
- } else if arg_uninhabited {
- fx.bcx.append_block_params_for_function_params(fx.block_map[START_BLOCK]);
- fx.bcx.switch_to_block(fx.block_map[START_BLOCK]);
- fx.bcx.ins().trap(TrapCode::UnreachableCodeReached);
- } else {
- tcx.sess.time("codegen clif ir", || {
- tcx.sess
- .time("codegen prelude", || crate::abi::codegen_fn_prelude(&mut fx, start_block));
- codegen_fn_content(&mut fx);
- });
- }
+ tcx.sess.time("codegen clif ir", || codegen_fn_body(&mut fx, start_block));
// Recover all necessary data from fx, as accessing func afterwards will prevent further access to fx.
- let instance = fx.instance;
+ let symbol_name = fx.symbol_name;
let clif_comments = fx.clif_comments;
- let source_info_set = fx.source_info_set;
- let local_map = fx.local_map;
+ let func_debug_cx = fx.func_debug_cx;
fx.constants_cx.finalize(fx.tcx, &mut *fx.module);
- crate::pretty_clif::write_clif_file(
- tcx,
- "unopt",
- module.isa(),
- instance,
- &func,
- &clif_comments,
- );
+ if cx.should_write_ir {
+ crate::pretty_clif::write_clif_file(
+ tcx.output_filenames(()),
+ &symbol_name,
+ "unopt",
+ module.isa(),
+ &func,
+ &clif_comments,
+ );
+ }
// Verify function
verify_func(tcx, &clif_comments, &func);
- compile_fn(
- cx,
- module,
- instance,
- symbol_name.name,
- func_id,
- func,
- clif_comments,
- source_info_set,
- local_map,
- );
+ CodegenedFunction { symbol_name, func_id, func, clif_comments, func_debug_cx }
}
-fn compile_fn<'tcx>(
- cx: &mut crate::CodegenCx<'tcx>,
+pub(crate) fn compile_fn(
+ cx: &mut crate::CodegenCx,
+ cached_context: &mut Context,
module: &mut dyn Module,
- instance: Instance<'tcx>,
- symbol_name: &str,
- func_id: FuncId,
- func: Function,
- mut clif_comments: CommentWriter,
- source_info_set: IndexSet<SourceInfo>,
- local_map: IndexVec<mir::Local, CPlace<'tcx>>,
+ codegened_func: CodegenedFunction,
) {
- let tcx = cx.tcx;
+ let clif_comments = codegened_func.clif_comments;
// Store function in context
- let context = &mut cx.cached_context;
+ let context = cached_context;
context.clear();
- context.func = func;
+ context.func = codegened_func.func;
// If the return block is not reachable, then the SSA builder may have inserted an `iconst.i128`
// instruction, which doesn't have an encoding.
@@ -164,17 +161,6 @@ fn compile_fn<'tcx>(
// invalidate it when it would change.
context.domtree.clear();
- // Perform rust specific optimizations
- tcx.sess.time("optimize clif ir", || {
- crate::optimize::optimize_function(
- tcx,
- module.isa(),
- instance,
- context,
- &mut clif_comments,
- );
- });
-
#[cfg(any())] // This is never true
let _clif_guard = {
use std::fmt::Write;
@@ -203,46 +189,44 @@ fn compile_fn<'tcx>(
};
// Define function
- tcx.sess.time("define function", || {
- context.want_disasm = crate::pretty_clif::should_write_ir(tcx);
- module.define_function(func_id, context).unwrap();
+ cx.profiler.verbose_generic_activity("define function").run(|| {
+ context.want_disasm = cx.should_write_ir;
+ module.define_function(codegened_func.func_id, context).unwrap();
});
- // Write optimized function to file for debugging
- crate::pretty_clif::write_clif_file(
- tcx,
- "opt",
- module.isa(),
- instance,
- &context.func,
- &clif_comments,
- );
+ if cx.should_write_ir {
+ // Write optimized function to file for debugging
+ crate::pretty_clif::write_clif_file(
+ &cx.output_filenames,
+ &codegened_func.symbol_name,
+ "opt",
+ module.isa(),
+ &context.func,
+ &clif_comments,
+ );
- if let Some(disasm) = &context.mach_compile_result.as_ref().unwrap().disasm {
- crate::pretty_clif::write_ir_file(
- tcx,
- || format!("{}.vcode", tcx.symbol_name(instance).name),
- |file| file.write_all(disasm.as_bytes()),
- )
+ if let Some(disasm) = &context.compiled_code().unwrap().disasm {
+ crate::pretty_clif::write_ir_file(
+ &cx.output_filenames,
+ &format!("{}.vcode", codegened_func.symbol_name),
+ |file| file.write_all(disasm.as_bytes()),
+ )
+ }
}
// Define debuginfo for function
let isa = module.isa();
let debug_context = &mut cx.debug_context;
let unwind_context = &mut cx.unwind_context;
- tcx.sess.time("generate debug info", || {
+ cx.profiler.verbose_generic_activity("generate debug info").run(|| {
if let Some(debug_context) = debug_context {
- debug_context.define_function(
- instance,
- func_id,
- symbol_name,
- isa,
+ codegened_func.func_debug_cx.unwrap().finalize(
+ debug_context,
+ codegened_func.func_id,
context,
- &source_info_set,
- local_map,
);
}
- unwind_context.add_function(func_id, &context, isa);
+ unwind_context.add_function(codegened_func.func_id, &context, isa);
});
}
@@ -268,8 +252,28 @@ pub(crate) fn verify_func(
});
}
-fn codegen_fn_content(fx: &mut FunctionCx<'_, '_, '_>) {
- for (bb, bb_data) in fx.mir.basic_blocks().iter_enumerated() {
+fn codegen_fn_body(fx: &mut FunctionCx<'_, '_, '_>, start_block: Block) {
+ if !crate::constant::check_constants(fx) {
+ fx.bcx.append_block_params_for_function_params(fx.block_map[START_BLOCK]);
+ fx.bcx.switch_to_block(fx.block_map[START_BLOCK]);
+ // compilation should have been aborted
+ fx.bcx.ins().trap(TrapCode::UnreachableCodeReached);
+ return;
+ }
+
+ let arg_uninhabited = fx
+ .mir
+ .args_iter()
+ .any(|arg| fx.layout_of(fx.monomorphize(fx.mir.local_decls[arg].ty)).abi.is_uninhabited());
+ if arg_uninhabited {
+ fx.bcx.append_block_params_for_function_params(fx.block_map[START_BLOCK]);
+ fx.bcx.switch_to_block(fx.block_map[START_BLOCK]);
+ fx.bcx.ins().trap(TrapCode::UnreachableCodeReached);
+ return;
+ }
+ fx.tcx.sess.time("codegen prelude", || crate::abi::codegen_fn_prelude(fx, start_block));
+
+ for (bb, bb_data) in fx.mir.basic_blocks.iter_enumerated() {
let block = fx.get_block(bb);
fx.bcx.switch_to_block(block);
@@ -457,17 +461,8 @@ fn codegen_fn_content(fx: &mut FunctionCx<'_, '_, '_>) {
template,
operands,
*options,
+ *destination,
);
-
- match *destination {
- Some(destination) => {
- let destination_block = fx.get_block(destination);
- fx.bcx.ins().jump(destination_block, &[]);
- }
- None => {
- fx.bcx.ins().trap(TrapCode::UnreachableCodeReached);
- }
- }
}
TerminatorKind::Resume | TerminatorKind::Abort => {
// FIXME implement unwinding
@@ -640,7 +635,12 @@ fn codegen_stmt<'tcx>(
lval.write_cvalue(fx, operand.cast_pointer_to(to_layout));
}
Rvalue::Cast(
- CastKind::Misc
+ CastKind::IntToInt
+ | CastKind::FloatToFloat
+ | CastKind::FloatToInt
+ | CastKind::IntToFloat
+ | CastKind::FnPtrToPtr
+ | CastKind::PtrToPtr
| CastKind::PointerExposeAddress
| CastKind::PointerFromExposedAddress,
ref operand,
@@ -708,12 +708,14 @@ fn codegen_stmt<'tcx>(
let operand = codegen_operand(fx, operand);
operand.unsize_value(fx, lval);
}
+ Rvalue::Cast(CastKind::DynStar, ref operand, _) => {
+ let operand = codegen_operand(fx, operand);
+ operand.coerce_dyn_star(fx, lval);
+ }
Rvalue::Discriminant(place) => {
let place = codegen_place(fx, place);
let value = place.to_cvalue(fx);
- let discr =
- crate::discriminant::codegen_get_discriminant(fx, value, dest_layout);
- lval.write_cvalue(fx, discr);
+ crate::discriminant::codegen_get_discriminant(fx, lval, value, dest_layout);
}
Rvalue::Repeat(ref operand, times) => {
let operand = codegen_operand(fx, operand);
@@ -768,11 +770,7 @@ fn codegen_stmt<'tcx>(
lval.write_cvalue(fx, CValue::by_val(operand, box_layout));
}
Rvalue::NullaryOp(null_op, ty) => {
- assert!(
- lval.layout()
- .ty
- .is_sized(fx.tcx.at(stmt.source_info.span), ParamEnv::reveal_all())
- );
+ assert!(lval.layout().ty.is_sized(fx.tcx, ParamEnv::reveal_all()));
let layout = fx.layout_of(fx.monomorphize(ty));
let val = match null_op {
NullOp::SizeOf => layout.size.bytes(),
@@ -803,20 +801,31 @@ fn codegen_stmt<'tcx>(
| StatementKind::AscribeUserType(..) => {}
StatementKind::Coverage { .. } => fx.tcx.sess.fatal("-Zcoverage is unimplemented"),
- StatementKind::CopyNonOverlapping(inner) => {
- let dst = codegen_operand(fx, &inner.dst);
- let pointee = dst
- .layout()
- .pointee_info_at(fx, rustc_target::abi::Size::ZERO)
- .expect("Expected pointer");
- let dst = dst.load_scalar(fx);
- let src = codegen_operand(fx, &inner.src).load_scalar(fx);
- let count = codegen_operand(fx, &inner.count).load_scalar(fx);
- let elem_size: u64 = pointee.size.bytes();
- let bytes =
- if elem_size != 1 { fx.bcx.ins().imul_imm(count, elem_size as i64) } else { count };
- fx.bcx.call_memcpy(fx.target_config, dst, src, bytes);
- }
+ StatementKind::Intrinsic(ref intrinsic) => match &**intrinsic {
+ // We ignore `assume` intrinsics; they are only useful for optimizations
+ NonDivergingIntrinsic::Assume(_) => {}
+ NonDivergingIntrinsic::CopyNonOverlapping(mir::CopyNonOverlapping {
+ src,
+ dst,
+ count,
+ }) => {
+ let dst = codegen_operand(fx, dst);
+ let pointee = dst
+ .layout()
+ .pointee_info_at(fx, rustc_target::abi::Size::ZERO)
+ .expect("Expected pointer");
+ let dst = dst.load_scalar(fx);
+ let src = codegen_operand(fx, src).load_scalar(fx);
+ let count = codegen_operand(fx, count).load_scalar(fx);
+ let elem_size: u64 = pointee.size.bytes();
+ let bytes = if elem_size != 1 {
+ fx.bcx.ins().imul_imm(count, elem_size as i64)
+ } else {
+ count
+ };
+ fx.bcx.call_memcpy(fx.target_config, dst, src, bytes);
+ }
+ },
}
}
@@ -844,6 +853,7 @@ pub(crate) fn codegen_place<'tcx>(
PlaceElem::Deref => {
cplace = cplace.place_deref(fx);
}
+ PlaceElem::OpaqueCast(ty) => cplace = cplace.place_opaque_cast(fx, ty),
PlaceElem::Field(field, _ty) => {
cplace = cplace.place_field(fx, field);
}
@@ -910,7 +920,7 @@ pub(crate) fn codegen_operand<'tcx>(
let cplace = codegen_place(fx, *place);
cplace.to_cvalue(fx)
}
- Operand::Constant(const_) => crate::constant::codegen_constant(fx, const_),
+ Operand::Constant(const_) => crate::constant::codegen_constant_operand(fx, const_),
}
}
@@ -934,8 +944,11 @@ pub(crate) fn codegen_panic_inner<'tcx>(
args: &[Value],
span: Span,
) {
- let def_id =
- fx.tcx.lang_items().require(lang_item).unwrap_or_else(|s| fx.tcx.sess.span_fatal(span, &s));
+ let def_id = fx
+ .tcx
+ .lang_items()
+ .require(lang_item)
+ .unwrap_or_else(|e| fx.tcx.sess.span_fatal(span, e.to_string()));
let instance = Instance::mono(fx.tcx, def_id).polymorphize(fx.tcx);
let symbol_name = fx.tcx.symbol_name(instance).name;
diff --git a/compiler/rustc_codegen_cranelift/src/common.rs b/compiler/rustc_codegen_cranelift/src/common.rs
index f9dc1b516..589594465 100644
--- a/compiler/rustc_codegen_cranelift/src/common.rs
+++ b/compiler/rustc_codegen_cranelift/src/common.rs
@@ -1,14 +1,18 @@
use cranelift_codegen::isa::TargetFrontendConfig;
+use gimli::write::FileId;
+
+use rustc_data_structures::sync::Lrc;
use rustc_index::vec::IndexVec;
use rustc_middle::ty::layout::{
FnAbiError, FnAbiOfHelpers, FnAbiRequest, LayoutError, LayoutOfHelpers,
};
-use rustc_middle::ty::SymbolName;
+use rustc_span::SourceFile;
use rustc_target::abi::call::FnAbi;
use rustc_target::abi::{Integer, Primitive};
use rustc_target::spec::{HasTargetSpec, Target};
use crate::constant::ConstantCx;
+use crate::debuginfo::FunctionDebugContext;
use crate::prelude::*;
pub(crate) fn pointer_ty(tcx: TyCtxt<'_>) -> types::Type {
@@ -74,7 +78,7 @@ fn clif_type_from_ty<'tcx>(tcx: TyCtxt<'tcx>, ty: Ty<'tcx>) -> Option<types::Typ
_ => unreachable!(),
};
- match scalar_to_clif_type(tcx, element).by(u16::try_from(count).unwrap()) {
+ match scalar_to_clif_type(tcx, element).by(u32::try_from(count).unwrap()) {
// Cranelift currently only implements icmp for 128bit vectors.
Some(vector_ty) if vector_ty.bits() == 128 => vector_ty,
_ => return None,
@@ -232,15 +236,16 @@ pub(crate) fn type_sign(ty: Ty<'_>) -> bool {
}
pub(crate) struct FunctionCx<'m, 'clif, 'tcx: 'm> {
- pub(crate) cx: &'clif mut crate::CodegenCx<'tcx>,
+ pub(crate) cx: &'clif mut crate::CodegenCx,
pub(crate) module: &'m mut dyn Module,
pub(crate) tcx: TyCtxt<'tcx>,
pub(crate) target_config: TargetFrontendConfig, // Cached from module
pub(crate) pointer_type: Type, // Cached from module
pub(crate) constants_cx: ConstantCx,
+ pub(crate) func_debug_cx: Option<FunctionDebugContext>,
pub(crate) instance: Instance<'tcx>,
- pub(crate) symbol_name: SymbolName<'tcx>,
+ pub(crate) symbol_name: String,
pub(crate) mir: &'tcx Body<'tcx>,
pub(crate) fn_abi: Option<&'tcx FnAbi<'tcx, Ty<'tcx>>>,
@@ -252,7 +257,11 @@ pub(crate) struct FunctionCx<'m, 'clif, 'tcx: 'm> {
pub(crate) caller_location: Option<CValue<'tcx>>,
pub(crate) clif_comments: crate::pretty_clif::CommentWriter,
- pub(crate) source_info_set: indexmap::IndexSet<SourceInfo>,
+
+ /// Last accessed source file and its debuginfo file id.
+ ///
+ /// For optimization purposes only.
+ pub(crate) last_source_file: Option<(Lrc<SourceFile>, FileId)>,
/// This should only be accessed by `CPlace::new_var`.
pub(crate) next_ssa_var: u32,
@@ -336,8 +345,31 @@ impl<'tcx> FunctionCx<'_, '_, 'tcx> {
}
pub(crate) fn set_debug_loc(&mut self, source_info: mir::SourceInfo) {
- let (index, _) = self.source_info_set.insert_full(source_info);
- self.bcx.set_srcloc(SourceLoc::new(index as u32));
+ if let Some(debug_context) = &mut self.cx.debug_context {
+ let (file, line, column) =
+ DebugContext::get_span_loc(self.tcx, self.mir.span, source_info.span);
+
+ // add_source_file is very slow.
+ // Optimize for the common case of the current file not being changed.
+ let mut cached_file_id = None;
+ if let Some((ref last_source_file, last_file_id)) = self.last_source_file {
+ // If the allocations are not equal, the files may still be equal, but that
+ // doesn't matter, as this is just an optimization.
+ if rustc_data_structures::sync::Lrc::ptr_eq(last_source_file, &file) {
+ cached_file_id = Some(last_file_id);
+ }
+ }
+
+ let file_id = if let Some(file_id) = cached_file_id {
+ file_id
+ } else {
+ debug_context.add_source_file(&file)
+ };
+
+ let source_loc =
+ self.func_debug_cx.as_mut().unwrap().add_dbg_loc(file_id, line, column);
+ self.bcx.set_srcloc(source_loc);
+ }
}
// Note: must be kept in sync with get_caller_location from cg_ssa
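`set_debug_loc` now memoizes the last `(SourceFile, FileId)` pair and compares files by `Lrc::ptr_eq`, paying the slow `add_source_file` path only when the pointer differs. A minimal sketch of that pointer-identity caching pattern, with `Rc` standing in for `Lrc` and an invented id scheme standing in for the real debuginfo API:

use std::rc::Rc;

struct FileIdCache {
    last: Option<(Rc<String>, u32)>,
    next_id: u32,
}

impl FileIdCache {
    fn new() -> Self {
        FileIdCache { last: None, next_id: 0 }
    }

    // `register` stands in for the slow add_source_file call.
    fn file_id(&mut self, file: &Rc<String>) -> u32 {
        if let Some((ref last_file, last_id)) = self.last {
            // Unequal pointers may still mean equal files; we then just pay
            // the slow path again, so the check stays conservative.
            if Rc::ptr_eq(last_file, file) {
                return last_id;
            }
        }
        let id = self.register(file);
        self.last = Some((file.clone(), id));
        id
    }

    fn register(&mut self, _file: &Rc<String>) -> u32 {
        let id = self.next_id;
        self.next_id += 1;
        id
    }
}

fn main() {
    let mut cache = FileIdCache::new();
    let lib_rs = Rc::new("lib.rs".to_string());
    let first = cache.file_id(&lib_rs);
    let second = cache.file_id(&lib_rs);
    assert_eq!(first, second);
    println!("file id {} served from the cache on the second lookup", first);
}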
diff --git a/compiler/rustc_codegen_cranelift/src/concurrency_limiter.rs b/compiler/rustc_codegen_cranelift/src/concurrency_limiter.rs
new file mode 100644
index 000000000..f855e20e0
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/src/concurrency_limiter.rs
@@ -0,0 +1,177 @@
+use std::sync::{Arc, Condvar, Mutex};
+
+use rustc_session::Session;
+
+use jobserver::HelperThread;
+
+// FIXME don't panic when a worker thread panics
+
+pub(super) struct ConcurrencyLimiter {
+ helper_thread: Option<HelperThread>,
+ state: Arc<Mutex<state::ConcurrencyLimiterState>>,
+ available_token_condvar: Arc<Condvar>,
+ finished: bool,
+}
+
+impl ConcurrencyLimiter {
+ pub(super) fn new(sess: &Session, pending_jobs: usize) -> Self {
+ let state = Arc::new(Mutex::new(state::ConcurrencyLimiterState::new(pending_jobs)));
+ let available_token_condvar = Arc::new(Condvar::new());
+
+ let state_helper = state.clone();
+ let available_token_condvar_helper = available_token_condvar.clone();
+ let helper_thread = sess
+ .jobserver
+ .clone()
+ .into_helper_thread(move |token| {
+ let mut state = state_helper.lock().unwrap();
+ state.add_new_token(token.unwrap());
+ available_token_condvar_helper.notify_one();
+ })
+ .unwrap();
+ ConcurrencyLimiter {
+ helper_thread: Some(helper_thread),
+ state,
+ available_token_condvar,
+ finished: false,
+ }
+ }
+
+ pub(super) fn acquire(&mut self) -> ConcurrencyLimiterToken {
+ let mut state = self.state.lock().unwrap();
+ loop {
+ state.assert_invariants();
+
+ if state.try_start_job() {
+ return ConcurrencyLimiterToken {
+ state: self.state.clone(),
+ available_token_condvar: self.available_token_condvar.clone(),
+ };
+ }
+
+ self.helper_thread.as_mut().unwrap().request_token();
+ state = self.available_token_condvar.wait(state).unwrap();
+ }
+ }
+
+ pub(super) fn job_already_done(&mut self) {
+ let mut state = self.state.lock().unwrap();
+ state.job_already_done();
+ }
+
+ pub(crate) fn finished(mut self) {
+ self.helper_thread.take();
+
+ // Assert that all jobs have finished
+ let state = Mutex::get_mut(Arc::get_mut(&mut self.state).unwrap()).unwrap();
+ state.assert_done();
+
+ self.finished = true;
+ }
+}
+
+impl Drop for ConcurrencyLimiter {
+ fn drop(&mut self) {
+ if !self.finished && !std::thread::panicking() {
+ panic!("Forgot to call finished() on ConcurrencyLimiter");
+ }
+ }
+}
+
+#[derive(Debug)]
+pub(super) struct ConcurrencyLimiterToken {
+ state: Arc<Mutex<state::ConcurrencyLimiterState>>,
+ available_token_condvar: Arc<Condvar>,
+}
+
+impl Drop for ConcurrencyLimiterToken {
+ fn drop(&mut self) {
+ let mut state = self.state.lock().unwrap();
+ state.job_finished();
+ self.available_token_condvar.notify_one();
+ }
+}
+
+mod state {
+ use jobserver::Acquired;
+
+ #[derive(Debug)]
+ pub(super) struct ConcurrencyLimiterState {
+ pending_jobs: usize,
+ active_jobs: usize,
+
+ // None is used to represent the implicit token, Some to represent explicit tokens
+ tokens: Vec<Option<Acquired>>,
+ }
+
+ impl ConcurrencyLimiterState {
+ pub(super) fn new(pending_jobs: usize) -> Self {
+ ConcurrencyLimiterState { pending_jobs, active_jobs: 0, tokens: vec![None] }
+ }
+
+ pub(super) fn assert_invariants(&self) {
+ // There must be no excess active jobs
+ assert!(self.active_jobs <= self.pending_jobs);
+
+ // There may not be more active jobs than there are tokens
+ assert!(self.active_jobs <= self.tokens.len());
+ }
+
+ pub(super) fn assert_done(&self) {
+ assert_eq!(self.pending_jobs, 0);
+ assert_eq!(self.active_jobs, 0);
+ }
+
+ pub(super) fn add_new_token(&mut self, token: Acquired) {
+ self.tokens.push(Some(token));
+ self.drop_excess_capacity();
+ }
+
+ pub(super) fn try_start_job(&mut self) -> bool {
+ if self.active_jobs < self.tokens.len() {
+ // Using existing token
+ self.job_started();
+ return true;
+ }
+
+ false
+ }
+
+ pub(super) fn job_started(&mut self) {
+ self.assert_invariants();
+ self.active_jobs += 1;
+ self.drop_excess_capacity();
+ self.assert_invariants();
+ }
+
+ pub(super) fn job_finished(&mut self) {
+ self.assert_invariants();
+ self.pending_jobs -= 1;
+ self.active_jobs -= 1;
+ self.assert_invariants();
+ self.drop_excess_capacity();
+ self.assert_invariants();
+ }
+
+ pub(super) fn job_already_done(&mut self) {
+ self.assert_invariants();
+ self.pending_jobs -= 1;
+ self.assert_invariants();
+ self.drop_excess_capacity();
+ self.assert_invariants();
+ }
+
+ fn drop_excess_capacity(&mut self) {
+ self.assert_invariants();
+
+ // Drop all tokens that can never be used anymore
+ self.tokens.truncate(std::cmp::max(self.pending_jobs, 1));
+
+ // Keep some excess tokens to satisfy requests faster
+ const MAX_EXTRA_CAPACITY: usize = 2;
+ self.tokens.truncate(std::cmp::max(self.active_jobs + MAX_EXTRA_CAPACITY, 1));
+
+ self.assert_invariants();
+ }
+ }
+}
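The new `ConcurrencyLimiter` follows a token-semaphore shape: a jobserver helper thread pushes freshly acquired tokens into shared state, `acquire()` blocks on a condvar until a token is free, and dropping the returned token releases it and wakes a waiter. A reduced sketch of the same shape using only the standard library, with a plain counter standing in for jobserver tokens (an assumption made purely for illustration):

use std::sync::{Arc, Condvar, Mutex};

// A tiny counting semaphore with RAII tokens, mirroring the acquire()/Drop
// structure of ConcurrencyLimiter and ConcurrencyLimiterToken.
struct Limiter {
    state: Arc<(Mutex<usize>, Condvar)>, // free slots + wakeup signal
}

struct Token {
    state: Arc<(Mutex<usize>, Condvar)>,
}

impl Limiter {
    fn new(slots: usize) -> Self {
        Limiter { state: Arc::new((Mutex::new(slots), Condvar::new())) }
    }

    // Block until a slot is free, like acquire() waiting on the condvar
    // for the helper thread to hand over a token.
    fn acquire(&self) -> Token {
        let (lock, cvar) = &*self.state;
        let mut free = lock.lock().unwrap();
        while *free == 0 {
            free = cvar.wait(free).unwrap();
        }
        *free -= 1;
        Token { state: self.state.clone() }
    }
}

impl Drop for Token {
    fn drop(&mut self) {
        // Give the slot back and wake one waiter, like
        // ConcurrencyLimiterToken::drop notifying the condvar.
        let (lock, cvar) = &*self.state;
        *lock.lock().unwrap() += 1;
        cvar.notify_one();
    }
}

fn main() {
    let limiter = Limiter::new(2);
    let _a = limiter.acquire();
    let _b = limiter.acquire();
    // A third acquire() would block here until _a or _b is dropped.
    drop(_a);
    let _c = limiter.acquire();
}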
diff --git a/compiler/rustc_codegen_cranelift/src/constant.rs b/compiler/rustc_codegen_cranelift/src/constant.rs
index 7f7fd0e9c..148b66d95 100644
--- a/compiler/rustc_codegen_cranelift/src/constant.rs
+++ b/compiler/rustc_codegen_cranelift/src/constant.rs
@@ -5,10 +5,7 @@ use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrFlags;
use rustc_middle::mir::interpret::{
read_target_uint, AllocId, ConstAllocation, ConstValue, ErrorHandled, GlobalAlloc, Scalar,
};
-use rustc_middle::ty::ConstKind;
-use rustc_span::DUMMY_SP;
-use cranelift_codegen::ir::GlobalValueData;
use cranelift_module::*;
use crate::prelude::*;
@@ -41,36 +38,22 @@ impl ConstantCx {
pub(crate) fn check_constants(fx: &mut FunctionCx<'_, '_, '_>) -> bool {
let mut all_constants_ok = true;
for constant in &fx.mir.required_consts {
- let const_ = match fx.monomorphize(constant.literal) {
- ConstantKind::Ty(ct) => ct,
+ let unevaluated = match fx.monomorphize(constant.literal) {
+ ConstantKind::Ty(_) => unreachable!(),
+ ConstantKind::Unevaluated(uv, _) => uv,
ConstantKind::Val(..) => continue,
};
- match const_.kind() {
- ConstKind::Value(_) => {}
- ConstKind::Unevaluated(unevaluated) => {
- if let Err(err) =
- fx.tcx.const_eval_resolve(ParamEnv::reveal_all(), unevaluated, None)
- {
- all_constants_ok = false;
- match err {
- ErrorHandled::Reported(_) | ErrorHandled::Linted => {
- fx.tcx.sess.span_err(constant.span, "erroneous constant encountered");
- }
- ErrorHandled::TooGeneric => {
- span_bug!(
- constant.span,
- "codgen encountered polymorphic constant: {:?}",
- err
- );
- }
- }
+
+ if let Err(err) = fx.tcx.const_eval_resolve(ParamEnv::reveal_all(), unevaluated, None) {
+ all_constants_ok = false;
+ match err {
+ ErrorHandled::Reported(_) | ErrorHandled::Linted => {
+ fx.tcx.sess.span_err(constant.span, "erroneous constant encountered");
+ }
+ ErrorHandled::TooGeneric => {
+ span_bug!(constant.span, "codegen encountered polymorphic constant: {:?}", err);
}
}
- ConstKind::Param(_)
- | ConstKind::Infer(_)
- | ConstKind::Bound(_, _)
- | ConstKind::Placeholder(_)
- | ConstKind::Error(_) => unreachable!("{:?}", const_),
}
}
all_constants_ok
@@ -96,62 +79,47 @@ pub(crate) fn codegen_tls_ref<'tcx>(
CValue::by_val(tls_ptr, layout)
}
-fn codegen_static_ref<'tcx>(
- fx: &mut FunctionCx<'_, '_, 'tcx>,
- def_id: DefId,
- layout: TyAndLayout<'tcx>,
-) -> CPlace<'tcx> {
- let data_id = data_id_for_static(fx.tcx, fx.module, def_id, false);
- let local_data_id = fx.module.declare_data_in_func(data_id, &mut fx.bcx.func);
- if fx.clif_comments.enabled() {
- fx.add_comment(local_data_id, format!("{:?}", def_id));
- }
- let global_ptr = fx.bcx.ins().global_value(fx.pointer_type, local_data_id);
- assert!(!layout.is_unsized(), "unsized statics aren't supported");
- assert!(
- matches!(
- fx.bcx.func.global_values[local_data_id],
- GlobalValueData::Symbol { tls: false, .. }
- ),
- "tls static referenced without Rvalue::ThreadLocalRef"
- );
- CPlace::for_ptr(crate::pointer::Pointer::new(global_ptr), layout)
-}
-
-pub(crate) fn codegen_constant<'tcx>(
+pub(crate) fn eval_mir_constant<'tcx>(
fx: &mut FunctionCx<'_, '_, 'tcx>,
constant: &Constant<'tcx>,
-) -> CValue<'tcx> {
- let const_ = match fx.monomorphize(constant.literal) {
- ConstantKind::Ty(ct) => ct,
- ConstantKind::Val(val, ty) => return codegen_const_value(fx, val, ty),
- };
- let const_val = match const_.kind() {
- ConstKind::Value(valtree) => fx.tcx.valtree_to_const_val((const_.ty(), valtree)),
- ConstKind::Unevaluated(ty::Unevaluated { def, substs, promoted })
+) -> (ConstValue<'tcx>, Ty<'tcx>) {
+ let constant_kind = fx.monomorphize(constant.literal);
+ let uv = match constant_kind {
+ ConstantKind::Ty(const_) => match const_.kind() {
+ ty::ConstKind::Unevaluated(uv) => uv.expand(),
+ ty::ConstKind::Value(val) => {
+ return (fx.tcx.valtree_to_const_val((const_.ty(), val)), const_.ty());
+ }
+ err => span_bug!(
+ constant.span,
+ "encountered bad ConstKind after monomorphizing: {:?}",
+ err
+ ),
+ },
+ ConstantKind::Unevaluated(mir::UnevaluatedConst { def, .. }, _)
if fx.tcx.is_static(def.did) =>
{
- assert!(substs.is_empty());
- assert!(promoted.is_none());
-
- return codegen_static_ref(fx, def.did, fx.layout_of(const_.ty())).to_cvalue(fx);
- }
- ConstKind::Unevaluated(unevaluated) => {
- match fx.tcx.const_eval_resolve(ParamEnv::reveal_all(), unevaluated, None) {
- Ok(const_val) => const_val,
- Err(_) => {
- span_bug!(constant.span, "erroneous constant not captured by required_consts");
- }
- }
+ span_bug!(constant.span, "MIR constant refers to static");
}
- ConstKind::Param(_)
- | ConstKind::Infer(_)
- | ConstKind::Bound(_, _)
- | ConstKind::Placeholder(_)
- | ConstKind::Error(_) => unreachable!("{:?}", const_),
+ ConstantKind::Unevaluated(uv, _) => uv,
+ ConstantKind::Val(val, _) => return (val, constant_kind.ty()),
};
- codegen_const_value(fx, const_val, const_.ty())
+ (
+ fx.tcx.const_eval_resolve(ty::ParamEnv::reveal_all(), uv, None).unwrap_or_else(|_err| {
+ span_bug!(constant.span, "erroneous constant not captured by required_consts");
+ }),
+ constant_kind.ty(),
+ )
+}
+
+pub(crate) fn codegen_constant_operand<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ constant: &Constant<'tcx>,
+) -> CValue<'tcx> {
+ let (const_val, ty) = eval_mir_constant(fx, constant);
+
+ codegen_const_value(fx, const_val, ty)
}
pub(crate) fn codegen_const_value<'tcx>(
@@ -267,7 +235,7 @@ pub(crate) fn codegen_const_value<'tcx>(
}
}
-pub(crate) fn pointer_for_allocation<'tcx>(
+fn pointer_for_allocation<'tcx>(
fx: &mut FunctionCx<'_, '_, 'tcx>,
alloc: ConstAllocation<'tcx>,
) -> crate::pointer::Pointer {
@@ -322,7 +290,7 @@ fn data_id_for_static(
let is_mutable = if tcx.is_mutable_static(def_id) {
true
} else {
- !ty.is_freeze(tcx.at(DUMMY_SP), ParamEnv::reveal_all())
+ !ty.is_freeze(tcx, ParamEnv::reveal_all())
};
let align = tcx.layout_of(ParamEnv::reveal_all().and(ty)).unwrap().align.pref.bytes();
@@ -430,7 +398,7 @@ fn define_all_allocs(tcx: TyCtxt<'_>, module: &mut dyn Module, cx: &mut Constant
let bytes = alloc.inspect_with_uninit_and_ptr_outside_interpreter(0..alloc.len()).to_vec();
data_ctx.define(bytes.into_boxed_slice());
- for &(offset, alloc_id) in alloc.relocations().iter() {
+ for &(offset, alloc_id) in alloc.provenance().iter() {
let addend = {
let endianness = tcx.data_layout.endian;
let offset = offset.bytes() as usize;
@@ -490,12 +458,14 @@ pub(crate) fn mir_operand_get_const_val<'tcx>(
operand: &Operand<'tcx>,
) -> Option<ConstValue<'tcx>> {
match operand {
- Operand::Constant(const_) => match const_.literal {
- ConstantKind::Ty(const_) => fx
- .monomorphize(const_)
- .eval_for_mir(fx.tcx, ParamEnv::reveal_all())
- .try_to_value(fx.tcx),
+ Operand::Constant(const_) => match fx.monomorphize(const_.literal) {
+ ConstantKind::Ty(const_) => Some(
+ const_.eval_for_mir(fx.tcx, ParamEnv::reveal_all()).try_to_value(fx.tcx).unwrap(),
+ ),
ConstantKind::Val(val, _) => Some(val),
+ ConstantKind::Unevaluated(uv, _) => {
+ Some(fx.tcx.const_eval_resolve(ParamEnv::reveal_all(), uv, None).unwrap())
+ }
},
// FIXME(rust-lang/rust#85105): Casts like `IMM8 as u32` result in the const being stored
// inside a temporary before being passed to the intrinsic requiring the const argument.
@@ -505,12 +475,21 @@ pub(crate) fn mir_operand_get_const_val<'tcx>(
return None;
}
let mut computed_const_val = None;
- for bb_data in fx.mir.basic_blocks() {
+ for bb_data in fx.mir.basic_blocks.iter() {
for stmt in &bb_data.statements {
match &stmt.kind {
StatementKind::Assign(local_and_rvalue) if &local_and_rvalue.0 == place => {
match &local_and_rvalue.1 {
- Rvalue::Cast(CastKind::Misc, operand, ty) => {
+ Rvalue::Cast(
+ CastKind::IntToInt
+ | CastKind::FloatToFloat
+ | CastKind::FloatToInt
+ | CastKind::IntToFloat
+ | CastKind::FnPtrToPtr
+ | CastKind::PtrToPtr,
+ operand,
+ ty,
+ ) => {
if computed_const_val.is_some() {
return None; // local assigned twice
}
@@ -536,9 +515,11 @@ pub(crate) fn mir_operand_get_const_val<'tcx>(
{
return None;
}
- StatementKind::CopyNonOverlapping(_) => {
- return None;
- } // conservative handling
+ StatementKind::Intrinsic(ref intrinsic) => match **intrinsic {
+ NonDivergingIntrinsic::CopyNonOverlapping(..) => return None,
+ NonDivergingIntrinsic::Assume(..) => {}
+ },
+ // conservative handling
StatementKind::Assign(_)
| StatementKind::FakeRead(_)
| StatementKind::SetDiscriminant { .. }
diff --git a/compiler/rustc_codegen_cranelift/src/debuginfo/emit.rs b/compiler/rustc_codegen_cranelift/src/debuginfo/emit.rs
index 589910ede..9583cd2ec 100644
--- a/compiler/rustc_codegen_cranelift/src/debuginfo/emit.rs
+++ b/compiler/rustc_codegen_cranelift/src/debuginfo/emit.rs
@@ -9,7 +9,7 @@ use gimli::{RunTimeEndian, SectionId};
use super::object::WriteDebugInfo;
use super::DebugContext;
-impl DebugContext<'_> {
+impl DebugContext {
pub(crate) fn emit(&mut self, product: &mut ObjectProduct) {
let unit_range_list_id = self.dwarf.unit.ranges.add(self.unit_range_list.clone());
let root = self.dwarf.unit.root();
diff --git a/compiler/rustc_codegen_cranelift/src/debuginfo/line_info.rs b/compiler/rustc_codegen_cranelift/src/debuginfo/line_info.rs
index bbcb95913..463de6a91 100644
--- a/compiler/rustc_codegen_cranelift/src/debuginfo/line_info.rs
+++ b/compiler/rustc_codegen_cranelift/src/debuginfo/line_info.rs
@@ -3,8 +3,10 @@
use std::ffi::OsStr;
use std::path::{Component, Path};
+use crate::debuginfo::FunctionDebugContext;
use crate::prelude::*;
+use rustc_data_structures::sync::Lrc;
use rustc_span::{
FileName, Pos, SourceFile, SourceFileAndLine, SourceFileHash, SourceFileHashAlgorithm,
};
@@ -14,7 +16,6 @@ use cranelift_codegen::MachSrcLoc;
use gimli::write::{
Address, AttributeValue, FileId, FileInfo, LineProgram, LineString, LineStringTable,
- UnitEntryId,
};
// OPTIMIZATION: It is cheaper to do this in one pass than using `.parent()` and `.file_name()`.
@@ -47,9 +48,9 @@ fn osstr_as_utf8_bytes(path: &OsStr) -> &[u8] {
}
}
-pub(crate) const MD5_LEN: usize = 16;
+const MD5_LEN: usize = 16;
-pub(crate) fn make_file_info(hash: SourceFileHash) -> Option<FileInfo> {
+fn make_file_info(hash: SourceFileHash) -> Option<FileInfo> {
if hash.kind == SourceFileHashAlgorithm::Md5 {
let mut buf = [0u8; MD5_LEN];
buf.copy_from_slice(hash.hash_bytes());
@@ -59,160 +60,132 @@ pub(crate) fn make_file_info(hash: SourceFileHash) -> Option<FileInfo> {
}
}
-fn line_program_add_file(
- line_program: &mut LineProgram,
- line_strings: &mut LineStringTable,
- file: &SourceFile,
-) -> FileId {
- match &file.name {
- FileName::Real(path) => {
- let (dir_path, file_name) = split_path_dir_and_file(path.remapped_path_if_available());
- let dir_name = osstr_as_utf8_bytes(dir_path.as_os_str());
- let file_name = osstr_as_utf8_bytes(file_name);
-
- let dir_id = if !dir_name.is_empty() {
- let dir_name = LineString::new(dir_name, line_program.encoding(), line_strings);
- line_program.add_directory(dir_name)
- } else {
- line_program.default_directory()
- };
- let file_name = LineString::new(file_name, line_program.encoding(), line_strings);
+impl DebugContext {
+ pub(crate) fn get_span_loc(
+ tcx: TyCtxt<'_>,
+ function_span: Span,
+ span: Span,
+ ) -> (Lrc<SourceFile>, u64, u64) {
+ // Based on https://github.com/rust-lang/rust/blob/e369d87b015a84653343032833d65d0545fd3f26/src/librustc_codegen_ssa/mir/mod.rs#L116-L131
+ // In order to have a good line stepping behavior in debugger, we overwrite debug
+ // locations of macro expansions with that of the outermost expansion site (when the macro is
+ // annotated with `#[collapse_debuginfo]` or when `-Zdebug-macros` is provided).
+ let span = if tcx.should_collapse_debuginfo(span) {
+ span
+ } else {
+ // Walk up the macro expansion chain until we reach a non-expanded span.
+ // We also stop at the function body level because no line stepping can occur
+ // at the level above that.
+ rustc_span::hygiene::walk_chain(span, function_span.ctxt())
+ };
- let info = make_file_info(file.src_hash);
+ match tcx.sess.source_map().lookup_line(span.lo()) {
+ Ok(SourceFileAndLine { sf: file, line }) => {
+ let line_pos = file.line_begin_pos(span.lo());
- line_program.file_has_md5 &= info.is_some();
- line_program.add_file(file_name, dir_id, info)
+ (
+ file,
+ u64::try_from(line).unwrap() + 1,
+ u64::from((span.lo() - line_pos).to_u32()) + 1,
+ )
+ }
+ Err(file) => (file, 0, 0),
}
- // FIXME give more appropriate file names
- filename => {
- let dir_id = line_program.default_directory();
- let dummy_file_name = LineString::new(
- filename.prefer_remapped().to_string().into_bytes(),
- line_program.encoding(),
- line_strings,
- );
- line_program.add_file(dummy_file_name, dir_id, None)
+ }
+
+ pub(crate) fn add_source_file(&mut self, source_file: &SourceFile) -> FileId {
+ let line_program: &mut LineProgram = &mut self.dwarf.unit.line_program;
+ let line_strings: &mut LineStringTable = &mut self.dwarf.line_strings;
+
+ match &source_file.name {
+ FileName::Real(path) => {
+ let (dir_path, file_name) =
+ split_path_dir_and_file(path.remapped_path_if_available());
+ let dir_name = osstr_as_utf8_bytes(dir_path.as_os_str());
+ let file_name = osstr_as_utf8_bytes(file_name);
+
+ let dir_id = if !dir_name.is_empty() {
+ let dir_name = LineString::new(dir_name, line_program.encoding(), line_strings);
+ line_program.add_directory(dir_name)
+ } else {
+ line_program.default_directory()
+ };
+ let file_name = LineString::new(file_name, line_program.encoding(), line_strings);
+
+ let info = make_file_info(source_file.src_hash);
+
+ line_program.file_has_md5 &= info.is_some();
+ line_program.add_file(file_name, dir_id, info)
+ }
+ // FIXME give more appropriate file names
+ filename => {
+ let dir_id = line_program.default_directory();
+ let dummy_file_name = LineString::new(
+ filename.prefer_remapped().to_string().into_bytes(),
+ line_program.encoding(),
+ line_strings,
+ );
+ line_program.add_file(dummy_file_name, dir_id, None)
+ }
}
}
}
-impl<'tcx> DebugContext<'tcx> {
- pub(super) fn emit_location(&mut self, entry_id: UnitEntryId, span: Span) {
- let loc = self.tcx.sess.source_map().lookup_char_pos(span.lo());
-
- let file_id = line_program_add_file(
- &mut self.dwarf.unit.line_program,
- &mut self.dwarf.line_strings,
- &loc.file,
- );
-
- let entry = self.dwarf.unit.get_mut(entry_id);
-
- entry.set(gimli::DW_AT_decl_file, AttributeValue::FileIndex(Some(file_id)));
- entry.set(gimli::DW_AT_decl_line, AttributeValue::Udata(loc.line as u64));
- entry.set(gimli::DW_AT_decl_column, AttributeValue::Udata(loc.col.to_usize() as u64));
+impl FunctionDebugContext {
+ pub(crate) fn add_dbg_loc(&mut self, file_id: FileId, line: u64, column: u64) -> SourceLoc {
+ let (index, _) = self.source_loc_set.insert_full((file_id, line, column));
+ SourceLoc::new(u32::try_from(index).unwrap())
}
pub(super) fn create_debug_lines(
&mut self,
+ debug_context: &mut DebugContext,
symbol: usize,
- entry_id: UnitEntryId,
context: &Context,
- function_span: Span,
- source_info_set: &indexmap::IndexSet<SourceInfo>,
) -> CodeOffset {
- let tcx = self.tcx;
- let line_program = &mut self.dwarf.unit.line_program;
-
- let line_strings = &mut self.dwarf.line_strings;
- let mut last_span = None;
- let mut last_file = None;
- let mut create_row_for_span = |line_program: &mut LineProgram, span: Span| {
- if let Some(last_span) = last_span {
- if span == last_span {
- line_program.generate_row();
- return;
- }
- }
- last_span = Some(span);
-
- // Based on https://github.com/rust-lang/rust/blob/e369d87b015a84653343032833d65d0545fd3f26/src/librustc_codegen_ssa/mir/mod.rs#L116-L131
- // In order to have a good line stepping behavior in debugger, we overwrite debug
- // locations of macro expansions with that of the outermost expansion site
- // (unless the crate is being compiled with `-Z debug-macros`).
- let span = if !span.from_expansion() || tcx.sess.opts.unstable_opts.debug_macros {
- span
- } else {
- // Walk up the macro expansion chain until we reach a non-expanded span.
- // We also stop at the function body level because no line stepping can occur
- // at the level above that.
- rustc_span::hygiene::walk_chain(span, function_span.ctxt())
+ let create_row_for_span =
+ |debug_context: &mut DebugContext, source_loc: (FileId, u64, u64)| {
+ let (file_id, line, col) = source_loc;
+
+ debug_context.dwarf.unit.line_program.row().file = file_id;
+ debug_context.dwarf.unit.line_program.row().line = line;
+ debug_context.dwarf.unit.line_program.row().column = col;
+ debug_context.dwarf.unit.line_program.generate_row();
};
- let (file, line, col) = match tcx.sess.source_map().lookup_line(span.lo()) {
- Ok(SourceFileAndLine { sf: file, line }) => {
- let line_pos = file.line_begin_pos(span.lo());
-
- (
- file,
- u64::try_from(line).unwrap() + 1,
- u64::from((span.lo() - line_pos).to_u32()) + 1,
- )
- }
- Err(file) => (file, 0, 0),
- };
-
- // line_program_add_file is very slow.
- // Optimize for the common case of the current file not being changed.
- let current_file_changed = if let Some(last_file) = &last_file {
- // If the allocations are not equal, then the files may still be equal, but that
- // is not a problem, as this is just an optimization.
- !rustc_data_structures::sync::Lrc::ptr_eq(last_file, &file)
- } else {
- true
- };
- if current_file_changed {
- let file_id = line_program_add_file(line_program, line_strings, &file);
- line_program.row().file = file_id;
- last_file = Some(file);
- }
-
- line_program.row().line = line;
- line_program.row().column = col;
- line_program.generate_row();
- };
-
- line_program.begin_sequence(Some(Address::Symbol { symbol, addend: 0 }));
+ debug_context
+ .dwarf
+ .unit
+ .line_program
+ .begin_sequence(Some(Address::Symbol { symbol, addend: 0 }));
let mut func_end = 0;
- let mcr = context.mach_compile_result.as_ref().unwrap();
+ let mcr = context.compiled_code().unwrap();
for &MachSrcLoc { start, end, loc } in mcr.buffer.get_srclocs_sorted() {
- line_program.row().address_offset = u64::from(start);
+ debug_context.dwarf.unit.line_program.row().address_offset = u64::from(start);
if !loc.is_default() {
- let source_info = *source_info_set.get_index(loc.bits() as usize).unwrap();
- create_row_for_span(line_program, source_info.span);
+ let source_loc = *self.source_loc_set.get_index(loc.bits() as usize).unwrap();
+ create_row_for_span(debug_context, source_loc);
} else {
- create_row_for_span(line_program, function_span);
+ create_row_for_span(debug_context, self.function_source_loc);
}
func_end = end;
}
- line_program.end_sequence(u64::from(func_end));
+ debug_context.dwarf.unit.line_program.end_sequence(u64::from(func_end));
let func_end = mcr.buffer.total_size();
assert_ne!(func_end, 0);
- let entry = self.dwarf.unit.get_mut(entry_id);
+ let entry = debug_context.dwarf.unit.get_mut(self.entry_id);
entry.set(
gimli::DW_AT_low_pc,
AttributeValue::Address(Address::Symbol { symbol, addend: 0 }),
);
entry.set(gimli::DW_AT_high_pc, AttributeValue::Udata(u64::from(func_end)));
- self.emit_location(entry_id, function_span);
-
func_end
}
}
diff --git a/compiler/rustc_codegen_cranelift/src/debuginfo/mod.rs b/compiler/rustc_codegen_cranelift/src/debuginfo/mod.rs
index 693092ba5..c55db2017 100644
--- a/compiler/rustc_codegen_cranelift/src/debuginfo/mod.rs
+++ b/compiler/rustc_codegen_cranelift/src/debuginfo/mod.rs
@@ -7,35 +7,34 @@ mod unwind;
use crate::prelude::*;
-use rustc_index::vec::IndexVec;
-
-use cranelift_codegen::entity::EntityRef;
-use cranelift_codegen::ir::{Endianness, LabelValueLoc, ValueLabel};
+use cranelift_codegen::ir::Endianness;
use cranelift_codegen::isa::TargetIsa;
-use cranelift_codegen::ValueLocRange;
use gimli::write::{
- Address, AttributeValue, DwarfUnit, Expression, LineProgram, LineString, Location,
- LocationList, Range, RangeList, UnitEntryId,
+ Address, AttributeValue, DwarfUnit, FileId, LineProgram, LineString, Range, RangeList,
+ UnitEntryId,
};
-use gimli::{Encoding, Format, LineEncoding, RunTimeEndian, X86_64};
+use gimli::{Encoding, Format, LineEncoding, RunTimeEndian};
+use indexmap::IndexSet;
pub(crate) use emit::{DebugReloc, DebugRelocName};
pub(crate) use unwind::UnwindContext;
-pub(crate) struct DebugContext<'tcx> {
- tcx: TyCtxt<'tcx>,
-
+pub(crate) struct DebugContext {
endian: RunTimeEndian,
dwarf: DwarfUnit,
unit_range_list: RangeList,
+}
- types: FxHashMap<Ty<'tcx>, UnitEntryId>,
+pub(crate) struct FunctionDebugContext {
+ entry_id: UnitEntryId,
+ function_source_loc: (FileId, u64, u64),
+ source_loc_set: indexmap::IndexSet<(FileId, u64, u64)>,
}
-impl<'tcx> DebugContext<'tcx> {
- pub(crate) fn new(tcx: TyCtxt<'tcx>, isa: &dyn TargetIsa) -> Self {
+impl DebugContext {
+ pub(crate) fn new(tcx: TyCtxt<'_>, isa: &dyn TargetIsa) -> Self {
let encoding = Encoding {
format: Format::Dwarf32,
// FIXME this should be configurable
@@ -101,127 +100,18 @@ impl<'tcx> DebugContext<'tcx> {
root.set(gimli::DW_AT_low_pc, AttributeValue::Address(Address::Constant(0)));
}
- DebugContext {
- tcx,
-
- endian,
-
- dwarf,
- unit_range_list: RangeList(Vec::new()),
-
- types: FxHashMap::default(),
- }
- }
-
- fn dwarf_ty(&mut self, ty: Ty<'tcx>) -> UnitEntryId {
- if let Some(type_id) = self.types.get(&ty) {
- return *type_id;
- }
-
- let new_entry = |dwarf: &mut DwarfUnit, tag| dwarf.unit.add(dwarf.unit.root(), tag);
-
- let primitive = |dwarf: &mut DwarfUnit, ate| {
- let type_id = new_entry(dwarf, gimli::DW_TAG_base_type);
- let type_entry = dwarf.unit.get_mut(type_id);
- type_entry.set(gimli::DW_AT_encoding, AttributeValue::Encoding(ate));
- type_id
- };
-
- let name = format!("{}", ty);
- let layout = self.tcx.layout_of(ParamEnv::reveal_all().and(ty)).unwrap();
-
- let type_id = match ty.kind() {
- ty::Bool => primitive(&mut self.dwarf, gimli::DW_ATE_boolean),
- ty::Char => primitive(&mut self.dwarf, gimli::DW_ATE_UTF),
- ty::Uint(_) => primitive(&mut self.dwarf, gimli::DW_ATE_unsigned),
- ty::Int(_) => primitive(&mut self.dwarf, gimli::DW_ATE_signed),
- ty::Float(_) => primitive(&mut self.dwarf, gimli::DW_ATE_float),
- ty::Ref(_, pointee_ty, _mutbl)
- | ty::RawPtr(ty::TypeAndMut { ty: pointee_ty, mutbl: _mutbl }) => {
- let type_id = new_entry(&mut self.dwarf, gimli::DW_TAG_pointer_type);
-
- // Ensure that type is inserted before recursing to avoid duplicates
- self.types.insert(ty, type_id);
-
- let pointee = self.dwarf_ty(*pointee_ty);
-
- let type_entry = self.dwarf.unit.get_mut(type_id);
-
- //type_entry.set(gimli::DW_AT_mutable, AttributeValue::Flag(mutbl == rustc_hir::Mutability::Mut));
- type_entry.set(gimli::DW_AT_type, AttributeValue::UnitRef(pointee));
-
- type_id
- }
- ty::Adt(adt_def, _substs) if adt_def.is_struct() && !layout.is_unsized() => {
- let type_id = new_entry(&mut self.dwarf, gimli::DW_TAG_structure_type);
-
- // Ensure that type is inserted before recursing to avoid duplicates
- self.types.insert(ty, type_id);
-
- let variant = adt_def.non_enum_variant();
-
- for (field_idx, field_def) in variant.fields.iter().enumerate() {
- let field_offset = layout.fields.offset(field_idx);
- let field_layout = layout.field(
- &layout::LayoutCx { tcx: self.tcx, param_env: ParamEnv::reveal_all() },
- field_idx,
- );
-
- let field_type = self.dwarf_ty(field_layout.ty);
-
- let field_id = self.dwarf.unit.add(type_id, gimli::DW_TAG_member);
- let field_entry = self.dwarf.unit.get_mut(field_id);
-
- field_entry.set(
- gimli::DW_AT_name,
- AttributeValue::String(field_def.name.as_str().to_string().into_bytes()),
- );
- field_entry.set(
- gimli::DW_AT_data_member_location,
- AttributeValue::Udata(field_offset.bytes()),
- );
- field_entry.set(gimli::DW_AT_type, AttributeValue::UnitRef(field_type));
- }
-
- type_id
- }
- _ => new_entry(&mut self.dwarf, gimli::DW_TAG_structure_type),
- };
-
- let type_entry = self.dwarf.unit.get_mut(type_id);
-
- type_entry.set(gimli::DW_AT_name, AttributeValue::String(name.into_bytes()));
- type_entry.set(gimli::DW_AT_byte_size, AttributeValue::Udata(layout.size.bytes()));
-
- self.types.insert(ty, type_id);
-
- type_id
- }
-
- fn define_local(&mut self, scope: UnitEntryId, name: String, ty: Ty<'tcx>) -> UnitEntryId {
- let dw_ty = self.dwarf_ty(ty);
-
- let var_id = self.dwarf.unit.add(scope, gimli::DW_TAG_variable);
- let var_entry = self.dwarf.unit.get_mut(var_id);
-
- var_entry.set(gimli::DW_AT_name, AttributeValue::String(name.into_bytes()));
- var_entry.set(gimli::DW_AT_type, AttributeValue::UnitRef(dw_ty));
-
- var_id
+ DebugContext { endian, dwarf, unit_range_list: RangeList(Vec::new()) }
}
pub(crate) fn define_function(
&mut self,
- instance: Instance<'tcx>,
- func_id: FuncId,
+ tcx: TyCtxt<'_>,
name: &str,
- isa: &dyn TargetIsa,
- context: &Context,
- source_info_set: &indexmap::IndexSet<SourceInfo>,
- local_map: IndexVec<mir::Local, CPlace<'tcx>>,
- ) {
- let symbol = func_id.as_u32() as usize;
- let mir = self.tcx.instance_mir(instance.def);
+ function_span: Span,
+ ) -> FunctionDebugContext {
+ let (file, line, column) = DebugContext::get_span_loc(tcx, function_span, function_span);
+
+ let file_id = self.add_source_file(&file);
// FIXME: add to appropriate scope instead of root
let scope = self.dwarf.unit.root();
@@ -233,14 +123,35 @@ impl<'tcx> DebugContext<'tcx> {
entry.set(gimli::DW_AT_name, AttributeValue::StringRef(name_id));
entry.set(gimli::DW_AT_linkage_name, AttributeValue::StringRef(name_id));
- let end = self.create_debug_lines(symbol, entry_id, context, mir.span, source_info_set);
+ entry.set(gimli::DW_AT_decl_file, AttributeValue::FileIndex(Some(file_id)));
+ entry.set(gimli::DW_AT_decl_line, AttributeValue::Udata(line));
+ entry.set(gimli::DW_AT_decl_column, AttributeValue::Udata(column));
- self.unit_range_list.0.push(Range::StartLength {
+ FunctionDebugContext {
+ entry_id,
+ function_source_loc: (file_id, line, column),
+ source_loc_set: IndexSet::new(),
+ }
+ }
+}
+
+impl FunctionDebugContext {
+ pub(crate) fn finalize(
+ mut self,
+ debug_context: &mut DebugContext,
+ func_id: FuncId,
+ context: &Context,
+ ) {
+ let symbol = func_id.as_u32() as usize;
+
+ let end = self.create_debug_lines(debug_context, symbol, context);
+
+ debug_context.unit_range_list.0.push(Range::StartLength {
begin: Address::Symbol { symbol, addend: 0 },
length: u64::from(end),
});
- let func_entry = self.dwarf.unit.get_mut(entry_id);
+ let func_entry = debug_context.dwarf.unit.get_mut(self.entry_id);
// Gdb requires both DW_AT_low_pc and DW_AT_high_pc. Otherwise the DW_TAG_subprogram is skipped.
func_entry.set(
gimli::DW_AT_low_pc,
@@ -248,110 +159,5 @@ impl<'tcx> DebugContext<'tcx> {
);
// Using Udata for DW_AT_high_pc requires at least DWARF4
func_entry.set(gimli::DW_AT_high_pc, AttributeValue::Udata(u64::from(end)));
-
- // FIXME make it more reliable and implement scopes before re-enabling this.
- if false {
- let value_labels_ranges = std::collections::HashMap::new(); // FIXME
-
- for (local, _local_decl) in mir.local_decls.iter_enumerated() {
- let ty = self.tcx.subst_and_normalize_erasing_regions(
- instance.substs,
- ty::ParamEnv::reveal_all(),
- mir.local_decls[local].ty,
- );
- let var_id = self.define_local(entry_id, format!("{:?}", local), ty);
-
- let location = place_location(
- self,
- isa,
- symbol,
- &local_map,
- &value_labels_ranges,
- Place { local, projection: ty::List::empty() },
- );
-
- let var_entry = self.dwarf.unit.get_mut(var_id);
- var_entry.set(gimli::DW_AT_location, location);
- }
- }
-
- // FIXME create locals for all entries in mir.var_debug_info
- }
-}
-
-fn place_location<'tcx>(
- debug_context: &mut DebugContext<'tcx>,
- isa: &dyn TargetIsa,
- symbol: usize,
- local_map: &IndexVec<mir::Local, CPlace<'tcx>>,
- #[allow(rustc::default_hash_types)] value_labels_ranges: &std::collections::HashMap<
- ValueLabel,
- Vec<ValueLocRange>,
- >,
- place: Place<'tcx>,
-) -> AttributeValue {
- assert!(place.projection.is_empty()); // FIXME implement them
-
- match local_map[place.local].inner() {
- CPlaceInner::Var(_local, var) => {
- let value_label = cranelift_codegen::ir::ValueLabel::new(var.index());
- if let Some(value_loc_ranges) = value_labels_ranges.get(&value_label) {
- let loc_list = LocationList(
- value_loc_ranges
- .iter()
- .map(|value_loc_range| Location::StartEnd {
- begin: Address::Symbol {
- symbol,
- addend: i64::from(value_loc_range.start),
- },
- end: Address::Symbol { symbol, addend: i64::from(value_loc_range.end) },
- data: translate_loc(isa, value_loc_range.loc).unwrap(),
- })
- .collect(),
- );
- let loc_list_id = debug_context.dwarf.unit.locations.add(loc_list);
-
- AttributeValue::LocationListRef(loc_list_id)
- } else {
- // FIXME set value labels for unused locals
-
- AttributeValue::Exprloc(Expression::new())
- }
- }
- CPlaceInner::VarPair(_, _, _) => {
- // FIXME implement this
-
- AttributeValue::Exprloc(Expression::new())
- }
- CPlaceInner::VarLane(_, _, _) => {
- // FIXME implement this
-
- AttributeValue::Exprloc(Expression::new())
- }
- CPlaceInner::Addr(_, _) => {
- // FIXME implement this (used by arguments and returns)
-
- AttributeValue::Exprloc(Expression::new())
-
- // For PointerBase::Stack:
- //AttributeValue::Exprloc(translate_loc(ValueLoc::Stack(*stack_slot)).unwrap())
- }
- }
-}
-
-// Adapted from https://github.com/CraneStation/wasmtime/blob/5a1845b4caf7a5dba8eda1fef05213a532ed4259/crates/debug/src/transform/expression.rs#L59-L137
-fn translate_loc(isa: &dyn TargetIsa, loc: LabelValueLoc) -> Option<Expression> {
- match loc {
- LabelValueLoc::Reg(reg) => {
- let machine_reg = isa.map_regalloc_reg_to_dwarf(reg).unwrap();
- let mut expr = Expression::new();
- expr.op_reg(gimli::Register(machine_reg));
- Some(expr)
- }
- LabelValueLoc::SPOffset(offset) => {
- let mut expr = Expression::new();
- expr.op_breg(X86_64::RSP, offset);
- Some(expr)
- }
}
}
diff --git a/compiler/rustc_codegen_cranelift/src/discriminant.rs b/compiler/rustc_codegen_cranelift/src/discriminant.rs
index f619bb5ed..97b395bcd 100644
--- a/compiler/rustc_codegen_cranelift/src/discriminant.rs
+++ b/compiler/rustc_codegen_cranelift/src/discriminant.rs
@@ -42,10 +42,10 @@ pub(crate) fn codegen_set_discriminant<'tcx>(
Variants::Multiple {
tag: _,
tag_field,
- tag_encoding: TagEncoding::Niche { dataful_variant, ref niche_variants, niche_start },
+ tag_encoding: TagEncoding::Niche { untagged_variant, ref niche_variants, niche_start },
variants: _,
} => {
- if variant_index != dataful_variant {
+ if variant_index != untagged_variant {
let niche = place.place_field(fx, mir::Field::new(tag_field));
let niche_value = variant_index.as_u32() - niche_variants.start().as_u32();
let niche_value = ty::ScalarInt::try_from_uint(
@@ -62,16 +62,14 @@ pub(crate) fn codegen_set_discriminant<'tcx>(
pub(crate) fn codegen_get_discriminant<'tcx>(
fx: &mut FunctionCx<'_, '_, 'tcx>,
+ dest: CPlace<'tcx>,
value: CValue<'tcx>,
dest_layout: TyAndLayout<'tcx>,
-) -> CValue<'tcx> {
+) {
let layout = value.layout();
- if layout.abi == Abi::Uninhabited {
- let true_ = fx.bcx.ins().iconst(types::I32, 1);
- fx.bcx.ins().trapnz(true_, TrapCode::UnreachableCodeReached);
- // Return a dummy value
- return CValue::by_ref(Pointer::const_addr(fx, 0), dest_layout);
+ if layout.abi.is_uninhabited() {
+ return;
}
let (tag_scalar, tag_field, tag_encoding) = match &layout.variants {
@@ -89,7 +87,9 @@ pub(crate) fn codegen_get_discriminant<'tcx>(
} else {
ty::ScalarInt::try_from_uint(discr_val, dest_layout.size).unwrap()
};
- return CValue::const_val(fx, dest_layout, discr_val);
+ let res = CValue::const_val(fx, dest_layout, discr_val);
+ dest.write_cvalue(fx, res);
+ return;
}
Variants::Multiple { tag, tag_field, tag_encoding, variants: _ } => {
(tag, *tag_field, tag_encoding)
@@ -110,9 +110,10 @@ pub(crate) fn codegen_get_discriminant<'tcx>(
_ => false,
};
let val = clif_intcast(fx, tag, cast_to, signed);
- CValue::by_val(val, dest_layout)
+ let res = CValue::by_val(val, dest_layout);
+ dest.write_cvalue(fx, res);
}
- TagEncoding::Niche { dataful_variant, ref niche_variants, niche_start } => {
+ TagEncoding::Niche { untagged_variant, ref niche_variants, niche_start } => {
// Rebase from niche values to discriminants, and check
// whether the result is in range for the niche variants.
@@ -168,9 +169,11 @@ pub(crate) fn codegen_get_discriminant<'tcx>(
fx.bcx.ins().iadd_imm(relative_discr, i64::from(niche_variants.start().as_u32()))
};
- let dataful_variant = fx.bcx.ins().iconst(cast_to, i64::from(dataful_variant.as_u32()));
- let discr = fx.bcx.ins().select(is_niche, niche_discr, dataful_variant);
- CValue::by_val(discr, dest_layout)
+ let untagged_variant =
+ fx.bcx.ins().iconst(cast_to, i64::from(untagged_variant.as_u32()));
+ let discr = fx.bcx.ins().select(is_niche, niche_discr, untagged_variant);
+ let res = CValue::by_val(discr, dest_layout);
+ dest.write_cvalue(fx, res);
}
}
}
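// Editor's note: the sketch below is not part of the patch. It restates the
// niche-decoding arithmetic that the reworked codegen_get_discriminant above
// emits as Cranelift IR, written as plain Rust over u32 values and covering
// only the common non-wrapping case. `untagged_variant`, `niche_variants` and
// `niche_start` mirror the layout fields; everything else is illustrative.
use std::ops::RangeInclusive;

fn decode_niche_discriminant(
    tag: u32,
    niche_start: u32,
    niche_variants: RangeInclusive<u32>,
    untagged_variant: u32,
) -> u32 {
    // Rebase the tag so the first niche variant maps to 0.
    let relative_discr = tag.wrapping_sub(niche_start);
    let relative_max = niche_variants.end() - niche_variants.start();
    // The tag encodes a niche variant only if the rebased value is in range;
    // any other tag value denotes the untagged (formerly "dataful") variant.
    if relative_discr <= relative_max {
        relative_discr + niche_variants.start()
    } else {
        untagged_variant
    }
}

fn main() {
    // e.g. Option<&T>: None (variant 0) is the niche at tag value 0,
    // Some (variant 1) is the untagged variant stored in the pointer itself.
    assert_eq!(decode_niche_discriminant(0, 0, 0..=0, 1), 0);
    assert_eq!(decode_niche_discriminant(0x1000, 0, 0..=0, 1), 1);
}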
diff --git a/compiler/rustc_codegen_cranelift/src/driver/aot.rs b/compiler/rustc_codegen_cranelift/src/driver/aot.rs
index 3cd1ef563..f873561c1 100644
--- a/compiler/rustc_codegen_cranelift/src/driver/aot.rs
+++ b/compiler/rustc_codegen_cranelift/src/driver/aot.rs
@@ -1,33 +1,129 @@
//! The AOT driver uses [`cranelift_object`] to write object files suitable for linking into a
//! standalone executable.
+use std::fs::File;
use std::path::PathBuf;
+use std::sync::Arc;
+use std::thread::JoinHandle;
-use rustc_ast::{InlineAsmOptions, InlineAsmTemplatePiece};
use rustc_codegen_ssa::back::metadata::create_compressed_metadata_file;
use rustc_codegen_ssa::{CodegenResults, CompiledModule, CrateInfo, ModuleKind};
+use rustc_data_structures::profiling::SelfProfilerRef;
use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
use rustc_metadata::EncodedMetadata;
use rustc_middle::dep_graph::{WorkProduct, WorkProductId};
use rustc_middle::mir::mono::{CodegenUnit, MonoItem};
use rustc_session::cgu_reuse_tracker::CguReuse;
-use rustc_session::config::{DebugInfo, OutputType};
+use rustc_session::config::{DebugInfo, OutputFilenames, OutputType};
use rustc_session::Session;
-use cranelift_codegen::isa::TargetIsa;
use cranelift_object::{ObjectBuilder, ObjectModule};
+use crate::concurrency_limiter::{ConcurrencyLimiter, ConcurrencyLimiterToken};
+use crate::global_asm::GlobalAsmConfig;
use crate::{prelude::*, BackendConfig};
-struct ModuleCodegenResult(CompiledModule, Option<(WorkProductId, WorkProduct)>);
+struct ModuleCodegenResult {
+ module_regular: CompiledModule,
+ module_global_asm: Option<CompiledModule>,
+ existing_work_product: Option<(WorkProductId, WorkProduct)>,
+}
+
+enum OngoingModuleCodegen {
+ Sync(Result<ModuleCodegenResult, String>),
+ Async(JoinHandle<Result<ModuleCodegenResult, String>>),
+}
-impl<HCX> HashStable<HCX> for ModuleCodegenResult {
+impl<HCX> HashStable<HCX> for OngoingModuleCodegen {
fn hash_stable(&self, _: &mut HCX, _: &mut StableHasher) {
// do nothing
}
}
-fn make_module(sess: &Session, isa: Box<dyn TargetIsa>, name: String) -> ObjectModule {
+pub(crate) struct OngoingCodegen {
+ modules: Vec<OngoingModuleCodegen>,
+ allocator_module: Option<CompiledModule>,
+ metadata_module: Option<CompiledModule>,
+ metadata: EncodedMetadata,
+ crate_info: CrateInfo,
+ concurrency_limiter: ConcurrencyLimiter,
+}
+
+impl OngoingCodegen {
+ pub(crate) fn join(
+ self,
+ sess: &Session,
+ backend_config: &BackendConfig,
+ ) -> (CodegenResults, FxHashMap<WorkProductId, WorkProduct>) {
+ let mut work_products = FxHashMap::default();
+ let mut modules = vec![];
+
+ for module_codegen in self.modules {
+ let module_codegen_result = match module_codegen {
+ OngoingModuleCodegen::Sync(module_codegen_result) => module_codegen_result,
+ OngoingModuleCodegen::Async(join_handle) => match join_handle.join() {
+ Ok(module_codegen_result) => module_codegen_result,
+ Err(panic) => std::panic::resume_unwind(panic),
+ },
+ };
+
+ let module_codegen_result = match module_codegen_result {
+ Ok(module_codegen_result) => module_codegen_result,
+ Err(err) => sess.fatal(&err),
+ };
+ let ModuleCodegenResult { module_regular, module_global_asm, existing_work_product } =
+ module_codegen_result;
+
+ if let Some((work_product_id, work_product)) = existing_work_product {
+ work_products.insert(work_product_id, work_product);
+ } else {
+ let work_product = if backend_config.disable_incr_cache {
+ None
+ } else if let Some(module_global_asm) = &module_global_asm {
+ rustc_incremental::copy_cgu_workproduct_to_incr_comp_cache_dir(
+ sess,
+ &module_regular.name,
+ &[
+ ("o", &module_regular.object.as_ref().unwrap()),
+ ("asm.o", &module_global_asm.object.as_ref().unwrap()),
+ ],
+ )
+ } else {
+ rustc_incremental::copy_cgu_workproduct_to_incr_comp_cache_dir(
+ sess,
+ &module_regular.name,
+ &[("o", &module_regular.object.as_ref().unwrap())],
+ )
+ };
+ if let Some((work_product_id, work_product)) = work_product {
+ work_products.insert(work_product_id, work_product);
+ }
+ }
+
+ modules.push(module_regular);
+ if let Some(module_global_asm) = module_global_asm {
+ modules.push(module_global_asm);
+ }
+ }
+
+ self.concurrency_limiter.finished();
+
+ (
+ CodegenResults {
+ modules,
+ allocator_module: self.allocator_module,
+ metadata_module: self.metadata_module,
+ metadata: self.metadata,
+ crate_info: self.crate_info,
+ },
+ work_products,
+ )
+ }
+}
+
+fn make_module(sess: &Session, backend_config: &BackendConfig, name: String) -> ObjectModule {
+ let isa = crate::build_isa(sess, backend_config);
+
let mut builder =
ObjectBuilder::new(isa, name + ".o", cranelift_module::default_libcall_names()).unwrap();
// Unlike cg_llvm, cg_clif defaults to disabling -Zfunction-sections. For cg_llvm binary size
@@ -37,15 +133,15 @@ fn make_module(sess: &Session, isa: Box<dyn TargetIsa>, name: String) -> ObjectM
ObjectModule::new(builder)
}
-fn emit_module(
- tcx: TyCtxt<'_>,
- backend_config: &BackendConfig,
+fn emit_cgu(
+ output_filenames: &OutputFilenames,
+ prof: &SelfProfilerRef,
name: String,
- kind: ModuleKind,
module: ObjectModule,
- debug: Option<DebugContext<'_>>,
+ debug: Option<DebugContext>,
unwind_context: UnwindContext,
-) -> ModuleCodegenResult {
+ global_asm_object_file: Option<PathBuf>,
+) -> Result<ModuleCodegenResult, String> {
let mut product = module.finish();
if let Some(mut debug) = debug {
@@ -54,134 +150,191 @@ fn emit_module(
unwind_context.emit(&mut product);
- let tmp_file = tcx.output_filenames(()).temp_path(OutputType::Object, Some(&name));
- let obj = product.object.write().unwrap();
+ let module_regular =
+ emit_module(output_filenames, prof, product.object, ModuleKind::Regular, name.clone())?;
+
+ Ok(ModuleCodegenResult {
+ module_regular,
+ module_global_asm: global_asm_object_file.map(|global_asm_object_file| CompiledModule {
+ name: format!("{name}.asm"),
+ kind: ModuleKind::Regular,
+ object: Some(global_asm_object_file),
+ dwarf_object: None,
+ bytecode: None,
+ }),
+ existing_work_product: None,
+ })
+}
- tcx.sess.prof.artifact_size("object_file", name.clone(), obj.len().try_into().unwrap());
+fn emit_module(
+ output_filenames: &OutputFilenames,
+ prof: &SelfProfilerRef,
+ object: cranelift_object::object::write::Object<'_>,
+ kind: ModuleKind,
+ name: String,
+) -> Result<CompiledModule, String> {
+ let tmp_file = output_filenames.temp_path(OutputType::Object, Some(&name));
+ let mut file = match File::create(&tmp_file) {
+ Ok(file) => file,
+ Err(err) => return Err(format!("error creating object file: {}", err)),
+ };
- if let Err(err) = std::fs::write(&tmp_file, obj) {
- tcx.sess.fatal(&format!("error writing object file: {}", err));
+ if let Err(err) = object.write_stream(&mut file) {
+ return Err(format!("error writing object file: {}", err));
}
- let work_product = if backend_config.disable_incr_cache {
- None
- } else {
- rustc_incremental::copy_cgu_workproduct_to_incr_comp_cache_dir(
- tcx.sess,
- &name,
- &[("o", &tmp_file)],
- )
- };
+ prof.artifact_size("object_file", &*name, file.metadata().unwrap().len());
- ModuleCodegenResult(
- CompiledModule { name, kind, object: Some(tmp_file), dwarf_object: None, bytecode: None },
- work_product,
- )
+ Ok(CompiledModule { name, kind, object: Some(tmp_file), dwarf_object: None, bytecode: None })
}
fn reuse_workproduct_for_cgu(
tcx: TyCtxt<'_>,
cgu: &CodegenUnit<'_>,
- work_products: &mut FxHashMap<WorkProductId, WorkProduct>,
-) -> CompiledModule {
+) -> Result<ModuleCodegenResult, String> {
let work_product = cgu.previous_work_product(tcx);
- let obj_out = tcx.output_filenames(()).temp_path(OutputType::Object, Some(cgu.name().as_str()));
- let source_file = rustc_incremental::in_incr_comp_dir_sess(
+ let obj_out_regular =
+ tcx.output_filenames(()).temp_path(OutputType::Object, Some(cgu.name().as_str()));
+ let source_file_regular = rustc_incremental::in_incr_comp_dir_sess(
&tcx.sess,
&work_product.saved_files.get("o").expect("no saved object file in work product"),
);
- if let Err(err) = rustc_fs_util::link_or_copy(&source_file, &obj_out) {
- tcx.sess.err(&format!(
+
+ if let Err(err) = rustc_fs_util::link_or_copy(&source_file_regular, &obj_out_regular) {
+ return Err(format!(
"unable to copy {} to {}: {}",
- source_file.display(),
- obj_out.display(),
+ source_file_regular.display(),
+ obj_out_regular.display(),
err
));
}
+ let obj_out_global_asm =
+ crate::global_asm::add_file_stem_postfix(obj_out_regular.clone(), ".asm");
+ let has_global_asm = if let Some(asm_o) = work_product.saved_files.get("asm.o") {
+ let source_file_global_asm = rustc_incremental::in_incr_comp_dir_sess(&tcx.sess, asm_o);
+ if let Err(err) = rustc_fs_util::link_or_copy(&source_file_global_asm, &obj_out_global_asm)
+ {
+ return Err(format!(
+ "unable to copy {} to {}: {}",
+ source_file_regular.display(),
+ obj_out_regular.display(),
+ err
+ ));
+ }
+ true
+ } else {
+ false
+ };
- work_products.insert(cgu.work_product_id(), work_product);
-
- CompiledModule {
- name: cgu.name().to_string(),
- kind: ModuleKind::Regular,
- object: Some(obj_out),
- dwarf_object: None,
- bytecode: None,
- }
+ Ok(ModuleCodegenResult {
+ module_regular: CompiledModule {
+ name: cgu.name().to_string(),
+ kind: ModuleKind::Regular,
+ object: Some(obj_out_regular),
+ dwarf_object: None,
+ bytecode: None,
+ },
+ module_global_asm: if has_global_asm {
+ Some(CompiledModule {
+ name: cgu.name().to_string(),
+ kind: ModuleKind::Regular,
+ object: Some(obj_out_global_asm),
+ dwarf_object: None,
+ bytecode: None,
+ })
+ } else {
+ None
+ },
+ existing_work_product: Some((cgu.work_product_id(), work_product)),
+ })
}
fn module_codegen(
tcx: TyCtxt<'_>,
- (backend_config, cgu_name): (BackendConfig, rustc_span::Symbol),
-) -> ModuleCodegenResult {
- let cgu = tcx.codegen_unit(cgu_name);
- let mono_items = cgu.items_in_deterministic_order(tcx);
-
- let isa = crate::build_isa(tcx.sess, &backend_config);
- let mut module = make_module(tcx.sess, isa, cgu_name.as_str().to_string());
-
- let mut cx = crate::CodegenCx::new(
- tcx,
- backend_config.clone(),
- module.isa(),
- tcx.sess.opts.debuginfo != DebugInfo::None,
- cgu_name,
- );
- super::predefine_mono_items(tcx, &mut module, &mono_items);
- for (mono_item, _) in mono_items {
- match mono_item {
- MonoItem::Fn(inst) => {
- cx.tcx
- .sess
- .time("codegen fn", || crate::base::codegen_fn(&mut cx, &mut module, inst));
- }
- MonoItem::Static(def_id) => crate::constant::codegen_static(tcx, &mut module, def_id),
- MonoItem::GlobalAsm(item_id) => {
- let item = cx.tcx.hir().item(item_id);
- if let rustc_hir::ItemKind::GlobalAsm(asm) = item.kind {
- if !asm.options.contains(InlineAsmOptions::ATT_SYNTAX) {
- cx.global_asm.push_str("\n.intel_syntax noprefix\n");
- } else {
- cx.global_asm.push_str("\n.att_syntax\n");
- }
- for piece in asm.template {
- match *piece {
- InlineAsmTemplatePiece::String(ref s) => cx.global_asm.push_str(s),
- InlineAsmTemplatePiece::Placeholder { .. } => todo!(),
- }
- }
- cx.global_asm.push_str("\n.att_syntax\n\n");
- } else {
- bug!("Expected GlobalAsm found {:?}", item);
+ (backend_config, global_asm_config, cgu_name, token): (
+ BackendConfig,
+ Arc<GlobalAsmConfig>,
+ rustc_span::Symbol,
+ ConcurrencyLimiterToken,
+ ),
+) -> OngoingModuleCodegen {
+ let (cgu_name, mut cx, mut module, codegened_functions) = tcx.sess.time("codegen cgu", || {
+ let cgu = tcx.codegen_unit(cgu_name);
+ let mono_items = cgu.items_in_deterministic_order(tcx);
+
+ let mut module = make_module(tcx.sess, &backend_config, cgu_name.as_str().to_string());
+
+ let mut cx = crate::CodegenCx::new(
+ tcx,
+ backend_config.clone(),
+ module.isa(),
+ tcx.sess.opts.debuginfo != DebugInfo::None,
+ cgu_name,
+ );
+ super::predefine_mono_items(tcx, &mut module, &mono_items);
+ let mut codegened_functions = vec![];
+ for (mono_item, _) in mono_items {
+ match mono_item {
+ MonoItem::Fn(inst) => {
+ tcx.sess.time("codegen fn", || {
+ let codegened_function = crate::base::codegen_fn(
+ tcx,
+ &mut cx,
+ Function::new(),
+ &mut module,
+ inst,
+ );
+ codegened_functions.push(codegened_function);
+ });
+ }
+ MonoItem::Static(def_id) => {
+ crate::constant::codegen_static(tcx, &mut module, def_id)
+ }
+ MonoItem::GlobalAsm(item_id) => {
+ crate::global_asm::codegen_global_asm_item(tcx, &mut cx.global_asm, item_id);
}
}
}
- }
- crate::main_shim::maybe_create_entry_wrapper(
- tcx,
- &mut module,
- &mut cx.unwind_context,
- false,
- cgu.is_primary(),
- );
-
- let debug_context = cx.debug_context;
- let unwind_context = cx.unwind_context;
- let codegen_result = tcx.sess.time("write object file", || {
- emit_module(
+ crate::main_shim::maybe_create_entry_wrapper(
tcx,
- &backend_config,
- cgu.name().as_str().to_string(),
- ModuleKind::Regular,
- module,
- debug_context,
- unwind_context,
- )
+ &mut module,
+ &mut cx.unwind_context,
+ false,
+ cgu.is_primary(),
+ );
+
+ let cgu_name = cgu.name().as_str().to_owned();
+
+ (cgu_name, cx, module, codegened_functions)
});
- codegen_global_asm(tcx, cgu.name().as_str(), &cx.global_asm);
+ OngoingModuleCodegen::Async(std::thread::spawn(move || {
+ cx.profiler.clone().verbose_generic_activity("compile functions").run(|| {
+ let mut cached_context = Context::new();
+ for codegened_func in codegened_functions {
+ crate::base::compile_fn(&mut cx, &mut cached_context, &mut module, codegened_func);
+ }
+ });
- codegen_result
+ let global_asm_object_file =
+ cx.profiler.verbose_generic_activity("compile assembly").run(|| {
+ crate::global_asm::compile_global_asm(&global_asm_config, &cgu_name, &cx.global_asm)
+ })?;
+
+ let codegen_result = cx.profiler.verbose_generic_activity("write object file").run(|| {
+ emit_cgu(
+ &global_asm_config.output_filenames,
+ &cx.profiler,
+ cgu_name,
+ module,
+ cx.debug_context,
+ cx.unwind_context,
+ global_asm_object_file,
+ )
+ });
+ std::mem::drop(token);
+ codegen_result
+ }))
}
pub(crate) fn run_aot(
@@ -189,9 +342,7 @@ pub(crate) fn run_aot(
backend_config: BackendConfig,
metadata: EncodedMetadata,
need_metadata_module: bool,
-) -> Box<(CodegenResults, FxHashMap<WorkProductId, WorkProduct>)> {
- let mut work_products = FxHashMap::default();
-
+) -> Box<OngoingCodegen> {
let cgus = if tcx.sess.opts.output_types.should_codegen() {
tcx.collect_and_partition_mono_items(()).1
} else {
@@ -206,62 +357,69 @@ pub(crate) fn run_aot(
}
}
+ let global_asm_config = Arc::new(crate::global_asm::GlobalAsmConfig::new(tcx));
+
+ let mut concurrency_limiter = ConcurrencyLimiter::new(tcx.sess, cgus.len());
+
let modules = super::time(tcx, backend_config.display_cg_time, "codegen mono items", || {
cgus.iter()
.map(|cgu| {
- let cgu_reuse = determine_cgu_reuse(tcx, cgu);
+ let cgu_reuse = if backend_config.disable_incr_cache {
+ CguReuse::No
+ } else {
+ determine_cgu_reuse(tcx, cgu)
+ };
tcx.sess.cgu_reuse_tracker.set_actual_reuse(cgu.name().as_str(), cgu_reuse);
match cgu_reuse {
- _ if backend_config.disable_incr_cache => {}
- CguReuse::No => {}
- CguReuse::PreLto => {
- return reuse_workproduct_for_cgu(tcx, &*cgu, &mut work_products);
+ CguReuse::No => {
+ let dep_node = cgu.codegen_dep_node(tcx);
+ tcx.dep_graph
+ .with_task(
+ dep_node,
+ tcx,
+ (
+ backend_config.clone(),
+ global_asm_config.clone(),
+ cgu.name(),
+ concurrency_limiter.acquire(),
+ ),
+ module_codegen,
+ Some(rustc_middle::dep_graph::hash_result),
+ )
+ .0
+ }
+ CguReuse::PreLto => unreachable!(),
+ CguReuse::PostLto => {
+ concurrency_limiter.job_already_done();
+ OngoingModuleCodegen::Sync(reuse_workproduct_for_cgu(tcx, &*cgu))
}
- CguReuse::PostLto => unreachable!(),
- }
-
- let dep_node = cgu.codegen_dep_node(tcx);
- let (ModuleCodegenResult(module, work_product), _) = tcx.dep_graph.with_task(
- dep_node,
- tcx,
- (backend_config.clone(), cgu.name()),
- module_codegen,
- Some(rustc_middle::dep_graph::hash_result),
- );
-
- if let Some((id, product)) = work_product {
- work_products.insert(id, product);
}
-
- module
})
.collect::<Vec<_>>()
});
tcx.sess.abort_if_errors();
- let isa = crate::build_isa(tcx.sess, &backend_config);
- let mut allocator_module = make_module(tcx.sess, isa, "allocator_shim".to_string());
- assert_eq!(pointer_ty(tcx), allocator_module.target_config().pointer_type());
+ let mut allocator_module = make_module(tcx.sess, &backend_config, "allocator_shim".to_string());
let mut allocator_unwind_context = UnwindContext::new(allocator_module.isa(), true);
let created_alloc_shim =
crate::allocator::codegen(tcx, &mut allocator_module, &mut allocator_unwind_context);
let allocator_module = if created_alloc_shim {
- let ModuleCodegenResult(module, work_product) = emit_module(
- tcx,
- &backend_config,
- "allocator_shim".to_string(),
+ let mut product = allocator_module.finish();
+ allocator_unwind_context.emit(&mut product);
+
+ match emit_module(
+ tcx.output_filenames(()),
+ &tcx.sess.prof,
+ product.object,
ModuleKind::Allocator,
- allocator_module,
- None,
- allocator_unwind_context,
- );
- if let Some((id, product)) = work_product {
- work_products.insert(id, product);
+ "allocator_shim".to_owned(),
+ ) {
+ Ok(allocator_module) => Some(allocator_module),
+ Err(err) => tcx.sess.fatal(err),
}
- Some(module)
} else {
None
};
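// Editor's note: the ConcurrencyLimiter used in the hunk above lives in the new
// concurrency_limiter module, which is not part of this diff (lib.rs below adds
// `extern crate jobserver`, so the real type presumably coordinates with the
// jobserver). The sketch below is a hypothetical token-based limiter with the
// same usage shape: acquire a token per CGU before handing work to a thread and
// free the slot when the token is dropped. It does not model job_already_done()
// or finished() and is an illustration of the pattern, not the real type.
use std::sync::{Arc, Condvar, Mutex};

struct Limiter {
    inner: Arc<(Mutex<usize>, Condvar)>,
}

struct Token {
    inner: Arc<(Mutex<usize>, Condvar)>,
}

impl Limiter {
    fn new(max_jobs: usize) -> Self {
        Limiter { inner: Arc::new((Mutex::new(max_jobs), Condvar::new())) }
    }

    // Blocks until a job slot is free, then hands out a token that holds it.
    fn acquire(&self) -> Token {
        let (lock, cvar) = &*self.inner;
        let mut free = lock.lock().unwrap();
        while *free == 0 {
            free = cvar.wait(free).unwrap();
        }
        *free -= 1;
        Token { inner: self.inner.clone() }
    }
}

impl Drop for Token {
    // Dropping the token at the end of a codegen job frees the slot again.
    fn drop(&mut self) {
        let (lock, cvar) = &*self.inner;
        *lock.lock().unwrap() += 1;
        cvar.notify_one();
    }
}

fn main() {
    let limiter = Limiter::new(2);
    let t1 = limiter.acquire();
    let _t2 = limiter.acquire();
    drop(t1); // frees a slot so a third job can start
    let _t3 = limiter.acquire();
}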
@@ -308,102 +466,14 @@ pub(crate) fn run_aot(
}
.to_owned();
- Box::new((
- CodegenResults {
- modules,
- allocator_module,
- metadata_module,
- metadata,
- crate_info: CrateInfo::new(tcx, target_cpu),
- },
- work_products,
- ))
-}
-
-fn codegen_global_asm(tcx: TyCtxt<'_>, cgu_name: &str, global_asm: &str) {
- use std::io::Write;
- use std::process::{Command, Stdio};
-
- if global_asm.is_empty() {
- return;
- }
-
- if cfg!(not(feature = "inline_asm"))
- || tcx.sess.target.is_like_osx
- || tcx.sess.target.is_like_windows
- {
- if global_asm.contains("__rust_probestack") {
- return;
- }
-
- // FIXME fix linker error on macOS
- if cfg!(not(feature = "inline_asm")) {
- tcx.sess.fatal(
- "asm! and global_asm! support is disabled while compiling rustc_codegen_cranelift",
- );
- } else {
- tcx.sess.fatal("asm! and global_asm! are not yet supported on macOS and Windows");
- }
- }
-
- let assembler = crate::toolchain::get_toolchain_binary(tcx.sess, "as");
- let linker = crate::toolchain::get_toolchain_binary(tcx.sess, "ld");
-
- // Remove all LLVM style comments
- let global_asm = global_asm
- .lines()
- .map(|line| if let Some(index) = line.find("//") { &line[0..index] } else { line })
- .collect::<Vec<_>>()
- .join("\n");
-
- let output_object_file = tcx.output_filenames(()).temp_path(OutputType::Object, Some(cgu_name));
-
- // Assemble `global_asm`
- let global_asm_object_file = add_file_stem_postfix(output_object_file.clone(), ".asm");
- let mut child = Command::new(assembler)
- .arg("-o")
- .arg(&global_asm_object_file)
- .stdin(Stdio::piped())
- .spawn()
- .expect("Failed to spawn `as`.");
- child.stdin.take().unwrap().write_all(global_asm.as_bytes()).unwrap();
- let status = child.wait().expect("Failed to wait for `as`.");
- if !status.success() {
- tcx.sess.fatal(&format!("Failed to assemble `{}`", global_asm));
- }
-
- // Link the global asm and main object file together
- let main_object_file = add_file_stem_postfix(output_object_file.clone(), ".main");
- std::fs::rename(&output_object_file, &main_object_file).unwrap();
- let status = Command::new(linker)
- .arg("-r") // Create a new object file
- .arg("-o")
- .arg(output_object_file)
- .arg(&main_object_file)
- .arg(&global_asm_object_file)
- .status()
- .unwrap();
- if !status.success() {
- tcx.sess.fatal(&format!(
- "Failed to link `{}` and `{}` together",
- main_object_file.display(),
- global_asm_object_file.display(),
- ));
- }
-
- std::fs::remove_file(global_asm_object_file).unwrap();
- std::fs::remove_file(main_object_file).unwrap();
-}
-
-fn add_file_stem_postfix(mut path: PathBuf, postfix: &str) -> PathBuf {
- let mut new_filename = path.file_stem().unwrap().to_owned();
- new_filename.push(postfix);
- if let Some(extension) = path.extension() {
- new_filename.push(".");
- new_filename.push(extension);
- }
- path.set_file_name(new_filename);
- path
+ Box::new(OngoingCodegen {
+ modules,
+ allocator_module,
+ metadata_module,
+ metadata,
+ crate_info: CrateInfo::new(tcx, target_cpu),
+ concurrency_limiter,
+ })
}
// Adapted from https://github.com/rust-lang/rust/blob/303d8aff6092709edd4dbd35b1c88e9aa40bf6d8/src/librustc_codegen_ssa/base.rs#L922-L953
@@ -432,5 +502,5 @@ fn determine_cgu_reuse<'tcx>(tcx: TyCtxt<'tcx>, cgu: &CodegenUnit<'tcx>) -> CguR
cgu.name()
);
- if tcx.try_mark_green(&dep_node) { CguReuse::PreLto } else { CguReuse::No }
+ if tcx.try_mark_green(&dep_node) { CguReuse::PostLto } else { CguReuse::No }
}
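// Editor's note: a stripped-down sketch (not part of the patch) of the join
// pattern OngoingCodegen::join uses above: each codegen unit is either already
// finished synchronously (a reused work product) or still running on its own
// thread, and join drains both kinds in order, re-raising panics and turning
// per-module errors into a single failure. The names are simplified stand-ins
// for the real types.
use std::thread::JoinHandle;

enum Ongoing<T> {
    Sync(Result<T, String>),
    Async(JoinHandle<Result<T, String>>),
}

fn join_all<T>(jobs: Vec<Ongoing<T>>) -> Result<Vec<T>, String> {
    let mut out = Vec::with_capacity(jobs.len());
    for job in jobs {
        let res = match job {
            Ongoing::Sync(res) => res,
            // Re-raise panics from worker threads on the joining thread.
            Ongoing::Async(handle) => match handle.join() {
                Ok(res) => res,
                Err(panic) => std::panic::resume_unwind(panic),
            },
        };
        out.push(res?);
    }
    Ok(out)
}

fn main() {
    let jobs = vec![
        Ongoing::Sync(Ok(1)),
        Ongoing::Async(std::thread::spawn(|| Ok(2))),
    ];
    assert_eq!(join_all(jobs).unwrap(), vec![1, 2]);
}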
diff --git a/compiler/rustc_codegen_cranelift/src/driver/jit.rs b/compiler/rustc_codegen_cranelift/src/driver/jit.rs
index a56a91000..6a430b521 100644
--- a/compiler/rustc_codegen_cranelift/src/driver/jit.rs
+++ b/compiler/rustc_codegen_cranelift/src/driver/jit.rs
@@ -61,19 +61,18 @@ impl UnsafeMessage {
}
}
-fn create_jit_module<'tcx>(
- tcx: TyCtxt<'tcx>,
+fn create_jit_module(
+ tcx: TyCtxt<'_>,
backend_config: &BackendConfig,
hotswap: bool,
-) -> (JITModule, CodegenCx<'tcx>) {
+) -> (JITModule, CodegenCx) {
let crate_info = CrateInfo::new(tcx, "dummy_target_cpu".to_string());
- let imported_symbols = load_imported_symbols_for_jit(tcx.sess, crate_info);
let isa = crate::build_isa(tcx.sess, backend_config);
let mut jit_builder = JITBuilder::with_isa(isa, cranelift_module::default_libcall_names());
jit_builder.hotswap(hotswap);
crate::compiler_builtins::register_functions_for_jit(&mut jit_builder);
- jit_builder.symbols(imported_symbols);
+ jit_builder.symbol_lookup_fn(dep_symbol_lookup_fn(tcx.sess, crate_info));
jit_builder.symbol("__clif_jit_fn", clif_jit_fn as *const u8);
let mut jit_module = JITModule::new(jit_builder);
@@ -111,6 +110,7 @@ pub(crate) fn run_jit(tcx: TyCtxt<'_>, backend_config: BackendConfig) -> ! {
&backend_config,
matches!(backend_config.codegen_mode, CodegenMode::JitLazy),
);
+ let mut cached_context = Context::new();
let (_, cgus) = tcx.collect_and_partition_mono_items(());
let mono_items = cgus
@@ -128,11 +128,19 @@ pub(crate) fn run_jit(tcx: TyCtxt<'_>, backend_config: BackendConfig) -> ! {
MonoItem::Fn(inst) => match backend_config.codegen_mode {
CodegenMode::Aot => unreachable!(),
CodegenMode::Jit => {
- cx.tcx.sess.time("codegen fn", || {
- crate::base::codegen_fn(&mut cx, &mut jit_module, inst)
+ tcx.sess.time("codegen fn", || {
+ crate::base::codegen_and_compile_fn(
+ tcx,
+ &mut cx,
+ &mut cached_context,
+ &mut jit_module,
+ inst,
+ )
});
}
- CodegenMode::JitLazy => codegen_shim(&mut cx, &mut jit_module, inst),
+ CodegenMode::JitLazy => {
+ codegen_shim(tcx, &mut cx, &mut cached_context, &mut jit_module, inst)
+ }
},
MonoItem::Static(def_id) => {
crate::constant::codegen_static(tcx, &mut jit_module, def_id);
@@ -259,7 +267,15 @@ fn jit_fn(instance_ptr: *const Instance<'static>, trampoline_ptr: *const u8) ->
false,
Symbol::intern("dummy_cgu_name"),
);
- tcx.sess.time("codegen fn", || crate::base::codegen_fn(&mut cx, jit_module, instance));
+ tcx.sess.time("codegen fn", || {
+ crate::base::codegen_and_compile_fn(
+ tcx,
+ &mut cx,
+ &mut Context::new(),
+ jit_module,
+ instance,
+ )
+ });
assert!(cx.global_asm.is_empty());
jit_module.finalize_definitions();
@@ -269,10 +285,10 @@ fn jit_fn(instance_ptr: *const Instance<'static>, trampoline_ptr: *const u8) ->
})
}
-fn load_imported_symbols_for_jit(
+fn dep_symbol_lookup_fn(
sess: &Session,
crate_info: CrateInfo,
-) -> Vec<(String, *const u8)> {
+) -> Box<dyn Fn(&str) -> Option<*const u8>> {
use rustc_middle::middle::dependency_format::Linkage;
let mut dylib_paths = Vec::new();
@@ -299,44 +315,32 @@ fn load_imported_symbols_for_jit(
}
}
- let mut imported_symbols = Vec::new();
- for path in dylib_paths {
- use object::{Object, ObjectSymbol};
- let lib = libloading::Library::new(&path).unwrap();
- let obj = std::fs::read(path).unwrap();
- let obj = object::File::parse(&*obj).unwrap();
- imported_symbols.extend(obj.dynamic_symbols().filter_map(|symbol| {
- let name = symbol.name().unwrap().to_string();
- if name.is_empty() || !symbol.is_global() || symbol.is_undefined() {
- return None;
- }
- if name.starts_with("rust_metadata_") {
- // The metadata is part of a section that is not loaded by the dynamic linker in
- // case of cg_llvm.
- return None;
- }
- let dlsym_name = if cfg!(target_os = "macos") {
- // On macOS `dlsym` expects the name without leading `_`.
- assert!(name.starts_with('_'), "{:?}", name);
- &name[1..]
- } else {
- &name
- };
- let symbol: libloading::Symbol<'_, *const u8> =
- unsafe { lib.get(dlsym_name.as_bytes()) }.unwrap();
- Some((name, *symbol))
- }));
- std::mem::forget(lib)
- }
+ let imported_dylibs = Box::leak(
+ dylib_paths
+ .into_iter()
+ .map(|path| unsafe { libloading::Library::new(&path).unwrap() })
+ .collect::<Box<[_]>>(),
+ );
sess.abort_if_errors();
- imported_symbols
+ Box::new(move |sym_name| {
+ for dylib in &*imported_dylibs {
+ if let Ok(sym) = unsafe { dylib.get::<*const u8>(sym_name.as_bytes()) } {
+ return Some(*sym);
+ }
+ }
+ None
+ })
}
-fn codegen_shim<'tcx>(cx: &mut CodegenCx<'tcx>, module: &mut JITModule, inst: Instance<'tcx>) {
- let tcx = cx.tcx;
-
+fn codegen_shim<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ cx: &mut CodegenCx,
+ cached_context: &mut Context,
+ module: &mut JITModule,
+ inst: Instance<'tcx>,
+) {
let pointer_type = module.target_config().pointer_type();
let name = tcx.symbol_name(inst).name;
@@ -357,8 +361,9 @@ fn codegen_shim<'tcx>(cx: &mut CodegenCx<'tcx>, module: &mut JITModule, inst: In
)
.unwrap();
- cx.cached_context.clear();
- let trampoline = &mut cx.cached_context.func;
+ let context = cached_context;
+ context.clear();
+ let trampoline = &mut context.func;
trampoline.signature = sig.clone();
let mut builder_ctx = FunctionBuilderContext::new();
@@ -381,5 +386,6 @@ fn codegen_shim<'tcx>(cx: &mut CodegenCx<'tcx>, module: &mut JITModule, inst: In
let ret_vals = trampoline_builder.func.dfg.inst_results(call_inst).to_vec();
trampoline_builder.ins().return_(&ret_vals);
- module.define_function(func_id, &mut cx.cached_context).unwrap();
+ module.define_function(func_id, context).unwrap();
+ cx.unwind_context.add_function(func_id, context, module.isa());
}
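// Editor's note: an illustration (not part of the patch) of the interface change
// in dep_symbol_lookup_fn above. Instead of eagerly harvesting every dynamic
// symbol into a Vec up front, the JIT builder is handed a lookup closure that
// resolves names on demand against the loaded dependencies, which are leaked so
// the returned pointers stay valid for the whole JIT session. `FakeDylib` is a
// hypothetical stand-in for a loaded library handle; the real code uses
// libloading.
use std::collections::HashMap;

struct FakeDylib {
    symbols: HashMap<&'static str, *const u8>,
}

impl FakeDylib {
    fn get(&self, name: &str) -> Option<*const u8> {
        self.symbols.get(name).copied()
    }
}

// Returns a closure the JIT can call whenever it hits an unresolved symbol.
fn lookup_fn(dylibs: Vec<FakeDylib>) -> Box<dyn Fn(&str) -> Option<*const u8>> {
    // Leak the handles, mirroring the Box::leak of the libloading handles above.
    let dylibs: &'static [FakeDylib] = Box::leak(dylibs.into_boxed_slice());
    Box::new(move |sym_name: &str| {
        for dylib in dylibs {
            if let Some(sym) = dylib.get(sym_name) {
                return Some(sym);
            }
        }
        None
    })
}

fn main() {
    let dylib = FakeDylib { symbols: HashMap::from([("my_sym", 1usize as *const u8)]) };
    let lookup = lookup_fn(vec![dylib]);
    assert!(lookup("my_sym").is_some());
    assert!(lookup("missing").is_none());
}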
diff --git a/compiler/rustc_codegen_cranelift/src/global_asm.rs b/compiler/rustc_codegen_cranelift/src/global_asm.rs
new file mode 100644
index 000000000..dcbcaba30
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/src/global_asm.rs
@@ -0,0 +1,114 @@
+//! The AOT driver uses [`cranelift_object`] to write object files suitable for linking into a
+//! standalone executable.
+
+use std::io::Write;
+use std::path::PathBuf;
+use std::process::{Command, Stdio};
+use std::sync::Arc;
+
+use rustc_ast::{InlineAsmOptions, InlineAsmTemplatePiece};
+use rustc_hir::ItemId;
+use rustc_session::config::{OutputFilenames, OutputType};
+
+use crate::prelude::*;
+
+pub(crate) fn codegen_global_asm_item(tcx: TyCtxt<'_>, global_asm: &mut String, item_id: ItemId) {
+ let item = tcx.hir().item(item_id);
+ if let rustc_hir::ItemKind::GlobalAsm(asm) = item.kind {
+ if !asm.options.contains(InlineAsmOptions::ATT_SYNTAX) {
+ global_asm.push_str("\n.intel_syntax noprefix\n");
+ } else {
+ global_asm.push_str("\n.att_syntax\n");
+ }
+ for piece in asm.template {
+ match *piece {
+ InlineAsmTemplatePiece::String(ref s) => global_asm.push_str(s),
+ InlineAsmTemplatePiece::Placeholder { .. } => todo!(),
+ }
+ }
+ global_asm.push_str("\n.att_syntax\n\n");
+ } else {
+ bug!("Expected GlobalAsm found {:?}", item);
+ }
+}
+
+#[derive(Debug)]
+pub(crate) struct GlobalAsmConfig {
+ asm_enabled: bool,
+ assembler: PathBuf,
+ pub(crate) output_filenames: Arc<OutputFilenames>,
+}
+
+impl GlobalAsmConfig {
+ pub(crate) fn new(tcx: TyCtxt<'_>) -> Self {
+ let asm_enabled = cfg!(feature = "inline_asm") && !tcx.sess.target.is_like_windows;
+
+ GlobalAsmConfig {
+ asm_enabled,
+ assembler: crate::toolchain::get_toolchain_binary(tcx.sess, "as"),
+ output_filenames: tcx.output_filenames(()).clone(),
+ }
+ }
+}
+
+pub(crate) fn compile_global_asm(
+ config: &GlobalAsmConfig,
+ cgu_name: &str,
+ global_asm: &str,
+) -> Result<Option<PathBuf>, String> {
+ if global_asm.is_empty() {
+ return Ok(None);
+ }
+
+ if !config.asm_enabled {
+ if global_asm.contains("__rust_probestack") {
+ return Ok(None);
+ }
+
+ // FIXME fix linker error on macOS
+ if cfg!(not(feature = "inline_asm")) {
+ return Err(
+ "asm! and global_asm! support is disabled while compiling rustc_codegen_cranelift"
+ .to_owned(),
+ );
+ } else {
+ return Err("asm! and global_asm! are not yet supported on Windows".to_owned());
+ }
+ }
+
+ // Remove all LLVM style comments
+ let global_asm = global_asm
+ .lines()
+ .map(|line| if let Some(index) = line.find("//") { &line[0..index] } else { line })
+ .collect::<Vec<_>>()
+ .join("\n");
+
+ let output_object_file = config.output_filenames.temp_path(OutputType::Object, Some(cgu_name));
+
+ // Assemble `global_asm`
+ let global_asm_object_file = add_file_stem_postfix(output_object_file.clone(), ".asm");
+ let mut child = Command::new(&config.assembler)
+ .arg("-o")
+ .arg(&global_asm_object_file)
+ .stdin(Stdio::piped())
+ .spawn()
+ .expect("Failed to spawn `as`.");
+ child.stdin.take().unwrap().write_all(global_asm.as_bytes()).unwrap();
+ let status = child.wait().expect("Failed to wait for `as`.");
+ if !status.success() {
+ return Err(format!("Failed to assemble `{}`", global_asm));
+ }
+
+ Ok(Some(global_asm_object_file))
+}
+
+pub(crate) fn add_file_stem_postfix(mut path: PathBuf, postfix: &str) -> PathBuf {
+ let mut new_filename = path.file_stem().unwrap().to_owned();
+ new_filename.push(postfix);
+ if let Some(extension) = path.extension() {
+ new_filename.push(".");
+ new_filename.push(extension);
+ }
+ path.set_file_name(new_filename);
+ path
+}
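// Editor's note: a small usage sketch (not part of the patch) for the
// add_file_stem_postfix helper introduced above, showing how the global asm
// object path is derived next to the regular object path. The helper body is
// copied verbatim from the new module so the example is self-contained; the
// paths are made up.
use std::path::PathBuf;

fn add_file_stem_postfix(mut path: PathBuf, postfix: &str) -> PathBuf {
    let mut new_filename = path.file_stem().unwrap().to_owned();
    new_filename.push(postfix);
    if let Some(extension) = path.extension() {
        new_filename.push(".");
        new_filename.push(extension);
    }
    path.set_file_name(new_filename);
    path
}

fn main() {
    let regular = PathBuf::from("target/debug/deps/my_crate.my_cgu.o");
    let asm = add_file_stem_postfix(regular.clone(), ".asm");
    // "my_crate.my_cgu.o" has stem "my_crate.my_cgu" and extension "o", so the
    // assembled global asm object ends up next to it as "my_crate.my_cgu.asm.o".
    assert_eq!(asm, PathBuf::from("target/debug/deps/my_crate.my_cgu.asm.o"));
}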
diff --git a/compiler/rustc_codegen_cranelift/src/inline_asm.rs b/compiler/rustc_codegen_cranelift/src/inline_asm.rs
index 241de5e36..3fcc84d39 100644
--- a/compiler/rustc_codegen_cranelift/src/inline_asm.rs
+++ b/compiler/rustc_codegen_cranelift/src/inline_asm.rs
@@ -15,15 +15,19 @@ pub(crate) fn codegen_inline_asm<'tcx>(
template: &[InlineAsmTemplatePiece],
operands: &[InlineAsmOperand<'tcx>],
options: InlineAsmOptions,
+ destination: Option<mir::BasicBlock>,
) {
// FIXME add .eh_frame unwind info directives
if !template.is_empty() {
+ // Used by panic_abort
if template[0] == InlineAsmTemplatePiece::String("int $$0x29".to_string()) {
- let true_ = fx.bcx.ins().iconst(types::I32, 1);
- fx.bcx.ins().trapnz(true_, TrapCode::User(1));
+ fx.bcx.ins().trap(TrapCode::User(1));
return;
- } else if template[0] == InlineAsmTemplatePiece::String("movq %rbx, ".to_string())
+ }
+
+ // Used by stdarch
+ if template[0] == InlineAsmTemplatePiece::String("mov ".to_string())
&& matches!(
template[1],
InlineAsmTemplatePiece::Placeholder {
@@ -32,66 +36,63 @@ pub(crate) fn codegen_inline_asm<'tcx>(
span: _
}
)
- && template[2] == InlineAsmTemplatePiece::String("\n".to_string())
- && template[3] == InlineAsmTemplatePiece::String("cpuid".to_string())
- && template[4] == InlineAsmTemplatePiece::String("\n".to_string())
- && template[5] == InlineAsmTemplatePiece::String("xchgq %rbx, ".to_string())
+ && template[2] == InlineAsmTemplatePiece::String(", rbx".to_string())
+ && template[3] == InlineAsmTemplatePiece::String("\n".to_string())
+ && template[4] == InlineAsmTemplatePiece::String("cpuid".to_string())
+ && template[5] == InlineAsmTemplatePiece::String("\n".to_string())
+ && template[6] == InlineAsmTemplatePiece::String("xchg ".to_string())
&& matches!(
- template[6],
+ template[7],
InlineAsmTemplatePiece::Placeholder {
operand_idx: 0,
modifier: Some('r'),
span: _
}
)
+ && template[8] == InlineAsmTemplatePiece::String(", rbx".to_string())
{
assert_eq!(operands.len(), 4);
let (leaf, eax_place) = match operands[1] {
- InlineAsmOperand::InOut { reg, late: true, ref in_value, out_place } => {
- assert_eq!(
- reg,
- InlineAsmRegOrRegClass::Reg(InlineAsmReg::X86(X86InlineAsmReg::ax))
- );
- (
- crate::base::codegen_operand(fx, in_value).load_scalar(fx),
- crate::base::codegen_place(fx, out_place.unwrap()),
- )
- }
+ InlineAsmOperand::InOut {
+ reg: InlineAsmRegOrRegClass::Reg(InlineAsmReg::X86(X86InlineAsmReg::ax)),
+ late: _,
+ ref in_value,
+ out_place: Some(out_place),
+ } => (
+ crate::base::codegen_operand(fx, in_value).load_scalar(fx),
+ crate::base::codegen_place(fx, out_place),
+ ),
_ => unreachable!(),
};
let ebx_place = match operands[0] {
- InlineAsmOperand::Out { reg, late: true, place } => {
- assert_eq!(
- reg,
+ InlineAsmOperand::Out {
+ reg:
InlineAsmRegOrRegClass::RegClass(InlineAsmRegClass::X86(
- X86InlineAsmRegClass::reg
- ))
- );
- crate::base::codegen_place(fx, place.unwrap())
- }
+ X86InlineAsmRegClass::reg,
+ )),
+ late: _,
+ place: Some(place),
+ } => crate::base::codegen_place(fx, place),
_ => unreachable!(),
};
let (sub_leaf, ecx_place) = match operands[2] {
- InlineAsmOperand::InOut { reg, late: true, ref in_value, out_place } => {
- assert_eq!(
- reg,
- InlineAsmRegOrRegClass::Reg(InlineAsmReg::X86(X86InlineAsmReg::cx))
- );
- (
- crate::base::codegen_operand(fx, in_value).load_scalar(fx),
- crate::base::codegen_place(fx, out_place.unwrap()),
- )
- }
+ InlineAsmOperand::InOut {
+ reg: InlineAsmRegOrRegClass::Reg(InlineAsmReg::X86(X86InlineAsmReg::cx)),
+ late: _,
+ ref in_value,
+ out_place: Some(out_place),
+ } => (
+ crate::base::codegen_operand(fx, in_value).load_scalar(fx),
+ crate::base::codegen_place(fx, out_place),
+ ),
_ => unreachable!(),
};
let edx_place = match operands[3] {
- InlineAsmOperand::Out { reg, late: true, place } => {
- assert_eq!(
- reg,
- InlineAsmRegOrRegClass::Reg(InlineAsmReg::X86(X86InlineAsmReg::dx))
- );
- crate::base::codegen_place(fx, place.unwrap())
- }
+ InlineAsmOperand::Out {
+ reg: InlineAsmRegOrRegClass::Reg(InlineAsmReg::X86(X86InlineAsmReg::dx)),
+ late: _,
+ place: Some(place),
+ } => crate::base::codegen_place(fx, place),
_ => unreachable!(),
};
@@ -101,12 +102,99 @@ pub(crate) fn codegen_inline_asm<'tcx>(
ebx_place.write_cvalue(fx, CValue::by_val(ebx, fx.layout_of(fx.tcx.types.u32)));
ecx_place.write_cvalue(fx, CValue::by_val(ecx, fx.layout_of(fx.tcx.types.u32)));
edx_place.write_cvalue(fx, CValue::by_val(edx, fx.layout_of(fx.tcx.types.u32)));
+ let destination_block = fx.get_block(destination.unwrap());
+ fx.bcx.ins().jump(destination_block, &[]);
return;
- } else if fx.tcx.symbol_name(fx.instance).name.starts_with("___chkstk") {
+ }
+
+ // Used by compiler-builtins
+ if fx.tcx.symbol_name(fx.instance).name.starts_with("___chkstk") {
// ___chkstk, ___chkstk_ms and __alloca are only used on Windows
crate::trap::trap_unimplemented(fx, "Stack probes are not supported");
+ return;
} else if fx.tcx.symbol_name(fx.instance).name == "__alloca" {
crate::trap::trap_unimplemented(fx, "Alloca is not supported");
+ return;
+ }
+
+ // Used by measureme
+ if template[0] == InlineAsmTemplatePiece::String("xor %eax, %eax".to_string())
+ && template[1] == InlineAsmTemplatePiece::String("\n".to_string())
+ && template[2] == InlineAsmTemplatePiece::String("mov %rbx, ".to_string())
+ && matches!(
+ template[3],
+ InlineAsmTemplatePiece::Placeholder {
+ operand_idx: 0,
+ modifier: Some('r'),
+ span: _
+ }
+ )
+ && template[4] == InlineAsmTemplatePiece::String("\n".to_string())
+ && template[5] == InlineAsmTemplatePiece::String("cpuid".to_string())
+ && template[6] == InlineAsmTemplatePiece::String("\n".to_string())
+ && template[7] == InlineAsmTemplatePiece::String("mov ".to_string())
+ && matches!(
+ template[8],
+ InlineAsmTemplatePiece::Placeholder {
+ operand_idx: 0,
+ modifier: Some('r'),
+ span: _
+ }
+ )
+ && template[9] == InlineAsmTemplatePiece::String(", %rbx".to_string())
+ {
+ let destination_block = fx.get_block(destination.unwrap());
+ fx.bcx.ins().jump(destination_block, &[]);
+ return;
+ } else if template[0] == InlineAsmTemplatePiece::String("rdpmc".to_string()) {
+ // Return zero dummy values for all performance counters
+ match operands[0] {
+ InlineAsmOperand::In {
+ reg: InlineAsmRegOrRegClass::Reg(InlineAsmReg::X86(X86InlineAsmReg::cx)),
+ value: _,
+ } => {}
+ _ => unreachable!(),
+ };
+ let lo = match operands[1] {
+ InlineAsmOperand::Out {
+ reg: InlineAsmRegOrRegClass::Reg(InlineAsmReg::X86(X86InlineAsmReg::ax)),
+ late: true,
+ place: Some(place),
+ } => crate::base::codegen_place(fx, place),
+ _ => unreachable!(),
+ };
+ let hi = match operands[2] {
+ InlineAsmOperand::Out {
+ reg: InlineAsmRegOrRegClass::Reg(InlineAsmReg::X86(X86InlineAsmReg::dx)),
+ late: true,
+ place: Some(place),
+ } => crate::base::codegen_place(fx, place),
+ _ => unreachable!(),
+ };
+
+ let u32_layout = fx.layout_of(fx.tcx.types.u32);
+ let zero = fx.bcx.ins().iconst(types::I32, 0);
+ lo.write_cvalue(fx, CValue::by_val(zero, u32_layout));
+ hi.write_cvalue(fx, CValue::by_val(zero, u32_layout));
+
+ let destination_block = fx.get_block(destination.unwrap());
+ fx.bcx.ins().jump(destination_block, &[]);
+ return;
+ } else if template[0] == InlineAsmTemplatePiece::String("lock xadd ".to_string())
+ && matches!(
+ template[1],
+ InlineAsmTemplatePiece::Placeholder { operand_idx: 1, modifier: None, span: _ }
+ )
+ && template[2] == InlineAsmTemplatePiece::String(", (".to_string())
+ && matches!(
+ template[3],
+ InlineAsmTemplatePiece::Placeholder { operand_idx: 0, modifier: None, span: _ }
+ )
+ && template[4] == InlineAsmTemplatePiece::String(")".to_string())
+ {
+ let destination_block = fx.get_block(destination.unwrap());
+ fx.bcx.ins().jump(destination_block, &[]);
+ return;
}
}
@@ -175,6 +263,16 @@ pub(crate) fn codegen_inline_asm<'tcx>(
}
call_inline_asm(fx, &asm_name, asm_gen.stack_slot_size, inputs, outputs);
+
+ match destination {
+ Some(destination) => {
+ let destination_block = fx.get_block(destination);
+ fx.bcx.ins().jump(destination_block, &[]);
+ }
+ None => {
+ fx.bcx.ins().trap(TrapCode::UnreachableCodeReached);
+ }
+ }
}
struct InlineAssemblyGenerator<'a, 'tcx> {
@@ -637,7 +735,7 @@ fn call_inline_asm<'tcx>(
inputs: Vec<(Size, Value)>,
outputs: Vec<(Size, CPlace<'tcx>)>,
) {
- let stack_slot = fx.bcx.func.create_stack_slot(StackSlotData {
+ let stack_slot = fx.bcx.func.create_sized_stack_slot(StackSlotData {
kind: StackSlotKind::ExplicitSlot,
size: u32::try_from(slot_size.bytes()).unwrap(),
});
diff --git a/compiler/rustc_codegen_cranelift/src/intrinsics/cpuid.rs b/compiler/rustc_codegen_cranelift/src/intrinsics/cpuid.rs
index d02dfd93c..5120b89c4 100644
--- a/compiler/rustc_codegen_cranelift/src/intrinsics/cpuid.rs
+++ b/compiler/rustc_codegen_cranelift/src/intrinsics/cpuid.rs
@@ -62,7 +62,7 @@ pub(crate) fn codegen_cpuid_call<'tcx>(
fx.bcx.ins().jump(dest, &[zero, zero, proc_info_ecx, proc_info_edx]);
fx.bcx.switch_to_block(unsupported_leaf);
- crate::trap::trap_unreachable(
+ crate::trap::trap_unimplemented(
fx,
"__cpuid_count arch intrinsic doesn't yet support specified leaf",
);
diff --git a/compiler/rustc_codegen_cranelift/src/intrinsics/llvm.rs b/compiler/rustc_codegen_cranelift/src/intrinsics/llvm.rs
index 869670c8c..783d426c3 100644
--- a/compiler/rustc_codegen_cranelift/src/intrinsics/llvm.rs
+++ b/compiler/rustc_codegen_cranelift/src/intrinsics/llvm.rs
@@ -14,6 +14,10 @@ pub(crate) fn codegen_llvm_intrinsic_call<'tcx>(
target: Option<BasicBlock>,
) {
match intrinsic {
+ "llvm.x86.sse2.pause" | "llvm.aarch64.isb" => {
+ // Spin loop hint
+ }
+
// Used by `_mm_movemask_epi8` and `_mm256_movemask_epi8`
"llvm.x86.sse2.pmovmskb.128" | "llvm.x86.avx2.pmovmskb" | "llvm.x86.sse2.movmsk.pd" => {
intrinsic_args!(fx, args => (a); intrinsic);
@@ -25,8 +29,7 @@ pub(crate) fn codegen_llvm_intrinsic_call<'tcx>(
let mut res = fx.bcx.ins().iconst(types::I32, 0);
for lane in (0..lane_count).rev() {
- let a_lane =
- a.value_field(fx, mir::Field::new(lane.try_into().unwrap())).load_scalar(fx);
+ let a_lane = a.value_lane(fx, lane).load_scalar(fx);
// cast float to int
let a_lane = match lane_ty {
@@ -139,6 +142,7 @@ pub(crate) fn codegen_llvm_intrinsic_call<'tcx>(
.sess
.warn(&format!("unsupported llvm intrinsic {}; replacing with trap", intrinsic));
crate::trap::trap_unimplemented(fx, intrinsic);
+ return;
}
}
diff --git a/compiler/rustc_codegen_cranelift/src/intrinsics/mod.rs b/compiler/rustc_codegen_cranelift/src/intrinsics/mod.rs
index b2a83e1d4..0302b843a 100644
--- a/compiler/rustc_codegen_cranelift/src/intrinsics/mod.rs
+++ b/compiler/rustc_codegen_cranelift/src/intrinsics/mod.rs
@@ -44,7 +44,7 @@ fn report_atomic_type_validation_error<'tcx>(
),
);
// Prevent verifier error
- crate::trap::trap_unreachable(fx, "compilation should not have succeeded");
+ fx.bcx.ins().trap(TrapCode::UnreachableCodeReached);
}
pub(crate) fn clif_vector_type<'tcx>(tcx: TyCtxt<'tcx>, layout: TyAndLayout<'tcx>) -> Option<Type> {
@@ -53,7 +53,7 @@ pub(crate) fn clif_vector_type<'tcx>(tcx: TyCtxt<'tcx>, layout: TyAndLayout<'tcx
_ => unreachable!(),
};
- match scalar_to_clif_type(tcx, element).by(u16::try_from(count).unwrap()) {
+ match scalar_to_clif_type(tcx, element).by(u32::try_from(count).unwrap()) {
// Cranelift currently only implements icmp for 128bit vectors.
Some(vector_ty) if vector_ty.bits() == 128 => Some(vector_ty),
_ => None,
@@ -84,6 +84,30 @@ fn simd_for_each_lane<'tcx>(
}
}
+fn simd_pair_for_each_lane_typed<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ x: CValue<'tcx>,
+ y: CValue<'tcx>,
+ ret: CPlace<'tcx>,
+ f: &dyn Fn(&mut FunctionCx<'_, '_, 'tcx>, CValue<'tcx>, CValue<'tcx>) -> CValue<'tcx>,
+) {
+ assert_eq!(x.layout(), y.layout());
+ let layout = x.layout();
+
+ let (lane_count, _lane_ty) = layout.ty.simd_size_and_type(fx.tcx);
+ let (ret_lane_count, _ret_lane_ty) = ret.layout().ty.simd_size_and_type(fx.tcx);
+ assert_eq!(lane_count, ret_lane_count);
+
+ for lane_idx in 0..lane_count {
+ let x_lane = x.value_lane(fx, lane_idx);
+ let y_lane = y.value_lane(fx, lane_idx);
+
+ let res_lane = f(fx, x_lane, y_lane);
+
+ ret.place_lane(fx, lane_idx).write_cvalue(fx, res_lane);
+ }
+}
+
fn simd_pair_for_each_lane<'tcx>(
fx: &mut FunctionCx<'_, '_, 'tcx>,
x: CValue<'tcx>,
@@ -203,7 +227,7 @@ pub(crate) fn codegen_intrinsic_call<'tcx>(
sym::transmute => {
crate::base::codegen_panic(fx, "Transmuting to uninhabited type.", source_info);
}
- _ => unimplemented!("unsupported instrinsic {}", intrinsic),
+ _ => unimplemented!("unsupported intrinsic {}", intrinsic),
}
return;
};
@@ -301,7 +325,44 @@ fn codegen_float_intrinsic_call<'tcx>(
_ => unreachable!(),
};
- let res = fx.easy_call(name, &args, ty);
+ let layout = fx.layout_of(ty);
+ let res = match intrinsic {
+ sym::fmaf32 | sym::fmaf64 => {
+ let a = args[0].load_scalar(fx);
+ let b = args[1].load_scalar(fx);
+ let c = args[2].load_scalar(fx);
+ CValue::by_val(fx.bcx.ins().fma(a, b, c), layout)
+ }
+ sym::copysignf32 | sym::copysignf64 => {
+ let a = args[0].load_scalar(fx);
+ let b = args[1].load_scalar(fx);
+ CValue::by_val(fx.bcx.ins().fcopysign(a, b), layout)
+ }
+ sym::fabsf32
+ | sym::fabsf64
+ | sym::floorf32
+ | sym::floorf64
+ | sym::ceilf32
+ | sym::ceilf64
+ | sym::truncf32
+ | sym::truncf64 => {
+ let a = args[0].load_scalar(fx);
+
+ let val = match intrinsic {
+ sym::fabsf32 | sym::fabsf64 => fx.bcx.ins().fabs(a),
+ sym::floorf32 | sym::floorf64 => fx.bcx.ins().floor(a),
+ sym::ceilf32 | sym::ceilf64 => fx.bcx.ins().ceil(a),
+ sym::truncf32 | sym::truncf64 => fx.bcx.ins().trunc(a),
+ _ => unreachable!(),
+ };
+
+ CValue::by_val(val, layout)
+ }
+ // These intrinsics aren't supported natively by Cranelift.
+ // Lower them to a libcall.
+ _ => fx.easy_call(name, &args, ty),
+ };
+
ret.write_cvalue(fx, res);
true
@@ -320,9 +381,6 @@ fn codegen_regular_intrinsic_call<'tcx>(
let usize_layout = fx.layout_of(fx.tcx.types.usize);
match intrinsic {
- sym::assume => {
- intrinsic_args!(fx, args => (_a); intrinsic);
- }
sym::likely | sym::unlikely => {
intrinsic_args!(fx, args => (a); intrinsic);
@@ -470,37 +528,7 @@ fn codegen_regular_intrinsic_call<'tcx>(
_ => unreachable!(),
};
- let signed = type_sign(lhs.layout().ty);
-
- let checked_res = crate::num::codegen_checked_int_binop(fx, bin_op, lhs, rhs);
-
- let (val, has_overflow) = checked_res.load_scalar_pair(fx);
- let clif_ty = fx.clif_type(lhs.layout().ty).unwrap();
-
- let (min, max) = type_min_max_value(&mut fx.bcx, clif_ty, signed);
-
- let val = match (intrinsic, signed) {
- (sym::saturating_add, false) => fx.bcx.ins().select(has_overflow, max, val),
- (sym::saturating_sub, false) => fx.bcx.ins().select(has_overflow, min, val),
- (sym::saturating_add, true) => {
- let rhs = rhs.load_scalar(fx);
- let rhs_ge_zero =
- fx.bcx.ins().icmp_imm(IntCC::SignedGreaterThanOrEqual, rhs, 0);
- let sat_val = fx.bcx.ins().select(rhs_ge_zero, max, min);
- fx.bcx.ins().select(has_overflow, sat_val, val)
- }
- (sym::saturating_sub, true) => {
- let rhs = rhs.load_scalar(fx);
- let rhs_ge_zero =
- fx.bcx.ins().icmp_imm(IntCC::SignedGreaterThanOrEqual, rhs, 0);
- let sat_val = fx.bcx.ins().select(rhs_ge_zero, min, max);
- fx.bcx.ins().select(has_overflow, sat_val, val)
- }
- _ => unreachable!(),
- };
-
- let res = CValue::by_val(val, lhs.layout());
-
+ let res = crate::num::codegen_saturating_int_binop(fx, bin_op, lhs, rhs);
ret.write_cvalue(fx, res);
}
sym::rotate_left => {
@@ -540,6 +568,13 @@ fn codegen_regular_intrinsic_call<'tcx>(
ret.write_cvalue(fx, CValue::by_val(res, base.layout()));
}
+ sym::ptr_mask => {
+ intrinsic_args!(fx, args => (ptr, mask); intrinsic);
+ let ptr = ptr.load_scalar(fx);
+ let mask = mask.load_scalar(fx);
+ fx.bcx.ins().band(ptr, mask);
+ }
+
sym::transmute => {
intrinsic_args!(fx, args => (from); intrinsic);
@@ -775,18 +810,11 @@ fn codegen_regular_intrinsic_call<'tcx>(
ret.write_cvalue(fx, val);
}
- sym::ptr_guaranteed_eq => {
+ sym::ptr_guaranteed_cmp => {
intrinsic_args!(fx, args => (a, b); intrinsic);
- let val = crate::num::codegen_ptr_binop(fx, BinOp::Eq, a, b);
- ret.write_cvalue(fx, val);
- }
-
- sym::ptr_guaranteed_ne => {
- intrinsic_args!(fx, args => (a, b); intrinsic);
-
- let val = crate::num::codegen_ptr_binop(fx, BinOp::Ne, a, b);
- ret.write_cvalue(fx, val);
+ let val = crate::num::codegen_ptr_binop(fx, BinOp::Eq, a, b).load_scalar(fx);
+ ret.write_cvalue(fx, CValue::by_val(val, fx.layout_of(fx.tcx.types.u8)));
}
sym::caller_location => {
@@ -818,8 +846,6 @@ fn codegen_regular_intrinsic_call<'tcx>(
if fx.tcx.is_compiler_builtins(LOCAL_CRATE) {
// special case for compiler-builtins to avoid having to patch it
crate::trap::trap_unimplemented(fx, "128bit atomics not yet supported");
- let ret_block = fx.get_block(destination.unwrap());
- fx.bcx.ins().jump(ret_block, &[]);
return;
} else {
fx.tcx
@@ -851,8 +877,6 @@ fn codegen_regular_intrinsic_call<'tcx>(
if fx.tcx.is_compiler_builtins(LOCAL_CRATE) {
// special case for compiler-builtins to avoid having to patch it
crate::trap::trap_unimplemented(fx, "128bit atomics not yet supported");
- let ret_block = fx.get_block(destination.unwrap());
- fx.bcx.ins().jump(ret_block, &[]);
return;
} else {
fx.tcx
@@ -1176,7 +1200,7 @@ fn codegen_regular_intrinsic_call<'tcx>(
// FIXME once unwinding is supported, change this to actually catch panics
let f_sig = fx.bcx.func.import_signature(Signature {
call_conv: fx.target_config.default_call_conv,
- params: vec![AbiParam::new(fx.bcx.func.dfg.value_type(data))],
+ params: vec![AbiParam::new(pointer_ty(fx.tcx))],
returns: vec![],
});
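// Editor's note: a plain-Rust sketch (not part of the patch) of the saturation
// rule that crate::num::codegen_saturating_int_binop now encapsulates for the
// saturating_add / saturating_sub intrinsics above, matching the select-based
// logic the hunk removes: on overflow, clamp to the type minimum or maximum
// depending on the operation and the sign of rhs. Shown for i32 only.
fn saturating_add_i32(lhs: i32, rhs: i32) -> i32 {
    match lhs.checked_add(rhs) {
        Some(val) => val,
        // The overflow direction follows the sign of rhs.
        None => if rhs >= 0 { i32::MAX } else { i32::MIN },
    }
}

fn saturating_sub_i32(lhs: i32, rhs: i32) -> i32 {
    match lhs.checked_sub(rhs) {
        Some(val) => val,
        None => if rhs >= 0 { i32::MIN } else { i32::MAX },
    }
}

fn main() {
    assert_eq!(saturating_add_i32(i32::MAX, 1), i32::MAX);
    assert_eq!(saturating_sub_i32(i32::MIN, 1), i32::MIN);
    assert_eq!(saturating_add_i32(i32::MIN, -1), i32::MIN);
    // Matches the standard library's behaviour.
    assert_eq!(saturating_add_i32(40, 2), 40i32.saturating_add(2));
}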
diff --git a/compiler/rustc_codegen_cranelift/src/intrinsics/simd.rs b/compiler/rustc_codegen_cranelift/src/intrinsics/simd.rs
index 30e3d1125..51fce8c85 100644
--- a/compiler/rustc_codegen_cranelift/src/intrinsics/simd.rs
+++ b/compiler/rustc_codegen_cranelift/src/intrinsics/simd.rs
@@ -2,6 +2,7 @@
use rustc_middle::ty::subst::SubstsRef;
use rustc_span::Symbol;
+use rustc_target::abi::Endian;
use super::*;
use crate::prelude::*;
@@ -14,7 +15,7 @@ fn report_simd_type_validation_error(
) {
fx.tcx.sess.span_err(span, &format!("invalid monomorphization of `{}` intrinsic: expected SIMD input type, found non-SIMD `{}`", intrinsic, ty));
// Prevent verifier error
- crate::trap::trap_unreachable(fx, "compilation should not have succeeded");
+ fx.bcx.ins().trap(TrapCode::UnreachableCodeReached);
}
pub(super) fn codegen_simd_intrinsic_call<'tcx>(
@@ -26,7 +27,7 @@ pub(super) fn codegen_simd_intrinsic_call<'tcx>(
span: Span,
) {
match intrinsic {
- sym::simd_cast => {
+ sym::simd_as | sym::simd_cast => {
intrinsic_args!(fx, args => (a); intrinsic);
if !a.layout().ty.is_simd() {
@@ -157,11 +158,12 @@ pub(super) fn codegen_simd_intrinsic_call<'tcx>(
),
);
// Prevent verifier error
- crate::trap::trap_unreachable(fx, "compilation should not have succeeded");
+ fx.bcx.ins().trap(TrapCode::UnreachableCodeReached);
return;
}
}
} else {
+ // FIXME remove this case
intrinsic.as_str()["simd_shuffle".len()..].parse().unwrap()
};
@@ -186,7 +188,10 @@ pub(super) fn codegen_simd_intrinsic_call<'tcx>(
let size = Size::from_bytes(
4 * ret_lane_count, /* size_of([u32; ret_lane_count]) */
);
- alloc.inner().get_bytes(fx, alloc_range(offset, size)).unwrap()
+ alloc
+ .inner()
+ .get_bytes_strip_provenance(fx, alloc_range(offset, size))
+ .unwrap()
}
_ => unreachable!("{:?}", idx_const),
};
@@ -274,12 +279,17 @@ pub(super) fn codegen_simd_intrinsic_call<'tcx>(
idx_const
} else {
fx.tcx.sess.span_warn(span, "Index argument for `simd_extract` is not a constant");
- let res = crate::trap::trap_unimplemented_ret_value(
+ let trap_block = fx.bcx.create_block();
+ let dummy_block = fx.bcx.create_block();
+ let true_ = fx.bcx.ins().iconst(types::I8, 1);
+ fx.bcx.ins().brnz(true_, trap_block, &[]);
+ fx.bcx.ins().jump(dummy_block, &[]);
+ fx.bcx.switch_to_block(trap_block);
+ crate::trap::trap_unimplemented(
fx,
- ret.layout(),
"Index argument for `simd_extract` is not a constant",
);
- ret.write_cvalue(fx, res);
+ fx.bcx.switch_to_block(dummy_block);
return;
};
@@ -392,21 +402,15 @@ pub(super) fn codegen_simd_intrinsic_call<'tcx>(
let layout = a.layout();
let (lane_count, lane_ty) = layout.ty.simd_size_and_type(fx.tcx);
+ let res_lane_layout = fx.layout_of(lane_ty);
for lane in 0..lane_count {
- let a_lane = a.value_lane(fx, lane);
- let b_lane = b.value_lane(fx, lane);
- let c_lane = c.value_lane(fx, lane);
+ let a_lane = a.value_lane(fx, lane).load_scalar(fx);
+ let b_lane = b.value_lane(fx, lane).load_scalar(fx);
+ let c_lane = c.value_lane(fx, lane).load_scalar(fx);
- let res_lane = match lane_ty.kind() {
- ty::Float(FloatTy::F32) => {
- fx.easy_call("fmaf", &[a_lane, b_lane, c_lane], lane_ty)
- }
- ty::Float(FloatTy::F64) => {
- fx.easy_call("fma", &[a_lane, b_lane, c_lane], lane_ty)
- }
- _ => unreachable!(),
- };
+ let res_lane = fx.bcx.ins().fma(a_lane, b_lane, c_lane);
+ let res_lane = CValue::by_val(res_lane, res_lane_layout);
ret.place_lane(fx, lane).write_cvalue(fx, res_lane);
}
@@ -648,8 +652,128 @@ pub(super) fn codegen_simd_intrinsic_call<'tcx>(
}
}
- // simd_saturating_*
- // simd_bitmask
+ sym::simd_select_bitmask => {
+ intrinsic_args!(fx, args => (m, a, b); intrinsic);
+
+ if !a.layout().ty.is_simd() {
+ report_simd_type_validation_error(fx, intrinsic, span, a.layout().ty);
+ return;
+ }
+ assert_eq!(a.layout(), b.layout());
+
+ let (lane_count, lane_ty) = a.layout().ty.simd_size_and_type(fx.tcx);
+ let lane_layout = fx.layout_of(lane_ty);
+
+ let m = m.load_scalar(fx);
+
+ for lane in 0..lane_count {
+ let m_lane = fx.bcx.ins().ushr_imm(m, u64::from(lane) as i64);
+ let m_lane = fx.bcx.ins().band_imm(m_lane, 1);
+ let a_lane = a.value_lane(fx, lane).load_scalar(fx);
+ let b_lane = b.value_lane(fx, lane).load_scalar(fx);
+
+ let m_lane = fx.bcx.ins().icmp_imm(IntCC::Equal, m_lane, 0);
+ let res_lane =
+ CValue::by_val(fx.bcx.ins().select(m_lane, b_lane, a_lane), lane_layout);
+
+ ret.place_lane(fx, lane).write_cvalue(fx, res_lane);
+ }
+ }
+
+ sym::simd_bitmask => {
+ intrinsic_args!(fx, args => (a); intrinsic);
+
+ let (lane_count, lane_ty) = a.layout().ty.simd_size_and_type(fx.tcx);
+ let lane_clif_ty = fx.clif_type(lane_ty).unwrap();
+
+ // The `fn simd_bitmask(vector) -> unsigned integer` intrinsic takes a
+ // vector mask and returns the most significant bit (MSB) of each lane in the form
+ // of either:
+ // * an unsigned integer
+ // * an array of `u8`
+ // If the vector has less than 8 lanes, a u8 is returned with zeroed trailing bits.
+ //
+ // The bit order of the result depends on the byte endianness, LSB-first for little
+ // endian and MSB-first for big endian.
+ let expected_int_bits = lane_count.max(8);
+ let expected_bytes = expected_int_bits / 8 + ((expected_int_bits % 8 > 0) as u64);
+
+ match lane_ty.kind() {
+ ty::Int(_) | ty::Uint(_) => {}
+ _ => {
+ fx.tcx.sess.span_fatal(
+ span,
+ &format!(
+ "invalid monomorphization of `simd_bitmask` intrinsic: \
+ vector argument `{}`'s element type `{}`, expected integer element \
+ type",
+ a.layout().ty,
+ lane_ty
+ ),
+ );
+ }
+ }
+
+ let res_type =
+ Type::int_with_byte_size(u16::try_from(expected_bytes).unwrap()).unwrap();
+ let mut res = fx.bcx.ins().iconst(res_type, 0);
+
+ let lanes = match fx.tcx.sess.target.endian {
+ Endian::Big => Box::new(0..lane_count) as Box<dyn Iterator<Item = u64>>,
+ Endian::Little => Box::new((0..lane_count).rev()) as Box<dyn Iterator<Item = u64>>,
+ };
+ for lane in lanes {
+ let a_lane = a.value_lane(fx, lane).load_scalar(fx);
+
+ // extract sign bit of an int
+ let a_lane_sign = fx.bcx.ins().ushr_imm(a_lane, i64::from(lane_clif_ty.bits() - 1));
+
+ // shift sign bit into result
+ let a_lane_sign = clif_intcast(fx, a_lane_sign, res_type, false);
+ res = fx.bcx.ins().ishl_imm(res, 1);
+ res = fx.bcx.ins().bor(res, a_lane_sign);
+ }
+
+ match ret.layout().ty.kind() {
+ ty::Uint(i) if i.bit_width() == Some(expected_int_bits) => {}
+ ty::Array(elem, len)
+ if matches!(elem.kind(), ty::Uint(ty::UintTy::U8))
+ && len.try_eval_usize(fx.tcx, ty::ParamEnv::reveal_all())
+ == Some(expected_bytes) => {}
+ _ => {
+ fx.tcx.sess.span_fatal(
+ span,
+ &format!(
+ "invalid monomorphization of `simd_bitmask` intrinsic: \
+ cannot return `{}`, expected `u{}` or `[u8; {}]`",
+ ret.layout().ty,
+ expected_int_bits,
+ expected_bytes
+ ),
+ );
+ }
+ }
+
+ let res = CValue::by_val(res, ret.layout());
+ ret.write_cvalue(fx, res);
+ }
+
+ sym::simd_saturating_add | sym::simd_saturating_sub => {
+ intrinsic_args!(fx, args => (x, y); intrinsic);
+
+ let bin_op = match intrinsic {
+ sym::simd_saturating_add => BinOp::Add,
+ sym::simd_saturating_sub => BinOp::Sub,
+ _ => unreachable!(),
+ };
+
+ // FIXME use vector instructions when possible
+ simd_pair_for_each_lane_typed(fx, x, y, ret, &|fx, x_lane, y_lane| {
+ crate::num::codegen_saturating_int_binop(fx, bin_op, x_lane, y_lane)
+ });
+ }
+
+ // simd_arith_offset
// simd_scatter
// simd_gather
_ => {
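// Editor's note: a scalar sketch (not part of the patch) of what the new
// simd_bitmask lowering above computes: take the most significant bit (the sign
// bit of each integer mask lane) of every lane and pack the bits into an
// unsigned integer, LSB-first on little-endian targets. A fixed i32x8 vector is
// used for illustration; the real code handles arbitrary lane counts and an
// array-of-u8 return type as well.
fn simd_bitmask_i32x8(lanes: [i32; 8], little_endian: bool) -> u8 {
    let mut res: u8 = 0;
    // The codegen iterates lanes in reverse on little endian so that lane 0
    // ends up in the least significant bit after the final shifts.
    let order: Vec<usize> = if little_endian {
        (0..8).rev().collect()
    } else {
        (0..8).collect()
    };
    for lane in order {
        let sign_bit = ((lanes[lane] as u32) >> 31) as u8; // extract the lane's MSB
        res = (res << 1) | sign_bit;                        // shift it into the result
    }
    res
}

fn main() {
    // Lanes 0 and 2 have their sign bit set, so bits 0 and 2 of the mask are set.
    let mask = [-1i32, 0, -1, 0, 0, 0, 0, 0];
    assert_eq!(simd_bitmask_i32x8(mask, true), 0b0000_0101);
}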
diff --git a/compiler/rustc_codegen_cranelift/src/lib.rs b/compiler/rustc_codegen_cranelift/src/lib.rs
index bb0793b1d..629d79d50 100644
--- a/compiler/rustc_codegen_cranelift/src/lib.rs
+++ b/compiler/rustc_codegen_cranelift/src/lib.rs
@@ -4,6 +4,7 @@
#![warn(unused_lifetimes)]
#![warn(unreachable_pub)]
+extern crate jobserver;
#[macro_use]
extern crate rustc_middle;
extern crate rustc_ast;
@@ -25,10 +26,12 @@ extern crate rustc_target;
extern crate rustc_driver;
use std::any::Any;
-use std::cell::Cell;
+use std::cell::{Cell, RefCell};
+use std::sync::Arc;
use rustc_codegen_ssa::traits::CodegenBackend;
use rustc_codegen_ssa::CodegenResults;
+use rustc_data_structures::profiling::SelfProfilerRef;
use rustc_errors::ErrorGuaranteed;
use rustc_metadata::EncodedMetadata;
use rustc_middle::dep_graph::{WorkProduct, WorkProductId};
@@ -51,11 +54,13 @@ mod cast;
mod codegen_i128;
mod common;
mod compiler_builtins;
+mod concurrency_limiter;
mod config;
mod constant;
mod debuginfo;
mod discriminant;
mod driver;
+mod global_asm;
mod inline_asm;
mod intrinsics;
mod linkage;
@@ -91,8 +96,8 @@ mod prelude {
pub(crate) use cranelift_codegen::ir::function::Function;
pub(crate) use cranelift_codegen::ir::types;
pub(crate) use cranelift_codegen::ir::{
- AbiParam, Block, ExternalName, FuncRef, Inst, InstBuilder, MemFlags, Signature, SourceLoc,
- StackSlot, StackSlotData, StackSlotKind, TrapCode, Type, Value,
+ AbiParam, Block, FuncRef, Inst, InstBuilder, MemFlags, Signature, SourceLoc, StackSlot,
+ StackSlotData, StackSlotKind, TrapCode, Type, Value,
};
pub(crate) use cranelift_codegen::isa::{self, CallConv};
pub(crate) use cranelift_codegen::Context;
@@ -119,19 +124,20 @@ impl<F: Fn() -> String> Drop for PrintOnPanic<F> {
/// The codegen context holds any information shared between the codegen of individual functions
/// inside a single codegen unit with the exception of the Cranelift [`Module`](cranelift_module::Module).
-struct CodegenCx<'tcx> {
- tcx: TyCtxt<'tcx>,
+struct CodegenCx {
+ profiler: SelfProfilerRef,
+ output_filenames: Arc<OutputFilenames>,
+ should_write_ir: bool,
global_asm: String,
inline_asm_index: Cell<usize>,
- cached_context: Context,
- debug_context: Option<DebugContext<'tcx>>,
+ debug_context: Option<DebugContext>,
unwind_context: UnwindContext,
cgu_name: Symbol,
}
-impl<'tcx> CodegenCx<'tcx> {
+impl CodegenCx {
fn new(
- tcx: TyCtxt<'tcx>,
+ tcx: TyCtxt<'_>,
backend_config: BackendConfig,
isa: &dyn TargetIsa,
debug_info: bool,
@@ -147,10 +153,11 @@ impl<'tcx> CodegenCx<'tcx> {
None
};
CodegenCx {
- tcx,
+ profiler: tcx.prof.clone(),
+ output_filenames: tcx.output_filenames(()).clone(),
+ should_write_ir: crate::pretty_clif::should_write_ir(tcx),
global_asm: String::new(),
inline_asm_index: Cell::new(0),
- cached_context: Context::new(),
debug_context,
unwind_context,
cgu_name,
@@ -159,7 +166,7 @@ impl<'tcx> CodegenCx<'tcx> {
}
pub struct CraneliftCodegenBackend {
- pub config: Option<BackendConfig>,
+ pub config: RefCell<Option<BackendConfig>>,
}
impl CodegenBackend for CraneliftCodegenBackend {
@@ -169,6 +176,13 @@ impl CodegenBackend for CraneliftCodegenBackend {
Lto::No | Lto::ThinLocal => {}
Lto::Thin | Lto::Fat => sess.warn("LTO is not supported. You may get a linker error."),
}
+
+ let mut config = self.config.borrow_mut();
+ if config.is_none() {
+ let new_config = BackendConfig::from_opts(&sess.opts.cg.llvm_args)
+ .unwrap_or_else(|err| sess.fatal(&err));
+ *config = Some(new_config);
+ }
}
fn target_features(&self, _sess: &Session, _allow_unstable: bool) -> Vec<rustc_span::Symbol> {
@@ -186,15 +200,7 @@ impl CodegenBackend for CraneliftCodegenBackend {
need_metadata_module: bool,
) -> Box<dyn Any> {
tcx.sess.abort_if_errors();
- let config = if let Some(config) = self.config.clone() {
- config
- } else {
- if !tcx.sess.unstable_options() && !tcx.sess.opts.cg.llvm_args.is_empty() {
- tcx.sess.fatal("`-Z unstable-options` must be passed to allow configuring cg_clif");
- }
- BackendConfig::from_opts(&tcx.sess.opts.cg.llvm_args)
- .unwrap_or_else(|err| tcx.sess.fatal(&err))
- };
+ let config = self.config.borrow().clone().unwrap();
match config.codegen_mode {
CodegenMode::Aot => driver::aot::run_aot(tcx, config, metadata, need_metadata_module),
CodegenMode::Jit | CodegenMode::JitLazy => {
@@ -210,12 +216,13 @@ impl CodegenBackend for CraneliftCodegenBackend {
fn join_codegen(
&self,
ongoing_codegen: Box<dyn Any>,
- _sess: &Session,
+ sess: &Session,
_outputs: &OutputFilenames,
) -> Result<(CodegenResults, FxHashMap<WorkProductId, WorkProduct>), ErrorGuaranteed> {
- Ok(*ongoing_codegen
- .downcast::<(CodegenResults, FxHashMap<WorkProductId, WorkProduct>)>()
- .unwrap())
+ Ok(ongoing_codegen
+ .downcast::<driver::aot::OngoingCodegen>()
+ .unwrap()
+ .join(sess, self.config.borrow().as_ref().unwrap()))
}
fn link(
@@ -244,7 +251,6 @@ fn build_isa(sess: &Session, backend_config: &BackendConfig) -> Box<dyn isa::Tar
let mut flags_builder = settings::builder();
flags_builder.enable("is_pic").unwrap();
- flags_builder.set("enable_probestack", "false").unwrap(); // __cranelift_probestack is not provided
let enable_verifier = if backend_config.enable_verifier { "true" } else { "false" };
flags_builder.set("enable_verifier", enable_verifier).unwrap();
flags_builder.set("regalloc_checker", enable_verifier).unwrap();
@@ -272,6 +278,15 @@ fn build_isa(sess: &Session, backend_config: &BackendConfig) -> Box<dyn isa::Tar
}
}
+ if target_triple.architecture == target_lexicon::Architecture::X86_64 {
+ // Windows depends on stack probes to grow the committed part of the stack
+ flags_builder.enable("enable_probestack").unwrap();
+ flags_builder.set("probestack_strategy", "inline").unwrap();
+ } else {
+ // __cranelift_probestack is not provided and inline stack probes are only supported on x86_64
+ flags_builder.set("enable_probestack", "false").unwrap();
+ }
+
let flags = settings::Flags::new(flags_builder);
let isa_builder = match sess.opts.cg.target_cpu.as_deref() {
@@ -312,5 +327,5 @@ fn build_isa(sess: &Session, backend_config: &BackendConfig) -> Box<dyn isa::Tar
/// This is the entrypoint for a hot plugged rustc_codegen_cranelift
#[no_mangle]
pub fn __rustc_codegen_backend() -> Box<dyn CodegenBackend> {
- Box::new(CraneliftCodegenBackend { config: None })
+ Box::new(CraneliftCodegenBackend { config: RefCell::new(None) })
}
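For context on the probestack flags added to build_isa above, here is a minimal standalone sketch of the same flag selection (the probe_flags helper is hypothetical; it assumes the cranelift-codegen and target-lexicon crates this backend already depends on):

use cranelift_codegen::settings::{self, Configurable};
use target_lexicon::{Architecture, Triple};

fn probe_flags(triple: &Triple) -> settings::Flags {
    let mut builder = settings::builder();
    if triple.architecture == Architecture::X86_64 {
        // Inline stack probes are only implemented on x86_64; Windows needs them
        // to grow the committed part of the stack.
        builder.enable("enable_probestack").unwrap();
        builder.set("probestack_strategy", "inline").unwrap();
    } else {
        builder.set("enable_probestack", "false").unwrap();
    }
    settings::Flags::new(builder)
}

fn main() {
    let _flags = probe_flags(&Triple::host());
}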
diff --git a/compiler/rustc_codegen_cranelift/src/main_shim.rs b/compiler/rustc_codegen_cranelift/src/main_shim.rs
index c67b6e98b..cae6312a6 100644
--- a/compiler/rustc_codegen_cranelift/src/main_shim.rs
+++ b/compiler/rustc_codegen_cranelift/src/main_shim.rs
@@ -1,7 +1,7 @@
use rustc_hir::LangItem;
use rustc_middle::ty::subst::GenericArg;
use rustc_middle::ty::AssocKind;
-use rustc_session::config::EntryFnType;
+use rustc_session::config::{sigpipe, EntryFnType};
use rustc_span::symbol::Ident;
use crate::prelude::*;
@@ -15,12 +15,12 @@ pub(crate) fn maybe_create_entry_wrapper(
is_jit: bool,
is_primary_cgu: bool,
) {
- let (main_def_id, is_main_fn) = match tcx.entry_fn(()) {
+ let (main_def_id, (is_main_fn, sigpipe)) = match tcx.entry_fn(()) {
Some((def_id, entry_ty)) => (
def_id,
match entry_ty {
- EntryFnType::Main => true,
- EntryFnType::Start => false,
+ EntryFnType::Main { sigpipe } => (true, sigpipe),
+ EntryFnType::Start => (false, sigpipe::DEFAULT),
},
),
None => return,
@@ -35,7 +35,7 @@ pub(crate) fn maybe_create_entry_wrapper(
return;
}
- create_entry_fn(tcx, module, unwind_context, main_def_id, is_jit, is_main_fn);
+ create_entry_fn(tcx, module, unwind_context, main_def_id, is_jit, is_main_fn, sigpipe);
fn create_entry_fn(
tcx: TyCtxt<'_>,
@@ -44,6 +44,7 @@ pub(crate) fn maybe_create_entry_wrapper(
rust_main_def_id: DefId,
ignore_lang_start_wrapper: bool,
is_main_fn: bool,
+ sigpipe: u8,
) {
let main_ret_ty = tcx.fn_sig(rust_main_def_id).output();
// Given that `main()` has no arguments,
@@ -74,7 +75,7 @@ pub(crate) fn maybe_create_entry_wrapper(
let main_func_id = m.declare_function(main_name, Linkage::Import, &main_sig).unwrap();
let mut ctx = Context::new();
- ctx.func = Function::with_name_signature(ExternalName::user(0, 0), cmain_sig);
+ ctx.func.signature = cmain_sig;
{
let mut func_ctx = FunctionBuilderContext::new();
let mut bcx = FunctionBuilder::new(&mut ctx.func, &mut func_ctx);
@@ -83,6 +84,7 @@ pub(crate) fn maybe_create_entry_wrapper(
bcx.switch_to_block(block);
let arg_argc = bcx.append_block_param(block, m.target_config().pointer_type());
let arg_argv = bcx.append_block_param(block, m.target_config().pointer_type());
+ let arg_sigpipe = bcx.ins().iconst(types::I8, sigpipe as i64);
let main_func_ref = m.declare_func_in_func(main_func_id, &mut bcx.func);
@@ -143,7 +145,8 @@ pub(crate) fn maybe_create_entry_wrapper(
let main_val = bcx.ins().func_addr(m.target_config().pointer_type(), main_func_ref);
let func_ref = m.declare_func_in_func(start_func_id, &mut bcx.func);
- let call_inst = bcx.ins().call(func_ref, &[main_val, arg_argc, arg_argv]);
+ let call_inst =
+ bcx.ins().call(func_ref, &[main_val, arg_argc, arg_argv, arg_sigpipe]);
bcx.inst_results(call_inst)[0]
} else {
// using user-defined start fn
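An illustrative, self-contained sketch of the call shape the wrapper above now emits: the generated C-ABI main forwards argc/argv plus the new sigpipe byte to the start lang item. All names below are hypothetical stand-ins; the real wrapper is built directly as Cranelift IR and the real sigpipe value comes from the entry function type.

// Stand-in for the crate's `fn main`.
fn rust_main() {}

// Stand-in for the `start` lang item; the real one sets up the runtime and uses
// `sigpipe` to decide how SIGPIPE is configured before calling `main`.
fn lang_start_stub(main: fn(), argc: isize, _argv: *const *const u8, _sigpipe: u8) -> isize {
    let _ = argc;
    main();
    0
}

// Morally what maybe_create_entry_wrapper generates for a non-JIT `fn main` crate.
pub extern "C" fn c_main(argc: isize, argv: *const *const u8) -> isize {
    let sigpipe: u8 = 0; // placeholder; the real value is sigpipe::DEFAULT or per-entry
    lang_start_stub(rust_main, argc, argv, sigpipe)
}

fn main() {
    assert_eq!(c_main(0, std::ptr::null()), 0);
}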
diff --git a/compiler/rustc_codegen_cranelift/src/num.rs b/compiler/rustc_codegen_cranelift/src/num.rs
index 4ce8adb18..ecbab408d 100644
--- a/compiler/rustc_codegen_cranelift/src/num.rs
+++ b/compiler/rustc_codegen_cranelift/src/num.rs
@@ -150,18 +150,12 @@ pub(crate) fn codegen_int_binop<'tcx>(
BinOp::BitXor => b.bxor(lhs, rhs),
BinOp::BitAnd => b.band(lhs, rhs),
BinOp::BitOr => b.bor(lhs, rhs),
- BinOp::Shl => {
- let lhs_ty = fx.bcx.func.dfg.value_type(lhs);
- let actual_shift = fx.bcx.ins().band_imm(rhs, i64::from(lhs_ty.bits() - 1));
- fx.bcx.ins().ishl(lhs, actual_shift)
- }
+ BinOp::Shl => b.ishl(lhs, rhs),
BinOp::Shr => {
- let lhs_ty = fx.bcx.func.dfg.value_type(lhs);
- let actual_shift = fx.bcx.ins().band_imm(rhs, i64::from(lhs_ty.bits() - 1));
if signed {
- fx.bcx.ins().sshr(lhs, actual_shift)
+ b.sshr(lhs, rhs)
} else {
- fx.bcx.ins().ushr(lhs, actual_shift)
+ b.ushr(lhs, rhs)
}
}
// Compare binops are handled by `codegen_binop`.
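The manual band_imm masking removed in this hunk is redundant because Cranelift's ishl/ushr/sshr already take the shift amount modulo the operand's bit width, the same behaviour as Rust's wrapping shifts. A quick scalar sanity check of that behaviour (not backend code):

fn main() {
    // 33 % 32 == 1, so a 33-bit shift of a u32 behaves like a 1-bit shift.
    assert_eq!(1u32.wrapping_shl(33), 2);
    assert_eq!(0x8000_0000u32.wrapping_shr(33), 0x4000_0000);
}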
@@ -279,22 +273,15 @@ pub(crate) fn codegen_checked_int_binop<'tcx>(
}
}
BinOp::Shl => {
- let lhs_ty = fx.bcx.func.dfg.value_type(lhs);
- let masked_shift = fx.bcx.ins().band_imm(rhs, i64::from(lhs_ty.bits() - 1));
- let val = fx.bcx.ins().ishl(lhs, masked_shift);
+ let val = fx.bcx.ins().ishl(lhs, rhs);
let ty = fx.bcx.func.dfg.value_type(val);
let max_shift = i64::from(ty.bits()) - 1;
let has_overflow = fx.bcx.ins().icmp_imm(IntCC::UnsignedGreaterThan, rhs, max_shift);
(val, has_overflow)
}
BinOp::Shr => {
- let lhs_ty = fx.bcx.func.dfg.value_type(lhs);
- let masked_shift = fx.bcx.ins().band_imm(rhs, i64::from(lhs_ty.bits() - 1));
- let val = if !signed {
- fx.bcx.ins().ushr(lhs, masked_shift)
- } else {
- fx.bcx.ins().sshr(lhs, masked_shift)
- };
+ let val =
+ if !signed { fx.bcx.ins().ushr(lhs, rhs) } else { fx.bcx.ins().sshr(lhs, rhs) };
let ty = fx.bcx.func.dfg.value_type(val);
let max_shift = i64::from(ty.bits()) - 1;
let has_overflow = fx.bcx.ins().icmp_imm(IntCC::UnsignedGreaterThan, rhs, max_shift);
@@ -309,6 +296,42 @@ pub(crate) fn codegen_checked_int_binop<'tcx>(
CValue::by_val_pair(res, has_overflow, out_layout)
}
+pub(crate) fn codegen_saturating_int_binop<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ bin_op: BinOp,
+ lhs: CValue<'tcx>,
+ rhs: CValue<'tcx>,
+) -> CValue<'tcx> {
+ assert_eq!(lhs.layout().ty, rhs.layout().ty);
+
+ let signed = type_sign(lhs.layout().ty);
+ let clif_ty = fx.clif_type(lhs.layout().ty).unwrap();
+ let (min, max) = type_min_max_value(&mut fx.bcx, clif_ty, signed);
+
+ let checked_res = crate::num::codegen_checked_int_binop(fx, bin_op, lhs, rhs);
+ let (val, has_overflow) = checked_res.load_scalar_pair(fx);
+
+ let val = match (bin_op, signed) {
+ (BinOp::Add, false) => fx.bcx.ins().select(has_overflow, max, val),
+ (BinOp::Sub, false) => fx.bcx.ins().select(has_overflow, min, val),
+ (BinOp::Add, true) => {
+ let rhs = rhs.load_scalar(fx);
+ let rhs_ge_zero = fx.bcx.ins().icmp_imm(IntCC::SignedGreaterThanOrEqual, rhs, 0);
+ let sat_val = fx.bcx.ins().select(rhs_ge_zero, max, min);
+ fx.bcx.ins().select(has_overflow, sat_val, val)
+ }
+ (BinOp::Sub, true) => {
+ let rhs = rhs.load_scalar(fx);
+ let rhs_ge_zero = fx.bcx.ins().icmp_imm(IntCC::SignedGreaterThanOrEqual, rhs, 0);
+ let sat_val = fx.bcx.ins().select(rhs_ge_zero, min, max);
+ fx.bcx.ins().select(has_overflow, sat_val, val)
+ }
+ _ => unreachable!(),
+ };
+
+ CValue::by_val(val, lhs.layout())
+}
+
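A scalar model of the select-based saturation in codegen_saturating_int_binop above, specialized to signed i32 addition (the helper is illustrative; the real code works on Cranelift values and handles subtraction and the unsigned cases with the same select pattern):

fn saturating_add_model(lhs: i32, rhs: i32) -> i32 {
    let (val, has_overflow) = lhs.overflowing_add(rhs);
    if has_overflow {
        // For signed addition, the sign of rhs tells us which bound was crossed.
        if rhs >= 0 { i32::MAX } else { i32::MIN }
    } else {
        val
    }
}

fn main() {
    assert_eq!(saturating_add_model(i32::MAX, 1), i32::MAX);
    assert_eq!(saturating_add_model(i32::MIN, -1), i32::MIN);
    assert_eq!(saturating_add_model(40, 2), 42);
    // Matches the standard library's saturating_add.
    assert_eq!(saturating_add_model(i32::MAX, 7), i32::MAX.saturating_add(7));
}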
pub(crate) fn codegen_float_binop<'tcx>(
fx: &mut FunctionCx<'_, '_, 'tcx>,
bin_op: BinOp,
diff --git a/compiler/rustc_codegen_cranelift/src/optimize/mod.rs b/compiler/rustc_codegen_cranelift/src/optimize/mod.rs
index d1f89adb3..0df7e8229 100644
--- a/compiler/rustc_codegen_cranelift/src/optimize/mod.rs
+++ b/compiler/rustc_codegen_cranelift/src/optimize/mod.rs
@@ -1,20 +1,3 @@
//! Various optimizations specific to cg_clif
-use cranelift_codegen::isa::TargetIsa;
-
-use crate::prelude::*;
-
pub(crate) mod peephole;
-
-pub(crate) fn optimize_function<'tcx>(
- tcx: TyCtxt<'tcx>,
- isa: &dyn TargetIsa,
- instance: Instance<'tcx>,
- ctx: &mut Context,
- clif_comments: &mut crate::pretty_clif::CommentWriter,
-) {
- // FIXME classify optimizations over opt levels once we have more
-
- crate::pretty_clif::write_clif_file(tcx, "preopt", isa, instance, &ctx.func, &*clif_comments);
- crate::base::verify_func(tcx, &*clif_comments, &ctx.func);
-}
diff --git a/compiler/rustc_codegen_cranelift/src/pretty_clif.rs b/compiler/rustc_codegen_cranelift/src/pretty_clif.rs
index 1d1ec2168..a7af16268 100644
--- a/compiler/rustc_codegen_cranelift/src/pretty_clif.rs
+++ b/compiler/rustc_codegen_cranelift/src/pretty_clif.rs
@@ -62,7 +62,7 @@ use cranelift_codegen::{
};
use rustc_middle::ty::layout::FnAbiOf;
-use rustc_session::config::OutputType;
+use rustc_session::config::{OutputFilenames, OutputType};
use crate::prelude::*;
@@ -205,15 +205,11 @@ pub(crate) fn should_write_ir(tcx: TyCtxt<'_>) -> bool {
}
pub(crate) fn write_ir_file(
- tcx: TyCtxt<'_>,
- name: impl FnOnce() -> String,
+ output_filenames: &OutputFilenames,
+ name: &str,
write: impl FnOnce(&mut dyn Write) -> std::io::Result<()>,
) {
- if !should_write_ir(tcx) {
- return;
- }
-
- let clif_output_dir = tcx.output_filenames(()).with_extension("clif");
+ let clif_output_dir = output_filenames.with_extension("clif");
match std::fs::create_dir(&clif_output_dir) {
Ok(()) => {}
@@ -221,44 +217,43 @@ pub(crate) fn write_ir_file(
res @ Err(_) => res.unwrap(),
}
- let clif_file_name = clif_output_dir.join(name());
+ let clif_file_name = clif_output_dir.join(name);
let res = std::fs::File::create(clif_file_name).and_then(|mut file| write(&mut file));
if let Err(err) = res {
- tcx.sess.warn(&format!("error writing ir file: {}", err));
+ // Using early_warn as no Session is available here
+ rustc_session::early_warn(
+ rustc_session::config::ErrorOutputType::default(),
+ &format!("error writing ir file: {}", err),
+ );
}
}
-pub(crate) fn write_clif_file<'tcx>(
- tcx: TyCtxt<'tcx>,
+pub(crate) fn write_clif_file(
+ output_filenames: &OutputFilenames,
+ symbol_name: &str,
postfix: &str,
isa: &dyn cranelift_codegen::isa::TargetIsa,
- instance: Instance<'tcx>,
func: &cranelift_codegen::ir::Function,
mut clif_comments: &CommentWriter,
) {
// FIXME work around filename too long errors
- write_ir_file(
- tcx,
- || format!("{}.{}.clif", tcx.symbol_name(instance).name, postfix),
- |file| {
- let mut clif = String::new();
- cranelift_codegen::write::decorate_function(&mut clif_comments, &mut clif, func)
- .unwrap();
+ write_ir_file(output_filenames, &format!("{}.{}.clif", symbol_name, postfix), |file| {
+ let mut clif = String::new();
+ cranelift_codegen::write::decorate_function(&mut clif_comments, &mut clif, func).unwrap();
- for flag in isa.flags().iter() {
- writeln!(file, "set {}", flag)?;
- }
- write!(file, "target {}", isa.triple().architecture.to_string())?;
- for isa_flag in isa.isa_flags().iter() {
- write!(file, " {}", isa_flag)?;
- }
- writeln!(file, "\n")?;
- writeln!(file)?;
- file.write_all(clif.as_bytes())?;
- Ok(())
- },
- );
+ for flag in isa.flags().iter() {
+ writeln!(file, "set {}", flag)?;
+ }
+ write!(file, "target {}", isa.triple().architecture.to_string())?;
+ for isa_flag in isa.isa_flags().iter() {
+ write!(file, " {}", isa_flag)?;
+ }
+ writeln!(file, "\n")?;
+ writeln!(file)?;
+ file.write_all(clif.as_bytes())?;
+ Ok(())
+ });
}
impl fmt::Debug for FunctionCx<'_, '_, '_> {
diff --git a/compiler/rustc_codegen_cranelift/src/toolchain.rs b/compiler/rustc_codegen_cranelift/src/toolchain.rs
index f86236ef3..b6b465e1f 100644
--- a/compiler/rustc_codegen_cranelift/src/toolchain.rs
+++ b/compiler/rustc_codegen_cranelift/src/toolchain.rs
@@ -8,10 +8,8 @@ use rustc_session::Session;
/// Tries to infer the path of a binary for the target toolchain from the linker name.
pub(crate) fn get_toolchain_binary(sess: &Session, tool: &str) -> PathBuf {
let (mut linker, _linker_flavor) = linker_and_flavor(sess);
- let linker_file_name = linker
- .file_name()
- .and_then(|name| name.to_str())
- .unwrap_or_else(|| sess.fatal("couldn't extract file name from specified linker"));
+ let linker_file_name =
+ linker.file_name().unwrap().to_str().expect("linker filename should be valid UTF-8");
if linker_file_name == "ld.lld" {
if tool != "ld" {
diff --git a/compiler/rustc_codegen_cranelift/src/trap.rs b/compiler/rustc_codegen_cranelift/src/trap.rs
index 923269c4d..82a2ec579 100644
--- a/compiler/rustc_codegen_cranelift/src/trap.rs
+++ b/compiler/rustc_codegen_cranelift/src/trap.rs
@@ -25,33 +25,10 @@ fn codegen_print(fx: &mut FunctionCx<'_, '_, '_>, msg: &str) {
fx.bcx.ins().call(puts, &[msg_ptr]);
}
-/// Use this for example when a function call should never return. This will fill the current block,
-/// so you can **not** add instructions to it afterwards.
-///
-/// Trap code: user65535
-pub(crate) fn trap_unreachable(fx: &mut FunctionCx<'_, '_, '_>, msg: impl AsRef<str>) {
- codegen_print(fx, msg.as_ref());
- fx.bcx.ins().trap(TrapCode::UnreachableCodeReached);
-}
/// Use this when something is unimplemented, but `libcore` or `libstd` requires it to codegen.
-/// Unlike `trap_unreachable` this will not fill the current block, so you **must** add instructions
-/// to it afterwards.
///
/// Trap code: user65535
pub(crate) fn trap_unimplemented(fx: &mut FunctionCx<'_, '_, '_>, msg: impl AsRef<str>) {
codegen_print(fx, msg.as_ref());
- let true_ = fx.bcx.ins().iconst(types::I32, 1);
- fx.bcx.ins().trapnz(true_, TrapCode::User(!0));
-}
-
-/// Like `trap_unimplemented` but returns a fake value of the specified type.
-///
-/// Trap code: user65535
-pub(crate) fn trap_unimplemented_ret_value<'tcx>(
- fx: &mut FunctionCx<'_, '_, 'tcx>,
- dest_layout: TyAndLayout<'tcx>,
- msg: impl AsRef<str>,
-) -> CValue<'tcx> {
- trap_unimplemented(fx, msg);
- CValue::by_ref(Pointer::const_addr(fx, 0), dest_layout)
+ fx.bcx.ins().trap(TrapCode::User(!0));
}
diff --git a/compiler/rustc_codegen_cranelift/src/unsize.rs b/compiler/rustc_codegen_cranelift/src/unsize.rs
index 052ca0a08..9c88f7dbc 100644
--- a/compiler/rustc_codegen_cranelift/src/unsize.rs
+++ b/compiler/rustc_codegen_cranelift/src/unsize.rs
@@ -25,10 +25,16 @@ pub(crate) fn unsized_info<'tcx>(
.bcx
.ins()
.iconst(fx.pointer_type, len.eval_usize(fx.tcx, ParamEnv::reveal_all()) as i64),
- (&ty::Dynamic(ref data_a, ..), &ty::Dynamic(ref data_b, ..)) => {
+ (
+ &ty::Dynamic(ref data_a, _, src_dyn_kind),
+ &ty::Dynamic(ref data_b, _, target_dyn_kind),
+ ) => {
+ assert_eq!(src_dyn_kind, target_dyn_kind);
+
let old_info =
old_info.expect("unsized_info: missing old info for trait upcasting coercion");
if data_a.principal_def_id() == data_b.principal_def_id() {
+ // A no-op cast that doesn't actually change anything; it should be allowed even with invalid vtables.
return old_info;
}
@@ -100,6 +106,21 @@ fn unsize_ptr<'tcx>(
}
}
+/// Coerces `src` to `dst_ty` which is guaranteed to be a `dyn*` type.
+pub(crate) fn cast_to_dyn_star<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ src: Value,
+ src_ty_and_layout: TyAndLayout<'tcx>,
+ dst_ty: Ty<'tcx>,
+ old_info: Option<Value>,
+) -> (Value, Value) {
+ assert!(
+ matches!(dst_ty.kind(), ty::Dynamic(_, _, ty::DynStar)),
+ "destination type must be a dyn*"
+ );
+ (src, unsized_info(fx, src_ty_and_layout.ty, dst_ty, old_info))
+}
+
/// Coerce `src`, which is a reference to a value of type `src_ty`,
/// to a value of type `dst_ty` and store the result in `dst`
pub(crate) fn coerce_unsized_into<'tcx>(
@@ -146,6 +167,24 @@ pub(crate) fn coerce_unsized_into<'tcx>(
}
}
+pub(crate) fn coerce_dyn_star<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ src: CValue<'tcx>,
+ dst: CPlace<'tcx>,
+) {
+ let (data, extra) = if let ty::Dynamic(_, _, ty::DynStar) = src.layout().ty.kind() {
+ let (data, vtable) = src.load_scalar_pair(fx);
+ (data, Some(vtable))
+ } else {
+ let data = src.load_scalar(fx);
+ (data, None)
+ };
+
+ let (data, vtable) = cast_to_dyn_star(fx, data, src.layout(), dst.layout().ty, extra);
+
+ dst.write_cvalue(fx, CValue::by_val_pair(data, vtable, dst.layout()));
+}
+
// Adapted from https://github.com/rust-lang/rust/blob/2a663555ddf36f6b041445894a8c175cd1bc718c/src/librustc_codegen_ssa/glue.rs
pub(crate) fn size_and_align_of_dst<'tcx>(
diff --git a/compiler/rustc_codegen_cranelift/src/value_and_place.rs b/compiler/rustc_codegen_cranelift/src/value_and_place.rs
index 45ae2bd8f..c3dfbd372 100644
--- a/compiler/rustc_codegen_cranelift/src/value_and_place.rs
+++ b/compiler/rustc_codegen_cranelift/src/value_and_place.rs
@@ -107,6 +107,50 @@ impl<'tcx> CValue<'tcx> {
}
}
+ // FIXME remove
+ // Forces the data value of a dyn* value to the stack and returns a pointer to it as well as the
+ // vtable pointer.
+ pub(crate) fn dyn_star_force_data_on_stack(
+ self,
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ ) -> (Value, Value) {
+ assert!(self.1.ty.is_dyn_star());
+
+ match self.0 {
+ CValueInner::ByRef(ptr, None) => {
+ let (a_scalar, b_scalar) = match self.1.abi {
+ Abi::ScalarPair(a, b) => (a, b),
+ _ => unreachable!("dyn_star_force_data_on_stack({:?})", self),
+ };
+ let b_offset = scalar_pair_calculate_b_offset(fx.tcx, a_scalar, b_scalar);
+ let clif_ty2 = scalar_to_clif_type(fx.tcx, b_scalar);
+ let mut flags = MemFlags::new();
+ flags.set_notrap();
+ let vtable = ptr.offset(fx, b_offset).load(fx, clif_ty2, flags);
+ (ptr.get_addr(fx), vtable)
+ }
+ CValueInner::ByValPair(data, vtable) => {
+ let stack_slot = fx.bcx.create_sized_stack_slot(StackSlotData {
+ kind: StackSlotKind::ExplicitSlot,
+ // FIXME Don't force the size to a multiple of 16 bytes once Cranelift gets a way to
+ // specify stack slot alignment.
+ size: (u32::try_from(fx.target_config.pointer_type().bytes()).unwrap() + 15)
+ / 16
+ * 16,
+ });
+ let data_ptr = Pointer::stack_slot(stack_slot);
+ let mut flags = MemFlags::new();
+ flags.set_notrap();
+ data_ptr.store(fx, data, flags);
+
+ (data_ptr.get_addr(fx), vtable)
+ }
+ CValueInner::ByRef(_, Some(_)) | CValueInner::ByVal(_) => {
+ unreachable!("dyn_star_force_data_on_stack({:?})", self)
+ }
+ }
+ }
+
pub(crate) fn try_to_ptr(self) -> Option<(Pointer, Option<Value>)> {
match self.0 {
CValueInner::ByRef(ptr, meta) => Some((ptr, meta)),
@@ -122,7 +166,7 @@ impl<'tcx> CValue<'tcx> {
let clif_ty = match layout.abi {
Abi::Scalar(scalar) => scalar_to_clif_type(fx.tcx, scalar),
Abi::Vector { element, count } => scalar_to_clif_type(fx.tcx, element)
- .by(u16::try_from(count).unwrap())
+ .by(u32::try_from(count).unwrap())
.unwrap(),
_ => unreachable!("{:?}", layout.ty),
};
@@ -236,6 +280,10 @@ impl<'tcx> CValue<'tcx> {
crate::unsize::coerce_unsized_into(fx, self, dest);
}
+ pub(crate) fn coerce_dyn_star(self, fx: &mut FunctionCx<'_, '_, 'tcx>, dest: CPlace<'tcx>) {
+ crate::unsize::coerce_dyn_star(fx, self, dest);
+ }
+
/// If `ty` is signed, `const_val` must already be sign extended.
pub(crate) fn const_val(
fx: &mut FunctionCx<'_, '_, 'tcx>,
@@ -330,7 +378,7 @@ impl<'tcx> CPlace<'tcx> {
.fatal(&format!("values of type {} are too big to store on the stack", layout.ty));
}
- let stack_slot = fx.bcx.create_stack_slot(StackSlotData {
+ let stack_slot = fx.bcx.create_sized_stack_slot(StackSlotData {
kind: StackSlotKind::ExplicitSlot,
// FIXME Don't force the size to a multiple of 16 bytes once Cranelift gets a way to
// specify stack slot alignment.
@@ -472,7 +520,7 @@ impl<'tcx> CPlace<'tcx> {
}
_ if src_ty.is_vector() || dst_ty.is_vector() => {
// FIXME do something more efficient for transmutes between vectors and integers.
- let stack_slot = fx.bcx.create_stack_slot(StackSlotData {
+ let stack_slot = fx.bcx.create_sized_stack_slot(StackSlotData {
kind: StackSlotKind::ExplicitSlot,
// FIXME Don't force the size to a multiple of 16 bytes once Cranelift gets a way to
// specify stack slot alignment.
@@ -519,7 +567,7 @@ impl<'tcx> CPlace<'tcx> {
if let ty::Array(element, len) = dst_layout.ty.kind() {
// Can only happen for vector types
let len =
- u16::try_from(len.eval_usize(fx.tcx, ParamEnv::reveal_all())).unwrap();
+ u32::try_from(len.eval_usize(fx.tcx, ParamEnv::reveal_all())).unwrap();
let vector_ty = fx.clif_type(*element).unwrap().by(len).unwrap();
let data = match from.0 {
@@ -614,13 +662,21 @@ impl<'tcx> CPlace<'tcx> {
dst_align,
src_align,
true,
- MemFlags::trusted(),
+ flags,
);
}
CValueInner::ByRef(_, Some(_)) => todo!(),
}
}
+ pub(crate) fn place_opaque_cast(
+ self,
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ ty: Ty<'tcx>,
+ ) -> CPlace<'tcx> {
+ CPlace { inner: self.inner, layout: fx.layout_of(ty) }
+ }
+
pub(crate) fn place_field(
self,
fx: &mut FunctionCx<'_, '_, 'tcx>,
@@ -815,7 +871,8 @@ pub(crate) fn assert_assignable<'tcx>(
);
// fn(&T) -> for<'l> fn(&'l T) is allowed
}
- (&ty::Dynamic(from_traits, _), &ty::Dynamic(to_traits, _)) => {
+ (&ty::Dynamic(from_traits, _, _from_kind), &ty::Dynamic(to_traits, _, _to_kind)) => {
+ // FIXME(dyn-star): Do the right thing with DynKinds
for (from, to) in from_traits.iter().zip(to_traits) {
let from =
fx.tcx.normalize_erasing_late_bound_regions(ParamEnv::reveal_all(), from);
diff --git a/compiler/rustc_codegen_cranelift/src/vtable.rs b/compiler/rustc_codegen_cranelift/src/vtable.rs
index 36b3725ef..f04fb82de 100644
--- a/compiler/rustc_codegen_cranelift/src/vtable.rs
+++ b/compiler/rustc_codegen_cranelift/src/vtable.rs
@@ -45,12 +45,26 @@ pub(crate) fn get_ptr_and_method_ref<'tcx>(
fx: &mut FunctionCx<'_, '_, 'tcx>,
arg: CValue<'tcx>,
idx: usize,
-) -> (Value, Value) {
- let (ptr, vtable) = if let Abi::ScalarPair(_, _) = arg.layout().abi {
- arg.load_scalar_pair(fx)
- } else {
- let (ptr, vtable) = arg.try_to_ptr().unwrap();
- (ptr.get_addr(fx), vtable.unwrap())
+) -> (Pointer, Value) {
+ let (ptr, vtable) = 'block: {
+ if let ty::Ref(_, ty, _) = arg.layout().ty.kind() {
+ if ty.is_dyn_star() {
+ let inner_layout = fx.layout_of(arg.layout().ty.builtin_deref(true).unwrap().ty);
+ let dyn_star = CPlace::for_ptr(Pointer::new(arg.load_scalar(fx)), inner_layout);
+ let ptr = dyn_star.place_field(fx, mir::Field::new(0)).to_ptr();
+ let vtable =
+ dyn_star.place_field(fx, mir::Field::new(1)).to_cvalue(fx).load_scalar(fx);
+ break 'block (ptr, vtable);
+ }
+ }
+
+ if let Abi::ScalarPair(_, _) = arg.layout().abi {
+ let (ptr, vtable) = arg.load_scalar_pair(fx);
+ (Pointer::new(ptr), vtable)
+ } else {
+ let (ptr, vtable) = arg.try_to_ptr().unwrap();
+ (ptr, vtable.unwrap())
+ }
};
let usize_size = fx.layout_of(fx.tcx.types.usize).size.bytes();
diff --git a/compiler/rustc_codegen_cranelift/test.sh b/compiler/rustc_codegen_cranelift/test.sh
index a10924628..3d929a1d5 100755
--- a/compiler/rustc_codegen_cranelift/test.sh
+++ b/compiler/rustc_codegen_cranelift/test.sh
@@ -1,13 +1,2 @@
#!/usr/bin/env bash
-set -e
-
-./y.rs build --sysroot none "$@"
-
-rm -r target/out || true
-
-scripts/tests.sh no_sysroot
-
-./y.rs build "$@"
-
-scripts/tests.sh base_sysroot
-scripts/tests.sh extended_sysroot
+exec ./y.rs test
diff --git a/compiler/rustc_codegen_gcc/example/alloc_system.rs b/compiler/rustc_codegen_gcc/example/alloc_system.rs
index 5f66ca67f..89661918d 100644
--- a/compiler/rustc_codegen_gcc/example/alloc_system.rs
+++ b/compiler/rustc_codegen_gcc/example/alloc_system.rs
@@ -156,7 +156,7 @@ mod platform {
struct Header(*mut u8);
const HEAP_ZERO_MEMORY: DWORD = 0x00000008;
unsafe fn get_header<'a>(ptr: *mut u8) -> &'a mut Header {
- &mut *(ptr as *mut Header).offset(-1)
+ &mut *(ptr as *mut Header).sub(1)
}
unsafe fn align_ptr(ptr: *mut u8, align: usize) -> *mut u8 {
let aligned = ptr.add(align - (ptr as usize & (align - 1)));
diff --git a/compiler/rustc_codegen_gcc/patches/0024-core-Disable-portable-simd-test.patch b/compiler/rustc_codegen_gcc/patches/0024-core-Disable-portable-simd-test.patch
index d5fa1cec0..c59a40df0 100644
--- a/compiler/rustc_codegen_gcc/patches/0024-core-Disable-portable-simd-test.patch
+++ b/compiler/rustc_codegen_gcc/patches/0024-core-Disable-portable-simd-test.patch
@@ -14,7 +14,6 @@ index 06c7be0..359e2e7 100644
@@ -75,7 +75,6 @@
#![feature(never_type)]
#![feature(unwrap_infallible)]
- #![feature(result_into_ok_or_err)]
-#![feature(portable_simd)]
#![feature(ptr_metadata)]
#![feature(once_cell)]
diff --git a/compiler/rustc_codegen_gcc/src/abi.rs b/compiler/rustc_codegen_gcc/src/abi.rs
index 0ed3e1fbe..6fb1cbfad 100644
--- a/compiler/rustc_codegen_gcc/src/abi.rs
+++ b/compiler/rustc_codegen_gcc/src/abi.rs
@@ -11,10 +11,6 @@ use crate::intrinsic::ArgAbiExt;
use crate::type_of::LayoutGccExt;
impl<'a, 'gcc, 'tcx> AbiBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
- fn apply_attrs_callsite(&mut self, _fn_abi: &FnAbi<'tcx, Ty<'tcx>>, _callsite: Self::Value) {
- // TODO(antoyo)
- }
-
fn get_param(&mut self, index: usize) -> Self::Value {
let func = self.current_func();
let param = func.get_param(index as i32);
@@ -107,45 +103,24 @@ pub trait FnAbiGccExt<'gcc, 'tcx> {
impl<'gcc, 'tcx> FnAbiGccExt<'gcc, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
fn gcc_type(&self, cx: &CodegenCx<'gcc, 'tcx>) -> (Type<'gcc>, Vec<Type<'gcc>>, bool, FxHashSet<usize>) {
let mut on_stack_param_indices = FxHashSet::default();
- let args_capacity: usize = self.args.iter().map(|arg|
- if arg.pad.is_some() {
- 1
- }
- else {
- 0
- } +
- if let PassMode::Pair(_, _) = arg.mode {
- 2
- } else {
- 1
- }
- ).sum();
+
+ // This capacity calculation is approximate.
let mut argument_tys = Vec::with_capacity(
- if let PassMode::Indirect { .. } = self.ret.mode {
- 1
- }
- else {
- 0
- } + args_capacity,
+ self.args.len() + if let PassMode::Indirect { .. } = self.ret.mode { 1 } else { 0 }
);
let return_ty =
match self.ret.mode {
PassMode::Ignore => cx.type_void(),
PassMode::Direct(_) | PassMode::Pair(..) => self.ret.layout.immediate_gcc_type(cx),
- PassMode::Cast(cast) => cast.gcc_type(cx),
+ PassMode::Cast(ref cast, _) => cast.gcc_type(cx),
PassMode::Indirect { .. } => {
argument_tys.push(cx.type_ptr_to(self.ret.memory_ty(cx)));
cx.type_void()
}
};
- for arg in &self.args {
- // add padding
- if let Some(ty) = arg.pad {
- argument_tys.push(ty.gcc_type(cx));
- }
-
+ for arg in self.args.iter() {
let arg_ty = match arg.mode {
PassMode::Ignore => continue,
PassMode::Direct(_) => arg.layout.immediate_gcc_type(cx),
@@ -157,7 +132,13 @@ impl<'gcc, 'tcx> FnAbiGccExt<'gcc, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
PassMode::Indirect { extra_attrs: Some(_), .. } => {
unimplemented!();
}
- PassMode::Cast(cast) => cast.gcc_type(cx),
+ PassMode::Cast(ref cast, pad_i32) => {
+ // add padding
+ if pad_i32 {
+ argument_tys.push(Reg::i32().gcc_type(cx));
+ }
+ cast.gcc_type(cx)
+ }
PassMode::Indirect { extra_attrs: None, on_stack: true, .. } => {
on_stack_param_indices.insert(argument_tys.len());
arg.memory_ty(cx)
diff --git a/compiler/rustc_codegen_gcc/src/archive.rs b/compiler/rustc_codegen_gcc/src/archive.rs
index f863abdcc..f18ae7ea5 100644
--- a/compiler/rustc_codegen_gcc/src/archive.rs
+++ b/compiler/rustc_codegen_gcc/src/archive.rs
@@ -1,6 +1,8 @@
use std::fs::File;
use std::path::{Path, PathBuf};
+use crate::errors::RanlibFailure;
+
use rustc_codegen_ssa::back::archive::{ArchiveBuilder, ArchiveBuilderBuilder};
use rustc_session::Session;
@@ -45,6 +47,7 @@ impl ArchiveBuilderBuilder for ArArchiveBuilderBuilder {
_lib_name: &str,
_dll_imports: &[DllImport],
_tmpdir: &Path,
+ _is_direct_dependency: bool,
) -> PathBuf {
unimplemented!();
}
@@ -181,7 +184,7 @@ impl<'a> ArchiveBuilder<'a> for ArArchiveBuilder<'a> {
std::process::Command::new("ranlib").arg(output).status().expect("Couldn't run ranlib");
if !status.success() {
- self.config.sess.fatal(&format!("Ranlib exited with code {:?}", status.code()));
+ self.config.sess.emit_fatal(RanlibFailure::new(status.code()));
}
any_members
diff --git a/compiler/rustc_codegen_gcc/src/asm.rs b/compiler/rustc_codegen_gcc/src/asm.rs
index 52fd66af0..c346dbd63 100644
--- a/compiler/rustc_codegen_gcc/src/asm.rs
+++ b/compiler/rustc_codegen_gcc/src/asm.rs
@@ -12,6 +12,7 @@ use std::borrow::Cow;
use crate::builder::Builder;
use crate::context::CodegenCx;
+use crate::errors::UnwindingInlineAsm;
use crate::type_of::LayoutGccExt;
use crate::callee::get_fn;
@@ -109,7 +110,7 @@ impl<'a, 'gcc, 'tcx> AsmBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
fn codegen_inline_asm(&mut self, template: &[InlineAsmTemplatePiece], rust_operands: &[InlineAsmOperandRef<'tcx, Self>], options: InlineAsmOptions, span: &[Span], _instance: Instance<'_>, _dest_catch_funclet: Option<(Self::BasicBlock, Self::BasicBlock, Option<&Self::Funclet>)>) {
if options.contains(InlineAsmOptions::MAY_UNWIND) {
self.sess()
- .struct_span_err(span[0], "GCC backend does not support unwinding from inline asm")
+ .create_err(UnwindingInlineAsm { span: span[0] })
.emit();
return;
}
@@ -497,7 +498,7 @@ impl<'a, 'gcc, 'tcx> AsmBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
if options.contains(InlineAsmOptions::NORETURN) {
let builtin_unreachable = self.context.get_builtin_function("__builtin_unreachable");
let builtin_unreachable: RValue<'gcc> = unsafe { std::mem::transmute(builtin_unreachable) };
- self.call(self.type_void(), builtin_unreachable, &[], None);
+ self.call(self.type_void(), None, builtin_unreachable, &[], None);
}
// Write results to outputs.
diff --git a/compiler/rustc_codegen_gcc/src/builder.rs b/compiler/rustc_codegen_gcc/src/builder.rs
index 4d40dd099..a314b7cc2 100644
--- a/compiler/rustc_codegen_gcc/src/builder.rs
+++ b/compiler/rustc_codegen_gcc/src/builder.rs
@@ -15,8 +15,11 @@ use gccjit::{
Type,
UnaryOp,
};
+use rustc_apfloat::{ieee, Float, Round, Status};
use rustc_codegen_ssa::MemFlags;
-use rustc_codegen_ssa::common::{AtomicOrdering, AtomicRmwBinOp, IntPredicate, RealPredicate, SynchronizationScope};
+use rustc_codegen_ssa::common::{
+ AtomicOrdering, AtomicRmwBinOp, IntPredicate, RealPredicate, SynchronizationScope, TypeKind,
+};
use rustc_codegen_ssa::mir::operand::{OperandRef, OperandValue};
use rustc_codegen_ssa::mir::place::PlaceRef;
use rustc_codegen_ssa::traits::{
@@ -31,6 +34,7 @@ use rustc_codegen_ssa::traits::{
StaticBuilderMethods,
};
use rustc_data_structures::fx::FxHashSet;
+use rustc_middle::bug;
use rustc_middle::ty::{ParamEnv, Ty, TyCtxt};
use rustc_middle::ty::layout::{FnAbiError, FnAbiOfHelpers, FnAbiRequest, HasParamEnv, HasTyCtxt, LayoutError, LayoutOfHelpers, TyAndLayout};
use rustc_span::Span;
@@ -440,11 +444,23 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
self.block.end_with_switch(None, value, default_block, &gcc_cases);
}
- fn invoke(&mut self, typ: Type<'gcc>, func: RValue<'gcc>, args: &[RValue<'gcc>], then: Block<'gcc>, catch: Block<'gcc>, _funclet: Option<&Funclet>) -> RValue<'gcc> {
+ fn invoke(
+ &mut self,
+ typ: Type<'gcc>,
+ fn_abi: Option<&FnAbi<'tcx, Ty<'tcx>>>,
+ func: RValue<'gcc>,
+ args: &[RValue<'gcc>],
+ then: Block<'gcc>,
+ catch: Block<'gcc>,
+ _funclet: Option<&Funclet>,
+ ) -> RValue<'gcc> {
// TODO(bjorn3): Properly implement unwinding.
- let call_site = self.call(typ, func, args, None);
+ let call_site = self.call(typ, None, func, args, None);
let condition = self.context.new_rvalue_from_int(self.bool_type, 1);
self.llbb().end_with_conditional(None, condition, then, catch);
+ if let Some(_fn_abi) = fn_abi {
+ // TODO(bjorn3): Apply function attributes
+ }
call_site
}
@@ -639,11 +655,7 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
self.current_func().new_local(None, aligned_type, &format!("stack_var_{}", self.stack_var_count.get())).get_address(None)
}
- fn dynamic_alloca(&mut self, _ty: Type<'gcc>, _align: Align) -> RValue<'gcc> {
- unimplemented!();
- }
-
- fn array_alloca(&mut self, _ty: Type<'gcc>, _len: RValue<'gcc>, _align: Align) -> RValue<'gcc> {
+ fn byte_array_alloca(&mut self, _len: RValue<'gcc>, _align: Align) -> RValue<'gcc> {
unimplemented!();
}
@@ -1223,16 +1235,27 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
// TODO(antoyo)
}
- fn call(&mut self, _typ: Type<'gcc>, func: RValue<'gcc>, args: &[RValue<'gcc>], funclet: Option<&Funclet>) -> RValue<'gcc> {
+ fn call(
+ &mut self,
+ _typ: Type<'gcc>,
+ fn_abi: Option<&FnAbi<'tcx, Ty<'tcx>>>,
+ func: RValue<'gcc>,
+ args: &[RValue<'gcc>],
+ funclet: Option<&Funclet>,
+ ) -> RValue<'gcc> {
// FIXME(antoyo): remove when having a proper API.
let gcc_func = unsafe { std::mem::transmute(func) };
- if self.functions.borrow().values().find(|value| **value == gcc_func).is_some() {
+ let call = if self.functions.borrow().values().find(|value| **value == gcc_func).is_some() {
self.function_call(func, args, funclet)
}
else {
// If it's not a function that was defined, it's a function pointer.
self.function_ptr_call(func, args, funclet)
+ };
+ if let Some(_fn_abi) = fn_abi {
+ // TODO(bjorn3): Apply function attributes
}
+ call
}
fn zext(&mut self, value: RValue<'gcc>, dest_typ: Type<'gcc>) -> RValue<'gcc> {
@@ -1271,12 +1294,12 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
val
}
- fn fptoui_sat(&mut self, _val: RValue<'gcc>, _dest_ty: Type<'gcc>) -> Option<RValue<'gcc>> {
- None
+ fn fptoui_sat(&mut self, val: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
+ self.fptoint_sat(false, val, dest_ty)
}
- fn fptosi_sat(&mut self, _val: RValue<'gcc>, _dest_ty: Type<'gcc>) -> Option<RValue<'gcc>> {
- None
+ fn fptosi_sat(&mut self, val: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
+ self.fptoint_sat(true, val, dest_ty)
}
fn instrprof_increment(&mut self, _fn_name: RValue<'gcc>, _hash: RValue<'gcc>, _num_counters: RValue<'gcc>, _index: RValue<'gcc>) {
@@ -1285,6 +1308,166 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
}
impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
+ fn fptoint_sat(&mut self, signed: bool, val: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
+ let src_ty = self.cx.val_ty(val);
+ let (float_ty, int_ty) = if self.cx.type_kind(src_ty) == TypeKind::Vector {
+ assert_eq!(self.cx.vector_length(src_ty), self.cx.vector_length(dest_ty));
+ (self.cx.element_type(src_ty), self.cx.element_type(dest_ty))
+ } else {
+ (src_ty, dest_ty)
+ };
+
+ // FIXME(jistone): the following was originally the fallback SSA implementation, before LLVM 13
+ // added native `fptosi.sat` and `fptoui.sat` conversions, but it was used by GCC as well.
+ // Now that LLVM always relies on its own, the code has been moved to GCC, but the comments are
+ // still LLVM-specific. They should be updated to use GCC specifics where possible.
+
+ let int_width = self.cx.int_width(int_ty);
+ let float_width = self.cx.float_width(float_ty);
+ // LLVM's fpto[su]i returns undef when the input val is infinite, NaN, or does not fit into the
+ // destination integer type after rounding towards zero. This `undef` value can cause UB in
+ // safe code (see issue #10184), so we implement a saturating conversion on top of it:
+ // Semantically, the mathematical value of the input is rounded towards zero to the next
+ // mathematical integer, and then the result is clamped into the range of the destination
+ // integer type. Positive and negative infinity are mapped to the maximum and minimum value of
+ // the destination integer type. NaN is mapped to 0.
+ //
+ // Define f_min and f_max as the largest and smallest (finite) floats that are exactly equal to
+ // a value representable in int_ty.
+ // They are exactly equal to int_ty::{MIN,MAX} if float_ty has enough significand bits.
+ // Otherwise, int_ty::MAX must be rounded towards zero, as it is one less than a power of two.
+ // int_ty::MIN, however, is either zero or a negative power of two and is thus exactly
+ // representable. Note that this only works if float_ty's exponent range is sufficiently large.
+ // f16 or 256 bit integers would break this property. Right now the smallest float type is f32
+ // with exponents ranging up to 127, which is barely enough for i128::MIN = -2^127.
+ // On the other hand, f_max works even if int_ty::MAX is greater than float_ty::MAX. Because
+ // we're rounding towards zero, we just get float_ty::MAX (which is always an integer).
+ // This already happens today with u128::MAX = 2^128 - 1 > f32::MAX.
+ let int_max = |signed: bool, int_width: u64| -> u128 {
+ let shift_amount = 128 - int_width;
+ if signed { i128::MAX as u128 >> shift_amount } else { u128::MAX >> shift_amount }
+ };
+ let int_min = |signed: bool, int_width: u64| -> i128 {
+ if signed { i128::MIN >> (128 - int_width) } else { 0 }
+ };
+
+ let compute_clamp_bounds_single = |signed: bool, int_width: u64| -> (u128, u128) {
+ let rounded_min =
+ ieee::Single::from_i128_r(int_min(signed, int_width), Round::TowardZero);
+ assert_eq!(rounded_min.status, Status::OK);
+ let rounded_max =
+ ieee::Single::from_u128_r(int_max(signed, int_width), Round::TowardZero);
+ assert!(rounded_max.value.is_finite());
+ (rounded_min.value.to_bits(), rounded_max.value.to_bits())
+ };
+ let compute_clamp_bounds_double = |signed: bool, int_width: u64| -> (u128, u128) {
+ let rounded_min =
+ ieee::Double::from_i128_r(int_min(signed, int_width), Round::TowardZero);
+ assert_eq!(rounded_min.status, Status::OK);
+ let rounded_max =
+ ieee::Double::from_u128_r(int_max(signed, int_width), Round::TowardZero);
+ assert!(rounded_max.value.is_finite());
+ (rounded_min.value.to_bits(), rounded_max.value.to_bits())
+ };
+ // To implement saturation, we perform the following steps:
+ //
+ // 1. Cast val to an integer with fpto[su]i. This may result in undef.
+ // 2. Compare val to f_min and f_max, and use the comparison results to select:
+ // a) int_ty::MIN if val < f_min or val is NaN
+ // b) int_ty::MAX if val > f_max
+ // c) the result of fpto[su]i otherwise
+ // 3. If val is NaN, return 0; otherwise return the result of step 2.
+ //
+ // This avoids the result being undef, because values in range [f_min, f_max] by definition fit into the
+ // destination type. It creates an undef temporary, but *producing* undef is not UB. Our use of
+ // undef does not introduce any non-determinism either.
+ // More importantly, the above procedure correctly implements saturating conversion.
+ // Proof (sketch):
+ // If val is NaN, 0 is returned by definition.
+ // Otherwise, val is finite or infinite and thus can be compared with f_min and f_max.
+ // This yields three cases to consider:
+ // (1) if val in [f_min, f_max], the result of fpto[su]i is returned, which agrees with
+ // saturating conversion for inputs in that range.
+ // (2) if val > f_max, then val is larger than int_ty::MAX. This holds even if f_max is rounded
+ // (i.e., if f_max < int_ty::MAX) because in those cases, nextUp(f_max) is already larger
+ // than int_ty::MAX. Because val is larger than int_ty::MAX, the return value of int_ty::MAX
+ // is correct.
+ // (3) if val < f_min, then val is smaller than int_ty::MIN. As shown earlier, f_min exactly equals
+ // int_ty::MIN and therefore the return value of int_ty::MIN is correct.
+ // QED.
+
+ let float_bits_to_llval = |bx: &mut Self, bits| {
+ let bits_llval = match float_width {
+ 32 => bx.cx().const_u32(bits as u32),
+ 64 => bx.cx().const_u64(bits as u64),
+ n => bug!("unsupported float width {}", n),
+ };
+ bx.bitcast(bits_llval, float_ty)
+ };
+ let (f_min, f_max) = match float_width {
+ 32 => compute_clamp_bounds_single(signed, int_width),
+ 64 => compute_clamp_bounds_double(signed, int_width),
+ n => bug!("unsupported float width {}", n),
+ };
+ let f_min = float_bits_to_llval(self, f_min);
+ let f_max = float_bits_to_llval(self, f_max);
+ let int_max = self.cx.const_uint_big(int_ty, int_max(signed, int_width));
+ let int_min = self.cx.const_uint_big(int_ty, int_min(signed, int_width) as u128);
+ let zero = self.cx.const_uint(int_ty, 0);
+
+ // If we're working with vectors, constants must be "splatted": the constant is duplicated
+ // into each lane of the vector. The algorithm stays the same, we are just using the
+ // same constant across all lanes.
+ let maybe_splat = |bx: &mut Self, val| {
+ if bx.cx().type_kind(dest_ty) == TypeKind::Vector {
+ bx.vector_splat(bx.vector_length(dest_ty), val)
+ } else {
+ val
+ }
+ };
+ let f_min = maybe_splat(self, f_min);
+ let f_max = maybe_splat(self, f_max);
+ let int_max = maybe_splat(self, int_max);
+ let int_min = maybe_splat(self, int_min);
+ let zero = maybe_splat(self, zero);
+
+ // Step 1 ...
+ let fptosui_result = if signed { self.fptosi(val, dest_ty) } else { self.fptoui(val, dest_ty) };
+ let less_or_nan = self.fcmp(RealPredicate::RealULT, val, f_min);
+ let greater = self.fcmp(RealPredicate::RealOGT, val, f_max);
+
+ // Step 2: We use two comparisons and two selects, with %s1 being the
+ // result:
+ // %less_or_nan = fcmp ult %val, %f_min
+ // %greater = fcmp ogt %val, %f_max
+ // %s0 = select %less_or_nan, int_ty::MIN, %fptosi_result
+ // %s1 = select %greater, int_ty::MAX, %s0
+ // Note that %less_or_nan uses an *unordered* comparison. This
+ // comparison is true if the operands are not comparable (i.e., if val is
+ // NaN). The unordered comparison ensures that s1 becomes int_ty::MIN if
+ // val is NaN.
+ //
+ // Performance note: Unordered comparison can be lowered to a "flipped"
+ // comparison and a negation, and the negation can be merged into the
+ // select. Therefore, it is not necessarily any more expensive than an
+ // ordered ("normal") comparison. Whether these optimizations will be
+ // performed is ultimately up to the backend, but at least x86 does
+ // perform them.
+ let s0 = self.select(less_or_nan, int_min, fptosui_result);
+ let s1 = self.select(greater, int_max, s0);
+
+ // Step 3: NaN replacement.
+ // For unsigned types, the above step already yielded int_ty::MIN == 0 if val is NaN.
+ // Therefore we only need to execute this step for signed integer types.
+ if signed {
+ // LLVM has no isNaN predicate, so we use (val == val) instead
+ let cmp = self.fcmp(RealPredicate::RealOEQ, val, val);
+ self.select(cmp, s1, zero)
+ } else {
+ s1
+ }
+ }
+
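The saturating behaviour implemented above matches what Rust's `as` casts guarantee for float-to-int conversions (round toward zero, clamp to the destination range, NaN becomes zero). A few standalone spot checks of that contract, independent of the backend:

fn main() {
    assert_eq!(3.9_f32 as i32, 3);                  // rounds toward zero
    assert_eq!(f32::INFINITY as i32, i32::MAX);     // +inf clamps to int_ty::MAX
    assert_eq!(f32::NEG_INFINITY as i32, i32::MIN); // -inf clamps to int_ty::MIN
    assert_eq!(f32::NAN as i32, 0);                 // NaN maps to zero
    assert_eq!(300.0_f32 as u8, u8::MAX);           // out-of-range clamps
    assert_eq!((-1.0_f32) as u8, 0);
}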
#[cfg(feature="master")]
pub fn shuffle_vector(&mut self, v1: RValue<'gcc>, v2: RValue<'gcc>, mask: RValue<'gcc>) -> RValue<'gcc> {
let struct_type = mask.get_type().is_struct().expect("mask of struct type");
diff --git a/compiler/rustc_codegen_gcc/src/common.rs b/compiler/rustc_codegen_gcc/src/common.rs
index ccb6cbbc2..aa1c271c3 100644
--- a/compiler/rustc_codegen_gcc/src/common.rs
+++ b/compiler/rustc_codegen_gcc/src/common.rs
@@ -158,10 +158,6 @@ impl<'gcc, 'tcx> ConstMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
None
}
- fn zst_to_backend(&self, _ty: Type<'gcc>) -> RValue<'gcc> {
- self.const_undef(self.type_ix(0))
- }
-
fn scalar_to_backend(&self, cv: Scalar, layout: abi::Scalar, ty: Type<'gcc>) -> RValue<'gcc> {
let bitsize = if layout.is_bool() { 1 } else { layout.size(self).bits() };
match cv {
diff --git a/compiler/rustc_codegen_gcc/src/consts.rs b/compiler/rustc_codegen_gcc/src/consts.rs
index c0b8d2181..81f533288 100644
--- a/compiler/rustc_codegen_gcc/src/consts.rs
+++ b/compiler/rustc_codegen_gcc/src/consts.rs
@@ -14,6 +14,7 @@ use rustc_target::abi::{self, Align, HasDataLayout, Primitive, Size, WrappingRan
use crate::base;
use crate::context::CodegenCx;
+use crate::errors::LinkageConstOrMutType;
use crate::type_of::LayoutGccExt;
impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
@@ -127,7 +128,7 @@ impl<'gcc, 'tcx> StaticMethods for CodegenCx<'gcc, 'tcx> {
//
// We could remove this hack whenever we decide to drop macOS 10.10 support.
if self.tcx.sess.target.options.is_like_osx {
- // The `inspect` method is okay here because we checked relocations, and
+ // The `inspect` method is okay here because we checked for provenance, and
// because we are doing this access to inspect the final interpreter state
// (not as part of the interpreter execution).
//
@@ -296,17 +297,17 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
pub fn const_alloc_to_gcc<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, alloc: ConstAllocation<'tcx>) -> RValue<'gcc> {
let alloc = alloc.inner();
- let mut llvals = Vec::with_capacity(alloc.relocations().len() + 1);
+ let mut llvals = Vec::with_capacity(alloc.provenance().len() + 1);
let dl = cx.data_layout();
let pointer_size = dl.pointer_size.bytes() as usize;
let mut next_offset = 0;
- for &(offset, alloc_id) in alloc.relocations().iter() {
+ for &(offset, alloc_id) in alloc.provenance().iter() {
let offset = offset.bytes();
assert_eq!(offset as usize as u64, offset);
let offset = offset as usize;
if offset > next_offset {
- // This `inspect` is okay since we have checked that it is not within a relocation, it
+ // This `inspect` is okay since we have checked that it is not within a pointer with provenance, it
// is within the bounds of the allocation, and it doesn't affect interpreter execution
// (we inspect the result after interpreter execution). Any undef byte is replaced with
// some arbitrary byte value.
@@ -319,7 +320,7 @@ pub fn const_alloc_to_gcc<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, alloc: ConstAl
read_target_uint( dl.endian,
// This `inspect` is okay since it is within the bounds of the allocation, it doesn't
// affect interpreter execution (we inspect the result after interpreter execution),
- // and we properly interpret the relocation as a relocation pointer offset.
+ // and we properly interpret the provenance as a relocation pointer offset.
alloc.inspect_with_uninit_and_ptr_outside_interpreter(offset..(offset + pointer_size)),
)
.expect("const_alloc_to_llvm: could not read relocation pointer")
@@ -336,7 +337,7 @@ pub fn const_alloc_to_gcc<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, alloc: ConstAl
}
if alloc.len() >= next_offset {
let range = next_offset..alloc.len();
+ // This `inspect` is okay since we have checked that it is after all provenance, it is
+ // This `inspect` is okay since we have check that it is after all provenance, it is
// within the bounds of the allocation, and it doesn't affect interpreter execution (we
// inspect the result after interpreter execution). Any undef byte is replaced with some
// arbitrary byte value.
@@ -368,10 +369,7 @@ fn check_and_apply_linkage<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, attrs: &Codeg
cx.layout_of(mt.ty).gcc_type(cx, true)
}
else {
- cx.sess().span_fatal(
- span,
- "must have type `*const T` or `*mut T` due to `#[linkage]` attribute",
- )
+ cx.sess().emit_fatal(LinkageConstOrMutType { span: span })
};
// Declare a symbol `foo` with the desired linkage.
let global1 = cx.declare_global_with_linkage(&sym, llty2, base::global_linkage_to_gcc(linkage));
diff --git a/compiler/rustc_codegen_gcc/src/context.rs b/compiler/rustc_codegen_gcc/src/context.rs
index 478f6d893..62a61eb85 100644
--- a/compiler/rustc_codegen_gcc/src/context.rs
+++ b/compiler/rustc_codegen_gcc/src/context.rs
@@ -13,7 +13,7 @@ use rustc_middle::mir::mono::CodegenUnit;
use rustc_middle::ty::{self, Instance, ParamEnv, PolyExistentialTraitRef, Ty, TyCtxt};
use rustc_middle::ty::layout::{FnAbiError, FnAbiOfHelpers, FnAbiRequest, HasParamEnv, HasTyCtxt, LayoutError, TyAndLayout, LayoutOfHelpers};
use rustc_session::Session;
-use rustc_span::Span;
+use rustc_span::{Span, source_map::respan};
use rustc_target::abi::{call::FnAbi, HasDataLayout, PointeeInfo, Size, TargetDataLayout, VariantIdx};
use rustc_target::spec::{HasTargetSpec, Target, TlsModel};
@@ -293,7 +293,7 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
self.is_native_int_type(typ) || self.is_non_native_int_type(typ) || typ.is_compatible_with(self.bool_type)
}
- pub fn sess(&self) -> &Session {
+ pub fn sess(&self) -> &'tcx Session {
&self.tcx.sess
}
@@ -416,10 +416,6 @@ impl<'gcc, 'tcx> MiscMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
self.codegen_unit
}
- fn used_statics(&self) -> &RefCell<Vec<RValue<'gcc>>> {
- unimplemented!();
- }
-
fn set_frame_pointer_type(&self, _llfn: RValue<'gcc>) {
// TODO(antoyo)
}
@@ -428,10 +424,6 @@ impl<'gcc, 'tcx> MiscMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
// TODO(antoyo)
}
- fn create_used_variable(&self) {
- unimplemented!();
- }
-
fn declare_c_main(&self, fn_type: Self::Type) -> Option<Self::Function> {
if self.get_declared_value("main").is_none() {
Some(self.declare_cfn("main", fn_type))
@@ -443,14 +435,6 @@ impl<'gcc, 'tcx> MiscMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
None
}
}
-
- fn compiler_used_statics(&self) -> &RefCell<Vec<RValue<'gcc>>> {
- unimplemented!()
- }
-
- fn create_compiler_used_variable(&self) {
- unimplemented!()
- }
}
impl<'gcc, 'tcx> HasTyCtxt<'tcx> for CodegenCx<'gcc, 'tcx> {
@@ -477,7 +461,7 @@ impl<'gcc, 'tcx> LayoutOfHelpers<'tcx> for CodegenCx<'gcc, 'tcx> {
#[inline]
fn handle_layout_err(&self, err: LayoutError<'tcx>, span: Span, ty: Ty<'tcx>) -> ! {
if let LayoutError::SizeOverflow(_) = err {
- self.sess().span_fatal(span, &err.to_string())
+ self.sess().emit_fatal(respan(span, err))
} else {
span_bug!(span, "failed to get layout for `{}`: {}", ty, err)
}
@@ -495,7 +479,7 @@ impl<'gcc, 'tcx> FnAbiOfHelpers<'tcx> for CodegenCx<'gcc, 'tcx> {
fn_abi_request: FnAbiRequest<'tcx>,
) -> ! {
if let FnAbiError::Layout(LayoutError::SizeOverflow(_)) = err {
- self.sess().span_fatal(span, &err.to_string())
+ self.sess().emit_fatal(respan(span, err))
} else {
match fn_abi_request {
FnAbiRequest::OfFnPtr { sig, extra_args } => {
diff --git a/compiler/rustc_codegen_gcc/src/errors.rs b/compiler/rustc_codegen_gcc/src/errors.rs
new file mode 100644
index 000000000..15ad90f90
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/src/errors.rs
@@ -0,0 +1,242 @@
+use rustc_errors::{DiagnosticArgValue, IntoDiagnosticArg};
+use rustc_macros::Diagnostic;
+use rustc_middle::ty::Ty;
+use rustc_span::{Span, Symbol};
+use std::borrow::Cow;
+
+struct ExitCode(Option<i32>);
+
+impl IntoDiagnosticArg for ExitCode {
+ fn into_diagnostic_arg(self) -> DiagnosticArgValue<'static> {
+ let ExitCode(exit_code) = self;
+ match exit_code {
+ Some(t) => t.into_diagnostic_arg(),
+ None => DiagnosticArgValue::Str(Cow::Borrowed("<signal>")),
+ }
+ }
+}
+
+#[derive(Diagnostic)]
+#[diag(codegen_gcc_ranlib_failure)]
+pub(crate) struct RanlibFailure {
+ exit_code: ExitCode,
+}
+
+impl RanlibFailure {
+ pub fn new(exit_code: Option<i32>) -> Self {
+ RanlibFailure { exit_code: ExitCode(exit_code) }
+ }
+}
+
+#[derive(Diagnostic)]
+#[diag(codegen_gcc_invalid_monomorphization_basic_integer, code = "E0511")]
+pub(crate) struct InvalidMonomorphizationBasicInteger<'a> {
+ #[primary_span]
+ pub span: Span,
+ pub name: Symbol,
+ pub ty: Ty<'a>,
+}
+
+#[derive(Diagnostic)]
+#[diag(codegen_gcc_invalid_monomorphization_invalid_float_vector, code = "E0511")]
+pub(crate) struct InvalidMonomorphizationInvalidFloatVector<'a> {
+ #[primary_span]
+ pub span: Span,
+ pub name: Symbol,
+ pub elem_ty: &'a str,
+ pub vec_ty: Ty<'a>,
+}
+
+#[derive(Diagnostic)]
+#[diag(codegen_gcc_invalid_monomorphization_not_float, code = "E0511")]
+pub(crate) struct InvalidMonomorphizationNotFloat<'a> {
+ #[primary_span]
+ pub span: Span,
+ pub name: Symbol,
+ pub ty: Ty<'a>,
+}
+
+#[derive(Diagnostic)]
+#[diag(codegen_gcc_invalid_monomorphization_unrecognized, code = "E0511")]
+pub(crate) struct InvalidMonomorphizationUnrecognized {
+ #[primary_span]
+ pub span: Span,
+ pub name: Symbol,
+}
+
+#[derive(Diagnostic)]
+#[diag(codegen_gcc_invalid_monomorphization_expected_signed_unsigned, code = "E0511")]
+pub(crate) struct InvalidMonomorphizationExpectedSignedUnsigned<'a> {
+ #[primary_span]
+ pub span: Span,
+ pub name: Symbol,
+ pub elem_ty: Ty<'a>,
+ pub vec_ty: Ty<'a>,
+}
+
+#[derive(Diagnostic)]
+#[diag(codegen_gcc_invalid_monomorphization_unsupported_element, code = "E0511")]
+pub(crate) struct InvalidMonomorphizationUnsupportedElement<'a> {
+ #[primary_span]
+ pub span: Span,
+ pub name: Symbol,
+ pub in_ty: Ty<'a>,
+ pub elem_ty: Ty<'a>,
+ pub ret_ty: Ty<'a>,
+}
+
+#[derive(Diagnostic)]
+#[diag(codegen_gcc_invalid_monomorphization_invalid_bitmask, code = "E0511")]
+pub(crate) struct InvalidMonomorphizationInvalidBitmask<'a> {
+ #[primary_span]
+ pub span: Span,
+ pub name: Symbol,
+ pub ty: Ty<'a>,
+ pub expected_int_bits: u64,
+ pub expected_bytes: u64,
+}
+
+#[derive(Diagnostic)]
+#[diag(codegen_gcc_invalid_monomorphization_simd_shuffle, code = "E0511")]
+pub(crate) struct InvalidMonomorphizationSimdShuffle<'a> {
+ #[primary_span]
+ pub span: Span,
+ pub name: Symbol,
+ pub ty: Ty<'a>,
+}
+
+#[derive(Diagnostic)]
+#[diag(codegen_gcc_invalid_monomorphization_expected_simd, code = "E0511")]
+pub(crate) struct InvalidMonomorphizationExpectedSimd<'a> {
+ #[primary_span]
+ pub span: Span,
+ pub name: Symbol,
+ pub position: &'a str,
+ pub found_ty: Ty<'a>,
+}
+
+#[derive(Diagnostic)]
+#[diag(codegen_gcc_invalid_monomorphization_mask_type, code = "E0511")]
+pub(crate) struct InvalidMonomorphizationMaskType<'a> {
+ #[primary_span]
+ pub span: Span,
+ pub name: Symbol,
+ pub ty: Ty<'a>,
+}
+
+#[derive(Diagnostic)]
+#[diag(codegen_gcc_invalid_monomorphization_return_length, code = "E0511")]
+pub(crate) struct InvalidMonomorphizationReturnLength<'a> {
+ #[primary_span]
+ pub span: Span,
+ pub name: Symbol,
+ pub in_len: u64,
+ pub ret_ty: Ty<'a>,
+ pub out_len: u64,
+}
+
+#[derive(Diagnostic)]
+#[diag(codegen_gcc_invalid_monomorphization_return_length_input_type, code = "E0511")]
+pub(crate) struct InvalidMonomorphizationReturnLengthInputType<'a> {
+ #[primary_span]
+ pub span: Span,
+ pub name: Symbol,
+ pub in_len: u64,
+ pub in_ty: Ty<'a>,
+ pub ret_ty: Ty<'a>,
+ pub out_len: u64,
+}
+
+#[derive(Diagnostic)]
+#[diag(codegen_gcc_invalid_monomorphization_return_element, code = "E0511")]
+pub(crate) struct InvalidMonomorphizationReturnElement<'a> {
+ #[primary_span]
+ pub span: Span,
+ pub name: Symbol,
+ pub in_elem: Ty<'a>,
+ pub in_ty: Ty<'a>,
+ pub ret_ty: Ty<'a>,
+ pub out_ty: Ty<'a>,
+}
+
+#[derive(Diagnostic)]
+#[diag(codegen_gcc_invalid_monomorphization_return_type, code = "E0511")]
+pub(crate) struct InvalidMonomorphizationReturnType<'a> {
+ #[primary_span]
+ pub span: Span,
+ pub name: Symbol,
+ pub in_elem: Ty<'a>,
+ pub in_ty: Ty<'a>,
+ pub ret_ty: Ty<'a>,
+}
+
+#[derive(Diagnostic)]
+#[diag(codegen_gcc_invalid_monomorphization_inserted_type, code = "E0511")]
+pub(crate) struct InvalidMonomorphizationInsertedType<'a> {
+ #[primary_span]
+ pub span: Span,
+ pub name: Symbol,
+ pub in_elem: Ty<'a>,
+ pub in_ty: Ty<'a>,
+ pub out_ty: Ty<'a>,
+}
+
+#[derive(Diagnostic)]
+#[diag(codegen_gcc_invalid_monomorphization_return_integer_type, code = "E0511")]
+pub(crate) struct InvalidMonomorphizationReturnIntegerType<'a> {
+ #[primary_span]
+ pub span: Span,
+ pub name: Symbol,
+ pub ret_ty: Ty<'a>,
+ pub out_ty: Ty<'a>,
+}
+
+#[derive(Diagnostic)]
+#[diag(codegen_gcc_invalid_monomorphization_mismatched_lengths, code = "E0511")]
+pub(crate) struct InvalidMonomorphizationMismatchedLengths {
+ #[primary_span]
+ pub span: Span,
+ pub name: Symbol,
+ pub m_len: u64,
+ pub v_len: u64,
+}
+
+#[derive(Diagnostic)]
+#[diag(codegen_gcc_invalid_monomorphization_unsupported_cast, code = "E0511")]
+pub(crate) struct InvalidMonomorphizationUnsupportedCast<'a> {
+ #[primary_span]
+ pub span: Span,
+ pub name: Symbol,
+ pub in_ty: Ty<'a>,
+ pub in_elem: Ty<'a>,
+ pub ret_ty: Ty<'a>,
+ pub out_elem: Ty<'a>,
+}
+
+#[derive(Diagnostic)]
+#[diag(codegen_gcc_invalid_monomorphization_unsupported_operation, code = "E0511")]
+pub(crate) struct InvalidMonomorphizationUnsupportedOperation<'a> {
+ #[primary_span]
+ pub span: Span,
+ pub name: Symbol,
+ pub in_ty: Ty<'a>,
+ pub in_elem: Ty<'a>,
+}
+
+#[derive(Diagnostic)]
+#[diag(codegen_gcc_linkage_const_or_mut_type)]
+pub(crate) struct LinkageConstOrMutType {
+ #[primary_span]
+ pub span: Span
+}
+
+#[derive(Diagnostic)]
+#[diag(codegen_gcc_lto_not_supported)]
+pub(crate) struct LTONotSupported;
+
+#[derive(Diagnostic)]
+#[diag(codegen_gcc_unwinding_inline_asm)]
+pub(crate) struct UnwindingInlineAsm {
+ #[primary_span]
+ pub span: Span
+}
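The structs above replace the old span_invalid_monomorphization_error calls with derived diagnostics: each #[diag(...)] slug names a Fluent message in the codegen_gcc locale bundle, and the struct fields are interpolated into that message. A minimal sketch of the pattern follows; the slug and struct are illustrative assumptions, not part of this patch, and a matching Fluent entry would have to exist for the derive to compile.

use rustc_macros::Diagnostic;
use rustc_middle::ty::Ty;
use rustc_span::{Span, Symbol};

// Hypothetical diagnostic in the same style as the structs above; the
// `codegen_gcc_example_error` slug is an assumption, not a real message.
#[derive(Diagnostic)]
#[diag(codegen_gcc_example_error, code = "E0511")]
pub(crate) struct ExampleError<'a> {
    #[primary_span]
    pub span: Span,   // reported location
    pub name: Symbol, // available to the Fluent message as {$name}
    pub ty: Ty<'a>,   // available to the Fluent message as {$ty}
}

// Emission then becomes a single structured call instead of a format! string:
// tcx.sess.emit_err(ExampleError { span, name, ty });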
diff --git a/compiler/rustc_codegen_gcc/src/intrinsic/mod.rs b/compiler/rustc_codegen_gcc/src/intrinsic/mod.rs
index 5fbdedac0..49be6c649 100644
--- a/compiler/rustc_codegen_gcc/src/intrinsic/mod.rs
+++ b/compiler/rustc_codegen_gcc/src/intrinsic/mod.rs
@@ -4,7 +4,7 @@ mod simd;
use gccjit::{ComparisonOp, Function, RValue, ToRValue, Type, UnaryOp, FunctionType};
use rustc_codegen_ssa::MemFlags;
use rustc_codegen_ssa::base::wants_msvc_seh;
-use rustc_codegen_ssa::common::{IntPredicate, span_invalid_monomorphization_error};
+use rustc_codegen_ssa::common::IntPredicate;
use rustc_codegen_ssa::mir::operand::{OperandRef, OperandValue};
use rustc_codegen_ssa::mir::place::PlaceRef;
use rustc_codegen_ssa::traits::{ArgAbiMethods, BaseTypeMethods, BuilderMethods, ConstMethods, IntrinsicCallMethods};
@@ -20,6 +20,7 @@ use crate::abi::GccType;
use crate::builder::Builder;
use crate::common::{SignType, TypeReflection};
use crate::context::CodegenCx;
+use crate::errors::InvalidMonomorphizationBasicInteger;
use crate::type_of::LayoutGccExt;
use crate::intrinsic::simd::generic_simd_intrinsic;
@@ -99,7 +100,7 @@ impl<'a, 'gcc, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
_ if simple.is_some() => {
// FIXME(antoyo): remove this cast when the API supports function.
let func = unsafe { std::mem::transmute(simple.expect("simple")) };
- self.call(self.type_void(), func, &args.iter().map(|arg| arg.immediate()).collect::<Vec<_>>(), None)
+ self.call(self.type_void(), None, func, &args.iter().map(|arg| arg.immediate()).collect::<Vec<_>>(), None)
},
sym::likely => {
self.expect(args[0].immediate(), true)
@@ -130,7 +131,7 @@ impl<'a, 'gcc, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
sym::volatile_load | sym::unaligned_volatile_load => {
let tp_ty = substs.type_at(0);
let mut ptr = args[0].immediate();
- if let PassMode::Cast(ty) = fn_abi.ret.mode {
+ if let PassMode::Cast(ty, _) = &fn_abi.ret.mode {
ptr = self.pointercast(ptr, self.type_ptr_to(ty.gcc_type(self)));
}
let load = self.volatile_load(ptr.get_type(), ptr);
@@ -242,15 +243,7 @@ impl<'a, 'gcc, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
_ => bug!(),
},
None => {
- span_invalid_monomorphization_error(
- tcx.sess,
- span,
- &format!(
- "invalid monomorphization of `{}` intrinsic: \
- expected basic integer type, found `{}`",
- name, ty
- ),
- );
+ tcx.sess.emit_err(InvalidMonomorphizationBasicInteger { span, name, ty });
return;
}
}
@@ -309,6 +302,18 @@ impl<'a, 'gcc, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
return;
}
+ sym::ptr_mask => {
+ let usize_type = self.context.new_type::<usize>();
+ let void_ptr_type = self.context.new_type::<*const ()>();
+
+ let ptr = args[0].immediate();
+ let mask = args[1].immediate();
+
+ let addr = self.bitcast(ptr, usize_type);
+ let masked = self.and(addr, mask);
+ self.bitcast(masked, void_ptr_type)
+ },
+
_ if name_str.starts_with("simd_") => {
match generic_simd_intrinsic(self, name, callee_ty, args, ret_ty, llret_ty, span) {
Ok(llval) => llval,
@@ -320,7 +325,7 @@ impl<'a, 'gcc, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
};
if !fn_abi.ret.is_ignore() {
- if let PassMode::Cast(ty) = fn_abi.ret.mode {
+ if let PassMode::Cast(ty, _) = &fn_abi.ret.mode {
let ptr_llty = self.type_ptr_to(ty.gcc_type(self));
let ptr = self.pointercast(result.llval, ptr_llty);
self.store(llval, ptr, result.align);
@@ -336,7 +341,7 @@ impl<'a, 'gcc, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
fn abort(&mut self) {
let func = self.context.get_builtin_function("abort");
let func: RValue<'gcc> = unsafe { std::mem::transmute(func) };
- self.call(self.type_void(), func, &[], None);
+ self.call(self.type_void(), None, func, &[], None);
}
fn assume(&mut self, value: Self::Value) {
@@ -416,7 +421,7 @@ impl<'gcc, 'tcx> ArgAbiExt<'gcc, 'tcx> for ArgAbi<'tcx, Ty<'tcx>> {
else if self.is_unsized_indirect() {
bug!("unsized `ArgAbi` must be handled through `store_fn_arg`");
}
- else if let PassMode::Cast(cast) = self.mode {
+ else if let PassMode::Cast(ref cast, _) = self.mode {
// FIXME(eddyb): Figure out when the simpler Store is safe, clang
// uses it for i16 -> {i8, i8}, but not for i24 -> {i8, i8, i8}.
let can_store_through_cast_ptr = false;
@@ -481,7 +486,7 @@ impl<'gcc, 'tcx> ArgAbiExt<'gcc, 'tcx> for ArgAbi<'tcx, Ty<'tcx>> {
PassMode::Indirect { extra_attrs: Some(_), .. } => {
OperandValue::Ref(next(), Some(next()), self.layout.align.abi).store(bx, dst);
},
- PassMode::Direct(_) | PassMode::Indirect { extra_attrs: None, .. } | PassMode::Cast(_) => {
+ PassMode::Direct(_) | PassMode::Indirect { extra_attrs: None, .. } | PassMode::Cast(..) => {
let next_arg = next();
self.store(bx, next_arg, dst);
},
@@ -1119,7 +1124,7 @@ fn try_intrinsic<'gcc, 'tcx>(bx: &mut Builder<'_, 'gcc, 'tcx>, try_func: RValue<
// NOTE: the `|| true` here is to use the panic=abort strategy with panic=unwind too
if bx.sess().panic_strategy() == PanicStrategy::Abort || true {
// TODO(bjorn3): Properly implement unwinding and remove the `|| true` once this is done.
- bx.call(bx.type_void(), try_func, &[data], None);
+ bx.call(bx.type_void(), None, try_func, &[data], None);
// Return 0 unconditionally from the intrinsic call;
// we can never unwind.
let ret_align = bx.tcx.data_layout.i32_align.abi;
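The new sym::ptr_mask arm above lowers the ptr_mask intrinsic by reinterpreting the pointer as a usize, AND-ing it with the mask, and reinterpreting the result as a pointer again. A behavioral sketch in plain Rust, not the gccjit builder calls, and ignoring the pointer-provenance guarantees the real intrinsic preserves:

// Sketch only: `as` casts stand in for the bitcasts emitted above.
fn ptr_mask_sketch<T>(ptr: *const T, mask: usize) -> *const T {
    let addr = ptr as usize;  // bitcast(ptr, usize_type)
    let masked = addr & mask; // and(addr, mask)
    masked as *const T        // bitcast(masked, void_ptr_type)
}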
diff --git a/compiler/rustc_codegen_gcc/src/intrinsic/simd.rs b/compiler/rustc_codegen_gcc/src/intrinsic/simd.rs
index 2401f3350..12e416f62 100644
--- a/compiler/rustc_codegen_gcc/src/intrinsic/simd.rs
+++ b/compiler/rustc_codegen_gcc/src/intrinsic/simd.rs
@@ -2,7 +2,7 @@ use std::cmp::Ordering;
use gccjit::{BinaryOp, RValue, Type, ToRValue};
use rustc_codegen_ssa::base::compare_simd_types;
-use rustc_codegen_ssa::common::{TypeKind, span_invalid_monomorphization_error};
+use rustc_codegen_ssa::common::TypeKind;
use rustc_codegen_ssa::mir::operand::OperandRef;
use rustc_codegen_ssa::mir::place::PlaceRef;
use rustc_codegen_ssa::traits::{BaseTypeMethods, BuilderMethods};
@@ -14,43 +14,48 @@ use rustc_span::{Span, Symbol, sym};
use rustc_target::abi::Align;
use crate::builder::Builder;
+use crate::errors::{
+ InvalidMonomorphizationInvalidFloatVector,
+ InvalidMonomorphizationNotFloat,
+ InvalidMonomorphizationUnrecognized,
+ InvalidMonomorphizationExpectedSignedUnsigned,
+ InvalidMonomorphizationUnsupportedElement,
+ InvalidMonomorphizationInvalidBitmask,
+ InvalidMonomorphizationSimdShuffle,
+ InvalidMonomorphizationExpectedSimd,
+ InvalidMonomorphizationMaskType,
+ InvalidMonomorphizationReturnLength,
+ InvalidMonomorphizationReturnLengthInputType,
+ InvalidMonomorphizationReturnElement,
+ InvalidMonomorphizationReturnType,
+ InvalidMonomorphizationInsertedType,
+ InvalidMonomorphizationReturnIntegerType,
+ InvalidMonomorphizationMismatchedLengths,
+ InvalidMonomorphizationUnsupportedCast,
+ InvalidMonomorphizationUnsupportedOperation
+};
use crate::intrinsic;
pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(bx: &mut Builder<'a, 'gcc, 'tcx>, name: Symbol, callee_ty: Ty<'tcx>, args: &[OperandRef<'tcx, RValue<'gcc>>], ret_ty: Ty<'tcx>, llret_ty: Type<'gcc>, span: Span) -> Result<RValue<'gcc>, ()> {
// macros for error handling:
- #[allow(unused_macro_rules)]
- macro_rules! emit_error {
- ($msg: tt) => {
- emit_error!($msg, )
- };
- ($msg: tt, $($fmt: tt)*) => {
- span_invalid_monomorphization_error(
- bx.sess(), span,
- &format!(concat!("invalid monomorphization of `{}` intrinsic: ", $msg),
- name, $($fmt)*));
- }
- }
-
macro_rules! return_error {
- ($($fmt: tt)*) => {
+ ($err:expr) => {
{
- emit_error!($($fmt)*);
+ bx.sess().emit_err($err);
return Err(());
}
}
}
-
macro_rules! require {
- ($cond: expr, $($fmt: tt)*) => {
+ ($cond:expr, $err:expr) => {
if !$cond {
- return_error!($($fmt)*);
+ return_error!($err);
}
- };
+ }
}
-
macro_rules! require_simd {
($ty: expr, $position: expr) => {
- require!($ty.is_simd(), "expected SIMD {} type, found non-SIMD `{}`", $position, $ty)
+ require!($ty.is_simd(), InvalidMonomorphizationExpectedSimd { span, name, position: $position, found_ty: $ty })
};
}
@@ -82,10 +87,7 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(bx: &mut Builder<'a, 'gcc, 'tcx>,
bx.load(int_ty, ptr, Align::ONE)
}
_ => return_error!(
- "invalid bitmask `{}`, expected `u{}` or `[u8; {}]`",
- mask_ty,
- expected_int_bits,
- expected_bytes
+ InvalidMonomorphizationInvalidBitmask { span, name, ty: mask_ty, expected_int_bits, expected_bytes }
),
};
@@ -127,18 +129,11 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(bx: &mut Builder<'a, 'gcc, 'tcx>,
let (out_len, out_ty) = ret_ty.simd_size_and_type(bx.tcx());
require!(
in_len == out_len,
- "expected return type with length {} (same as input type `{}`), \
- found `{}` with length {}",
- in_len,
- in_ty,
- ret_ty,
- out_len
+ InvalidMonomorphizationReturnLengthInputType { span, name, in_len, in_ty, ret_ty, out_len }
);
require!(
bx.type_kind(bx.element_type(llret_ty)) == TypeKind::Integer,
- "expected return type with integer elements, found `{}` with non-integer `{}`",
- ret_ty,
- out_ty
+ InvalidMonomorphizationReturnIntegerType { span, name, ret_ty, out_ty }
);
return Ok(compare_simd_types(
@@ -163,8 +158,7 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(bx: &mut Builder<'a, 'gcc, 'tcx>,
})
}
_ => return_error!(
- "simd_shuffle index must be an array of `u32`, got `{}`",
- args[2].layout.ty
+ InvalidMonomorphizationSimdShuffle { span, name, ty: args[2].layout.ty }
),
}
}
@@ -179,19 +173,11 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(bx: &mut Builder<'a, 'gcc, 'tcx>,
let (out_len, out_ty) = ret_ty.simd_size_and_type(bx.tcx());
require!(
out_len == n,
- "expected return type of length {}, found `{}` with length {}",
- n,
- ret_ty,
- out_len
+ InvalidMonomorphizationReturnLength { span, name, in_len: n, ret_ty, out_len }
);
require!(
in_elem == out_ty,
- "expected return element type `{}` (element of input `{}`), \
- found `{}` with element type `{}`",
- in_elem,
- in_ty,
- ret_ty,
- out_ty
+ InvalidMonomorphizationReturnElement { span, name, in_elem, in_ty, ret_ty, out_ty }
);
let vector = args[2].immediate();
@@ -207,10 +193,7 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(bx: &mut Builder<'a, 'gcc, 'tcx>,
if name == sym::simd_insert {
require!(
in_elem == arg_tys[2],
- "expected inserted type `{}` (element of input `{}`), found `{}`",
- in_elem,
- in_ty,
- arg_tys[2]
+ InvalidMonomorphizationInsertedType { span, name, in_elem, in_ty, out_ty: arg_tys[2] }
);
let vector = args[0].immediate();
let index = args[1].immediate();
@@ -263,10 +246,7 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(bx: &mut Builder<'a, 'gcc, 'tcx>,
if name == sym::simd_extract {
require!(
ret_ty == in_elem,
- "expected return type `{}` (element of input `{}`), found `{}`",
- in_elem,
- in_ty,
- ret_ty
+ InvalidMonomorphizationReturnType { span, name, in_elem, in_ty, ret_ty }
);
let vector = args[0].immediate();
return Ok(bx.context.new_vector_access(None, vector, args[1].immediate()).to_rvalue());
@@ -279,13 +259,11 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(bx: &mut Builder<'a, 'gcc, 'tcx>,
let (v_len, _) = arg_tys[1].simd_size_and_type(bx.tcx());
require!(
m_len == v_len,
- "mismatched lengths: mask length `{}` != other vector length `{}`",
- m_len,
- v_len
+ InvalidMonomorphizationMismatchedLengths { span, name, m_len, v_len }
);
match m_elem_ty.kind() {
ty::Int(_) => {}
- _ => return_error!("mask element type is `{}`, expected `i_`", m_elem_ty),
+ _ => return_error!(InvalidMonomorphizationMaskType { span, name, ty: m_elem_ty }),
}
return Ok(bx.vector_select(args[0].immediate(), args[1].immediate(), args[2].immediate()));
}
@@ -295,12 +273,7 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(bx: &mut Builder<'a, 'gcc, 'tcx>,
let (out_len, out_elem) = ret_ty.simd_size_and_type(bx.tcx());
require!(
in_len == out_len,
- "expected return type with length {} (same as input type `{}`), \
- found `{}` with length {}",
- in_len,
- in_ty,
- ret_ty,
- out_len
+ InvalidMonomorphizationReturnLengthInputType { span, name, in_len, in_ty, ret_ty, out_len }
);
// casting cares about nominal type, not just structural type
if in_elem == out_elem {
@@ -412,13 +385,8 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(bx: &mut Builder<'a, 'gcc, 'tcx>,
}
_ => { /* Unsupported. Fallthrough. */ }
}
- require!(
- false,
- "unsupported cast from `{}` with element `{}` to `{}` with element `{}`",
- in_ty,
- in_elem,
- ret_ty,
- out_elem
+ return_error!(
+ InvalidMonomorphizationUnsupportedCast { span, name, in_ty, in_elem, ret_ty, out_elem }
);
}
@@ -431,10 +399,7 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(bx: &mut Builder<'a, 'gcc, 'tcx>,
})*
_ => {},
}
- require!(false,
- "unsupported operation on `{}` with element `{}`",
- in_ty,
- in_elem)
+ return_error!(InvalidMonomorphizationUnsupportedOperation { span, name, in_ty, in_elem })
})*
}
}
@@ -448,23 +413,14 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(bx: &mut Builder<'a, 'gcc, 'tcx>,
span: Span,
args: &[OperandRef<'tcx, RValue<'gcc>>],
) -> Result<RValue<'gcc>, ()> {
- macro_rules! emit_error {
- ($msg: tt, $($fmt: tt)*) => {
- span_invalid_monomorphization_error(
- bx.sess(), span,
- &format!(concat!("invalid monomorphization of `{}` intrinsic: ", $msg),
- name, $($fmt)*));
- }
- }
macro_rules! return_error {
- ($($fmt: tt)*) => {
+ ($err:expr) => {
{
- emit_error!($($fmt)*);
+ bx.sess().emit_err($err);
return Err(());
}
}
}
-
let (elem_ty_str, elem_ty) =
if let ty::Float(f) = in_elem.kind() {
let elem_ty = bx.cx.type_float_from_ty(*f);
@@ -472,16 +428,12 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(bx: &mut Builder<'a, 'gcc, 'tcx>,
32 => ("f32", elem_ty),
64 => ("f64", elem_ty),
_ => {
- return_error!(
- "unsupported element type `{}` of floating-point vector `{}`",
- f.name_str(),
- in_ty
- );
+ return_error!(InvalidMonomorphizationInvalidFloatVector { span, name, elem_ty: f.name_str(), vec_ty: in_ty });
}
}
}
else {
- return_error!("`{}` is not a floating-point type", in_ty);
+ return_error!(InvalidMonomorphizationNotFloat { span, name, ty: in_ty });
};
let vec_ty = bx.cx.type_vector(elem_ty, in_len);
@@ -504,12 +456,12 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(bx: &mut Builder<'a, 'gcc, 'tcx>,
sym::simd_fsqrt => ("sqrt", bx.type_func(&[vec_ty], vec_ty)),
sym::simd_round => ("round", bx.type_func(&[vec_ty], vec_ty)),
sym::simd_trunc => ("trunc", bx.type_func(&[vec_ty], vec_ty)),
- _ => return_error!("unrecognized intrinsic `{}`", name),
+ _ => return_error!(InvalidMonomorphizationUnrecognized { span, name })
};
let llvm_name = &format!("llvm.{0}.v{1}{2}", intr_name, in_len, elem_ty_str);
let function = intrinsic::llvm::intrinsic(llvm_name, &bx.cx);
let function: RValue<'gcc> = unsafe { std::mem::transmute(function) };
- let c = bx.call(fn_ty, function, &args.iter().map(|arg| arg.immediate()).collect::<Vec<_>>(), None);
+ let c = bx.call(fn_ty, None, function, &args.iter().map(|arg| arg.immediate()).collect::<Vec<_>>(), None);
Ok(c)
}
@@ -557,10 +509,7 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(bx: &mut Builder<'a, 'gcc, 'tcx>,
})*
_ => {},
}
- require!(false,
- "unsupported operation on `{}` with element `{}`",
- in_ty,
- in_elem)
+ return_error!(InvalidMonomorphizationUnsupportedOperation { span, name, in_ty, in_elem })
})*
}
}
@@ -579,12 +528,12 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(bx: &mut Builder<'a, 'gcc, 'tcx>,
ty::Int(i) => (true, i.bit_width().unwrap_or(ptr_bits), bx.cx.type_int_from_ty(i)),
ty::Uint(i) => (false, i.bit_width().unwrap_or(ptr_bits), bx.cx.type_uint_from_ty(i)),
_ => {
- return_error!(
- "expected element type `{}` of vector type `{}` \
- to be a signed or unsigned integer type",
- arg_tys[0].simd_size_and_type(bx.tcx()).1,
- arg_tys[0]
- );
+ return_error!(InvalidMonomorphizationExpectedSignedUnsigned {
+ span,
+ name,
+ elem_ty: arg_tys[0].simd_size_and_type(bx.tcx()).1,
+ vec_ty: arg_tys[0],
+ });
}
};
let builtin_name =
@@ -617,10 +566,7 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(bx: &mut Builder<'a, 'gcc, 'tcx>,
if name == sym::$name {
require!(
ret_ty == in_elem,
- "expected return type `{}` (element of input `{}`), found `{}`",
- in_elem,
- in_ty,
- ret_ty
+ InvalidMonomorphizationReturnType { span, name, in_elem, in_ty, ret_ty }
);
return match in_elem.kind() {
ty::Int(_) | ty::Uint(_) => {
@@ -644,13 +590,7 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(bx: &mut Builder<'a, 'gcc, 'tcx>,
Ok(bx.vector_reduce_op(args[0].immediate(), $vec_op))
}
}
- _ => return_error!(
- "unsupported {} from `{}` with element `{}` to `{}`",
- sym::$name,
- in_ty,
- in_elem,
- ret_ty
- ),
+ _ => return_error!(InvalidMonomorphizationUnsupportedElement { span, name, in_ty, elem_ty: in_elem, ret_ty }),
};
}
};
@@ -676,20 +616,11 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(bx: &mut Builder<'a, 'gcc, 'tcx>,
if name == sym::$name {
require!(
ret_ty == in_elem,
- "expected return type `{}` (element of input `{}`), found `{}`",
- in_elem,
- in_ty,
- ret_ty
+ InvalidMonomorphizationReturnType { span, name, in_elem, in_ty, ret_ty }
);
return match in_elem.kind() {
ty::Int(_) | ty::Uint(_) | ty::Float(_) => Ok(bx.$reduction(args[0].immediate())),
- _ => return_error!(
- "unsupported {} from `{}` with element `{}` to `{}`",
- sym::$name,
- in_ty,
- in_elem,
- ret_ty
- ),
+ _ => return_error!(InvalidMonomorphizationUnsupportedElement { span, name, in_ty, elem_ty: in_elem, ret_ty }),
};
}
};
@@ -704,22 +635,13 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(bx: &mut Builder<'a, 'gcc, 'tcx>,
let input = if !$boolean {
require!(
ret_ty == in_elem,
- "expected return type `{}` (element of input `{}`), found `{}`",
- in_elem,
- in_ty,
- ret_ty
+ InvalidMonomorphizationReturnType { span, name, in_elem, in_ty, ret_ty }
);
args[0].immediate()
} else {
match in_elem.kind() {
ty::Int(_) | ty::Uint(_) => {}
- _ => return_error!(
- "unsupported {} from `{}` with element `{}` to `{}`",
- sym::$name,
- in_ty,
- in_elem,
- ret_ty
- ),
+ _ => return_error!(InvalidMonomorphizationUnsupportedElement { span, name, in_ty, elem_ty: in_elem, ret_ty }),
}
// boolean reductions operate on vectors of i1s:
@@ -733,11 +655,7 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(bx: &mut Builder<'a, 'gcc, 'tcx>,
Ok(if !$boolean { r } else { bx.zext(r, bx.type_bool()) })
}
_ => return_error!(
- "unsupported {} from `{}` with element `{}` to `{}`",
- sym::$name,
- in_ty,
- in_elem,
- ret_ty
+ InvalidMonomorphizationUnsupportedElement { span, name, in_ty, elem_ty: in_elem, ret_ty }
),
};
}
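With span_invalid_monomorphization_error gone, the rewritten return_error! and require! macros take a single diagnostic struct expression and hand it to bx.sess().emit_err. The same control-flow shape, sketched outside the compiler with a stand-in emit function and string errors in place of the real diagnostic structs:

// Stand-in for `bx.sess().emit_err(...)` in this sketch.
fn emit(err: &str) {
    eprintln!("error: {err}");
}

macro_rules! return_error {
    ($err:expr) => {{
        emit($err);
        return Err(());
    }};
}

macro_rules! require {
    ($cond:expr, $err:expr) => {
        if !$cond {
            return_error!($err);
        }
    };
}

fn check_simd_len(len: usize) -> Result<(), ()> {
    // Mirrors `require!(cond, InvalidMonomorphization...)` in the code above.
    require!(len > 0, "expected a non-empty SIMD vector");
    Ok(())
}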
diff --git a/compiler/rustc_codegen_gcc/src/lib.rs b/compiler/rustc_codegen_gcc/src/lib.rs
index 8a206c036..accd02ab0 100644
--- a/compiler/rustc_codegen_gcc/src/lib.rs
+++ b/compiler/rustc_codegen_gcc/src/lib.rs
@@ -18,12 +18,16 @@
#![recursion_limit="256"]
#![warn(rust_2018_idioms)]
#![warn(unused_lifetimes)]
+#![deny(rustc::untranslatable_diagnostic)]
+#![deny(rustc::diagnostic_outside_of_impl)]
+extern crate rustc_apfloat;
extern crate rustc_ast;
extern crate rustc_codegen_ssa;
extern crate rustc_data_structures;
extern crate rustc_errors;
extern crate rustc_hir;
+extern crate rustc_macros;
extern crate rustc_metadata;
extern crate rustc_middle;
extern crate rustc_session;
@@ -49,6 +53,7 @@ mod context;
mod coverageinfo;
mod debuginfo;
mod declare;
+mod errors;
mod int;
mod intrinsic;
mod mono_item;
@@ -58,6 +63,7 @@ mod type_of;
use std::any::Any;
use std::sync::{Arc, Mutex};
+use crate::errors::LTONotSupported;
use gccjit::{Context, OptimizationLevel, CType};
use rustc_ast::expand::allocator::AllocatorKind;
use rustc_codegen_ssa::{CodegenResults, CompiledModule, ModuleCodegen};
@@ -96,7 +102,7 @@ pub struct GccCodegenBackend {
impl CodegenBackend for GccCodegenBackend {
fn init(&self, sess: &Session) {
if sess.lto() != Lto::No {
- sess.warn("LTO is not supported. You may get a linker error.");
+ sess.emit_warning(LTONotSupported {});
}
let temp_dir = TempDir::new().expect("cannot create temporary directory");
@@ -165,15 +171,6 @@ impl ExtraBackendMethods for GccCodegenBackend {
Ok(())
})
}
-
- fn target_cpu<'b>(&self, _sess: &'b Session) -> &'b str {
- unimplemented!();
- }
-
- fn tune_cpu<'b>(&self, _sess: &'b Session) -> Option<&'b str> {
- None
- // TODO(antoyo)
- }
}
pub struct ModuleBuffer;
@@ -204,7 +201,6 @@ impl WriteBackendMethods for GccCodegenBackend {
type Module = GccContext;
type TargetMachine = ();
type ModuleBuffer = ModuleBuffer;
- type Context = ();
type ThinData = ();
type ThinBuffer = ThinBuffer;
diff --git a/compiler/rustc_codegen_gcc/tests/run/asm.rs b/compiler/rustc_codegen_gcc/tests/run/asm.rs
index 46abbb553..38c1eac7a 100644
--- a/compiler/rustc_codegen_gcc/tests/run/asm.rs
+++ b/compiler/rustc_codegen_gcc/tests/run/asm.rs
@@ -3,11 +3,12 @@
// Run-time:
// status: 0
-#![feature(asm_const, asm_sym)]
+#![feature(asm_const)]
use std::arch::{asm, global_asm};
-global_asm!("
+global_asm!(
+ "
.global add_asm
add_asm:
mov rax, rdi
@@ -132,7 +133,9 @@ fn main() {
assert_eq!(x, 43);
// check sym fn
- extern "C" fn foo() -> u64 { 42 }
+ extern "C" fn foo() -> u64 {
+ 42
+ }
let x: u64;
unsafe {
asm!("call {}", sym foo, lateout("rax") x);
diff --git a/compiler/rustc_codegen_gcc/tests/run/int.rs b/compiler/rustc_codegen_gcc/tests/run/int.rs
index 2b90e4ae8..75779622b 100644
--- a/compiler/rustc_codegen_gcc/tests/run/int.rs
+++ b/compiler/rustc_codegen_gcc/tests/run/int.rs
@@ -3,7 +3,7 @@
// Run-time:
// status: 0
-#![feature(bench_black_box, const_black_box, core_intrinsics, start)]
+#![feature(const_black_box, core_intrinsics, start)]
#![no_std]
diff --git a/compiler/rustc_codegen_llvm/Cargo.toml b/compiler/rustc_codegen_llvm/Cargo.toml
index f9a5463ef..0ad39c240 100644
--- a/compiler/rustc_codegen_llvm/Cargo.toml
+++ b/compiler/rustc_codegen_llvm/Cargo.toml
@@ -5,14 +5,13 @@ edition = "2021"
[lib]
test = false
-doctest = false
[dependencies]
bitflags = "1.0"
cstr = "0.2"
libc = "0.2"
-libloading = "0.7.1"
measureme = "10.0.0"
+object = { version = "0.29.0", default-features = false, features = ["std", "read_core", "archive", "coff", "elf", "macho", "pe"] }
tracing = "0.1"
rustc_middle = { path = "../rustc_middle" }
rustc-demangle = "0.1.21"
@@ -34,3 +33,4 @@ rustc_target = { path = "../rustc_target" }
smallvec = { version = "1.8.1", features = ["union", "may_dangle"] }
rustc_ast = { path = "../rustc_ast" }
rustc_span = { path = "../rustc_span" }
+tempfile = "3.2.0"
diff --git a/compiler/rustc_codegen_llvm/src/abi.rs b/compiler/rustc_codegen_llvm/src/abi.rs
index 9eb3574e7..d478efc86 100644
--- a/compiler/rustc_codegen_llvm/src/abi.rs
+++ b/compiler/rustc_codegen_llvm/src/abi.rs
@@ -19,6 +19,7 @@ use rustc_target::abi::call::ArgAbi;
pub use rustc_target::abi::call::*;
use rustc_target::abi::{self, HasDataLayout, Int};
pub use rustc_target::spec::abi::Abi;
+use rustc_target::spec::SanitizerSet;
use libc::c_uint;
use smallvec::SmallVec;
@@ -90,6 +91,13 @@ fn get_attrs<'ll>(this: &ArgAttributes, cx: &CodegenCx<'ll, '_>) -> SmallVec<[&'
if regular.contains(ArgAttribute::NoAliasMutRef) && should_use_mutable_noalias(cx) {
attrs.push(llvm::AttributeKind::NoAlias.create_attr(cx.llcx));
}
+ } else if cx.tcx.sess.opts.unstable_opts.sanitizer.contains(SanitizerSet::MEMORY) {
+ // If we're not optimising, *but* memory sanitizer is on, emit noundef, since it affects
+ // memory sanitizer's behavior.
+
+ if regular.contains(ArgAttribute::NoUndef) {
+ attrs.push(llvm::AttributeKind::NoUndef.create_attr(cx.llcx));
+ }
}
attrs
@@ -213,7 +221,7 @@ impl<'ll, 'tcx> ArgAbiExt<'ll, 'tcx> for ArgAbi<'tcx, Ty<'tcx>> {
OperandValue::Ref(val, None, self.layout.align.abi).store(bx, dst)
} else if self.is_unsized_indirect() {
bug!("unsized `ArgAbi` must be handled through `store_fn_arg`");
- } else if let PassMode::Cast(cast) = self.mode {
+ } else if let PassMode::Cast(cast, _) = &self.mode {
// FIXME(eddyb): Figure out when the simpler Store is safe, clang
// uses it for i16 -> {i8, i8}, but not for i24 -> {i8, i8, i8}.
let can_store_through_cast_ptr = false;
@@ -283,7 +291,7 @@ impl<'ll, 'tcx> ArgAbiExt<'ll, 'tcx> for ArgAbi<'tcx, Ty<'tcx>> {
}
PassMode::Direct(_)
| PassMode::Indirect { attrs: _, extra_attrs: None, on_stack: _ }
- | PassMode::Cast(_) => {
+ | PassMode::Cast(..) => {
let next_arg = next();
self.store(bx, next_arg, dst);
}
@@ -325,20 +333,18 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
fn llvm_type(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type {
// Ignore "extra" args from the call site for C variadic functions.
// Only the "fixed" args are part of the LLVM function signature.
- let args = if self.c_variadic { &self.args[..self.fixed_count] } else { &self.args };
+ let args =
+ if self.c_variadic { &self.args[..self.fixed_count as usize] } else { &self.args };
- let args_capacity: usize = args.iter().map(|arg|
- if arg.pad.is_some() { 1 } else { 0 } +
- if let PassMode::Pair(_, _) = arg.mode { 2 } else { 1 }
- ).sum();
+ // This capacity calculation is approximate.
let mut llargument_tys = Vec::with_capacity(
- if let PassMode::Indirect { .. } = self.ret.mode { 1 } else { 0 } + args_capacity,
+ self.args.len() + if let PassMode::Indirect { .. } = self.ret.mode { 1 } else { 0 },
);
- let llreturn_ty = match self.ret.mode {
+ let llreturn_ty = match &self.ret.mode {
PassMode::Ignore => cx.type_void(),
PassMode::Direct(_) | PassMode::Pair(..) => self.ret.layout.immediate_llvm_type(cx),
- PassMode::Cast(cast) => cast.llvm_type(cx),
+ PassMode::Cast(cast, _) => cast.llvm_type(cx),
PassMode::Indirect { .. } => {
llargument_tys.push(cx.type_ptr_to(self.ret.memory_ty(cx)));
cx.type_void()
@@ -346,12 +352,7 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
};
for arg in args {
- // add padding
- if let Some(ty) = arg.pad {
- llargument_tys.push(ty.llvm_type(cx));
- }
-
- let llarg_ty = match arg.mode {
+ let llarg_ty = match &arg.mode {
PassMode::Ignore => continue,
PassMode::Direct(_) => arg.layout.immediate_llvm_type(cx),
PassMode::Pair(..) => {
@@ -366,7 +367,13 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
llargument_tys.push(ptr_layout.scalar_pair_element_llvm_type(cx, 1, true));
continue;
}
- PassMode::Cast(cast) => cast.llvm_type(cx),
+ PassMode::Cast(cast, pad_i32) => {
+ // add padding
+ if *pad_i32 {
+ llargument_tys.push(Reg::i32().llvm_type(cx));
+ }
+ cast.llvm_type(cx)
+ }
PassMode::Indirect { attrs: _, extra_attrs: None, on_stack: _ } => {
cx.type_ptr_to(arg.memory_ty(cx))
}
@@ -426,46 +433,46 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
i += 1;
i - 1
};
- match self.ret.mode {
- PassMode::Direct(ref attrs) => {
+ match &self.ret.mode {
+ PassMode::Direct(attrs) => {
attrs.apply_attrs_to_llfn(llvm::AttributePlace::ReturnValue, cx, llfn);
}
- PassMode::Indirect { ref attrs, extra_attrs: _, on_stack } => {
+ PassMode::Indirect { attrs, extra_attrs: _, on_stack } => {
assert!(!on_stack);
let i = apply(attrs);
let sret = llvm::CreateStructRetAttr(cx.llcx, self.ret.layout.llvm_type(cx));
attributes::apply_to_llfn(llfn, llvm::AttributePlace::Argument(i), &[sret]);
}
- PassMode::Cast(cast) => {
+ PassMode::Cast(cast, _) => {
cast.attrs.apply_attrs_to_llfn(llvm::AttributePlace::ReturnValue, cx, llfn);
}
_ => {}
}
- for arg in &self.args {
- if arg.pad.is_some() {
- apply(&ArgAttributes::new());
- }
- match arg.mode {
+ for arg in self.args.iter() {
+ match &arg.mode {
PassMode::Ignore => {}
- PassMode::Indirect { ref attrs, extra_attrs: None, on_stack: true } => {
+ PassMode::Indirect { attrs, extra_attrs: None, on_stack: true } => {
let i = apply(attrs);
let byval = llvm::CreateByValAttr(cx.llcx, arg.layout.llvm_type(cx));
attributes::apply_to_llfn(llfn, llvm::AttributePlace::Argument(i), &[byval]);
}
- PassMode::Direct(ref attrs)
- | PassMode::Indirect { ref attrs, extra_attrs: None, on_stack: false } => {
+ PassMode::Direct(attrs)
+ | PassMode::Indirect { attrs, extra_attrs: None, on_stack: false } => {
apply(attrs);
}
- PassMode::Indirect { ref attrs, extra_attrs: Some(ref extra_attrs), on_stack } => {
+ PassMode::Indirect { attrs, extra_attrs: Some(extra_attrs), on_stack } => {
assert!(!on_stack);
apply(attrs);
apply(extra_attrs);
}
- PassMode::Pair(ref a, ref b) => {
+ PassMode::Pair(a, b) => {
apply(a);
apply(b);
}
- PassMode::Cast(cast) => {
+ PassMode::Cast(cast, pad_i32) => {
+ if *pad_i32 {
+ apply(&ArgAttributes::new());
+ }
apply(&cast.attrs);
}
}
@@ -488,17 +495,17 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
i += 1;
i - 1
};
- match self.ret.mode {
- PassMode::Direct(ref attrs) => {
+ match &self.ret.mode {
+ PassMode::Direct(attrs) => {
attrs.apply_attrs_to_callsite(llvm::AttributePlace::ReturnValue, bx.cx, callsite);
}
- PassMode::Indirect { ref attrs, extra_attrs: _, on_stack } => {
+ PassMode::Indirect { attrs, extra_attrs: _, on_stack } => {
assert!(!on_stack);
let i = apply(bx.cx, attrs);
let sret = llvm::CreateStructRetAttr(bx.cx.llcx, self.ret.layout.llvm_type(bx));
attributes::apply_to_callsite(callsite, llvm::AttributePlace::Argument(i), &[sret]);
}
- PassMode::Cast(cast) => {
+ PassMode::Cast(cast, _) => {
cast.attrs.apply_attrs_to_callsite(
llvm::AttributePlace::ReturnValue,
&bx.cx,
@@ -517,13 +524,10 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
}
}
}
- for arg in &self.args {
- if arg.pad.is_some() {
- apply(bx.cx, &ArgAttributes::new());
- }
- match arg.mode {
+ for arg in self.args.iter() {
+ match &arg.mode {
PassMode::Ignore => {}
- PassMode::Indirect { ref attrs, extra_attrs: None, on_stack: true } => {
+ PassMode::Indirect { attrs, extra_attrs: None, on_stack: true } => {
let i = apply(bx.cx, attrs);
let byval = llvm::CreateByValAttr(bx.cx.llcx, arg.layout.llvm_type(bx));
attributes::apply_to_callsite(
@@ -532,23 +536,22 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
&[byval],
);
}
- PassMode::Direct(ref attrs)
- | PassMode::Indirect { ref attrs, extra_attrs: None, on_stack: false } => {
+ PassMode::Direct(attrs)
+ | PassMode::Indirect { attrs, extra_attrs: None, on_stack: false } => {
apply(bx.cx, attrs);
}
- PassMode::Indirect {
- ref attrs,
- extra_attrs: Some(ref extra_attrs),
- on_stack: _,
- } => {
+ PassMode::Indirect { attrs, extra_attrs: Some(extra_attrs), on_stack: _ } => {
apply(bx.cx, attrs);
apply(bx.cx, extra_attrs);
}
- PassMode::Pair(ref a, ref b) => {
+ PassMode::Pair(a, b) => {
apply(bx.cx, a);
apply(bx.cx, b);
}
- PassMode::Cast(cast) => {
+ PassMode::Cast(cast, pad_i32) => {
+ if *pad_i32 {
+ apply(bx.cx, &ArgAttributes::new());
+ }
apply(bx.cx, &cast.attrs);
}
}
@@ -589,10 +592,6 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
}
impl<'tcx> AbiBuilderMethods<'tcx> for Builder<'_, '_, 'tcx> {
- fn apply_attrs_callsite(&mut self, fn_abi: &FnAbi<'tcx, Ty<'tcx>>, callsite: Self::Value) {
- fn_abi.apply_attrs_callsite(self, callsite)
- }
-
fn get_param(&mut self, index: usize) -> Self::Value {
llvm::get_param(self.llfn(), index as c_uint)
}
diff --git a/compiler/rustc_codegen_llvm/src/asm.rs b/compiler/rustc_codegen_llvm/src/asm.rs
index a53946995..017513721 100644
--- a/compiler/rustc_codegen_llvm/src/asm.rs
+++ b/compiler/rustc_codegen_llvm/src/asm.rs
@@ -3,7 +3,6 @@ use crate::builder::Builder;
use crate::common::Funclet;
use crate::context::CodegenCx;
use crate::llvm;
-use crate::llvm_util;
use crate::type_::Type;
use crate::type_of::LayoutLlvmExt;
use crate::value::Value;
@@ -20,7 +19,6 @@ use rustc_target::asm::*;
use libc::{c_char, c_uint};
use smallvec::SmallVec;
-use tracing::debug;
impl<'ll, 'tcx> AsmBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> {
fn codegen_inline_asm(
@@ -419,13 +417,6 @@ pub(crate) fn inline_asm_call<'ll>(
let constraints_ok = llvm::LLVMRustInlineAsmVerify(fty, cons.as_ptr().cast(), cons.len());
debug!("constraint verification result: {:?}", constraints_ok);
if constraints_ok {
- if unwind && llvm_util::get_version() < (13, 0, 0) {
- bx.cx.sess().span_fatal(
- line_spans[0],
- "unwinding from inline assembly is only supported on llvm >= 13.",
- );
- }
-
let v = llvm::LLVMRustInlineAsm(
fty,
asm.as_ptr().cast(),
@@ -439,9 +430,9 @@ pub(crate) fn inline_asm_call<'ll>(
);
let call = if let Some((dest, catch, funclet)) = dest_catch_funclet {
- bx.invoke(fty, v, inputs, dest, catch, funclet)
+ bx.invoke(fty, None, v, inputs, dest, catch, funclet)
} else {
- bx.call(fty, v, inputs, None)
+ bx.call(fty, None, v, inputs, None)
};
// Store mark in a metadata node so we can map LLVM errors
@@ -560,6 +551,8 @@ fn reg_to_llvm(reg: InlineAsmRegOrRegClass, layout: Option<&TyAndLayout<'_>>) ->
format!("{{{}}}", reg.name())
}
}
+ // The constraints can be retrieved from
+ // https://llvm.org/docs/LangRef.html#supported-constraint-code-list
InlineAsmRegOrRegClass::RegClass(reg) => match reg {
InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::reg) => "r",
InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg) => "w",
@@ -633,6 +626,8 @@ fn modifier_to_llvm(
reg: InlineAsmRegClass,
modifier: Option<char>,
) -> Option<char> {
+ // The modifiers can be retrieved from
+ // https://llvm.org/docs/LangRef.html#asm-template-argument-modifiers
match reg {
InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::reg) => modifier,
InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg)
diff --git a/compiler/rustc_codegen_llvm/src/attributes.rs b/compiler/rustc_codegen_llvm/src/attributes.rs
index aabbe8ac2..eff2436d4 100644
--- a/compiler/rustc_codegen_llvm/src/attributes.rs
+++ b/compiler/rustc_codegen_llvm/src/attributes.rs
@@ -35,6 +35,10 @@ pub fn apply_to_callsite(callsite: &Value, idx: AttributePlace, attrs: &[&Attrib
/// Get LLVM attribute for the provided inline heuristic.
#[inline]
fn inline_attr<'ll>(cx: &CodegenCx<'ll, '_>, inline: InlineAttr) -> Option<&'ll Attribute> {
+ if !cx.tcx.sess.opts.unstable_opts.inline_llvm {
+ // disable LLVM inlining
+ return Some(AttributeKind::NoInline.create_attr(cx.llcx));
+ }
match inline {
InlineAttr::Hint => Some(AttributeKind::InlineHint.create_attr(cx.llcx)),
InlineAttr::Always => Some(AttributeKind::AlwaysInline.create_attr(cx.llcx)),
@@ -386,7 +390,8 @@ pub fn from_fn_attrs<'ll, 'tcx>(
) {
let span = cx
.tcx
- .get_attr(instance.def_id(), sym::target_feature)
+ .get_attrs(instance.def_id(), sym::target_feature)
+ .next()
.map_or_else(|| cx.tcx.def_span(instance.def_id()), |a| a.span);
let msg = format!(
"the target features {} must all be either enabled or disabled together",
diff --git a/compiler/rustc_codegen_llvm/src/back/archive.rs b/compiler/rustc_codegen_llvm/src/back/archive.rs
index 27039cda2..082665bba 100644
--- a/compiler/rustc_codegen_llvm/src/back/archive.rs
+++ b/compiler/rustc_codegen_llvm/src/back/archive.rs
@@ -2,16 +2,21 @@
use std::env;
use std::ffi::{CStr, CString, OsString};
-use std::io;
+use std::fs;
+use std::io::{self, Write};
use std::mem;
use std::path::{Path, PathBuf};
use std::ptr;
use std::str;
+use object::read::macho::FatArch;
+
+use crate::common;
use crate::llvm::archive_ro::{ArchiveRO, Child};
use crate::llvm::{self, ArchiveKind, LLVMMachineType, LLVMRustCOFFShortExport};
use rustc_codegen_ssa::back::archive::{ArchiveBuilder, ArchiveBuilderBuilder};
-use rustc_session::cstore::{DllCallingConvention, DllImport};
+use rustc_data_structures::memmap::Mmap;
+use rustc_session::cstore::DllImport;
use rustc_session::Session;
/// Helper for adding many files to an archive.
@@ -52,13 +57,70 @@ fn llvm_machine_type(cpu: &str) -> LLVMMachineType {
}
}
+fn try_filter_fat_archs(
+ archs: object::read::Result<&[impl FatArch]>,
+ target_arch: object::Architecture,
+ archive_path: &Path,
+ archive_map_data: &[u8],
+) -> io::Result<Option<PathBuf>> {
+ let archs = archs.map_err(|e| io::Error::new(io::ErrorKind::Other, e))?;
+
+ let desired = match archs.iter().filter(|a| a.architecture() == target_arch).next() {
+ Some(a) => a,
+ None => return Ok(None),
+ };
+
+ let (mut new_f, extracted_path) = tempfile::Builder::new()
+ .suffix(archive_path.file_name().unwrap())
+ .tempfile()?
+ .keep()
+ .unwrap();
+
+ new_f.write_all(
+ desired.data(archive_map_data).map_err(|e| io::Error::new(io::ErrorKind::Other, e))?,
+ )?;
+
+ Ok(Some(extracted_path))
+}
+
+fn try_extract_macho_fat_archive(
+ sess: &Session,
+ archive_path: &Path,
+) -> io::Result<Option<PathBuf>> {
+ let archive_map = unsafe { Mmap::map(fs::File::open(&archive_path)?)? };
+ let target_arch = match sess.target.arch.as_ref() {
+ "aarch64" => object::Architecture::Aarch64,
+ "x86_64" => object::Architecture::X86_64,
+ _ => return Ok(None),
+ };
+
+ match object::macho::FatHeader::parse(&*archive_map) {
+ Ok(h) if h.magic.get(object::endian::BigEndian) == object::macho::FAT_MAGIC => {
+ let archs = object::macho::FatHeader::parse_arch32(&*archive_map);
+ try_filter_fat_archs(archs, target_arch, archive_path, &*archive_map)
+ }
+ Ok(h) if h.magic.get(object::endian::BigEndian) == object::macho::FAT_MAGIC_64 => {
+ let archs = object::macho::FatHeader::parse_arch64(&*archive_map);
+ try_filter_fat_archs(archs, target_arch, archive_path, &*archive_map)
+ }
+ // Not a FatHeader at all, just return None.
+ _ => Ok(None),
+ }
+}
+
impl<'a> ArchiveBuilder<'a> for LlvmArchiveBuilder<'a> {
fn add_archive(
&mut self,
archive: &Path,
skip: Box<dyn FnMut(&str) -> bool + 'static>,
) -> io::Result<()> {
- let archive_ro = match ArchiveRO::open(archive) {
+ let mut archive = archive.to_path_buf();
+ if self.sess.target.llvm_target.contains("-apple-macosx") {
+ if let Some(new_archive) = try_extract_macho_fat_archive(&self.sess, &archive)? {
+ archive = new_archive
+ }
+ }
+ let archive_ro = match ArchiveRO::open(&archive) {
Ok(ar) => ar,
Err(e) => return Err(io::Error::new(io::ErrorKind::Other, e)),
};
@@ -66,7 +128,7 @@ impl<'a> ArchiveBuilder<'a> for LlvmArchiveBuilder<'a> {
return Ok(());
}
self.additions.push(Addition::Archive {
- path: archive.to_path_buf(),
+ path: archive,
archive: archive_ro,
skip: Box::new(skip),
});
@@ -103,29 +165,28 @@ impl ArchiveBuilderBuilder for LlvmArchiveBuilderBuilder {
lib_name: &str,
dll_imports: &[DllImport],
tmpdir: &Path,
+ is_direct_dependency: bool,
) -> PathBuf {
+ let name_suffix = if is_direct_dependency { "_imports" } else { "_imports_indirect" };
let output_path = {
let mut output_path: PathBuf = tmpdir.to_path_buf();
- output_path.push(format!("{}_imports", lib_name));
+ output_path.push(format!("{}{}", lib_name, name_suffix));
output_path.with_extension("lib")
};
let target = &sess.target;
- let mingw_gnu_toolchain = target.vendor == "pc"
- && target.os == "windows"
- && target.env == "gnu"
- && target.abi.is_empty();
+ let mingw_gnu_toolchain = common::is_mingw_gnu_toolchain(target);
let import_name_and_ordinal_vector: Vec<(String, Option<u16>)> = dll_imports
.iter()
.map(|import: &DllImport| {
if sess.target.arch == "x86" {
(
- LlvmArchiveBuilder::i686_decorated_name(import, mingw_gnu_toolchain),
- import.ordinal,
+ common::i686_decorated_name(import, mingw_gnu_toolchain, false),
+ import.ordinal(),
)
} else {
- (import.name.to_string(), import.ordinal)
+ (import.name.to_string(), import.ordinal())
}
})
.collect();
@@ -136,7 +197,8 @@ impl ArchiveBuilderBuilder for LlvmArchiveBuilderBuilder {
// that loaded but crashed with an AV upon calling one of the imported
// functions. Therefore, use binutils to create the import library instead,
// by writing a .DEF file to the temp dir and calling binutils's dlltool.
- let def_file_path = tmpdir.join(format!("{}_imports", lib_name)).with_extension("def");
+ let def_file_path =
+ tmpdir.join(format!("{}{}", lib_name, name_suffix)).with_extension("def");
let def_file_content = format!(
"EXPORTS\n{}",
@@ -159,6 +221,9 @@ impl ArchiveBuilderBuilder for LlvmArchiveBuilderBuilder {
}
};
+ // --no-leading-underscore: For the `import_name_type` feature to work, we need to be
+ // able to control the *exact* spelling of each of the symbols that are being imported:
+ // hence we don't want `dlltool` adding leading underscores automatically.
let dlltool = find_binutils_dlltool(sess);
let result = std::process::Command::new(dlltool)
.args([
@@ -168,6 +233,7 @@ impl ArchiveBuilderBuilder for LlvmArchiveBuilderBuilder {
lib_name,
"-l",
output_path.to_str().unwrap(),
+ "--no-leading-underscore",
])
.output();
@@ -188,10 +254,10 @@ impl ArchiveBuilderBuilder for LlvmArchiveBuilderBuilder {
let output_path_z = rustc_fs_util::path_to_c_string(&output_path);
- tracing::trace!("invoking LLVMRustWriteImportLibrary");
- tracing::trace!(" dll_name {:#?}", dll_name_z);
- tracing::trace!(" output_path {}", output_path.display());
- tracing::trace!(
+ trace!("invoking LLVMRustWriteImportLibrary");
+ trace!(" dll_name {:#?}", dll_name_z);
+ trace!(" output_path {}", output_path.display());
+ trace!(
" import names: {}",
dll_imports
.iter()
@@ -322,22 +388,6 @@ impl<'a> LlvmArchiveBuilder<'a> {
ret
}
}
-
- fn i686_decorated_name(import: &DllImport, mingw: bool) -> String {
- let name = import.name;
- let prefix = if mingw { "" } else { "_" };
-
- match import.calling_convention {
- DllCallingConvention::C => format!("{}{}", prefix, name),
- DllCallingConvention::Stdcall(arg_list_size) => {
- format!("{}{}@{}", prefix, name, arg_list_size)
- }
- DllCallingConvention::Fastcall(arg_list_size) => format!("@{}@{}", name, arg_list_size),
- DllCallingConvention::Vectorcall(arg_list_size) => {
- format!("{}@@{}", name, arg_list_size)
- }
- }
- }
}
fn string_to_io_error(s: String) -> io::Error {
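The i686_decorated_name helper removed above now lives in common (see the common::i686_decorated_name call earlier in this file's diff). Its decoration rules, restated as a standalone sketch with a simplified calling-convention enum standing in for the cstore types:

// Simplified stand-in for DllCallingConvention; sizes are argument-list bytes.
enum CallConv {
    C,
    Stdcall(usize),
    Fastcall(usize),
    Vectorcall(usize),
}

fn i686_decorated_name_sketch(name: &str, conv: &CallConv, mingw: bool) -> String {
    // The MinGW GNU toolchain adds the leading underscore itself, so skip it there.
    let prefix = if mingw { "" } else { "_" };
    match conv {
        CallConv::C => format!("{prefix}{name}"),
        CallConv::Stdcall(size) => format!("{prefix}{name}@{size}"),
        CallConv::Fastcall(size) => format!("@{name}@{size}"),
        CallConv::Vectorcall(size) => format!("{name}@@{size}"),
    }
}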
diff --git a/compiler/rustc_codegen_llvm/src/back/lto.rs b/compiler/rustc_codegen_llvm/src/back/lto.rs
index 3731c6bcf..a49cc7f8d 100644
--- a/compiler/rustc_codegen_llvm/src/back/lto.rs
+++ b/compiler/rustc_codegen_llvm/src/back/lto.rs
@@ -1,15 +1,14 @@
-use crate::back::write::{
- self, save_temp_bitcode, to_llvm_opt_settings, with_llvm_pmb, DiagnosticHandlers,
-};
-use crate::llvm::archive_ro::ArchiveRO;
-use crate::llvm::{self, build_string, False, True};
-use crate::{llvm_util, LlvmCodegenBackend, ModuleLlvm};
+use crate::back::write::{self, save_temp_bitcode, DiagnosticHandlers};
+use crate::llvm::{self, build_string};
+use crate::{LlvmCodegenBackend, ModuleLlvm};
+use object::read::archive::ArchiveFile;
use rustc_codegen_ssa::back::lto::{LtoModuleCodegen, SerializedModule, ThinModule, ThinShared};
use rustc_codegen_ssa::back::symbol_export;
use rustc_codegen_ssa::back::write::{CodegenContext, FatLTOInput, TargetMachineFactoryConfig};
use rustc_codegen_ssa::traits::*;
use rustc_codegen_ssa::{looks_like_rust_object_file, ModuleCodegen, ModuleKind};
use rustc_data_structures::fx::FxHashMap;
+use rustc_data_structures::memmap::Mmap;
use rustc_errors::{FatalError, Handler};
use rustc_hir::def_id::LOCAL_CRATE;
use rustc_middle::bug;
@@ -17,7 +16,6 @@ use rustc_middle::dep_graph::WorkProduct;
use rustc_middle::middle::exported_symbols::{SymbolExportInfo, SymbolExportLevel};
use rustc_session::cgu_reuse_tracker::CguReuse;
use rustc_session::config::{self, CrateType, Lto};
-use tracing::{debug, info};
use std::ffi::{CStr, CString};
use std::fs::File;
@@ -34,8 +32,8 @@ pub const THIN_LTO_KEYS_INCR_COMP_FILE_NAME: &str = "thin-lto-past-keys.bin";
pub fn crate_type_allows_lto(crate_type: CrateType) -> bool {
match crate_type {
- CrateType::Executable | CrateType::Staticlib | CrateType::Cdylib => true,
- CrateType::Dylib | CrateType::Rlib | CrateType::ProcMacro => false,
+ CrateType::Executable | CrateType::Dylib | CrateType::Staticlib | CrateType::Cdylib => true,
+ CrateType::Rlib | CrateType::ProcMacro => false,
}
}
@@ -75,17 +73,6 @@ fn prepare_lto(
// with either fat or thin LTO
let mut upstream_modules = Vec::new();
if cgcx.lto != Lto::ThinLocal {
- if cgcx.opts.cg.prefer_dynamic {
- diag_handler
- .struct_err("cannot prefer dynamic linking when performing LTO")
- .note(
- "only 'staticlib', 'bin', and 'cdylib' outputs are \
- supported with LTO",
- )
- .emit();
- return Err(FatalError);
- }
-
// Make sure we actually can run LTO
for crate_type in cgcx.crate_types.iter() {
if !crate_type_allows_lto(*crate_type) {
@@ -94,9 +81,25 @@ fn prepare_lto(
static library outputs",
);
return Err(e);
+ } else if *crate_type == CrateType::Dylib {
+ if !cgcx.opts.unstable_opts.dylib_lto {
+ return Err(diag_handler
+ .fatal("lto cannot be used for `dylib` crate type without `-Zdylib-lto`"));
+ }
}
}
+ if cgcx.opts.cg.prefer_dynamic && !cgcx.opts.unstable_opts.dylib_lto {
+ diag_handler
+ .struct_err("cannot prefer dynamic linking when performing LTO")
+ .note(
+ "only 'staticlib', 'bin', and 'cdylib' outputs are \
+ supported with LTO",
+ )
+ .emit();
+ return Err(FatalError);
+ }
+
for &(cnum, ref path) in cgcx.each_linked_rlib_for_lto.iter() {
let exported_symbols =
cgcx.exported_symbols.as_ref().expect("needs exported symbols for LTO");
@@ -107,14 +110,24 @@ fn prepare_lto(
.extend(exported_symbols[&cnum].iter().filter_map(symbol_filter));
}
- let archive = ArchiveRO::open(path).expect("wanted an rlib");
+ let archive_data = unsafe {
+ Mmap::map(std::fs::File::open(&path).expect("couldn't open rlib"))
+ .expect("couldn't map rlib")
+ };
+ let archive = ArchiveFile::parse(&*archive_data).expect("wanted an rlib");
let obj_files = archive
- .iter()
- .filter_map(|child| child.ok().and_then(|c| c.name().map(|name| (name, c))))
+ .members()
+ .filter_map(|child| {
+ child.ok().and_then(|c| {
+ std::str::from_utf8(c.name()).ok().map(|name| (name.trim(), c))
+ })
+ })
.filter(|&(name, _)| looks_like_rust_object_file(name));
for (name, child) in obj_files {
info!("adding bitcode from {}", name);
- match get_bitcode_slice_from_object_data(child.data()) {
+ match get_bitcode_slice_from_object_data(
+ child.data(&*archive_data).expect("corrupt rlib"),
+ ) {
Ok(data) => {
let module = SerializedModule::FromRlib(data.to_vec());
upstream_modules.push((module, CString::new(name).unwrap()));
@@ -565,7 +578,7 @@ pub(crate) fn run_pass_manager(
module: &mut ModuleCodegen<ModuleLlvm>,
thin: bool,
) -> Result<(), FatalError> {
- let _timer = cgcx.prof.extra_verbose_generic_activity("LLVM_lto_optimize", &*module.name);
+ let _timer = cgcx.prof.verbose_generic_activity_with_arg("LLVM_lto_optimize", &*module.name);
let config = cgcx.config(module.kind);
// Now we have one massive module inside of llmod. Time to run the
@@ -587,61 +600,9 @@ pub(crate) fn run_pass_manager(
1,
);
}
- if llvm_util::should_use_new_llvm_pass_manager(
- &config.new_llvm_pass_manager,
- &cgcx.target_arch,
- ) {
- let opt_stage = if thin { llvm::OptStage::ThinLTO } else { llvm::OptStage::FatLTO };
- let opt_level = config.opt_level.unwrap_or(config::OptLevel::No);
- write::optimize_with_new_llvm_pass_manager(
- cgcx,
- diag_handler,
- module,
- config,
- opt_level,
- opt_stage,
- )?;
- debug!("lto done");
- return Ok(());
- }
-
- let pm = llvm::LLVMCreatePassManager();
- llvm::LLVMAddAnalysisPasses(module.module_llvm.tm, pm);
-
- if config.verify_llvm_ir {
- let pass = llvm::LLVMRustFindAndCreatePass("verify\0".as_ptr().cast());
- llvm::LLVMRustAddPass(pm, pass.unwrap());
- }
-
- let opt_level = config
- .opt_level
- .map(|x| to_llvm_opt_settings(x).0)
- .unwrap_or(llvm::CodeGenOptLevel::None);
- with_llvm_pmb(module.module_llvm.llmod(), config, opt_level, false, &mut |b| {
- if thin {
- llvm::LLVMRustPassManagerBuilderPopulateThinLTOPassManager(b, pm);
- } else {
- llvm::LLVMRustPassManagerBuilderPopulateLTOPassManager(
- b, pm, /* Internalize = */ False, /* RunInliner = */ True,
- );
- }
- });
-
- // We always generate bitcode through ThinLTOBuffers,
- // which do not support anonymous globals
- if config.bitcode_needed() {
- let pass = llvm::LLVMRustFindAndCreatePass("name-anon-globals\0".as_ptr().cast());
- llvm::LLVMRustAddPass(pm, pass.unwrap());
- }
-
- if config.verify_llvm_ir {
- let pass = llvm::LLVMRustFindAndCreatePass("verify\0".as_ptr().cast());
- llvm::LLVMRustAddPass(pm, pass.unwrap());
- }
-
- llvm::LLVMRunPassManager(pm, module.module_llvm.llmod());
-
- llvm::LLVMDisposePassManager(pm);
+ let opt_stage = if thin { llvm::OptStage::ThinLTO } else { llvm::OptStage::FatLTO };
+ let opt_level = config.opt_level.unwrap_or(config::OptLevel::No);
+ write::llvm_optimize(cgcx, diag_handler, module, config, opt_level, opt_stage)?;
}
debug!("lto done");
Ok(())
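prepare_lto now memory-maps each rlib and walks it with the object crate instead of LLVM's ArchiveRO. A self-contained sketch of the same member-iteration pattern, using std::fs::read in place of rustc's Mmap wrapper and assuming the object crate as a dependency:

use object::read::archive::ArchiveFile;
use std::io;

fn list_rlib_members(path: &std::path::Path) -> io::Result<Vec<String>> {
    let data = std::fs::read(path)?;
    let archive = ArchiveFile::parse(&*data)
        .map_err(|e| io::Error::new(io::ErrorKind::Other, e))?;
    let mut names = Vec::new();
    for member in archive.members() {
        let member = member.map_err(|e| io::Error::new(io::ErrorKind::Other, e))?;
        // Member names are raw bytes; the code above trims them and keeps
        // only names that look like Rust object files.
        if let Ok(name) = std::str::from_utf8(member.name()) {
            names.push(name.trim().to_owned());
        }
    }
    Ok(names)
}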
diff --git a/compiler/rustc_codegen_llvm/src/back/write.rs b/compiler/rustc_codegen_llvm/src/back/write.rs
index 534d32e8a..11053a8f6 100644
--- a/compiler/rustc_codegen_llvm/src/back/write.rs
+++ b/compiler/rustc_codegen_llvm/src/back/write.rs
@@ -5,7 +5,7 @@ use crate::back::profiling::{
use crate::base;
use crate::common;
use crate::consts;
-use crate::llvm::{self, DiagnosticInfo, PassManager, SMDiagnostic};
+use crate::llvm::{self, DiagnosticInfo, PassManager};
use crate::llvm_util;
use crate::type_::Type;
use crate::LlvmCodegenBackend;
@@ -21,14 +21,12 @@ use rustc_data_structures::profiling::SelfProfilerRef;
use rustc_data_structures::small_c_str::SmallCStr;
use rustc_errors::{FatalError, Handler, Level};
use rustc_fs_util::{link_or_copy, path_to_c_string};
-use rustc_middle::bug;
use rustc_middle::ty::TyCtxt;
use rustc_session::config::{self, Lto, OutputType, Passes, SplitDwarfKind, SwitchWithOptPath};
use rustc_session::Session;
use rustc_span::symbol::sym;
use rustc_span::InnerSpan;
use rustc_target::spec::{CodeModel, RelocModel, SanitizerSet, SplitDebuginfo};
-use tracing::debug;
use libc::{c_char, c_int, c_uint, c_void, size_t};
use std::ffi::CString;
@@ -304,7 +302,6 @@ impl<'a> DiagnosticHandlers<'a> {
remark_passes.as_ptr(),
remark_passes.len(),
);
- llvm::LLVMRustSetInlineAsmDiagnosticHandler(llcx, inline_asm_handler, data.cast());
DiagnosticHandlers { data, llcx, old_handler }
}
}
@@ -312,9 +309,7 @@ impl<'a> DiagnosticHandlers<'a> {
impl<'a> Drop for DiagnosticHandlers<'a> {
fn drop(&mut self) {
- use std::ptr::null_mut;
unsafe {
- llvm::LLVMRustSetInlineAsmDiagnosticHandler(self.llcx, inline_asm_handler, null_mut());
llvm::LLVMRustContextSetDiagnosticHandler(self.llcx, self.old_handler);
drop(Box::from_raw(self.data));
}
@@ -342,16 +337,6 @@ fn report_inline_asm(
cgcx.diag_emitter.inline_asm_error(cookie as u32, msg, level, source);
}
-unsafe extern "C" fn inline_asm_handler(diag: &SMDiagnostic, user: *const c_void, cookie: c_uint) {
- if user.is_null() {
- return;
- }
- let (cgcx, _) = *(user as *const (&CodegenContext<LlvmCodegenBackend>, &Handler));
-
- let smdiag = llvm::diagnostic::SrcMgrDiagnostic::unpack(diag);
- report_inline_asm(cgcx, smdiag.message, smdiag.level, cookie, smdiag.source);
-}
-
unsafe extern "C" fn diagnostic_handler(info: &DiagnosticInfo, user: *mut c_void) {
if user.is_null() {
return;
@@ -423,7 +408,15 @@ fn get_pgo_sample_use_path(config: &ModuleConfig) -> Option<CString> {
.map(|path_buf| CString::new(path_buf.to_string_lossy().as_bytes()).unwrap())
}
-pub(crate) unsafe fn optimize_with_new_llvm_pass_manager(
+fn get_instr_profile_output_path(config: &ModuleConfig) -> Option<CString> {
+ if config.instrument_coverage {
+ Some(CString::new("default_%m_%p.profraw").unwrap())
+ } else {
+ None
+ }
+}
+
+pub(crate) unsafe fn llvm_optimize(
cgcx: &CodegenContext<LlvmCodegenBackend>,
diag_handler: &Handler,
module: &ModuleCodegen<ModuleLlvm>,
@@ -438,6 +431,7 @@ pub(crate) unsafe fn optimize_with_new_llvm_pass_manager(
let pgo_use_path = get_pgo_use_path(config);
let pgo_sample_use_path = get_pgo_sample_use_path(config);
let is_lto = opt_stage == llvm::OptStage::ThinLTO || opt_stage == llvm::OptStage::FatLTO;
+ let instr_profile_output_path = get_instr_profile_output_path(config);
// Sanitizer instrumentation is only inserted during the pre-link optimization stage.
let sanitizer_options = if !is_lto {
Some(llvm::SanitizerOptions {
@@ -470,7 +464,7 @@ pub(crate) unsafe fn optimize_with_new_llvm_pass_manager(
// FIXME: NewPM doesn't provide a facility to pass custom InlineParams.
// We would have to add upstream support for this first, before we can support
// config.inline_threshold and our more aggressive default thresholds.
- let result = llvm::LLVMRustOptimizeWithNewPassManager(
+ let result = llvm::LLVMRustOptimize(
module.module_llvm.llmod(),
&*module.module_llvm.tm,
to_pass_builder_opt_level(opt_level),
@@ -488,6 +482,7 @@ pub(crate) unsafe fn optimize_with_new_llvm_pass_manager(
pgo_gen_path.as_ref().map_or(std::ptr::null(), |s| s.as_ptr()),
pgo_use_path.as_ref().map_or(std::ptr::null(), |s| s.as_ptr()),
config.instrument_coverage,
+ instr_profile_output_path.as_ref().map_or(std::ptr::null(), |s| s.as_ptr()),
config.instrument_gcov,
pgo_sample_use_path.as_ref().map_or(std::ptr::null(), |s| s.as_ptr()),
config.debug_info_for_profiling,
@@ -513,18 +508,11 @@ pub(crate) unsafe fn optimize(
let llmod = module.module_llvm.llmod();
let llcx = &*module.module_llvm.llcx;
- let tm = &*module.module_llvm.tm;
let _handlers = DiagnosticHandlers::new(cgcx, diag_handler, llcx);
let module_name = module.name.clone();
let module_name = Some(&module_name[..]);
- if let Some(false) = config.new_llvm_pass_manager && llvm_util::get_version() >= (15, 0, 0) {
- diag_handler.warn(
- "ignoring `-Z new-llvm-pass-manager=no`, which is no longer supported with LLVM 15",
- );
- }
-
if config.emit_no_opt_bc {
let out = cgcx.output_filenames.temp_path_ext("no-opt.bc", module_name);
let out = path_to_c_string(&out);
@@ -532,191 +520,24 @@ pub(crate) unsafe fn optimize(
}
if let Some(opt_level) = config.opt_level {
- if llvm_util::should_use_new_llvm_pass_manager(
- &config.new_llvm_pass_manager,
- &cgcx.target_arch,
- ) {
- let opt_stage = match cgcx.lto {
- Lto::Fat => llvm::OptStage::PreLinkFatLTO,
- Lto::Thin | Lto::ThinLocal => llvm::OptStage::PreLinkThinLTO,
- _ if cgcx.opts.cg.linker_plugin_lto.enabled() => llvm::OptStage::PreLinkThinLTO,
- _ => llvm::OptStage::PreLinkNoLTO,
- };
- return optimize_with_new_llvm_pass_manager(
- cgcx,
- diag_handler,
- module,
- config,
- opt_level,
- opt_stage,
- );
- }
-
- if cgcx.prof.llvm_recording_enabled() {
- diag_handler
- .warn("`-Z self-profile-events = llvm` requires `-Z new-llvm-pass-manager`");
- }
-
- // Create the two optimizing pass managers. These mirror what clang
- // does, and are by populated by LLVM's default PassManagerBuilder.
- // Each manager has a different set of passes, but they also share
- // some common passes.
- let fpm = llvm::LLVMCreateFunctionPassManagerForModule(llmod);
- let mpm = llvm::LLVMCreatePassManager();
-
- {
- let find_pass = |pass_name: &str| {
- let pass_name = SmallCStr::new(pass_name);
- llvm::LLVMRustFindAndCreatePass(pass_name.as_ptr())
- };
-
- if config.verify_llvm_ir {
- // Verification should run as the very first pass.
- llvm::LLVMRustAddPass(fpm, find_pass("verify").unwrap());
- }
-
- let mut extra_passes = Vec::new();
- let mut have_name_anon_globals_pass = false;
-
- for pass_name in &config.passes {
- if pass_name == "lint" {
- // Linting should also be performed early, directly on the generated IR.
- llvm::LLVMRustAddPass(fpm, find_pass("lint").unwrap());
- continue;
- }
-
- if let Some(pass) = find_pass(pass_name) {
- extra_passes.push(pass);
- } else {
- diag_handler.warn(&format!("unknown pass `{}`, ignoring", pass_name));
- }
-
- if pass_name == "name-anon-globals" {
- have_name_anon_globals_pass = true;
- }
- }
-
- // Instrumentation must be inserted before optimization,
- // otherwise LLVM may optimize some functions away which
- // breaks llvm-cov.
- //
- // This mirrors what Clang does in lib/CodeGen/BackendUtil.cpp.
- if config.instrument_gcov {
- llvm::LLVMRustAddPass(mpm, find_pass("insert-gcov-profiling").unwrap());
- }
- if config.instrument_coverage {
- llvm::LLVMRustAddPass(mpm, find_pass("instrprof").unwrap());
- }
- if config.debug_info_for_profiling {
- llvm::LLVMRustAddPass(mpm, find_pass("add-discriminators").unwrap());
- }
-
- add_sanitizer_passes(config, &mut extra_passes);
-
- // Some options cause LLVM bitcode to be emitted, which uses ThinLTOBuffers, so we need
- // to make sure we run LLVM's NameAnonGlobals pass when emitting bitcode; otherwise
- // we'll get errors in LLVM.
- let using_thin_buffers = config.bitcode_needed();
- if !config.no_prepopulate_passes {
- llvm::LLVMAddAnalysisPasses(tm, fpm);
- llvm::LLVMAddAnalysisPasses(tm, mpm);
- let opt_level = to_llvm_opt_settings(opt_level).0;
- let prepare_for_thin_lto = cgcx.lto == Lto::Thin
- || cgcx.lto == Lto::ThinLocal
- || (cgcx.lto != Lto::Fat && cgcx.opts.cg.linker_plugin_lto.enabled());
- with_llvm_pmb(llmod, config, opt_level, prepare_for_thin_lto, &mut |b| {
- llvm::LLVMRustAddLastExtensionPasses(
- b,
- extra_passes.as_ptr(),
- extra_passes.len() as size_t,
- );
- llvm::LLVMRustPassManagerBuilderPopulateFunctionPassManager(b, fpm);
- llvm::LLVMRustPassManagerBuilderPopulateModulePassManager(b, mpm);
- });
-
- have_name_anon_globals_pass = have_name_anon_globals_pass || prepare_for_thin_lto;
- if using_thin_buffers && !prepare_for_thin_lto {
- llvm::LLVMRustAddPass(mpm, find_pass("name-anon-globals").unwrap());
- have_name_anon_globals_pass = true;
- }
- } else {
- // If we don't use the standard pipeline, directly populate the MPM
- // with the extra passes.
- for pass in extra_passes {
- llvm::LLVMRustAddPass(mpm, pass);
- }
- }
-
- if using_thin_buffers && !have_name_anon_globals_pass {
- // As described above, this will probably cause an error in LLVM
- if config.no_prepopulate_passes {
- diag_handler.err(
- "The current compilation is going to use thin LTO buffers \
- without running LLVM's NameAnonGlobals pass. \
- This will likely cause errors in LLVM. Consider adding \
- -C passes=name-anon-globals to the compiler command line.",
- );
- } else {
- bug!(
- "We are using thin LTO buffers without running the NameAnonGlobals pass. \
- This will likely cause errors in LLVM and should never happen."
- );
- }
- }
- }
-
- diag_handler.abort_if_errors();
-
- // Finally, run the actual optimization passes
- {
- let _timer = cgcx.prof.extra_verbose_generic_activity(
- "LLVM_module_optimize_function_passes",
- &*module.name,
- );
- llvm::LLVMRustRunFunctionPassManager(fpm, llmod);
- }
- {
- let _timer = cgcx.prof.extra_verbose_generic_activity(
- "LLVM_module_optimize_module_passes",
- &*module.name,
- );
- llvm::LLVMRunPassManager(mpm, llmod);
- }
-
- // Deallocate managers that we're now done with
- llvm::LLVMDisposePassManager(fpm);
- llvm::LLVMDisposePassManager(mpm);
+ let opt_stage = match cgcx.lto {
+ Lto::Fat => llvm::OptStage::PreLinkFatLTO,
+ Lto::Thin | Lto::ThinLocal => llvm::OptStage::PreLinkThinLTO,
+ _ if cgcx.opts.cg.linker_plugin_lto.enabled() => llvm::OptStage::PreLinkThinLTO,
+ _ => llvm::OptStage::PreLinkNoLTO,
+ };
+ return llvm_optimize(cgcx, diag_handler, module, config, opt_level, opt_stage);
}
Ok(())
}
-unsafe fn add_sanitizer_passes(config: &ModuleConfig, passes: &mut Vec<&'static mut llvm::Pass>) {
- if config.sanitizer.contains(SanitizerSet::ADDRESS) {
- let recover = config.sanitizer_recover.contains(SanitizerSet::ADDRESS);
- passes.push(llvm::LLVMRustCreateAddressSanitizerFunctionPass(recover));
- passes.push(llvm::LLVMRustCreateModuleAddressSanitizerPass(recover));
- }
- if config.sanitizer.contains(SanitizerSet::MEMORY) {
- let track_origins = config.sanitizer_memory_track_origins as c_int;
- let recover = config.sanitizer_recover.contains(SanitizerSet::MEMORY);
- passes.push(llvm::LLVMRustCreateMemorySanitizerPass(track_origins, recover));
- }
- if config.sanitizer.contains(SanitizerSet::THREAD) {
- passes.push(llvm::LLVMRustCreateThreadSanitizerPass());
- }
- if config.sanitizer.contains(SanitizerSet::HWADDRESS) {
- let recover = config.sanitizer_recover.contains(SanitizerSet::HWADDRESS);
- passes.push(llvm::LLVMRustCreateHWAddressSanitizerPass(recover));
- }
-}
-
pub(crate) fn link(
cgcx: &CodegenContext<LlvmCodegenBackend>,
diag_handler: &Handler,
mut modules: Vec<ModuleCodegen<ModuleLlvm>>,
) -> Result<ModuleCodegen<ModuleLlvm>, FatalError> {
use super::lto::{Linker, ModuleBuffer};
- // Sort the modules by name to ensure to ensure deterministic behavior.
+ // Sort the modules by name to ensure deterministic behavior.
modules.sort_by(|a, b| a.name.cmp(&b.name));
let (first, elements) =
modules.split_first().expect("Bug! modules must contain at least one module.");
@@ -1076,72 +897,6 @@ unsafe fn embed_bitcode(
}
}
-pub unsafe fn with_llvm_pmb(
- llmod: &llvm::Module,
- config: &ModuleConfig,
- opt_level: llvm::CodeGenOptLevel,
- prepare_for_thin_lto: bool,
- f: &mut dyn FnMut(&llvm::PassManagerBuilder),
-) {
- use std::ptr;
-
- // Create the PassManagerBuilder for LLVM. We configure it with
- // reasonable defaults and prepare it to actually populate the pass
- // manager.
- let builder = llvm::LLVMRustPassManagerBuilderCreate();
- let opt_size = config.opt_size.map_or(llvm::CodeGenOptSizeNone, |x| to_llvm_opt_settings(x).1);
- let inline_threshold = config.inline_threshold;
- let pgo_gen_path = get_pgo_gen_path(config);
- let pgo_use_path = get_pgo_use_path(config);
- let pgo_sample_use_path = get_pgo_sample_use_path(config);
-
- llvm::LLVMRustConfigurePassManagerBuilder(
- builder,
- opt_level,
- config.merge_functions,
- config.vectorize_slp,
- config.vectorize_loop,
- prepare_for_thin_lto,
- pgo_gen_path.as_ref().map_or(ptr::null(), |s| s.as_ptr()),
- pgo_use_path.as_ref().map_or(ptr::null(), |s| s.as_ptr()),
- pgo_sample_use_path.as_ref().map_or(ptr::null(), |s| s.as_ptr()),
- opt_size as c_int,
- );
-
- llvm::LLVMRustAddBuilderLibraryInfo(builder, llmod, config.no_builtins);
-
- // Here we match what clang does (kinda). For O0 we only inline
- // always-inline functions (but don't add lifetime intrinsics), at O1 we
- // inline with lifetime intrinsics, and O2+ we add an inliner with a
- // thresholds copied from clang.
- match (opt_level, opt_size, inline_threshold) {
- (.., Some(t)) => {
- llvm::LLVMRustPassManagerBuilderUseInlinerWithThreshold(builder, t);
- }
- (llvm::CodeGenOptLevel::Aggressive, ..) => {
- llvm::LLVMRustPassManagerBuilderUseInlinerWithThreshold(builder, 275);
- }
- (_, llvm::CodeGenOptSizeDefault, _) => {
- llvm::LLVMRustPassManagerBuilderUseInlinerWithThreshold(builder, 75);
- }
- (_, llvm::CodeGenOptSizeAggressive, _) => {
- llvm::LLVMRustPassManagerBuilderUseInlinerWithThreshold(builder, 25);
- }
- (llvm::CodeGenOptLevel::None, ..) => {
- llvm::LLVMRustAddAlwaysInlinePass(builder, config.emit_lifetime_markers);
- }
- (llvm::CodeGenOptLevel::Less, ..) => {
- llvm::LLVMRustAddAlwaysInlinePass(builder, config.emit_lifetime_markers);
- }
- (llvm::CodeGenOptLevel::Default, ..) => {
- llvm::LLVMRustPassManagerBuilderUseInlinerWithThreshold(builder, 225);
- }
- }
-
- f(builder);
- llvm::LLVMRustPassManagerBuilderDispose(builder);
-}
-
// Create a `__imp_<symbol> = &symbol` global for every public static `symbol`.
// This is required to satisfy `dllimport` references to static data in .rlibs
// when using MSVC linker. We do this only for data, as linker can fix up
diff --git a/compiler/rustc_codegen_llvm/src/base.rs b/compiler/rustc_codegen_llvm/src/base.rs
index 86f92dc02..5b2bbdb4b 100644
--- a/compiler/rustc_codegen_llvm/src/base.rs
+++ b/compiler/rustc_codegen_llvm/src/base.rs
@@ -19,6 +19,8 @@ use crate::context::CodegenCx;
use crate::llvm;
use crate::value::Value;
+use cstr::cstr;
+
use rustc_codegen_ssa::base::maybe_create_entry_wrapper;
use rustc_codegen_ssa::mono_item::MonoItemExt;
use rustc_codegen_ssa::traits::*;
@@ -107,11 +109,14 @@ pub fn compile_codegen_unit(tcx: TyCtxt<'_>, cgu_name: Symbol) -> (ModuleCodegen
}
// Create the llvm.used and llvm.compiler.used variables.
- if !cx.used_statics().borrow().is_empty() {
- cx.create_used_variable()
+ if !cx.used_statics.borrow().is_empty() {
+ cx.create_used_variable_impl(cstr!("llvm.used"), &*cx.used_statics.borrow());
}
- if !cx.compiler_used_statics().borrow().is_empty() {
- cx.create_compiler_used_variable()
+ if !cx.compiler_used_statics.borrow().is_empty() {
+ cx.create_used_variable_impl(
+ cstr!("llvm.compiler.used"),
+ &*cx.compiler_used_statics.borrow(),
+ );
}
// Run replace-all-uses-with for statics that need it. This must
diff --git a/compiler/rustc_codegen_llvm/src/builder.rs b/compiler/rustc_codegen_llvm/src/builder.rs
index d3096c73a..fca43a0d8 100644
--- a/compiler/rustc_codegen_llvm/src/builder.rs
+++ b/compiler/rustc_codegen_llvm/src/builder.rs
@@ -1,15 +1,14 @@
+use crate::abi::FnAbiLlvmExt;
use crate::attributes;
use crate::common::Funclet;
use crate::context::CodegenCx;
-use crate::llvm::{self, BasicBlock, False};
-use crate::llvm::{AtomicOrdering, AtomicRmwBinOp, SynchronizationScope};
-use crate::llvm_util;
+use crate::llvm::{self, AtomicOrdering, AtomicRmwBinOp, BasicBlock};
use crate::type_::Type;
use crate::type_of::LayoutLlvmExt;
use crate::value::Value;
use cstr::cstr;
use libc::{c_char, c_uint};
-use rustc_codegen_ssa::common::{IntPredicate, RealPredicate, TypeKind};
+use rustc_codegen_ssa::common::{IntPredicate, RealPredicate, SynchronizationScope, TypeKind};
use rustc_codegen_ssa::mir::operand::{OperandRef, OperandValue};
use rustc_codegen_ssa::mir::place::PlaceRef;
use rustc_codegen_ssa::traits::*;
@@ -28,7 +27,6 @@ use std::ffi::CStr;
use std::iter;
use std::ops::Deref;
use std::ptr;
-use tracing::{debug, instrument};
// All Builders must have an llfn associated with them
#[must_use]
@@ -217,6 +215,7 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
fn invoke(
&mut self,
llty: &'ll Type,
+ fn_abi: Option<&FnAbi<'tcx, Ty<'tcx>>>,
llfn: &'ll Value,
args: &[&'ll Value],
then: &'ll BasicBlock,
@@ -229,7 +228,7 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
let bundle = funclet.map(|funclet| funclet.bundle());
let bundle = bundle.as_ref().map(|b| &*b.raw);
- unsafe {
+ let invoke = unsafe {
llvm::LLVMRustBuildInvoke(
self.llbuilder,
llty,
@@ -241,7 +240,11 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
bundle,
UNNAMED,
)
+ };
+ if let Some(fn_abi) = fn_abi {
+ fn_abi.apply_attrs_callsite(self, invoke);
}
+ invoke
}
fn unreachable(&mut self) {
@@ -408,20 +411,17 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
fn alloca(&mut self, ty: &'ll Type, align: Align) -> &'ll Value {
let mut bx = Builder::with_cx(self.cx);
bx.position_at_start(unsafe { llvm::LLVMGetFirstBasicBlock(self.llfn()) });
- bx.dynamic_alloca(ty, align)
- }
-
- fn dynamic_alloca(&mut self, ty: &'ll Type, align: Align) -> &'ll Value {
unsafe {
- let alloca = llvm::LLVMBuildAlloca(self.llbuilder, ty, UNNAMED);
+ let alloca = llvm::LLVMBuildAlloca(bx.llbuilder, ty, UNNAMED);
llvm::LLVMSetAlignment(alloca, align.bytes() as c_uint);
alloca
}
}
- fn array_alloca(&mut self, ty: &'ll Type, len: &'ll Value, align: Align) -> &'ll Value {
+ fn byte_array_alloca(&mut self, len: &'ll Value, align: Align) -> &'ll Value {
unsafe {
- let alloca = llvm::LLVMBuildArrayAlloca(self.llbuilder, ty, len, UNNAMED);
+ let alloca =
+ llvm::LLVMBuildArrayAlloca(self.llbuilder, self.cx().type_i8(), len, UNNAMED);
llvm::LLVMSetAlignment(alloca, align.bytes() as c_uint);
alloca
}
@@ -726,11 +726,11 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
unsafe { llvm::LLVMBuildSExt(self.llbuilder, val, dest_ty, UNNAMED) }
}
- fn fptoui_sat(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> Option<&'ll Value> {
+ fn fptoui_sat(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
self.fptoint_sat(false, val, dest_ty)
}
- fn fptosi_sat(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> Option<&'ll Value> {
+ fn fptosi_sat(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
self.fptoint_sat(true, val, dest_ty)
}
@@ -1038,35 +1038,23 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
dst: &'ll Value,
cmp: &'ll Value,
src: &'ll Value,
- mut order: rustc_codegen_ssa::common::AtomicOrdering,
+ order: rustc_codegen_ssa::common::AtomicOrdering,
failure_order: rustc_codegen_ssa::common::AtomicOrdering,
weak: bool,
) -> &'ll Value {
let weak = if weak { llvm::True } else { llvm::False };
- if llvm_util::get_version() < (13, 0, 0) {
- use rustc_codegen_ssa::common::AtomicOrdering::*;
- // Older llvm has the pre-C++17 restriction on
- // success and failure memory ordering,
- // requiring the former to be at least as strong as the latter.
- // So, for llvm 12, we upgrade the success ordering to a stronger
- // one if necessary.
- match (order, failure_order) {
- (Relaxed, Acquire) => order = Acquire,
- (Release, Acquire) => order = AcquireRelease,
- (_, SequentiallyConsistent) => order = SequentiallyConsistent,
- _ => {}
- }
- }
unsafe {
- llvm::LLVMRustBuildAtomicCmpXchg(
+ let value = llvm::LLVMBuildAtomicCmpXchg(
self.llbuilder,
dst,
cmp,
src,
AtomicOrdering::from_generic(order),
AtomicOrdering::from_generic(failure_order),
- weak,
- )
+ llvm::False, // SingleThreaded
+ );
+ llvm::LLVMSetWeak(value, weak);
+ value
}
}
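For context on the `weak` flag now set via `LLVMSetWeak`: a weak compare-exchange may fail spuriously, so it is normally used inside a retry loop, while `weak == false` corresponds to the strong form. A small sketch of the same distinction at the Rust surface level (standard library API, not codegen internals):

```rust
use std::sync::atomic::{AtomicUsize, Ordering};

// A weak compare-exchange is allowed to fail spuriously, so it normally
// lives inside a retry loop; `compare_exchange` is the strong counterpart.
fn increment(counter: &AtomicUsize) {
    let mut current = counter.load(Ordering::Relaxed);
    while let Err(actual) =
        counter.compare_exchange_weak(current, current + 1, Ordering::AcqRel, Ordering::Relaxed)
    {
        current = actual;
    }
}

fn main() {
    let counter = AtomicUsize::new(0);
    increment(&counter);
    assert_eq!(counter.load(Ordering::Relaxed), 1);
}
```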
fn atomic_rmw(
@@ -1083,7 +1071,7 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
dst,
src,
AtomicOrdering::from_generic(order),
- False,
+ llvm::False, // SingleThreaded
)
}
}
@@ -1091,13 +1079,18 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
fn atomic_fence(
&mut self,
order: rustc_codegen_ssa::common::AtomicOrdering,
- scope: rustc_codegen_ssa::common::SynchronizationScope,
+ scope: SynchronizationScope,
) {
+ let single_threaded = match scope {
+ SynchronizationScope::SingleThread => llvm::True,
+ SynchronizationScope::CrossThread => llvm::False,
+ };
unsafe {
- llvm::LLVMRustBuildAtomicFence(
+ llvm::LLVMBuildFence(
self.llbuilder,
AtomicOrdering::from_generic(order),
- SynchronizationScope::from_generic(scope),
+ single_threaded,
+ UNNAMED,
);
}
}
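The two scopes mapped above correspond to the two fence flavors exposed by the standard library: a cross-thread fence is `std::sync::atomic::fence`, while the single-threaded scope is what `compiler_fence` is lowered to (it only orders operations on the current thread and its signal handlers). A brief sketch:

```rust
use std::sync::atomic::{compiler_fence, fence, Ordering};

fn main() {
    // CrossThread scope: an ordinary atomic fence, visible to other threads.
    fence(Ordering::SeqCst);
    // SingleThread scope: only constrains this thread (and its signal
    // handlers); no cross-thread synchronization is implied.
    compiler_fence(Ordering::SeqCst);
}
```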
@@ -1155,6 +1148,7 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
fn call(
&mut self,
llty: &'ll Type,
+ fn_abi: Option<&FnAbi<'tcx, Ty<'tcx>>>,
llfn: &'ll Value,
args: &[&'ll Value],
funclet: Option<&Funclet<'ll>>,
@@ -1165,7 +1159,7 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
let bundle = funclet.map(|funclet| funclet.bundle());
let bundle = bundle.as_ref().map(|b| &*b.raw);
- unsafe {
+ let call = unsafe {
llvm::LLVMRustBuildCall(
self.llbuilder,
llty,
@@ -1174,7 +1168,11 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
args.len() as c_uint,
bundle,
)
+ };
+ if let Some(fn_abi) = fn_abi {
+ fn_abi.apply_attrs_callsite(self, call);
}
+ call
}
fn zext(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
@@ -1407,7 +1405,7 @@ impl<'a, 'll, 'tcx> Builder<'a, 'll, 'tcx> {
pub(crate) fn call_intrinsic(&mut self, intrinsic: &str, args: &[&'ll Value]) -> &'ll Value {
let (ty, f) = self.cx.get_intrinsic(intrinsic);
- self.call(ty, f, args, None)
+ self.call(ty, None, f, args, None)
}
fn call_lifetime_intrinsic(&mut self, intrinsic: &str, ptr: &'ll Value, size: Size) {
@@ -1444,51 +1442,32 @@ impl<'a, 'll, 'tcx> Builder<'a, 'll, 'tcx> {
}
}
- fn fptoint_sat_broken_in_llvm(&self) -> bool {
- match self.tcx.sess.target.arch.as_ref() {
- // FIXME - https://bugs.llvm.org/show_bug.cgi?id=50083
- "riscv64" => llvm_util::get_version() < (13, 0, 0),
- _ => false,
- }
- }
-
- fn fptoint_sat(
- &mut self,
- signed: bool,
- val: &'ll Value,
- dest_ty: &'ll Type,
- ) -> Option<&'ll Value> {
- if !self.fptoint_sat_broken_in_llvm() {
- let src_ty = self.cx.val_ty(val);
- let (float_ty, int_ty, vector_length) = if self.cx.type_kind(src_ty) == TypeKind::Vector
- {
- assert_eq!(self.cx.vector_length(src_ty), self.cx.vector_length(dest_ty));
- (
- self.cx.element_type(src_ty),
- self.cx.element_type(dest_ty),
- Some(self.cx.vector_length(src_ty)),
- )
- } else {
- (src_ty, dest_ty, None)
- };
- let float_width = self.cx.float_width(float_ty);
- let int_width = self.cx.int_width(int_ty);
-
- let instr = if signed { "fptosi" } else { "fptoui" };
- let name = if let Some(vector_length) = vector_length {
- format!(
- "llvm.{}.sat.v{}i{}.v{}f{}",
- instr, vector_length, int_width, vector_length, float_width
- )
- } else {
- format!("llvm.{}.sat.i{}.f{}", instr, int_width, float_width)
- };
- let f =
- self.declare_cfn(&name, llvm::UnnamedAddr::No, self.type_func(&[src_ty], dest_ty));
- Some(self.call(self.type_func(&[src_ty], dest_ty), f, &[val], None))
+ fn fptoint_sat(&mut self, signed: bool, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
+ let src_ty = self.cx.val_ty(val);
+ let (float_ty, int_ty, vector_length) = if self.cx.type_kind(src_ty) == TypeKind::Vector {
+ assert_eq!(self.cx.vector_length(src_ty), self.cx.vector_length(dest_ty));
+ (
+ self.cx.element_type(src_ty),
+ self.cx.element_type(dest_ty),
+ Some(self.cx.vector_length(src_ty)),
+ )
} else {
- None
- }
+ (src_ty, dest_ty, None)
+ };
+ let float_width = self.cx.float_width(float_ty);
+ let int_width = self.cx.int_width(int_ty);
+
+ let instr = if signed { "fptosi" } else { "fptoui" };
+ let name = if let Some(vector_length) = vector_length {
+ format!(
+ "llvm.{}.sat.v{}i{}.v{}f{}",
+ instr, vector_length, int_width, vector_length, float_width
+ )
+ } else {
+ format!("llvm.{}.sat.i{}.f{}", instr, int_width, float_width)
+ };
+ let f = self.declare_cfn(&name, llvm::UnnamedAddr::No, self.type_func(&[src_ty], dest_ty));
+ self.call(self.type_func(&[src_ty], dest_ty), None, f, &[val], None)
}
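The intrinsic name built above follows LLVM's naming scheme for saturating float-to-int conversions. A standalone sketch of the same formatting, with a couple of expected names:

```rust
// Mirror of the name construction used in fptoint_sat (illustrative only).
fn fptoint_sat_name(
    signed: bool,
    int_width: u64,
    float_width: u64,
    vector_length: Option<u64>,
) -> String {
    let instr = if signed { "fptosi" } else { "fptoui" };
    match vector_length {
        Some(n) => format!("llvm.{}.sat.v{}i{}.v{}f{}", instr, n, int_width, n, float_width),
        None => format!("llvm.{}.sat.i{}.f{}", instr, int_width, float_width),
    }
}

fn main() {
    assert_eq!(fptoint_sat_name(true, 32, 64, None), "llvm.fptosi.sat.i32.f64");
    assert_eq!(fptoint_sat_name(false, 16, 32, Some(4)), "llvm.fptoui.sat.v4i16.v4f32");
}
```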
pub(crate) fn landing_pad(
diff --git a/compiler/rustc_codegen_llvm/src/callee.rs b/compiler/rustc_codegen_llvm/src/callee.rs
index 72155d874..6f0d1b7ce 100644
--- a/compiler/rustc_codegen_llvm/src/callee.rs
+++ b/compiler/rustc_codegen_llvm/src/callee.rs
@@ -6,11 +6,11 @@
use crate::abi::FnAbiLlvmExt;
use crate::attributes;
+use crate::common;
use crate::context::CodegenCx;
use crate::llvm;
use crate::value::Value;
use rustc_codegen_ssa::traits::*;
-use tracing::debug;
use rustc_middle::ty::layout::{FnAbiOf, HasTyCtxt};
use rustc_middle::ty::{self, Instance, TypeVisitable};
@@ -79,13 +79,18 @@ pub fn get_fn<'ll, 'tcx>(cx: &CodegenCx<'ll, 'tcx>, instance: Instance<'tcx>) ->
llfn
}
} else {
- let llfn = cx.declare_fn(sym, fn_abi);
+ let instance_def_id = instance.def_id();
+ let llfn = if tcx.sess.target.arch == "x86" &&
+ let Some(dllimport) = common::get_dllimport(tcx, instance_def_id, sym)
+ {
+ cx.declare_fn(&common::i686_decorated_name(&dllimport, common::is_mingw_gnu_toolchain(&tcx.sess.target), true), fn_abi)
+ } else {
+ cx.declare_fn(sym, fn_abi)
+ };
debug!("get_fn: not casting pointer!");
attributes::from_fn_attrs(cx, llfn, instance);
- let instance_def_id = instance.def_id();
-
// Apply an appropriate linkage/visibility value to our item that we
// just declared.
//
@@ -174,7 +179,8 @@ pub fn get_fn<'ll, 'tcx>(cx: &CodegenCx<'ll, 'tcx>, instance: Instance<'tcx>) ->
// MinGW: For backward compatibility we rely on the linker to decide whether it
// should use dllimport for functions.
if cx.use_dll_storage_attrs
- && tcx.is_dllimport_foreign_item(instance_def_id)
+ && let Some(library) = tcx.native_library(instance_def_id)
+ && library.kind.is_dllimport()
&& !matches!(tcx.sess.target.env.as_ref(), "gnu" | "uclibc")
{
llvm::LLVMSetDLLStorageClass(llfn, llvm::DLLStorageClass::DllImport);
diff --git a/compiler/rustc_codegen_llvm/src/common.rs b/compiler/rustc_codegen_llvm/src/common.rs
index fb4da9a5f..acee9134f 100644
--- a/compiler/rustc_codegen_llvm/src/common.rs
+++ b/compiler/rustc_codegen_llvm/src/common.rs
@@ -10,13 +10,17 @@ use crate::value::Value;
use rustc_ast::Mutability;
use rustc_codegen_ssa::mir::place::PlaceRef;
use rustc_codegen_ssa::traits::*;
+use rustc_hir::def_id::DefId;
use rustc_middle::bug;
use rustc_middle::mir::interpret::{ConstAllocation, GlobalAlloc, Scalar};
use rustc_middle::ty::layout::{LayoutOf, TyAndLayout};
+use rustc_middle::ty::TyCtxt;
+use rustc_session::cstore::{DllCallingConvention, DllImport, PeImportNameType};
use rustc_target::abi::{self, AddressSpace, HasDataLayout, Pointer, Size};
+use rustc_target::spec::Target;
use libc::{c_char, c_uint};
-use tracing::debug;
+use std::fmt::Write;
/*
* A note on nomenclature of linking: "extern", "foreign", and "upcall".
@@ -211,7 +215,11 @@ impl<'ll, 'tcx> ConstMethods<'tcx> for CodegenCx<'ll, 'tcx> {
}
fn const_to_opt_uint(&self, v: &'ll Value) -> Option<u64> {
- try_as_const_integral(v).map(|v| unsafe { llvm::LLVMConstIntGetZExtValue(v) })
+ try_as_const_integral(v).and_then(|v| unsafe {
+ let mut i = 0u64;
+ let success = llvm::LLVMRustConstIntGetZExtValue(v, &mut i);
+ success.then_some(i)
+ })
}
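The new binding reports success through a boolean plus an out-parameter, and `bool::then_some` turns that into an `Option`. A sketch of that shape in isolation, assuming the FFI helper fails when the constant does not fit in 64 bits:

```rust
// Success-flag + out-parameter pattern (the 64-bit bound is an assumption
// about the FFI helper's contract, shown here with plain integers).
fn const_to_opt_u64(value: u128) -> Option<u64> {
    let mut out = 0u64;
    let success = value <= u64::MAX as u128;
    if success {
        out = value as u64;
    }
    success.then_some(out)
}

fn main() {
    assert_eq!(const_to_opt_u64(42), Some(42));
    assert_eq!(const_to_opt_u64(u128::MAX), None);
}
```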
fn const_to_opt_u128(&self, v: &'ll Value, sign_ext: bool) -> Option<u128> {
@@ -222,10 +230,6 @@ impl<'ll, 'tcx> ConstMethods<'tcx> for CodegenCx<'ll, 'tcx> {
})
}
- fn zst_to_backend(&self, _llty: &'ll Type) -> &'ll Value {
- self.const_undef(self.type_ix(0))
- }
-
fn scalar_to_backend(&self, cv: Scalar, layout: abi::Scalar, llty: &'ll Type) -> &'ll Value {
let bitsize = if layout.is_bool() { 1 } else { layout.size(self).bits() };
match cv {
@@ -357,3 +361,74 @@ fn hi_lo_to_u128(lo: u64, hi: u64) -> u128 {
fn try_as_const_integral(v: &Value) -> Option<&ConstantInt> {
unsafe { llvm::LLVMIsAConstantInt(v) }
}
+
+pub(crate) fn get_dllimport<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ id: DefId,
+ name: &str,
+) -> Option<&'tcx DllImport> {
+ tcx.native_library(id)
+ .map(|lib| lib.dll_imports.iter().find(|di| di.name.as_str() == name))
+ .flatten()
+}
+
+pub(crate) fn is_mingw_gnu_toolchain(target: &Target) -> bool {
+ target.vendor == "pc" && target.os == "windows" && target.env == "gnu" && target.abi.is_empty()
+}
+
+pub(crate) fn i686_decorated_name(
+ dll_import: &DllImport,
+ mingw: bool,
+ disable_name_mangling: bool,
+) -> String {
+ let name = dll_import.name.as_str();
+
+ let (add_prefix, add_suffix) = match dll_import.import_name_type {
+ Some(PeImportNameType::NoPrefix) => (false, true),
+ Some(PeImportNameType::Undecorated) => (false, false),
+ _ => (true, true),
+ };
+
+ // Worst case: +1 for disable name mangling, +1 for prefix, +4 for suffix (@@__).
+ let mut decorated_name = String::with_capacity(name.len() + 6);
+
+ if disable_name_mangling {
+ // LLVM uses a binary 1 ('\x01') prefix to a name to indicate that mangling needs to be disabled.
+ decorated_name.push('\x01');
+ }
+
+ let prefix = if add_prefix && dll_import.is_fn {
+ match dll_import.calling_convention {
+ DllCallingConvention::C | DllCallingConvention::Vectorcall(_) => None,
+ DllCallingConvention::Stdcall(_) => (!mingw
+ || dll_import.import_name_type == Some(PeImportNameType::Decorated))
+ .then_some('_'),
+ DllCallingConvention::Fastcall(_) => Some('@'),
+ }
+ } else if !dll_import.is_fn && !mingw {
+ // For static variables, prefix with '_' on MSVC.
+ Some('_')
+ } else {
+ None
+ };
+ if let Some(prefix) = prefix {
+ decorated_name.push(prefix);
+ }
+
+ decorated_name.push_str(name);
+
+ if add_suffix && dll_import.is_fn {
+ match dll_import.calling_convention {
+ DllCallingConvention::C => {}
+ DllCallingConvention::Stdcall(arg_list_size)
+ | DllCallingConvention::Fastcall(arg_list_size) => {
+ write!(&mut decorated_name, "@{}", arg_list_size).unwrap();
+ }
+ DllCallingConvention::Vectorcall(arg_list_size) => {
+ write!(&mut decorated_name, "@@{}", arg_list_size).unwrap();
+ }
+ }
+ }
+
+ decorated_name
+}
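A standalone sketch of the stdcall/fastcall/vectorcall decoration rules implemented above, operating on plain strings rather than rustc's `DllImport` and ignoring the `'\x01'` no-mangle prefix and the `import_name_type` overrides:

```rust
#[derive(Clone, Copy)]
enum CallConv {
    C,
    Stdcall(usize),    // argument-list size in bytes
    Fastcall(usize),
    Vectorcall(usize),
}

// Simplified decoration: '_' / '@' prefix (the '_' only outside MinGW for
// stdcall), and an '@<bytes>' or '@@<bytes>' suffix for the register-based
// calling conventions.
fn decorate(name: &str, conv: CallConv, mingw: bool) -> String {
    let mut out = String::new();
    match conv {
        CallConv::Stdcall(_) if !mingw => out.push('_'),
        CallConv::Fastcall(_) => out.push('@'),
        _ => {}
    }
    out.push_str(name);
    match conv {
        CallConv::Stdcall(n) | CallConv::Fastcall(n) => out.push_str(&format!("@{}", n)),
        CallConv::Vectorcall(n) => out.push_str(&format!("@@{}", n)),
        CallConv::C => {}
    }
    out
}

fn main() {
    assert_eq!(decorate("foo", CallConv::Stdcall(12), false), "_foo@12");
    assert_eq!(decorate("foo", CallConv::Stdcall(12), true), "foo@12");
    assert_eq!(decorate("foo", CallConv::Fastcall(12), false), "@foo@12");
    assert_eq!(decorate("foo", CallConv::Vectorcall(12), false), "foo@@12");
}
```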
diff --git a/compiler/rustc_codegen_llvm/src/consts.rs b/compiler/rustc_codegen_llvm/src/consts.rs
index 18467e370..dd3c43ba5 100644
--- a/compiler/rustc_codegen_llvm/src/consts.rs
+++ b/compiler/rustc_codegen_llvm/src/consts.rs
@@ -1,5 +1,5 @@
use crate::base;
-use crate::common::CodegenCx;
+use crate::common::{self, CodegenCx};
use crate::debuginfo;
use crate::llvm::{self, True};
use crate::llvm_util;
@@ -23,16 +23,15 @@ use rustc_target::abi::{
AddressSpace, Align, HasDataLayout, Primitive, Scalar, Size, WrappingRange,
};
use std::ops::Range;
-use tracing::debug;
pub fn const_alloc_to_llvm<'ll>(cx: &CodegenCx<'ll, '_>, alloc: ConstAllocation<'_>) -> &'ll Value {
let alloc = alloc.inner();
- let mut llvals = Vec::with_capacity(alloc.relocations().len() + 1);
+ let mut llvals = Vec::with_capacity(alloc.provenance().len() + 1);
let dl = cx.data_layout();
let pointer_size = dl.pointer_size.bytes() as usize;
- // Note: this function may call `inspect_with_uninit_and_ptr_outside_interpreter`,
- // so `range` must be within the bounds of `alloc` and not contain or overlap a relocation.
+ // Note: this function may call `inspect_with_uninit_and_ptr_outside_interpreter`, so `range`
+ // must be within the bounds of `alloc` and not contain or overlap a pointer provenance.
fn append_chunks_of_init_and_uninit_bytes<'ll, 'a, 'b>(
llvals: &mut Vec<&'ll Value>,
cx: &'a CodegenCx<'ll, 'b>,
@@ -79,12 +78,12 @@ pub fn const_alloc_to_llvm<'ll>(cx: &CodegenCx<'ll, '_>, alloc: ConstAllocation<
}
let mut next_offset = 0;
- for &(offset, alloc_id) in alloc.relocations().iter() {
+ for &(offset, alloc_id) in alloc.provenance().iter() {
let offset = offset.bytes();
assert_eq!(offset as usize as u64, offset);
let offset = offset as usize;
if offset > next_offset {
- // This `inspect` is okay since we have checked that it is not within a relocation, it
+ // This `inspect` is okay since we have checked that there is no provenance, it
// is within the bounds of the allocation, and it doesn't affect interpreter execution
// (we inspect the result after interpreter execution).
append_chunks_of_init_and_uninit_bytes(&mut llvals, cx, alloc, next_offset..offset);
@@ -93,7 +92,7 @@ pub fn const_alloc_to_llvm<'ll>(cx: &CodegenCx<'ll, '_>, alloc: ConstAllocation<
dl.endian,
// This `inspect` is okay since it is within the bounds of the allocation, it doesn't
// affect interpreter execution (we inspect the result after interpreter execution),
- // and we properly interpret the relocation as a relocation pointer offset.
+ // and we properly interpret the provenance as a relocation pointer offset.
alloc.inspect_with_uninit_and_ptr_outside_interpreter(offset..(offset + pointer_size)),
)
.expect("const_alloc_to_llvm: could not read relocation pointer")
@@ -121,7 +120,7 @@ pub fn const_alloc_to_llvm<'ll>(cx: &CodegenCx<'ll, '_>, alloc: ConstAllocation<
}
if alloc.len() >= next_offset {
let range = next_offset..alloc.len();
- // This `inspect` is okay since we have check that it is after all relocations, it is
+ // This `inspect` is okay since we have checked that it is after all provenance, it is
+ // This `inspect` is okay since we have checked that it is after all provenance, it is
// within the bounds of the allocation, and it doesn't affect interpreter execution (we
// inspect the result after interpreter execution).
append_chunks_of_init_and_uninit_bytes(&mut llvals, cx, alloc, range);
@@ -160,7 +159,7 @@ fn check_and_apply_linkage<'ll, 'tcx>(
attrs: &CodegenFnAttrs,
ty: Ty<'tcx>,
sym: &str,
- span_def_id: DefId,
+ def_id: DefId,
) -> &'ll Value {
let llty = cx.layout_of(ty).llvm_type(cx);
if let Some(linkage) = attrs.linkage {
@@ -175,7 +174,7 @@ fn check_and_apply_linkage<'ll, 'tcx>(
cx.layout_of(mt.ty).llvm_type(cx)
} else {
cx.sess().span_fatal(
- cx.tcx.def_span(span_def_id),
+ cx.tcx.def_span(def_id),
"must have type `*const T` or `*mut T` due to `#[linkage]` attribute",
)
};
@@ -194,7 +193,7 @@ fn check_and_apply_linkage<'ll, 'tcx>(
real_name.push_str(sym);
let g2 = cx.define_global(&real_name, llty).unwrap_or_else(|| {
cx.sess().span_fatal(
- cx.tcx.def_span(span_def_id),
+ cx.tcx.def_span(def_id),
&format!("symbol `{}` is already defined", &sym),
)
});
@@ -202,6 +201,10 @@ fn check_and_apply_linkage<'ll, 'tcx>(
llvm::LLVMSetInitializer(g2, g1);
g2
}
+ } else if cx.tcx.sess.target.arch == "x86" &&
+ let Some(dllimport) = common::get_dllimport(cx.tcx, def_id, sym)
+ {
+ cx.declare_global(&common::i686_decorated_name(&dllimport, common::is_mingw_gnu_toolchain(&cx.tcx.sess.target), true), llty)
} else {
// Generate an external declaration.
// FIXME(nagisa): investigate whether it can be changed into define_global
@@ -329,7 +332,10 @@ impl<'ll> CodegenCx<'ll, '_> {
}
}
- if self.use_dll_storage_attrs && self.tcx.is_dllimport_foreign_item(def_id) {
+ if self.use_dll_storage_attrs
+ && let Some(library) = self.tcx.native_library(def_id)
+ && library.kind.is_dllimport()
+ {
// For foreign (native) libs we know the exact storage type to use.
unsafe {
llvm::LLVMSetDLLStorageClass(g, llvm::DLLStorageClass::DllImport);
@@ -475,7 +481,7 @@ impl<'ll> StaticMethods for CodegenCx<'ll, '_> {
//
// We could remove this hack whenever we decide to drop macOS 10.10 support.
if self.tcx.sess.target.is_like_osx {
- // The `inspect` method is okay here because we checked relocations, and
+ // The `inspect` method is okay here because we checked for provenance, and
// because we are doing this access to inspect the final interpreter state
// (not as part of the interpreter execution).
//
@@ -483,7 +489,7 @@ impl<'ll> StaticMethods for CodegenCx<'ll, '_> {
// happens to be zero. Instead, we should only check the value of defined bytes
// and set all undefined bytes to zero if this allocation is headed for the
// BSS.
- let all_bytes_are_zero = alloc.relocations().is_empty()
+ let all_bytes_are_zero = alloc.provenance().is_empty()
&& alloc
.inspect_with_uninit_and_ptr_outside_interpreter(0..alloc.len())
.iter()
@@ -507,9 +513,9 @@ impl<'ll> StaticMethods for CodegenCx<'ll, '_> {
section.as_str().as_ptr().cast(),
section.as_str().len() as c_uint,
);
- assert!(alloc.relocations().is_empty());
+ assert!(alloc.provenance().is_empty());
- // The `inspect` method is okay here because we checked relocations, and
+ // The `inspect` method is okay here because we checked for provenance, and
// because we are doing this access to inspect the final interpreter state (not
// as part of the interpreter execution).
let bytes =
@@ -549,7 +555,7 @@ impl<'ll> StaticMethods for CodegenCx<'ll, '_> {
// `#[used(compiler)]` is explicitly requested. This is to avoid similar breakage
// on other targets, in particular MachO targets have *their* static constructor
// lists broken if `llvm.compiler.used` is emitted rather than llvm.used. However,
- // that check happens when assigning the `CodegenFnAttrFlags` in `rustc_typeck`,
+ // that check happens when assigning the `CodegenFnAttrFlags` in `rustc_hir_analysis`,
// so we don't need to take care of it here.
self.add_compiler_used_global(g);
}
diff --git a/compiler/rustc_codegen_llvm/src/context.rs b/compiler/rustc_codegen_llvm/src/context.rs
index 5857b83f6..79ddfd884 100644
--- a/compiler/rustc_codegen_llvm/src/context.rs
+++ b/compiler/rustc_codegen_llvm/src/context.rs
@@ -142,17 +142,6 @@ pub unsafe fn create_module<'ll>(
let mut target_data_layout = sess.target.data_layout.to_string();
let llvm_version = llvm_util::get_version();
- if llvm_version < (13, 0, 0) {
- if sess.target.arch == "powerpc64" {
- target_data_layout = target_data_layout.replace("-S128", "");
- }
- if sess.target.arch == "wasm32" {
- target_data_layout = "e-m:e-p:32:32-i64:64-n32:64-S128".to_string();
- }
- if sess.target.arch == "wasm64" {
- target_data_layout = "e-m:e-p:64:64-i64:64-n32:64-S128".to_string();
- }
- }
if llvm_version < (14, 0, 0) {
if sess.target.llvm_target == "i686-pc-windows-msvc"
|| sess.target.llvm_target == "i586-pc-windows-msvc"
@@ -165,6 +154,11 @@ pub unsafe fn create_module<'ll>(
target_data_layout = target_data_layout.replace("-p10:8:8-p20:8:8", "");
}
}
+ if llvm_version < (16, 0, 0) {
+ if sess.target.arch == "s390x" {
+ target_data_layout = target_data_layout.replace("-v128:64", "");
+ }
+ }
// Ensure the data-layout values hardcoded remain the defaults.
if sess.target.is_builtin {
@@ -464,7 +458,7 @@ impl<'ll, 'tcx> CodegenCx<'ll, 'tcx> {
self.coverage_cx.as_ref()
}
- fn create_used_variable_impl(&self, name: &'static CStr, values: &[&'ll Value]) {
+ pub(crate) fn create_used_variable_impl(&self, name: &'static CStr, values: &[&'ll Value]) {
let section = cstr!("llvm.metadata");
let array = self.const_array(self.type_ptr_to(self.type_i8()), values);
@@ -562,14 +556,6 @@ impl<'ll, 'tcx> MiscMethods<'tcx> for CodegenCx<'ll, 'tcx> {
self.codegen_unit
}
- fn used_statics(&self) -> &RefCell<Vec<&'ll Value>> {
- &self.used_statics
- }
-
- fn compiler_used_statics(&self) -> &RefCell<Vec<&'ll Value>> {
- &self.compiler_used_statics
- }
-
fn set_frame_pointer_type(&self, llfn: &'ll Value) {
if let Some(attr) = attributes::frame_pointer_type_attr(self) {
attributes::apply_to_llfn(llfn, llvm::AttributePlace::Function, &[attr]);
@@ -583,17 +569,6 @@ impl<'ll, 'tcx> MiscMethods<'tcx> for CodegenCx<'ll, 'tcx> {
attributes::apply_to_llfn(llfn, llvm::AttributePlace::Function, &attrs);
}
- fn create_used_variable(&self) {
- self.create_used_variable_impl(cstr!("llvm.used"), &*self.used_statics.borrow());
- }
-
- fn create_compiler_used_variable(&self) {
- self.create_used_variable_impl(
- cstr!("llvm.compiler.used"),
- &*self.compiler_used_statics.borrow(),
- );
- }
-
fn declare_c_main(&self, fn_type: Self::Type) -> Option<Self::Function> {
if self.get_declared_value("main").is_none() {
Some(self.declare_cfn("main", llvm::UnnamedAddr::Global, fn_type))
@@ -897,6 +872,9 @@ impl<'ll> CodegenCx<'ll, '_> {
ifn!("llvm.dbg.declare", fn(t_metadata, t_metadata) -> void);
ifn!("llvm.dbg.value", fn(t_metadata, t_i64, t_metadata) -> void);
}
+
+ ifn!("llvm.ptrmask", fn(i8p, t_isize) -> i8p);
+
None
}
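The newly declared `llvm.ptrmask` intrinsic masks a pointer's address while keeping its provenance. An address-only analogue in plain Rust (the integer cast below only models the address arithmetic, not the provenance-preserving behaviour of the intrinsic):

```rust
// llvm.ptrmask(p, m) conceptually yields a pointer whose address is
// `addr(p) & m`. This sketch reproduces just the address computation.
fn mask_addr<T>(p: *const T, mask: usize) -> *const T {
    ((p as usize) & mask) as *const T
}

fn main() {
    let x = 0u64;
    let p = &x as *const u64;
    // Align the pointer's address down to a 16-byte boundary.
    let aligned = mask_addr(p, !0usize << 4);
    assert_eq!(aligned as usize, (p as usize) & !0xf);
}
```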
diff --git a/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs b/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs
index 58f391692..433f04320 100644
--- a/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs
+++ b/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs
@@ -16,8 +16,6 @@ use rustc_middle::ty::TyCtxt;
use std::ffi::CString;
-use tracing::debug;
-
/// Generates and exports the Coverage Map.
///
/// Rust Coverage Map generation supports LLVM Coverage Mapping Format versions
@@ -131,7 +129,7 @@ impl CoverageMapGenerator {
// LLVM Coverage Mapping Format version 6 (zero-based encoded as 5)
// requires setting the first filename to the compilation directory.
// Since rustc generates coverage maps with relative paths, the
- // compilation directory can be combined with the the relative paths
+ // compilation directory can be combined with the relative paths
// to get absolute paths, if needed.
let working_dir = tcx
.sess
diff --git a/compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs b/compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs
index 98ba38356..964a632b6 100644
--- a/compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs
+++ b/compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs
@@ -28,7 +28,6 @@ use std::cell::RefCell;
use std::ffi::CString;
use std::iter;
-use tracing::debug;
pub mod mapgen;
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs b/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs
index bd84100e0..163ccd946 100644
--- a/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs
+++ b/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs
@@ -42,7 +42,6 @@ use rustc_span::{self, FileNameDisplayPreference, SourceFile};
use rustc_symbol_mangling::typeid_for_trait_ref;
use rustc_target::abi::{Align, Size};
use smallvec::smallvec;
-use tracing::debug;
use libc::{c_char, c_longlong, c_uint};
use std::borrow::Cow;
@@ -51,7 +50,6 @@ use std::hash::{Hash, Hasher};
use std::iter;
use std::path::{Path, PathBuf};
use std::ptr;
-use tracing::instrument;
impl PartialEq for llvm::Metadata {
fn eq(&self, other: &Self) -> bool {
@@ -114,6 +112,7 @@ macro_rules! return_if_di_node_created_in_meantime {
}
/// Extract size and alignment from a TyAndLayout.
+#[inline]
fn size_and_align_of<'tcx>(ty_and_layout: TyAndLayout<'tcx>) -> (Size, Align) {
(ty_and_layout.size, ty_and_layout.align.abi)
}
@@ -1499,24 +1498,18 @@ fn vcall_visibility_metadata<'ll, 'tcx>(
// If there is not LTO and the visibility in public, we have to assume that the vtable can
// be seen from anywhere. With multiple CGUs, the vtable is quasi-public.
(Lto::No | Lto::ThinLocal, Visibility::Public, _)
- | (Lto::No, Visibility::Restricted(_) | Visibility::Invisible, false) => {
- VCallVisibility::Public
- }
+ | (Lto::No, Visibility::Restricted(_), false) => VCallVisibility::Public,
// With LTO and a quasi-public visibility, the usages of the functions of the vtable are
// all known by the `LinkageUnit`.
// FIXME: LLVM only supports this optimization for `Lto::Fat` currently. Once it also
// supports `Lto::Thin` the `VCallVisibility` may have to be adjusted for those.
(Lto::Fat | Lto::Thin, Visibility::Public, _)
- | (
- Lto::ThinLocal | Lto::Thin | Lto::Fat,
- Visibility::Restricted(_) | Visibility::Invisible,
- false,
- ) => VCallVisibility::LinkageUnit,
+ | (Lto::ThinLocal | Lto::Thin | Lto::Fat, Visibility::Restricted(_), false) => {
+ VCallVisibility::LinkageUnit
+ }
// If there is only one CGU, private vtables can only be seen by that CGU/translation unit
// and therefore we know of all usages of functions in the vtable.
- (_, Visibility::Restricted(_) | Visibility::Invisible, true) => {
- VCallVisibility::TranslationUnit
- }
+ (_, Visibility::Restricted(_), true) => VCallVisibility::TranslationUnit,
};
let trait_ref_typeid = typeid_for_trait_ref(cx.tcx, trait_ref);
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/cpp_like.rs b/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/cpp_like.rs
index d6e2c8ccd..129e336c7 100644
--- a/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/cpp_like.rs
+++ b/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/cpp_like.rs
@@ -1,19 +1,21 @@
use std::borrow::Cow;
use libc::c_uint;
-use rustc_codegen_ssa::debuginfo::{
- type_names::compute_debuginfo_type_name, wants_c_like_enum_debuginfo,
+use rustc_codegen_ssa::{
+ debuginfo::{type_names::compute_debuginfo_type_name, wants_c_like_enum_debuginfo},
+ traits::ConstMethods,
};
+
+use rustc_index::vec::IndexVec;
use rustc_middle::{
bug,
ty::{
self,
layout::{LayoutOf, TyAndLayout},
- util::Discr,
- AdtDef, GeneratorSubsts,
+ AdtDef, GeneratorSubsts, Ty,
},
};
-use rustc_target::abi::{Size, TagEncoding, VariantIdx, Variants};
+use rustc_target::abi::{Align, Endian, Size, TagEncoding, VariantIdx, Variants};
use smallvec::smallvec;
use crate::{
@@ -21,9 +23,9 @@ use crate::{
debuginfo::{
metadata::{
build_field_di_node, closure_saved_names_of_captured_variables,
- enums::tag_base_type,
- file_metadata, generator_layout_and_saved_local_names, size_and_align_of,
- type_map::{self, UniqueTypeId},
+ enums::{tag_base_type, DiscrResult},
+ file_metadata, generator_layout_and_saved_local_names, size_and_align_of, type_di_node,
+ type_map::{self, Stub, UniqueTypeId},
unknown_file_metadata, DINodeCreationResult, SmallVec, NO_GENERICS, NO_SCOPE_METADATA,
UNKNOWN_LINE_NUMBER,
},
@@ -35,59 +37,161 @@ use crate::{
},
};
-/// In CPP-like mode, we generate a union of structs for each variant and an
-/// explicit discriminant field roughly equivalent to the following C/C++ code:
+// The names of the associated constants in each variant wrapper struct.
+// These have to match up with the names being used in `intrinsic.natvis`.
+const ASSOC_CONST_DISCR_NAME: &str = "NAME";
+const ASSOC_CONST_DISCR_EXACT: &str = "DISCR_EXACT";
+const ASSOC_CONST_DISCR_BEGIN: &str = "DISCR_BEGIN";
+const ASSOC_CONST_DISCR_END: &str = "DISCR_END";
+
+const ASSOC_CONST_DISCR128_EXACT_LO: &str = "DISCR128_EXACT_LO";
+const ASSOC_CONST_DISCR128_EXACT_HI: &str = "DISCR128_EXACT_HI";
+const ASSOC_CONST_DISCR128_BEGIN_LO: &str = "DISCR128_BEGIN_LO";
+const ASSOC_CONST_DISCR128_BEGIN_HI: &str = "DISCR128_BEGIN_HI";
+const ASSOC_CONST_DISCR128_END_LO: &str = "DISCR128_END_LO";
+const ASSOC_CONST_DISCR128_END_HI: &str = "DISCR128_END_HI";
+
+// The name of the tag field in the top-level union
+const TAG_FIELD_NAME: &str = "tag";
+const TAG_FIELD_NAME_128_LO: &str = "tag128_lo";
+const TAG_FIELD_NAME_128_HI: &str = "tag128_hi";
+
+// We assign a "virtual" discriminant value to the sole variant of
+// a single-variant enum.
+const SINGLE_VARIANT_VIRTUAL_DISR: u64 = 0;
+
+/// In CPP-like mode, we generate a union with a field for each variant and an
+/// explicit tag field. The field of each variant has a struct type
+/// that encodes the discriminant of the variant and its data layout.
+/// The union also has a nested enumeration type that is only used for encoding
+/// variant names in an efficient way. Its enumerator values do _not_ correspond
+/// to the enum's discriminant values.
+/// It's roughly equivalent to the following C/C++ code:
///
/// ```c
-/// union enum$<{fully-qualified-name}> {
-/// struct {variant 0 name} {
-/// <variant 0 fields>
+/// union enum2$<{fully-qualified-name}> {
+/// struct Variant0 {
+/// struct {name-of-variant-0} {
+/// <variant 0 fields>
+/// } value;
+///
+/// static VariantNames NAME = {name-of-variant-0};
+/// static int_type DISCR_EXACT = {discriminant-of-variant-0};
/// } variant0;
+///
/// <other variant structs>
-/// {name} discriminant;
+///
+/// int_type tag;
+///
+/// enum VariantNames {
+/// <name-of-variant-0> = 0, // The numeric values are variant indices,
+/// <name-of-variant-1> = 1, // not discriminant values.
+/// <name-of-variant-2> = 2,
+/// ...
+/// }
/// }
/// ```
///
-/// As you can see, the type name is wrapped `enum$`. This way we can have a
-/// single NatVis rule for handling all enums.
+/// As you can see, the type name is wrapped in `enum2$<_>`. This way we can
+/// have a single NatVis rule for handling all enums. The `2` in `enum2$<_>`
+/// is an encoding version tag, so that debuggers can decide to decode this
+/// differently than the previous `enum$<_>` encoding emitted by earlier
+/// compiler versions.
///
-/// At the LLVM IR level this looks like
+/// Niche-tag enums have one special variant, usually called the
+/// "untagged variant". This variant has a field that
+/// doubles as the tag of the enum. The variant is active when the value of
+/// that field is within a pre-defined range. Therefore the variant struct
+/// has a `DISCR_BEGIN` and `DISCR_END` field instead of `DISCR_EXACT` in
+/// that case. Both `DISCR_BEGIN` and `DISCR_END` are inclusive bounds.
+/// Note that these ranges can wrap around, so that `DISCR_END < DISCR_BEGIN`.
///
-/// ```txt
-/// DW_TAG_union_type (top-level type for enum)
-/// DW_TAG_member (member for variant 1)
-/// DW_TAG_member (member for variant 2)
-/// DW_TAG_member (member for variant 3)
-/// DW_TAG_structure_type (type of variant 1)
-/// DW_TAG_structure_type (type of variant 2)
-/// DW_TAG_structure_type (type of variant 3)
-/// DW_TAG_enumeration_type (type of tag)
-/// ```
+/// Single-variant enums don't actually have a tag field. In this case we
+/// emit a static tag field (that always has the value 0) so we can use the
+/// same representation (and NatVis).
///
-/// The above encoding applies for enums with a direct tag. For niche-tag we have to do things
-/// differently in order to allow a NatVis visualizer to extract all the information needed:
-/// We generate a union of two fields, one for the dataful variant
-/// and one that just points to the discriminant (which is some field within the dataful variant).
-/// We also create a DW_TAG_enumeration_type DIE that contains tag values for the non-dataful
-/// variants and make the discriminant field that type. We then use NatVis to render the enum type
-/// correctly in Windbg/VS. This will generate debuginfo roughly equivalent to the following C:
+/// For niche-layout enums it's possible to have a 128-bit tag. NatVis, VS, and
+/// WinDbg (the main targets for CPP-like debuginfo at the moment) don't support
+/// 128-bit integers, so all values involved get split into two 64-bit fields.
+/// Instead of the `tag` field, we generate two fields `tag128_lo` and `tag128_hi`,
+/// Instead of `DISCR_EXACT`, we generate `DISCR128_EXACT_LO` and `DISCR128_EXACT_HI`,
+/// and so on.
///
-/// ```c
-/// union enum$<{name}, {min niche}, {max niche}, {dataful variant name}> {
-/// struct <dataful variant name> {
-/// <fields in dataful variant>
-/// } dataful_variant;
-/// enum Discriminant$ {
-/// <non-dataful variants>
-/// } discriminant;
+///
+/// The following pseudocode shows how to decode an enum value in a debugger:
+///
+/// ```text
+///
+/// fn find_active_variant(enum_value) -> (VariantName, VariantValue) {
+/// let is_128_bit = enum_value.has_field("tag128_lo");
+///
+/// if !is_128_bit {
+/// // Note: `tag` can be a static field for enums with only one
+/// // inhabited variant.
+/// let tag = enum_value.field("tag").value;
+///
+/// // For each variant, check if it is a match. Only one of them will match,
+/// // so if we find it we can return it immediately.
+/// for variant_field in enum_value.fields().filter(|f| f.name.starts_with("variant")) {
+/// if variant_field.has_field("DISCR_EXACT") {
+/// // This variant corresponds to a single tag value
+/// if variant_field.field("DISCR_EXACT").value == tag {
+/// return (variant_field.field("NAME"), variant_field.value);
+/// }
+/// } else {
+/// // This is a range variant
+/// let begin = variant_field.field("DISCR_BEGIN");
+/// let end = variant_field.field("DISCR_END");
+///
+/// if is_in_range(tag, begin, end) {
+/// return (variant_field.field("NAME"), variant_field.value);
+/// }
+/// }
+/// }
+/// } else {
+/// // Basically the same as with smaller tags, we just have to
+/// // stitch the values together.
+/// let tag: u128 = (enum_value.field("tag128_lo").value as u128) |
+/// (enum_value.field("tag128_hi").value as u128 << 64);
+///
+/// for variant_field in enum_value.fields().filter(|f| f.name.starts_with("variant")) {
+/// if variant_field.has_field("DISCR128_EXACT_LO") {
+/// let discr_exact = (variant_field.field("DISCR128_EXACT_LO").value as u128) |
+/// (variant_field.field("DISCR128_EXACT_HI").value as u128 << 64);
+///
+/// // This variant corresponds to a single tag value
+/// if discr_exact.value == tag {
+/// return (variant_field.field("NAME"), variant_field.value);
+/// }
+/// } else {
+/// // This is a range variant
+/// let begin = (variant_field.field("DISCR128_BEGIN_LO").value as u128) |
+/// (variant_field.field("DISCR128_BEGIN_HI").value as u128 << 64);
+/// let end = (variant_field.field("DISCR128_END_LO").value as u128) |
+/// (variant_field.field("DISCR128_END_HI").value as u128 << 64);
+///
+/// if is_in_range(tag, begin, end) {
+/// return (variant_field.field("NAME"), variant_field.value);
+/// }
+/// }
+/// }
+/// }
+///
+/// // We should have found an active variant at this point.
+/// unreachable!();
/// }
-/// ```
///
-/// The NatVis in `intrinsic.natvis` matches on the type name `enum$<*, *, *, *>`
-/// and evaluates `this.discriminant`. If the value is between the min niche and max
-/// niche, then the enum is in the dataful variant and `this.dataful_variant` is
-/// rendered. Otherwise, the enum is in one of the non-dataful variants. In that
-/// case, we just need to render the name of the `this.discriminant` enum.
+/// // Check if a value is within the given range
+/// // (where the range might wrap around the value space)
+/// fn is_in_range(value, start, end) -> bool {
+/// if start < end {
+/// value >= start && value <= end
+/// } else {
+/// value >= start || value <= end
+/// }
+/// }
+///
+/// ```
pub(super) fn build_enum_type_di_node<'ll, 'tcx>(
cx: &CodegenCx<'ll, 'tcx>,
unique_type_id: UniqueTypeId<'tcx>,
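The wrap-around range check and the 128-bit tag stitching from the pseudocode above, written out as runnable Rust (this mirrors the doc comment, not any real debugger implementation):

```rust
// Inclusive range check where the range may wrap around the value space,
// as used for the DISCR_BEGIN/DISCR_END bounds of the untagged variant.
fn is_in_range(value: u128, start: u128, end: u128) -> bool {
    if start < end {
        value >= start && value <= end
    } else {
        value >= start || value <= end
    }
}

// Re-stitch a 128-bit tag from the two 64-bit halves (tag128_lo / tag128_hi).
fn stitch_tag(lo: u64, hi: u64) -> u128 {
    (lo as u128) | ((hi as u128) << 64)
}

fn main() {
    // Ordinary range.
    assert!(is_in_range(5, 3, 10));
    // Wrapped range: DISCR_END < DISCR_BEGIN still covers 250..=MAX and 0..=3.
    assert!(is_in_range(1, 250, 3));
    assert!(!is_in_range(100, 250, 3));

    assert_eq!(stitch_tag(0x1122_3344_5566_7788, 0x1), 0x1_1122_3344_5566_7788);
}
```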
@@ -135,27 +239,28 @@ pub(super) fn build_enum_type_di_node<'ll, 'tcx>(
ref variants,
tag_field,
..
- } => build_union_fields_for_direct_tag_enum(
+ } => build_union_fields_for_enum(
cx,
enum_adt_def,
enum_type_and_layout,
enum_type_di_node,
- &mut variants.indices(),
+ variants.indices(),
tag_field,
+ None,
),
Variants::Multiple {
- tag_encoding: TagEncoding::Niche { dataful_variant, .. },
+ tag_encoding: TagEncoding::Niche { untagged_variant, .. },
ref variants,
tag_field,
..
- } => build_union_fields_for_niche_tag_enum(
+ } => build_union_fields_for_enum(
cx,
enum_adt_def,
enum_type_and_layout,
enum_type_di_node,
- dataful_variant,
- &mut variants.indices(),
+ variants.indices(),
tag_field,
+ Some(untagged_variant),
),
}
},
@@ -217,137 +322,344 @@ fn build_single_variant_union_fields<'ll, 'tcx>(
let variant_layout = enum_type_and_layout.for_variant(cx, variant_index);
let variant_struct_type_di_node = super::build_enum_variant_struct_type_di_node(
cx,
- enum_type_and_layout.ty,
+ enum_type_and_layout,
enum_type_di_node,
variant_index,
enum_adt_def.variant(variant_index),
variant_layout,
);
- // NOTE: The field name of the union is the same as the variant name, not "variant0".
- let variant_name = enum_adt_def.variant(variant_index).name.as_str();
+ let tag_base_type = cx.tcx.types.u32;
+ let tag_base_type_di_node = type_di_node(cx, tag_base_type);
+ let tag_base_type_align = cx.align_of(tag_base_type);
+
+ let variant_names_type_di_node = build_variant_names_type_di_node(
+ cx,
+ enum_type_di_node,
+ std::iter::once((
+ variant_index,
+ Cow::from(enum_adt_def.variant(variant_index).name.as_str()),
+ )),
+ );
- smallvec![build_field_di_node(
+ let variant_struct_type_wrapper_di_node = build_variant_struct_wrapper_type_di_node(
cx,
+ enum_type_and_layout,
enum_type_di_node,
- variant_name,
- // NOTE: We use the size and align of the entire type, not from variant_layout
- // since the later is sometimes smaller (if it has fewer fields).
- size_and_align_of(enum_type_and_layout),
- Size::ZERO,
- DIFlags::FlagZero,
+ variant_index,
+ None,
variant_struct_type_di_node,
- )]
+ variant_names_type_di_node,
+ tag_base_type_di_node,
+ tag_base_type,
+ DiscrResult::NoDiscriminant,
+ );
+
+ smallvec![
+ build_field_di_node(
+ cx,
+ enum_type_di_node,
+ &variant_union_field_name(variant_index),
+ // NOTE: We use the size and align of the entire type, not from variant_layout
+ // since the later is sometimes smaller (if it has fewer fields).
+ size_and_align_of(enum_type_and_layout),
+ Size::ZERO,
+ DIFlags::FlagZero,
+ variant_struct_type_wrapper_di_node,
+ ),
+ unsafe {
+ llvm::LLVMRustDIBuilderCreateStaticMemberType(
+ DIB(cx),
+ enum_type_di_node,
+ TAG_FIELD_NAME.as_ptr().cast(),
+ TAG_FIELD_NAME.len(),
+ unknown_file_metadata(cx),
+ UNKNOWN_LINE_NUMBER,
+ variant_names_type_di_node,
+ DIFlags::FlagZero,
+ Some(cx.const_u64(SINGLE_VARIANT_VIRTUAL_DISR)),
+ tag_base_type_align.bits() as u32,
+ )
+ }
+ ]
}
-fn build_union_fields_for_direct_tag_enum<'ll, 'tcx>(
+fn build_union_fields_for_enum<'ll, 'tcx>(
cx: &CodegenCx<'ll, 'tcx>,
enum_adt_def: AdtDef<'tcx>,
enum_type_and_layout: TyAndLayout<'tcx>,
enum_type_di_node: &'ll DIType,
- variant_indices: &mut dyn Iterator<Item = VariantIdx>,
+ variant_indices: impl Iterator<Item = VariantIdx> + Clone,
tag_field: usize,
+ untagged_variant_index: Option<VariantIdx>,
) -> SmallVec<&'ll DIType> {
+ let tag_base_type = super::tag_base_type(cx, enum_type_and_layout);
+
+ let variant_names_type_di_node = build_variant_names_type_di_node(
+ cx,
+ enum_type_di_node,
+ variant_indices.clone().map(|variant_index| {
+ let variant_name = Cow::from(enum_adt_def.variant(variant_index).name.as_str());
+ (variant_index, variant_name)
+ }),
+ );
+
let variant_field_infos: SmallVec<VariantFieldInfo<'ll>> = variant_indices
.map(|variant_index| {
let variant_layout = enum_type_and_layout.for_variant(cx, variant_index);
+ let variant_def = enum_adt_def.variant(variant_index);
+
+ let variant_struct_type_di_node = super::build_enum_variant_struct_type_di_node(
+ cx,
+ enum_type_and_layout,
+ enum_type_di_node,
+ variant_index,
+ variant_def,
+ variant_layout,
+ );
+
VariantFieldInfo {
variant_index,
- variant_struct_type_di_node: super::build_enum_variant_struct_type_di_node(
- cx,
- enum_type_and_layout.ty,
- enum_type_di_node,
- variant_index,
- enum_adt_def.variant(variant_index),
- variant_layout,
- ),
+ variant_struct_type_di_node,
source_info: None,
+ discr: super::compute_discriminant_value(cx, enum_type_and_layout, variant_index),
}
})
.collect();
- let discr_type_name = cx.tcx.item_name(enum_adt_def.did());
- let tag_base_type = super::tag_base_type(cx, enum_type_and_layout);
- let discr_type_di_node = super::build_enumeration_type_di_node(
- cx,
- discr_type_name.as_str(),
- tag_base_type,
- &mut enum_adt_def.discriminants(cx.tcx).map(|(variant_index, discr)| {
- (discr, Cow::from(enum_adt_def.variant(variant_index).name.as_str()))
- }),
- enum_type_di_node,
- );
-
build_union_fields_for_direct_tag_enum_or_generator(
cx,
enum_type_and_layout,
enum_type_di_node,
&variant_field_infos,
- discr_type_di_node,
+ variant_names_type_di_node,
+ tag_base_type,
tag_field,
+ untagged_variant_index,
)
}
-fn build_union_fields_for_niche_tag_enum<'ll, 'tcx>(
+// The base type of the VariantNames DW_AT_enumeration_type is always the same.
+// It has nothing to do with the tag of the enum and just has to be big enough
+// to hold all variant names.
+fn variant_names_enum_base_type<'ll, 'tcx>(cx: &CodegenCx<'ll, 'tcx>) -> Ty<'tcx> {
+ cx.tcx.types.u32
+}
+
+/// This function builds a DW_AT_enumeration_type that contains an entry for
+/// each variant. Note that this has nothing to do with the discriminant. The
+/// numeric value of each enumerator corresponds to the variant index. The
+/// type is only used for efficiently encoding the name of each variant in
+/// debuginfo.
+fn build_variant_names_type_di_node<'ll, 'tcx>(
cx: &CodegenCx<'ll, 'tcx>,
- enum_adt_def: AdtDef<'tcx>,
- enum_type_and_layout: TyAndLayout<'tcx>,
- enum_type_di_node: &'ll DIType,
- dataful_variant_index: VariantIdx,
- variant_indices: &mut dyn Iterator<Item = VariantIdx>,
- tag_field: usize,
-) -> SmallVec<&'ll DIType> {
- let dataful_variant_struct_type_di_node = super::build_enum_variant_struct_type_di_node(
+ containing_scope: &'ll DIType,
+ variants: impl Iterator<Item = (VariantIdx, Cow<'tcx, str>)>,
+) -> &'ll DIType {
+ // Create an enumerator for each variant.
+ super::build_enumeration_type_di_node(
cx,
- enum_type_and_layout.ty,
- enum_type_di_node,
- dataful_variant_index,
- &enum_adt_def.variant(dataful_variant_index),
- enum_type_and_layout.for_variant(cx, dataful_variant_index),
- );
+ "VariantNames",
+ variant_names_enum_base_type(cx),
+ variants.map(|(variant_index, variant_name)| (variant_name, variant_index.as_u32() as u64)),
+ containing_scope,
+ )
+}
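To make the index-versus-discriminant distinction concrete, here is a hypothetical enum and the enumerator values its VariantNames type would carry:

```rust
// Hypothetical enum: the discriminants are 200 and 404, but the variant
// indices are 0 and 1.
#[allow(dead_code)]
enum Status {
    Ok = 200,
    NotFound = 404,
}

fn main() {
    // The VariantNames enumeration described above would encode
    //   Ok = 0, NotFound = 1   (variant indices),
    // not the discriminant values checked below.
    assert_eq!(Status::Ok as u32, 200);
    assert_eq!(Status::NotFound as u32, 404);
}
```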
- let tag_base_type = super::tag_base_type(cx, enum_type_and_layout);
- // Create an DW_TAG_enumerator for each variant except the dataful one.
- let discr_type_di_node = super::build_enumeration_type_di_node(
+fn build_variant_struct_wrapper_type_di_node<'ll, 'tcx>(
+ cx: &CodegenCx<'ll, 'tcx>,
+ enum_or_generator_type_and_layout: TyAndLayout<'tcx>,
+ enum_or_generator_type_di_node: &'ll DIType,
+ variant_index: VariantIdx,
+ untagged_variant_index: Option<VariantIdx>,
+ variant_struct_type_di_node: &'ll DIType,
+ variant_names_type_di_node: &'ll DIType,
+ tag_base_type_di_node: &'ll DIType,
+ tag_base_type: Ty<'tcx>,
+ discr: DiscrResult,
+) -> &'ll DIType {
+ type_map::build_type_with_children(
cx,
- "Discriminant$",
- tag_base_type,
- &mut variant_indices.filter_map(|variant_index| {
- if let Some(discr_val) =
- super::compute_discriminant_value(cx, enum_type_and_layout, variant_index)
- {
- let discr = Discr { val: discr_val as u128, ty: tag_base_type };
- let variant_name = Cow::from(enum_adt_def.variant(variant_index).name.as_str());
- Some((discr, variant_name))
- } else {
- debug_assert_eq!(variant_index, dataful_variant_index);
- None
- }
- }),
- enum_type_di_node,
- );
-
- smallvec![
- build_field_di_node(
- cx,
- enum_type_di_node,
- "dataful_variant",
- size_and_align_of(enum_type_and_layout),
- Size::ZERO,
- DIFlags::FlagZero,
- dataful_variant_struct_type_di_node,
- ),
- build_field_di_node(
+ type_map::stub(
cx,
- enum_type_di_node,
- "discriminant",
- cx.size_and_align_of(tag_base_type),
- enum_type_and_layout.fields.offset(tag_field),
+ Stub::Struct,
+ UniqueTypeId::for_enum_variant_struct_type_wrapper(
+ cx.tcx,
+ enum_or_generator_type_and_layout.ty,
+ variant_index,
+ ),
+ &variant_struct_wrapper_type_name(variant_index),
+ // NOTE: We use size and align of enum_type, not from variant_layout:
+ size_and_align_of(enum_or_generator_type_and_layout),
+ Some(enum_or_generator_type_di_node),
DIFlags::FlagZero,
- discr_type_di_node,
),
- ]
+ |cx, wrapper_struct_type_di_node| {
+ enum DiscrKind {
+ Exact(u64),
+ Exact128(u128),
+ Range(u64, u64),
+ Range128(u128, u128),
+ }
+
+ let (tag_base_type_size, tag_base_type_align) = cx.size_and_align_of(tag_base_type);
+ let is_128_bits = tag_base_type_size.bits() > 64;
+
+ let discr = match discr {
+ DiscrResult::NoDiscriminant => DiscrKind::Exact(SINGLE_VARIANT_VIRTUAL_DISR),
+ DiscrResult::Value(discr_val) => {
+ if is_128_bits {
+ DiscrKind::Exact128(discr_val)
+ } else {
+ debug_assert_eq!(discr_val, discr_val as u64 as u128);
+ DiscrKind::Exact(discr_val as u64)
+ }
+ }
+ DiscrResult::Range(min, max) => {
+ assert_eq!(Some(variant_index), untagged_variant_index);
+ if is_128_bits {
+ DiscrKind::Range128(min, max)
+ } else {
+ debug_assert_eq!(min, min as u64 as u128);
+ debug_assert_eq!(max, max as u64 as u128);
+ DiscrKind::Range(min as u64, max as u64)
+ }
+ }
+ };
+
+ let mut fields = SmallVec::new();
+
+ // We always have a field for the value
+ fields.push(build_field_di_node(
+ cx,
+ wrapper_struct_type_di_node,
+ "value",
+ size_and_align_of(enum_or_generator_type_and_layout),
+ Size::ZERO,
+ DIFlags::FlagZero,
+ variant_struct_type_di_node,
+ ));
+
+ let build_assoc_const =
+ |name: &str, type_di_node: &'ll DIType, value: u64, align: Align| unsafe {
+ llvm::LLVMRustDIBuilderCreateStaticMemberType(
+ DIB(cx),
+ wrapper_struct_type_di_node,
+ name.as_ptr().cast(),
+ name.len(),
+ unknown_file_metadata(cx),
+ UNKNOWN_LINE_NUMBER,
+ type_di_node,
+ DIFlags::FlagZero,
+ Some(cx.const_u64(value)),
+ align.bits() as u32,
+ )
+ };
+
+ // We also always have an associated constant holding the variant's index,
+ // typed as the VariantNames enumeration built above.
+ fields.push(build_assoc_const(
+ ASSOC_CONST_DISCR_NAME,
+ variant_names_type_di_node,
+ variant_index.as_u32() as u64,
+ cx.align_of(variant_names_enum_base_type(cx)),
+ ));
+
+ // Emit the discriminant value (or range) corresponding to the variant.
+ match discr {
+ DiscrKind::Exact(discr_val) => {
+ fields.push(build_assoc_const(
+ ASSOC_CONST_DISCR_EXACT,
+ tag_base_type_di_node,
+ discr_val,
+ tag_base_type_align,
+ ));
+ }
+ DiscrKind::Exact128(discr_val) => {
+ let align = cx.align_of(cx.tcx.types.u64);
+ let type_di_node = type_di_node(cx, cx.tcx.types.u64);
+ let Split128 { hi, lo } = split_128(discr_val);
+
+ fields.push(build_assoc_const(
+ ASSOC_CONST_DISCR128_EXACT_LO,
+ type_di_node,
+ lo,
+ align,
+ ));
+
+ fields.push(build_assoc_const(
+ ASSOC_CONST_DISCR128_EXACT_HI,
+ type_di_node,
+ hi,
+ align,
+ ));
+ }
+ DiscrKind::Range(begin, end) => {
+ fields.push(build_assoc_const(
+ ASSOC_CONST_DISCR_BEGIN,
+ tag_base_type_di_node,
+ begin,
+ tag_base_type_align,
+ ));
+
+ fields.push(build_assoc_const(
+ ASSOC_CONST_DISCR_END,
+ tag_base_type_di_node,
+ end,
+ tag_base_type_align,
+ ));
+ }
+ DiscrKind::Range128(begin, end) => {
+ let align = cx.align_of(cx.tcx.types.u64);
+ let type_di_node = type_di_node(cx, cx.tcx.types.u64);
+ let Split128 { hi: begin_hi, lo: begin_lo } = split_128(begin);
+ let Split128 { hi: end_hi, lo: end_lo } = split_128(end);
+
+ fields.push(build_assoc_const(
+ ASSOC_CONST_DISCR128_BEGIN_HI,
+ type_di_node,
+ begin_hi,
+ align,
+ ));
+
+ fields.push(build_assoc_const(
+ ASSOC_CONST_DISCR128_BEGIN_LO,
+ type_di_node,
+ begin_lo,
+ align,
+ ));
+
+ fields.push(build_assoc_const(
+ ASSOC_CONST_DISCR128_END_HI,
+ type_di_node,
+ end_hi,
+ align,
+ ));
+
+ fields.push(build_assoc_const(
+ ASSOC_CONST_DISCR128_END_LO,
+ type_di_node,
+ end_lo,
+ align,
+ ));
+ }
+ }
+
+ fields
+ },
+ NO_GENERICS,
+ )
+ .di_node
+}
+
+struct Split128 {
+ hi: u64,
+ lo: u64,
+}
+
+fn split_128(value: u128) -> Split128 {
+ Split128 { hi: (value >> 64) as u64, lo: value as u64 }
}
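// Editorial sketch, not part of this patch: split_128 halves a 128-bit value so
// that each half fits into a 64-bit associated constant; recombining the halves
// yields the original value.
fn split_128_roundtrip_sketch() {
    let value: u128 = 0x0123_4567_89ab_cdef_fedc_ba98_7654_3210;
    let hi = (value >> 64) as u64;
    let lo = value as u64;
    assert_eq!(((hi as u128) << 64) | (lo as u128), value);
}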
fn build_union_fields_for_direct_tag_generator<'ll, 'tcx>(
@@ -369,6 +681,29 @@ fn build_union_fields_for_direct_tag_generator<'ll, 'tcx>(
let common_upvar_names = closure_saved_names_of_captured_variables(cx.tcx, generator_def_id);
let variant_range = generator_substs.variant_range(generator_def_id, cx.tcx);
+ let variant_count = (variant_range.start.as_u32()..variant_range.end.as_u32()).len();
+
+ let tag_base_type = tag_base_type(cx, generator_type_and_layout);
+
+ let variant_names_type_di_node = build_variant_names_type_di_node(
+ cx,
+ generator_type_di_node,
+ variant_range
+ .clone()
+ .map(|variant_index| (variant_index, GeneratorSubsts::variant_name(variant_index))),
+ );
+
+ let discriminants: IndexVec<VariantIdx, DiscrResult> = {
+ let discriminants_iter = generator_substs.discriminants(generator_def_id, cx.tcx);
+ let mut discriminants: IndexVec<VariantIdx, DiscrResult> =
+ IndexVec::with_capacity(variant_count);
+ for (variant_index, discr) in discriminants_iter {
+ // Assert that the index in the IndexVec matches up with the given VariantIdx.
+ assert_eq!(variant_index, discriminants.next_index());
+ discriminants.push(DiscrResult::Value(discr.val));
+ }
+ discriminants
+ };
// Build the type node for each field.
let variant_field_infos: SmallVec<VariantFieldInfo<'ll>> = variant_range
@@ -391,29 +726,24 @@ fn build_union_fields_for_direct_tag_generator<'ll, 'tcx>(
None
};
- VariantFieldInfo { variant_index, variant_struct_type_di_node, source_info }
+ VariantFieldInfo {
+ variant_index,
+ variant_struct_type_di_node,
+ source_info,
+ discr: discriminants[variant_index],
+ }
})
.collect();
- let tag_base_type = tag_base_type(cx, generator_type_and_layout);
- let discr_type_name = "Discriminant$";
- let discr_type_di_node = super::build_enumeration_type_di_node(
- cx,
- discr_type_name,
- tag_base_type,
- &mut generator_substs
- .discriminants(generator_def_id, cx.tcx)
- .map(|(variant_index, discr)| (discr, GeneratorSubsts::variant_name(variant_index))),
- generator_type_di_node,
- );
-
build_union_fields_for_direct_tag_enum_or_generator(
cx,
generator_type_and_layout,
generator_type_di_node,
&variant_field_infos[..],
- discr_type_di_node,
+ variant_names_type_di_node,
+ tag_base_type,
tag_field,
+ None,
)
}
@@ -425,8 +755,11 @@ fn build_union_fields_for_direct_tag_enum_or_generator<'ll, 'tcx>(
enum_type_di_node: &'ll DIType,
variant_field_infos: &[VariantFieldInfo<'ll>],
discr_type_di_node: &'ll DIType,
+ tag_base_type: Ty<'tcx>,
tag_field: usize,
+ untagged_variant_index: Option<VariantIdx>,
) -> SmallVec<&'ll DIType> {
+ let tag_base_type_di_node = type_di_node(cx, tag_base_type);
let mut unions_fields = SmallVec::with_capacity(variant_field_infos.len() + 1);
// We create a field in the union for each variant ...
@@ -438,6 +771,19 @@ fn build_union_fields_for_direct_tag_enum_or_generator<'ll, 'tcx>(
let field_name = variant_union_field_name(variant_member_info.variant_index);
let (size, align) = size_and_align_of(enum_type_and_layout);
+ let variant_struct_type_wrapper = build_variant_struct_wrapper_type_di_node(
+ cx,
+ enum_type_and_layout,
+ enum_type_di_node,
+ variant_member_info.variant_index,
+ untagged_variant_index,
+ variant_member_info.variant_struct_type_di_node,
+ discr_type_di_node,
+ tag_base_type_di_node,
+ tag_base_type,
+ variant_member_info.discr,
+ );
+
// We use LLVMRustDIBuilderCreateMemberType() directly because
// the build_field_di_node() function does not support specifying a source location,
// which is something that we don't do anywhere else.
@@ -456,7 +802,7 @@ fn build_union_fields_for_direct_tag_enum_or_generator<'ll, 'tcx>(
// Union fields are always at offset zero
Size::ZERO.bits(),
DIFlags::FlagZero,
- variant_member_info.variant_struct_type_di_node,
+ variant_struct_type_wrapper,
)
}
}));
@@ -466,16 +812,53 @@ fn build_union_fields_for_direct_tag_enum_or_generator<'ll, 'tcx>(
cx.size_and_align_of(super::tag_base_type(cx, enum_type_and_layout))
);
- // ... and a field for the discriminant.
- unions_fields.push(build_field_di_node(
- cx,
- enum_type_di_node,
- "discriminant",
- cx.size_and_align_of(enum_type_and_layout.field(cx, tag_field).ty),
- enum_type_and_layout.fields.offset(tag_field),
- DIFlags::FlagZero,
- discr_type_di_node,
- ));
+ // ... and a field for the tag. If the tag is 128 bits wide, this will actually
+ // be two 64-bit fields.
+ let is_128_bits = cx.size_of(tag_base_type).bits() > 64;
+
+ if is_128_bits {
+ let type_di_node = type_di_node(cx, cx.tcx.types.u64);
+ let size_and_align = cx.size_and_align_of(cx.tcx.types.u64);
+
+ let (lo_offset, hi_offset) = match cx.tcx.data_layout.endian {
+ Endian::Little => (0, 8),
+ Endian::Big => (8, 0),
+ };
+
+ let tag_field_offset = enum_type_and_layout.fields.offset(tag_field).bytes();
+ let lo_offset = Size::from_bytes(tag_field_offset + lo_offset);
+ let hi_offset = Size::from_bytes(tag_field_offset + hi_offset);
+
+ unions_fields.push(build_field_di_node(
+ cx,
+ enum_type_di_node,
+ TAG_FIELD_NAME_128_LO,
+ size_and_align,
+ lo_offset,
+ DIFlags::FlagZero,
+ type_di_node,
+ ));
+
+ unions_fields.push(build_field_di_node(
+ cx,
+ enum_type_di_node,
+ TAG_FIELD_NAME_128_HI,
+ size_and_align,
+ hi_offset,
+ DIFlags::FlagZero,
+ type_di_node,
+ ));
+ } else {
+ unions_fields.push(build_field_di_node(
+ cx,
+ enum_type_di_node,
+ TAG_FIELD_NAME,
+ cx.size_and_align_of(enum_type_and_layout.field(cx, tag_field).ty),
+ enum_type_and_layout.fields.offset(tag_field),
+ DIFlags::FlagZero,
+ tag_base_type_di_node,
+ ));
+ }
unions_fields
}
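// Editorial sketch, not part of this patch: for a 128-bit tag the two 64-bit
// halves become separate union fields, and which half sits at the lower byte
// offset depends on the target endianness, mirroring the match above.
fn tag128_field_offsets_sketch(tag_field_offset: u64, little_endian: bool) -> (u64, u64) {
    let (lo, hi) = if little_endian { (0, 8) } else { (8, 0) };
    (tag_field_offset + lo, tag_field_offset + hi) // (lo_offset, hi_offset) in bytes
}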
@@ -485,6 +868,7 @@ struct VariantFieldInfo<'ll> {
variant_index: VariantIdx,
variant_struct_type_di_node: &'ll DIType,
source_info: Option<(&'ll DIFile, c_uint)>,
+ discr: DiscrResult,
}
fn variant_union_field_name(variant_index: VariantIdx) -> Cow<'static, str> {
@@ -512,3 +896,29 @@ fn variant_union_field_name(variant_index: VariantIdx) -> Cow<'static, str> {
.map(|&s| Cow::from(s))
.unwrap_or_else(|| format!("variant{}", variant_index.as_usize()).into())
}
+
+fn variant_struct_wrapper_type_name(variant_index: VariantIdx) -> Cow<'static, str> {
+ const PRE_ALLOCATED: [&str; 16] = [
+ "Variant0",
+ "Variant1",
+ "Variant2",
+ "Variant3",
+ "Variant4",
+ "Variant5",
+ "Variant6",
+ "Variant7",
+ "Variant8",
+ "Variant9",
+ "Variant10",
+ "Variant11",
+ "Variant12",
+ "Variant13",
+ "Variant14",
+ "Variant15",
+ ];
+
+ PRE_ALLOCATED
+ .get(variant_index.as_usize())
+ .map(|&s| Cow::from(s))
+ .unwrap_or_else(|| format!("Variant{}", variant_index.as_usize()).into())
+}
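// Editorial sketch, not part of this patch: the lookup above hands out a
// borrowed &'static str for the pre-allocated range and only allocates an
// owned String for larger variant indices.
use std::borrow::Cow;
fn wrapper_name_sketch(index: usize) -> Cow<'static, str> {
    const PRE_ALLOCATED: [&str; 2] = ["Variant0", "Variant1"];
    PRE_ALLOCATED
        .get(index)
        .map(|&s| Cow::from(s))
        .unwrap_or_else(|| format!("Variant{}", index).into())
}
// wrapper_name_sketch(1) is Cow::Borrowed("Variant1"); wrapper_name_sketch(20) is Cow::Owned("Variant20").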
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/mod.rs b/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/mod.rs
index 73e01d045..14044d0f9 100644
--- a/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/mod.rs
+++ b/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/mod.rs
@@ -10,7 +10,6 @@ use rustc_middle::{
ty::{
self,
layout::{IntegerExt, LayoutOf, PrimitiveExt, TyAndLayout},
- util::Discr,
AdtDef, GeneratorSubsts, Ty, VariantDef,
},
};
@@ -90,8 +89,11 @@ fn build_c_style_enum_di_node<'ll, 'tcx>(
cx,
&compute_debuginfo_type_name(cx.tcx, enum_type_and_layout.ty, false),
tag_base_type(cx, enum_type_and_layout),
- &mut enum_adt_def.discriminants(cx.tcx).map(|(variant_index, discr)| {
- (discr, Cow::from(enum_adt_def.variant(variant_index).name.as_str()))
+ enum_adt_def.discriminants(cx.tcx).map(|(variant_index, discr)| {
+ let name = Cow::from(enum_adt_def.variant(variant_index).name.as_str());
+ // Is there anything we can do to support 128-bit C-Style enums?
+ let value = discr.val as u64;
+ (name, value)
}),
containing_scope,
),
@@ -152,7 +154,7 @@ fn build_enumeration_type_di_node<'ll, 'tcx>(
cx: &CodegenCx<'ll, 'tcx>,
type_name: &str,
base_type: Ty<'tcx>,
- variants: &mut dyn Iterator<Item = (Discr<'tcx>, Cow<'tcx, str>)>,
+ enumerators: impl Iterator<Item = (Cow<'tcx, str>, u64)>,
containing_scope: &'ll DIType,
) -> &'ll DIType {
let is_unsigned = match base_type.kind() {
@@ -161,18 +163,15 @@ fn build_enumeration_type_di_node<'ll, 'tcx>(
_ => bug!("build_enumeration_type_di_node() called with non-integer tag type."),
};
- let enumerator_di_nodes: SmallVec<Option<&'ll DIType>> = variants
- .map(|(discr, variant_name)| {
- unsafe {
- Some(llvm::LLVMRustDIBuilderCreateEnumerator(
- DIB(cx),
- variant_name.as_ptr().cast(),
- variant_name.len(),
- // FIXME: what if enumeration has i128 discriminant?
- discr.val as i64,
- is_unsigned,
- ))
- }
+ let enumerator_di_nodes: SmallVec<Option<&'ll DIType>> = enumerators
+ .map(|(name, value)| unsafe {
+ Some(llvm::LLVMRustDIBuilderCreateEnumerator(
+ DIB(cx),
+ name.as_ptr().cast(),
+ name.len(),
+ value as i64,
+ is_unsigned,
+ ))
})
.collect();
@@ -247,23 +246,27 @@ fn build_enumeration_type_di_node<'ll, 'tcx>(
/// and a DW_TAG_member for each field (but not the discriminant).
fn build_enum_variant_struct_type_di_node<'ll, 'tcx>(
cx: &CodegenCx<'ll, 'tcx>,
- enum_type: Ty<'tcx>,
+ enum_type_and_layout: TyAndLayout<'tcx>,
enum_type_di_node: &'ll DIType,
variant_index: VariantIdx,
variant_def: &VariantDef,
variant_layout: TyAndLayout<'tcx>,
) -> &'ll DIType {
- debug_assert_eq!(variant_layout.ty, enum_type);
+ debug_assert_eq!(variant_layout.ty, enum_type_and_layout.ty);
type_map::build_type_with_children(
cx,
type_map::stub(
cx,
Stub::Struct,
- UniqueTypeId::for_enum_variant_struct_type(cx.tcx, enum_type, variant_index),
+ UniqueTypeId::for_enum_variant_struct_type(
+ cx.tcx,
+ enum_type_and_layout.ty,
+ variant_index,
+ ),
variant_def.name.as_str(),
// NOTE: We use size and align of enum_type, not from variant_layout:
- cx.size_and_align_of(enum_type),
+ size_and_align_of(enum_type_and_layout),
Some(enum_type_di_node),
DIFlags::FlagZero,
),
@@ -290,9 +293,9 @@ fn build_enum_variant_struct_type_di_node<'ll, 'tcx>(
type_di_node(cx, field_layout.ty),
)
})
- .collect()
+ .collect::<SmallVec<_>>()
},
- |cx| build_generic_type_param_di_nodes(cx, enum_type),
+ |cx| build_generic_type_param_di_nodes(cx, enum_type_and_layout.ty),
)
.di_node
}
@@ -398,39 +401,60 @@ pub fn build_generator_variant_struct_type_di_node<'ll, 'tcx>(
.di_node
}
+#[derive(Copy, Clone)]
+enum DiscrResult {
+ NoDiscriminant,
+ Value(u128),
+ Range(u128, u128),
+}
+
+impl DiscrResult {
+ fn opt_single_val(&self) -> Option<u128> {
+ if let Self::Value(d) = *self { Some(d) } else { None }
+ }
+}
+
/// Returns the discriminant value corresponding to the variant index.
///
/// Will return `None` if there are fewer than two variants (because then the enum won't have
-/// a tag, and if this is the dataful variant of a niche-layout enum (because then there is no
+/// a tag), or if this is the untagged variant of a niche-layout enum (because then there is no
/// single discriminant value).
fn compute_discriminant_value<'ll, 'tcx>(
cx: &CodegenCx<'ll, 'tcx>,
enum_type_and_layout: TyAndLayout<'tcx>,
variant_index: VariantIdx,
-) -> Option<u64> {
+) -> DiscrResult {
match enum_type_and_layout.layout.variants() {
- &Variants::Single { .. } => None,
- &Variants::Multiple { tag_encoding: TagEncoding::Direct, .. } => Some(
- enum_type_and_layout.ty.discriminant_for_variant(cx.tcx, variant_index).unwrap().val
- as u64,
+ &Variants::Single { .. } => DiscrResult::NoDiscriminant,
+ &Variants::Multiple { tag_encoding: TagEncoding::Direct, .. } => DiscrResult::Value(
+ enum_type_and_layout.ty.discriminant_for_variant(cx.tcx, variant_index).unwrap().val,
),
&Variants::Multiple {
- tag_encoding: TagEncoding::Niche { ref niche_variants, niche_start, dataful_variant },
+ tag_encoding: TagEncoding::Niche { ref niche_variants, niche_start, untagged_variant },
tag,
..
} => {
- if variant_index == dataful_variant {
- None
+ if variant_index == untagged_variant {
+ let valid_range = enum_type_and_layout
+ .for_variant(cx, variant_index)
+ .largest_niche
+ .as_ref()
+ .unwrap()
+ .valid_range;
+
+ let min = valid_range.start.min(valid_range.end);
+ let min = tag.size(cx).truncate(min);
+
+ let max = valid_range.start.max(valid_range.end);
+ let max = tag.size(cx).truncate(max);
+
+ DiscrResult::Range(min, max)
} else {
let value = (variant_index.as_u32() as u128)
.wrapping_sub(niche_variants.start().as_u32() as u128)
.wrapping_add(niche_start);
let value = tag.size(cx).truncate(value);
- // NOTE(eddyb) do *NOT* remove this assert, until
- // we pass the full 128-bit value to LLVM, otherwise
- // truncation will be silent and remain undetected.
- assert_eq!(value as u64 as u128, value);
- Some(value as u64)
+ DiscrResult::Value(value)
}
}
}
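// Editorial sketch, not part of this patch: for a tagged variant of a
// niche-layout enum, the encoded tag value is the variant index shifted into
// the niche range and truncated to the tag size; shown here for a hypothetical
// 8-bit tag.
fn niche_tag_value_sketch(variant_index: u32, niche_variants_start: u32, niche_start: u128) -> u8 {
    let value = (variant_index as u128)
        .wrapping_sub(niche_variants_start as u128)
        .wrapping_add(niche_start);
    value as u8 // truncation to the assumed 8-bit tag size
}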
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/native.rs b/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/native.rs
index f1935e0ec..becbccc43 100644
--- a/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/native.rs
+++ b/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/native.rs
@@ -88,7 +88,7 @@ pub(super) fn build_enum_type_di_node<'ll, 'tcx>(
variant_name: Cow::from(enum_adt_def.variant(variant_index).name.as_str()),
variant_struct_type_di_node: super::build_enum_variant_struct_type_di_node(
cx,
- enum_type,
+ enum_type_and_layout,
enum_type_di_node,
variant_index,
enum_adt_def.variant(variant_index),
@@ -378,7 +378,7 @@ fn build_discr_member_di_node<'ll, 'tcx>(
///
/// The DW_AT_discr_value is optional, and is omitted if
/// - This is the only variant of a univariant enum (i.e. there is no discriminant)
-/// - This is the "dataful" variant of a niche-layout enum
+/// - This is the "untagged" variant of a niche-layout enum
/// (where only the other variants are identified by a single value)
///
/// There is only ever a single member, the type of which is a struct that describes the
@@ -413,7 +413,13 @@ fn build_enum_variant_member_di_node<'ll, 'tcx>(
enum_type_and_layout.size.bits(),
enum_type_and_layout.align.abi.bits() as u32,
Size::ZERO.bits(),
- discr_value.map(|v| cx.const_u64(v)),
+ discr_value.opt_single_val().map(|value| {
+ // NOTE(eddyb) do *NOT* remove this assert, until
+ // we pass the full 128-bit value to LLVM, otherwise
+ // truncation will be silent and remain undetected.
+ assert_eq!(value as u64 as u128, value);
+ cx.const_u64(value as u64)
+ }),
DIFlags::FlagZero,
variant_member_info.variant_struct_type_di_node,
)
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/metadata/type_map.rs b/compiler/rustc_codegen_llvm/src/debuginfo/metadata/type_map.rs
index ce2f419c4..e30622cbd 100644
--- a/compiler/rustc_codegen_llvm/src/debuginfo/metadata/type_map.rs
+++ b/compiler/rustc_codegen_llvm/src/debuginfo/metadata/type_map.rs
@@ -47,6 +47,8 @@ pub(super) enum UniqueTypeId<'tcx> {
VariantPart(Ty<'tcx>, private::HiddenZst),
/// The ID for the artificial struct type describing a single enum variant.
VariantStructType(Ty<'tcx>, VariantIdx, private::HiddenZst),
+ /// The ID for the additional wrapper struct type describing an enum variant in CPP-like mode.
+ VariantStructTypeCppLikeWrapper(Ty<'tcx>, VariantIdx, private::HiddenZst),
/// The ID of the artificial type we create for VTables.
VTableTy(Ty<'tcx>, Option<PolyExistentialTraitRef<'tcx>>, private::HiddenZst),
}
@@ -71,6 +73,15 @@ impl<'tcx> UniqueTypeId<'tcx> {
UniqueTypeId::VariantStructType(enum_ty, variant_idx, private::HiddenZst)
}
+ pub fn for_enum_variant_struct_type_wrapper(
+ tcx: TyCtxt<'tcx>,
+ enum_ty: Ty<'tcx>,
+ variant_idx: VariantIdx,
+ ) -> Self {
+ debug_assert_eq!(enum_ty, tcx.normalize_erasing_regions(ParamEnv::reveal_all(), enum_ty));
+ UniqueTypeId::VariantStructTypeCppLikeWrapper(enum_ty, variant_idx, private::HiddenZst)
+ }
+
pub fn for_vtable_ty(
tcx: TyCtxt<'tcx>,
self_type: Ty<'tcx>,
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/mod.rs b/compiler/rustc_codegen_llvm/src/debuginfo/mod.rs
index cf591295b..b23fe3fc9 100644
--- a/compiler/rustc_codegen_llvm/src/debuginfo/mod.rs
+++ b/compiler/rustc_codegen_llvm/src/debuginfo/mod.rs
@@ -39,7 +39,6 @@ use smallvec::SmallVec;
use std::cell::OnceCell;
use std::cell::RefCell;
use std::iter;
-use tracing::debug;
mod create_scope_map;
pub mod gdb;
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/utils.rs b/compiler/rustc_codegen_llvm/src/debuginfo/utils.rs
index 8f2436739..a40cfc8b2 100644
--- a/compiler/rustc_codegen_llvm/src/debuginfo/utils.rs
+++ b/compiler/rustc_codegen_llvm/src/debuginfo/utils.rs
@@ -6,7 +6,7 @@ use super::CodegenUnitDebugContext;
use rustc_hir::def_id::DefId;
use rustc_middle::ty::layout::{HasParamEnv, LayoutOf};
use rustc_middle::ty::{self, DefIdTree, Ty};
-use tracing::trace;
use crate::common::CodegenCx;
use crate::llvm;
diff --git a/compiler/rustc_codegen_llvm/src/declare.rs b/compiler/rustc_codegen_llvm/src/declare.rs
index fa0ecd18f..f79ef1172 100644
--- a/compiler/rustc_codegen_llvm/src/declare.rs
+++ b/compiler/rustc_codegen_llvm/src/declare.rs
@@ -22,7 +22,6 @@ use rustc_codegen_ssa::traits::TypeMembershipMethods;
use rustc_middle::ty::Ty;
use rustc_symbol_mangling::typeid::typeid_for_fnabi;
use smallvec::SmallVec;
-use tracing::debug;
/// Declare a function.
///
@@ -33,6 +32,7 @@ fn declare_raw_fn<'ll>(
name: &str,
callconv: llvm::CallConv,
unnamed: llvm::UnnamedAddr,
+ visibility: llvm::Visibility,
ty: &'ll Type,
) -> &'ll Value {
debug!("declare_raw_fn(name={:?}, ty={:?})", name, ty);
@@ -42,6 +42,7 @@ fn declare_raw_fn<'ll>(
llvm::SetFunctionCallConv(llfn, callconv);
llvm::SetUnnamedAddress(llfn, unnamed);
+ llvm::set_visibility(llfn, visibility);
let mut attrs = SmallVec::<[_; 4]>::new();
@@ -79,7 +80,14 @@ impl<'ll, 'tcx> CodegenCx<'ll, 'tcx> {
unnamed: llvm::UnnamedAddr,
fn_type: &'ll Type,
) -> &'ll Value {
- declare_raw_fn(self, name, llvm::CCallConv, unnamed, fn_type)
+ // Declare C ABI functions with the visibility used by C by default.
+ let visibility = if self.tcx.sess.target.default_hidden_visibility {
+ llvm::Visibility::Hidden
+ } else {
+ llvm::Visibility::Default
+ };
+
+ declare_raw_fn(self, name, llvm::CCallConv, unnamed, visibility, fn_type)
}
/// Declare a Rust function.
@@ -96,6 +104,7 @@ impl<'ll, 'tcx> CodegenCx<'ll, 'tcx> {
name,
fn_abi.llvm_cconv(),
llvm::UnnamedAddr::Global,
+ llvm::Visibility::Default,
fn_abi.llvm_type(self),
);
fn_abi.apply_attrs_llfn(self, llfn);
diff --git a/compiler/rustc_codegen_llvm/src/intrinsic.rs b/compiler/rustc_codegen_llvm/src/intrinsic.rs
index 9f3647492..825011941 100644
--- a/compiler/rustc_codegen_llvm/src/intrinsic.rs
+++ b/compiler/rustc_codegen_llvm/src/intrinsic.rs
@@ -71,6 +71,7 @@ fn get_simple_intrinsic<'ll>(
sym::nearbyintf64 => "llvm.nearbyint.f64",
sym::roundf32 => "llvm.round.f32",
sym::roundf64 => "llvm.round.f64",
+ sym::ptr_mask => "llvm.ptrmask",
_ => return None,
};
Some(cx.get_intrinsic(llvm_name))
@@ -107,6 +108,7 @@ impl<'ll, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'_, 'll, 'tcx> {
let (simple_ty, simple_fn) = simple.unwrap();
self.call(
simple_ty,
+ None,
simple_fn,
&args.iter().map(|arg| arg.immediate()).collect::<Vec<_>>(),
None,
@@ -161,7 +163,7 @@ impl<'ll, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'_, 'll, 'tcx> {
sym::volatile_load | sym::unaligned_volatile_load => {
let tp_ty = substs.type_at(0);
let ptr = args[0].immediate();
- let load = if let PassMode::Cast(ty) = fn_abi.ret.mode {
+ let load = if let PassMode::Cast(ty, _) = &fn_abi.ret.mode {
let llty = ty.llvm_type(self);
let ptr = self.pointercast(ptr, self.type_ptr_to(llty));
self.volatile_load(llty, ptr)
@@ -374,7 +376,7 @@ impl<'ll, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'_, 'll, 'tcx> {
};
if !fn_abi.ret.is_ignore() {
- if let PassMode::Cast(ty) = fn_abi.ret.mode {
+ if let PassMode::Cast(ty, _) = &fn_abi.ret.mode {
let ptr_llty = self.type_ptr_to(ty.llvm_type(self));
let ptr = self.pointercast(result.llval, ptr_llty);
self.store(llval, ptr, result.align);
@@ -434,7 +436,7 @@ fn try_intrinsic<'ll>(
) {
if bx.sess().panic_strategy() == PanicStrategy::Abort {
let try_func_ty = bx.type_func(&[bx.type_i8p()], bx.type_void());
- bx.call(try_func_ty, try_func, &[data], None);
+ bx.call(try_func_ty, None, try_func, &[data], None);
// Return 0 unconditionally from the intrinsic call;
// we can never unwind.
let ret_align = bx.tcx().data_layout.i32_align.abi;
@@ -533,7 +535,7 @@ fn codegen_msvc_try<'ll>(
let ptr_align = bx.tcx().data_layout.pointer_align.abi;
let slot = bx.alloca(bx.type_i8p(), ptr_align);
let try_func_ty = bx.type_func(&[bx.type_i8p()], bx.type_void());
- bx.invoke(try_func_ty, try_func, &[data], normal, catchswitch, None);
+ bx.invoke(try_func_ty, None, try_func, &[data], normal, catchswitch, None);
bx.switch_to_block(normal);
bx.ret(bx.const_i32(0));
@@ -577,7 +579,7 @@ fn codegen_msvc_try<'ll>(
let funclet = bx.catch_pad(cs, &[tydesc, flags, slot]);
let ptr = bx.load(bx.type_i8p(), slot, ptr_align);
let catch_ty = bx.type_func(&[bx.type_i8p(), bx.type_i8p()], bx.type_void());
- bx.call(catch_ty, catch_func, &[data, ptr], Some(&funclet));
+ bx.call(catch_ty, None, catch_func, &[data, ptr], Some(&funclet));
bx.catch_ret(&funclet, caught);
// The flag value of 64 indicates a "catch-all".
@@ -585,7 +587,7 @@ fn codegen_msvc_try<'ll>(
let flags = bx.const_i32(64);
let null = bx.const_null(bx.type_i8p());
let funclet = bx.catch_pad(cs, &[null, flags, null]);
- bx.call(catch_ty, catch_func, &[data, null], Some(&funclet));
+ bx.call(catch_ty, None, catch_func, &[data, null], Some(&funclet));
bx.catch_ret(&funclet, caught);
bx.switch_to_block(caught);
@@ -594,7 +596,7 @@ fn codegen_msvc_try<'ll>(
// Note that no invoke is used here because by definition this function
// can't panic (that's what it's catching).
- let ret = bx.call(llty, llfn, &[try_func, data, catch_func], None);
+ let ret = bx.call(llty, None, llfn, &[try_func, data, catch_func], None);
let i32_align = bx.tcx().data_layout.i32_align.abi;
bx.store(ret, dest, i32_align);
}
@@ -637,7 +639,7 @@ fn codegen_gnu_try<'ll>(
let data = llvm::get_param(bx.llfn(), 1);
let catch_func = llvm::get_param(bx.llfn(), 2);
let try_func_ty = bx.type_func(&[bx.type_i8p()], bx.type_void());
- bx.invoke(try_func_ty, try_func, &[data], then, catch, None);
+ bx.invoke(try_func_ty, None, try_func, &[data], then, catch, None);
bx.switch_to_block(then);
bx.ret(bx.const_i32(0));
@@ -655,13 +657,13 @@ fn codegen_gnu_try<'ll>(
bx.add_clause(vals, tydesc);
let ptr = bx.extract_value(vals, 0);
let catch_ty = bx.type_func(&[bx.type_i8p(), bx.type_i8p()], bx.type_void());
- bx.call(catch_ty, catch_func, &[data, ptr], None);
+ bx.call(catch_ty, None, catch_func, &[data, ptr], None);
bx.ret(bx.const_i32(1));
});
// Note that no invoke is used here because by definition this function
// can't panic (that's what it's catching).
- let ret = bx.call(llty, llfn, &[try_func, data, catch_func], None);
+ let ret = bx.call(llty, None, llfn, &[try_func, data, catch_func], None);
let i32_align = bx.tcx().data_layout.i32_align.abi;
bx.store(ret, dest, i32_align);
}
@@ -701,7 +703,7 @@ fn codegen_emcc_try<'ll>(
let data = llvm::get_param(bx.llfn(), 1);
let catch_func = llvm::get_param(bx.llfn(), 2);
let try_func_ty = bx.type_func(&[bx.type_i8p()], bx.type_void());
- bx.invoke(try_func_ty, try_func, &[data], then, catch, None);
+ bx.invoke(try_func_ty, None, try_func, &[data], then, catch, None);
bx.switch_to_block(then);
bx.ret(bx.const_i32(0));
@@ -740,13 +742,13 @@ fn codegen_emcc_try<'ll>(
let catch_data = bx.bitcast(catch_data, bx.type_i8p());
let catch_ty = bx.type_func(&[bx.type_i8p(), bx.type_i8p()], bx.type_void());
- bx.call(catch_ty, catch_func, &[data, catch_data], None);
+ bx.call(catch_ty, None, catch_func, &[data, catch_data], None);
bx.ret(bx.const_i32(1));
});
// Note that no invoke is used here because by definition this function
// can't panic (that's what it's catching).
- let ret = bx.call(llty, llfn, &[try_func, data, catch_func], None);
+ let ret = bx.call(llty, None, llfn, &[try_func, data, catch_func], None);
let i32_align = bx.tcx().data_layout.i32_align.abi;
bx.store(ret, dest, i32_align);
}
@@ -1216,8 +1218,13 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
};
let llvm_name = &format!("llvm.{0}.v{1}{2}", intr_name, in_len, elem_ty_str);
let f = bx.declare_cfn(llvm_name, llvm::UnnamedAddr::No, fn_ty);
- let c =
- bx.call(fn_ty, f, &args.iter().map(|arg| arg.immediate()).collect::<Vec<_>>(), None);
+ let c = bx.call(
+ fn_ty,
+ None,
+ f,
+ &args.iter().map(|arg| arg.immediate()).collect::<Vec<_>>(),
+ None,
+ );
Ok(c)
}
@@ -1416,8 +1423,13 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
llvm_elem_vec_ty,
);
let f = bx.declare_cfn(&llvm_intrinsic, llvm::UnnamedAddr::No, fn_ty);
- let v =
- bx.call(fn_ty, f, &[args[1].immediate(), alignment, mask, args[0].immediate()], None);
+ let v = bx.call(
+ fn_ty,
+ None,
+ f,
+ &[args[1].immediate(), alignment, mask, args[0].immediate()],
+ None,
+ );
return Ok(v);
}
@@ -1542,8 +1554,13 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
let fn_ty =
bx.type_func(&[llvm_elem_vec_ty, llvm_pointer_vec_ty, alignment_ty, mask_ty], ret_t);
let f = bx.declare_cfn(&llvm_intrinsic, llvm::UnnamedAddr::No, fn_ty);
- let v =
- bx.call(fn_ty, f, &[args[0].immediate(), args[1].immediate(), alignment, mask], None);
+ let v = bx.call(
+ fn_ty,
+ None,
+ f,
+ &[args[0].immediate(), args[1].immediate(), alignment, mask],
+ None,
+ );
return Ok(v);
}
@@ -1704,6 +1721,97 @@ unsupported {} from `{}` with element `{}` of size `{}` to `{}`"#,
bitwise_red!(simd_reduce_all: vector_reduce_and, true);
bitwise_red!(simd_reduce_any: vector_reduce_or, true);
+ if name == sym::simd_cast_ptr {
+ require_simd!(ret_ty, "return");
+ let (out_len, out_elem) = ret_ty.simd_size_and_type(bx.tcx());
+ require!(
+ in_len == out_len,
+ "expected return type with length {} (same as input type `{}`), \
+ found `{}` with length {}",
+ in_len,
+ in_ty,
+ ret_ty,
+ out_len
+ );
+
+ match in_elem.kind() {
+ ty::RawPtr(p) => {
+ let (metadata, check_sized) = p.ty.ptr_metadata_ty(bx.tcx, |ty| {
+ bx.tcx.normalize_erasing_regions(ty::ParamEnv::reveal_all(), ty)
+ });
+ assert!(!check_sized); // we are in codegen, so we shouldn't see these types
+ require!(metadata.is_unit(), "cannot cast fat pointer `{}`", in_elem)
+ }
+ _ => return_error!("expected pointer, got `{}`", in_elem),
+ }
+ match out_elem.kind() {
+ ty::RawPtr(p) => {
+ let (metadata, check_sized) = p.ty.ptr_metadata_ty(bx.tcx, |ty| {
+ bx.tcx.normalize_erasing_regions(ty::ParamEnv::reveal_all(), ty)
+ });
+ assert!(!check_sized); // we are in codegen, so we shouldn't see these types
+ require!(metadata.is_unit(), "cannot cast to fat pointer `{}`", out_elem)
+ }
+ _ => return_error!("expected pointer, got `{}`", out_elem),
+ }
+
+ if in_elem == out_elem {
+ return Ok(args[0].immediate());
+ } else {
+ return Ok(bx.pointercast(args[0].immediate(), llret_ty));
+ }
+ }
+
+ if name == sym::simd_expose_addr {
+ require_simd!(ret_ty, "return");
+ let (out_len, out_elem) = ret_ty.simd_size_and_type(bx.tcx());
+ require!(
+ in_len == out_len,
+ "expected return type with length {} (same as input type `{}`), \
+ found `{}` with length {}",
+ in_len,
+ in_ty,
+ ret_ty,
+ out_len
+ );
+
+ match in_elem.kind() {
+ ty::RawPtr(_) => {}
+ _ => return_error!("expected pointer, got `{}`", in_elem),
+ }
+ match out_elem.kind() {
+ ty::Uint(ty::UintTy::Usize) => {}
+ _ => return_error!("expected `usize`, got `{}`", out_elem),
+ }
+
+ return Ok(bx.ptrtoint(args[0].immediate(), llret_ty));
+ }
+
+ if name == sym::simd_from_exposed_addr {
+ require_simd!(ret_ty, "return");
+ let (out_len, out_elem) = ret_ty.simd_size_and_type(bx.tcx());
+ require!(
+ in_len == out_len,
+ "expected return type with length {} (same as input type `{}`), \
+ found `{}` with length {}",
+ in_len,
+ in_ty,
+ ret_ty,
+ out_len
+ );
+
+ match in_elem.kind() {
+ ty::Uint(ty::UintTy::Usize) => {}
+ _ => return_error!("expected `usize`, got `{}`", in_elem),
+ }
+ match out_elem.kind() {
+ ty::RawPtr(_) => {}
+ _ => return_error!("expected pointer, got `{}`", out_elem),
+ }
+
+ return Ok(bx.inttoptr(args[0].immediate(), llret_ty));
+ }
+
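// Editorial sketch, not part of this patch: simd_expose_addr and
// simd_from_exposed_addr lower to ptrtoint / inttoptr, i.e. the lane-wise
// analogue of these scalar pointer <-> usize casts.
fn expose_addr_scalar_sketch() {
    let x = 42u32;
    let p: *const u32 = &x;
    let addr = p as usize;      // scalar counterpart of simd_expose_addr (ptrtoint)
    let q = addr as *const u32; // scalar counterpart of simd_from_exposed_addr (inttoptr)
    assert_eq!(unsafe { *q }, 42);
}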
if name == sym::simd_cast || name == sym::simd_as {
require_simd!(ret_ty, "return");
let (out_len, out_elem) = ret_ty.simd_size_and_type(bx.tcx());
@@ -1900,7 +2008,7 @@ unsupported {} from `{}` with element `{}` of size `{}` to `{}`"#,
let fn_ty = bx.type_func(&[vec_ty, vec_ty], vec_ty);
let f = bx.declare_cfn(llvm_intrinsic, llvm::UnnamedAddr::No, fn_ty);
- let v = bx.call(fn_ty, f, &[lhs, rhs], None);
+ let v = bx.call(fn_ty, None, f, &[lhs, rhs], None);
return Ok(v);
}
diff --git a/compiler/rustc_codegen_llvm/src/lib.rs b/compiler/rustc_codegen_llvm/src/lib.rs
index 636d689a3..89c7e51d0 100644
--- a/compiler/rustc_codegen_llvm/src/lib.rs
+++ b/compiler/rustc_codegen_llvm/src/lib.rs
@@ -7,7 +7,6 @@
#![doc(html_root_url = "https://doc.rust-lang.org/nightly/nightly-rustc/")]
#![feature(hash_raw_entry)]
#![feature(let_chains)]
-#![feature(let_else)]
#![feature(extern_types)]
#![feature(once_cell)]
#![feature(iter_intersperse)]
@@ -16,6 +15,8 @@
#[macro_use]
extern crate rustc_macros;
+#[macro_use]
+extern crate tracing;
use back::write::{create_informational_target_machine, create_target_machine};
@@ -130,12 +131,6 @@ impl ExtraBackendMethods for LlvmCodegenBackend {
) -> TargetMachineFactoryFn<Self> {
back::write::target_machine_factory(sess, optlvl, target_features)
}
- fn target_cpu<'b>(&self, sess: &'b Session) -> &'b str {
- llvm_util::target_cpu(sess)
- }
- fn tune_cpu<'b>(&self, sess: &'b Session) -> Option<&'b str> {
- llvm_util::tune_cpu(sess)
- }
fn spawn_thread<F, T>(time_trace: bool, f: F) -> std::thread::JoinHandle<T>
where
@@ -169,7 +164,6 @@ impl ExtraBackendMethods for LlvmCodegenBackend {
impl WriteBackendMethods for LlvmCodegenBackend {
type Module = ModuleLlvm;
type ModuleBuffer = back::lto::ModuleBuffer;
- type Context = llvm::Context;
type TargetMachine = &'static mut llvm::TargetMachine;
type ThinData = back::lto::ThinData;
type ThinBuffer = back::lto::ThinBuffer;
diff --git a/compiler/rustc_codegen_llvm/src/llvm/archive_ro.rs b/compiler/rustc_codegen_llvm/src/llvm/archive_ro.rs
index 64db4f746..7d9489702 100644
--- a/compiler/rustc_codegen_llvm/src/llvm/archive_ro.rs
+++ b/compiler/rustc_codegen_llvm/src/llvm/archive_ro.rs
@@ -83,17 +83,6 @@ impl<'a> Child<'a> {
}
}
}
-
- pub fn data(&self) -> &'a [u8] {
- unsafe {
- let mut data_len = 0;
- let data_ptr = super::LLVMRustArchiveChildData(self.raw, &mut data_len);
- if data_ptr.is_null() {
- panic!("failed to read data from archive child");
- }
- slice::from_raw_parts(data_ptr as *const u8, data_len as usize)
- }
- }
}
impl<'a> Drop for Child<'a> {
diff --git a/compiler/rustc_codegen_llvm/src/llvm/ffi.rs b/compiler/rustc_codegen_llvm/src/llvm/ffi.rs
index 3139f93bf..42cb694c0 100644
--- a/compiler/rustc_codegen_llvm/src/llvm/ffi.rs
+++ b/compiler/rustc_codegen_llvm/src/llvm/ffi.rs
@@ -400,27 +400,6 @@ impl AtomicOrdering {
}
}
-/// LLVMRustSynchronizationScope
-#[derive(Copy, Clone)]
-#[repr(C)]
-pub enum SynchronizationScope {
- SingleThread,
- CrossThread,
-}
-
-impl SynchronizationScope {
- pub fn from_generic(sc: rustc_codegen_ssa::common::SynchronizationScope) -> Self {
- match sc {
- rustc_codegen_ssa::common::SynchronizationScope::SingleThread => {
- SynchronizationScope::SingleThread
- }
- rustc_codegen_ssa::common::SynchronizationScope::CrossThread => {
- SynchronizationScope::CrossThread
- }
- }
- }
-}
-
/// LLVMRustFileType
#[derive(Copy, Clone)]
#[repr(C)]
@@ -1096,7 +1075,7 @@ extern "C" {
pub fn LLVMConstInt(IntTy: &Type, N: c_ulonglong, SignExtend: Bool) -> &Value;
pub fn LLVMConstIntOfArbitraryPrecision(IntTy: &Type, Wn: c_uint, Ws: *const u64) -> &Value;
pub fn LLVMConstReal(RealTy: &Type, N: f64) -> &Value;
- pub fn LLVMConstIntGetZExtValue(ConstantVal: &ConstantInt) -> c_ulonglong;
+ pub fn LLVMRustConstIntGetZExtValue(ConstantVal: &ConstantInt, Value: &mut u64) -> bool;
pub fn LLVMRustConstInt128Get(
ConstantVal: &ConstantInt,
SExt: bool,
@@ -1782,16 +1761,18 @@ extern "C" {
Order: AtomicOrdering,
) -> &'a Value;
- pub fn LLVMRustBuildAtomicCmpXchg<'a>(
+ pub fn LLVMBuildAtomicCmpXchg<'a>(
B: &Builder<'a>,
LHS: &'a Value,
CMP: &'a Value,
RHS: &'a Value,
Order: AtomicOrdering,
FailureOrder: AtomicOrdering,
- Weak: Bool,
+ SingleThreaded: Bool,
) -> &'a Value;
+ pub fn LLVMSetWeak(CmpXchgInst: &Value, IsWeak: Bool);
+
pub fn LLVMBuildAtomicRMW<'a>(
B: &Builder<'a>,
Op: AtomicRmwBinOp,
@@ -1801,27 +1782,19 @@ extern "C" {
SingleThreaded: Bool,
) -> &'a Value;
- pub fn LLVMRustBuildAtomicFence(
- B: &Builder<'_>,
+ pub fn LLVMBuildFence<'a>(
+ B: &Builder<'a>,
Order: AtomicOrdering,
- Scope: SynchronizationScope,
- );
+ SingleThreaded: Bool,
+ Name: *const c_char,
+ ) -> &'a Value;
/// Writes a module to the specified path. Returns 0 on success.
pub fn LLVMWriteBitcodeToFile(M: &Module, Path: *const c_char) -> c_int;
- /// Creates a pass manager.
+ /// Creates a legacy pass manager -- only used for final codegen.
pub fn LLVMCreatePassManager<'a>() -> &'a mut PassManager<'a>;
- /// Creates a function-by-function pass manager
- pub fn LLVMCreateFunctionPassManagerForModule(M: &Module) -> &mut PassManager<'_>;
-
- /// Disposes a pass manager.
- pub fn LLVMDisposePassManager<'a>(PM: &'a mut PassManager<'a>);
-
- /// Runs a pass manager on a module.
- pub fn LLVMRunPassManager<'a>(PM: &PassManager<'a>, M: &'a Module) -> Bool;
-
pub fn LLVMInitializePasses();
pub fn LLVMTimeTraceProfilerInitialize();
@@ -1832,32 +1805,6 @@ extern "C" {
pub fn LLVMAddAnalysisPasses<'a>(T: &'a TargetMachine, PM: &PassManager<'a>);
- pub fn LLVMRustPassManagerBuilderCreate() -> &'static mut PassManagerBuilder;
- pub fn LLVMRustPassManagerBuilderDispose(PMB: &'static mut PassManagerBuilder);
- pub fn LLVMRustPassManagerBuilderUseInlinerWithThreshold(
- PMB: &PassManagerBuilder,
- threshold: c_uint,
- );
- pub fn LLVMRustPassManagerBuilderPopulateModulePassManager(
- PMB: &PassManagerBuilder,
- PM: &PassManager<'_>,
- );
-
- pub fn LLVMRustPassManagerBuilderPopulateFunctionPassManager(
- PMB: &PassManagerBuilder,
- PM: &PassManager<'_>,
- );
- pub fn LLVMRustPassManagerBuilderPopulateLTOPassManager(
- PMB: &PassManagerBuilder,
- PM: &PassManager<'_>,
- Internalize: Bool,
- RunInliner: Bool,
- );
- pub fn LLVMRustPassManagerBuilderPopulateThinLTOPassManager(
- PMB: &PassManagerBuilder,
- PM: &PassManager<'_>,
- );
-
pub fn LLVMGetHostCPUFeatures() -> *mut c_char;
pub fn LLVMDisposeMessage(message: *mut c_char);
@@ -2079,6 +2026,19 @@ extern "C" {
Ty: &'a DIType,
) -> &'a DIType;
+ pub fn LLVMRustDIBuilderCreateStaticMemberType<'a>(
+ Builder: &DIBuilder<'a>,
+ Scope: &'a DIDescriptor,
+ Name: *const c_char,
+ NameLen: size_t,
+ File: &'a DIFile,
+ LineNo: c_uint,
+ Ty: &'a DIType,
+ Flags: DIFlags,
+ val: Option<&'a Value>,
+ AlignInBits: u32,
+ ) -> &'a DIDerivedType;
+
pub fn LLVMRustDIBuilderCreateLexicalBlock<'a>(
Builder: &DIBuilder<'a>,
Scope: &'a DIScope,
@@ -2249,22 +2209,6 @@ extern "C" {
pub fn LLVMIsAConstantInt(value_ref: &Value) -> Option<&ConstantInt>;
- pub fn LLVMRustFindAndCreatePass(Pass: *const c_char) -> Option<&'static mut Pass>;
- pub fn LLVMRustCreateAddressSanitizerFunctionPass(Recover: bool) -> &'static mut Pass;
- pub fn LLVMRustCreateModuleAddressSanitizerPass(Recover: bool) -> &'static mut Pass;
- pub fn LLVMRustCreateMemorySanitizerPass(
- TrackOrigins: c_int,
- Recover: bool,
- ) -> &'static mut Pass;
- pub fn LLVMRustCreateThreadSanitizerPass() -> &'static mut Pass;
- pub fn LLVMRustCreateHWAddressSanitizerPass(Recover: bool) -> &'static mut Pass;
- pub fn LLVMRustAddPass(PM: &PassManager<'_>, Pass: &'static mut Pass);
- pub fn LLVMRustAddLastExtensionPasses(
- PMB: &PassManagerBuilder,
- Passes: *const &'static mut Pass,
- NumPasses: size_t,
- );
-
pub fn LLVMRustHasFeature(T: &TargetMachine, s: *const c_char) -> bool;
pub fn LLVMRustPrintTargetCPUs(T: &TargetMachine);
@@ -2298,29 +2242,11 @@ extern "C" {
SplitDwarfFile: *const c_char,
) -> Option<&'static mut TargetMachine>;
pub fn LLVMRustDisposeTargetMachine(T: &'static mut TargetMachine);
- pub fn LLVMRustAddBuilderLibraryInfo<'a>(
- PMB: &'a PassManagerBuilder,
- M: &'a Module,
- DisableSimplifyLibCalls: bool,
- );
- pub fn LLVMRustConfigurePassManagerBuilder(
- PMB: &PassManagerBuilder,
- OptLevel: CodeGenOptLevel,
- MergeFunctions: bool,
- SLPVectorize: bool,
- LoopVectorize: bool,
- PrepareForThinLTO: bool,
- PGOGenPath: *const c_char,
- PGOUsePath: *const c_char,
- PGOSampleUsePath: *const c_char,
- SizeLevel: c_int,
- );
pub fn LLVMRustAddLibraryInfo<'a>(
PM: &PassManager<'a>,
M: &'a Module,
DisableSimplifyLibCalls: bool,
);
- pub fn LLVMRustRunFunctionPassManager<'a>(PM: &PassManager<'a>, M: &'a Module);
pub fn LLVMRustWriteOutputFile<'a>(
T: &'a TargetMachine,
PM: &PassManager<'a>,
@@ -2329,7 +2255,7 @@ extern "C" {
DwoOutput: *const c_char,
FileType: FileType,
) -> LLVMRustResult;
- pub fn LLVMRustOptimizeWithNewPassManager<'a>(
+ pub fn LLVMRustOptimize<'a>(
M: &'a Module,
TM: &'a TargetMachine,
OptLevel: PassBuilderOptLevel,
@@ -2347,6 +2273,7 @@ extern "C" {
PGOGenPath: *const c_char,
PGOUsePath: *const c_char,
InstrumentCoverage: bool,
+ InstrProfileOutput: *const c_char,
InstrumentGCOV: bool,
PGOSampleUsePath: *const c_char,
DebugInfoForProfiling: bool,
@@ -2366,7 +2293,6 @@ extern "C" {
pub fn LLVMRustSetLLVMOptions(Argc: c_int, Argv: *const *const c_char);
pub fn LLVMRustPrintPasses();
pub fn LLVMRustSetNormalizedTarget(M: &Module, triple: *const c_char);
- pub fn LLVMRustAddAlwaysInlinePass(P: &PassManagerBuilder, AddLifetimes: bool);
pub fn LLVMRustRunRestrictionPass(M: &Module, syms: *const *const c_char, len: size_t);
pub fn LLVMRustOpenArchive(path: *const c_char) -> Option<&'static mut Archive>;
@@ -2375,7 +2301,6 @@ extern "C" {
AIR: &ArchiveIterator<'a>,
) -> Option<&'a mut ArchiveChild<'a>>;
pub fn LLVMRustArchiveChildName(ACR: &ArchiveChild<'_>, size: &mut size_t) -> *const c_char;
- pub fn LLVMRustArchiveChildData(ACR: &ArchiveChild<'_>, size: &mut size_t) -> *const c_char;
pub fn LLVMRustArchiveChildFree<'a>(ACR: &'a mut ArchiveChild<'a>);
pub fn LLVMRustArchiveIteratorFree<'a>(AIR: &'a mut ArchiveIterator<'a>);
pub fn LLVMRustDestroyArchive(AR: &'static mut Archive);
@@ -2410,12 +2335,6 @@ extern "C" {
cookie_out: &mut c_uint,
) -> &'a SMDiagnostic;
- pub fn LLVMRustSetInlineAsmDiagnosticHandler(
- C: &Context,
- H: InlineAsmDiagHandlerTy,
- CX: *mut c_void,
- );
-
#[allow(improper_ctypes)]
pub fn LLVMRustUnpackSMDiagnostic(
d: &SMDiagnostic,
diff --git a/compiler/rustc_codegen_llvm/src/llvm_util.rs b/compiler/rustc_codegen_llvm/src/llvm_util.rs
index a0a640473..2fd58567c 100644
--- a/compiler/rustc_codegen_llvm/src/llvm_util.rs
+++ b/compiler/rustc_codegen_llvm/src/llvm_util.rs
@@ -1,7 +1,6 @@
use crate::back::write::create_informational_target_machine;
-use crate::{llvm, llvm_util};
+use crate::llvm;
use libc::c_int;
-use libloading::Library;
use rustc_codegen_ssa::target_features::{
supported_target_features, tied_target_features, RUSTC_SPECIFIC_FEATURES,
};
@@ -15,9 +14,7 @@ use rustc_span::symbol::Symbol;
use rustc_target::spec::{MergeFunctions, PanicStrategy};
use smallvec::{smallvec, SmallVec};
use std::ffi::{CStr, CString};
-use tracing::debug;
-use std::mem;
use std::path::Path;
use std::ptr;
use std::slice;
@@ -92,16 +89,6 @@ unsafe fn configure_llvm(sess: &Session) {
add("-generate-arange-section", false);
}
- // Disable the machine outliner by default in LLVM versions 11 and LLVM
- // version 12, where it leads to miscompilation.
- //
- // Ref:
- // - https://github.com/rust-lang/rust/issues/85351
- // - https://reviews.llvm.org/D103167
- if llvm_util::get_version() < (13, 0, 0) {
- add("-enable-machine-outliner=never", false);
- }
-
match sess.opts.unstable_opts.merge_functions.unwrap_or(sess.target.merge_functions) {
MergeFunctions::Disabled | MergeFunctions::Trampolines => {}
MergeFunctions::Aliases => {
@@ -131,22 +118,6 @@ unsafe fn configure_llvm(sess: &Session) {
llvm::LLVMInitializePasses();
- // Use the legacy plugin registration if we don't use the new pass manager
- if !should_use_new_llvm_pass_manager(
- &sess.opts.unstable_opts.new_llvm_pass_manager,
- &sess.target.arch,
- ) {
- // Register LLVM plugins by loading them into the compiler process.
- for plugin in &sess.opts.unstable_opts.llvm_plugins {
- let lib = Library::new(plugin).unwrap_or_else(|e| bug!("couldn't load plugin: {}", e));
- debug!("LLVM plugin loaded successfully {:?} ({})", lib, plugin);
-
- // Intentionally leak the dynamic library. We can't ever unload it
- // since the library can make things that will live arbitrarily long.
- mem::forget(lib);
- }
- }
-
rustc_llvm::initialize_available_targets();
llvm::LLVMRustSetLLVMOptions(llvm_args.len() as c_int, llvm_args.as_ptr());
@@ -165,6 +136,10 @@ pub fn time_trace_profiler_finish(file_name: &Path) {
//
// To find a list of LLVM's names, check llvm-project/llvm/include/llvm/Support/*TargetParser.def
// where the * matches the architecture's name
+//
+// For targets not present in the above location, see llvm-project/llvm/lib/Target/{ARCH}/*.td
+// where `{ARCH}` is the architecture name. Look for instances of `SubtargetFeature`.
+//
// Beware not to use the llvm github project for this, but check the git submodule
// found in src/llvm-project
// Though note that Rust can also be built with an external precompiled version of LLVM
@@ -440,6 +415,8 @@ pub(crate) fn global_llvm_features(sess: &Session, diagnostics: bool) -> Vec<Str
.features
.split(',')
.filter(|v| !v.is_empty() && backend_feature_name(v).is_some())
+ // Drop the +atomics-32 target feature (introduced in LLVM 15) when building against older LLVM versions.
+ .filter(|v| *v != "+atomics-32" || get_version() >= (15, 0, 0))
.map(String::from),
);
@@ -544,19 +521,3 @@ pub fn tune_cpu(sess: &Session) -> Option<&str> {
let name = sess.opts.unstable_opts.tune_cpu.as_ref()?;
Some(handle_native(name))
}
-
-pub(crate) fn should_use_new_llvm_pass_manager(user_opt: &Option<bool>, target_arch: &str) -> bool {
- // The new pass manager is enabled by default for LLVM >= 13.
- // This matches Clang, which also enables it since Clang 13.
-
- // Since LLVM 15, the legacy pass manager is no longer supported.
- if llvm_util::get_version() >= (15, 0, 0) {
- return true;
- }
-
- // There are some perf issues with the new pass manager when targeting
- // s390x with LLVM 13, so enable the new pass manager only with LLVM 14.
- // See https://github.com/rust-lang/rust/issues/89609.
- let min_version = if target_arch == "s390x" { 14 } else { 13 };
- user_opt.unwrap_or_else(|| llvm_util::get_version() >= (min_version, 0, 0))
-}
diff --git a/compiler/rustc_codegen_llvm/src/mono_item.rs b/compiler/rustc_codegen_llvm/src/mono_item.rs
index 6e9428485..1eceb7f5c 100644
--- a/compiler/rustc_codegen_llvm/src/mono_item.rs
+++ b/compiler/rustc_codegen_llvm/src/mono_item.rs
@@ -11,7 +11,6 @@ use rustc_middle::ty::layout::{FnAbiOf, LayoutOf};
use rustc_middle::ty::{self, Instance, TypeVisitable};
use rustc_session::config::CrateType;
use rustc_target::spec::RelocModel;
-use tracing::debug;
impl<'tcx> PreDefineMethods<'tcx> for CodegenCx<'_, 'tcx> {
fn predefine_static(
diff --git a/compiler/rustc_codegen_llvm/src/type_of.rs b/compiler/rustc_codegen_llvm/src/type_of.rs
index 9f0e6c80b..dc1165835 100644
--- a/compiler/rustc_codegen_llvm/src/type_of.rs
+++ b/compiler/rustc_codegen_llvm/src/type_of.rs
@@ -11,7 +11,6 @@ use rustc_target::abi::{Abi, AddressSpace, Align, FieldsShape};
use rustc_target::abi::{Int, Pointer, F32, F64};
use rustc_target::abi::{PointeeInfo, Scalar, Size, TyAbiInterface, Variants};
use smallvec::{smallvec, SmallVec};
-use tracing::debug;
use std::fmt::Write;
diff --git a/compiler/rustc_codegen_ssa/Cargo.toml b/compiler/rustc_codegen_ssa/Cargo.toml
index 46d6344db..d868e3d56 100644
--- a/compiler/rustc_codegen_ssa/Cargo.toml
+++ b/compiler/rustc_codegen_ssa/Cargo.toml
@@ -26,7 +26,6 @@ rustc_arena = { path = "../rustc_arena" }
rustc_ast = { path = "../rustc_ast" }
rustc_span = { path = "../rustc_span" }
rustc_middle = { path = "../rustc_middle" }
-rustc_apfloat = { path = "../rustc_apfloat" }
rustc_attr = { path = "../rustc_attr" }
rustc_symbol_mangling = { path = "../rustc_symbol_mangling" }
rustc_data_structures = { path = "../rustc_data_structures" }
diff --git a/compiler/rustc_codegen_ssa/src/back/archive.rs b/compiler/rustc_codegen_ssa/src/back/archive.rs
index 0d2aa483d..bb76ca5d2 100644
--- a/compiler/rustc_codegen_ssa/src/back/archive.rs
+++ b/compiler/rustc_codegen_ssa/src/back/archive.rs
@@ -1,44 +1,16 @@
+use rustc_data_structures::fx::FxHashSet;
+use rustc_data_structures::memmap::Mmap;
use rustc_session::cstore::DllImport;
use rustc_session::Session;
+use rustc_span::symbol::Symbol;
+use object::read::archive::ArchiveFile;
+
+use std::fmt::Display;
+use std::fs::File;
use std::io;
use std::path::{Path, PathBuf};
-pub(super) fn find_library(
- name: &str,
- verbatim: bool,
- search_paths: &[PathBuf],
- sess: &Session,
-) -> PathBuf {
- // On Windows, static libraries sometimes show up as libfoo.a and other
- // times show up as foo.lib
- let oslibname = if verbatim {
- name.to_string()
- } else {
- format!("{}{}{}", sess.target.staticlib_prefix, name, sess.target.staticlib_suffix)
- };
- let unixlibname = format!("lib{}.a", name);
-
- for path in search_paths {
- debug!("looking for {} inside {:?}", name, path);
- let test = path.join(&oslibname);
- if test.exists() {
- return test;
- }
- if oslibname != unixlibname {
- let test = path.join(&unixlibname);
- if test.exists() {
- return test;
- }
- }
- }
- sess.fatal(&format!(
- "could not find native static library `{}`, \
- perhaps an -L flag is missing?",
- name
- ));
-}
-
pub trait ArchiveBuilderBuilder {
fn new_archive_builder<'a>(&self, sess: &'a Session) -> Box<dyn ArchiveBuilder<'a> + 'a>;
@@ -53,7 +25,38 @@ pub trait ArchiveBuilderBuilder {
lib_name: &str,
dll_imports: &[DllImport],
tmpdir: &Path,
+ is_direct_dependency: bool,
) -> PathBuf;
+
+ fn extract_bundled_libs(
+ &self,
+ rlib: &Path,
+ outdir: &Path,
+ bundled_lib_file_names: &FxHashSet<Symbol>,
+ ) -> Result<(), String> {
+ let message = |msg: &str, e: &dyn Display| format!("{} '{}': {}", msg, &rlib.display(), e);
+ let archive_map = unsafe {
+ Mmap::map(File::open(rlib).map_err(|e| message("failed to open file", &e))?)
+ .map_err(|e| message("failed to mmap file", &e))?
+ };
+ let archive = ArchiveFile::parse(&*archive_map)
+ .map_err(|e| message("failed to parse archive", &e))?;
+
+ for entry in archive.members() {
+ let entry = entry.map_err(|e| message("failed to read entry", &e))?;
+ let data = entry
+ .data(&*archive_map)
+ .map_err(|e| message("failed to get data from archive member", &e))?;
+ let name = std::str::from_utf8(entry.name())
+ .map_err(|e| message("failed to convert name", &e))?;
+ if !bundled_lib_file_names.contains(&Symbol::intern(name)) {
+ continue; // We need to extract only native libraries.
+ }
+ std::fs::write(&outdir.join(&name), data)
+ .map_err(|e| message("failed to write file", &e))?;
+ }
+ Ok(())
+ }
}
pub trait ArchiveBuilder<'a> {
diff --git a/compiler/rustc_codegen_ssa/src/back/link.rs b/compiler/rustc_codegen_ssa/src/back/link.rs
index 63207803e..0dc0dee86 100644
--- a/compiler/rustc_codegen_ssa/src/back/link.rs
+++ b/compiler/rustc_codegen_ssa/src/back/link.rs
@@ -1,15 +1,17 @@
use rustc_arena::TypedArena;
use rustc_ast::CRATE_NODE_ID;
-use rustc_data_structures::fx::{FxHashSet, FxIndexMap};
+use rustc_data_structures::fx::FxHashSet;
+use rustc_data_structures::fx::FxIndexMap;
use rustc_data_structures::memmap::Mmap;
use rustc_data_structures::temp_dir::MaybeTempDir;
use rustc_errors::{ErrorGuaranteed, Handler};
use rustc_fs_util::fix_windows_verbatim_for_gcc;
use rustc_hir::def_id::CrateNum;
+use rustc_metadata::find_native_static_library;
use rustc_metadata::fs::{emit_metadata, METADATA_FILENAME};
use rustc_middle::middle::dependency_format::Linkage;
use rustc_middle::middle::exported_symbols::SymbolExportKind;
-use rustc_session::config::{self, CFGuard, CrateType, DebugInfo, LdImpl, Strip};
+use rustc_session::config::{self, CFGuard, CrateType, DebugInfo, LdImpl, Lto, Strip};
use rustc_session::config::{OutputFilenames, OutputType, PrintRequest, SplitDwarfKind};
use rustc_session::cstore::DllImport;
use rustc_session::output::{check_file_is_writeable, invalid_output_for_target, out_filename};
@@ -20,21 +22,24 @@ use rustc_session::utils::NativeLibKind;
use rustc_session::{filesearch, Session};
use rustc_span::symbol::Symbol;
use rustc_span::DebuggerVisualizerFile;
-use rustc_target::spec::crt_objects::{CrtObjects, CrtObjectsFallback};
-use rustc_target::spec::{LinkOutputKind, LinkerFlavor, LldFlavor, SplitDebuginfo};
-use rustc_target::spec::{PanicStrategy, RelocModel, RelroLevel, SanitizerSet, Target};
+use rustc_target::spec::crt_objects::{CrtObjects, LinkSelfContainedDefault};
+use rustc_target::spec::{Cc, LinkOutputKind, LinkerFlavor, LinkerFlavorCli, Lld, PanicStrategy};
+use rustc_target::spec::{RelocModel, RelroLevel, SanitizerSet, SplitDebuginfo, Target};
-use super::archive::{find_library, ArchiveBuilder, ArchiveBuilderBuilder};
+use super::archive::{ArchiveBuilder, ArchiveBuilderBuilder};
use super::command::Command;
use super::linker::{self, Linker};
use super::metadata::{create_rmeta_file, MetadataPosition};
use super::rpath::{self, RPathConfig};
-use crate::{looks_like_rust_object_file, CodegenResults, CompiledModule, CrateInfo, NativeLib};
+use crate::{
+ errors, looks_like_rust_object_file, CodegenResults, CompiledModule, CrateInfo, NativeLib,
+};
use cc::windows_registry;
use regex::Regex;
use tempfile::Builder as TempFileBuilder;
+use itertools::Itertools;
use std::borrow::Borrow;
use std::cell::OnceCell;
use std::collections::BTreeSet;
@@ -44,7 +49,7 @@ use std::io::{BufWriter, Write};
use std::ops::Deref;
use std::path::{Path, PathBuf};
use std::process::{ExitStatus, Output, Stdio};
-use std::{ascii, char, env, fmt, fs, io, mem, str};
+use std::{env, fmt, fs, io, mem, str};
pub fn ensure_removed(diag_handler: &Handler, path: &Path) {
if let Err(e) = fs::remove_file(path) {
@@ -91,7 +96,7 @@ pub fn link_binary<'a>(
let tmpdir = TempFileBuilder::new()
.prefix("rustc")
.tempdir()
- .unwrap_or_else(|err| sess.fatal(&format!("couldn't create a temp dir: {}", err)));
+ .unwrap_or_else(|error| sess.emit_fatal(errors::CreateTempDir { error }));
let path = MaybeTempDir::new(tmpdir, sess.opts.cg.save_temps);
let out_filename = out_filename(
sess,
@@ -204,11 +209,29 @@ pub fn link_binary<'a>(
}
pub fn each_linked_rlib(
+ sess: &Session,
info: &CrateInfo,
f: &mut dyn FnMut(CrateNum, &Path),
-) -> Result<(), String> {
+) -> Result<(), errors::LinkRlibError> {
let crates = info.used_crates.iter();
let mut fmts = None;
+
+ let lto_active = matches!(sess.lto(), Lto::Fat | Lto::Thin);
+ if lto_active {
+ for combination in info.dependency_formats.iter().combinations(2) {
+ let (ty1, list1) = &combination[0];
+ let (ty2, list2) = &combination[1];
+ if list1 != list2 {
+ return Err(errors::LinkRlibError::IncompatibleDependencyFormats {
+ ty1: format!("{ty1:?}"),
+ ty2: format!("{ty2:?}"),
+ list1: format!("{list1:?}"),
+ list2: format!("{list2:?}"),
+ });
+ }
+ }
+ }
+
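// Editorial sketch, not part of this patch (assumes the itertools crate): the
// LTO check above compares every pair of dependency-format lists via
// Itertools::combinations(2); three entries yield the pairs (0,1), (0,2), (1,2).
use itertools::Itertools;
fn combinations_sketch() {
    let formats = ["a", "b", "c"];
    let pairs: Vec<Vec<&&str>> = formats.iter().combinations(2).collect();
    assert_eq!(pairs.len(), 3);
}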
for (ty, list) in info.dependency_formats.iter() {
match ty {
CrateType::Executable
@@ -218,30 +241,31 @@ pub fn each_linked_rlib(
fmts = Some(list);
break;
}
+ CrateType::Dylib if lto_active => {
+ fmts = Some(list);
+ break;
+ }
_ => {}
}
}
let Some(fmts) = fmts else {
- return Err("could not find formats for rlibs".to_string());
+ return Err(errors::LinkRlibError::MissingFormat);
};
for &cnum in crates {
match fmts.get(cnum.as_usize() - 1) {
Some(&Linkage::NotLinked | &Linkage::IncludedFromDylib) => continue,
Some(_) => {}
- None => return Err("could not find formats for rlibs".to_string()),
+ None => return Err(errors::LinkRlibError::MissingFormat),
}
- let name = info.crate_name[&cnum];
+ let crate_name = info.crate_name[&cnum];
let used_crate_source = &info.used_crate_source[&cnum];
if let Some((path, _)) = &used_crate_source.rlib {
f(cnum, &path);
} else {
if used_crate_source.rmeta.is_some() {
- return Err(format!(
- "could not find rlib for: `{}`, found rmeta (metadata) file",
- name
- ));
+ return Err(errors::LinkRlibError::OnlyRmetaFound { crate_name });
} else {
- return Err(format!("could not find rlib for: `{}`", name));
+ return Err(errors::LinkRlibError::NotFound { crate_name });
}
}
}
@@ -307,6 +331,9 @@ fn link_rlib<'a>(
}
}
+ // Used if the packed_bundled_libs flag is enabled.
+ let mut packed_bundled_libs = Vec::new();
+
// Note that in this loop we are ignoring the value of `lib.cfg`. That is,
// we may not be configured to actually include a static library if we're
// adding it here. That's because later when we consume this rlib we'll
@@ -326,6 +353,8 @@ fn link_rlib<'a>(
for lib in codegen_results.crate_info.used_libraries.iter() {
match lib.kind {
NativeLibKind::Static { bundle: None | Some(true), whole_archive: Some(true) }
+ if flavor == RlibFlavor::Normal && sess.opts.unstable_opts.packed_bundled_libs => {}
+ NativeLibKind::Static { bundle: None | Some(true), whole_archive: Some(true) }
if flavor == RlibFlavor::Normal =>
{
// Don't allow mixing +bundle with +whole_archive since an rlib may contain
@@ -333,10 +362,7 @@ fn link_rlib<'a>(
// -whole-archive and it isn't clear how we can currently handle such a
// situation correctly.
// See https://github.com/rust-lang/rust/issues/88085#issuecomment-901050897
- sess.err(
- "the linking modifiers `+bundle` and `+whole-archive` are not compatible \
- with each other when generating rlibs",
- );
+ sess.emit_err(errors::IncompatibleLinkingModifiers);
}
NativeLibKind::Static { bundle: None | Some(true), .. } => {}
NativeLibKind::Static { bundle: Some(false), .. }
@@ -348,29 +374,35 @@ fn link_rlib<'a>(
}
if let Some(name) = lib.name {
let location =
- find_library(name.as_str(), lib.verbatim.unwrap_or(false), &lib_search_paths, sess);
- ab.add_archive(&location, Box::new(|_| false)).unwrap_or_else(|e| {
- sess.fatal(&format!(
- "failed to add native library {}: {}",
- location.to_string_lossy(),
- e
+ find_native_static_library(name.as_str(), lib.verbatim, &lib_search_paths, sess);
+ if sess.opts.unstable_opts.packed_bundled_libs && flavor == RlibFlavor::Normal {
+ packed_bundled_libs.push(find_native_static_library(
+ lib.filename.unwrap().as_str(),
+ Some(true),
+ &lib_search_paths,
+ sess,
));
+ continue;
+ }
+ ab.add_archive(&location, Box::new(|_| false)).unwrap_or_else(|error| {
+ sess.emit_fatal(errors::AddNativeLibrary { library_path: location, error });
});
}
}
for (raw_dylib_name, raw_dylib_imports) in
- collate_raw_dylibs(sess, &codegen_results.crate_info.used_libraries)?
+ collate_raw_dylibs(sess, codegen_results.crate_info.used_libraries.iter())?
{
let output_path = archive_builder_builder.create_dll_import_lib(
sess,
&raw_dylib_name,
&raw_dylib_imports,
tmpdir.as_ref(),
+ true,
);
- ab.add_archive(&output_path, Box::new(|_| false)).unwrap_or_else(|e| {
- sess.fatal(&format!("failed to add native library {}: {}", output_path.display(), e));
+ ab.add_archive(&output_path, Box::new(|_| false)).unwrap_or_else(|error| {
+ sess.emit_fatal(errors::AddNativeLibrary { library_path: output_path, error });
});
}
@@ -403,6 +435,12 @@ fn link_rlib<'a>(
ab.add_file(&trailing_metadata);
}
+ // Add all bundled static native library dependencies.
+ // Archives are added to the end of the .rlib archive; see the comment above for the reason.
+ for lib in packed_bundled_libs {
+ ab.add_file(&lib)
+ }
+
return Ok(ab);
}
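// Illustrative sketch (hypothetical names, based only on calls visible in this patch):
// with `-Zpacked-bundled-libs` the bundled native archives are appended to the rlib as
// whole files here, and pulled back out at final-link time so they can be passed to the
// linker individually:
//
//     // while building the rlib (above):
//     for lib in packed_bundled_libs { ab.add_file(&lib); }
//
//     // while linking the final artifact (see `add_upstream_rust_crates` below):
//     archive_builder_builder.extract_bundled_libs(rlib, tmpdir, &bundled_libs)?;
//     let unpacked = tmpdir.join(lib_filename.as_str());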
@@ -412,9 +450,9 @@ fn link_rlib<'a>(
/// then the CodegenResults value contains one NativeLib instance for each block. However, the
/// linker appears to expect only a single import library for each library used, so we need to
/// collate the symbols together by library name before generating the import libraries.
-fn collate_raw_dylibs(
- sess: &Session,
- used_libraries: &[NativeLib],
+fn collate_raw_dylibs<'a, 'b>(
+ sess: &'a Session,
+ used_libraries: impl IntoIterator<Item = &'b NativeLib>,
) -> Result<Vec<(String, Vec<DllImport>)>, ErrorGuaranteed> {
// Use index maps to preserve original order of imports and libraries.
let mut dylib_table = FxIndexMap::<String, FxIndexMap<Symbol, &DllImport>>::default();
@@ -429,14 +467,11 @@ fn collate_raw_dylibs(
// FIXME: when we add support for ordinals, figure out if we need to do anything
// if we have two DllImport values with the same name but different ordinals.
if import.calling_convention != old_import.calling_convention {
- sess.span_err(
- import.span,
- &format!(
- "multiple declarations of external function `{}` from \
- library `{}` have different calling conventions",
- import.name, name,
- ),
- );
+ sess.emit_err(errors::MultipleExternalFuncDecl {
+ span: import.span,
+ function: import.name,
+ library_name: &name,
+ });
}
}
}
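// Illustrative sketch (std collections instead of FxIndexMap, hypothetical input): the
// collation above is essentially a two-level map keyed by library name and then by
// symbol, so several `#[link(name = "...", kind = "raw-dylib")]` blocks naming the same
// DLL collapse into a single import library:
//
//     use std::collections::BTreeMap;
//
//     fn collate<'a>(imports: &[(&'a str, &'a str)]) -> BTreeMap<&'a str, Vec<&'a str>> {
//         let mut table: BTreeMap<&str, BTreeMap<&str, ()>> = BTreeMap::new();
//         for &(lib, func) in imports {
//             table.entry(lib).or_default().insert(func, ());
//         }
//         table.into_iter().map(|(lib, funcs)| (lib, funcs.into_keys().collect())).collect()
//     }
//
// The real code uses `FxIndexMap` so that the original declaration order is preserved and
// duplicate symbols with conflicting calling conventions can be reported.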
@@ -479,7 +514,7 @@ fn link_staticlib<'a>(
)?;
let mut all_native_libs = vec![];
- let res = each_linked_rlib(&codegen_results.crate_info, &mut |cnum, path| {
+ let res = each_linked_rlib(sess, &codegen_results.crate_info, &mut |cnum, path| {
let name = codegen_results.crate_info.crate_name[&cnum];
let native_libs = &codegen_results.crate_info.native_libraries[&cnum];
@@ -538,7 +573,7 @@ fn link_staticlib<'a>(
all_native_libs.extend(codegen_results.crate_info.native_libraries[&cnum].iter().cloned());
});
if let Err(e) = res {
- sess.fatal(&e);
+ sess.emit_fatal(e);
}
ab.build(out_filename);
@@ -552,14 +587,6 @@ fn link_staticlib<'a>(
Ok(())
}
-fn escape_stdout_stderr_string(s: &[u8]) -> String {
- str::from_utf8(s).map(|s| s.to_owned()).unwrap_or_else(|_| {
- let mut x = "Non-UTF-8 output: ".to_string();
- x.extend(s.iter().flat_map(|&b| ascii::escape_default(b)).map(char::from));
- x
- })
-}
-
/// Use `thorin` (rust implementation of a dwarf packaging utility) to link DWARF objects into a
/// DWARF package.
fn link_dwarf_object<'a>(
@@ -659,9 +686,7 @@ fn link_dwarf_object<'a>(
}) {
Ok(()) => {}
Err(e) => {
- sess.struct_err("linking dwarf objects with thorin failed")
- .note(&format!("{:?}", e))
- .emit();
+ sess.emit_err(errors::ThorinErrorWrapper(e));
sess.abort_if_errors();
}
}
@@ -734,8 +759,7 @@ fn link_natively<'a>(
// then it should not default to linking executables as pie. Different
// versions of gcc seem to use different quotes in the error message so
// don't check for them.
- if sess.target.linker_is_gnu
- && flavor != LinkerFlavor::Ld
+ if matches!(flavor, LinkerFlavor::Gnu(Cc::Yes, _))
&& unknown_arg_regex.is_match(&out)
&& out.contains("-no-pie")
&& cmd.get_args().iter().any(|e| e.to_string_lossy() == "-no-pie")
@@ -753,8 +777,7 @@ fn link_natively<'a>(
// Detect '-static-pie' used with an older version of gcc or clang not supporting it.
// Fallback from '-static-pie' to '-static' in that case.
- if sess.target.linker_is_gnu
- && flavor != LinkerFlavor::Ld
+ if matches!(flavor, LinkerFlavor::Gnu(Cc::Yes, _))
&& unknown_arg_regex.is_match(&out)
&& (out.contains("-static-pie") || out.contains("--no-dynamic-linker"))
&& cmd.get_args().iter().any(|e| e.to_string_lossy() == "-static-pie")
@@ -764,15 +787,15 @@ fn link_natively<'a>(
"Linker does not support -static-pie command line option. Retrying with -static instead."
);
// Mirror `add_(pre,post)_link_objects` to replace CRT objects.
- let self_contained = crt_objects_fallback(sess, crate_type);
+ let self_contained = self_contained(sess, crate_type);
let opts = &sess.target;
let pre_objects = if self_contained {
- &opts.pre_link_objects_fallback
+ &opts.pre_link_objects_self_contained
} else {
&opts.pre_link_objects
};
let post_objects = if self_contained {
- &opts.post_link_objects_fallback
+ &opts.post_link_objects_self_contained
} else {
&opts.post_link_objects
};
@@ -866,30 +889,21 @@ fn link_natively<'a>(
if !prog.status.success() {
let mut output = prog.stderr.clone();
output.extend_from_slice(&prog.stdout);
- let escaped_output = escape_stdout_stderr_string(&output);
- let mut err = sess.struct_err(&format!(
- "linking with `{}` failed: {}",
- linker_path.display(),
- prog.status
- ));
- err.note(&format!("{:?}", &cmd)).note(&escaped_output);
- if escaped_output.contains("undefined reference to") {
- err.help(
- "some `extern` functions couldn't be found; some native libraries may \
- need to be installed or have their path specified",
- );
- err.note("use the `-l` flag to specify native libraries to link");
- err.note("use the `cargo:rustc-link-lib` directive to specify the native \
- libraries to link with Cargo (see https://doc.rust-lang.org/cargo/reference/build-scripts.html#cargorustc-link-libkindname)");
- }
- err.emit();
-
+ let escaped_output = escape_string(&output);
+ // FIXME: Add UI tests for this error.
+ let err = errors::LinkingFailed {
+ linker_path: &linker_path,
+ exit_status: prog.status,
+ command: &cmd,
+ escaped_output: &escaped_output,
+ };
+ sess.diagnostic().emit_err(err);
// If MSVC's `link.exe` was expected but the return code
// is not a Microsoft LNK error then suggest a way to fix or
// install the Visual Studio build tools.
if let Some(code) = prog.status.code() {
if sess.target.is_like_msvc
- && flavor == LinkerFlavor::Msvc
+ && flavor == LinkerFlavor::Msvc(Lld::No)
// Respect the command line override
&& sess.opts.cg.linker.is_none()
// Match exactly "link.exe"
@@ -934,8 +948,8 @@ fn link_natively<'a>(
sess.abort_if_errors();
}
- info!("linker stderr:\n{}", escape_stdout_stderr_string(&prog.stderr));
- info!("linker stdout:\n{}", escape_stdout_stderr_string(&prog.stdout));
+ info!("linker stderr:\n{}", escape_string(&prog.stderr));
+ info!("linker stdout:\n{}", escape_string(&prog.stdout));
}
Err(e) => {
let linker_not_found = e.kind() == io::ErrorKind::NotFound;
@@ -965,9 +979,10 @@ fn link_natively<'a>(
but `link.exe` was not found",
);
sess.note_without_error(
- "please ensure that VS 2013, VS 2015, VS 2017, VS 2019 or VS 2022 \
- was installed with the Visual C++ option",
+ "please ensure that Visual Studio 2017 or later, or Build Tools \
+ for Visual Studio were installed with the Visual C++ option.",
);
+ sess.note_without_error("VS Code is a different product, and is not sufficient.");
}
sess.abort_if_errors();
}
@@ -1020,16 +1035,36 @@ fn link_natively<'a>(
if sess.target.is_like_osx {
match (strip, crate_type) {
- (Strip::Debuginfo, _) => strip_symbols_in_osx(sess, &out_filename, Some("-S")),
+ (Strip::Debuginfo, _) => {
+ strip_symbols_with_external_utility(sess, "strip", &out_filename, Some("-S"))
+ }
// Per the manpage, `-x` is the maximum safe strip level for dynamic libraries. (#93988)
(Strip::Symbols, CrateType::Dylib | CrateType::Cdylib | CrateType::ProcMacro) => {
- strip_symbols_in_osx(sess, &out_filename, Some("-x"))
+ strip_symbols_with_external_utility(sess, "strip", &out_filename, Some("-x"))
+ }
+ (Strip::Symbols, _) => {
+ strip_symbols_with_external_utility(sess, "strip", &out_filename, None)
}
- (Strip::Symbols, _) => strip_symbols_in_osx(sess, &out_filename, None),
(Strip::None, _) => {}
}
}
+ if sess.target.os == "illumos" {
+ // Many illumos systems will have both the native 'strip' utility and
+ // the GNU one. Use the native version explicitly and do not rely on
+ // what's in the path.
+ let stripcmd = "/usr/bin/strip";
+ match strip {
+ // Always preserve the symbol table (-x).
+ Strip::Debuginfo => {
+ strip_symbols_with_external_utility(sess, stripcmd, &out_filename, Some("-x"))
+ }
+ // Strip::Symbols is handled via the --strip-all linker option.
+ Strip::Symbols => {}
+ Strip::None => {}
+ }
+ }
+
Ok(())
}
@@ -1041,8 +1076,13 @@ fn strip_value(sess: &Session) -> Strip {
}
}
-fn strip_symbols_in_osx<'a>(sess: &'a Session, out_filename: &Path, option: Option<&str>) {
- let mut cmd = Command::new("strip");
+fn strip_symbols_with_external_utility<'a>(
+ sess: &'a Session,
+ util: &str,
+ out_filename: &Path,
+ option: Option<&str>,
+) {
+ let mut cmd = Command::new(util);
if let Some(option) = option {
cmd.arg(option);
}
@@ -1053,23 +1093,22 @@ fn strip_symbols_in_osx<'a>(sess: &'a Session, out_filename: &Path, option: Opti
let mut output = prog.stderr.clone();
output.extend_from_slice(&prog.stdout);
sess.struct_warn(&format!(
- "stripping debug info with `strip` failed: {}",
- prog.status
+ "stripping debug info with `{}` failed: {}",
+ util, prog.status
))
.note(&escape_string(&output))
.emit();
}
}
- Err(e) => sess.fatal(&format!("unable to run `strip`: {}", e)),
+ Err(e) => sess.fatal(&format!("unable to run `{}`: {}", util, e)),
}
}
fn escape_string(s: &[u8]) -> String {
- str::from_utf8(s).map(|s| s.to_owned()).unwrap_or_else(|_| {
- let mut x = "Non-UTF-8 output: ".to_string();
- x.extend(s.iter().flat_map(|&b| ascii::escape_default(b)).map(char::from));
- x
- })
+ match str::from_utf8(s) {
+ Ok(s) => s.to_owned(),
+ Err(_) => format!("Non-UTF-8 output: {}", s.escape_ascii()),
+ }
}
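// Illustrative note (a sketch of the expected behaviour, not a test from this patch):
// valid UTF-8 passes through unchanged, while invalid bytes are rendered with
// `<[u8]>::escape_ascii` as lowercase `\xNN` escapes:
//
//     assert_eq!(escape_string(b"linker ok"), "linker ok");
//     assert_eq!(escape_string(b"bad \xff byte"), r"Non-UTF-8 output: bad \xff byte");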
fn add_sanitizer_libraries(sess: &Session, crate_type: CrateType, linker: &mut dyn Linker) {
@@ -1077,11 +1116,12 @@ fn add_sanitizer_libraries(sess: &Session, crate_type: CrateType, linker: &mut d
// both executables and dynamic shared objects. Everywhere else the runtimes
// are currently distributed as static libraries which should be linked to
// executables only.
- let needs_runtime = match crate_type {
- CrateType::Executable => true,
- CrateType::Dylib | CrateType::Cdylib | CrateType::ProcMacro => sess.target.is_like_osx,
- CrateType::Rlib | CrateType::Staticlib => false,
- };
+ let needs_runtime = !sess.target.is_like_android
+ && match crate_type {
+ CrateType::Executable => true,
+ CrateType::Dylib | CrateType::Cdylib | CrateType::ProcMacro => sess.target.is_like_osx,
+ CrateType::Rlib | CrateType::Staticlib => false,
+ };
if !needs_runtime {
return;
@@ -1173,14 +1213,10 @@ pub fn linker_and_flavor(sess: &Session) -> (PathBuf, LinkerFlavor) {
// only the linker flavor is known; use the default linker for the selected flavor
(None, Some(flavor)) => Some((
PathBuf::from(match flavor {
- LinkerFlavor::Em => {
- if cfg!(windows) {
- "emcc.bat"
- } else {
- "emcc"
- }
- }
- LinkerFlavor::Gcc => {
+ LinkerFlavor::Gnu(Cc::Yes, _)
+ | LinkerFlavor::Darwin(Cc::Yes, _)
+ | LinkerFlavor::WasmLld(Cc::Yes)
+ | LinkerFlavor::Unix(Cc::Yes) => {
if cfg!(any(target_os = "solaris", target_os = "illumos")) {
// On historical Solaris systems, "cc" may have
// been Sun Studio, which is not flag-compatible
@@ -1193,12 +1229,23 @@ pub fn linker_and_flavor(sess: &Session) -> (PathBuf, LinkerFlavor) {
"cc"
}
}
- LinkerFlavor::Ld => "ld",
- LinkerFlavor::Msvc => "link.exe",
- LinkerFlavor::Lld(_) => "lld",
- LinkerFlavor::PtxLinker => "rust-ptx-linker",
- LinkerFlavor::BpfLinker => "bpf-linker",
- LinkerFlavor::L4Bender => "l4-bender",
+ LinkerFlavor::Gnu(_, Lld::Yes)
+ | LinkerFlavor::Darwin(_, Lld::Yes)
+ | LinkerFlavor::WasmLld(..)
+ | LinkerFlavor::Msvc(Lld::Yes) => "lld",
+ LinkerFlavor::Gnu(..) | LinkerFlavor::Darwin(..) | LinkerFlavor::Unix(..) => {
+ "ld"
+ }
+ LinkerFlavor::Msvc(..) => "link.exe",
+ LinkerFlavor::EmCc => {
+ if cfg!(windows) {
+ "emcc.bat"
+ } else {
+ "emcc"
+ }
+ }
+ LinkerFlavor::Bpf => "bpf-linker",
+ LinkerFlavor::Ptx => "rust-ptx-linker",
}),
flavor,
)),
@@ -1208,21 +1255,26 @@ pub fn linker_and_flavor(sess: &Session) -> (PathBuf, LinkerFlavor) {
});
let flavor = if stem == "emcc" {
- LinkerFlavor::Em
+ LinkerFlavor::EmCc
} else if stem == "gcc"
|| stem.ends_with("-gcc")
|| stem == "clang"
|| stem.ends_with("-clang")
{
- LinkerFlavor::Gcc
+ LinkerFlavor::from_cli(LinkerFlavorCli::Gcc, &sess.target)
} else if stem == "wasm-ld" || stem.ends_with("-wasm-ld") {
- LinkerFlavor::Lld(LldFlavor::Wasm)
- } else if stem == "ld" || stem == "ld.lld" || stem.ends_with("-ld") {
- LinkerFlavor::Ld
- } else if stem == "link" || stem == "lld-link" {
- LinkerFlavor::Msvc
+ LinkerFlavor::WasmLld(Cc::No)
+ } else if stem == "ld" || stem.ends_with("-ld") {
+ LinkerFlavor::from_cli(LinkerFlavorCli::Ld, &sess.target)
+ } else if stem == "ld.lld" {
+ LinkerFlavor::Gnu(Cc::No, Lld::Yes)
+ } else if stem == "link" {
+ LinkerFlavor::Msvc(Lld::No)
+ } else if stem == "lld-link" {
+ LinkerFlavor::Msvc(Lld::Yes)
} else if stem == "lld" || stem == "rust-lld" {
- LinkerFlavor::Lld(sess.target.lld_flavor)
+ let lld_flavor = sess.target.linker_flavor.lld_flavor();
+ LinkerFlavor::from_cli(LinkerFlavorCli::Lld(lld_flavor), &sess.target)
} else {
// fall back to the value in the target spec
sess.target.linker_flavor
@@ -1236,7 +1288,9 @@ pub fn linker_and_flavor(sess: &Session) -> (PathBuf, LinkerFlavor) {
// linker and linker flavor specified via command line have precedence over what the target
// specification specifies
- if let Some(ret) = infer_from(sess, sess.opts.cg.linker.clone(), sess.opts.cg.linker_flavor) {
+ let linker_flavor =
+ sess.opts.cg.linker_flavor.map(|flavor| LinkerFlavor::from_cli(flavor, &sess.target));
+ if let Some(ret) = infer_from(sess, sess.opts.cg.linker.clone(), linker_flavor) {
return ret;
}
@@ -1306,7 +1360,7 @@ fn print_native_static_libs(sess: &Session, all_native_libs: &[NativeLib]) {
let verbatim = lib.verbatim.unwrap_or(false);
if sess.target.is_like_msvc {
Some(format!("{}{}", name, if verbatim { "" } else { ".lib" }))
- } else if sess.target.linker_is_gnu {
+ } else if sess.target.linker_flavor.is_gnu() {
Some(format!("-l{}{}", if verbatim { ":" } else { "" }, name))
} else {
Some(format!("-l{}", name))
@@ -1556,26 +1610,26 @@ fn detect_self_contained_mingw(sess: &Session) -> bool {
true
}
-/// Whether we link to our own CRT objects instead of relying on gcc to pull them.
+/// Whether various toolchain components used during linking should be taken from the
+/// rustc distribution instead of being found somewhere on the host system.
/// We only provide such support for a very limited number of targets.
-fn crt_objects_fallback(sess: &Session, crate_type: CrateType) -> bool {
+fn self_contained(sess: &Session, crate_type: CrateType) -> bool {
if let Some(self_contained) = sess.opts.cg.link_self_contained {
return self_contained;
}
- match sess.target.crt_objects_fallback {
+ match sess.target.link_self_contained {
+ LinkSelfContainedDefault::False => false,
+ LinkSelfContainedDefault::True => true,
// FIXME: Find a better heuristic for "native musl toolchain is available",
// based on host and linker path, for example.
// (https://github.com/rust-lang/rust/pull/71769#issuecomment-626330237).
- Some(CrtObjectsFallback::Musl) => sess.crt_static(Some(crate_type)),
- Some(CrtObjectsFallback::Mingw) => {
+ LinkSelfContainedDefault::Musl => sess.crt_static(Some(crate_type)),
+ LinkSelfContainedDefault::Mingw => {
sess.host == sess.target
&& sess.target.vendor != "uwp"
&& detect_self_contained_mingw(&sess)
}
- // FIXME: Figure out cases in which WASM needs to link with a native toolchain.
- Some(CrtObjectsFallback::Wasm) => true,
- None => false,
}
}
@@ -1583,12 +1637,21 @@ fn crt_objects_fallback(sess: &Session, crate_type: CrateType) -> bool {
fn add_pre_link_objects(
cmd: &mut dyn Linker,
sess: &Session,
+ flavor: LinkerFlavor,
link_output_kind: LinkOutputKind,
self_contained: bool,
) {
+ // FIXME: we are currently missing some infra here (per-linker-flavor CRT objects),
+ // so Fuchsia has to be special-cased.
let opts = &sess.target;
- let objects =
- if self_contained { &opts.pre_link_objects_fallback } else { &opts.pre_link_objects };
+ let empty = Default::default();
+ let objects = if self_contained {
+ &opts.pre_link_objects_self_contained
+ } else if !(sess.target.os == "fuchsia" && matches!(flavor, LinkerFlavor::Gnu(Cc::Yes, _))) {
+ &opts.pre_link_objects
+ } else {
+ &empty
+ };
for obj in objects.get(&link_output_kind).iter().copied().flatten() {
cmd.add_object(&get_object_file_path(sess, obj, self_contained));
}
@@ -1601,9 +1664,11 @@ fn add_post_link_objects(
link_output_kind: LinkOutputKind,
self_contained: bool,
) {
- let opts = &sess.target;
- let objects =
- if self_contained { &opts.post_link_objects_fallback } else { &opts.post_link_objects };
+ let objects = if self_contained {
+ &sess.target.post_link_objects_self_contained
+ } else {
+ &sess.target.post_link_objects
+ };
for obj in objects.get(&link_output_kind).iter().copied().flatten() {
cmd.add_object(&get_object_file_path(sess, obj, self_contained));
}
@@ -1622,7 +1687,7 @@ fn add_pre_link_args(cmd: &mut dyn Linker, sess: &Session, flavor: LinkerFlavor)
fn add_link_script(cmd: &mut dyn Linker, sess: &Session, tmpdir: &Path, crate_type: CrateType) {
match (crate_type, &sess.target.link_script) {
(CrateType::Cdylib | CrateType::Executable, Some(script)) => {
- if !sess.target.linker_is_gnu {
+ if !sess.target.linker_flavor.is_gnu() {
sess.fatal("can only use link script when linking with GNU-like linker");
}
@@ -1703,6 +1768,13 @@ fn add_post_link_args(cmd: &mut dyn Linker, sess: &Session, flavor: LinkerFlavor
/// that are necessary for the linking. They are only present in the symbol table but not actually
/// used in any sections, so the linker will therefore pick relevant rlibs for linking, but
/// unused `#[no_mangle]` or `#[used]` symbols can still be discarded by GC sections.
+///
+/// There are a few internal crates in the standard library (namely libcore and
+/// libstd) which actually have a circular dependence upon one another. This
+/// currently arises through "weak lang items" where libcore requires things
+/// like `rust_begin_unwind` but libstd ends up defining it. To get this
+/// circular dependence to work correctly we declare some of these things
+/// in this synthetic object.
fn add_linked_symbol_object(
cmd: &mut dyn Linker,
sess: &Session,
@@ -1858,7 +1930,7 @@ fn add_rpath_args(
out_filename: out_filename.to_path_buf(),
has_rpath: sess.target.has_rpath,
is_like_osx: sess.target.is_like_osx,
- linker_is_gnu: sess.target.linker_is_gnu,
+ linker_is_gnu: sess.target.linker_flavor.is_gnu(),
};
cmd.args(&rpath::get_rpath_flags(&mut rpath_config));
}
@@ -1882,12 +1954,12 @@ fn linker_with_args<'a>(
out_filename: &Path,
codegen_results: &CodegenResults,
) -> Result<Command, ErrorGuaranteed> {
- let crt_objects_fallback = crt_objects_fallback(sess, crate_type);
+ let self_contained = self_contained(sess, crate_type);
let cmd = &mut *super::linker::get_linker(
sess,
path,
flavor,
- crt_objects_fallback,
+ self_contained,
&codegen_results.crate_info.target_cpu,
);
let link_output_kind = link_output_kind(sess, crate_type);
@@ -1914,7 +1986,7 @@ fn linker_with_args<'a>(
// ------------ Object code and libraries, order-dependent ------------
// Pre-link CRT objects.
- add_pre_link_objects(cmd, sess, link_output_kind, crt_objects_fallback);
+ add_pre_link_objects(cmd, sess, flavor, link_output_kind, self_contained);
add_linked_symbol_object(
cmd,
@@ -1947,7 +2019,6 @@ fn linker_with_args<'a>(
// Upstream rust libraries are not supposed to depend on our local native
// libraries as that would violate the structure of the DAG, in that
// scenario they are required to link to them as well in a shared fashion.
- // (The current implementation still doesn't prevent it though, see the FIXME below.)
//
// Note that upstream rust libraries may contain native dependencies as
// well, but they also can't depend on what we just started to add to the
@@ -1968,15 +2039,16 @@ fn linker_with_args<'a>(
// and move this option back to the top.
cmd.add_as_needed();
- // FIXME: Move this below to other native libraries
- // (or alternatively link all native libraries after their respective crates).
- // This change is somewhat breaking in practice due to local static libraries being linked
- // as whole-archive (#85144), so removing whole-archive may be a pre-requisite.
+ // Local native libraries of all kinds.
+ //
+ // If `-Zlink-native-libraries=false` is set, then the assumption is that an
+ // external build system already has the native dependencies defined, and it
+ // will provide them to the linker itself.
if sess.opts.unstable_opts.link_native_libraries {
add_local_native_libraries(cmd, sess, codegen_results);
}
- // Upstream rust libraries and their non-bundled static libraries
+ // Upstream rust libraries and their (possibly bundled) static native libraries.
add_upstream_rust_crates(
cmd,
sess,
@@ -1986,24 +2058,54 @@ fn linker_with_args<'a>(
tmpdir,
);
- // Upstream dynamic native libraries linked with `#[link]` attributes at and `-l`
- // command line options.
- // If -Zlink-native-libraries=false is set, then the assumption is that an
- // external build system already has the native dependencies defined, and it
- // will provide them to the linker itself.
+ // Dynamic native libraries from upstream crates.
+ //
+ // FIXME: Merge this into `add_upstream_rust_crates` so that all native libraries are linked
+ // together with their respective upstream crates, and in their originally specified order.
+ // This may be slightly breaking due to our use of `--as-needed` and needs a crater run.
if sess.opts.unstable_opts.link_native_libraries {
add_upstream_native_libraries(cmd, sess, codegen_results);
}
// Link with the import library generated for any raw-dylib functions.
for (raw_dylib_name, raw_dylib_imports) in
- collate_raw_dylibs(sess, &codegen_results.crate_info.used_libraries)?
+ collate_raw_dylibs(sess, codegen_results.crate_info.used_libraries.iter())?
+ {
+ cmd.add_object(&archive_builder_builder.create_dll_import_lib(
+ sess,
+ &raw_dylib_name,
+ &raw_dylib_imports,
+ tmpdir,
+ true,
+ ));
+ }
+ // As with add_upstream_native_libraries, we need to add the upstream raw-dylib symbols in case
+ // they are used within inlined functions or instantiated generic functions. We do this *after*
+ // handling the raw-dylib symbols in the current crate to make sure that those are chosen first
+ // by the linker.
+ let (_, dependency_linkage) = codegen_results
+ .crate_info
+ .dependency_formats
+ .iter()
+ .find(|(ty, _)| *ty == crate_type)
+ .expect("failed to find crate type in dependency format list");
+ let native_libraries_from_nonstatics = codegen_results
+ .crate_info
+ .native_libraries
+ .iter()
+ .filter_map(|(cnum, libraries)| {
+ (dependency_linkage[cnum.as_usize() - 1] != Linkage::Static).then(|| libraries)
+ })
+ .flatten();
+ for (raw_dylib_name, raw_dylib_imports) in
+ collate_raw_dylibs(sess, native_libraries_from_nonstatics)?
{
cmd.add_object(&archive_builder_builder.create_dll_import_lib(
sess,
&raw_dylib_name,
&raw_dylib_imports,
tmpdir,
+ false,
));
}
@@ -2024,7 +2126,7 @@ fn linker_with_args<'a>(
cmd,
sess,
link_output_kind,
- crt_objects_fallback,
+ self_contained,
flavor,
crate_type,
codegen_results,
@@ -2040,7 +2142,7 @@ fn linker_with_args<'a>(
// ------------ Object code and libraries, order-dependent ------------
// Post-link CRT objects.
- add_post_link_objects(cmd, sess, link_output_kind, crt_objects_fallback);
+ add_post_link_objects(cmd, sess, link_output_kind, self_contained);
// ------------ Late order-dependent options ------------
@@ -2057,7 +2159,7 @@ fn add_order_independent_options(
cmd: &mut dyn Linker,
sess: &Session,
link_output_kind: LinkOutputKind,
- crt_objects_fallback: bool,
+ self_contained: bool,
flavor: LinkerFlavor,
crate_type: CrateType,
codegen_results: &CodegenResults,
@@ -2070,7 +2172,10 @@ fn add_order_independent_options(
add_link_script(cmd, sess, tmpdir, crate_type);
- if sess.target.os == "fuchsia" && crate_type == CrateType::Executable {
+ if sess.target.os == "fuchsia"
+ && crate_type == CrateType::Executable
+ && !matches!(flavor, LinkerFlavor::Gnu(Cc::Yes, _))
+ {
let prefix = if sess.opts.unstable_opts.sanitizer.contains(SanitizerSet::ADDRESS) {
"asan/"
} else {
@@ -2086,7 +2191,7 @@ fn add_order_independent_options(
// Make the binary compatible with data execution prevention schemes.
cmd.add_no_exec();
- if crt_objects_fallback {
+ if self_contained {
cmd.no_crt_objects();
}
@@ -2099,11 +2204,11 @@ fn add_order_independent_options(
});
}
- if flavor == LinkerFlavor::PtxLinker {
+ if flavor == LinkerFlavor::Ptx {
// Provide the linker with fallback to internal `target-cpu`.
cmd.arg("--fallback-arch");
cmd.arg(&codegen_results.crate_info.target_cpu);
- } else if flavor == LinkerFlavor::BpfLinker {
+ } else if flavor == LinkerFlavor::Bpf {
cmd.arg("--cpu");
cmd.arg(&codegen_results.crate_info.target_cpu);
cmd.arg("--cpu-features");
@@ -2115,7 +2220,7 @@ fn add_order_independent_options(
cmd.linker_plugin_lto();
- add_library_search_dirs(cmd, sess, crt_objects_fallback);
+ add_library_search_dirs(cmd, sess, self_contained);
cmd.output_filename(out_filename);
@@ -2319,72 +2424,25 @@ fn add_upstream_rust_crates<'a>(
// crates.
let deps = &codegen_results.crate_info.used_crates;
- // There's a few internal crates in the standard library (aka libcore and
- // libstd) which actually have a circular dependence upon one another. This
- // currently arises through "weak lang items" where libcore requires things
- // like `rust_begin_unwind` but libstd ends up defining it. To get this
- // circular dependence to work correctly in all situations we'll need to be
- // sure to correctly apply the `--start-group` and `--end-group` options to
- // GNU linkers, otherwise if we don't use any other symbol from the standard
- // library it'll get discarded and the whole application won't link.
- //
- // In this loop we're calculating the `group_end`, after which crate to
- // pass `--end-group` and `group_start`, before which crate to pass
- // `--start-group`. We currently do this by passing `--end-group` after
- // the first crate (when iterating backwards) that requires a lang item
- // defined somewhere else. Once that's set then when we've defined all the
- // necessary lang items we'll pass `--start-group`.
- //
- // Note that this isn't amazing logic for now but it should do the trick
- // for the current implementation of the standard library.
- let mut group_end = None;
- let mut group_start = None;
- // Crates available for linking thus far.
- let mut available = FxHashSet::default();
- // Crates required to satisfy dependencies discovered so far.
- let mut required = FxHashSet::default();
-
- let info = &codegen_results.crate_info;
- for &cnum in deps.iter().rev() {
- if let Some(missing) = info.missing_lang_items.get(&cnum) {
- let missing_crates = missing.iter().map(|i| info.lang_item_to_crate.get(i).copied());
- required.extend(missing_crates);
- }
-
- required.insert(Some(cnum));
- available.insert(Some(cnum));
-
- if required.len() > available.len() && group_end.is_none() {
- group_end = Some(cnum);
- }
- if required.len() == available.len() && group_end.is_some() {
- group_start = Some(cnum);
- break;
- }
- }
-
- // If we didn't end up filling in all lang items from upstream crates then
- // we'll be filling it in with our crate. This probably means we're the
- // standard library itself, so skip this for now.
- if group_end.is_some() && group_start.is_none() {
- group_end = None;
- }
-
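// Illustrative note (rough GNU ld semantics, hypothetical file names): the deleted logic
// used to bracket libstd/libcore with archive groups so the linker would rescan them
// until the circular "weak lang item" references resolved, roughly:
//
//     cc ... -Wl,--start-group libstd-<hash>.rlib libcore-<hash>.rlib -Wl,--end-group ...
//
// With the synthetic object from `add_linked_symbol_object` declaring those symbols up
// front (see the doc comment added to that function earlier in this file), the rescanning
// is presumably no longer needed, which is why the `group_start`/`group_end` linker hooks
// are removed in this patch.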
let mut compiler_builtins = None;
let search_path = OnceCell::new();
for &cnum in deps.iter() {
- if group_start == Some(cnum) {
- cmd.group_start();
- }
-
// We may not pass all crates through to the linker. Some crates may
// appear statically in an existing dylib, meaning we'll pick up all the
// symbols from the dylib.
let src = &codegen_results.crate_info.used_crate_source[&cnum];
match data[cnum.as_usize() - 1] {
_ if codegen_results.crate_info.profiler_runtime == Some(cnum) => {
- add_static_crate(cmd, sess, archive_builder_builder, codegen_results, tmpdir, cnum);
+ add_static_crate(
+ cmd,
+ sess,
+ archive_builder_builder,
+ codegen_results,
+ tmpdir,
+ cnum,
+ &Default::default(),
+ );
}
// compiler-builtins are always placed last to ensure that they're
// linked correctly.
@@ -2394,17 +2452,41 @@ fn add_upstream_rust_crates<'a>(
}
Linkage::NotLinked | Linkage::IncludedFromDylib => {}
Linkage::Static => {
- add_static_crate(cmd, sess, archive_builder_builder, codegen_results, tmpdir, cnum);
+ let bundled_libs = if sess.opts.unstable_opts.packed_bundled_libs {
+ codegen_results.crate_info.native_libraries[&cnum]
+ .iter()
+ .filter_map(|lib| lib.filename)
+ .collect::<FxHashSet<_>>()
+ } else {
+ Default::default()
+ };
+ add_static_crate(
+ cmd,
+ sess,
+ archive_builder_builder,
+ codegen_results,
+ tmpdir,
+ cnum,
+ &bundled_libs,
+ );
// Link static native libs with "-bundle" modifier only if the crate they originate from
// is being linked statically to the current crate. If it's linked dynamically
// or is an rlib already included via some other dylib crate, the symbols from
// native libs will have already been included in that dylib.
//
- // If -Zlink-native-libraries=false is set, then the assumption is that an
+ // If `-Zlink-native-libraries=false` is set, then the assumption is that an
// external build system already has the native dependencies defined, and it
// will provide them to the linker itself.
if sess.opts.unstable_opts.link_native_libraries {
+ if sess.opts.unstable_opts.packed_bundled_libs {
+ // If the rlib contains native libs as archives, unpack them to tmpdir.
+ let rlib = &src.rlib.as_ref().unwrap().0;
+ archive_builder_builder
+ .extract_bundled_libs(rlib, tmpdir, &bundled_libs)
+ .unwrap_or_else(|e| sess.fatal(e));
+ }
+
let mut last = (None, NativeLibKind::Unspecified, None);
for lib in &codegen_results.crate_info.native_libraries[&cnum] {
let Some(name) = lib.name else {
@@ -2437,6 +2519,14 @@ fn add_upstream_rust_crates<'a>(
bundle: Some(false),
whole_archive: Some(false) | None,
} => {
+ // HACK/FIXME: Fixup a circular dependency between libgcc and libc
+ // with glibc. This logic should be moved to the libc crate.
+ if sess.target.os == "linux"
+ && sess.target.env == "gnu"
+ && name == "c"
+ {
+ cmd.link_staticlib("gcc", false);
+ }
cmd.link_staticlib(name, lib.verbatim.unwrap_or(false));
}
NativeLibKind::LinkArg => {
@@ -2446,20 +2536,23 @@ fn add_upstream_rust_crates<'a>(
| NativeLibKind::Framework { .. }
| NativeLibKind::Unspecified
| NativeLibKind::RawDylib => {}
- NativeLibKind::Static {
- bundle: Some(true) | None,
- whole_archive: _,
- } => {}
+ NativeLibKind::Static { bundle: Some(true) | None, whole_archive } => {
+ if sess.opts.unstable_opts.packed_bundled_libs {
+ // If the rlib contains native libs as archives, they were unpacked to tmpdir above.
+ let path = tmpdir.join(lib.filename.unwrap().as_str());
+ if whole_archive == Some(true) {
+ cmd.link_whole_rlib(&path);
+ } else {
+ cmd.link_rlib(&path);
+ }
+ }
+ }
}
}
}
}
Linkage::Dynamic => add_dynamic_crate(cmd, sess, &src.dylib.as_ref().unwrap().0),
}
-
- if group_end == Some(cnum) {
- cmd.group_end();
- }
}
// compiler-builtins are always placed last to ensure that they're
@@ -2468,7 +2561,15 @@ fn add_upstream_rust_crates<'a>(
// was already "included" in a dylib (e.g., `libstd` when `-C prefer-dynamic`
// is used)
if let Some(cnum) = compiler_builtins {
- add_static_crate(cmd, sess, archive_builder_builder, codegen_results, tmpdir, cnum);
+ add_static_crate(
+ cmd,
+ sess,
+ archive_builder_builder,
+ codegen_results,
+ tmpdir,
+ cnum,
+ &Default::default(),
+ );
}
// Converts a library file-stem into a cc -l argument
@@ -2501,6 +2602,7 @@ fn add_upstream_rust_crates<'a>(
codegen_results: &CodegenResults,
tmpdir: &Path,
cnum: CrateNum,
+ bundled_lib_file_names: &FxHashSet<Symbol>,
) {
let src = &codegen_results.crate_info.used_crate_source[&cnum];
let cratepath = &src.rlib.as_ref().unwrap().0;
@@ -2529,6 +2631,7 @@ fn add_upstream_rust_crates<'a>(
let dst = tmpdir.join(cratepath.file_name().unwrap());
let name = cratepath.file_name().unwrap().to_str().unwrap();
let name = &name[3..name.len() - 5]; // chop off lib/.rlib
+ let bundled_lib_file_names = bundled_lib_file_names.clone();
sess.prof.generic_activity_with_arg("link_altering_rlib", name).run(|| {
let canonical_name = name.replace('-', "_");
@@ -2562,6 +2665,15 @@ fn add_upstream_rust_crates<'a>(
let skip_because_lto =
upstream_rust_objects_already_included && is_rust_object && is_builtins;
+ // We skip native libraries because:
+ // 1. These native libraries won't be used from the generated rlib,
+ //    so we can throw them away to avoid the copying work.
+ // 2. We can't allow them to be the only remaining entries in the archive,
+ //    as some linkers may complain about that.
+ if bundled_lib_file_names.contains(&Symbol::intern(f)) {
+ return true;
+ }
+
if skip_because_cfg_say_so || skip_because_lto {
return true;
}
@@ -2657,7 +2769,7 @@ fn relevant_lib(sess: &Session, lib: &NativeLib) -> bool {
}
}
-fn are_upstream_rust_objects_already_included(sess: &Session) -> bool {
+pub(crate) fn are_upstream_rust_objects_already_included(sess: &Session) -> bool {
match sess.lto() {
config::Lto::Fat => true,
config::Lto::Thin => {
@@ -2674,11 +2786,16 @@ fn add_apple_sdk(cmd: &mut dyn Linker, sess: &Session, flavor: LinkerFlavor) {
let os = &sess.target.os;
let llvm_target = &sess.target.llvm_target;
if sess.target.vendor != "apple"
- || !matches!(os.as_ref(), "ios" | "tvos" | "watchos")
- || (flavor != LinkerFlavor::Gcc && flavor != LinkerFlavor::Lld(LldFlavor::Ld64))
+ || !matches!(os.as_ref(), "ios" | "tvos" | "watchos" | "macos")
+ || !matches!(flavor, LinkerFlavor::Darwin(..))
{
return;
}
+
+ if os == "macos" && !matches!(flavor, LinkerFlavor::Darwin(Cc::No, _)) {
+ return;
+ }
+
let sdk_name = match (arch.as_ref(), os.as_ref()) {
("aarch64", "tvos") => "appletvos",
("x86_64", "tvos") => "appletvsimulator",
@@ -2694,6 +2811,7 @@ fn add_apple_sdk(cmd: &mut dyn Linker, sess: &Session, flavor: LinkerFlavor) {
("aarch64", "watchos") if llvm_target.ends_with("-simulator") => "watchsimulator",
("aarch64", "watchos") => "watchos",
("arm", "watchos") => "watchos",
+ (_, "macos") => "macosx",
_ => {
sess.err(&format!("unsupported arch `{}` for os `{}`", arch, os));
return;
@@ -2708,10 +2826,10 @@ fn add_apple_sdk(cmd: &mut dyn Linker, sess: &Session, flavor: LinkerFlavor) {
};
match flavor {
- LinkerFlavor::Gcc => {
+ LinkerFlavor::Darwin(Cc::Yes, _) => {
cmd.args(&["-isysroot", &sdk_root, "-Wl,-syslibroot", &sdk_root]);
}
- LinkerFlavor::Lld(LldFlavor::Ld64) => {
+ LinkerFlavor::Darwin(Cc::No, _) => {
cmd.args(&["-syslibroot", &sdk_root]);
}
_ => unreachable!(),
@@ -2774,23 +2892,30 @@ fn get_apple_sdk_root(sdk_name: &str) -> Result<String, String> {
fn add_gcc_ld_path(cmd: &mut dyn Linker, sess: &Session, flavor: LinkerFlavor) {
if let Some(ld_impl) = sess.opts.unstable_opts.gcc_ld {
- if let LinkerFlavor::Gcc = flavor {
+ if let LinkerFlavor::Gnu(Cc::Yes, _)
+ | LinkerFlavor::Darwin(Cc::Yes, _)
+ | LinkerFlavor::WasmLld(Cc::Yes) = flavor
+ {
match ld_impl {
LdImpl::Lld => {
- let tools_path = sess.get_tools_search_paths(false);
- let gcc_ld_dir = tools_path
- .into_iter()
- .map(|p| p.join("gcc-ld"))
- .find(|p| {
- p.join(if sess.host.is_like_windows { "ld.exe" } else { "ld" }).exists()
- })
- .unwrap_or_else(|| sess.fatal("rust-lld (as ld) not found"));
- cmd.arg({
- let mut arg = OsString::from("-B");
- arg.push(gcc_ld_dir);
- arg
- });
- cmd.arg(format!("-Wl,-rustc-lld-flavor={}", sess.target.lld_flavor.as_str()));
+ // Implement the "self-contained" part of -Zgcc-ld
+ // by adding rustc distribution directories to the tool search path.
+ for path in sess.get_tools_search_paths(false) {
+ cmd.arg({
+ let mut arg = OsString::from("-B");
+ arg.push(path.join("gcc-ld"));
+ arg
+ });
+ }
+ // Implement the "linker flavor" part of -Zgcc-ld
+ // by asking cc to use some kind of lld.
+ cmd.arg("-fuse-ld=lld");
+ if !flavor.is_gnu() {
+ // Tell clang to use a non-default LLD flavor.
+ // Gcc doesn't understand the target option, but we currently assume
+ // that gcc is not used for Apple and Wasm targets (#97402).
+ cmd.arg(format!("--target={}", sess.target.llvm_target));
+ }
}
}
} else {
diff --git a/compiler/rustc_codegen_ssa/src/back/linker.rs b/compiler/rustc_codegen_ssa/src/back/linker.rs
index ce51b2e95..c49b19bdf 100644
--- a/compiler/rustc_codegen_ssa/src/back/linker.rs
+++ b/compiler/rustc_codegen_ssa/src/back/linker.rs
@@ -1,6 +1,6 @@
-use super::archive;
use super::command::Command;
use super::symbol_export;
+use crate::errors;
use rustc_span::symbol::sym;
use std::ffi::{OsStr, OsString};
@@ -11,12 +11,13 @@ use std::path::{Path, PathBuf};
use std::{env, mem, str};
use rustc_hir::def_id::{CrateNum, LOCAL_CRATE};
+use rustc_metadata::find_native_static_library;
use rustc_middle::middle::dependency_format::Linkage;
use rustc_middle::middle::exported_symbols::{ExportedSymbol, SymbolExportInfo, SymbolExportKind};
use rustc_middle::ty::TyCtxt;
use rustc_session::config::{self, CrateType, DebugInfo, LinkerPluginLto, Lto, OptLevel, Strip};
use rustc_session::Session;
-use rustc_target::spec::{LinkOutputKind, LinkerFlavor, LldFlavor};
+use rustc_target::spec::{Cc, LinkOutputKind, LinkerFlavor, Lld};
use cc::windows_registry;
@@ -56,8 +57,13 @@ pub fn get_linker<'a>(
let mut cmd = match linker.to_str() {
Some(linker) if cfg!(windows) && linker.ends_with(".bat") => Command::bat_script(linker),
_ => match flavor {
- LinkerFlavor::Lld(f) => Command::lld(linker, f),
- LinkerFlavor::Msvc if sess.opts.cg.linker.is_none() && sess.target.linker.is_none() => {
+ LinkerFlavor::Gnu(Cc::No, Lld::Yes)
+ | LinkerFlavor::Darwin(Cc::No, Lld::Yes)
+ | LinkerFlavor::WasmLld(Cc::No)
+ | LinkerFlavor::Msvc(Lld::Yes) => Command::lld(linker, flavor.lld_flavor()),
+ LinkerFlavor::Msvc(Lld::No)
+ if sess.opts.cg.linker.is_none() && sess.target.linker.is_none() =>
+ {
Command::new(msvc_tool.as_ref().map_or(linker, |t| t.path()))
}
_ => Command::new(linker),
@@ -68,9 +74,7 @@ pub fn get_linker<'a>(
// To comply with the Windows App Certification Kit,
// MSVC needs to link with the Store versions of the runtime libraries (vcruntime, msvcrt, etc).
let t = &sess.target;
- if (flavor == LinkerFlavor::Msvc || flavor == LinkerFlavor::Lld(LldFlavor::Link))
- && t.vendor == "uwp"
- {
+ if matches!(flavor, LinkerFlavor::Msvc(..)) && t.vendor == "uwp" {
if let Some(ref tool) = msvc_tool {
let original_path = tool.path();
if let Some(ref root_lib_path) = original_path.ancestors().nth(4) {
@@ -126,29 +130,25 @@ pub fn get_linker<'a>(
// to the linker args construction.
assert!(cmd.get_args().is_empty() || sess.target.vendor == "uwp");
match flavor {
- LinkerFlavor::Lld(LldFlavor::Link) | LinkerFlavor::Msvc => {
- Box::new(MsvcLinker { cmd, sess }) as Box<dyn Linker>
- }
- LinkerFlavor::Em => Box::new(EmLinker { cmd, sess }) as Box<dyn Linker>,
- LinkerFlavor::Gcc => {
- Box::new(GccLinker { cmd, sess, target_cpu, hinted_static: false, is_ld: false })
- as Box<dyn Linker>
- }
-
- LinkerFlavor::Lld(LldFlavor::Ld)
- | LinkerFlavor::Lld(LldFlavor::Ld64)
- | LinkerFlavor::Ld => {
- Box::new(GccLinker { cmd, sess, target_cpu, hinted_static: false, is_ld: true })
- as Box<dyn Linker>
+ LinkerFlavor::Unix(Cc::No) if sess.target.os == "l4re" => {
+ Box::new(L4Bender::new(cmd, sess)) as Box<dyn Linker>
}
-
- LinkerFlavor::Lld(LldFlavor::Wasm) => Box::new(WasmLd::new(cmd, sess)) as Box<dyn Linker>,
-
- LinkerFlavor::PtxLinker => Box::new(PtxLinker { cmd, sess }) as Box<dyn Linker>,
-
- LinkerFlavor::BpfLinker => Box::new(BpfLinker { cmd, sess }) as Box<dyn Linker>,
-
- LinkerFlavor::L4Bender => Box::new(L4Bender::new(cmd, sess)) as Box<dyn Linker>,
+ LinkerFlavor::WasmLld(Cc::No) => Box::new(WasmLd::new(cmd, sess)) as Box<dyn Linker>,
+ LinkerFlavor::Gnu(cc, _)
+ | LinkerFlavor::Darwin(cc, _)
+ | LinkerFlavor::WasmLld(cc)
+ | LinkerFlavor::Unix(cc) => Box::new(GccLinker {
+ cmd,
+ sess,
+ target_cpu,
+ hinted_static: false,
+ is_ld: cc == Cc::No,
+ is_gnu: flavor.is_gnu(),
+ }) as Box<dyn Linker>,
+ LinkerFlavor::Msvc(..) => Box::new(MsvcLinker { cmd, sess }) as Box<dyn Linker>,
+ LinkerFlavor::EmCc => Box::new(EmLinker { cmd, sess }) as Box<dyn Linker>,
+ LinkerFlavor::Bpf => Box::new(BpfLinker { cmd, sess }) as Box<dyn Linker>,
+ LinkerFlavor::Ptx => Box::new(PtxLinker { cmd, sess }) as Box<dyn Linker>,
}
}
@@ -186,8 +186,6 @@ pub trait Linker {
fn no_default_libraries(&mut self);
fn export_symbols(&mut self, tmpdir: &Path, crate_type: CrateType, symbols: &[String]);
fn subsystem(&mut self, subsystem: &str);
- fn group_start(&mut self);
- fn group_end(&mut self);
fn linker_plugin_lto(&mut self);
fn add_eh_frame_header(&mut self) {}
fn add_no_exec(&mut self) {}
@@ -216,6 +214,7 @@ pub struct GccLinker<'a> {
hinted_static: bool, // Keeps track of the current hinting mode.
// Link as ld
is_ld: bool,
+ is_gnu: bool,
}
impl<'a> GccLinker<'a> {
@@ -364,7 +363,7 @@ impl<'a> Linker for GccLinker<'a> {
fn set_output_kind(&mut self, output_kind: LinkOutputKind, out_filename: &Path) {
match output_kind {
LinkOutputKind::DynamicNoPicExe => {
- if !self.is_ld && self.sess.target.linker_is_gnu {
+ if !self.is_ld && self.is_gnu {
self.cmd.arg("-no-pie");
}
}
@@ -378,7 +377,7 @@ impl<'a> Linker for GccLinker<'a> {
LinkOutputKind::StaticNoPicExe => {
// `-static` works for both gcc wrapper and ld.
self.cmd.arg("-static");
- if !self.is_ld && self.sess.target.linker_is_gnu {
+ if !self.is_ld && self.is_gnu {
self.cmd.arg("-no-pie");
}
}
@@ -436,26 +435,26 @@ impl<'a> Linker for GccLinker<'a> {
// FIXME(81490): ld64 doesn't support these flags but macOS 11
// has -needed-l{} / -needed_library {}
// but we have no way to detect that here.
- self.sess.warn("`as-needed` modifier not implemented yet for ld64");
- } else if self.sess.target.linker_is_gnu && !self.sess.target.is_like_windows {
+ self.sess.emit_warning(errors::Ld64UnimplementedModifier);
+ } else if self.is_gnu && !self.sess.target.is_like_windows {
self.linker_arg("--no-as-needed");
} else {
- self.sess.warn("`as-needed` modifier not supported for current linker");
+ self.sess.emit_warning(errors::LinkerUnsupportedModifier);
}
}
self.hint_dynamic();
- self.cmd.arg(format!("-l{}{}", if verbatim { ":" } else { "" }, lib));
+ self.cmd.arg(format!("-l{}{lib}", if verbatim && self.is_gnu { ":" } else { "" },));
if !as_needed {
if self.sess.target.is_like_osx {
// See above FIXME comment
- } else if self.sess.target.linker_is_gnu && !self.sess.target.is_like_windows {
+ } else if self.is_gnu && !self.sess.target.is_like_windows {
self.linker_arg("--as-needed");
}
}
}
fn link_staticlib(&mut self, lib: &str, verbatim: bool) {
self.hint_static();
- self.cmd.arg(format!("-l{}{}", if verbatim { ":" } else { "" }, lib));
+ self.cmd.arg(format!("-l{}{lib}", if verbatim && self.is_gnu { ":" } else { "" },));
}
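// Illustrative note (GNU ld / LLD behaviour): the `-l:name` colon form asks the linker to
// search for the literal file name instead of expanding it to `libname.a`/`libname.so`,
// and only GNU-style linkers understand it; hence the new `self.is_gnu` guard:
//
//     // hypothetical values: lib = "libfoo.a", verbatim = true
//     // GNU-like linker:   "-l:libfoo.a"
//     // other linkers:     "-llibfoo.a"  (colon form unsupported, so it is not emitted)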
fn link_rlib(&mut self, lib: &Path) {
self.hint_static();
@@ -494,7 +493,7 @@ impl<'a> Linker for GccLinker<'a> {
// FIXME(81490): ld64 as of macOS 11 supports the -needed_framework
// flag but we have no way to detect that here.
// self.cmd.arg("-needed_framework").arg(framework);
- self.sess.warn("`as-needed` modifier not implemented yet for ld64");
+ self.sess.emit_warning(errors::Ld64UnimplementedModifier);
}
self.cmd.arg("-framework").arg(framework);
}
@@ -509,17 +508,14 @@ impl<'a> Linker for GccLinker<'a> {
self.hint_static();
let target = &self.sess.target;
if !target.is_like_osx {
- self.linker_arg("--whole-archive").cmd.arg(format!(
- "-l{}{}",
- if verbatim { ":" } else { "" },
- lib
- ));
+ self.linker_arg("--whole-archive");
+ self.cmd.arg(format!("-l{}{lib}", if verbatim && self.is_gnu { ":" } else { "" },));
self.linker_arg("--no-whole-archive");
} else {
// -force_load is the macOS equivalent of --whole-archive, but it
// involves passing the full path to the library to link.
self.linker_arg("-force_load");
- let lib = archive::find_library(lib, verbatim, search_path, &self.sess);
+ let lib = find_native_static_library(lib, Some(verbatim), search_path, &self.sess);
self.linker_arg(&lib);
}
}
@@ -558,21 +554,19 @@ impl<'a> Linker for GccLinker<'a> {
// eliminate the metadata. If we're building an executable, however,
// --gc-sections drops the size of hello world from 1.8MB to 597K, a 67%
// reduction.
- } else if (self.sess.target.linker_is_gnu || self.sess.target.is_like_wasm)
- && !keep_metadata
- {
+ } else if (self.is_gnu || self.sess.target.is_like_wasm) && !keep_metadata {
self.linker_arg("--gc-sections");
}
}
fn no_gc_sections(&mut self) {
- if self.sess.target.linker_is_gnu || self.sess.target.is_like_wasm {
+ if self.is_gnu || self.sess.target.is_like_wasm {
self.linker_arg("--no-gc-sections");
}
}
fn optimize(&mut self) {
- if !self.sess.target.linker_is_gnu && !self.sess.target.is_like_wasm {
+ if !self.is_gnu && !self.sess.target.is_like_wasm {
return;
}
@@ -586,7 +580,7 @@ impl<'a> Linker for GccLinker<'a> {
}
fn pgo_gen(&mut self) {
- if !self.sess.target.linker_is_gnu {
+ if !self.is_gnu {
return;
}
@@ -616,7 +610,13 @@ impl<'a> Linker for GccLinker<'a> {
match strip {
Strip::None => {}
Strip::Debuginfo => {
- self.linker_arg("--strip-debug");
+ // The illumos linker does not support --strip-debug although
+ // it does support --strip-all as a compatibility alias for -s.
+ // The --strip-debug case is handled by running an external
+ // `strip` utility as a separate step after linking.
+ if self.sess.target.os != "illumos" {
+ self.linker_arg("--strip-debug");
+ }
}
Strip::Symbols => {
self.linker_arg("--strip-all");
@@ -672,8 +672,8 @@ impl<'a> Linker for GccLinker<'a> {
writeln!(f, "_{}", sym)?;
}
};
- if let Err(e) = res {
- self.sess.fatal(&format!("failed to write lib.def file: {}", e));
+ if let Err(error) = res {
+ self.sess.emit_fatal(errors::LibDefWriteFailure { error });
}
} else if is_windows {
let res: io::Result<()> = try {
@@ -687,8 +687,8 @@ impl<'a> Linker for GccLinker<'a> {
writeln!(f, " {}", symbol)?;
}
};
- if let Err(e) = res {
- self.sess.fatal(&format!("failed to write list.def file: {}", e));
+ if let Err(error) = res {
+ self.sess.emit_fatal(errors::LibDefWriteFailure { error });
}
} else {
// Write an LD version script
@@ -704,8 +704,8 @@ impl<'a> Linker for GccLinker<'a> {
}
writeln!(f, "\n local:\n *;\n}};")?;
};
- if let Err(e) = res {
- self.sess.fatal(&format!("failed to write version script: {}", e));
+ if let Err(error) = res {
+ self.sess.emit_fatal(errors::VersionScriptWriteFailure { error });
}
}
@@ -733,18 +733,6 @@ impl<'a> Linker for GccLinker<'a> {
self.hint_dynamic(); // Reset to default before returning the composed command line.
}
- fn group_start(&mut self) {
- if self.takes_hints() {
- self.linker_arg("--start-group");
- }
- }
-
- fn group_end(&mut self) {
- if self.takes_hints() {
- self.linker_arg("--end-group");
- }
- }
-
fn linker_plugin_lto(&mut self) {
match self.sess.opts.cg.linker_plugin_lto {
LinkerPluginLto::Disabled => {
@@ -769,13 +757,13 @@ impl<'a> Linker for GccLinker<'a> {
fn add_no_exec(&mut self) {
if self.sess.target.is_like_windows {
self.linker_arg("--nxcompat");
- } else if self.sess.target.linker_is_gnu {
+ } else if self.is_gnu {
self.linker_arg("-znoexecstack");
}
}
fn add_as_needed(&mut self) {
- if self.sess.target.linker_is_gnu && !self.sess.target.is_like_windows {
+ if self.is_gnu && !self.sess.target.is_like_windows {
self.linker_arg("--as-needed");
} else if self.sess.target.is_like_solaris {
// -z ignore is the Solaris equivalent to the GNU ld --as-needed option
@@ -934,9 +922,8 @@ impl<'a> Linker for MsvcLinker<'a> {
self.cmd.arg(arg);
}
}
- Err(err) => {
- self.sess
- .warn(&format!("error enumerating natvis directory: {}", err));
+ Err(error) => {
+ self.sess.emit_warning(errors::NoNatvisDirectory { error });
}
}
}
@@ -990,8 +977,8 @@ impl<'a> Linker for MsvcLinker<'a> {
writeln!(f, " {}", symbol)?;
}
};
- if let Err(e) = res {
- self.sess.fatal(&format!("failed to write lib.def file: {}", e));
+ if let Err(error) = res {
+ self.sess.emit_fatal(errors::LibDefWriteFailure { error });
}
let mut arg = OsString::from("/DEF:");
arg.push(path);
@@ -1022,10 +1009,6 @@ impl<'a> Linker for MsvcLinker<'a> {
}
}
- // MSVC doesn't need group indicators
- fn group_start(&mut self) {}
- fn group_end(&mut self) {}
-
fn linker_plugin_lto(&mut self) {
// Do nothing
}
@@ -1168,10 +1151,6 @@ impl<'a> Linker for EmLinker<'a> {
// noop
}
- // Appears not necessary on Emscripten
- fn group_start(&mut self) {}
- fn group_end(&mut self) {}
-
fn linker_plugin_lto(&mut self) {
// Do nothing
}
@@ -1199,22 +1178,19 @@ impl<'a> WasmLd<'a> {
// sharing memory and instantiating the module multiple times. As a
// result if it were exported then we'd just have no sharing.
//
- // * `--export=__wasm_init_memory` - when using `--passive-segments` the
- // linker will synthesize this function, and so we need to make sure
- // that our usage of `--export` below won't accidentally cause this
- // function to get deleted.
- //
- // * `--export=*tls*` - when `#[thread_local]` symbols are used these
- // symbols are how the TLS segments are initialized and configured.
+ // On wasm32-unknown-unknown, we also export symbols for glue code to use:
+ // * `--export=*tls*` - when `#[thread_local]` symbols are used these
+ // symbols are how the TLS segments are initialized and configured.
if sess.target_features.contains(&sym::atomics) {
cmd.arg("--shared-memory");
cmd.arg("--max-memory=1073741824");
cmd.arg("--import-memory");
- cmd.arg("--export=__wasm_init_memory");
- cmd.arg("--export=__wasm_init_tls");
- cmd.arg("--export=__tls_size");
- cmd.arg("--export=__tls_align");
- cmd.arg("--export=__tls_base");
+ if sess.target.os == "unknown" {
+ cmd.arg("--export=__wasm_init_tls");
+ cmd.arg("--export=__tls_size");
+ cmd.arg("--export=__tls_align");
+ cmd.arg("--export=__tls_base");
+ }
}
WasmLd { cmd, sess }
}
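// Illustrative note (flags assembled by the code above when the `atomics` target feature
// is enabled): a shared wasm memory must be imported and given a maximum size
// (1073741824 bytes = 1 GiB here), and on wasm32-unknown-unknown the TLS bookkeeping
// symbols are additionally exported for the JS glue code:
//
//     --shared-memory --max-memory=1073741824 --import-memory
//     --export=__wasm_init_tls --export=__tls_size --export=__tls_align --export=__tls_base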
@@ -1339,18 +1315,16 @@ impl<'a> Linker for WasmLd<'a> {
// LLD will hide these otherwise-internal symbols since it only exports
// symbols explicitly passed via the `--export` flags above and hides all
- // others. Various bits and pieces of tooling use this, so be sure these
- // symbols make their way out of the linker as well.
- self.cmd.arg("--export=__heap_base");
- self.cmd.arg("--export=__data_end");
+ // others. Various bits and pieces of wasm32-unknown-unknown tooling use
+ // this, so be sure these symbols make their way out of the linker as well.
+ if self.sess.target.os == "unknown" {
+ self.cmd.arg("--export=__heap_base");
+ self.cmd.arg("--export=__data_end");
+ }
}
fn subsystem(&mut self, _subsystem: &str) {}
- // Not needed for now with LLD
- fn group_start(&mut self) {}
- fn group_end(&mut self) {}
-
fn linker_plugin_lto(&mut self) {
// Do nothing for now
}
@@ -1467,7 +1441,7 @@ impl<'a> Linker for L4Bender<'a> {
fn export_symbols(&mut self, _: &Path, _: CrateType, _: &[String]) {
// ToDo, not implemented, copy from GCC
- self.sess.warn("exporting symbols not implemented yet for L4Bender");
+ self.sess.emit_warning(errors::L4BenderExportingSymbolsUnimplemented);
return;
}
@@ -1479,14 +1453,6 @@ impl<'a> Linker for L4Bender<'a> {
self.hint_static(); // Reset to default before returning the composed command line.
}
- fn group_start(&mut self) {
- self.cmd.arg("--start-group");
- }
-
- fn group_end(&mut self) {
- self.cmd.arg("--end-group");
- }
-
fn linker_plugin_lto(&mut self) {}
fn control_flow_guard(&mut self) {}
@@ -1667,10 +1633,6 @@ impl<'a> Linker for PtxLinker<'a> {
fn subsystem(&mut self, _subsystem: &str) {}
- fn group_start(&mut self) {}
-
- fn group_end(&mut self) {}
-
fn linker_plugin_lto(&mut self) {}
}
@@ -1771,8 +1733,8 @@ impl<'a> Linker for BpfLinker<'a> {
writeln!(f, "{}", sym)?;
}
};
- if let Err(e) = res {
- self.sess.fatal(&format!("failed to write symbols file: {}", e));
+ if let Err(error) = res {
+ self.sess.emit_fatal(errors::SymbolFileWriteFailure { error });
} else {
self.cmd.arg("--export-symbols").arg(&path);
}
@@ -1780,9 +1742,5 @@ impl<'a> Linker for BpfLinker<'a> {
fn subsystem(&mut self, _subsystem: &str) {}
- fn group_start(&mut self) {}
-
- fn group_end(&mut self) {}
-
fn linker_plugin_lto(&mut self) {}
}
diff --git a/compiler/rustc_codegen_ssa/src/back/metadata.rs b/compiler/rustc_codegen_ssa/src/back/metadata.rs
index 0302c2881..99ddd1764 100644
--- a/compiler/rustc_codegen_ssa/src/back/metadata.rs
+++ b/compiler/rustc_codegen_ssa/src/back/metadata.rs
@@ -117,6 +117,10 @@ pub(crate) fn create_object_file(sess: &Session) -> Option<write::Object<'static
"riscv32" => Architecture::Riscv32,
"riscv64" => Architecture::Riscv64,
"sparc64" => Architecture::Sparc64,
+ "avr" => Architecture::Avr,
+ "msp430" => Architecture::Msp430,
+ "hexagon" => Architecture::Hexagon,
+ "bpf" => Architecture::Bpf,
// Unsupported architecture.
_ => return None,
};
@@ -187,12 +191,12 @@ pub enum MetadataPosition {
Last,
}
-// For rlibs we "pack" rustc metadata into a dummy object file. When rustc
-// creates a dylib crate type it will pass `--whole-archive` (or the
-// platform equivalent) to include all object files from an rlib into the
-// final dylib itself. This causes linkers to iterate and try to include all
-// files located in an archive, so if metadata is stored in an archive then
-// it needs to be of a form that the linker will be able to process.
+// For rlibs we "pack" rustc metadata into a dummy object file.
+//
+// Historically this was needed because rustc linked rlibs as whole-archive in some cases.
+// In that case the linker tries to include all files located in the archive, so if metadata is
+// stored in an archive it needs to be in a form that the linker is able to process.
+// It is now unclear whether the metadata still needs to be wrapped in an object file or not.
//
// Note, though, that we don't actually want this metadata to show up in any
// final output of the compiler. Instead this is purely for rustc's own
diff --git a/compiler/rustc_codegen_ssa/src/back/symbol_export.rs b/compiler/rustc_codegen_ssa/src/back/symbol_export.rs
index e6b605575..c2ecc4160 100644
--- a/compiler/rustc_codegen_ssa/src/back/symbol_export.rs
+++ b/compiler/rustc_codegen_ssa/src/back/symbol_export.rs
@@ -13,7 +13,7 @@ use rustc_middle::ty::query::{ExternProviders, Providers};
use rustc_middle::ty::subst::{GenericArgKind, SubstsRef};
use rustc_middle::ty::Instance;
use rustc_middle::ty::{self, SymbolName, TyCtxt};
-use rustc_session::config::CrateType;
+use rustc_session::config::{CrateType, OomStrategy};
use rustc_target::spec::SanitizerSet;
pub fn threshold(tcx: TyCtxt<'_>) -> SymbolExportLevel {
@@ -76,7 +76,7 @@ fn reachable_non_generics_provider(tcx: TyCtxt<'_>, cnum: CrateNum) -> DefIdMap<
// let it through if it's included statically.
match tcx.hir().get_by_def_id(def_id) {
Node::ForeignItem(..) => {
- tcx.is_statically_included_foreign_item(def_id).then_some(def_id)
+ tcx.native_library(def_id).map_or(false, |library| library.kind.is_statically_included()).then_some(def_id)
}
// Only consider nodes that actually have exported symbols.
@@ -103,18 +103,14 @@ fn reachable_non_generics_provider(tcx: TyCtxt<'_>, cnum: CrateNum) -> DefIdMap<
}
})
.map(|def_id| {
- let (export_level, used) = if special_runtime_crate {
- let name = tcx.symbol_name(Instance::mono(tcx, def_id.to_def_id())).name;
- // We won't link right if these symbols are stripped during LTO.
- let used = match name {
- "rust_eh_personality"
- | "rust_eh_register_frames"
- | "rust_eh_unregister_frames" => true,
- _ => false,
- };
- (SymbolExportLevel::Rust, used)
+ // We won't link right if this symbol is stripped during LTO.
+ let name = tcx.symbol_name(Instance::mono(tcx, def_id.to_def_id())).name;
+ let used = name == "rust_eh_personality";
+
+ let export_level = if special_runtime_crate {
+ SymbolExportLevel::Rust
} else {
- (symbol_export_level(tcx, def_id.to_def_id()), false)
+ symbol_export_level(tcx, def_id.to_def_id())
};
let codegen_attrs = tcx.codegen_fn_attrs(def_id.to_def_id());
debug!(
@@ -210,6 +206,15 @@ fn exported_symbols_provider_local<'tcx>(
},
));
}
+
+ symbols.push((
+ ExportedSymbol::NoDefId(SymbolName::new(tcx, OomStrategy::SYMBOL)),
+ SymbolExportInfo {
+ level: SymbolExportLevel::Rust,
+ kind: SymbolExportKind::Text,
+ used: false,
+ },
+ ));
}
if tcx.sess.instrument_coverage() || tcx.sess.opts.cg.profile_generate.enabled() {
@@ -544,7 +549,7 @@ pub fn linking_symbol_name_for_instance_in_crate<'tcx>(
.map(|fnabi| (fnabi.conv, &fnabi.args[..]))
.unwrap_or((Conv::Rust, &[]));
- // Decorate symbols with prefices, suffices and total number of bytes of arguments.
+ // Decorate symbols with prefixes, suffixes and total number of bytes of arguments.
// Reference: https://docs.microsoft.com/en-us/cpp/build/reference/decorated-names?view=msvc-170
let (prefix, suffix) = match conv {
Conv::X86Fastcall => ("@", "@"),
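The corrected comment above refers to MSVC-style name decoration. A hedged standalone sketch of the scheme, grounded only in the fastcall arm shown in this hunk (the decorated form is `@name@N`, where N is the total argument size in bytes):

// Build a decorated linker-level name from a prefix, the undecorated symbol,
// a suffix, and the total size of the arguments in bytes.
fn decorate_symbol(prefix: &str, undecorated: &str, suffix: &str, arg_bytes: u64) -> String {
    format!("{prefix}{undecorated}{suffix}{arg_bytes}")
}

// Example: a fastcall `foo` taking 8 bytes of arguments decorates to "@foo@8".
// assert_eq!(decorate_symbol("@", "foo", "@", 8), "@foo@8");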
diff --git a/compiler/rustc_codegen_ssa/src/back/write.rs b/compiler/rustc_codegen_ssa/src/back/write.rs
index 1b5ad8710..d0ac016b0 100644
--- a/compiler/rustc_codegen_ssa/src/back/write.rs
+++ b/compiler/rustc_codegen_ssa/src/back/write.rs
@@ -2,11 +2,11 @@ use super::link::{self, ensure_removed};
use super::lto::{self, SerializedModule};
use super::symbol_export::symbol_name_for_instance_in_crate;
+use crate::errors;
+use crate::traits::*;
use crate::{
CachedModuleCodegen, CodegenResults, CompiledModule, CrateInfo, ModuleCodegen, ModuleKind,
};
-
-use crate::traits::*;
use jobserver::{Acquired, Client};
use rustc_data_structures::fx::FxHashMap;
use rustc_data_structures::memmap::Mmap;
@@ -15,7 +15,10 @@ use rustc_data_structures::profiling::TimingGuard;
use rustc_data_structures::profiling::VerboseTimingGuard;
use rustc_data_structures::sync::Lrc;
use rustc_errors::emitter::Emitter;
-use rustc_errors::{DiagnosticId, FatalError, Handler, Level};
+use rustc_errors::{
+ translation::{to_fluent_args, Translate},
+ DiagnosticId, FatalError, Handler, Level,
+};
use rustc_fs_util::link_or_copy;
use rustc_hir::def_id::{CrateNum, LOCAL_CRATE};
use rustc_incremental::{
@@ -113,7 +116,6 @@ pub struct ModuleConfig {
pub vectorize_slp: bool,
pub merge_functions: bool,
pub inline_threshold: Option<u32>,
- pub new_llvm_pass_manager: Option<bool>,
pub emit_lifetime_markers: bool,
pub llvm_plugins: Vec<String>,
}
@@ -256,13 +258,15 @@ impl ModuleConfig {
{
MergeFunctions::Disabled => false,
MergeFunctions::Trampolines | MergeFunctions::Aliases => {
- sess.opts.optimize == config::OptLevel::Default
- || sess.opts.optimize == config::OptLevel::Aggressive
+ use config::OptLevel::*;
+ match sess.opts.optimize {
+ Aggressive | Default | SizeMin | Size => true,
+ Less | No => false,
+ }
}
},
inline_threshold: sess.opts.cg.inline_threshold,
- new_llvm_pass_manager: sess.opts.unstable_opts.new_llvm_pass_manager,
emit_lifetime_markers: sess.emit_lifetime_markers(),
llvm_plugins: if_regular!(sess.opts.unstable_opts.llvm_plugins.clone(), vec![]),
}
@@ -529,7 +533,7 @@ fn produce_final_output_artifacts(
// Produce final compile outputs.
let copy_gracefully = |from: &Path, to: &Path| {
if let Err(e) = fs::copy(from, to) {
- sess.err(&format!("could not copy {:?} to {:?}: {}", from, to, e));
+ sess.emit_err(errors::CopyPath::new(from, to, e));
}
};
@@ -545,7 +549,7 @@ fn produce_final_output_artifacts(
ensure_removed(sess.diagnostic(), &path);
}
} else {
- let ext = crate_output
+ let extension = crate_output
.temp_path(output_type, None)
.extension()
.unwrap()
@@ -556,19 +560,11 @@ fn produce_final_output_artifacts(
if crate_output.outputs.contains_key(&output_type) {
// 2) Multiple codegen units, with `--emit foo=some_name`. We have
// no good solution for this case, so warn the user.
- sess.warn(&format!(
- "ignoring emit path because multiple .{} files \
- were produced",
- ext
- ));
+ sess.emit_warning(errors::IgnoringEmitPath { extension });
} else if crate_output.single_output_file.is_some() {
// 3) Multiple codegen units, with `-o some_name`. We have
// no good solution for this case, so warn the user.
- sess.warn(&format!(
- "ignoring -o because multiple .{} files \
- were produced",
- ext
- ));
+ sess.emit_warning(errors::IgnoringOutput { extension });
} else {
// 4) Multiple codegen units, but no explicit name. We
// just leave the `foo.0.x` files in place.
@@ -879,14 +875,12 @@ fn execute_copy_from_cache_work_item<B: ExtraBackendMethods>(
);
match link_or_copy(&source_file, &output_path) {
Ok(_) => Some(output_path),
- Err(err) => {
- let diag_handler = cgcx.create_diag_handler();
- diag_handler.err(&format!(
- "unable to copy {} to {}: {}",
- source_file.display(),
- output_path.display(),
- err
- ));
+ Err(error) => {
+ cgcx.create_diag_handler().emit_err(errors::CopyPathBuf {
+ source_file,
+ output_path,
+ error,
+ });
None
}
}
@@ -1005,6 +999,14 @@ fn start_executing_work<B: ExtraBackendMethods>(
let coordinator_send = tx_to_llvm_workers;
let sess = tcx.sess;
+ let mut each_linked_rlib_for_lto = Vec::new();
+ drop(link::each_linked_rlib(sess, crate_info, &mut |cnum, path| {
+ if link::ignored_for_lto(sess, crate_info, cnum) {
+ return;
+ }
+ each_linked_rlib_for_lto.push((cnum, path.to_path_buf()));
+ }));
+
// Compute the set of symbols we need to retain when doing LTO (if we need to)
let exported_symbols = {
let mut exported_symbols = FxHashMap::default();
@@ -1026,7 +1028,7 @@ fn start_executing_work<B: ExtraBackendMethods>(
}
Lto::Fat | Lto::Thin => {
exported_symbols.insert(LOCAL_CRATE, copy_symbols(LOCAL_CRATE));
- for &cnum in tcx.crates(()).iter() {
+ for &(cnum, ref _path) in &each_linked_rlib_for_lto {
exported_symbols.insert(cnum, copy_symbols(cnum));
}
Some(Arc::new(exported_symbols))
@@ -1046,14 +1048,6 @@ fn start_executing_work<B: ExtraBackendMethods>(
})
.expect("failed to spawn helper thread");
- let mut each_linked_rlib_for_lto = Vec::new();
- drop(link::each_linked_rlib(crate_info, &mut |cnum, path| {
- if link::ignored_for_lto(sess, crate_info, cnum) {
- return;
- }
- each_linked_rlib_for_lto.push((cnum, path.to_path_buf()));
- }));
-
let ol =
if tcx.sess.opts.unstable_opts.no_codegen || !tcx.sess.opts.output_types.should_codegen() {
// If we know that we won’t be doing codegen, create target machines without optimisation.
@@ -1636,7 +1630,7 @@ fn start_executing_work<B: ExtraBackendMethods>(
llvm_start_time: &mut Option<VerboseTimingGuard<'a>>,
) {
if config.time_module && llvm_start_time.is_none() {
- *llvm_start_time = Some(prof.extra_verbose_generic_activity("LLVM_passes", "crate"));
+ *llvm_start_time = Some(prof.verbose_generic_activity("LLVM_passes"));
}
}
}
@@ -1737,9 +1731,19 @@ impl SharedEmitter {
}
}
+impl Translate for SharedEmitter {
+ fn fluent_bundle(&self) -> Option<&Lrc<rustc_errors::FluentBundle>> {
+ None
+ }
+
+ fn fallback_fluent_bundle(&self) -> &rustc_errors::FluentBundle {
+ panic!("shared emitter attempted to translate a diagnostic");
+ }
+}
+
impl Emitter for SharedEmitter {
fn emit_diagnostic(&mut self, diag: &rustc_errors::Diagnostic) {
- let fluent_args = self.to_fluent_args(diag.args());
+ let fluent_args = to_fluent_args(diag.args());
drop(self.sender.send(SharedEmitterMessage::Diagnostic(Diagnostic {
msg: self.translate_messages(&diag.message, &fluent_args).to_string(),
code: diag.code.clone(),
@@ -1758,14 +1762,6 @@ impl Emitter for SharedEmitter {
fn source_map(&self) -> Option<&Lrc<SourceMap>> {
None
}
-
- fn fluent_bundle(&self) -> Option<&Lrc<rustc_errors::FluentBundle>> {
- None
- }
-
- fn fallback_fluent_bundle(&self) -> &rustc_errors::FluentBundle {
- panic!("shared emitter attempted to translate a diagnostic");
- }
}
impl SharedEmitterMain {
@@ -1887,7 +1883,7 @@ impl<B: ExtraBackendMethods> OngoingCodegen<B> {
}
});
- sess.cgu_reuse_tracker.check_expected_reuse(sess.diagnostic());
+ sess.cgu_reuse_tracker.check_expected_reuse(sess);
sess.abort_if_errors();
diff --git a/compiler/rustc_codegen_ssa/src/base.rs b/compiler/rustc_codegen_ssa/src/base.rs
index a840b2709..84b89cd71 100644
--- a/compiler/rustc_codegen_ssa/src/base.rs
+++ b/compiler/rustc_codegen_ssa/src/base.rs
@@ -1,3 +1,4 @@
+use crate::back::link::are_upstream_rust_objects_already_included;
use crate::back::metadata::create_compressed_metadata_file;
use crate::back::write::{
compute_per_cgu_lto_type, start_async_codegen, submit_codegened_module_to_llvm,
@@ -12,7 +13,7 @@ use crate::traits::*;
use crate::{CachedModuleCodegen, CompiledModule, CrateInfo, MemFlags, ModuleCodegen, ModuleKind};
use rustc_attr as attr;
-use rustc_data_structures::fx::FxHashMap;
+use rustc_data_structures::fx::{FxHashMap, FxHashSet};
use rustc_data_structures::profiling::{get_resident_set_size, print_time_passes_entry};
use rustc_data_structures::sync::par_iter;
@@ -21,10 +22,12 @@ use rustc_data_structures::sync::ParallelIterator;
use rustc_hir as hir;
use rustc_hir::def_id::{DefId, LOCAL_CRATE};
use rustc_hir::lang_items::LangItem;
+use rustc_hir::weak_lang_items::WEAK_ITEMS_SYMBOLS;
use rustc_index::vec::Idx;
use rustc_metadata::EncodedMetadata;
use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrs;
use rustc_middle::middle::exported_symbols;
+use rustc_middle::middle::exported_symbols::SymbolExportKind;
use rustc_middle::middle::lang_items;
use rustc_middle::mir::mono::{CodegenUnit, CodegenUnitNameBuilder, MonoItem};
use rustc_middle::ty::layout::{HasTyCtxt, LayoutOf, TyAndLayout};
@@ -34,8 +37,9 @@ use rustc_session::cgu_reuse_tracker::CguReuse;
use rustc_session::config::{self, CrateType, EntryFnType, OutputType};
use rustc_session::Session;
use rustc_span::symbol::sym;
+use rustc_span::Symbol;
use rustc_span::{DebuggerVisualizerFile, DebuggerVisualizerType};
-use rustc_target::abi::{Align, VariantIdx};
+use rustc_target::abi::{Align, Size, VariantIdx};
use std::collections::BTreeSet;
use std::convert::TryFrom;
@@ -147,10 +151,16 @@ pub fn unsized_info<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
(&ty::Array(_, len), &ty::Slice(_)) => {
cx.const_usize(len.eval_usize(cx.tcx(), ty::ParamEnv::reveal_all()))
}
- (&ty::Dynamic(ref data_a, ..), &ty::Dynamic(ref data_b, ..)) => {
+ (
+ &ty::Dynamic(ref data_a, _, src_dyn_kind),
+ &ty::Dynamic(ref data_b, _, target_dyn_kind),
+ ) => {
+ assert_eq!(src_dyn_kind, target_dyn_kind);
+
let old_info =
old_info.expect("unsized_info: missing old info for trait upcasting coercion");
if data_a.principal_def_id() == data_b.principal_def_id() {
+ // A no-op cast that doesn't actually change anything; it should be allowed even with invalid vtables.
return old_info;
}
@@ -162,6 +172,7 @@ pub fn unsized_info<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
if let Some(entry_idx) = vptr_entry_idx {
let ptr_ty = cx.type_i8p();
let ptr_align = cx.tcx().data_layout.pointer_align.abi;
+ let vtable_ptr_ty = vtable_ptr_ty(cx, target, target_dyn_kind);
let llvtable = bx.pointercast(old_info, bx.type_ptr_to(ptr_ty));
let gep = bx.inbounds_gep(
ptr_ty,
@@ -172,23 +183,37 @@ pub fn unsized_info<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
bx.nonnull_metadata(new_vptr);
// VTable loads are invariant.
bx.set_invariant_load(new_vptr);
- new_vptr
+ bx.pointercast(new_vptr, vtable_ptr_ty)
} else {
old_info
}
}
- (_, &ty::Dynamic(ref data, ..)) => {
- let vtable_ptr_ty = cx.scalar_pair_element_backend_type(
- cx.layout_of(cx.tcx().mk_mut_ptr(target)),
- 1,
- true,
- );
+ (_, &ty::Dynamic(ref data, _, target_dyn_kind)) => {
+ let vtable_ptr_ty = vtable_ptr_ty(cx, target, target_dyn_kind);
cx.const_ptrcast(meth::get_vtable(cx, source, data.principal()), vtable_ptr_ty)
}
_ => bug!("unsized_info: invalid unsizing {:?} -> {:?}", source, target),
}
}
+// Returns the vtable pointer type of a `dyn` or `dyn*` type
+fn vtable_ptr_ty<'tcx, Cx: CodegenMethods<'tcx>>(
+ cx: &Cx,
+ target: Ty<'tcx>,
+ kind: ty::DynKind,
+) -> <Cx as BackendTypes>::Type {
+ cx.scalar_pair_element_backend_type(
+ cx.layout_of(match kind {
+ // vtable is the second field of `*mut dyn Trait`
+ ty::Dyn => cx.tcx().mk_mut_ptr(target),
+ // vtable is the second field of `dyn* Trait`
+ ty::DynStar => target,
+ }),
+ 1,
+ true,
+ )
+}
+
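A conceptual sketch of why field 1 is the vtable pointer in both arms of `vtable_ptr_ty` (illustrative `#[repr(C)]` structs only; the real layouts are computed by the compiler and `dyn*` is unstable):

// A wide pointer to `dyn Trait` and a `dyn* Trait` value are both two-word
// (data, vtable) pairs; only the meaning of the first word differs.
#[repr(C)]
struct WideDynPtr {
    data: *mut (),     // field 0: pointer to the unsized object
    vtable: *const (), // field 1: vtable pointer
}

#[repr(C)]
struct DynStarValue {
    data: usize,       // field 0: pointer-sized payload stored inline
    vtable: *const (), // field 1: vtable pointer
}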
/// Coerces `src` to `dst_ty`. `src_ty` must be a pointer.
pub fn unsize_ptr<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
bx: &mut Bx,
@@ -238,6 +263,29 @@ pub fn unsize_ptr<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
}
}
+/// Coerces `src` to `dst_ty` which is guaranteed to be a `dyn*` type.
+pub fn cast_to_dyn_star<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
+ bx: &mut Bx,
+ src: Bx::Value,
+ src_ty_and_layout: TyAndLayout<'tcx>,
+ dst_ty: Ty<'tcx>,
+ old_info: Option<Bx::Value>,
+) -> (Bx::Value, Bx::Value) {
+ debug!("cast_to_dyn_star: {:?} => {:?}", src_ty_and_layout.ty, dst_ty);
+ assert!(
+ matches!(dst_ty.kind(), ty::Dynamic(_, _, ty::DynStar)),
+ "destination type must be a dyn*"
+ );
+ // FIXME(dyn-star): this is probably not the best way to check if this is
+ // a pointer, and really we should ensure that the value is a suitable
+ // pointer earlier in the compilation process.
+ let src = match src_ty_and_layout.pointee_info_at(bx.cx(), Size::ZERO) {
+ Some(_) => bx.ptrtoint(src, bx.cx().type_isize()),
+ None => bx.bitcast(src, bx.type_isize()),
+ };
+ (src, unsized_info(bx, src_ty_and_layout.ty, dst_ty, old_info))
+}
+
/// Coerces `src`, which is a reference to a value of type `src_ty`,
/// to a value of type `dst_ty`, and stores the result in `dst`.
pub fn coerce_unsized_into<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
@@ -289,40 +337,26 @@ pub fn coerce_unsized_into<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
pub fn cast_shift_expr_rhs<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
bx: &mut Bx,
- op: hir::BinOpKind,
- lhs: Bx::Value,
- rhs: Bx::Value,
-) -> Bx::Value {
- cast_shift_rhs(bx, op, lhs, rhs)
-}
-
-fn cast_shift_rhs<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
- bx: &mut Bx,
- op: hir::BinOpKind,
lhs: Bx::Value,
rhs: Bx::Value,
) -> Bx::Value {
// Shifts may have any size int on the rhs
- if op.is_shift() {
- let mut rhs_llty = bx.cx().val_ty(rhs);
- let mut lhs_llty = bx.cx().val_ty(lhs);
- if bx.cx().type_kind(rhs_llty) == TypeKind::Vector {
- rhs_llty = bx.cx().element_type(rhs_llty)
- }
- if bx.cx().type_kind(lhs_llty) == TypeKind::Vector {
- lhs_llty = bx.cx().element_type(lhs_llty)
- }
- let rhs_sz = bx.cx().int_width(rhs_llty);
- let lhs_sz = bx.cx().int_width(lhs_llty);
- if lhs_sz < rhs_sz {
- bx.trunc(rhs, lhs_llty)
- } else if lhs_sz > rhs_sz {
- // FIXME (#1877: If in the future shifting by negative
- // values is no longer undefined then this is wrong.
- bx.zext(rhs, lhs_llty)
- } else {
- rhs
- }
+ let mut rhs_llty = bx.cx().val_ty(rhs);
+ let mut lhs_llty = bx.cx().val_ty(lhs);
+ if bx.cx().type_kind(rhs_llty) == TypeKind::Vector {
+ rhs_llty = bx.cx().element_type(rhs_llty)
+ }
+ if bx.cx().type_kind(lhs_llty) == TypeKind::Vector {
+ lhs_llty = bx.cx().element_type(lhs_llty)
+ }
+ let rhs_sz = bx.cx().int_width(rhs_llty);
+ let lhs_sz = bx.cx().int_width(lhs_llty);
+ if lhs_sz < rhs_sz {
+ bx.trunc(rhs, lhs_llty)
+ } else if lhs_sz > rhs_sz {
+ // FIXME (#1877: If in the future shifting by negative
+ // values is no longer undefined then this is wrong.
+ bx.zext(rhs, lhs_llty)
} else {
rhs
}
@@ -388,15 +422,14 @@ pub fn maybe_create_entry_wrapper<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
let main_llfn = cx.get_fn_addr(instance);
- let use_start_lang_item = EntryFnType::Start != entry_type;
- let entry_fn = create_entry_fn::<Bx>(cx, main_llfn, main_def_id, use_start_lang_item);
+ let entry_fn = create_entry_fn::<Bx>(cx, main_llfn, main_def_id, entry_type);
return Some(entry_fn);
fn create_entry_fn<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
cx: &'a Bx::CodegenCx,
rust_main: Bx::Value,
rust_main_def_id: DefId,
- use_start_lang_item: bool,
+ entry_type: EntryFnType,
) -> Bx::Function {
// The entry function is either `int main(void)` or `int main(int argc, char **argv)`,
// depending on whether the target needs `argc` and `argv` to be passed in.
@@ -441,7 +474,7 @@ pub fn maybe_create_entry_wrapper<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
let i8pp_ty = cx.type_ptr_to(cx.type_i8p());
let (arg_argc, arg_argv) = get_argc_argv(cx, &mut bx);
- let (start_fn, start_ty, args) = if use_start_lang_item {
+ let (start_fn, start_ty, args) = if let EntryFnType::Main { sigpipe } = entry_type {
let start_def_id = cx.tcx().require_lang_item(LangItem::Start, None);
let start_fn = cx.get_fn_addr(
ty::Instance::resolve(
@@ -453,15 +486,20 @@ pub fn maybe_create_entry_wrapper<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
.unwrap()
.unwrap(),
);
- let start_ty = cx.type_func(&[cx.val_ty(rust_main), isize_ty, i8pp_ty], isize_ty);
- (start_fn, start_ty, vec![rust_main, arg_argc, arg_argv])
+
+ let i8_ty = cx.type_i8();
+ let arg_sigpipe = bx.const_u8(sigpipe);
+
+ let start_ty =
+ cx.type_func(&[cx.val_ty(rust_main), isize_ty, i8pp_ty, i8_ty], isize_ty);
+ (start_fn, start_ty, vec![rust_main, arg_argc, arg_argv, arg_sigpipe])
} else {
debug!("using user-defined start fn");
let start_ty = cx.type_func(&[isize_ty, i8pp_ty], isize_ty);
(rust_main, start_ty, vec![arg_argc, arg_argv])
};
- let result = bx.call(start_ty, start_fn, &args, None);
+ let result = bx.call(start_ty, None, start_fn, &args, None);
let cast = bx.intcast(result, cx.type_int(), true);
bx.ret(cast);
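A standalone sketch of the wrapper this hunk now emits for `EntryFnType::Main { sigpipe }`: the start lang item gains a fourth, `u8`-sized argument carrying the `sigpipe` value (names are illustrative, not actual rustc output):

// `lang_start` stands in for the function resolved via LangItem::Start above.
fn entry_wrapper(
    lang_start: fn(fn(), isize, *const *const u8, u8) -> isize,
    rust_main: fn(),
    argc: isize,
    argv: *const *const u8,
    sigpipe: u8,
) -> isize {
    // The generated code then truncates this result to the C `int` return type.
    lang_start(rust_main, argc, argv, sigpipe)
}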
@@ -810,21 +848,16 @@ impl CrateInfo {
crate_name: Default::default(),
used_crates,
used_crate_source: Default::default(),
- lang_item_to_crate: Default::default(),
- missing_lang_items: Default::default(),
dependency_formats: tcx.dependency_formats(()).clone(),
windows_subsystem,
natvis_debugger_visualizers: Default::default(),
};
- let lang_items = tcx.lang_items();
-
let crates = tcx.crates(());
let n_crates = crates.len();
info.native_libraries.reserve(n_crates);
info.crate_name.reserve(n_crates);
info.used_crate_source.reserve(n_crates);
- info.missing_lang_items.reserve(n_crates);
for &cnum in crates.iter() {
info.native_libraries
@@ -842,17 +875,41 @@ impl CrateInfo {
if tcx.is_no_builtins(cnum) {
info.is_no_builtins.insert(cnum);
}
- let missing = tcx.missing_lang_items(cnum);
- for &item in missing.iter() {
- if let Ok(id) = lang_items.require(item) {
- info.lang_item_to_crate.insert(item, id.krate);
- }
- }
+ }
- // No need to look for lang items that don't actually need to exist.
- let missing =
- missing.iter().cloned().filter(|&l| lang_items::required(tcx, l)).collect();
- info.missing_lang_items.insert(cnum, missing);
+ // Handle circular dependencies in the standard library.
+ // See comment before `add_linked_symbol_object` function for the details.
+ // If global LTO is enabled then almost everything (*) is glued into a single object file,
+ // so this logic is not necessary and can cause issues on some targets (due to weak lang
+ // item symbols being "privatized" to that object file), so we disable it.
+ // (*) Native libs, and `#[compiler_builtins]` and `#[no_builtins]` crates are not glued,
+ // and we assume that they cannot define weak lang items. This is not currently enforced
+ // by the compiler, but that's ok because all this stuff is unstable anyway.
+ let target = &tcx.sess.target;
+ if !are_upstream_rust_objects_already_included(tcx.sess) {
+ let missing_weak_lang_items: FxHashSet<&Symbol> = info
+ .used_crates
+ .iter()
+ .flat_map(|cnum| {
+ tcx.missing_lang_items(*cnum)
+ .iter()
+ .filter(|l| lang_items::required(tcx, **l))
+ .filter_map(|item| WEAK_ITEMS_SYMBOLS.get(item))
+ })
+ .collect();
+ let prefix = if target.is_like_windows && target.arch == "x86" { "_" } else { "" };
+ info.linked_symbols
+ .iter_mut()
+ .filter(|(crate_type, _)| {
+ !matches!(crate_type, CrateType::Rlib | CrateType::Staticlib)
+ })
+ .for_each(|(_, linked_symbols)| {
+ linked_symbols.extend(
+ missing_weak_lang_items
+ .iter()
+ .map(|item| (format!("{prefix}{item}"), SymbolExportKind::Text)),
+ )
+ });
}
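A standalone sketch of the symbol-name rule used when extending `linked_symbols` above (grounded in the `prefix` computation shown in this block):

// 32-bit Windows targets decorate C-level symbol names with a leading
// underscore; other targets pass the weak lang item symbol through unchanged.
fn linked_symbol_name(is_like_windows: bool, arch: &str, item: &str) -> String {
    let prefix = if is_like_windows && arch == "x86" { "_" } else { "" };
    format!("{prefix}{item}")
}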
let embed_visualizers = tcx.sess.crate_types().iter().any(|&crate_type| match crate_type {
@@ -873,7 +930,7 @@ impl CrateInfo {
}
});
- if tcx.sess.target.is_like_msvc && embed_visualizers {
+ if target.is_like_msvc && embed_visualizers {
info.natvis_debugger_visualizers =
collect_debugger_visualizers_transitive(tcx, DebuggerVisualizerType::Natvis);
}
diff --git a/compiler/rustc_codegen_ssa/src/common.rs b/compiler/rustc_codegen_ssa/src/common.rs
index 8ca1a6084..71f9179d0 100644
--- a/compiler/rustc_codegen_ssa/src/common.rs
+++ b/compiler/rustc_codegen_ssa/src/common.rs
@@ -1,7 +1,6 @@
#![allow(non_camel_case_types)]
use rustc_errors::struct_span_err;
-use rustc_hir as hir;
use rustc_hir::LangItem;
use rustc_middle::mir::interpret::ConstValue;
use rustc_middle::ty::{self, layout::TyAndLayout, Ty, TyCtxt};
@@ -140,7 +139,7 @@ pub fn build_unchecked_lshift<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
lhs: Bx::Value,
rhs: Bx::Value,
) -> Bx::Value {
- let rhs = base::cast_shift_expr_rhs(bx, hir::BinOpKind::Shl, lhs, rhs);
+ let rhs = base::cast_shift_expr_rhs(bx, lhs, rhs);
// #1877, #10183: Ensure that input is always valid
let rhs = shift_mask_rhs(bx, rhs);
bx.shl(lhs, rhs)
@@ -152,7 +151,7 @@ pub fn build_unchecked_rshift<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
lhs: Bx::Value,
rhs: Bx::Value,
) -> Bx::Value {
- let rhs = base::cast_shift_expr_rhs(bx, hir::BinOpKind::Shr, lhs, rhs);
+ let rhs = base::cast_shift_expr_rhs(bx, lhs, rhs);
// #1877, #10183: Ensure that input is always valid
let rhs = shift_mask_rhs(bx, rhs);
let is_signed = lhs_t.is_signed();
diff --git a/compiler/rustc_codegen_ssa/src/debuginfo/type_names.rs b/compiler/rustc_codegen_ssa/src/debuginfo/type_names.rs
index 8cd5a0fc2..e05646e1e 100644
--- a/compiler/rustc_codegen_ssa/src/debuginfo/type_names.rs
+++ b/compiler/rustc_codegen_ssa/src/debuginfo/type_names.rs
@@ -18,11 +18,10 @@ use rustc_hir::definitions::{DefPathData, DefPathDataName, DisambiguatedDefPathD
use rustc_hir::{AsyncGeneratorKind, GeneratorKind, Mutability};
use rustc_middle::ty::layout::{IntegerExt, TyAndLayout};
use rustc_middle::ty::subst::{GenericArgKind, SubstsRef};
-use rustc_middle::ty::{self, ExistentialProjection, GeneratorSubsts, ParamEnv, Ty, TyCtxt};
-use rustc_target::abi::{Integer, TagEncoding, Variants};
+use rustc_middle::ty::{self, ExistentialProjection, ParamEnv, Ty, TyCtxt};
+use rustc_target::abi::Integer;
use smallvec::SmallVec;
-use std::borrow::Cow;
use std::fmt::Write;
use crate::debuginfo::wants_c_like_enum_debuginfo;
@@ -98,7 +97,6 @@ fn push_debuginfo_type_name<'tcx>(
if let Some(ty_and_layout) = layout_for_cpp_like_fallback {
msvc_enum_fallback(
- tcx,
ty_and_layout,
&|output, visited| {
push_item_name(tcx, def.did(), true, output);
@@ -391,11 +389,10 @@ fn push_debuginfo_type_name<'tcx>(
// Name will be "{closure_env#0}<T1, T2, ...>", "{generator_env#0}<T1, T2, ...>", or
// "{async_fn_env#0}<T1, T2, ...>", etc.
// In the case of cpp-like debuginfo, the name additionally gets wrapped inside of
- // an artificial `enum$<>` type, as defined in msvc_enum_fallback().
+ // an artificial `enum2$<>` type, as defined in msvc_enum_fallback().
if cpp_like_debuginfo && t.is_generator() {
let ty_and_layout = tcx.layout_of(ParamEnv::reveal_all().and(t)).unwrap();
msvc_enum_fallback(
- tcx,
ty_and_layout,
&|output, visited| {
push_closure_or_generator_name(tcx, def_id, substs, true, output, visited);
@@ -428,58 +425,17 @@ fn push_debuginfo_type_name<'tcx>(
/// MSVC names enums differently than other platforms so that the debugging visualization
// format (natvis) is able to understand enums and render the active variant correctly in the
- // debugger. For more information, look in `src/etc/natvis/intrinsic.natvis` and
- // `EnumMemberDescriptionFactor::create_member_descriptions`.
+ // debugger. For more information, look in
+ // rustc_codegen_llvm/src/debuginfo/metadata/enums/cpp_like.rs.
fn msvc_enum_fallback<'tcx>(
- tcx: TyCtxt<'tcx>,
ty_and_layout: TyAndLayout<'tcx>,
push_inner: &dyn Fn(/*output*/ &mut String, /*visited*/ &mut FxHashSet<Ty<'tcx>>),
output: &mut String,
visited: &mut FxHashSet<Ty<'tcx>>,
) {
debug_assert!(!wants_c_like_enum_debuginfo(ty_and_layout));
- let ty = ty_and_layout.ty;
-
- output.push_str("enum$<");
+ output.push_str("enum2$<");
push_inner(output, visited);
-
- let variant_name = |variant_index| match ty.kind() {
- ty::Adt(adt_def, _) => {
- debug_assert!(adt_def.is_enum());
- Cow::from(adt_def.variant(variant_index).name.as_str())
- }
- ty::Generator(..) => GeneratorSubsts::variant_name(variant_index),
- _ => unreachable!(),
- };
-
- if let Variants::Multiple {
- tag_encoding: TagEncoding::Niche { dataful_variant, .. },
- tag,
- variants,
- ..
- } = &ty_and_layout.variants
- {
- let dataful_variant_layout = &variants[*dataful_variant];
-
- // calculate the range of values for the dataful variant
- let dataful_discriminant_range =
- dataful_variant_layout.largest_niche().unwrap().valid_range;
-
- let min = dataful_discriminant_range.start;
- let min = tag.size(&tcx).truncate(min);
-
- let max = dataful_discriminant_range.end;
- let max = tag.size(&tcx).truncate(max);
-
- let dataful_variant_name = variant_name(*dataful_variant);
- write!(output, ", {}, {}, {}", min, max, dataful_variant_name).unwrap();
- } else if let Variants::Single { index: variant_idx } = &ty_and_layout.variants {
- // Uninhabited enums can't be constructed and should never need to be visualized so
- // skip this step for them.
- if !ty_and_layout.abi.is_uninhabited() {
- write!(output, ", {}", variant_name(*variant_idx)).unwrap();
- }
- }
push_close_angle_bracket(true, output);
}
@@ -710,10 +666,8 @@ fn push_const_param<'tcx>(tcx: TyCtxt<'tcx>, ct: ty::Const<'tcx>, output: &mut S
hcx.while_hashing_spans(false, |hcx| {
ct.to_valtree().hash_stable(hcx, &mut hasher)
});
- // Note: Don't use `StableHashResult` impl of `u64` here directly, since that
- // would lead to endianness problems.
- let hash: u128 = hasher.finish();
- (hash.to_le() as u64).to_le()
+ let hash: u64 = hasher.finish();
+ hash
});
if cpp_like_debuginfo(tcx) {
diff --git a/compiler/rustc_codegen_ssa/src/errors.rs b/compiler/rustc_codegen_ssa/src/errors.rs
new file mode 100644
index 000000000..ebb531f1c
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/src/errors.rs
@@ -0,0 +1,356 @@
+//! Errors emitted by codegen_ssa
+
+use crate::back::command::Command;
+use rustc_errors::{
+ fluent, DiagnosticArgValue, DiagnosticBuilder, ErrorGuaranteed, Handler, IntoDiagnostic,
+ IntoDiagnosticArg,
+};
+use rustc_macros::Diagnostic;
+use rustc_span::{Span, Symbol};
+use std::borrow::Cow;
+use std::io::Error;
+use std::path::{Path, PathBuf};
+use std::process::ExitStatus;
+
+#[derive(Diagnostic)]
+#[diag(codegen_ssa_lib_def_write_failure)]
+pub struct LibDefWriteFailure {
+ pub error: Error,
+}
+
+#[derive(Diagnostic)]
+#[diag(codegen_ssa_version_script_write_failure)]
+pub struct VersionScriptWriteFailure {
+ pub error: Error,
+}
+
+#[derive(Diagnostic)]
+#[diag(codegen_ssa_symbol_file_write_failure)]
+pub struct SymbolFileWriteFailure {
+ pub error: Error,
+}
+
+#[derive(Diagnostic)]
+#[diag(codegen_ssa_ld64_unimplemented_modifier)]
+pub struct Ld64UnimplementedModifier;
+
+#[derive(Diagnostic)]
+#[diag(codegen_ssa_linker_unsupported_modifier)]
+pub struct LinkerUnsupportedModifier;
+
+#[derive(Diagnostic)]
+#[diag(codegen_ssa_L4Bender_exporting_symbols_unimplemented)]
+pub struct L4BenderExportingSymbolsUnimplemented;
+
+#[derive(Diagnostic)]
+#[diag(codegen_ssa_no_natvis_directory)]
+pub struct NoNatvisDirectory {
+ pub error: Error,
+}
+
+#[derive(Diagnostic)]
+#[diag(codegen_ssa_copy_path_buf)]
+pub struct CopyPathBuf {
+ pub source_file: PathBuf,
+ pub output_path: PathBuf,
+ pub error: Error,
+}
+
+// Reports Paths using `Debug` implementation rather than Path's `Display` implementation.
+#[derive(Diagnostic)]
+#[diag(codegen_ssa_copy_path)]
+pub struct CopyPath<'a> {
+ from: DebugArgPath<'a>,
+ to: DebugArgPath<'a>,
+ error: Error,
+}
+
+impl<'a> CopyPath<'a> {
+ pub fn new(from: &'a Path, to: &'a Path, error: Error) -> CopyPath<'a> {
+ CopyPath { from: DebugArgPath(from), to: DebugArgPath(to), error }
+ }
+}
+
+struct DebugArgPath<'a>(pub &'a Path);
+
+impl IntoDiagnosticArg for DebugArgPath<'_> {
+ fn into_diagnostic_arg(self) -> rustc_errors::DiagnosticArgValue<'static> {
+ DiagnosticArgValue::Str(Cow::Owned(format!("{:?}", self.0)))
+ }
+}
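To illustrate the comment above about preferring `Debug` over `Display` for paths: `Path::display()` prints the bare (possibly lossily converted) path, while the `Debug` form quotes and escapes it, which is what `DebugArgPath` feeds into the diagnostic argument. A small self-contained example:

use std::path::Path;

fn main() {
    let p = Path::new("/tmp/build dir/out.o");
    println!("{}", p.display()); // /tmp/build dir/out.o
    println!("{:?}", p);         // "/tmp/build dir/out.o" (quoted and escaped)
}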
+
+#[derive(Diagnostic)]
+#[diag(codegen_ssa_ignoring_emit_path)]
+pub struct IgnoringEmitPath {
+ pub extension: String,
+}
+
+#[derive(Diagnostic)]
+#[diag(codegen_ssa_ignoring_output)]
+pub struct IgnoringOutput {
+ pub extension: String,
+}
+
+#[derive(Diagnostic)]
+#[diag(codegen_ssa_create_temp_dir)]
+pub struct CreateTempDir {
+ pub error: Error,
+}
+
+#[derive(Diagnostic)]
+#[diag(codegen_ssa_incompatible_linking_modifiers)]
+pub struct IncompatibleLinkingModifiers;
+
+#[derive(Diagnostic)]
+#[diag(codegen_ssa_add_native_library)]
+pub struct AddNativeLibrary {
+ pub library_path: PathBuf,
+ pub error: Error,
+}
+
+#[derive(Diagnostic)]
+#[diag(codegen_ssa_multiple_external_func_decl)]
+pub struct MultipleExternalFuncDecl<'a> {
+ #[primary_span]
+ pub span: Span,
+ pub function: Symbol,
+ pub library_name: &'a str,
+}
+
+#[derive(Diagnostic)]
+pub enum LinkRlibError {
+ #[diag(codegen_ssa_rlib_missing_format)]
+ MissingFormat,
+
+ #[diag(codegen_ssa_rlib_only_rmeta_found)]
+ OnlyRmetaFound { crate_name: Symbol },
+
+ #[diag(codegen_ssa_rlib_not_found)]
+ NotFound { crate_name: Symbol },
+
+ #[diag(codegen_ssa_rlib_incompatible_dependency_formats)]
+ IncompatibleDependencyFormats { ty1: String, ty2: String, list1: String, list2: String },
+}
+
+pub struct ThorinErrorWrapper(pub thorin::Error);
+
+impl IntoDiagnostic<'_> for ThorinErrorWrapper {
+ fn into_diagnostic(self, handler: &Handler) -> DiagnosticBuilder<'_, ErrorGuaranteed> {
+ let mut diag;
+ match self.0 {
+ thorin::Error::ReadInput(_) => {
+ diag = handler.struct_err(fluent::codegen_ssa_thorin_read_input_failure);
+ diag
+ }
+ thorin::Error::ParseFileKind(_) => {
+ diag = handler.struct_err(fluent::codegen_ssa_thorin_parse_input_file_kind);
+ diag
+ }
+ thorin::Error::ParseObjectFile(_) => {
+ diag = handler.struct_err(fluent::codegen_ssa_thorin_parse_input_object_file);
+ diag
+ }
+ thorin::Error::ParseArchiveFile(_) => {
+ diag = handler.struct_err(fluent::codegen_ssa_thorin_parse_input_archive_file);
+ diag
+ }
+ thorin::Error::ParseArchiveMember(_) => {
+ diag = handler.struct_err(fluent::codegen_ssa_thorin_parse_archive_member);
+ diag
+ }
+ thorin::Error::InvalidInputKind => {
+ diag = handler.struct_err(fluent::codegen_ssa_thorin_invalid_input_kind);
+ diag
+ }
+ thorin::Error::DecompressData(_) => {
+ diag = handler.struct_err(fluent::codegen_ssa_thorin_decompress_data);
+ diag
+ }
+ thorin::Error::NamelessSection(_, offset) => {
+ diag = handler.struct_err(fluent::codegen_ssa_thorin_section_without_name);
+ diag.set_arg("offset", format!("0x{:08x}", offset));
+ diag
+ }
+ thorin::Error::RelocationWithInvalidSymbol(section, offset) => {
+ diag =
+ handler.struct_err(fluent::codegen_ssa_thorin_relocation_with_invalid_symbol);
+ diag.set_arg("section", section);
+ diag.set_arg("offset", format!("0x{:08x}", offset));
+ diag
+ }
+ thorin::Error::MultipleRelocations(section, offset) => {
+ diag = handler.struct_err(fluent::codegen_ssa_thorin_multiple_relocations);
+ diag.set_arg("section", section);
+ diag.set_arg("offset", format!("0x{:08x}", offset));
+ diag
+ }
+ thorin::Error::UnsupportedRelocation(section, offset) => {
+ diag = handler.struct_err(fluent::codegen_ssa_thorin_unsupported_relocation);
+ diag.set_arg("section", section);
+ diag.set_arg("offset", format!("0x{:08x}", offset));
+ diag
+ }
+ thorin::Error::MissingDwoName(id) => {
+ diag = handler.struct_err(fluent::codegen_ssa_thorin_missing_dwo_name);
+ diag.set_arg("id", format!("0x{:08x}", id));
+ diag
+ }
+ thorin::Error::NoCompilationUnits => {
+ diag = handler.struct_err(fluent::codegen_ssa_thorin_no_compilation_units);
+ diag
+ }
+ thorin::Error::NoDie => {
+ diag = handler.struct_err(fluent::codegen_ssa_thorin_no_die);
+ diag
+ }
+ thorin::Error::TopLevelDieNotUnit => {
+ diag = handler.struct_err(fluent::codegen_ssa_thorin_top_level_die_not_unit);
+ diag
+ }
+ thorin::Error::MissingRequiredSection(section) => {
+ diag = handler.struct_err(fluent::codegen_ssa_thorin_missing_required_section);
+ diag.set_arg("section", section);
+ diag
+ }
+ thorin::Error::ParseUnitAbbreviations(_) => {
+ diag = handler.struct_err(fluent::codegen_ssa_thorin_parse_unit_abbreviations);
+ diag
+ }
+ thorin::Error::ParseUnitAttribute(_) => {
+ diag = handler.struct_err(fluent::codegen_ssa_thorin_parse_unit_attribute);
+ diag
+ }
+ thorin::Error::ParseUnitHeader(_) => {
+ diag = handler.struct_err(fluent::codegen_ssa_thorin_parse_unit_header);
+ diag
+ }
+ thorin::Error::ParseUnit(_) => {
+ diag = handler.struct_err(fluent::codegen_ssa_thorin_parse_unit);
+ diag
+ }
+ thorin::Error::IncompatibleIndexVersion(section, format, actual) => {
+ diag = handler.struct_err(fluent::codegen_ssa_thorin_incompatible_index_version);
+ diag.set_arg("section", section);
+ diag.set_arg("actual", actual);
+ diag.set_arg("format", format);
+ diag
+ }
+ thorin::Error::OffsetAtIndex(_, index) => {
+ diag = handler.struct_err(fluent::codegen_ssa_thorin_offset_at_index);
+ diag.set_arg("index", index);
+ diag
+ }
+ thorin::Error::StrAtOffset(_, offset) => {
+ diag = handler.struct_err(fluent::codegen_ssa_thorin_str_at_offset);
+ diag.set_arg("offset", format!("0x{:08x}", offset));
+ diag
+ }
+ thorin::Error::ParseIndex(_, section) => {
+ diag = handler.struct_err(fluent::codegen_ssa_thorin_parse_index);
+ diag.set_arg("section", section);
+ diag
+ }
+ thorin::Error::UnitNotInIndex(unit) => {
+ diag = handler.struct_err(fluent::codegen_ssa_thorin_unit_not_in_index);
+ diag.set_arg("unit", format!("0x{:08x}", unit));
+ diag
+ }
+ thorin::Error::RowNotInIndex(_, row) => {
+ diag = handler.struct_err(fluent::codegen_ssa_thorin_row_not_in_index);
+ diag.set_arg("row", row);
+ diag
+ }
+ thorin::Error::SectionNotInRow => {
+ diag = handler.struct_err(fluent::codegen_ssa_thorin_section_not_in_row);
+ diag
+ }
+ thorin::Error::EmptyUnit(unit) => {
+ diag = handler.struct_err(fluent::codegen_ssa_thorin_empty_unit);
+ diag.set_arg("unit", format!("0x{:08x}", unit));
+ diag
+ }
+ thorin::Error::MultipleDebugInfoSection => {
+ diag = handler.struct_err(fluent::codegen_ssa_thorin_multiple_debug_info_section);
+ diag
+ }
+ thorin::Error::MultipleDebugTypesSection => {
+ diag = handler.struct_err(fluent::codegen_ssa_thorin_multiple_debug_types_section);
+ diag
+ }
+ thorin::Error::NotSplitUnit => {
+ diag = handler.struct_err(fluent::codegen_ssa_thorin_not_split_unit);
+ diag
+ }
+ thorin::Error::DuplicateUnit(unit) => {
+ diag = handler.struct_err(fluent::codegen_ssa_thorin_duplicate_unit);
+ diag.set_arg("unit", format!("0x{:08x}", unit));
+ diag
+ }
+ thorin::Error::MissingReferencedUnit(unit) => {
+ diag = handler.struct_err(fluent::codegen_ssa_thorin_missing_referenced_unit);
+ diag.set_arg("unit", format!("0x{:08x}", unit));
+ diag
+ }
+ thorin::Error::NoOutputObjectCreated => {
+ diag = handler.struct_err(fluent::codegen_ssa_thorin_not_output_object_created);
+ diag
+ }
+ thorin::Error::MixedInputEncodings => {
+ diag = handler.struct_err(fluent::codegen_ssa_thorin_mixed_input_encodings);
+ diag
+ }
+ thorin::Error::Io(e) => {
+ diag = handler.struct_err(fluent::codegen_ssa_thorin_io);
+ diag.set_arg("error", format!("{e}"));
+ diag
+ }
+ thorin::Error::ObjectRead(e) => {
+ diag = handler.struct_err(fluent::codegen_ssa_thorin_object_read);
+ diag.set_arg("error", format!("{e}"));
+ diag
+ }
+ thorin::Error::ObjectWrite(e) => {
+ diag = handler.struct_err(fluent::codegen_ssa_thorin_object_write);
+ diag.set_arg("error", format!("{e}"));
+ diag
+ }
+ thorin::Error::GimliRead(e) => {
+ diag = handler.struct_err(fluent::codegen_ssa_thorin_gimli_read);
+ diag.set_arg("error", format!("{e}"));
+ diag
+ }
+ thorin::Error::GimliWrite(e) => {
+ diag = handler.struct_err(fluent::codegen_ssa_thorin_gimli_write);
+ diag.set_arg("error", format!("{e}"));
+ diag
+ }
+ _ => unimplemented!("Untranslated thorin error"),
+ }
+ }
+}
+
+pub struct LinkingFailed<'a> {
+ pub linker_path: &'a PathBuf,
+ pub exit_status: ExitStatus,
+ pub command: &'a Command,
+ pub escaped_output: &'a str,
+}
+
+impl IntoDiagnostic<'_> for LinkingFailed<'_> {
+ fn into_diagnostic(self, handler: &Handler) -> DiagnosticBuilder<'_, ErrorGuaranteed> {
+ let mut diag = handler.struct_err(fluent::codegen_ssa_linking_failed);
+ diag.set_arg("linker_path", format!("{}", self.linker_path.display()));
+ diag.set_arg("exit_status", format!("{}", self.exit_status));
+
+ diag.note(format!("{:?}", self.command)).note(self.escaped_output);
+
+ // Trying to match an error from OS linkers
+ // which by now we have no way to translate.
+ if self.escaped_output.contains("undefined reference to") {
+ diag.note(fluent::codegen_ssa_extern_funcs_not_found)
+ .note(fluent::codegen_ssa_specify_libraries_to_link)
+ .note(fluent::codegen_ssa_use_cargo_directive);
+ }
+ diag
+ }
+}
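For context, these types are consumed by the call sites changed earlier in this patch rather than formatted by hand: for example `self.sess.emit_fatal(errors::SymbolFileWriteFailure { error })` in the BpfLinker hunk, and `sess.emit_err(errors::CopyPath::new(from, to, e))` plus `sess.emit_warning(errors::IgnoringEmitPath { extension })` in back/write.rs. The message text itself lives in the crate's Fluent resources, keyed by the `#[diag(...)]` slugs.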
diff --git a/compiler/rustc_codegen_ssa/src/lib.rs b/compiler/rustc_codegen_ssa/src/lib.rs
index 1802eedf1..ceebe4d41 100644
--- a/compiler/rustc_codegen_ssa/src/lib.rs
+++ b/compiler/rustc_codegen_ssa/src/lib.rs
@@ -1,12 +1,12 @@
#![doc(html_root_url = "https://doc.rust-lang.org/nightly/nightly-rustc/")]
#![feature(box_patterns)]
#![feature(try_blocks)]
-#![feature(let_else)]
#![feature(once_cell)]
#![feature(associated_type_bounds)]
#![feature(strict_provenance)]
#![feature(int_roundings)]
#![feature(if_let_guard)]
+#![feature(never_type)]
#![recursion_limit = "256"]
#![allow(rustc::potential_query_instability)]
@@ -25,7 +25,6 @@ use rustc_ast as ast;
use rustc_data_structures::fx::{FxHashMap, FxHashSet};
use rustc_data_structures::sync::Lrc;
use rustc_hir::def_id::CrateNum;
-use rustc_hir::LangItem;
use rustc_middle::dep_graph::WorkProduct;
use rustc_middle::middle::dependency_format::Dependencies;
use rustc_middle::middle::exported_symbols::SymbolExportKind;
@@ -45,6 +44,7 @@ pub mod base;
pub mod common;
pub mod coverageinfo;
pub mod debuginfo;
+pub mod errors;
pub mod glue;
pub mod meth;
pub mod mir;
@@ -113,6 +113,7 @@ bitflags::bitflags! {
pub struct NativeLib {
pub kind: NativeLibKind,
pub name: Option<Symbol>,
+ pub filename: Option<Symbol>,
pub cfg: Option<ast::MetaItem>,
pub verbatim: Option<bool>,
pub dll_imports: Vec<cstore::DllImport>,
@@ -122,6 +123,7 @@ impl From<&cstore::NativeLib> for NativeLib {
fn from(lib: &cstore::NativeLib) -> Self {
NativeLib {
kind: lib.kind,
+ filename: lib.filename,
name: lib.name,
cfg: lib.cfg.clone(),
verbatim: lib.verbatim,
@@ -152,8 +154,6 @@ pub struct CrateInfo {
pub used_libraries: Vec<NativeLib>,
pub used_crate_source: FxHashMap<CrateNum, Lrc<CrateSource>>,
pub used_crates: Vec<CrateNum>,
- pub lang_item_to_crate: FxHashMap<LangItem, CrateNum>,
- pub missing_lang_items: FxHashMap<CrateNum, Vec<LangItem>>,
pub dependency_formats: Lrc<Dependencies>,
pub windows_subsystem: Option<String>,
pub natvis_debugger_visualizers: BTreeSet<DebuggerVisualizerFile>,
@@ -168,6 +168,13 @@ pub struct CodegenResults {
pub crate_info: CrateInfo,
}
+pub enum CodegenErrors<'a> {
+ WrongFileType,
+ EmptyVersionNumber,
+ EncodingVersionMismatch { version_array: String, rlink_version: u32 },
+ RustcVersionMismatch { rustc_version: String, current_version: &'a str },
+}
+
pub fn provide(providers: &mut Providers) {
crate::back::symbol_export::provide(providers);
crate::base::provide(providers);
@@ -212,30 +219,34 @@ impl CodegenResults {
encoder.finish()
}
- pub fn deserialize_rlink(data: Vec<u8>) -> Result<Self, String> {
+ pub fn deserialize_rlink<'a>(data: Vec<u8>) -> Result<Self, CodegenErrors<'a>> {
// The Decodable machinery is not used here because it panics if the input data is invalid
// and because its internal representation may change.
if !data.starts_with(RLINK_MAGIC) {
- return Err("The input does not look like a .rlink file".to_string());
+ return Err(CodegenErrors::WrongFileType);
}
let data = &data[RLINK_MAGIC.len()..];
if data.len() < 4 {
- return Err("The input does not contain version number".to_string());
+ return Err(CodegenErrors::EmptyVersionNumber);
}
let mut version_array: [u8; 4] = Default::default();
version_array.copy_from_slice(&data[..4]);
if u32::from_be_bytes(version_array) != RLINK_VERSION {
- return Err(".rlink file was produced with encoding version {version_array}, but the current version is {RLINK_VERSION}".to_string());
+ return Err(CodegenErrors::EncodingVersionMismatch {
+ version_array: String::from_utf8_lossy(&version_array).to_string(),
+ rlink_version: RLINK_VERSION,
+ });
}
let mut decoder = MemDecoder::new(&data[4..], 0);
let rustc_version = decoder.read_str();
let current_version = RUSTC_VERSION.unwrap();
if rustc_version != current_version {
- return Err(format!(
- ".rlink file was produced by rustc version {rustc_version}, but the current version is {current_version}."
- ));
+ return Err(CodegenErrors::RustcVersionMismatch {
+ rustc_version: rustc_version.to_string(),
+ current_version,
+ });
}
let codegen_results = CodegenResults::decode(&mut decoder);
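A minimal sketch of how a caller could render the new structured variants of `CodegenErrors` introduced above; the message text is lifted from the strings this hunk removes, while the real consumer maps each variant to a translatable diagnostic instead:

fn describe(err: &CodegenErrors<'_>) -> String {
    match err {
        CodegenErrors::WrongFileType => "the input does not look like a .rlink file".to_string(),
        CodegenErrors::EmptyVersionNumber => "the input does not contain a version number".to_string(),
        CodegenErrors::EncodingVersionMismatch { version_array, rlink_version } => {
            format!(".rlink file was produced with encoding version {version_array}, but the current version is {rlink_version}")
        }
        CodegenErrors::RustcVersionMismatch { rustc_version, current_version } => {
            format!(".rlink file was produced by rustc version {rustc_version}, but the current version is {current_version}")
        }
    }
}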
diff --git a/compiler/rustc_codegen_ssa/src/meth.rs b/compiler/rustc_codegen_ssa/src/meth.rs
index 27d791d90..cae46ebd2 100644
--- a/compiler/rustc_codegen_ssa/src/meth.rs
+++ b/compiler/rustc_codegen_ssa/src/meth.rs
@@ -1,6 +1,6 @@
use crate::traits::*;
-use rustc_middle::ty::{self, subst::GenericArgKind, ExistentialPredicate, Ty, TyCtxt};
+use rustc_middle::ty::{self, subst::GenericArgKind, Ty};
use rustc_session::config::Lto;
use rustc_symbol_mangling::typeid_for_trait_ref;
use rustc_target::abi::call::FnAbi;
@@ -29,7 +29,7 @@ impl<'a, 'tcx> VirtualIndex {
&& bx.cx().sess().lto() == Lto::Fat
{
let typeid =
- bx.typeid_metadata(typeid_for_trait_ref(bx.tcx(), get_trait_ref(bx.tcx(), ty)));
+ bx.typeid_metadata(typeid_for_trait_ref(bx.tcx(), expect_dyn_trait_in_self(ty)));
let vtable_byte_offset = self.0 * bx.data_layout().pointer_size.bytes();
let type_checked_load = bx.type_checked_load(llvtable, vtable_byte_offset, typeid);
let func = bx.extract_value(type_checked_load, 0);
@@ -64,17 +64,13 @@ impl<'a, 'tcx> VirtualIndex {
}
}
-fn get_trait_ref<'tcx>(tcx: TyCtxt<'tcx>, ty: Ty<'tcx>) -> ty::PolyExistentialTraitRef<'tcx> {
+/// This takes a valid `self` receiver type and extracts the principal trait
+/// ref of the type.
+fn expect_dyn_trait_in_self<'tcx>(ty: Ty<'tcx>) -> ty::PolyExistentialTraitRef<'tcx> {
for arg in ty.peel_refs().walk() {
if let GenericArgKind::Type(ty) = arg.unpack() {
- if let ty::Dynamic(trait_refs, _) = ty.kind() {
- return trait_refs[0].map_bound(|trait_ref| match trait_ref {
- ExistentialPredicate::Trait(tr) => tr,
- ExistentialPredicate::Projection(proj) => proj.trait_ref(tcx),
- ExistentialPredicate::AutoTrait(_) => {
- bug!("auto traits don't have functions")
- }
- });
+ if let ty::Dynamic(data, _, _) = ty.kind() {
+ return data.principal().expect("expected principal trait object");
}
}
}
@@ -90,6 +86,7 @@ fn get_trait_ref<'tcx>(tcx: TyCtxt<'tcx>, ty: Ty<'tcx>) -> ty::PolyExistentialTr
/// The `trait_ref` encodes the erased self type. Hence if we are
/// making an object `Foo<dyn Trait>` from a value of type `Foo<T>`, then
/// `trait_ref` would map `T: Trait`.
+#[instrument(level = "debug", skip(cx))]
pub fn get_vtable<'tcx, Cx: CodegenMethods<'tcx>>(
cx: &Cx,
ty: Ty<'tcx>,
@@ -97,8 +94,6 @@ pub fn get_vtable<'tcx, Cx: CodegenMethods<'tcx>>(
) -> Cx::Value {
let tcx = cx.tcx();
- debug!("get_vtable(ty={:?}, trait_ref={:?})", ty, trait_ref);
-
// Check the cache.
if let Some(&val) = cx.vtables().borrow().get(&(ty, trait_ref)) {
return val;
diff --git a/compiler/rustc_codegen_ssa/src/mir/analyze.rs b/compiler/rustc_codegen_ssa/src/mir/analyze.rs
index 24da48ead..c7617d2e4 100644
--- a/compiler/rustc_codegen_ssa/src/mir/analyze.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/analyze.rs
@@ -266,7 +266,7 @@ pub fn cleanup_kinds(mir: &mir::Body<'_>) -> IndexVec<mir::BasicBlock, CleanupKi
result: &mut IndexVec<mir::BasicBlock, CleanupKind>,
mir: &mir::Body<'tcx>,
) {
- for (bb, data) in mir.basic_blocks().iter_enumerated() {
+ for (bb, data) in mir.basic_blocks.iter_enumerated() {
match data.terminator().kind {
TerminatorKind::Goto { .. }
| TerminatorKind::Resume
@@ -296,7 +296,7 @@ pub fn cleanup_kinds(mir: &mir::Body<'_>) -> IndexVec<mir::BasicBlock, CleanupKi
}
fn propagate<'tcx>(result: &mut IndexVec<mir::BasicBlock, CleanupKind>, mir: &mir::Body<'tcx>) {
- let mut funclet_succs = IndexVec::from_elem(None, mir.basic_blocks());
+ let mut funclet_succs = IndexVec::from_elem(None, &mir.basic_blocks);
let mut set_successor = |funclet: mir::BasicBlock, succ| match funclet_succs[funclet] {
ref mut s @ None => {
@@ -359,7 +359,7 @@ pub fn cleanup_kinds(mir: &mir::Body<'_>) -> IndexVec<mir::BasicBlock, CleanupKi
}
}
- let mut result = IndexVec::from_elem(CleanupKind::NotCleanup, mir.basic_blocks());
+ let mut result = IndexVec::from_elem(CleanupKind::NotCleanup, &mir.basic_blocks);
discover_masters(&mut result, mir);
propagate(&mut result, mir);
diff --git a/compiler/rustc_codegen_ssa/src/mir/block.rs b/compiler/rustc_codegen_ssa/src/mir/block.rs
index 3eee58d9d..29b7c9b0a 100644
--- a/compiler/rustc_codegen_ssa/src/mir/block.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/block.rs
@@ -13,15 +13,14 @@ use rustc_ast as ast;
use rustc_ast::{InlineAsmOptions, InlineAsmTemplatePiece};
use rustc_hir::lang_items::LangItem;
use rustc_index::vec::Idx;
-use rustc_middle::mir::AssertKind;
-use rustc_middle::mir::{self, SwitchTargets};
+use rustc_middle::mir::{self, AssertKind, SwitchTargets};
use rustc_middle::ty::layout::{HasTyCtxt, LayoutOf};
use rustc_middle::ty::print::{with_no_trimmed_paths, with_no_visible_paths};
use rustc_middle::ty::{self, Instance, Ty, TypeVisitable};
use rustc_span::source_map::Span;
use rustc_span::{sym, Symbol};
use rustc_symbol_mangling::typeid::typeid_for_fnabi;
-use rustc_target::abi::call::{ArgAbi, FnAbi, PassMode};
+use rustc_target::abi::call::{ArgAbi, FnAbi, PassMode, Reg};
use rustc_target::abi::{self, HasDataLayout, WrappingRange};
use rustc_target::spec::abi::Abi;
@@ -64,7 +63,9 @@ impl<'a, 'tcx> TerminatorCodegenHelper<'tcx> {
}
}
- fn lltarget<Bx: BuilderMethods<'a, 'tcx>>(
+ /// Get a basic block (creating it if necessary), possibly with a landing
+ /// pad next to it.
+ fn llbb_with_landing_pad<Bx: BuilderMethods<'a, 'tcx>>(
&self,
fx: &mut FunctionCx<'a, 'tcx, Bx>,
target: mir::BasicBlock,
@@ -74,32 +75,36 @@ impl<'a, 'tcx> TerminatorCodegenHelper<'tcx> {
let target_funclet = fx.cleanup_kinds[target].funclet_bb(target);
match (self.funclet_bb, target_funclet) {
(None, None) => (lltarget, false),
- (Some(f), Some(t_f)) if f == t_f || !base::wants_msvc_seh(fx.cx.tcx().sess) => {
- (lltarget, false)
- }
// jump *into* cleanup - need a landing pad if GNU, cleanup pad if MSVC
(None, Some(_)) => (fx.landing_pad_for(target), false),
(Some(_), None) => span_bug!(span, "{:?} - jump out of cleanup?", self.terminator),
- (Some(_), Some(_)) => (fx.landing_pad_for(target), true),
+ (Some(f), Some(t_f)) => {
+ if f == t_f || !base::wants_msvc_seh(fx.cx.tcx().sess) {
+ (lltarget, false)
+ } else {
+ (fx.landing_pad_for(target), true)
+ }
+ }
}
}
- /// Create a basic block.
- fn llblock<Bx: BuilderMethods<'a, 'tcx>>(
+ /// Get a basic block (creating it if necessary), possibly with cleanup
+ /// stuff in it or next to it.
+ fn llbb_with_cleanup<Bx: BuilderMethods<'a, 'tcx>>(
&self,
fx: &mut FunctionCx<'a, 'tcx, Bx>,
target: mir::BasicBlock,
) -> Bx::BasicBlock {
- let (lltarget, is_cleanupret) = self.lltarget(fx, target);
+ let (lltarget, is_cleanupret) = self.llbb_with_landing_pad(fx, target);
if is_cleanupret {
// MSVC cross-funclet jump - need a trampoline
-
- debug!("llblock: creating cleanup trampoline for {:?}", target);
+ debug_assert!(base::wants_msvc_seh(fx.cx.tcx().sess));
+ debug!("llbb_with_cleanup: creating cleanup trampoline for {:?}", target);
let name = &format!("{:?}_cleanup_trampoline_{:?}", self.bb, target);
- let trampoline = Bx::append_block(fx.cx, fx.llfn, name);
- let mut trampoline_bx = Bx::build(fx.cx, trampoline);
+ let trampoline_llbb = Bx::append_block(fx.cx, fx.llfn, name);
+ let mut trampoline_bx = Bx::build(fx.cx, trampoline_llbb);
trampoline_bx.cleanup_ret(self.funclet(fx).unwrap(), Some(lltarget));
- trampoline
+ trampoline_llbb
} else {
lltarget
}
@@ -111,10 +116,11 @@ impl<'a, 'tcx> TerminatorCodegenHelper<'tcx> {
bx: &mut Bx,
target: mir::BasicBlock,
) {
- let (lltarget, is_cleanupret) = self.lltarget(fx, target);
+ let (lltarget, is_cleanupret) = self.llbb_with_landing_pad(fx, target);
if is_cleanupret {
- // micro-optimization: generate a `ret` rather than a jump
+ // MSVC micro-optimization: generate a `ret` rather than a jump
// to a trampoline.
+ debug_assert!(base::wants_msvc_seh(fx.cx.tcx().sess));
bx.cleanup_ret(self.funclet(fx).unwrap(), Some(lltarget));
} else {
bx.br(lltarget);
@@ -139,7 +145,7 @@ impl<'a, 'tcx> TerminatorCodegenHelper<'tcx> {
let fn_ty = bx.fn_decl_backend_type(&fn_abi);
let unwind_block = if let Some(cleanup) = cleanup.filter(|_| fn_abi.can_unwind) {
- Some(self.llblock(fx, cleanup))
+ Some(self.llbb_with_cleanup(fx, cleanup))
} else if fx.mir[self.bb].is_cleanup
&& fn_abi.can_unwind
&& !base::wants_msvc_seh(fx.cx.tcx().sess)
@@ -163,9 +169,15 @@ impl<'a, 'tcx> TerminatorCodegenHelper<'tcx> {
} else {
fx.unreachable_block()
};
- let invokeret =
- bx.invoke(fn_ty, fn_ptr, &llargs, ret_llbb, unwind_block, self.funclet(fx));
- bx.apply_attrs_callsite(&fn_abi, invokeret);
+ let invokeret = bx.invoke(
+ fn_ty,
+ Some(&fn_abi),
+ fn_ptr,
+ &llargs,
+ ret_llbb,
+ unwind_block,
+ self.funclet(fx),
+ );
if fx.mir[self.bb].is_cleanup {
bx.do_not_inline(invokeret);
}
@@ -179,8 +191,7 @@ impl<'a, 'tcx> TerminatorCodegenHelper<'tcx> {
fx.store_return(bx, ret_dest, &fn_abi.ret, invokeret);
}
} else {
- let llret = bx.call(fn_ty, fn_ptr, &llargs, self.funclet(fx));
- bx.apply_attrs_callsite(&fn_abi, llret);
+ let llret = bx.call(fn_ty, Some(&fn_abi), fn_ptr, &llargs, self.funclet(fx));
if fx.mir[self.bb].is_cleanup {
// Cleanup is always the cold path. Don't inline
// drop glue. Also, when there is a deeply-nested
@@ -227,7 +238,7 @@ impl<'a, 'tcx> TerminatorCodegenHelper<'tcx> {
options,
line_spans,
instance,
- Some((ret_llbb, self.llblock(fx, cleanup), self.funclet(fx))),
+ Some((ret_llbb, self.llbb_with_cleanup(fx, cleanup), self.funclet(fx))),
);
} else {
bx.codegen_inline_asm(template, &operands, options, line_spans, instance, None);
@@ -277,8 +288,8 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
if target_iter.len() == 1 {
// If there are two targets (one conditional, one fallback), emit br instead of switch
let (test_value, target) = target_iter.next().unwrap();
- let lltrue = helper.llblock(self, target);
- let llfalse = helper.llblock(self, targets.otherwise());
+ let lltrue = helper.llbb_with_cleanup(self, target);
+ let llfalse = helper.llbb_with_cleanup(self, targets.otherwise());
if switch_ty == bx.tcx().types.bool {
// Don't generate trivial icmps when switching on bool
match test_value {
@@ -295,8 +306,8 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
} else {
bx.switch(
discr.immediate(),
- helper.llblock(self, targets.otherwise()),
- target_iter.map(|(value, target)| (value, helper.llblock(self, target))),
+ helper.llbb_with_cleanup(self, targets.otherwise()),
+ target_iter.map(|(value, target)| (value, helper.llbb_with_cleanup(self, target))),
);
}
}
@@ -324,7 +335,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
bx.unreachable();
return;
}
- let llval = match self.fn_abi.ret.mode {
+ let llval = match &self.fn_abi.ret.mode {
PassMode::Ignore | PassMode::Indirect { .. } => {
bx.ret_void();
return;
@@ -339,7 +350,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
}
}
- PassMode::Cast(cast_ty) => {
+ PassMode::Cast(cast_ty, _) => {
let op = match self.locals[mir::RETURN_PLACE] {
LocalRef::Operand(Some(op)) => op,
LocalRef::Operand(None) => bug!("use of return before def"),
@@ -360,7 +371,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
llval
}
};
- let ty = bx.cast_backend_type(&cast_ty);
+ let ty = bx.cast_backend_type(cast_ty);
let addr = bx.pointercast(llslot, bx.type_ptr_to(ty));
bx.load(ty, addr, self.fn_abi.ret.layout.align.abi)
}
@@ -368,6 +379,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
bx.ret(llval);
}
+ #[tracing::instrument(level = "trace", skip(self, helper, bx))]
fn codegen_drop_terminator(
&mut self,
helper: TerminatorCodegenHelper<'tcx>,
@@ -398,14 +410,75 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
let (drop_fn, fn_abi) = match ty.kind() {
// FIXME(eddyb) perhaps move some of this logic into
// `Instance::resolve_drop_in_place`?
- ty::Dynamic(..) => {
+ ty::Dynamic(_, _, ty::Dyn) => {
+ // IN THIS ARM, WE HAVE:
+ // ty = *mut (dyn Trait)
+ // which is: exists<T> ( *mut T, Vtable<T: Trait> )
+ // args[0] args[1]
+ //
+ // args = ( Data, Vtable )
+ // |
+ // v
+ // /-------\
+ // | ... |
+ // \-------/
+ //
let virtual_drop = Instance {
def: ty::InstanceDef::Virtual(drop_fn.def_id(), 0),
substs: drop_fn.substs,
};
+ debug!("ty = {:?}", ty);
+ debug!("drop_fn = {:?}", drop_fn);
+ debug!("args = {:?}", args);
let fn_abi = bx.fn_abi_of_instance(virtual_drop, ty::List::empty());
let vtable = args[1];
+ // Truncate vtable off of args list
+ args = &args[..1];
+ (
+ meth::VirtualIndex::from_index(ty::COMMON_VTABLE_ENTRIES_DROPINPLACE)
+ .get_fn(&mut bx, vtable, ty, &fn_abi),
+ fn_abi,
+ )
+ }
+ ty::Dynamic(_, _, ty::DynStar) => {
+ // IN THIS ARM, WE HAVE:
+ // ty = *mut (dyn* Trait)
+ // which is: *mut exists<T: sizeof(T) == sizeof(usize)> (T, Vtable<T: Trait>)
+ //
+ // args = [ * ]
+ // |
+ // v
+ // ( Data, Vtable )
+ // |
+ // v
+ // /-------\
+ // | ... |
+ // \-------/
+ //
+ //
+ // WE CAN CONVERT THIS INTO THE ABOVE LOGIC BY DOING
+ //
+ // data = &(*args[0]).0 // gives a pointer to Data above (really the same pointer)
+ // vtable = (*args[0]).1 // loads the vtable out
+ // (data, vtable) // an equivalent Rust `*mut dyn Trait`
+ //
+ // SO THEN WE CAN USE THE ABOVE CODE.
+ let virtual_drop = Instance {
+ def: ty::InstanceDef::Virtual(drop_fn.def_id(), 0),
+ substs: drop_fn.substs,
+ };
+ debug!("ty = {:?}", ty);
+ debug!("drop_fn = {:?}", drop_fn);
+ debug!("args = {:?}", args);
+ let fn_abi = bx.fn_abi_of_instance(virtual_drop, ty::List::empty());
+ let data = args[0];
+ let data_ty = bx.cx().backend_type(place.layout);
+ let vtable_ptr =
+ bx.gep(data_ty, data, &[bx.cx().const_i32(0), bx.cx().const_i32(1)]);
+ let vtable = bx.load(bx.type_i8p(), vtable_ptr, abi::Align::ONE);
+ // Truncate vtable off of args list
args = &args[..1];
+ debug!("args' = {:?}", args);
(
meth::VirtualIndex::from_index(ty::COMMON_VTABLE_ENTRIES_DROPINPLACE)
.get_fn(&mut bx, vtable, ty, &fn_abi),
@@ -464,7 +537,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
let cond = bx.expect(cond, expected);
// Create the failure block and the conditional branch to it.
- let lltarget = helper.llblock(self, target);
+ let lltarget = helper.llbb_with_cleanup(self, target);
let panic_block = bx.append_sibling_block("panic");
if expected {
bx.cond_br(cond, lltarget, panic_block);
@@ -798,58 +871,78 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
let mut op = self.codegen_operand(&mut bx, arg);
if let (0, Some(ty::InstanceDef::Virtual(_, idx))) = (i, def) {
- if let Pair(..) = op.val {
- // In the case of Rc<Self>, we need to explicitly pass a
- // *mut RcBox<Self> with a Scalar (not ScalarPair) ABI. This is a hack
- // that is understood elsewhere in the compiler as a method on
- // `dyn Trait`.
- // To get a `*mut RcBox<Self>`, we just keep unwrapping newtypes until
- // we get a value of a built-in pointer type
- 'descend_newtypes: while !op.layout.ty.is_unsafe_ptr()
- && !op.layout.ty.is_region_ptr()
- {
- for i in 0..op.layout.fields.count() {
- let field = op.extract_field(&mut bx, i);
- if !field.layout.is_zst() {
- // we found the one non-zero-sized field that is allowed
- // now find *its* non-zero-sized field, or stop if it's a
- // pointer
- op = field;
- continue 'descend_newtypes;
+ match op.val {
+ Pair(data_ptr, meta) => {
+ // In the case of Rc<Self>, we need to explicitly pass a
+ // *mut RcBox<Self> with a Scalar (not ScalarPair) ABI. This is a hack
+ // that is understood elsewhere in the compiler as a method on
+ // `dyn Trait`.
+ // To get a `*mut RcBox<Self>`, we just keep unwrapping newtypes until
+ // we get a value of a built-in pointer type
+ 'descend_newtypes: while !op.layout.ty.is_unsafe_ptr()
+ && !op.layout.ty.is_region_ptr()
+ {
+ for i in 0..op.layout.fields.count() {
+ let field = op.extract_field(&mut bx, i);
+ if !field.layout.is_zst() {
+ // we found the one non-zero-sized field that is allowed
+ // now find *its* non-zero-sized field, or stop if it's a
+ // pointer
+ op = field;
+ continue 'descend_newtypes;
+ }
}
+
+ span_bug!(span, "receiver has no non-zero-sized fields {:?}", op);
}
- span_bug!(span, "receiver has no non-zero-sized fields {:?}", op);
+ // now that we have `*dyn Trait` or `&dyn Trait`, split it up into its
+ // data pointer and vtable. Look up the method in the vtable, and pass
+ // the data pointer as the first argument
+ llfn = Some(meth::VirtualIndex::from_index(idx).get_fn(
+ &mut bx,
+ meta,
+ op.layout.ty,
+ &fn_abi,
+ ));
+ llargs.push(data_ptr);
+ continue 'make_args;
}
-
- // now that we have `*dyn Trait` or `&dyn Trait`, split it up into its
- // data pointer and vtable. Look up the method in the vtable, and pass
- // the data pointer as the first argument
- match op.val {
- Pair(data_ptr, meta) => {
- llfn = Some(meth::VirtualIndex::from_index(idx).get_fn(
- &mut bx,
- meta,
- op.layout.ty,
- &fn_abi,
- ));
- llargs.push(data_ptr);
- continue 'make_args;
+ Ref(data_ptr, Some(meta), _) => {
+ // by-value dynamic dispatch
+ llfn = Some(meth::VirtualIndex::from_index(idx).get_fn(
+ &mut bx,
+ meta,
+ op.layout.ty,
+ &fn_abi,
+ ));
+ llargs.push(data_ptr);
+ continue;
+ }
+ Immediate(_) => {
+ let ty::Ref(_, ty, _) = op.layout.ty.kind() else {
+ span_bug!(span, "can't codegen a virtual call on {:#?}", op);
+ };
+ if !ty.is_dyn_star() {
+ span_bug!(span, "can't codegen a virtual call on {:#?}", op);
}
- other => bug!("expected a Pair, got {:?}", other),
+ // FIXME(dyn-star): Make sure this is done on a &dyn* receiver
+ let place = op.deref(bx.cx());
+ let data_ptr = place.project_field(&mut bx, 0);
+ let meta_ptr = place.project_field(&mut bx, 1);
+ let meta = bx.load_operand(meta_ptr);
+ llfn = Some(meth::VirtualIndex::from_index(idx).get_fn(
+ &mut bx,
+ meta.immediate(),
+ op.layout.ty,
+ &fn_abi,
+ ));
+ llargs.push(data_ptr.llval);
+ continue;
+ }
+ _ => {
+ span_bug!(span, "can't codegen a virtual call on {:#?}", op);
}
- } else if let Ref(data_ptr, Some(meta), _) = op.val {
- // by-value dynamic dispatch
- llfn = Some(meth::VirtualIndex::from_index(idx).get_fn(
- &mut bx,
- meta,
- op.layout.ty,
- &fn_abi,
- ));
- llargs.push(data_ptr);
- continue;
- } else {
- span_bug!(span, "can't codegen a virtual call on {:?}", op);
}
}
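
As a reminder of the surface code served by the `Pair(..)` arm and its newtype descent, here is a small example of a virtual call through an `Rc<Self>` receiver; the names are illustrative only, not taken from this patch.

    use std::rc::Rc;

    trait Greet {
        fn hello(self: Rc<Self>) -> String;
    }

    struct World;
    impl Greet for World {
        fn hello(self: Rc<Self>) -> String { "hello, world".to_string() }
    }

    fn main() {
        let g: Rc<dyn Greet> = Rc::new(World);
        // Codegen peels the Rc newtype down to its (data pointer, vtable) pair,
        // looks up `hello` at its vtable index, and passes the data pointer first.
        assert_eq!(g.hello(), "hello, world");
    }

The new `Immediate(_)` arm covers the analogous situation for a `&dyn*` receiver, where the (data, vtable) pair first has to be loaded out of the pointed-to place.
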
@@ -1161,39 +1254,35 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
llargs: &mut Vec<Bx::Value>,
arg: &ArgAbi<'tcx, Ty<'tcx>>,
) {
- // Fill padding with undef value, where applicable.
- if let Some(ty) = arg.pad {
- llargs.push(bx.const_undef(bx.reg_backend_type(&ty)))
- }
-
- if arg.is_ignore() {
- return;
- }
-
- if let PassMode::Pair(..) = arg.mode {
- match op.val {
+ match arg.mode {
+ PassMode::Ignore => return,
+ PassMode::Cast(_, true) => {
+ // Fill padding with undef value, where applicable.
+ llargs.push(bx.const_undef(bx.reg_backend_type(&Reg::i32())));
+ }
+ PassMode::Pair(..) => match op.val {
Pair(a, b) => {
llargs.push(a);
llargs.push(b);
return;
}
_ => bug!("codegen_argument: {:?} invalid for pair argument", op),
- }
- } else if arg.is_unsized_indirect() {
- match op.val {
+ },
+ PassMode::Indirect { attrs: _, extra_attrs: Some(_), on_stack: _ } => match op.val {
Ref(a, Some(b), _) => {
llargs.push(a);
llargs.push(b);
return;
}
_ => bug!("codegen_argument: {:?} invalid for unsized indirect argument", op),
- }
+ },
+ _ => {}
}
// Force by-ref if we have to load through a cast pointer.
let (mut llval, align, by_ref) = match op.val {
Immediate(_) | Pair(..) => match arg.mode {
- PassMode::Indirect { .. } | PassMode::Cast(_) => {
+ PassMode::Indirect { .. } | PassMode::Cast(..) => {
let scratch = PlaceRef::alloca(bx, arg.layout);
op.val.store(bx, scratch);
(scratch.llval, scratch.align, true)
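
A short illustration of why the `PassMode::Pair` arm pushes two values: wide pointers have a `ScalarPair` layout and are passed as two separate immediates. The function below is an assumed example, not code from this patch.

    // At the ABI level `s` arrives as two arguments, a data pointer and a length,
    // matching the Pair(a, b) branch that pushes both onto `llargs`.
    pub fn first_byte(s: &str) -> Option<u8> {
        s.as_bytes().first().copied()
    }
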
@@ -1225,8 +1314,8 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
if by_ref && !arg.is_indirect() {
// Have to load the argument, maybe while casting it.
- if let PassMode::Cast(ty) = arg.mode {
- let llty = bx.cast_backend_type(&ty);
+ if let PassMode::Cast(ty, _) = &arg.mode {
+ let llty = bx.cast_backend_type(ty);
let addr = bx.pointercast(llval, bx.type_ptr_to(llty));
llval = bx.load(llty, addr, align.min(arg.layout.align.abi));
} else {
@@ -1377,20 +1466,20 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
// bar();
// }
Some(&mir::TerminatorKind::Abort) => {
- let cs_bb =
+ let cs_llbb =
Bx::append_block(self.cx, self.llfn, &format!("cs_funclet{:?}", bb));
- let cp_bb =
+ let cp_llbb =
Bx::append_block(self.cx, self.llfn, &format!("cp_funclet{:?}", bb));
- ret_llbb = cs_bb;
+ ret_llbb = cs_llbb;
- let mut cs_bx = Bx::build(self.cx, cs_bb);
- let cs = cs_bx.catch_switch(None, None, &[cp_bb]);
+ let mut cs_bx = Bx::build(self.cx, cs_llbb);
+ let cs = cs_bx.catch_switch(None, None, &[cp_llbb]);
// The "null" here is actually a RTTI type descriptor for the
// C++ personality function, but `catch (...)` has no type so
// it's null. The 64 here is actually a bitfield which
// represents that this is a catch-all block.
- let mut cp_bx = Bx::build(self.cx, cp_bb);
+ let mut cp_bx = Bx::build(self.cx, cp_llbb);
let null = cp_bx.const_null(
cp_bx.type_i8p_ext(cp_bx.cx().data_layout().instruction_address_space),
);
@@ -1399,10 +1488,10 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
cp_bx.br(llbb);
}
_ => {
- let cleanup_bb =
+ let cleanup_llbb =
Bx::append_block(self.cx, self.llfn, &format!("funclet_{:?}", bb));
- ret_llbb = cleanup_bb;
- let mut cleanup_bx = Bx::build(self.cx, cleanup_bb);
+ ret_llbb = cleanup_llbb;
+ let mut cleanup_bx = Bx::build(self.cx, cleanup_llbb);
funclet = cleanup_bx.cleanup_pad(None, &[]);
cleanup_bx.br(llbb);
}
@@ -1410,19 +1499,20 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
self.funclets[bb] = Some(funclet);
ret_llbb
} else {
- let bb = Bx::append_block(self.cx, self.llfn, "cleanup");
- let mut bx = Bx::build(self.cx, bb);
+ let cleanup_llbb = Bx::append_block(self.cx, self.llfn, "cleanup");
+ let mut cleanup_bx = Bx::build(self.cx, cleanup_llbb);
let llpersonality = self.cx.eh_personality();
let llretty = self.landing_pad_type();
- let lp = bx.cleanup_landing_pad(llretty, llpersonality);
+ let lp = cleanup_bx.cleanup_landing_pad(llretty, llpersonality);
- let slot = self.get_personality_slot(&mut bx);
- slot.storage_live(&mut bx);
- Pair(bx.extract_value(lp, 0), bx.extract_value(lp, 1)).store(&mut bx, slot);
+ let slot = self.get_personality_slot(&mut cleanup_bx);
+ slot.storage_live(&mut cleanup_bx);
+ Pair(cleanup_bx.extract_value(lp, 0), cleanup_bx.extract_value(lp, 1))
+ .store(&mut cleanup_bx, slot);
- bx.br(llbb);
- bx.llbb()
+ cleanup_bx.br(llbb);
+ cleanup_llbb
}
}
@@ -1456,8 +1546,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
let (fn_abi, fn_ptr) = common::build_langcall(&bx, None, LangItem::PanicNoUnwind);
let fn_ty = bx.fn_decl_backend_type(&fn_abi);
- let llret = bx.call(fn_ty, fn_ptr, &[], None);
- bx.apply_attrs_callsite(&fn_abi, llret);
+ let llret = bx.call(fn_ty, Some(&fn_abi), fn_ptr, &[], None);
bx.do_not_inline(llret);
bx.unreachable();
@@ -1625,7 +1714,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
}
DirectOperand(index) => {
// If there is a cast, we have to store and reload.
- let op = if let PassMode::Cast(_) = ret_abi.mode {
+ let op = if let PassMode::Cast(..) = ret_abi.mode {
let tmp = PlaceRef::alloca(bx, ret_abi.layout);
tmp.storage_live(bx);
bx.store_arg(&ret_abi, llval, tmp);
diff --git a/compiler/rustc_codegen_ssa/src/mir/constant.rs b/compiler/rustc_codegen_ssa/src/mir/constant.rs
index 9a995fbf6..4c6ab457c 100644
--- a/compiler/rustc_codegen_ssa/src/mir/constant.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/constant.rs
@@ -25,26 +25,26 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
constant: &mir::Constant<'tcx>,
) -> Result<ConstValue<'tcx>, ErrorHandled> {
let ct = self.monomorphize(constant.literal);
- let ct = match ct {
- mir::ConstantKind::Ty(ct) => ct,
+ let uv = match ct {
+ mir::ConstantKind::Ty(ct) => match ct.kind() {
+ ty::ConstKind::Unevaluated(uv) => uv.expand(),
+ ty::ConstKind::Value(val) => {
+ return Ok(self.cx.tcx().valtree_to_const_val((ct.ty(), val)));
+ }
+ err => span_bug!(
+ constant.span,
+ "encountered bad ConstKind after monomorphizing: {:?}",
+ err
+ ),
+ },
+ mir::ConstantKind::Unevaluated(uv, _) => uv,
mir::ConstantKind::Val(val, _) => return Ok(val),
};
- match ct.kind() {
- ty::ConstKind::Unevaluated(ct) => self
- .cx
- .tcx()
- .const_eval_resolve(ty::ParamEnv::reveal_all(), ct, None)
- .map_err(|err| {
- self.cx.tcx().sess.span_err(constant.span, "erroneous constant encountered");
- err
- }),
- ty::ConstKind::Value(val) => Ok(self.cx.tcx().valtree_to_const_val((ct.ty(), val))),
- err => span_bug!(
- constant.span,
- "encountered bad ConstKind after monomorphizing: {:?}",
- err
- ),
- }
+
+ self.cx.tcx().const_eval_resolve(ty::ParamEnv::reveal_all(), uv, None).map_err(|err| {
+ self.cx.tcx().sess.span_err(constant.span, "erroneous constant encountered");
+ err
+ })
}
/// process constant containing SIMD shuffle indices
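
For context, one kind of constant that reaches codegen still unevaluated is an associated const that depends on a type parameter: it can only be resolved after monomorphization, which is what the `const_eval_resolve` call above handles. The example below uses made-up names and is purely illustrative.

    #[allow(dead_code)]
    struct Buf<T>([T; 4]);

    impl<T> Buf<T> {
        // Depends on T, so its MIR constant stays unevaluated until the
        // concrete T is known.
        const SIZE: usize = std::mem::size_of::<T>() * 4;
    }

    fn size_of_buf<T>() -> usize {
        Buf::<T>::SIZE
    }

    fn main() {
        assert_eq!(size_of_buf::<u32>(), 16);
    }
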
diff --git a/compiler/rustc_codegen_ssa/src/mir/debuginfo.rs b/compiler/rustc_codegen_ssa/src/mir/debuginfo.rs
index 8c3186efc..157c1c823 100644
--- a/compiler/rustc_codegen_ssa/src/mir/debuginfo.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/debuginfo.rs
@@ -3,7 +3,7 @@ use rustc_index::vec::IndexVec;
use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrFlags;
use rustc_middle::mir;
use rustc_middle::ty;
-use rustc_middle::ty::layout::LayoutOf;
+use rustc_middle::ty::layout::{HasTyCtxt, LayoutOf};
use rustc_session::config::DebugInfo;
use rustc_span::symbol::{kw, Symbol};
use rustc_span::{BytePos, Span};
@@ -93,15 +93,15 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
}
/// In order to have a good line stepping behavior in debugger, we overwrite debug
- /// locations of macro expansions with that of the outermost expansion site
- /// (unless the crate is being compiled with `-Z debug-macros`).
+ /// locations of macro expansions with that of the outermost expansion site (when the macro is
+ /// annotated with `#[collapse_debuginfo]` or when `-Zdebug-macros` is provided).
fn adjust_span_for_debugging(&self, mut span: Span) -> Span {
// Bail out if debug info emission is not enabled.
if self.debug_context.is_none() {
return span;
}
- if span.from_expansion() && !self.cx.sess().opts.unstable_opts.debug_macros {
+ if self.cx.tcx().should_collapse_debuginfo(span) {
// Walk up the macro expansion chain until we reach a non-expanded span.
// We also stop at the function body level because no line stepping can occur
// at the level above that.
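
A sketch of the source-level feature the new `should_collapse_debuginfo` check reacts to, assuming the no-argument form of the unstable attribute available at this point (nightly only); the macro and message are illustrative.

    #![feature(collapse_debuginfo)]

    #[collapse_debuginfo]
    macro_rules! trace_point {
        ($msg:expr) => {
            println!("trace: {}", $msg); // spans inside the expansion collapse...
        };
    }

    fn main() {
        trace_point!("start"); // ...to this call site, so the debugger steps here
    }
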
diff --git a/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs b/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs
index 94ac71a4d..215edbe02 100644
--- a/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs
@@ -77,10 +77,6 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
let result = PlaceRef::new_sized(llresult, fn_abi.ret.layout);
let llval = match name {
- sym::assume => {
- bx.assume(args[0].immediate());
- return;
- }
sym::abort => {
bx.abort();
return;
@@ -555,14 +551,10 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
return;
}
- sym::ptr_guaranteed_eq | sym::ptr_guaranteed_ne => {
+ sym::ptr_guaranteed_cmp => {
let a = args[0].immediate();
let b = args[1].immediate();
- if name == sym::ptr_guaranteed_eq {
- bx.icmp(IntPredicate::IntEQ, a, b)
- } else {
- bx.icmp(IntPredicate::IntNE, a, b)
- }
+ bx.icmp(IntPredicate::IntEQ, a, b)
}
sym::ptr_offset_from | sym::ptr_offset_from_unsigned => {
@@ -597,8 +589,8 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
};
if !fn_abi.ret.is_ignore() {
- if let PassMode::Cast(ty) = fn_abi.ret.mode {
- let ptr_llty = bx.type_ptr_to(bx.cast_backend_type(&ty));
+ if let PassMode::Cast(ty, _) = &fn_abi.ret.mode {
+ let ptr_llty = bx.type_ptr_to(bx.cast_backend_type(ty));
let ptr = bx.pointercast(result.llval, ptr_llty);
bx.store(llval, ptr, result.align);
} else {
diff --git a/compiler/rustc_codegen_ssa/src/mir/mod.rs b/compiler/rustc_codegen_ssa/src/mir/mod.rs
index 8ee375fa9..da9aaf00e 100644
--- a/compiler/rustc_codegen_ssa/src/mir/mod.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/mod.rs
@@ -148,15 +148,15 @@ pub fn codegen_mir<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
let debug_context = cx.create_function_debug_context(instance, &fn_abi, llfn, &mir);
let start_llbb = Bx::append_block(cx, llfn, "start");
- let mut bx = Bx::build(cx, start_llbb);
+ let mut start_bx = Bx::build(cx, start_llbb);
- if mir.basic_blocks().iter().any(|bb| bb.is_cleanup) {
- bx.set_personality_fn(cx.eh_personality());
+ if mir.basic_blocks.iter().any(|bb| bb.is_cleanup) {
+ start_bx.set_personality_fn(cx.eh_personality());
}
let cleanup_kinds = analyze::cleanup_kinds(&mir);
let cached_llbbs: IndexVec<mir::BasicBlock, Option<Bx::BasicBlock>> = mir
- .basic_blocks()
+ .basic_blocks
.indices()
.map(|bb| if bb == mir::START_BLOCK { Some(start_llbb) } else { None })
.collect();
@@ -172,15 +172,15 @@ pub fn codegen_mir<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
unreachable_block: None,
double_unwind_guard: None,
cleanup_kinds,
- landing_pads: IndexVec::from_elem(None, mir.basic_blocks()),
- funclets: IndexVec::from_fn_n(|_| None, mir.basic_blocks().len()),
+ landing_pads: IndexVec::from_elem(None, &mir.basic_blocks),
+ funclets: IndexVec::from_fn_n(|_| None, mir.basic_blocks.len()),
locals: IndexVec::new(),
debug_context,
per_local_var_debug_info: None,
caller_location: None,
};
- fx.per_local_var_debug_info = fx.compute_per_local_var_debug_info(&mut bx);
+ fx.per_local_var_debug_info = fx.compute_per_local_var_debug_info(&mut start_bx);
// Evaluate all required consts; codegen later assumes that CTFE will never fail.
let mut all_consts_ok = true;
@@ -191,7 +191,7 @@ pub fn codegen_mir<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
// errored or at least linted
ErrorHandled::Reported(_) | ErrorHandled::Linted => {}
ErrorHandled::TooGeneric => {
- span_bug!(const_.span, "codgen encountered polymorphic constant: {:?}", err)
+ span_bug!(const_.span, "codegen encountered polymorphic constant: {:?}", err)
}
}
}
@@ -206,29 +206,29 @@ pub fn codegen_mir<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
// Allocate variable and temp allocas
fx.locals = {
- let args = arg_local_refs(&mut bx, &mut fx, &memory_locals);
+ let args = arg_local_refs(&mut start_bx, &mut fx, &memory_locals);
let mut allocate_local = |local| {
let decl = &mir.local_decls[local];
- let layout = bx.layout_of(fx.monomorphize(decl.ty));
+ let layout = start_bx.layout_of(fx.monomorphize(decl.ty));
assert!(!layout.ty.has_erasable_regions());
if local == mir::RETURN_PLACE && fx.fn_abi.ret.is_indirect() {
debug!("alloc: {:?} (return place) -> place", local);
- let llretptr = bx.get_param(0);
+ let llretptr = start_bx.get_param(0);
return LocalRef::Place(PlaceRef::new_sized(llretptr, layout));
}
if memory_locals.contains(local) {
debug!("alloc: {:?} -> place", local);
if layout.is_unsized() {
- LocalRef::UnsizedPlace(PlaceRef::alloca_unsized_indirect(&mut bx, layout))
+ LocalRef::UnsizedPlace(PlaceRef::alloca_unsized_indirect(&mut start_bx, layout))
} else {
- LocalRef::Place(PlaceRef::alloca(&mut bx, layout))
+ LocalRef::Place(PlaceRef::alloca(&mut start_bx, layout))
}
} else {
debug!("alloc: {:?} -> operand", local);
- LocalRef::new_operand(&mut bx, layout)
+ LocalRef::new_operand(&mut start_bx, layout)
}
};
@@ -240,7 +240,7 @@ pub fn codegen_mir<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
};
// Apply debuginfo to the newly allocated locals.
- fx.debug_introduce_locals(&mut bx);
+ fx.debug_introduce_locals(&mut start_bx);
// Codegen the body of each block using reverse postorder
for (bb, _) in traversal::reverse_postorder(&mir) {
@@ -283,7 +283,7 @@ fn arg_local_refs<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
for i in 0..tupled_arg_tys.len() {
let arg = &fx.fn_abi.args[idx];
idx += 1;
- if arg.pad.is_some() {
+ if let PassMode::Cast(_, true) = arg.mode {
llarg_idx += 1;
}
let pr_field = place.project_field(bx, i);
@@ -309,7 +309,7 @@ fn arg_local_refs<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
let arg = &fx.fn_abi.args[idx];
idx += 1;
- if arg.pad.is_some() {
+ if let PassMode::Cast(_, true) = arg.mode {
llarg_idx += 1;
}
diff --git a/compiler/rustc_codegen_ssa/src/mir/operand.rs b/compiler/rustc_codegen_ssa/src/mir/operand.rs
index c612634fc..e6ba642a7 100644
--- a/compiler/rustc_codegen_ssa/src/mir/operand.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/operand.rs
@@ -72,10 +72,6 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> {
) -> Self {
let layout = bx.layout_of(ty);
- if layout.is_zst() {
- return OperandRef::new_zst(bx, layout);
- }
-
let val = match val {
ConstValue::Scalar(x) => {
let Abi::Scalar(scalar) = layout.abi else {
@@ -84,10 +80,7 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> {
let llval = bx.scalar_to_backend(x, scalar, bx.immediate_backend_type(layout));
OperandValue::Immediate(llval)
}
- ConstValue::ZeroSized => {
- let llval = bx.zst_to_backend(bx.immediate_backend_type(layout));
- OperandValue::Immediate(llval)
- }
+ ConstValue::ZeroSized => return OperandRef::new_zst(bx, layout),
ConstValue::Slice { data, start, end } => {
let Abi::ScalarPair(a_scalar, _) = layout.abi else {
bug!("from_const: invalid ScalarPair layout: {:#?}", layout);
@@ -359,7 +352,7 @@ impl<'a, 'tcx, V: CodegenObject> OperandValue<V> {
// Allocate an appropriate region on the stack, and copy the value into it
let (llsize, _) = glue::size_and_align_of_dst(bx, unsized_ty, Some(llextra));
- let lldst = bx.array_alloca(bx.cx().type_i8(), llsize, max_align);
+ let lldst = bx.byte_array_alloca(llsize, max_align);
bx.memcpy(lldst, max_align, llptr, min_align, llsize, flags);
// Store the allocated region and the extra to the indirect place.
diff --git a/compiler/rustc_codegen_ssa/src/mir/place.rs b/compiler/rustc_codegen_ssa/src/mir/place.rs
index 268c4d765..9c18df564 100644
--- a/compiler/rustc_codegen_ssa/src/mir/place.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/place.rs
@@ -4,7 +4,6 @@ use super::{FunctionCx, LocalRef};
use crate::common::IntPredicate;
use crate::glue;
use crate::traits::*;
-use crate::MemFlags;
use rustc_middle::mir;
use rustc_middle::mir::tcx::PlaceTy;
@@ -245,7 +244,7 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
};
bx.intcast(tag.immediate(), cast_to, signed)
}
- TagEncoding::Niche { dataful_variant, ref niche_variants, niche_start } => {
+ TagEncoding::Niche { untagged_variant, ref niche_variants, niche_start } => {
// Rebase from niche values to discriminants, and check
// whether the result is in range for the niche variants.
let niche_llty = bx.cx().immediate_backend_type(tag.layout);
@@ -303,7 +302,7 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
bx.select(
is_niche,
niche_discr,
- bx.cx().const_uint(cast_to, dataful_variant.as_u32() as u64),
+ bx.cx().const_uint(cast_to, untagged_variant.as_u32() as u64),
)
}
}
@@ -338,21 +337,11 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
}
Variants::Multiple {
tag_encoding:
- TagEncoding::Niche { dataful_variant, ref niche_variants, niche_start },
+ TagEncoding::Niche { untagged_variant, ref niche_variants, niche_start },
tag_field,
..
} => {
- if variant_index != dataful_variant {
- if bx.cx().sess().target.arch == "arm"
- || bx.cx().sess().target.arch == "aarch64"
- {
- // FIXME(#34427): as workaround for LLVM bug on ARM,
- // use memset of 0 before assigning niche value.
- let fill_byte = bx.cx().const_u8(0);
- let size = bx.cx().const_usize(self.layout.size.bytes());
- bx.memset(self.llval, fill_byte, size, self.align, MemFlags::empty());
- }
-
+ if variant_index != untagged_variant {
let niche = self.project_field(bx, tag_field);
let niche_llty = bx.cx().immediate_backend_type(niche.layout);
let niche_value = variant_index.as_u32() - niche_variants.start().as_u32();
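
For reference, a minimal illustration of the `TagEncoding::Niche` case whose field is renamed here: `Option<&u8>` needs no separate tag because the all-zero bit pattern of the never-null reference encodes `None`, while `Some` is the untagged variant occupying the whole value.

    fn main() {
        assert_eq!(
            std::mem::size_of::<Option<&u8>>(),
            std::mem::size_of::<&u8>()
        );
    }
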
@@ -411,6 +400,21 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
downcast
}
+ pub fn project_type<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
+ &self,
+ bx: &mut Bx,
+ ty: Ty<'tcx>,
+ ) -> Self {
+ let mut downcast = *self;
+ downcast.layout = bx.cx().layout_of(ty);
+
+ // Cast to the appropriate type.
+ let variant_ty = bx.cx().backend_type(downcast.layout);
+ downcast.llval = bx.pointercast(downcast.llval, bx.cx().type_ptr_to(variant_ty));
+
+ downcast
+ }
+
pub fn storage_live<Bx: BuilderMethods<'a, 'tcx, Value = V>>(&self, bx: &mut Bx) {
bx.lifetime_start(self.llval, self.layout.size);
}
@@ -453,6 +457,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
mir::ProjectionElem::Field(ref field, _) => {
cg_base.project_field(bx, field.index())
}
+ mir::ProjectionElem::OpaqueCast(ty) => cg_base.project_type(bx, ty),
mir::ProjectionElem::Index(index) => {
let index = &mir::Operand::Copy(mir::Place::from(index));
let index = self.codegen_operand(bx, index);
diff --git a/compiler/rustc_codegen_ssa/src/mir/rvalue.rs b/compiler/rustc_codegen_ssa/src/mir/rvalue.rs
index 26b9fbf44..4aab31fbf 100644
--- a/compiler/rustc_codegen_ssa/src/mir/rvalue.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/rvalue.rs
@@ -87,7 +87,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
let size = bx.const_usize(dest.layout.size.bytes());
// Use llvm.memset.p0i8.* to initialize all zero arrays
- if bx.cx().const_to_opt_uint(v) == Some(0) {
+ if bx.cx().const_to_opt_u128(v, false) == Some(0) {
let fill = bx.cx().const_u8(0);
bx.memset(start, fill, size, dest.align, MemFlags::empty());
return bx;
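
A small example of the rvalue this check targets; switching to `const_to_opt_u128` presumably lets all-zero elements wider than 64 bits qualify for the memset path as well.

    fn main() {
        // An all-zero repeat expression is eligible for a single llvm.memset
        // with fill byte 0 instead of an element-by-element store loop.
        let zeros = [0u128; 64];
        assert_eq!(zeros.iter().copied().sum::<u128>(), 0);
    }
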
@@ -249,7 +249,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
OperandValue::Pair(lldata, llextra)
}
mir::CastKind::Pointer(PointerCast::MutToConstPointer)
- | mir::CastKind::Misc
+ | mir::CastKind::PtrToPtr
if bx.cx().is_backend_scalar_pair(operand.layout) =>
{
if let OperandValue::Pair(data_ptr, meta) = operand.val {
@@ -271,10 +271,26 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
bug!("unexpected non-pair operand");
}
}
+ mir::CastKind::DynStar => {
+ let (lldata, llextra) = match operand.val {
+ OperandValue::Ref(_, _, _) => todo!(),
+ OperandValue::Immediate(v) => (v, None),
+ OperandValue::Pair(v, l) => (v, Some(l)),
+ };
+ let (lldata, llextra) =
+ base::cast_to_dyn_star(&mut bx, lldata, operand.layout, cast.ty, llextra);
+ OperandValue::Pair(lldata, llextra)
+ }
mir::CastKind::Pointer(
PointerCast::MutToConstPointer | PointerCast::ArrayToPointer,
)
- | mir::CastKind::Misc
+ | mir::CastKind::IntToInt
+ | mir::CastKind::FloatToInt
+ | mir::CastKind::FloatToFloat
+ | mir::CastKind::IntToFloat
+ | mir::CastKind::PtrToPtr
+ | mir::CastKind::FnPtrToPtr
+
// Since int2ptr can have arbitrary integer types as input (so we have to do
// sign extension and all that), it is currently best handled in the same code
// path as the other integer-to-X casts.
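
A sketch of the unstable surface syntax behind `CastKind::DynStar`, written against the cast form used by the compiler's early dyn-star tests; the exact syntax and coercion rules were still in flux at this point, so treat this as an assumption (nightly only).

    #![feature(dyn_star)]
    #![allow(incomplete_features)]

    use std::fmt::Debug;

    fn main() {
        let value: usize = 42;
        // Packs the usize-sized payload together with Debug's vtable into the
        // (data, vtable) pair this arm emits as an OperandValue::Pair.
        let packed: dyn* Debug = value as dyn* Debug;
        let _ = packed;
    }
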
diff --git a/compiler/rustc_codegen_ssa/src/mir/statement.rs b/compiler/rustc_codegen_ssa/src/mir/statement.rs
index f452f2988..1db0fb3a6 100644
--- a/compiler/rustc_codegen_ssa/src/mir/statement.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/statement.rs
@@ -1,4 +1,5 @@
use rustc_middle::mir;
+use rustc_middle::mir::NonDivergingIntrinsic;
use super::FunctionCx;
use super::LocalRef;
@@ -73,11 +74,14 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
self.codegen_coverage(&mut bx, coverage.clone(), statement.source_info.scope);
bx
}
- mir::StatementKind::CopyNonOverlapping(box mir::CopyNonOverlapping {
- ref src,
- ref dst,
- ref count,
- }) => {
+ mir::StatementKind::Intrinsic(box NonDivergingIntrinsic::Assume(ref op)) => {
+ let op_val = self.codegen_operand(&mut bx, op);
+ bx.assume(op_val.immediate());
+ bx
+ }
+ mir::StatementKind::Intrinsic(box NonDivergingIntrinsic::CopyNonOverlapping(
+ mir::CopyNonOverlapping { ref count, ref src, ref dst },
+ )) => {
let dst_val = self.codegen_operand(&mut bx, dst);
let src_val = self.codegen_operand(&mut bx, src);
let count = self.codegen_operand(&mut bx, count).immediate();
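
A sketch of the source-level intrinsic that now arrives here as a `NonDivergingIntrinsic::Assume` statement rather than a call terminator (nightly only, and purely illustrative):

    #![feature(core_intrinsics)]

    pub fn get_checked(v: &[u8], i: usize) -> u8 {
        // Lowered to StatementKind::Intrinsic(NonDivergingIntrinsic::Assume(..)),
        // which the new arm turns into a plain bx.assume on the operand.
        unsafe { core::intrinsics::assume(i < v.len()) };
        v[i] // the bounds check may be elided under the assumption
    }
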
diff --git a/compiler/rustc_codegen_ssa/src/target_features.rs b/compiler/rustc_codegen_ssa/src/target_features.rs
index ecad05185..83407ee8f 100644
--- a/compiler/rustc_codegen_ssa/src/target_features.rs
+++ b/compiler/rustc_codegen_ssa/src/target_features.rs
@@ -13,17 +13,25 @@ pub const RUSTC_SPECIFIC_FEATURES: &[&str] = &["crt-static"];
// if it doesn't, to_llvm_feature in llvm_util in rustc_codegen_llvm needs to be adapted
const ARM_ALLOWED_FEATURES: &[(&str, Option<Symbol>)] = &[
+ // tidy-alphabetical-start
("aclass", Some(sym::arm_target_feature)),
- ("mclass", Some(sym::arm_target_feature)),
- ("rclass", Some(sym::arm_target_feature)),
- ("dsp", Some(sym::arm_target_feature)),
- ("neon", Some(sym::arm_target_feature)),
+ ("aes", Some(sym::arm_target_feature)),
("crc", Some(sym::arm_target_feature)),
("crypto", Some(sym::arm_target_feature)),
- ("aes", Some(sym::arm_target_feature)),
- ("sha2", Some(sym::arm_target_feature)),
- ("i8mm", Some(sym::arm_target_feature)),
+ ("d32", Some(sym::arm_target_feature)),
("dotprod", Some(sym::arm_target_feature)),
+ ("dsp", Some(sym::arm_target_feature)),
+ ("fp-armv8", Some(sym::arm_target_feature)),
+ ("i8mm", Some(sym::arm_target_feature)),
+ ("mclass", Some(sym::arm_target_feature)),
+ ("neon", Some(sym::arm_target_feature)),
+ ("rclass", Some(sym::arm_target_feature)),
+ ("sha2", Some(sym::arm_target_feature)),
+ // This is needed for inline assembly, but shouldn't be stabilized as-is
+ // since it should be enabled per-function using #[instruction_set], not
+ // #[target_feature].
+ ("thumb-mode", Some(sym::arm_target_feature)),
+ ("thumb2", Some(sym::arm_target_feature)),
("v5te", Some(sym::arm_target_feature)),
("v6", Some(sym::arm_target_feature)),
("v6k", Some(sym::arm_target_feature)),
@@ -33,104 +41,97 @@ const ARM_ALLOWED_FEATURES: &[(&str, Option<Symbol>)] = &[
("vfp2", Some(sym::arm_target_feature)),
("vfp3", Some(sym::arm_target_feature)),
("vfp4", Some(sym::arm_target_feature)),
- ("fp-armv8", Some(sym::arm_target_feature)),
- // This is needed for inline assembly, but shouldn't be stabilized as-is
- // since it should be enabled per-function using #[instruction_set], not
- // #[target_feature].
- ("thumb-mode", Some(sym::arm_target_feature)),
- ("thumb2", Some(sym::arm_target_feature)),
- ("d32", Some(sym::arm_target_feature)),
+ // tidy-alphabetical-end
];
const AARCH64_ALLOWED_FEATURES: &[(&str, Option<Symbol>)] = &[
- // FEAT_AdvSimd & FEAT_FP
- ("neon", None),
- // FEAT_FP16
- ("fp16", None),
- // FEAT_SVE
- ("sve", None),
+ // tidy-alphabetical-start
+ // FEAT_AES
+ ("aes", None),
+ // FEAT_BF16
+ ("bf16", None),
+ // FEAT_BTI
+ ("bti", None),
// FEAT_CRC
("crc", None),
- // FEAT_RAS
- ("ras", None),
- // FEAT_LSE
- ("lse", None),
- // FEAT_RDM
- ("rdm", None),
- // FEAT_RCPC
- ("rcpc", None),
- // FEAT_RCPC2
- ("rcpc2", None),
- // FEAT_DotProd
- ("dotprod", None),
- // FEAT_TME
- ("tme", None),
- // FEAT_FHM
- ("fhm", None),
// FEAT_DIT
("dit", None),
- // FEAT_FLAGM
- ("flagm", None),
- // FEAT_SSBS
- ("ssbs", None),
- // FEAT_SB
- ("sb", None),
- // FEAT_PAUTH (address authentication)
- ("paca", None),
- // FEAT_PAUTH (generic authentication)
- ("pacg", None),
+ // FEAT_DotProd
+ ("dotprod", None),
// FEAT_DPB
("dpb", None),
// FEAT_DPB2
("dpb2", None),
- // FEAT_SVE2
- ("sve2", None),
- // FEAT_SVE2_AES
- ("sve2-aes", None),
- // FEAT_SVE2_SM4
- ("sve2-sm4", None),
- // FEAT_SVE2_SHA3
- ("sve2-sha3", None),
- // FEAT_SVE2_BitPerm
- ("sve2-bitperm", None),
- // FEAT_FRINTTS
- ("frintts", None),
- // FEAT_I8MM
- ("i8mm", None),
// FEAT_F32MM
("f32mm", None),
// FEAT_F64MM
("f64mm", None),
- // FEAT_BF16
- ("bf16", None),
- // FEAT_RAND
- ("rand", None),
- // FEAT_BTI
- ("bti", None),
- // FEAT_MTE
- ("mte", None),
- // FEAT_JSCVT
- ("jsconv", None),
// FEAT_FCMA
("fcma", None),
- // FEAT_AES
- ("aes", None),
+ // FEAT_FHM
+ ("fhm", None),
+ // FEAT_FLAGM
+ ("flagm", None),
+ // FEAT_FP16
+ ("fp16", None),
+ // FEAT_FRINTTS
+ ("frintts", None),
+ // FEAT_I8MM
+ ("i8mm", None),
+ // FEAT_JSCVT
+ ("jsconv", None),
+ // FEAT_LOR
+ ("lor", None),
+ // FEAT_LSE
+ ("lse", None),
+ // FEAT_MTE
+ ("mte", None),
+ // FEAT_AdvSimd & FEAT_FP
+ ("neon", None),
+ // FEAT_PAUTH (address authentication)
+ ("paca", None),
+ // FEAT_PAUTH (generic authentication)
+ ("pacg", None),
+ // FEAT_PAN
+ ("pan", None),
+ // FEAT_PMUv3
+ ("pmuv3", None),
+ // FEAT_RAND
+ ("rand", None),
+ // FEAT_RAS
+ ("ras", None),
+ // FEAT_RCPC
+ ("rcpc", None),
+ // FEAT_RCPC2
+ ("rcpc2", None),
+ // FEAT_RDM
+ ("rdm", None),
+ // FEAT_SB
+ ("sb", None),
// FEAT_SHA1 & FEAT_SHA256
("sha2", None),
// FEAT_SHA512 & FEAT_SHA3
("sha3", None),
// FEAT_SM3 & FEAT_SM4
("sm4", None),
- // FEAT_PAN
- ("pan", None),
- // FEAT_LOR
- ("lor", None),
- // FEAT_VHE
- ("vh", None),
- // FEAT_PMUv3
- ("pmuv3", None),
// FEAT_SPE
("spe", None),
+ // FEAT_SSBS
+ ("ssbs", None),
+ // FEAT_SVE
+ ("sve", None),
+ // FEAT_SVE2
+ ("sve2", None),
+ // FEAT_SVE2_AES
+ ("sve2-aes", None),
+ // FEAT_SVE2_BitPerm
+ ("sve2-bitperm", None),
+ // FEAT_SVE2_SHA3
+ ("sve2-sha3", None),
+ // FEAT_SVE2_SM4
+ ("sve2-sm4", None),
+ // FEAT_TME
+ ("tme", None),
("v8.1a", Some(sym::aarch64_ver_target_feature)),
("v8.2a", Some(sym::aarch64_ver_target_feature)),
("v8.3a", Some(sym::aarch64_ver_target_feature)),
@@ -138,6 +139,9 @@ const AARCH64_ALLOWED_FEATURES: &[(&str, Option<Symbol>)] = &[
("v8.5a", Some(sym::aarch64_ver_target_feature)),
("v8.6a", Some(sym::aarch64_ver_target_feature)),
("v8.7a", Some(sym::aarch64_ver_target_feature)),
+ // FEAT_VHE
+ ("vh", None),
+ // tidy-alphabetical-end
];
const AARCH64_TIED_FEATURES: &[&[&str]] = &[
@@ -145,6 +149,7 @@ const AARCH64_TIED_FEATURES: &[&[&str]] = &[
];
const X86_ALLOWED_FEATURES: &[(&str, Option<Symbol>)] = &[
+ // tidy-alphabetical-start
("adx", None),
("aes", None),
("avx", None),
@@ -194,62 +199,80 @@ const X86_ALLOWED_FEATURES: &[(&str, Option<Symbol>)] = &[
("xsavec", None),
("xsaveopt", None),
("xsaves", None),
+ // tidy-alphabetical-end
];
const HEXAGON_ALLOWED_FEATURES: &[(&str, Option<Symbol>)] = &[
+ // tidy-alphabetical-start
("hvx", Some(sym::hexagon_target_feature)),
("hvx-length128b", Some(sym::hexagon_target_feature)),
+ // tidy-alphabetical-end
];
const POWERPC_ALLOWED_FEATURES: &[(&str, Option<Symbol>)] = &[
+ // tidy-alphabetical-start
("altivec", Some(sym::powerpc_target_feature)),
("power8-altivec", Some(sym::powerpc_target_feature)),
- ("power9-altivec", Some(sym::powerpc_target_feature)),
("power8-vector", Some(sym::powerpc_target_feature)),
+ ("power9-altivec", Some(sym::powerpc_target_feature)),
("power9-vector", Some(sym::powerpc_target_feature)),
("vsx", Some(sym::powerpc_target_feature)),
+ // tidy-alphabetical-end
];
-const MIPS_ALLOWED_FEATURES: &[(&str, Option<Symbol>)] =
- &[("fp64", Some(sym::mips_target_feature)), ("msa", Some(sym::mips_target_feature))];
+const MIPS_ALLOWED_FEATURES: &[(&str, Option<Symbol>)] = &[
+ // tidy-alphabetical-start
+ ("fp64", Some(sym::mips_target_feature)),
+ ("msa", Some(sym::mips_target_feature)),
+ ("virt", Some(sym::mips_target_feature)),
+ // tidy-alphabetical-end
+];
const RISCV_ALLOWED_FEATURES: &[(&str, Option<Symbol>)] = &[
- ("m", Some(sym::riscv_target_feature)),
+ // tidy-alphabetical-start
("a", Some(sym::riscv_target_feature)),
("c", Some(sym::riscv_target_feature)),
- ("f", Some(sym::riscv_target_feature)),
("d", Some(sym::riscv_target_feature)),
("e", Some(sym::riscv_target_feature)),
+ ("f", Some(sym::riscv_target_feature)),
+ ("m", Some(sym::riscv_target_feature)),
("v", Some(sym::riscv_target_feature)),
- ("zfinx", Some(sym::riscv_target_feature)),
- ("zdinx", Some(sym::riscv_target_feature)),
- ("zhinx", Some(sym::riscv_target_feature)),
- ("zhinxmin", Some(sym::riscv_target_feature)),
- ("zfh", Some(sym::riscv_target_feature)),
- ("zfhmin", Some(sym::riscv_target_feature)),
+ ("zba", Some(sym::riscv_target_feature)),
+ ("zbb", Some(sym::riscv_target_feature)),
+ ("zbc", Some(sym::riscv_target_feature)),
("zbkb", Some(sym::riscv_target_feature)),
("zbkc", Some(sym::riscv_target_feature)),
("zbkx", Some(sym::riscv_target_feature)),
+ ("zbs", Some(sym::riscv_target_feature)),
+ ("zdinx", Some(sym::riscv_target_feature)),
+ ("zfh", Some(sym::riscv_target_feature)),
+ ("zfhmin", Some(sym::riscv_target_feature)),
+ ("zfinx", Some(sym::riscv_target_feature)),
+ ("zhinx", Some(sym::riscv_target_feature)),
+ ("zhinxmin", Some(sym::riscv_target_feature)),
+ ("zk", Some(sym::riscv_target_feature)),
+ ("zkn", Some(sym::riscv_target_feature)),
("zknd", Some(sym::riscv_target_feature)),
("zkne", Some(sym::riscv_target_feature)),
("zknh", Some(sym::riscv_target_feature)),
- ("zksed", Some(sym::riscv_target_feature)),
- ("zksh", Some(sym::riscv_target_feature)),
("zkr", Some(sym::riscv_target_feature)),
- ("zkn", Some(sym::riscv_target_feature)),
("zks", Some(sym::riscv_target_feature)),
- ("zk", Some(sym::riscv_target_feature)),
+ ("zksed", Some(sym::riscv_target_feature)),
+ ("zksh", Some(sym::riscv_target_feature)),
("zkt", Some(sym::riscv_target_feature)),
+ // tidy-alphabetical-end
];
const WASM_ALLOWED_FEATURES: &[(&str, Option<Symbol>)] = &[
- ("simd128", None),
+ // tidy-alphabetical-start
("atomics", Some(sym::wasm_target_feature)),
- ("nontrapping-fptoint", Some(sym::wasm_target_feature)),
("bulk-memory", Some(sym::wasm_target_feature)),
("mutable-globals", Some(sym::wasm_target_feature)),
+ ("nontrapping-fptoint", Some(sym::wasm_target_feature)),
("reference-types", Some(sym::wasm_target_feature)),
("sign-ext", Some(sym::wasm_target_feature)),
+ ("simd128", None),
+ // tidy-alphabetical-end
];
const BPF_ALLOWED_FEATURES: &[(&str, Option<Symbol>)] = &[("alu32", Some(sym::bpf_target_feature))];
diff --git a/compiler/rustc_codegen_ssa/src/traits/abi.rs b/compiler/rustc_codegen_ssa/src/traits/abi.rs
index a00d78daf..60d8f2a9e 100644
--- a/compiler/rustc_codegen_ssa/src/traits/abi.rs
+++ b/compiler/rustc_codegen_ssa/src/traits/abi.rs
@@ -1,8 +1,5 @@
use super::BackendTypes;
-use rustc_middle::ty::Ty;
-use rustc_target::abi::call::FnAbi;
pub trait AbiBuilderMethods<'tcx>: BackendTypes {
- fn apply_attrs_callsite(&mut self, fn_abi: &FnAbi<'tcx, Ty<'tcx>>, callsite: Self::Value);
fn get_param(&mut self, index: usize) -> Self::Value;
}
diff --git a/compiler/rustc_codegen_ssa/src/traits/backend.rs b/compiler/rustc_codegen_ssa/src/traits/backend.rs
index 779bd3ea2..87e347c61 100644
--- a/compiler/rustc_codegen_ssa/src/traits/backend.rs
+++ b/compiler/rustc_codegen_ssa/src/traits/backend.rs
@@ -134,8 +134,6 @@ pub trait ExtraBackendMethods: CodegenBackend + WriteBackendMethods + Sized + Se
opt_level: config::OptLevel,
target_features: &[String],
) -> TargetMachineFactoryFn<Self>;
- fn target_cpu<'b>(&self, sess: &'b Session) -> &'b str;
- fn tune_cpu<'b>(&self, sess: &'b Session) -> Option<&'b str>;
fn spawn_thread<F, T>(_time_trace: bool, f: F) -> std::thread::JoinHandle<T>
where
diff --git a/compiler/rustc_codegen_ssa/src/traits/builder.rs b/compiler/rustc_codegen_ssa/src/traits/builder.rs
index 9f49749bb..01408f39f 100644
--- a/compiler/rustc_codegen_ssa/src/traits/builder.rs
+++ b/compiler/rustc_codegen_ssa/src/traits/builder.rs
@@ -1,6 +1,5 @@
use super::abi::AbiBuilderMethods;
use super::asm::AsmBuilderMethods;
-use super::consts::ConstMethods;
use super::coverageinfo::CoverageInfoBuilderMethods;
use super::debuginfo::DebugInfoBuilderMethods;
use super::intrinsic::IntrinsicCallMethods;
@@ -15,10 +14,10 @@ use crate::mir::operand::OperandRef;
use crate::mir::place::PlaceRef;
use crate::MemFlags;
-use rustc_apfloat::{ieee, Float, Round, Status};
use rustc_middle::ty::layout::{HasParamEnv, TyAndLayout};
use rustc_middle::ty::Ty;
use rustc_span::Span;
+use rustc_target::abi::call::FnAbi;
use rustc_target::abi::{Abi, Align, Scalar, Size, WrappingRange};
use rustc_target::spec::HasTargetSpec;
@@ -73,6 +72,7 @@ pub trait BuilderMethods<'a, 'tcx>:
fn invoke(
&mut self,
llty: Self::Type,
+ fn_abi: Option<&FnAbi<'tcx, Ty<'tcx>>>,
llfn: Self::Value,
args: &[Self::Value],
then: Self::BasicBlock,
@@ -135,8 +135,7 @@ pub trait BuilderMethods<'a, 'tcx>:
fn to_immediate_scalar(&mut self, val: Self::Value, scalar: Scalar) -> Self::Value;
fn alloca(&mut self, ty: Self::Type, align: Align) -> Self::Value;
- fn dynamic_alloca(&mut self, ty: Self::Type, align: Align) -> Self::Value;
- fn array_alloca(&mut self, ty: Self::Type, len: Self::Value, align: Align) -> Self::Value;
+ fn byte_array_alloca(&mut self, len: Self::Value, align: Align) -> Self::Value;
fn load(&mut self, ty: Self::Type, ptr: Self::Value, align: Align) -> Self::Value;
fn volatile_load(&mut self, ty: Self::Type, ptr: Self::Value) -> Self::Value;
@@ -188,8 +187,8 @@ pub trait BuilderMethods<'a, 'tcx>:
fn trunc(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
fn sext(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
- fn fptoui_sat(&mut self, val: Self::Value, dest_ty: Self::Type) -> Option<Self::Value>;
- fn fptosi_sat(&mut self, val: Self::Value, dest_ty: Self::Type) -> Option<Self::Value>;
+ fn fptoui_sat(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
+ fn fptosi_sat(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
fn fptoui(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
fn fptosi(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
fn uitofp(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
@@ -223,156 +222,7 @@ pub trait BuilderMethods<'a, 'tcx>:
return if signed { self.fptosi(x, dest_ty) } else { self.fptoui(x, dest_ty) };
}
- let try_sat_result =
- if signed { self.fptosi_sat(x, dest_ty) } else { self.fptoui_sat(x, dest_ty) };
- if let Some(try_sat_result) = try_sat_result {
- return try_sat_result;
- }
-
- let int_width = self.cx().int_width(int_ty);
- let float_width = self.cx().float_width(float_ty);
- // LLVM's fpto[su]i returns undef when the input x is infinite, NaN, or does not fit into the
- // destination integer type after rounding towards zero. This `undef` value can cause UB in
- // safe code (see issue #10184), so we implement a saturating conversion on top of it:
- // Semantically, the mathematical value of the input is rounded towards zero to the next
- // mathematical integer, and then the result is clamped into the range of the destination
- // integer type. Positive and negative infinity are mapped to the maximum and minimum value of
- // the destination integer type. NaN is mapped to 0.
- //
- // Define f_min and f_max as the largest and smallest (finite) floats that are exactly equal to
- // a value representable in int_ty.
- // They are exactly equal to int_ty::{MIN,MAX} if float_ty has enough significand bits.
- // Otherwise, int_ty::MAX must be rounded towards zero, as it is one less than a power of two.
- // int_ty::MIN, however, is either zero or a negative power of two and is thus exactly
- // representable. Note that this only works if float_ty's exponent range is sufficiently large.
- // f16 or 256 bit integers would break this property. Right now the smallest float type is f32
- // with exponents ranging up to 127, which is barely enough for i128::MIN = -2^127.
- // On the other hand, f_max works even if int_ty::MAX is greater than float_ty::MAX. Because
- // we're rounding towards zero, we just get float_ty::MAX (which is always an integer).
- // This already happens today with u128::MAX = 2^128 - 1 > f32::MAX.
- let int_max = |signed: bool, int_width: u64| -> u128 {
- let shift_amount = 128 - int_width;
- if signed { i128::MAX as u128 >> shift_amount } else { u128::MAX >> shift_amount }
- };
- let int_min = |signed: bool, int_width: u64| -> i128 {
- if signed { i128::MIN >> (128 - int_width) } else { 0 }
- };
-
- let compute_clamp_bounds_single = |signed: bool, int_width: u64| -> (u128, u128) {
- let rounded_min =
- ieee::Single::from_i128_r(int_min(signed, int_width), Round::TowardZero);
- assert_eq!(rounded_min.status, Status::OK);
- let rounded_max =
- ieee::Single::from_u128_r(int_max(signed, int_width), Round::TowardZero);
- assert!(rounded_max.value.is_finite());
- (rounded_min.value.to_bits(), rounded_max.value.to_bits())
- };
- let compute_clamp_bounds_double = |signed: bool, int_width: u64| -> (u128, u128) {
- let rounded_min =
- ieee::Double::from_i128_r(int_min(signed, int_width), Round::TowardZero);
- assert_eq!(rounded_min.status, Status::OK);
- let rounded_max =
- ieee::Double::from_u128_r(int_max(signed, int_width), Round::TowardZero);
- assert!(rounded_max.value.is_finite());
- (rounded_min.value.to_bits(), rounded_max.value.to_bits())
- };
- // To implement saturation, we perform the following steps:
- //
- // 1. Cast x to an integer with fpto[su]i. This may result in undef.
- // 2. Compare x to f_min and f_max, and use the comparison results to select:
- // a) int_ty::MIN if x < f_min or x is NaN
- // b) int_ty::MAX if x > f_max
- // c) the result of fpto[su]i otherwise
- // 3. If x is NaN, return 0.0, otherwise return the result of step 2.
- //
- // This avoids resulting undef because values in range [f_min, f_max] by definition fit into the
- // destination type. It creates an undef temporary, but *producing* undef is not UB. Our use of
- // undef does not introduce any non-determinism either.
- // More importantly, the above procedure correctly implements saturating conversion.
- // Proof (sketch):
- // If x is NaN, 0 is returned by definition.
- // Otherwise, x is finite or infinite and thus can be compared with f_min and f_max.
- // This yields three cases to consider:
- // (1) if x in [f_min, f_max], the result of fpto[su]i is returned, which agrees with
- // saturating conversion for inputs in that range.
- // (2) if x > f_max, then x is larger than int_ty::MAX. This holds even if f_max is rounded
- // (i.e., if f_max < int_ty::MAX) because in those cases, nextUp(f_max) is already larger
- // than int_ty::MAX. Because x is larger than int_ty::MAX, the return value of int_ty::MAX
- // is correct.
- // (3) if x < f_min, then x is smaller than int_ty::MIN. As shown earlier, f_min exactly equals
- // int_ty::MIN and therefore the return value of int_ty::MIN is correct.
- // QED.
-
- let float_bits_to_llval = |bx: &mut Self, bits| {
- let bits_llval = match float_width {
- 32 => bx.cx().const_u32(bits as u32),
- 64 => bx.cx().const_u64(bits as u64),
- n => bug!("unsupported float width {}", n),
- };
- bx.bitcast(bits_llval, float_ty)
- };
- let (f_min, f_max) = match float_width {
- 32 => compute_clamp_bounds_single(signed, int_width),
- 64 => compute_clamp_bounds_double(signed, int_width),
- n => bug!("unsupported float width {}", n),
- };
- let f_min = float_bits_to_llval(self, f_min);
- let f_max = float_bits_to_llval(self, f_max);
- let int_max = self.cx().const_uint_big(int_ty, int_max(signed, int_width));
- let int_min = self.cx().const_uint_big(int_ty, int_min(signed, int_width) as u128);
- let zero = self.cx().const_uint(int_ty, 0);
-
- // If we're working with vectors, constants must be "splatted": the constant is duplicated
- // into each lane of the vector. The algorithm stays the same, we are just using the
- // same constant across all lanes.
- let maybe_splat = |bx: &mut Self, val| {
- if bx.cx().type_kind(dest_ty) == TypeKind::Vector {
- bx.vector_splat(bx.vector_length(dest_ty), val)
- } else {
- val
- }
- };
- let f_min = maybe_splat(self, f_min);
- let f_max = maybe_splat(self, f_max);
- let int_max = maybe_splat(self, int_max);
- let int_min = maybe_splat(self, int_min);
- let zero = maybe_splat(self, zero);
-
- // Step 1 ...
- let fptosui_result = if signed { self.fptosi(x, dest_ty) } else { self.fptoui(x, dest_ty) };
- let less_or_nan = self.fcmp(RealPredicate::RealULT, x, f_min);
- let greater = self.fcmp(RealPredicate::RealOGT, x, f_max);
-
- // Step 2: We use two comparisons and two selects, with %s1 being the
- // result:
- // %less_or_nan = fcmp ult %x, %f_min
- // %greater = fcmp olt %x, %f_max
- // %s0 = select %less_or_nan, int_ty::MIN, %fptosi_result
- // %s1 = select %greater, int_ty::MAX, %s0
- // Note that %less_or_nan uses an *unordered* comparison. This
- // comparison is true if the operands are not comparable (i.e., if x is
- // NaN). The unordered comparison ensures that s1 becomes int_ty::MIN if
- // x is NaN.
- //
- // Performance note: Unordered comparison can be lowered to a "flipped"
- // comparison and a negation, and the negation can be merged into the
- // select. Therefore, it not necessarily any more expensive than an
- // ordered ("normal") comparison. Whether these optimizations will be
- // performed is ultimately up to the backend, but at least x86 does
- // perform them.
- let s0 = self.select(less_or_nan, int_min, fptosui_result);
- let s1 = self.select(greater, int_max, s0);
-
- // Step 3: NaN replacement.
- // For unsigned types, the above step already yielded int_ty::MIN == 0 if x is NaN.
- // Therefore we only need to execute this step for signed integer types.
- if signed {
- // LLVM has no isNaN predicate, so we use (x == x) instead
- let cmp = self.fcmp(RealPredicate::RealOEQ, x, x);
- self.select(cmp, s1, zero)
- } else {
- s1
- }
+ if signed { self.fptosi_sat(x, dest_ty) } else { self.fptoui_sat(x, dest_ty) }
}
fn icmp(&mut self, op: IntPredicate, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
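
With the open-coded clamp sequence gone, every backend must supply saturating conversions directly. As a worked reminder of the required semantics, these are the same rules `as` casts have followed since Rust 1.45:

    fn main() {
        assert_eq!(f32::NAN as i32, 0);                 // NaN maps to 0
        assert_eq!(f32::INFINITY as i32, i32::MAX);     // +inf saturates to MAX
        assert_eq!(f32::NEG_INFINITY as i32, i32::MIN); // -inf saturates to MIN
        assert_eq!(300.7f32 as u8, 255);                // rounds toward zero, then clamps
        assert_eq!((-1.0f32) as u8, 0);                 // below range for unsigned -> 0
    }
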
@@ -471,6 +321,7 @@ pub trait BuilderMethods<'a, 'tcx>:
fn call(
&mut self,
llty: Self::Type,
+ fn_abi: Option<&FnAbi<'tcx, Ty<'tcx>>>,
llfn: Self::Value,
args: &[Self::Value],
funclet: Option<&Self::Funclet>,
diff --git a/compiler/rustc_codegen_ssa/src/traits/consts.rs b/compiler/rustc_codegen_ssa/src/traits/consts.rs
index 8a91d4735..fdc7a30e8 100644
--- a/compiler/rustc_codegen_ssa/src/traits/consts.rs
+++ b/compiler/rustc_codegen_ssa/src/traits/consts.rs
@@ -29,7 +29,6 @@ pub trait ConstMethods<'tcx>: BackendTypes {
fn const_data_from_alloc(&self, alloc: ConstAllocation<'tcx>) -> Self::Value;
fn scalar_to_backend(&self, cv: Scalar, layout: abi::Scalar, llty: Self::Type) -> Self::Value;
- fn zst_to_backend(&self, llty: Self::Type) -> Self::Value;
fn from_const_alloc(
&self,
layout: TyAndLayout<'tcx>,
diff --git a/compiler/rustc_codegen_ssa/src/traits/intrinsic.rs b/compiler/rustc_codegen_ssa/src/traits/intrinsic.rs
index 7755e6793..450672fb9 100644
--- a/compiler/rustc_codegen_ssa/src/traits/intrinsic.rs
+++ b/compiler/rustc_codegen_ssa/src/traits/intrinsic.rs
@@ -5,7 +5,7 @@ use rustc_span::Span;
use rustc_target::abi::call::FnAbi;
pub trait IntrinsicCallMethods<'tcx>: BackendTypes {
- /// Remember to add all intrinsics here, in `compiler/rustc_typeck/src/check/mod.rs`,
+ /// Remember to add all intrinsics here, in `compiler/rustc_hir_analysis/src/check/mod.rs`,
/// and in `library/core/src/intrinsics.rs`; if you need access to any LLVM intrinsics,
/// add them to `compiler/rustc_codegen_llvm/src/context.rs`.
fn codegen_intrinsic_call(
diff --git a/compiler/rustc_codegen_ssa/src/traits/misc.rs b/compiler/rustc_codegen_ssa/src/traits/misc.rs
index 4266e42ec..04e2b8796 100644
--- a/compiler/rustc_codegen_ssa/src/traits/misc.rs
+++ b/compiler/rustc_codegen_ssa/src/traits/misc.rs
@@ -15,12 +15,8 @@ pub trait MiscMethods<'tcx>: BackendTypes {
fn eh_personality(&self) -> Self::Value;
fn sess(&self) -> &Session;
fn codegen_unit(&self) -> &'tcx CodegenUnit<'tcx>;
- fn used_statics(&self) -> &RefCell<Vec<Self::Value>>;
- fn compiler_used_statics(&self) -> &RefCell<Vec<Self::Value>>;
fn set_frame_pointer_type(&self, llfn: Self::Function);
fn apply_target_cpu_attr(&self, llfn: Self::Function);
- fn create_used_variable(&self);
- fn create_compiler_used_variable(&self);
/// Declares the extern "C" main function for the entry point. Returns None if the symbol already exists.
fn declare_c_main(&self, fn_type: Self::Type) -> Option<Self::Function>;
}
diff --git a/compiler/rustc_codegen_ssa/src/traits/type_.rs b/compiler/rustc_codegen_ssa/src/traits/type_.rs
index 8158e8dd0..bdc6a91cf 100644
--- a/compiler/rustc_codegen_ssa/src/traits/type_.rs
+++ b/compiler/rustc_codegen_ssa/src/traits/type_.rs
@@ -5,7 +5,6 @@ use crate::common::TypeKind;
use crate::mir::place::PlaceRef;
use rustc_middle::ty::layout::TyAndLayout;
use rustc_middle::ty::{self, Ty};
-use rustc_span::DUMMY_SP;
use rustc_target::abi::call::{ArgAbi, CastTarget, FnAbi, Reg};
use rustc_target::abi::{AddressSpace, Integer};
@@ -75,16 +74,16 @@ pub trait DerivedTypeMethods<'tcx>: BaseTypeMethods<'tcx> + MiscMethods<'tcx> {
}
fn type_is_sized(&self, ty: Ty<'tcx>) -> bool {
- ty.is_sized(self.tcx().at(DUMMY_SP), ty::ParamEnv::reveal_all())
+ ty.is_sized(self.tcx(), ty::ParamEnv::reveal_all())
}
fn type_is_freeze(&self, ty: Ty<'tcx>) -> bool {
- ty.is_freeze(self.tcx().at(DUMMY_SP), ty::ParamEnv::reveal_all())
+ ty.is_freeze(self.tcx(), ty::ParamEnv::reveal_all())
}
fn type_has_metadata(&self, ty: Ty<'tcx>) -> bool {
let param_env = ty::ParamEnv::reveal_all();
- if ty.is_sized(self.tcx().at(DUMMY_SP), param_env) {
+ if ty.is_sized(self.tcx(), param_env) {
return false;
}
diff --git a/compiler/rustc_codegen_ssa/src/traits/write.rs b/compiler/rustc_codegen_ssa/src/traits/write.rs
index e54ec34f1..e0e8ffa89 100644
--- a/compiler/rustc_codegen_ssa/src/traits/write.rs
+++ b/compiler/rustc_codegen_ssa/src/traits/write.rs
@@ -9,7 +9,6 @@ pub trait WriteBackendMethods: 'static + Sized + Clone {
type Module: Send + Sync;
type TargetMachine;
type ModuleBuffer: ModuleBufferMethods;
- type Context: ?Sized;
type ThinData: Send + Sync;
type ThinBuffer: ThinBufferMethods;
diff --git a/compiler/rustc_const_eval/Cargo.toml b/compiler/rustc_const_eval/Cargo.toml
index 32e8233a0..e09a6d1d6 100644
--- a/compiler/rustc_const_eval/Cargo.toml
+++ b/compiler/rustc_const_eval/Cargo.toml
@@ -4,7 +4,6 @@ version = "0.0.0"
edition = "2021"
[lib]
-doctest = false
[dependencies]
tracing = "0.1"
diff --git a/compiler/rustc_const_eval/src/const_eval/error.rs b/compiler/rustc_const_eval/src/const_eval/error.rs
index 322bfd5ce..4977a5d6b 100644
--- a/compiler/rustc_const_eval/src/const_eval/error.rs
+++ b/compiler/rustc_const_eval/src/const_eval/error.rs
@@ -2,20 +2,19 @@ use std::error::Error;
use std::fmt;
use rustc_errors::Diagnostic;
-use rustc_hir as hir;
use rustc_middle::mir::AssertKind;
use rustc_middle::ty::{layout::LayoutError, query::TyCtxtAt, ConstInt};
use rustc_span::{Span, Symbol};
use super::InterpCx;
use crate::interpret::{
- struct_error, ErrorHandled, FrameInfo, InterpError, InterpErrorInfo, Machine, MachineStopType, UnsupportedOpInfo,
+ struct_error, ErrorHandled, FrameInfo, InterpError, InterpErrorInfo, Machine, MachineStopType,
+ UnsupportedOpInfo,
};
/// The CTFE machine has some custom error kinds.
#[derive(Clone, Debug)]
pub enum ConstEvalErrKind {
- NeedsRfc(String),
ConstAccessesStatic,
ModifiedGlobal,
AssertFailure(AssertKind<ConstInt>),
@@ -23,11 +22,7 @@ pub enum ConstEvalErrKind {
Abort(String),
}
-impl MachineStopType for ConstEvalErrKind {
- fn is_hard_err(&self) -> bool {
- matches!(self, Self::Panic { .. })
- }
-}
+impl MachineStopType for ConstEvalErrKind {}
// The errors become `MachineStop` with plain strings when being raised.
// `ConstEvalErr` (in `librustc_middle/mir/interpret/error.rs`) knows to
@@ -42,9 +37,6 @@ impl fmt::Display for ConstEvalErrKind {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
use self::ConstEvalErrKind::*;
match *self {
- NeedsRfc(ref msg) => {
- write!(f, "\"{}\" needs an rfc before being allowed inside constants", msg)
- }
ConstAccessesStatic => write!(f, "constant accesses static"),
ModifiedGlobal => {
write!(f, "modifying a static's initial value from another static's initializer")
@@ -72,7 +64,7 @@ pub struct ConstEvalErr<'tcx> {
impl<'tcx> ConstEvalErr<'tcx> {
/// Turn an interpreter error into something to report to the user.
/// As a side-effect, if RUSTC_CTFE_BACKTRACE is set, this prints the backtrace.
- /// Should be called only if the error is actually going to to be reported!
+ /// Should be called only if the error is actually going to be reported!
pub fn new<'mir, M: Machine<'mir, 'tcx>>(
ecx: &InterpCx<'mir, 'tcx, M>,
error: InterpErrorInfo<'tcx>,
@@ -90,48 +82,10 @@ impl<'tcx> ConstEvalErr<'tcx> {
ConstEvalErr { error: error.into_kind(), stacktrace, span }
}
- pub fn struct_error(
- &self,
- tcx: TyCtxtAt<'tcx>,
- message: &str,
- decorate: impl FnOnce(&mut Diagnostic),
- ) -> ErrorHandled {
- self.struct_generic(tcx, message, decorate, None)
- }
-
pub fn report_as_error(&self, tcx: TyCtxtAt<'tcx>, message: &str) -> ErrorHandled {
self.struct_error(tcx, message, |_| {})
}
- pub fn report_as_lint(
- &self,
- tcx: TyCtxtAt<'tcx>,
- message: &str,
- lint_root: hir::HirId,
- span: Option<Span>,
- ) -> ErrorHandled {
- self.struct_generic(
- tcx,
- message,
- |lint: &mut Diagnostic| {
- // Apply the span.
- if let Some(span) = span {
- let primary_spans = lint.span.primary_spans().to_vec();
- // point at the actual error as the primary span
- lint.replace_span_with(span);
- // point to the `const` statement as a secondary span
- // they don't have any label
- for sp in primary_spans {
- if sp != span {
- lint.span_label(sp, "");
- }
- }
- }
- },
- Some(lint_root),
- )
- }
-
/// Create a diagnostic for this const eval error.
///
/// Sets the message passed in via `message` and adds span labels with detailed error
@@ -140,13 +94,12 @@ impl<'tcx> ConstEvalErr<'tcx> {
///
/// If `lint_root.is_some()` report it as a lint, else report it as a hard error.
/// (Except that for some errors, we ignore all that -- see `must_error` below.)
- #[instrument(skip(self, tcx, decorate, lint_root), level = "debug")]
- fn struct_generic(
+ #[instrument(skip(self, tcx, decorate), level = "debug")]
+ pub fn struct_error(
&self,
tcx: TyCtxtAt<'tcx>,
message: &str,
decorate: impl FnOnce(&mut Diagnostic),
- lint_root: Option<hir::HirId>,
) -> ErrorHandled {
let finish = |err: &mut Diagnostic, span_msg: Option<String>| {
trace!("reporting const eval failure at {:?}", self.span);
@@ -158,6 +111,7 @@ impl<'tcx> ConstEvalErr<'tcx> {
InterpError::Unsupported(
UnsupportedOpInfo::ReadPointerAsBytes
| UnsupportedOpInfo::PartialPointerOverwrite(_)
+ | UnsupportedOpInfo::PartialPointerCopy(_),
) => {
err.help("this code performed an operation that depends on the underlying bytes representing a pointer");
err.help("the absolute address of a pointer is not known at compile-time, so such operations are not supported");
@@ -226,27 +180,9 @@ impl<'tcx> ConstEvalErr<'tcx> {
let err_msg = self.error.to_string();
- // Regular case - emit a lint.
- if let Some(lint_root) = lint_root {
- // Report as lint.
- let hir_id =
- self.stacktrace.iter().rev().find_map(|frame| frame.lint_root).unwrap_or(lint_root);
- tcx.struct_span_lint_hir(
- rustc_session::lint::builtin::CONST_ERR,
- hir_id,
- tcx.span,
- |lint| {
- let mut lint = lint.build(message);
- finish(&mut lint, Some(err_msg));
- lint.emit();
- },
- );
- ErrorHandled::Linted
- } else {
- // Report as hard error.
- let mut err = struct_error(tcx, message);
- finish(&mut err, Some(err_msg));
- ErrorHandled::Reported(err.emit())
- }
+ // Report as hard error.
+ let mut err = struct_error(tcx, message);
+ finish(&mut err, Some(err_msg));
+ ErrorHandled::Reported(err.emit())
}
}
diff --git a/compiler/rustc_const_eval/src/const_eval/eval_queries.rs b/compiler/rustc_const_eval/src/const_eval/eval_queries.rs
index 975fb4b22..1b1052fdf 100644
--- a/compiler/rustc_const_eval/src/const_eval/eval_queries.rs
+++ b/compiler/rustc_const_eval/src/const_eval/eval_queries.rs
@@ -2,8 +2,8 @@ use super::{CompileTimeEvalContext, CompileTimeInterpreter, ConstEvalErr};
use crate::interpret::eval_nullary_intrinsic;
use crate::interpret::{
intern_const_alloc_recursive, Allocation, ConstAlloc, ConstValue, CtfeValidationMode, GlobalId,
- Immediate, InternKind, InterpCx, InterpResult, MPlaceTy, MemoryKind, OpTy, RefTracking,
- ScalarMaybeUninit, StackPopCleanup, InterpError,
+ Immediate, InternKind, InterpCx, InterpError, InterpResult, MPlaceTy, MemoryKind, OpTy,
+ RefTracking, StackPopCleanup,
};
use rustc_hir::def::DefKind;
@@ -13,7 +13,7 @@ use rustc_middle::mir::pretty::display_allocation;
use rustc_middle::traits::Reveal;
use rustc_middle::ty::layout::LayoutOf;
use rustc_middle::ty::print::with_no_trimmed_paths;
-use rustc_middle::ty::{self, subst::Subst, TyCtxt};
+use rustc_middle::ty::{self, TyCtxt};
use rustc_span::source_map::Span;
use rustc_target::abi::{self, Abi};
use std::borrow::Cow;
@@ -74,14 +74,16 @@ fn eval_body_using_ecx<'mir, 'tcx>(
None => InternKind::Constant,
}
};
+ ecx.machine.check_alignment = false; // interning doesn't need to respect alignment
intern_const_alloc_recursive(ecx, intern_kind, &ret)?;
+ // we leave alignment checks off, since this `ecx` will not be used for further evaluation anyway
debug!("eval_body_using_ecx done: {:?}", *ret);
Ok(ret)
}
/// The `InterpCx` is only meant to be used to do field and index projections into constants for
-/// `simd_shuffle` and const patterns in match arms.
+/// `simd_shuffle` and const patterns in match arms. It never performs alignment checks.
///
/// The function containing the `match` that is currently being analyzed may have generic bounds
/// that inform us about the generic bounds of the constant. E.g., using an associated constant
@@ -98,7 +100,11 @@ pub(super) fn mk_eval_cx<'mir, 'tcx>(
tcx,
root_span,
param_env,
- CompileTimeInterpreter::new(tcx.const_eval_limit(), can_access_statics),
+ CompileTimeInterpreter::new(
+ tcx.const_eval_limit(),
+ can_access_statics,
+ /*check_alignment:*/ false,
+ ),
)
}
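The new `check_alignment` argument threads a per-machine switch down to the interpreter's alignment checks (see the `enforce_alignment` hook added in the `machine.rs` hunk further below): `mk_eval_cx` contexts turn it off, while the main CTFE query enables it only under `-Zextra-const-ub-checks`. A self-contained sketch of the hook pattern with toy types, not the rustc trait:

// Toy version of the machine hook: the interpreter consults the machine,
// and the const-eval machine answers from a flag fixed at construction.
trait Machine {
    fn enforce_alignment(&self) -> bool;
}

struct CompileTimeMachine {
    check_alignment: bool,
}

impl Machine for CompileTimeMachine {
    fn enforce_alignment(&self) -> bool {
        self.check_alignment
    }
}

fn check_access(m: &impl Machine, addr: u64, required_align: u64) -> Result<(), String> {
    if m.enforce_alignment() && addr % required_align != 0 {
        return Err(format!("pointer {addr:#x} must be aligned to {required_align} bytes"));
    }
    Ok(())
}

fn main() {
    let lenient = CompileTimeMachine { check_alignment: false }; // like mk_eval_cx
    let strict = CompileTimeMachine { check_alignment: true };   // like -Zextra-const-ub-checks
    assert!(check_access(&lenient, 0x1001, 4).is_ok());
    assert!(check_access(&strict, 0x1001, 4).is_err());
}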
@@ -166,10 +172,7 @@ pub(super) fn op_to_const<'tcx>(
// see comment on `let try_as_immediate` above
Err(imm) => match *imm {
_ if imm.layout.is_zst() => ConstValue::ZeroSized,
- Immediate::Scalar(x) => match x {
- ScalarMaybeUninit::Scalar(s) => ConstValue::Scalar(s),
- ScalarMaybeUninit::Uninit => to_const_value(&op.assert_mem_place()),
- },
+ Immediate::Scalar(x) => ConstValue::Scalar(x),
Immediate::ScalarPair(a, b) => {
debug!("ScalarPair(a: {:?}, b: {:?})", a, b);
// We know `offset` is relative to the allocation, so we can use `into_parts`.
@@ -194,7 +197,7 @@ pub(super) fn op_to_const<'tcx>(
}
}
-#[instrument(skip(tcx), level = "debug")]
+#[instrument(skip(tcx), level = "debug", ret)]
pub(crate) fn turn_into_const_value<'tcx>(
tcx: TyCtxt<'tcx>,
constant: ConstAlloc<'tcx>,
@@ -203,7 +206,13 @@ pub(crate) fn turn_into_const_value<'tcx>(
let cid = key.value;
let def_id = cid.instance.def.def_id();
let is_static = tcx.is_static(def_id);
- let ecx = mk_eval_cx(tcx, tcx.def_span(key.value.instance.def_id()), key.param_env, is_static);
+ // This is just accessing an already computed constant, so no need to check alignment here.
+ let ecx = mk_eval_cx(
+ tcx,
+ tcx.def_span(key.value.instance.def_id()),
+ key.param_env,
+ /*can_access_statics:*/ is_static,
+ );
let mplace = ecx.raw_const_to_mplace(constant).expect(
"can only fail if layout computation failed, \
@@ -215,10 +224,7 @@ pub(crate) fn turn_into_const_value<'tcx>(
);
// Turn this into a proper constant.
- let const_val = op_to_const(&ecx, &mplace.into());
- debug!(?const_val);
-
- const_val
+ op_to_const(&ecx, &mplace.into())
}
#[instrument(skip(tcx), level = "debug")]
@@ -300,52 +306,34 @@ pub fn eval_to_allocation_raw_provider<'tcx>(
key.param_env,
// Statics (and promoteds inside statics) may access other statics, because unlike consts
// they do not have to behave "as if" they were evaluated at runtime.
- CompileTimeInterpreter::new(tcx.const_eval_limit(), /*can_access_statics:*/ is_static),
+ CompileTimeInterpreter::new(
+ tcx.const_eval_limit(),
+ /*can_access_statics:*/ is_static,
+ /*check_alignment:*/ tcx.sess.opts.unstable_opts.extra_const_ub_checks,
+ ),
);
let res = ecx.load_mir(cid.instance.def, cid.promoted);
match res.and_then(|body| eval_body_using_ecx(&mut ecx, cid, &body)) {
Err(error) => {
let err = ConstEvalErr::new(&ecx, error, None);
- // Some CTFE errors raise just a lint, not a hard error; see
- // <https://github.com/rust-lang/rust/issues/71800>.
- let is_hard_err = if let Some(def) = def.as_local() {
- // (Associated) consts only emit a lint, since they might be unused.
- !matches!(tcx.def_kind(def.did.to_def_id()), DefKind::Const | DefKind::AssocConst)
- // check if the inner InterpError is hard
- || err.error.is_hard_err()
+ let msg = if is_static {
+ Cow::from("could not evaluate static initializer")
} else {
- // use of broken constant from other crate: always an error
- true
- };
-
- if is_hard_err {
- let msg = if is_static {
- Cow::from("could not evaluate static initializer")
+ // If the current item has generics, we'd like to enrich the message with the
+ // instance and its substs: to show the actual compile-time values, in addition to
+ // the expression, leading to the const eval error.
+ let instance = &key.value.instance;
+ if !instance.substs.is_empty() {
+ let instance = with_no_trimmed_paths!(instance.to_string());
+ let msg = format!("evaluation of `{}` failed", instance);
+ Cow::from(msg)
} else {
- // If the current item has generics, we'd like to enrich the message with the
- // instance and its substs: to show the actual compile-time values, in addition to
- // the expression, leading to the const eval error.
- let instance = &key.value.instance;
- if !instance.substs.is_empty() {
- let instance = with_no_trimmed_paths!(instance.to_string());
- let msg = format!("evaluation of `{}` failed", instance);
- Cow::from(msg)
- } else {
- Cow::from("evaluation of constant value failed")
- }
- };
+ Cow::from("evaluation of constant value failed")
+ }
+ };
- Err(err.report_as_error(ecx.tcx.at(err.span), &msg))
- } else {
- let hir_id = tcx.hir().local_def_id_to_hir_id(def.as_local().unwrap().did);
- Err(err.report_as_lint(
- tcx.at(tcx.def_span(def.did)),
- "any use of this value will cause an error",
- hir_id,
- Some(err.span),
- ))
- }
+ Err(err.report_as_error(ecx.tcx.at(err.span), &msg))
}
Ok(mplace) => {
// Since evaluation had no errors, validate the resulting constant.
diff --git a/compiler/rustc_const_eval/src/const_eval/machine.rs b/compiler/rustc_const_eval/src/const_eval/machine.rs
index fc2e6652a..35d58d2f6 100644
--- a/compiler/rustc_const_eval/src/const_eval/machine.rs
+++ b/compiler/rustc_const_eval/src/const_eval/machine.rs
@@ -2,10 +2,10 @@ use rustc_hir::def::DefKind;
use rustc_middle::mir;
use rustc_middle::ty::{self, Ty, TyCtxt};
use std::borrow::Borrow;
-use std::collections::hash_map::Entry;
use std::hash::Hash;
-use rustc_data_structures::fx::FxHashMap;
+use rustc_data_structures::fx::FxIndexMap;
+use rustc_data_structures::fx::IndexEntry;
use std::fmt;
use rustc_ast::Mutability;
@@ -35,21 +35,7 @@ impl<'mir, 'tcx> InterpCx<'mir, 'tcx, CompileTimeInterpreter<'mir, 'tcx>> {
// All `#[rustc_do_not_const_check]` functions should be hooked here.
let def_id = instance.def_id();
- if Some(def_id) == self.tcx.lang_items().const_eval_select() {
- // redirect to const_eval_select_ct
- if let Some(const_eval_select) = self.tcx.lang_items().const_eval_select_ct() {
- return Ok(Some(
- ty::Instance::resolve(
- *self.tcx,
- ty::ParamEnv::reveal_all(),
- const_eval_select,
- instance.substs,
- )
- .unwrap()
- .unwrap(),
- ));
- }
- } else if Some(def_id) == self.tcx.lang_items().panic_display()
+ if Some(def_id) == self.tcx.lang_items().panic_display()
|| Some(def_id) == self.tcx.lang_items().begin_panic_fn()
{
// &str or &&str
@@ -89,10 +75,10 @@ pub struct CompileTimeInterpreter<'mir, 'tcx> {
/// exhaustion error.
///
/// Setting this to `0` disables the limit and allows the interpreter to run forever.
- pub steps_remaining: usize,
+ pub(super) steps_remaining: usize,
/// The virtual call stack.
- pub(crate) stack: Vec<Frame<'mir, 'tcx, AllocId, ()>>,
+ pub(super) stack: Vec<Frame<'mir, 'tcx, AllocId, ()>>,
/// We need to make sure consts never point to anything mutable, even recursively. That is
/// relied on for pattern matching on consts with references.
@@ -101,30 +87,38 @@ pub struct CompileTimeInterpreter<'mir, 'tcx> {
/// * Pointers to allocations inside of statics can never leak outside, to a non-static global.
/// This boolean here controls the second part.
pub(super) can_access_statics: bool,
+
+ /// Whether to check alignment during evaluation.
+ pub(super) check_alignment: bool,
}
impl<'mir, 'tcx> CompileTimeInterpreter<'mir, 'tcx> {
- pub(crate) fn new(const_eval_limit: Limit, can_access_statics: bool) -> Self {
+ pub(crate) fn new(
+ const_eval_limit: Limit,
+ can_access_statics: bool,
+ check_alignment: bool,
+ ) -> Self {
CompileTimeInterpreter {
steps_remaining: const_eval_limit.0,
stack: Vec::new(),
can_access_statics,
+ check_alignment,
}
}
}
-impl<K: Hash + Eq, V> interpret::AllocMap<K, V> for FxHashMap<K, V> {
+impl<K: Hash + Eq, V> interpret::AllocMap<K, V> for FxIndexMap<K, V> {
#[inline(always)]
fn contains_key<Q: ?Sized + Hash + Eq>(&mut self, k: &Q) -> bool
where
K: Borrow<Q>,
{
- FxHashMap::contains_key(self, k)
+ FxIndexMap::contains_key(self, k)
}
#[inline(always)]
fn insert(&mut self, k: K, v: V) -> Option<V> {
- FxHashMap::insert(self, k, v)
+ FxIndexMap::insert(self, k, v)
}
#[inline(always)]
@@ -132,7 +126,7 @@ impl<K: Hash + Eq, V> interpret::AllocMap<K, V> for FxHashMap<K, V> {
where
K: Borrow<Q>,
{
- FxHashMap::remove(self, k)
+ FxIndexMap::remove(self, k)
}
#[inline(always)]
@@ -154,8 +148,8 @@ impl<K: Hash + Eq, V> interpret::AllocMap<K, V> for FxHashMap<K, V> {
#[inline(always)]
fn get_mut_or<E>(&mut self, k: K, vacant: impl FnOnce() -> Result<V, E>) -> Result<&mut V, E> {
match self.entry(k) {
- Entry::Occupied(e) => Ok(e.into_mut()),
- Entry::Vacant(e) => {
+ IndexEntry::Occupied(e) => Ok(e.into_mut()),
+ IndexEntry::Vacant(e) => {
let v = vacant()?;
Ok(e.insert(v))
}
@@ -197,34 +191,35 @@ impl interpret::MayLeak for ! {
}
impl<'mir, 'tcx: 'mir> CompileTimeEvalContext<'mir, 'tcx> {
- fn guaranteed_eq(&mut self, a: Scalar, b: Scalar) -> InterpResult<'tcx, bool> {
+ /// See documentation on the `ptr_guaranteed_cmp` intrinsic.
+ fn guaranteed_cmp(&mut self, a: Scalar, b: Scalar) -> InterpResult<'tcx, u8> {
Ok(match (a, b) {
// Comparisons between integers are always known.
- (Scalar::Int { .. }, Scalar::Int { .. }) => a == b,
- // Equality with integers can never be known for sure.
- (Scalar::Int { .. }, Scalar::Ptr(..)) | (Scalar::Ptr(..), Scalar::Int { .. }) => false,
- // FIXME: return `true` for when both sides are the same pointer, *except* that
- // some things (like functions and vtables) do not have stable addresses
- // so we need to be careful around them (see e.g. #73722).
- (Scalar::Ptr(..), Scalar::Ptr(..)) => false,
- })
- }
-
- fn guaranteed_ne(&mut self, a: Scalar, b: Scalar) -> InterpResult<'tcx, bool> {
- Ok(match (a, b) {
- // Comparisons between integers are always known.
- (Scalar::Int(_), Scalar::Int(_)) => a != b,
+ (Scalar::Int { .. }, Scalar::Int { .. }) => {
+ if a == b {
+ 1
+ } else {
+ 0
+ }
+ }
// Comparisons of abstract pointers with null pointers are known if the pointer
// is in bounds, because if they are in bounds, the pointer can't be null.
// Inequality with integers other than null can never be known for sure.
(Scalar::Int(int), ptr @ Scalar::Ptr(..))
- | (ptr @ Scalar::Ptr(..), Scalar::Int(int)) => {
- int.is_null() && !self.scalar_may_be_null(ptr)?
+ | (ptr @ Scalar::Ptr(..), Scalar::Int(int))
+ if int.is_null() && !self.scalar_may_be_null(ptr)? =>
+ {
+ 0
}
- // FIXME: return `true` for at least some comparisons where we can reliably
+ // Equality with integers can never be known for sure.
+ (Scalar::Int { .. }, Scalar::Ptr(..)) | (Scalar::Ptr(..), Scalar::Int { .. }) => 2,
+ // FIXME: return a `1` for when both sides are the same pointer, *except* that
+ // some things (like functions and vtables) do not have stable addresses
+ // so we need to be careful around them (see e.g. #73722).
+ // FIXME: return `0` for at least some comparisons where we can reliably
// determine the result of runtime inequality tests at compile-time.
// Examples include comparison of addresses in different static items.
- (Scalar::Ptr(..), Scalar::Ptr(..)) => false,
+ (Scalar::Ptr(..), Scalar::Ptr(..)) => 2,
})
}
}
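The two boolean intrinsics collapse into one tri-state query: `guaranteed_cmp` answers 1 for definitely equal, 0 for definitely unequal, and 2 when the compiler refuses to decide. The sketch below models only the integer/pointer split with toy scalars; the refinement that still answers 0 for an in-bounds pointer compared against null is deliberately omitted.

// Toy scalars standing in for the interpreter's Scalar type.
#[derive(Clone, Copy, PartialEq)]
enum Scalar {
    Int(u128),
    Ptr { alloc_id: u32, offset: u64 },
}

// 1 = guaranteed equal, 0 = guaranteed unequal, 2 = unknown at compile time.
fn guaranteed_cmp(a: Scalar, b: Scalar) -> u8 {
    match (a, b) {
        // Integer comparisons are always known.
        (Scalar::Int(x), Scalar::Int(y)) => u8::from(x == y),
        // Everything involving a pointer stays conservative in this sketch.
        _ => 2,
    }
}

fn main() {
    assert_eq!(guaranteed_cmp(Scalar::Int(1), Scalar::Int(1)), 1);
    assert_eq!(guaranteed_cmp(Scalar::Int(1), Scalar::Int(2)), 0);
    assert_eq!(guaranteed_cmp(Scalar::Int(0), Scalar::Ptr { alloc_id: 0, offset: 0 }), 2);
}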
@@ -236,6 +231,16 @@ impl<'mir, 'tcx> interpret::Machine<'mir, 'tcx> for CompileTimeInterpreter<'mir,
const PANIC_ON_ALLOC_FAIL: bool = false; // will be raised as a proper error
+ #[inline(always)]
+ fn enforce_alignment(ecx: &InterpCx<'mir, 'tcx, Self>) -> bool {
+ ecx.machine.check_alignment
+ }
+
+ #[inline(always)]
+ fn enforce_validity(ecx: &InterpCx<'mir, 'tcx, Self>) -> bool {
+ ecx.tcx.sess.opts.unstable_opts.extra_const_ub_checks
+ }
+
fn load_mir(
ecx: &InterpCx<'mir, 'tcx, Self>,
instance: ty::InstanceDef<'tcx>,
@@ -251,9 +256,10 @@ impl<'mir, 'tcx> interpret::Machine<'mir, 'tcx> for CompileTimeInterpreter<'mir,
);
throw_inval!(AlreadyReported(guar));
} else {
+ // `find_mir_or_eval_fn` checks that this is a const fn before even calling us,
+ // so this should be unreachable.
let path = ecx.tcx.def_path_str(def.did);
- Err(ConstEvalErrKind::NeedsRfc(format!("calling extern function `{}`", path))
- .into())
+ bug!("trying to call extern function `{path}` at compile-time");
}
}
_ => Ok(ecx.tcx.instance_mir(instance)),
@@ -321,22 +327,14 @@ impl<'mir, 'tcx> interpret::Machine<'mir, 'tcx> for CompileTimeInterpreter<'mir,
// CTFE-specific intrinsics.
let Some(ret) = target else {
- return Err(ConstEvalErrKind::NeedsRfc(format!(
- "calling intrinsic `{}`",
- intrinsic_name
- ))
- .into());
+ throw_unsup_format!("intrinsic `{intrinsic_name}` is not supported at compile-time");
};
match intrinsic_name {
- sym::ptr_guaranteed_eq | sym::ptr_guaranteed_ne => {
- let a = ecx.read_immediate(&args[0])?.to_scalar()?;
- let b = ecx.read_immediate(&args[1])?.to_scalar()?;
- let cmp = if intrinsic_name == sym::ptr_guaranteed_eq {
- ecx.guaranteed_eq(a, b)?
- } else {
- ecx.guaranteed_ne(a, b)?
- };
- ecx.write_scalar(Scalar::from_bool(cmp), dest)?;
+ sym::ptr_guaranteed_cmp => {
+ let a = ecx.read_scalar(&args[0])?;
+ let b = ecx.read_scalar(&args[1])?;
+ let cmp = ecx.guaranteed_cmp(a, b)?;
+ ecx.write_scalar(Scalar::from_u8(cmp), dest)?;
}
sym::const_allocate => {
let size = ecx.read_scalar(&args[0])?.to_machine_usize(ecx)?;
@@ -382,11 +380,9 @@ impl<'mir, 'tcx> interpret::Machine<'mir, 'tcx> for CompileTimeInterpreter<'mir,
}
}
_ => {
- return Err(ConstEvalErrKind::NeedsRfc(format!(
- "calling intrinsic `{}`",
- intrinsic_name
- ))
- .into());
+ throw_unsup_format!(
+ "intrinsic `{intrinsic_name}` is not supported at compile-time"
+ );
}
}
@@ -429,7 +425,7 @@ impl<'mir, 'tcx> interpret::Machine<'mir, 'tcx> for CompileTimeInterpreter<'mir,
_left: &ImmTy<'tcx>,
_right: &ImmTy<'tcx>,
) -> InterpResult<'tcx, (Scalar, bool, Ty<'tcx>)> {
- Err(ConstEvalErrKind::NeedsRfc("pointer arithmetic or comparison".to_string()).into())
+ throw_unsup_format!("pointer arithmetic or comparison is not supported at compile-time");
}
fn before_terminator(ecx: &mut InterpCx<'mir, 'tcx, Self>) -> InterpResult<'tcx> {
@@ -451,7 +447,8 @@ impl<'mir, 'tcx> interpret::Machine<'mir, 'tcx> for CompileTimeInterpreter<'mir,
_ecx: &mut InterpCx<'mir, 'tcx, Self>,
_ptr: Pointer<AllocId>,
) -> InterpResult<'tcx> {
- Err(ConstEvalErrKind::NeedsRfc("exposing pointers".to_string()).into())
+ // This is only reachable with -Zunleash-the-miri-inside-of-you.
+ throw_unsup_format!("exposing pointers is not possible at compile-time")
}
#[inline(always)]
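Much of this file's churn is mechanical, but the `FxHashMap` → `FxIndexMap` switch in the `AllocMap` impl above has a practical effect: an insertion-ordered map makes iteration order, and anything derived from it, reproducible across runs. `FxIndexMap` wraps `indexmap::IndexMap` with a fast hasher; the sketch below uses the plain `indexmap` crate (assumed as a dependency) to show the guarantee:

use indexmap::IndexMap;

fn main() {
    let mut allocs: IndexMap<u32, &str> = IndexMap::new();
    allocs.insert(7, "alloc 7");
    allocs.insert(1, "alloc 1");
    allocs.insert(42, "alloc 42");

    // Iteration follows insertion order on every run: 7, 1, 42.
    let order: Vec<u32> = allocs.keys().copied().collect();
    assert_eq!(order, vec![7, 1, 42]);
}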
diff --git a/compiler/rustc_const_eval/src/const_eval/mod.rs b/compiler/rustc_const_eval/src/const_eval/mod.rs
index 948c33494..1c33e7845 100644
--- a/compiler/rustc_const_eval/src/const_eval/mod.rs
+++ b/compiler/rustc_const_eval/src/const_eval/mod.rs
@@ -1,16 +1,16 @@
// Not in interpret to make sure we do not use private implementation details
+use crate::errors::MaxNumNodesInConstErr;
+use crate::interpret::{
+ intern_const_alloc_recursive, ConstValue, InternKind, InterpCx, InterpResult, MemPlaceMeta,
+ Scalar,
+};
use rustc_hir::Mutability;
use rustc_middle::mir;
use rustc_middle::mir::interpret::{EvalToValTreeResult, GlobalId};
use rustc_middle::ty::{self, TyCtxt};
use rustc_span::{source_map::DUMMY_SP, symbol::Symbol};
-use crate::interpret::{
- intern_const_alloc_recursive, ConstValue, InternKind, InterpCx, InterpResult, MemPlaceMeta,
- Scalar,
-};
-
mod error;
mod eval_queries;
mod fn_queries;
@@ -72,12 +72,17 @@ pub(crate) fn eval_to_valtree<'tcx>(
Ok(valtree) => Ok(Some(valtree)),
Err(err) => {
let did = cid.instance.def_id();
- let s = cid.display(tcx);
+ let global_const_id = cid.display(tcx);
match err {
ValTreeCreationError::NodesOverflow => {
- let msg = format!("maximum number of nodes exceeded in constant {}", &s);
+ let msg = format!(
+ "maximum number of nodes exceeded in constant {}",
+ &global_const_id
+ );
let mut diag = match tcx.hir().span_if_local(did) {
- Some(span) => tcx.sess.struct_span_err(span, &msg),
+ Some(span) => {
+ tcx.sess.create_err(MaxNumNodesInConstErr { span, global_const_id })
+ }
None => tcx.sess.struct_err(&msg),
};
diag.emit();
@@ -95,10 +100,10 @@ pub(crate) fn try_destructure_mir_constant<'tcx>(
tcx: TyCtxt<'tcx>,
param_env: ty::ParamEnv<'tcx>,
val: mir::ConstantKind<'tcx>,
-) -> InterpResult<'tcx, mir::DestructuredMirConstant<'tcx>> {
+) -> InterpResult<'tcx, mir::DestructuredConstant<'tcx>> {
trace!("destructure_mir_constant: {:?}", val);
let ecx = mk_eval_cx(tcx, DUMMY_SP, param_env, false);
- let op = ecx.mir_const_to_op(&val, None)?;
+ let op = ecx.const_to_op(&val, None)?;
// We go to `usize` as we cannot allocate anything bigger anyway.
let (field_count, variant, down) = match val.ty().kind() {
@@ -124,7 +129,7 @@ pub(crate) fn try_destructure_mir_constant<'tcx>(
.collect::<InterpResult<'tcx, Vec<_>>>()?;
let fields = tcx.arena.alloc_from_iter(fields_iter);
- Ok(mir::DestructuredMirConstant { variant, fields })
+ Ok(mir::DestructuredConstant { variant, fields })
}
#[instrument(skip(tcx), level = "debug")]
@@ -134,7 +139,7 @@ pub(crate) fn deref_mir_constant<'tcx>(
val: mir::ConstantKind<'tcx>,
) -> mir::ConstantKind<'tcx> {
let ecx = mk_eval_cx(tcx, DUMMY_SP, param_env, false);
- let op = ecx.mir_const_to_op(&val, None).unwrap();
+ let op = ecx.const_to_op(&val, None).unwrap();
let mplace = ecx.deref_operand(&op).unwrap();
if let Some(alloc_id) = mplace.ptr.provenance {
assert_eq!(
diff --git a/compiler/rustc_const_eval/src/const_eval/valtrees.rs b/compiler/rustc_const_eval/src/const_eval/valtrees.rs
index 8fff4571d..f4da11883 100644
--- a/compiler/rustc_const_eval/src/const_eval/valtrees.rs
+++ b/compiler/rustc_const_eval/src/const_eval/valtrees.rs
@@ -3,7 +3,7 @@ use super::machine::CompileTimeEvalContext;
use super::{ValTreeCreationError, ValTreeCreationResult, VALTREE_MAX_NODES};
use crate::interpret::{
intern_const_alloc_recursive, ConstValue, ImmTy, Immediate, InternKind, MemPlaceMeta,
- MemoryKind, PlaceTy, Scalar, ScalarMaybeUninit,
+ MemoryKind, PlaceTy, Scalar,
};
use crate::interpret::{MPlaceTy, Value};
use rustc_middle::ty::{self, ScalarInt, Ty, TyCtxt};
@@ -90,14 +90,14 @@ pub(crate) fn const_to_valtree_inner<'tcx>(
let Ok(val) = ecx.read_immediate(&place.into()) else {
return Err(ValTreeCreationError::Other);
};
- let val = val.to_scalar().unwrap();
+ let val = val.to_scalar();
*num_nodes += 1;
Ok(ty::ValTree::Leaf(val.assert_int()))
}
// Raw pointers are not allowed in type level constants, as we cannot properly test them for
- // equality at compile-time (see `ptr_guaranteed_eq`/`_ne`).
+ // equality at compile-time (see `ptr_guaranteed_cmp`).
// Technically we could allow function pointers (represented as `ty::Instance`), but this is not guaranteed to
// agree with runtime equality tests.
ty::FnPtr(_) | ty::RawPtr(_) => Err(ValTreeCreationError::NonSupportedType),
@@ -204,7 +204,7 @@ fn get_info_on_unsized_field<'tcx>(
(unsized_inner_ty, num_elems)
}
-#[instrument(skip(ecx), level = "debug")]
+#[instrument(skip(ecx), level = "debug", ret)]
fn create_pointee_place<'tcx>(
ecx: &mut CompileTimeEvalContext<'tcx, 'tcx>,
ty: Ty<'tcx>,
@@ -212,7 +212,7 @@ fn create_pointee_place<'tcx>(
) -> MPlaceTy<'tcx> {
let tcx = ecx.tcx.tcx;
- if !ty.is_sized(ecx.tcx, ty::ParamEnv::empty()) {
+ if !ty.is_sized(*ecx.tcx, ty::ParamEnv::empty()) {
// We need to create `Allocation`s for custom DSTs
let (unsized_inner_ty, num_elems) = get_info_on_unsized_field(ty, valtree, tcx);
@@ -237,14 +237,11 @@ fn create_pointee_place<'tcx>(
let ptr = ecx.allocate_ptr(size, align, MemoryKind::Stack).unwrap();
debug!(?ptr);
- let place = MPlaceTy::from_aligned_ptr_with_meta(
+ MPlaceTy::from_aligned_ptr_with_meta(
ptr.into(),
layout,
MemPlaceMeta::Meta(Scalar::from_machine_usize(num_elems as u64, &tcx)),
- );
- debug!(?place);
-
- place
+ )
} else {
create_mplace_from_layout(ecx, ty)
}
@@ -253,7 +250,7 @@ fn create_pointee_place<'tcx>(
/// Converts a `ValTree` to a `ConstValue`, which is needed after mir
/// construction has finished.
// FIXME Merge `valtree_to_const_value` and `valtree_into_mplace` into one function
-#[instrument(skip(tcx), level = "debug")]
+#[instrument(skip(tcx), level = "debug", ret)]
pub fn valtree_to_const_value<'tcx>(
tcx: TyCtxt<'tcx>,
param_env_ty: ty::ParamEnvAnd<'tcx, Ty<'tcx>>,
@@ -294,7 +291,7 @@ pub fn valtree_to_const_value<'tcx>(
dump_place(&ecx, place.into());
intern_const_alloc_recursive(&mut ecx, InternKind::Constant, &place).unwrap();
- let const_val = match ty.kind() {
+ match ty.kind() {
ty::Ref(_, _, _) => {
let ref_place = place.to_ref(&tcx);
let imm =
@@ -303,10 +300,7 @@ pub fn valtree_to_const_value<'tcx>(
op_to_const(&ecx, &imm.into())
}
_ => op_to_const(&ecx, &place.into()),
- };
- debug!(?const_val);
-
- const_val
+ }
}
ty::Never
| ty::Error(_)
@@ -349,11 +343,7 @@ fn valtree_into_mplace<'tcx>(
ty::Bool | ty::Int(_) | ty::Uint(_) | ty::Float(_) | ty::Char => {
let scalar_int = valtree.unwrap_leaf();
debug!("writing trivial valtree {:?} to place {:?}", scalar_int, place);
- ecx.write_immediate(
- Immediate::Scalar(ScalarMaybeUninit::Scalar(scalar_int.into())),
- &place.into(),
- )
- .unwrap();
+ ecx.write_immediate(Immediate::Scalar(scalar_int.into()), &place.into()).unwrap();
}
ty::Ref(_, inner_ty, _) => {
let mut pointee_place = create_pointee_place(ecx, *inner_ty, valtree);
@@ -366,11 +356,10 @@ fn valtree_into_mplace<'tcx>(
let imm = match inner_ty.kind() {
ty::Slice(_) | ty::Str => {
let len = valtree.unwrap_branch().len();
- let len_scalar =
- ScalarMaybeUninit::Scalar(Scalar::from_machine_usize(len as u64, &tcx));
+ let len_scalar = Scalar::from_machine_usize(len as u64, &tcx);
Immediate::ScalarPair(
- ScalarMaybeUninit::from_maybe_pointer((*pointee_place).ptr, &tcx),
+ Scalar::from_maybe_pointer((*pointee_place).ptr, &tcx),
len_scalar,
)
}
@@ -409,7 +398,7 @@ fn valtree_into_mplace<'tcx>(
let mut place_inner = match ty.kind() {
ty::Str | ty::Slice(_) => ecx.mplace_index(&place, i as u64).unwrap(),
- _ if !ty.is_sized(ecx.tcx, ty::ParamEnv::empty())
+ _ if !ty.is_sized(*ecx.tcx, ty::ParamEnv::empty())
&& i == branches.len() - 1 =>
{
// Note: For custom DSTs we need to manually process the last unsized field.
diff --git a/compiler/rustc_const_eval/src/errors.rs b/compiler/rustc_const_eval/src/errors.rs
index a463fe7b9..4b0550767 100644
--- a/compiler/rustc_const_eval/src/errors.rs
+++ b/compiler/rustc_const_eval/src/errors.rs
@@ -1,89 +1,196 @@
use rustc_hir::ConstContext;
-use rustc_macros::SessionDiagnostic;
+use rustc_macros::Diagnostic;
use rustc_span::Span;
-#[derive(SessionDiagnostic)]
-#[error(const_eval::unstable_in_stable)]
+#[derive(Diagnostic)]
+#[diag(const_eval_unstable_in_stable)]
pub(crate) struct UnstableInStable {
pub gate: String,
#[primary_span]
pub span: Span,
#[suggestion(
- const_eval::unstable_sugg,
+ unstable_sugg,
code = "#[rustc_const_unstable(feature = \"...\", issue = \"...\")]\n",
applicability = "has-placeholders"
)]
#[suggestion(
- const_eval::bypass_sugg,
+ bypass_sugg,
code = "#[rustc_allow_const_fn_unstable({gate})]\n",
applicability = "has-placeholders"
)]
pub attr_span: Span,
}
-#[derive(SessionDiagnostic)]
-#[error(const_eval::thread_local_access, code = "E0625")]
+#[derive(Diagnostic)]
+#[diag(const_eval_thread_local_access, code = "E0625")]
pub(crate) struct NonConstOpErr {
#[primary_span]
pub span: Span,
}
-#[derive(SessionDiagnostic)]
-#[error(const_eval::static_access, code = "E0013")]
+#[derive(Diagnostic)]
+#[diag(const_eval_static_access, code = "E0013")]
#[help]
pub(crate) struct StaticAccessErr {
#[primary_span]
pub span: Span,
pub kind: ConstContext,
- #[note(const_eval::teach_note)]
- #[help(const_eval::teach_help)]
+ #[note(teach_note)]
+ #[help(teach_help)]
pub teach: Option<()>,
}
-#[derive(SessionDiagnostic)]
-#[error(const_eval::raw_ptr_to_int)]
+#[derive(Diagnostic)]
+#[diag(const_eval_raw_ptr_to_int)]
#[note]
-#[note(const_eval::note2)]
+#[note(note2)]
pub(crate) struct RawPtrToIntErr {
#[primary_span]
pub span: Span,
}
-#[derive(SessionDiagnostic)]
-#[error(const_eval::raw_ptr_comparison)]
+#[derive(Diagnostic)]
+#[diag(const_eval_raw_ptr_comparison)]
#[note]
pub(crate) struct RawPtrComparisonErr {
#[primary_span]
pub span: Span,
}
-#[derive(SessionDiagnostic)]
-#[error(const_eval::panic_non_str)]
+#[derive(Diagnostic)]
+#[diag(const_eval_panic_non_str)]
pub(crate) struct PanicNonStrErr {
#[primary_span]
pub span: Span,
}
-#[derive(SessionDiagnostic)]
-#[error(const_eval::mut_deref, code = "E0658")]
+#[derive(Diagnostic)]
+#[diag(const_eval_mut_deref, code = "E0658")]
pub(crate) struct MutDerefErr {
#[primary_span]
pub span: Span,
pub kind: ConstContext,
}
-#[derive(SessionDiagnostic)]
-#[error(const_eval::transient_mut_borrow, code = "E0658")]
+#[derive(Diagnostic)]
+#[diag(const_eval_transient_mut_borrow, code = "E0658")]
pub(crate) struct TransientMutBorrowErr {
#[primary_span]
pub span: Span,
pub kind: ConstContext,
}
-#[derive(SessionDiagnostic)]
-#[error(const_eval::transient_mut_borrow_raw, code = "E0658")]
+#[derive(Diagnostic)]
+#[diag(const_eval_transient_mut_borrow_raw, code = "E0658")]
pub(crate) struct TransientMutBorrowErrRaw {
#[primary_span]
pub span: Span,
pub kind: ConstContext,
}
+
+#[derive(Diagnostic)]
+#[diag(const_eval_max_num_nodes_in_const)]
+pub(crate) struct MaxNumNodesInConstErr {
+ #[primary_span]
+ pub span: Span,
+ pub global_const_id: String,
+}
+
+#[derive(Diagnostic)]
+#[diag(const_eval_unallowed_fn_pointer_call)]
+pub(crate) struct UnallowedFnPointerCall {
+ #[primary_span]
+ pub span: Span,
+ pub kind: ConstContext,
+}
+
+#[derive(Diagnostic)]
+#[diag(const_eval_unstable_const_fn)]
+pub(crate) struct UnstableConstFn {
+ #[primary_span]
+ pub span: Span,
+ pub def_path: String,
+}
+
+#[derive(Diagnostic)]
+#[diag(const_eval_unallowed_mutable_refs, code = "E0764")]
+pub(crate) struct UnallowedMutableRefs {
+ #[primary_span]
+ pub span: Span,
+ pub kind: ConstContext,
+ #[note(teach_note)]
+ pub teach: Option<()>,
+}
+
+#[derive(Diagnostic)]
+#[diag(const_eval_unallowed_mutable_refs_raw, code = "E0764")]
+pub(crate) struct UnallowedMutableRefsRaw {
+ #[primary_span]
+ pub span: Span,
+ pub kind: ConstContext,
+ #[note(teach_note)]
+ pub teach: Option<()>,
+}
+#[derive(Diagnostic)]
+#[diag(const_eval_non_const_fmt_macro_call, code = "E0015")]
+pub(crate) struct NonConstFmtMacroCall {
+ #[primary_span]
+ pub span: Span,
+ pub kind: ConstContext,
+}
+
+#[derive(Diagnostic)]
+#[diag(const_eval_non_const_fn_call, code = "E0015")]
+pub(crate) struct NonConstFnCall {
+ #[primary_span]
+ pub span: Span,
+ pub def_path_str: String,
+ pub kind: ConstContext,
+}
+
+#[derive(Diagnostic)]
+#[diag(const_eval_unallowed_op_in_const_context)]
+pub(crate) struct UnallowedOpInConstContext {
+ #[primary_span]
+ pub span: Span,
+ pub msg: String,
+}
+
+#[derive(Diagnostic)]
+#[diag(const_eval_unallowed_heap_allocations, code = "E0010")]
+pub(crate) struct UnallowedHeapAllocations {
+ #[primary_span]
+ #[label]
+ pub span: Span,
+ pub kind: ConstContext,
+ #[note(teach_note)]
+ pub teach: Option<()>,
+}
+
+#[derive(Diagnostic)]
+#[diag(const_eval_unallowed_inline_asm, code = "E0015")]
+pub(crate) struct UnallowedInlineAsm {
+ #[primary_span]
+ pub span: Span,
+ pub kind: ConstContext,
+}
+
+#[derive(Diagnostic)]
+#[diag(const_eval_interior_mutable_data_refer, code = "E0492")]
+pub(crate) struct InteriorMutableDataRefer {
+ #[primary_span]
+ #[label]
+ pub span: Span,
+ #[help]
+ pub opt_help: Option<()>,
+ pub kind: ConstContext,
+ #[note(teach_note)]
+ pub teach: Option<()>,
+}
+
+#[derive(Diagnostic)]
+#[diag(const_eval_interior_mutability_borrow)]
+pub(crate) struct InteriorMutabilityBorrow {
+ #[primary_span]
+ pub span: Span,
+}
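These structs all follow the diagnostic-struct pattern: each error is a plain struct whose fields carry the spans and interpolated values, the `#[diag(...)]` slug names the translatable message, and call sites construct the struct and hand it to the session, as `eval_to_valtree` now does with `tcx.sess.create_err(MaxNumNodesInConstErr { span, global_const_id })`. A toy model of what the derive provides, with invented stand-ins for the rustc diagnostic machinery:

// Stand-ins for Span and the session; only the shape of the pattern matters.
#[derive(Debug, Clone, Copy)]
struct Span {
    lo: u32,
    hi: u32,
}

trait IntoDiagnostic {
    fn slug(&self) -> &'static str;
    fn primary_span(&self) -> Span;
    fn args(&self) -> Vec<(&'static str, String)>;
}

struct MaxNumNodesInConstErr {
    span: Span,
    global_const_id: String,
}

impl IntoDiagnostic for MaxNumNodesInConstErr {
    fn slug(&self) -> &'static str {
        "const_eval_max_num_nodes_in_const"
    }
    fn primary_span(&self) -> Span {
        self.span
    }
    fn args(&self) -> Vec<(&'static str, String)> {
        vec![("global_const_id", self.global_const_id.clone())]
    }
}

fn create_err(diag: &dyn IntoDiagnostic) {
    // A real session would look the slug up in a translation bundle and interpolate the args.
    println!("{:?}: {} {:?}", diag.primary_span(), diag.slug(), diag.args());
}

fn main() {
    create_err(&MaxNumNodesInConstErr {
        span: Span { lo: 0, hi: 10 },
        global_const_id: "FOO".to_string(),
    });
}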
diff --git a/compiler/rustc_const_eval/src/interpret/cast.rs b/compiler/rustc_const_eval/src/interpret/cast.rs
index c97c31eb9..269ae15d4 100644
--- a/compiler/rustc_const_eval/src/interpret/cast.rs
+++ b/compiler/rustc_const_eval/src/interpret/cast.rs
@@ -43,9 +43,21 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
self.write_immediate(res, dest)?;
}
- Misc => {
+ IntToInt | IntToFloat => {
let src = self.read_immediate(src)?;
- let res = self.misc_cast(&src, cast_ty)?;
+ let res = self.int_to_int_or_float(&src, cast_ty)?;
+ self.write_immediate(res, dest)?;
+ }
+
+ FloatToFloat | FloatToInt => {
+ let src = self.read_immediate(src)?;
+ let res = self.float_to_float_or_int(&src, cast_ty)?;
+ self.write_immediate(res, dest)?;
+ }
+
+ FnPtrToPtr | PtrToPtr => {
+ let src = self.read_immediate(&src)?;
+ let res = self.ptr_to_ptr(&src, cast_ty)?;
self.write_immediate(res, dest)?;
}
@@ -108,67 +120,88 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
_ => span_bug!(self.cur_span(), "closure fn pointer on {:?}", src.layout.ty),
}
}
+
+ DynStar => {
+ if let ty::Dynamic(data, _, ty::DynStar) = cast_ty.kind() {
+ // Initial cast from sized to dyn trait
+ let vtable = self.get_vtable_ptr(src.layout.ty, data.principal())?;
+ let vtable = Scalar::from_maybe_pointer(vtable, self);
+ let data = self.read_immediate(src)?.to_scalar();
+ let _assert_pointer_sized = data.to_pointer(self)?;
+ let val = Immediate::ScalarPair(data, vtable);
+ self.write_immediate(val, dest)?;
+ } else {
+ bug!()
+ }
+ }
}
Ok(())
}
- pub fn misc_cast(
- &mut self,
+ /// Handles 'IntToInt' and 'IntToFloat' casts.
+ pub fn int_to_int_or_float(
+ &self,
+ src: &ImmTy<'tcx, M::Provenance>,
+ cast_ty: Ty<'tcx>,
+ ) -> InterpResult<'tcx, Immediate<M::Provenance>> {
+ assert!(src.layout.ty.is_integral() || src.layout.ty.is_char() || src.layout.ty.is_bool());
+ assert!(cast_ty.is_floating_point() || cast_ty.is_integral() || cast_ty.is_char());
+
+ Ok(self.cast_from_int_like(src.to_scalar(), src.layout, cast_ty)?.into())
+ }
+
+ /// Handles 'FloatToFloat' and 'FloatToInt' casts.
+ pub fn float_to_float_or_int(
+ &self,
src: &ImmTy<'tcx, M::Provenance>,
cast_ty: Ty<'tcx>,
) -> InterpResult<'tcx, Immediate<M::Provenance>> {
use rustc_type_ir::sty::TyKind::*;
- trace!("Casting {:?}: {:?} to {:?}", *src, src.layout.ty, cast_ty);
match src.layout.ty.kind() {
// Floating point
Float(FloatTy::F32) => {
- return Ok(self.cast_from_float(src.to_scalar()?.to_f32()?, cast_ty).into());
+ return Ok(self.cast_from_float(src.to_scalar().to_f32()?, cast_ty).into());
}
Float(FloatTy::F64) => {
- return Ok(self.cast_from_float(src.to_scalar()?.to_f64()?, cast_ty).into());
+ return Ok(self.cast_from_float(src.to_scalar().to_f64()?, cast_ty).into());
+ }
+ _ => {
+ bug!("Can't cast 'Float' type into {:?}", cast_ty);
}
- // The rest is integer/pointer-"like", including fn ptr casts
- _ => assert!(
- src.layout.ty.is_bool()
- || src.layout.ty.is_char()
- || src.layout.ty.is_integral()
- || src.layout.ty.is_any_ptr(),
- "Unexpected cast from type {:?}",
- src.layout.ty
- ),
}
+ }
- // # First handle non-scalar source values.
-
+ /// Handles 'FnPtrToPtr' and 'PtrToPtr' casts.
+ pub fn ptr_to_ptr(
+ &self,
+ src: &ImmTy<'tcx, M::Provenance>,
+ cast_ty: Ty<'tcx>,
+ ) -> InterpResult<'tcx, Immediate<M::Provenance>> {
+ assert!(src.layout.ty.is_any_ptr());
+ assert!(cast_ty.is_unsafe_ptr());
// Handle casting any ptr to raw ptr (might be a fat ptr).
- if src.layout.ty.is_any_ptr() && cast_ty.is_unsafe_ptr() {
- let dest_layout = self.layout_of(cast_ty)?;
- if dest_layout.size == src.layout.size {
- // Thin or fat pointer that just hast the ptr kind of target type changed.
- return Ok(**src);
- } else {
- // Casting the metadata away from a fat ptr.
- assert_eq!(src.layout.size, 2 * self.pointer_size());
- assert_eq!(dest_layout.size, self.pointer_size());
- assert!(src.layout.ty.is_unsafe_ptr());
- return match **src {
- Immediate::ScalarPair(data, _) => Ok(data.check_init()?.into()),
- Immediate::Scalar(..) => span_bug!(
- self.cur_span(),
- "{:?} input to a fat-to-thin cast ({:?} -> {:?})",
- *src,
- src.layout.ty,
- cast_ty
- ),
- Immediate::Uninit => throw_ub!(InvalidUninitBytes(None)),
- };
- }
+ let dest_layout = self.layout_of(cast_ty)?;
+ if dest_layout.size == src.layout.size {
+ // Thin or fat pointer that just has the ptr kind of the target type changed.
+ return Ok(**src);
+ } else {
+ // Casting the metadata away from a fat ptr.
+ assert_eq!(src.layout.size, 2 * self.pointer_size());
+ assert_eq!(dest_layout.size, self.pointer_size());
+ assert!(src.layout.ty.is_unsafe_ptr());
+ return match **src {
+ Immediate::ScalarPair(data, _) => Ok(data.into()),
+ Immediate::Scalar(..) => span_bug!(
+ self.cur_span(),
+ "{:?} input to a fat-to-thin cast ({:?} -> {:?})",
+ *src,
+ src.layout.ty,
+ cast_ty
+ ),
+ Immediate::Uninit => throw_ub!(InvalidUninitBytes(None)),
+ };
}
-
- // # The remaining source values are scalar and "int-like".
- let scalar = src.to_scalar()?;
- Ok(self.cast_from_int_like(scalar, src.layout, cast_ty)?.into())
}
pub fn pointer_expose_address_cast(
@@ -179,7 +212,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
assert_matches!(src.layout.ty.kind(), ty::RawPtr(_) | ty::FnPtr(_));
assert!(cast_ty.is_integral());
- let scalar = src.to_scalar()?;
+ let scalar = src.to_scalar();
let ptr = scalar.to_pointer(self)?;
match ptr.into_pointer_or_addr() {
Ok(ptr) => M::expose_ptr(self, ptr)?,
@@ -189,7 +222,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
}
pub fn pointer_from_exposed_address_cast(
- &mut self,
+ &self,
src: &ImmTy<'tcx, M::Provenance>,
cast_ty: Ty<'tcx>,
) -> InterpResult<'tcx, Immediate<M::Provenance>> {
@@ -197,7 +230,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
assert_matches!(cast_ty.kind(), ty::RawPtr(_));
// First cast to usize.
- let scalar = src.to_scalar()?;
+ let scalar = src.to_scalar();
let addr = self.cast_from_int_like(scalar, src.layout, self.tcx.types.usize)?;
let addr = addr.to_machine_usize(self)?;
@@ -206,6 +239,8 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
Ok(Scalar::from_maybe_pointer(ptr, self).into())
}
+ /// Low-level cast helper function. This works directly on scalars and can take 'int-like' input
+ /// type (basically everything with a scalar layout) to int/float/char types.
pub fn cast_from_int_like(
&self,
scalar: Scalar<M::Provenance>, // input value (there is no ScalarTy so we separate data+layout)
@@ -245,6 +280,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
})
}
+ /// Low-level cast helper function. Converts an apfloat `f` into int or float types.
fn cast_from_float<F>(&self, f: F, dest_ty: Ty<'tcx>) -> Scalar<M::Provenance>
where
F: Float + Into<Scalar<M::Provenance>> + FloatConvert<Single> + FloatConvert<Double>,
@@ -291,14 +327,19 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
match (&src_pointee_ty.kind(), &dest_pointee_ty.kind()) {
(&ty::Array(_, length), &ty::Slice(_)) => {
- let ptr = self.read_immediate(src)?.to_scalar()?;
+ let ptr = self.read_scalar(src)?;
// u64 cast is from usize to u64, which is always good
let val =
Immediate::new_slice(ptr, length.eval_usize(*self.tcx, self.param_env), self);
self.write_immediate(val, dest)
}
(&ty::Dynamic(ref data_a, ..), &ty::Dynamic(ref data_b, ..)) => {
- let (old_data, old_vptr) = self.read_immediate(src)?.to_scalar_pair()?;
+ let val = self.read_immediate(src)?;
+ if data_a.principal() == data_b.principal() {
+ // A NOP cast that doesn't actually change anything, should be allowed even with mismatching vtables.
+ return self.write_immediate(*val, dest);
+ }
+ let (old_data, old_vptr) = val.to_scalar_pair();
let old_vptr = old_vptr.to_pointer(self)?;
let (ty, old_trait) = self.get_ptr_vtable(old_vptr)?;
if old_trait != data_a.principal() {
@@ -307,10 +348,10 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
let new_vptr = self.get_vtable_ptr(ty, data_b.principal())?;
self.write_immediate(Immediate::new_dyn_trait(old_data, new_vptr, self), dest)
}
- (_, &ty::Dynamic(ref data, _)) => {
+ (_, &ty::Dynamic(ref data, _, ty::Dyn)) => {
// Initial cast from sized to dyn trait
let vtable = self.get_vtable_ptr(src_pointee_ty, data.principal())?;
- let ptr = self.read_immediate(src)?.to_scalar()?;
+ let ptr = self.read_scalar(src)?;
let val = Immediate::new_dyn_trait(ptr, vtable, &*self.tcx);
self.write_immediate(val, dest)
}
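The catch-all `misc_cast` is split along the new `CastKind` variants (`IntToInt`/`IntToFloat`, `FloatToFloat`/`FloatToInt`, `FnPtrToPtr`/`PtrToPtr`, plus the new `DynStar` arm), so each helper can assert exactly which source and target types it accepts. At the surface level these are ordinary `as` casts; when they appear in constant initializers they are evaluated by exactly these interpreter paths. A small illustration:

// Each of these constant initializers exercises one of the split cast helpers
// during const evaluation (the pointer-to-pointer cases are analogous).
const INT_TO_INT: u8 = 300u16 as u8;       // IntToInt: wraps to 44
const INT_TO_FLOAT: f64 = 7u32 as f64;     // IntToFloat
const FLOAT_TO_INT: i32 = 3.9f32 as i32;   // FloatToInt: truncates to 3
const FLOAT_TO_FLOAT: f32 = 1.5f64 as f32; // FloatToFloat

fn main() {
    assert_eq!(INT_TO_INT, 44);
    assert_eq!(INT_TO_FLOAT, 7.0);
    assert_eq!(FLOAT_TO_INT, 3);
    assert_eq!(FLOAT_TO_FLOAT, 1.5);
}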
diff --git a/compiler/rustc_const_eval/src/interpret/eval_context.rs b/compiler/rustc_const_eval/src/interpret/eval_context.rs
index 150d6589b..a9063ad31 100644
--- a/compiler/rustc_const_eval/src/interpret/eval_context.rs
+++ b/compiler/rustc_const_eval/src/interpret/eval_context.rs
@@ -21,7 +21,7 @@ use rustc_target::abi::{call::FnAbi, Align, HasDataLayout, Size, TargetDataLayou
use super::{
AllocId, GlobalId, Immediate, InterpErrorInfo, InterpResult, MPlaceTy, Machine, MemPlace,
MemPlaceMeta, Memory, MemoryKind, Operand, Place, PlaceTy, PointerArithmetic, Provenance,
- Scalar, ScalarMaybeUninit, StackPopJump,
+ Scalar, StackPopJump,
};
use crate::transform::validate::equal_up_to_regions;
@@ -187,9 +187,6 @@ pub enum LocalValue<Prov: Provenance = AllocId> {
impl<'tcx, Prov: Provenance + 'static> LocalState<'tcx, Prov> {
/// Read the local's value or error if the local is not yet live or not live anymore.
- ///
- /// Note: This may only be invoked from the `Machine::access_local` hook and not from
- /// anywhere else. You may be invalidating machine invariants if you do!
#[inline]
pub fn access(&self) -> InterpResult<'tcx, &Operand<Prov>> {
match &self.value {
@@ -261,6 +258,9 @@ impl<'tcx> fmt::Display for FrameInfo<'tcx> {
{
write!(f, "inside closure")?;
} else {
+ // Note: this triggers a `good_path_bug` state, which means that if we ever get here
+ // we must emit a diagnostic. We should never display a `FrameInfo` unless we
+ // actually want to emit a warning or error to the user.
write!(f, "inside `{}`", self.instance)?;
}
if !self.span.is_dummy() {
@@ -468,7 +468,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
#[inline]
pub fn type_is_freeze(&self, ty: Ty<'tcx>) -> bool {
- ty.is_freeze(self.tcx, self.param_env)
+ ty.is_freeze(*self.tcx, self.param_env)
}
pub fn load_mir(
@@ -686,11 +686,10 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
self.stack_mut().push(frame);
// Make sure all the constants required by this frame evaluate successfully (post-monomorphization check).
- for const_ in &body.required_consts {
- let span = const_.span;
- let const_ =
- self.subst_from_current_frame_and_normalize_erasing_regions(const_.literal)?;
- self.mir_const_to_op(&const_, None).map_err(|err| {
+ for ct in &body.required_consts {
+ let span = ct.span;
+ let ct = self.subst_from_current_frame_and_normalize_erasing_regions(ct.literal)?;
+ self.const_to_op(&ct, None).map_err(|err| {
// If there was an error, set the span of the current frame to this constant.
// Avoiding doing this when evaluation succeeds.
self.frame_mut().loc = Err(span);
@@ -782,7 +781,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
assert_eq!(
unwinding,
match self.frame().loc {
- Ok(loc) => self.body().basic_blocks()[loc.block].is_cleanup,
+ Ok(loc) => self.body().basic_blocks[loc.block].is_cleanup,
Err(_) => true,
}
);
@@ -932,11 +931,13 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
}
#[must_use]
- pub fn generate_stacktrace(&self) -> Vec<FrameInfo<'tcx>> {
+ pub fn generate_stacktrace_from_stack(
+ stack: &[Frame<'mir, 'tcx, M::Provenance, M::FrameExtra>],
+ ) -> Vec<FrameInfo<'tcx>> {
let mut frames = Vec::new();
// This deliberately does *not* honor `requires_caller_location` since it is used for much
// more than just panics.
- for frame in self.stack().iter().rev() {
+ for frame in stack.iter().rev() {
let lint_root = frame.current_source_info().and_then(|source_info| {
match &frame.body.source_scopes[source_info.scope].local_data {
mir::ClearCrossCrate::Set(data) => Some(data.lint_root),
@@ -950,6 +951,11 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
trace!("generate stacktrace: {:#?}", frames);
frames
}
+
+ #[must_use]
+ pub fn generate_stacktrace(&self) -> Vec<FrameInfo<'tcx>> {
+ Self::generate_stacktrace_from_stack(self.stack())
+ }
}
#[doc(hidden)]
@@ -991,16 +997,16 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> std::fmt::Debug
}
LocalValue::Live(Operand::Immediate(Immediate::Scalar(val))) => {
write!(fmt, " {:?}", val)?;
- if let ScalarMaybeUninit::Scalar(Scalar::Ptr(ptr, _size)) = val {
+ if let Scalar::Ptr(ptr, _size) = val {
allocs.push(ptr.provenance.get_alloc_id());
}
}
LocalValue::Live(Operand::Immediate(Immediate::ScalarPair(val1, val2))) => {
write!(fmt, " ({:?}, {:?})", val1, val2)?;
- if let ScalarMaybeUninit::Scalar(Scalar::Ptr(ptr, _size)) = val1 {
+ if let Scalar::Ptr(ptr, _size) = val1 {
allocs.push(ptr.provenance.get_alloc_id());
}
- if let ScalarMaybeUninit::Scalar(Scalar::Ptr(ptr, _size)) = val2 {
+ if let Scalar::Ptr(ptr, _size) = val2 {
allocs.push(ptr.provenance.get_alloc_id());
}
}
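Splitting `generate_stacktrace` into an associated function over a frame slice lets callers that only hold a stack, rather than a whole `&InterpCx`, build the same backtrace. A minimal sketch of the refactoring pattern with toy types:

// Toy frames; the point is only the method-vs-associated-function split.
struct Frame {
    name: &'static str,
}

struct InterpCx {
    stack: Vec<Frame>,
}

impl InterpCx {
    // New: works on any slice of frames, no `&self` required.
    fn generate_stacktrace_from_stack(stack: &[Frame]) -> Vec<&'static str> {
        stack.iter().rev().map(|f| f.name).collect()
    }

    // Old entry point, now a thin wrapper.
    fn generate_stacktrace(&self) -> Vec<&'static str> {
        Self::generate_stacktrace_from_stack(&self.stack)
    }
}

fn main() {
    let ecx = InterpCx { stack: vec![Frame { name: "main" }, Frame { name: "inner" }] };
    // Innermost frame first, matching the interpreter's stacktrace order.
    assert_eq!(ecx.generate_stacktrace(), vec!["inner", "main"]);
}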
diff --git a/compiler/rustc_const_eval/src/interpret/intern.rs b/compiler/rustc_const_eval/src/interpret/intern.rs
index 376b8872c..6809a42dc 100644
--- a/compiler/rustc_const_eval/src/interpret/intern.rs
+++ b/compiler/rustc_const_eval/src/interpret/intern.rs
@@ -15,7 +15,7 @@
//! that contains allocations whose mutability we cannot identify.)
use super::validity::RefTracking;
-use rustc_data_structures::fx::{FxHashMap, FxHashSet};
+use rustc_data_structures::fx::{FxIndexMap, FxIndexSet};
use rustc_errors::ErrorGuaranteed;
use rustc_hir as hir;
use rustc_middle::mir::interpret::InterpResult;
@@ -37,7 +37,7 @@ pub trait CompileTimeMachine<'mir, 'tcx, T> = Machine<
ExtraFnVal = !,
FrameExtra = (),
AllocExtra = (),
- MemoryMap = FxHashMap<AllocId, (MemoryKind<T>, Allocation)>,
+ MemoryMap = FxIndexMap<AllocId, (MemoryKind<T>, Allocation)>,
>;
struct InternVisitor<'rt, 'mir, 'tcx, M: CompileTimeMachine<'mir, 'tcx, const_eval::MemoryKind>> {
@@ -47,7 +47,7 @@ struct InternVisitor<'rt, 'mir, 'tcx, M: CompileTimeMachine<'mir, 'tcx, const_ev
ref_tracking: &'rt mut RefTracking<(MPlaceTy<'tcx>, InternMode)>,
/// A list of all encountered allocations. After type-based interning, we traverse this list to
/// also intern allocations that are only referenced by a raw pointer or inside a union.
- leftover_allocations: &'rt mut FxHashSet<AllocId>,
+ leftover_allocations: &'rt mut FxIndexSet<AllocId>,
/// The root kind of the value that we're looking at. This field is never mutated for a
/// particular allocation. It is primarily used to make as many allocations as possible
/// read-only so LLVM can place them in const memory.
@@ -79,7 +79,7 @@ struct IsStaticOrFn;
/// to account for (e.g. for vtables).
fn intern_shallow<'rt, 'mir, 'tcx, M: CompileTimeMachine<'mir, 'tcx, const_eval::MemoryKind>>(
ecx: &'rt mut InterpCx<'mir, 'tcx, M>,
- leftover_allocations: &'rt mut FxHashSet<AllocId>,
+ leftover_allocations: &'rt mut FxIndexSet<AllocId>,
alloc_id: AllocId,
mode: InternMode,
ty: Option<Ty<'tcx>>,
@@ -114,7 +114,7 @@ fn intern_shallow<'rt, 'mir, 'tcx, M: CompileTimeMachine<'mir, 'tcx, const_eval:
if let InternMode::Static(mutability) = mode {
// For this, we need to take into account `UnsafeCell`. When `ty` is `None`, we assume
// no interior mutability.
- let frozen = ty.map_or(true, |ty| ty.is_freeze(ecx.tcx, ecx.param_env));
+ let frozen = ty.map_or(true, |ty| ty.is_freeze(*ecx.tcx, ecx.param_env));
// For statics, allocation mutability is the combination of place mutability and
// type mutability.
// The entire allocation needs to be mutable if it contains an `UnsafeCell` anywhere.
@@ -134,7 +134,7 @@ fn intern_shallow<'rt, 'mir, 'tcx, M: CompileTimeMachine<'mir, 'tcx, const_eval:
alloc.mutability = Mutability::Not;
};
// link the alloc id to the actual allocation
- leftover_allocations.extend(alloc.relocations().iter().map(|&(_, alloc_id)| alloc_id));
+ leftover_allocations.extend(alloc.provenance().iter().map(|&(_, alloc_id)| alloc_id));
let alloc = tcx.intern_const_alloc(alloc);
tcx.set_alloc_id_memory(alloc_id, alloc);
None
@@ -191,10 +191,10 @@ impl<'rt, 'mir, 'tcx: 'mir, M: CompileTimeMachine<'mir, 'tcx, const_eval::Memory
return Ok(true);
};
- // If there are no relocations in this allocation, it does not contain references
+ // If there is no provenance in this allocation, it does not contain references
// that point to another allocation, and we can avoid the interning walk.
if let Some(alloc) = self.ecx.get_ptr_alloc(mplace.ptr, size, align)? {
- if !alloc.has_relocations() {
+ if !alloc.has_provenance() {
return Ok(false);
}
} else {
@@ -233,8 +233,8 @@ impl<'rt, 'mir, 'tcx: 'mir, M: CompileTimeMachine<'mir, 'tcx, const_eval::Memory
}
fn visit_value(&mut self, mplace: &MPlaceTy<'tcx>) -> InterpResult<'tcx> {
- // Handle Reference types, as these are the only relocations supported by const eval.
- // Raw pointers (and boxes) are handled by the `leftover_relocations` logic.
+ // Handle Reference types, as these are the only types with provenance supported by const eval.
+ // Raw pointers (and boxes) are handled by the `leftover_allocations` logic.
let tcx = self.ecx.tcx;
let ty = mplace.layout.ty;
if let ty::Ref(_, referenced_ty, ref_mutability) = *ty.kind() {
@@ -332,9 +332,7 @@ pub enum InternKind {
///
/// This *cannot raise an interpreter error*. Doing so is left to validation, which
/// tracks where in the value we are and thus can show much better error messages.
-/// Any errors here would anyway be turned into `const_err` lints, whereas validation failures
-/// are hard errors.
-#[tracing::instrument(level = "debug", skip(ecx))]
+#[instrument(level = "debug", skip(ecx))]
pub fn intern_const_alloc_recursive<
'mir,
'tcx: 'mir,
@@ -357,7 +355,7 @@ pub fn intern_const_alloc_recursive<
// `leftover_allocations` collects *all* allocations we see, because some might not
// be available in a typed way. They get interned at the end.
let mut ref_tracking = RefTracking::empty();
- let leftover_allocations = &mut FxHashSet::default();
+ let leftover_allocations = &mut FxIndexSet::default();
// start with the outermost allocation
intern_shallow(
@@ -410,7 +408,7 @@ pub fn intern_const_alloc_recursive<
// references and a `leftover_allocations` set (where we only have a todo-list here).
// So we hand-roll the interning logic here again.
match intern_kind {
- // Statics may contain mutable allocations even behind relocations.
+ // Statics may point to mutable allocations.
// Even for immutable statics it would be ok to have mutable allocations behind
// raw pointers, e.g. for `static FOO: *const AtomicUsize = &AtomicUsize::new(42)`.
InternKind::Static(_) => {}
@@ -441,7 +439,7 @@ pub fn intern_const_alloc_recursive<
}
let alloc = tcx.intern_const_alloc(alloc);
tcx.set_alloc_id_memory(alloc_id, alloc);
- for &(_, alloc_id) in alloc.inner().relocations().iter() {
+ for &(_, alloc_id) in alloc.inner().provenance().iter() {
if leftover_allocations.insert(alloc_id) {
todo.push(alloc_id);
}
diff --git a/compiler/rustc_const_eval/src/interpret/intrinsics.rs b/compiler/rustc_const_eval/src/interpret/intrinsics.rs
index 08209eb79..8637d6a77 100644
--- a/compiler/rustc_const_eval/src/interpret/intrinsics.rs
+++ b/compiler/rustc_const_eval/src/interpret/intrinsics.rs
@@ -8,7 +8,7 @@ use rustc_hir::def_id::DefId;
use rustc_middle::mir::{
self,
interpret::{ConstValue, GlobalId, InterpResult, PointerArithmetic, Scalar},
- BinOp,
+ BinOp, NonDivergingIntrinsic,
};
use rustc_middle::ty;
use rustc_middle::ty::layout::LayoutOf as _;
@@ -79,9 +79,9 @@ pub(crate) fn eval_nullary_intrinsic<'tcx>(
ty::Projection(_)
| ty::Opaque(_, _)
| ty::Param(_)
- | ty::Bound(_, _)
| ty::Placeholder(_)
| ty::Infer(_) => throw_inval!(TooGeneric),
+ ty::Bound(_, _) => bug!("bound ty during ctfe"),
ty::Bool
| ty::Char
| ty::Int(_)
@@ -95,7 +95,7 @@ pub(crate) fn eval_nullary_intrinsic<'tcx>(
| ty::Ref(_, _, _)
| ty::FnDef(_, _)
| ty::FnPtr(_)
- | ty::Dynamic(_, _)
+ | ty::Dynamic(_, _, _)
| ty::Closure(_, _)
| ty::Generator(_, _, _)
| ty::GeneratorWitness(_)
@@ -184,7 +184,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
| sym::bitreverse => {
let ty = substs.type_at(0);
let layout_of = self.layout_of(ty)?;
- let val = self.read_scalar(&args[0])?.check_init()?;
+ let val = self.read_scalar(&args[0])?;
let bits = val.to_bits(layout_of.size)?;
let kind = match layout_of.abi {
Abi::Scalar(scalar) => scalar.primitive(),
@@ -256,7 +256,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
let (val, overflowed, _ty) = self.overflowing_binary_op(bin_op, &l, &r)?;
if overflowed {
let layout = self.layout_of(substs.type_at(0))?;
- let r_val = r.to_scalar()?.to_bits(layout.size)?;
+ let r_val = r.to_scalar().to_bits(layout.size)?;
if let sym::unchecked_shl | sym::unchecked_shr = intrinsic_name {
throw_ub_format!("overflowing shift by {} in `{}`", r_val, intrinsic_name);
} else {
@@ -269,9 +269,9 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
// rotate_left: (X << (S % BW)) | (X >> ((BW - S) % BW))
// rotate_right: (X << ((BW - S) % BW)) | (X >> (S % BW))
let layout = self.layout_of(substs.type_at(0))?;
- let val = self.read_scalar(&args[0])?.check_init()?;
+ let val = self.read_scalar(&args[0])?;
let val_bits = val.to_bits(layout.size)?;
- let raw_shift = self.read_scalar(&args[1])?.check_init()?;
+ let raw_shift = self.read_scalar(&args[1])?;
let raw_shift_bits = raw_shift.to_bits(layout.size)?;
let width_bits = u128::from(layout.size.bits());
let shift_bits = raw_shift_bits % width_bits;
@@ -320,7 +320,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
let (a_offset, b_offset) =
match (self.ptr_try_get_alloc_id(a), self.ptr_try_get_alloc_id(b)) {
(Err(a), Err(b)) => {
- // Neither poiner points to an allocation.
+ // Neither pointer points to an allocation.
// If these are inequal or null, this *will* fail the deref check below.
(a, b)
}
@@ -506,12 +506,6 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
// These just return their argument
self.copy_op(&args[0], dest, /*allow_transmute*/ false)?;
}
- sym::assume => {
- let cond = self.read_scalar(&args[0])?.check_init()?.to_bool()?;
- if !cond {
- throw_ub_format!("`assume` intrinsic called with `false`");
- }
- }
sym::raw_eq => {
let result = self.raw_eq_intrinsic(&args[0], &args[1])?;
self.write_scalar(result, dest)?;
@@ -536,6 +530,32 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
Ok(true)
}
+ pub(super) fn emulate_nondiverging_intrinsic(
+ &mut self,
+ intrinsic: &NonDivergingIntrinsic<'tcx>,
+ ) -> InterpResult<'tcx> {
+ match intrinsic {
+ NonDivergingIntrinsic::Assume(op) => {
+ let op = self.eval_operand(op, None)?;
+ let cond = self.read_scalar(&op)?.to_bool()?;
+ if !cond {
+ throw_ub_format!("`assume` called with `false`");
+ }
+ Ok(())
+ }
+ NonDivergingIntrinsic::CopyNonOverlapping(mir::CopyNonOverlapping {
+ count,
+ src,
+ dst,
+ }) => {
+ let src = self.eval_operand(src, None)?;
+ let dst = self.eval_operand(dst, None)?;
+ let count = self.eval_operand(count, None)?;
+ self.copy_intrinsic(&src, &dst, &count, /* nonoverlapping */ true)
+ }
+ }
+ }
+
pub fn exact_div(
&mut self,
a: &ImmTy<'tcx, M::Provenance>,
@@ -570,7 +590,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
// term since the sign of the second term can be inferred from this and
// the fact that the operation has overflowed (if either is 0 no
// overflow can occur)
- let first_term: u128 = l.to_scalar()?.to_bits(l.layout.size)?;
+ let first_term: u128 = l.to_scalar().to_bits(l.layout.size)?;
let first_term_positive = first_term & (1 << (num_bits - 1)) == 0;
if first_term_positive {
// Negative overflow not possible since the positive first term
@@ -687,10 +707,23 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
let layout = self.layout_of(lhs.layout.ty.builtin_deref(true).unwrap().ty)?;
assert!(!layout.is_unsized());
- let lhs = self.read_pointer(lhs)?;
- let rhs = self.read_pointer(rhs)?;
- let lhs_bytes = self.read_bytes_ptr(lhs, layout.size)?;
- let rhs_bytes = self.read_bytes_ptr(rhs, layout.size)?;
+ let get_bytes = |this: &InterpCx<'mir, 'tcx, M>,
+ op: &OpTy<'tcx, <M as Machine<'mir, 'tcx>>::Provenance>,
+ size|
+ -> InterpResult<'tcx, &[u8]> {
+ let ptr = this.read_pointer(op)?;
+ let Some(alloc_ref) = self.get_ptr_alloc(ptr, size, Align::ONE)? else {
+ // zero-sized access
+ return Ok(&[]);
+ };
+ if alloc_ref.has_provenance() {
+ throw_ub_format!("`raw_eq` on bytes with provenance");
+ }
+ alloc_ref.get_bytes_strip_provenance()
+ };
+
+ let lhs_bytes = get_bytes(self, lhs, layout.size)?;
+ let rhs_bytes = get_bytes(self, rhs, layout.size)?;
Ok(Scalar::from_bool(lhs_bytes == rhs_bytes))
}
}
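`assume` (together with `copy_nonoverlapping`) is no longer routed through the generic intrinsic call path but through the new `NonDivergingIntrinsic` statement handler added above; the interpreter's obligation is unchanged: evaluate the operand and raise undefined behavior if it is `false`. A toy version of that check, with invented error plumbing:

// Toy stand-ins for the interpreter's error types.
#[derive(Debug)]
enum InterpError {
    UndefinedBehavior(String),
}

type InterpResult<T> = Result<T, InterpError>;

// Mirrors the NonDivergingIntrinsic::Assume handling: the value must be `true`,
// otherwise evaluation stops with a UB error.
fn eval_assume(cond: bool) -> InterpResult<()> {
    if !cond {
        return Err(InterpError::UndefinedBehavior("`assume` called with `false`".into()));
    }
    Ok(())
}

fn main() {
    assert!(eval_assume(true).is_ok());
    assert!(eval_assume(false).is_err());
}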
diff --git a/compiler/rustc_const_eval/src/interpret/intrinsics/caller_location.rs b/compiler/rustc_const_eval/src/interpret/intrinsics/caller_location.rs
index 5864b9215..0e3867557 100644
--- a/compiler/rustc_const_eval/src/interpret/intrinsics/caller_location.rs
+++ b/compiler/rustc_const_eval/src/interpret/intrinsics/caller_location.rs
@@ -4,7 +4,6 @@ use rustc_ast::Mutability;
use rustc_hir::lang_items::LangItem;
use rustc_middle::mir::TerminatorKind;
use rustc_middle::ty::layout::LayoutOf;
-use rustc_middle::ty::subst::Subst;
use rustc_span::{Span, Symbol};
use crate::interpret::{
@@ -28,7 +27,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
let mut source_info = *frame.body.source_info(loc);
// If this is a `Call` terminator, use the `fn_span` instead.
- let block = &frame.body.basic_blocks()[loc.block];
+ let block = &frame.body.basic_blocks[loc.block];
if loc.statement_index == block.statements.len() {
debug!(
"find_closest_untracked_caller_location: got terminator {:?} ({:?})",
diff --git a/compiler/rustc_const_eval/src/interpret/intrinsics/type_name.rs b/compiler/rustc_const_eval/src/interpret/intrinsics/type_name.rs
index f9847742f..ffdb8de5b 100644
--- a/compiler/rustc_const_eval/src/interpret/intrinsics/type_name.rs
+++ b/compiler/rustc_const_eval/src/interpret/intrinsics/type_name.rs
@@ -4,7 +4,7 @@ use rustc_hir::definitions::DisambiguatedDefPathData;
use rustc_middle::mir::interpret::{Allocation, ConstAllocation};
use rustc_middle::ty::{
self,
- print::{PrettyPrinter, Print, Printer},
+ print::{with_no_verbose_constants, PrettyPrinter, Print, Printer},
subst::{GenericArg, GenericArgKind},
Ty, TyCtxt,
};
@@ -48,7 +48,7 @@ impl<'tcx> Printer<'tcx> for AbsolutePathPrinter<'tcx> {
| ty::FnPtr(_)
| ty::Never
| ty::Tuple(_)
- | ty::Dynamic(_, _) => self.pretty_print_type(ty),
+ | ty::Dynamic(_, _, _) => self.pretty_print_type(ty),
// Placeholders (all printed as `_` to uniformize them).
ty::Param(_) | ty::Bound(..) | ty::Placeholder(_) | ty::Infer(_) | ty::Error(_) => {
@@ -190,7 +190,9 @@ impl Write for AbsolutePathPrinter<'_> {
/// Directly returns an `Allocation` containing an absolute path representation of the given type.
pub(crate) fn alloc_type_name<'tcx>(tcx: TyCtxt<'tcx>, ty: Ty<'tcx>) -> ConstAllocation<'tcx> {
- let path = AbsolutePathPrinter { tcx, path: String::new() }.print_type(ty).unwrap().path;
+ let path = with_no_verbose_constants!(
+ AbsolutePathPrinter { tcx, path: String::new() }.print_type(ty).unwrap().path
+ );
let alloc = Allocation::from_bytes_byte_aligned_immutable(path.into_bytes());
tcx.intern_const_alloc(alloc)
}
diff --git a/compiler/rustc_const_eval/src/interpret/machine.rs b/compiler/rustc_const_eval/src/interpret/machine.rs
index 71ccd1799..351152eba 100644
--- a/compiler/rustc_const_eval/src/interpret/machine.rs
+++ b/compiler/rustc_const_eval/src/interpret/machine.rs
@@ -6,6 +6,7 @@ use std::borrow::{Borrow, Cow};
use std::fmt::Debug;
use std::hash::Hash;
+use rustc_ast::{InlineAsmOptions, InlineAsmTemplatePiece};
use rustc_middle::mir;
use rustc_middle::ty::{self, Ty, TyCtxt};
use rustc_span::def_id::DefId;
@@ -123,18 +124,15 @@ pub trait Machine<'mir, 'tcx>: Sized {
/// Whether memory accesses should be alignment-checked.
fn enforce_alignment(ecx: &InterpCx<'mir, 'tcx, Self>) -> bool;
- /// Whether, when checking alignment, we should `force_int` and thus support
+ /// Whether, when checking alignment, we should look at the actual address and thus support
/// custom alignment logic based on whatever the integer address happens to be.
///
- /// Requires Provenance::OFFSET_IS_ADDR to be true.
- fn force_int_for_alignment_check(ecx: &InterpCx<'mir, 'tcx, Self>) -> bool;
+ /// If this returns true, Provenance::OFFSET_IS_ADDR must be true.
+ fn use_addr_for_alignment_check(ecx: &InterpCx<'mir, 'tcx, Self>) -> bool;
/// Whether to enforce the validity invariant
fn enforce_validity(ecx: &InterpCx<'mir, 'tcx, Self>) -> bool;
- /// Whether to enforce integers and floats being initialized.
- fn enforce_number_init(ecx: &InterpCx<'mir, 'tcx, Self>) -> bool;
-
/// Whether function calls should be [ABI](CallAbi)-checked.
fn enforce_abi(_ecx: &InterpCx<'mir, 'tcx, Self>) -> bool {
true
@@ -218,23 +216,12 @@ pub trait Machine<'mir, 'tcx>: Sized {
right: &ImmTy<'tcx, Self::Provenance>,
) -> InterpResult<'tcx, (Scalar<Self::Provenance>, bool, Ty<'tcx>)>;
- /// Called to read the specified `local` from the `frame`.
- /// Since reading a ZST is not actually accessing memory or locals, this is never invoked
- /// for ZST reads.
- #[inline]
- fn access_local<'a>(
- frame: &'a Frame<'mir, 'tcx, Self::Provenance, Self::FrameExtra>,
- local: mir::Local,
- ) -> InterpResult<'tcx, &'a Operand<Self::Provenance>>
- where
- 'tcx: 'mir,
- {
- frame.locals[local].access()
- }
-
/// Called to write the specified `local` from the `frame`.
/// Since writing a ZST is not actually accessing memory or locals, this is never invoked
/// for ZST reads.
/// for ZST writes.
+ ///
+ /// Due to borrow checker trouble, we indicate the `frame` as an index rather than an `&mut
+ /// Frame`.
#[inline]
fn access_local_mut<'a>(
ecx: &'a mut InterpCx<'mir, 'tcx, Self>,
@@ -329,7 +316,7 @@ pub trait Machine<'mir, 'tcx>: Sized {
/// cache the result. (This relies on `AllocMap::get_or` being able to add the
/// owned allocation to the map even when the map is shared.)
///
- /// This must only fail if `alloc` contains relocations.
+ /// This must only fail if `alloc` contains provenance.
fn adjust_allocation<'b>(
ecx: &InterpCx<'mir, 'tcx, Self>,
id: AllocId,
@@ -337,13 +324,22 @@ pub trait Machine<'mir, 'tcx>: Sized {
kind: Option<MemoryKind<Self::MemoryKind>>,
) -> InterpResult<'tcx, Cow<'b, Allocation<Self::Provenance, Self::AllocExtra>>>;
+ fn eval_inline_asm(
+ _ecx: &mut InterpCx<'mir, 'tcx, Self>,
+ _template: &'tcx [InlineAsmTemplatePiece],
+ _operands: &[mir::InlineAsmOperand<'tcx>],
+ _options: InlineAsmOptions,
+ ) -> InterpResult<'tcx> {
+ throw_unsup_format!("inline assembly is not supported")
+ }
+
/// Hook for performing extra checks on a memory read access.
///
/// Takes read-only access to the allocation so that all the memory read
/// operations can keep taking `&self`. Use a `RefCell` in `AllocExtra` if you
/// need to mutate.
#[inline(always)]
- fn memory_read(
+ fn before_memory_read(
_tcx: TyCtxt<'tcx>,
_machine: &Self,
_alloc_extra: &Self::AllocExtra,
@@ -355,7 +351,7 @@ pub trait Machine<'mir, 'tcx>: Sized {
/// Hook for performing extra checks on a memory write access.
#[inline(always)]
- fn memory_written(
+ fn before_memory_write(
_tcx: TyCtxt<'tcx>,
_machine: &mut Self,
_alloc_extra: &mut Self::AllocExtra,
@@ -367,7 +363,7 @@ pub trait Machine<'mir, 'tcx>: Sized {
/// Hook for performing extra operations on a memory deallocation.
#[inline(always)]
- fn memory_deallocated(
+ fn before_memory_deallocation(
_tcx: TyCtxt<'tcx>,
_machine: &mut Self,
_alloc_extra: &mut Self::AllocExtra,
@@ -430,36 +426,19 @@ pub macro compile_time_machine(<$mir: lifetime, $tcx: lifetime>) {
type ExtraFnVal = !;
type MemoryMap =
- rustc_data_structures::fx::FxHashMap<AllocId, (MemoryKind<Self::MemoryKind>, Allocation)>;
+ rustc_data_structures::fx::FxIndexMap<AllocId, (MemoryKind<Self::MemoryKind>, Allocation)>;
const GLOBAL_KIND: Option<Self::MemoryKind> = None; // no copying of globals from `tcx` to machine memory
type AllocExtra = ();
type FrameExtra = ();
#[inline(always)]
- fn enforce_alignment(_ecx: &InterpCx<$mir, $tcx, Self>) -> bool {
- // We do not check for alignment to avoid having to carry an `Align`
- // in `ConstValue::ByRef`.
- false
- }
-
- #[inline(always)]
- fn force_int_for_alignment_check(_ecx: &InterpCx<$mir, $tcx, Self>) -> bool {
- // We do not support `force_int`.
+ fn use_addr_for_alignment_check(_ecx: &InterpCx<$mir, $tcx, Self>) -> bool {
+ // We do not support `use_addr`.
false
}
#[inline(always)]
- fn enforce_validity(_ecx: &InterpCx<$mir, $tcx, Self>) -> bool {
- false // for now, we don't enforce validity
- }
-
- #[inline(always)]
- fn enforce_number_init(_ecx: &InterpCx<$mir, $tcx, Self>) -> bool {
- true
- }
-
- #[inline(always)]
fn checked_binop_checks_overflow(_ecx: &InterpCx<$mir, $tcx, Self>) -> bool {
true
}
@@ -510,6 +489,7 @@ pub macro compile_time_machine(<$mir: lifetime, $tcx: lifetime>) {
) -> InterpResult<$tcx, Pointer<Option<AllocId>>> {
// Allow these casts, but make the pointer not dereferenceable.
// (I.e., they behave like transmutation.)
+ // This is correct because no pointers can ever be exposed in compile-time evaluation.
Ok(Pointer::from_addr(addr))
}
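
The renamed hooks (`before_memory_read`, `before_memory_write`, `before_memory_deallocation`) and the new `eval_inline_asm` follow the usual pattern of a trait whose default methods do nothing (or reject the operation), so each machine only overrides what it needs. A self-contained sketch of that pattern with toy types; the real signatures carry `InterpCx`, `AllocRange`, provenance and machine-specific extras that are omitted here.

type InterpResult<T = ()> = Result<T, String>;

#[derive(Clone, Copy)]
struct AllocId(u64);

#[derive(Clone, Copy)]
struct AllocRange { start: u64, size: u64 }

// Toy version of the `Machine` trait: every hook has a default, and a
// concrete machine overrides only what it cares about.
trait Machine {
    fn before_memory_read(&self, _id: AllocId, _range: AllocRange) -> InterpResult {
        Ok(())
    }
    fn before_memory_write(&mut self, _id: AllocId, _range: AllocRange) -> InterpResult {
        Ok(())
    }
    fn eval_inline_asm(&mut self) -> InterpResult {
        Err("inline assembly is not supported".into())
    }
}

// A machine that wants to observe writes (say, for extra checks).
struct LoggingMachine { writes: Vec<(u64, u64, u64)> }

impl Machine for LoggingMachine {
    fn before_memory_write(&mut self, id: AllocId, range: AllocRange) -> InterpResult {
        self.writes.push((id.0, range.start, range.size));
        Ok(())
    }
}

fn main() -> InterpResult {
    let mut m = LoggingMachine { writes: Vec::new() };
    m.before_memory_read(AllocId(0), AllocRange { start: 0, size: 8 })?; // default: no-op
    m.before_memory_write(AllocId(0), AllocRange { start: 0, size: 8 })?;
    assert_eq!(m.writes, vec![(0, 0, 8)]);
    assert!(m.eval_inline_asm().is_err()); // default still rejects inline asm
    Ok(())
}

A machine like Miri would override hooks of this kind, while the compile-time machine in the macro above largely keeps the defaults.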
diff --git a/compiler/rustc_const_eval/src/interpret/memory.rs b/compiler/rustc_const_eval/src/interpret/memory.rs
index ed2c4edf9..e5e015c1e 100644
--- a/compiler/rustc_const_eval/src/interpret/memory.rs
+++ b/compiler/rustc_const_eval/src/interpret/memory.rs
@@ -21,7 +21,6 @@ use rustc_target::abi::{Align, HasDataLayout, Size};
use super::{
alloc_range, AllocId, AllocMap, AllocRange, Allocation, CheckInAllocMsg, GlobalAlloc, InterpCx,
InterpResult, Machine, MayLeak, Pointer, PointerArithmetic, Provenance, Scalar,
- ScalarMaybeUninit,
};
#[derive(Debug, PartialEq, Copy, Clone)]
@@ -215,7 +214,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
self.allocate_raw_ptr(alloc, kind).unwrap()
}
- /// This can fail only of `alloc` contains relocations.
+ /// This can fail only if `alloc` contains provenance.
pub fn allocate_raw_ptr(
&mut self,
alloc: Allocation,
@@ -327,7 +326,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
// Let the machine take some extra action
let size = alloc.size();
- M::memory_deallocated(
+ M::before_memory_deallocation(
*self.tcx,
&mut self.machine,
&mut alloc.extra,
@@ -438,15 +437,15 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
msg,
})
}
- // Ensure we never consider the null pointer dereferencable.
+ // Ensure we never consider the null pointer dereferenceable.
if M::Provenance::OFFSET_IS_ADDR {
assert_ne!(ptr.addr(), Size::ZERO);
}
// Test align. Check this last; if both bounds and alignment are violated
// we want the error to be about the bounds.
if let Some(align) = align {
- if M::force_int_for_alignment_check(self) {
- // `force_int_for_alignment_check` can only be true if `OFFSET_IS_ADDR` is true.
+ if M::use_addr_for_alignment_check(self) {
+ // `use_addr_for_alignment_check` can only be true if `OFFSET_IS_ADDR` is true.
check_offset_align(ptr.addr().bytes(), align)?;
} else {
// Check allocation alignment and offset alignment.
@@ -520,6 +519,8 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
/// Gives raw access to the `Allocation`, without bounds or alignment checks.
/// The caller is responsible for calling the access hooks!
+ ///
+ /// You almost certainly want to use `get_ptr_alloc`/`get_ptr_alloc_mut` instead.
fn get_alloc_raw(
&self,
id: AllocId,
@@ -573,7 +574,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
)?;
if let Some((alloc_id, offset, prov, alloc)) = ptr_and_alloc {
let range = alloc_range(offset, size);
- M::memory_read(*self.tcx, &self.machine, &alloc.extra, (alloc_id, prov), range)?;
+ M::before_memory_read(*self.tcx, &self.machine, &alloc.extra, (alloc_id, prov), range)?;
Ok(Some(AllocRef { alloc, range, tcx: *self.tcx, alloc_id }))
} else {
// Even in this branch we have to be sure that we actually access the allocation, in
@@ -589,6 +590,11 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
Ok(&self.get_alloc_raw(id)?.extra)
}
+ /// Return the `mutability` field of the given allocation.
+ pub fn get_alloc_mutability<'a>(&'a self, id: AllocId) -> InterpResult<'tcx, Mutability> {
+ Ok(self.get_alloc_raw(id)?.mutability)
+ }
+
/// Gives raw mutable access to the `Allocation`, without bounds or alignment checks.
/// The caller is responsible for calling the access hooks!
///
@@ -634,7 +640,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
// We cannot call `get_raw_mut` inside `check_and_deref_ptr` as that would duplicate `&mut self`.
let (alloc, machine) = self.get_alloc_raw_mut(alloc_id)?;
let range = alloc_range(offset, size);
- M::memory_written(tcx, machine, &mut alloc.extra, (alloc_id, prov), range)?;
+ M::before_memory_write(tcx, machine, &mut alloc.extra, (alloc_id, prov), range)?;
Ok(Some(AllocRefMut { alloc, range, tcx, alloc_id }))
} else {
Ok(None)
@@ -788,10 +794,10 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
todo.extend(static_roots);
while let Some(id) = todo.pop() {
if reachable.insert(id) {
- // This is a new allocation, add its relocations to `todo`.
+ // This is a new allocation, add the allocations it points to, to `todo`.
if let Some((_, alloc)) = self.memory.alloc_map.get(id) {
todo.extend(
- alloc.relocations().values().filter_map(|prov| prov.get_alloc_id()),
+ alloc.provenance().values().filter_map(|prov| prov.get_alloc_id()),
);
}
}
@@ -827,7 +833,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> std::fmt::Debug for DumpAllocs<'a,
allocs_to_print: &mut VecDeque<AllocId>,
alloc: &Allocation<Prov, Extra>,
) -> std::fmt::Result {
- for alloc_id in alloc.relocations().values().filter_map(|prov| prov.get_alloc_id()) {
+ for alloc_id in alloc.provenance().values().filter_map(|prov| prov.get_alloc_id()) {
allocs_to_print.push_back(alloc_id);
}
write!(fmt, "{}", display_allocation(tcx, alloc))
@@ -894,11 +900,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> std::fmt::Debug for DumpAllocs<'a,
/// Reading and writing.
impl<'tcx, 'a, Prov: Provenance, Extra> AllocRefMut<'a, 'tcx, Prov, Extra> {
/// `range` is relative to this allocation reference, not the base of the allocation.
- pub fn write_scalar(
- &mut self,
- range: AllocRange,
- val: ScalarMaybeUninit<Prov>,
- ) -> InterpResult<'tcx> {
+ pub fn write_scalar(&mut self, range: AllocRange, val: Scalar<Prov>) -> InterpResult<'tcx> {
let range = self.range.subrange(range);
debug!("write_scalar at {:?}{range:?}: {val:?}", self.alloc_id);
Ok(self
@@ -908,15 +910,11 @@ impl<'tcx, 'a, Prov: Provenance, Extra> AllocRefMut<'a, 'tcx, Prov, Extra> {
}
/// `offset` is relative to this allocation reference, not the base of the allocation.
- pub fn write_ptr_sized(
- &mut self,
- offset: Size,
- val: ScalarMaybeUninit<Prov>,
- ) -> InterpResult<'tcx> {
+ pub fn write_ptr_sized(&mut self, offset: Size, val: Scalar<Prov>) -> InterpResult<'tcx> {
self.write_scalar(alloc_range(offset, self.tcx.data_layout().pointer_size), val)
}
- /// Mark the entire referenced range as uninitalized
+ /// Mark the entire referenced range as uninitialized
pub fn write_uninit(&mut self) -> InterpResult<'tcx> {
Ok(self
.alloc
@@ -931,7 +929,7 @@ impl<'tcx, 'a, Prov: Provenance, Extra> AllocRef<'a, 'tcx, Prov, Extra> {
&self,
range: AllocRange,
read_provenance: bool,
- ) -> InterpResult<'tcx, ScalarMaybeUninit<Prov>> {
+ ) -> InterpResult<'tcx, Scalar<Prov>> {
let range = self.range.subrange(range);
let res = self
.alloc
@@ -942,12 +940,12 @@ impl<'tcx, 'a, Prov: Provenance, Extra> AllocRef<'a, 'tcx, Prov, Extra> {
}
/// `range` is relative to this allocation reference, not the base of the allocation.
- pub fn read_integer(&self, range: AllocRange) -> InterpResult<'tcx, ScalarMaybeUninit<Prov>> {
+ pub fn read_integer(&self, range: AllocRange) -> InterpResult<'tcx, Scalar<Prov>> {
self.read_scalar(range, /*read_provenance*/ false)
}
/// `offset` is relative to this allocation reference, not the base of the allocation.
- pub fn read_pointer(&self, offset: Size) -> InterpResult<'tcx, ScalarMaybeUninit<Prov>> {
+ pub fn read_pointer(&self, offset: Size) -> InterpResult<'tcx, Scalar<Prov>> {
self.read_scalar(
alloc_range(offset, self.tcx.data_layout().pointer_size),
/*read_provenance*/ true,
@@ -955,29 +953,25 @@ impl<'tcx, 'a, Prov: Provenance, Extra> AllocRef<'a, 'tcx, Prov, Extra> {
}
/// `range` is relative to this allocation reference, not the base of the allocation.
- pub fn check_bytes(
- &self,
- range: AllocRange,
- allow_uninit: bool,
- allow_ptr: bool,
- ) -> InterpResult<'tcx> {
+ pub fn get_bytes_strip_provenance<'b>(&'b self) -> InterpResult<'tcx, &'a [u8]> {
Ok(self
.alloc
- .check_bytes(&self.tcx, self.range.subrange(range), allow_uninit, allow_ptr)
+ .get_bytes_strip_provenance(&self.tcx, self.range)
.map_err(|e| e.to_interp_error(self.alloc_id))?)
}
- /// Returns whether the allocation has relocations for the entire range of the `AllocRef`.
- pub(crate) fn has_relocations(&self) -> bool {
- self.alloc.has_relocations(&self.tcx, self.range)
+ /// Returns whether the allocation has provenance anywhere in the range of the `AllocRef`.
+ pub(crate) fn has_provenance(&self) -> bool {
+ self.alloc.range_has_provenance(&self.tcx, self.range)
}
}
impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
- /// Reads the given number of bytes from memory. Returns them as a slice.
+ /// Reads the given number of bytes from memory, and strips their provenance if possible.
+ /// Returns them as a slice.
///
/// Performs appropriate bounds checks.
- pub fn read_bytes_ptr(
+ pub fn read_bytes_ptr_strip_provenance(
&self,
ptr: Pointer<Option<M::Provenance>>,
size: Size,
@@ -990,7 +984,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
// (We are staying inside the bounds here so all is good.)
Ok(alloc_ref
.alloc
- .get_bytes(&alloc_ref.tcx, alloc_ref.range)
+ .get_bytes_strip_provenance(&alloc_ref.tcx, alloc_ref.range)
.map_err(|e| e.to_interp_error(alloc_ref.alloc_id))?)
}
@@ -1071,7 +1065,13 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
};
let src_alloc = self.get_alloc_raw(src_alloc_id)?;
let src_range = alloc_range(src_offset, size);
- M::memory_read(*tcx, &self.machine, &src_alloc.extra, (src_alloc_id, src_prov), src_range)?;
+ M::before_memory_read(
+ *tcx,
+ &self.machine,
+ &src_alloc.extra,
+ (src_alloc_id, src_prov),
+ src_range,
+ )?;
// We need the `dest` ptr for the next operation, so we get it now.
// We already did the source checks and called the hooks so we are good to return early.
let Some((dest_alloc_id, dest_offset, dest_prov)) = dest_parts else {
@@ -1079,24 +1079,27 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
return Ok(());
};
- // This checks relocation edges on the src, which needs to happen before
- // `prepare_relocation_copy`.
- let src_bytes = src_alloc
- .get_bytes_with_uninit_and_ptr(&tcx, src_range)
- .map_err(|e| e.to_interp_error(src_alloc_id))?
- .as_ptr(); // raw ptr, so we can also get a ptr to the destination allocation
- // first copy the relocations to a temporary buffer, because
- // `get_bytes_mut` will clear the relocations, which is correct,
- // since we don't want to keep any relocations at the target.
- let relocations =
- src_alloc.prepare_relocation_copy(self, src_range, dest_offset, num_copies);
+ // Checks provenance edges on the src, which needs to happen before
+ // `prepare_provenance_copy`.
+ if src_alloc.range_has_provenance(&tcx, alloc_range(src_range.start, Size::ZERO)) {
+ throw_unsup!(PartialPointerCopy(Pointer::new(src_alloc_id, src_range.start)));
+ }
+ if src_alloc.range_has_provenance(&tcx, alloc_range(src_range.end(), Size::ZERO)) {
+ throw_unsup!(PartialPointerCopy(Pointer::new(src_alloc_id, src_range.end())));
+ }
+ let src_bytes = src_alloc.get_bytes_unchecked(src_range).as_ptr(); // raw ptr, so we can also get a ptr to the destination allocation
+ // first copy the provenance to a temporary buffer, because
+ // `get_bytes_mut` will clear the provenance, which is correct,
+ // since we don't want to keep any provenance at the target.
+ let provenance =
+ src_alloc.prepare_provenance_copy(self, src_range, dest_offset, num_copies);
// Prepare a copy of the initialization mask.
let compressed = src_alloc.compress_uninit_range(src_range);
// Destination alloc preparations and access hooks.
let (dest_alloc, extra) = self.get_alloc_raw_mut(dest_alloc_id)?;
let dest_range = alloc_range(dest_offset, size * num_copies);
- M::memory_written(
+ M::before_memory_write(
*tcx,
extra,
&mut dest_alloc.extra,
@@ -1118,7 +1121,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
dest_alloc
.write_uninit(&tcx, dest_range)
.map_err(|e| e.to_interp_error(dest_alloc_id))?;
- // We can forget about the relocations, this is all not initialized anyway.
+ // We can forget about the provenance, this is all not initialized anyway.
return Ok(());
}
@@ -1162,8 +1165,8 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
alloc_range(dest_offset, size), // just a single copy (i.e., not full `dest_range`)
num_copies,
);
- // copy the relocations to the destination
- dest_alloc.mark_relocation_range(relocations);
+ // copy the provenance to the destination
+ dest_alloc.mark_provenance_range(provenance);
Ok(())
}
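
A standalone model of the copy path above, under simplifying assumptions (a single copy, a fixed pointer size, provenance tracked per pointer-sized chunk keyed by its start offset): the copy is rejected if provenance straddles either edge of the source range, which is what the new `PartialPointerCopy` checks do, and provenance lying wholly inside the range is re-based onto the destination offset, mirroring `prepare_provenance_copy` / `mark_provenance_range`.

use std::collections::BTreeMap;

const PTR_SIZE: u64 = 8;

/// Simplified allocation: bytes plus provenance entries, each covering
/// `PTR_SIZE` bytes starting at its key offset.
#[derive(Default)]
struct Allocation {
    bytes: Vec<u8>,
    provenance: BTreeMap<u64, u32>,
}

impl Allocation {
    /// Is the zero-sized position `pos` strictly inside some provenance entry?
    fn provenance_straddles(&self, pos: u64) -> bool {
        self.provenance
            .range(pos.saturating_sub(PTR_SIZE - 1)..pos)
            .next()
            .is_some()
    }
}

/// Copy `size` bytes from `src[src_off..]` to `dst[dst_off..]`, carrying
/// provenance along -- a single-copy analogue of the code above.
fn copy(src: &Allocation, src_off: u64, dst: &mut Allocation, dst_off: u64, size: u64)
    -> Result<(), String>
{
    // Reject copies that would cut a pointer in half at either edge.
    for edge in [src_off, src_off + size] {
        if src.provenance_straddles(edge) {
            return Err(format!("unable to copy parts of a pointer (at offset {edge})"));
        }
    }
    let (s, d) = (src_off as usize, dst_off as usize);
    dst.bytes[d..d + size as usize].copy_from_slice(&src.bytes[s..s + size as usize]);
    // Re-base every provenance entry that lies fully inside the source range.
    for (&off, &prov) in src.provenance.range(src_off..src_off + size) {
        dst.provenance.insert(off - src_off + dst_off, prov);
    }
    Ok(())
}

fn main() {
    let mut src = Allocation { bytes: vec![0xab; 16], ..Default::default() };
    src.provenance.insert(8, 7); // a "pointer" stored at offset 8
    let mut dst = Allocation { bytes: vec![0; 16], ..Default::default() };

    copy(&src, 0, &mut dst, 0, 16).unwrap(); // the whole pointer moves along
    assert_eq!(dst.provenance.get(&0), None);
    assert_eq!(dst.provenance.get(&8), Some(&7));

    let mut dst2 = Allocation { bytes: vec![0; 16], ..Default::default() };
    assert!(copy(&src, 0, &mut dst2, 0, 12).is_err()); // cuts the pointer at offset 12
}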
diff --git a/compiler/rustc_const_eval/src/interpret/operand.rs b/compiler/rustc_const_eval/src/interpret/operand.rs
index 94ba62c16..0c212cf59 100644
--- a/compiler/rustc_const_eval/src/interpret/operand.rs
+++ b/compiler/rustc_const_eval/src/interpret/operand.rs
@@ -1,11 +1,9 @@
//! Functions concerning immediate values and operands, and reading from operands.
//! All high-level functions to read from memory work on operands as sources.
-use std::fmt::Write;
-
use rustc_hir::def::Namespace;
use rustc_middle::ty::layout::{LayoutOf, PrimitiveExt, TyAndLayout};
-use rustc_middle::ty::print::{FmtPrinter, PrettyPrinter, Printer};
+use rustc_middle::ty::print::{FmtPrinter, PrettyPrinter};
use rustc_middle::ty::{ConstInt, DelaySpanBugEmitted, Ty};
use rustc_middle::{mir, ty};
use rustc_target::abi::{self, Abi, Align, HasDataLayout, Size, TagEncoding};
@@ -14,7 +12,7 @@ use rustc_target::abi::{VariantIdx, Variants};
use super::{
alloc_range, from_known_layout, mir_assign_valid_types, AllocId, ConstValue, Frame, GlobalId,
InterpCx, InterpResult, MPlaceTy, Machine, MemPlace, MemPlaceMeta, Place, PlaceTy, Pointer,
- Provenance, Scalar, ScalarMaybeUninit,
+ Provenance, Scalar,
};
/// An `Immediate` represents a single immediate self-contained Rust value.
@@ -27,23 +25,14 @@ use super::{
#[derive(Copy, Clone, Debug)]
pub enum Immediate<Prov: Provenance = AllocId> {
/// A single scalar value (must have *initialized* `Scalar` ABI).
- /// FIXME: we also currently often use this for ZST.
- /// `ScalarMaybeUninit` should reject ZST, and we should use `Uninit` for them instead.
- Scalar(ScalarMaybeUninit<Prov>),
+ Scalar(Scalar<Prov>),
/// A pair of two scalar values (must have `ScalarPair` ABI where both fields are
/// `Scalar::Initialized`).
- ScalarPair(ScalarMaybeUninit<Prov>, ScalarMaybeUninit<Prov>),
+ ScalarPair(Scalar<Prov>, Scalar<Prov>),
/// A value of fully uninitialized memory. Can have any size and layout.
Uninit,
}
-impl<Prov: Provenance> From<ScalarMaybeUninit<Prov>> for Immediate<Prov> {
- #[inline(always)]
- fn from(val: ScalarMaybeUninit<Prov>) -> Self {
- Immediate::Scalar(val)
- }
-}
-
impl<Prov: Provenance> From<Scalar<Prov>> for Immediate<Prov> {
#[inline(always)]
fn from(val: Scalar<Prov>) -> Self {
@@ -51,13 +40,13 @@ impl<Prov: Provenance> From<Scalar<Prov>> for Immediate<Prov> {
}
}
-impl<'tcx, Prov: Provenance> Immediate<Prov> {
+impl<Prov: Provenance> Immediate<Prov> {
pub fn from_pointer(p: Pointer<Prov>, cx: &impl HasDataLayout) -> Self {
- Immediate::Scalar(ScalarMaybeUninit::from_pointer(p, cx))
+ Immediate::Scalar(Scalar::from_pointer(p, cx))
}
pub fn from_maybe_pointer(p: Pointer<Option<Prov>>, cx: &impl HasDataLayout) -> Self {
- Immediate::Scalar(ScalarMaybeUninit::from_maybe_pointer(p, cx))
+ Immediate::Scalar(Scalar::from_maybe_pointer(p, cx))
}
pub fn new_slice(val: Scalar<Prov>, len: u64, cx: &impl HasDataLayout) -> Self {
@@ -69,41 +58,28 @@ impl<'tcx, Prov: Provenance> Immediate<Prov> {
vtable: Pointer<Option<Prov>>,
cx: &impl HasDataLayout,
) -> Self {
- Immediate::ScalarPair(val.into(), ScalarMaybeUninit::from_maybe_pointer(vtable, cx))
+ Immediate::ScalarPair(val.into(), Scalar::from_maybe_pointer(vtable, cx))
}
#[inline]
#[cfg_attr(debug_assertions, track_caller)] // only in debug builds due to perf (see #98980)
- pub fn to_scalar_or_uninit(self) -> ScalarMaybeUninit<Prov> {
+ pub fn to_scalar(self) -> Scalar<Prov> {
match self {
Immediate::Scalar(val) => val,
Immediate::ScalarPair(..) => bug!("Got a scalar pair where a scalar was expected"),
- Immediate::Uninit => ScalarMaybeUninit::Uninit,
+ Immediate::Uninit => bug!("Got uninit where a scalar was expected"),
}
}
#[inline]
#[cfg_attr(debug_assertions, track_caller)] // only in debug builds due to perf (see #98980)
- pub fn to_scalar(self) -> InterpResult<'tcx, Scalar<Prov>> {
- self.to_scalar_or_uninit().check_init()
- }
-
- #[inline]
- #[cfg_attr(debug_assertions, track_caller)] // only in debug builds due to perf (see #98980)
- pub fn to_scalar_or_uninit_pair(self) -> (ScalarMaybeUninit<Prov>, ScalarMaybeUninit<Prov>) {
+ pub fn to_scalar_pair(self) -> (Scalar<Prov>, Scalar<Prov>) {
match self {
Immediate::ScalarPair(val1, val2) => (val1, val2),
Immediate::Scalar(..) => bug!("Got a scalar where a scalar pair was expected"),
- Immediate::Uninit => (ScalarMaybeUninit::Uninit, ScalarMaybeUninit::Uninit),
+ Immediate::Uninit => bug!("Got uninit where a scalar pair was expected"),
}
}
-
- #[inline]
- #[cfg_attr(debug_assertions, track_caller)] // only in debug builds due to perf (see #98980)
- pub fn to_scalar_pair(self) -> InterpResult<'tcx, (Scalar<Prov>, Scalar<Prov>)> {
- let (val1, val2) = self.to_scalar_or_uninit_pair();
- Ok((val1.check_init()?, val2.check_init()?))
- }
}
// ScalarPair needs a type to interpret, so we often have an immediate and a type together
@@ -119,27 +95,17 @@ impl<Prov: Provenance> std::fmt::Display for ImmTy<'_, Prov> {
/// Helper function for printing a scalar to a FmtPrinter
fn p<'a, 'tcx, Prov: Provenance>(
cx: FmtPrinter<'a, 'tcx>,
- s: ScalarMaybeUninit<Prov>,
+ s: Scalar<Prov>,
ty: Ty<'tcx>,
) -> Result<FmtPrinter<'a, 'tcx>, std::fmt::Error> {
match s {
- ScalarMaybeUninit::Scalar(Scalar::Int(int)) => {
- cx.pretty_print_const_scalar_int(int, ty, true)
- }
- ScalarMaybeUninit::Scalar(Scalar::Ptr(ptr, _sz)) => {
+ Scalar::Int(int) => cx.pretty_print_const_scalar_int(int, ty, true),
+ Scalar::Ptr(ptr, _sz) => {
// Just print the ptr value. `pretty_print_const_scalar_ptr` would also try to
// print what it points to, which would fail since it has no access to the local
// memory.
cx.pretty_print_const_pointer(ptr, ty, true)
}
- ScalarMaybeUninit::Uninit => cx.typed_value(
- |mut this| {
- this.write_str("uninit ")?;
- Ok(this)
- },
- |this| this.print_type(ty),
- " ",
- ),
}
}
ty::tls::with(|tcx| {
@@ -269,7 +235,7 @@ impl<'tcx, Prov: Provenance> ImmTy<'tcx, Prov> {
#[inline]
pub fn to_const_int(self) -> ConstInt {
assert!(self.layout.ty.is_integral());
- let int = self.to_scalar().expect("to_const_int doesn't work on scalar pairs").assert_int();
+ let int = self.to_scalar().assert_int();
ConstInt::new(int, self.layout.ty.is_signed(), self.layout.ty.is_ptr_sized_integral())
}
}
@@ -327,7 +293,6 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
fn read_immediate_from_mplace_raw(
&self,
mplace: &MPlaceTy<'tcx, M::Provenance>,
- force: bool,
) -> InterpResult<'tcx, Option<ImmTy<'tcx, M::Provenance>>> {
if mplace.layout.is_unsized() {
// Don't touch unsized
@@ -345,47 +310,44 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
// case where some of the bytes are initialized and others are not. So, we need an extra
// check that walks over the type of `mplace` to make sure it is truly correct to treat this
// like a `Scalar` (or `ScalarPair`).
- let scalar_layout = match mplace.layout.abi {
- // `if` does not work nested inside patterns, making this a bit awkward to express.
- Abi::Scalar(abi::Scalar::Initialized { value: s, .. }) => Some(s),
- Abi::Scalar(s) if force => Some(s.primitive()),
- _ => None,
- };
- if let Some(s) = scalar_layout {
- let size = s.size(self);
- assert_eq!(size, mplace.layout.size, "abi::Scalar size does not match layout size");
- let scalar = alloc
- .read_scalar(alloc_range(Size::ZERO, size), /*read_provenance*/ s.is_ptr())?;
- return Ok(Some(ImmTy { imm: scalar.into(), layout: mplace.layout }));
- }
- let scalar_pair_layout = match mplace.layout.abi {
+ Ok(match mplace.layout.abi {
+ Abi::Scalar(abi::Scalar::Initialized { value: s, .. }) => {
+ let size = s.size(self);
+ assert_eq!(size, mplace.layout.size, "abi::Scalar size does not match layout size");
+ let scalar = alloc.read_scalar(
+ alloc_range(Size::ZERO, size),
+ /*read_provenance*/ s.is_ptr(),
+ )?;
+ Some(ImmTy { imm: scalar.into(), layout: mplace.layout })
+ }
Abi::ScalarPair(
abi::Scalar::Initialized { value: a, .. },
abi::Scalar::Initialized { value: b, .. },
- ) => Some((a, b)),
- Abi::ScalarPair(a, b) if force => Some((a.primitive(), b.primitive())),
- _ => None,
- };
- if let Some((a, b)) = scalar_pair_layout {
- // We checked `ptr_align` above, so all fields will have the alignment they need.
- // We would anyway check against `ptr_align.restrict_for_offset(b_offset)`,
- // which `ptr.offset(b_offset)` cannot possibly fail to satisfy.
- let (a_size, b_size) = (a.size(self), b.size(self));
- let b_offset = a_size.align_to(b.align(self).abi);
- assert!(b_offset.bytes() > 0); // in `operand_field` we use the offset to tell apart the fields
- let a_val = alloc.read_scalar(
- alloc_range(Size::ZERO, a_size),
- /*read_provenance*/ a.is_ptr(),
- )?;
- let b_val = alloc
- .read_scalar(alloc_range(b_offset, b_size), /*read_provenance*/ b.is_ptr())?;
- return Ok(Some(ImmTy {
- imm: Immediate::ScalarPair(a_val, b_val),
- layout: mplace.layout,
- }));
- }
- // Neither a scalar nor scalar pair.
- return Ok(None);
+ ) => {
+ // We checked `ptr_align` above, so all fields will have the alignment they need.
+ // We would anyway check against `ptr_align.restrict_for_offset(b_offset)`,
+ // which `ptr.offset(b_offset)` cannot possibly fail to satisfy.
+ let (a_size, b_size) = (a.size(self), b.size(self));
+ let b_offset = a_size.align_to(b.align(self).abi);
+ assert!(b_offset.bytes() > 0); // in `operand_field` we use the offset to tell apart the fields
+ let a_val = alloc.read_scalar(
+ alloc_range(Size::ZERO, a_size),
+ /*read_provenance*/ a.is_ptr(),
+ )?;
+ let b_val = alloc.read_scalar(
+ alloc_range(b_offset, b_size),
+ /*read_provenance*/ b.is_ptr(),
+ )?;
+ Some(ImmTy {
+ imm: Immediate::ScalarPair(a_val.into(), b_val.into()),
+ layout: mplace.layout,
+ })
+ }
+ _ => {
+ // Neither a scalar nor scalar pair.
+ None
+ }
+ })
}
/// Try returning an immediate for the operand. If the layout does not permit loading this as an
@@ -394,20 +356,15 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
/// succeed! Whether it succeeds depends on whether the layout can be represented
/// in an `Immediate`, not on which data is stored there currently.
///
- /// If `force` is `true`, then even scalars with fields that can be ununit will be
- /// read. This means the load is lossy and should not be written back!
- /// This flag exists only for validity checking.
- ///
/// This is an internal function that should not usually be used; call `read_immediate` instead.
/// ConstProp needs it, though.
pub fn read_immediate_raw(
&self,
src: &OpTy<'tcx, M::Provenance>,
- force: bool,
) -> InterpResult<'tcx, Result<ImmTy<'tcx, M::Provenance>, MPlaceTy<'tcx, M::Provenance>>> {
Ok(match src.try_as_mplace() {
Ok(ref mplace) => {
- if let Some(val) = self.read_immediate_from_mplace_raw(mplace, force)? {
+ if let Some(val) = self.read_immediate_from_mplace_raw(mplace)? {
Ok(val)
} else {
Err(*mplace)
@@ -418,24 +375,33 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
}
/// Read an immediate from a place, asserting that that is possible with the given layout.
+ ///
+ /// If this succeeds, the `ImmTy` is never `Uninit`.
#[inline(always)]
pub fn read_immediate(
&self,
op: &OpTy<'tcx, M::Provenance>,
) -> InterpResult<'tcx, ImmTy<'tcx, M::Provenance>> {
- if let Ok(imm) = self.read_immediate_raw(op, /*force*/ false)? {
- Ok(imm)
- } else {
- span_bug!(self.cur_span(), "primitive read failed for type: {:?}", op.layout.ty);
+ if !matches!(
+ op.layout.abi,
+ Abi::Scalar(abi::Scalar::Initialized { .. })
+ | Abi::ScalarPair(abi::Scalar::Initialized { .. }, abi::Scalar::Initialized { .. })
+ ) {
+ span_bug!(self.cur_span(), "primitive read not possible for type: {:?}", op.layout.ty);
+ }
+ let imm = self.read_immediate_raw(op)?.unwrap();
+ if matches!(*imm, Immediate::Uninit) {
+ throw_ub!(InvalidUninitBytes(None));
}
+ Ok(imm)
}
/// Read a scalar from a place
pub fn read_scalar(
&self,
op: &OpTy<'tcx, M::Provenance>,
- ) -> InterpResult<'tcx, ScalarMaybeUninit<M::Provenance>> {
- Ok(self.read_immediate(op)?.to_scalar_or_uninit())
+ ) -> InterpResult<'tcx, Scalar<M::Provenance>> {
+ Ok(self.read_immediate(op)?.to_scalar())
}
/// Read a pointer from a place.
@@ -449,7 +415,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
/// Turn the wide MPlace into a string (must already be dereferenced!)
pub fn read_str(&self, mplace: &MPlaceTy<'tcx, M::Provenance>) -> InterpResult<'tcx, &str> {
let len = mplace.len(self)?;
- let bytes = self.read_bytes_ptr(mplace.ptr, Size::from_bytes(len))?;
+ let bytes = self.read_bytes_ptr_strip_provenance(mplace.ptr, Size::from_bytes(len))?;
let str = std::str::from_utf8(bytes).map_err(|err| err_ub!(InvalidStr(err)))?;
Ok(str)
}
@@ -478,7 +444,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
}
}
- /// Read from a local. Will not actually access the local if reading from a ZST.
+ /// Read from a local.
/// Will not access memory, instead an indirect `Operand` is returned.
///
/// This is public because it is used by [priroda](https://github.com/oli-obk/priroda) to get an
@@ -490,12 +456,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
layout: Option<TyAndLayout<'tcx>>,
) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
let layout = self.layout_of_local(frame, local, layout)?;
- let op = if layout.is_zst() {
- // Bypass `access_local` (helps in ConstProp)
- Operand::Immediate(Immediate::Uninit)
- } else {
- *M::access_local(frame, local)?
- };
+ let op = *frame.locals[local].access()?;
Ok(OpTy { op, layout, align: Some(layout.align.abi) })
}
@@ -573,7 +534,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
// * During ConstProp, with `TooGeneric` or since the `required_consts` were not all
// checked yet.
// * During CTFE, since promoteds in `const`/`static` initializer bodies can fail.
- self.mir_const_to_op(&val, layout)?
+ self.const_to_op(&val, layout)?
}
};
trace!("{:?}: {:?}", mir_op, *op);
@@ -588,43 +549,54 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
ops.iter().map(|op| self.eval_operand(op, None)).collect()
}
- // Used when the miri-engine runs into a constant and for extracting information from constants
- // in patterns via the `const_eval` module
- /// The `val` and `layout` are assumed to already be in our interpreter
- /// "universe" (param_env).
pub fn const_to_op(
&self,
- c: ty::Const<'tcx>,
+ val: &mir::ConstantKind<'tcx>,
layout: Option<TyAndLayout<'tcx>>,
) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
- match c.kind() {
- ty::ConstKind::Param(_) | ty::ConstKind::Bound(..) => throw_inval!(TooGeneric),
- ty::ConstKind::Error(DelaySpanBugEmitted { reported, .. }) => {
- throw_inval!(AlreadyReported(reported))
+ // FIXME(const_prop): normalization needed b/c const prop lint in
+ // `mir_drops_elaborated_and_const_checked`, which happens before
+ // optimized MIR. Only after optimizing the MIR can we guarantee
+ // that the `RevealAll` pass has happened and that the body's consts
+ // are normalized, so any call to resolve before that needs to be
+ // manually normalized.
+ let val = self.tcx.normalize_erasing_regions(self.param_env, *val);
+ match val {
+ mir::ConstantKind::Ty(ct) => {
+ match ct.kind() {
+ ty::ConstKind::Param(_) | ty::ConstKind::Placeholder(..) => {
+ throw_inval!(TooGeneric)
+ }
+ ty::ConstKind::Error(DelaySpanBugEmitted { reported, .. }) => {
+ throw_inval!(AlreadyReported(reported))
+ }
+ ty::ConstKind::Unevaluated(uv) => {
+ // NOTE: We evaluate to a `ValTree` here as a check to ensure
+ // we're working with valid constants, even though we never need it.
+ let instance = self.resolve(uv.def, uv.substs)?;
+ let cid = GlobalId { instance, promoted: None };
+ let _valtree = self
+ .tcx
+ .eval_to_valtree(self.param_env.and(cid))?
+ .unwrap_or_else(|| bug!("unable to create ValTree for {uv:?}"));
+
+ Ok(self.eval_to_allocation(cid)?.into())
+ }
+ ty::ConstKind::Bound(..) | ty::ConstKind::Infer(..) => {
+ span_bug!(self.cur_span(), "unexpected ConstKind in ctfe: {ct:?}")
+ }
+ ty::ConstKind::Value(valtree) => {
+ let ty = ct.ty();
+ let const_val = self.tcx.valtree_to_const_val((ty, valtree));
+ self.const_val_to_op(const_val, ty, layout)
+ }
+ }
}
- ty::ConstKind::Unevaluated(uv) => {
+ mir::ConstantKind::Val(val, ty) => self.const_val_to_op(val, ty, layout),
+ mir::ConstantKind::Unevaluated(uv, _) => {
let instance = self.resolve(uv.def, uv.substs)?;
Ok(self.eval_to_allocation(GlobalId { instance, promoted: uv.promoted })?.into())
}
- ty::ConstKind::Infer(..) | ty::ConstKind::Placeholder(..) => {
- span_bug!(self.cur_span(), "const_to_op: Unexpected ConstKind {:?}", c)
- }
- ty::ConstKind::Value(valtree) => {
- let ty = c.ty();
- let const_val = self.tcx.valtree_to_const_val((ty, valtree));
- self.const_val_to_op(const_val, ty, layout)
- }
- }
- }
-
- pub fn mir_const_to_op(
- &self,
- val: &mir::ConstantKind<'tcx>,
- layout: Option<TyAndLayout<'tcx>>,
- ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
- match val {
- mir::ConstantKind::Ty(ct) => self.const_to_op(*ct, layout),
- mir::ConstantKind::Val(val, ty) => self.const_val_to_op(*val, *ty, layout),
}
}
@@ -727,7 +699,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
// Figure out which discriminant and variant this corresponds to.
Ok(match *tag_encoding {
TagEncoding::Direct => {
- let scalar = tag_val.to_scalar()?;
+ let scalar = tag_val.to_scalar();
// Generate a specific error if `tag_val` is not an integer.
// (`tag_bits` itself is only used for error messages below.)
let tag_bits = scalar
@@ -757,8 +729,8 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
// Return the cast value, and the index.
(discr_val, index.0)
}
- TagEncoding::Niche { dataful_variant, ref niche_variants, niche_start } => {
- let tag_val = tag_val.to_scalar()?;
+ TagEncoding::Niche { untagged_variant, ref niche_variants, niche_start } => {
+ let tag_val = tag_val.to_scalar();
// Compute the variant this niche value/"tag" corresponds to. With niche layout,
// discriminant (encoded in niche/tag) and variant index are the same.
let variants_start = niche_variants.start().as_u32();
@@ -775,7 +747,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
if !ptr_valid {
throw_ub!(InvalidTag(dbg_val))
}
- dataful_variant
+ untagged_variant
}
Ok(tag_bits) => {
let tag_bits = tag_bits.assert_bits(tag_layout.size);
@@ -785,9 +757,8 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
let niche_start_val = ImmTy::from_uint(niche_start, tag_layout);
let variant_index_relative_val =
self.binary_op(mir::BinOp::Sub, &tag_val, &niche_start_val)?;
- let variant_index_relative = variant_index_relative_val
- .to_scalar()?
- .assert_bits(tag_val.layout.size);
+ let variant_index_relative =
+ variant_index_relative_val.to_scalar().assert_bits(tag_val.layout.size);
// Check if this is in the range that indicates an actual discriminant.
if variant_index_relative <= u128::from(variants_end - variants_start) {
let variant_index_relative = u32::try_from(variant_index_relative)
@@ -806,7 +777,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
assert!(usize::try_from(variant_index).unwrap() < variants_len);
VariantIdx::from_u32(variant_index)
} else {
- dataful_variant
+ untagged_variant
}
}
};
@@ -823,9 +794,11 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
mod size_asserts {
use super::*;
- // These are in alphabetical order, which is easy to maintain.
- rustc_data_structures::static_assert_size!(Immediate, 56);
- rustc_data_structures::static_assert_size!(ImmTy<'_>, 72);
- rustc_data_structures::static_assert_size!(Operand, 64);
- rustc_data_structures::static_assert_size!(OpTy<'_>, 88);
+ use rustc_data_structures::static_assert_size;
+ // tidy-alphabetical-start
+ static_assert_size!(Immediate, 48);
+ static_assert_size!(ImmTy<'_>, 64);
+ static_assert_size!(Operand, 56);
+ static_assert_size!(OpTy<'_>, 80);
+ // tidy-alphabetical-end
}
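
The niche-decoding arithmetic in `read_discriminant` is unchanged in substance here (the fallback case is just renamed to `untagged_variant`, and the tag is now a plain `Scalar`), but it is compact enough to model standalone. In the sketch below, plain integers stand in for `Scalar` and `VariantIdx`, and the `Option<bool>`-style layout in `main` is illustrative only: concrete layouts are chosen by the compiler and are not stable.

/// Decode a niche-encoded discriminant: tags in the niche range (starting at
/// `niche_start`, wrapping) map to the niched variants; every other tag value
/// means the untagged variant.
fn decode_niche(
    tag: u128,
    niche_start: u128,
    niche_variants: std::ops::RangeInclusive<u32>, // variant indices using the niche
    untagged_variant: u32,
) -> u32 {
    let variants_start = *niche_variants.start();
    let variants_end = *niche_variants.end();
    // Same subtraction the interpreter performs via `BinOp::Sub` (wrapping).
    let relative = tag.wrapping_sub(niche_start);
    if relative <= u128::from(variants_end - variants_start) {
        variants_start + relative as u32
    } else {
        untagged_variant
    }
}

fn main() {
    // Think `Option<bool>`: `bool` only uses values 0 and 1, so value 2 is
    // the niche that encodes `None`. (Illustrative; layouts are not stable.)
    let none_variant = 0; // Option::None, niched
    let some_variant = 1; // Option::Some, the untagged variant holding the bool
    assert_eq!(decode_niche(2, 2, none_variant..=none_variant, some_variant), none_variant);
    assert_eq!(decode_niche(0, 2, none_variant..=none_variant, some_variant), some_variant);
    assert_eq!(decode_niche(1, 2, none_variant..=none_variant, some_variant), some_variant);
}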
diff --git a/compiler/rustc_const_eval/src/interpret/operator.rs b/compiler/rustc_const_eval/src/interpret/operator.rs
index f9912d706..1f1d06651 100644
--- a/compiler/rustc_const_eval/src/interpret/operator.rs
+++ b/compiler/rustc_const_eval/src/interpret/operator.rs
@@ -329,21 +329,21 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
match left.layout.ty.kind() {
ty::Char => {
assert_eq!(left.layout.ty, right.layout.ty);
- let left = left.to_scalar()?;
- let right = right.to_scalar()?;
+ let left = left.to_scalar();
+ let right = right.to_scalar();
Ok(self.binary_char_op(bin_op, left.to_char()?, right.to_char()?))
}
ty::Bool => {
assert_eq!(left.layout.ty, right.layout.ty);
- let left = left.to_scalar()?;
- let right = right.to_scalar()?;
+ let left = left.to_scalar();
+ let right = right.to_scalar();
Ok(self.binary_bool_op(bin_op, left.to_bool()?, right.to_bool()?))
}
ty::Float(fty) => {
assert_eq!(left.layout.ty, right.layout.ty);
let ty = left.layout.ty;
- let left = left.to_scalar()?;
- let right = right.to_scalar()?;
+ let left = left.to_scalar();
+ let right = right.to_scalar();
Ok(match fty {
FloatTy::F32 => {
self.binary_float_op(bin_op, ty, left.to_f32()?, right.to_f32()?)
@@ -363,8 +363,8 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
right.layout.ty
);
- let l = left.to_scalar()?.to_bits(left.layout.size)?;
- let r = right.to_scalar()?.to_bits(right.layout.size)?;
+ let l = left.to_scalar().to_bits(left.layout.size)?;
+ let r = right.to_scalar().to_bits(right.layout.size)?;
self.binary_int_op(bin_op, l, left.layout, r, right.layout)
}
_ if left.layout.ty.is_any_ptr() => {
@@ -410,7 +410,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
use rustc_middle::mir::UnOp::*;
let layout = val.layout;
- let val = val.to_scalar()?;
+ let val = val.to_scalar();
trace!("Running unary op {:?}: {:?} ({:?})", un_op, val, layout.ty);
match layout.ty.kind() {
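
With `to_scalar()` now infallible, the remaining fallible step in these binary-op arms is converting the raw scalar bits into the primitive the type demands (`to_bool`, `to_char`, `to_f32`, ...), which is where invalid encodings get rejected. A rough standalone model of those conversions, using a bare `u128` in place of `Scalar`:

/// Interpret raw scalar bits as a primitive, rejecting encodings that would
/// be undefined behavior -- roughly what `to_bool`, `to_char` and `to_f32`
/// do on the interpreter's `Scalar`.
fn to_bool(bits: u128) -> Result<bool, String> {
    match bits {
        0 => Ok(false),
        1 => Ok(true),
        _ => Err(format!("{bits} is not a valid bool")),
    }
}

fn to_char(bits: u128) -> Result<char, String> {
    u32::try_from(bits)
        .ok()
        .and_then(char::from_u32)
        .ok_or_else(|| format!("{bits:#x} is not a valid char"))
}

fn to_f32(bits: u128) -> Result<f32, String> {
    let bits = u32::try_from(bits).map_err(|_| "scalar too wide for f32".to_string())?;
    Ok(f32::from_bits(bits))
}

fn main() -> Result<(), String> {
    assert_eq!(to_bool(1)?, true);
    assert!(to_bool(2).is_err());
    assert_eq!(to_char(0x1F980)?, '🦀');
    assert_eq!(to_f32(0x3F80_0000)?, 1.0);
    Ok(())
}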
diff --git a/compiler/rustc_const_eval/src/interpret/place.rs b/compiler/rustc_const_eval/src/interpret/place.rs
index f4571a1ca..b0625b5f4 100644
--- a/compiler/rustc_const_eval/src/interpret/place.rs
+++ b/compiler/rustc_const_eval/src/interpret/place.rs
@@ -2,8 +2,6 @@
//! into a place.
//! All high-level functions to write to memory work on places as destinations.
-use std::hash::Hash;
-
use rustc_ast::Mutability;
use rustc_middle::mir;
use rustc_middle::ty;
@@ -13,7 +11,7 @@ use rustc_target::abi::{self, Abi, Align, HasDataLayout, Size, TagEncoding, Vari
use super::{
alloc_range, mir_assign_valid_types, AllocId, AllocRef, AllocRefMut, CheckInAllocMsg,
ConstAlloc, ImmTy, Immediate, InterpCx, InterpResult, Machine, MemoryKind, OpTy, Operand,
- Pointer, Provenance, Scalar, ScalarMaybeUninit,
+ Pointer, Provenance, Scalar,
};
#[derive(Copy, Clone, Hash, PartialEq, Eq, Debug)]
@@ -254,8 +252,6 @@ impl<'tcx, Prov: Provenance> MPlaceTy<'tcx, Prov> {
// These are defined here because they produce a place.
impl<'tcx, Prov: Provenance> OpTy<'tcx, Prov> {
#[inline(always)]
- /// Note: do not call `as_ref` on the resulting place. This function should only be used to
- /// read from the resulting mplace, not to get its address back.
pub fn try_as_mplace(&self) -> Result<MPlaceTy<'tcx, Prov>, ImmTy<'tcx, Prov>> {
match **self {
Operand::Indirect(mplace) => {
@@ -267,8 +263,6 @@ impl<'tcx, Prov: Provenance> OpTy<'tcx, Prov> {
#[inline(always)]
#[cfg_attr(debug_assertions, track_caller)] // only in debug builds due to perf (see #98980)
- /// Note: do not call `as_ref` on the resulting place. This function should only be used to
- /// read from the resulting mplace, not to get its address back.
pub fn assert_mem_place(&self) -> MPlaceTy<'tcx, Prov> {
self.try_as_mplace().unwrap()
}
@@ -286,7 +280,7 @@ impl<'tcx, Prov: Provenance> PlaceTy<'tcx, Prov> {
#[inline(always)]
#[cfg_attr(debug_assertions, track_caller)] // only in debug builds due to perf (see #98980)
- pub fn assert_mem_place(self) -> MPlaceTy<'tcx, Prov> {
+ pub fn assert_mem_place(&self) -> MPlaceTy<'tcx, Prov> {
self.try_as_mplace().unwrap()
}
}
@@ -294,7 +288,7 @@ impl<'tcx, Prov: Provenance> PlaceTy<'tcx, Prov> {
// FIXME: Working around https://github.com/rust-lang/rust/issues/54385
impl<'mir, 'tcx: 'mir, Prov, M> InterpCx<'mir, 'tcx, M>
where
- Prov: Provenance + Eq + Hash + 'static,
+ Prov: Provenance + 'static,
M: Machine<'mir, 'tcx, Provenance = Prov>,
{
/// Take a value, which represents a (thin or wide) reference, and make it a place.
@@ -312,7 +306,7 @@ where
let layout = self.layout_of(pointee_type)?;
let (ptr, meta) = match **val {
Immediate::Scalar(ptr) => (ptr, MemPlaceMeta::None),
- Immediate::ScalarPair(ptr, meta) => (ptr, MemPlaceMeta::Meta(meta.check_init()?)),
+ Immediate::ScalarPair(ptr, meta) => (ptr, MemPlaceMeta::Meta(meta)),
Immediate::Uninit => throw_ub!(InvalidUninitBytes(None)),
};
@@ -467,7 +461,7 @@ where
#[inline(always)]
pub fn write_scalar(
&mut self,
- val: impl Into<ScalarMaybeUninit<M::Provenance>>,
+ val: impl Into<Scalar<M::Provenance>>,
dest: &PlaceTy<'tcx, M::Provenance>,
) -> InterpResult<'tcx> {
self.write_immediate(Immediate::Scalar(val.into()), dest)
@@ -644,13 +638,19 @@ where
// Let us see if the layout is simple so we take a shortcut,
// avoid force_allocation.
- let src = match self.read_immediate_raw(src, /*force*/ false)? {
+ let src = match self.read_immediate_raw(src)? {
Ok(src_val) => {
- assert!(!src.layout.is_unsized(), "cannot have unsized immediates");
- assert!(
- !dest.layout.is_unsized(),
- "the src is sized, so the dest must also be sized"
- );
+ // FIXME(const_prop): Const-prop can possibly evaluate an
+ // unsized copy operation when it thinks that the type is
+ // actually sized, due to a trivially false where-clause
+ // predicate like `where Self: Sized` with `Self = dyn Trait`.
+ // See #102553 for an example of such a predicate.
+ if src.layout.is_unsized() {
+ throw_inval!(SizeOfUnsizedType(src.layout.ty));
+ }
+ if dest.layout.is_unsized() {
+ throw_inval!(SizeOfUnsizedType(dest.layout.ty));
+ }
assert_eq!(src.layout.size, dest.layout.size);
// Yay, we got a value that we can write directly.
return if layout_compat {
@@ -823,7 +823,7 @@ where
}
abi::Variants::Multiple {
tag_encoding:
- TagEncoding::Niche { dataful_variant, ref niche_variants, niche_start },
+ TagEncoding::Niche { untagged_variant, ref niche_variants, niche_start },
tag: tag_layout,
tag_field,
..
@@ -831,7 +831,7 @@ where
// No need to validate the discriminant here because the
// `TyAndLayout::for_variant()` call earlier already checks the variant is valid.
- if variant_index != dataful_variant {
+ if variant_index != untagged_variant {
let variants_start = niche_variants.start().as_u32();
let variant_index_relative = variant_index
.as_u32()
@@ -891,10 +891,12 @@ where
#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
mod size_asserts {
use super::*;
- // These are in alphabetical order, which is easy to maintain.
- rustc_data_structures::static_assert_size!(MemPlaceMeta, 24);
- rustc_data_structures::static_assert_size!(MemPlace, 40);
- rustc_data_structures::static_assert_size!(MPlaceTy<'_>, 64);
- rustc_data_structures::static_assert_size!(Place, 48);
- rustc_data_structures::static_assert_size!(PlaceTy<'_>, 72);
+ use rustc_data_structures::static_assert_size;
+ // tidy-alphabetical-start
+ static_assert_size!(MemPlace, 40);
+ static_assert_size!(MemPlaceMeta, 24);
+ static_assert_size!(MPlaceTy<'_>, 64);
+ static_assert_size!(Place, 40);
+ static_assert_size!(PlaceTy<'_>, 64);
+ // tidy-alphabetical-end
}
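
The `write_discriminant` counterpart to the decode sketch earlier: for a niche encoding, nothing at all is stored for the `untagged_variant`, and every other variant stores `niche_start + (variant - variants_start)` with wrapping arithmetic, which is the computation the hunk above performs. Same simplifying assumptions as before (plain integers, illustrative layout).

/// Encode a variant index into a niche tag -- the inverse of the decode step.
/// Returns `None` when nothing needs to be written.
fn encode_niche(
    variant: u32,
    niche_start: u128,
    niche_variants: std::ops::RangeInclusive<u32>,
    untagged_variant: u32,
) -> Option<u128> {
    if variant == untagged_variant {
        // The untagged variant is identified by its payload being a valid
        // value, not by a tag, so there is nothing to write.
        return None;
    }
    let relative = u128::from(variant - *niche_variants.start());
    Some(niche_start.wrapping_add(relative))
}

fn main() {
    // Same illustrative `Option<bool>`-style layout as before:
    // variant 0 niched at tag value 2, variant 1 untagged.
    assert_eq!(encode_niche(0, 2, 0..=0, 1), Some(2));
    assert_eq!(encode_niche(1, 2, 0..=0, 1), None);
}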
diff --git a/compiler/rustc_const_eval/src/interpret/projection.rs b/compiler/rustc_const_eval/src/interpret/projection.rs
index 742339f2b..6b2e2bb8a 100644
--- a/compiler/rustc_const_eval/src/interpret/projection.rs
+++ b/compiler/rustc_const_eval/src/interpret/projection.rs
@@ -1,14 +1,12 @@
//! This file implements "place projections"; basically a symmetric API for 3 types: MPlaceTy, OpTy, PlaceTy.
//!
-//! OpTy and PlaceTy genrally work by "let's see if we are actually an MPlaceTy, and do something custom if not".
+//! OpTy and PlaceTy generally work by "let's see if we are actually an MPlaceTy, and do something custom if not".
//! For PlaceTy, the custom thing is basically always to call `force_allocation` and then use the MPlaceTy logic anyway.
//! For OpTy, the custom thing on field projections has to be pretty clever (since `Operand::Immediate` can have fields),
//! but for array/slice operations it only has to worry about `Operand::Uninit`. That makes the value part trivial,
//! but we still need to do bounds checking and adjust the layout. To not duplicate that with MPlaceTy, we actually
//! implement the logic on OpTy, and MPlaceTy calls that.
-use std::hash::Hash;
-
use rustc_middle::mir;
use rustc_middle::ty;
use rustc_middle::ty::layout::LayoutOf;
@@ -22,7 +20,7 @@ use super::{
// FIXME: Working around https://github.com/rust-lang/rust/issues/54385
impl<'mir, 'tcx: 'mir, Prov, M> InterpCx<'mir, 'tcx, M>
where
- Prov: Provenance + Eq + Hash + 'static,
+ Prov: Provenance + 'static,
M: Machine<'mir, 'tcx, Provenance = Prov>,
{
//# Field access
@@ -100,6 +98,8 @@ where
// This makes several assumptions about what layouts we will encounter; we match what
// codegen does as good as we can (see `extract_field` in `rustc_codegen_ssa/src/mir/operand.rs`).
let field_val: Immediate<_> = match (*base, base.layout.abi) {
+ // if the entire value is uninit, then so is the field (can happen in ConstProp)
+ (Immediate::Uninit, _) => Immediate::Uninit,
// the field contains no information, can be left uninit
_ if field_layout.is_zst() => Immediate::Uninit,
// the field covers the entire type
@@ -124,6 +124,7 @@ where
b_val
})
}
+ // everything else is a bug
_ => span_bug!(
self.cur_span(),
"invalid field access on immediate {}, layout {:#?}",
@@ -349,6 +350,11 @@ where
) -> InterpResult<'tcx, PlaceTy<'tcx, M::Provenance>> {
use rustc_middle::mir::ProjectionElem::*;
Ok(match proj_elem {
+ OpaqueCast(ty) => {
+ let mut place = base.clone();
+ place.layout = self.layout_of(ty)?;
+ place
+ }
Field(field, _) => self.place_field(base, field.index())?,
Downcast(_, variant) => self.place_downcast(base, variant)?,
Deref => self.deref_operand(&self.place_to_op(base)?)?.into(),
@@ -373,6 +379,11 @@ where
) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
use rustc_middle::mir::ProjectionElem::*;
Ok(match proj_elem {
+ OpaqueCast(ty) => {
+ let mut op = base.clone();
+ op.layout = self.layout_of(ty)?;
+ op
+ }
Field(field, _) => self.operand_field(base, field.index())?,
Downcast(_, variant) => self.operand_downcast(base, variant)?,
Deref => self.deref_operand(base)?.into(),
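
The field-projection decision tree referenced above, including the new arm that propagates `Uninit` from the whole value to the field, can be modeled on a toy immediate type. Sizes and offsets here stand in for the real `TyAndLayout`, and the pair handling is deliberately simplified (the real code compares against a computed `b_offset` rather than zero).

/// Toy version of an interpreter immediate.
#[derive(Clone, Copy, Debug, PartialEq)]
enum Immediate {
    Scalar(u128),
    ScalarPair(u128, u128),
    Uninit,
}

/// Project a field out of an immediate, mirroring the decision tree in
/// `operand_field` (toy layout: only the field's size and offset).
fn project_field(base: Immediate, field_size: u64, field_offset: u64, base_size: u64) -> Immediate {
    match base {
        // If the entire value is uninit, then so is the field (the new arm).
        Immediate::Uninit => Immediate::Uninit,
        // A zero-sized field carries no information.
        _ if field_size == 0 => Immediate::Uninit,
        // The field covers the entire value: just reuse it.
        _ if field_size == base_size => base,
        // For a pair, the offset tells us which component the field is.
        Immediate::ScalarPair(a, b) => {
            if field_offset == 0 { Immediate::Scalar(a) } else { Immediate::Scalar(b) }
        }
        // Everything else is a bug.
        _ => panic!("invalid field access on immediate {base:?}"),
    }
}

fn main() {
    let pair = Immediate::ScalarPair(7, 9);
    assert_eq!(project_field(pair, 8, 0, 16), Immediate::Scalar(7));
    assert_eq!(project_field(pair, 8, 8, 16), Immediate::Scalar(9));
    assert_eq!(project_field(Immediate::Uninit, 8, 0, 16), Immediate::Uninit);
    assert_eq!(project_field(pair, 0, 0, 16), Immediate::Uninit); // ZST field
}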
diff --git a/compiler/rustc_const_eval/src/interpret/step.rs b/compiler/rustc_const_eval/src/interpret/step.rs
index fea158a9f..c6e04cbfb 100644
--- a/compiler/rustc_const_eval/src/interpret/step.rs
+++ b/compiler/rustc_const_eval/src/interpret/step.rs
@@ -53,7 +53,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
self.pop_stack_frame(/* unwinding */ true)?;
return Ok(true);
};
- let basic_block = &self.body().basic_blocks()[loc.block];
+ let basic_block = &self.body().basic_blocks[loc.block];
if let Some(stmt) = basic_block.statements.get(loc.statement_index) {
let old_frames = self.frame_idx();
@@ -114,13 +114,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
M::retag(self, *kind, &dest)?;
}
- // Call CopyNonOverlapping
- CopyNonOverlapping(box rustc_middle::mir::CopyNonOverlapping { src, dst, count }) => {
- let src = self.eval_operand(src, None)?;
- let dst = self.eval_operand(dst, None)?;
- let count = self.eval_operand(count, None)?;
- self.copy_intrinsic(&src, &dst, &count, /* nonoverlapping */ true)?;
- }
+ Intrinsic(box ref intrinsic) => self.emulate_nondiverging_intrinsic(intrinsic)?,
// Statements we do not track.
AscribeUserType(..) => {}
@@ -251,8 +245,8 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
Len(place) => {
let src = self.eval_place(place)?;
- let mplace = self.force_allocation(&src)?;
- let len = mplace.len(self)?;
+ let op = self.place_to_op(&src)?;
+ let len = op.len(self)?;
self.write_scalar(Scalar::from_machine_usize(len, self), &dest)?;
}
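
For orientation, the invariant both `step` and the earlier `caller_location` hunk rely on: a basic block is a list of statements followed by exactly one terminator, and a statement index equal to `statements.len()` means the next thing to execute is the terminator. A toy stepping loop over that shape (the statement and terminator variants are made up for the sketch):

#[derive(Debug)]
enum Statement { Assign(&'static str), Nop }

#[derive(Debug)]
enum Terminator { Return, Goto(usize) }

struct BasicBlock {
    statements: Vec<Statement>,
    terminator: Terminator,
}

struct Location { block: usize, statement_index: usize }

/// One interpreter step: run the next statement in the current block, or its
/// terminator once all statements are done. Returns `false` on return.
fn step(body: &[BasicBlock], loc: &mut Location) -> bool {
    let block = &body[loc.block];
    if let Some(stmt) = block.statements.get(loc.statement_index) {
        println!("statement: {stmt:?}");
        loc.statement_index += 1;
        true
    } else {
        println!("terminator: {:?}", block.terminator);
        match block.terminator {
            Terminator::Goto(target) => {
                *loc = Location { block: target, statement_index: 0 };
                true
            }
            Terminator::Return => false,
        }
    }
}

fn main() {
    let body = vec![
        BasicBlock {
            statements: vec![Statement::Assign("_1 = const 5"), Statement::Nop],
            terminator: Terminator::Goto(1),
        },
        BasicBlock { statements: vec![], terminator: Terminator::Return },
    ];
    let mut loc = Location { block: 0, statement_index: 0 };
    while step(&body, &mut loc) {}
}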
diff --git a/compiler/rustc_const_eval/src/interpret/terminator.rs b/compiler/rustc_const_eval/src/interpret/terminator.rs
index d563e35f9..57e40e168 100644
--- a/compiler/rustc_const_eval/src/interpret/terminator.rs
+++ b/compiler/rustc_const_eval/src/interpret/terminator.rs
@@ -1,5 +1,6 @@
use std::borrow::Cow;
+use rustc_ast::ast::InlineAsmOptions;
use rustc_middle::ty::layout::{FnAbiOf, LayoutOf};
use rustc_middle::ty::Instance;
use rustc_middle::{
@@ -34,7 +35,6 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
assert_eq!(discr.layout.ty, switch_ty);
// Branch to the `otherwise` case by default, if no match is found.
- assert!(!targets.iter().is_empty());
let mut target_block = targets.otherwise();
for (const_int, target) in targets.iter() {
@@ -129,8 +129,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
}
Assert { ref cond, expected, ref msg, target, cleanup } => {
- let cond_val =
- self.read_immediate(&self.eval_operand(cond, None)?)?.to_scalar()?.to_bool()?;
+ let cond_val = self.read_scalar(&self.eval_operand(cond, None)?)?.to_bool()?;
if expected == cond_val {
self.go_to_block(target);
} else {
@@ -167,8 +166,16 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
terminator.kind
),
- // Inline assembly can't be interpreted.
- InlineAsm { .. } => throw_unsup_format!("inline assembly is not supported"),
+ InlineAsm { template, ref operands, options, destination, .. } => {
+ M::eval_inline_asm(self, template, operands, options)?;
+ if options.contains(InlineAsmOptions::NORETURN) {
+ throw_ub_format!("returned from noreturn inline assembly");
+ }
+ self.go_to_block(
+ destination
+ .expect("InlineAsm terminators without noreturn must have a destination"),
+ )
+ }
}
Ok(())
@@ -215,12 +222,10 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
_ => false,
}
};
- // Padding must be fully equal.
- let pad_compat = || caller_abi.pad == callee_abi.pad;
// When comparing the PassMode, we have to be smart about comparing the attributes.
- let arg_attr_compat = |a1: ArgAttributes, a2: ArgAttributes| {
+ let arg_attr_compat = |a1: &ArgAttributes, a2: &ArgAttributes| {
// There's only one regular attribute that matters for the call ABI: InReg.
- // Everything else is things like noalias, dereferencable, nonnull, ...
+ // Everything else is things like noalias, dereferenceable, nonnull, ...
// (This also applies to pointee_size, pointee_align.)
if a1.regular.contains(ArgAttribute::InReg) != a2.regular.contains(ArgAttribute::InReg)
{
@@ -233,13 +238,13 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
}
return true;
};
- let mode_compat = || match (caller_abi.mode, callee_abi.mode) {
+ let mode_compat = || match (&caller_abi.mode, &callee_abi.mode) {
(PassMode::Ignore, PassMode::Ignore) => true,
(PassMode::Direct(a1), PassMode::Direct(a2)) => arg_attr_compat(a1, a2),
(PassMode::Pair(a1, b1), PassMode::Pair(a2, b2)) => {
arg_attr_compat(a1, a2) && arg_attr_compat(b1, b2)
}
- (PassMode::Cast(c1), PassMode::Cast(c2)) => c1 == c2,
+ (PassMode::Cast(c1, pad1), PassMode::Cast(c2, pad2)) => c1 == c2 && pad1 == pad2,
(
PassMode::Indirect { attrs: a1, extra_attrs: None, on_stack: s1 },
PassMode::Indirect { attrs: a2, extra_attrs: None, on_stack: s2 },
@@ -251,7 +256,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
_ => false,
};
- if layout_compat() && pad_compat() && mode_compat() {
+ if layout_compat() && mode_compat() {
return true;
}
trace!(
@@ -534,7 +539,9 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
let mut non_zst_field = None;
for i in 0..receiver.layout.fields.count() {
let field = self.operand_field(&receiver, i)?;
- if !field.layout.is_zst() {
+ let zst =
+ field.layout.is_zst() && field.layout.align.abi.bytes() == 1;
+ if !zst {
assert!(
non_zst_field.is_none(),
"multiple non-ZST fields in dyn receiver type {}",
@@ -557,7 +564,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
.tcx
.struct_tail_erasing_lifetimes(receiver_place.layout.ty, self.param_env);
let ty::Dynamic(data, ..) = receiver_tail.kind() else {
- span_bug!(self.cur_span(), "dyanmic call on non-`dyn` type {}", receiver_tail)
+ span_bug!(self.cur_span(), "dynamic call on non-`dyn` type {}", receiver_tail)
};
// Get the required information from the vtable.
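
One behavioral subtlety in the receiver-unpacking hunk above: a field is now only treated as skippable if it is zero-sized *and* 1-aligned, because a ZST with raised alignment still affects the layout of whatever contains it. A quick runnable check of that distinction, with toy type names:

use std::mem::{align_of, size_of};

// Zero-sized, but with a raised alignment requirement.
#[repr(align(16))]
struct AlignedZst;

// The ordinary zero-sized type: size 0, alignment 1.
struct PlainZst;

fn main() {
    assert_eq!((size_of::<PlainZst>(), align_of::<PlainZst>()), (0, 1));
    assert_eq!((size_of::<AlignedZst>(), align_of::<AlignedZst>()), (0, 16));

    // Anything containing an `AlignedZst` field must itself be at least
    // 16-aligned, so such a field is not layout-transparent the way a
    // 1-aligned ZST is. (The exact size below is repr(Rust) and not
    // guaranteed; it is printed for illustration.)
    assert!(align_of::<(u8, AlignedZst)>() >= 16);
    println!("size of (u8, AlignedZst): {}", size_of::<(u8, AlignedZst)>());
}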
diff --git a/compiler/rustc_const_eval/src/interpret/traits.rs b/compiler/rustc_const_eval/src/interpret/traits.rs
index b3a511d5a..cab23b724 100644
--- a/compiler/rustc_const_eval/src/interpret/traits.rs
+++ b/compiler/rustc_const_eval/src/interpret/traits.rs
@@ -32,7 +32,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
Ok(vtable_ptr.into())
}
- /// Returns a high-level representation of the entires of the given vtable.
+ /// Returns a high-level representation of the entries of the given vtable.
pub fn get_vtable_entries(
&self,
vtable: Pointer<Option<M::Provenance>>,
diff --git a/compiler/rustc_const_eval/src/interpret/validity.rs b/compiler/rustc_const_eval/src/interpret/validity.rs
index 0e50d1ed4..8aa56c275 100644
--- a/compiler/rustc_const_eval/src/interpret/validity.rs
+++ b/compiler/rustc_const_eval/src/interpret/validity.rs
@@ -5,23 +5,25 @@
//! to be const-safe.
use std::convert::TryFrom;
-use std::fmt::Write;
+use std::fmt::{Display, Write};
use std::num::NonZeroUsize;
+use rustc_ast::Mutability;
use rustc_data_structures::fx::FxHashSet;
use rustc_hir as hir;
use rustc_middle::mir::interpret::InterpError;
use rustc_middle::ty;
use rustc_middle::ty::layout::{LayoutOf, TyAndLayout};
use rustc_span::symbol::{sym, Symbol};
-use rustc_span::DUMMY_SP;
use rustc_target::abi::{Abi, Scalar as ScalarAbi, Size, VariantIdx, Variants, WrappingRange};
use std::hash::Hash;
+// for the validation errors
+use super::UndefinedBehaviorInfo::*;
use super::{
- alloc_range, CheckInAllocMsg, GlobalAlloc, Immediate, InterpCx, InterpResult, MPlaceTy,
- Machine, MemPlaceMeta, OpTy, Scalar, ScalarMaybeUninit, ValueVisitor,
+ CheckInAllocMsg, GlobalAlloc, ImmTy, Immediate, InterpCx, InterpResult, MPlaceTy, Machine,
+ MemPlaceMeta, OpTy, Scalar, ValueVisitor,
};
macro_rules! throw_validation_failure {
@@ -53,15 +55,16 @@ macro_rules! throw_validation_failure {
/// This lets you use the patterns as a kind of validation list, asserting which errors
/// can possibly happen:
///
-/// ```
+/// ```ignore(illustrative)
/// let v = try_validation!(some_fn(), some_path, {
/// Foo | Bar | Baz => { "some failure" },
/// });
/// ```
///
+/// The patterns must be of type `UndefinedBehaviorInfo`.
/// An additional expected parameter can also be added to the failure message:
///
-/// ```
+/// ```ignore(illustrative)
/// let v = try_validation!(some_fn(), some_path, {
/// Foo | Bar | Baz => { "some failure" } expected { "something that wasn't a failure" },
/// });
@@ -70,7 +73,7 @@ macro_rules! throw_validation_failure {
/// An additional nicety is that both parameters actually take format args, so you can just write
/// the format string in directly:
///
-/// ```
+/// ```ignore(illustrative)
/// let v = try_validation!(some_fn(), some_path, {
/// Foo | Bar | Baz => { "{:?}", some_failure } expected { "{}", expected_value },
/// });
@@ -86,7 +89,7 @@ macro_rules! try_validation {
// allocation here as this can only slow down builds that fail anyway.
Err(e) => match e.kind() {
$(
- $($p)|+ =>
+ InterpError::UndefinedBehavior($($p)|+) =>
throw_validation_failure!(
$where,
{ $( $what_fmt ),+ } $( expected { $( $expected_fmt ),+ } )?
@@ -304,6 +307,26 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, '
Ok(r)
}
+ fn read_immediate(
+ &self,
+ op: &OpTy<'tcx, M::Provenance>,
+ expected: impl Display,
+ ) -> InterpResult<'tcx, ImmTy<'tcx, M::Provenance>> {
+ Ok(try_validation!(
+ self.ecx.read_immediate(op),
+ self.path,
+ InvalidUninitBytes(None) => { "uninitialized memory" } expected { "{expected}" }
+ ))
+ }
+
+ fn read_scalar(
+ &self,
+ op: &OpTy<'tcx, M::Provenance>,
+ expected: impl Display,
+ ) -> InterpResult<'tcx, Scalar<M::Provenance>> {
+ Ok(self.read_immediate(op, expected)?.to_scalar())
+ }
+
fn check_wide_ptr_meta(
&mut self,
meta: MemPlaceMeta<M::Provenance>,
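
The two helpers added above thread an `expected` description through every read, so all "uninitialized memory" failures report what the validator was actually looking for. A minimal standalone sketch of that pattern, with hypothetical types rather than the interpreter's real `OpTy`/`InterpResult`:

```rust
// Sketch only: `ReadError`/`read_checked` are made-up stand-ins for the interpreter types.
use std::fmt::Display;

enum ReadError {
    Uninit,
    Other(String),
}

fn read_checked(raw: Result<u64, ReadError>, expected: impl Display) -> Result<u64, String> {
    match raw {
        Ok(v) => Ok(v),
        // One central place turns "uninit" into a uniform, expectation-carrying message.
        Err(ReadError::Uninit) => Err(format!("expected {expected}, got uninitialized memory")),
        Err(ReadError::Other(msg)) => Err(msg),
    }
}

fn main() {
    assert_eq!(read_checked(Ok(1), "a boolean"), Ok(1));
    assert_eq!(
        read_checked(Err(ReadError::Uninit), "a boolean"),
        Err("expected a boolean, got uninitialized memory".to_string())
    );
}
```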
@@ -317,8 +340,8 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, '
let (_ty, _trait) = try_validation!(
self.ecx.get_ptr_vtable(vtable),
self.path,
- err_ub!(DanglingIntPointer(..)) |
- err_ub!(InvalidVTablePointer(..)) =>
+ DanglingIntPointer(..) |
+ InvalidVTablePointer(..) =>
{ "{vtable}" } expected { "a vtable pointer" },
);
// FIXME: check if the type/trait match what ty::Dynamic says?
@@ -344,14 +367,10 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, '
value: &OpTy<'tcx, M::Provenance>,
kind: &str,
) -> InterpResult<'tcx> {
- let value = self.ecx.read_immediate(value)?;
+ let place =
+ self.ecx.ref_to_mplace(&self.read_immediate(value, format_args!("a {kind}"))?)?;
// Handle wide pointers.
// Check metadata early, for better diagnostics
- let place = try_validation!(
- self.ecx.ref_to_mplace(&value),
- self.path,
- err_ub!(InvalidUninitBytes(None)) => { "uninitialized {}", kind },
- );
if place.layout.is_unsized() {
self.check_wide_ptr_meta(place.meta, place.layout)?;
}
@@ -359,7 +378,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, '
let size_and_align = try_validation!(
self.ecx.size_and_align_of_mplace(&place),
self.path,
- err_ub!(InvalidMeta(msg)) => { "invalid {} metadata: {}", kind, msg },
+ InvalidMeta(msg) => { "invalid {} metadata: {}", kind, msg },
);
let (size, align) = size_and_align
// for the purpose of validity, consider foreign types to have
@@ -375,21 +394,21 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, '
CheckInAllocMsg::InboundsTest, // will anyway be replaced by validity message
),
self.path,
- err_ub!(AlignmentCheckFailed { required, has }) =>
+ AlignmentCheckFailed { required, has } =>
{
"an unaligned {kind} (required {} byte alignment but found {})",
required.bytes(),
has.bytes()
},
- err_ub!(DanglingIntPointer(0, _)) =>
+ DanglingIntPointer(0, _) =>
{ "a null {kind}" },
- err_ub!(DanglingIntPointer(i, _)) =>
+ DanglingIntPointer(i, _) =>
{ "a dangling {kind} (address {i:#x} is unallocated)" },
- err_ub!(PointerOutOfBounds { .. }) =>
+ PointerOutOfBounds { .. } =>
{ "a dangling {kind} (going beyond the bounds of its allocation)" },
// This cannot happen during const-eval (because interning already detects
// dangling pointers), but it can happen in Miri.
- err_ub!(PointerUseAfterFree(..)) =>
+ PointerUseAfterFree(..) =>
{ "a dangling {kind} (use-after-free)" },
);
// Do not allow pointers to uninhabited types.
@@ -403,34 +422,51 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, '
// Proceed recursively even for ZST, no reason to skip them!
// `!` is a ZST and we want to validate it.
if let Ok((alloc_id, _offset, _prov)) = self.ecx.ptr_try_get_alloc_id(place.ptr) {
- // Special handling for pointers to statics (irrespective of their type).
+ // Let's see what kind of memory this points to.
let alloc_kind = self.ecx.tcx.try_get_global_alloc(alloc_id);
- if let Some(GlobalAlloc::Static(did)) = alloc_kind {
- assert!(!self.ecx.tcx.is_thread_local_static(did));
- assert!(self.ecx.tcx.is_static(did));
- if matches!(
- self.ctfe_mode,
- Some(CtfeValidationMode::Const { allow_static_ptrs: false, .. })
- ) {
- // See const_eval::machine::MemoryExtra::can_access_statics for why
- // this check is so important.
- // This check is reachable when the const just referenced the static,
- // but never read it (so we never entered `before_access_global`).
- throw_validation_failure!(self.path,
- { "a {} pointing to a static variable", kind }
- );
+ match alloc_kind {
+ Some(GlobalAlloc::Static(did)) => {
+ // Special handling for pointers to statics (irrespective of their type).
+ assert!(!self.ecx.tcx.is_thread_local_static(did));
+ assert!(self.ecx.tcx.is_static(did));
+ if matches!(
+ self.ctfe_mode,
+ Some(CtfeValidationMode::Const { allow_static_ptrs: false, .. })
+ ) {
+ // See const_eval::machine::MemoryExtra::can_access_statics for why
+ // this check is so important.
+ // This check is reachable when the const just referenced the static,
+ // but never read it (so we never entered `before_access_global`).
+ throw_validation_failure!(self.path,
+ { "a {} pointing to a static variable in a constant", kind }
+ );
+ }
+ // We skip recursively checking other statics. These statics must be sound by
+ // themselves, and the only way to get broken statics here is by using
+ // unsafe code.
+                        // The reason we don't check other statics is twofold. For one, in all
+ // sound cases, the static was already validated on its own, and second, we
+ // trigger cycle errors if we try to compute the value of the other static
+ // and that static refers back to us.
+ // We might miss const-invalid data,
+ // but things are still sound otherwise (in particular re: consts
+ // referring to statics).
+ return Ok(());
}
- // We skip checking other statics. These statics must be sound by
- // themselves, and the only way to get broken statics here is by using
- // unsafe code.
- // The reasons we don't check other statics is twofold. For one, in all
- // sound cases, the static was already validated on its own, and second, we
- // trigger cycle errors if we try to compute the value of the other static
- // and that static refers back to us.
- // We might miss const-invalid data,
- // but things are still sound otherwise (in particular re: consts
- // referring to statics).
- return Ok(());
+ Some(GlobalAlloc::Memory(alloc)) => {
+ if alloc.inner().mutability == Mutability::Mut
+ && matches!(self.ctfe_mode, Some(CtfeValidationMode::Const { .. }))
+ {
+ // This should be unreachable, but if someone manages to copy a pointer
+ // out of a `static`, then that pointer might point to mutable memory,
+ // and we would catch that here.
+ throw_validation_failure!(self.path,
+ { "a {} pointing to mutable memory in a constant", kind }
+ );
+ }
+ }
+ // Nothing to check for these.
+ None | Some(GlobalAlloc::Function(..) | GlobalAlloc::VTable(..)) => {}
}
}
let path = &self.path;
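
The rewritten branch above replaces the single static special case with an exhaustive match over every kind of global allocation a const-checked pointer can target. A simplified, hypothetical model of that classification (not the real `GlobalAlloc`/`CtfeValidationMode` types):

```rust
// Hypothetical, flattened model of the match introduced in this hunk.
enum Mutability { Not, Mut }

enum GlobalAllocKind {
    Static { allow_static_ptrs: bool },
    Memory { mutability: Mutability },
    Function,
    VTable,
}

fn check_const_pointer(target: Option<GlobalAllocKind>) -> Result<(), &'static str> {
    match target {
        Some(GlobalAllocKind::Static { allow_static_ptrs: false }) => {
            Err("a pointer to a static variable in a constant")
        }
        // Allowed statics are not recursed into: they were already validated on their own.
        Some(GlobalAllocKind::Static { allow_static_ptrs: true }) => Ok(()),
        Some(GlobalAllocKind::Memory { mutability: Mutability::Mut }) => {
            Err("a pointer to mutable memory in a constant")
        }
        // Nothing to check for these.
        None
        | Some(GlobalAllocKind::Memory { mutability: Mutability::Not })
        | Some(GlobalAllocKind::Function)
        | Some(GlobalAllocKind::VTable) => Ok(()),
    }
}

fn main() {
    let bad = check_const_pointer(Some(GlobalAllocKind::Memory { mutability: Mutability::Mut }));
    assert!(bad.is_err());
}
```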
@@ -446,20 +482,6 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, '
Ok(())
}
- fn read_scalar(
- &self,
- op: &OpTy<'tcx, M::Provenance>,
- ) -> InterpResult<'tcx, ScalarMaybeUninit<M::Provenance>> {
- self.ecx.read_scalar(op)
- }
-
- fn read_immediate_forced(
- &self,
- op: &OpTy<'tcx, M::Provenance>,
- ) -> InterpResult<'tcx, Immediate<M::Provenance>> {
- Ok(*self.ecx.read_immediate_raw(op, /*force*/ true)?.unwrap())
- }
-
/// Check if this is a value of primitive type, and if yes check the validity of the value
/// at that type. Return `true` if the type is indeed primitive.
fn try_visit_primitive(
@@ -470,41 +492,39 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, '
let ty = value.layout.ty;
match ty.kind() {
ty::Bool => {
- let value = self.read_scalar(value)?;
+ let value = self.read_scalar(value, "a boolean")?;
try_validation!(
value.to_bool(),
self.path,
- err_ub!(InvalidBool(..)) | err_ub!(InvalidUninitBytes(None)) =>
+ InvalidBool(..) =>
{ "{:x}", value } expected { "a boolean" },
);
Ok(true)
}
ty::Char => {
- let value = self.read_scalar(value)?;
+ let value = self.read_scalar(value, "a unicode scalar value")?;
try_validation!(
value.to_char(),
self.path,
- err_ub!(InvalidChar(..)) | err_ub!(InvalidUninitBytes(None)) =>
+ InvalidChar(..) =>
{ "{:x}", value } expected { "a valid unicode scalar value (in `0..=0x10FFFF` but not in `0xD800..=0xDFFF`)" },
);
Ok(true)
}
ty::Float(_) | ty::Int(_) | ty::Uint(_) => {
- let value = self.read_scalar(value)?;
// NOTE: Keep this in sync with the array optimization for int/float
// types below!
- if M::enforce_number_init(self.ecx) {
- try_validation!(
- value.check_init(),
- self.path,
- err_ub!(InvalidUninitBytes(..)) =>
- { "{:x}", value } expected { "initialized bytes" }
- );
- }
+ let value = self.read_scalar(
+ value,
+ if matches!(ty.kind(), ty::Float(..)) {
+ "a floating point number"
+ } else {
+ "an integer"
+ },
+ )?;
// As a special exception we *do* match on a `Scalar` here, since we truly want
// to know its underlying representation (and *not* cast it to an integer).
- let is_ptr = value.check_init().map_or(false, |v| matches!(v, Scalar::Ptr(..)));
- if is_ptr {
+ if matches!(value, Scalar::Ptr(..)) {
throw_validation_failure!(self.path,
{ "{:x}", value } expected { "plain (non-pointer) bytes" }
)
@@ -515,11 +535,8 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, '
// We are conservative with uninit for integers, but try to
// actually enforce the strict rules for raw pointers (mostly because
// that lets us re-use `ref_to_mplace`).
- let place = try_validation!(
- self.ecx.read_immediate(value).and_then(|ref i| self.ecx.ref_to_mplace(i)),
- self.path,
- err_ub!(InvalidUninitBytes(None)) => { "uninitialized raw pointer" },
- );
+ let place =
+ self.ecx.ref_to_mplace(&self.read_immediate(value, "a raw pointer")?)?;
if place.layout.is_unsized() {
self.check_wide_ptr_meta(place.meta, place.layout)?;
}
@@ -527,7 +544,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, '
}
ty::Ref(_, ty, mutbl) => {
if matches!(self.ctfe_mode, Some(CtfeValidationMode::Const { .. }))
- && *mutbl == hir::Mutability::Mut
+ && *mutbl == Mutability::Mut
{
// A mutable reference inside a const? That does not seem right (except if it is
// a ZST).
@@ -540,11 +557,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, '
Ok(true)
}
ty::FnPtr(_sig) => {
- let value = try_validation!(
- self.ecx.read_scalar(value).and_then(|v| v.check_init()),
- self.path,
- err_ub!(InvalidUninitBytes(None)) => { "uninitialized bytes" } expected { "a proper pointer or integer value" },
- );
+ let value = self.read_scalar(value, "a function pointer")?;
// If we check references recursively, also check that this points to a function.
if let Some(_) = self.ref_tracking {
@@ -552,8 +565,8 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, '
let _fn = try_validation!(
self.ecx.get_ptr_fn(ptr),
self.path,
- err_ub!(DanglingIntPointer(..)) |
- err_ub!(InvalidFunctionPointer(..)) =>
+ DanglingIntPointer(..) |
+ InvalidFunctionPointer(..) =>
{ "{ptr}" } expected { "a function pointer" },
);
// FIXME: Check if the signature matches
@@ -595,40 +608,15 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, '
fn visit_scalar(
&mut self,
- scalar: ScalarMaybeUninit<M::Provenance>,
+ scalar: Scalar<M::Provenance>,
scalar_layout: ScalarAbi,
) -> InterpResult<'tcx> {
- // We check `is_full_range` in a slightly complicated way because *if* we are checking
- // number validity, then we want to ensure that `Scalar::Initialized` is indeed initialized,
- // i.e. that we go over the `check_init` below.
let size = scalar_layout.size(self.ecx);
- let is_full_range = match scalar_layout {
- ScalarAbi::Initialized { .. } => {
- if M::enforce_number_init(self.ecx) {
- false // not "full" since uninit is not accepted
- } else {
- scalar_layout.is_always_valid(self.ecx)
- }
- }
- ScalarAbi::Union { .. } => true,
- };
- if is_full_range {
- // Nothing to check. Cruciall we don't even `read_scalar` until here, since that would
- // fail for `Union` scalars!
- return Ok(());
- }
- // We have something to check: it must at least be initialized.
let valid_range = scalar_layout.valid_range(self.ecx);
let WrappingRange { start, end } = valid_range;
let max_value = size.unsigned_int_max();
assert!(end <= max_value);
- let value = try_validation!(
- scalar.check_init(),
- self.path,
- err_ub!(InvalidUninitBytes(None)) => { "{:x}", scalar }
- expected { "something {}", wrapping_range_format(valid_range, max_value) },
- );
- let bits = match value.try_to_int() {
+ let bits = match scalar.try_to_int() {
Ok(int) => int.assert_bits(size),
Err(_) => {
// So this is a pointer then, and casting to an int failed.
@@ -636,7 +624,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, '
// We support 2 kinds of ranges here: full range, and excluding zero.
if start == 1 && end == max_value {
// Only null is the niche. So make sure the ptr is NOT null.
- if self.ecx.scalar_may_be_null(value)? {
+ if self.ecx.scalar_may_be_null(scalar)? {
throw_validation_failure!(self.path,
{ "a potentially null pointer" }
expected {
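
`visit_scalar` now receives an already-initialized `Scalar` and only has to compare its bits against the layout's `WrappingRange`. A small self-contained sketch of the wrapping-range membership test it relies on (same semantics as the target-layout range, but with hand-rolled types):

```rust
// Minimal sketch of an inclusive range that may wrap around the type's maximum value.
struct WrappingRange {
    start: u128,
    end: u128,
}

fn contains(range: &WrappingRange, bits: u128) -> bool {
    if range.start <= range.end {
        range.start <= bits && bits <= range.end
    } else {
        // Wrapping case: only the values strictly between `end` and `start` are invalid.
        bits >= range.start || bits <= range.end
    }
}

fn main() {
    // `NonZeroU8`-style layout: 0 is the only invalid bit pattern.
    let non_zero_u8 = WrappingRange { start: 1, end: 255 };
    assert!(contains(&non_zero_u8, 42));
    assert!(!contains(&non_zero_u8, 0));
}
```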
@@ -693,9 +681,9 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValueVisitor<'mir, 'tcx, M>
Ok(try_validation!(
this.ecx.read_discriminant(op),
this.path,
- err_ub!(InvalidTag(val)) =>
+ InvalidTag(val) =>
{ "{:x}", val } expected { "a valid enum tag" },
- err_ub!(InvalidUninitBytes(None)) =>
+ InvalidUninitBytes(None) =>
{ "uninitialized bytes" } expected { "a valid enum tag" },
)
.1)
@@ -737,7 +725,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValueVisitor<'mir, 'tcx, M>
) -> InterpResult<'tcx> {
// Special check preventing `UnsafeCell` inside unions in the inner part of constants.
if matches!(self.ctfe_mode, Some(CtfeValidationMode::Const { inner: true, .. })) {
- if !op.layout.ty.is_freeze(self.ecx.tcx.at(DUMMY_SP), self.ecx.param_env) {
+ if !op.layout.ty.is_freeze(*self.ecx.tcx, self.ecx.param_env) {
throw_validation_failure!(self.path, { "`UnsafeCell` in a `const`" });
}
}
@@ -788,10 +776,11 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValueVisitor<'mir, 'tcx, M>
);
}
Abi::Scalar(scalar_layout) => {
- // We use a 'forced' read because we always need a `Immediate` here
- // and treating "partially uninit" as "fully uninit" is fine for us.
- let scalar = self.read_immediate_forced(op)?.to_scalar_or_uninit();
- self.visit_scalar(scalar, scalar_layout)?;
+ if !scalar_layout.is_uninit_valid() {
+ // There is something to check here.
+                    let scalar = self.read_scalar(op, "initialized scalar value")?;
+ self.visit_scalar(scalar, scalar_layout)?;
+ }
}
Abi::ScalarPair(a_layout, b_layout) => {
// There is no `rustc_layout_scalar_valid_range_start` for pairs, so
@@ -799,10 +788,15 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValueVisitor<'mir, 'tcx, M>
// but that can miss bugs in layout computation. Layout computation
// is subtle due to enums having ScalarPair layout, where one field
// is the discriminant.
- if cfg!(debug_assertions) {
- // We use a 'forced' read because we always need a `Immediate` here
- // and treating "partially uninit" as "fully uninit" is fine for us.
- let (a, b) = self.read_immediate_forced(op)?.to_scalar_or_uninit_pair();
+ if cfg!(debug_assertions)
+ && !a_layout.is_uninit_valid()
+ && !b_layout.is_uninit_valid()
+ {
+ // We can only proceed if *both* scalars need to be initialized.
+ // FIXME: find a way to also check ScalarPair when one side can be uninit but
+ // the other must be init.

+ let (a, b) =
+                        self.read_immediate(op, "initialized scalar value")?.to_scalar_pair();
self.visit_scalar(a, a_layout)?;
self.visit_scalar(b, b_layout)?;
}
@@ -830,9 +824,9 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValueVisitor<'mir, 'tcx, M>
let mplace = op.assert_mem_place(); // strings are unsized and hence never immediate
let len = mplace.len(self.ecx)?;
try_validation!(
- self.ecx.read_bytes_ptr(mplace.ptr, Size::from_bytes(len)),
+ self.ecx.read_bytes_ptr_strip_provenance(mplace.ptr, Size::from_bytes(len)),
self.path,
- err_ub!(InvalidUninitBytes(..)) => { "uninitialized data in `str`" },
+ InvalidUninitBytes(..) => { "uninitialized data in `str`" },
);
}
ty::Array(tys, ..) | ty::Slice(tys)
@@ -880,13 +874,9 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValueVisitor<'mir, 'tcx, M>
// We also accept uninit, for consistency with the slow path.
let alloc = self.ecx.get_ptr_alloc(mplace.ptr, size, mplace.align)?.expect("we already excluded size 0");
- match alloc.check_bytes(
- alloc_range(Size::ZERO, size),
- /*allow_uninit*/ !M::enforce_number_init(self.ecx),
- /*allow_ptr*/ false,
- ) {
+ match alloc.get_bytes_strip_provenance() {
// In the happy case, we needn't check anything else.
- Ok(()) => {}
+ Ok(_) => {}
// Some error happened, try to provide a more detailed description.
Err(err) => {
// For some errors we might be able to provide extra information.
@@ -981,6 +971,10 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
/// It will error if the bits at the destination do not match the ones described by the layout.
#[inline(always)]
pub fn validate_operand(&self, op: &OpTy<'tcx, M::Provenance>) -> InterpResult<'tcx> {
+ // Note that we *could* actually be in CTFE here with `-Zextra-const-ub-checks`, but it's
+        // still correct not to use `ctfe_mode`: that mode is for validation of the final constant
+        // value; it rules out things like `UnsafeCell` in awkward places. It can also make checking
+        // recurse through references, which we don't want here for now.
self.validate_operand_internal(op, vec![], None, None)
}
}
diff --git a/compiler/rustc_const_eval/src/lib.rs b/compiler/rustc_const_eval/src/lib.rs
index 72ac6af68..443c01fdb 100644
--- a/compiler/rustc_const_eval/src/lib.rs
+++ b/compiler/rustc_const_eval/src/lib.rs
@@ -10,7 +10,6 @@ Rust MIR: a lowered representation of Rust.
#![feature(decl_macro)]
#![feature(exact_size_is_empty)]
#![feature(let_chains)]
-#![feature(let_else)]
#![feature(map_try_insert)]
#![feature(min_specialization)]
#![feature(slice_ptr_get)]
@@ -21,9 +20,8 @@ Rust MIR: a lowered representation of Rust.
#![feature(trusted_step)]
#![feature(try_blocks)]
#![feature(yeet_expr)]
-#![feature(is_some_with)]
+#![feature(is_some_and)]
#![recursion_limit = "256"]
-#![allow(rustc::potential_query_instability)]
#[macro_use]
extern crate tracing;
@@ -33,7 +31,6 @@ extern crate rustc_middle;
pub mod const_eval;
mod errors;
pub mod interpret;
-mod might_permit_raw_init;
pub mod transform;
pub mod util;
@@ -62,7 +59,6 @@ pub fn provide(providers: &mut Providers) {
const_eval::deref_mir_constant(tcx, param_env, value)
};
providers.permits_uninit_init =
- |tcx, ty| might_permit_raw_init::might_permit_raw_init(tcx, ty, InitKind::Uninit);
- providers.permits_zero_init =
- |tcx, ty| might_permit_raw_init::might_permit_raw_init(tcx, ty, InitKind::Zero);
+ |tcx, ty| util::might_permit_raw_init(tcx, ty, InitKind::UninitMitigated0x01Fill);
+ providers.permits_zero_init = |tcx, ty| util::might_permit_raw_init(tcx, ty, InitKind::Zero);
}
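
For context, these two providers back queries that decide whether a type tolerates all-zero or "uninit" (now modelled as 0x01-filled) bytes; among other consumers they feed the `invalid_value` lint. A surface-level, runnable illustration of the distinction being decided:

```rust
// `usize` accepts zeroed bytes; a reference never does.
use std::mem;

fn main() {
    let zeroed_usize: usize = unsafe { mem::zeroed() };
    assert_eq!(zeroed_usize, 0);
    // A zeroed `&u8` would be a null reference and is rejected:
    // let _bad: &u8 = unsafe { mem::zeroed() };
}
```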
diff --git a/compiler/rustc_const_eval/src/might_permit_raw_init.rs b/compiler/rustc_const_eval/src/might_permit_raw_init.rs
deleted file mode 100644
index f971c2238..000000000
--- a/compiler/rustc_const_eval/src/might_permit_raw_init.rs
+++ /dev/null
@@ -1,40 +0,0 @@
-use crate::const_eval::CompileTimeInterpreter;
-use crate::interpret::{InterpCx, MemoryKind, OpTy};
-use rustc_middle::ty::layout::LayoutCx;
-use rustc_middle::ty::{layout::TyAndLayout, ParamEnv, TyCtxt};
-use rustc_session::Limit;
-use rustc_target::abi::InitKind;
-
-pub fn might_permit_raw_init<'tcx>(
- tcx: TyCtxt<'tcx>,
- ty: TyAndLayout<'tcx>,
- kind: InitKind,
-) -> bool {
- let strict = tcx.sess.opts.unstable_opts.strict_init_checks;
-
- if strict {
- let machine = CompileTimeInterpreter::new(Limit::new(0), false);
-
- let mut cx = InterpCx::new(tcx, rustc_span::DUMMY_SP, ParamEnv::reveal_all(), machine);
-
- let allocated = cx
- .allocate(ty, MemoryKind::Machine(crate::const_eval::MemoryKind::Heap))
- .expect("OOM: failed to allocate for uninit check");
-
- if kind == InitKind::Zero {
- cx.write_bytes_ptr(
- allocated.ptr,
- std::iter::repeat(0_u8).take(ty.layout.size().bytes_usize()),
- )
- .expect("failed to write bytes for zero valid check");
- }
-
- let ot: OpTy<'_, _> = allocated.into();
-
- // Assume that if it failed, it's a validation failure.
- cx.validate_operand(&ot).is_ok()
- } else {
- let layout_cx = LayoutCx { tcx, param_env: ParamEnv::reveal_all() };
- ty.might_permit_raw_init(&layout_cx, kind)
- }
-}
diff --git a/compiler/rustc_const_eval/src/transform/check_consts/check.rs b/compiler/rustc_const_eval/src/transform/check_consts/check.rs
index 0adb88a18..22a61774e 100644
--- a/compiler/rustc_const_eval/src/transform/check_consts/check.rs
+++ b/compiler/rustc_const_eval/src/transform/check_consts/check.rs
@@ -13,8 +13,11 @@ use rustc_middle::ty::{self, adjustment::PointerCast, Instance, InstanceDef, Ty,
use rustc_middle::ty::{Binder, TraitPredicate, TraitRef, TypeVisitable};
use rustc_mir_dataflow::{self, Analysis};
use rustc_span::{sym, Span, Symbol};
-use rustc_trait_selection::traits::error_reporting::InferCtxtExt;
-use rustc_trait_selection::traits::SelectionContext;
+use rustc_trait_selection::infer::InferCtxtExt;
+use rustc_trait_selection::traits::error_reporting::TypeErrCtxtExt as _;
+use rustc_trait_selection::traits::{
+ self, ObligationCauseCode, SelectionContext, TraitEngine, TraitEngineExt,
+};
use std::mem;
use std::ops::Deref;
@@ -135,7 +138,7 @@ impl<'mir, 'tcx> Qualifs<'mir, 'tcx> {
// qualifs for the return type.
let return_block = ccx
.body
- .basic_blocks()
+ .basic_blocks
.iter_enumerated()
.find(|(_, block)| matches!(block.terminator().kind, TerminatorKind::Return))
.map(|(bb, _)| bb);
@@ -546,7 +549,11 @@ impl<'tcx> Visitor<'tcx> for Checker<'_, 'tcx> {
// Since no pointer can ever get exposed (rejected above), this is easy to support.
}
- Rvalue::Cast(CastKind::Misc, _, _) => {}
+ Rvalue::Cast(CastKind::DynStar, _, _) => {
+ unimplemented!()
+ }
+
+ Rvalue::Cast(_, _, _) => {}
Rvalue::NullaryOp(NullOp::SizeOf | NullOp::AlignOf, _) => {}
Rvalue::ShallowInitBox(_, _) => {}
@@ -652,6 +659,7 @@ impl<'tcx> Visitor<'tcx> for Checker<'_, 'tcx> {
ProjectionElem::ConstantIndex { .. }
| ProjectionElem::Downcast(..)
+ | ProjectionElem::OpaqueCast(..)
| ProjectionElem::Subslice { .. }
| ProjectionElem::Field(..)
| ProjectionElem::Index(_) => {}
@@ -678,7 +686,7 @@ impl<'tcx> Visitor<'tcx> for Checker<'_, 'tcx> {
| StatementKind::Retag { .. }
| StatementKind::AscribeUserType(..)
| StatementKind::Coverage(..)
- | StatementKind::CopyNonOverlapping(..)
+ | StatementKind::Intrinsic(..)
| StatementKind::Nop => {}
}
}
@@ -729,10 +737,49 @@ impl<'tcx> Visitor<'tcx> for Checker<'_, 'tcx> {
let obligation =
Obligation::new(ObligationCause::dummy(), param_env, poly_trait_pred);
- let implsrc = tcx.infer_ctxt().enter(|infcx| {
+ let implsrc = {
+ let infcx = tcx.infer_ctxt().build();
let mut selcx = SelectionContext::new(&infcx);
selcx.select(&obligation)
- });
+ };
+
+ // do a well-formedness check on the trait method being called. This is because typeck only does a
+ // "non-const" check. This is required for correctness here.
+ {
+ let infcx = tcx.infer_ctxt().build();
+ let mut fulfill_cx = <dyn TraitEngine<'_>>::new(infcx.tcx);
+ let predicates = tcx.predicates_of(callee).instantiate(tcx, substs);
+ let hir_id = tcx
+ .hir()
+ .local_def_id_to_hir_id(self.body.source.def_id().expect_local());
+ let cause = || {
+ ObligationCause::new(
+ terminator.source_info.span,
+ hir_id,
+ ObligationCauseCode::ItemObligation(callee),
+ )
+ };
+ let normalized = infcx.partially_normalize_associated_types_in(
+ cause(),
+ param_env,
+ predicates,
+ );
+
+ for p in normalized.obligations {
+ fulfill_cx.register_predicate_obligation(&infcx, p);
+ }
+ for obligation in traits::predicates_for_generics(
+ |_, _| cause(),
+ self.param_env,
+ normalized.value,
+ ) {
+ fulfill_cx.register_predicate_obligation(&infcx, obligation);
+ }
+ let errors = fulfill_cx.select_all_or_error(&infcx);
+ if !errors.is_empty() {
+ infcx.err_ctxt().report_fulfillment_errors(&errors, None, false);
+ }
+ }
match implsrc {
Ok(Some(ImplSource::Param(_, ty::BoundConstness::ConstIfConst))) => {
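
This hunk also shows the API migration applied throughout the patch: `tcx.infer_ctxt().enter(|infcx| ...)` becomes `tcx.infer_ctxt().build()`. A standalone sketch, with hypothetical types, of why the builder form is more convenient when the context is needed across several statements:

```rust
// Hypothetical stand-ins for the inference-context API shapes.
struct InferCtxt;

struct InferCtxtBuilder;

impl InferCtxtBuilder {
    // Old shape: the context only lives inside the closure.
    fn enter<R>(&self, f: impl FnOnce(InferCtxt) -> R) -> R {
        f(InferCtxt)
    }
    // New shape: the context is handed back and can be bound to a local.
    fn build(&self) -> InferCtxt {
        InferCtxt
    }
}

fn main() {
    let builder = InferCtxtBuilder;
    let _old_style = builder.enter(|_infcx| 42);
    let _infcx = builder.build(); // usable across multiple statements now
}
```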
@@ -790,16 +837,15 @@ impl<'tcx> Visitor<'tcx> for Checker<'_, 'tcx> {
// improve diagnostics by showing what failed. Our requirements are stricter this time
// as we are going to error again anyways.
- tcx.infer_ctxt().enter(|infcx| {
- if let Err(e) = implsrc {
- infcx.report_selection_error(
- obligation.clone(),
- &obligation,
- &e,
- false,
- );
- }
- });
+ let infcx = tcx.infer_ctxt().build();
+ if let Err(e) = implsrc {
+ infcx.err_ctxt().report_selection_error(
+ obligation.clone(),
+ &obligation,
+ &e,
+ false,
+ );
+ }
self.check_op(ops::FnCallNonConst {
caller,
@@ -863,8 +909,6 @@ impl<'tcx> Visitor<'tcx> for Checker<'_, 'tcx> {
return;
}
- let is_intrinsic = tcx.is_intrinsic(callee);
-
if !tcx.is_const_fn_raw(callee) {
if !tcx.is_const_default_method(callee) {
// To get to here we must have already found a const impl for the
@@ -924,7 +968,7 @@ impl<'tcx> Visitor<'tcx> for Checker<'_, 'tcx> {
// We do not use `const` modifiers for intrinsic "functions", as intrinsics are
// `extern` functions, and these have no way to get marked `const`. So instead we
// use `rustc_const_(un)stable` attributes to mean that the intrinsic is `const`
- if self.ccx.is_const_stable_const_fn() || is_intrinsic {
+ if self.ccx.is_const_stable_const_fn() || tcx.is_intrinsic(callee) {
self.check_op(ops::FnCallUnstable(callee, None));
return;
}
@@ -964,7 +1008,10 @@ impl<'tcx> Visitor<'tcx> for Checker<'_, 'tcx> {
if needs_non_const_drop {
self.check_op_spanned(
- ops::LiveDrop { dropped_at: Some(terminator.source_info.span) },
+ ops::LiveDrop {
+ dropped_at: Some(terminator.source_info.span),
+ dropped_ty: ty_of_dropped_place,
+ },
err_span,
);
}
diff --git a/compiler/rustc_const_eval/src/transform/check_consts/ops.rs b/compiler/rustc_const_eval/src/transform/check_consts/ops.rs
index 338022616..b28d70194 100644
--- a/compiler/rustc_const_eval/src/transform/check_consts/ops.rs
+++ b/compiler/rustc_const_eval/src/transform/check_consts/ops.rs
@@ -1,6 +1,7 @@
//! Concrete error types for all operations which may be invalid in a certain const context.
use hir::def_id::LocalDefId;
+use hir::ConstContext;
use rustc_errors::{
error_code, struct_span_err, Applicability, DiagnosticBuilder, ErrorGuaranteed,
};
@@ -23,8 +24,11 @@ use rustc_trait_selection::traits::SelectionContext;
use super::ConstCx;
use crate::errors::{
- MutDerefErr, NonConstOpErr, PanicNonStrErr, RawPtrToIntErr, StaticAccessErr,
- TransientMutBorrowErr, TransientMutBorrowErrRaw,
+ InteriorMutabilityBorrow, InteriorMutableDataRefer, MutDerefErr, NonConstFmtMacroCall,
+ NonConstFnCall, NonConstOpErr, PanicNonStrErr, RawPtrToIntErr, StaticAccessErr,
+ TransientMutBorrowErr, TransientMutBorrowErrRaw, UnallowedFnPointerCall,
+ UnallowedHeapAllocations, UnallowedInlineAsm, UnallowedMutableRefs, UnallowedMutableRefsRaw,
+ UnallowedOpInConstContext, UnstableConstFn,
};
use crate::util::{call_kind, CallDesugaringKind, CallKind};
@@ -96,10 +100,7 @@ impl<'tcx> NonConstOp<'tcx> for FnCallIndirect {
ccx: &ConstCx<'_, 'tcx>,
span: Span,
) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> {
- ccx.tcx.sess.struct_span_err(
- span,
- &format!("function pointer calls are not allowed in {}s", ccx.const_kind()),
- )
+ ccx.tcx.sess.create_err(UnallowedFnPointerCall { span, kind: ccx.const_kind() })
}
}
@@ -155,10 +156,9 @@ impl<'tcx> NonConstOp<'tcx> for FnCallNonConst<'tcx> {
}),
);
- let implsrc = tcx.infer_ctxt().enter(|infcx| {
- let mut selcx = SelectionContext::new(&infcx);
- selcx.select(&obligation)
- });
+ let infcx = tcx.infer_ctxt().build();
+ let mut selcx = SelectionContext::new(&infcx);
+ let implsrc = selcx.select(&obligation);
if let Ok(Some(ImplSource::UserDefined(data))) = implsrc {
let span = tcx.def_span(data.impl_def_id);
@@ -307,22 +307,13 @@ impl<'tcx> NonConstOp<'tcx> for FnCallNonConst<'tcx> {
err
}
_ if tcx.opt_parent(callee) == tcx.get_diagnostic_item(sym::ArgumentV1Methods) => {
- struct_span_err!(
- ccx.tcx.sess,
- span,
- E0015,
- "cannot call non-const formatting macro in {}s",
- ccx.const_kind(),
- )
+ ccx.tcx.sess.create_err(NonConstFmtMacroCall { span, kind: ccx.const_kind() })
}
- _ => struct_span_err!(
- ccx.tcx.sess,
+ _ => ccx.tcx.sess.create_err(NonConstFnCall {
span,
- E0015,
- "cannot call non-const fn `{}` in {}s",
- ccx.tcx.def_path_str_with_substs(callee, substs),
- ccx.const_kind(),
- ),
+ def_path_str: ccx.tcx.def_path_str_with_substs(callee, substs),
+ kind: ccx.const_kind(),
+ }),
};
err.note(&format!(
@@ -331,6 +322,10 @@ impl<'tcx> NonConstOp<'tcx> for FnCallNonConst<'tcx> {
ccx.const_kind(),
));
+ if let ConstContext::Static(_) = ccx.const_kind() {
+ err.note("consider wrapping this expression in `Lazy::new(|| ...)` from the `once_cell` crate: https://crates.io/crates/once_cell");
+ }
+
err
}
}
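
A runnable sketch of the workaround the new note points at (assuming `once_cell` is added as a dependency; the exact initializer is illustrative):

```rust
use once_cell::sync::Lazy;

// The non-const initializer runs lazily at first access instead of at compile time.
static GREETING: Lazy<String> = Lazy::new(|| format!("hello, {}", "world"));

fn main() {
    println!("{}", *GREETING);
}
```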
@@ -349,10 +344,10 @@ impl<'tcx> NonConstOp<'tcx> for FnCallUnstable {
) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> {
let FnCallUnstable(def_id, feature) = *self;
- let mut err = ccx.tcx.sess.struct_span_err(
- span,
- &format!("`{}` is not yet stable as a const fn", ccx.tcx.def_path_str(def_id)),
- );
+ let mut err = ccx
+ .tcx
+ .sess
+ .create_err(UnstableConstFn { span, def_path: ccx.tcx.def_path_str(def_id) });
if ccx.is_const_stable_const_fn() {
err.help("const-stable functions can only call other const-stable functions");
@@ -387,9 +382,12 @@ impl<'tcx> NonConstOp<'tcx> for Generator {
) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> {
let msg = format!("{}s are not allowed in {}s", self.0, ccx.const_kind());
if let hir::GeneratorKind::Async(hir::AsyncGeneratorKind::Block) = self.0 {
- feature_err(&ccx.tcx.sess.parse_sess, sym::const_async_blocks, span, &msg)
+ ccx.tcx.sess.create_feature_err(
+ UnallowedOpInConstContext { span, msg },
+ sym::const_async_blocks,
+ )
} else {
- ccx.tcx.sess.struct_span_err(span, &msg)
+ ccx.tcx.sess.create_err(UnallowedOpInConstContext { span, msg })
}
}
}
@@ -402,23 +400,11 @@ impl<'tcx> NonConstOp<'tcx> for HeapAllocation {
ccx: &ConstCx<'_, 'tcx>,
span: Span,
) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> {
- let mut err = struct_span_err!(
- ccx.tcx.sess,
+ ccx.tcx.sess.create_err(UnallowedHeapAllocations {
span,
- E0010,
- "allocations are not allowed in {}s",
- ccx.const_kind()
- );
- err.span_label(span, format!("allocation not allowed in {}s", ccx.const_kind()));
- if ccx.tcx.sess.teach(&err.get_code().unwrap()) {
- err.note(
- "The value of statics and constants must be known at compile time, \
- and they live for the entire lifetime of a program. Creating a boxed \
- value allocates memory on the heap at runtime, and therefore cannot \
- be done at compile time.",
- );
- }
- err
+ kind: ccx.const_kind(),
+ teach: ccx.tcx.sess.teach(&error_code!(E0010)).then_some(()),
+ })
}
}
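
The migrated diagnostics use `teach: ...then_some(())` to turn the session's teach mode into an optional field that the diagnostic derive emits only when present. A tiny runnable illustration of that `bool` to `Option<()>` idiom:

```rust
fn main() {
    let teach = true;
    let field: Option<()> = teach.then_some(());
    assert_eq!(field, Some(()));
    assert_eq!(false.then_some(()), None);
}
```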
@@ -430,21 +416,16 @@ impl<'tcx> NonConstOp<'tcx> for InlineAsm {
ccx: &ConstCx<'_, 'tcx>,
span: Span,
) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> {
- struct_span_err!(
- ccx.tcx.sess,
- span,
- E0015,
- "inline assembly is not allowed in {}s",
- ccx.const_kind()
- )
+ ccx.tcx.sess.create_err(UnallowedInlineAsm { span, kind: ccx.const_kind() })
}
}
#[derive(Debug)]
-pub struct LiveDrop {
+pub struct LiveDrop<'tcx> {
pub dropped_at: Option<Span>,
+ pub dropped_ty: Ty<'tcx>,
}
-impl<'tcx> NonConstOp<'tcx> for LiveDrop {
+impl<'tcx> NonConstOp<'tcx> for LiveDrop<'tcx> {
fn build_error(
&self,
ccx: &ConstCx<'_, 'tcx>,
@@ -454,9 +435,13 @@ impl<'tcx> NonConstOp<'tcx> for LiveDrop {
ccx.tcx.sess,
span,
E0493,
- "destructors cannot be evaluated at compile-time"
+ "destructor of `{}` cannot be evaluated at compile-time",
+ self.dropped_ty,
+ );
+ err.span_label(
+ span,
+ format!("the destructor for this type cannot be evaluated in {}s", ccx.const_kind()),
);
- err.span_label(span, format!("{}s cannot evaluate destructors", ccx.const_kind()));
if let Some(span) = self.dropped_at {
err.span_label(span, "value is dropped here");
}
@@ -482,12 +467,7 @@ impl<'tcx> NonConstOp<'tcx> for TransientCellBorrow {
ccx: &ConstCx<'_, 'tcx>,
span: Span,
) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> {
- feature_err(
- &ccx.tcx.sess.parse_sess,
- sym::const_refs_to_cell,
- span,
- "cannot borrow here, since the borrowed element may contain interior mutability",
- )
+ ccx.tcx.sess.create_feature_err(InteriorMutabilityBorrow { span }, sym::const_refs_to_cell)
}
}
@@ -502,32 +482,22 @@ impl<'tcx> NonConstOp<'tcx> for CellBorrow {
ccx: &ConstCx<'_, 'tcx>,
span: Span,
) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> {
- let mut err = struct_span_err!(
- ccx.tcx.sess,
- span,
- E0492,
- "{}s cannot refer to interior mutable data",
- ccx.const_kind(),
- );
- err.span_label(
- span,
- "this borrow of an interior mutable value may end up in the final value",
- );
+        // FIXME: Maybe there is a more elegant solution to this if/else case
if let hir::ConstContext::Static(_) = ccx.const_kind() {
- err.help(
- "to fix this, the value can be extracted to a separate \
- `static` item and then referenced",
- );
- }
- if ccx.tcx.sess.teach(&err.get_code().unwrap()) {
- err.note(
- "A constant containing interior mutable data behind a reference can allow you
- to modify that data. This would make multiple uses of a constant to be able to
- see different values and allow circumventing the `Send` and `Sync` requirements
- for shared mutable data, which is unsound.",
- );
+ ccx.tcx.sess.create_err(InteriorMutableDataRefer {
+ span,
+ opt_help: Some(()),
+ kind: ccx.const_kind(),
+ teach: ccx.tcx.sess.teach(&error_code!(E0492)).then_some(()),
+ })
+ } else {
+ ccx.tcx.sess.create_err(InteriorMutableDataRefer {
+ span,
+ opt_help: None,
+ kind: ccx.const_kind(),
+ teach: ccx.tcx.sess.teach(&error_code!(E0492)).then_some(()),
+ })
}
- err
}
}
@@ -553,33 +523,18 @@ impl<'tcx> NonConstOp<'tcx> for MutBorrow {
ccx: &ConstCx<'_, 'tcx>,
span: Span,
) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> {
- let raw = match self.0 {
- hir::BorrowKind::Raw => "raw ",
- hir::BorrowKind::Ref => "",
- };
-
- let mut err = struct_span_err!(
- ccx.tcx.sess,
- span,
- E0764,
- "{}mutable references are not allowed in the final value of {}s",
- raw,
- ccx.const_kind(),
- );
-
- if ccx.tcx.sess.teach(&err.get_code().unwrap()) {
- err.note(
- "References in statics and constants may only refer \
- to immutable values.\n\n\
- Statics are shared everywhere, and if they refer to \
- mutable data one might violate memory safety since \
- holding multiple mutable references to shared data \
- is not allowed.\n\n\
- If you really want global mutable state, try using \
- static mut or a global UnsafeCell.",
- );
+ match self.0 {
+ hir::BorrowKind::Raw => ccx.tcx.sess.create_err(UnallowedMutableRefsRaw {
+ span,
+ kind: ccx.const_kind(),
+ teach: ccx.tcx.sess.teach(&error_code!(E0764)).then_some(()),
+ }),
+ hir::BorrowKind::Ref => ccx.tcx.sess.create_err(UnallowedMutableRefs {
+ span,
+ kind: ccx.const_kind(),
+ teach: ccx.tcx.sess.teach(&error_code!(E0764)).then_some(()),
+ }),
}
- err
}
}
diff --git a/compiler/rustc_const_eval/src/transform/check_consts/post_drop_elaboration.rs b/compiler/rustc_const_eval/src/transform/check_consts/post_drop_elaboration.rs
index 4e210f663..d4570c598 100644
--- a/compiler/rustc_const_eval/src/transform/check_consts/post_drop_elaboration.rs
+++ b/compiler/rustc_const_eval/src/transform/check_consts/post_drop_elaboration.rs
@@ -1,6 +1,6 @@
use rustc_middle::mir::visit::Visitor;
use rustc_middle::mir::{self, BasicBlock, Location};
-use rustc_middle::ty::TyCtxt;
+use rustc_middle::ty::{Ty, TyCtxt};
use rustc_span::{symbol::sym, Span};
use super::check::Qualifs;
@@ -58,9 +58,9 @@ impl<'mir, 'tcx> std::ops::Deref for CheckLiveDrops<'mir, 'tcx> {
}
}
-impl CheckLiveDrops<'_, '_> {
- fn check_live_drop(&self, span: Span) {
- ops::LiveDrop { dropped_at: None }.build_error(self.ccx, span).emit();
+impl<'tcx> CheckLiveDrops<'_, 'tcx> {
+ fn check_live_drop(&self, span: Span, dropped_ty: Ty<'tcx>) {
+ ops::LiveDrop { dropped_at: None, dropped_ty }.build_error(self.ccx, span).emit();
}
}
@@ -90,7 +90,7 @@ impl<'tcx> Visitor<'tcx> for CheckLiveDrops<'_, 'tcx> {
}
if dropped_place.is_indirect() {
- self.check_live_drop(terminator.source_info.span);
+ self.check_live_drop(terminator.source_info.span, dropped_ty);
return;
}
@@ -101,7 +101,7 @@ impl<'tcx> Visitor<'tcx> for CheckLiveDrops<'_, 'tcx> {
if self.qualifs.needs_non_const_drop(self.ccx, dropped_place.local, location) {
// Use the span where the dropped local was declared for the error.
let span = self.body.local_decls[dropped_place.local].source_info.span;
- self.check_live_drop(span);
+ self.check_live_drop(span, dropped_ty);
}
}
diff --git a/compiler/rustc_const_eval/src/transform/check_consts/qualifs.rs b/compiler/rustc_const_eval/src/transform/check_consts/qualifs.rs
index c8a63c9c3..335992342 100644
--- a/compiler/rustc_const_eval/src/transform/check_consts/qualifs.rs
+++ b/compiler/rustc_const_eval/src/transform/check_consts/qualifs.rs
@@ -5,12 +5,11 @@
use rustc_errors::ErrorGuaranteed;
use rustc_hir::LangItem;
use rustc_infer::infer::TyCtxtInferExt;
-use rustc_infer::traits::TraitEngine;
+use rustc_middle::mir;
use rustc_middle::mir::*;
use rustc_middle::ty::{self, subst::SubstsRef, AdtDef, Ty};
-use rustc_span::DUMMY_SP;
use rustc_trait_selection::traits::{
- self, ImplSource, Obligation, ObligationCause, SelectionContext, TraitEngineExt,
+ self, ImplSource, Obligation, ObligationCause, SelectionContext,
};
use super::ConstCx;
@@ -92,7 +91,7 @@ impl Qualif for HasMutInterior {
}
fn in_any_value_of_ty<'tcx>(cx: &ConstCx<'_, 'tcx>, ty: Ty<'tcx>) -> bool {
- !ty.is_freeze(cx.tcx.at(DUMMY_SP), cx.param_env)
+ !ty.is_freeze(cx.tcx, cx.param_env)
}
fn in_adt_inherently<'tcx>(
@@ -168,37 +167,28 @@ impl Qualif for NeedsNonConstDrop {
}),
);
- cx.tcx.infer_ctxt().enter(|infcx| {
- let mut selcx = SelectionContext::new(&infcx);
- let Some(impl_src) = selcx.select(&obligation).ok().flatten() else {
- // If we couldn't select a const destruct candidate, then it's bad
- return true;
- };
-
- if !matches!(
- impl_src,
- ImplSource::ConstDestruct(_)
- | ImplSource::Param(_, ty::BoundConstness::ConstIfConst)
- ) {
- // If our const destruct candidate is not ConstDestruct or implied by the param env,
- // then it's bad
- return true;
- }
-
- if impl_src.borrow_nested_obligations().is_empty() {
- return false;
- }
+ let infcx = cx.tcx.infer_ctxt().build();
+ let mut selcx = SelectionContext::new(&infcx);
+ let Some(impl_src) = selcx.select(&obligation).ok().flatten() else {
+ // If we couldn't select a const destruct candidate, then it's bad
+ return true;
+ };
+
+ if !matches!(
+ impl_src,
+ ImplSource::ConstDestruct(_) | ImplSource::Param(_, ty::BoundConstness::ConstIfConst)
+ ) {
+ // If our const destruct candidate is not ConstDestruct or implied by the param env,
+ // then it's bad
+ return true;
+ }
- // If we successfully found one, then select all of the predicates
- // implied by our const drop impl.
- let mut fcx = <dyn TraitEngine<'tcx>>::new(cx.tcx);
- for nested in impl_src.nested_obligations() {
- fcx.register_predicate_obligation(&infcx, nested);
- }
+ if impl_src.borrow_nested_obligations().is_empty() {
+ return false;
+ }
- // If we had any errors, then it's bad
- !fcx.select_all_or_error(&infcx).is_empty()
- })
+ // If we had any errors, then it's bad
+ !traits::fully_solve_obligations(&infcx, impl_src.nested_obligations()).is_empty()
}
fn in_adt_inherently<'tcx>(
@@ -316,6 +306,7 @@ where
ProjectionElem::Deref
| ProjectionElem::Field(_, _)
+ | ProjectionElem::OpaqueCast(_)
| ProjectionElem::ConstantIndex { .. }
| ProjectionElem::Subslice { .. }
| ProjectionElem::Downcast(_, _)
@@ -354,31 +345,36 @@ where
};
// Check the qualifs of the value of `const` items.
- if let Some(ct) = constant.literal.const_for_ty() {
- if let ty::ConstKind::Unevaluated(ty::Unevaluated { def, substs: _, promoted }) = ct.kind()
- {
- // Use qualifs of the type for the promoted. Promoteds in MIR body should be possible
- // only for `NeedsNonConstDrop` with precise drop checking. This is the only const
- // check performed after the promotion. Verify that with an assertion.
- assert!(promoted.is_none() || Q::ALLOW_PROMOTED);
- // Don't peek inside trait associated constants.
- if promoted.is_none() && cx.tcx.trait_of_item(def.did).is_none() {
- let qualifs = if let Some((did, param_did)) = def.as_const_arg() {
- cx.tcx.at(constant.span).mir_const_qualif_const_arg((did, param_did))
- } else {
- cx.tcx.at(constant.span).mir_const_qualif(def.did)
- };
-
- if !Q::in_qualifs(&qualifs) {
- return false;
- }
+ // FIXME(valtrees): check whether const qualifs should behave the same
+ // way for type and mir constants.
+ let uneval = match constant.literal {
+ ConstantKind::Ty(ct) if matches!(ct.kind(), ty::ConstKind::Param(_)) => None,
+ ConstantKind::Ty(c) => bug!("expected ConstKind::Param here, found {:?}", c),
+ ConstantKind::Unevaluated(uv, _) => Some(uv),
+ ConstantKind::Val(..) => None,
+ };
+
+ if let Some(mir::UnevaluatedConst { def, substs: _, promoted }) = uneval {
+ // Use qualifs of the type for the promoted. Promoteds in MIR body should be possible
+ // only for `NeedsNonConstDrop` with precise drop checking. This is the only const
+ // check performed after the promotion. Verify that with an assertion.
+ assert!(promoted.is_none() || Q::ALLOW_PROMOTED);
+
+ // Don't peek inside trait associated constants.
+ if promoted.is_none() && cx.tcx.trait_of_item(def.did).is_none() {
+ assert_eq!(def.const_param_did, None, "expected associated const: {def:?}");
+ let qualifs = cx.tcx.at(constant.span).mir_const_qualif(def.did);
- // Just in case the type is more specific than
- // the definition, e.g., impl associated const
- // with type parameters, take it into account.
+ if !Q::in_qualifs(&qualifs) {
+ return false;
}
+
+ // Just in case the type is more specific than
+ // the definition, e.g., impl associated const
+ // with type parameters, take it into account.
}
}
+
// Otherwise use the qualifs of the type.
Q::in_any_value_of_ty(cx, constant.literal.ty())
}
diff --git a/compiler/rustc_const_eval/src/transform/check_consts/resolver.rs b/compiler/rustc_const_eval/src/transform/check_consts/resolver.rs
index 60c1e4950..805e6096b 100644
--- a/compiler/rustc_const_eval/src/transform/check_consts/resolver.rs
+++ b/compiler/rustc_const_eval/src/transform/check_consts/resolver.rs
@@ -8,7 +8,6 @@ use rustc_middle::mir::{self, BasicBlock, Local, Location, Statement, StatementK
use rustc_mir_dataflow::fmt::DebugWithContext;
use rustc_mir_dataflow::JoinSemiLattice;
use rustc_mir_dataflow::{Analysis, AnalysisDomain, CallReturnPlaces};
-use rustc_span::DUMMY_SP;
use std::fmt;
use std::marker::PhantomData;
@@ -120,10 +119,7 @@ where
///
/// [rust-lang/unsafe-code-guidelines#134]: https://github.com/rust-lang/unsafe-code-guidelines/issues/134
fn shared_borrow_allows_mutation(&self, place: mir::Place<'tcx>) -> bool {
- !place
- .ty(self.ccx.body, self.ccx.tcx)
- .ty
- .is_freeze(self.ccx.tcx.at(DUMMY_SP), self.ccx.param_env)
+ !place.ty(self.ccx.body, self.ccx.tcx).ty.is_freeze(self.ccx.tcx, self.ccx.param_env)
}
}
diff --git a/compiler/rustc_const_eval/src/transform/promote_consts.rs b/compiler/rustc_const_eval/src/transform/promote_consts.rs
index ed4d8c95d..f3ae16da4 100644
--- a/compiler/rustc_const_eval/src/transform/promote_consts.rs
+++ b/compiler/rustc_const_eval/src/transform/promote_consts.rs
@@ -13,6 +13,7 @@
//! move analysis runs after promotion on broken MIR.
use rustc_hir as hir;
+use rustc_middle::mir;
use rustc_middle::mir::traversal::ReversePostorderIter;
use rustc_middle::mir::visit::{MutVisitor, MutatingUseContext, PlaceContext, Visitor};
use rustc_middle::mir::*;
@@ -40,10 +41,6 @@ pub struct PromoteTemps<'tcx> {
}
impl<'tcx> MirPass<'tcx> for PromoteTemps<'tcx> {
- fn phase_change(&self) -> Option<MirPhase> {
- Some(MirPhase::ConstsPromoted)
- }
-
fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
// There's not really any point in promoting errorful MIR.
//
@@ -361,7 +358,7 @@ impl<'tcx> Validator<'_, 'tcx> {
return Err(Unpromotable);
}
}
- ProjectionElem::Downcast(..) => {
+ ProjectionElem::OpaqueCast(..) | ProjectionElem::Downcast(..) => {
return Err(Unpromotable);
}
@@ -710,7 +707,7 @@ impl<'a, 'tcx> Promoter<'a, 'tcx> {
}
fn assign(&mut self, dest: Local, rvalue: Rvalue<'tcx>, span: Span) {
- let last = self.promoted.basic_blocks().last().unwrap();
+ let last = self.promoted.basic_blocks.last().unwrap();
let data = &mut self.promoted[last];
data.statements.push(Statement {
source_info: SourceInfo::outermost(span),
@@ -803,7 +800,7 @@ impl<'a, 'tcx> Promoter<'a, 'tcx> {
self.visit_operand(arg, loc);
}
- let last = self.promoted.basic_blocks().last().unwrap();
+ let last = self.promoted.basic_blocks.last().unwrap();
let new_target = self.new_block();
*self.promoted[last].terminator_mut() = Terminator {
@@ -839,27 +836,16 @@ impl<'a, 'tcx> Promoter<'a, 'tcx> {
let mut promoted_operand = |ty, span| {
promoted.span = span;
promoted.local_decls[RETURN_PLACE] = LocalDecl::new(ty, span);
- let _const = tcx.mk_const(ty::ConstS {
- ty,
- kind: ty::ConstKind::Unevaluated(ty::Unevaluated {
- def,
- substs: InternalSubsts::for_item(tcx, def.did, |param, _| {
- if let ty::GenericParamDefKind::Lifetime = param.kind {
- tcx.lifetimes.re_erased.into()
- } else {
- tcx.mk_param_from_def(param)
- }
- }),
- promoted: Some(promoted_id),
- }),
- });
+ let substs = tcx.erase_regions(InternalSubsts::identity_for_item(tcx, def.did));
+ let uneval = mir::UnevaluatedConst { def, substs, promoted: Some(promoted_id) };
Operand::Constant(Box::new(Constant {
span,
user_ty: None,
- literal: ConstantKind::from_const(_const, tcx),
+ literal: ConstantKind::Unevaluated(uneval, ty),
}))
};
+
let blocks = self.source.basic_blocks.as_mut();
let local_decls = &mut self.source.local_decls;
let loc = candidate.location;
@@ -969,7 +955,7 @@ pub fn promote_candidates<'tcx>(
let mut scope = body.source_scopes[body.source_info(candidate.location).scope].clone();
scope.parent_scope = None;
- let promoted = Body::new(
+ let mut promoted = Body::new(
body.source, // `promoted` gets filled in below
IndexVec::new(),
IndexVec::from_elem_n(scope, 1),
@@ -981,6 +967,7 @@ pub fn promote_candidates<'tcx>(
body.generator_kind(),
body.tainted_by_errors,
);
+ promoted.phase = MirPhase::Analysis(AnalysisPhase::Initial);
let promoter = Promoter {
promoted,
@@ -1046,7 +1033,7 @@ pub fn is_const_fn_in_array_repeat_expression<'tcx>(
_ => {}
}
- for block in body.basic_blocks() {
+ for block in body.basic_blocks.iter() {
if let Some(Terminator { kind: TerminatorKind::Call { func, destination, .. }, .. }) =
&block.terminator
{
diff --git a/compiler/rustc_const_eval/src/transform/validate.rs b/compiler/rustc_const_eval/src/transform/validate.rs
index 15e820f2d..81b82a21f 100644
--- a/compiler/rustc_const_eval/src/transform/validate.rs
+++ b/compiler/rustc_const_eval/src/transform/validate.rs
@@ -7,12 +7,12 @@ use rustc_middle::mir::interpret::Scalar;
use rustc_middle::mir::visit::NonUseContext::VarDebugInfo;
use rustc_middle::mir::visit::{PlaceContext, Visitor};
use rustc_middle::mir::{
- traversal, AggregateKind, BasicBlock, BinOp, Body, BorrowKind, CastKind, Local, Location,
- MirPass, MirPhase, Operand, Place, PlaceElem, PlaceRef, ProjectionElem, Rvalue, SourceScope,
- Statement, StatementKind, Terminator, TerminatorKind, UnOp, START_BLOCK,
+ traversal, AggregateKind, BasicBlock, BinOp, Body, BorrowKind, CastKind, CopyNonOverlapping,
+ Local, Location, MirPass, MirPhase, NonDivergingIntrinsic, Operand, Place, PlaceElem, PlaceRef,
+ ProjectionElem, RuntimePhase, Rvalue, SourceScope, Statement, StatementKind, Terminator,
+ TerminatorKind, UnOp, START_BLOCK,
};
use rustc_middle::ty::fold::BottomUpFolder;
-use rustc_middle::ty::subst::Subst;
use rustc_middle::ty::{self, InstanceDef, ParamEnv, Ty, TyCtxt, TypeFoldable, TypeVisitable};
use rustc_mir_dataflow::impls::MaybeStorageLive;
use rustc_mir_dataflow::storage::always_storage_live_locals;
@@ -89,9 +89,8 @@ pub fn equal_up_to_regions<'tcx>(
// Normalize lifetimes away on both sides, then compare.
let normalize = |ty: Ty<'tcx>| {
- tcx.normalize_erasing_regions(
- param_env,
- ty.fold_with(&mut BottomUpFolder {
+ tcx.try_normalize_erasing_regions(param_env, ty).unwrap_or(ty).fold_with(
+ &mut BottomUpFolder {
tcx,
// FIXME: We erase all late-bound lifetimes, but this is not fully correct.
// If you have a type like `<for<'a> fn(&'a u32) as SomeTrait>::Assoc`,
@@ -103,10 +102,10 @@ pub fn equal_up_to_regions<'tcx>(
// Leave consts and types unchanged.
ct_op: |ct| ct,
ty_op: |ty| ty,
- }),
+ },
)
};
- tcx.infer_ctxt().enter(|infcx| infcx.can_eq(param_env, normalize(src), normalize(dest)).is_ok())
+ tcx.infer_ctxt().build().can_eq(param_env, normalize(src), normalize(dest)).is_ok()
}
struct TypeChecker<'a, 'tcx> {
@@ -142,8 +141,8 @@ impl<'a, 'tcx> TypeChecker<'a, 'tcx> {
if bb == START_BLOCK {
self.fail(location, "start block must not have predecessors")
}
- if let Some(bb) = self.body.basic_blocks().get(bb) {
- let src = self.body.basic_blocks().get(location.block).unwrap();
+ if let Some(bb) = self.body.basic_blocks.get(bb) {
+ let src = self.body.basic_blocks.get(location.block).unwrap();
match (src.is_cleanup, bb.is_cleanup, edge_kind) {
// Non-cleanup blocks can jump to non-cleanup blocks along non-unwind edges
(false, false, EdgeKind::Normal)
@@ -183,16 +182,23 @@ impl<'a, 'tcx> TypeChecker<'a, 'tcx> {
if (src, dest).has_opaque_types() {
return true;
}
- // Normalize projections and things like that.
- let param_env = self.param_env.with_reveal_all_normalized(self.tcx);
- let src = self.tcx.normalize_erasing_regions(param_env, src);
- let dest = self.tcx.normalize_erasing_regions(param_env, dest);
+ // Normalize projections and things like that.
// Type-changing assignments can happen when subtyping is used. While
// all normal lifetimes are erased, higher-ranked types with their
// late-bound lifetimes are still around and can lead to type
// differences. So we compare ignoring lifetimes.
- equal_up_to_regions(self.tcx, param_env, src, dest)
+
+ // First, try with reveal_all. This might not work in some cases, as the predicates
+        // can be cleared in reveal_all mode. We try the reveal first anyway, as it is used
+ // by some other passes like inlining as well.
+ let param_env = self.param_env.with_reveal_all_normalized(self.tcx);
+ if equal_up_to_regions(self.tcx, param_env, src, dest) {
+ return true;
+ }
+
+ // If this fails, we can try it without the reveal.
+ equal_up_to_regions(self.tcx, self.param_env, src, dest)
}
}
@@ -223,14 +229,14 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
fn visit_operand(&mut self, operand: &Operand<'tcx>, location: Location) {
// This check is somewhat expensive, so only run it when -Zvalidate-mir is passed.
- if self.tcx.sess.opts.unstable_opts.validate_mir && self.mir_phase < MirPhase::DropsLowered
+ if self.tcx.sess.opts.unstable_opts.validate_mir
+ && self.mir_phase < MirPhase::Runtime(RuntimePhase::Initial)
{
// `Operand::Copy` is only supposed to be used with `Copy` types.
if let Operand::Copy(place) = operand {
let ty = place.ty(&self.body.local_decls, self.tcx).ty;
- let span = self.body.source_info(location).span;
- if !ty.is_copy_modulo_regions(self.tcx.at(span), self.param_env) {
+ if !ty.is_copy_modulo_regions(self.tcx, self.param_env) {
self.fail(location, format!("`Operand::Copy` with non-`Copy` type {}", ty));
}
}
@@ -254,7 +260,9 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
self.fail(location, format!("bad index ({:?} != usize)", index_ty))
}
}
- ProjectionElem::Deref if self.mir_phase >= MirPhase::GeneratorsLowered => {
+ ProjectionElem::Deref
+ if self.mir_phase >= MirPhase::Runtime(RuntimePhase::PostCleanup) =>
+ {
let base_ty = Place::ty_from(local, proj_base, &self.body.local_decls, self.tcx).ty;
if base_ty.is_box() {
@@ -275,7 +283,7 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
this.fail(
location,
format!(
- "Field projection `{:?}.{:?}` specified type `{:?}`, but actual type is {:?}",
+ "Field projection `{:?}.{:?}` specified type `{:?}`, but actual type is `{:?}`",
parent, f, ty, f_ty
)
)
@@ -362,7 +370,7 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
// Set off any `bug!`s in the type computation code
let _ = place.ty(&self.body.local_decls, self.tcx);
- if self.mir_phase >= MirPhase::Derefered
+ if self.mir_phase >= MirPhase::Runtime(RuntimePhase::Initial)
&& place.projection.len() > 1
&& cntxt != PlaceContext::NonUse(VarDebugInfo)
&& place.projection[1..].contains(&ProjectionElem::Deref)
@@ -386,8 +394,7 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
Rvalue::Aggregate(agg_kind, _) => {
let disallowed = match **agg_kind {
AggregateKind::Array(..) => false,
- AggregateKind::Generator(..) => self.mir_phase >= MirPhase::GeneratorsLowered,
- _ => self.mir_phase >= MirPhase::Deaggregated,
+ _ => self.mir_phase >= MirPhase::Runtime(RuntimePhase::PostCleanup),
};
if disallowed {
self.fail(
@@ -397,10 +404,10 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
}
}
Rvalue::Ref(_, BorrowKind::Shallow, _) => {
- if self.mir_phase >= MirPhase::DropsLowered {
+ if self.mir_phase >= MirPhase::Runtime(RuntimePhase::Initial) {
self.fail(
location,
- "`Assign` statement with a `Shallow` borrow should have been removed after drop lowering phase",
+ "`Assign` statement with a `Shallow` borrow should have been removed in runtime MIR",
);
}
}
@@ -548,22 +555,40 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
check_kinds!(a, "Cannot shallow init type {:?}", ty::RawPtr(..));
}
Rvalue::Cast(kind, operand, target_type) => {
+ let op_ty = operand.ty(self.body, self.tcx);
match kind {
- CastKind::Misc => {
- let op_ty = operand.ty(self.body, self.tcx);
- if op_ty.is_enum() {
+ CastKind::DynStar => {
+ // FIXME(dyn-star): make sure nothing needs to be done here.
+ }
+                    // FIXME: Add checks for these
+ CastKind::PointerFromExposedAddress
+ | CastKind::PointerExposeAddress
+ | CastKind::Pointer(_) => {}
+ CastKind::IntToInt | CastKind::IntToFloat => {
+ let input_valid = op_ty.is_integral() || op_ty.is_char() || op_ty.is_bool();
+ let target_valid = target_type.is_numeric() || target_type.is_char();
+ if !input_valid || !target_valid {
+ self.fail(
+ location,
+ format!("Wrong cast kind {kind:?} for the type {op_ty}",),
+ );
+ }
+ }
+ CastKind::FnPtrToPtr | CastKind::PtrToPtr => {
+ if !(op_ty.is_any_ptr() && target_type.is_unsafe_ptr()) {
+                            self.fail(location, format!("Can't cast {op_ty} into 'Ptr'"));
+ }
+ }
+ CastKind::FloatToFloat | CastKind::FloatToInt => {
+ if !op_ty.is_floating_point() || !target_type.is_numeric() {
self.fail(
location,
format!(
- "enum -> int casts should go through `Rvalue::Discriminant`: {operand:?}:{op_ty} as {target_type}",
+ "Trying to cast non 'Float' as {kind:?} into {target_type:?}"
),
);
}
}
- // Nothing to check here
- CastKind::PointerFromExposedAddress
- | CastKind::PointerExposeAddress
- | CastKind::Pointer(_) => {}
}
}
Rvalue::Repeat(_, _)
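
The new per-kind checks replace the old catch-all `CastKind::Misc`. For orientation, a runnable example of ordinary casts and the MIR cast kinds they are expected to lower to (the kind names follow the validator above; the mapping is my reading of this hunk):

```rust
fn main() {
    let _int_to_int = 'a' as u32;        // IntToInt: char source is allowed
    let _int_to_float = 7u8 as f64;      // IntToFloat
    let _float_to_int = 3.9f32 as i32;   // FloatToInt
    let _float_to_float = 3.9f32 as f64; // FloatToFloat
    let p: *const u8 = &1u8;
    let _ptr_to_ptr = p as *const i8;    // PtrToPtr
    let _exposed = p as usize;           // PointerExposeAddress
}
```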
@@ -614,7 +639,7 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
}
}
StatementKind::AscribeUserType(..) => {
- if self.mir_phase >= MirPhase::DropsLowered {
+ if self.mir_phase >= MirPhase::Runtime(RuntimePhase::Initial) {
self.fail(
location,
"`AscribeUserType` should have been removed after drop lowering phase",
@@ -622,18 +647,25 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
}
}
StatementKind::FakeRead(..) => {
- if self.mir_phase >= MirPhase::DropsLowered {
+ if self.mir_phase >= MirPhase::Runtime(RuntimePhase::Initial) {
self.fail(
location,
"`FakeRead` should have been removed after drop lowering phase",
);
}
}
- StatementKind::CopyNonOverlapping(box rustc_middle::mir::CopyNonOverlapping {
- ref src,
- ref dst,
- ref count,
- }) => {
+ StatementKind::Intrinsic(box NonDivergingIntrinsic::Assume(op)) => {
+ let ty = op.ty(&self.body.local_decls, self.tcx);
+ if !ty.is_bool() {
+ self.fail(
+ location,
+ format!("`assume` argument must be `bool`, but got: `{}`", ty),
+ );
+ }
+ }
+ StatementKind::Intrinsic(box NonDivergingIntrinsic::CopyNonOverlapping(
+ CopyNonOverlapping { src, dst, count },
+ )) => {
let src_ty = src.ty(&self.body.local_decls, self.tcx);
let op_src_ty = if let Some(src_deref) = src_ty.builtin_deref(true) {
src_deref.ty
@@ -666,7 +698,7 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
}
}
StatementKind::SetDiscriminant { place, .. } => {
- if self.mir_phase < MirPhase::Deaggregated {
+ if self.mir_phase < MirPhase::Runtime(RuntimePhase::Initial) {
self.fail(location, "`SetDiscriminant`is not allowed until deaggregation");
}
let pty = place.ty(&self.body.local_decls, self.tcx).ty.kind();
@@ -681,7 +713,7 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
}
}
StatementKind::Deinit(..) => {
- if self.mir_phase < MirPhase::Deaggregated {
+ if self.mir_phase < MirPhase::Runtime(RuntimePhase::Initial) {
self.fail(location, "`Deinit`is not allowed until deaggregation");
}
}
@@ -761,7 +793,7 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
}
}
TerminatorKind::DropAndReplace { target, unwind, .. } => {
- if self.mir_phase >= MirPhase::DropsLowered {
+ if self.mir_phase >= MirPhase::Runtime(RuntimePhase::Initial) {
self.fail(
location,
"`DropAndReplace` should have been removed during drop elaboration",
@@ -832,7 +864,7 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
if self.body.generator.is_none() {
self.fail(location, "`Yield` cannot appear outside generator bodies");
}
- if self.mir_phase >= MirPhase::GeneratorsLowered {
+ if self.mir_phase >= MirPhase::Runtime(RuntimePhase::Initial) {
self.fail(location, "`Yield` should have been replaced by generator lowering");
}
self.check_edge(location, *resume, EdgeKind::Normal);
@@ -841,7 +873,7 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
}
}
TerminatorKind::FalseEdge { real_target, imaginary_target } => {
- if self.mir_phase >= MirPhase::DropsLowered {
+ if self.mir_phase >= MirPhase::Runtime(RuntimePhase::Initial) {
self.fail(
location,
"`FalseEdge` should have been removed after drop elaboration",
@@ -851,7 +883,7 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
self.check_edge(location, *imaginary_target, EdgeKind::Normal);
}
TerminatorKind::FalseUnwind { real_target, unwind } => {
- if self.mir_phase >= MirPhase::DropsLowered {
+ if self.mir_phase >= MirPhase::Runtime(RuntimePhase::Initial) {
self.fail(
location,
"`FalseUnwind` should have been removed after drop elaboration",
@@ -874,7 +906,7 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
if self.body.generator.is_none() {
self.fail(location, "`GeneratorDrop` cannot appear outside generator bodies");
}
- if self.mir_phase >= MirPhase::GeneratorsLowered {
+ if self.mir_phase >= MirPhase::Runtime(RuntimePhase::Initial) {
self.fail(
location,
"`GeneratorDrop` should have been replaced by generator lowering",
@@ -883,13 +915,13 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
}
TerminatorKind::Resume | TerminatorKind::Abort => {
let bb = location.block;
- if !self.body.basic_blocks()[bb].is_cleanup {
+ if !self.body.basic_blocks[bb].is_cleanup {
self.fail(location, "Cannot `Resume` or `Abort` from non-cleanup basic block")
}
}
TerminatorKind::Return => {
let bb = location.block;
- if self.body.basic_blocks()[bb].is_cleanup {
+ if self.body.basic_blocks[bb].is_cleanup {
self.fail(location, "Cannot `Return` from cleanup basic block")
}
}
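
The phase checks above all compare against `MirPhase::Runtime(RuntimePhase::Initial)` or a later phase, relying on a total order over phases. As a rough illustration of how such derived `>=` comparisons behave, here is a minimal standalone sketch with toy enums only; these are not the actual `MirPhase`/`RuntimePhase` definitions from `rustc_middle`:

```rust
#[derive(Debug, PartialEq, Eq, PartialOrd, Ord)]
enum RuntimeStage {
    Initial,
    PostCleanup,
    Optimized,
}

#[derive(Debug, PartialEq, Eq, PartialOrd, Ord)]
enum Phase {
    Built,
    Analysis,
    Runtime(RuntimeStage),
}

fn main() {
    // Earlier variants order before later ones, and runtime stages order among
    // themselves, so a single `>=` check covers "this phase or anything later".
    assert!(Phase::Built < Phase::Analysis);
    assert!(Phase::Analysis < Phase::Runtime(RuntimeStage::Initial));
    assert!(Phase::Runtime(RuntimeStage::PostCleanup) >= Phase::Runtime(RuntimeStage::Initial));
    assert!(Phase::Runtime(RuntimeStage::Optimized) >= Phase::Runtime(RuntimeStage::Initial));
}
```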
diff --git a/compiler/rustc_const_eval/src/util/might_permit_raw_init.rs b/compiler/rustc_const_eval/src/util/might_permit_raw_init.rs
new file mode 100644
index 000000000..6ca712233
--- /dev/null
+++ b/compiler/rustc_const_eval/src/util/might_permit_raw_init.rs
@@ -0,0 +1,151 @@
+use rustc_middle::ty::layout::{LayoutCx, LayoutOf, TyAndLayout};
+use rustc_middle::ty::{ParamEnv, TyCtxt};
+use rustc_session::Limit;
+use rustc_target::abi::{Abi, FieldsShape, InitKind, Scalar, Variants};
+
+use crate::const_eval::CompileTimeInterpreter;
+use crate::interpret::{InterpCx, MemoryKind, OpTy};
+
+/// Determines if this type permits "raw" initialization by just transmuting some memory into an
+/// instance of `T`.
+///
+/// `init_kind` indicates if the memory is zero-initialized or left uninitialized. We assume
+/// uninitialized memory is mitigated by filling it with 0x01, which reduces the chance of causing
+/// LLVM UB.
+///
+/// By default we check whether that operation would cause *LLVM UB*, i.e., whether the LLVM IR we
+/// generate has UB or not. This is a mitigation strategy, which is why we are okay with accepting
+/// Rust UB as long as there is no risk of miscompilations. The `strict_init_checks` can be set to
+/// do a full check against Rust UB instead (in which case we will also ignore the 0x01-filling and
+/// do the full uninit check).
+pub fn might_permit_raw_init<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ ty: TyAndLayout<'tcx>,
+ kind: InitKind,
+) -> bool {
+ if tcx.sess.opts.unstable_opts.strict_init_checks {
+ might_permit_raw_init_strict(ty, tcx, kind)
+ } else {
+ let layout_cx = LayoutCx { tcx, param_env: ParamEnv::reveal_all() };
+ might_permit_raw_init_lax(ty, &layout_cx, kind)
+ }
+}
+
+/// Implements the 'strict' version of the `might_permit_raw_init` checks; see that function for
+/// details.
+fn might_permit_raw_init_strict<'tcx>(
+ ty: TyAndLayout<'tcx>,
+ tcx: TyCtxt<'tcx>,
+ kind: InitKind,
+) -> bool {
+ let machine = CompileTimeInterpreter::new(
+ Limit::new(0),
+ /*can_access_statics:*/ false,
+ /*check_alignment:*/ true,
+ );
+
+ let mut cx = InterpCx::new(tcx, rustc_span::DUMMY_SP, ParamEnv::reveal_all(), machine);
+
+ let allocated = cx
+ .allocate(ty, MemoryKind::Machine(crate::const_eval::MemoryKind::Heap))
+ .expect("OOM: failed to allocate for uninit check");
+
+ if kind == InitKind::Zero {
+ cx.write_bytes_ptr(
+ allocated.ptr,
+ std::iter::repeat(0_u8).take(ty.layout.size().bytes_usize()),
+ )
+ .expect("failed to write bytes for zero valid check");
+ }
+
+ let ot: OpTy<'_, _> = allocated.into();
+
+ // Assume that if it failed, it's a validation failure.
+ // This does *not* actually check that references are dereferenceable, but since all types that
+ // require dereferenceability also require non-null, we don't actually get any false negatives
+ // due to this.
+ cx.validate_operand(&ot).is_ok()
+}
+
+/// Implements the 'lax' (default) version of the `might_permit_raw_init` checks; see that function for
+/// details.
+fn might_permit_raw_init_lax<'tcx>(
+ this: TyAndLayout<'tcx>,
+ cx: &LayoutCx<'tcx, TyCtxt<'tcx>>,
+ init_kind: InitKind,
+) -> bool {
+ let scalar_allows_raw_init = move |s: Scalar| -> bool {
+ match init_kind {
+ InitKind::Zero => {
+ // The range must contain 0.
+ s.valid_range(cx).contains(0)
+ }
+ InitKind::UninitMitigated0x01Fill => {
+ // The range must contain the 0x01-filled value.
+ let mut val: u128 = 0x01;
+ for _ in 1..s.size(cx).bytes() {
+ // For sizes >1, repeat the 0x01.
+ val = (val << 8) | 0x01;
+ }
+ s.valid_range(cx).contains(val)
+ }
+ }
+ };
+
+ // Check the ABI.
+ let valid = match this.abi {
+ Abi::Uninhabited => false, // definitely UB
+ Abi::Scalar(s) => scalar_allows_raw_init(s),
+ Abi::ScalarPair(s1, s2) => scalar_allows_raw_init(s1) && scalar_allows_raw_init(s2),
+ Abi::Vector { element: s, count } => count == 0 || scalar_allows_raw_init(s),
+ Abi::Aggregate { .. } => true, // Fields are checked below.
+ };
+ if !valid {
+ // This is definitely not okay.
+ return false;
+ }
+
+ // Special magic check for references and boxes (i.e., special pointer types).
+ if let Some(pointee) = this.ty.builtin_deref(false) {
+ let pointee = cx.layout_of(pointee.ty).expect("need to be able to compute layouts");
+ // We need to ensure that the LLVM attributes `aligned` and `dereferenceable(size)` are satisfied.
+ if pointee.align.abi.bytes() > 1 {
+ // 0x01-filling is not aligned.
+ return false;
+ }
+ if pointee.size.bytes() > 0 {
+ // A 'fake' integer pointer is not sufficiently dereferenceable.
+ return false;
+ }
+ }
+
+ // If we have not found an error yet, we need to recursively descend into fields.
+ match &this.fields {
+ FieldsShape::Primitive | FieldsShape::Union { .. } => {}
+ FieldsShape::Array { .. } => {
+ // Arrays never have scalar layout in LLVM, so if the array is not actually
+ // accessed, there is no LLVM UB -- therefore we can skip this.
+ }
+ FieldsShape::Arbitrary { offsets, .. } => {
+ for idx in 0..offsets.len() {
+ if !might_permit_raw_init_lax(this.field(cx, idx), cx, init_kind) {
+ // We found a field that is unhappy with this kind of initialization.
+ return false;
+ }
+ }
+ }
+ }
+
+ match &this.variants {
+ Variants::Single { .. } => {
+ // All fields of this single variant have already been checked above, there is nothing
+ // else to do.
+ }
+ Variants::Multiple { .. } => {
+ // We cannot tell LLVM anything about the details of this multi-variant layout, so
+ // invalid values "hidden" inside the variant cannot cause LLVM trouble.
+ }
+ }
+
+ true
+}
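
The lax scalar check above boils down to asking whether the type's valid range contains the value obtained by repeating 0x01 (or, for `InitKind::Zero`, 0x00) across the scalar's bytes. A self-contained sketch of that arithmetic, modeling the valid range as an ordinary inclusive range rather than rustc's wrapping `valid_range`:

```rust
fn one_filled_value(size_in_bytes: u64) -> u128 {
    let mut val: u128 = 0x01;
    for _ in 1..size_in_bytes {
        // For sizes > 1, repeat the 0x01 byte.
        val = (val << 8) | 0x01;
    }
    val
}

fn main() {
    // A 4-byte scalar filled with 0x01 bytes holds the value 0x01010101.
    assert_eq!(one_filled_value(4), 0x0101_0101);
    // `bool` (valid range 0..=1) accepts a one-byte 0x01 fill...
    assert!((0u128..=1).contains(&one_filled_value(1)));
    // ...while `char` (valid range 0..=0x10FFFF over 4 bytes) rejects 0x01010101.
    assert!(!(0u128..=0x10FFFF).contains(&one_filled_value(4)));
}
```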
diff --git a/compiler/rustc_const_eval/src/util/mod.rs b/compiler/rustc_const_eval/src/util/mod.rs
index a1876bed8..7a05cfd23 100644
--- a/compiler/rustc_const_eval/src/util/mod.rs
+++ b/compiler/rustc_const_eval/src/util/mod.rs
@@ -3,8 +3,10 @@ mod alignment;
mod call_kind;
pub mod collect_writes;
mod find_self_call;
+mod might_permit_raw_init;
pub use self::aggregate::expand_aggregate;
pub use self::alignment::is_disaligned;
pub use self::call_kind::{call_kind, CallDesugaringKind, CallKind};
pub use self::find_self_call::find_self_call;
+pub use self::might_permit_raw_init::might_permit_raw_init;
diff --git a/compiler/rustc_data_structures/Cargo.toml b/compiler/rustc_data_structures/Cargo.toml
index 5c641f54f..9daa21ef6 100644
--- a/compiler/rustc_data_structures/Cargo.toml
+++ b/compiler/rustc_data_structures/Cargo.toml
@@ -4,29 +4,29 @@ version = "0.0.0"
edition = "2021"
[lib]
-doctest = false
[dependencies]
arrayvec = { version = "0.7", default-features = false }
+bitflags = "1.2.1"
+cfg-if = "0.1.2"
ena = "0.14"
indexmap = { version = "1.9.1" }
-tracing = "0.1"
jobserver_crate = { version = "0.1.13", package = "jobserver" }
-rustc_serialize = { path = "../rustc_serialize" }
-rustc_macros = { path = "../rustc_macros" }
-rustc_graphviz = { path = "../rustc_graphviz" }
-cfg-if = "0.1.2"
-stable_deref_trait = "1.0.0"
-rayon = { version = "0.4.0", package = "rustc-rayon", optional = true }
+libc = "0.2"
+measureme = "10.0.0"
rayon-core = { version = "0.4.0", package = "rustc-rayon-core", optional = true }
+rayon = { version = "0.4.0", package = "rustc-rayon", optional = true }
+rustc_graphviz = { path = "../rustc_graphviz" }
rustc-hash = "1.1.0"
-smallvec = { version = "1.8.1", features = ["const_generics", "union", "may_dangle"] }
rustc_index = { path = "../rustc_index", package = "rustc_index" }
-bitflags = "1.2.1"
-measureme = "10.0.0"
-libc = "0.2"
+rustc_macros = { path = "../rustc_macros" }
+rustc_serialize = { path = "../rustc_serialize" }
+smallvec = { version = "1.8.1", features = ["const_generics", "union", "may_dangle"] }
+stable_deref_trait = "1.0.0"
stacker = "0.1.14"
tempfile = "3.2"
+thin-vec = "0.2.8"
+tracing = "0.1"
[dependencies.parking_lot]
version = "0.11"
diff --git a/compiler/rustc_data_structures/src/fingerprint.rs b/compiler/rustc_data_structures/src/fingerprint.rs
index 5ff2d18dd..a39178016 100644
--- a/compiler/rustc_data_structures/src/fingerprint.rs
+++ b/compiler/rustc_data_structures/src/fingerprint.rs
@@ -29,7 +29,7 @@ impl Fingerprint {
// quality hash values, let's still combine the two values because the
// Fingerprints in DefPathHash have the StableCrateId portion which is
// the same for all DefPathHashes from the same crate. Combining the
- // two halfs makes sure we get a good quality hash in such cases too.
+ // two halves makes sure we get a good quality hash in such cases too.
self.0.wrapping_mul(3).wrapping_add(self.1)
}
@@ -120,7 +120,7 @@ impl FingerprintHasher for crate::unhash::Unhasher {
// quality hash values, let's still combine the two values because the
// Fingerprints in DefPathHash have the StableCrateId portion which is
// the same for all DefPathHashes from the same crate. Combining the
- // two halfs makes sure we get a good quality hash in such cases too.
+ // two halves makes sure we get a good quality hash in such cases too.
//
// Since `Unhasher` is used only in the context of HashMaps, it is OK
// to combine the two components in an order-independent way (which is
diff --git a/compiler/rustc_data_structures/src/flock/linux.rs b/compiler/rustc_data_structures/src/flock/linux.rs
index bb3ecfbc3..9ed26e490 100644
--- a/compiler/rustc_data_structures/src/flock/linux.rs
+++ b/compiler/rustc_data_structures/src/flock/linux.rs
@@ -14,12 +14,7 @@ pub struct Lock {
impl Lock {
pub fn new(p: &Path, wait: bool, create: bool, exclusive: bool) -> io::Result<Lock> {
- let file = OpenOptions::new()
- .read(true)
- .write(true)
- .create(create)
- .mode(libc::S_IRWXU as u32)
- .open(p)?;
+ let file = OpenOptions::new().read(true).write(true).create(create).mode(0o600).open(p)?;
let mut operation = if exclusive { libc::LOCK_EX } else { libc::LOCK_SH };
if !wait {
diff --git a/compiler/rustc_data_structures/src/fx.rs b/compiler/rustc_data_structures/src/fx.rs
index bbeb193db..0d0c51b68 100644
--- a/compiler/rustc_data_structures/src/fx.rs
+++ b/compiler/rustc_data_structures/src/fx.rs
@@ -2,13 +2,26 @@ use std::hash::BuildHasherDefault;
pub use rustc_hash::{FxHashMap, FxHashSet, FxHasher};
+pub type StdEntry<'a, K, V> = std::collections::hash_map::Entry<'a, K, V>;
+
pub type FxIndexMap<K, V> = indexmap::IndexMap<K, V, BuildHasherDefault<FxHasher>>;
pub type FxIndexSet<V> = indexmap::IndexSet<V, BuildHasherDefault<FxHasher>>;
+pub type IndexEntry<'a, K, V> = indexmap::map::Entry<'a, K, V>;
#[macro_export]
macro_rules! define_id_collections {
- ($map_name:ident, $set_name:ident, $key:ty) => {
+ ($map_name:ident, $set_name:ident, $entry_name:ident, $key:ty) => {
pub type $map_name<T> = $crate::fx::FxHashMap<$key, T>;
pub type $set_name = $crate::fx::FxHashSet<$key>;
+ pub type $entry_name<'a, T> = $crate::fx::StdEntry<'a, $key, T>;
+ };
+}
+
+#[macro_export]
+macro_rules! define_stable_id_collections {
+ ($map_name:ident, $set_name:ident, $entry_name:ident, $key:ty) => {
+ pub type $map_name<T> = $crate::fx::FxIndexMap<$key, T>;
+ pub type $set_name = $crate::fx::FxIndexSet<$key>;
+ pub type $entry_name<'a, T> = $crate::fx::IndexEntry<'a, $key, T>;
};
}
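
The macros above now take a fourth identifier and additionally define an `Entry` alias for each keyed collection. A standalone analogue of the new shape, using `std` collections in place of the Fx/index types (the `Item*` names are purely illustrative):

```rust
use std::collections::{hash_map::Entry, HashMap, HashSet};

// Simplified local version of the macro, mirroring the new four-argument form.
macro_rules! define_id_collections {
    ($map_name:ident, $set_name:ident, $entry_name:ident, $key:ty) => {
        pub type $map_name<T> = HashMap<$key, T>;
        pub type $set_name = HashSet<$key>;
        pub type $entry_name<'a, T> = Entry<'a, $key, T>;
    };
}

pub type ItemId = u32;
define_id_collections!(ItemMap, ItemSet, ItemEntry, ItemId);

fn main() {
    let mut map: ItemMap<&str> = ItemMap::default();
    // The new `Entry` alias lets callers name the entry type in signatures.
    let entry: ItemEntry<'_, &str> = map.entry(0);
    entry.or_insert("zero");
    assert_eq!(map[&0], "zero");

    let ids: ItemSet = ItemSet::from([0]);
    assert!(ids.contains(&0));
}
```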
diff --git a/compiler/rustc_data_structures/src/graph/vec_graph/mod.rs b/compiler/rustc_data_structures/src/graph/vec_graph/mod.rs
index 3d91bcade..e8efbd09a 100644
--- a/compiler/rustc_data_structures/src/graph/vec_graph/mod.rs
+++ b/compiler/rustc_data_structures/src/graph/vec_graph/mod.rs
@@ -29,8 +29,8 @@ impl<N: Idx + Ord> VecGraph<N> {
// Store the *target* of each edge into `edge_targets`.
let edge_targets: Vec<N> = edge_pairs.iter().map(|&(_, target)| target).collect();
- // Create the *edge starts* array. We are iterating over over
- // the (sorted) edge pairs. We maintain the invariant that the
+ // Create the *edge starts* array. We are iterating over the
+ // (sorted) edge pairs. We maintain the invariant that the
// length of the `node_starts` array is enough to store the
// current source node -- so when we see that the source node
// for an edge is greater than the current length, we grow the
diff --git a/compiler/rustc_data_structures/src/lib.rs b/compiler/rustc_data_structures/src/lib.rs
index 265f45b72..3a2000233 100644
--- a/compiler/rustc_data_structures/src/lib.rs
+++ b/compiler/rustc_data_structures/src/lib.rs
@@ -13,7 +13,6 @@
#![feature(cell_leak)]
#![feature(control_flow_enum)]
#![feature(extend_one)]
-#![feature(let_else)]
#![feature(hash_raw_entry)]
#![feature(hasher_prefixfree_extras)]
#![feature(maybe_uninit_uninit_array)]
@@ -23,11 +22,14 @@
#![feature(new_uninit)]
#![feature(once_cell)]
#![feature(rustc_attrs)]
+#![feature(negative_impls)]
#![feature(test)]
#![feature(thread_id_value)]
#![feature(vec_into_raw_parts)]
#![allow(rustc::default_hash_types)]
#![allow(rustc::potential_query_instability)]
+#![deny(rustc::untranslatable_diagnostic)]
+#![deny(rustc::diagnostic_outside_of_impl)]
#[macro_use]
extern crate tracing;
@@ -73,7 +75,6 @@ pub mod profiling;
pub mod sharded;
pub mod stack;
pub mod sync;
-pub mod thin_vec;
pub mod tiny_list;
pub mod transitive_relation;
pub mod vec_linked_list;
@@ -86,6 +87,7 @@ pub mod steal;
pub mod tagged_ptr;
pub mod temp_dir;
pub mod unhash;
+pub mod unord;
pub use ena::undo_log;
pub use ena::unify;
diff --git a/compiler/rustc_data_structures/src/map_in_place.rs b/compiler/rustc_data_structures/src/map_in_place.rs
index 874de03d3..a0d4b7ade 100644
--- a/compiler/rustc_data_structures/src/map_in_place.rs
+++ b/compiler/rustc_data_structures/src/map_in_place.rs
@@ -1,5 +1,6 @@
use smallvec::{Array, SmallVec};
use std::ptr;
+use thin_vec::ThinVec;
pub trait MapInPlace<T>: Sized {
fn map_in_place<F>(&mut self, mut f: F)
@@ -15,94 +16,64 @@ pub trait MapInPlace<T>: Sized {
I: IntoIterator<Item = T>;
}
-impl<T> MapInPlace<T> for Vec<T> {
- fn flat_map_in_place<F, I>(&mut self, mut f: F)
- where
- F: FnMut(T) -> I,
- I: IntoIterator<Item = T>,
- {
- let mut read_i = 0;
- let mut write_i = 0;
- unsafe {
- let mut old_len = self.len();
- self.set_len(0); // make sure we just leak elements in case of panic
+// The implementation of this method is syntactically identical for all the
+// different vector types.
+macro_rules! flat_map_in_place {
+ () => {
+ fn flat_map_in_place<F, I>(&mut self, mut f: F)
+ where
+ F: FnMut(T) -> I,
+ I: IntoIterator<Item = T>,
+ {
+ let mut read_i = 0;
+ let mut write_i = 0;
+ unsafe {
+ let mut old_len = self.len();
+ self.set_len(0); // make sure we just leak elements in case of panic
- while read_i < old_len {
- // move the read_i'th item out of the vector and map it
- // to an iterator
- let e = ptr::read(self.as_ptr().add(read_i));
- let iter = f(e).into_iter();
- read_i += 1;
+ while read_i < old_len {
+ // move the read_i'th item out of the vector and map it
+ // to an iterator
+ let e = ptr::read(self.as_ptr().add(read_i));
+ let iter = f(e).into_iter();
+ read_i += 1;
- for e in iter {
- if write_i < read_i {
- ptr::write(self.as_mut_ptr().add(write_i), e);
- write_i += 1;
- } else {
- // If this is reached we ran out of space
- // in the middle of the vector.
- // However, the vector is in a valid state here,
- // so we just do a somewhat inefficient insert.
- self.set_len(old_len);
- self.insert(write_i, e);
+ for e in iter {
+ if write_i < read_i {
+ ptr::write(self.as_mut_ptr().add(write_i), e);
+ write_i += 1;
+ } else {
+ // If this is reached we ran out of space
+ // in the middle of the vector.
+ // However, the vector is in a valid state here,
+ // so we just do a somewhat inefficient insert.
+ self.set_len(old_len);
+ self.insert(write_i, e);
- old_len = self.len();
- self.set_len(0);
+ old_len = self.len();
+ self.set_len(0);
- read_i += 1;
- write_i += 1;
+ read_i += 1;
+ write_i += 1;
+ }
}
}
- }
- // write_i tracks the number of actually written new items.
- self.set_len(write_i);
+ // write_i tracks the number of actually written new items.
+ self.set_len(write_i);
+ }
}
- }
+ };
}
-impl<T, A: Array<Item = T>> MapInPlace<T> for SmallVec<A> {
- fn flat_map_in_place<F, I>(&mut self, mut f: F)
- where
- F: FnMut(T) -> I,
- I: IntoIterator<Item = T>,
- {
- let mut read_i = 0;
- let mut write_i = 0;
- unsafe {
- let mut old_len = self.len();
- self.set_len(0); // make sure we just leak elements in case of panic
-
- while read_i < old_len {
- // move the read_i'th item out of the vector and map it
- // to an iterator
- let e = ptr::read(self.as_ptr().add(read_i));
- let iter = f(e).into_iter();
- read_i += 1;
-
- for e in iter {
- if write_i < read_i {
- ptr::write(self.as_mut_ptr().add(write_i), e);
- write_i += 1;
- } else {
- // If this is reached we ran out of space
- // in the middle of the vector.
- // However, the vector is in a valid state here,
- // so we just do a somewhat inefficient insert.
- self.set_len(old_len);
- self.insert(write_i, e);
-
- old_len = self.len();
- self.set_len(0);
+impl<T> MapInPlace<T> for Vec<T> {
+ flat_map_in_place!();
+}
- read_i += 1;
- write_i += 1;
- }
- }
- }
+impl<T, A: Array<Item = T>> MapInPlace<T> for SmallVec<A> {
+ flat_map_in_place!();
+}
- // write_i tracks the number of actually written new items.
- self.set_len(write_i);
- }
- }
+impl<T> MapInPlace<T> for ThinVec<T> {
+ flat_map_in_place!();
}
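
The `flat_map_in_place!` macro deduplicates the unsafe in-place algorithm across `Vec`, `SmallVec`, and the now-external `ThinVec`. For orientation, a semantics-only sketch of what the operation computes, written with a temporary allocation instead of the in-place pointer manipulation:

```rust
// Allocation-based equivalent of `flat_map_in_place`, for illustration only.
fn flat_map_in_place_naive<T, I, F>(v: &mut Vec<T>, f: F)
where
    F: FnMut(T) -> I,
    I: IntoIterator<Item = T>,
{
    *v = std::mem::take(v).into_iter().flat_map(f).collect();
}

fn main() {
    let mut v = vec![1, 2, 3, 4];
    // Drop even numbers, duplicate odd ones.
    flat_map_in_place_naive(&mut v, |x| if x % 2 == 0 { vec![] } else { vec![x, x] });
    assert_eq!(v, [1, 1, 3, 3]);
}
```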
diff --git a/compiler/rustc_data_structures/src/obligation_forest/mod.rs b/compiler/rustc_data_structures/src/obligation_forest/mod.rs
index 07a96dd7d..10e673cd9 100644
--- a/compiler/rustc_data_structures/src/obligation_forest/mod.rs
+++ b/compiler/rustc_data_structures/src/obligation_forest/mod.rs
@@ -95,6 +95,10 @@ pub trait ForestObligation: Clone + Debug {
pub trait ObligationProcessor {
type Obligation: ForestObligation;
type Error: Debug;
+ type OUT: OutcomeTrait<
+ Obligation = Self::Obligation,
+ Error = Error<Self::Obligation, Self::Error>,
+ >;
fn needs_process_obligation(&self, obligation: &Self::Obligation) -> bool;
@@ -111,12 +115,20 @@ pub trait ObligationProcessor {
/// In other words, if we had O1 which required O2 which required
/// O3 which required O1, we would give an iterator yielding O1,
/// O2, O3 (O1 is not yielded twice).
- fn process_backedge<'c, I>(&mut self, cycle: I, _marker: PhantomData<&'c Self::Obligation>)
+ fn process_backedge<'c, I>(
+ &mut self,
+ cycle: I,
+ _marker: PhantomData<&'c Self::Obligation>,
+ ) -> Result<(), Self::Error>
where
I: Clone + Iterator<Item = &'c Self::Obligation>;
}
/// The result type used by `process_obligation`.
+// `repr(C)` to inhibit the niche filling optimization. Otherwise, the `match` appearing
+// in `process_obligations` is significantly slower, which can substantially affect
+// benchmarks like `rustc-perf`'s inflate and keccak.
+#[repr(C)]
#[derive(Debug)]
pub enum ProcessResult<O, E> {
Unchanged,
@@ -398,12 +410,11 @@ impl<O: ForestObligation> ObligationForest<O> {
/// Performs a fixpoint computation over the obligation list.
#[inline(never)]
- pub fn process_obligations<P, OUT>(&mut self, processor: &mut P) -> OUT
+ pub fn process_obligations<P>(&mut self, processor: &mut P) -> P::OUT
where
P: ObligationProcessor<Obligation = O>,
- OUT: OutcomeTrait<Obligation = O, Error = Error<O, P::Error>>,
{
- let mut outcome = OUT::new();
+ let mut outcome = P::OUT::new();
// Fixpoint computation: we repeat until the inner loop stalls.
loop {
@@ -469,7 +480,7 @@ impl<O: ForestObligation> ObligationForest<O> {
}
self.mark_successes();
- self.process_cycles(processor);
+ self.process_cycles(processor, &mut outcome);
self.compress(|obl| outcome.record_completed(obl));
}
@@ -554,7 +565,7 @@ impl<O: ForestObligation> ObligationForest<O> {
/// Report cycles between all `Success` nodes, and convert all `Success`
/// nodes to `Done`. This must be called after `mark_successes`.
- fn process_cycles<P>(&mut self, processor: &mut P)
+ fn process_cycles<P>(&mut self, processor: &mut P, outcome: &mut P::OUT)
where
P: ObligationProcessor<Obligation = O>,
{
@@ -564,7 +575,7 @@ impl<O: ForestObligation> ObligationForest<O> {
// to handle the no-op cases immediately to avoid the cost of the
// function call.
if node.state.get() == NodeState::Success {
- self.find_cycles_from_node(&mut stack, processor, index);
+ self.find_cycles_from_node(&mut stack, processor, index, outcome);
}
}
@@ -572,8 +583,13 @@ impl<O: ForestObligation> ObligationForest<O> {
self.reused_node_vec = stack;
}
- fn find_cycles_from_node<P>(&self, stack: &mut Vec<usize>, processor: &mut P, index: usize)
- where
+ fn find_cycles_from_node<P>(
+ &self,
+ stack: &mut Vec<usize>,
+ processor: &mut P,
+ index: usize,
+ outcome: &mut P::OUT,
+ ) where
P: ObligationProcessor<Obligation = O>,
{
let node = &self.nodes[index];
@@ -582,17 +598,20 @@ impl<O: ForestObligation> ObligationForest<O> {
None => {
stack.push(index);
for &dep_index in node.dependents.iter() {
- self.find_cycles_from_node(stack, processor, dep_index);
+ self.find_cycles_from_node(stack, processor, dep_index, outcome);
}
stack.pop();
node.state.set(NodeState::Done);
}
Some(rpos) => {
// Cycle detected.
- processor.process_backedge(
+ let result = processor.process_backedge(
stack[rpos..].iter().map(|&i| &self.nodes[i].obligation),
PhantomData,
);
+ if let Err(err) = result {
+ outcome.record_error(Error { error: err, backtrace: self.error_at(index) });
+ }
}
}
}
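
The trait reshaping above moves the outcome type from a method-level type parameter to the associated type `OUT` and makes `process_backedge` fallible, so cycle errors can be recorded on the outcome. A toy sketch of that pattern, with illustrative names only, not the rustc `ObligationProcessor` API:

```rust
#[derive(Default, Debug)]
struct Outcome {
    errors: Vec<String>,
}

trait Processor {
    // Outcome type is an associated type rather than a per-call type parameter.
    type Out: Default;
    fn process_backedge(&mut self, cycle: &[u32]) -> Result<(), String>;
    fn record_error(out: &mut Self::Out, err: String);
}

struct RejectEvenCycles;

impl Processor for RejectEvenCycles {
    type Out = Outcome;
    fn process_backedge(&mut self, cycle: &[u32]) -> Result<(), String> {
        if cycle.len() % 2 == 0 { Err(format!("even-length cycle: {cycle:?}")) } else { Ok(()) }
    }
    fn record_error(out: &mut Outcome, err: String) {
        out.errors.push(err);
    }
}

// The driver now records backedge failures on the processor's own outcome type.
fn process_cycles<P: Processor>(p: &mut P, cycles: &[Vec<u32>]) -> P::Out {
    let mut outcome = P::Out::default();
    for cycle in cycles {
        if let Err(err) = p.process_backedge(cycle) {
            P::record_error(&mut outcome, err);
        }
    }
    outcome
}

fn main() {
    let out = process_cycles(&mut RejectEvenCycles, &[vec![1, 2, 3], vec![4, 5]]);
    assert_eq!(out.errors.len(), 1);
}
```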
diff --git a/compiler/rustc_data_structures/src/obligation_forest/tests.rs b/compiler/rustc_data_structures/src/obligation_forest/tests.rs
index e2991aae1..bc252f772 100644
--- a/compiler/rustc_data_structures/src/obligation_forest/tests.rs
+++ b/compiler/rustc_data_structures/src/obligation_forest/tests.rs
@@ -64,6 +64,7 @@ where
{
type Obligation = O;
type Error = E;
+ type OUT = TestOutcome<O, E>;
fn needs_process_obligation(&self, _obligation: &Self::Obligation) -> bool {
true
@@ -76,10 +77,15 @@ where
(self.process_obligation)(obligation)
}
- fn process_backedge<'c, I>(&mut self, _cycle: I, _marker: PhantomData<&'c Self::Obligation>)
+ fn process_backedge<'c, I>(
+ &mut self,
+ _cycle: I,
+ _marker: PhantomData<&'c Self::Obligation>,
+ ) -> Result<(), Self::Error>
where
I: Clone + Iterator<Item = &'c Self::Obligation>,
{
+ Ok(())
}
}
diff --git a/compiler/rustc_data_structures/src/profiling.rs b/compiler/rustc_data_structures/src/profiling.rs
index d8b26f984..ba1960805 100644
--- a/compiler/rustc_data_structures/src/profiling.rs
+++ b/compiler/rustc_data_structures/src/profiling.rs
@@ -158,30 +158,21 @@ pub struct SelfProfilerRef {
// actually enabled.
event_filter_mask: EventFilter,
- // Print verbose generic activities to stdout
+ // Print verbose generic activities to stderr?
print_verbose_generic_activities: bool,
-
- // Print extra verbose generic activities to stdout
- print_extra_verbose_generic_activities: bool,
}
impl SelfProfilerRef {
pub fn new(
profiler: Option<Arc<SelfProfiler>>,
print_verbose_generic_activities: bool,
- print_extra_verbose_generic_activities: bool,
) -> SelfProfilerRef {
// If there is no SelfProfiler then the filter mask is set to NONE,
// ensuring that nothing ever tries to actually access it.
let event_filter_mask =
profiler.as_ref().map_or(EventFilter::empty(), |p| p.event_filter_mask);
- SelfProfilerRef {
- profiler,
- event_filter_mask,
- print_verbose_generic_activities,
- print_extra_verbose_generic_activities,
- }
+ SelfProfilerRef { profiler, event_filter_mask, print_verbose_generic_activities }
}
/// This shim makes sure that calls only get executed if the filter mask
@@ -214,7 +205,7 @@ impl SelfProfilerRef {
/// Start profiling a verbose generic activity. Profiling continues until the
/// VerboseTimingGuard returned from this call is dropped. In addition to recording
/// a measureme event, "verbose" generic activities also print a timing entry to
- /// stdout if the compiler is invoked with -Ztime or -Ztime-passes.
+ /// stderr if the compiler is invoked with -Ztime-passes.
pub fn verbose_generic_activity<'a>(
&'a self,
event_label: &'static str,
@@ -225,11 +216,8 @@ impl SelfProfilerRef {
VerboseTimingGuard::start(message, self.generic_activity(event_label))
}
- /// Start profiling an extra verbose generic activity. Profiling continues until the
- /// VerboseTimingGuard returned from this call is dropped. In addition to recording
- /// a measureme event, "extra verbose" generic activities also print a timing entry to
- /// stdout if the compiler is invoked with -Ztime-passes.
- pub fn extra_verbose_generic_activity<'a, A>(
+ /// Like `verbose_generic_activity`, but with an extra arg.
+ pub fn verbose_generic_activity_with_arg<'a, A>(
&'a self,
event_label: &'static str,
event_arg: A,
@@ -237,7 +225,7 @@ impl SelfProfilerRef {
where
A: Borrow<str> + Into<String>,
{
- let message = if self.print_extra_verbose_generic_activities {
+ let message = if self.print_verbose_generic_activities {
Some(format!("{}({})", event_label, event_arg.borrow()))
} else {
None
@@ -745,27 +733,9 @@ impl Drop for VerboseTimingGuard<'_> {
if let Some((start_time, start_rss, ref message)) = self.start_and_message {
let end_rss = get_resident_set_size();
let dur = start_time.elapsed();
-
- if should_print_passes(dur, start_rss, end_rss) {
- print_time_passes_entry(&message, dur, start_rss, end_rss);
- }
- }
- }
-}
-
-fn should_print_passes(dur: Duration, start_rss: Option<usize>, end_rss: Option<usize>) -> bool {
- if dur.as_millis() > 5 {
- return true;
- }
-
- if let (Some(start_rss), Some(end_rss)) = (start_rss, end_rss) {
- let change_rss = end_rss.abs_diff(start_rss);
- if change_rss > 0 {
- return true;
+ print_time_passes_entry(&message, dur, start_rss, end_rss);
}
}
-
- false
}
pub fn print_time_passes_entry(
@@ -774,6 +744,26 @@ pub fn print_time_passes_entry(
start_rss: Option<usize>,
end_rss: Option<usize>,
) {
+ // Print the pass if its duration is greater than 5 ms, or it changed the
+ // measured RSS.
+ let is_notable = || {
+ if dur.as_millis() > 5 {
+ return true;
+ }
+
+ if let (Some(start_rss), Some(end_rss)) = (start_rss, end_rss) {
+ let change_rss = end_rss.abs_diff(start_rss);
+ if change_rss > 0 {
+ return true;
+ }
+ }
+
+ false
+ };
+ if !is_notable() {
+ return;
+ }
+
let rss_to_mb = |rss| (rss as f64 / 1_000_000.0).round() as usize;
let rss_change_to_mb = |rss| (rss as f64 / 1_000_000.0).round() as i128;
diff --git a/compiler/rustc_data_structures/src/sorted_map.rs b/compiler/rustc_data_structures/src/sorted_map.rs
index 9efea1228..fe257e102 100644
--- a/compiler/rustc_data_structures/src/sorted_map.rs
+++ b/compiler/rustc_data_structures/src/sorted_map.rs
@@ -96,6 +96,23 @@ impl<K: Ord, V> SortedMap<K, V> {
}
}
+ /// Gets a mutable reference to the value in the entry, or inserts a new one.
+ #[inline]
+ pub fn get_mut_or_insert_default(&mut self, key: K) -> &mut V
+ where
+ K: Eq,
+ V: Default,
+ {
+ let index = match self.lookup_index_for(&key) {
+ Ok(index) => index,
+ Err(index) => {
+ self.data.insert(index, (key, V::default()));
+ index
+ }
+ };
+ unsafe { &mut self.data.get_unchecked_mut(index).1 }
+ }
+
#[inline]
pub fn clear(&mut self) {
self.data.clear();
@@ -164,7 +181,7 @@ impl<K: Ord, V> SortedMap<K, V> {
/// It is up to the caller to make sure that the elements are sorted by key
/// and that there are no duplicates.
#[inline]
- pub fn insert_presorted(&mut self, mut elements: Vec<(K, V)>) {
+ pub fn insert_presorted(&mut self, elements: Vec<(K, V)>) {
if elements.is_empty() {
return;
}
@@ -173,28 +190,28 @@ impl<K: Ord, V> SortedMap<K, V> {
let start_index = self.lookup_index_for(&elements[0].0);
- let drain = match start_index {
+ let elements = match start_index {
Ok(index) => {
- let mut drain = elements.drain(..);
- self.data[index] = drain.next().unwrap();
- drain
+ let mut elements = elements.into_iter();
+ self.data[index] = elements.next().unwrap();
+ elements
}
Err(index) => {
if index == self.data.len() || elements.last().unwrap().0 < self.data[index].0 {
// We can copy the whole range without having to mix with
// existing elements.
- self.data.splice(index..index, elements.drain(..));
+ self.data.splice(index..index, elements.into_iter());
return;
}
- let mut drain = elements.drain(..);
- self.data.insert(index, drain.next().unwrap());
- drain
+ let mut elements = elements.into_iter();
+ self.data.insert(index, elements.next().unwrap());
+ elements
}
};
// Insert the rest
- for (k, v) in drain {
+ for (k, v) in elements {
self.insert(k, v);
}
}
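
The new `get_mut_or_insert_default` above performs a single `lookup_index_for` search and inserts a default value on a miss. The same pattern over a plain sorted `Vec<(K, V)>`, as a self-contained sketch:

```rust
// One binary search; insert `V::default()` on a miss; return a mutable
// reference to the value at the resulting index.
fn get_mut_or_insert_default<K: Ord, V: Default>(data: &mut Vec<(K, V)>, key: K) -> &mut V {
    let index = match data.binary_search_by(|(k, _)| k.cmp(&key)) {
        Ok(index) => index,
        Err(index) => {
            data.insert(index, (key, V::default()));
            index
        }
    };
    &mut data[index].1
}

fn main() {
    let mut counts: Vec<(&str, u32)> = Vec::new();
    *get_mut_or_insert_default(&mut counts, "a") += 1;
    *get_mut_or_insert_default(&mut counts, "a") += 1;
    assert_eq!(counts, [("a", 2)]);
}
```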
diff --git a/compiler/rustc_data_structures/src/sso/set.rs b/compiler/rustc_data_structures/src/sso/set.rs
index 4fda3adb7..406f0270d 100644
--- a/compiler/rustc_data_structures/src/sso/set.rs
+++ b/compiler/rustc_data_structures/src/sso/set.rs
@@ -27,7 +27,7 @@ pub struct SsoHashSet<T> {
map: SsoHashMap<T, ()>,
}
-/// Adapter function used ot return
+/// Adapter function used to return
/// result if SsoHashMap functions into
/// result SsoHashSet should return.
#[inline(always)]
diff --git a/compiler/rustc_data_structures/src/sync.rs b/compiler/rustc_data_structures/src/sync.rs
index 52952a793..9c0fb8265 100644
--- a/compiler/rustc_data_structures/src/sync.rs
+++ b/compiler/rustc_data_structures/src/sync.rs
@@ -48,7 +48,7 @@ cfg_if! {
/// the native atomic types.
/// You should use this type through the `AtomicU64`, `AtomicUsize`, etc, type aliases
/// as it's not intended to be used separately.
- #[derive(Debug)]
+ #[derive(Debug, Default)]
pub struct Atomic<T: Copy>(Cell<T>);
impl<T: Copy> Atomic<T> {
@@ -56,9 +56,7 @@ cfg_if! {
pub fn new(v: T) -> Self {
Atomic(Cell::new(v))
}
- }
- impl<T: Copy> Atomic<T> {
#[inline]
pub fn into_inner(self) -> T {
self.0.into_inner()
diff --git a/compiler/rustc_data_structures/src/thin_vec.rs b/compiler/rustc_data_structures/src/thin_vec.rs
deleted file mode 100644
index 716259142..000000000
--- a/compiler/rustc_data_structures/src/thin_vec.rs
+++ /dev/null
@@ -1,135 +0,0 @@
-use crate::stable_hasher::{HashStable, StableHasher};
-
-use std::iter::FromIterator;
-
-/// A vector type optimized for cases where this size is usually 0 (cf. `SmallVec`).
-/// The `Option<Box<..>>` wrapping allows us to represent a zero sized vector with `None`,
-/// which uses only a single (null) pointer.
-#[derive(Clone, Encodable, Decodable, Debug, Hash, Eq, PartialEq)]
-pub struct ThinVec<T>(Option<Box<Vec<T>>>);
-
-impl<T> ThinVec<T> {
- pub fn new() -> Self {
- ThinVec(None)
- }
-
- pub fn iter(&self) -> std::slice::Iter<'_, T> {
- self.into_iter()
- }
-
- pub fn iter_mut(&mut self) -> std::slice::IterMut<'_, T> {
- self.into_iter()
- }
-
- pub fn push(&mut self, item: T) {
- match *self {
- ThinVec(Some(ref mut vec)) => vec.push(item),
- ThinVec(None) => *self = vec![item].into(),
- }
- }
-}
-
-impl<T> From<Vec<T>> for ThinVec<T> {
- fn from(vec: Vec<T>) -> Self {
- if vec.is_empty() { ThinVec(None) } else { ThinVec(Some(Box::new(vec))) }
- }
-}
-
-impl<T> Into<Vec<T>> for ThinVec<T> {
- fn into(self) -> Vec<T> {
- match self {
- ThinVec(None) => Vec::new(),
- ThinVec(Some(vec)) => *vec,
- }
- }
-}
-
-impl<T> ::std::ops::Deref for ThinVec<T> {
- type Target = [T];
- fn deref(&self) -> &[T] {
- match *self {
- ThinVec(None) => &[],
- ThinVec(Some(ref vec)) => vec,
- }
- }
-}
-
-impl<T> ::std::ops::DerefMut for ThinVec<T> {
- fn deref_mut(&mut self) -> &mut [T] {
- match *self {
- ThinVec(None) => &mut [],
- ThinVec(Some(ref mut vec)) => vec,
- }
- }
-}
-
-impl<T> FromIterator<T> for ThinVec<T> {
- fn from_iter<I: IntoIterator<Item = T>>(iter: I) -> Self {
- // `Vec::from_iter()` should not allocate if the iterator is empty.
- let vec: Vec<_> = iter.into_iter().collect();
- if vec.is_empty() { ThinVec(None) } else { ThinVec(Some(Box::new(vec))) }
- }
-}
-
-impl<T> IntoIterator for ThinVec<T> {
- type Item = T;
- type IntoIter = std::vec::IntoIter<T>;
-
- fn into_iter(self) -> Self::IntoIter {
- // This is still performant because `Vec::new()` does not allocate.
- self.0.map_or_else(Vec::new, |ptr| *ptr).into_iter()
- }
-}
-
-impl<'a, T> IntoIterator for &'a ThinVec<T> {
- type Item = &'a T;
- type IntoIter = std::slice::Iter<'a, T>;
-
- fn into_iter(self) -> Self::IntoIter {
- self.as_ref().iter()
- }
-}
-
-impl<'a, T> IntoIterator for &'a mut ThinVec<T> {
- type Item = &'a mut T;
- type IntoIter = std::slice::IterMut<'a, T>;
-
- fn into_iter(self) -> Self::IntoIter {
- self.as_mut().iter_mut()
- }
-}
-
-impl<T> Extend<T> for ThinVec<T> {
- fn extend<I: IntoIterator<Item = T>>(&mut self, iter: I) {
- match *self {
- ThinVec(Some(ref mut vec)) => vec.extend(iter),
- ThinVec(None) => *self = iter.into_iter().collect::<Vec<_>>().into(),
- }
- }
-
- fn extend_one(&mut self, item: T) {
- self.push(item)
- }
-
- fn extend_reserve(&mut self, additional: usize) {
- match *self {
- ThinVec(Some(ref mut vec)) => vec.reserve(additional),
- ThinVec(None) => *self = Vec::with_capacity(additional).into(),
- }
- }
-}
-
-impl<T: HashStable<CTX>, CTX> HashStable<CTX> for ThinVec<T> {
- fn hash_stable(&self, hcx: &mut CTX, hasher: &mut StableHasher) {
- (**self).hash_stable(hcx, hasher)
- }
-}
-
-impl<T> Default for ThinVec<T> {
- fn default() -> Self {
- Self(None)
- }
-}
-
-#[cfg(test)]
-mod tests;
diff --git a/compiler/rustc_data_structures/src/thin_vec/tests.rs b/compiler/rustc_data_structures/src/thin_vec/tests.rs
deleted file mode 100644
index 0221b9912..000000000
--- a/compiler/rustc_data_structures/src/thin_vec/tests.rs
+++ /dev/null
@@ -1,42 +0,0 @@
-use super::*;
-
-impl<T> ThinVec<T> {
- fn into_vec(self) -> Vec<T> {
- self.into()
- }
-}
-
-#[test]
-fn test_from_iterator() {
- assert_eq!(std::iter::empty().collect::<ThinVec<String>>().into_vec(), Vec::<String>::new());
- assert_eq!(std::iter::once(42).collect::<ThinVec<_>>().into_vec(), vec![42]);
- assert_eq!([1, 2].into_iter().collect::<ThinVec<_>>().into_vec(), vec![1, 2]);
- assert_eq!([1, 2, 3].into_iter().collect::<ThinVec<_>>().into_vec(), vec![1, 2, 3]);
-}
-
-#[test]
-fn test_into_iterator_owned() {
- assert_eq!(ThinVec::new().into_iter().collect::<Vec<String>>(), Vec::<String>::new());
- assert_eq!(ThinVec::from(vec![1]).into_iter().collect::<Vec<_>>(), vec![1]);
- assert_eq!(ThinVec::from(vec![1, 2]).into_iter().collect::<Vec<_>>(), vec![1, 2]);
- assert_eq!(ThinVec::from(vec![1, 2, 3]).into_iter().collect::<Vec<_>>(), vec![1, 2, 3]);
-}
-
-#[test]
-fn test_into_iterator_ref() {
- assert_eq!(ThinVec::new().iter().collect::<Vec<&String>>(), Vec::<&String>::new());
- assert_eq!(ThinVec::from(vec![1]).iter().collect::<Vec<_>>(), vec![&1]);
- assert_eq!(ThinVec::from(vec![1, 2]).iter().collect::<Vec<_>>(), vec![&1, &2]);
- assert_eq!(ThinVec::from(vec![1, 2, 3]).iter().collect::<Vec<_>>(), vec![&1, &2, &3]);
-}
-
-#[test]
-fn test_into_iterator_ref_mut() {
- assert_eq!(ThinVec::new().iter_mut().collect::<Vec<&mut String>>(), Vec::<&mut String>::new());
- assert_eq!(ThinVec::from(vec![1]).iter_mut().collect::<Vec<_>>(), vec![&mut 1]);
- assert_eq!(ThinVec::from(vec![1, 2]).iter_mut().collect::<Vec<_>>(), vec![&mut 1, &mut 2]);
- assert_eq!(
- ThinVec::from(vec![1, 2, 3]).iter_mut().collect::<Vec<_>>(),
- vec![&mut 1, &mut 2, &mut 3],
- );
-}
diff --git a/compiler/rustc_data_structures/src/transitive_relation.rs b/compiler/rustc_data_structures/src/transitive_relation.rs
index 0ff64969b..cf6162038 100644
--- a/compiler/rustc_data_structures/src/transitive_relation.rs
+++ b/compiler/rustc_data_structures/src/transitive_relation.rs
@@ -1,55 +1,67 @@
-use crate::fx::FxIndexSet;
-use crate::sync::Lock;
+use crate::frozen::Frozen;
+use crate::fx::{FxHashSet, FxIndexSet};
use rustc_index::bit_set::BitMatrix;
use std::fmt::Debug;
use std::hash::Hash;
use std::mem;
+use std::ops::Deref;
#[cfg(test)]
mod tests;
#[derive(Clone, Debug)]
-pub struct TransitiveRelation<T> {
+pub struct TransitiveRelationBuilder<T> {
// List of elements. This is used to map from a T to a usize.
elements: FxIndexSet<T>,
// List of base edges in the graph. Require to compute transitive
// closure.
- edges: Vec<Edge>,
-
- // This is a cached transitive closure derived from the edges.
- // Currently, we build it lazily and just throw out any existing
- // copy whenever a new edge is added. (The Lock is to permit
- // the lazy computation.) This is kind of silly, except for the
- // fact its size is tied to `self.elements.len()`, so I wanted to
- // wait before building it up to avoid reallocating as new edges
- // are added with new elements. Perhaps better would be to ask the
- // user for a batch of edges to minimize this effect, but I
- // already wrote the code this way. :P -nmatsakis
- closure: Lock<Option<BitMatrix<usize, usize>>>,
+ edges: FxHashSet<Edge>,
}
-// HACK(eddyb) manual impl avoids `Default` bound on `T`.
-impl<T: Eq + Hash> Default for TransitiveRelation<T> {
- fn default() -> Self {
+#[derive(Debug)]
+pub struct TransitiveRelation<T> {
+ // Frozen transitive relation elements and edges.
+ builder: Frozen<TransitiveRelationBuilder<T>>,
+
+ // Cached transitive closure derived from the edges.
+ closure: Frozen<BitMatrix<usize, usize>>,
+}
+
+impl<T> Deref for TransitiveRelation<T> {
+ type Target = Frozen<TransitiveRelationBuilder<T>>;
+
+ fn deref(&self) -> &Self::Target {
+ &self.builder
+ }
+}
+
+impl<T: Clone> Clone for TransitiveRelation<T> {
+ fn clone(&self) -> Self {
TransitiveRelation {
- elements: Default::default(),
- edges: Default::default(),
- closure: Default::default(),
+ builder: Frozen::freeze(self.builder.deref().clone()),
+ closure: Frozen::freeze(self.closure.deref().clone()),
}
}
}
-#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Debug)]
+// HACK(eddyb) manual impl avoids `Default` bound on `T`.
+impl<T: Eq + Hash> Default for TransitiveRelationBuilder<T> {
+ fn default() -> Self {
+ TransitiveRelationBuilder { elements: Default::default(), edges: Default::default() }
+ }
+}
+
+#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Debug, Hash)]
struct Index(usize);
-#[derive(Clone, PartialEq, Eq, Debug)]
+#[derive(Clone, PartialEq, Eq, Debug, Hash)]
struct Edge {
source: Index,
target: Index,
}
-impl<T: Eq + Hash + Copy> TransitiveRelation<T> {
+impl<T: Eq + Hash + Copy> TransitiveRelationBuilder<T> {
pub fn is_empty(&self) -> bool {
self.edges.is_empty()
}
@@ -63,23 +75,19 @@ impl<T: Eq + Hash + Copy> TransitiveRelation<T> {
}
fn add_index(&mut self, a: T) -> Index {
- let (index, added) = self.elements.insert_full(a);
- if added {
- // if we changed the dimensions, clear the cache
- *self.closure.get_mut() = None;
- }
+ let (index, _added) = self.elements.insert_full(a);
Index(index)
}
/// Applies the (partial) function to each edge and returns a new
- /// relation. If `f` returns `None` for any end-point, returns
- /// `None`.
- pub fn maybe_map<F, U>(&self, mut f: F) -> Option<TransitiveRelation<U>>
+ /// relation builder. If `f` returns `None` for any end-point,
+ /// returns `None`.
+ pub fn maybe_map<F, U>(&self, mut f: F) -> Option<TransitiveRelationBuilder<U>>
where
F: FnMut(T) -> Option<U>,
U: Clone + Debug + Eq + Hash + Copy,
{
- let mut result = TransitiveRelation::default();
+ let mut result = TransitiveRelationBuilder::default();
for edge in &self.edges {
result.add(f(self.elements[edge.source.0])?, f(self.elements[edge.target.0])?);
}
@@ -91,12 +99,38 @@ impl<T: Eq + Hash + Copy> TransitiveRelation<T> {
let a = self.add_index(a);
let b = self.add_index(b);
let edge = Edge { source: a, target: b };
- if !self.edges.contains(&edge) {
- self.edges.push(edge);
+ self.edges.insert(edge);
+ }
+
+ /// Computes the transitive closure derived from the edges and converts the
+ /// builder into the final result. After this, all elements are immutable to
+ /// maintain the correctness of the result.
+ pub fn freeze(self) -> TransitiveRelation<T> {
+ let mut matrix = BitMatrix::new(self.elements.len(), self.elements.len());
+ let mut changed = true;
+ while changed {
+ changed = false;
+ for edge in &self.edges {
+ // add an edge from S -> T
+ changed |= matrix.insert(edge.source.0, edge.target.0);
- // added an edge, clear the cache
- *self.closure.get_mut() = None;
+ // add all outgoing edges from T into S
+ changed |= matrix.union_rows(edge.target.0, edge.source.0);
+ }
}
+ TransitiveRelation { builder: Frozen::freeze(self), closure: Frozen::freeze(matrix) }
+ }
+}
+
+impl<T: Eq + Hash + Copy> TransitiveRelation<T> {
+ /// Applies the (partial) function to each edge and returns a new
+ /// relation including transitive closures.
+ pub fn maybe_map<F, U>(&self, f: F) -> Option<TransitiveRelation<U>>
+ where
+ F: FnMut(T) -> Option<U>,
+ U: Clone + Debug + Eq + Hash + Copy,
+ {
+ Some(self.builder.maybe_map(f)?.freeze())
}
/// Checks whether `a < target` (transitively)
@@ -322,30 +356,7 @@ impl<T: Eq + Hash + Copy> TransitiveRelation<T> {
where
OP: FnOnce(&BitMatrix<usize, usize>) -> R,
{
- let mut closure_cell = self.closure.borrow_mut();
- let mut closure = closure_cell.take();
- if closure.is_none() {
- closure = Some(self.compute_closure());
- }
- let result = op(closure.as_ref().unwrap());
- *closure_cell = closure;
- result
- }
-
- fn compute_closure(&self) -> BitMatrix<usize, usize> {
- let mut matrix = BitMatrix::new(self.elements.len(), self.elements.len());
- let mut changed = true;
- while changed {
- changed = false;
- for edge in &self.edges {
- // add an edge from S -> T
- changed |= matrix.insert(edge.source.0, edge.target.0);
-
- // add all outgoing edges from T into S
- changed |= matrix.union_rows(edge.target.0, edge.source.0);
- }
- }
- matrix
+ op(&self.closure)
}
/// Lists all the base edges in the graph: the initial _non-transitive_ set of element
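
With the builder split, the transitive closure is now computed once in `freeze` rather than lazily behind a lock. A standalone sketch of the same fixed-point loop, using `Vec<Vec<bool>>` in place of `BitMatrix`:

```rust
// Keep adding each edge and unioning the target's reachable row into the
// source's row until nothing changes.
fn transitive_closure(n: usize, edges: &[(usize, usize)]) -> Vec<Vec<bool>> {
    let mut matrix = vec![vec![false; n]; n];
    let mut changed = true;
    while changed {
        changed = false;
        for &(source, target) in edges {
            // add an edge from S -> T
            if !matrix[source][target] {
                matrix[source][target] = true;
                changed = true;
            }
            // add all outgoing edges of T into S's row
            for k in 0..n {
                if matrix[target][k] && !matrix[source][k] {
                    matrix[source][k] = true;
                    changed = true;
                }
            }
        }
    }
    matrix
}

fn main() {
    // a -> b -> c, so a reaches c transitively but not the other way around.
    let closure = transitive_closure(3, &[(0, 1), (1, 2)]);
    assert!(closure[0][2]);
    assert!(!closure[2][0]);
}
```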
diff --git a/compiler/rustc_data_structures/src/transitive_relation/tests.rs b/compiler/rustc_data_structures/src/transitive_relation/tests.rs
index e1f4c7ee0..e756c546e 100644
--- a/compiler/rustc_data_structures/src/transitive_relation/tests.rs
+++ b/compiler/rustc_data_structures/src/transitive_relation/tests.rs
@@ -10,9 +10,10 @@ impl<T: Eq + Hash + Copy> TransitiveRelation<T> {
#[test]
fn test_one_step() {
- let mut relation = TransitiveRelation::default();
+ let mut relation = TransitiveRelationBuilder::default();
relation.add("a", "b");
relation.add("a", "c");
+ let relation = relation.freeze();
assert!(relation.contains("a", "c"));
assert!(relation.contains("a", "b"));
assert!(!relation.contains("b", "a"));
@@ -21,7 +22,7 @@ fn test_one_step() {
#[test]
fn test_many_steps() {
- let mut relation = TransitiveRelation::default();
+ let mut relation = TransitiveRelationBuilder::default();
relation.add("a", "b");
relation.add("a", "c");
relation.add("a", "f");
@@ -31,6 +32,7 @@ fn test_many_steps() {
relation.add("b", "e");
relation.add("e", "g");
+ let relation = relation.freeze();
assert!(relation.contains("a", "b"));
assert!(relation.contains("a", "c"));
@@ -51,9 +53,10 @@ fn mubs_triangle() {
// ^
// |
// b
- let mut relation = TransitiveRelation::default();
+ let mut relation = TransitiveRelationBuilder::default();
relation.add("a", "tcx");
relation.add("b", "tcx");
+ let relation = relation.freeze();
assert_eq!(relation.minimal_upper_bounds("a", "b"), vec!["tcx"]);
assert_eq!(relation.parents("a"), vec!["tcx"]);
assert_eq!(relation.parents("b"), vec!["tcx"]);
@@ -72,7 +75,7 @@ fn mubs_best_choice1() {
// need the second pare down call to get the right result (after
// intersection, we have [1, 2], but 2 -> 1).
- let mut relation = TransitiveRelation::default();
+ let mut relation = TransitiveRelationBuilder::default();
relation.add("0", "1");
relation.add("0", "2");
@@ -80,6 +83,7 @@ fn mubs_best_choice1() {
relation.add("3", "1");
relation.add("3", "2");
+ let relation = relation.freeze();
assert_eq!(relation.minimal_upper_bounds("0", "3"), vec!["2"]);
assert_eq!(relation.parents("0"), vec!["2"]);
@@ -99,7 +103,7 @@ fn mubs_best_choice2() {
// Like the preceding test, but in this case intersection is [2,
// 1], and hence we rely on the first pare down call.
- let mut relation = TransitiveRelation::default();
+ let mut relation = TransitiveRelationBuilder::default();
relation.add("0", "1");
relation.add("0", "2");
@@ -107,6 +111,7 @@ fn mubs_best_choice2() {
relation.add("3", "1");
relation.add("3", "2");
+ let relation = relation.freeze();
assert_eq!(relation.minimal_upper_bounds("0", "3"), vec!["1"]);
assert_eq!(relation.parents("0"), vec!["1"]);
@@ -118,12 +123,13 @@ fn mubs_best_choice2() {
fn mubs_no_best_choice() {
// in this case, the intersection yields [1, 2], and the "pare
// down" calls find nothing to remove.
- let mut relation = TransitiveRelation::default();
+ let mut relation = TransitiveRelationBuilder::default();
relation.add("0", "1");
relation.add("0", "2");
relation.add("3", "1");
relation.add("3", "2");
+ let relation = relation.freeze();
assert_eq!(relation.minimal_upper_bounds("0", "3"), vec!["1", "2"]);
assert_eq!(relation.parents("0"), vec!["1", "2"]);
@@ -135,7 +141,7 @@ fn mubs_best_choice_scc() {
// in this case, 1 and 2 form a cycle; we pick arbitrarily (but
// consistently).
- let mut relation = TransitiveRelation::default();
+ let mut relation = TransitiveRelationBuilder::default();
relation.add("0", "1");
relation.add("0", "2");
@@ -144,6 +150,7 @@ fn mubs_best_choice_scc() {
relation.add("3", "1");
relation.add("3", "2");
+ let relation = relation.freeze();
assert_eq!(relation.minimal_upper_bounds("0", "3"), vec!["1"]);
assert_eq!(relation.parents("0"), vec!["1"]);
@@ -157,13 +164,14 @@ fn pdub_crisscross() {
// /\ |
// b -> b1 ---+
- let mut relation = TransitiveRelation::default();
+ let mut relation = TransitiveRelationBuilder::default();
relation.add("a", "a1");
relation.add("a", "b1");
relation.add("b", "a1");
relation.add("b", "b1");
relation.add("a1", "x");
relation.add("b1", "x");
+ let relation = relation.freeze();
assert_eq!(relation.minimal_upper_bounds("a", "b"), vec!["a1", "b1"]);
assert_eq!(relation.postdom_upper_bound("a", "b"), Some("x"));
@@ -179,7 +187,7 @@ fn pdub_crisscross_more() {
// /\ /\ |
// b -> b1 -> b2 ---------+
- let mut relation = TransitiveRelation::default();
+ let mut relation = TransitiveRelationBuilder::default();
relation.add("a", "a1");
relation.add("a", "b1");
relation.add("b", "a1");
@@ -194,6 +202,7 @@ fn pdub_crisscross_more() {
relation.add("a3", "x");
relation.add("b2", "x");
+ let relation = relation.freeze();
assert_eq!(relation.minimal_upper_bounds("a", "b"), vec!["a1", "b1"]);
assert_eq!(relation.minimal_upper_bounds("a1", "b1"), vec!["a2", "b2"]);
@@ -210,11 +219,12 @@ fn pdub_lub() {
// |
// b -> b1 ---+
- let mut relation = TransitiveRelation::default();
+ let mut relation = TransitiveRelationBuilder::default();
relation.add("a", "a1");
relation.add("b", "b1");
relation.add("a1", "x");
relation.add("b1", "x");
+ let relation = relation.freeze();
assert_eq!(relation.minimal_upper_bounds("a", "b"), vec!["x"]);
assert_eq!(relation.postdom_upper_bound("a", "b"), Some("x"));
@@ -233,10 +243,11 @@ fn mubs_intermediate_node_on_one_side_only() {
// b
// "digraph { a -> c -> d; b -> d; }",
- let mut relation = TransitiveRelation::default();
+ let mut relation = TransitiveRelationBuilder::default();
relation.add("a", "c");
relation.add("c", "d");
relation.add("b", "d");
+ let relation = relation.freeze();
assert_eq!(relation.minimal_upper_bounds("a", "b"), vec!["d"]);
}
@@ -252,12 +263,13 @@ fn mubs_scc_1() {
// b
// "digraph { a -> c -> d; d -> c; a -> d; b -> d; }",
- let mut relation = TransitiveRelation::default();
+ let mut relation = TransitiveRelationBuilder::default();
relation.add("a", "c");
relation.add("c", "d");
relation.add("d", "c");
relation.add("a", "d");
relation.add("b", "d");
+ let relation = relation.freeze();
assert_eq!(relation.minimal_upper_bounds("a", "b"), vec!["c"]);
}
@@ -272,12 +284,13 @@ fn mubs_scc_2() {
// +--- b
// "digraph { a -> c -> d; d -> c; b -> d; b -> c; }",
- let mut relation = TransitiveRelation::default();
+ let mut relation = TransitiveRelationBuilder::default();
relation.add("a", "c");
relation.add("c", "d");
relation.add("d", "c");
relation.add("b", "d");
relation.add("b", "c");
+ let relation = relation.freeze();
assert_eq!(relation.minimal_upper_bounds("a", "b"), vec!["c"]);
}
@@ -292,13 +305,14 @@ fn mubs_scc_3() {
// b ---+
// "digraph { a -> c -> d -> e -> c; b -> d; b -> e; }",
- let mut relation = TransitiveRelation::default();
+ let mut relation = TransitiveRelationBuilder::default();
relation.add("a", "c");
relation.add("c", "d");
relation.add("d", "e");
relation.add("e", "c");
relation.add("b", "d");
relation.add("b", "e");
+ let relation = relation.freeze();
assert_eq!(relation.minimal_upper_bounds("a", "b"), vec!["c"]);
}
@@ -314,13 +328,14 @@ fn mubs_scc_4() {
// b ---+
// "digraph { a -> c -> d -> e -> c; a -> d; b -> e; }"
- let mut relation = TransitiveRelation::default();
+ let mut relation = TransitiveRelationBuilder::default();
relation.add("a", "c");
relation.add("c", "d");
relation.add("d", "e");
relation.add("e", "c");
relation.add("a", "d");
relation.add("b", "e");
+ let relation = relation.freeze();
assert_eq!(relation.minimal_upper_bounds("a", "b"), vec!["c"]);
}
@@ -352,10 +367,11 @@ fn parent() {
(1, /*->*/ 3),
];
- let mut relation = TransitiveRelation::default();
+ let mut relation = TransitiveRelationBuilder::default();
for (a, b) in pairs {
relation.add(a, b);
}
+ let relation = relation.freeze();
let p = relation.postdom_parent(3);
assert_eq!(p, Some(0));
diff --git a/compiler/rustc_data_structures/src/unord.rs b/compiler/rustc_data_structures/src/unord.rs
new file mode 100644
index 000000000..c015f1232
--- /dev/null
+++ b/compiler/rustc_data_structures/src/unord.rs
@@ -0,0 +1,382 @@
+//! This module contains collection types that don't expose their internal
+//! ordering. This is a useful property for deterministic computations, such
+//! as required by the query system.
+
+use rustc_hash::{FxHashMap, FxHashSet};
+use smallvec::SmallVec;
+use std::{
+ borrow::Borrow,
+ hash::Hash,
+ iter::{Product, Sum},
+};
+
+use crate::{
+ fingerprint::Fingerprint,
+ stable_hasher::{HashStable, StableHasher, ToStableHashKey},
+};
+
+/// `UnordItems` is the order-less version of `Iterator`. It only contains methods
+/// that don't (easily) expose an ordering of the underlying items.
+///
+/// Most methods take an `Fn` where the `Iterator`-version takes an `FnMut`. This
+/// is to reduce the risk of accidentally leaking the internal order via the closure
+/// environment. Otherwise one could easily do something like
+///
+/// ```rust,ignore (pseudo code)
+/// let mut ordered = vec![];
+/// unordered_items.all(|x| ordered.push(x));
+/// ```
+///
+/// It's still possible to do the same thing with an `Fn` by using interior mutability,
+/// but the chance of doing it accidentally is reduced.
+pub struct UnordItems<T, I: Iterator<Item = T>>(I);
+
+impl<T, I: Iterator<Item = T>> UnordItems<T, I> {
+ #[inline]
+ pub fn map<U, F: Fn(T) -> U>(self, f: F) -> UnordItems<U, impl Iterator<Item = U>> {
+ UnordItems(self.0.map(f))
+ }
+
+ #[inline]
+ pub fn all<U, F: Fn(T) -> bool>(mut self, f: F) -> bool {
+ self.0.all(f)
+ }
+
+ #[inline]
+ pub fn any<U, F: Fn(T) -> bool>(mut self, f: F) -> bool {
+ self.0.any(f)
+ }
+
+ #[inline]
+ pub fn filter<U, F: Fn(&T) -> bool>(self, f: F) -> UnordItems<T, impl Iterator<Item = T>> {
+ UnordItems(self.0.filter(f))
+ }
+
+ #[inline]
+ pub fn filter_map<U, F: Fn(T) -> Option<U>>(
+ self,
+ f: F,
+ ) -> UnordItems<U, impl Iterator<Item = U>> {
+ UnordItems(self.0.filter_map(f))
+ }
+
+ #[inline]
+ pub fn max(self) -> Option<T>
+ where
+ T: Ord,
+ {
+ self.0.max()
+ }
+
+ #[inline]
+ pub fn min(self) -> Option<T>
+ where
+ T: Ord,
+ {
+ self.0.min()
+ }
+
+ #[inline]
+ pub fn sum<S>(self) -> S
+ where
+ S: Sum<T>,
+ {
+ self.0.sum()
+ }
+
+ #[inline]
+ pub fn product<S>(self) -> S
+ where
+ S: Product<T>,
+ {
+ self.0.product()
+ }
+
+ #[inline]
+ pub fn count(self) -> usize {
+ self.0.count()
+ }
+}
+
+impl<'a, T: Clone + 'a, I: Iterator<Item = &'a T>> UnordItems<&'a T, I> {
+ #[inline]
+ pub fn cloned(self) -> UnordItems<T, impl Iterator<Item = T>> {
+ UnordItems(self.0.cloned())
+ }
+}
+
+impl<'a, T: Copy + 'a, I: Iterator<Item = &'a T>> UnordItems<&'a T, I> {
+ #[inline]
+ pub fn copied(self) -> UnordItems<T, impl Iterator<Item = T>> {
+ UnordItems(self.0.copied())
+ }
+}
+
+impl<T: Ord, I: Iterator<Item = T>> UnordItems<T, I> {
+ pub fn into_sorted<HCX>(self, hcx: &HCX) -> Vec<T>
+ where
+ T: ToStableHashKey<HCX>,
+ {
+ let mut items: Vec<T> = self.0.collect();
+ items.sort_by_cached_key(|x| x.to_stable_hash_key(hcx));
+ items
+ }
+
+ pub fn into_sorted_small_vec<HCX, const LEN: usize>(self, hcx: &HCX) -> SmallVec<[T; LEN]>
+ where
+ T: ToStableHashKey<HCX>,
+ {
+ let mut items: SmallVec<[T; LEN]> = self.0.collect();
+ items.sort_by_cached_key(|x| x.to_stable_hash_key(hcx));
+ items
+ }
+}
+
+/// This is a set collection type that tries very hard to not expose
+/// any internal iteration. This is a useful property when trying to
+/// uphold the determinism invariants imposed by the query system.
+///
+/// This collection type is a good choice for set-like collections the
+/// keys of which don't have a semantic ordering.
+///
+/// See [MCP 533](https://github.com/rust-lang/compiler-team/issues/533)
+/// for more information.
+#[derive(Debug, Eq, PartialEq, Clone, Encodable, Decodable)]
+pub struct UnordSet<V: Eq + Hash> {
+ inner: FxHashSet<V>,
+}
+
+impl<V: Eq + Hash> Default for UnordSet<V> {
+ fn default() -> Self {
+ Self { inner: FxHashSet::default() }
+ }
+}
+
+impl<V: Eq + Hash> UnordSet<V> {
+ #[inline]
+ pub fn new() -> Self {
+ Self { inner: Default::default() }
+ }
+
+ #[inline]
+ pub fn len(&self) -> usize {
+ self.inner.len()
+ }
+
+ #[inline]
+ pub fn insert(&mut self, v: V) -> bool {
+ self.inner.insert(v)
+ }
+
+ #[inline]
+ pub fn contains<Q: ?Sized>(&self, v: &Q) -> bool
+ where
+ V: Borrow<Q>,
+ Q: Hash + Eq,
+ {
+ self.inner.contains(v)
+ }
+
+ #[inline]
+ pub fn items<'a>(&'a self) -> UnordItems<&'a V, impl Iterator<Item = &'a V>> {
+ UnordItems(self.inner.iter())
+ }
+
+ #[inline]
+ pub fn into_items(self) -> UnordItems<V, impl Iterator<Item = V>> {
+ UnordItems(self.inner.into_iter())
+ }
+
+ // We can safely extend this UnordSet from a set of unordered values because that
+ // won't expose the internal ordering anywhere.
+ #[inline]
+ pub fn extend<I: Iterator<Item = V>>(&mut self, items: UnordItems<V, I>) {
+ self.inner.extend(items.0)
+ }
+}
+
+impl<V: Hash + Eq> Extend<V> for UnordSet<V> {
+ fn extend<T: IntoIterator<Item = V>>(&mut self, iter: T) {
+ self.inner.extend(iter)
+ }
+}
+
+impl<HCX, V: Hash + Eq + HashStable<HCX>> HashStable<HCX> for UnordSet<V> {
+ #[inline]
+ fn hash_stable(&self, hcx: &mut HCX, hasher: &mut StableHasher) {
+ hash_iter_order_independent(self.inner.iter(), hcx, hasher);
+ }
+}
+
+/// This is a map collection type that tries very hard to not expose
+/// any internal iteration. This is a useful property when trying to
+/// uphold the determinism invariants imposed by the query system.
+///
+/// This collection type is a good choice for map-like collections whose
+/// keys don't have a semantic ordering.
+///
+/// See [MCP 533](https://github.com/rust-lang/compiler-team/issues/533)
+/// for more information.
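+///
+/// A minimal sketch of how data moves between unordered collections without
+/// ever surfacing an iteration order:
+///
+/// ```rust,ignore (illustrative)
+/// let mut map = UnordMap::default();
+/// map.insert("key", 1);
+///
+/// // `items()` yields `UnordItems`, which can only feed order-independent
+/// // operations or other unordered collections.
+/// let mut copy = UnordMap::default();
+/// copy.extend(map.items().map(|(k, v)| (*k, *v)));
+/// ```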
+#[derive(Debug, Eq, PartialEq, Clone, Encodable, Decodable)]
+pub struct UnordMap<K: Eq + Hash, V> {
+ inner: FxHashMap<K, V>,
+}
+
+impl<K: Eq + Hash, V> Default for UnordMap<K, V> {
+ fn default() -> Self {
+ Self { inner: FxHashMap::default() }
+ }
+}
+
+impl<K: Hash + Eq, V> Extend<(K, V)> for UnordMap<K, V> {
+ fn extend<T: IntoIterator<Item = (K, V)>>(&mut self, iter: T) {
+ self.inner.extend(iter)
+ }
+}
+
+impl<K: Eq + Hash, V> UnordMap<K, V> {
+ #[inline]
+ pub fn len(&self) -> usize {
+ self.inner.len()
+ }
+
+ #[inline]
+ pub fn insert(&mut self, k: K, v: V) -> Option<V> {
+ self.inner.insert(k, v)
+ }
+
+ #[inline]
+ pub fn contains_key<Q: ?Sized>(&self, k: &Q) -> bool
+ where
+ K: Borrow<Q>,
+ Q: Hash + Eq,
+ {
+ self.inner.contains_key(k)
+ }
+
+ #[inline]
+ pub fn items<'a>(&'a self) -> UnordItems<(&'a K, &'a V), impl Iterator<Item = (&'a K, &'a V)>> {
+ UnordItems(self.inner.iter())
+ }
+
+ #[inline]
+ pub fn into_items(self) -> UnordItems<(K, V), impl Iterator<Item = (K, V)>> {
+ UnordItems(self.inner.into_iter())
+ }
+
+ // We can safely extend this UnordMap from a set of unordered values because that
+ // won't expose the internal ordering anywhere.
+ #[inline]
+ pub fn extend<I: Iterator<Item = (K, V)>>(&mut self, items: UnordItems<(K, V), I>) {
+ self.inner.extend(items.0)
+ }
+}
+
+impl<HCX, K: Hash + Eq + HashStable<HCX>, V: HashStable<HCX>> HashStable<HCX> for UnordMap<K, V> {
+ #[inline]
+ fn hash_stable(&self, hcx: &mut HCX, hasher: &mut StableHasher) {
+ hash_iter_order_independent(self.inner.iter(), hcx, hasher);
+ }
+}
+
+/// This is a collection type that tries very hard to not expose
+/// any internal iteration. This is a useful property when trying to
+/// uphold the determinism invariants imposed by the query system.
+///
+/// This collection type is a good choice for collections whose elements
+/// don't have a semantic ordering and don't need to implement `Hash` or
+/// `Eq`.
+///
+/// See [MCP 533](https://github.com/rust-lang/compiler-team/issues/533)
+/// for more information.
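+///
+/// A minimal sketch (`compute_value()` is just a stand-in for some arbitrary
+/// computation):
+///
+/// ```rust,ignore (illustrative)
+/// let mut bag = UnordBag::new();
+/// bag.push(compute_value()); // duplicates and non-`Hash` values are fine
+/// bag.push(compute_value());
+/// // The `HashStable` impl below hashes the contents order-independently.
+/// ```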
+#[derive(Default, Debug, Eq, PartialEq, Clone, Encodable, Decodable)]
+pub struct UnordBag<V> {
+ inner: Vec<V>,
+}
+
+impl<V> UnordBag<V> {
+ #[inline]
+ pub fn new() -> Self {
+ Self { inner: Default::default() }
+ }
+
+ #[inline]
+ pub fn len(&self) -> usize {
+ self.inner.len()
+ }
+
+ #[inline]
+ pub fn push(&mut self, v: V) {
+ self.inner.push(v);
+ }
+
+ #[inline]
+ pub fn items<'a>(&'a self) -> UnordItems<&'a V, impl Iterator<Item = &'a V>> {
+ UnordItems(self.inner.iter())
+ }
+
+ #[inline]
+ pub fn into_items(self) -> UnordItems<V, impl Iterator<Item = V>> {
+ UnordItems(self.inner.into_iter())
+ }
+
+ // We can safely extend this UnordBag from a set of unordered values because that
+ // won't expose the internal ordering anywhere.
+ #[inline]
+ pub fn extend<I: Iterator<Item = V>>(&mut self, items: UnordItems<V, I>) {
+ self.inner.extend(items.0)
+ }
+}
+
+impl<T> Extend<T> for UnordBag<T> {
+ fn extend<I: IntoIterator<Item = T>>(&mut self, iter: I) {
+ self.inner.extend(iter)
+ }
+}
+
+impl<HCX, V: Hash + Eq + HashStable<HCX>> HashStable<HCX> for UnordBag<V> {
+ #[inline]
+ fn hash_stable(&self, hcx: &mut HCX, hasher: &mut StableHasher) {
+ hash_iter_order_independent(self.inner.iter(), hcx, hasher);
+ }
+}
+
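+/// Hashes the given items so that the result does not depend on the order in
+/// which they are yielded: the item count is hashed first, then (with fast
+/// paths for zero or one item) each item is fingerprinted independently and
+/// the per-item fingerprints are combined with a commutative operation.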
+fn hash_iter_order_independent<
+ HCX,
+ T: HashStable<HCX>,
+ I: Iterator<Item = T> + ExactSizeIterator,
+>(
+ mut it: I,
+ hcx: &mut HCX,
+ hasher: &mut StableHasher,
+) {
+ let len = it.len();
+ len.hash_stable(hcx, hasher);
+
+ match len {
+ 0 => {
+ // We're done
+ }
+ 1 => {
+ // No need to instantiate a hasher
+ it.next().unwrap().hash_stable(hcx, hasher);
+ }
+ _ => {
+ let mut accumulator = Fingerprint::ZERO;
+ for item in it {
+ let mut item_hasher = StableHasher::new();
+ item.hash_stable(hcx, &mut item_hasher);
+ let item_fingerprint: Fingerprint = item_hasher.finish();
+ accumulator = accumulator.combine_commutative(item_fingerprint);
+ }
+ accumulator.hash_stable(hcx, hasher);
+ }
+ }
+}
+
+// Do not implement IntoIterator for the collections in this module.
+// They only exist to hide iteration order in the first place.
+impl<T> !IntoIterator for UnordBag<T> {}
+impl<V> !IntoIterator for UnordSet<V> {}
+impl<K, V> !IntoIterator for UnordMap<K, V> {}
+impl<T, I> !IntoIterator for UnordItems<T, I> {}
diff --git a/compiler/rustc_driver/Cargo.toml b/compiler/rustc_driver/Cargo.toml
index 08d5d4f34..59e937777 100644
--- a/compiler/rustc_driver/Cargo.toml
+++ b/compiler/rustc_driver/Cargo.toml
@@ -7,7 +7,7 @@ edition = "2021"
crate-type = ["dylib"]
[dependencies]
-tracing = { version = "0.1.28" }
+tracing = { version = "0.1.35" }
serde_json = "1.0.59"
rustc_log = { path = "../rustc_log" }
rustc_middle = { path = "../rustc_middle" }
@@ -19,6 +19,7 @@ rustc_errors = { path = "../rustc_errors" }
rustc_feature = { path = "../rustc_feature" }
rustc_hir = { path = "../rustc_hir" }
rustc_hir_pretty = { path = "../rustc_hir_pretty" }
+rustc_macros = { path = "../rustc_macros" }
rustc_metadata = { path = "../rustc_metadata" }
rustc_parse = { path = "../rustc_parse" }
rustc_plugin_impl = { path = "../rustc_plugin_impl" }
@@ -29,7 +30,7 @@ rustc_error_codes = { path = "../rustc_error_codes" }
rustc_interface = { path = "../rustc_interface" }
rustc_ast = { path = "../rustc_ast" }
rustc_span = { path = "../rustc_span" }
-rustc_typeck = { path = "../rustc_typeck" }
+rustc_hir_analysis = { path = "../rustc_hir_analysis" }
[target.'cfg(unix)'.dependencies]
libc = "0.2"
diff --git a/compiler/rustc_driver/src/lib.rs b/compiler/rustc_driver/src/lib.rs
index 53ae913f9..cfa734c7d 100644
--- a/compiler/rustc_driver/src/lib.rs
+++ b/compiler/rustc_driver/src/lib.rs
@@ -5,10 +5,11 @@
//! This API is completely unstable and subject to change.
#![doc(html_root_url = "https://doc.rust-lang.org/nightly/nightly-rustc/")]
-#![feature(let_else)]
#![feature(once_cell)]
#![recursion_limit = "256"]
#![allow(rustc::potential_query_instability)]
+#![deny(rustc::untranslatable_diagnostic)]
+#![deny(rustc::diagnostic_outside_of_impl)]
#[macro_use]
extern crate tracing;
@@ -16,7 +17,7 @@ extern crate tracing;
pub extern crate rustc_plugin_impl as plugin;
use rustc_ast as ast;
-use rustc_codegen_ssa::{traits::CodegenBackend, CodegenResults};
+use rustc_codegen_ssa::{traits::CodegenBackend, CodegenErrors, CodegenResults};
use rustc_data_structures::profiling::{get_resident_set_size, print_time_passes_entry};
use rustc_data_structures::sync::SeqCst;
use rustc_errors::registry::{InvalidErrorCode, Registry};
@@ -34,7 +35,7 @@ use rustc_session::config::{ErrorOutputType, Input, OutputType, PrintRequest, Tr
use rustc_session::cstore::MetadataLoader;
use rustc_session::getopts;
use rustc_session::lint::{Lint, LintId};
-use rustc_session::{config, DiagnosticOutput, Session};
+use rustc_session::{config, Session};
use rustc_session::{early_error, early_error_no_abort, early_warn};
use rustc_span::source_map::{FileLoader, FileName};
use rustc_span::symbol::sym;
@@ -56,6 +57,12 @@ use std::time::Instant;
pub mod args;
pub mod pretty;
+mod session_diagnostics;
+
+use crate::session_diagnostics::{
+ RLinkEmptyVersionNumber, RLinkEncodingVersionMismatch, RLinkRustcVersionMismatch,
+ RLinkWrongFileType, RlinkNotAFile, RlinkUnableToRead,
+};
/// Exit status code used for successful compilation and help output.
pub const EXIT_SUCCESS: i32 = 0;
@@ -120,10 +127,13 @@ pub struct TimePassesCallbacks {
}
impl Callbacks for TimePassesCallbacks {
+ // JUSTIFICATION: the session doesn't exist at this point.
+ #[allow(rustc::bad_opt_access)]
fn config(&mut self, config: &mut interface::Config) {
- // If a --prints=... option has been given, we don't print the "total"
- // time because it will mess up the --prints output. See #64339.
- self.time_passes = config.opts.prints.is_empty() && config.opts.time_passes();
+ // If a --print=... option has been given, we don't print the "total"
+ // time because it will mess up the --print output. See #64339.
+ //
+ self.time_passes = config.opts.prints.is_empty() && config.opts.unstable_opts.time_passes;
config.opts.trimmed_def_paths = TrimmedDefPaths::GoodPath;
}
}
@@ -137,19 +147,21 @@ pub struct RunCompiler<'a, 'b> {
at_args: &'a [String],
callbacks: &'b mut (dyn Callbacks + Send),
file_loader: Option<Box<dyn FileLoader + Send + Sync>>,
- emitter: Option<Box<dyn Write + Send>>,
make_codegen_backend:
Option<Box<dyn FnOnce(&config::Options) -> Box<dyn CodegenBackend> + Send>>,
}
impl<'a, 'b> RunCompiler<'a, 'b> {
pub fn new(at_args: &'a [String], callbacks: &'b mut (dyn Callbacks + Send)) -> Self {
- Self { at_args, callbacks, file_loader: None, emitter: None, make_codegen_backend: None }
+ Self { at_args, callbacks, file_loader: None, make_codegen_backend: None }
}
/// Set a custom codegen backend.
///
- /// Used by cg_clif.
+ /// Has no uses within this repository, but is used by bjorn3 for "the
+ /// hotswapping branch of cg_clif" for "setting the codegen backend from a
+ /// custom driver where the custom codegen backend has arbitrary data."
+ /// (See #102759.)
pub fn set_make_codegen_backend(
&mut self,
make_codegen_backend: Option<
@@ -160,17 +172,11 @@ impl<'a, 'b> RunCompiler<'a, 'b> {
self
}
- /// Emit diagnostics to the specified location.
- ///
- /// Used by RLS.
- pub fn set_emitter(&mut self, emitter: Option<Box<dyn Write + Send>>) -> &mut Self {
- self.emitter = emitter;
- self
- }
-
/// Load files from sources other than the file system.
///
- /// Used by RLS.
+ /// Has no uses within this repository, but may be used in the future by
+ /// bjorn3 for "hooking rust-analyzer's VFS into rustc at some point for
+ /// running rustc without having to save". (See #102759.)
pub fn set_file_loader(
&mut self,
file_loader: Option<Box<dyn FileLoader + Send + Sync>>,
@@ -181,27 +187,20 @@ impl<'a, 'b> RunCompiler<'a, 'b> {
/// Parse args and run the compiler.
pub fn run(self) -> interface::Result<()> {
- run_compiler(
- self.at_args,
- self.callbacks,
- self.file_loader,
- self.emitter,
- self.make_codegen_backend,
- )
+ run_compiler(self.at_args, self.callbacks, self.file_loader, self.make_codegen_backend)
}
}
+
fn run_compiler(
at_args: &[String],
callbacks: &mut (dyn Callbacks + Send),
file_loader: Option<Box<dyn FileLoader + Send + Sync>>,
- emitter: Option<Box<dyn Write + Send>>,
make_codegen_backend: Option<
Box<dyn FnOnce(&config::Options) -> Box<dyn CodegenBackend> + Send>,
>,
) -> interface::Result<()> {
let args = args::arg_expand_all(at_args);
- let diagnostic_output = emitter.map_or(DiagnosticOutput::Default, DiagnosticOutput::Raw);
let Some(matches) = handle_options(&args) else { return Ok(()) };
let sopts = config::build_session_options(&matches);
@@ -223,7 +222,6 @@ fn run_compiler(
output_file: ofile,
output_dir: odir,
file_loader,
- diagnostic_output,
lint_caps: Default::default(),
parse_sess_created: None,
register_lints: None,
@@ -429,18 +427,6 @@ fn run_compiler(
})
}
-#[cfg(unix)]
-pub fn set_sigpipe_handler() {
- unsafe {
- // Set the SIGPIPE signal handler, so that an EPIPE
- // will cause rustc to terminate, as expected.
- assert_ne!(libc::signal(libc::SIGPIPE, libc::SIG_DFL), libc::SIG_ERR);
- }
-}
-
-#[cfg(windows)]
-pub fn set_sigpipe_handler() {}
-
// Extract output directory and file from matches.
fn make_output(matches: &getopts::Matches) -> (Option<PathBuf>, Option<PathBuf>) {
let odir = matches.opt_str("out-dir").map(|o| PathBuf::from(&o));
@@ -581,18 +567,35 @@ pub fn try_process_rlink(sess: &Session, compiler: &interface::Compiler) -> Comp
sess.init_crate_types(collect_crate_types(sess, &[]));
let outputs = compiler.build_output_filenames(sess, &[]);
let rlink_data = fs::read(file).unwrap_or_else(|err| {
- sess.fatal(&format!("failed to read rlink file: {}", err));
+ sess.emit_fatal(RlinkUnableToRead { err });
});
let codegen_results = match CodegenResults::deserialize_rlink(rlink_data) {
Ok(codegen) => codegen,
- Err(error) => {
- sess.fatal(&format!("Could not deserialize .rlink file: {error}"));
+ Err(err) => {
+ match err {
+ CodegenErrors::WrongFileType => sess.emit_fatal(RLinkWrongFileType),
+ CodegenErrors::EmptyVersionNumber => {
+ sess.emit_fatal(RLinkEmptyVersionNumber)
+ }
+ CodegenErrors::EncodingVersionMismatch { version_array, rlink_version } => {
+ sess.emit_fatal(RLinkEncodingVersionMismatch {
+ version_array,
+ rlink_version,
+ })
+ }
+ CodegenErrors::RustcVersionMismatch { rustc_version, current_version } => {
+ sess.emit_fatal(RLinkRustcVersionMismatch {
+ rustc_version,
+ current_version,
+ })
+ }
+ };
}
};
let result = compiler.codegen_backend().link(sess, codegen_results, &outputs);
abort_on_err(result, sess);
} else {
- sess.fatal("rlink must be a file")
+ sess.emit_fatal(RlinkNotAFile {})
}
Compilation::Stop
} else {
@@ -717,6 +720,11 @@ fn print_crate_info(
println!("{}", cfg);
}
}
+ CallingConventions => {
+ let mut calling_conventions = rustc_target::spec::abi::all_names();
+ calling_conventions.sort_unstable();
+ println!("{}", calling_conventions.join("\n"));
+ }
RelocationModels
| CodeModels
| TlsModels
@@ -1070,7 +1078,7 @@ pub fn handle_options(args: &[String]) -> Option<getopts::Matches> {
Some(matches)
}
-fn parse_crate_attrs<'a>(sess: &'a Session, input: &Input) -> PResult<'a, Vec<ast::Attribute>> {
+fn parse_crate_attrs<'a>(sess: &'a Session, input: &Input) -> PResult<'a, ast::AttrVec> {
match input {
Input::File(ifile) => rustc_parse::parse_crate_attrs_from_file(ifile, &sess.parse_sess),
Input::Str { name, input } => rustc_parse::parse_crate_attrs_from_source_str(
@@ -1094,22 +1102,25 @@ fn extra_compiler_flags() -> Option<(Vec<String>, bool)> {
while let Some(arg) = args.next() {
if let Some(a) = ICE_REPORT_COMPILER_FLAGS.iter().find(|a| arg.starts_with(*a)) {
let content = if arg.len() == a.len() {
+ // A space-separated option, like `-C incremental=foo` or `--crate-type rlib`
match args.next() {
Some(arg) => arg.to_string(),
None => continue,
}
} else if arg.get(a.len()..a.len() + 1) == Some("=") {
+ // An equals option, like `--crate-type=rlib`
arg[a.len() + 1..].to_string()
} else {
+ // A non-space option, like `-Cincremental=foo`
arg[a.len()..].to_string()
};
- if ICE_REPORT_COMPILER_FLAGS_EXCLUDE.iter().any(|exc| content.starts_with(exc)) {
+ let option = content.split_once('=').map(|s| s.0).unwrap_or(&content);
+ if ICE_REPORT_COMPILER_FLAGS_EXCLUDE.iter().any(|exc| option == *exc) {
excluded_cargo_defaults = true;
} else {
result.push(a.to_string());
- match ICE_REPORT_COMPILER_FLAGS_STRIP_VALUE.iter().find(|s| content.starts_with(*s))
- {
- Some(s) => result.push(s.to_string()),
+ match ICE_REPORT_COMPILER_FLAGS_STRIP_VALUE.iter().find(|s| option == **s) {
+ Some(s) => result.push(format!("{}=[REDACTED]", s)),
None => result.push(content),
}
}
@@ -1148,6 +1159,17 @@ static DEFAULT_HOOK: LazyLock<Box<dyn Fn(&panic::PanicInfo<'_>) + Sync + Send +
LazyLock::new(|| {
let hook = panic::take_hook();
panic::set_hook(Box::new(|info| {
+ // If the error was caused by a broken pipe then this is not a bug.
+ // Write the error and return immediately. See #98700.
+ #[cfg(windows)]
+ if let Some(msg) = info.payload().downcast_ref::<String>() {
+ if msg.starts_with("failed printing to stdout: ") && msg.ends_with("(os error 232)")
+ {
+ early_error_no_abort(ErrorOutputType::default(), &msg);
+ return;
+ }
+ };
+
// Invoke the default handler, which prints the actual panic message and optionally a backtrace
(*DEFAULT_HOOK)(info);
diff --git a/compiler/rustc_driver/src/pretty.rs b/compiler/rustc_driver/src/pretty.rs
index f66b1a297..f9b1316d2 100644
--- a/compiler/rustc_driver/src/pretty.rs
+++ b/compiler/rustc_driver/src/pretty.rs
@@ -1,5 +1,6 @@
//! The various pretty-printing routines.
+use crate::session_diagnostics::UnprettyDumpFail;
use rustc_ast as ast;
use rustc_ast_pretty::pprust;
use rustc_errors::ErrorGuaranteed;
@@ -328,7 +329,7 @@ impl<'tcx> pprust_hir::PpAnn for TypedAnnotation<'tcx> {
let typeck_results = self.maybe_typeck_results.get().or_else(|| {
self.tcx
.hir()
- .maybe_body_owned_by(expr.hir_id.owner)
+ .maybe_body_owned_by(expr.hir_id.owner.def_id)
.map(|body_id| self.tcx.typeck_body(body_id))
});
@@ -357,12 +358,15 @@ fn get_source(input: &Input, sess: &Session) -> (String, FileName) {
(src, src_name)
}
-fn write_or_print(out: &str, ofile: Option<&Path>) {
+fn write_or_print(out: &str, ofile: Option<&Path>, sess: &Session) {
match ofile {
None => print!("{}", out),
Some(p) => {
if let Err(e) = std::fs::write(p, out) {
- panic!("print-print failed to write {} due to {}", p.display(), e);
+ sess.emit_fatal(UnprettyDumpFail {
+ path: p.display().to_string(),
+ err: e.to_string(),
+ });
}
}
}
@@ -392,6 +396,7 @@ pub fn print_after_parsing(
annotation.pp_ann(),
false,
parse.edition,
+ &sess.parse_sess.attr_id_generator,
)
})
}
@@ -402,7 +407,7 @@ pub fn print_after_parsing(
_ => unreachable!(),
};
- write_or_print(&out, ofile);
+ write_or_print(&out, ofile, sess);
}
pub fn print_after_hir_lowering<'tcx>(
@@ -434,6 +439,7 @@ pub fn print_after_hir_lowering<'tcx>(
annotation.pp_ann(),
true,
parse.edition,
+ &sess.parse_sess.attr_id_generator,
)
})
}
@@ -468,7 +474,7 @@ pub fn print_after_hir_lowering<'tcx>(
_ => unreachable!(),
};
- write_or_print(&out, ofile);
+ write_or_print(&out, ofile, tcx.sess);
}
// In an ideal world, this would be a public function called by the driver after
@@ -496,7 +502,7 @@ fn print_with_analysis(
ThirTree => {
let mut out = String::new();
- abort_on_err(rustc_typeck::check_crate(tcx), tcx.sess);
+ abort_on_err(rustc_hir_analysis::check_crate(tcx), tcx.sess);
debug!("pretty printing THIR tree");
for did in tcx.hir().body_owners() {
let _ = writeln!(
@@ -512,7 +518,7 @@ fn print_with_analysis(
_ => unreachable!(),
};
- write_or_print(&out, ofile);
+ write_or_print(&out, ofile, tcx.sess);
Ok(())
}
diff --git a/compiler/rustc_driver/src/session_diagnostics.rs b/compiler/rustc_driver/src/session_diagnostics.rs
new file mode 100644
index 000000000..c1bc10891
--- /dev/null
+++ b/compiler/rustc_driver/src/session_diagnostics.rs
@@ -0,0 +1,40 @@
+use rustc_macros::Diagnostic;
+
+#[derive(Diagnostic)]
+#[diag(driver_rlink_unable_to_read)]
+pub(crate) struct RlinkUnableToRead {
+ pub err: std::io::Error,
+}
+
+#[derive(Diagnostic)]
+#[diag(driver_rlink_wrong_file_type)]
+pub(crate) struct RLinkWrongFileType;
+
+#[derive(Diagnostic)]
+#[diag(driver_rlink_empty_version_number)]
+pub(crate) struct RLinkEmptyVersionNumber;
+
+#[derive(Diagnostic)]
+#[diag(driver_rlink_encoding_version_mismatch)]
+pub(crate) struct RLinkEncodingVersionMismatch {
+ pub version_array: String,
+ pub rlink_version: u32,
+}
+
+#[derive(Diagnostic)]
+#[diag(driver_rlink_rustc_version_mismatch)]
+pub(crate) struct RLinkRustcVersionMismatch<'a> {
+ pub rustc_version: String,
+ pub current_version: &'a str,
+}
+
+#[derive(Diagnostic)]
+#[diag(driver_rlink_no_a_file)]
+pub(crate) struct RlinkNotAFile;
+
+#[derive(Diagnostic)]
+#[diag(driver_unpretty_dump_fail)]
+pub(crate) struct UnprettyDumpFail {
+ pub path: String,
+ pub err: String,
+}
diff --git a/compiler/rustc_error_codes/src/error_codes.rs b/compiler/rustc_error_codes/src/error_codes.rs
index 854625579..1e86d1596 100644
--- a/compiler/rustc_error_codes/src/error_codes.rs
+++ b/compiler/rustc_error_codes/src/error_codes.rs
@@ -159,6 +159,7 @@ E0307: include_str!("./error_codes/E0307.md"),
E0308: include_str!("./error_codes/E0308.md"),
E0309: include_str!("./error_codes/E0309.md"),
E0310: include_str!("./error_codes/E0310.md"),
+E0311: include_str!("./error_codes/E0311.md"),
E0312: include_str!("./error_codes/E0312.md"),
E0316: include_str!("./error_codes/E0316.md"),
E0317: include_str!("./error_codes/E0317.md"),
@@ -568,7 +569,6 @@ E0790: include_str!("./error_codes/E0790.md"),
// E0300, // unexpanded macro
// E0304, // expected signed integer constant
// E0305, // expected constant
- E0311, // thing may not live long enough
E0313, // lifetime of borrowed pointer outlives lifetime of captured
// variable
// E0314, // closure outlives stack frame
diff --git a/compiler/rustc_error_codes/src/error_codes/E0045.md b/compiler/rustc_error_codes/src/error_codes/E0045.md
index 143c693bf..1cb214531 100644
--- a/compiler/rustc_error_codes/src/error_codes/E0045.md
+++ b/compiler/rustc_error_codes/src/error_codes/E0045.md
@@ -3,9 +3,7 @@ Variadic parameters have been used on a non-C ABI function.
Erroneous code example:
```compile_fail,E0045
-#![feature(unboxed_closures)]
-
-extern "rust-call" {
+extern "Rust" {
fn foo(x: u8, ...); // error!
}
```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0092.md b/compiler/rustc_error_codes/src/error_codes/E0092.md
index 496174b28..5cbe2a188 100644
--- a/compiler/rustc_error_codes/src/error_codes/E0092.md
+++ b/compiler/rustc_error_codes/src/error_codes/E0092.md
@@ -19,6 +19,6 @@ functions are defined in `compiler/rustc_codegen_llvm/src/intrinsic.rs` and in
#![feature(intrinsics)]
extern "rust-intrinsic" {
- fn atomic_fence(); // ok!
+ fn atomic_fence_seqcst(); // ok!
}
```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0094.md b/compiler/rustc_error_codes/src/error_codes/E0094.md
index ec86ec44e..cc546bdbb 100644
--- a/compiler/rustc_error_codes/src/error_codes/E0094.md
+++ b/compiler/rustc_error_codes/src/error_codes/E0094.md
@@ -6,6 +6,7 @@ Erroneous code example:
#![feature(intrinsics)]
extern "rust-intrinsic" {
+ #[rustc_safe_intrinsic]
fn size_of<T, U>() -> usize; // error: intrinsic has wrong number
// of type parameters
}
@@ -19,6 +20,7 @@ Example:
#![feature(intrinsics)]
extern "rust-intrinsic" {
+ #[rustc_safe_intrinsic]
fn size_of<T>() -> usize; // ok!
}
```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0161.md b/compiler/rustc_error_codes/src/error_codes/E0161.md
index ebd2c9769..643990ef1 100644
--- a/compiler/rustc_error_codes/src/error_codes/E0161.md
+++ b/compiler/rustc_error_codes/src/error_codes/E0161.md
@@ -3,7 +3,6 @@ A value was moved whose size was not known at compile time.
Erroneous code example:
```compile_fail,E0161
-#![feature(box_syntax)]
trait Bar {
fn f(self);
}
@@ -13,7 +12,7 @@ impl Bar for i32 {
}
fn main() {
- let b: Box<dyn Bar> = box (0 as i32);
+ let b: Box<dyn Bar> = Box::new(0i32);
b.f();
// error: cannot move a value of type dyn Bar: the size of dyn Bar cannot
// be statically determined
@@ -27,8 +26,6 @@ either `&x` or `&mut x`. Since a reference has a fixed size, this lets you move
it around as usual. Example:
```
-#![feature(box_syntax)]
-
trait Bar {
fn f(&self);
}
@@ -38,7 +35,7 @@ impl Bar for i32 {
}
fn main() {
- let b: Box<dyn Bar> = box (0 as i32);
+ let b: Box<dyn Bar> = Box::new(0i32);
b.f();
// ok!
}
diff --git a/compiler/rustc_error_codes/src/error_codes/E0210.md b/compiler/rustc_error_codes/src/error_codes/E0210.md
index dc2fd9b0c..41263e5e3 100644
--- a/compiler/rustc_error_codes/src/error_codes/E0210.md
+++ b/compiler/rustc_error_codes/src/error_codes/E0210.md
@@ -76,7 +76,5 @@ Let `Ti` be the first such type.
For information on the design of the orphan rules,
see [RFC 2451] and [RFC 1023].
-For information on the design of the orphan rules, see [RFC 1023].
-
[RFC 2451]: https://rust-lang.github.io/rfcs/2451-re-rebalancing-coherence.html
[RFC 1023]: https://github.com/rust-lang/rfcs/blob/master/text/1023-rebalancing-coherence.md
diff --git a/compiler/rustc_error_codes/src/error_codes/E0211.md b/compiler/rustc_error_codes/src/error_codes/E0211.md
index 77289f019..8c2462ebd 100644
--- a/compiler/rustc_error_codes/src/error_codes/E0211.md
+++ b/compiler/rustc_error_codes/src/error_codes/E0211.md
@@ -7,6 +7,7 @@ used. Erroneous code examples:
#![feature(intrinsics)]
extern "rust-intrinsic" {
+ #[rustc_safe_intrinsic]
fn size_of<T>(); // error: intrinsic has wrong type
}
@@ -42,6 +43,7 @@ For the first code example, please check the function definition. Example:
#![feature(intrinsics)]
extern "rust-intrinsic" {
+ #[rustc_safe_intrinsic]
fn size_of<T>() -> usize; // ok!
}
```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0311.md b/compiler/rustc_error_codes/src/error_codes/E0311.md
new file mode 100644
index 000000000..08159d3f4
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0311.md
@@ -0,0 +1,42 @@
+This error occurs when there is an unsatisfied outlives bound involving an
+elided region and a generic type parameter or associated type.
+
+Erroneous code example:
+
+```compile_fail,E0311
+fn no_restriction<T>(x: &()) -> &() {
+ with_restriction::<T>(x)
+}
+
+fn with_restriction<'a, T: 'a>(x: &'a ()) -> &'a () {
+ x
+}
+```
+
+Why doesn't this code compile? It helps to look at the lifetime bounds that are
+automatically added by the compiler. For more details see the documentation for
+[lifetime elision](https://doc.rust-lang.org/reference/lifetime-elision.html).
+
+The compiler elides the lifetime of `x` and the return type to some arbitrary
+lifetime `'anon` in `no_restriction()`. The only information available to the
+compiler is that `'anon` is valid for the duration of the function. When
+calling `with_restriction()`, the compiler requires the completely unrelated
+type parameter `T` to outlive `'anon` because of the `T: 'a` bound in
+`with_restriction()`. This causes an error because `T` is not required to
+outlive `'anon` in `no_restriction()`.
+
+If `no_restriction()` were to use `&T` instead of `&()` as an argument, the
+compiler would have added an implied bound, causing this to compile.
+
+This error can be resolved by explicitly naming the elided lifetime for `x` and
+then explicitly requiring that the generic parameter `T` outlives that lifetime:
+
+```
+fn no_restriction<'a, T: 'a>(x: &'a ()) -> &'a () {
+ with_restriction::<T>(x)
+}
+
+fn with_restriction<'a, T: 'a>(x: &'a ()) -> &'a () {
+ x
+}
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0579.md b/compiler/rustc_error_codes/src/error_codes/E0579.md
index f554242a3..e7e6fb682 100644
--- a/compiler/rustc_error_codes/src/error_codes/E0579.md
+++ b/compiler/rustc_error_codes/src/error_codes/E0579.md
@@ -8,9 +8,9 @@ Erroneous code example:
fn main() {
match 5u32 {
// This range is ok, albeit pointless.
- 1 .. 2 => {}
+ 1..2 => {}
// This range is empty, and the compiler can tell.
- 5 .. 5 => {} // error!
+ 5..5 => {} // error!
}
}
```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0591.md b/compiler/rustc_error_codes/src/error_codes/E0591.md
index f49805d9b..6ed8370e8 100644
--- a/compiler/rustc_error_codes/src/error_codes/E0591.md
+++ b/compiler/rustc_error_codes/src/error_codes/E0591.md
@@ -53,8 +53,8 @@ unsafe {
```
Here, transmute is being used to convert the types of the fn arguments.
-This pattern is incorrect because, because the type of `foo` is a function
-**item** (`typeof(foo)`), which is zero-sized, and the target type (`fn()`)
+This pattern is incorrect because the type of `foo` is a function **item**
+(`typeof(foo)`), which is zero-sized, and the target type (`fn()`)
is a function pointer, which is not zero-sized.
This pattern should be rewritten. There are a few possible ways to do this:
diff --git a/compiler/rustc_error_codes/src/error_codes/E0622.md b/compiler/rustc_error_codes/src/error_codes/E0622.md
index 990a25494..3ba3ed10e 100644
--- a/compiler/rustc_error_codes/src/error_codes/E0622.md
+++ b/compiler/rustc_error_codes/src/error_codes/E0622.md
@@ -5,7 +5,7 @@ Erroneous code example:
```compile_fail,E0622
#![feature(intrinsics)]
extern "rust-intrinsic" {
- pub static breakpoint : fn(); // error: intrinsic must be a function
+ pub static breakpoint: fn(); // error: intrinsic must be a function
}
fn main() { unsafe { breakpoint(); } }
diff --git a/compiler/rustc_error_codes/src/error_codes/E0695.md b/compiler/rustc_error_codes/src/error_codes/E0695.md
index 5013e83ca..577f42ef3 100644
--- a/compiler/rustc_error_codes/src/error_codes/E0695.md
+++ b/compiler/rustc_error_codes/src/error_codes/E0695.md
@@ -3,7 +3,6 @@ A `break` statement without a label appeared inside a labeled block.
Erroneous code example:
```compile_fail,E0695
-# #![feature(label_break_value)]
loop {
'a: {
break;
@@ -14,7 +13,6 @@ loop {
Make sure to always label the `break`:
```
-# #![feature(label_break_value)]
'l: loop {
'a: {
break 'l;
@@ -25,7 +23,6 @@ Make sure to always label the `break`:
Or if you want to `break` the labeled block:
```
-# #![feature(label_break_value)]
loop {
'a: {
break 'a;
diff --git a/compiler/rustc_error_codes/src/error_codes/E0732.md b/compiler/rustc_error_codes/src/error_codes/E0732.md
index 7347e6654..9536fdbf0 100644
--- a/compiler/rustc_error_codes/src/error_codes/E0732.md
+++ b/compiler/rustc_error_codes/src/error_codes/E0732.md
@@ -3,8 +3,6 @@ An `enum` with a discriminant must specify a `#[repr(inttype)]`.
Erroneous code example:
```compile_fail,E0732
-#![feature(arbitrary_enum_discriminant)]
-
enum Enum { // error!
Unit = 1,
Tuple() = 2,
@@ -20,8 +18,6 @@ is a well-defined way to extract a variant's discriminant from a value;
for instance:
```
-#![feature(arbitrary_enum_discriminant)]
-
#[repr(u8)]
enum Enum {
Unit = 3,
diff --git a/compiler/rustc_error_codes/src/error_codes/E0743.md b/compiler/rustc_error_codes/src/error_codes/E0743.md
index ddd3136df..a19d3ef96 100644
--- a/compiler/rustc_error_codes/src/error_codes/E0743.md
+++ b/compiler/rustc_error_codes/src/error_codes/E0743.md
@@ -3,8 +3,6 @@ The C-variadic type `...` has been nested inside another type.
Erroneous code example:
```compile_fail,E0743
-#![feature(c_variadic)]
-
fn foo2(x: u8, y: &...) {} // error!
```
diff --git a/compiler/rustc_error_codes/src/lib.rs b/compiler/rustc_error_codes/src/lib.rs
index f2432f616..bd424dd9d 100644
--- a/compiler/rustc_error_codes/src/lib.rs
+++ b/compiler/rustc_error_codes/src/lib.rs
@@ -1,4 +1,6 @@
#![deny(rustdoc::invalid_codeblock_attributes)]
+#![deny(rustc::untranslatable_diagnostic)]
+#![deny(rustc::diagnostic_outside_of_impl)]
//! This library is used to gather all error codes into one place,
//! the goal being to make their maintenance easier.
diff --git a/compiler/rustc_error_messages/Cargo.toml b/compiler/rustc_error_messages/Cargo.toml
index fc84c7c86..9945f3379 100644
--- a/compiler/rustc_error_messages/Cargo.toml
+++ b/compiler/rustc_error_messages/Cargo.toml
@@ -4,7 +4,6 @@ version = "0.0.0"
edition = "2021"
[lib]
-doctest = false
[dependencies]
fluent-bundle = "0.15.2"
diff --git a/compiler/rustc_error_messages/locales/en-US/ast_lowering.ftl b/compiler/rustc_error_messages/locales/en-US/ast_lowering.ftl
new file mode 100644
index 000000000..03c88c6c0
--- /dev/null
+++ b/compiler/rustc_error_messages/locales/en-US/ast_lowering.ftl
@@ -0,0 +1,141 @@
+ast_lowering_generic_type_with_parentheses =
+ parenthesized type parameters may only be used with a `Fn` trait
+ .label = only `Fn` traits may use parentheses
+
+ast_lowering_use_angle_brackets = use angle brackets instead
+
+ast_lowering_invalid_abi =
+ invalid ABI: found `{$abi}`
+ .label = invalid ABI
+ .note = invoke `{$command}` for a full list of supported calling conventions.
+
+ast_lowering_invalid_abi_suggestion = did you mean
+
+ast_lowering_assoc_ty_parentheses =
+ parenthesized generic arguments cannot be used in associated type constraints
+
+ast_lowering_remove_parentheses = remove these parentheses
+
+ast_lowering_misplaced_impl_trait =
+ `impl Trait` only allowed in function and inherent method return types, not in {$position}
+
+ast_lowering_rustc_box_attribute_error =
+ #[rustc_box] requires precisely one argument and no other attributes are allowed
+
+ast_lowering_underscore_expr_lhs_assign =
+ in expressions, `_` can only be used on the left-hand side of an assignment
+ .label = `_` not allowed here
+
+ast_lowering_base_expression_double_dot =
+ base expression required after `..`
+ .label = add a base expression here
+
+ast_lowering_await_only_in_async_fn_and_blocks =
+ `await` is only allowed inside `async` functions and blocks
+ .label = only allowed inside `async` functions and blocks
+
+ast_lowering_this_not_async = this is not `async`
+
+ast_lowering_generator_too_many_parameters =
+ too many parameters for a generator (expected 0 or 1 parameters)
+
+ast_lowering_closure_cannot_be_static = closures cannot be static
+
+ast_lowering_async_non_move_closure_not_supported =
+ `async` non-`move` closures with parameters are not currently supported
+ .help = consider using `let` statements to manually capture variables by reference before entering an `async move` closure
+
+ast_lowering_functional_record_update_destructuring_assignment =
+ functional record updates are not allowed in destructuring assignments
+ .suggestion = consider removing the trailing pattern
+
+ast_lowering_async_generators_not_supported =
+ `async` generators are not yet supported
+
+ast_lowering_inline_asm_unsupported_target =
+ inline assembly is unsupported on this target
+
+ast_lowering_att_syntax_only_x86 =
+ the `att_syntax` option is only supported on x86
+
+ast_lowering_abi_specified_multiple_times =
+ `{$prev_name}` ABI specified multiple times
+ .label = previously specified here
+ .note = these ABIs are equivalent on the current target
+
+ast_lowering_clobber_abi_not_supported =
+ `clobber_abi` is not supported on this target
+
+ast_lowering_invalid_abi_clobber_abi =
+ invalid ABI for `clobber_abi`
+ .note = the following ABIs are supported on this target: {$supported_abis}
+
+ast_lowering_invalid_register =
+ invalid register `{$reg}`: {$error}
+
+ast_lowering_invalid_register_class =
+ invalid register class `{$reg_class}`: {$error}
+
+ast_lowering_invalid_asm_template_modifier_reg_class =
+ invalid asm template modifier for this register class
+
+ast_lowering_argument = argument
+
+ast_lowering_template_modifier = template modifier
+
+ast_lowering_support_modifiers =
+ the `{$class_name}` register class supports the following template modifiers: {$modifiers}
+
+ast_lowering_does_not_support_modifiers =
+ the `{$class_name}` register class does not support template modifiers
+
+ast_lowering_invalid_asm_template_modifier_const =
+ asm template modifiers are not allowed for `const` arguments
+
+ast_lowering_invalid_asm_template_modifier_sym =
+ asm template modifiers are not allowed for `sym` arguments
+
+ast_lowering_register_class_only_clobber =
+ register class `{$reg_class_name}` can only be used as a clobber, not as an input or output
+
+ast_lowering_register_conflict =
+ register `{$reg1_name}` conflicts with register `{$reg2_name}`
+ .help = use `lateout` instead of `out` to avoid conflict
+
+ast_lowering_register1 = register `{$reg1_name}`
+
+ast_lowering_register2 = register `{$reg2_name}`
+
+ast_lowering_sub_tuple_binding =
+ `{$ident_name} @` is not allowed in a {$ctx}
+ .label = this is only allowed in slice patterns
+ .help = remove this and bind each tuple field independently
+
+ast_lowering_sub_tuple_binding_suggestion = if you don't need to use the contents of {$ident}, discard the tuple's remaining fields
+
+ast_lowering_extra_double_dot =
+ `..` can only be used once per {$ctx} pattern
+ .label = can only be used once per {$ctx} pattern
+
+ast_lowering_previously_used_here = previously used here
+
+ast_lowering_misplaced_double_dot =
+ `..` patterns are not allowed here
+ .note = only allowed in tuple, tuple struct, and slice patterns
+
+ast_lowering_misplaced_relax_trait_bound =
+ `?Trait` bounds are only permitted at the point where a type parameter is declared
+
+ast_lowering_not_supported_for_lifetime_binder_async_closure =
+ `for<...>` binders on `async` closures are not currently supported
+
+ast_lowering_arbitrary_expression_in_pattern =
+ arbitrary expressions aren't allowed in patterns
+
+ast_lowering_inclusive_range_with_no_end = inclusive range with no end
+
+ast_lowering_trait_fn_async =
+ functions in traits cannot be declared `async`
+ .label = `async` because of this
+ .note = `async` trait functions are not currently supported
+ .note2 = consider using the `async-trait` crate: https://crates.io/crates/async-trait
diff --git a/compiler/rustc_error_messages/locales/en-US/ast_passes.ftl b/compiler/rustc_error_messages/locales/en-US/ast_passes.ftl
new file mode 100644
index 000000000..e5cd1142b
--- /dev/null
+++ b/compiler/rustc_error_messages/locales/en-US/ast_passes.ftl
@@ -0,0 +1,91 @@
+ast_passes_forbidden_let =
+ `let` expressions are not supported here
+ .note = only supported directly in conditions of `if` and `while` expressions
+ .not_supported_or = `||` operators are not supported in let chain expressions
+ .not_supported_parentheses = `let`s wrapped in parentheses are not supported in a context with let chains
+
+ast_passes_forbidden_let_stable =
+ expected expression, found statement (`let`)
+ .note = variable declaration using `let` is a statement
+
+ast_passes_deprecated_where_clause_location =
+ where clause not allowed here
+
+ast_passes_forbidden_assoc_constraint =
+ associated type bounds are not allowed within structs, enums, or unions
+
+ast_passes_keyword_lifetime =
+ lifetimes cannot use keyword names
+
+ast_passes_invalid_label =
+ invalid label name `{$name}`
+
+ast_passes_invalid_visibility =
+ unnecessary visibility qualifier
+ .implied = `pub` not permitted here because it's implied
+ .individual_impl_items = place qualifiers on individual impl items instead
+ .individual_foreign_items = place qualifiers on individual foreign items instead
+
+ast_passes_trait_fn_const =
+ functions in traits cannot be declared const
+ .label = functions in traits cannot be const
+
+ast_passes_forbidden_lifetime_bound =
+ lifetime bounds cannot be used in this context
+
+ast_passes_forbidden_non_lifetime_param =
+ only lifetime parameters can be used in this context
+
+ast_passes_fn_param_too_many =
+ function cannot have more than {$max_num_args} arguments
+
+ast_passes_fn_param_c_var_args_only =
+ C-variadic function must be declared with at least one named argument
+
+ast_passes_fn_param_c_var_args_not_last =
+ `...` must be the last argument of a C-variadic function
+
+ast_passes_fn_param_doc_comment =
+ documentation comments cannot be applied to function parameters
+ .label = doc comments are not allowed here
+
+ast_passes_fn_param_forbidden_attr =
+ allow, cfg, cfg_attr, deny, expect, forbid, and warn are the only allowed built-in attributes in function parameters
+
+ast_passes_fn_param_forbidden_self =
+ `self` parameter is only allowed in associated functions
+ .label = not semantically valid as function parameter
+ .note = associated functions are those in `impl` or `trait` definitions
+
+ast_passes_forbidden_default =
+ `default` is only allowed on items in trait impls
+ .label = `default` because of this
+
+ast_passes_assoc_const_without_body =
+ associated constant in `impl` without body
+ .suggestion = provide a definition for the constant
+
+ast_passes_assoc_fn_without_body =
+ associated function in `impl` without body
+ .suggestion = provide a definition for the function
+
+ast_passes_assoc_type_without_body =
+ associated type in `impl` without body
+ .suggestion = provide a definition for the type
+
+ast_passes_const_without_body =
+ free constant item without body
+ .suggestion = provide a definition for the constant
+
+ast_passes_static_without_body =
+ free static item without body
+ .suggestion = provide a definition for the static
+
+ast_passes_ty_alias_without_body =
+ free type alias without body
+ .suggestion = provide a definition for the type
+
+ast_passes_fn_without_body =
+ free function without a body
+ .suggestion = provide a definition for the function
+ .extern_block_suggestion = if you meant to declare an externally defined function, use an `extern` block
diff --git a/compiler/rustc_error_messages/locales/en-US/attr.ftl b/compiler/rustc_error_messages/locales/en-US/attr.ftl
new file mode 100644
index 000000000..a7f8c993d
--- /dev/null
+++ b/compiler/rustc_error_messages/locales/en-US/attr.ftl
@@ -0,0 +1,107 @@
+attr_expected_one_cfg_pattern =
+ expected 1 cfg-pattern
+
+attr_invalid_predicate =
+ invalid predicate `{$predicate}`
+
+attr_multiple_item =
+ multiple '{$item}' items
+
+attr_incorrect_meta_item =
+ incorrect meta item
+
+attr_unknown_meta_item =
+ unknown meta item '{$item}'
+ .label = expected one of {$expected}
+
+attr_missing_since =
+ missing 'since'
+
+attr_missing_note =
+ missing 'note'
+
+attr_multiple_stability_levels =
+ multiple stability levels
+
+attr_invalid_issue_string =
+ `issue` must be a non-zero numeric string or "none"
+ .must_not_be_zero = `issue` must not be "0", use "none" instead
+ .empty = cannot parse integer from empty string
+ .invalid_digit = invalid digit found in string
+ .pos_overflow = number too large to fit in target type
+ .neg_overflow = number too small to fit in target type
+
+attr_missing_feature =
+ missing 'feature'
+
+attr_non_ident_feature =
+ 'feature' is not an identifier
+
+attr_missing_issue =
+ missing 'issue'
+
+attr_incorrect_repr_format_packed_one_or_zero_arg =
+ incorrect `repr(packed)` attribute format: `packed` takes exactly one parenthesized argument, or no parentheses at all
+
+attr_invalid_repr_hint_no_paren =
+ invalid representation hint: `{$name}` does not take a parenthesized argument list
+
+attr_invalid_repr_hint_no_value =
+ invalid representation hint: `{$name}` does not take a value
+
+attr_unsupported_literal_generic =
+ unsupported literal
+attr_unsupported_literal_cfg_string =
+ literal in `cfg` predicate value must be a string
+attr_unsupported_literal_deprecated_string =
+ literal in `deprecated` value must be a string
+attr_unsupported_literal_deprecated_kv_pair =
+ item in `deprecated` must be a key/value pair
+attr_unsupported_literal_suggestion =
+ consider removing the prefix
+
+attr_invalid_repr_align_need_arg =
+ invalid `repr(align)` attribute: `align` needs an argument
+ .suggestion = supply an argument here
+
+attr_invalid_repr_generic =
+ invalid `repr({$repr_arg})` attribute: {$error_part}
+
+attr_incorrect_repr_format_align_one_arg =
+ incorrect `repr(align)` attribute format: `align` takes exactly one argument in parentheses
+
+attr_incorrect_repr_format_generic =
+ incorrect `repr({$repr_arg})` attribute format
+ .suggestion = use parentheses instead
+
+attr_rustc_promotable_pairing =
+ `rustc_promotable` attribute must be paired with either a `rustc_const_unstable` or a `rustc_const_stable` attribute
+
+attr_rustc_allowed_unstable_pairing =
+ `rustc_allowed_through_unstable_modules` attribute must be paired with a `stable` attribute
+
+attr_cfg_predicate_identifier =
+ `cfg` predicate key must be an identifier
+
+attr_deprecated_item_suggestion =
+ suggestions on deprecated items are unstable
+ .help = add `#![feature(deprecated_suggestion)]` to the crate root
+ .note = see #94785 for more details
+
+attr_expected_single_version_literal =
+ expected single version literal
+
+attr_expected_version_literal =
+ expected a version literal
+
+attr_expects_feature_list =
+ `{$name}` expects a list of feature names
+
+attr_expects_features =
+ `{$name}` expects feature names
+
+attr_soft_no_args =
+ `soft` should not have any arguments
+
+attr_unknown_version_literal =
+ unknown version literal format, assuming it refers to a future version
diff --git a/compiler/rustc_error_messages/locales/en-US/borrowck.ftl b/compiler/rustc_error_messages/locales/en-US/borrowck.ftl
index 645673ef4..67f2156f3 100644
--- a/compiler/rustc_error_messages/locales/en-US/borrowck.ftl
+++ b/compiler/rustc_error_messages/locales/en-US/borrowck.ftl
@@ -1,18 +1,60 @@
-borrowck-move-unsized =
+borrowck_move_unsized =
cannot move a value of type `{$ty}`
.label = the size of `{$ty}` cannot be statically determined
-borrowck-higher-ranked-lifetime-error =
+borrowck_higher_ranked_lifetime_error =
higher-ranked lifetime error
-borrowck-could-not-prove =
+borrowck_could_not_prove =
could not prove `{$predicate}`
-borrowck-could-not-normalize =
+borrowck_could_not_normalize =
could not normalize `{$value}`
-borrowck-higher-ranked-subtype-error =
+borrowck_higher_ranked_subtype_error =
higher-ranked subtype error
-
-generic-does-not-live-long-enough =
- `{$kind}` does not live long enough
\ No newline at end of file
+
+borrowck_generic_does_not_live_long_enough =
+ `{$kind}` does not live long enough
+
+borrowck_move_borrowed =
+ cannot move out of `{$desc}` because it is borrowed
+
+borrowck_var_does_not_need_mut =
+ variable does not need to be mutable
+ .suggestion = remove this `mut`
+
+borrowck_const_not_used_in_type_alias =
+ const parameter `{$ct}` is part of concrete type but not used in parameter list for the `impl Trait` type alias
+
+borrowck_var_cannot_escape_closure =
+ captured variable cannot escape `FnMut` closure body
+ .note = `FnMut` closures only have access to their captured variables while they are executing...
+ .cannot_escape = ...therefore, they cannot allow references to captured variables to escape
+
+borrowck_var_here_defined = variable defined here
+
+borrowck_var_here_captured = variable captured here
+
+borrowck_closure_inferred_mut = inferred to be a `FnMut` closure
+
+borrowck_returned_closure_escaped =
+ returns a closure that contains a reference to a captured variable, which then escapes the closure body
+
+borrowck_returned_async_block_escaped =
+ returns an `async` block that contains a reference to a captured variable, which then escapes the closure body
+
+borrowck_returned_ref_escaped =
+ returns a reference to a captured variable which escapes the closure body
+
+borrowck_lifetime_constraints_error =
+ lifetime may not live long enough
+
+borrowck_returned_lifetime_wrong =
+ {$mir_def_name} was supposed to return data with lifetime `{$outlived_fr_name}` but it is returning data with lifetime `{$fr_name}`
+
+borrowck_returned_lifetime_short =
+ {$category_desc}requires that `{$free_region_name}` must outlive `{$outlived_fr_name}`
+
+borrowck_used_impl_require_static =
+ the used `impl` has a `'static` requirement
diff --git a/compiler/rustc_error_messages/locales/en-US/builtin_macros.ftl b/compiler/rustc_error_messages/locales/en-US/builtin_macros.ftl
index 1d3e33c81..4d088e27b 100644
--- a/compiler/rustc_error_messages/locales/en-US/builtin_macros.ftl
+++ b/compiler/rustc_error_messages/locales/en-US/builtin_macros.ftl
@@ -1,5 +1,5 @@
-builtin-macros-requires-cfg-pattern =
+builtin_macros_requires_cfg_pattern =
macro requires a cfg-pattern as an argument
.label = cfg-pattern required
-builtin-macros-expected-one-cfg-pattern = expected 1 cfg-pattern
+builtin_macros_expected_one_cfg_pattern = expected 1 cfg-pattern
diff --git a/compiler/rustc_error_messages/locales/en-US/codegen_gcc.ftl b/compiler/rustc_error_messages/locales/en-US/codegen_gcc.ftl
new file mode 100644
index 000000000..178e1a67c
--- /dev/null
+++ b/compiler/rustc_error_messages/locales/en-US/codegen_gcc.ftl
@@ -0,0 +1,68 @@
+codegen_gcc_ranlib_failure =
+ Ranlib exited with code {$exit_code}
+
+codegen_gcc_linkage_const_or_mut_type =
+ must have type `*const T` or `*mut T` due to `#[linkage]` attribute
+
+codegen_gcc_unwinding_inline_asm =
+ GCC backend does not support unwinding from inline asm
+
+codegen_gcc_lto_not_supported =
+ LTO is not supported. You may get a linker error.
+
+codegen_gcc_invalid_monomorphization_basic_integer =
+ invalid monomorphization of `{$name}` intrinsic: expected basic integer type, found `{$ty}`
+
+codegen_gcc_invalid_monomorphization_invalid_float_vector =
+ invalid monomorphization of `{$name}` intrinsic: unsupported element type `{$elem_ty}` of floating-point vector `{$vec_ty}`
+
+codegen_gcc_invalid_monomorphization_not_float =
+ invalid monomorphization of `{$name}` intrinsic: `{$ty}` is not a floating-point type
+
+codegen_gcc_invalid_monomorphization_unrecognized =
+ invalid monomorphization of `{$name}` intrinsic: unrecognized intrinsic `{$name}`
+
+codegen_gcc_invalid_monomorphization_expected_signed_unsigned =
+ invalid monomorphization of `{$name}` intrinsic: expected element type `{$elem_ty}` of vector type `{$vec_ty}` to be a signed or unsigned integer type
+
+codegen_gcc_invalid_monomorphization_unsupported_element =
+ invalid monomorphization of `{$name}` intrinsic: unsupported {$name} from `{$in_ty}` with element `{$elem_ty}` to `{$ret_ty}`
+
+codegen_gcc_invalid_monomorphization_invalid_bitmask =
+ invalid monomorphization of `{$name}` intrinsic: invalid bitmask `{$ty}`, expected `u{$expected_int_bits}` or `[u8; {$expected_bytes}]`
+
+codegen_gcc_invalid_monomorphization_simd_shuffle =
+ invalid monomorphization of `{$name}` intrinsic: simd_shuffle index must be an array of `u32`, got `{$ty}`
+
+codegen_gcc_invalid_monomorphization_expected_simd =
+ invalid monomorphization of `{$name}` intrinsic: expected SIMD {$expected_ty} type, found non-SIMD `{$found_ty}`
+
+codegen_gcc_invalid_monomorphization_mask_type =
+ invalid monomorphization of `{$name}` intrinsic: mask element type is `{$ty}`, expected `i_`
+
+codegen_gcc_invalid_monomorphization_return_length =
+ invalid monomorphization of `{$name}` intrinsic: expected return type of length {$in_len}, found `{$ret_ty}` with length {$out_len}
+
+codegen_gcc_invalid_monomorphization_return_length_input_type =
+ invalid monomorphization of `{$name}` intrinsic: expected return type with length {$in_len} (same as input type `{$in_ty}`), found `{$ret_ty}` with length {$out_len}
+
+codegen_gcc_invalid_monomorphization_return_element =
+ invalid monomorphization of `{$name}` intrinsic: expected return element type `{$in_elem}` (element of input `{$in_ty}`), found `{$ret_ty}` with element type `{$out_ty}`
+
+codegen_gcc_invalid_monomorphization_return_type =
+ invalid monomorphization of `{$name}` intrinsic: expected return type `{$in_elem}` (element of input `{$in_ty}`), found `{$ret_ty}`
+
+codegen_gcc_invalid_monomorphization_inserted_type =
+ invalid monomorphization of `{$name}` intrinsic: expected inserted type `{$in_elem}` (element of input `{$in_ty}`), found `{$out_ty}`
+
+codegen_gcc_invalid_monomorphization_return_integer_type =
+ invalid monomorphization of `{$name}` intrinsic: expected return type with integer elements, found `{$ret_ty}` with non-integer `{$out_ty}`
+
+codegen_gcc_invalid_monomorphization_mismatched_lengths =
+ invalid monomorphization of `{$name}` intrinsic: mismatched lengths: mask length `{$m_len}` != other vector length `{$v_len}`
+
+codegen_gcc_invalid_monomorphization_unsupported_cast =
+ invalid monomorphization of `{$name}` intrinsic: unsupported cast from `{$in_ty}` with element `{$in_elem}` to `{$ret_ty}` with element `{$out_elem}`
+
+codegen_gcc_invalid_monomorphization_unsupported_operation =
+ invalid monomorphization of `{$name}` intrinsic: unsupported operation on `{$in_ty}` with element `{$in_elem}`
diff --git a/compiler/rustc_error_messages/locales/en-US/codegen_ssa.ftl b/compiler/rustc_error_messages/locales/en-US/codegen_ssa.ftl
new file mode 100644
index 000000000..966a421bc
--- /dev/null
+++ b/compiler/rustc_error_messages/locales/en-US/codegen_ssa.ftl
@@ -0,0 +1,121 @@
+codegen_ssa_lib_def_write_failure = failed to write lib.def file: {$error}
+
+codegen_ssa_version_script_write_failure = failed to write version script: {$error}
+
+codegen_ssa_symbol_file_write_failure = failed to write symbols file: {$error}
+
+codegen_ssa_ld64_unimplemented_modifier = `as-needed` modifier not implemented yet for ld64
+
+codegen_ssa_linker_unsupported_modifier = `as-needed` modifier not supported for current linker
+
+codegen_ssa_L4Bender_exporting_symbols_unimplemented = exporting symbols not implemented yet for L4Bender
+
+codegen_ssa_no_natvis_directory = error enumerating natvis directory: {$error}
+
+codegen_ssa_copy_path = could not copy {$from} to {$to}: {$error}
+
+codegen_ssa_copy_path_buf = unable to copy {$source_file} to {$output_path}: {$error}
+
+codegen_ssa_ignoring_emit_path = ignoring emit path because multiple .{$extension} files were produced
+
+codegen_ssa_ignoring_output = ignoring -o because multiple .{$extension} files were produced
+
+codegen_ssa_create_temp_dir = couldn't create a temp dir: {$error}
+
+codegen_ssa_incompatible_linking_modifiers = the linking modifiers `+bundle` and `+whole-archive` are not compatible with each other when generating rlibs
+
+codegen_ssa_add_native_library = failed to add native library {$library_path}: {$error}
+
+codegen_ssa_multiple_external_func_decl = multiple declarations of external function `{$function}` from library `{$library_name}` have different calling conventions
+
+codegen_ssa_rlib_missing_format = could not find formats for rlibs
+
+codegen_ssa_rlib_only_rmeta_found = could not find rlib for: `{$crate_name}`, found rmeta (metadata) file
+
+codegen_ssa_rlib_not_found = could not find rlib for: `{$crate_name}`
+
+codegen_ssa_rlib_incompatible_dependency_formats = `{$ty1}` and `{$ty2}` do not have equivalent dependency formats (`{$list1}` vs `{$list2}`)
+
+codegen_ssa_linking_failed = linking with `{$linker_path}` failed: {$exit_status}
+
+codegen_ssa_extern_funcs_not_found = some `extern` functions couldn't be found; some native libraries may need to be installed or have their path specified
+
+codegen_ssa_specify_libraries_to_link = use the `-l` flag to specify native libraries to link
+
+codegen_ssa_use_cargo_directive = use the `cargo:rustc-link-lib` directive to specify the native libraries to link with Cargo (see https://doc.rust-lang.org/cargo/reference/build-scripts.html#cargorustc-link-libkindname)
+
+codegen_ssa_thorin_read_input_failure = failed to read input file
+
+codegen_ssa_thorin_parse_input_file_kind = failed to parse input file kind
+
+codegen_ssa_thorin_parse_input_object_file = failed to parse input object file
+
+codegen_ssa_thorin_parse_input_archive_file = failed to parse input archive file
+
+codegen_ssa_thorin_parse_archive_member = failed to parse archive member
+
+codegen_ssa_thorin_invalid_input_kind = input is not an archive or elf object
+
+codegen_ssa_thorin_decompress_data = failed to decompress compressed section
+
+codegen_ssa_thorin_section_without_name = section without name at offset {$offset}
+
+codegen_ssa_thorin_relocation_with_invalid_symbol = relocation with invalid symbol for section `{$section}` at offset {$offset}
+
+codegen_ssa_thorin_multiple_relocations = multiple relocations for section `{$section}` at offset {$offset}
+
+codegen_ssa_thorin_unsupported_relocation = unsupported relocation for section {$section} at offset {$offset}
+
+codegen_ssa_thorin_missing_dwo_name = missing path attribute to DWARF object ({$id})
+
+codegen_ssa_thorin_no_compilation_units = input object has no compilation units
+
+codegen_ssa_thorin_no_die = no top-level debugging information entry in compilation/type unit
+
+codegen_ssa_thorin_top_level_die_not_unit = top-level debugging information entry is not a compilation/type unit
+
+codegen_ssa_thorin_missing_required_section = input object missing required section `{$section}`
+
+codegen_ssa_thorin_parse_unit_abbreviations = failed to parse unit abbreviations
+
+codegen_ssa_thorin_parse_unit_attribute = failed to parse unit attribute
+
+codegen_ssa_thorin_parse_unit_header = failed to parse unit header
+
+codegen_ssa_thorin_parse_unit = failed to parse unit
+
+codegen_ssa_thorin_incompatible_index_version = incompatible `{$section}` index version: found version {$actual}, expected version {$format}
+
+codegen_ssa_thorin_offset_at_index = read offset at index {$index} of `.debug_str_offsets.dwo` section
+
+codegen_ssa_thorin_str_at_offset = read string at offset {$offset} of `.debug_str.dwo` section
+
+codegen_ssa_thorin_parse_index = failed to parse `{$section}` index section
+
+codegen_ssa_thorin_unit_not_in_index = unit {$unit} from input package is not in its index
+
+codegen_ssa_thorin_row_not_in_index = row {$row} found in index's hash table not present in index
+
+codegen_ssa_thorin_section_not_in_row = section not found in unit's row in index
+
+codegen_ssa_thorin_empty_unit = unit {$unit} in input DWARF object with no data
+
+codegen_ssa_thorin_multiple_debug_info_section = multiple `.debug_info.dwo` sections
+
+codegen_ssa_thorin_multiple_debug_types_section = multiple `.debug_types.dwo` sections in a package
+
+codegen_ssa_thorin_not_split_unit = regular compilation unit in object (missing dwo identifier)
+
+codegen_ssa_thorin_duplicate_unit = duplicate split compilation unit ({$unit})
+
+codegen_ssa_thorin_missing_referenced_unit = unit {$unit} referenced by executable was not found
+
+codegen_ssa_thorin_not_output_object_created = no output object was created from inputs
+
+codegen_ssa_thorin_mixed_input_encodings = input objects have mixed encodings
+
+codegen_ssa_thorin_io = {$error}
+codegen_ssa_thorin_object_read = {$error}
+codegen_ssa_thorin_object_write = {$error}
+codegen_ssa_thorin_gimli_read = {$error}
+codegen_ssa_thorin_gimli_write = {$error}
diff --git a/compiler/rustc_error_messages/locales/en-US/compiletest.ftl b/compiler/rustc_error_messages/locales/en-US/compiletest.ftl
new file mode 100644
index 000000000..55061fbce
--- /dev/null
+++ b/compiler/rustc_error_messages/locales/en-US/compiletest.ftl
@@ -0,0 +1,5 @@
+compiletest_example = this is an example message used in testing
+ .note = with a note
+ .help = with a help
+ .suggestion = with a suggestion
+ .label = with a label
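
The compiletest.ftl entry above is the smallest example of the Fluent syntax these locale files use: a snake_case message identifier, a value, and dotted attributes such as `.note` and `.label`. As a rough sketch of how such an entry is consumed at runtime, the Rust snippet below parses the same message and formats its value and `.note` attribute; it assumes the standalone fluent-bundle and unic-langid crates (a 0.15-era API) purely for illustration and is not taken from this diff.

// Minimal sketch, assuming a fluent-bundle 0.15-style API; not part of this diff.
use fluent_bundle::{FluentBundle, FluentResource};
use unic_langid::LanguageIdentifier;

fn main() {
    let ftl = r#"
compiletest_example = this is an example message used in testing
    .note = with a note
"#;
    // Parse the FTL source; a parse failure would return the partial resource plus errors.
    let resource = FluentResource::try_new(ftl.to_string()).expect("valid FTL");

    let langid: LanguageIdentifier = "en-US".parse().expect("valid locale");
    let mut bundle = FluentBundle::new(vec![langid]);
    // Skip the Unicode directional-isolate marks so the output is plain text.
    bundle.set_use_isolating(false);
    bundle.add_resource(resource).expect("no duplicate message ids");

    let msg = bundle.get_message("compiletest_example").expect("message exists");
    let mut errors = vec![];

    let value = bundle.format_pattern(msg.value().expect("has a value"), None, &mut errors);
    let note = msg
        .get_attribute("note")
        .map(|attr| bundle.format_pattern(attr.value(), None, &mut errors));

    println!("{}", value);  // this is an example message used in testing
    println!("{:?}", note); // Some("with a note")
    assert!(errors.is_empty());
}
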
diff --git a/compiler/rustc_error_messages/locales/en-US/const_eval.ftl b/compiler/rustc_error_messages/locales/en-US/const_eval.ftl
index 3f2ff8610..33bb116d6 100644
--- a/compiler/rustc_error_messages/locales/en-US/const_eval.ftl
+++ b/compiler/rustc_error_messages/locales/en-US/const_eval.ftl
@@ -1,31 +1,83 @@
-const-eval-unstable-in-stable =
+const_eval_unstable_in_stable =
const-stable function cannot use `#[feature({$gate})]`
- .unstable-sugg = if it is not part of the public API, make this function unstably const
- .bypass-sugg = otherwise `#[rustc_allow_const_fn_unstable]` can be used to bypass stability checks
+ .unstable_sugg = if it is not part of the public API, make this function unstably const
+ .bypass_sugg = otherwise `#[rustc_allow_const_fn_unstable]` can be used to bypass stability checks
-const-eval-thread-local-access =
+const_eval_thread_local_access =
thread-local statics cannot be accessed at compile-time
-const-eval-static-access =
+const_eval_static_access =
{$kind}s cannot refer to statics
.help = consider extracting the value of the `static` to a `const`, and referring to that
- .teach-note = `static` and `const` variables can refer to other `const` variables. A `const` variable, however, cannot refer to a `static` variable.
- .teach-help = To fix this, the value can be extracted to a `const` and then used.
+ .teach_note = `static` and `const` variables can refer to other `const` variables. A `const` variable, however, cannot refer to a `static` variable.
+ .teach_help = To fix this, the value can be extracted to a `const` and then used.
-const-eval-raw-ptr-to-int =
+const_eval_raw_ptr_to_int =
pointers cannot be cast to integers during const eval
.note = at compile-time, pointers do not have an integer value
.note2 = avoiding this restriction via `transmute`, `union`, or raw pointers leads to compile-time undefined behavior
-const-eval-raw-ptr-comparison =
+const_eval_raw_ptr_comparison =
pointers cannot be reliably compared during const eval
.note = see issue #53020 <https://github.com/rust-lang/rust/issues/53020> for more information
-const-eval-panic-non-str = argument to `panic!()` in a const context must have type `&str`
+const_eval_panic_non_str = argument to `panic!()` in a const context must have type `&str`
-const-eval-mut-deref =
+const_eval_mut_deref =
mutation through a reference is not allowed in {$kind}s
-const-eval-transient-mut-borrow = mutable references are not allowed in {$kind}s
+const_eval_transient_mut_borrow = mutable references are not allowed in {$kind}s
-const-eval-transient-mut-borrow-raw = raw mutable references are not allowed in {$kind}s
+const_eval_transient_mut_borrow_raw = raw mutable references are not allowed in {$kind}s
+
+const_eval_max_num_nodes_in_const = maximum number of nodes exceeded in constant {$global_const_id}
+
+const_eval_unallowed_fn_pointer_call = function pointer calls are not allowed in {$kind}s
+
+const_eval_unstable_const_fn = `{$def_path}` is not yet stable as a const fn
+
+const_eval_unallowed_mutable_refs =
+ mutable references are not allowed in the final value of {$kind}s
+ .teach_note =
+ References in statics and constants may only refer to immutable values.\n\n
+ Statics are shared everywhere, and if they refer to mutable data one might violate memory
+ safety since holding multiple mutable references to shared data is not allowed.\n\n
+ If you really want global mutable state, try using static mut or a global UnsafeCell.
+
+const_eval_unallowed_mutable_refs_raw =
+ raw mutable references are not allowed in the final value of {$kind}s
+ .teach_note =
+ References in statics and constants may only refer to immutable values.\n\n
+ Statics are shared everywhere, and if they refer to mutable data one might violate memory
+ safety since holding multiple mutable references to shared data is not allowed.\n\n
+ If you really want global mutable state, try using static mut or a global UnsafeCell.
+
+const_eval_non_const_fmt_macro_call =
+ cannot call non-const formatting macro in {$kind}s
+
+const_eval_non_const_fn_call =
+ cannot call non-const fn `{$def_path_str}` in {$kind}s
+
+const_eval_unallowed_op_in_const_context =
+ {$msg}
+
+const_eval_unallowed_heap_allocations =
+ allocations are not allowed in {$kind}s
+ .label = allocation not allowed in {$kind}s
+ .teach_note =
+ The value of statics and constants must be known at compile time, and they live for the entire lifetime of a program. Creating a boxed value allocates memory on the heap at runtime, and therefore cannot be done at compile time.
+
+const_eval_unallowed_inline_asm =
+ inline assembly is not allowed in {$kind}s
+
+const_eval_interior_mutable_data_refer =
+ {$kind}s cannot refer to interior mutable data
+ .label = this borrow of an interior mutable value may end up in the final value
+ .help = to fix this, the value can be extracted to a separate `static` item and then referenced
+ .teach_note =
+ A constant containing interior mutable data behind a reference can allow you to modify that data.
+    This would make multiple uses of a constant able to see different values and allow circumventing
+ the `Send` and `Sync` requirements for shared mutable data, which is unsound.
+
+const_eval_interior_mutability_borrow =
+ cannot borrow here, since the borrowed element may contain interior mutability
diff --git a/compiler/rustc_error_messages/locales/en-US/driver.ftl b/compiler/rustc_error_messages/locales/en-US/driver.ftl
new file mode 100644
index 000000000..8ad198c86
--- /dev/null
+++ b/compiler/rustc_error_messages/locales/en-US/driver.ftl
@@ -0,0 +1,13 @@
+driver_rlink_unable_to_read = failed to read rlink file: `{$err}`
+
+driver_rlink_wrong_file_type = the input does not look like a .rlink file
+
+driver_rlink_empty_version_number = the input does not contain a version number
+
+driver_rlink_encoding_version_mismatch = .rlink file was produced with encoding version `{$version_array}`, but the current version is `{$rlink_version}`
+
+driver_rlink_rustc_version_mismatch = .rlink file was produced by rustc version `{$rustc_version}`, but the current version is `{$current_version}`
+
+driver_rlink_no_a_file = rlink must be a file
+
+driver_unpretty_dump_fail = pretty-print failed to write `{$path}` due to error `{$err}`
diff --git a/compiler/rustc_error_messages/locales/en-US/errors.ftl b/compiler/rustc_error_messages/locales/en-US/errors.ftl
new file mode 100644
index 000000000..429bdd277
--- /dev/null
+++ b/compiler/rustc_error_messages/locales/en-US/errors.ftl
@@ -0,0 +1,13 @@
+errors_target_invalid_address_space = invalid address space `{$addr_space}` for `{$cause}` in "data-layout": {$err}
+
+errors_target_invalid_bits = invalid {$kind} `{$bit}` for `{$cause}` in "data-layout": {$err}
+
+errors_target_missing_alignment = missing alignment for `{$cause}` in "data-layout"
+
+errors_target_invalid_alignment = invalid alignment for `{$cause}` in "data-layout": {$err}
+
+errors_target_inconsistent_architecture = inconsistent target specification: "data-layout" claims architecture is {$dl}-endian, while "target-endian" is `{$target}`
+
+errors_target_inconsistent_pointer_width = inconsistent target specification: "data-layout" claims pointers are {$pointer_size}-bit, while "target-pointer-width" is `{$target}`
+
+errors_target_invalid_bits_size = {$err}
diff --git a/compiler/rustc_error_messages/locales/en-US/expand.ftl b/compiler/rustc_error_messages/locales/en-US/expand.ftl
index 8d506a3ea..572059115 100644
--- a/compiler/rustc_error_messages/locales/en-US/expand.ftl
+++ b/compiler/rustc_error_messages/locales/en-US/expand.ftl
@@ -1,5 +1,22 @@
-expand-explain-doc-comment-outer =
+expand_explain_doc_comment_outer =
outer doc comments expand to `#[doc = "..."]`, which is what this macro attempted to match
-expand-explain-doc-comment-inner =
+expand_explain_doc_comment_inner =
inner doc comments expand to `#![doc = "..."]`, which is what this macro attempted to match
+
+expand_expr_repeat_no_syntax_vars =
+ attempted to repeat an expression containing no syntax variables matched as repeating at this depth
+
+expand_must_repeat_once =
+ this must repeat at least once
+
+expand_count_repetition_misplaced =
+    `count` cannot be placed inside the innermost repetition
+
+expand_meta_var_expr_unrecognized_var =
+ variable `{$key}` is not recognized in meta-variable expression
+
+expand_var_still_repeating =
+    variable `{$ident}` is still repeating at this depth
+
+expand_meta_var_dif_seq_matchers = {$msg}
diff --git a/compiler/rustc_error_messages/locales/en-US/hir_analysis.ftl b/compiler/rustc_error_messages/locales/en-US/hir_analysis.ftl
new file mode 100644
index 000000000..74088f4df
--- /dev/null
+++ b/compiler/rustc_error_messages/locales/en-US/hir_analysis.ftl
@@ -0,0 +1,152 @@
+hir_analysis_field_multiply_specified_in_initializer =
+ field `{$ident}` specified more than once
+ .label = used more than once
+ .previous_use_label = first use of `{$ident}`
+
+hir_analysis_unrecognized_atomic_operation =
+ unrecognized atomic operation function: `{$op}`
+ .label = unrecognized atomic operation
+
+hir_analysis_wrong_number_of_generic_arguments_to_intrinsic =
+ intrinsic has wrong number of {$descr} parameters: found {$found}, expected {$expected}
+ .label = expected {$expected} {$descr} {$expected ->
+ [one] parameter
+ *[other] parameters
+ }
+
+hir_analysis_unrecognized_intrinsic_function =
+ unrecognized intrinsic function: `{$name}`
+ .label = unrecognized intrinsic
+
+hir_analysis_lifetimes_or_bounds_mismatch_on_trait =
+ lifetime parameters or bounds on {$item_kind} `{$ident}` do not match the trait declaration
+ .label = lifetimes do not match {$item_kind} in trait
+ .generics_label = lifetimes in impl do not match this {$item_kind} in trait
+
+hir_analysis_drop_impl_on_wrong_item =
+ the `Drop` trait may only be implemented for local structs, enums, and unions
+ .label = must be a struct, enum, or union in the current crate
+
+hir_analysis_field_already_declared =
+ field `{$field_name}` is already declared
+ .label = field already declared
+ .previous_decl_label = `{$field_name}` first declared here
+
+hir_analysis_copy_impl_on_type_with_dtor =
+ the trait `Copy` may not be implemented for this type; the type has a destructor
+ .label = `Copy` not allowed on types with destructors
+
+hir_analysis_multiple_relaxed_default_bounds =
+ type parameter has more than one relaxed default bound, only one is supported
+
+hir_analysis_copy_impl_on_non_adt =
+ the trait `Copy` may not be implemented for this type
+ .label = type is not a structure or enumeration
+
+hir_analysis_trait_object_declared_with_no_traits =
+ at least one trait is required for an object type
+ .alias_span = this alias does not contain a trait
+
+hir_analysis_ambiguous_lifetime_bound =
+ ambiguous lifetime bound, explicit lifetime bound required
+
+hir_analysis_assoc_type_binding_not_allowed =
+ associated type bindings are not allowed here
+ .label = associated type not allowed here
+
+hir_analysis_functional_record_update_on_non_struct =
+ functional record update syntax requires a struct
+
+hir_analysis_typeof_reserved_keyword_used =
+ `typeof` is a reserved keyword but unimplemented
+ .suggestion = consider replacing `typeof(...)` with an actual type
+ .label = reserved keyword
+
+hir_analysis_return_stmt_outside_of_fn_body =
+ return statement outside of function body
+ .encl_body_label = the return is part of this body...
+ .encl_fn_label = ...not the enclosing function body
+
+hir_analysis_yield_expr_outside_of_generator =
+ yield expression outside of generator literal
+
+hir_analysis_struct_expr_non_exhaustive =
+ cannot create non-exhaustive {$what} using struct expression
+
+hir_analysis_method_call_on_unknown_type =
+    the type of this value must be known to call a method on a raw pointer
+
+hir_analysis_value_of_associated_struct_already_specified =
+ the value of the associated type `{$item_name}` (from trait `{$def_path}`) is already specified
+ .label = re-bound here
+ .previous_bound_label = `{$item_name}` bound here first
+
+hir_analysis_address_of_temporary_taken = cannot take address of a temporary
+ .label = temporary value
+
+hir_analysis_add_return_type_add = try adding a return type
+
+hir_analysis_add_return_type_missing_here = a return type might be missing here
+
+hir_analysis_expected_default_return_type = expected `()` because of default return type
+
+hir_analysis_expected_return_type = expected `{$expected}` because of return type
+
+hir_analysis_unconstrained_opaque_type = unconstrained opaque type
+ .note = `{$name}` must be used in combination with a concrete type within the same {$what}
+
+hir_analysis_missing_type_params =
+ the type {$parameterCount ->
+ [one] parameter
+ *[other] parameters
+ } {$parameters} must be explicitly specified
+ .label = type {$parameterCount ->
+ [one] parameter
+ *[other] parameters
+ } {$parameters} must be specified for this
+ .suggestion = set the type {$parameterCount ->
+ [one] parameter
+ *[other] parameters
+ } to the desired {$parameterCount ->
+ [one] type
+ *[other] types
+ }
+ .no_suggestion_label = missing {$parameterCount ->
+ [one] reference
+ *[other] references
+ } to {$parameters}
+ .note = because of the default `Self` reference, type parameters must be specified on object types
+
+hir_analysis_manual_implementation =
+ manual implementations of `{$trait_name}` are experimental
+ .label = manual implementations of `{$trait_name}` are experimental
+ .help = add `#![feature(unboxed_closures)]` to the crate attributes to enable
+
+hir_analysis_substs_on_overridden_impl = could not resolve substs on overridden impl
+
+hir_analysis_unused_extern_crate =
+ unused extern crate
+ .suggestion = remove it
+
+hir_analysis_extern_crate_not_idiomatic =
+ `extern crate` is not idiomatic in the new edition
+ .suggestion = convert it to a `{$msg_code}`
+
+hir_analysis_expected_used_symbol = expected `used`, `used(compiler)` or `used(linker)`
+
+hir_analysis_missing_parentheses_in_range = can't call method `{$method_name}` on type `{$ty_str}`
+
+hir_analysis_add_missing_parentheses_in_range = you must surround the range in parentheses to call its `{$func_name}` function
+
+hir_analysis_const_impl_for_non_const_trait =
+ const `impl` for trait `{$trait_name}` which is not marked with `#[const_trait]`
+ .suggestion = mark `{$trait_name}` as const
+ .note = marking a trait with `#[const_trait]` ensures all default method bodies are `const`
+ .adding = adding a non-const method body in the future would be a breaking change
+
+hir_analysis_const_bound_for_non_const_trait =
+    `~const` can only be applied to `#[const_trait]` traits
+
+hir_analysis_self_in_impl_self =
+ `Self` is not valid in the self type of an impl block
+ .note = replace `Self` with a different type
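
Several of the hir_analysis messages above, such as hir_analysis_missing_type_params and hir_analysis_wrong_number_of_generic_arguments_to_intrinsic, rely on Fluent select expressions keyed on a numeric variable: `[one]` and `*[other]` are CLDR plural categories, and `*` marks the default variant. The sketch below shows how a numeric argument picks a variant; the `missing_params_demo` id is invented for the demo, and the fluent-bundle/unic-langid crates are again only an illustrative assumption.

// Sketch of a plural select expression, assuming fluent-bundle/unic-langid.
use fluent_bundle::{FluentArgs, FluentBundle, FluentResource};
use unic_langid::LanguageIdentifier;

fn main() {
    let ftl = r#"
missing_params_demo = the type {$parameterCount ->
        [one] parameter
       *[other] parameters
    } must be explicitly specified
"#;
    let resource = FluentResource::try_new(ftl.to_string()).expect("valid FTL");
    let langid: LanguageIdentifier = "en-US".parse().expect("valid locale");
    let mut bundle = FluentBundle::new(vec![langid]);
    bundle.set_use_isolating(false);
    bundle.add_resource(resource).expect("no duplicate message ids");

    let msg = bundle.get_message("missing_params_demo").expect("message exists");
    let pattern = msg.value().expect("has a value");
    let mut errors = vec![];

    for (count, expected) in [
        (1, "the type parameter must be explicitly specified"),
        (3, "the type parameters must be explicitly specified"),
    ] {
        let mut args = FluentArgs::new();
        // Numeric arguments are matched against the locale's plural categories.
        args.set("parameterCount", count);
        let rendered = bundle.format_pattern(pattern, Some(&args), &mut errors);
        assert_eq!(rendered, expected);
    }
    assert!(errors.is_empty());
}

Note that the inline space after a variant key is trimmed, which is why the variants render as "parameter"/"parameters" with no stray leading space; the infer.ftl messages further down compensate for that trimming with explicit {" "} placeables.
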
diff --git a/compiler/rustc_error_messages/locales/en-US/infer.ftl b/compiler/rustc_error_messages/locales/en-US/infer.ftl
new file mode 100644
index 000000000..18b3408b0
--- /dev/null
+++ b/compiler/rustc_error_messages/locales/en-US/infer.ftl
@@ -0,0 +1,173 @@
+infer_opaque_hidden_type =
+ opaque type's hidden type cannot be another opaque type from the same scope
+ .label = one of the two opaque types used here has to be outside its defining scope
+ .opaque_type = opaque type whose hidden type is being assigned
+ .hidden_type = opaque type being used as hidden type
+
+infer_type_annotations_needed = {$source_kind ->
+ [closure] type annotations needed for the closure `{$source_name}`
+ [normal] type annotations needed for `{$source_name}`
+ *[other] type annotations needed
+}
+ .label = type must be known at this point
+
+infer_label_bad = {$bad_kind ->
+ *[other] cannot infer type
+ [more_info] cannot infer {$prefix_kind ->
+ *[type] type for {$prefix}
+ [const_with_param] the value of const parameter
+ [const] the value of the constant
+ } `{$name}`{$has_parent ->
+ [true] {" "}declared on the {$parent_prefix} `{$parent_name}`
+ *[false] {""}
+ }
+}
+
+infer_source_kind_subdiag_let = {$kind ->
+ [with_pattern] consider giving `{$name}` an explicit type
+ [closure] consider giving this closure parameter an explicit type
+ *[other] consider giving this pattern a type
+}{$x_kind ->
+ [has_name] , where the {$prefix_kind ->
+ *[type] type for {$prefix}
+ [const_with_param] the value of const parameter
+ [const] the value of the constant
+ } `{$arg_name}` is specified
+ [underscore] , where the placeholders `_` are specified
+ *[empty] {""}
+}
+
+infer_source_kind_subdiag_generic_label =
+ cannot infer {$is_type ->
+ [true] type
+ *[false] the value
+ } of the {$is_type ->
+ [true] type
+ *[false] const
+ } {$parent_exists ->
+ [true] parameter `{$param_name}` declared on the {$parent_prefix} `{$parent_name}`
+ *[false] parameter {$param_name}
+ }
+
+infer_source_kind_subdiag_generic_suggestion =
+ consider specifying the generic {$arg_count ->
+ [one] argument
+ *[other] arguments
+ }
+
+infer_source_kind_fully_qualified =
+ try using a fully qualified path to specify the expected types
+
+infer_source_kind_closure_return =
+ try giving this closure an explicit return type
+
+# generator_kind may need to be translated
+infer_need_type_info_in_generator =
+ type inside {$generator_kind ->
+ [async_block] `async` block
+ [async_closure] `async` closure
+ [async_fn] `async fn` body
+ *[generator] generator
+ } must be known in this context
+
+
+infer_subtype = ...so that the {$requirement ->
+ [method_compat] method type is compatible with trait
+ [type_compat] associated type is compatible with trait
+ [const_compat] const is compatible with trait
+ [expr_assignable] expression is assignable
+ [if_else_different] `if` and `else` have incompatible types
+ [no_else] `if` missing an `else` returns `()`
+ [fn_main_correct_type] `main` function has the correct type
+    [fn_start_correct_type] `#[start]` function has the correct type
+ [intristic_correct_type] intrinsic has the correct type
+ [method_correct_type] method receiver has the correct type
+ *[other] types are compatible
+}
+infer_subtype_2 = ...so that {$requirement ->
+ [method_compat] method type is compatible with trait
+ [type_compat] associated type is compatible with trait
+ [const_compat] const is compatible with trait
+ [expr_assignable] expression is assignable
+ [if_else_different] `if` and `else` have incompatible types
+ [no_else] `if` missing an `else` returns `()`
+ [fn_main_correct_type] `main` function has the correct type
+    [fn_start_correct_type] `#[start]` function has the correct type
+ [intristic_correct_type] intrinsic has the correct type
+ [method_correct_type] method receiver has the correct type
+ *[other] types are compatible
+}
+
+infer_reborrow = ...so that reference does not outlive borrowed content
+infer_reborrow_upvar = ...so that closure can access `{$name}`
+infer_relate_object_bound = ...so that it can be closed over into an object
+infer_data_borrowed = ...so that the type `{$name}` is not borrowed for too long
+infer_reference_outlives_referent = ...so that the reference type `{$name}` does not outlive the data it points at
+infer_relate_param_bound = ...so that the type `{$name}` will meet its required lifetime bounds{$continues ->
+ [true] ...
+ *[false] {""}
+}
+infer_relate_param_bound_2 = ...that is required by this bound
+infer_relate_region_param_bound = ...so that the declared lifetime parameter bounds are satisfied
+infer_compare_impl_item_obligation = ...so that the definition in impl matches the definition from the trait
+infer_ascribe_user_type_prove_predicate = ...so that the where clause holds
+
+infer_nothing = {""}
+
+infer_lifetime_mismatch = lifetime mismatch
+
+infer_declared_different = this parameter and the return type are declared with different lifetimes...
+infer_data_returned = ...but data{$label_var1_exists ->
+ [true] {" "}from `{$label_var1}`
+ *[false] {""}
+} is returned here
+
+infer_data_lifetime_flow = ...but data with one lifetime flows into the other here
+infer_declared_multiple = this type is declared with multiple lifetimes...
+infer_types_declared_different = these two types are declared with different lifetimes...
+infer_data_flows = ...but data{$label_var1_exists ->
+ [true] -> {" "}from `{$label_var1}`
+ *[false] -> {""}
+} flows{$label_var2_exists ->
+ [true] -> {" "}into `{$label_var2}`
+ *[false] -> {""}
+} here
+
+infer_lifetime_param_suggestion = consider introducing a named lifetime parameter{$is_impl ->
+ [true] {" "}and update trait if needed
+ *[false] {""}
+}
+infer_lifetime_param_suggestion_elided = each elided lifetime in input position becomes a distinct lifetime
+
+infer_region_explanation = {$pref_kind ->
+ *[should_not_happen] [{$pref_kind}]
+ [empty] {""}
+}{$pref_kind ->
+ [empty] {""}
+ *[other] {" "}
+}{$desc_kind ->
+ *[should_not_happen] [{$desc_kind}]
+ [restatic] the static lifetime
+ [reempty] the empty lifetime
+ [reemptyuni] the empty lifetime in universe {$desc_arg}
+ [revar] lifetime {$desc_arg}
+
+ [as_defined] the lifetime `{$desc_arg}` as defined here
+ [as_defined_anon] the anonymous lifetime as defined here
+ [defined_here] the anonymous lifetime defined here
+ [anon_num_here] the anonymous lifetime #{$desc_num_arg} defined here
+ [defined_here_reg] the lifetime `{$desc_arg}` as defined here
+}{$suff_kind ->
+ *[should_not_happen] [{$suff_kind}]
+ [empty]{""}
+ [continues] ...
+}
+
+infer_mismatched_static_lifetime = incompatible lifetime on type
+infer_does_not_outlive_static_from_impl = ...does not necessarily outlive the static lifetime introduced by the compatible `impl`
+infer_implicit_static_lifetime_note = this has an implicit `'static` lifetime requirement
+infer_implicit_static_lifetime_suggestion = consider relaxing the implicit `'static` requirement
+infer_msl_introduces_static = introduces a `'static` lifetime requirement
+infer_msl_unmet_req = because this has an unmet lifetime requirement
+infer_msl_trait_note = this has an implicit `'static` lifetime requirement
+infer_msl_trait_sugg = consider relaxing the implicit `'static` requirement
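
The infer messages make heavy use of string-keyed select expressions plus quoted string literals inside placeables: because inline whitespace after a variant key is trimmed, `{" "}` emits an explicit leading space and `{""}` emits nothing, which is how optional fragments such as "and update trait if needed" are toggled. A sketch of that mechanism, under the same illustrative fluent-bundle assumption and with an invented `lifetime_param_suggestion_demo` id:

// Sketch of the string-keyed selector plus `{" "}` / `{""}` trick used by
// messages such as infer_lifetime_param_suggestion. Assumes fluent-bundle/unic-langid.
use fluent_bundle::{FluentArgs, FluentBundle, FluentResource};
use unic_langid::LanguageIdentifier;

fn main() {
    let ftl = r#"
lifetime_param_suggestion_demo = consider introducing a named lifetime parameter{$is_impl ->
        [true] {" "}and update trait if needed
       *[false] {""}
    }
"#;
    let resource = FluentResource::try_new(ftl.to_string()).expect("valid FTL");
    let langid: LanguageIdentifier = "en-US".parse().expect("valid locale");
    let mut bundle = FluentBundle::new(vec![langid]);
    bundle.set_use_isolating(false);
    bundle.add_resource(resource).expect("no duplicate message ids");

    let msg = bundle.get_message("lifetime_param_suggestion_demo").expect("message exists");
    let pattern = msg.value().expect("has a value");
    let mut errors = vec![];

    for (is_impl, expected) in [
        ("true", "consider introducing a named lifetime parameter and update trait if needed"),
        ("false", "consider introducing a named lifetime parameter"),
    ] {
        let mut args = FluentArgs::new();
        // String arguments are compared directly against the variant keys.
        args.set("is_impl", is_impl);
        let rendered = bundle.format_pattern(pattern, Some(&args), &mut errors);
        assert_eq!(rendered, expected);
    }
    assert!(errors.is_empty());
}

Because string arguments are compared verbatim against the variant keys, passing is_impl as the string "true" or "false" selects a branch directly instead of going through plural rules.
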
diff --git a/compiler/rustc_error_messages/locales/en-US/interface.ftl b/compiler/rustc_error_messages/locales/en-US/interface.ftl
new file mode 100644
index 000000000..bbcb8fc28
--- /dev/null
+++ b/compiler/rustc_error_messages/locales/en-US/interface.ftl
@@ -0,0 +1,43 @@
+interface_ferris_identifier =
+ Ferris cannot be used as an identifier
+ .suggestion = try using their name instead
+
+interface_emoji_identifier =
+ identifiers cannot contain emoji: `{$ident}`
+
+interface_mixed_bin_crate =
+ cannot mix `bin` crate type with others
+
+interface_mixed_proc_macro_crate =
+ cannot mix `proc-macro` crate type with others
+
+interface_proc_macro_doc_without_arg =
+    trying to document a proc-macro crate without passing '--crate-type proc-macro' to rustdoc
+    .warn = the generated documentation may be incorrect
+
+interface_error_writing_dependencies =
+ error writing dependencies to `{$path}`: {$error}
+
+interface_input_file_would_be_overwritten =
+ the input file "{$path}" would be overwritten by the generated executable
+
+interface_generated_file_conflicts_with_directory =
+ the generated executable for the input file "{$input_path}" conflicts with the existing directory "{$dir_path}"
+
+interface_temps_dir_error =
+ failed to find or create the directory specified by `--temps-dir`
+
+interface_out_dir_error =
+ failed to find or create the directory specified by `--out-dir`
+
+interface_cant_emit_mir =
+ could not emit MIR: {$error}
+
+interface_rustc_error_fatal =
+ fatal error triggered by #[rustc_error]
+
+interface_rustc_error_unexpected_annotation =
+    unexpected annotation used with `#[rustc_error(...)]`!
+
+interface_failed_writing_file =
+    failed to write file {$path}: {$error}
diff --git a/compiler/rustc_error_messages/locales/en-US/lint.ftl b/compiler/rustc_error_messages/locales/en-US/lint.ftl
index 55e96e58e..7e28f22c0 100644
--- a/compiler/rustc_error_messages/locales/en-US/lint.ftl
+++ b/compiler/rustc_error_messages/locales/en-US/lint.ftl
@@ -1,22 +1,22 @@
-lint-array-into-iter =
+lint_array_into_iter =
     this method call resolves to `<&{$target} as IntoIterator>::into_iter` (due to backwards compatibility), but will resolve to `<{$target} as IntoIterator>::into_iter` in Rust 2021
- .use-iter-suggestion = use `.iter()` instead of `.into_iter()` to avoid ambiguity
- .remove-into-iter-suggestion = or remove `.into_iter()` to iterate by value
- .use-explicit-into-iter-suggestion =
+ .use_iter_suggestion = use `.iter()` instead of `.into_iter()` to avoid ambiguity
+ .remove_into_iter_suggestion = or remove `.into_iter()` to iterate by value
+ .use_explicit_into_iter_suggestion =
or use `IntoIterator::into_iter(..)` instead of `.into_iter()` to explicitly iterate by value
-lint-enum-intrinsics-mem-discriminant =
+lint_enum_intrinsics_mem_discriminant =
the return value of `mem::discriminant` is unspecified when called with a non-enum type
.note = the argument to `discriminant` should be a reference to an enum, but it was passed a reference to a `{$ty_param}`, which is not an enum.
-lint-enum-intrinsics-mem-variant =
+lint_enum_intrinsics_mem_variant =
the return value of `mem::variant_count` is unspecified when called with a non-enum type
.note = the type parameter of `variant_count` should be an enum, but it was instantiated with the type `{$ty_param}`, which is not an enum.
-lint-expectation = this lint expectation is unfulfilled
+lint_expectation = this lint expectation is unfulfilled
.note = the `unfulfilled_lint_expectations` lint can't be expected and will always produce this message
-lint-hidden-unicode-codepoints = unicode codepoint changing visible direction of text present in {$label}
+lint_hidden_unicode_codepoints = unicode codepoint changing visible direction of text present in {$label}
.label = this {$label} contains {$count ->
[one] an invisible
*[other] invisible
@@ -25,68 +25,68 @@ lint-hidden-unicode-codepoints = unicode codepoint changing visible direction of
*[other] codepoints
}
     .note = these kinds of unicode codepoints change the way text flows on applications that support them, but can cause confusion because they change the order of characters on the screen
- .suggestion-remove = if their presence wasn't intentional, you can remove them
- .suggestion-escape = if you want to keep them but make them visible in your source code, you can escape them
- .no-suggestion-note-escape = if you want to keep them but make them visible in your source code, you can escape them: {$escaped}
+ .suggestion_remove = if their presence wasn't intentional, you can remove them
+ .suggestion_escape = if you want to keep them but make them visible in your source code, you can escape them
+ .no_suggestion_note_escape = if you want to keep them but make them visible in your source code, you can escape them: {$escaped}
-lint-default-hash-types = prefer `{$preferred}` over `{$used}`, it has better performance
+lint_default_hash_types = prefer `{$preferred}` over `{$used}`, it has better performance
.note = a `use rustc_data_structures::fx::{$preferred}` may be necessary
-lint-query-instability = using `{$query}` can result in unstable query results
+lint_query_instability = using `{$query}` can result in unstable query results
.note = if you believe this case to be fine, allow this lint and add a comment explaining your rationale
-lint-tykind-kind = usage of `ty::TyKind::<kind>`
+lint_tykind_kind = usage of `ty::TyKind::<kind>`
.suggestion = try using `ty::<kind>` directly
-lint-tykind = usage of `ty::TyKind`
+lint_tykind = usage of `ty::TyKind`
.help = try using `Ty` instead
-lint-ty-qualified = usage of qualified `ty::{$ty}`
+lint_ty_qualified = usage of qualified `ty::{$ty}`
.suggestion = try importing it and using it unqualified
-lint-lintpass-by-hand = implementing `LintPass` by hand
+lint_lintpass_by_hand = implementing `LintPass` by hand
.help = try using `declare_lint_pass!` or `impl_lint_pass!` instead
-lint-non-existant-doc-keyword = found non-existing keyword `{$keyword}` used in `#[doc(keyword = \"...\")]`
+lint_non_existant_doc_keyword = found non-existing keyword `{$keyword}` used in `#[doc(keyword = \"...\")]`
.help = only existing keywords are allowed in core/std
-lint-diag-out-of-impl =
- diagnostics should only be created in `SessionDiagnostic`/`AddSubdiagnostic` impls
+lint_diag_out_of_impl =
+ diagnostics should only be created in `IntoDiagnostic`/`AddToDiagnostic` impls
-lint-untranslatable-diag = diagnostics should be created using translatable messages
+lint_untranslatable_diag = diagnostics should be created using translatable messages
-lint-cstring-ptr = getting the inner pointer of a temporary `CString`
- .as-ptr-label = this pointer will be invalid
- .unwrap-label = this `CString` is deallocated at the end of the statement, bind it to a variable to extend its lifetime
+lint_cstring_ptr = getting the inner pointer of a temporary `CString`
+ .as_ptr_label = this pointer will be invalid
+ .unwrap_label = this `CString` is deallocated at the end of the statement, bind it to a variable to extend its lifetime
.note = pointers do not have a lifetime; when calling `as_ptr` the `CString` will be deallocated at the end of the statement because nothing is referencing it as far as the type system is concerned
.help = for more information, see https://doc.rust-lang.org/reference/destructors.html
-lint-identifier-non-ascii-char = identifier contains non-ASCII characters
+lint_identifier_non_ascii_char = identifier contains non-ASCII characters
-lint-identifier-uncommon-codepoints = identifier contains uncommon Unicode codepoints
+lint_identifier_uncommon_codepoints = identifier contains uncommon Unicode codepoints
-lint-confusable-identifier-pair = identifier pair considered confusable between `{$existing_sym}` and `{$sym}`
+lint_confusable_identifier_pair = identifier pair considered confusable between `{$existing_sym}` and `{$sym}`
.label = this is where the previous identifier occurred
-lint-mixed-script-confusables =
+lint_mixed_script_confusables =
the usage of Script Group `{$set}` in this crate consists solely of mixed script confusables
- .includes-note = the usage includes {$includes}
+ .includes_note = the usage includes {$includes}
.note = please recheck to make sure their usages are indeed what you want
-lint-non-fmt-panic = panic message is not a string literal
+lint_non_fmt_panic = panic message is not a string literal
.note = this usage of `{$name}!()` is deprecated; it will be a hard error in Rust 2021
- .more-info-note = for more information, see <https://doc.rust-lang.org/nightly/edition-guide/rust-2021/panic-macro-consistency.html>
- .supports-fmt-note = the `{$name}!()` macro supports formatting, so there's no need for the `format!()` macro here
- .supports-fmt-suggestion = remove the `format!(..)` macro call
- .display-suggestion = add a "{"{"}{"}"}" format string to `Display` the message
- .debug-suggestion =
+ .more_info_note = for more information, see <https://doc.rust-lang.org/nightly/edition-guide/rust-2021/panic-macro-consistency.html>
+ .supports_fmt_note = the `{$name}!()` macro supports formatting, so there's no need for the `format!()` macro here
+ .supports_fmt_suggestion = remove the `format!(..)` macro call
+ .display_suggestion = add a "{"{"}{"}"}" format string to `Display` the message
+ .debug_suggestion =
add a "{"{"}:?{"}"}" format string to use the `Debug` implementation of `{$ty}`
- .panic-suggestion = {$already_suggested ->
+ .panic_suggestion = {$already_suggested ->
[true] or use
*[false] use
} std::panic::panic_any instead
-lint-non-fmt-panic-unused =
+lint_non_fmt_panic_unused =
panic message contains {$count ->
[one] an unused
*[other] unused
@@ -95,13 +95,13 @@ lint-non-fmt-panic-unused =
*[other] placeholders
}
.note = this message is not used as a format string when given without arguments, but will be in Rust 2021
- .add-args-suggestion = add the missing {$count ->
+ .add_args_suggestion = add the missing {$count ->
[one] argument
*[other] arguments
}
- .add-fmt-suggestion = or add a "{"{"}{"}"}" format string to use the message literally
+ .add_fmt_suggestion = or add a "{"{"}{"}"}" format string to use the message literally
-lint-non-fmt-panic-braces =
+lint_non_fmt_panic_braces =
panic message contains {$count ->
[one] a brace
*[other] braces
@@ -109,30 +109,30 @@ lint-non-fmt-panic-braces =
.note = this message is not used as a format string, but will be in Rust 2021
.suggestion = add a "{"{"}{"}"}" format string to use the message literally
-lint-non-camel-case-type = {$sort} `{$name}` should have an upper camel case name
+lint_non_camel_case_type = {$sort} `{$name}` should have an upper camel case name
.suggestion = convert the identifier to upper camel case
.label = should have an UpperCamelCase name
-lint-non-snake-case = {$sort} `{$name}` should have a snake case name
- .rename-or-convert-suggestion = rename the identifier or convert it to a snake case raw identifier
- .cannot-convert-note = `{$sc}` cannot be used as a raw identifier
- .rename-suggestion = rename the identifier
- .convert-suggestion = convert the identifier to snake case
+lint_non_snake_case = {$sort} `{$name}` should have a snake case name
+ .rename_or_convert_suggestion = rename the identifier or convert it to a snake case raw identifier
+ .cannot_convert_note = `{$sc}` cannot be used as a raw identifier
+ .rename_suggestion = rename the identifier
+ .convert_suggestion = convert the identifier to snake case
.help = convert the identifier to snake case: `{$sc}`
.label = should have a snake_case name
-lint-non-upper_case-global = {$sort} `{$name}` should have an upper case name
+lint_non_upper_case_global = {$sort} `{$name}` should have an upper case name
.suggestion = convert the identifier to upper case
.label = should have an UPPER_CASE name
-lint-noop-method-call = call to `.{$method}()` on a reference in this situation does nothing
+lint_noop_method_call = call to `.{$method}()` on a reference in this situation does nothing
.label = unnecessary method call
.note = the type `{$receiver_ty}` which `{$method}` is being called on is the same as the type returned from `{$method}`, so the method call does not do anything and can be removed
-lint-pass-by-value = passing `{$ty}` by reference
+lint_pass_by_value = passing `{$ty}` by reference
.suggestion = try passing by value
-lint-redundant-semicolons =
+lint_redundant_semicolons =
unnecessary trailing {$multiple ->
[true] semicolons
*[false] semicolon
@@ -142,254 +142,299 @@ lint-redundant-semicolons =
*[false] this semicolon
}
-lint-drop-trait-constraints =
+lint_drop_trait_constraints =
bounds on `{$predicate}` are most likely incorrect, consider instead using `{$needs_drop}` to detect whether a type can be trivially dropped
-lint-drop-glue =
+lint_drop_glue =
types that do not implement `Drop` can still have drop glue, consider instead using `{$needs_drop}` to detect whether a type is trivially dropped
-lint-range-endpoint-out-of-range = range endpoint is out of range for `{$ty}`
+lint_range_endpoint_out_of_range = range endpoint is out of range for `{$ty}`
.suggestion = use an inclusive range instead
-lint-overflowing-bin-hex = literal out of range for `{$ty}`
- .negative-note = the literal `{$lit}` (decimal `{$dec}`) does not fit into the type `{$ty}`
- .negative-becomes-note = and the value `-{$lit}` will become `{$actually}{$ty}`
- .positive-note = the literal `{$lit}` (decimal `{$dec}`) does not fit into the type `{$ty}` and will become `{$actually}{$ty}`
+lint_overflowing_bin_hex = literal out of range for `{$ty}`
+ .negative_note = the literal `{$lit}` (decimal `{$dec}`) does not fit into the type `{$ty}`
+ .negative_becomes_note = and the value `-{$lit}` will become `{$actually}{$ty}`
+ .positive_note = the literal `{$lit}` (decimal `{$dec}`) does not fit into the type `{$ty}` and will become `{$actually}{$ty}`
.suggestion = consider using the type `{$suggestion_ty}` instead
.help = consider using the type `{$suggestion_ty}` instead
-lint-overflowing-int = literal out of range for `{$ty}`
+lint_overflowing_int = literal out of range for `{$ty}`
.note = the literal `{$lit}` does not fit into the type `{$ty}` whose range is `{$min}..={$max}`
.help = consider using the type `{$suggestion_ty}` instead
-lint-only-cast-u8-to-char = only `u8` can be cast into `char`
+lint_only_cast_u8_to_char = only `u8` can be cast into `char`
.suggestion = use a `char` literal instead
-lint-overflowing-uint = literal out of range for `{$ty}`
+lint_overflowing_uint = literal out of range for `{$ty}`
.note = the literal `{$lit}` does not fit into the type `{$ty}` whose range is `{$min}..={$max}`
-lint-overflowing-literal = literal out of range for `{$ty}`
+lint_overflowing_literal = literal out of range for `{$ty}`
.note = the literal `{$lit}` does not fit into the type `{$ty}` and will be converted to `{$ty}::INFINITY`
-lint-unused-comparisons = comparison is useless due to type limits
+lint_unused_comparisons = comparison is useless due to type limits
-lint-improper-ctypes = `extern` {$desc} uses type `{$ty}`, which is not FFI-safe
+lint_improper_ctypes = `extern` {$desc} uses type `{$ty}`, which is not FFI-safe
.label = not FFI-safe
.note = the type is defined here
-lint-improper-ctypes-opaque = opaque types have no C equivalent
+lint_improper_ctypes_opaque = opaque types have no C equivalent
-lint-improper-ctypes-fnptr-reason = this function pointer has Rust-specific calling convention
-lint-improper-ctypes-fnptr-help = consider using an `extern fn(...) -> ...` function pointer instead
+lint_improper_ctypes_fnptr_reason = this function pointer has Rust-specific calling convention
+lint_improper_ctypes_fnptr_help = consider using an `extern fn(...) -> ...` function pointer instead
-lint-improper-ctypes-tuple-reason = tuples have unspecified layout
-lint-improper-ctypes-tuple-help = consider using a struct instead
+lint_improper_ctypes_tuple_reason = tuples have unspecified layout
+lint_improper_ctypes_tuple_help = consider using a struct instead
-lint-improper-ctypes-str-reason = string slices have no C equivalent
-lint-improper-ctypes-str-help = consider using `*const u8` and a length instead
+lint_improper_ctypes_str_reason = string slices have no C equivalent
+lint_improper_ctypes_str_help = consider using `*const u8` and a length instead
-lint-improper-ctypes-dyn = trait objects have no C equivalent
+lint_improper_ctypes_dyn = trait objects have no C equivalent
-lint-improper-ctypes-slice-reason = slices have no C equivalent
-lint-improper-ctypes-slice-help = consider using a raw pointer instead
+lint_improper_ctypes_slice_reason = slices have no C equivalent
+lint_improper_ctypes_slice_help = consider using a raw pointer instead
-lint-improper-ctypes-128bit = 128-bit integers don't currently have a known stable ABI
+lint_improper_ctypes_128bit = 128-bit integers don't currently have a known stable ABI
-lint-improper-ctypes-char-reason = the `char` type has no C equivalent
-lint-improper-ctypes-char-help = consider using `u32` or `libc::wchar_t` instead
+lint_improper_ctypes_char_reason = the `char` type has no C equivalent
+lint_improper_ctypes_char_help = consider using `u32` or `libc::wchar_t` instead
-lint-improper-ctypes-non-exhaustive = this enum is non-exhaustive
-lint-improper-ctypes-non-exhaustive-variant = this enum has non-exhaustive variants
+lint_improper_ctypes_non_exhaustive = this enum is non-exhaustive
+lint_improper_ctypes_non_exhaustive_variant = this enum has non-exhaustive variants
-lint-improper-ctypes-enum-repr-reason = enum has no representation hint
-lint-improper-ctypes-enum-repr-help =
+lint_improper_ctypes_enum_repr_reason = enum has no representation hint
+lint_improper_ctypes_enum_repr_help =
consider adding a `#[repr(C)]`, `#[repr(transparent)]`, or integer `#[repr(...)]` attribute to this enum
-lint-improper-ctypes-struct-fieldless-reason = this struct has no fields
-lint-improper-ctypes-struct-fieldless-help = consider adding a member to this struct
+lint_improper_ctypes_struct_fieldless_reason = this struct has no fields
+lint_improper_ctypes_struct_fieldless_help = consider adding a member to this struct
-lint-improper-ctypes-union-fieldless-reason = this union has no fields
-lint-improper-ctypes-union-fieldless-help = consider adding a member to this union
+lint_improper_ctypes_union_fieldless_reason = this union has no fields
+lint_improper_ctypes_union_fieldless_help = consider adding a member to this union
-lint-improper-ctypes-struct-non-exhaustive = this struct is non-exhaustive
-lint-improper-ctypes-union-non-exhaustive = this union is non-exhaustive
+lint_improper_ctypes_struct_non_exhaustive = this struct is non-exhaustive
+lint_improper_ctypes_union_non_exhaustive = this union is non-exhaustive
-lint-improper-ctypes-struct-layout-reason = this struct has unspecified layout
-lint-improper-ctypes-struct-layout-help = consider adding a `#[repr(C)]` or `#[repr(transparent)]` attribute to this struct
+lint_improper_ctypes_struct_layout_reason = this struct has unspecified layout
+lint_improper_ctypes_struct_layout_help = consider adding a `#[repr(C)]` or `#[repr(transparent)]` attribute to this struct
-lint-improper-ctypes-union-layout-reason = this union has unspecified layout
-lint-improper-ctypes-union-layout-help = consider adding a `#[repr(C)]` or `#[repr(transparent)]` attribute to this union
+lint_improper_ctypes_union_layout_reason = this union has unspecified layout
+lint_improper_ctypes_union_layout_help = consider adding a `#[repr(C)]` or `#[repr(transparent)]` attribute to this union
-lint-improper-ctypes-box = box cannot be represented as a single pointer
+lint_improper_ctypes_box = box cannot be represented as a single pointer
-lint-improper-ctypes-enum-phantomdata = this enum contains a PhantomData field
+lint_improper_ctypes_enum_phantomdata = this enum contains a PhantomData field
-lint-improper-ctypes-struct-zst = this struct contains only zero-sized fields
+lint_improper_ctypes_struct_zst = this struct contains only zero-sized fields
-lint-improper-ctypes-array-reason = passing raw arrays by value is not FFI-safe
-lint-improper-ctypes-array-help = consider passing a pointer to the array
+lint_improper_ctypes_array_reason = passing raw arrays by value is not FFI-safe
+lint_improper_ctypes_array_help = consider passing a pointer to the array
-lint-improper-ctypes-only-phantomdata = composed only of `PhantomData`
+lint_improper_ctypes_only_phantomdata = composed only of `PhantomData`
-lint-variant-size-differences =
+lint_variant_size_differences =
enum variant is more than three times larger ({$largest} bytes) than the next largest
-lint-atomic-ordering-load = atomic loads cannot have `Release` or `AcqRel` ordering
+lint_atomic_ordering_load = atomic loads cannot have `Release` or `AcqRel` ordering
.help = consider using ordering modes `Acquire`, `SeqCst` or `Relaxed`
-lint-atomic-ordering-store = atomic stores cannot have `Acquire` or `AcqRel` ordering
+lint_atomic_ordering_store = atomic stores cannot have `Acquire` or `AcqRel` ordering
.help = consider using ordering modes `Release`, `SeqCst` or `Relaxed`
-lint-atomic-ordering-fence = memory fences cannot have `Relaxed` ordering
+lint_atomic_ordering_fence = memory fences cannot have `Relaxed` ordering
.help = consider using ordering modes `Acquire`, `Release`, `AcqRel` or `SeqCst`
-lint-atomic-ordering-invalid = `{$method}`'s failure ordering may not be `Release` or `AcqRel`, since a failed `{$method}` does not result in a write
+lint_atomic_ordering_invalid = `{$method}`'s failure ordering may not be `Release` or `AcqRel`, since a failed `{$method}` does not result in a write
.label = invalid failure ordering
.help = consider using `Acquire` or `Relaxed` failure ordering instead
-lint-unused-op = unused {$op} that must be used
+lint_unused_op = unused {$op} that must be used
.label = the {$op} produces a value
.suggestion = use `let _ = ...` to ignore the resulting value
-lint-unused-result = unused result of type `{$ty}`
+lint_unused_result = unused result of type `{$ty}`
-lint-unused-closure =
+lint_unused_closure =
unused {$pre}{$count ->
[one] closure
*[other] closures
}{$post} that must be used
.note = closures are lazy and do nothing unless called
-lint-unused-generator =
+lint_unused_generator =
unused {$pre}{$count ->
[one] generator
        *[other] generators
}{$post} that must be used
.note = generators are lazy and do nothing unless resumed
-lint-unused-def = unused {$pre}`{$def}`{$post} that must be used
+lint_unused_def = unused {$pre}`{$def}`{$post} that must be used
-lint-path-statement-drop = path statement drops value
+lint_path_statement_drop = path statement drops value
.suggestion = use `drop` to clarify the intent
-lint-path-statement-no-effect = path statement with no effect
+lint_path_statement_no_effect = path statement with no effect
-lint-unused-delim = unnecessary {$delim} around {$item}
+lint_unused_delim = unnecessary {$delim} around {$item}
.suggestion = remove these {$delim}
-lint-unused-import-braces = braces around {$node} is unnecessary
+lint_unused_import_braces = braces around {$node} are unnecessary
-lint-unused-allocation = unnecessary allocation, use `&` instead
-lint-unused-allocation-mut = unnecessary allocation, use `&mut` instead
+lint_unused_allocation = unnecessary allocation, use `&` instead
+lint_unused_allocation_mut = unnecessary allocation, use `&mut` instead
-lint-builtin-while-true = denote infinite loops with `loop {"{"} ... {"}"}`
+lint_builtin_while_true = denote infinite loops with `loop {"{"} ... {"}"}`
.suggestion = use `loop`
-lint-builtin-box-pointers = type uses owned (Box type) pointers: {$ty}
+lint_builtin_box_pointers = type uses owned (Box type) pointers: {$ty}
-lint-builtin-non-shorthand-field-patterns = the `{$ident}:` in this pattern is redundant
+lint_builtin_non_shorthand_field_patterns = the `{$ident}:` in this pattern is redundant
.suggestion = use shorthand field pattern
-lint-builtin-overridden-symbol-name =
+lint_builtin_overridden_symbol_name =
the linker's behavior with multiple libraries exporting duplicate symbol names is undefined and Rust cannot provide guarantees when you manually override them
-lint-builtin-overridden-symbol-section =
+lint_builtin_overridden_symbol_section =
the program's behavior with overridden link sections on items is unpredictable and Rust cannot provide guarantees when you manually override them
-lint-builtin-allow-internal-unsafe =
+lint_builtin_allow_internal_unsafe =
`allow_internal_unsafe` allows defining macros using unsafe without triggering the `unsafe_code` lint at their call site
-lint-builtin-unsafe-block = usage of an `unsafe` block
+lint_builtin_unsafe_block = usage of an `unsafe` block
-lint-builtin-unsafe-trait = declaration of an `unsafe` trait
+lint_builtin_unsafe_trait = declaration of an `unsafe` trait
-lint-builtin-unsafe-impl = implementation of an `unsafe` trait
+lint_builtin_unsafe_impl = implementation of an `unsafe` trait
-lint-builtin-no-mangle-fn = declaration of a `no_mangle` function
-lint-builtin-export-name-fn = declaration of a function with `export_name`
-lint-builtin-link-section-fn = declaration of a function with `link_section`
+lint_builtin_no_mangle_fn = declaration of a `no_mangle` function
+lint_builtin_export_name_fn = declaration of a function with `export_name`
+lint_builtin_link_section_fn = declaration of a function with `link_section`
-lint-builtin-no-mangle-static = declaration of a `no_mangle` static
-lint-builtin-export-name-static = declaration of a static with `export_name`
-lint-builtin-link-section-static = declaration of a static with `link_section`
+lint_builtin_no_mangle_static = declaration of a `no_mangle` static
+lint_builtin_export_name_static = declaration of a static with `export_name`
+lint_builtin_link_section_static = declaration of a static with `link_section`
-lint-builtin-no-mangle-method = declaration of a `no_mangle` method
-lint-builtin-export-name-method = declaration of a method with `export_name`
+lint_builtin_no_mangle_method = declaration of a `no_mangle` method
+lint_builtin_export_name_method = declaration of a method with `export_name`
-lint-builtin-decl-unsafe-fn = declaration of an `unsafe` function
-lint-builtin-decl-unsafe-method = declaration of an `unsafe` method
-lint-builtin-impl-unsafe-method = implementation of an `unsafe` method
+lint_builtin_decl_unsafe_fn = declaration of an `unsafe` function
+lint_builtin_decl_unsafe_method = declaration of an `unsafe` method
+lint_builtin_impl_unsafe_method = implementation of an `unsafe` method
-lint-builtin-missing-doc = missing documentation for {$article} {$desc}
+lint_builtin_missing_doc = missing documentation for {$article} {$desc}
-lint-builtin-missing-copy-impl = type could implement `Copy`; consider adding `impl Copy`
+lint_builtin_missing_copy_impl = type could implement `Copy`; consider adding `impl Copy`
-lint-builtin-missing-debug-impl =
+lint_builtin_missing_debug_impl =
type does not implement `{$debug}`; consider adding `#[derive(Debug)]` or a manual implementation
-lint-builtin-anonymous-params = anonymous parameters are deprecated and will be removed in the next edition
+lint_builtin_anonymous_params = anonymous parameters are deprecated and will be removed in the next edition
.suggestion = try naming the parameter or explicitly ignoring it
-lint-builtin-deprecated-attr-link = use of deprecated attribute `{$name}`: {$reason}. See {$link}
-lint-builtin-deprecated-attr-used = use of deprecated attribute `{$name}`: no longer used.
-lint-builtin-deprecated-attr-default-suggestion = remove this attribute
+lint_builtin_deprecated_attr_link = use of deprecated attribute `{$name}`: {$reason}. See {$link}
+lint_builtin_deprecated_attr_used = use of deprecated attribute `{$name}`: no longer used.
+lint_builtin_deprecated_attr_default_suggestion = remove this attribute
-lint-builtin-unused-doc-comment = unused doc comment
+lint_builtin_unused_doc_comment = unused doc comment
.label = rustdoc does not generate documentation for {$kind}
- .plain-help = use `//` for a plain comment
- .block-help = use `/* */` for a plain comment
+ .plain_help = use `//` for a plain comment
+ .block_help = use `/* */` for a plain comment
-lint-builtin-no-mangle-generic = functions generic over types or consts must be mangled
+lint_builtin_no_mangle_generic = functions generic over types or consts must be mangled
.suggestion = remove this attribute
-lint-builtin-const-no-mangle = const items should never be `#[no_mangle]`
+lint_builtin_const_no_mangle = const items should never be `#[no_mangle]`
.suggestion = try a static value
-lint-builtin-mutable-transmutes =
+lint_builtin_mutable_transmutes =
     transmuting &T to &mut T is undefined behavior, even if the reference is unused; consider instead using an `UnsafeCell`
-lint-builtin-unstable-features = unstable feature
+lint_builtin_unstable_features = unstable feature
-lint-builtin-unreachable-pub = unreachable `pub` {$what}
+lint_builtin_unreachable_pub = unreachable `pub` {$what}
.suggestion = consider restricting its visibility
.help = or consider exporting it for use by other crates
-lint-builtin-type-alias-bounds-help = use fully disambiguated paths (i.e., `<T as Trait>::Assoc`) to refer to associated types in type aliases
+lint_builtin_unexpected_cli_config_name = unexpected `{$name}` as condition name
+ .help = was set with `--cfg` but isn't in the `--check-cfg` expected names
-lint-builtin-type-alias-where-clause = where clauses are not enforced in type aliases
+lint_builtin_unexpected_cli_config_value = unexpected condition value `{$value}` for condition name `{$name}`
+ .help = was set with `--cfg` but isn't in the `--check-cfg` expected values
+
+lint_builtin_type_alias_bounds_help = use fully disambiguated paths (i.e., `<T as Trait>::Assoc`) to refer to associated types in type aliases
+
+lint_builtin_type_alias_where_clause = where clauses are not enforced in type aliases
.suggestion = the clause will not be checked when the type alias is used, and should be removed
-lint-builtin-type-alias-generic-bounds = bounds on generic parameters are not enforced in type aliases
+lint_builtin_type_alias_generic_bounds = bounds on generic parameters are not enforced in type aliases
.suggestion = the bound will not be checked when the type alias is used, and should be removed
-lint-builtin-trivial-bounds = {$predicate_kind_name} bound {$predicate} does not depend on any type or lifetime parameters
+lint_builtin_trivial_bounds = {$predicate_kind_name} bound {$predicate} does not depend on any type or lifetime parameters
-lint-builtin-ellipsis-inclusive-range-patterns = `...` range patterns are deprecated
+lint_builtin_ellipsis_inclusive_range_patterns = `...` range patterns are deprecated
.suggestion = use `..=` for an inclusive range
-lint-builtin-unnameable-test-items = cannot test inner items
+lint_builtin_unnameable_test_items = cannot test inner items
-lint-builtin-keyword-idents = `{$kw}` is a keyword in the {$next} edition
+lint_builtin_keyword_idents = `{$kw}` is a keyword in the {$next} edition
.suggestion = you can use a raw identifier to stay compatible
-lint-builtin-explicit-outlives = outlives requirements can be inferred
+lint_builtin_explicit_outlives = outlives requirements can be inferred
.suggestion = remove {$count ->
[one] this bound
*[other] these bounds
}
-lint-builtin-incomplete-features = the feature `{$name}` is incomplete and may not be safe to use and/or cause compiler crashes
+lint_builtin_incomplete_features = the feature `{$name}` is incomplete and may not be safe to use and/or cause compiler crashes
.note = see issue #{$n} <https://github.com/rust-lang/rust/issues/{$n}> for more information
.help = consider using `min_{$name}` instead, which is more stable and complete
-lint-builtin-clashing-extern-same-name = `{$this_fi}` redeclared with a different signature
- .previous-decl-label = `{$orig}` previously declared here
- .mismatch-label = this signature doesn't match the previous declaration
-lint-builtin-clashing-extern-diff-name = `{$this_fi}` redeclares `{$orig}` with a different signature
- .previous-decl-label = `{$orig}` previously declared here
- .mismatch-label = this signature doesn't match the previous declaration
+lint_builtin_clashing_extern_same_name = `{$this_fi}` redeclared with a different signature
+ .previous_decl_label = `{$orig}` previously declared here
+ .mismatch_label = this signature doesn't match the previous declaration
+lint_builtin_clashing_extern_diff_name = `{$this_fi}` redeclares `{$orig}` with a different signature
+ .previous_decl_label = `{$orig}` previously declared here
+ .mismatch_label = this signature doesn't match the previous declaration
-lint-builtin-deref-nullptr = dereferencing a null pointer
+lint_builtin_deref_nullptr = dereferencing a null pointer
.label = this code causes undefined behavior when executed
-lint-builtin-asm-labels = avoid using named labels in inline assembly
+lint_builtin_asm_labels = avoid using named labels in inline assembly
+
+lint_overruled_attribute = {$lint_level}({$lint_source}) incompatible with previous forbid
+ .label = overruled by previous forbid
+
+lint_default_source = `forbid` lint level is the default for {$id}
+
+lint_node_source = `forbid` level set here
+ .note = {$reason}
+
+lint_command_line_source = `forbid` lint level was set on command line
+
+lint_malformed_attribute = malformed lint attribute input
+
+lint_bad_attribute_argument = bad attribute argument
+
+lint_reason_must_be_string_literal = reason must be a string literal
+
+lint_reason_must_come_last = reason in lint attribute must come last
+
+lint_unknown_tool_in_scoped_lint = unknown tool name `{$tool_name}` found in scoped lint: `{$tool_name}::{$lint_name}`
+ .help = add `#![register_tool({$tool_name})]` to the crate root
+
+lint_unsupported_group = `{$lint_group}` lint group is not supported with `--force-warn`
+
+lint_requested_level = requested on the command line with `{$level} {$lint_name}`
+
+lint_check_name_unknown = unknown lint: `{$lint_name}`
+ .help = did you mean: `{$suggestion}`
+
+lint_check_name_unknown_tool = unknown lint tool: `{$tool_name}`
+
+lint_check_name_warning = {$msg}
+
+lint_check_name_deprecated = lint name `{$lint_name}` is deprecated and does not have an effect anymore. Use: {$new_name}
+
+lint_opaque_hidden_inferred_bound = opaque type `{$ty}` does not satisfy its associated type bounds
+ .specifically = this associated type bound is unsatisfied for `{$proj_ty}`
+
+lint_opaque_hidden_inferred_bound_sugg = add this bound
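
The renamed lint messages above use standard Fluent features: `{$var}` placeables, `.attribute` sub-messages, and plural selectors such as the `{$count -> ...}` block in `lint_builtin_explicit_outlives`. As a rough, standalone illustration of how such a selector resolves (assuming the general-purpose fluent-bundle 0.15 and unic-langid 0.9 crates with the `macros` feature as dependencies, which are not part of this patch; rustc itself consumes these files through its own diagnostic-translation layer, not this code):

    // Sketch: resolve the `.suggestion` attribute of `lint_builtin_explicit_outlives`
    // for `$count = 2`, so the CLDR plural rules pick the `*[other]` variant.
    use fluent_bundle::{FluentArgs, FluentBundle, FluentResource, FluentValue};
    use unic_langid::langid;

    fn main() {
        let ftl = r#"
    lint_builtin_explicit_outlives = outlives requirements can be inferred
        .suggestion = remove {$count ->
            [one] this bound
           *[other] these bounds
        }
    "#;
        let resource = FluentResource::try_new(ftl.to_string()).expect("valid FTL");
        let mut bundle = FluentBundle::new(vec![langid!("en-US")]);
        bundle.set_use_isolating(false); // skip Unicode isolation marks for readable output
        bundle.add_resource(resource).expect("no duplicate message ids");

        let msg = bundle
            .get_message("lint_builtin_explicit_outlives")
            .expect("message exists");
        let suggestion = msg.get_attribute("suggestion").expect("attribute exists");

        let mut args = FluentArgs::new();
        args.set("count", FluentValue::from(2));

        let mut errors = vec![];
        let text = bundle.format_pattern(suggestion.value(), Some(&args), &mut errors);
        println!("{text}"); // expected: "remove these bounds"
    }
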
diff --git a/compiler/rustc_error_messages/locales/en-US/metadata.ftl b/compiler/rustc_error_messages/locales/en-US/metadata.ftl
new file mode 100644
index 000000000..08e553d9f
--- /dev/null
+++ b/compiler/rustc_error_messages/locales/en-US/metadata.ftl
@@ -0,0 +1,277 @@
+metadata_rlib_required =
+ crate `{$crate_name}` required to be available in rlib format, but was not found in this form
+
+metadata_lib_required =
+ crate `{$crate_name}` required to be available in {$kind} format, but was not found in this form
+
+metadata_crate_dep_multiple =
+ cannot satisfy dependencies so `{$crate_name}` only shows up once
+ .help = having upstream crates all available in one format will likely make this go away
+
+metadata_two_panic_runtimes =
+ cannot link together two panic runtimes: {$prev_name} and {$cur_name}
+
+metadata_bad_panic_strategy =
+ the linked panic runtime `{$runtime}` is not compiled with this crate's panic strategy `{$strategy}`
+
+metadata_required_panic_strategy =
+ the crate `{$crate_name}` requires panic strategy `{$found_strategy}` which is incompatible with this crate's strategy of `{$desired_strategy}`
+
+metadata_incompatible_panic_in_drop_strategy =
+ the crate `{$crate_name}` is compiled with the panic-in-drop strategy `{$found_strategy}` which is incompatible with this crate's strategy of `{$desired_strategy}`
+
+metadata_multiple_names_in_link =
+ multiple `name` arguments in a single `#[link]` attribute
+
+metadata_multiple_kinds_in_link =
+ multiple `kind` arguments in a single `#[link]` attribute
+
+metadata_link_name_form =
+ link name must be of the form `name = "string"`
+
+metadata_link_kind_form =
+ link kind must be of the form `kind = "string"`
+
+metadata_link_modifiers_form =
+ link modifiers must be of the form `modifiers = "string"`
+
+metadata_link_cfg_form =
+ link cfg must be of the form `cfg(/* predicate */)`
+
+metadata_wasm_import_form =
+ wasm import module must be of the form `wasm_import_module = "string"`
+
+metadata_empty_link_name =
+ link name must not be empty
+ .label = empty link name
+
+metadata_link_framework_apple =
+ link kind `framework` is only supported on Apple targets
+
+metadata_framework_only_windows =
+ link kind `raw-dylib` is only supported on Windows targets
+
+metadata_unknown_link_kind =
+ unknown link kind `{$kind}`, expected one of: static, dylib, framework, raw-dylib
+ .label = unknown link kind
+
+metadata_multiple_link_modifiers =
+ multiple `modifiers` arguments in a single `#[link]` attribute
+
+metadata_multiple_cfgs =
+ multiple `cfg` arguments in a single `#[link]` attribute
+
+metadata_link_cfg_single_predicate =
+ link cfg must have a single predicate argument
+
+metadata_multiple_wasm_import =
+ multiple `wasm_import_module` arguments in a single `#[link]` attribute
+
+metadata_unexpected_link_arg =
+ unexpected `#[link]` argument, expected one of: name, kind, modifiers, cfg, wasm_import_module, import_name_type
+
+metadata_invalid_link_modifier =
+ invalid linking modifier syntax, expected '+' or '-' prefix before one of: bundle, verbatim, whole-archive, as-needed
+
+metadata_multiple_modifiers =
+ multiple `{$modifier}` modifiers in a single `modifiers` argument
+
+metadata_bundle_needs_static =
+ linking modifier `bundle` is only compatible with `static` linking kind
+
+metadata_whole_archive_needs_static =
+ linking modifier `whole-archive` is only compatible with `static` linking kind
+
+metadata_as_needed_compatibility =
+ linking modifier `as-needed` is only compatible with `dylib` and `framework` linking kinds
+
+metadata_unknown_link_modifier =
+ unknown linking modifier `{$modifier}`, expected one of: bundle, verbatim, whole-archive, as-needed
+
+metadata_incompatible_wasm_link =
+ `wasm_import_module` is incompatible with other arguments in `#[link]` attributes
+
+metadata_link_requires_name =
+ `#[link]` attribute requires a `name = "string"` argument
+ .label = missing `name` argument
+
+metadata_raw_dylib_no_nul =
+ link name must not contain NUL characters if link kind is `raw-dylib`
+
+metadata_link_ordinal_raw_dylib =
+ `#[link_ordinal]` is only supported if link kind is `raw-dylib`
+
+metadata_lib_framework_apple =
+ library kind `framework` is only supported on Apple targets
+
+metadata_empty_renaming_target =
+ an empty renaming target was specified for library `{$lib_name}`
+
+metadata_renaming_no_link =
+ renaming of the library `{$lib_name}` was specified, however this crate contains no `#[link(...)]` attributes referencing this library
+
+metadata_multiple_renamings =
+ multiple renamings were specified for library `{$lib_name}`
+
+metadata_no_link_mod_override =
+ overriding linking modifiers from command line is not supported
+
+metadata_unsupported_abi_i686 =
+ ABI not supported by `#[link(kind = "raw-dylib")]` on i686
+
+metadata_unsupported_abi =
+ ABI not supported by `#[link(kind = "raw-dylib")]` on this architecture
+
+metadata_fail_create_file_encoder =
+ failed to create file encoder: {$err}
+
+metadata_fail_seek_file =
+ failed to seek the file: {$err}
+
+metadata_fail_write_file =
+ failed to write to the file: {$err}
+
+metadata_crate_not_panic_runtime =
+ the crate `{$crate_name}` is not a panic runtime
+
+metadata_no_panic_strategy =
+ the crate `{$crate_name}` does not have the panic strategy `{$strategy}`
+
+metadata_profiler_builtins_needs_core =
+ `profiler_builtins` crate (required by compiler options) is not compatible with crate attribute `#![no_core]`
+
+metadata_not_profiler_runtime =
+ the crate `{$crate_name}` is not a profiler runtime
+
+metadata_no_multiple_global_alloc =
+ cannot define multiple global allocators
+ .label = cannot define a new global allocator
+
+metadata_prev_global_alloc =
+ previous global allocator defined here
+
+metadata_conflicting_global_alloc =
+ the `#[global_allocator]` in {$other_crate_name} conflicts with global allocator in: {$crate_name}
+
+metadata_global_alloc_required =
+ no global memory allocator found but one is required; link to std or add `#[global_allocator]` to a static item that implements the GlobalAlloc trait
+
+metadata_no_transitive_needs_dep =
+ the crate `{$crate_name}` cannot depend on a crate that needs {$needs_crate_name}, but it depends on `{$deps_crate_name}`
+
+metadata_failed_write_error =
+ failed to write {$filename}: {$err}
+
+metadata_missing_native_library =
+ could not find native static library `{$libname}`, perhaps an -L flag is missing?
+
+metadata_only_provide_library_name = only provide the library name `{$suggested_name}`, not the full filename
+
+metadata_failed_create_tempdir =
+ couldn't create a temp dir: {$err}
+
+metadata_failed_create_file =
+ failed to create the file {$filename}: {$err}
+
+metadata_failed_create_encoded_metadata =
+ failed to create encoded metadata from file: {$err}
+
+metadata_non_ascii_name =
+ cannot load a crate with a non-ascii name `{$crate_name}`
+
+metadata_extern_location_not_exist =
+ extern location for {$crate_name} does not exist: {$location}
+
+metadata_extern_location_not_file =
+ extern location for {$crate_name} is not a file: {$location}
+
+metadata_multiple_candidates =
+ multiple {$flavor} candidates for `{$crate_name}` found
+
+metadata_multiple_matching_crates =
+ multiple matching crates for `{$crate_name}`
+ .note = candidates:{$candidates}
+
+metadata_symbol_conflicts_current =
+ the current crate is indistinguishable from one of its dependencies: it has the same crate-name `{$crate_name}` and was compiled with the same `-C metadata` arguments. This will result in symbol conflicts between the two.
+
+metadata_symbol_conflicts_others =
+ found two different crates with name `{$crate_name}` that are not distinguished by differing `-C metadata`. This will result in symbol conflicts between the two.
+
+metadata_stable_crate_id_collision =
+ found crates (`{$crate_name0}` and `{$crate_name1}`) with colliding StableCrateId values.
+
+metadata_dl_error =
+ {$err}
+
+metadata_newer_crate_version =
+ found possibly newer version of crate `{$crate_name}`{$add_info}
+ .note = perhaps that crate needs to be recompiled?
+
+metadata_found_crate_versions =
+ the following crate versions were found:{$found_crates}
+
+metadata_no_crate_with_triple =
+ couldn't find crate `{$crate_name}` with expected target triple {$locator_triple}{$add_info}
+
+metadata_found_staticlib =
+ found staticlib `{$crate_name}` instead of rlib or dylib{$add_info}
+ .help = please recompile that crate using --crate-type lib
+
+metadata_incompatible_rustc =
+ found crate `{$crate_name}` compiled by an incompatible version of rustc{$add_info}
+ .help = please recompile that crate using this compiler ({$rustc_version}) (consider running `cargo clean` first)
+
+metadata_invalid_meta_files =
+ found invalid metadata files for crate `{$crate_name}`{$add_info}
+
+metadata_cannot_find_crate =
+ can't find crate for `{$crate_name}`{$add_info}
+
+metadata_no_dylib_plugin =
+ plugin `{$crate_name}` only found in rlib format, but must be available in dylib format
+
+metadata_target_not_installed =
+ the `{$locator_triple}` target may not be installed
+
+metadata_target_no_std_support =
+ the `{$locator_triple}` target may not support the standard library
+
+metadata_consider_downloading_target =
+ consider downloading the target with `rustup target add {$locator_triple}`
+
+metadata_std_required =
+ `std` is required by `{$current_crate}` because it does not declare `#![no_std]`
+
+metadata_consider_building_std =
+ consider building the standard library from source with `cargo build -Zbuild-std`
+
+metadata_compiler_missing_profiler =
+ the compiler may have been built without the profiler runtime
+
+metadata_install_missing_components =
+ maybe you need to install the missing components with: `rustup component add rust-src rustc-dev llvm-tools-preview`
+
+metadata_cant_find_crate =
+ can't find crate
+
+metadata_crate_location_unknown_type =
+ extern location for {$crate_name} is of an unknown type: {$path}
+
+metadata_lib_filename_form =
+ file name should be lib*.rlib or {$dll_prefix}*.{$dll_suffix}
+
+metadata_multiple_import_name_type =
+ multiple `import_name_type` arguments in a single `#[link]` attribute
+
+metadata_import_name_type_form =
+ import name type must be of the form `import_name_type = "string"`
+
+metadata_import_name_type_x86 =
+ import name type is only supported on x86
+
+metadata_unknown_import_name_type =
+ unknown import name type `{$import_name_type}`, expected one of: decorated, noprefix, undecorated
+
+metadata_import_name_type_raw =
+ import name type can only be used with link kind `raw-dylib`
diff --git a/compiler/rustc_error_messages/locales/en-US/middle.ftl b/compiler/rustc_error_messages/locales/en-US/middle.ftl
new file mode 100644
index 000000000..b9e4499d4
--- /dev/null
+++ b/compiler/rustc_error_messages/locales/en-US/middle.ftl
@@ -0,0 +1,29 @@
+middle_drop_check_overflow =
+ overflow while adding drop-check rules for {$ty}
+ .note = overflowed on {$overflow_ty}
+
+middle_opaque_hidden_type_mismatch =
+ concrete type differs from previous defining opaque type use
+ .label = expected `{$self_ty}`, got `{$other_ty}`
+
+middle_conflict_types =
+ this expression supplies two conflicting concrete types for the same opaque type
+
+middle_previous_use_here =
+ previous use here
+
+middle_limit_invalid =
+ `limit` must be a non-negative integer
+ .label = {$error_str}
+
+middle_const_eval_non_int =
+ constant evaluation of enum discriminant resulted in non-integer
+
+middle_unknown_layout =
+ the type `{$ty}` has an unknown layout
+
+middle_values_too_big =
+ values of the type `{$ty}` are too big for the current architecture
+
+middle_cannot_be_normalized =
+ unable to determine layout for `{$ty}` because `{$failure_ty}` cannot be normalized
diff --git a/compiler/rustc_error_messages/locales/en-US/mir_dataflow.ftl b/compiler/rustc_error_messages/locales/en-US/mir_dataflow.ftl
new file mode 100644
index 000000000..988541525
--- /dev/null
+++ b/compiler/rustc_error_messages/locales/en-US/mir_dataflow.ftl
@@ -0,0 +1,29 @@
+mir_dataflow_path_must_end_in_filename =
+ path must end in a filename
+
+mir_dataflow_unknown_formatter =
+ unknown formatter
+
+mir_dataflow_duplicate_values_for =
+ duplicate values for `{$name}`
+
+mir_dataflow_requires_an_argument =
+ `{$name}` requires an argument
+
+mir_dataflow_stop_after_dataflow_ended_compilation =
+ stop_after_dataflow ended compilation
+
+mir_dataflow_peek_must_be_place_or_ref_place =
+ rustc_peek: argument expression must be either `place` or `&place`
+
+mir_dataflow_peek_must_be_not_temporary =
+ dataflow::sanity_check cannot feed a non-temp to rustc_peek
+
+mir_dataflow_peek_bit_not_set =
+ rustc_peek: bit not set
+
+mir_dataflow_peek_argument_not_a_local =
+ rustc_peek: argument was not a local
+
+mir_dataflow_peek_argument_untracked =
+ rustc_peek: argument untracked
diff --git a/compiler/rustc_error_messages/locales/en-US/monomorphize.ftl b/compiler/rustc_error_messages/locales/en-US/monomorphize.ftl
new file mode 100644
index 000000000..42c84fdd2
--- /dev/null
+++ b/compiler/rustc_error_messages/locales/en-US/monomorphize.ftl
@@ -0,0 +1,26 @@
+monomorphize_recursion_limit =
+ reached the recursion limit while instantiating `{$shrunk}`
+ .note = `{$def_path_str}` defined here
+
+monomorphize_written_to_path = the full type name has been written to '{$path}'
+
+monomorphize_type_length_limit = reached the type-length limit while instantiating `{$shrunk}`
+
+monomorphize_consider_type_length_limit =
+ consider adding a `#![type_length_limit="{$type_length}"]` attribute to your crate
+
+monomorphize_fatal_error = {$error_message}
+
+monomorphize_unknown_partition_strategy = unknown partitioning strategy
+
+monomorphize_symbol_already_defined = symbol `{$symbol}` is already defined
+
+monomorphize_unused_generic_params = item has unused generic parameters
+
+monomorphize_large_assignments =
+ moving {$size} bytes
+ .label = value moved from here
+ .note = The current maximum size is {$limit}, but it can be customized with the move_size_limit attribute: `#![move_size_limit = "..."]`
+
+monomorphize_requires_lang_item =
+ requires `{$lang_item}` lang_item
diff --git a/compiler/rustc_error_messages/locales/en-US/parser.ftl b/compiler/rustc_error_messages/locales/en-US/parser.ftl
index 076b1b1ca..13c368d1c 100644
--- a/compiler/rustc_error_messages/locales/en-US/parser.ftl
+++ b/compiler/rustc_error_messages/locales/en-US/parser.ftl
@@ -1,34 +1,371 @@
-parser-struct-literal-body-without-path =
+parser_struct_literal_body_without_path =
struct literal body without path
.suggestion = you might have forgotten to add the struct literal inside the block
-parser-maybe-report-ambiguous-plus =
+parser_maybe_report_ambiguous_plus =
ambiguous `+` in a type
.suggestion = use parentheses to disambiguate
-parser-maybe-recover-from-bad-type-plus =
+parser_maybe_recover_from_bad_type_plus =
expected a path on the left-hand side of `+`, not `{$ty}`
-parser-add-paren = try adding parentheses
+parser_add_paren = try adding parentheses
-parser-forgot-paren = perhaps you forgot parentheses?
+parser_forgot_paren = perhaps you forgot parentheses?
-parser-expect-path = expected a path
+parser_expect_path = expected a path
-parser-maybe-recover-from-bad-qpath-stage-2 =
+parser_maybe_recover_from_bad_qpath_stage_2 =
missing angle brackets in associated item path
.suggestion = try: `{$ty}`
-parser-incorrect-semicolon =
+parser_incorrect_semicolon =
expected item, found `;`
.suggestion = remove this semicolon
.help = {$name} declarations are not followed by a semicolon
-parser-incorrect-use-of-await =
+parser_incorrect_use_of_await =
incorrect use of `await`
- .parentheses-suggestion = `await` is not a method call, remove the parentheses
- .postfix-suggestion = `await` is a postfix operation
+ .parentheses_suggestion = `await` is not a method call, remove the parentheses
+ .postfix_suggestion = `await` is a postfix operation
-parser-in-in-typo =
+parser_in_in_typo =
expected iterable, found keyword `in`
.suggestion = remove the duplicated `in`
+
+parser_invalid_variable_declaration =
+ invalid variable declaration
+
+parser_switch_mut_let_order =
+ switch the order of `mut` and `let`
+parser_missing_let_before_mut = missing keyword
+parser_use_let_not_auto = write `let` instead of `auto` to introduce a new variable
+parser_use_let_not_var = write `let` instead of `var` to introduce a new variable
+
+parser_invalid_comparison_operator = invalid comparison operator `{$invalid}`
+ .use_instead = `{$invalid}` is not a valid comparison operator, use `{$correct}`
+ .spaceship_operator_invalid = `<=>` is not a valid comparison operator, use `std::cmp::Ordering`
+
+parser_invalid_logical_operator = `{$incorrect}` is not a logical operator
+ .note = unlike in e.g., Python and PHP, `&&` and `||` are used for logical operators
+ .use_amp_amp_for_conjunction = use `&&` to perform logical conjunction
+ .use_pipe_pipe_for_disjunction = use `||` to perform logical disjunction
+
+parser_tilde_is_not_unary_operator = `~` cannot be used as a unary operator
+ .suggestion = use `!` to perform bitwise not
+
+parser_unexpected_token_after_not = unexpected {$negated_desc} after identifier
+parser_unexpected_token_after_not_bitwise = use `!` to perform bitwise not
+parser_unexpected_token_after_not_logical = use `!` to perform logical negation
+parser_unexpected_token_after_not_default = use `!` to perform logical negation or bitwise not
+
+parser_malformed_loop_label = malformed loop label
+ .suggestion = use the correct loop label format
+
+parser_lifetime_in_borrow_expression = borrow expressions cannot be annotated with lifetimes
+ .suggestion = remove the lifetime annotation
+ .label = annotated with lifetime here
+
+parser_field_expression_with_generic = field expressions cannot have generic arguments
+
+parser_macro_invocation_with_qualified_path = macros cannot use qualified paths
+
+parser_unexpected_token_after_label = expected `while`, `for`, `loop` or `{"{"}` after a label
+ .suggestion_remove_label = consider removing the label
+ .suggestion_enclose_in_block = consider enclosing expression in a block
+
+parser_require_colon_after_labeled_expression = labeled expression must be followed by `:`
+ .note = labels are used before loops and blocks, allowing e.g., `break 'label` to them
+ .label = the label
+ .suggestion = add `:` after the label
+
+parser_do_catch_syntax_removed = found removed `do catch` syntax
+ .note = following RFC #2388, the new non-placeholder syntax is `try`
+ .suggestion = replace with the new syntax
+
+parser_float_literal_requires_integer_part = float literals must have an integer part
+ .suggestion = must have an integer part
+
+parser_invalid_int_literal_width = invalid width `{$width}` for integer literal
+ .help = valid widths are 8, 16, 32, 64 and 128
+
+parser_invalid_num_literal_base_prefix = invalid base prefix for number literal
+ .note = base prefixes (`0xff`, `0b1010`, `0o755`) are lowercase
+ .suggestion = try making the prefix lowercase
+
+parser_invalid_num_literal_suffix = invalid suffix `{$suffix}` for number literal
+ .label = invalid suffix `{$suffix}`
+ .help = the suffix must be one of the numeric types (`u32`, `isize`, `f32`, etc.)
+
+parser_invalid_float_literal_width = invalid width `{$width}` for float literal
+ .help = valid widths are 32 and 64
+
+parser_invalid_float_literal_suffix = invalid suffix `{$suffix}` for float literal
+ .label = invalid suffix `{$suffix}`
+ .help = valid suffixes are `f32` and `f64`
+
+parser_int_literal_too_large = integer literal is too large
+
+parser_missing_semicolon_before_array = expected `;`, found `[`
+ .suggestion = consider adding `;` here
+
+parser_invalid_block_macro_segment = cannot use a `block` macro fragment here
+ .label = the `block` fragment is within this context
+
+parser_if_expression_missing_then_block = this `if` expression is missing a block after the condition
+ .add_then_block = add a block here
+ .condition_possibly_unfinished = this binary operation is possibly unfinished
+
+parser_if_expression_missing_condition = missing condition for `if` expression
+ .condition_label = expected condition here
+ .block_label = if this block is the condition of the `if` expression, then it must be followed by another block
+
+parser_expected_expression_found_let = expected expression, found `let` statement
+
+parser_expected_else_block = expected `{"{"}`, found {$first_tok}
+ .label = expected an `if` or a block after this `else`
+ .suggestion = add an `if` if this is the condition of a chained `else if` statement
+
+parser_outer_attribute_not_allowed_on_if_else = outer attributes are not allowed on `if` and `else` branches
+ .branch_label = the attributes are attached to this branch
+ .ctx_label = the branch belongs to this `{$ctx}`
+ .suggestion = remove the attributes
+
+parser_missing_in_in_for_loop = missing `in` in `for` loop
+ .use_in_not_of = try using `in` here instead
+ .add_in = try adding `in` here
+
+parser_missing_comma_after_match_arm = expected `,` following `match` arm
+ .suggestion = missing a comma here to end this `match` arm
+
+parser_catch_after_try = keyword `catch` cannot follow a `try` block
+ .help = try using `match` on the result of the `try` block instead
+
+parser_comma_after_base_struct = cannot use a comma after the base struct
+ .note = the base struct must always be the last field
+ .suggestion = remove this comma
+
+parser_eq_field_init = expected `:`, found `=`
+ .suggestion = replace equals symbol with a colon
+
+parser_dotdotdot = unexpected token: `...`
+ .suggest_exclusive_range = use `..` for an exclusive range
+ .suggest_inclusive_range = or `..=` for an inclusive range
+
+parser_left_arrow_operator = unexpected token: `<-`
+ .suggestion = if you meant to write a comparison against a negative value, add a space in between `<` and `-`
+
+parser_remove_let = expected pattern, found `let`
+ .suggestion = remove the unnecessary `let` keyword
+
+parser_use_eq_instead = unexpected `==`
+ .suggestion = try using `=` instead
+
+parser_use_empty_block_not_semi = expected { "`{}`" }, found `;`
+ .suggestion = try using { "`{}`" } instead
+
+parser_comparison_interpreted_as_generic =
+ `<` is interpreted as a start of generic arguments for `{$type}`, not a comparison
+ .label_args = interpreted as generic arguments
+ .label_comparison = not interpreted as comparison
+ .suggestion = try comparing the cast value
+
+parser_shift_interpreted_as_generic =
+ `<<` is interpreted as a start of generic arguments for `{$type}`, not a shift
+ .label_args = interpreted as generic arguments
+ .label_comparison = not interpreted as shift
+ .suggestion = try shifting the cast value
+
+parser_found_expr_would_be_stmt = expected expression, found `{$token}`
+ .label = expected expression
+
+parser_leading_plus_not_supported = leading `+` is not supported
+ .label = unexpected `+`
+ .suggestion_remove_plus = try removing the `+`
+
+parser_parentheses_with_struct_fields = invalid `struct` delimiters or `fn` call arguments
+ .suggestion_braces_for_struct = if `{$type}` is a struct, use braces as delimiters
+ .suggestion_no_fields_for_fn = if `{$type}` is a function, use the arguments directly
+
+parser_labeled_loop_in_break = parentheses are required around this expression to avoid confusion with a labeled break expression
+
+parser_sugg_wrap_expression_in_parentheses = wrap the expression in parentheses
+
+parser_array_brackets_instead_of_braces = this is a block expression, not an array
+ .suggestion = to make an array, use square brackets instead of curly braces
+
+parser_match_arm_body_without_braces = `match` arm body without braces
+ .label_statements = {$num_statements ->
+ [one] this statement is not surrounded by a body
+ *[other] these statements are not surrounded by a body
+ }
+ .label_arrow = while parsing the `match` arm starting here
+ .suggestion_add_braces = surround the {$num_statements ->
+ [one] statement
+ *[other] statements
+ } with a body
+ .suggestion_use_comma_not_semicolon = use a comma to end a `match` arm expression
+
+parser_struct_literal_not_allowed_here = struct literals are not allowed here
+ .suggestion = surround the struct literal with parentheses
+
+parser_invalid_interpolated_expression = invalid interpolated expression
+
+parser_hexadecimal_float_literal_not_supported = hexadecimal float literal is not supported
+parser_octal_float_literal_not_supported = octal float literal is not supported
+parser_binary_float_literal_not_supported = binary float literal is not supported
+parser_not_supported = not supported
+
+parser_invalid_literal_suffix = suffixes on {$kind} literals are invalid
+ .label = invalid suffix `{$suffix}`
+
+parser_invalid_literal_suffix_on_tuple_index = suffixes on a tuple index are invalid
+ .label = invalid suffix `{$suffix}`
+ .tuple_exception_line_1 = `{$suffix}` is *temporarily* accepted on tuple index fields as it was incorrectly accepted on stable for a few releases
+ .tuple_exception_line_2 = on proc macros, you'll want to use `syn::Index::from` or `proc_macro::Literal::*_unsuffixed` for code that will desugar to tuple field access
+ .tuple_exception_line_3 = see issue #60210 <https://github.com/rust-lang/rust/issues/60210> for more information
+
+parser_non_string_abi_literal = non-string ABI literal
+ .suggestion = specify the ABI with a string literal
+
+parser_mismatched_closing_delimiter = mismatched closing delimiter: `{$delimiter}`
+ .label_unmatched = mismatched closing delimiter
+ .label_opening_candidate = closing delimiter possibly meant for this
+ .label_unclosed = unclosed delimiter
+
+parser_incorrect_visibility_restriction = incorrect visibility restriction
+ .help = some possible visibility restrictions are:
+ `pub(crate)`: visible only on the current crate
+ `pub(super)`: visible only in the current module's parent
+ `pub(in path::to::module)`: visible only on the specified path
+ .suggestion = make this visible only to module `{$inner_str}` with `in`
+
+parser_assignment_else_not_allowed = <assignment> ... else {"{"} ... {"}"} is not allowed
+
+parser_expected_statement_after_outer_attr = expected statement after outer attribute
+
+parser_doc_comment_does_not_document_anything = found a documentation comment that doesn't document anything
+ .help = doc comments must come before what they document, if a comment was intended use `//`
+ .suggestion = missing comma here
+
+parser_const_let_mutually_exclusive = `const` and `let` are mutually exclusive
+ .suggestion = remove `let`
+
+parser_invalid_expression_in_let_else = a `{$operator}` expression cannot be directly assigned in `let...else`
+parser_invalid_curly_in_let_else = right curly brace `{"}"}` before `else` in a `let...else` statement not allowed
+
+parser_compound_assignment_expression_in_let = can't reassign to an uninitialized variable
+ .suggestion = initialize the variable
+ .help = if you meant to overwrite, remove the `let` binding
+
+parser_suffixed_literal_in_attribute = suffixed literals are not allowed in attributes
+ .help = instead of using a suffixed literal (`1u8`, `1.0f32`, etc.), use an unsuffixed version (`1`, `1.0`, etc.)
+
+parser_invalid_meta_item = expected unsuffixed literal or identifier, found `{$token}`
+
+parser_label_inner_attr_does_not_annotate_this = the inner attribute doesn't annotate this {$item}
+parser_sugg_change_inner_attr_to_outer = to annotate the {$item}, change the attribute from inner to outer style
+
+parser_inner_attr_not_permitted_after_outer_doc_comment = an inner attribute is not permitted following an outer doc comment
+ .label_attr = not permitted following an outer doc comment
+ .label_prev_doc_comment = previous doc comment
+ .label_does_not_annotate_this = {parser_label_inner_attr_does_not_annotate_this}
+ .sugg_change_inner_to_outer = {parser_sugg_change_inner_attr_to_outer}
+
+parser_inner_attr_not_permitted_after_outer_attr = an inner attribute is not permitted following an outer attribute
+ .label_attr = not permitted following an outer attribute
+ .label_prev_attr = previous outer attribute
+ .label_does_not_annotate_this = {parser_label_inner_attr_does_not_annotate_this}
+ .sugg_change_inner_to_outer = {parser_sugg_change_inner_attr_to_outer}
+
+parser_inner_attr_not_permitted = an inner attribute is not permitted in this context
+ .label_does_not_annotate_this = {parser_label_inner_attr_does_not_annotate_this}
+ .sugg_change_inner_to_outer = {parser_sugg_change_inner_attr_to_outer}
+
+parser_inner_attr_explanation = inner attributes, like `#![no_std]`, annotate the item enclosing them, and are usually found at the beginning of source files
+parser_outer_attr_explanation = outer attributes, like `#[test]`, annotate the item following them
+
+parser_inner_doc_comment_not_permitted = expected outer doc comment
+ .note = inner doc comments like this (starting with `//!` or `/*!`) can only appear before items
+ .suggestion = you might have meant to write a regular comment
+ .label_does_not_annotate_this = the inner doc comment doesn't annotate this {$item}
+ .sugg_change_inner_to_outer = to annotate the {$item}, change the doc comment from inner to outer style
+
+parser_expected_identifier_found_reserved_identifier_str = expected identifier, found reserved identifier `{$token}`
+parser_expected_identifier_found_keyword_str = expected identifier, found keyword `{$token}`
+parser_expected_identifier_found_reserved_keyword_str = expected identifier, found reserved keyword `{$token}`
+parser_expected_identifier_found_doc_comment_str = expected identifier, found doc comment `{$token}`
+parser_expected_identifier_found_str = expected identifier, found `{$token}`
+
+parser_expected_identifier_found_reserved_identifier = expected identifier, found reserved identifier
+parser_expected_identifier_found_keyword = expected identifier, found keyword
+parser_expected_identifier_found_reserved_keyword = expected identifier, found reserved keyword
+parser_expected_identifier_found_doc_comment = expected identifier, found doc comment
+parser_expected_identifier = expected identifier
+
+parser_sugg_escape_to_use_as_identifier = escape `{$ident_name}` to use it as an identifier
+
+parser_sugg_remove_comma = remove this comma
+
+parser_expected_semi_found_reserved_identifier_str = expected `;`, found reserved identifier `{$token}`
+parser_expected_semi_found_keyword_str = expected `;`, found keyword `{$token}`
+parser_expected_semi_found_reserved_keyword_str = expected `;`, found reserved keyword `{$token}`
+parser_expected_semi_found_doc_comment_str = expected `;`, found doc comment `{$token}`
+parser_expected_semi_found_str = expected `;`, found `{$token}`
+
+parser_sugg_change_this_to_semi = change this to `;`
+parser_sugg_add_semi = add `;` here
+parser_label_unexpected_token = unexpected token
+
+parser_unmatched_angle_brackets = {$num_extra_brackets ->
+ [one] unmatched angle bracket
+ *[other] unmatched angle brackets
+ }
+ .suggestion = {$num_extra_brackets ->
+ [one] remove extra angle bracket
+ *[other] remove extra angle brackets
+ }
+
+parser_generic_parameters_without_angle_brackets = generic parameters without surrounding angle brackets
+ .suggestion = surround the type parameters with angle brackets
+
+parser_comparison_operators_cannot_be_chained = comparison operators cannot be chained
+ .sugg_parentheses_for_function_args = or use `(...)` if you meant to specify fn arguments
+ .sugg_split_comparison = split the comparison into two
+ .sugg_parenthesize = parenthesize the comparison
+parser_sugg_turbofish_syntax = use `::<...>` instead of `<...>` to specify lifetime, type, or const arguments
+
+parser_question_mark_in_type = invalid `?` in type
+ .label = `?` is only allowed on expressions, not types
+ .suggestion = if you meant to express that the type might not contain a value, use the `Option` wrapper type
+
+parser_unexpected_parentheses_in_for_head = unexpected parentheses surrounding `for` loop head
+ .suggestion = remove parentheses in `for` loop
+
+parser_doc_comment_on_param_type = documentation comments cannot be applied to a function parameter's type
+ .label = doc comments are not allowed here
+
+parser_attribute_on_param_type = attributes cannot be applied to a function parameter's type
+ .label = attributes are not allowed here
+
+parser_pattern_method_param_without_body = patterns aren't allowed in methods without bodies
+ .suggestion = give this argument a name or use an underscore to ignore it
+
+parser_self_param_not_first = unexpected `self` parameter in function
+ .label = must be the first parameter of an associated function
+
+parser_const_generic_without_braces = expressions must be enclosed in braces to be used as const generic arguments
+ .suggestion = enclose the `const` expression in braces
+
+parser_unexpected_const_param_declaration = unexpected `const` parameter declaration
+ .label = expected a `const` expression, not a parameter declaration
+ .suggestion = `const` parameters must be declared for the `impl`
+
+parser_unexpected_const_in_generic_param = expected lifetime, type, or constant, found keyword `const`
+ .suggestion = the `const` keyword is only needed in the definition of the type
+
+parser_async_move_order_incorrect = the order of `move` and `async` is incorrect
+ .suggestion = try switching the order
+
+parser_double_colon_in_bound = expected `:` followed by trait or lifetime
+ .suggestion = use single colon
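
Two further Fluent mechanisms appear in the parser messages above and in the passes.ftl diff below: message references (e.g. `.label_does_not_annotate_this = {parser_label_inner_attr_does_not_annotate_this}`), which re-use the caller's arguments, and parameterized private terms (e.g. `{-passes_see_issue(issue: "80564")}`), which only see the arguments passed explicitly at the reference site. A rough standalone sketch under the same assumed dependencies as the earlier snippet:

    // Sketch of (1) a message reference sharing the caller's `$item` argument and
    // (2) a parameterized term that receives `issue` from its call arguments.
    use fluent_bundle::{FluentArgs, FluentBundle, FluentResource, FluentValue};
    use unic_langid::langid;

    fn main() {
        let ftl = r#"
    parser_label_inner_attr_does_not_annotate_this = the inner attribute doesn't annotate this {$item}

    parser_inner_attr_not_permitted = an inner attribute is not permitted in this context
        .label_does_not_annotate_this = {parser_label_inner_attr_does_not_annotate_this}

    -passes_see_issue =
        see issue #{$issue} <https://github.com/rust-lang/rust/issues/{$issue}> for more information

    passes_ignored_attr = `#[{$sym}]` is ignored on struct fields and match arms
        .note = {-passes_see_issue(issue: "80564")}
    "#;
        let resource = FluentResource::try_new(ftl.to_string()).expect("valid FTL");
        let mut bundle = FluentBundle::new(vec![langid!("en-US")]);
        bundle.set_use_isolating(false);
        bundle.add_resource(resource).expect("no duplicate message ids");
        let mut errors = vec![];

        // Message reference: `$item` flows through to the referenced message.
        let msg = bundle.get_message("parser_inner_attr_not_permitted").unwrap();
        let label = msg.get_attribute("label_does_not_annotate_this").unwrap();
        let mut args = FluentArgs::new();
        args.set("item", FluentValue::from("extern crate"));
        let text = bundle.format_pattern(label.value(), Some(&args), &mut errors);
        println!("{text}"); // expected: "the inner attribute doesn't annotate this extern crate"

        // Parameterized term: only `issue: "80564"` is visible inside `-passes_see_issue`.
        let msg = bundle.get_message("passes_ignored_attr").unwrap();
        let note = msg.get_attribute("note").unwrap();
        let text = bundle.format_pattern(note.value(), None, &mut errors);
        println!("{text}"); // expected: "see issue #80564 <https://github.com/rust-lang/rust/issues/80564> for more information"
    }
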
diff --git a/compiler/rustc_error_messages/locales/en-US/passes.ftl b/compiler/rustc_error_messages/locales/en-US/passes.ftl
index b17eb9c2d..4bc6bd9fb 100644
--- a/compiler/rustc_error_messages/locales/en-US/passes.ftl
+++ b/compiler/rustc_error_messages/locales/en-US/passes.ftl
@@ -1,178 +1,233 @@
--passes-previously-accepted =
+-passes_previously_accepted =
this was previously accepted by the compiler but is being phased out; it will become a hard error in a future release!
--passes-see-issue =
+-passes_see_issue =
see issue #{$issue} <https://github.com/rust-lang/rust/issues/{$issue}> for more information
-passes-outer-crate-level-attr =
+passes_outer_crate_level_attr =
crate-level attribute should be an inner attribute: add an exclamation mark: `#![foo]`
-passes-inner-crate-level-attr =
+passes_inner_crate_level_attr =
crate-level attribute should be in the root module
-passes-ignored-attr-with-macro = `#[{$sym}]` is ignored on struct fields, match arms and macro defs
- .warn = {-passes-previously-accepted}
- .note = {-passes-see-issue(issue: "80564")}
+passes_ignored_attr_with_macro =
+ `#[{$sym}]` is ignored on struct fields, match arms and macro defs
+ .warn = {-passes_previously_accepted}
+ .note = {-passes_see_issue(issue: "80564")}
-passes-ignored-attr = `#[{$sym}]` is ignored on struct fields and match arms
- .warn = {-passes-previously-accepted}
- .note = {-passes-see-issue(issue: "80564")}
+passes_ignored_attr =
+ `#[{$sym}]` is ignored on struct fields and match arms
+ .warn = {-passes_previously_accepted}
+ .note = {-passes_see_issue(issue: "80564")}
-passes-inline-ignored-function-prototype = `#[inline]` is ignored on function prototypes
+passes_inline_ignored_function_prototype =
+ `#[inline]` is ignored on function prototypes
-passes-inline-ignored-constants = `#[inline]` is ignored on constants
- .warn = {-passes-previously-accepted}
- .note = {-passes-see-issue(issue: "65833")}
+passes_inline_ignored_constants =
+ `#[inline]` is ignored on constants
+ .warn = {-passes_previously_accepted}
+ .note = {-passes_see_issue(issue: "65833")}
-passes-inline-not-fn-or-closure = attribute should be applied to function or closure
+passes_inline_not_fn_or_closure =
+ attribute should be applied to function or closure
.label = not a function or closure
-passes-no-coverage-ignored-function-prototype = `#[no_coverage]` is ignored on function prototypes
+passes_no_coverage_ignored_function_prototype =
+ `#[no_coverage]` is ignored on function prototypes
-passes-no-coverage-propagate =
+passes_no_coverage_propagate =
`#[no_coverage]` does not propagate into items and must be applied to the contained functions directly
-passes-no-coverage-fn-defn = `#[no_coverage]` may only be applied to function definitions
+passes_no_coverage_fn_defn =
+ `#[no_coverage]` may only be applied to function definitions
-passes-no-coverage-not-coverable = `#[no_coverage]` must be applied to coverable code
+passes_no_coverage_not_coverable =
+ `#[no_coverage]` must be applied to coverable code
.label = not coverable code
-passes-should-be-applied-to-fn = attribute should be applied to a function definition
+passes_should_be_applied_to_fn =
+ attribute should be applied to a function definition
.label = not a function definition
-passes-naked-tracked-caller = cannot use `#[track_caller]` with `#[naked]`
+passes_naked_tracked_caller =
+ cannot use `#[track_caller]` with `#[naked]`
-passes-should-be-applied-to-struct-enum = attribute should be applied to a struct or enum
+passes_should_be_applied_to_struct_enum =
+ attribute should be applied to a struct or enum
.label = not a struct or enum
-passes-should-be-applied-to-trait = attribute should be applied to a trait
+passes_should_be_applied_to_trait =
+ attribute should be applied to a trait
.label = not a trait
-passes-target-feature-on-statement = {passes-should-be-applied-to-fn}
- .warn = {-passes-previously-accepted}
- .label = {passes-should-be-applied-to-fn.label}
+passes_target_feature_on_statement =
+ {passes_should_be_applied_to_fn}
+ .warn = {-passes_previously_accepted}
+ .label = {passes_should_be_applied_to_fn.label}
-passes-should-be-applied-to-static = attribute should be applied to a static
+passes_should_be_applied_to_static =
+ attribute should be applied to a static
.label = not a static
-passes-doc-expect-str = doc {$attr_name} attribute expects a string: #[doc({$attr_name} = "a")]
+passes_doc_expect_str =
+ doc {$attr_name} attribute expects a string: #[doc({$attr_name} = "a")]
-passes-doc-alias-empty = {$attr_str} attribute cannot have empty value
+passes_doc_alias_empty =
+ {$attr_str} attribute cannot have empty value
-passes-doc-alias-bad-char = {$char_} character isn't allowed in {$attr_str}
+passes_doc_alias_bad_char =
+ {$char_} character isn't allowed in {$attr_str}
-passes-doc-alias-start-end = {$attr_str} cannot start or end with ' '
+passes_doc_alias_start_end =
+ {$attr_str} cannot start or end with ' '
-passes-doc-alias-bad-location = {$attr_str} isn't allowed on {$location}
+passes_doc_alias_bad_location =
+ {$attr_str} isn't allowed on {$location}
-passes-doc-alias-not-an-alias = {$attr_str} is the same as the item's name
+passes_doc_alias_not_an_alias =
+ {$attr_str} is the same as the item's name
-passes-doc-alias-duplicated = doc alias is duplicated
+passes_doc_alias_duplicated = doc alias is duplicated
.label = first defined here
-passes-doc-alias-not-string-literal = `#[doc(alias("a"))]` expects string literals
+passes_doc_alias_not_string_literal =
+ `#[doc(alias("a"))]` expects string literals
-passes-doc-alias-malformed =
+passes_doc_alias_malformed =
doc alias attribute expects a string `#[doc(alias = "a")]` or a list of strings `#[doc(alias("a", "b"))]`
-passes-doc-keyword-empty-mod = `#[doc(keyword = "...")]` should be used on empty modules
+passes_doc_keyword_empty_mod =
+ `#[doc(keyword = "...")]` should be used on empty modules
-passes-doc-keyword-not-mod = `#[doc(keyword = "...")]` should be used on modules
+passes_doc_keyword_not_mod =
+ `#[doc(keyword = "...")]` should be used on modules
-passes-doc-keyword-invalid-ident = `{$doc_keyword}` is not a valid identifier
+passes_doc_keyword_invalid_ident =
+ `{$doc_keyword}` is not a valid identifier
-passes-doc-fake-variadic-not-valid =
+passes_doc_fake_variadic_not_valid =
`#[doc(fake_variadic)]` must be used on the first of a set of tuple or fn pointer trait impls with varying arity
-passes-doc-keyword-only-impl = `#[doc(keyword = "...")]` should be used on impl blocks
+passes_doc_keyword_only_impl =
+ `#[doc(keyword = "...")]` should be used on impl blocks
-passes-doc-inline-conflict-first = this attribute...
-passes-doc-inline-conflict-second = ...conflicts with this attribute
-passes-doc-inline-conflict = conflicting doc inlining attributes
+passes_doc_inline_conflict_first =
+ this attribute...
+
+passes_doc_inline_conflict_second =
+ {"."}..conflicts with this attribute
+
+passes_doc_inline_conflict =
+ conflicting doc inlining attributes
.help = remove one of the conflicting attributes
-passes-doc-inline-only-use = this attribute can only be applied to a `use` item
+passes_doc_inline_only_use =
+ this attribute can only be applied to a `use` item
.label = only applicable on `use` items
- .not-a-use-item-label = not a `use` item
+ .not_a_use_item_label = not a `use` item
.note = read <https://doc.rust-lang.org/nightly/rustdoc/the-doc-attribute.html#inline-and-no_inline> for more information
-passes-doc-attr-not-crate-level =
+passes_doc_attr_not_crate_level =
`#![doc({$attr_name} = "...")]` isn't allowed as a crate-level attribute
-passes-attr-crate-level = this attribute can only be applied at the crate level
+passes_attr_crate_level =
+ this attribute can only be applied at the crate level
.suggestion = to apply to the crate, use an inner attribute
.help = to apply to the crate, use an inner attribute
.note = read <https://doc.rust-lang.org/nightly/rustdoc/the-doc-attribute.html#at-the-crate-level> for more information
-passes-doc-test-unknown = unknown `doc(test)` attribute `{$path}`
+passes_doc_test_unknown =
+ unknown `doc(test)` attribute `{$path}`
+
+passes_doc_test_takes_list =
+ `#[doc(test(...))]` takes a list of attributes
-passes-doc-test-takes-list = `#[doc(test(...)]` takes a list of attributes
+passes_doc_primitive =
+ `doc(primitive)` should never have been stable
-passes-doc-primitive = `doc(primitive)` should never have been stable
+passes_doc_cfg_hide_takes_list =
+ `#[doc(cfg_hide(...))]` takes a list of attributes
-passes-doc-test-unknown-any = unknown `doc` attribute `{$path}`
+passes_doc_test_unknown_any =
+ unknown `doc` attribute `{$path}`
-passes-doc-test-unknown-spotlight = unknown `doc` attribute `{$path}`
+passes_doc_test_unknown_spotlight =
+ unknown `doc` attribute `{$path}`
.note = `doc(spotlight)` was renamed to `doc(notable_trait)`
.suggestion = use `notable_trait` instead
- .no-op-note = `doc(spotlight)` is now a no-op
+ .no_op_note = `doc(spotlight)` is now a no-op
-passes-doc-test-unknown-include = unknown `doc` attribute `{$path}`
+passes_doc_test_unknown_include =
+ unknown `doc` attribute `{$path}`
.suggestion = use `doc = include_str!` instead
-passes-doc-invalid = invalid `doc` attribute
+passes_doc_invalid =
+ invalid `doc` attribute
-passes-pass-by-value = `pass_by_value` attribute should be applied to a struct, enum or type alias
+passes_pass_by_value =
+ `pass_by_value` attribute should be applied to a struct, enum or type alias
.label = is not a struct, enum or type alias
-passes-allow-incoherent-impl =
+passes_allow_incoherent_impl =
`rustc_allow_incoherent_impl` attribute should be applied to impl items.
.label = the only currently supported targets are inherent methods
-passes-has-incoherent-inherent-impl =
+passes_has_incoherent_inherent_impl =
`rustc_has_incoherent_inherent_impls` attribute should be applied to types or traits.
.label = only adts, extern types and traits are supported
-passes-must-use-async =
+passes_must_use_async =
`must_use` attribute on `async` functions applies to the anonymous `Future` returned by the function, not the value within
.label = this attribute does nothing, the `Future`s returned by async functions are already `must_use`
-passes-must-use-no-effect = `#[must_use]` has no effect when applied to {$article} {$target}
+passes_must_use_no_effect =
+ `#[must_use]` has no effect when applied to {$article} {$target}
-passes-must-not-suspend = `must_not_suspend` attribute should be applied to a struct, enum, or trait
+passes_must_not_suspend =
+ `must_not_suspend` attribute should be applied to a struct, enum, or trait
.label = is not a struct, enum, or trait
-passes-cold = {passes-should-be-applied-to-fn}
- .warn = {-passes-previously-accepted}
- .label = {passes-should-be-applied-to-fn.label}
+passes_cold =
+ {passes_should_be_applied_to_fn}
+ .warn = {-passes_previously_accepted}
+ .label = {passes_should_be_applied_to_fn.label}
-passes-link = attribute should be applied to an `extern` block with non-Rust ABI
- .warn = {-passes-previously-accepted}
+passes_link =
+ attribute should be applied to an `extern` block with non-Rust ABI
+ .warn = {-passes_previously_accepted}
.label = not an `extern` block
-passes-link-name = attribute should be applied to a foreign function or static
- .warn = {-passes-previously-accepted}
+passes_link_name =
+ attribute should be applied to a foreign function or static
+ .warn = {-passes_previously_accepted}
.label = not a foreign function or static
.help = try `#[link(name = "{$value}")]` instead
-passes-no-link = attribute should be applied to an `extern crate` item
+passes_no_link =
+ attribute should be applied to an `extern crate` item
.label = not an `extern crate` item
-passes-export-name = attribute should be applied to a free function, impl method or static
+passes_export_name =
+ attribute should be applied to a free function, impl method or static
.label = not a free function, impl method or static
-passes-rustc-layout-scalar-valid-range-not-struct = attribute should be applied to a struct
+passes_rustc_layout_scalar_valid_range_not_struct =
+ attribute should be applied to a struct
.label = not a struct
-passes-rustc-layout-scalar-valid-range-arg = expected exactly one integer literal argument
+passes_rustc_layout_scalar_valid_range_arg =
+ expected exactly one integer literal argument
-passes-rustc-legacy-const-generics-only = #[rustc_legacy_const_generics] functions must only have const generics
+passes_rustc_legacy_const_generics_only =
+ #[rustc_legacy_const_generics] functions must only have const generics
.label = non-const generic parameter
-passes-rustc-legacy-const-generics-index = #[rustc_legacy_const_generics] must have one index for each generic parameter
+passes_rustc_legacy_const_generics_index =
+ #[rustc_legacy_const_generics] must have one index for each generic parameter
.label = generic parameters
-passes-rustc-legacy-const-generics-index-exceed = index exceeds number of arguments
+passes_rustc_legacy_const_generics_index_exceed =
+ index exceeds number of arguments
.label = there {$arg_count ->
[one] is
*[other] are
@@ -181,84 +236,438 @@ passes-rustc-legacy-const-generics-index-exceed = index exceeds number of argume
*[other] arguments
}
-passes-rustc-legacy-const-generics-index-negative = arguments should be non-negative integers
+passes_rustc_legacy_const_generics_index_negative =
+ arguments should be non-negative integers
-passes-rustc-dirty-clean = attribute requires -Z query-dep-graph to be enabled
+passes_rustc_dirty_clean =
+ attribute requires -Z query-dep-graph to be enabled
-passes-link-section = attribute should be applied to a function or static
- .warn = {-passes-previously-accepted}
+passes_link_section =
+ attribute should be applied to a function or static
+ .warn = {-passes_previously_accepted}
.label = not a function or static
-passes-no-mangle-foreign = `#[no_mangle]` has no effect on a foreign {$foreign_item_kind}
- .warn = {-passes-previously-accepted}
+passes_no_mangle_foreign =
+ `#[no_mangle]` has no effect on a foreign {$foreign_item_kind}
+ .warn = {-passes_previously_accepted}
.label = foreign {$foreign_item_kind}
.note = symbol names in extern blocks are not mangled
.suggestion = remove this attribute
-passes-no-mangle = attribute should be applied to a free function, impl method or static
- .warn = {-passes-previously-accepted}
+passes_no_mangle =
+ attribute should be applied to a free function, impl method or static
+ .warn = {-passes_previously_accepted}
.label = not a free function, impl method or static
-passes-repr-ident = meta item in `repr` must be an identifier
+passes_repr_ident =
+ meta item in `repr` must be an identifier
-passes-repr-conflicting = conflicting representation hints
+passes_repr_conflicting =
+ conflicting representation hints
-passes-used-static = attribute must be applied to a `static` variable
+passes_used_static =
+ attribute must be applied to a `static` variable
-passes-used-compiler-linker = `used(compiler)` and `used(linker)` can't be used together
+passes_used_compiler_linker =
+ `used(compiler)` and `used(linker)` can't be used together
-passes-allow-internal-unstable = attribute should be applied to a macro
+passes_allow_internal_unstable =
+ attribute should be applied to a macro
.label = not a macro
-passes-debug-visualizer-placement = attribute should be applied to a module
+passes_debug_visualizer_placement =
+ attribute should be applied to a module
-passes-debug-visualizer-invalid = invalid argument
- .note-1 = expected: `natvis_file = "..."`
- .note-2 = OR
- .note-3 = expected: `gdb_script_file = "..."`
+passes_debug_visualizer_invalid =
+ invalid argument
+ .note_1 = expected: `natvis_file = "..."`
+ .note_2 = OR
+ .note_3 = expected: `gdb_script_file = "..."`
-passes-rustc-allow-const-fn-unstable = attribute should be applied to `const fn`
+passes_debug_visualizer_unreadable =
+ couldn't read {$file}: {$error}
+
+passes_rustc_allow_const_fn_unstable =
+ attribute should be applied to `const fn`
.label = not a `const fn`
-passes-rustc-std-internal-symbol = attribute should be applied to functions or statics
+passes_rustc_std_internal_symbol =
+ attribute should be applied to functions or statics
.label = not a function or static
-passes-const-trait = attribute should be applied to a trait
+passes_const_trait =
+ attribute should be applied to a trait
-passes-stability-promotable = attribute cannot be applied to an expression
+passes_stability_promotable =
+ attribute cannot be applied to an expression
-passes-deprecated = attribute is ignored here
+passes_deprecated =
+ attribute is ignored here
-passes-macro-use = `#[{$name}]` only has an effect on `extern crate` and modules
+passes_macro_use =
+ `#[{$name}]` only has an effect on `extern crate` and modules
-passes-macro-export = `#[macro_export]` only has an effect on macro definitions
+passes_macro_export =
+ `#[macro_export]` only has an effect on macro definitions
-passes-plugin-registrar = `#[plugin_registrar]` only has an effect on functions
+passes_plugin_registrar =
+ `#[plugin_registrar]` only has an effect on functions
-passes-unused-empty-lints-note = attribute `{$name}` with an empty list has no effect
+passes_unused_empty_lints_note =
+ attribute `{$name}` with an empty list has no effect
-passes-unused-no-lints-note = attribute `{$name}` without any lints has no effect
+passes_unused_no_lints_note =
+ attribute `{$name}` without any lints has no effect
-passes-unused-default-method-body-const-note =
+passes_unused_default_method_body_const_note =
`default_method_body_is_const` has been replaced with `#[const_trait]` on traits
-passes-unused = unused attribute
+passes_unused =
+ unused attribute
.suggestion = remove this attribute
-passes-non-exported-macro-invalid-attrs = attribute should be applied to function or closure
+passes_non_exported_macro_invalid_attrs =
+ attribute should be applied to function or closure
.label = not a function or closure
-passes-unused-duplicate = unused attribute
+passes_unused_duplicate =
+ unused attribute
.suggestion = remove this attribute
.note = attribute also specified here
- .warn = {-passes-previously-accepted}
+ .warn = {-passes_previously_accepted}
-passes-unused-multiple = multiple `{$name}` attributes
+passes_unused_multiple =
+ multiple `{$name}` attributes
.suggestion = remove this attribute
.note = attribute also specified here
-passes-rustc-lint-opt-ty = `#[rustc_lint_opt_ty]` should be applied to a struct
+passes_rustc_lint_opt_ty =
+ `#[rustc_lint_opt_ty]` should be applied to a struct
.label = not a struct
-passes-rustc-lint-opt-deny-field-access = `#[rustc_lint_opt_deny_field_access]` should be applied to a field
+passes_rustc_lint_opt_deny_field_access =
+ `#[rustc_lint_opt_deny_field_access]` should be applied to a field
.label = not a field
+
+passes_link_ordinal =
+ attribute should be applied to a foreign function or static
+ .label = not a foreign function or static
+
+passes_collapse_debuginfo =
+ `collapse_debuginfo` attribute should be applied to macro definitions
+ .label = not a macro definition
+
+passes_deprecated_annotation_has_no_effect =
+ this `#[deprecated]` annotation has no effect
+ .suggestion = remove the unnecessary deprecation attribute
+
+passes_unknown_external_lang_item =
+ unknown external lang item: `{$lang_item}`
+
+passes_missing_panic_handler =
+ `#[panic_handler]` function required, but not found
+
+passes_alloc_func_required =
+ `#[alloc_error_handler]` function required, but not found
+
+passes_missing_alloc_error_handler =
+ use `#![feature(default_alloc_error_handler)]` for a default error handler
+
+passes_missing_lang_item =
+ language item required, but not found: `{$name}`
+ .note = this can occur when a binary crate with `#![no_std]` is compiled for a target where `{$name}` is defined in the standard library
+ .help = you may be able to compile for a target that doesn't need `{$name}`, specify a target with `--target` or in `.cargo/config`
+
+passes_lang_item_on_incorrect_target =
+ `{$name}` language item must be applied to a {$expected_target}
+ .label = attribute should be applied to a {$expected_target}, not a {$actual_target}
+
+passes_unknown_lang_item =
+ definition of an unknown language item: `{$name}`
+ .label = definition of unknown language item `{$name}`
+
+passes_invalid_attr_at_crate_level =
+ `{$name}` attribute cannot be used at crate level
+ .suggestion = perhaps you meant to use an outer attribute
+
+passes_duplicate_diagnostic_item =
+ duplicate diagnostic item found: `{$name}`.
+
+passes_duplicate_diagnostic_item_in_crate =
+ duplicate diagnostic item in crate `{$crate_name}`: `{$name}`.
+
+passes_diagnostic_item_first_defined =
+ the diagnostic item is first defined here
+ .note = the diagnostic item is first defined in crate `{$orig_crate_name}`.
+
+passes_abi =
+ abi: {$abi}
+
+passes_align =
+ align: {$align}
+
+passes_size =
+ size: {$size}
+
+passes_homogeneous_aggregate =
+ homogeneous_aggregate: {$homogeneous_aggregate}
+
+passes_layout_of =
+ layout_of({$normalized_ty}) = {$ty_layout}
+
+passes_unrecognized_field =
+ unrecognized field name `{$name}`
+
+passes_layout =
+ layout error: {$layout_error}
+
+passes_feature_stable_twice =
+ feature `{$feature}` is declared stable since {$since}, but was previously declared stable since {$prev_since}
+
+passes_feature_previously_declared =
+ feature `{$feature}` is declared {$declared}, but was previously declared {$prev_declared}
+
+passes_expr_not_allowed_in_context =
+ {$expr} is not allowed in a `{$context}`
+
+passes_const_impl_const_trait =
+ const `impl`s must be for traits marked with `#[const_trait]`
+ .note = this trait must be annotated with `#[const_trait]`
+
+passes_break_non_loop =
+ `break` with value from a `{$kind}` loop
+ .label = can only break with a value inside `loop` or breakable block
+ .label2 = you can't `break` with a value in a `{$kind}` loop
+ .suggestion = use `break` on its own without a value inside this `{$kind}` loop
+ .break_expr_suggestion = alternatively, you might have meant to use the available loop label
+
+passes_continue_labeled_block =
+ `continue` pointing to a labeled block
+ .label = labeled blocks cannot be `continue`'d
+ .block_label = labeled block the `continue` points to
+
+passes_break_inside_closure =
+ `{$name}` inside of a closure
+ .label = cannot `{$name}` inside of a closure
+ .closure_label = enclosing closure
+
+passes_break_inside_async_block =
+ `{$name}` inside of an `async` block
+ .label = cannot `{$name}` inside of an `async` block
+ .async_block_label = enclosing `async` block
+
+passes_outside_loop =
+ `{$name}` outside of a loop
+ .label = cannot `{$name}` outside of a loop
+
+passes_unlabeled_in_labeled_block =
+ unlabeled `{$cf_type}` inside of a labeled block
+ .label = `{$cf_type}` statements that would diverge to or through a labeled block need to bear a label
+
+passes_unlabeled_cf_in_while_condition =
+ `break` or `continue` with no label in the condition of a `while` loop
+ .label = unlabeled `{$cf_type}` in the condition of a `while` loop
+
+passes_cannot_inline_naked_function =
+ naked functions cannot be inlined
+
+passes_undefined_naked_function_abi =
+ Rust ABI is unsupported in naked functions
+
+passes_no_patterns =
+ patterns not allowed in naked function parameters
+
+passes_params_not_allowed =
+ referencing function parameters is not allowed in naked functions
+ .help = follow the calling convention in asm block to use parameters
+
+passes_naked_functions_asm_block =
+ naked functions must contain a single asm block
+ .label_multiple_asm = multiple asm blocks are unsupported in naked functions
+ .label_non_asm = non-asm is unsupported in naked functions
+
+passes_naked_functions_operands =
+ only `const` and `sym` operands are supported in naked functions
+
+passes_naked_functions_asm_options =
+ asm options unsupported in naked functions: {$unsupported_options}
+
+passes_naked_functions_must_use_noreturn =
+ asm in naked functions must use `noreturn` option
+ .suggestion = consider specifying that the asm block is responsible for returning from the function
+
+passes_attr_only_on_main =
+ `{$attr}` attribute can only be used on `fn main()`
+
+passes_attr_only_on_root_main =
+ `{$attr}` attribute can only be used on root `fn main()`
+
+passes_attr_only_in_functions =
+ `{$attr}` attribute can only be used on functions
+
+passes_multiple_rustc_main =
+ multiple functions with a `#[rustc_main]` attribute
+ .first = first `#[rustc_main]` function
+ .additional = additional `#[rustc_main]` function
+
+passes_multiple_start_functions =
+ multiple `start` functions
+ .label = multiple `start` functions
+ .previous = previous `#[start]` function here
+
+passes_extern_main =
+ the `main` function cannot be declared in an `extern` block
+
+passes_unix_sigpipe_values =
+ valid values for `#[unix_sigpipe = "..."]` are `inherit`, `sig_ign`, or `sig_dfl`
+
+passes_no_main_function =
+ `main` function not found in crate `{$crate_name}`
+ .here_is_main = here is a function named `main`
+ .one_or_more_possible_main = you have one or more functions named `main` not defined at the crate level
+ .consider_moving_main = consider moving the `main` function definitions
+ .main_must_be_defined_at_crate = the main function must be defined at the crate level{$has_filename ->
+ [true] {" "}(in `{$filename}`)
+ *[false] {""}
+ }
+ .consider_adding_main_to_file = consider adding a `main` function to `{$filename}`
+ .consider_adding_main_at_crate = consider adding a `main` function at the crate level
+    .teach_note = If you don't know the basics of Rust, you can look at the Rust Book to get started: https://doc.rust-lang.org/book/
+    .non_function_main = non-function item found at `crate::main`
+
+passes_duplicate_lang_item =
+ found duplicate lang item `{$lang_item_name}`
+ .first_defined_span = the lang item is first defined here
+ .first_defined_crate_depends = the lang item is first defined in crate `{$orig_crate_name}` (which `{$orig_dependency_of}` depends on)
+ .first_defined_crate = the lang item is first defined in crate `{$orig_crate_name}`.
+ .first_definition_local = first definition in the local crate (`{$orig_crate_name}`)
+ .second_definition_local = second definition in the local crate (`{$crate_name}`)
+ .first_definition_path = first definition in `{$orig_crate_name}` loaded from {$orig_path}
+ .second_definition_path = second definition in `{$crate_name}` loaded from {$path}
+
+passes_duplicate_lang_item_crate =
+ duplicate lang item in crate `{$crate_name}`: `{$lang_item_name}`.
+ .first_defined_span = the lang item is first defined here
+ .first_defined_crate_depends = the lang item is first defined in crate `{$orig_crate_name}` (which `{$orig_dependency_of}` depends on)
+ .first_defined_crate = the lang item is first defined in crate `{$orig_crate_name}`.
+ .first_definition_local = first definition in the local crate (`{$orig_crate_name}`)
+ .second_definition_local = second definition in the local crate (`{$crate_name}`)
+ .first_definition_path = first definition in `{$orig_crate_name}` loaded from {$orig_path}
+ .second_definition_path = second definition in `{$crate_name}` loaded from {$path}
+
+passes_duplicate_lang_item_crate_depends =
+ duplicate lang item in crate `{$crate_name}` (which `{$dependency_of}` depends on): `{$lang_item_name}`.
+ .first_defined_span = the lang item is first defined here
+ .first_defined_crate_depends = the lang item is first defined in crate `{$orig_crate_name}` (which `{$orig_dependency_of}` depends on)
+ .first_defined_crate = the lang item is first defined in crate `{$orig_crate_name}`.
+ .first_definition_local = first definition in the local crate (`{$orig_crate_name}`)
+ .second_definition_local = second definition in the local crate (`{$crate_name}`)
+ .first_definition_path = first definition in `{$orig_crate_name}` loaded from {$orig_path}
+ .second_definition_path = second definition in `{$crate_name}` loaded from {$path}
+
+passes_incorrect_target =
+ `{$name}` language item must be applied to a {$kind} with {$at_least ->
+ [true] at least {$num}
+ *[false] {$num}
+ } generic {$num ->
+ [one] argument
+ *[other] arguments
+ }
+ .label = this {$kind} has {$actual_num} generic {$actual_num ->
+ [one] argument
+ *[other] arguments
+ }
+
+passes_useless_assignment =
+ useless assignment of {$is_field_assign ->
+ [true] field
+ *[false] variable
+ } of type `{$ty}` to itself
+
+passes_only_has_effect_on =
+ `#[{$attr_name}]` only has an effect on {$target_name ->
+ [function] functions
+ [module] modules
+ [implementation_block] implementation blocks
+ *[unspecified] (unspecified--this is a compiler bug)
+ }
+
+passes_object_lifetime_err =
+ {$repr}
+
+passes_unrecognized_repr_hint =
+ unrecognized representation hint
+ .help = valid reprs are `C`, `align`, `packed`, `transparent`, `simd`, `i8`, `u8`, `i16`, `u16`, `i32`, `u32`, `i64`, `u64`, `i128`, `u128`, `isize`, `usize`
+
+passes_attr_application_enum =
+ attribute should be applied to an enum
+ .label = not an enum
+
+passes_attr_application_struct =
+ attribute should be applied to a struct
+ .label = not a struct
+
+passes_attr_application_struct_union =
+ attribute should be applied to a struct or union
+ .label = not a struct or union
+
+passes_attr_application_struct_enum_union =
+ attribute should be applied to a struct, enum, or union
+ .label = not a struct, enum, or union
+
+passes_attr_application_struct_enum_function_union =
+ attribute should be applied to a struct, enum, function, or union
+ .label = not a struct, enum, function, or union
+
+passes_transparent_incompatible =
+ transparent {$target} cannot have other repr hints
+
+passes_deprecated_attribute =
+ deprecated attribute must be paired with either stable or unstable attribute
+
+passes_useless_stability =
+ this stability annotation is useless
+ .label = useless stability annotation
+ .item = the stability attribute annotates this item
+
+passes_invalid_stability =
+ invalid stability version found
+ .label = invalid stability version
+ .item = the stability attribute annotates this item
+
+passes_cannot_stabilize_deprecated =
+ an API can't be stabilized after it is deprecated
+ .label = invalid version
+ .item = the stability attribute annotates this item
+
+passes_invalid_deprecation_version =
+ invalid deprecation version found
+ .label = invalid deprecation version
+ .item = the stability attribute annotates this item
+
+passes_missing_stability_attr =
+ {$descr} has missing stability attribute
+
+passes_missing_const_stab_attr =
+ {$descr} has missing const stability attribute
+
+passes_trait_impl_const_stable =
+ trait implementations cannot be const stable yet
+ .note = see issue #67792 <https://github.com/rust-lang/rust/issues/67792> for more information
+
+passes_feature_only_on_nightly =
+ `#![feature]` may not be used on the {$release_channel} release channel
+
+passes_unknown_feature =
+ unknown feature `{$feature}`
+
+passes_implied_feature_not_exist =
+ feature `{$implied_by}` implying `{$feature}` does not exist
+
+passes_duplicate_feature_err =
+ the feature `{$feature}` has already been declared
+
+passes_missing_const_err =
+ attributes `#[rustc_const_unstable]` and `#[rustc_const_stable]` require the function or method to be `const`
+ .help = make the function or method const
+ .label = attribute specified here
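
These `passes.ftl` entries use the standard Fluent shape: a snake_case message id, placeables such as `{$name}`, and dot-prefixed attributes (`.label`, `.help`) that subdiagnostics pick up. As a rough, standalone illustration of how one such entry resolves, here is a sketch using the `fluent-bundle` crate directly (assumed dependencies `fluent-bundle = "0.15"` and `unic-langid = "0.9"`; this is not the compiler's own loading path, which goes through the `fluent_messages!` macro shown later in this patch):

use fluent_bundle::{FluentArgs, FluentBundle, FluentResource};
use unic_langid::langid;

fn main() {
    // The FTL text is copied verbatim from the `passes_outside_loop` entry above.
    let ftl = r#"
passes_outside_loop =
    `{$name}` outside of a loop
    .label = cannot `{$name}` outside of a loop
"#;
    let resource = FluentResource::try_new(ftl.to_string()).expect("valid FTL");

    let mut bundle = FluentBundle::new(vec![langid!("en-US")]);
    bundle.set_use_isolating(false); // drop Unicode isolation marks for readability
    bundle.add_resource(resource).expect("no duplicate message ids");

    let mut args = FluentArgs::new();
    args.set("name", "break");

    let msg = bundle.get_message("passes_outside_loop").expect("message exists");
    let mut errors = vec![];
    let value = bundle.format_pattern(msg.value().expect("has a value"), Some(&args), &mut errors);
    let label = bundle.format_pattern(
        msg.get_attribute("label").expect("has .label").value(),
        Some(&args),
        &mut errors,
    );
    println!("{}", value); // `break` outside of a loop
    println!("{}", label); // cannot `break` outside of a loop
}
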
diff --git a/compiler/rustc_error_messages/locales/en-US/plugin_impl.ftl b/compiler/rustc_error_messages/locales/en-US/plugin_impl.ftl
new file mode 100644
index 000000000..8db32a42c
--- /dev/null
+++ b/compiler/rustc_error_messages/locales/en-US/plugin_impl.ftl
@@ -0,0 +1,4 @@
+plugin_impl_load_plugin_error = {$msg}
+
+plugin_impl_malformed_plugin_attribute = malformed `plugin` attribute
+ .label = malformed attribute
diff --git a/compiler/rustc_error_messages/locales/en-US/privacy.ftl b/compiler/rustc_error_messages/locales/en-US/privacy.ftl
index f8a750da9..a26d1b2b3 100644
--- a/compiler/rustc_error_messages/locales/en-US/privacy.ftl
+++ b/compiler/rustc_error_messages/locales/en-US/privacy.ftl
@@ -1,20 +1,22 @@
-privacy-field-is-private = field `{$field_name}` of {$variant_descr} `{$def_path_str}` is private
-privacy-field-is-private-is-update-syntax-label = field `{$field_name}` is private
-privacy-field-is-private-label = private field
+privacy_field_is_private = field `{$field_name}` of {$variant_descr} `{$def_path_str}` is private
+privacy_field_is_private_is_update_syntax_label = field `{$field_name}` is private
+privacy_field_is_private_label = private field
-privacy-item-is-private = {$kind} `{$descr}` is private
+privacy_item_is_private = {$kind} `{$descr}` is private
.label = private {$kind}
-privacy-unnamed-item-is-private = {$kind} is private
+privacy_unnamed_item_is_private = {$kind} is private
.label = private {$kind}
-privacy-in-public-interface = {$vis_descr} {$kind} `{$descr}` in public interface
+privacy_in_public_interface = {$vis_descr} {$kind} `{$descr}` in public interface
.label = can't leak {$vis_descr} {$kind}
- .visibility-label = `{$descr}` declared as {$vis_descr}
+ .visibility_label = `{$descr}` declared as {$vis_descr}
-privacy-from-private-dep-in-public-interface =
+privacy_report_effective_visibility = {$descr}
+
+privacy_from_private_dep_in_public_interface =
{$kind} `{$descr}` from private dependency '{$krate}' in public interface
-private-in-public-lint =
+privacy_private_in_public_lint =
{$vis_descr} {$kind} `{$descr}` in public interface (error {$kind ->
[trait] E0445
*[other] E0446
diff --git a/compiler/rustc_error_messages/locales/en-US/query_system.ftl b/compiler/rustc_error_messages/locales/en-US/query_system.ftl
new file mode 100644
index 000000000..870e82403
--- /dev/null
+++ b/compiler/rustc_error_messages/locales/en-US/query_system.ftl
@@ -0,0 +1,30 @@
+query_system_reentrant = internal compiler error: re-entrant incremental verify failure, suppressing message
+
+query_system_increment_compilation = internal compiler error: encountered incremental compilation error with {$dep_node}
+ .help = This is a known issue with the compiler. Run {$run_cmd} to allow your project to compile
+
+query_system_increment_compilation_note1 = Please follow the instructions below to create a bug report with the provided information
+query_system_increment_compilation_note2 = See <https://github.com/rust-lang/rust/issues/84970> for more information
+
+query_system_cycle = cycle detected when {$stack_bottom}
+
+query_system_cycle_usage = cycle used when {$usage}
+
+query_system_cycle_stack_single = ...which immediately requires {$stack_bottom} again
+
+query_system_cycle_stack_middle = ...which requires {$desc}...
+
+query_system_cycle_stack_multiple = ...which again requires {$stack_bottom}, completing the cycle
+
+query_system_cycle_recursive_ty_alias = type aliases cannot be recursive
+query_system_cycle_recursive_ty_alias_help1 = consider using a struct, enum, or union instead to break the cycle
+query_system_cycle_recursive_ty_alias_help2 = see <https://doc.rust-lang.org/reference/types.html#recursive-types> for more information
+
+query_system_cycle_recursive_trait_alias = trait aliases cannot be recursive
+
+query_system_cycle_which_requires = ...which requires {$desc}...
+
+query_system_query_overflow = queries overflow the depth limit!
+ .help = consider increasing the recursion limit by adding a `#![recursion_limit = "{$suggested_limit}"]` attribute to your crate (`{$crate_name}`)
+
+query_system_layout_of_depth = query depth increased by {$depth} when {$desc}
diff --git a/compiler/rustc_error_messages/locales/en-US/save_analysis.ftl b/compiler/rustc_error_messages/locales/en-US/save_analysis.ftl
new file mode 100644
index 000000000..36c2ff468
--- /dev/null
+++ b/compiler/rustc_error_messages/locales/en-US/save_analysis.ftl
@@ -0,0 +1 @@
+save_analysis_could_not_open = Could not open `{$file_name}`: `{$err}`
diff --git a/compiler/rustc_error_messages/locales/en-US/session.ftl b/compiler/rustc_error_messages/locales/en-US/session.ftl
new file mode 100644
index 000000000..e22779230
--- /dev/null
+++ b/compiler/rustc_error_messages/locales/en-US/session.ftl
@@ -0,0 +1,60 @@
+session_incorrect_cgu_reuse_type =
+ CGU-reuse for `{$cgu_user_name}` is `{$actual_reuse}` but should be {$at_least ->
+ [one] {"at least "}
+ *[other] {""}
+ }`{$expected_reuse}`
+
+session_cgu_not_recorded =
+    CGU-reuse for `{$cgu_user_name}` (mangled: `{$cgu_name}`) was not recorded
+
+session_feature_gate_error = {$explain}
+
+session_feature_diagnostic_for_issue =
+ see issue #{$n} <https://github.com/rust-lang/rust/issues/{$n}> for more information
+
+session_feature_diagnostic_help =
+ add `#![feature({$feature})]` to the crate attributes to enable
+
+session_not_circumvent_feature = `-Zunleash-the-miri-inside-of-you` may not be used to circumvent feature gates, except when testing error paths in the CTFE engine
+
+session_profile_use_file_does_not_exist = file `{$path}` passed to `-C profile-use` does not exist.
+
+session_linker_plugin_lto_windows_not_supported = linker plugin based LTO is not supported together with `-C prefer-dynamic` when targeting Windows-like targets
+
+session_profile_sample_use_file_does_not_exist = file `{$path}` passed to `-C profile-sample-use` does not exist.
+
+session_target_requires_unwind_tables = target requires unwind tables, they cannot be disabled with `-C force-unwind-tables=no`
+
+session_sanitizer_not_supported = {$us} sanitizer is not supported for this target
+
+session_sanitizers_not_supported = {$us} sanitizers are not supported for this target
+
+session_cannot_mix_and_match_sanitizers = `-Zsanitizer={$first}` is incompatible with `-Zsanitizer={$second}`
+
+session_cannot_enable_crt_static_linux = sanitizer is incompatible with statically linked libc, disable it using `-C target-feature=-crt-static`
+
+session_sanitizer_cfi_enabled = `-Zsanitizer=cfi` requires `-Clto`
+
+session_unstable_virtual_function_elimination = `-Zvirtual-function-elimination` requires `-Clto`
+
+session_unsupported_dwarf_version = requested DWARF version {$dwarf_version} is greater than 5
+
+session_target_stack_protector_not_supported = `-Z stack-protector={$stack_protector}` is not supported for target {$target_triple} and will be ignored
+
+session_split_debuginfo_unstable_platform = `-Csplit-debuginfo={$debuginfo}` is unstable on this platform
+
+session_file_is_not_writeable = output file {$file} is not writeable -- check its permissions
+
+session_crate_name_does_not_match = `--crate-name` and `#[crate_name]` are required to match, but `{$s}` != `{$name}`
+
+session_crate_name_invalid = crate names cannot start with a `-`, but `{$s}` has a leading hyphen
+
+session_crate_name_empty = crate name must not be empty
+
+session_invalid_character_in_create_name = invalid character `{$character}` in crate name: `{$crate_name}`
+
+session_expr_parentheses_needed = parentheses are required to parse this as an expression
+
+session_skipping_const_checks = skipping const checks
+session_unleashed_feature_help_named = skipping check for `{$gate}` feature
+session_unleashed_feature_help_unnamed = skipping check that does not even have a feature gate
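
The `session_incorrect_cgu_reuse_type` entry above relies on a Fluent select expression keyed on a numeric argument, so the `[one]`/`*[other]` variants behave like plural categories. A runnable sketch under the same assumed `fluent-bundle`/`unic-langid` dependencies as the earlier example (the argument values are invented):

use fluent_bundle::{FluentArgs, FluentBundle, FluentResource};
use unic_langid::langid;

fn main() {
    // Same text as the entry above; the select expression picks a variant based on
    // the plural category of the number passed for `at_least`.
    let ftl = r#"
session_incorrect_cgu_reuse_type =
    CGU-reuse for `{$cgu_user_name}` is `{$actual_reuse}` but should be {$at_least ->
    [one] {"at least "}
    *[other] {""}
    }`{$expected_reuse}`
"#;
    let resource = FluentResource::try_new(ftl.to_string()).expect("valid FTL");
    let mut bundle = FluentBundle::new(vec![langid!("en-US")]);
    bundle.set_use_isolating(false);
    bundle.add_resource(resource).expect("no duplicate message ids");

    for (at_least, prefix) in [(1, "at least "), (0, "")] {
        let mut args = FluentArgs::new();
        args.set("cgu_user_name", "my_crate.0");
        args.set("actual_reuse", "PreLto");
        args.set("expected_reuse", "PostLto");
        args.set("at_least", at_least); // 1 selects [one], 0 falls through to *[other]

        let msg = bundle.get_message("session_incorrect_cgu_reuse_type").unwrap();
        let mut errors = vec![];
        let text = bundle.format_pattern(msg.value().unwrap(), Some(&args), &mut errors);
        let expected = format!(
            "CGU-reuse for `my_crate.0` is `PreLto` but should be {}`PostLto`",
            prefix
        );
        assert_eq!(text, expected);
    }
}
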
diff --git a/compiler/rustc_error_messages/locales/en-US/symbol_mangling.ftl b/compiler/rustc_error_messages/locales/en-US/symbol_mangling.ftl
new file mode 100644
index 000000000..b7d48280f
--- /dev/null
+++ b/compiler/rustc_error_messages/locales/en-US/symbol_mangling.ftl
@@ -0,0 +1 @@
+symbol_mangling_test_output = {$kind}({$content})
diff --git a/compiler/rustc_error_messages/locales/en-US/trait_selection.ftl b/compiler/rustc_error_messages/locales/en-US/trait_selection.ftl
new file mode 100644
index 000000000..004e0ab18
--- /dev/null
+++ b/compiler/rustc_error_messages/locales/en-US/trait_selection.ftl
@@ -0,0 +1,26 @@
+trait_selection_dump_vtable_entries = vtable entries for `{$trait_ref}`: {$entries}
+
+trait_selection_unable_to_construct_constant_value = unable to construct a constant value for the unevaluated constant {$unevaluated}
+
+trait_selection_auto_deref_reached_recursion_limit = reached the recursion limit while auto-dereferencing `{$ty}`
+ .label = deref recursion limit reached
+ .help = consider increasing the recursion limit by adding a `#![recursion_limit = "{$suggested_limit}"]` attribute to your crate (`{$crate_name}`)
+
+trait_selection_empty_on_clause_in_rustc_on_unimplemented = empty `on`-clause in `#[rustc_on_unimplemented]`
+ .label = empty on-clause here
+
+trait_selection_invalid_on_clause_in_rustc_on_unimplemented = invalid `on`-clause in `#[rustc_on_unimplemented]`
+ .label = invalid on-clause here
+
+trait_selection_no_value_in_rustc_on_unimplemented = this attribute must have a valid value
+ .label = expected value here
+    .note = e.g. `#[rustc_on_unimplemented(message="foo")]`
+
+trait_selection_negative_positive_conflict = found both positive and negative implementation of trait `{$trait_desc}`{$self_desc ->
+ [none] {""}
+ *[default] {" "}for type `{$self_desc}`
+ }:
+ .negative_implementation_here = negative implementation here
+ .negative_implementation_in_crate = negative implementation in crate `{$negative_impl_cname}`
+ .positive_implementation_here = positive implementation here
+ .positive_implementation_in_crate = positive implementation in crate `{$positive_impl_cname}`
diff --git a/compiler/rustc_error_messages/locales/en-US/ty_utils.ftl b/compiler/rustc_error_messages/locales/en-US/ty_utils.ftl
new file mode 100644
index 000000000..1040ee1c9
--- /dev/null
+++ b/compiler/rustc_error_messages/locales/en-US/ty_utils.ftl
@@ -0,0 +1,47 @@
+ty_utils_needs_drop_overflow = overflow while checking whether `{$query_ty}` requires drop
+
+ty_utils_generic_constant_too_complex = overly complex generic constant
+ .help = consider moving this anonymous constant into a `const` function
+ .maybe_supported = this operation may be supported in the future
+
+ty_utils_borrow_not_supported = borrowing is not supported in generic constants
+
+ty_utils_address_and_deref_not_supported = dereferencing or taking the address is not supported in generic constants
+
+ty_utils_array_not_supported = array construction is not supported in generic constants
+
+ty_utils_block_not_supported = blocks are not supported in generic constants
+
+ty_utils_never_to_any_not_supported = converting nevers to any is not supported in generic constants
+
+ty_utils_tuple_not_supported = tuple construction is not supported in generic constants
+
+ty_utils_index_not_supported = indexing is not supported in generic constants
+
+ty_utils_field_not_supported = field access is not supported in generic constants
+
+ty_utils_const_block_not_supported = const blocks are not supported in generic constants
+
+ty_utils_adt_not_supported = struct/enum construction is not supported in generic constants
+
+ty_utils_pointer_not_supported = pointer casts are not allowed in generic constants
+
+ty_utils_yield_not_supported = generator control flow is not allowed in generic constants
+
+ty_utils_loop_not_supported = loops and loop control flow are not supported in generic constants
+
+ty_utils_box_not_supported = allocations are not allowed in generic constants
+
+ty_utils_binary_not_supported = unsupported binary operation in generic constants
+
+ty_utils_logical_op_not_supported = unsupported operation in generic constants, short-circuiting operations would imply control flow
+
+ty_utils_assign_not_supported = assignment is not supported in generic constants
+
+ty_utils_closure_and_return_not_supported = closures and function keywords are not supported in generic constants
+
+ty_utils_control_flow_not_supported = control flow is not supported in generic constants
+
+ty_utils_inline_asm_not_supported = assembly is not supported in generic constants
+
+ty_utils_operation_not_supported = unsupported operation in generic constants
diff --git a/compiler/rustc_error_messages/locales/en-US/typeck.ftl b/compiler/rustc_error_messages/locales/en-US/typeck.ftl
deleted file mode 100644
index c61735a57..000000000
--- a/compiler/rustc_error_messages/locales/en-US/typeck.ftl
+++ /dev/null
@@ -1,125 +0,0 @@
-typeck-field-multiply-specified-in-initializer =
- field `{$ident}` specified more than once
- .label = used more than once
- .previous-use-label = first use of `{$ident}`
-
-typeck-unrecognized-atomic-operation =
- unrecognized atomic operation function: `{$op}`
- .label = unrecognized atomic operation
-
-typeck-wrong-number-of-generic-arguments-to-intrinsic =
- intrinsic has wrong number of {$descr} parameters: found {$found}, expected {$expected}
- .label = expected {$expected} {$descr} {$expected ->
- [one] parameter
- *[other] parameters
- }
-
-typeck-unrecognized-intrinsic-function =
- unrecognized intrinsic function: `{$name}`
- .label = unrecognized intrinsic
-
-typeck-lifetimes-or-bounds-mismatch-on-trait =
- lifetime parameters or bounds on {$item_kind} `{$ident}` do not match the trait declaration
- .label = lifetimes do not match {$item_kind} in trait
- .generics-label = lifetimes in impl do not match this {$item_kind} in trait
-
-typeck-drop-impl-on-wrong-item =
- the `Drop` trait may only be implemented for structs, enums, and unions
- .label = must be a struct, enum, or union
-
-typeck-field-already-declared =
- field `{$field_name}` is already declared
- .label = field already declared
- .previous-decl-label = `{$field_name}` first declared here
-
-typeck-copy-impl-on-type-with-dtor =
- the trait `Copy` may not be implemented for this type; the type has a destructor
- .label = `Copy` not allowed on types with destructors
-
-typeck-multiple-relaxed-default-bounds =
- type parameter has more than one relaxed default bound, only one is supported
-
-typeck-copy-impl-on-non-adt =
- the trait `Copy` may not be implemented for this type
- .label = type is not a structure or enumeration
-
-typeck-trait-object-declared-with-no-traits =
- at least one trait is required for an object type
- .alias-span = this alias does not contain a trait
-
-typeck-ambiguous-lifetime-bound =
- ambiguous lifetime bound, explicit lifetime bound required
-
-typeck-assoc-type-binding-not-allowed =
- associated type bindings are not allowed here
- .label = associated type not allowed here
-
-typeck-functional-record-update-on-non-struct =
- functional record update syntax requires a struct
-
-typeck-typeof-reserved-keyword-used =
- `typeof` is a reserved keyword but unimplemented
- .suggestion = consider replacing `typeof(...)` with an actual type
- .label = reserved keyword
-
-typeck-return-stmt-outside-of-fn-body =
- return statement outside of function body
- .encl-body-label = the return is part of this body...
- .encl-fn-label = ...not the enclosing function body
-
-typeck-yield-expr-outside-of-generator =
- yield expression outside of generator literal
-
-typeck-struct-expr-non-exhaustive =
- cannot create non-exhaustive {$what} using struct expression
-
-typeck-method-call-on-unknown-type =
- the type of this value must be known to call a method on a raw pointer on it
-
-typeck-value-of-associated-struct-already-specified =
- the value of the associated type `{$item_name}` (from trait `{$def_path}`) is already specified
- .label = re-bound here
- .previous-bound-label = `{$item_name}` bound here first
-
-typeck-address-of-temporary-taken = cannot take address of a temporary
- .label = temporary value
-
-typeck-add-return-type-add = try adding a return type
-
-typeck-add-return-type-missing-here = a return type might be missing here
-
-typeck-expected-default-return-type = expected `()` because of default return type
-
-typeck-expected-return-type = expected `{$expected}` because of return type
-
-typeck-unconstrained-opaque-type = unconstrained opaque type
- .note = `{$name}` must be used in combination with a concrete type within the same module
-
-typeck-missing-type-params =
- the type {$parameterCount ->
- [one] parameter
- *[other] parameters
- } {$parameters} must be explicitly specified
- .label = type {$parameterCount ->
- [one] parameter
- *[other] parameters
- } {$parameters} must be specified for this
- .suggestion = set the type {$parameterCount ->
- [one] parameter
- *[other] parameters
- } to the desired {$parameterCount ->
- [one] type
- *[other] types
- }
- .no-suggestion-label = missing {$parameterCount ->
- [one] reference
- *[other] references
- } to {$parameters}
- .note = because of the default `Self` reference, type parameters must be specified on object types
-
-typeck-manual-implementation =
- manual implementations of `{$trait_name}` are experimental
- .label = manual implementations of `{$trait_name}` are experimental
- .help = add `#![feature(unboxed_closures)]` to the crate attributes to enable
-
-typeck-substs-on-overridden-impl = could not resolve substs on overridden impl
diff --git a/compiler/rustc_error_messages/src/lib.rs b/compiler/rustc_error_messages/src/lib.rs
index 02bb04d98..9465051dd 100644
--- a/compiler/rustc_error_messages/src/lib.rs
+++ b/compiler/rustc_error_messages/src/lib.rs
@@ -2,6 +2,11 @@
#![feature(once_cell)]
#![feature(rustc_attrs)]
#![feature(type_alias_impl_trait)]
+#![deny(rustc::untranslatable_diagnostic)]
+#![deny(rustc::diagnostic_outside_of_impl)]
+
+#[macro_use]
+extern crate tracing;
use fluent_bundle::FluentResource;
use fluent_syntax::parser::ParserError;
@@ -14,7 +19,6 @@ use std::fmt;
use std::fs;
use std::io;
use std::path::{Path, PathBuf};
-use tracing::{instrument, trace};
#[cfg(not(parallel_compiler))]
use std::cell::LazyCell as Lazy;
@@ -31,15 +35,38 @@ pub use unic_langid::{langid, LanguageIdentifier};
// Generates `DEFAULT_LOCALE_RESOURCES` static and `fluent_generated` module.
fluent_messages! {
+ // tidy-alphabetical-start
+ ast_lowering => "../locales/en-US/ast_lowering.ftl",
+ ast_passes => "../locales/en-US/ast_passes.ftl",
+ attr => "../locales/en-US/attr.ftl",
borrowck => "../locales/en-US/borrowck.ftl",
builtin_macros => "../locales/en-US/builtin_macros.ftl",
+ codegen_gcc => "../locales/en-US/codegen_gcc.ftl",
+ codegen_ssa => "../locales/en-US/codegen_ssa.ftl",
+ compiletest => "../locales/en-US/compiletest.ftl",
const_eval => "../locales/en-US/const_eval.ftl",
+ driver => "../locales/en-US/driver.ftl",
+ errors => "../locales/en-US/errors.ftl",
expand => "../locales/en-US/expand.ftl",
+ hir_analysis => "../locales/en-US/hir_analysis.ftl",
+ infer => "../locales/en-US/infer.ftl",
+ interface => "../locales/en-US/interface.ftl",
lint => "../locales/en-US/lint.ftl",
+ metadata => "../locales/en-US/metadata.ftl",
+ middle => "../locales/en-US/middle.ftl",
+ mir_dataflow => "../locales/en-US/mir_dataflow.ftl",
+ monomorphize => "../locales/en-US/monomorphize.ftl",
parser => "../locales/en-US/parser.ftl",
passes => "../locales/en-US/passes.ftl",
+ plugin_impl => "../locales/en-US/plugin_impl.ftl",
privacy => "../locales/en-US/privacy.ftl",
- typeck => "../locales/en-US/typeck.ftl",
+ query_system => "../locales/en-US/query_system.ftl",
+ save_analysis => "../locales/en-US/save_analysis.ftl",
+ session => "../locales/en-US/session.ftl",
+ symbol_mangling => "../locales/en-US/symbol_mangling.ftl",
+ trait_selection => "../locales/en-US/trait_selection.ftl",
+ ty_utils => "../locales/en-US/ty_utils.ftl",
+ // tidy-alphabetical-end
}
pub use fluent_generated::{self as fluent, DEFAULT_LOCALE_RESOURCES};
@@ -247,14 +274,26 @@ type FluentId = Cow<'static, str>;
/// Translatable messages for subdiagnostics are typically attributes attached to a larger Fluent
/// message so messages of this type must be combined with a `DiagnosticMessage` (using
/// `DiagnosticMessage::with_subdiagnostic_message`) before rendering. However, subdiagnostics from
-/// the `SessionSubdiagnostic` derive refer to Fluent identifiers directly.
+/// the `Subdiagnostic` derive refer to Fluent identifiers directly.
#[rustc_diagnostic_item = "SubdiagnosticMessage"]
pub enum SubdiagnosticMessage {
/// Non-translatable diagnostic message.
// FIXME(davidtwco): can a `Cow<'static, str>` be used here?
Str(String),
+ /// Translatable message which has already been translated eagerly.
+ ///
+ /// Some diagnostics have repeated subdiagnostics where the same interpolated variables would
+ /// be instantiated multiple times with different values. As translation normally happens
+ /// immediately prior to emission, after the diagnostic and subdiagnostic derive logic has run,
+ /// the setting of diagnostic arguments in the derived code will overwrite previous variable
+ /// values and only the final value will be set when translation occurs - resulting in
+ /// incorrect diagnostics. Eager translation results in translation for a subdiagnostic
+ /// happening immediately after the subdiagnostic derive's logic has been run. This variant
+ /// stores messages which have been translated eagerly.
+ // FIXME(#100717): can a `Cow<'static, str>` be used here?
+ Eager(String),
/// Identifier of a Fluent message. Instances of this variant are generated by the
- /// `SessionSubdiagnostic` derive.
+ /// `Subdiagnostic` derive.
FluentIdentifier(FluentId),
/// Attribute of a Fluent message. Needs to be combined with a Fluent identifier to produce an
/// actual translated message. Instances of this variant are generated by the `fluent_messages`
@@ -280,8 +319,20 @@ impl<S: Into<String>> From<S> for SubdiagnosticMessage {
#[rustc_diagnostic_item = "DiagnosticMessage"]
pub enum DiagnosticMessage {
/// Non-translatable diagnostic message.
- // FIXME(davidtwco): can a `Cow<'static, str>` be used here?
+ // FIXME(#100717): can a `Cow<'static, str>` be used here?
Str(String),
+ /// Translatable message which has already been translated eagerly.
+ ///
+ /// Some diagnostics have repeated subdiagnostics where the same interpolated variables would
+ /// be instantiated multiple times with different values. As translation normally happens
+ /// immediately prior to emission, after the diagnostic and subdiagnostic derive logic has run,
+ /// the setting of diagnostic arguments in the derived code will overwrite previous variable
+ /// values and only the final value will be set when translation occurs - resulting in
+ /// incorrect diagnostics. Eager translation results in translation for a subdiagnostic
+ /// happening immediately after the subdiagnostic derive's logic has been run. This variant
+ /// stores messages which have been translated eagerly.
+ // FIXME(#100717): can a `Cow<'static, str>` be used here?
+ Eager(String),
/// Identifier for a Fluent message (with optional attribute) corresponding to the diagnostic
/// message.
///
@@ -300,6 +351,7 @@ impl DiagnosticMessage {
pub fn with_subdiagnostic_message(&self, sub: SubdiagnosticMessage) -> Self {
let attr = match sub {
SubdiagnosticMessage::Str(s) => return DiagnosticMessage::Str(s),
+ SubdiagnosticMessage::Eager(s) => return DiagnosticMessage::Eager(s),
SubdiagnosticMessage::FluentIdentifier(id) => {
return DiagnosticMessage::FluentIdentifier(id, None);
}
@@ -308,24 +360,12 @@ impl DiagnosticMessage {
match self {
DiagnosticMessage::Str(s) => DiagnosticMessage::Str(s.clone()),
+ DiagnosticMessage::Eager(s) => DiagnosticMessage::Eager(s.clone()),
DiagnosticMessage::FluentIdentifier(id, _) => {
DiagnosticMessage::FluentIdentifier(id.clone(), Some(attr))
}
}
}
-
- /// Returns the `String` contained within the `DiagnosticMessage::Str` variant, assuming that
- /// this diagnostic message is of the legacy, non-translatable variety. Panics if this
- /// assumption does not hold.
- ///
- /// Don't use this - it exists to support some places that do comparison with diagnostic
- /// strings.
- pub fn expect_str(&self) -> &str {
- match self {
- DiagnosticMessage::Str(s) => s,
- _ => panic!("expected non-translatable diagnostic message"),
- }
- }
}
/// `From` impl that enables existing diagnostic calls to functions which now take
@@ -336,6 +376,17 @@ impl<S: Into<String>> From<S> for DiagnosticMessage {
}
}
+/// A workaround for "good path" ICEs when formatting types in disabled lints.
+///
+/// Delays formatting until `.into(): DiagnosticMessage` is used.
+pub struct DelayDm<F>(pub F);
+
+impl<F: FnOnce() -> String> From<DelayDm<F>> for DiagnosticMessage {
+ fn from(DelayDm(f): DelayDm<F>) -> Self {
+ DiagnosticMessage::from(f())
+ }
+}
+
/// Translating *into* a subdiagnostic message from a diagnostic message is a little strange - but
/// the subdiagnostic functions (e.g. `span_label`) take a `SubdiagnosticMessage` and the
/// subdiagnostic derive refers to typed identifiers that are `DiagnosticMessage`s, so need to be
@@ -345,6 +396,7 @@ impl Into<SubdiagnosticMessage> for DiagnosticMessage {
fn into(self) -> SubdiagnosticMessage {
match self {
DiagnosticMessage::Str(s) => SubdiagnosticMessage::Str(s),
+ DiagnosticMessage::Eager(s) => SubdiagnosticMessage::Eager(s),
DiagnosticMessage::FluentIdentifier(id, None) => {
SubdiagnosticMessage::FluentIdentifier(id)
}
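
The new `Eager` variants exist because diagnostic arguments are stored per name, so attaching the same subdiagnostic twice with different values for one argument leaves only the last value visible when translation happens at emission time. Below is a deliberately simplified, self-contained model of that failure mode and of what rendering eagerly buys; plain `String`s and a `HashMap` stand in for the real message and argument types:

use std::collections::HashMap;

// `Late` stands in for a translatable message rendered at emission time against the
// final argument map; `Eager` stands in for a message rendered up front.
enum Msg {
    Late(&'static str),
    Eager(String),
}

fn render(msg: &Msg, args: &HashMap<&str, String>) -> String {
    match msg {
        Msg::Late(template) => template.replace("{$name}", &args["name"]),
        Msg::Eager(s) => s.clone(),
    }
}

fn main() {
    let mut args: HashMap<&str, String> = HashMap::new();
    let mut subdiagnostics: Vec<Msg> = Vec::new();

    // The same subdiagnostic attached twice, with different values for `name`.
    for name in ["first", "second"] {
        args.insert("name", name.to_string()); // overwrites the previous value
        subdiagnostics.push(Msg::Late("label for `{$name}`"));
        subdiagnostics.push(Msg::Eager(format!("label for `{}`", name)));
    }

    for msg in &subdiagnostics {
        println!("{}", render(msg, &args));
    }
    // Both `Late` messages print "label for `second`" (the bug the `Eager` variant
    // avoids), while the `Eager` ones keep "first" and "second" respectively.
}
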
diff --git a/compiler/rustc_errors/Cargo.toml b/compiler/rustc_errors/Cargo.toml
index 7d7e92c52..7803a0792 100644
--- a/compiler/rustc_errors/Cargo.toml
+++ b/compiler/rustc_errors/Cargo.toml
@@ -4,24 +4,26 @@ version = "0.0.0"
edition = "2021"
[lib]
-doctest = false
[dependencies]
tracing = "0.1"
+rustc_ast = { path = "../rustc_ast" }
+rustc_ast_pretty = { path = "../rustc_ast_pretty" }
rustc_error_messages = { path = "../rustc_error_messages" }
rustc_serialize = { path = "../rustc_serialize" }
rustc_span = { path = "../rustc_span" }
rustc_macros = { path = "../rustc_macros" }
rustc_data_structures = { path = "../rustc_data_structures" }
+rustc_target = { path = "../rustc_target" }
rustc_hir = { path = "../rustc_hir" }
rustc_lint_defs = { path = "../rustc_lint_defs" }
unicode-width = "0.1.4"
atty = "0.2"
termcolor = "1.0"
-annotate-snippets = "0.8.0"
+annotate-snippets = "0.9"
termize = "0.1.1"
-serde = { version = "1.0.125", features = ["derive"] }
+serde = { version = "1.0.125", features = [ "derive" ] }
serde_json = "1.0.59"
[target.'cfg(windows)'.dependencies]
-winapi = { version = "0.3", features = ["handleapi", "synchapi", "winbase"] }
+winapi = { version = "0.3", features = [ "handleapi", "synchapi", "winbase" ] }
diff --git a/compiler/rustc_errors/src/annotate_snippet_emitter_writer.rs b/compiler/rustc_errors/src/annotate_snippet_emitter_writer.rs
index 0fcd61d1e..f14b8ee32 100644
--- a/compiler/rustc_errors/src/annotate_snippet_emitter_writer.rs
+++ b/compiler/rustc_errors/src/annotate_snippet_emitter_writer.rs
@@ -7,6 +7,7 @@
use crate::emitter::FileWithAnnotatedLines;
use crate::snippet::Line;
+use crate::translation::{to_fluent_args, Translate};
use crate::{
CodeSuggestion, Diagnostic, DiagnosticId, DiagnosticMessage, Emitter, FluentBundle,
LazyFallbackBundle, Level, MultiSpan, Style, SubDiagnostic,
@@ -32,10 +33,20 @@ pub struct AnnotateSnippetEmitterWriter {
macro_backtrace: bool,
}
+impl Translate for AnnotateSnippetEmitterWriter {
+ fn fluent_bundle(&self) -> Option<&Lrc<FluentBundle>> {
+ self.fluent_bundle.as_ref()
+ }
+
+ fn fallback_fluent_bundle(&self) -> &FluentBundle {
+ &**self.fallback_bundle
+ }
+}
+
impl Emitter for AnnotateSnippetEmitterWriter {
/// The entry point for the diagnostics generation
fn emit_diagnostic(&mut self, diag: &Diagnostic) {
- let fluent_args = self.to_fluent_args(diag.args());
+ let fluent_args = to_fluent_args(diag.args());
let mut children = diag.children.clone();
let (mut primary_span, suggestions) = self.primary_span_formatted(&diag, &fluent_args);
@@ -63,14 +74,6 @@ impl Emitter for AnnotateSnippetEmitterWriter {
self.source_map.as_ref()
}
- fn fluent_bundle(&self) -> Option<&Lrc<FluentBundle>> {
- self.fluent_bundle.as_ref()
- }
-
- fn fallback_fluent_bundle(&self) -> &FluentBundle {
- &**self.fallback_bundle
- }
-
fn should_show_explain(&self) -> bool {
!self.short_message
}
@@ -183,7 +186,11 @@ impl AnnotateSnippetEmitterWriter {
annotation_type: annotation_type_for_level(*level),
}),
footer: vec![],
- opt: FormatOptions { color: true, anonymized_line_numbers: self.ui_testing },
+ opt: FormatOptions {
+ color: true,
+ anonymized_line_numbers: self.ui_testing,
+ margin: None,
+ },
slices: annotated_files
.iter()
.map(|(source, line_index, annotations)| {
diff --git a/compiler/rustc_errors/src/diagnostic.rs b/compiler/rustc_errors/src/diagnostic.rs
index 17e6c9e95..23f29a24f 100644
--- a/compiler/rustc_errors/src/diagnostic.rs
+++ b/compiler/rustc_errors/src/diagnostic.rs
@@ -1,15 +1,14 @@
use crate::snippet::Style;
use crate::{
- CodeSuggestion, DiagnosticMessage, EmissionGuarantee, Level, LintDiagnosticBuilder, MultiSpan,
+ CodeSuggestion, DiagnosticBuilder, DiagnosticMessage, EmissionGuarantee, Level, MultiSpan,
SubdiagnosticMessage, Substitution, SubstitutionPart, SuggestionStyle,
};
use rustc_data_structures::fx::FxHashMap;
use rustc_error_messages::FluentValue;
-use rustc_hir as hir;
use rustc_lint_defs::{Applicability, LintExpectationId};
use rustc_span::edition::LATEST_STABLE_EDITION;
-use rustc_span::symbol::{Ident, Symbol};
-use rustc_span::{edition::Edition, Span, DUMMY_SP};
+use rustc_span::symbol::Symbol;
+use rustc_span::{Span, DUMMY_SP};
use std::borrow::Cow;
use std::fmt;
use std::hash::{Hash, Hasher};
@@ -22,7 +21,11 @@ pub struct SuggestionsDisabled;
/// Simplified version of `FluentArg` that can implement `Encodable` and `Decodable`. Collection of
/// `DiagnosticArg` are converted to `FluentArgs` (consuming the collection) at the start of
/// diagnostic emission.
-pub type DiagnosticArg<'source> = (Cow<'source, str>, DiagnosticArgValue<'source>);
+pub type DiagnosticArg<'iter, 'source> =
+ (&'iter DiagnosticArgName<'source>, &'iter DiagnosticArgValue<'source>);
+
+/// Name of a diagnostic argument.
+pub type DiagnosticArgName<'source> = Cow<'source, str>;
/// Simplified version of `FluentValue` that can implement `Encodable` and `Decodable`. Converted
/// to a `FluentValue` by the emitter to be used in diagnostic translation.
@@ -32,7 +35,7 @@ pub enum DiagnosticArgValue<'source> {
Number(usize),
}
-/// Converts a value of a type into a `DiagnosticArg` (typically a field of a `SessionDiagnostic`
+/// Converts a value of a type into a `DiagnosticArg` (typically a field of an `IntoDiagnostic`
/// struct). Implemented as a custom trait rather than `From` so that it is implemented on the type
/// being converted rather than on `DiagnosticArgValue`, which enables types from other `rustc_*`
/// crates to implement this.
@@ -40,95 +43,6 @@ pub trait IntoDiagnosticArg {
fn into_diagnostic_arg(self) -> DiagnosticArgValue<'static>;
}
-pub struct DiagnosticArgFromDisplay<'a>(pub &'a dyn fmt::Display);
-
-impl IntoDiagnosticArg for DiagnosticArgFromDisplay<'_> {
- fn into_diagnostic_arg(self) -> DiagnosticArgValue<'static> {
- self.0.to_string().into_diagnostic_arg()
- }
-}
-
-impl<'a> From<&'a dyn fmt::Display> for DiagnosticArgFromDisplay<'a> {
- fn from(t: &'a dyn fmt::Display) -> Self {
- DiagnosticArgFromDisplay(t)
- }
-}
-
-impl<'a, T: fmt::Display> From<&'a T> for DiagnosticArgFromDisplay<'a> {
- fn from(t: &'a T) -> Self {
- DiagnosticArgFromDisplay(t)
- }
-}
-
-macro_rules! into_diagnostic_arg_using_display {
- ($( $ty:ty ),+ $(,)?) => {
- $(
- impl IntoDiagnosticArg for $ty {
- fn into_diagnostic_arg(self) -> DiagnosticArgValue<'static> {
- self.to_string().into_diagnostic_arg()
- }
- }
- )+
- }
-}
-
-into_diagnostic_arg_using_display!(
- i8,
- u8,
- i16,
- u16,
- i32,
- u32,
- i64,
- u64,
- i128,
- u128,
- std::num::NonZeroU32,
- hir::Target,
- Edition,
- Ident,
-);
-
-impl IntoDiagnosticArg for bool {
- fn into_diagnostic_arg(self) -> DiagnosticArgValue<'static> {
- if self {
- DiagnosticArgValue::Str(Cow::Borrowed("true"))
- } else {
- DiagnosticArgValue::Str(Cow::Borrowed("false"))
- }
- }
-}
-
-impl IntoDiagnosticArg for char {
- fn into_diagnostic_arg(self) -> DiagnosticArgValue<'static> {
- DiagnosticArgValue::Str(Cow::Owned(format!("{:?}", self)))
- }
-}
-
-impl IntoDiagnosticArg for Symbol {
- fn into_diagnostic_arg(self) -> DiagnosticArgValue<'static> {
- self.to_ident_string().into_diagnostic_arg()
- }
-}
-
-impl<'a> IntoDiagnosticArg for &'a str {
- fn into_diagnostic_arg(self) -> DiagnosticArgValue<'static> {
- self.to_string().into_diagnostic_arg()
- }
-}
-
-impl IntoDiagnosticArg for String {
- fn into_diagnostic_arg(self) -> DiagnosticArgValue<'static> {
- DiagnosticArgValue::Str(Cow::Owned(self))
- }
-}
-
-impl IntoDiagnosticArg for usize {
- fn into_diagnostic_arg(self) -> DiagnosticArgValue<'static> {
- DiagnosticArgValue::Number(self)
- }
-}
-
impl<'source> Into<FluentValue<'source>> for DiagnosticArgValue<'source> {
fn into(self) -> FluentValue<'source> {
match self {
@@ -138,22 +52,24 @@ impl<'source> Into<FluentValue<'source>> for DiagnosticArgValue<'source> {
}
}
-impl IntoDiagnosticArg for hir::ConstContext {
- fn into_diagnostic_arg(self) -> DiagnosticArgValue<'static> {
- DiagnosticArgValue::Str(Cow::Borrowed(match self {
- hir::ConstContext::ConstFn => "constant function",
- hir::ConstContext::Static(_) => "static",
- hir::ConstContext::Const => "constant",
- }))
- }
-}
-
/// Trait implemented by error types. This should not be implemented manually. Instead, use
-/// `#[derive(SessionSubdiagnostic)]` -- see [rustc_macros::SessionSubdiagnostic].
-#[rustc_diagnostic_item = "AddSubdiagnostic"]
-pub trait AddSubdiagnostic {
+/// `#[derive(Subdiagnostic)]` -- see [rustc_macros::Subdiagnostic].
+#[cfg_attr(bootstrap, rustc_diagnostic_item = "AddSubdiagnostic")]
+#[cfg_attr(not(bootstrap), rustc_diagnostic_item = "AddToDiagnostic")]
+pub trait AddToDiagnostic
+where
+ Self: Sized,
+{
/// Add a subdiagnostic to an existing diagnostic.
- fn add_to_diagnostic(self, diag: &mut Diagnostic);
+ fn add_to_diagnostic(self, diag: &mut Diagnostic) {
+ self.add_to_diagnostic_with(diag, |_, m| m);
+ }
+
+ /// Add a subdiagnostic to an existing diagnostic where `f` is invoked on every message used
+ /// (to optionally perform eager translation).
+ fn add_to_diagnostic_with<F>(self, diag: &mut Diagnostic, f: F)
+ where
+ F: Fn(&mut Diagnostic, SubdiagnosticMessage) -> SubdiagnosticMessage;
}
/// Trait implemented by lint types. This should not be implemented manually. Instead, use
@@ -161,7 +77,12 @@ pub trait AddSubdiagnostic {
#[rustc_diagnostic_item = "DecorateLint"]
pub trait DecorateLint<'a, G: EmissionGuarantee> {
/// Decorate and emit a lint.
- fn decorate_lint(self, diag: LintDiagnosticBuilder<'a, G>);
+ fn decorate_lint<'b>(
+ self,
+ diag: &'b mut DiagnosticBuilder<'a, G>,
+ ) -> &'b mut DiagnosticBuilder<'a, G>;
+
+ fn msg(&self) -> DiagnosticMessage;
}
#[must_use]
@@ -176,7 +97,7 @@ pub struct Diagnostic {
pub span: MultiSpan,
pub children: Vec<SubDiagnostic>,
pub suggestions: Result<Vec<CodeSuggestion>, SuggestionsDisabled>,
- args: Vec<DiagnosticArg<'static>>,
+ args: FxHashMap<DiagnosticArgName<'static>, DiagnosticArgValue<'static>>,
/// This is not used for highlighting or rendering any error message. Rather, it can be used
/// as a sort key to sort a buffer of diagnostics. By default, it is the primary span of
@@ -268,7 +189,7 @@ impl Diagnostic {
span: MultiSpan::new(),
children: vec![],
suggestions: Ok(vec![]),
- args: vec![],
+ args: Default::default(),
sort_span: DUMMY_SP,
is_lint: false,
}
@@ -311,9 +232,10 @@ impl Diagnostic {
// The lint index inside the attribute is manually transferred here.
let lint_index = expectation_id.get_lint_index();
expectation_id.set_lint_index(None);
- let mut stable_id = *unstable_to_stable
+ let mut stable_id = unstable_to_stable
.get(&expectation_id)
- .expect("each unstable `LintExpectationId` must have a matching stable id");
+ .expect("each unstable `LintExpectationId` must have a matching stable id")
+ .normalize();
stable_id.set_lint_index(lint_index);
*expectation_id = stable_id;
@@ -645,6 +567,11 @@ impl Diagnostic {
style: SuggestionStyle,
) -> &mut Self {
assert!(!suggestion.is_empty());
+ debug_assert!(
+ !(suggestion.iter().any(|(sp, text)| sp.is_empty() && text.is_empty())),
+ "Span must not be empty and have no suggestion"
+ );
+
self.push_suggestion(CodeSuggestion {
substitutions: vec![Substitution {
parts: suggestion
@@ -671,19 +598,12 @@ impl Diagnostic {
suggestion: Vec<(Span, String)>,
applicability: Applicability,
) -> &mut Self {
- assert!(!suggestion.is_empty());
- self.push_suggestion(CodeSuggestion {
- substitutions: vec![Substitution {
- parts: suggestion
- .into_iter()
- .map(|(span, snippet)| SubstitutionPart { snippet, span })
- .collect(),
- }],
- msg: self.subdiagnostic_message_to_diagnostic_message(msg),
- style: SuggestionStyle::CompletelyHidden,
+ self.multipart_suggestion_with_style(
+ msg,
+ suggestion,
applicability,
- });
- self
+ SuggestionStyle::CompletelyHidden,
+ )
}
/// Prints out a message with a suggested edit of the code.
@@ -729,6 +649,10 @@ impl Diagnostic {
applicability: Applicability,
style: SuggestionStyle,
) -> &mut Self {
+ debug_assert!(
+ !(sp.is_empty() && suggestion.to_string().is_empty()),
+ "Span must not be empty and have no suggestion"
+ );
self.push_suggestion(CodeSuggestion {
substitutions: vec![Substitution {
parts: vec![SubstitutionPart { snippet: suggestion.to_string(), span: sp }],
@@ -767,8 +691,32 @@ impl Diagnostic {
suggestions: impl Iterator<Item = String>,
applicability: Applicability,
) -> &mut Self {
+ self.span_suggestions_with_style(
+ sp,
+ msg,
+ suggestions,
+ applicability,
+ SuggestionStyle::ShowCode,
+ )
+ }
+
+ /// [`Diagnostic::span_suggestions()`] but you can set the [`SuggestionStyle`].
+ pub fn span_suggestions_with_style(
+ &mut self,
+ sp: Span,
+ msg: impl Into<SubdiagnosticMessage>,
+ suggestions: impl Iterator<Item = String>,
+ applicability: Applicability,
+ style: SuggestionStyle,
+ ) -> &mut Self {
let mut suggestions: Vec<_> = suggestions.collect();
suggestions.sort();
+
+ debug_assert!(
+ !(sp.is_empty() && suggestions.iter().any(|suggestion| suggestion.is_empty())),
+ "Span must not be empty and have no suggestion"
+ );
+
let substitutions = suggestions
.into_iter()
.map(|snippet| Substitution { parts: vec![SubstitutionPart { snippet, span: sp }] })
@@ -776,22 +724,33 @@ impl Diagnostic {
self.push_suggestion(CodeSuggestion {
substitutions,
msg: self.subdiagnostic_message_to_diagnostic_message(msg),
- style: SuggestionStyle::ShowCode,
+ style,
applicability,
});
self
}
- /// Prints out a message with multiple suggested edits of the code.
- /// See also [`Diagnostic::span_suggestion()`].
+ /// Prints out a message with multiple suggested edits of the code, where each edit consists of
+ /// multiple parts.
+ /// See also [`Diagnostic::multipart_suggestion()`].
pub fn multipart_suggestions(
&mut self,
msg: impl Into<SubdiagnosticMessage>,
suggestions: impl Iterator<Item = Vec<(Span, String)>>,
applicability: Applicability,
) -> &mut Self {
+ let suggestions: Vec<_> = suggestions.collect();
+ debug_assert!(
+ !(suggestions
+ .iter()
+ .flat_map(|suggs| suggs)
+ .any(|(sp, suggestion)| sp.is_empty() && suggestion.is_empty())),
+ "Span must not be empty and have no suggestion"
+ );
+
self.push_suggestion(CodeSuggestion {
substitutions: suggestions
+ .into_iter()
.map(|sugg| Substitution {
parts: sugg
.into_iter()
@@ -805,6 +764,7 @@ impl Diagnostic {
});
self
}
+
/// Prints out a message with a suggested edit of the code. If the suggestion is presented
/// inline, it will only show the message and not the suggestion.
///
@@ -870,13 +830,30 @@ impl Diagnostic {
self
}
- /// Add a subdiagnostic from a type that implements `SessionSubdiagnostic` - see
- /// [rustc_macros::SessionSubdiagnostic].
- pub fn subdiagnostic(&mut self, subdiagnostic: impl AddSubdiagnostic) -> &mut Self {
+ /// Add a subdiagnostic from a type that implements `Subdiagnostic` (see
+ /// [rustc_macros::Subdiagnostic]).
+ pub fn subdiagnostic(&mut self, subdiagnostic: impl AddToDiagnostic) -> &mut Self {
subdiagnostic.add_to_diagnostic(self);
self
}
+ /// Add a subdiagnostic from a type that implements `Subdiagnostic` (see
+ /// [rustc_macros::Subdiagnostic]). Performs eager translation of any translatable messages
+ /// used in the subdiagnostic, so suitable for use with repeated messages (i.e. re-use of
+ /// interpolated variables).
+ pub fn eager_subdiagnostic(
+ &mut self,
+ handler: &crate::Handler,
+ subdiagnostic: impl AddToDiagnostic,
+ ) -> &mut Self {
+ subdiagnostic.add_to_diagnostic_with(self, |diag, msg| {
+ let args = diag.args();
+ let msg = diag.subdiagnostic_message_to_diagnostic_message(msg);
+ handler.eagerly_translate(msg, args)
+ });
+ self
+ }
+
pub fn set_span<S: Into<MultiSpan>>(&mut self, sp: S) -> &mut Self {
self.span = sp.into();
if let Some(span) = self.span.primary_span() {
@@ -909,8 +886,11 @@ impl Diagnostic {
self
}
- pub fn args(&self) -> &[DiagnosticArg<'static>] {
- &self.args
+ // Exact iteration order of diagnostic arguments shouldn't make a difference to output because
+ // they're only used in interpolation.
+ #[allow(rustc::potential_query_instability)]
+ pub fn args<'a>(&'a self) -> impl Iterator<Item = DiagnosticArg<'a, 'static>> {
+ self.args.iter()
}
pub fn set_arg(
@@ -918,7 +898,7 @@ impl Diagnostic {
name: impl Into<Cow<'static, str>>,
arg: impl IntoDiagnosticArg,
) -> &mut Self {
- self.args.push((name.into(), arg.into_diagnostic_arg()));
+ self.args.insert(name.into(), arg.into_diagnostic_arg());
self
}
@@ -929,7 +909,7 @@ impl Diagnostic {
/// Helper function that takes a `SubdiagnosticMessage` and returns a `DiagnosticMessage` by
/// combining it with the primary message of the diagnostic (if translatable, otherwise it just
/// passes the user's string along).
- fn subdiagnostic_message_to_diagnostic_message(
+ pub(crate) fn subdiagnostic_message_to_diagnostic_message(
&self,
attr: impl Into<SubdiagnosticMessage>,
) -> DiagnosticMessage {
@@ -966,12 +946,12 @@ impl Diagnostic {
fn sub_with_highlights<M: Into<SubdiagnosticMessage>>(
&mut self,
level: Level,
- mut message: Vec<(M, Style)>,
+ message: Vec<(M, Style)>,
span: MultiSpan,
render_span: Option<MultiSpan>,
) {
let message = message
- .drain(..)
+ .into_iter()
.map(|m| (self.subdiagnostic_message_to_diagnostic_message(m.0), m.1))
.collect();
let sub = SubDiagnostic { level, message, span, render_span };
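
The `AddToDiagnostic` change turns `add_to_diagnostic` into a default method that forwards to `add_to_diagnostic_with`, whose closure parameter is the hook `eager_subdiagnostic` uses to translate messages eagerly. Here is a self-contained sketch of that shape, with `Vec<String>` and `String` standing in for `Diagnostic` and `SubdiagnosticMessage`, and `NoteOnlyOnce` as an invented example subdiagnostic:

// Minimal stand-alone model of the new subdiagnostic trait shape.
trait AddToDiagnostic: Sized {
    // Default entry point: same behaviour as before, messages passed through untouched.
    fn add_to_diagnostic(self, diag: &mut Vec<String>) {
        self.add_to_diagnostic_with(diag, |_, msg| msg);
    }

    // Callers such as `eager_subdiagnostic` supply `f` to rewrite (e.g. eagerly
    // translate) every message the subdiagnostic attaches.
    fn add_to_diagnostic_with<F>(self, diag: &mut Vec<String>, f: F)
    where
        F: Fn(&mut Vec<String>, String) -> String;
}

struct NoteOnlyOnce;

impl AddToDiagnostic for NoteOnlyOnce {
    fn add_to_diagnostic_with<F>(self, diag: &mut Vec<String>, f: F)
    where
        F: Fn(&mut Vec<String>, String) -> String,
    {
        // Every message goes through `f` before being attached.
        let msg = f(diag, "this attribute has no effect here".to_string());
        diag.push(msg);
    }
}

fn main() {
    let mut diag = Vec::new();
    NoteOnlyOnce.add_to_diagnostic(&mut diag); // identity `f`
    NoteOnlyOnce.add_to_diagnostic_with(&mut diag, |_, m| m.to_uppercase()); // custom `f`
    println!("{:?}", diag);
}
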
diff --git a/compiler/rustc_errors/src/diagnostic_builder.rs b/compiler/rustc_errors/src/diagnostic_builder.rs
index 9e68ee282..9b41234dc 100644
--- a/compiler/rustc_errors/src/diagnostic_builder.rs
+++ b/compiler/rustc_errors/src/diagnostic_builder.rs
@@ -5,6 +5,7 @@ use crate::{
};
use crate::{Handler, Level, MultiSpan, StashKey};
use rustc_lint_defs::Applicability;
+use rustc_span::source_map::Spanned;
use rustc_span::Span;
use std::borrow::Cow;
@@ -12,7 +13,28 @@ use std::fmt::{self, Debug};
use std::marker::PhantomData;
use std::ops::{Deref, DerefMut};
use std::thread::panicking;
-use tracing::debug;
+
+/// Trait implemented by error types. This should not be implemented manually. Instead, use
+/// `#[derive(Diagnostic)]` -- see [rustc_macros::Diagnostic].
+#[cfg_attr(bootstrap, rustc_diagnostic_item = "SessionDiagnostic")]
+#[cfg_attr(not(bootstrap), rustc_diagnostic_item = "IntoDiagnostic")]
+pub trait IntoDiagnostic<'a, T: EmissionGuarantee = ErrorGuaranteed> {
+    /// Write `self` out as a diagnostic using the given `Handler`.
+ #[must_use]
+ fn into_diagnostic(self, handler: &'a Handler) -> DiagnosticBuilder<'a, T>;
+}
+
+impl<'a, T, E> IntoDiagnostic<'a, E> for Spanned<T>
+where
+ T: IntoDiagnostic<'a, E>,
+ E: EmissionGuarantee,
+{
+ fn into_diagnostic(self, handler: &'a Handler) -> DiagnosticBuilder<'a, E> {
+ let mut diag = self.node.into_diagnostic(handler);
+ diag.set_span(self.span);
+ diag
+ }
+}
/// Used for emitting structured error messages and other diagnostic information.
///
@@ -84,6 +106,13 @@ pub trait EmissionGuarantee: Sized {
/// of `Self` without actually performing the emission.
#[track_caller]
fn diagnostic_builder_emit_producing_guarantee(db: &mut DiagnosticBuilder<'_, Self>) -> Self;
+
+ /// Creates a new `DiagnosticBuilder` that will return this type of guarantee.
+ #[track_caller]
+ fn make_diagnostic_builder(
+ handler: &Handler,
+ msg: impl Into<DiagnosticMessage>,
+ ) -> DiagnosticBuilder<'_, Self>;
}
/// Private module for sealing the `IsError` helper trait.
@@ -166,6 +195,15 @@ impl EmissionGuarantee for ErrorGuaranteed {
}
}
}
+
+ fn make_diagnostic_builder(
+ handler: &Handler,
+ msg: impl Into<DiagnosticMessage>,
+ ) -> DiagnosticBuilder<'_, Self> {
+ DiagnosticBuilder::new_guaranteeing_error::<_, { Level::Error { lint: false } }>(
+ handler, msg,
+ )
+ }
}
impl<'a> DiagnosticBuilder<'a, ()> {
@@ -208,6 +246,63 @@ impl EmissionGuarantee for () {
DiagnosticBuilderState::AlreadyEmittedOrDuringCancellation => {}
}
}
+
+ fn make_diagnostic_builder(
+ handler: &Handler,
+ msg: impl Into<DiagnosticMessage>,
+ ) -> DiagnosticBuilder<'_, Self> {
+ DiagnosticBuilder::new(handler, Level::Warning(None), msg)
+ }
+}
+
+/// Marker type which enables implementation of `create_note` and `emit_note` functions for
+/// note-without-error struct diagnostics.
+#[derive(Copy, Clone)]
+pub struct Noted;
+
+impl<'a> DiagnosticBuilder<'a, Noted> {
+ /// Convenience function for internal use, clients should use one of the
+ /// `struct_*` methods on [`Handler`].
+ pub(crate) fn new_note(handler: &'a Handler, message: impl Into<DiagnosticMessage>) -> Self {
+ let diagnostic = Diagnostic::new_with_code(Level::Note, None, message);
+ Self::new_diagnostic_note(handler, diagnostic)
+ }
+
+ /// Creates a new `DiagnosticBuilder` with an already constructed
+ /// diagnostic.
+ pub(crate) fn new_diagnostic_note(handler: &'a Handler, diagnostic: Diagnostic) -> Self {
+ debug!("Created new diagnostic");
+ Self {
+ inner: DiagnosticBuilderInner {
+ state: DiagnosticBuilderState::Emittable(handler),
+ diagnostic: Box::new(diagnostic),
+ },
+ _marker: PhantomData,
+ }
+ }
+}
+
+impl EmissionGuarantee for Noted {
+ fn diagnostic_builder_emit_producing_guarantee(db: &mut DiagnosticBuilder<'_, Self>) -> Self {
+ match db.inner.state {
+ // First `.emit()` call, the `&Handler` is still available.
+ DiagnosticBuilderState::Emittable(handler) => {
+ db.inner.state = DiagnosticBuilderState::AlreadyEmittedOrDuringCancellation;
+ handler.emit_diagnostic(&mut db.inner.diagnostic);
+ }
+ // `.emit()` was previously called, disallowed from repeating it.
+ DiagnosticBuilderState::AlreadyEmittedOrDuringCancellation => {}
+ }
+
+ Noted
+ }
+
+ fn make_diagnostic_builder(
+ handler: &Handler,
+ msg: impl Into<DiagnosticMessage>,
+ ) -> DiagnosticBuilder<'_, Self> {
+ DiagnosticBuilder::new_note(handler, msg)
+ }
}
impl<'a> DiagnosticBuilder<'a, !> {
@@ -247,6 +342,13 @@ impl EmissionGuarantee for ! {
// Then fatally error, returning `!`
crate::FatalError.raise()
}
+
+ fn make_diagnostic_builder(
+ handler: &Handler,
+ msg: impl Into<DiagnosticMessage>,
+ ) -> DiagnosticBuilder<'_, Self> {
+ DiagnosticBuilder::new_fatal(handler, msg)
+ }
}
/// In general, the `DiagnosticBuilder` uses deref to allow access to
@@ -541,7 +643,7 @@ impl<'a, G: EmissionGuarantee> DiagnosticBuilder<'a, G> {
forward!(pub fn subdiagnostic(
&mut self,
- subdiagnostic: impl crate::AddSubdiagnostic
+ subdiagnostic: impl crate::AddToDiagnostic
) -> &mut Self);
}
@@ -566,7 +668,7 @@ impl Drop for DiagnosticBuilderInner<'_> {
),
));
handler.emit_diagnostic(&mut self.diagnostic);
- panic!();
+ panic!("error was constructed but not emitted");
}
}
// `.emit()` was previously called, or maybe we're during `.cancel()`.
@@ -590,27 +692,3 @@ macro_rules! struct_span_err {
macro_rules! error_code {
($code:ident) => {{ $crate::DiagnosticId::Error(stringify!($code).to_owned()) }};
}
-
-/// Wrapper around a `DiagnosticBuilder` for creating lints.
-pub struct LintDiagnosticBuilder<'a, G: EmissionGuarantee>(DiagnosticBuilder<'a, G>);
-
-impl<'a, G: EmissionGuarantee> LintDiagnosticBuilder<'a, G> {
- #[rustc_lint_diagnostics]
- /// Return the inner `DiagnosticBuilder`, first setting the primary message to `msg`.
- pub fn build(mut self, msg: impl Into<DiagnosticMessage>) -> DiagnosticBuilder<'a, G> {
- self.0.set_primary_message(msg);
- self.0.set_is_lint();
- self.0
- }
-
- /// Create a `LintDiagnosticBuilder` from some existing `DiagnosticBuilder`.
- pub fn new(err: DiagnosticBuilder<'a, G>) -> LintDiagnosticBuilder<'a, G> {
- LintDiagnosticBuilder(err)
- }
-}
-
-impl<'a> LintDiagnosticBuilder<'a, ErrorGuaranteed> {
- pub fn forget_guarantee(self) -> LintDiagnosticBuilder<'a, ()> {
- LintDiagnosticBuilder(self.0.forget_guarantee())
- }
-}
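
A minimal sketch of how the new `make_diagnostic_builder` hook is exercised in practice, through the `Handler::struct_diagnostic` method added further down in this patch. The sketch itself is not part of the patch; `my_handler` and the message text are placeholder assumptions.

fn emit_note_example(my_handler: &Handler) -> Noted {
    // The `Noted` annotation selects `<Noted as EmissionGuarantee>::make_diagnostic_builder`,
    // which in turn calls `DiagnosticBuilder::new_note`.
    let mut db: DiagnosticBuilder<'_, Noted> =
        my_handler.struct_diagnostic("example note text");
    db.emit()
}
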
diff --git a/compiler/rustc_errors/src/diagnostic_impls.rs b/compiler/rustc_errors/src/diagnostic_impls.rs
new file mode 100644
index 000000000..7640b2919
--- /dev/null
+++ b/compiler/rustc_errors/src/diagnostic_impls.rs
@@ -0,0 +1,222 @@
+use crate::{
+ fluent, DiagnosticArgValue, DiagnosticBuilder, Handler, IntoDiagnostic, IntoDiagnosticArg,
+};
+use rustc_ast as ast;
+use rustc_ast_pretty::pprust;
+use rustc_hir as hir;
+use rustc_lint_defs::Level;
+use rustc_span::edition::Edition;
+use rustc_span::symbol::{Ident, MacroRulesNormalizedIdent, Symbol};
+use rustc_target::abi::TargetDataLayoutErrors;
+use rustc_target::spec::{PanicStrategy, SplitDebuginfo, StackProtector, TargetTriple};
+use std::borrow::Cow;
+use std::fmt;
+use std::num::ParseIntError;
+use std::path::{Path, PathBuf};
+
+pub struct DiagnosticArgFromDisplay<'a>(pub &'a dyn fmt::Display);
+
+impl IntoDiagnosticArg for DiagnosticArgFromDisplay<'_> {
+ fn into_diagnostic_arg(self) -> DiagnosticArgValue<'static> {
+ self.0.to_string().into_diagnostic_arg()
+ }
+}
+
+impl<'a> From<&'a dyn fmt::Display> for DiagnosticArgFromDisplay<'a> {
+ fn from(t: &'a dyn fmt::Display) -> Self {
+ DiagnosticArgFromDisplay(t)
+ }
+}
+
+impl<'a, T: fmt::Display> From<&'a T> for DiagnosticArgFromDisplay<'a> {
+ fn from(t: &'a T) -> Self {
+ DiagnosticArgFromDisplay(t)
+ }
+}
+
+macro_rules! into_diagnostic_arg_using_display {
+ ($( $ty:ty ),+ $(,)?) => {
+ $(
+ impl IntoDiagnosticArg for $ty {
+ fn into_diagnostic_arg(self) -> DiagnosticArgValue<'static> {
+ self.to_string().into_diagnostic_arg()
+ }
+ }
+ )+
+ }
+}
+
+into_diagnostic_arg_using_display!(
+ i8,
+ u8,
+ i16,
+ u16,
+ i32,
+ u32,
+ i64,
+ u64,
+ i128,
+ u128,
+ std::io::Error,
+ std::num::NonZeroU32,
+ hir::Target,
+ Edition,
+ Ident,
+ MacroRulesNormalizedIdent,
+ ParseIntError,
+ StackProtector,
+ &TargetTriple,
+ SplitDebuginfo
+);
+
+impl IntoDiagnosticArg for bool {
+ fn into_diagnostic_arg(self) -> DiagnosticArgValue<'static> {
+ if self {
+ DiagnosticArgValue::Str(Cow::Borrowed("true"))
+ } else {
+ DiagnosticArgValue::Str(Cow::Borrowed("false"))
+ }
+ }
+}
+
+impl IntoDiagnosticArg for char {
+ fn into_diagnostic_arg(self) -> DiagnosticArgValue<'static> {
+ DiagnosticArgValue::Str(Cow::Owned(format!("{:?}", self)))
+ }
+}
+
+impl IntoDiagnosticArg for Symbol {
+ fn into_diagnostic_arg(self) -> DiagnosticArgValue<'static> {
+ self.to_ident_string().into_diagnostic_arg()
+ }
+}
+
+impl<'a> IntoDiagnosticArg for &'a str {
+ fn into_diagnostic_arg(self) -> DiagnosticArgValue<'static> {
+ self.to_string().into_diagnostic_arg()
+ }
+}
+
+impl IntoDiagnosticArg for String {
+ fn into_diagnostic_arg(self) -> DiagnosticArgValue<'static> {
+ DiagnosticArgValue::Str(Cow::Owned(self))
+ }
+}
+
+impl<'a> IntoDiagnosticArg for &'a Path {
+ fn into_diagnostic_arg(self) -> DiagnosticArgValue<'static> {
+ DiagnosticArgValue::Str(Cow::Owned(self.display().to_string()))
+ }
+}
+
+impl IntoDiagnosticArg for PathBuf {
+ fn into_diagnostic_arg(self) -> DiagnosticArgValue<'static> {
+ DiagnosticArgValue::Str(Cow::Owned(self.display().to_string()))
+ }
+}
+
+impl IntoDiagnosticArg for usize {
+ fn into_diagnostic_arg(self) -> DiagnosticArgValue<'static> {
+ DiagnosticArgValue::Number(self)
+ }
+}
+
+impl IntoDiagnosticArg for PanicStrategy {
+ fn into_diagnostic_arg(self) -> DiagnosticArgValue<'static> {
+ DiagnosticArgValue::Str(Cow::Owned(self.desc().to_string()))
+ }
+}
+
+impl IntoDiagnosticArg for hir::ConstContext {
+ fn into_diagnostic_arg(self) -> DiagnosticArgValue<'static> {
+ DiagnosticArgValue::Str(Cow::Borrowed(match self {
+ hir::ConstContext::ConstFn => "constant function",
+ hir::ConstContext::Static(_) => "static",
+ hir::ConstContext::Const => "constant",
+ }))
+ }
+}
+
+impl IntoDiagnosticArg for ast::Path {
+ fn into_diagnostic_arg(self) -> DiagnosticArgValue<'static> {
+ DiagnosticArgValue::Str(Cow::Owned(pprust::path_to_string(&self)))
+ }
+}
+
+impl IntoDiagnosticArg for ast::token::Token {
+ fn into_diagnostic_arg(self) -> DiagnosticArgValue<'static> {
+ DiagnosticArgValue::Str(pprust::token_to_string(&self))
+ }
+}
+
+impl IntoDiagnosticArg for ast::token::TokenKind {
+ fn into_diagnostic_arg(self) -> DiagnosticArgValue<'static> {
+ DiagnosticArgValue::Str(pprust::token_kind_to_string(&self))
+ }
+}
+
+impl IntoDiagnosticArg for Level {
+ fn into_diagnostic_arg(self) -> DiagnosticArgValue<'static> {
+ DiagnosticArgValue::Str(Cow::Borrowed(match self {
+ Level::Allow => "-A",
+ Level::Warn => "-W",
+ Level::ForceWarn(_) => "--force-warn",
+ Level::Deny => "-D",
+ Level::Forbid => "-F",
+ Level::Expect(_) => {
+ unreachable!("lints with the level of `expect` should not run this code");
+ }
+ }))
+ }
+}
+
+impl IntoDiagnostic<'_, !> for TargetDataLayoutErrors<'_> {
+ fn into_diagnostic(self, handler: &Handler) -> DiagnosticBuilder<'_, !> {
+ let mut diag;
+ match self {
+ TargetDataLayoutErrors::InvalidAddressSpace { addr_space, err, cause } => {
+ diag = handler.struct_fatal(fluent::errors_target_invalid_address_space);
+ diag.set_arg("addr_space", addr_space);
+ diag.set_arg("cause", cause);
+ diag.set_arg("err", err);
+ diag
+ }
+ TargetDataLayoutErrors::InvalidBits { kind, bit, cause, err } => {
+ diag = handler.struct_fatal(fluent::errors_target_invalid_bits);
+ diag.set_arg("kind", kind);
+ diag.set_arg("bit", bit);
+ diag.set_arg("cause", cause);
+ diag.set_arg("err", err);
+ diag
+ }
+ TargetDataLayoutErrors::MissingAlignment { cause } => {
+ diag = handler.struct_fatal(fluent::errors_target_missing_alignment);
+ diag.set_arg("cause", cause);
+ diag
+ }
+ TargetDataLayoutErrors::InvalidAlignment { cause, err } => {
+ diag = handler.struct_fatal(fluent::errors_target_invalid_alignment);
+ diag.set_arg("cause", cause);
+ diag.set_arg("err", err);
+ diag
+ }
+ TargetDataLayoutErrors::InconsistentTargetArchitecture { dl, target } => {
+ diag = handler.struct_fatal(fluent::errors_target_inconsistent_architecture);
+ diag.set_arg("dl", dl);
+ diag.set_arg("target", target);
+ diag
+ }
+ TargetDataLayoutErrors::InconsistentTargetPointerWidth { pointer_size, target } => {
+ diag = handler.struct_fatal(fluent::errors_target_inconsistent_pointer_width);
+ diag.set_arg("pointer_size", pointer_size);
+ diag.set_arg("target", target);
+ diag
+ }
+ TargetDataLayoutErrors::InvalidBitsSize { err } => {
+ diag = handler.struct_fatal(fluent::errors_target_invalid_bits_size);
+ diag.set_arg("err", err);
+ diag
+ }
+ }
+ }
+}
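
Downstream types can hook into the same machinery by implementing `IntoDiagnosticArg` themselves. The sketch below is not part of this patch; `CrateName` is a hypothetical example type.

pub struct CrateName(pub String);

impl IntoDiagnosticArg for CrateName {
    fn into_diagnostic_arg(self) -> DiagnosticArgValue<'static> {
        // Reuse the owned string directly, like the `String` impl above.
        DiagnosticArgValue::Str(Cow::Owned(self.0))
    }
}
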
diff --git a/compiler/rustc_errors/src/emitter.rs b/compiler/rustc_errors/src/emitter.rs
index 61d953cd6..cd6413bc3 100644
--- a/compiler/rustc_errors/src/emitter.rs
+++ b/compiler/rustc_errors/src/emitter.rs
@@ -14,15 +14,15 @@ use rustc_span::{FileLines, SourceFile, Span};
use crate::snippet::{Annotation, AnnotationType, Line, MultilineAnnotation, Style, StyledString};
use crate::styled_buffer::StyledBuffer;
+use crate::translation::{to_fluent_args, Translate};
use crate::{
- CodeSuggestion, Diagnostic, DiagnosticArg, DiagnosticId, DiagnosticMessage, FluentBundle,
- Handler, LazyFallbackBundle, Level, MultiSpan, SubDiagnostic, SubstitutionHighlight,
- SuggestionStyle,
+ CodeSuggestion, Diagnostic, DiagnosticId, DiagnosticMessage, FluentBundle, Handler,
+ LazyFallbackBundle, Level, MultiSpan, SubDiagnostic, SubstitutionHighlight, SuggestionStyle,
};
use rustc_lint_defs::pluralize;
-use rustc_data_structures::fx::FxHashMap;
+use rustc_data_structures::fx::{FxHashMap, FxIndexMap};
use rustc_data_structures::sync::Lrc;
use rustc_error_messages::FluentArgs;
use rustc_span::hygiene::{ExpnKind, MacroKind};
@@ -34,7 +34,6 @@ use std::iter;
use std::path::Path;
use termcolor::{Ansi, BufferWriter, ColorChoice, ColorSpec, StandardStream};
use termcolor::{Buffer, Color, WriteColor};
-use tracing::*;
/// Default column width, used in tests and when terminal dimensions cannot be determined.
const DEFAULT_COLUMN_WIDTH: usize = 140;
@@ -200,7 +199,7 @@ impl Margin {
const ANONYMIZED_LINE_NUM: &str = "LL";
/// Emitter trait for emitting errors.
-pub trait Emitter {
+pub trait Emitter: Translate {
/// Emit a structured diagnostic.
fn emit_diagnostic(&mut self, diag: &Diagnostic);
@@ -231,84 +230,6 @@ pub trait Emitter {
fn source_map(&self) -> Option<&Lrc<SourceMap>>;
- /// Return `FluentBundle` with localized diagnostics for the locale requested by the user. If no
- /// language was requested by the user then this will be `None` and `fallback_fluent_bundle`
- /// should be used.
- fn fluent_bundle(&self) -> Option<&Lrc<FluentBundle>>;
-
- /// Return `FluentBundle` with localized diagnostics for the default locale of the compiler.
- /// Used when the user has not requested a specific language or when a localized diagnostic is
- /// unavailable for the requested locale.
- fn fallback_fluent_bundle(&self) -> &FluentBundle;
-
- /// Convert diagnostic arguments (a rustc internal type that exists to implement
- /// `Encodable`/`Decodable`) into `FluentArgs` which is necessary to perform translation.
- ///
- /// Typically performed once for each diagnostic at the start of `emit_diagnostic` and then
- /// passed around as a reference thereafter.
- fn to_fluent_args<'arg>(&self, args: &[DiagnosticArg<'arg>]) -> FluentArgs<'arg> {
- FromIterator::from_iter(args.to_vec().drain(..))
- }
-
- /// Convert `DiagnosticMessage`s to a string, performing translation if necessary.
- fn translate_messages(
- &self,
- messages: &[(DiagnosticMessage, Style)],
- args: &FluentArgs<'_>,
- ) -> Cow<'_, str> {
- Cow::Owned(
- messages.iter().map(|(m, _)| self.translate_message(m, args)).collect::<String>(),
- )
- }
-
- /// Convert a `DiagnosticMessage` to a string, performing translation if necessary.
- fn translate_message<'a>(
- &'a self,
- message: &'a DiagnosticMessage,
- args: &'a FluentArgs<'_>,
- ) -> Cow<'_, str> {
- trace!(?message, ?args);
- let (identifier, attr) = match message {
- DiagnosticMessage::Str(msg) => return Cow::Borrowed(&msg),
- DiagnosticMessage::FluentIdentifier(identifier, attr) => (identifier, attr),
- };
-
- let bundle = match self.fluent_bundle() {
- Some(bundle) if bundle.has_message(&identifier) => bundle,
- _ => self.fallback_fluent_bundle(),
- };
-
- let message = bundle.get_message(&identifier).expect("missing diagnostic in fluent bundle");
- let value = match attr {
- Some(attr) => {
- if let Some(attr) = message.get_attribute(attr) {
- attr.value()
- } else {
- panic!("missing attribute `{attr}` in fluent message `{identifier}`")
- }
- }
- None => {
- if let Some(value) = message.value() {
- value
- } else {
- panic!("missing value in fluent message `{identifier}`")
- }
- }
- };
-
- let mut err = vec![];
- let translated = bundle.format_pattern(value, Some(&args), &mut err);
- trace!(?translated, ?err);
- debug_assert!(
- err.is_empty(),
- "identifier: {:?}, args: {:?}, errors: {:?}",
- identifier,
- args,
- err
- );
- translated
- }
-
/// Formats the substitutions of the primary_span
///
/// There are a lot of conditions to this method, but in short:
@@ -598,11 +519,7 @@ pub trait Emitter {
}
}
-impl Emitter for EmitterWriter {
- fn source_map(&self) -> Option<&Lrc<SourceMap>> {
- self.sm.as_ref()
- }
-
+impl Translate for EmitterWriter {
fn fluent_bundle(&self) -> Option<&Lrc<FluentBundle>> {
self.fluent_bundle.as_ref()
}
@@ -610,9 +527,15 @@ impl Emitter for EmitterWriter {
fn fallback_fluent_bundle(&self) -> &FluentBundle {
&**self.fallback_bundle
}
+}
+
+impl Emitter for EmitterWriter {
+ fn source_map(&self) -> Option<&Lrc<SourceMap>> {
+ self.sm.as_ref()
+ }
fn emit_diagnostic(&mut self, diag: &Diagnostic) {
- let fluent_args = self.to_fluent_args(diag.args());
+ let fluent_args = to_fluent_args(diag.args());
let mut children = diag.children.clone();
let (mut primary_span, suggestions) = self.primary_span_formatted(&diag, &fluent_args);
@@ -654,11 +577,7 @@ pub struct SilentEmitter {
pub fatal_note: Option<String>,
}
-impl Emitter for SilentEmitter {
- fn source_map(&self) -> Option<&Lrc<SourceMap>> {
- None
- }
-
+impl Translate for SilentEmitter {
fn fluent_bundle(&self) -> Option<&Lrc<FluentBundle>> {
None
}
@@ -666,6 +585,12 @@ impl Emitter for SilentEmitter {
fn fallback_fluent_bundle(&self) -> &FluentBundle {
panic!("silent emitter attempted to translate message")
}
+}
+
+impl Emitter for SilentEmitter {
+ fn source_map(&self) -> Option<&Lrc<SourceMap>> {
+ None
+ }
fn emit_diagnostic(&mut self, d: &Diagnostic) {
if d.level == Level::Fatal {
@@ -1562,7 +1487,7 @@ impl EmitterWriter {
);
// Contains the vertical lines' positions for active multiline annotations
- let mut multilines = FxHashMap::default();
+ let mut multilines = FxIndexMap::default();
// Get the left-side margin to remove it
let mut whitespace_margin = usize::MAX;
@@ -1779,7 +1704,7 @@ impl EmitterWriter {
{
notice_capitalization |= only_capitalization;
- let has_deletion = parts.iter().any(|p| p.is_deletion());
+ let has_deletion = parts.iter().any(|p| p.is_deletion(sm));
let is_multiline = complete.lines().count() > 1;
if let Some(span) = span.primary_span() {
@@ -1955,16 +1880,23 @@ impl EmitterWriter {
let span_start_pos = sm.lookup_char_pos(part.span.lo()).col_display;
let span_end_pos = sm.lookup_char_pos(part.span.hi()).col_display;
+ // If this addition is _only_ whitespace, then don't trim it,
+ // or else we're just not rendering anything.
+ let is_whitespace_addition = part.snippet.trim().is_empty();
+
// Do not underline the leading...
- let start = part.snippet.len().saturating_sub(part.snippet.trim_start().len());
+ let start = if is_whitespace_addition {
+ 0
+ } else {
+ part.snippet.len().saturating_sub(part.snippet.trim_start().len())
+ };
// ...or trailing spaces. Account for substitutions containing unicode
// characters.
- let sub_len: usize = part
- .snippet
- .trim()
- .chars()
- .map(|ch| unicode_width::UnicodeWidthChar::width(ch).unwrap_or(1))
- .sum();
+ let sub_len: usize =
+ if is_whitespace_addition { &part.snippet } else { part.snippet.trim() }
+ .chars()
+ .map(|ch| unicode_width::UnicodeWidthChar::width(ch).unwrap_or(1))
+ .sum();
let offset: isize = offsets
.iter()
@@ -2205,7 +2137,7 @@ impl EmitterWriter {
}
}
-#[derive(Clone, Copy)]
+#[derive(Clone, Copy, Debug)]
enum DisplaySuggestion {
Underline,
Diff,
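
With `Translate` split out of `Emitter`, a new emitter now implements the two traits separately. The sketch below is not part of this patch; `DiscardEmitter` is a hypothetical emitter that drops every diagnostic, mirroring `SilentEmitter` above.

pub struct DiscardEmitter;

impl Translate for DiscardEmitter {
    fn fluent_bundle(&self) -> Option<&Lrc<FluentBundle>> {
        None
    }

    fn fallback_fluent_bundle(&self) -> &FluentBundle {
        panic!("DiscardEmitter attempted to translate a message")
    }
}

impl Emitter for DiscardEmitter {
    fn emit_diagnostic(&mut self, _diag: &Diagnostic) {
        // Intentionally drop the diagnostic.
    }

    fn source_map(&self) -> Option<&Lrc<SourceMap>> {
        None
    }
}
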
diff --git a/compiler/rustc_errors/src/json.rs b/compiler/rustc_errors/src/json.rs
index b8cd334b4..4cc7be47f 100644
--- a/compiler/rustc_errors/src/json.rs
+++ b/compiler/rustc_errors/src/json.rs
@@ -13,6 +13,7 @@ use rustc_span::source_map::{FilePathMapping, SourceMap};
use crate::emitter::{Emitter, HumanReadableErrorType};
use crate::registry::Registry;
+use crate::translation::{to_fluent_args, Translate};
use crate::DiagnosticId;
use crate::{
CodeSuggestion, FluentBundle, LazyFallbackBundle, MultiSpan, SpanLabel, SubDiagnostic,
@@ -122,6 +123,16 @@ impl JsonEmitter {
}
}
+impl Translate for JsonEmitter {
+ fn fluent_bundle(&self) -> Option<&Lrc<FluentBundle>> {
+ self.fluent_bundle.as_ref()
+ }
+
+ fn fallback_fluent_bundle(&self) -> &FluentBundle {
+ &**self.fallback_bundle
+ }
+}
+
impl Emitter for JsonEmitter {
fn emit_diagnostic(&mut self, diag: &crate::Diagnostic) {
let data = Diagnostic::from_errors_diagnostic(diag, self);
@@ -189,14 +200,6 @@ impl Emitter for JsonEmitter {
Some(&self.sm)
}
- fn fluent_bundle(&self) -> Option<&Lrc<FluentBundle>> {
- self.fluent_bundle.as_ref()
- }
-
- fn fallback_fluent_bundle(&self) -> &FluentBundle {
- &**self.fallback_bundle
- }
-
fn should_show_explain(&self) -> bool {
!matches!(self.json_rendered, HumanReadableErrorType::Short(_))
}
@@ -309,7 +312,7 @@ struct UnusedExterns<'a, 'b, 'c> {
impl Diagnostic {
fn from_errors_diagnostic(diag: &crate::Diagnostic, je: &JsonEmitter) -> Diagnostic {
- let args = je.to_fluent_args(diag.args());
+ let args = to_fluent_args(diag.args());
let sugg = diag.suggestions.iter().flatten().map(|sugg| {
let translated_message = je.translate_message(&sugg.msg, &args);
Diagnostic {
diff --git a/compiler/rustc_errors/src/lib.rs b/compiler/rustc_errors/src/lib.rs
index 2409c0b5a..0963ea71f 100644
--- a/compiler/rustc_errors/src/lib.rs
+++ b/compiler/rustc_errors/src/lib.rs
@@ -4,15 +4,13 @@
#![doc(html_root_url = "https://doc.rust-lang.org/nightly/nightly-rustc/")]
#![feature(drain_filter)]
-#![feature(backtrace)]
#![feature(if_let_guard)]
+#![feature(adt_const_params)]
#![feature(let_chains)]
-#![feature(let_else)]
#![feature(never_type)]
-#![feature(adt_const_params)]
+#![feature(result_option_inspect)]
#![feature(rustc_attrs)]
#![allow(incomplete_features)]
-#![allow(rustc::potential_query_instability)]
#[macro_use]
extern crate rustc_macros;
@@ -27,12 +25,12 @@ use Level::*;
use emitter::{is_case_difference, Emitter, EmitterWriter};
use registry::Registry;
-use rustc_data_structures::fx::{FxHashMap, FxHashSet, FxIndexMap};
+use rustc_data_structures::fx::{FxHashMap, FxHashSet, FxIndexMap, FxIndexSet};
use rustc_data_structures::stable_hasher::StableHasher;
use rustc_data_structures::sync::{self, Lock, Lrc};
use rustc_data_structures::AtomicRef;
pub use rustc_error_messages::{
- fallback_fluent_bundle, fluent, fluent_bundle, DiagnosticMessage, FluentBundle,
+ fallback_fluent_bundle, fluent, fluent_bundle, DelayDm, DiagnosticMessage, FluentBundle,
LanguageIdentifier, LazyFallbackBundle, MultiSpan, SpanLabel, SubdiagnosticMessage,
DEFAULT_LOCALE_RESOURCES,
};
@@ -53,23 +51,27 @@ use termcolor::{Color, ColorSpec};
pub mod annotate_snippet_emitter_writer;
mod diagnostic;
mod diagnostic_builder;
+mod diagnostic_impls;
pub mod emitter;
pub mod json;
mod lock;
pub mod registry;
mod snippet;
mod styled_buffer;
+pub mod translation;
+pub use diagnostic_builder::IntoDiagnostic;
pub use snippet::Style;
-pub type PResult<'a, T> = Result<T, DiagnosticBuilder<'a, ErrorGuaranteed>>;
+pub type PErr<'a> = DiagnosticBuilder<'a, ErrorGuaranteed>;
+pub type PResult<'a, T> = Result<T, PErr<'a>>;
// `PResult` is used a lot. Make sure it doesn't unintentionally get bigger.
-// (See also the comment on `DiagnosticBuilder`'s `diagnostic` field.)
+// (See also the comment on `DiagnosticBuilderInner`'s `diagnostic` field.)
#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
rustc_data_structures::static_assert_size!(PResult<'_, ()>, 16);
#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
-rustc_data_structures::static_assert_size!(PResult<'_, bool>, 24);
+rustc_data_structures::static_assert_size!(PResult<'_, bool>, 16);
#[derive(Debug, PartialEq, Eq, Clone, Copy, Hash, Encodable, Decodable)]
pub enum SuggestionStyle {
@@ -150,21 +152,20 @@ pub struct SubstitutionHighlight {
impl SubstitutionPart {
pub fn is_addition(&self, sm: &SourceMap) -> bool {
- !self.snippet.is_empty()
- && sm
- .span_to_snippet(self.span)
- .map_or(self.span.is_empty(), |snippet| snippet.trim().is_empty())
+ !self.snippet.is_empty() && !self.replaces_meaningful_content(sm)
}
- pub fn is_deletion(&self) -> bool {
- self.snippet.trim().is_empty()
+ pub fn is_deletion(&self, sm: &SourceMap) -> bool {
+ self.snippet.trim().is_empty() && self.replaces_meaningful_content(sm)
}
pub fn is_replacement(&self, sm: &SourceMap) -> bool {
- !self.snippet.is_empty()
- && sm
- .span_to_snippet(self.span)
- .map_or(!self.span.is_empty(), |snippet| !snippet.trim().is_empty())
+ !self.snippet.is_empty() && self.replaces_meaningful_content(sm)
+ }
+
+ fn replaces_meaningful_content(&self, sm: &SourceMap) -> bool {
+ sm.span_to_snippet(self.span)
+ .map_or(!self.span.is_empty(), |snippet| !snippet.trim().is_empty())
}
}
@@ -371,10 +372,11 @@ impl fmt::Display for ExplicitBug {
impl error::Error for ExplicitBug {}
pub use diagnostic::{
- AddSubdiagnostic, DecorateLint, Diagnostic, DiagnosticArg, DiagnosticArgFromDisplay,
- DiagnosticArgValue, DiagnosticId, DiagnosticStyledString, IntoDiagnosticArg, SubDiagnostic,
+ AddToDiagnostic, DecorateLint, Diagnostic, DiagnosticArg, DiagnosticArgValue, DiagnosticId,
+ DiagnosticStyledString, IntoDiagnosticArg, SubDiagnostic,
};
-pub use diagnostic_builder::{DiagnosticBuilder, EmissionGuarantee, LintDiagnosticBuilder};
+pub use diagnostic_builder::{DiagnosticBuilder, EmissionGuarantee, Noted};
+pub use diagnostic_impls::DiagnosticArgFromDisplay;
use std::backtrace::Backtrace;
/// A handler deals with errors and other compiler output.
@@ -411,8 +413,8 @@ struct HandlerInner {
/// would be unnecessary repetition.
taught_diagnostics: FxHashSet<DiagnosticId>,
- /// Used to suggest rustc --explain <error code>
- emitted_diagnostic_codes: FxHashSet<DiagnosticId>,
+ /// Used to suggest rustc --explain `<error code>`
+ emitted_diagnostic_codes: FxIndexSet<DiagnosticId>,
/// This set contains a hash of every diagnostic that has been emitted by
/// this handler. These hashes is used to avoid emitting the same error
@@ -437,11 +439,11 @@ struct HandlerInner {
/// have been converted.
check_unstable_expect_diagnostics: bool,
- /// Expected [`Diagnostic`]s store a [`LintExpectationId`] as part of
+ /// Expected [`Diagnostic`][diagnostic::Diagnostic]s store a [`LintExpectationId`] as part of
/// the lint level. [`LintExpectationId`]s created early during the compilation
/// (before `HirId`s have been defined) are not stable and can therefore not be
/// stored on disk. This buffer stores these diagnostics until the ID has been
- /// replaced by a stable [`LintExpectationId`]. The [`Diagnostic`]s are the
+ /// replaced by a stable [`LintExpectationId`]. The [`Diagnostic`][diagnostic::Diagnostic]s are then
/// submitted for storage and added to the list of fulfilled expectations.
unstable_expect_diagnostics: Vec<Diagnostic>,
@@ -455,9 +457,15 @@ struct HandlerInner {
}
/// A key denoting where from a diagnostic was stashed.
-#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
+#[derive(Copy, Clone, PartialEq, Eq, Hash)]
pub enum StashKey {
ItemNoType,
+ UnderscoreForArrayLengths,
+ EarlySyntaxWarning,
+ CallIntoMethod,
+ /// When an invalid lifetime, e.g. `'2`, should be reinterpreted
+ /// as a char literal in the parser
+ LifetimeIsChar,
}
fn default_track_diagnostic(_: &Diagnostic) {}
@@ -595,6 +603,17 @@ impl Handler {
}
}
+ /// Translate `message` eagerly with `args`.
+ pub fn eagerly_translate<'a>(
+ &self,
+ message: DiagnosticMessage,
+ args: impl Iterator<Item = DiagnosticArg<'a, 'static>>,
+ ) -> SubdiagnosticMessage {
+ let inner = self.inner.borrow();
+ let args = crate::translation::to_fluent_args(args);
+ SubdiagnosticMessage::Eager(inner.emitter.translate_message(&message, &args).to_string())
+ }
+
// This is here to not allow mutation of flags;
// as of this writing it's only used in tests in librustc_middle.
pub fn can_emit_warnings(&self) -> bool {
@@ -625,19 +644,17 @@ impl Handler {
/// Stash a given diagnostic with the given `Span` and `StashKey` as the key for later stealing.
pub fn stash_diagnostic(&self, span: Span, key: StashKey, diag: Diagnostic) {
let mut inner = self.inner.borrow_mut();
- // FIXME(Centril, #69537): Consider reintroducing panic on overwriting a stashed diagnostic
- // if/when we have a more robust macro-friendly replacement for `(span, key)` as a key.
- // See the PR for a discussion.
- inner.stashed_diagnostics.insert((span, key), diag);
+ inner.stash((span, key), diag);
}
/// Steal a previously stashed diagnostic with the given `Span` and `StashKey` as the key.
pub fn steal_diagnostic(&self, span: Span, key: StashKey) -> Option<DiagnosticBuilder<'_, ()>> {
- self.inner
- .borrow_mut()
- .stashed_diagnostics
- .remove(&(span, key))
- .map(|diag| DiagnosticBuilder::new_diagnostic(self, diag))
+ let mut inner = self.inner.borrow_mut();
+ inner.steal((span, key)).map(|diag| DiagnosticBuilder::new_diagnostic(self, diag))
+ }
+
+ pub fn has_stashed_diagnostic(&self, span: Span, key: StashKey) -> bool {
+ self.inner.borrow().stashed_diagnostics.get(&(span, key)).is_some()
}
/// Emit all stashed diagnostics.
@@ -645,6 +662,15 @@ impl Handler {
self.inner.borrow_mut().emit_stashed_diagnostics()
}
+ /// Construct a builder with the `msg` at the level appropriate for the specific `EmissionGuarantee`.
+ #[rustc_lint_diagnostics]
+ pub fn struct_diagnostic<G: EmissionGuarantee>(
+ &self,
+ msg: impl Into<DiagnosticMessage>,
+ ) -> DiagnosticBuilder<'_, G> {
+ G::make_diagnostic_builder(self, msg)
+ }
+
/// Construct a builder at the `Warning` level at the given `span` and with the `msg`.
///
/// Attempting to `.emit()` the builder will only emit if either:
@@ -1020,6 +1046,39 @@ impl Handler {
self.inner.borrow_mut().emit_diagnostic(diagnostic)
}
+ pub fn emit_err<'a>(&'a self, err: impl IntoDiagnostic<'a>) -> ErrorGuaranteed {
+ self.create_err(err).emit()
+ }
+
+ pub fn create_err<'a>(
+ &'a self,
+ err: impl IntoDiagnostic<'a>,
+ ) -> DiagnosticBuilder<'a, ErrorGuaranteed> {
+ err.into_diagnostic(self)
+ }
+
+ pub fn create_warning<'a>(
+ &'a self,
+ warning: impl IntoDiagnostic<'a, ()>,
+ ) -> DiagnosticBuilder<'a, ()> {
+ warning.into_diagnostic(self)
+ }
+
+ pub fn emit_warning<'a>(&'a self, warning: impl IntoDiagnostic<'a, ()>) {
+ self.create_warning(warning).emit()
+ }
+
+ pub fn create_fatal<'a>(
+ &'a self,
+ fatal: impl IntoDiagnostic<'a, !>,
+ ) -> DiagnosticBuilder<'a, !> {
+ fatal.into_diagnostic(self)
+ }
+
+ pub fn emit_fatal<'a>(&'a self, fatal: impl IntoDiagnostic<'a, !>) -> ! {
+ self.create_fatal(fatal).emit()
+ }
+
fn emit_diag_at_span(
&self,
mut diag: Diagnostic,
@@ -1092,6 +1151,12 @@ impl Handler {
);
std::mem::take(&mut self.inner.borrow_mut().fulfilled_expectations)
}
+
+ pub fn flush_delayed(&self) {
+ let mut inner = self.inner.lock();
+ let bugs = std::mem::replace(&mut inner.delayed_span_bugs, Vec::new());
+ inner.flush_delayed(bugs, "no errors encountered even though `delay_span_bug` issued");
+ }
}
impl HandlerInner {
@@ -1105,13 +1170,31 @@ impl HandlerInner {
/// Emit all stashed diagnostics.
fn emit_stashed_diagnostics(&mut self) -> Option<ErrorGuaranteed> {
+ let has_errors = self.has_errors();
let diags = self.stashed_diagnostics.drain(..).map(|x| x.1).collect::<Vec<_>>();
let mut reported = None;
for mut diag in diags {
+ // Decrement the count tracking the stash; emitting will increment it.
if diag.is_error() {
- reported = Some(ErrorGuaranteed(()));
+ if matches!(diag.level, Level::Error { lint: true }) {
+ self.lint_err_count -= 1;
+ } else {
+ self.err_count -= 1;
+ }
+ } else {
+ if diag.is_force_warn() {
+ self.warn_count -= 1;
+ } else {
+ // Unless they're forced, don't flush stashed warnings when
+ // there are errors, to avoid causing warning overload. The
+ // stash would've been stolen already if it were important.
+ if has_errors {
+ continue;
+ }
+ }
}
- self.emit_diagnostic(&mut diag);
+ let reported_this = self.emit_diagnostic(&mut diag);
+ reported = reported.or(reported_this);
}
reported
}
@@ -1145,7 +1228,7 @@ impl HandlerInner {
if let Some(expectation_id) = diagnostic.level.get_expectation_id() {
self.suppressed_expected_diag = true;
- self.fulfilled_expectations.insert(expectation_id);
+ self.fulfilled_expectations.insert(expectation_id.normalize());
}
if matches!(diagnostic.level, Warning(_))
@@ -1225,9 +1308,13 @@ impl HandlerInner {
}
fn treat_err_as_bug(&self) -> bool {
- self.flags
- .treat_err_as_bug
- .map_or(false, |c| self.err_count() + self.lint_err_count >= c.get())
+ self.flags.treat_err_as_bug.map_or(false, |c| {
+ self.err_count() + self.lint_err_count + self.delayed_bug_count() >= c.get()
+ })
+ }
+
+ fn delayed_bug_count(&self) -> usize {
+ self.delayed_span_bugs.len() + self.delayed_good_path_bugs.len()
}
fn print_error_count(&mut self, registry: &Registry) {
@@ -1301,9 +1388,47 @@ impl HandlerInner {
}
}
+ fn stash(&mut self, key: (Span, StashKey), diagnostic: Diagnostic) {
+ // Track the diagnostic for counts, but don't panic-if-treat-err-as-bug
+ // yet; that happens when we actually emit the diagnostic.
+ if diagnostic.is_error() {
+ if matches!(diagnostic.level, Level::Error { lint: true }) {
+ self.lint_err_count += 1;
+ } else {
+ self.err_count += 1;
+ }
+ } else {
+ // Warnings are only automatically flushed if they're forced.
+ if diagnostic.is_force_warn() {
+ self.warn_count += 1;
+ }
+ }
+
+ // FIXME(Centril, #69537): Consider reintroducing panic on overwriting a stashed diagnostic
+ // if/when we have a more robust macro-friendly replacement for `(span, key)` as a key.
+ // See the PR for a discussion.
+ self.stashed_diagnostics.insert(key, diagnostic);
+ }
+
+ fn steal(&mut self, key: (Span, StashKey)) -> Option<Diagnostic> {
+ let diagnostic = self.stashed_diagnostics.remove(&key)?;
+ if diagnostic.is_error() {
+ if matches!(diagnostic.level, Level::Error { lint: true }) {
+ self.lint_err_count -= 1;
+ } else {
+ self.err_count -= 1;
+ }
+ } else {
+ if diagnostic.is_force_warn() {
+ self.warn_count -= 1;
+ }
+ }
+ Some(diagnostic)
+ }
+
#[inline]
fn err_count(&self) -> usize {
- self.err_count + self.stashed_diagnostics.len()
+ self.err_count
}
fn has_errors(&self) -> bool {
@@ -1345,7 +1470,9 @@ impl HandlerInner {
// This is technically `self.treat_err_as_bug()` but `delay_span_bug` is called before
// incrementing `err_count` by one, so we need to add 1 to the comparison.
// FIXME: Would be nice to increment err_count in a more coherent way.
- if self.flags.treat_err_as_bug.map_or(false, |c| self.err_count() + 1 >= c.get()) {
+ if self.flags.treat_err_as_bug.map_or(false, |c| {
+ self.err_count() + self.lint_err_count + self.delayed_bug_count() + 1 >= c.get()
+ }) {
// FIXME: don't abort here if report_delayed_bugs is off
self.span_bug(sp, msg);
}
@@ -1445,14 +1572,24 @@ impl HandlerInner {
if self.treat_err_as_bug() {
match (
self.err_count() + self.lint_err_count,
+ self.delayed_bug_count(),
self.flags.treat_err_as_bug.map(|c| c.get()).unwrap_or(0),
) {
- (1, 1) => panic!("aborting due to `-Z treat-err-as-bug=1`"),
- (0 | 1, _) => {}
- (count, as_bug) => panic!(
- "aborting after {} errors due to `-Z treat-err-as-bug={}`",
- count, as_bug,
- ),
+ (1, 0, 1) => panic!("aborting due to `-Z treat-err-as-bug=1`"),
+ (0, 1, 1) => panic!("aborting due to delayed bug with `-Z treat-err-as-bug=1`"),
+ (count, delayed_count, as_bug) => {
+ if delayed_count > 0 {
+ panic!(
+ "aborting after {} errors and {} delayed bugs due to `-Z treat-err-as-bug={}`",
+ count, delayed_count, as_bug,
+ )
+ } else {
+ panic!(
+ "aborting after {} errors due to `-Z treat-err-as-bug={}`",
+ count, as_bug,
+ )
+ }
+ }
}
}
}
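
The new `emit_err`/`create_warning`-style entry points accept any `IntoDiagnostic` value, typically generated by `#[derive(Diagnostic)]`. The sketch below is not part of this patch; the struct and its Fluent slug are hypothetical.

#[derive(Diagnostic)]
#[diag(example_invalid_widget)] // hypothetical Fluent message
struct InvalidWidget {
    #[primary_span]
    span: Span,
}

fn report_invalid_widget(handler: &Handler, span: Span) -> ErrorGuaranteed {
    // Equivalent to `handler.create_err(InvalidWidget { span }).emit()`.
    handler.emit_err(InvalidWidget { span })
}
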
diff --git a/compiler/rustc_errors/src/translation.rs b/compiler/rustc_errors/src/translation.rs
new file mode 100644
index 000000000..a7737b467
--- /dev/null
+++ b/compiler/rustc_errors/src/translation.rs
@@ -0,0 +1,117 @@
+use crate::snippet::Style;
+use crate::{DiagnosticArg, DiagnosticMessage, FluentBundle};
+use rustc_data_structures::sync::Lrc;
+use rustc_error_messages::FluentArgs;
+use std::borrow::Cow;
+
+/// Convert diagnostic arguments (a rustc internal type that exists to implement
+/// `Encodable`/`Decodable`) into `FluentArgs` which is necessary to perform translation.
+///
+/// Typically performed once for each diagnostic at the start of `emit_diagnostic` and then
+/// passed around as a reference thereafter.
+pub fn to_fluent_args<'iter, 'arg: 'iter>(
+ iter: impl Iterator<Item = DiagnosticArg<'iter, 'arg>>,
+) -> FluentArgs<'arg> {
+ let mut args = if let Some(size) = iter.size_hint().1 {
+ FluentArgs::with_capacity(size)
+ } else {
+ FluentArgs::new()
+ };
+
+ for (k, v) in iter {
+ args.set(k.clone(), v.clone());
+ }
+
+ args
+}
+
+pub trait Translate {
+ /// Return `FluentBundle` with localized diagnostics for the locale requested by the user. If no
+ /// language was requested by the user then this will be `None` and `fallback_fluent_bundle`
+ /// should be used.
+ fn fluent_bundle(&self) -> Option<&Lrc<FluentBundle>>;
+
+ /// Return `FluentBundle` with localized diagnostics for the default locale of the compiler.
+ /// Used when the user has not requested a specific language or when a localized diagnostic is
+ /// unavailable for the requested locale.
+ fn fallback_fluent_bundle(&self) -> &FluentBundle;
+
+ /// Convert `DiagnosticMessage`s to a string, performing translation if necessary.
+ fn translate_messages(
+ &self,
+ messages: &[(DiagnosticMessage, Style)],
+ args: &FluentArgs<'_>,
+ ) -> Cow<'_, str> {
+ Cow::Owned(
+ messages.iter().map(|(m, _)| self.translate_message(m, args)).collect::<String>(),
+ )
+ }
+
+ /// Convert a `DiagnosticMessage` to a string, performing translation if necessary.
+ fn translate_message<'a>(
+ &'a self,
+ message: &'a DiagnosticMessage,
+ args: &'a FluentArgs<'_>,
+ ) -> Cow<'_, str> {
+ trace!(?message, ?args);
+ let (identifier, attr) = match message {
+ DiagnosticMessage::Str(msg) | DiagnosticMessage::Eager(msg) => {
+ return Cow::Borrowed(&msg);
+ }
+ DiagnosticMessage::FluentIdentifier(identifier, attr) => (identifier, attr),
+ };
+
+ let translate_with_bundle = |bundle: &'a FluentBundle| -> Option<(Cow<'_, str>, Vec<_>)> {
+ let message = bundle.get_message(&identifier)?;
+ let value = match attr {
+ Some(attr) => message.get_attribute(attr)?.value(),
+ None => message.value()?,
+ };
+ debug!(?message, ?value);
+
+ let mut errs = vec![];
+ let translated = bundle.format_pattern(value, Some(&args), &mut errs);
+ debug!(?translated, ?errs);
+ Some((translated, errs))
+ };
+
+ self.fluent_bundle()
+ .and_then(|bundle| translate_with_bundle(bundle))
+ // If `translate_with_bundle` returns `None` with the primary bundle, this is likely
+ // just that the primary bundle doesn't contain the message being translated, so
+ // proceed to the fallback bundle.
+ //
+ // However, when errors are produced from translation, that means the translation
+ // is broken (e.g. `{$foo}` exists in a translation but `foo` isn't provided).
+ //
+ // In debug builds, assert so that compiler devs can spot the broken translation and
+ // fix it..
+ .inspect(|(_, errs)| {
+ debug_assert!(
+ errs.is_empty(),
+ "identifier: {:?}, attr: {:?}, args: {:?}, errors: {:?}",
+ identifier,
+ attr,
+ args,
+ errs
+ );
+ })
+ // ..otherwise, for end users, an error about this wouldn't be useful or actionable, so
+ // just hide it and try with the fallback bundle.
+ .filter(|(_, errs)| errs.is_empty())
+ .or_else(|| translate_with_bundle(self.fallback_fluent_bundle()))
+ .map(|(translated, errs)| {
+ // Always bail out for errors with the fallback bundle.
+ assert!(
+ errs.is_empty(),
+ "identifier: {:?}, attr: {:?}, args: {:?}, errors: {:?}",
+ identifier,
+ attr,
+ args,
+ errs
+ );
+ translated
+ })
+ .expect("failed to find message in primary or fallback fluent bundles")
+ }
+}
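
Emitters now call the free `to_fluent_args` function instead of the removed `Emitter::to_fluent_args` method. A minimal sketch, not part of this patch, with `render_example` and the hard-coded message as placeholders:

fn render_example<T: Translate>(emitter: &T, diag: &Diagnostic) -> String {
    // Convert the diagnostic's arguments once, then translate a message with them.
    let args = to_fluent_args(diag.args());
    let msg = DiagnosticMessage::Str("example message".to_string());
    emitter.translate_message(&msg, &args).into_owned()
}
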
diff --git a/compiler/rustc_expand/src/base.rs b/compiler/rustc_expand/src/base.rs
index 6e093811f..c8de60ccb 100644
--- a/compiler/rustc_expand/src/base.rs
+++ b/compiler/rustc_expand/src/base.rs
@@ -6,21 +6,23 @@ use rustc_ast::ptr::P;
use rustc_ast::token::{self, Nonterminal};
use rustc_ast::tokenstream::TokenStream;
use rustc_ast::visit::{AssocCtxt, Visitor};
-use rustc_ast::{self as ast, Attribute, HasAttrs, Item, NodeId, PatKind};
+use rustc_ast::{self as ast, AttrVec, Attribute, HasAttrs, Item, NodeId, PatKind};
use rustc_attr::{self as attr, Deprecation, Stability};
use rustc_data_structures::fx::{FxHashSet, FxIndexMap};
use rustc_data_structures::sync::{self, Lrc};
-use rustc_errors::{Applicability, DiagnosticBuilder, ErrorGuaranteed, MultiSpan, PResult};
+use rustc_errors::{
+ Applicability, DiagnosticBuilder, ErrorGuaranteed, IntoDiagnostic, MultiSpan, PResult,
+};
use rustc_lint_defs::builtin::PROC_MACRO_BACK_COMPAT;
use rustc_lint_defs::{BufferedEarlyLint, BuiltinLintDiagnostics};
use rustc_parse::{self, parser, MACRO_ARGUMENTS};
-use rustc_session::{parse::ParseSess, Limit, Session, SessionDiagnostic};
+use rustc_session::{parse::ParseSess, Limit, Session};
use rustc_span::def_id::{CrateNum, DefId, LocalDefId};
use rustc_span::edition::Edition;
use rustc_span::hygiene::{AstPass, ExpnData, ExpnKind, LocalExpnId};
use rustc_span::source_map::SourceMap;
use rustc_span::symbol::{kw, sym, Ident, Symbol};
-use rustc_span::{FileName, Span, DUMMY_SP};
+use rustc_span::{BytePos, FileName, RealFileName, Span, DUMMY_SP};
use smallvec::{smallvec, SmallVec};
use std::default::Default;
@@ -71,7 +73,7 @@ impl Annotatable {
}
}
- pub fn visit_attrs(&mut self, f: impl FnOnce(&mut Vec<Attribute>)) {
+ pub fn visit_attrs(&mut self, f: impl FnOnce(&mut AttrVec)) {
match self {
Annotatable::Item(item) => item.visit_attrs(f),
Annotatable::TraitItem(trait_item) => trait_item.visit_attrs(f),
@@ -693,10 +695,6 @@ pub struct SyntaxExtension {
pub span: Span,
/// List of unstable features that are treated as stable inside this macro.
pub allow_internal_unstable: Option<Lrc<[Symbol]>>,
- /// Suppresses the `unsafe_code` lint for code produced by this macro.
- pub allow_internal_unsafe: bool,
- /// Enables the macro helper hack (`ident!(...)` -> `$crate::ident!(...)`) for this macro.
- pub local_inner_macros: bool,
/// The macro's stability info.
pub stability: Option<Stability>,
/// The macro's deprecation info.
@@ -708,6 +706,13 @@ pub struct SyntaxExtension {
/// Built-in macros have a couple of special properties like availability
/// in `#[no_implicit_prelude]` modules, so we have to keep this flag.
pub builtin_name: Option<Symbol>,
+ /// Suppresses the `unsafe_code` lint for code produced by this macro.
+ pub allow_internal_unsafe: bool,
+ /// Enables the macro helper hack (`ident!(...)` -> `$crate::ident!(...)`) for this macro.
+ pub local_inner_macros: bool,
+ /// Should debuginfo for the macro be collapsed to the outermost expansion site (in other
+ /// words, was the macro definition annotated with `#[collapse_debuginfo]`)?
+ pub collapse_debuginfo: bool,
}
impl SyntaxExtension {
@@ -729,14 +734,15 @@ impl SyntaxExtension {
SyntaxExtension {
span: DUMMY_SP,
allow_internal_unstable: None,
- allow_internal_unsafe: false,
- local_inner_macros: false,
stability: None,
deprecation: None,
helper_attrs: Vec::new(),
edition,
builtin_name: None,
kind,
+ allow_internal_unsafe: false,
+ local_inner_macros: false,
+ collapse_debuginfo: false,
}
}
@@ -754,12 +760,13 @@ impl SyntaxExtension {
let allow_internal_unstable =
attr::allow_internal_unstable(sess, &attrs).collect::<Vec<Symbol>>();
- let mut local_inner_macros = false;
- if let Some(macro_export) = sess.find_by_name(attrs, sym::macro_export) {
- if let Some(l) = macro_export.meta_item_list() {
- local_inner_macros = attr::list_contains_name(&l, sym::local_inner_macros);
- }
- }
+ let allow_internal_unsafe = sess.contains_name(attrs, sym::allow_internal_unsafe);
+ let local_inner_macros = sess
+ .find_by_name(attrs, sym::macro_export)
+ .and_then(|macro_export| macro_export.meta_item_list())
+ .map_or(false, |l| attr::list_contains_name(&l, sym::local_inner_macros));
+ let collapse_debuginfo = sess.contains_name(attrs, sym::collapse_debuginfo);
+ tracing::debug!(?local_inner_macros, ?collapse_debuginfo, ?allow_internal_unsafe);
let (builtin_name, helper_attrs) = sess
.find_by_name(attrs, sym::rustc_builtin_macro)
@@ -772,7 +779,7 @@ impl SyntaxExtension {
)
})
.unwrap_or_else(|| (None, helper_attrs));
- let (stability, const_stability) = attr::find_stability(&sess, attrs, span);
+ let (stability, const_stability, body_stability) = attr::find_stability(&sess, attrs, span);
if let Some((_, sp)) = const_stability {
sess.parse_sess
.span_diagnostic
@@ -784,19 +791,31 @@ impl SyntaxExtension {
)
.emit();
}
+ if let Some((_, sp)) = body_stability {
+ sess.parse_sess
+ .span_diagnostic
+ .struct_span_err(sp, "macros cannot have body stability attributes")
+ .span_label(sp, "invalid body stability attribute")
+ .span_label(
+ sess.source_map().guess_head_span(span),
+ "body stability attribute affects this macro",
+ )
+ .emit();
+ }
SyntaxExtension {
kind,
span,
allow_internal_unstable: (!allow_internal_unstable.is_empty())
.then(|| allow_internal_unstable.into()),
- allow_internal_unsafe: sess.contains_name(attrs, sym::allow_internal_unsafe),
- local_inner_macros,
stability: stability.map(|(s, _)| s),
deprecation: attr::find_deprecation(&sess, attrs).map(|(d, _)| d),
helper_attrs,
edition,
builtin_name,
+ allow_internal_unsafe,
+ local_inner_macros,
+ collapse_debuginfo,
}
}
@@ -841,11 +860,12 @@ impl SyntaxExtension {
call_site,
self.span,
self.allow_internal_unstable.clone(),
- self.allow_internal_unsafe,
- self.local_inner_macros,
self.edition,
macro_def_id,
parent_module,
+ self.allow_internal_unsafe,
+ self.local_inner_macros,
+ self.collapse_debuginfo,
)
}
}
@@ -1091,12 +1111,12 @@ impl<'a> ExtCtxt<'a> {
pub fn create_err(
&self,
- err: impl SessionDiagnostic<'a>,
+ err: impl IntoDiagnostic<'a>,
) -> DiagnosticBuilder<'a, ErrorGuaranteed> {
self.sess.create_err(err)
}
- pub fn emit_err(&self, err: impl SessionDiagnostic<'a>) -> ErrorGuaranteed {
+ pub fn emit_err(&self, err: impl IntoDiagnostic<'a>) -> ErrorGuaranteed {
self.sess.emit_err(err)
}
@@ -1208,15 +1228,16 @@ pub fn expr_to_spanned_string<'a>(
ast::LitKind::Str(s, style) => return Ok((s, style, expr.span)),
ast::LitKind::ByteStr(_) => {
let mut err = cx.struct_span_err(l.span, err_msg);
+ let span = expr.span.shrink_to_lo();
err.span_suggestion(
- expr.span.shrink_to_lo(),
+ span.with_hi(span.lo() + BytePos(1)),
"consider removing the leading `b`",
"",
Applicability::MaybeIncorrect,
);
Some((err, true))
}
- ast::LitKind::Err(_) => None,
+ ast::LitKind::Err => None,
_ => Some((cx.struct_span_err(l.span, err_msg), false)),
},
ast::ExprKind::Err => None,
@@ -1402,16 +1423,40 @@ fn pretty_printing_compatibility_hack(item: &Item, sess: &ParseSess) -> bool {
if let ast::ItemKind::Enum(enum_def, _) = &item.kind {
if let [variant] = &*enum_def.variants {
if variant.ident.name == sym::Input {
- sess.buffer_lint_with_diagnostic(
- &PROC_MACRO_BACK_COMPAT,
- item.ident.span,
- ast::CRATE_NODE_ID,
- "using `procedural-masquerade` crate",
- BuiltinLintDiagnostics::ProcMacroBackCompat(
- "The `procedural-masquerade` crate has been unnecessary since Rust 1.30.0. \
- Versions of this crate below 0.1.7 will eventually stop compiling.".to_string())
- );
- return true;
+ let filename = sess.source_map().span_to_filename(item.ident.span);
+ if let FileName::Real(RealFileName::LocalPath(path)) = filename {
+ if let Some(c) = path
+ .components()
+ .flat_map(|c| c.as_os_str().to_str())
+ .find(|c| c.starts_with("rental") || c.starts_with("allsorts-rental"))
+ {
+ let crate_matches = if c.starts_with("allsorts-rental") {
+ true
+ } else {
+ let mut version = c.trim_start_matches("rental-").split(".");
+ version.next() == Some("0")
+ && version.next() == Some("5")
+ && version
+ .next()
+ .and_then(|c| c.parse::<u32>().ok())
+ .map_or(false, |v| v < 6)
+ };
+
+ if crate_matches {
+ sess.buffer_lint_with_diagnostic(
+ &PROC_MACRO_BACK_COMPAT,
+ item.ident.span,
+ ast::CRATE_NODE_ID,
+ "using an old version of `rental`",
+ BuiltinLintDiagnostics::ProcMacroBackCompat(
+ "older versions of the `rental` crate will stop compiling in future versions of Rust; \
+ please update to `rental` v0.5.6, or switch to one of the `rental` alternatives".to_string()
+ )
+ );
+ return true;
+ }
+ }
+ }
}
}
}
diff --git a/compiler/rustc_expand/src/build.rs b/compiler/rustc_expand/src/build.rs
index fa3e2a4a5..0952e65cf 100644
--- a/compiler/rustc_expand/src/build.rs
+++ b/compiler/rustc_expand/src/build.rs
@@ -3,6 +3,7 @@ use crate::base::ExtCtxt;
use rustc_ast::attr;
use rustc_ast::ptr::P;
use rustc_ast::{self as ast, AttrVec, BlockCheckMode, Expr, LocalKind, PatKind, UnOp};
+use rustc_data_structures::sync::Lrc;
use rustc_span::source_map::Spanned;
use rustc_span::symbol::{kw, sym, Ident, Symbol};
@@ -106,14 +107,13 @@ impl<'a> ExtCtxt<'a> {
&self,
span: Span,
ident: Ident,
- attrs: Vec<ast::Attribute>,
bounds: ast::GenericBounds,
default: Option<P<ast::Ty>>,
) -> ast::GenericParam {
ast::GenericParam {
ident: ident.with_span_pos(span),
id: ast::DUMMY_NODE_ID,
- attrs: attrs.into(),
+ attrs: AttrVec::new(),
bounds,
kind: ast::GenericParamKind::Type { default },
is_placeholder: false,
@@ -178,8 +178,7 @@ impl<'a> ExtCtxt<'a> {
ex: P<ast::Expr>,
) -> ast::Stmt {
let pat = if mutbl {
- let binding_mode = ast::BindingMode::ByValue(ast::Mutability::Mut);
- self.pat_ident_binding_mode(sp, ident, binding_mode)
+ self.pat_ident_binding_mode(sp, ident, ast::BindingAnnotation::MUT)
} else {
self.pat_ident(sp, ident)
};
@@ -253,6 +252,10 @@ impl<'a> ExtCtxt<'a> {
self.expr_ident(span, Ident::with_dummy_span(kw::SelfLower))
}
+ pub fn expr_field(&self, span: Span, expr: P<Expr>, field: Ident) -> P<ast::Expr> {
+ self.expr(span, ast::ExprKind::Field(expr, field))
+ }
+
pub fn expr_binary(
&self,
sp: Span,
@@ -330,23 +333,38 @@ impl<'a> ExtCtxt<'a> {
self.expr_struct(span, self.path_ident(span, id), fields)
}
- pub fn expr_lit(&self, span: Span, lit_kind: ast::LitKind) -> P<ast::Expr> {
+ fn expr_lit(&self, span: Span, lit_kind: ast::LitKind) -> P<ast::Expr> {
let lit = ast::Lit::from_lit_kind(lit_kind, span);
self.expr(span, ast::ExprKind::Lit(lit))
}
+
pub fn expr_usize(&self, span: Span, i: usize) -> P<ast::Expr> {
self.expr_lit(
span,
ast::LitKind::Int(i as u128, ast::LitIntType::Unsigned(ast::UintTy::Usize)),
)
}
+
pub fn expr_u32(&self, sp: Span, u: u32) -> P<ast::Expr> {
self.expr_lit(sp, ast::LitKind::Int(u as u128, ast::LitIntType::Unsigned(ast::UintTy::U32)))
}
+
pub fn expr_bool(&self, sp: Span, value: bool) -> P<ast::Expr> {
self.expr_lit(sp, ast::LitKind::Bool(value))
}
+ pub fn expr_str(&self, sp: Span, s: Symbol) -> P<ast::Expr> {
+ self.expr_lit(sp, ast::LitKind::Str(s, ast::StrStyle::Cooked))
+ }
+
+ pub fn expr_char(&self, sp: Span, ch: char) -> P<ast::Expr> {
+ self.expr_lit(sp, ast::LitKind::Char(ch))
+ }
+
+ pub fn expr_byte_str(&self, sp: Span, bytes: Vec<u8>) -> P<ast::Expr> {
+ self.expr_lit(sp, ast::LitKind::ByteStr(Lrc::from(bytes)))
+ }
+
/// `[expr1, expr2, ...]`
pub fn expr_array(&self, sp: Span, exprs: Vec<P<ast::Expr>>) -> P<ast::Expr> {
self.expr(sp, ast::ExprKind::Array(exprs))
@@ -357,10 +375,6 @@ impl<'a> ExtCtxt<'a> {
self.expr_addr_of(sp, self.expr_array(sp, exprs))
}
- pub fn expr_str(&self, sp: Span, s: Symbol) -> P<ast::Expr> {
- self.expr_lit(sp, ast::LitKind::Str(s, ast::StrStyle::Cooked))
- }
-
pub fn expr_cast(&self, sp: Span, expr: P<ast::Expr>, ty: P<ast::Ty>) -> P<ast::Expr> {
self.expr(sp, ast::ExprKind::Cast(expr, ty))
}
@@ -434,17 +448,16 @@ impl<'a> ExtCtxt<'a> {
self.pat(span, PatKind::Lit(expr))
}
pub fn pat_ident(&self, span: Span, ident: Ident) -> P<ast::Pat> {
- let binding_mode = ast::BindingMode::ByValue(ast::Mutability::Not);
- self.pat_ident_binding_mode(span, ident, binding_mode)
+ self.pat_ident_binding_mode(span, ident, ast::BindingAnnotation::NONE)
}
pub fn pat_ident_binding_mode(
&self,
span: Span,
ident: Ident,
- bm: ast::BindingMode,
+ ann: ast::BindingAnnotation,
) -> P<ast::Pat> {
- let pat = PatKind::Ident(bm, ident.with_span_pos(span), None);
+ let pat = PatKind::Ident(ann, ident.with_span_pos(span), None);
self.pat(span, pat)
}
pub fn pat_path(&self, span: Span, path: ast::Path) -> P<ast::Pat> {
@@ -564,7 +577,7 @@ impl<'a> ExtCtxt<'a> {
&self,
span: Span,
name: Ident,
- attrs: Vec<ast::Attribute>,
+ attrs: ast::AttrVec,
kind: ast::ItemKind,
) -> P<ast::Item> {
// FIXME: Would be nice if our generated code didn't violate
@@ -592,7 +605,7 @@ impl<'a> ExtCtxt<'a> {
mutbl: ast::Mutability,
expr: P<ast::Expr>,
) -> P<ast::Item> {
- self.item(span, name, Vec::new(), ast::ItemKind::Static(ty, mutbl, Some(expr)))
+ self.item(span, name, AttrVec::new(), ast::ItemKind::Static(ty, mutbl, Some(expr)))
}
pub fn item_const(
@@ -603,11 +616,11 @@ impl<'a> ExtCtxt<'a> {
expr: P<ast::Expr>,
) -> P<ast::Item> {
let def = ast::Defaultness::Final;
- self.item(span, name, Vec::new(), ast::ItemKind::Const(def, ty, Some(expr)))
+ self.item(span, name, AttrVec::new(), ast::ItemKind::Const(def, ty, Some(expr)))
}
pub fn attribute(&self, mi: ast::MetaItem) -> ast::Attribute {
- attr::mk_attr_outer(mi)
+ attr::mk_attr_outer(&self.sess.parse_sess.attr_id_generator, mi)
}
pub fn meta_word(&self, sp: Span, w: Symbol) -> ast::MetaItem {
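
With `BindingMode` replaced by `ast::BindingAnnotation` and the literal helpers reorganized, building a simple `let mut` statement through `ExtCtxt` looks like the sketch below. It is not part of this patch; `cx`, `sp`, and `name` are placeholders.

fn build_let_mut(cx: &ExtCtxt<'_>, sp: Span, name: Ident) -> ast::Stmt {
    // `stmt_let` with `mutbl = true` now goes through `BindingAnnotation::MUT` internally.
    cx.stmt_let(sp, /* mutbl */ true, name, cx.expr_str(sp, Symbol::intern("hello")))
}
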
diff --git a/compiler/rustc_expand/src/config.rs b/compiler/rustc_expand/src/config.rs
index 3e1acf438..1d2b1298a 100644
--- a/compiler/rustc_expand/src/config.rs
+++ b/compiler/rustc_expand/src/config.rs
@@ -2,9 +2,9 @@
use rustc_ast::ptr::P;
use rustc_ast::token::{Delimiter, Token, TokenKind};
-use rustc_ast::tokenstream::{AttrAnnotatedTokenStream, AttrAnnotatedTokenTree};
+use rustc_ast::tokenstream::{AttrTokenStream, AttrTokenTree};
use rustc_ast::tokenstream::{DelimSpan, Spacing};
-use rustc_ast::tokenstream::{LazyTokenStream, TokenTree};
+use rustc_ast::tokenstream::{LazyAttrTokenStream, TokenTree};
use rustc_ast::NodeId;
use rustc_ast::{self as ast, AttrStyle, Attribute, HasAttrs, HasTokens, MetaItem};
use rustc_attr as attr;
@@ -215,7 +215,7 @@ pub fn features(
let features = match strip_unconfigured.configure_krate_attrs(krate.attrs) {
None => {
// The entire crate is unconfigured.
- krate.attrs = Vec::new();
+ krate.attrs = ast::AttrVec::new();
krate.items = Vec::new();
Features::default()
}
@@ -259,27 +259,27 @@ impl<'a> StripUnconfigured<'a> {
fn try_configure_tokens<T: HasTokens>(&self, node: &mut T) {
if self.config_tokens {
if let Some(Some(tokens)) = node.tokens_mut() {
- let attr_annotated_tokens = tokens.create_token_stream();
- *tokens = LazyTokenStream::new(self.configure_tokens(&attr_annotated_tokens));
+ let attr_stream = tokens.to_attr_token_stream();
+ *tokens = LazyAttrTokenStream::new(self.configure_tokens(&attr_stream));
}
}
}
- fn configure_krate_attrs(&self, mut attrs: Vec<ast::Attribute>) -> Option<Vec<ast::Attribute>> {
+ fn configure_krate_attrs(&self, mut attrs: ast::AttrVec) -> Option<ast::AttrVec> {
attrs.flat_map_in_place(|attr| self.process_cfg_attr(attr));
if self.in_cfg(&attrs) { Some(attrs) } else { None }
}
- /// Performs cfg-expansion on `stream`, producing a new `AttrAnnotatedTokenStream`.
+ /// Performs cfg-expansion on `stream`, producing a new `AttrTokenStream`.
/// This is only used during the invocation of `derive` proc-macros,
/// which require that we cfg-expand their entire input.
/// Normal cfg-expansion operates on parsed AST nodes via the `configure` method
- fn configure_tokens(&self, stream: &AttrAnnotatedTokenStream) -> AttrAnnotatedTokenStream {
- fn can_skip(stream: &AttrAnnotatedTokenStream) -> bool {
- stream.0.iter().all(|(tree, _spacing)| match tree {
- AttrAnnotatedTokenTree::Attributes(_) => false,
- AttrAnnotatedTokenTree::Token(_) => true,
- AttrAnnotatedTokenTree::Delimited(_, _, inner) => can_skip(inner),
+ fn configure_tokens(&self, stream: &AttrTokenStream) -> AttrTokenStream {
+ fn can_skip(stream: &AttrTokenStream) -> bool {
+ stream.0.iter().all(|tree| match tree {
+ AttrTokenTree::Attributes(_) => false,
+ AttrTokenTree::Token(..) => true,
+ AttrTokenTree::Delimited(_, _, inner) => can_skip(inner),
})
}
@@ -290,38 +290,36 @@ impl<'a> StripUnconfigured<'a> {
let trees: Vec<_> = stream
.0
.iter()
- .flat_map(|(tree, spacing)| match tree.clone() {
- AttrAnnotatedTokenTree::Attributes(mut data) => {
- let mut attrs: Vec<_> = std::mem::take(&mut data.attrs).into();
- attrs.flat_map_in_place(|attr| self.process_cfg_attr(attr));
- data.attrs = attrs.into();
+ .flat_map(|tree| match tree.clone() {
+ AttrTokenTree::Attributes(mut data) => {
+ data.attrs.flat_map_in_place(|attr| self.process_cfg_attr(attr));
if self.in_cfg(&data.attrs) {
- data.tokens = LazyTokenStream::new(
- self.configure_tokens(&data.tokens.create_token_stream()),
+ data.tokens = LazyAttrTokenStream::new(
+ self.configure_tokens(&data.tokens.to_attr_token_stream()),
);
- Some((AttrAnnotatedTokenTree::Attributes(data), *spacing)).into_iter()
+ Some(AttrTokenTree::Attributes(data)).into_iter()
} else {
None.into_iter()
}
}
- AttrAnnotatedTokenTree::Delimited(sp, delim, mut inner) => {
+ AttrTokenTree::Delimited(sp, delim, mut inner) => {
inner = self.configure_tokens(&inner);
- Some((AttrAnnotatedTokenTree::Delimited(sp, delim, inner), *spacing))
+ Some(AttrTokenTree::Delimited(sp, delim, inner))
.into_iter()
}
- AttrAnnotatedTokenTree::Token(ref token) if let TokenKind::Interpolated(ref nt) = token.kind => {
+ AttrTokenTree::Token(ref token, _) if let TokenKind::Interpolated(ref nt) = token.kind => {
panic!(
"Nonterminal should have been flattened at {:?}: {:?}",
token.span, nt
);
}
- AttrAnnotatedTokenTree::Token(token) => {
- Some((AttrAnnotatedTokenTree::Token(token), *spacing)).into_iter()
+ AttrTokenTree::Token(token, spacing) => {
+ Some(AttrTokenTree::Token(token, spacing)).into_iter()
}
})
.collect();
- AttrAnnotatedTokenStream::new(trees)
+ AttrTokenStream::new(trees)
}
/// Parse and expand all `cfg_attr` attributes into a list of attributes
@@ -390,7 +388,7 @@ impl<'a> StripUnconfigured<'a> {
attr: &Attribute,
(item, item_span): (ast::AttrItem, Span),
) -> Attribute {
- let orig_tokens = attr.tokens().to_tokenstream();
+ let orig_tokens = attr.tokens();
// We are taking an attribute of the form `#[cfg_attr(pred, attr)]`
// and producing an attribute of the form `#[attr]`. We
@@ -406,27 +404,33 @@ impl<'a> StripUnconfigured<'a> {
};
let pound_span = pound_token.span;
- let mut trees = vec![(AttrAnnotatedTokenTree::Token(pound_token), Spacing::Alone)];
+ let mut trees = vec![AttrTokenTree::Token(pound_token, Spacing::Alone)];
if attr.style == AttrStyle::Inner {
// For inner attributes, we do the same thing for the `!` in `#![some_attr]`
let TokenTree::Token(bang_token @ Token { kind: TokenKind::Not, .. }, _) = orig_trees.next().unwrap() else {
panic!("Bad tokens for attribute {:?}", attr);
};
- trees.push((AttrAnnotatedTokenTree::Token(bang_token), Spacing::Alone));
+ trees.push(AttrTokenTree::Token(bang_token, Spacing::Alone));
}
// We don't really have a good span to use for the synthesized `[]`
// in `#[attr]`, so just use the span of the `#` token.
- let bracket_group = AttrAnnotatedTokenTree::Delimited(
+ let bracket_group = AttrTokenTree::Delimited(
DelimSpan::from_single(pound_span),
Delimiter::Bracket,
item.tokens
.as_ref()
.unwrap_or_else(|| panic!("Missing tokens for {:?}", item))
- .create_token_stream(),
+ .to_attr_token_stream(),
+ );
+ trees.push(bracket_group);
+ let tokens = Some(LazyAttrTokenStream::new(AttrTokenStream::new(trees)));
+ let attr = attr::mk_attr_from_item(
+ &self.sess.parse_sess.attr_id_generator,
+ item,
+ tokens,
+ attr.style,
+ item_span,
);
- trees.push((bracket_group, Spacing::Alone));
- let tokens = Some(LazyTokenStream::new(AttrAnnotatedTokenStream::new(trees)));
- let attr = attr::mk_attr_from_item(item, tokens, attr.style, item_span);
if attr.has_name(sym::crate_type) {
self.sess.parse_sess.buffer_lint(
rustc_lint_defs::builtin::DEPRECATED_CFG_ATTR_CRATE_TYPE_NAME,
@@ -465,6 +469,7 @@ impl<'a> StripUnconfigured<'a> {
}
/// If attributes are not allowed on expressions, emit an error for `attr`
+ #[instrument(level = "trace", skip(self))]
pub(crate) fn maybe_emit_expr_attr_err(&self, attr: &Attribute) {
if !self.features.map_or(true, |features| features.stmt_expr_attributes) {
let mut err = feature_err(
@@ -482,9 +487,12 @@ impl<'a> StripUnconfigured<'a> {
}
}
- pub fn configure_expr(&self, expr: &mut P<ast::Expr>) {
- for attr in expr.attrs.iter() {
- self.maybe_emit_expr_attr_err(attr);
+ #[instrument(level = "trace", skip(self))]
+ pub fn configure_expr(&self, expr: &mut P<ast::Expr>, method_receiver: bool) {
+ if !method_receiver {
+ for attr in expr.attrs.iter() {
+ self.maybe_emit_expr_attr_err(attr);
+ }
}
// If an expr is valid to cfg away it will have been removed by the
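As a source-level illustration of the token surgery above (the `#` token, the optional `!` for inner attributes, and a synthesized bracketed group around the attribute item), here is a minimal sketch of what `cfg_attr` expansion produces. The predicate and attribute names are invented for illustration and are not part of this patch.

// With the predicate true, the item below is processed as if it had been
// written with a plain `#[derive(Clone)]`; with the predicate false, the
// attribute is dropped entirely.
#[cfg_attr(feature = "extra-derives", derive(Clone))]
struct Config {
    retries: u32,
}

fn main() {
    let c = Config { retries: 3 };
    println!("{}", c.retries);
}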
diff --git a/compiler/rustc_expand/src/errors.rs b/compiler/rustc_expand/src/errors.rs
new file mode 100644
index 000000000..d383f4832
--- /dev/null
+++ b/compiler/rustc_expand/src/errors.rs
@@ -0,0 +1,48 @@
+use rustc_macros::Diagnostic;
+use rustc_span::symbol::MacroRulesNormalizedIdent;
+use rustc_span::Span;
+
+#[derive(Diagnostic)]
+#[diag(expand_expr_repeat_no_syntax_vars)]
+pub(crate) struct NoSyntaxVarsExprRepeat {
+ #[primary_span]
+ pub span: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(expand_must_repeat_once)]
+pub(crate) struct MustRepeatOnce {
+ #[primary_span]
+ pub span: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(expand_count_repetition_misplaced)]
+pub(crate) struct CountRepetitionMisplaced {
+ #[primary_span]
+ pub span: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(expand_meta_var_expr_unrecognized_var)]
+pub(crate) struct MetaVarExprUnrecognizedVar {
+ #[primary_span]
+ pub span: Span,
+ pub key: MacroRulesNormalizedIdent,
+}
+
+#[derive(Diagnostic)]
+#[diag(expand_var_still_repeating)]
+pub(crate) struct VarStillRepeating {
+ #[primary_span]
+ pub span: Span,
+ pub ident: MacroRulesNormalizedIdent,
+}
+
+#[derive(Diagnostic)]
+#[diag(expand_meta_var_dif_seq_matchers)]
+pub(crate) struct MetaVarsDifSeqMatchers {
+ #[primary_span]
+ pub span: Span,
+ pub msg: String,
+}
diff --git a/compiler/rustc_expand/src/expand.rs b/compiler/rustc_expand/src/expand.rs
index 93eeca5b2..57713fb3c 100644
--- a/compiler/rustc_expand/src/expand.rs
+++ b/compiler/rustc_expand/src/expand.rs
@@ -11,7 +11,7 @@ use rustc_ast::ptr::P;
use rustc_ast::token::{self, Delimiter};
use rustc_ast::tokenstream::TokenStream;
use rustc_ast::visit::{self, AssocCtxt, Visitor};
-use rustc_ast::{AssocItemKind, AstNodeWrapper, AttrStyle, ExprKind, ForeignItemKind};
+use rustc_ast::{AssocItemKind, AstNodeWrapper, AttrStyle, AttrVec, ExprKind, ForeignItemKind};
use rustc_ast::{HasAttrs, HasNodeId};
use rustc_ast::{Inline, ItemKind, MacArgs, MacStmtStyle, MetaItemKind, ModKind};
use rustc_ast::{NestedMetaItem, NodeId, PatKind, StmtKind, TyKind};
@@ -50,6 +50,7 @@ macro_rules! ast_fragments {
/// Can also serve as an input and intermediate result for macro expansion operations.
pub enum AstFragment {
OptExpr(Option<P<ast::Expr>>),
+ MethodReceiverExpr(P<ast::Expr>),
$($Kind($AstTy),)*
}
@@ -57,6 +58,7 @@ macro_rules! ast_fragments {
#[derive(Copy, Clone, PartialEq, Eq)]
pub enum AstFragmentKind {
OptExpr,
+ MethodReceiverExpr,
$($Kind,)*
}
@@ -64,6 +66,7 @@ macro_rules! ast_fragments {
pub fn name(self) -> &'static str {
match self {
AstFragmentKind::OptExpr => "expression",
+ AstFragmentKind::MethodReceiverExpr => "expression",
$(AstFragmentKind::$Kind => $kind_name,)*
}
}
@@ -72,6 +75,8 @@ macro_rules! ast_fragments {
match self {
AstFragmentKind::OptExpr =>
result.make_expr().map(Some).map(AstFragment::OptExpr),
+ AstFragmentKind::MethodReceiverExpr =>
+ result.make_expr().map(AstFragment::MethodReceiverExpr),
$(AstFragmentKind::$Kind => result.$make_ast().map(AstFragment::$Kind),)*
}
}
@@ -98,6 +103,13 @@ macro_rules! ast_fragments {
}
}
+ pub fn make_method_receiver_expr(self) -> P<ast::Expr> {
+ match self {
+ AstFragment::MethodReceiverExpr(expr) => expr,
+ _ => panic!("AstFragment::make_* called on the wrong kind of fragment"),
+ }
+ }
+
$(pub fn $make_ast(self) -> $AstTy {
match self {
AstFragment::$Kind(ast) => ast,
@@ -120,6 +132,7 @@ macro_rules! ast_fragments {
}
});
}
+ AstFragment::MethodReceiverExpr(expr) => vis.visit_method_receiver_expr(expr),
$($(AstFragment::$Kind(ast) => vis.$mut_visit_ast(ast),)?)*
$($(AstFragment::$Kind(ast) =>
ast.flat_map_in_place(|ast| vis.$flat_map_ast_elt(ast)),)?)*
@@ -130,6 +143,7 @@ macro_rules! ast_fragments {
match *self {
AstFragment::OptExpr(Some(ref expr)) => visitor.visit_expr(expr),
AstFragment::OptExpr(None) => {}
+ AstFragment::MethodReceiverExpr(ref expr) => visitor.visit_method_receiver_expr(expr),
$($(AstFragment::$Kind(ref ast) => visitor.$visit_ast(ast),)?)*
$($(AstFragment::$Kind(ref ast) => for ast_elt in &ast[..] {
visitor.$visit_ast_elt(ast_elt, $($args)*);
@@ -222,6 +236,7 @@ impl AstFragmentKind {
match self {
AstFragmentKind::OptExpr
| AstFragmentKind::Expr
+ | AstFragmentKind::MethodReceiverExpr
| AstFragmentKind::Stmts
| AstFragmentKind::Ty
| AstFragmentKind::Pat => SupportsMacroExpansion::Yes { supports_inner_attrs: false },
@@ -285,6 +300,9 @@ impl AstFragmentKind {
AstFragmentKind::Expr => AstFragment::Expr(
items.next().expect("expected exactly one expression").expect_expr(),
),
+ AstFragmentKind::MethodReceiverExpr => AstFragment::MethodReceiverExpr(
+ items.next().expect("expected exactly one expression").expect_expr(),
+ ),
AstFragmentKind::OptExpr => {
AstFragment::OptExpr(items.next().map(Annotatable::expect_expr))
}
@@ -306,7 +324,7 @@ pub struct Invocation {
pub enum InvocationKind {
Bang {
- mac: ast::MacCall,
+ mac: P<ast::MacCall>,
span: Span,
},
Attr {
@@ -327,7 +345,7 @@ impl InvocationKind {
fn placeholder_visibility(&self) -> Option<ast::Visibility> {
// HACK: For unnamed fields placeholders should have the same visibility as the actual
// fields because for tuple structs/variants resolve determines visibilities of their
- // constructor using these field visibilities before attributes on them are are expanded.
+ // constructor using these field visibilities before attributes on them are expanded.
// The assumption is that the attribute expansion cannot change field visibilities,
// and it holds because only inert attributes are supported in this position.
match self {
@@ -893,6 +911,7 @@ pub fn parse_ast_fragment<'a>(
AstFragment::Stmts(stmts)
}
AstFragmentKind::Expr => AstFragment::Expr(this.parse_expr()?),
+ AstFragmentKind::MethodReceiverExpr => AstFragment::MethodReceiverExpr(this.parse_expr()?),
AstFragmentKind::OptExpr => {
if this.token != token::Eof {
AstFragment::OptExpr(Some(this.parse_expr()?))
@@ -937,13 +956,12 @@ pub fn ensure_complete_parse<'a>(
kind_name,
);
err.note(&msg);
- let semi_span = this.sess.source_map().next_point(span);
- let semi_full_span = semi_span.to(this.sess.source_map().next_point(semi_span));
- match this.sess.source_map().span_to_snippet(semi_full_span) {
+ let semi_span = this.sess.source_map().next_point(span);
+ match this.sess.source_map().span_to_snippet(semi_span) {
Ok(ref snippet) if &snippet[..] != ";" && kind_name == "expression" => {
err.span_suggestion(
- semi_span,
+ span.shrink_to_hi(),
"you might be missing a semicolon here",
";",
Applicability::MaybeIncorrect,
@@ -1001,7 +1019,7 @@ enum AddSemicolon {
/// of functionality used by `InvocationCollector`.
trait InvocationCollectorNode: HasAttrs + HasNodeId + Sized {
type OutputTy = SmallVec<[Self; 1]>;
- type AttrsTy: Deref<Target = [ast::Attribute]> = Vec<ast::Attribute>;
+ type AttrsTy: Deref<Target = [ast::Attribute]> = ast::AttrVec;
const KIND: AstFragmentKind;
fn to_annotatable(self) -> Annotatable;
fn fragment_to_output(fragment: AstFragment) -> Self::OutputTy;
@@ -1017,7 +1035,7 @@ trait InvocationCollectorNode: HasAttrs + HasNodeId + Sized {
fn is_mac_call(&self) -> bool {
false
}
- fn take_mac_call(self) -> (ast::MacCall, Self::AttrsTy, AddSemicolon) {
+ fn take_mac_call(self) -> (P<ast::MacCall>, Self::AttrsTy, AddSemicolon) {
unreachable!()
}
fn pre_flat_map_node_collect_attr(_cfg: &StripUnconfigured<'_>, _attr: &ast::Attribute) {}
@@ -1046,7 +1064,7 @@ impl InvocationCollectorNode for P<ast::Item> {
fn is_mac_call(&self) -> bool {
matches!(self.kind, ItemKind::MacCall(..))
}
- fn take_mac_call(self) -> (ast::MacCall, Self::AttrsTy, AddSemicolon) {
+ fn take_mac_call(self) -> (P<ast::MacCall>, Self::AttrsTy, AddSemicolon) {
let node = self.into_inner();
match node.kind {
ItemKind::MacCall(mac) => (mac, node.attrs, AddSemicolon::No),
@@ -1154,7 +1172,7 @@ impl InvocationCollectorNode for AstNodeWrapper<P<ast::AssocItem>, TraitItemTag>
fn is_mac_call(&self) -> bool {
matches!(self.wrapped.kind, AssocItemKind::MacCall(..))
}
- fn take_mac_call(self) -> (ast::MacCall, Self::AttrsTy, AddSemicolon) {
+ fn take_mac_call(self) -> (P<ast::MacCall>, Self::AttrsTy, AddSemicolon) {
let item = self.wrapped.into_inner();
match item.kind {
AssocItemKind::MacCall(mac) => (mac, item.attrs, AddSemicolon::No),
@@ -1179,7 +1197,7 @@ impl InvocationCollectorNode for AstNodeWrapper<P<ast::AssocItem>, ImplItemTag>
fn is_mac_call(&self) -> bool {
matches!(self.wrapped.kind, AssocItemKind::MacCall(..))
}
- fn take_mac_call(self) -> (ast::MacCall, Self::AttrsTy, AddSemicolon) {
+ fn take_mac_call(self) -> (P<ast::MacCall>, Self::AttrsTy, AddSemicolon) {
let item = self.wrapped.into_inner();
match item.kind {
AssocItemKind::MacCall(mac) => (mac, item.attrs, AddSemicolon::No),
@@ -1202,7 +1220,7 @@ impl InvocationCollectorNode for P<ast::ForeignItem> {
fn is_mac_call(&self) -> bool {
matches!(self.kind, ForeignItemKind::MacCall(..))
}
- fn take_mac_call(self) -> (ast::MacCall, Self::AttrsTy, AddSemicolon) {
+ fn take_mac_call(self) -> (P<ast::MacCall>, Self::AttrsTy, AddSemicolon) {
let node = self.into_inner();
match node.kind {
ForeignItemKind::MacCall(mac) => (mac, node.attrs, AddSemicolon::No),
@@ -1323,7 +1341,7 @@ impl InvocationCollectorNode for ast::Stmt {
StmtKind::Local(..) | StmtKind::Empty => false,
}
}
- fn take_mac_call(self) -> (ast::MacCall, Self::AttrsTy, AddSemicolon) {
+ fn take_mac_call(self) -> (P<ast::MacCall>, Self::AttrsTy, AddSemicolon) {
// We pull macro invocations (both attributes and fn-like macro calls) out of their
// `StmtKind`s and treat them as statement macro invocations, not as items or expressions.
let (add_semicolon, mac, attrs) = match self.kind {
@@ -1333,7 +1351,7 @@ impl InvocationCollectorNode for ast::Stmt {
}
StmtKind::Item(item) => match item.into_inner() {
ast::Item { kind: ItemKind::MacCall(mac), attrs, .. } => {
- (mac.args.need_semicolon(), mac, attrs.into())
+ (mac.args.need_semicolon(), mac, attrs)
}
_ => unreachable!(),
},
@@ -1387,10 +1405,10 @@ impl InvocationCollectorNode for P<ast::Ty> {
fn is_mac_call(&self) -> bool {
matches!(self.kind, ast::TyKind::MacCall(..))
}
- fn take_mac_call(self) -> (ast::MacCall, Self::AttrsTy, AddSemicolon) {
+ fn take_mac_call(self) -> (P<ast::MacCall>, Self::AttrsTy, AddSemicolon) {
let node = self.into_inner();
match node.kind {
- TyKind::MacCall(mac) => (mac, Vec::new(), AddSemicolon::No),
+ TyKind::MacCall(mac) => (mac, AttrVec::new(), AddSemicolon::No),
_ => unreachable!(),
}
}
@@ -1411,10 +1429,10 @@ impl InvocationCollectorNode for P<ast::Pat> {
fn is_mac_call(&self) -> bool {
matches!(self.kind, PatKind::MacCall(..))
}
- fn take_mac_call(self) -> (ast::MacCall, Self::AttrsTy, AddSemicolon) {
+ fn take_mac_call(self) -> (P<ast::MacCall>, Self::AttrsTy, AddSemicolon) {
let node = self.into_inner();
match node.kind {
- PatKind::MacCall(mac) => (mac, Vec::new(), AddSemicolon::No),
+ PatKind::MacCall(mac) => (mac, AttrVec::new(), AddSemicolon::No),
_ => unreachable!(),
}
}
@@ -1439,7 +1457,7 @@ impl InvocationCollectorNode for P<ast::Expr> {
fn is_mac_call(&self) -> bool {
matches!(self.kind, ExprKind::MacCall(..))
}
- fn take_mac_call(self) -> (ast::MacCall, Self::AttrsTy, AddSemicolon) {
+ fn take_mac_call(self) -> (P<ast::MacCall>, Self::AttrsTy, AddSemicolon) {
let node = self.into_inner();
match node.kind {
ExprKind::MacCall(mac) => (mac, node.attrs, AddSemicolon::No),
@@ -1466,7 +1484,7 @@ impl InvocationCollectorNode for AstNodeWrapper<P<ast::Expr>, OptExprTag> {
fn is_mac_call(&self) -> bool {
matches!(self.wrapped.kind, ast::ExprKind::MacCall(..))
}
- fn take_mac_call(self) -> (ast::MacCall, Self::AttrsTy, AddSemicolon) {
+ fn take_mac_call(self) -> (P<ast::MacCall>, Self::AttrsTy, AddSemicolon) {
let node = self.wrapped.into_inner();
match node.kind {
ExprKind::MacCall(mac) => (mac, node.attrs, AddSemicolon::No),
@@ -1478,6 +1496,42 @@ impl InvocationCollectorNode for AstNodeWrapper<P<ast::Expr>, OptExprTag> {
}
}
+/// This struct is a hack to work around the unstable `stmt_expr_attributes` feature.
+/// It can be removed once that feature is stabilized.
+struct MethodReceiverTag;
+impl DummyAstNode for MethodReceiverTag {
+ fn dummy() -> MethodReceiverTag {
+ MethodReceiverTag
+ }
+}
+impl InvocationCollectorNode for AstNodeWrapper<P<ast::Expr>, MethodReceiverTag> {
+ type OutputTy = Self;
+ type AttrsTy = ast::AttrVec;
+ const KIND: AstFragmentKind = AstFragmentKind::MethodReceiverExpr;
+ fn descr() -> &'static str {
+ "an expression"
+ }
+ fn to_annotatable(self) -> Annotatable {
+ Annotatable::Expr(self.wrapped)
+ }
+ fn fragment_to_output(fragment: AstFragment) -> Self::OutputTy {
+ AstNodeWrapper::new(fragment.make_method_receiver_expr(), MethodReceiverTag)
+ }
+ fn noop_visit<V: MutVisitor>(&mut self, visitor: &mut V) {
+ noop_visit_expr(&mut self.wrapped, visitor)
+ }
+ fn is_mac_call(&self) -> bool {
+ matches!(self.wrapped.kind, ast::ExprKind::MacCall(..))
+ }
+ fn take_mac_call(self) -> (P<ast::MacCall>, Self::AttrsTy, AddSemicolon) {
+ let node = self.wrapped.into_inner();
+ match node.kind {
+ ExprKind::MacCall(mac) => (mac, node.attrs, AddSemicolon::No),
+ _ => unreachable!(),
+ }
+ }
+}
+
struct InvocationCollector<'a, 'b> {
cx: &'a mut ExtCtxt<'b>,
invocations: Vec<(Invocation, Option<Lrc<SyntaxExtension>>)>,
@@ -1512,7 +1566,7 @@ impl<'a, 'b> InvocationCollector<'a, 'b> {
placeholder(fragment_kind, NodeId::placeholder_from_expn_id(expn_id), vis)
}
- fn collect_bang(&mut self, mac: ast::MacCall, kind: AstFragmentKind) -> AstFragment {
+ fn collect_bang(&mut self, mac: P<ast::MacCall>, kind: AstFragmentKind) -> AstFragment {
// cache the macro call span so that it can be
// easily adjusted for incremental compilation
let span = mac.span();
@@ -1646,7 +1700,11 @@ impl<'a, 'b> InvocationCollector<'a, 'b> {
fn expand_cfg_attr(&self, node: &mut impl HasAttrs, attr: ast::Attribute, pos: usize) {
node.visit_attrs(|attrs| {
- attrs.splice(pos..pos, self.cfg().expand_cfg_attr(attr, false));
+ // Repeated `insert` calls are inefficient, but the number of
+ // insertions is almost always 0 or 1 in practice.
+ for cfg in self.cfg().expand_cfg_attr(attr, false).into_iter().rev() {
+ attrs.insert(pos, cfg)
+ }
});
}
@@ -1837,6 +1895,14 @@ impl<'a, 'b> MutVisitor for InvocationCollector<'a, 'b> {
self.visit_node(node)
}
+ fn visit_method_receiver_expr(&mut self, node: &mut P<ast::Expr>) {
+ visit_clobber(node, |node| {
+ let mut wrapper = AstNodeWrapper::new(node, MethodReceiverTag);
+ self.visit_node(&mut wrapper);
+ wrapper.wrapped
+ })
+ }
+
fn filter_map_expr(&mut self, node: P<ast::Expr>) -> Option<P<ast::Expr>> {
self.flat_map_node(AstNodeWrapper::new(node, OptExprTag))
}
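The new `MethodReceiverExpr` fragment kind and the `visit_method_receiver_expr` hook let expansion tell a method-call receiver apart from an ordinary expression (this is what the `method_receiver` flag passed to `configure_expr` above keys off). A minimal, invented example of the shape of code that goes through this path:

// A fn-like macro expanding in method-receiver position: `receiver!()` is
// collected as a MethodReceiverExpr fragment, and `.len()` is then resolved
// against the expansion result.
macro_rules! receiver {
    () => {
        vec![1, 2, 3]
    };
}

fn main() {
    let n = receiver!().len();
    assert_eq!(n, 3);
}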
diff --git a/compiler/rustc_expand/src/lib.rs b/compiler/rustc_expand/src/lib.rs
index 9d0232822..b34de94fb 100644
--- a/compiler/rustc_expand/src/lib.rs
+++ b/compiler/rustc_expand/src/lib.rs
@@ -3,7 +3,6 @@
#![feature(associated_type_defaults)]
#![feature(if_let_guard)]
#![feature(let_chains)]
-#![feature(let_else)]
#![feature(macro_metavar_expr)]
#![feature(proc_macro_diagnostic)]
#![feature(proc_macro_internals)]
@@ -15,6 +14,9 @@
#[macro_use]
extern crate rustc_macros;
+#[macro_use]
+extern crate tracing;
+
extern crate proc_macro as pm;
mod placeholders;
@@ -26,6 +28,7 @@ pub mod base;
pub mod build;
#[macro_use]
pub mod config;
+pub mod errors;
pub mod expand;
pub mod module;
pub mod proc_macro;
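The `#[macro_use] extern crate tracing` import added above is what backs the new `#[instrument(level = "trace", skip(self))]` annotations in config.rs. A hedged, standalone sketch of that pattern outside rustc (it assumes the `tracing` and, for visible output, `tracing-subscriber` crates as dependencies; the function is illustrative only):

use tracing::instrument;

// `#[instrument]` wraps the call in a tracing span; `skip(secret)` keeps the
// second argument out of the recorded fields.
#[instrument(level = "trace", skip(secret))]
fn configure(name: &str, secret: &str) -> usize {
    tracing::trace!("configuring");
    name.len() + secret.len()
}

fn main() {
    tracing_subscriber::fmt().with_max_level(tracing::Level::TRACE).init();
    configure("expr", "hunter2");
}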
diff --git a/compiler/rustc_expand/src/mbe/macro_parser.rs b/compiler/rustc_expand/src/mbe/macro_parser.rs
index 4fa91dfea..c8bdc3931 100644
--- a/compiler/rustc_expand/src/mbe/macro_parser.rs
+++ b/compiler/rustc_expand/src/mbe/macro_parser.rs
@@ -430,7 +430,7 @@ impl TtParser {
}
}
MatcherLoc::Delimited => {
- // Entering the delimeter is trivial.
+ // Entering the delimiter is trivial.
mp.idx += 1;
self.cur_mps.push(mp);
}
diff --git a/compiler/rustc_expand/src/mbe/macro_rules.rs b/compiler/rustc_expand/src/mbe/macro_rules.rs
index f7e1575af..f6fe38174 100644
--- a/compiler/rustc_expand/src/mbe/macro_rules.rs
+++ b/compiler/rustc_expand/src/mbe/macro_rules.rs
@@ -14,7 +14,7 @@ use rustc_ast::{NodeId, DUMMY_NODE_ID};
use rustc_ast_pretty::pprust;
use rustc_attr::{self as attr, TransparencyError};
use rustc_data_structures::fx::{FxHashMap, FxIndexMap};
-use rustc_errors::{Applicability, Diagnostic, DiagnosticBuilder, ErrorGuaranteed};
+use rustc_errors::{Applicability, Diagnostic, DiagnosticBuilder, DiagnosticMessage};
use rustc_feature::Features;
use rustc_lint_defs::builtin::{
RUST_2021_INCOMPATIBLE_OR_PATTERNS, SEMICOLON_IN_EXPRESSIONS_FROM_MACROS,
@@ -32,7 +32,6 @@ use rustc_span::Span;
use std::borrow::Cow;
use std::collections::hash_map::Entry;
use std::{mem, slice};
-use tracing::debug;
pub(crate) struct ParserAnyMacro<'a> {
parser: Parser<'a>,
@@ -69,19 +68,22 @@ fn emit_frag_parse_err(
kind: AstFragmentKind,
) {
// FIXME(davidtwco): avoid depending on the error message text
- if parser.token == token::Eof && e.message[0].0.expect_str().ends_with(", found `<eof>`") {
- if !e.span.is_dummy() {
- // early end of macro arm (#52866)
- e.replace_span_with(parser.sess.source_map().next_point(parser.token.span));
- }
+ if parser.token == token::Eof
+ && let DiagnosticMessage::Str(message) = &e.message[0].0
+ && message.ends_with(", found `<eof>`")
+ {
let msg = &e.message[0];
e.message[0] = (
- rustc_errors::DiagnosticMessage::Str(format!(
+ DiagnosticMessage::Str(format!(
"macro expansion ends with an incomplete expression: {}",
- msg.0.expect_str().replace(", found `<eof>`", ""),
+ message.replace(", found `<eof>`", ""),
)),
msg.1,
);
+ if !e.span.is_dummy() {
+ // early end of macro arm (#52866)
+ e.replace_span_with(parser.token.span.shrink_to_hi());
+ }
}
if e.span.is_dummy() {
// Get around lack of span in error (#30128)
@@ -248,6 +250,7 @@ fn expand_macro<'cx>(
// hacky, but speeds up the `html5ever` benchmark significantly. (Issue
// 68836 suggests a more comprehensive but more complex change to deal with
// this situation.)
+ // FIXME(Nilstrieb): Stop recovery from happening on this parser and retry later with recovery if the macro failed to match.
let parser = parser_from_cx(sess, arg.clone());
// Try each arm's matchers.
@@ -594,25 +597,21 @@ pub fn compile_declarative_macro(
(mk_syn_ext(expander), rule_spans)
}
-#[derive(SessionSubdiagnostic)]
+#[derive(Subdiagnostic)]
enum ExplainDocComment {
- #[label(expand::explain_doc_comment_inner)]
+ #[label(expand_explain_doc_comment_inner)]
Inner {
#[primary_span]
span: Span,
},
- #[label(expand::explain_doc_comment_outer)]
+ #[label(expand_explain_doc_comment_outer)]
Outer {
#[primary_span]
span: Span,
},
}
-fn annotate_doc_comment(
- err: &mut DiagnosticBuilder<'_, ErrorGuaranteed>,
- sm: &SourceMap,
- span: Span,
-) {
+fn annotate_doc_comment(err: &mut Diagnostic, sm: &SourceMap, span: Span) {
if let Ok(src) = sm.span_to_snippet(span) {
if src.starts_with("///") || src.starts_with("/**") {
err.subdiagnostic(ExplainDocComment::Outer { span });
@@ -980,7 +979,7 @@ impl<'tt> TokenSet<'tt> {
self.maybe_empty = false;
}
- // Adds `tok` to the set for `self`, marking sequence as non-empy.
+ // Adds `tok` to the set for `self`, marking sequence as non-empty.
fn add_one(&mut self, tt: TtHandle<'tt>) {
if !self.tokens.contains(&tt) {
self.tokens.push(tt);
diff --git a/compiler/rustc_expand/src/mbe/metavar_expr.rs b/compiler/rustc_expand/src/mbe/metavar_expr.rs
index fc808401a..99fe47454 100644
--- a/compiler/rustc_expand/src/mbe/metavar_expr.rs
+++ b/compiler/rustc_expand/src/mbe/metavar_expr.rs
@@ -112,7 +112,7 @@ fn parse_depth<'sess>(
"meta-variable expression depth must be a literal"
));
};
- if let Ok(lit_kind) = LitKind::from_lit_token(*lit)
+ if let Ok(lit_kind) = LitKind::from_token_lit(*lit)
&& let LitKind::Int(n_u128, LitIntType::Unsuffixed) = lit_kind
&& let Ok(n_usize) = usize::try_from(n_u128)
{
diff --git a/compiler/rustc_expand/src/mbe/transcribe.rs b/compiler/rustc_expand/src/mbe/transcribe.rs
index e47ea83ac..bec6d1a2d 100644
--- a/compiler/rustc_expand/src/mbe/transcribe.rs
+++ b/compiler/rustc_expand/src/mbe/transcribe.rs
@@ -1,4 +1,8 @@
use crate::base::ExtCtxt;
+use crate::errors::{
+ CountRepetitionMisplaced, MetaVarExprUnrecognizedVar, MetaVarsDifSeqMatchers, MustRepeatOnce,
+ NoSyntaxVarsExprRepeat, VarStillRepeating,
+};
use crate::mbe::macro_parser::{MatchedNonterminal, MatchedSeq, MatchedTokenTree, NamedMatch};
use crate::mbe::{self, MetaVarExpr};
use rustc_ast::mut_visit::{self, MutVisitor};
@@ -165,11 +169,7 @@ pub(super) fn transcribe<'a>(
seq @ mbe::TokenTree::Sequence(_, delimited) => {
match lockstep_iter_size(&seq, interp, &repeats) {
LockstepIterSize::Unconstrained => {
- return Err(cx.struct_span_err(
- seq.span(), /* blame macro writer */
- "attempted to repeat an expression containing no syntax variables \
- matched as repeating at this depth",
- ));
+ return Err(cx.create_err(NoSyntaxVarsExprRepeat { span: seq.span() }));
}
LockstepIterSize::Contradiction(msg) => {
@@ -177,7 +177,7 @@ pub(super) fn transcribe<'a>(
// happens when two meta-variables are used in the same repetition in a
// sequence, but they come from different sequence matchers and repeat
// different amounts.
- return Err(cx.struct_span_err(seq.span(), &msg));
+ return Err(cx.create_err(MetaVarsDifSeqMatchers { span: seq.span(), msg }));
}
LockstepIterSize::Constraint(len, _) => {
@@ -193,10 +193,7 @@ pub(super) fn transcribe<'a>(
// FIXME: this really ought to be caught at macro definition
// time... It happens when the Kleene operator in the matcher and
// the body for the same meta-variable do not match.
- return Err(cx.struct_span_err(
- sp.entire(),
- "this must repeat at least once",
- ));
+ return Err(cx.create_err(MustRepeatOnce { span: sp.entire() }));
}
} else {
// 0 is the initial counter (we have done 0 repetitions so far). `len`
@@ -239,10 +236,7 @@ pub(super) fn transcribe<'a>(
}
MatchedSeq(..) => {
// We were unable to descend far enough. This is an error.
- return Err(cx.struct_span_err(
- sp, /* blame the macro writer */
- &format!("variable '{}' is still repeating at this depth", ident),
- ));
+ return Err(cx.create_err(VarStillRepeating { span: sp, ident }));
}
}
} else {
@@ -448,10 +442,7 @@ fn count_repetitions<'a>(
match matched {
MatchedTokenTree(_) | MatchedNonterminal(_) => {
if declared_lhs_depth == 0 {
- return Err(cx.struct_span_err(
- sp.entire(),
- "`count` can not be placed inside the inner-most repetition",
- ));
+ return Err(cx.create_err(CountRepetitionMisplaced { span: sp.entire() }));
}
match depth_opt {
None => Ok(1),
@@ -499,12 +490,7 @@ where
{
let span = ident.span;
let key = MacroRulesNormalizedIdent::new(ident);
- interp.get(&key).ok_or_else(|| {
- cx.struct_span_err(
- span,
- &format!("variable `{}` is not recognized in meta-variable expression", key),
- )
- })
+ interp.get(&key).ok_or_else(|| cx.create_err(MetaVarExprUnrecognizedVar { span, key }))
}
/// Used by meta-variable expressions when a user input is out of the actual declared bounds. For
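The structured errors introduced in errors.rs replace the hand-written `struct_span_err` calls removed above, keeping the same messages. For orientation, two deliberately unused declarative macros below show the kinds of transcription mistakes these diagnostics cover; the errors fire when the macros are invoked, so defining them alone compiles.

#![allow(unused_macros)]

// `$x` is matched under a `$(...)*` repetition but used outside one:
// invoking this yields "variable 'x' is still repeating at this depth"
// (VarStillRepeating).
macro_rules! still_repeating {
    ($($x:expr),*) => { [$x] };
}

// The transcriber repeats a fragment containing no matched metavariables:
// invoking this yields the NoSyntaxVarsExprRepeat error quoted above.
macro_rules! no_vars_in_repeat {
    ($x:expr) => { $( 0 )* };
}

fn main() {
    // Intentionally not invoked; see the comments above.
}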
diff --git a/compiler/rustc_expand/src/module.rs b/compiler/rustc_expand/src/module.rs
index 0315d1163..9002a24e4 100644
--- a/compiler/rustc_expand/src/module.rs
+++ b/compiler/rustc_expand/src/module.rs
@@ -1,6 +1,6 @@
use crate::base::ModuleData;
use rustc_ast::ptr::P;
-use rustc_ast::{token, Attribute, Inline, Item, ModSpans};
+use rustc_ast::{token, AttrVec, Attribute, Inline, Item, ModSpans};
use rustc_errors::{struct_span_err, DiagnosticBuilder, ErrorGuaranteed};
use rustc_parse::new_parser_from_file;
use rustc_parse::validate_attr;
@@ -48,7 +48,7 @@ pub(crate) fn parse_external_mod(
span: Span, // The span to blame on errors.
module: &ModuleData,
mut dir_ownership: DirOwnership,
- attrs: &mut Vec<Attribute>,
+ attrs: &mut AttrVec,
) -> ParsedExternalMod {
// We bail on the first error, but that error does not cause a fatal error... (1)
let result: Result<_, ModError<'_>> = try {
@@ -63,9 +63,9 @@ pub(crate) fn parse_external_mod(
// Actually parse the external file as a module.
let mut parser = new_parser_from_file(&sess.parse_sess, &mp.file_path, Some(span));
- let (mut inner_attrs, items, inner_span) =
+ let (inner_attrs, items, inner_span) =
parser.parse_mod(&token::Eof).map_err(|err| ModError::ParserError(err))?;
- attrs.append(&mut inner_attrs);
+ attrs.extend(inner_attrs);
(items, inner_span, mp.file_path)
};
// (1) ...instead, we return a dummy module.
diff --git a/compiler/rustc_expand/src/placeholders.rs b/compiler/rustc_expand/src/placeholders.rs
index 0d5d6ee07..faaf3b3fe 100644
--- a/compiler/rustc_expand/src/placeholders.rs
+++ b/compiler/rustc_expand/src/placeholders.rs
@@ -15,16 +15,16 @@ pub fn placeholder(
id: ast::NodeId,
vis: Option<ast::Visibility>,
) -> AstFragment {
- fn mac_placeholder() -> ast::MacCall {
- ast::MacCall {
+ fn mac_placeholder() -> P<ast::MacCall> {
+ P(ast::MacCall {
path: ast::Path { span: DUMMY_SP, segments: Vec::new(), tokens: None },
args: P(ast::MacArgs::Empty),
prior_type_ascription: None,
- }
+ })
}
let ident = Ident::empty();
- let attrs = Vec::new();
+ let attrs = ast::AttrVec::new();
let vis = vis.unwrap_or(ast::Visibility {
span: DUMMY_SP,
kind: ast::VisibilityKind::Inherited,
@@ -55,6 +55,7 @@ pub fn placeholder(
}),
AstFragmentKind::Expr => AstFragment::Expr(expr_placeholder()),
AstFragmentKind::OptExpr => AstFragment::OptExpr(Some(expr_placeholder())),
+ AstFragmentKind::MethodReceiverExpr => AstFragment::MethodReceiverExpr(expr_placeholder()),
AstFragmentKind::Items => AstFragment::Items(smallvec![P(ast::Item {
id,
span,
@@ -296,6 +297,13 @@ impl MutVisitor for PlaceholderExpander {
}
}
+ fn visit_method_receiver_expr(&mut self, expr: &mut P<ast::Expr>) {
+ match expr.kind {
+ ast::ExprKind::MacCall(_) => *expr = self.remove(expr.id).make_method_receiver_expr(),
+ _ => noop_visit_expr(expr, self),
+ }
+ }
+
fn filter_map_expr(&mut self, expr: P<ast::Expr>) -> Option<P<ast::Expr>> {
match expr.kind {
ast::ExprKind::MacCall(_) => self.remove(expr.id).make_opt_expr(),
diff --git a/compiler/rustc_expand/src/proc_macro_server.rs b/compiler/rustc_expand/src/proc_macro_server.rs
index 7d9a4aed0..cc2858d3f 100644
--- a/compiler/rustc_expand/src/proc_macro_server.rs
+++ b/compiler/rustc_expand/src/proc_macro_server.rs
@@ -1,23 +1,22 @@
use crate::base::ExtCtxt;
-
+use pm::bridge::{
+ server, DelimSpan, Diagnostic, ExpnGlobals, Group, Ident, LitKind, Literal, Punct, TokenTree,
+};
+use pm::{Delimiter, Level, LineColumn};
use rustc_ast as ast;
use rustc_ast::token;
use rustc_ast::tokenstream::{self, Spacing::*, TokenStream};
use rustc_ast_pretty::pprust;
use rustc_data_structures::fx::FxHashMap;
use rustc_data_structures::sync::Lrc;
-use rustc_errors::{Diagnostic, MultiSpan, PResult};
+use rustc_errors::{MultiSpan, PResult};
use rustc_parse::lexer::nfc_normalize;
use rustc_parse::parse_stream_from_source_str;
use rustc_session::parse::ParseSess;
use rustc_span::def_id::CrateNum;
use rustc_span::symbol::{self, sym, Symbol};
use rustc_span::{BytePos, FileName, Pos, SourceFile, Span};
-
-use pm::bridge::{
- server, DelimSpan, ExpnGlobals, Group, Ident, LitKind, Literal, Punct, TokenTree,
-};
-use pm::{Delimiter, Level, LineColumn};
+use smallvec::{smallvec, SmallVec};
use std::ops::Bound;
trait FromInternal<T> {
@@ -110,10 +109,26 @@ impl FromInternal<(TokenStream, &mut Rustc<'_, '_>)> for Vec<TokenTree<TokenStre
tokenstream::TokenTree::Token(token, spacing) => (token, spacing == Joint),
};
+ // Split the operator into one or more `Punct`s, one per character.
+ // The final one inherits the jointness of the original token. Any
+ // before that get `joint = true`.
let mut op = |s: &str| {
assert!(s.is_ascii());
- trees.extend(s.as_bytes().iter().enumerate().map(|(idx, &ch)| {
- TokenTree::Punct(Punct { ch, joint: joint || idx != s.len() - 1, span })
+ trees.extend(s.bytes().enumerate().map(|(i, ch)| {
+ let is_final = i == s.len() - 1;
+ // Split the token span into single chars, unless the span
+ // is an unusual one, e.g. due to proc macro expansion. We
+ // determine this by assuming any span with a length that
+ // matches the operator length is a normal one, and any
+ // span with a different length is an unusual one.
+ let span = if (span.hi() - span.lo()).to_usize() == s.len() {
+ let lo = span.lo() + BytePos::from_usize(i);
+ let hi = lo + BytePos::from_usize(1);
+ span.with_lo(lo).with_hi(hi)
+ } else {
+ span
+ };
+ TokenTree::Punct(Punct { ch, joint: if is_final { joint } else { true }, span })
}));
};
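A standalone sketch of the splitting rule described in the comment above (this is not the rustc API, just a plain function for illustration): each character of a multi-character operator becomes its own punct, all of them joint except the last, which keeps the original token's jointness.

fn split_op(op: &str, joint: bool) -> Vec<(char, bool)> {
    assert!(op.is_ascii());
    let last = op.len() - 1;
    op.bytes()
        .enumerate()
        .map(|(i, b)| (b as char, if i == last { joint } else { true }))
        .collect()
}

fn main() {
    // `<<=` followed by whitespace: only the trailing `=` is non-joint.
    assert_eq!(
        split_op("<<=", false),
        vec![('<', true), ('<', true), ('=', false)]
    );
}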
@@ -237,23 +252,57 @@ impl FromInternal<(TokenStream, &mut Rustc<'_, '_>)> for Vec<TokenTree<TokenStre
}
}
-impl ToInternal<TokenStream> for (TokenTree<TokenStream, Span, Symbol>, &mut Rustc<'_, '_>) {
- fn to_internal(self) -> TokenStream {
+// We use a `SmallVec` because the output size is always one or two `TokenTree`s.
+impl ToInternal<SmallVec<[tokenstream::TokenTree; 2]>>
+ for (TokenTree<TokenStream, Span, Symbol>, &mut Rustc<'_, '_>)
+{
+ fn to_internal(self) -> SmallVec<[tokenstream::TokenTree; 2]> {
use rustc_ast::token::*;
let (tree, rustc) = self;
- let (ch, joint, span) = match tree {
- TokenTree::Punct(Punct { ch, joint, span }) => (ch, joint, span),
+ match tree {
+ TokenTree::Punct(Punct { ch, joint, span }) => {
+ let kind = match ch {
+ b'=' => Eq,
+ b'<' => Lt,
+ b'>' => Gt,
+ b'!' => Not,
+ b'~' => Tilde,
+ b'+' => BinOp(Plus),
+ b'-' => BinOp(Minus),
+ b'*' => BinOp(Star),
+ b'/' => BinOp(Slash),
+ b'%' => BinOp(Percent),
+ b'^' => BinOp(Caret),
+ b'&' => BinOp(And),
+ b'|' => BinOp(Or),
+ b'@' => At,
+ b'.' => Dot,
+ b',' => Comma,
+ b';' => Semi,
+ b':' => Colon,
+ b'#' => Pound,
+ b'$' => Dollar,
+ b'?' => Question,
+ b'\'' => SingleQuote,
+ _ => unreachable!(),
+ };
+ smallvec![if joint {
+ tokenstream::TokenTree::token_joint(kind, span)
+ } else {
+ tokenstream::TokenTree::token_alone(kind, span)
+ }]
+ }
TokenTree::Group(Group { delimiter, stream, span: DelimSpan { open, close, .. } }) => {
- return tokenstream::TokenStream::delimited(
+ smallvec![tokenstream::TokenTree::Delimited(
tokenstream::DelimSpan { open, close },
delimiter.to_internal(),
stream.unwrap_or_default(),
- );
+ )]
}
TokenTree::Ident(self::Ident { sym, is_raw, span }) => {
rustc.sess().symbol_gallery.insert(sym, span);
- return tokenstream::TokenStream::token_alone(Ident(sym, is_raw), span);
+ smallvec![tokenstream::TokenTree::token_alone(Ident(sym, is_raw), span)]
}
TokenTree::Literal(self::Literal {
kind: self::LitKind::Integer,
@@ -266,7 +315,7 @@ impl ToInternal<TokenStream> for (TokenTree<TokenStream, Span, Symbol>, &mut Rus
let integer = TokenKind::lit(token::Integer, symbol, suffix);
let a = tokenstream::TokenTree::token_alone(minus, span);
let b = tokenstream::TokenTree::token_alone(integer, span);
- return [a, b].into_iter().collect();
+ smallvec![a, b]
}
TokenTree::Literal(self::Literal {
kind: self::LitKind::Float,
@@ -279,46 +328,14 @@ impl ToInternal<TokenStream> for (TokenTree<TokenStream, Span, Symbol>, &mut Rus
let float = TokenKind::lit(token::Float, symbol, suffix);
let a = tokenstream::TokenTree::token_alone(minus, span);
let b = tokenstream::TokenTree::token_alone(float, span);
- return [a, b].into_iter().collect();
+ smallvec![a, b]
}
TokenTree::Literal(self::Literal { kind, symbol, suffix, span }) => {
- return tokenstream::TokenStream::token_alone(
+ smallvec![tokenstream::TokenTree::token_alone(
TokenKind::lit(kind.to_internal(), symbol, suffix),
span,
- );
+ )]
}
- };
-
- let kind = match ch {
- b'=' => Eq,
- b'<' => Lt,
- b'>' => Gt,
- b'!' => Not,
- b'~' => Tilde,
- b'+' => BinOp(Plus),
- b'-' => BinOp(Minus),
- b'*' => BinOp(Star),
- b'/' => BinOp(Slash),
- b'%' => BinOp(Percent),
- b'^' => BinOp(Caret),
- b'&' => BinOp(And),
- b'|' => BinOp(Or),
- b'@' => At,
- b'.' => Dot,
- b',' => Comma,
- b';' => Semi,
- b':' => Colon,
- b'#' => Pound,
- b'$' => Dollar,
- b'?' => Question,
- b'\'' => SingleQuote,
- _ => unreachable!(),
- };
-
- if joint {
- tokenstream::TokenStream::token_joint(kind, span)
- } else {
- tokenstream::TokenStream::token_alone(kind, span)
}
}
}
@@ -368,8 +385,6 @@ impl server::Types for Rustc<'_, '_> {
type FreeFunctions = FreeFunctions;
type TokenStream = TokenStream;
type SourceFile = Lrc<SourceFile>;
- type MultiSpan = Vec<Span>;
- type Diagnostic = Diagnostic;
type Span = Span;
type Symbol = Symbol;
}
@@ -436,6 +451,21 @@ impl server::FreeFunctions for Rustc<'_, '_> {
span: self.call_site,
})
}
+
+ fn emit_diagnostic(&mut self, diagnostic: Diagnostic<Self::Span>) {
+ let mut diag =
+ rustc_errors::Diagnostic::new(diagnostic.level.to_internal(), diagnostic.message);
+ diag.set_span(MultiSpan::from_spans(diagnostic.spans));
+ for child in diagnostic.children {
+ diag.sub(
+ child.level.to_internal(),
+ child.message,
+ MultiSpan::from_spans(child.spans),
+ None,
+ );
+ }
+ self.sess().span_diagnostic.emit_diagnostic(&mut diag);
+ }
}
impl server::TokenStream for Rustc<'_, '_> {
@@ -486,20 +516,26 @@ impl server::TokenStream for Rustc<'_, '_> {
// We don't use `TokenStream::from_ast` as the tokenstream currently cannot
// be recovered in the general case.
match &expr.kind {
- ast::ExprKind::Lit(l) if l.token.kind == token::Bool => Ok(
- tokenstream::TokenStream::token_alone(token::Ident(l.token.symbol, false), l.span),
- ),
+ ast::ExprKind::Lit(l) if l.token_lit.kind == token::Bool => {
+ Ok(tokenstream::TokenStream::token_alone(
+ token::Ident(l.token_lit.symbol, false),
+ l.span,
+ ))
+ }
ast::ExprKind::Lit(l) => {
- Ok(tokenstream::TokenStream::token_alone(token::Literal(l.token), l.span))
+ Ok(tokenstream::TokenStream::token_alone(token::Literal(l.token_lit), l.span))
}
ast::ExprKind::Unary(ast::UnOp::Neg, e) => match &e.kind {
- ast::ExprKind::Lit(l) => match l.token {
+ ast::ExprKind::Lit(l) => match l.token_lit {
token::Lit { kind: token::Integer | token::Float, .. } => {
Ok(Self::TokenStream::from_iter([
// FIXME: The span of the `-` token is lost when
// parsing, so we cannot faithfully recover it here.
tokenstream::TokenTree::token_alone(token::BinOp(token::Minus), e.span),
- tokenstream::TokenTree::token_alone(token::Literal(l.token), l.span),
+ tokenstream::TokenTree::token_alone(
+ token::Literal(l.token_lit),
+ l.span,
+ ),
]))
}
_ => Err(()),
@@ -514,7 +550,7 @@ impl server::TokenStream for Rustc<'_, '_> {
&mut self,
tree: TokenTree<Self::TokenStream, Self::Span, Self::Symbol>,
) -> Self::TokenStream {
- (tree, &mut *self).to_internal()
+ Self::TokenStream::new((tree, &mut *self).to_internal().into_iter().collect::<Vec<_>>())
}
fn concat_trees(
@@ -522,14 +558,14 @@ impl server::TokenStream for Rustc<'_, '_> {
base: Option<Self::TokenStream>,
trees: Vec<TokenTree<Self::TokenStream, Self::Span, Self::Symbol>>,
) -> Self::TokenStream {
- let mut builder = tokenstream::TokenStreamBuilder::new();
- if let Some(base) = base {
- builder.push(base);
- }
+ let mut stream =
+ if let Some(base) = base { base } else { tokenstream::TokenStream::default() };
for tree in trees {
- builder.push((tree, &mut *self).to_internal());
+ for tt in (tree, &mut *self).to_internal() {
+ stream.push_tree(tt);
+ }
}
- builder.build()
+ stream
}
fn concat_streams(
@@ -537,14 +573,12 @@ impl server::TokenStream for Rustc<'_, '_> {
base: Option<Self::TokenStream>,
streams: Vec<Self::TokenStream>,
) -> Self::TokenStream {
- let mut builder = tokenstream::TokenStreamBuilder::new();
- if let Some(base) = base {
- builder.push(base);
+ let mut stream =
+ if let Some(base) = base { base } else { tokenstream::TokenStream::default() };
+ for s in streams {
+ stream.push_stream(s);
}
- for stream in streams {
- builder.push(stream);
- }
- builder.build()
+ stream
}
fn into_trees(
@@ -577,38 +611,6 @@ impl server::SourceFile for Rustc<'_, '_> {
}
}
-impl server::MultiSpan for Rustc<'_, '_> {
- fn new(&mut self) -> Self::MultiSpan {
- vec![]
- }
-
- fn push(&mut self, spans: &mut Self::MultiSpan, span: Self::Span) {
- spans.push(span)
- }
-}
-
-impl server::Diagnostic for Rustc<'_, '_> {
- fn new(&mut self, level: Level, msg: &str, spans: Self::MultiSpan) -> Self::Diagnostic {
- let mut diag = Diagnostic::new(level.to_internal(), msg);
- diag.set_span(MultiSpan::from_spans(spans));
- diag
- }
-
- fn sub(
- &mut self,
- diag: &mut Self::Diagnostic,
- level: Level,
- msg: &str,
- spans: Self::MultiSpan,
- ) {
- diag.sub(level.to_internal(), msg, MultiSpan::from_spans(spans), None);
- }
-
- fn emit(&mut self, mut diag: Self::Diagnostic) {
- self.sess().span_diagnostic.emit_diagnostic(&mut diag);
- }
-}
-
impl server::Span for Rustc<'_, '_> {
fn debug(&mut self, span: Self::Span) -> String {
if self.ecx.ecfg.span_debug {
@@ -702,6 +704,7 @@ impl server::Span for Rustc<'_, '_> {
fn source_text(&mut self, span: Self::Span) -> Option<String> {
self.sess().source_map().span_to_snippet(span).ok()
}
+
/// Saves the provided span into the metadata of
/// *the crate we are currently compiling*, which must
/// be a proc-macro crate. This id can be passed to
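With the `MultiSpan` and `Diagnostic` associated types dropped from the bridge, a complete diagnostic now crosses the bridge as one value and is emitted server-side in `emit_diagnostic` above. The user-facing proc-macro API is unchanged; a hedged sketch of a client that exercises it (assumes a nightly toolchain, the unstable `proc_macro_diagnostic` feature, and a crate built with `proc-macro = true`):

#![feature(proc_macro_diagnostic)]
extern crate proc_macro;
use proc_macro::{Diagnostic, Level, TokenStream};

// Emits one warning per top-level token tree, then returns the input unchanged.
#[proc_macro]
pub fn warn_each_tree(input: TokenStream) -> TokenStream {
    for tree in input.clone() {
        Diagnostic::spanned(tree.span(), Level::Warning, "flagged by warn_each_tree").emit();
    }
    input
}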
diff --git a/compiler/rustc_expand/src/tokenstream/tests.rs b/compiler/rustc_expand/src/tokenstream/tests.rs
index eed696810..91c4dd732 100644
--- a/compiler/rustc_expand/src/tokenstream/tests.rs
+++ b/compiler/rustc_expand/src/tokenstream/tests.rs
@@ -1,7 +1,7 @@
use crate::tests::string_to_stream;
use rustc_ast::token;
-use rustc_ast::tokenstream::{TokenStream, TokenStreamBuilder};
+use rustc_ast::tokenstream::{TokenStream, TokenTree};
use rustc_span::create_default_session_globals_then;
use rustc_span::{BytePos, Span, Symbol};
@@ -19,10 +19,9 @@ fn test_concat() {
let test_res = string_to_ts("foo::bar::baz");
let test_fst = string_to_ts("foo::bar");
let test_snd = string_to_ts("::baz");
- let mut builder = TokenStreamBuilder::new();
- builder.push(test_fst);
- builder.push(test_snd);
- let eq_res = builder.build();
+ let mut eq_res = TokenStream::default();
+ eq_res.push_stream(test_fst);
+ eq_res.push_stream(test_snd);
assert_eq!(test_res.trees().count(), 5);
assert_eq!(eq_res.trees().count(), 5);
assert_eq!(test_res.eq_unspanned(&eq_res), true);
@@ -99,11 +98,10 @@ fn test_is_empty() {
#[test]
fn test_dotdotdot() {
create_default_session_globals_then(|| {
- let mut builder = TokenStreamBuilder::new();
- builder.push(TokenStream::token_joint(token::Dot, sp(0, 1)));
- builder.push(TokenStream::token_joint(token::Dot, sp(1, 2)));
- builder.push(TokenStream::token_alone(token::Dot, sp(2, 3)));
- let stream = builder.build();
+ let mut stream = TokenStream::default();
+ stream.push_tree(TokenTree::token_joint(token::Dot, sp(0, 1)));
+ stream.push_tree(TokenTree::token_joint(token::Dot, sp(1, 2)));
+ stream.push_tree(TokenTree::token_alone(token::Dot, sp(2, 3)));
assert!(stream.eq_unspanned(&string_to_ts("...")));
assert_eq!(stream.trees().count(), 1);
})
diff --git a/compiler/rustc_feature/Cargo.toml b/compiler/rustc_feature/Cargo.toml
index 3d8d0db20..6f6468646 100644
--- a/compiler/rustc_feature/Cargo.toml
+++ b/compiler/rustc_feature/Cargo.toml
@@ -4,7 +4,6 @@ version = "0.0.0"
edition = "2021"
[lib]
-doctest = false
[dependencies]
rustc_data_structures = { path = "../rustc_data_structures" }
diff --git a/compiler/rustc_feature/src/accepted.rs b/compiler/rustc_feature/src/accepted.rs
index 099c40b21..80448605c 100644
--- a/compiler/rustc_feature/src/accepted.rs
+++ b/compiler/rustc_feature/src/accepted.rs
@@ -53,6 +53,10 @@ declare_features! (
(accepted, abi_sysv64, "1.24.0", Some(36167), None),
/// Allows using ADX intrinsics from `core::arch::{x86, x86_64}`.
(accepted, adx_target_feature, "1.61.0", Some(44839), None),
+ /// Allows explicit discriminants on non-unit enum variants.
+ (accepted, arbitrary_enum_discriminant, "1.66.0", Some(60553), None),
+ /// Allows using `sym` operands in inline assembly.
+ (accepted, asm_sym, "1.66.0", Some(93333), None),
/// Allows the definition of associated constants in `trait` or `impl` blocks.
(accepted, associated_consts, "1.20.0", Some(29646), None),
/// Allows using associated `type`s in `trait`s.
@@ -161,12 +165,16 @@ declare_features! (
(accepted, fn_must_use, "1.27.0", Some(43302), None),
/// Allows capturing variables in scope using format_args!
(accepted, format_args_capture, "1.58.0", Some(67984), None),
+ /// Allows associated types to be generic, e.g., `type Foo<T>;` (RFC 1598).
+ (accepted, generic_associated_types, "1.65.0", Some(44265), None),
/// Allows attributes on lifetime/type formal parameters in generics (RFC 1327).
(accepted, generic_param_attrs, "1.27.0", Some(48848), None),
/// Allows the `#[global_allocator]` attribute.
(accepted, global_allocator, "1.28.0", Some(27389), None),
// FIXME: explain `globs`.
(accepted, globs, "1.0.0", None, None),
+ /// Allows using `..=X` as a pattern.
+ (accepted, half_open_range_patterns, "1.66.0", Some(67264), None),
/// Allows using the `u128` and `i128` types.
(accepted, i128_type, "1.26.0", Some(35118), None),
/// Allows the use of `if let` expressions.
@@ -186,6 +194,10 @@ declare_features! (
/// Allows some increased flexibility in the name resolution rules,
/// especially around globs and shadowing (RFC 1560).
(accepted, item_like_imports, "1.15.0", Some(35120), None),
+ /// Allows `'a: { break 'a; }`.
+ (accepted, label_break_value, "1.65.0", Some(48594), None),
+ /// Allows `let...else` statements.
+ (accepted, let_else, "1.65.0", Some(87335), None),
/// Allows `break {expr}` with a value inside `loop`s.
(accepted, loop_break_value, "1.19.0", Some(37339), None),
/// Allows use of `?` as the Kleene "at most one" operator in macros.
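Two of the features newly marked accepted above, `let_else` and `label_break_value`, are plain surface syntax. A small sketch of both, assuming a toolchain in which they are stable (1.65+):

fn first_digit(s: &str) -> u32 {
    // `let ... else`: bind the pattern or take the divergent branch.
    let Some(c) = s.chars().next() else {
        return 0;
    };
    c.to_digit(10).unwrap_or(0)
}

fn main() {
    // `label_break_value`: break out of a labeled block with a value.
    let x = 'found: {
        if first_digit("7abc") == 7 {
            break 'found 1;
        }
        0
    };
    assert_eq!(x, 1);
}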
diff --git a/compiler/rustc_feature/src/active.rs b/compiler/rustc_feature/src/active.rs
index ef4a17564..647ccdec7 100644
--- a/compiler/rustc_feature/src/active.rs
+++ b/compiler/rustc_feature/src/active.rs
@@ -221,6 +221,8 @@ declare_features! (
(active, rustc_private, "1.0.0", Some(27812), None),
/// Allows using internal rustdoc features like `doc(primitive)` or `doc(keyword)`.
(active, rustdoc_internals, "1.58.0", Some(90418), None),
+ /// Allows using the `rustdoc::missing_doc_code_examples` lint
+ (active, rustdoc_missing_doc_code_examples, "1.31.0", Some(101730), None),
/// Allows using `#[start]` on a function indicating that it is the program entrypoint.
(active, start, "1.0.0", Some(29633), None),
/// Allows using `#[structural_match]` which indicates that a type is structurally matchable.
@@ -290,16 +292,12 @@ declare_features! (
(incomplete, adt_const_params, "1.56.0", Some(95174), None),
/// Allows defining an `#[alloc_error_handler]`.
(active, alloc_error_handler, "1.29.0", Some(51540), None),
- /// Allows explicit discriminants on non-unit enum variants.
- (active, arbitrary_enum_discriminant, "1.37.0", Some(60553), None),
/// Allows trait methods with arbitrary self types.
(active, arbitrary_self_types, "1.23.0", Some(44874), None),
/// Allows using `const` operands in inline assembly.
(active, asm_const, "1.58.0", Some(93332), None),
/// Enables experimental inline assembly support for additional architectures.
(active, asm_experimental_arch, "1.58.0", Some(93335), None),
- /// Allows using `sym` operands in inline assembly.
- (active, asm_sym, "1.58.0", Some(93333), None),
/// Allows the `may_unwind` option in inline assembly.
(active, asm_unwind, "1.58.0", Some(93334), None),
/// Allows users to enforce equality of associated constants `TraitImpl<AssocConst=3>`.
@@ -310,6 +308,8 @@ declare_features! (
(active, associated_type_defaults, "1.2.0", Some(29661), None),
/// Allows `async || body` closures.
(active, async_closure, "1.37.0", Some(62290), None),
+ /// Allows async functions to be declared, implemented, and used in traits.
+ (incomplete, async_fn_in_trait, "1.66.0", Some(91611), None),
/// Allows `extern "C-unwind" fn` to enable unwinding across ABI boundaries.
(active, c_unwind, "1.52.0", Some(74990), None),
/// Allows using C-variadics.
@@ -336,6 +336,8 @@ declare_features! (
(active, closure_track_caller, "1.57.0", Some(87417), None),
/// Allows to use the `#[cmse_nonsecure_entry]` attribute.
(active, cmse_nonsecure_entry, "1.48.0", Some(75835), None),
+ /// Allows use of the `#[collapse_debuginfo]` attribute.
+ (active, collapse_debuginfo, "1.65.0", Some(100758), None),
/// Allows `async {}` expressions in const contexts.
(active, const_async_blocks, "1.53.0", Some(85368), None),
// Allows limiting the evaluation steps of const expressions
@@ -380,6 +382,8 @@ declare_features! (
(active, doc_cfg_hide, "1.57.0", Some(43781), None),
/// Allows `#[doc(masked)]`.
(active, doc_masked, "1.21.0", Some(44027), None),
+ /// Allows `dyn* Trait` objects.
+ (incomplete, dyn_star, "1.65.0", Some(91611), None),
/// Allows `X..Y` patterns.
(active, exclusive_range_pattern, "1.11.0", Some(37854), None),
/// Allows exhaustive pattern matching on types that contain uninhabited types.
@@ -394,18 +398,18 @@ declare_features! (
(active, ffi_returns_twice, "1.34.0", Some(58314), None),
/// Allows using `#[repr(align(...))]` on function items
(active, fn_align, "1.53.0", Some(82232), None),
+ /// Allows generators to be cloned.
+ (active, generator_clone, "1.65.0", Some(95360), None),
/// Allows defining generators.
(active, generators, "1.21.0", Some(43122), None),
/// Infer generic args for both consts and types.
(active, generic_arg_infer, "1.55.0", Some(85077), None),
- /// Allows associated types to be generic, e.g., `type Foo<T>;` (RFC 1598).
- (active, generic_associated_types, "1.23.0", Some(44265), None),
/// An extension to the `generic_associated_types` feature, allowing incomplete features.
(incomplete, generic_associated_types_extended, "1.61.0", Some(95451), None),
/// Allows non-trivial generic constants which have to have wfness manually propagated to callers
(incomplete, generic_const_exprs, "1.56.0", Some(76560), None),
- /// Allows using `..X`, `..=X`, `...X`, and `X..` as a pattern.
- (active, half_open_range_patterns, "1.41.0", Some(67264), None),
+ /// Allows using `..=X` as a pattern in slices.
+ (active, half_open_range_patterns_in_slices, "1.66.0", Some(67264), None),
/// Allows `if let` guard in match arms.
(active, if_let_guard, "1.47.0", Some(51114), None),
/// Allows using imported `main` function
@@ -420,14 +424,10 @@ declare_features! (
(active, intra_doc_pointers, "1.51.0", Some(80896), None),
/// Allows `#[instruction_set(_)]` attribute
(active, isa_attribute, "1.48.0", Some(74727), None),
- /// Allows `'a: { break 'a; }`.
- (active, label_break_value, "1.28.0", Some(48594), None),
// Allows setting the threshold for the `large_assignments` lint.
(active, large_assignments, "1.52.0", Some(83518), None),
/// Allows `if/while p && let q = r && ...` chains.
(active, let_chains, "1.37.0", Some(53667), None),
- /// Allows `let...else` statements.
- (active, let_else, "1.56.0", Some(87335), None),
/// Allows `#[link(..., cfg(..))]`.
(active, link_cfg, "1.14.0", Some(37406), None),
/// Allows using `reason` in lint attributes and the `#[expect(lint)]` lint check.
@@ -480,17 +480,17 @@ declare_features! (
/// Allows macro attributes on expressions, statements and non-inline modules.
(active, proc_macro_hygiene, "1.30.0", Some(54727), None),
/// Allows the use of raw-dylibs (RFC 2627).
- (incomplete, raw_dylib, "1.40.0", Some(58713), None),
+ (active, raw_dylib, "1.65.0", Some(58713), None),
/// Allows `&raw const $place_expr` and `&raw mut $place_expr` expressions.
(active, raw_ref_op, "1.41.0", Some(64490), None),
- /// Allows using the `#[register_attr]` attribute.
- (active, register_attr, "1.41.0", Some(66080), None),
/// Allows using the `#[register_tool]` attribute.
(active, register_tool, "1.41.0", Some(66079), None),
/// Allows the `#[repr(i128)]` attribute for enums.
(incomplete, repr128, "1.16.0", Some(56071), None),
/// Allows `repr(simd)` and importing the various simd intrinsics.
(active, repr_simd, "1.4.0", Some(27731), None),
+ /// Allows return-position `impl Trait` in traits.
+ (incomplete, return_position_impl_trait_in_trait, "1.65.0", Some(91611), None),
/// Allows `extern "rust-cold"`.
(active, rust_cold_cc, "1.63.0", Some(97544), None),
/// Allows the use of SIMD types in functions declared in `extern` blocks.
@@ -523,6 +523,8 @@ declare_features! (
/// Allows creation of instances of a struct by moving fields that have
/// not changed from prior instances of the same struct (RFC #2528)
(active, type_changing_struct_update, "1.58.0", Some(86555), None),
+ /// Enables rustc to generate code that instructs libstd to NOT ignore SIGPIPE.
+ (active, unix_sigpipe, "1.65.0", Some(97889), None),
/// Allows unsized fn parameters.
(active, unsized_fn_params, "1.49.0", Some(48055), None),
/// Allows unsized rvalues at arguments and parameters.
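Most gates touched above remain active, so using them still requires a nightly compiler and an explicit feature attribute. As one concrete example, `let_chains` (listed, not modified, in this hunk):

#![feature(let_chains)]

fn main() {
    let a = Some(3);
    // An `if let` chained with a boolean condition, gated by `let_chains`.
    if let Some(x) = a && x > 0 {
        println!("positive: {x}");
    }
}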
diff --git a/compiler/rustc_feature/src/builtin_attrs.rs b/compiler/rustc_feature/src/builtin_attrs.rs
index 0e73d8fd7..2ead3c2c8 100644
--- a/compiler/rustc_feature/src/builtin_attrs.rs
+++ b/compiler/rustc_feature/src/builtin_attrs.rs
@@ -277,7 +277,7 @@ pub const BUILTIN_ATTRIBUTES: &[BuiltinAttribute] = &[
ungated!(ignore, Normal, template!(Word, NameValueStr: "reason"), WarnFollowing),
ungated!(
should_panic, Normal,
- template!(Word, List: r#"expected = "reason"#, NameValueStr: "reason"), FutureWarnFollowing,
+ template!(Word, List: r#"expected = "reason""#, NameValueStr: "reason"), FutureWarnFollowing,
),
// FIXME(Centril): This can be used on stable but shouldn't.
ungated!(reexport_test_harness_main, CrateLevel, template!(NameValueStr: "name"), ErrorFollowing),
@@ -296,20 +296,24 @@ pub const BUILTIN_ATTRIBUTES: &[BuiltinAttribute] = &[
// Lints:
ungated!(
- warn, Normal, template!(List: r#"lint1, lint2, ..., /*opt*/ reason = "...""#), DuplicatesOk
+ warn, Normal, template!(List: r#"lint1, lint2, ..., /*opt*/ reason = "...""#),
+ DuplicatesOk, @only_local: true,
),
ungated!(
- allow, Normal, template!(List: r#"lint1, lint2, ..., /*opt*/ reason = "...""#), DuplicatesOk
+ allow, Normal, template!(List: r#"lint1, lint2, ..., /*opt*/ reason = "...""#),
+ DuplicatesOk, @only_local: true,
),
gated!(
expect, Normal, template!(List: r#"lint1, lint2, ..., /*opt*/ reason = "...""#), DuplicatesOk,
lint_reasons, experimental!(expect)
),
ungated!(
- forbid, Normal, template!(List: r#"lint1, lint2, ..., /*opt*/ reason = "...""#), DuplicatesOk
+ forbid, Normal, template!(List: r#"lint1, lint2, ..., /*opt*/ reason = "...""#),
+ DuplicatesOk, @only_local: true,
),
ungated!(
- deny, Normal, template!(List: r#"lint1, lint2, ..., /*opt*/ reason = "...""#), DuplicatesOk
+ deny, Normal, template!(List: r#"lint1, lint2, ..., /*opt*/ reason = "...""#),
+ DuplicatesOk, @only_local: true,
),
ungated!(must_use, Normal, template!(Word, NameValueStr: "reason"), FutureWarnFollowing),
gated!(
@@ -335,16 +339,17 @@ pub const BUILTIN_ATTRIBUTES: &[BuiltinAttribute] = &[
// ABI, linking, symbols, and FFI
ungated!(
link, Normal,
- template!(List: r#"name = "...", /*opt*/ kind = "dylib|static|...", /*opt*/ wasm_import_module = "...""#),
+ template!(List: r#"name = "...", /*opt*/ kind = "dylib|static|...", /*opt*/ wasm_import_module = "...", /*opt*/ import_name_type = "decorated|noprefix|undecorated""#),
DuplicatesOk,
),
ungated!(link_name, Normal, template!(NameValueStr: "name"), FutureWarnPreceding),
ungated!(no_link, Normal, template!(Word), WarnFollowing),
- ungated!(repr, Normal, template!(List: "C"), DuplicatesOk),
+ ungated!(repr, Normal, template!(List: "C"), DuplicatesOk, @only_local: true),
ungated!(export_name, Normal, template!(NameValueStr: "name"), FutureWarnPreceding),
ungated!(link_section, Normal, template!(NameValueStr: "name"), FutureWarnPreceding),
ungated!(no_mangle, Normal, template!(Word), WarnFollowing, @only_local: true),
ungated!(used, Normal, template!(Word, List: "compiler|linker"), WarnFollowing, @only_local: true),
+ ungated!(link_ordinal, Normal, template!(List: "ordinal"), ErrorPreceding),
// Limits:
ungated!(recursion_limit, CrateLevel, template!(NameValueStr: "N"), FutureWarnFollowing),
@@ -359,6 +364,7 @@ pub const BUILTIN_ATTRIBUTES: &[BuiltinAttribute] = &[
),
// Entry point:
+ gated!(unix_sigpipe, Normal, template!(Word, NameValueStr: "inherit|sig_ign|sig_dfl"), ErrorFollowing, experimental!(unix_sigpipe)),
ungated!(start, Normal, template!(Word), WarnFollowing),
ungated!(no_start, CrateLevel, template!(Word), WarnFollowing),
ungated!(no_main, CrateLevel, template!(Word), WarnFollowing),
@@ -380,7 +386,10 @@ pub const BUILTIN_ATTRIBUTES: &[BuiltinAttribute] = &[
ungated!(inline, Normal, template!(Word, List: "always|never"), FutureWarnFollowing, @only_local: true),
ungated!(cold, Normal, template!(Word), WarnFollowing, @only_local: true),
ungated!(no_builtins, CrateLevel, template!(Word), WarnFollowing),
- ungated!(target_feature, Normal, template!(List: r#"enable = "name""#), DuplicatesOk),
+ ungated!(
+ target_feature, Normal, template!(List: r#"enable = "name""#),
+ DuplicatesOk, @only_local: true,
+ ),
ungated!(track_caller, Normal, template!(Word), WarnFollowing),
gated!(
no_sanitize, Normal,
@@ -405,10 +414,6 @@ pub const BUILTIN_ATTRIBUTES: &[BuiltinAttribute] = &[
// Linking:
gated!(naked, Normal, template!(Word), WarnFollowing, @only_local: true, naked_functions, experimental!(naked)),
- gated!(
- link_ordinal, Normal, template!(List: "ordinal"), ErrorPreceding, raw_dylib,
- experimental!(link_ordinal)
- ),
// Plugins:
BuiltinAttribute {
@@ -459,10 +464,6 @@ pub const BUILTIN_ATTRIBUTES: &[BuiltinAttribute] = &[
gated!(ffi_pure, Normal, template!(Word), WarnFollowing, experimental!(ffi_pure)),
gated!(ffi_const, Normal, template!(Word), WarnFollowing, experimental!(ffi_const)),
gated!(
- register_attr, CrateLevel, template!(List: "attr1, attr2, ..."), DuplicatesOk,
- experimental!(register_attr),
- ),
- gated!(
register_tool, CrateLevel, template!(List: "tool1, tool2, ..."), DuplicatesOk,
experimental!(register_tool),
),
@@ -474,7 +475,7 @@ pub const BUILTIN_ATTRIBUTES: &[BuiltinAttribute] = &[
// RFC 2632
gated!(
const_trait, Normal, template!(Word), WarnFollowing, const_trait_impl,
- "`const` is a temporary placeholder for marking a trait that is suitable for `const` \
+ "`const_trait` is a temporary placeholder for marking a trait that is suitable for `const` \
`impls` and all default bodies as `const`, which may be removed or renamed in the \
future."
),
@@ -484,21 +485,38 @@ pub const BUILTIN_ATTRIBUTES: &[BuiltinAttribute] = &[
experimental!(deprecated_safe),
),
+ // `#[collapse_debuginfo]`
+ gated!(
+ collapse_debuginfo, Normal, template!(Word), WarnFollowing,
+ experimental!(collapse_debuginfo)
+ ),
+
// ==========================================================================
// Internal attributes: Stability, deprecation, and unsafe:
// ==========================================================================
- ungated!(feature, CrateLevel, template!(List: "name1, name2, ..."), DuplicatesOk),
+ ungated!(
+ feature, CrateLevel,
+ template!(List: "name1, name2, ..."), DuplicatesOk, @only_local: true,
+ ),
// DuplicatesOk since it has its own validation
ungated!(
- stable, Normal, template!(List: r#"feature = "name", since = "version""#), DuplicatesOk,
+ stable, Normal,
+ template!(List: r#"feature = "name", since = "version""#), DuplicatesOk, @only_local: true,
),
ungated!(
unstable, Normal,
template!(List: r#"feature = "name", reason = "...", issue = "N""#), DuplicatesOk,
),
ungated!(rustc_const_unstable, Normal, template!(List: r#"feature = "name""#), DuplicatesOk),
- ungated!(rustc_const_stable, Normal, template!(List: r#"feature = "name""#), DuplicatesOk),
+ ungated!(
+ rustc_const_stable, Normal,
+ template!(List: r#"feature = "name""#), DuplicatesOk, @only_local: true,
+ ),
+ ungated!(
+ rustc_default_body_unstable, Normal,
+ template!(List: r#"feature = "name", reason = "...", issue = "N""#), DuplicatesOk
+ ),
gated!(
allow_internal_unstable, Normal, template!(Word, List: "feat1, feat2, ..."), DuplicatesOk,
"allow_internal_unstable side-steps feature gating and stability checks",
@@ -512,6 +530,7 @@ pub const BUILTIN_ATTRIBUTES: &[BuiltinAttribute] = &[
allow_internal_unsafe, Normal, template!(Word), WarnFollowing,
"allow_internal_unsafe side-steps the unsafe_code lint",
),
+ ungated!(rustc_safe_intrinsic, Normal, template!(Word), DuplicatesOk),
rustc_attr!(rustc_allowed_through_unstable_modules, Normal, template!(Word), WarnFollowing,
"rustc_allowed_through_unstable_modules special cases accidental stabilizations of stable items \
through unstable paths"),
@@ -531,7 +550,7 @@ pub const BUILTIN_ATTRIBUTES: &[BuiltinAttribute] = &[
// ==========================================================================
rustc_attr!(rustc_allocator, Normal, template!(Word), WarnFollowing, IMPL_DETAIL),
- rustc_attr!(rustc_allocator_nounwind, Normal, template!(Word), WarnFollowing, IMPL_DETAIL),
+ rustc_attr!(rustc_nounwind, Normal, template!(Word), WarnFollowing, IMPL_DETAIL),
rustc_attr!(rustc_reallocator, Normal, template!(Word), WarnFollowing, IMPL_DETAIL),
rustc_attr!(rustc_deallocator, Normal, template!(Word), WarnFollowing, IMPL_DETAIL),
rustc_attr!(rustc_allocator_zeroed, Normal, template!(Word), WarnFollowing, IMPL_DETAIL),
@@ -727,7 +746,7 @@ pub const BUILTIN_ATTRIBUTES: &[BuiltinAttribute] = &[
for reserving for `for<T> From<!> for T` impl"
),
rustc_attr!(
- rustc_test_marker, Normal, template!(Word), WarnFollowing,
+ rustc_test_marker, Normal, template!(NameValueStr: "name"), WarnFollowing,
"the `#[rustc_test_marker]` attribute is used internally to track tests",
),
rustc_attr!(
@@ -758,6 +777,7 @@ pub const BUILTIN_ATTRIBUTES: &[BuiltinAttribute] = &[
// Internal attributes, Testing:
// ==========================================================================
+ rustc_attr!(TEST, rustc_effective_visibility, Normal, template!(Word), WarnFollowing),
rustc_attr!(TEST, rustc_outlives, Normal, template!(Word), WarnFollowing),
rustc_attr!(TEST, rustc_capture_analysis, Normal, template!(Word), WarnFollowing),
rustc_attr!(TEST, rustc_insignificant_dtor, Normal, template!(Word), WarnFollowing),
@@ -817,10 +837,20 @@ pub fn is_builtin_attr_name(name: Symbol) -> bool {
BUILTIN_ATTRIBUTE_MAP.get(&name).is_some()
}
+/// Whether this builtin attribute is only used in the local crate.
+/// If so, it is not encoded in the crate metadata.
pub fn is_builtin_only_local(name: Symbol) -> bool {
BUILTIN_ATTRIBUTE_MAP.get(&name).map_or(false, |attr| attr.only_local)
}
+pub fn is_valid_for_get_attr(name: Symbol) -> bool {
+ BUILTIN_ATTRIBUTE_MAP.get(&name).map_or(false, |attr| match attr.duplicates {
+ WarnFollowing | ErrorFollowing | ErrorPreceding | FutureWarnFollowing
+ | FutureWarnPreceding => true,
+ DuplicatesOk | WarnFollowingWordOnly => false,
+ })
+}
+
pub static BUILTIN_ATTRIBUTE_MAP: LazyLock<FxHashMap<Symbol, &BuiltinAttribute>> =
LazyLock::new(|| {
let mut map = FxHashMap::default();
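A reduced, self-contained sketch of what the new `is_valid_for_get_attr` helper encodes: an attribute name is only suitable for a single-attribute lookup when its duplicate policy diagnoses repeats rather than allowing them. The `Dup` enum and the `policy_of` mapping below are illustrative stand-ins for rustc's `AttributeDuplicates` and the builtin-attribute table, not the real types.

    // Illustrative stand-in for rustc_feature::AttributeDuplicates.
    #[derive(Clone, Copy)]
    enum Dup {
        WarnFollowing,
        ErrorFollowing,
        ErrorPreceding,
        FutureWarnFollowing,
        FutureWarnPreceding,
        DuplicatesOk,
        WarnFollowingWordOnly,
    }

    // Same shape as `is_valid_for_get_attr`: true only when duplicates are diagnosed.
    fn is_valid_for_get_attr(policy_of: impl Fn(&str) -> Option<Dup>, name: &str) -> bool {
        policy_of(name).map_or(false, |dup| match dup {
            Dup::WarnFollowing
            | Dup::ErrorFollowing
            | Dup::ErrorPreceding
            | Dup::FutureWarnFollowing
            | Dup::FutureWarnPreceding => true,
            Dup::DuplicatesOk | Dup::WarnFollowingWordOnly => false,
        })
    }

    fn main() {
        // Hypothetical policies, chosen only to exercise both branches.
        let policy_of = |name: &str| match name {
            "inline" => Some(Dup::WarnFollowing),
            "feature" => Some(Dup::DuplicatesOk),
            _ => None,
        };
        assert!(is_valid_for_get_attr(&policy_of, "inline"));
        assert!(!is_valid_for_get_attr(&policy_of, "feature"));
        assert!(!is_valid_for_get_attr(&policy_of, "not_a_builtin"));
    }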
diff --git a/compiler/rustc_feature/src/lib.rs b/compiler/rustc_feature/src/lib.rs
index efb830527..bdaa0ee88 100644
--- a/compiler/rustc_feature/src/lib.rs
+++ b/compiler/rustc_feature/src/lib.rs
@@ -12,6 +12,8 @@
//! symbol to the `accepted` or `removed` modules respectively.
#![feature(once_cell)]
+#![deny(rustc::untranslatable_diagnostic)]
+#![deny(rustc::diagnostic_outside_of_impl)]
mod accepted;
mod active;
@@ -149,7 +151,7 @@ pub use active::{Features, ACTIVE_FEATURES, INCOMPATIBLE_FEATURES};
pub use builtin_attrs::AttributeDuplicates;
pub use builtin_attrs::{
deprecated_attributes, find_gated_cfg, is_builtin_attr_name, is_builtin_only_local,
- AttributeGate, AttributeTemplate, AttributeType, BuiltinAttribute, GatedCfg,
- BUILTIN_ATTRIBUTES, BUILTIN_ATTRIBUTE_MAP,
+ is_valid_for_get_attr, AttributeGate, AttributeTemplate, AttributeType, BuiltinAttribute,
+ GatedCfg, BUILTIN_ATTRIBUTES, BUILTIN_ATTRIBUTE_MAP,
};
pub use removed::{REMOVED_FEATURES, STABLE_REMOVED_FEATURES};
diff --git a/compiler/rustc_feature/src/removed.rs b/compiler/rustc_feature/src/removed.rs
index 2ddaf9201..79a12801d 100644
--- a/compiler/rustc_feature/src/removed.rs
+++ b/compiler/rustc_feature/src/removed.rs
@@ -163,6 +163,9 @@ declare_features! (
(removed, quad_precision_float, "1.0.0", None, None, None),
(removed, quote, "1.33.0", Some(29601), None, None),
(removed, reflect, "1.0.0", Some(27749), None, None),
+ /// Allows using the `#[register_attr]` attribute.
+ (removed, register_attr, "1.65.0", Some(66080), None,
+ Some("removed in favor of `#![register_tool]`")),
/// Allows using the macros:
/// + `__diagnostic_used`
/// + `__register_diagnostic`
diff --git a/compiler/rustc_fs_util/src/lib.rs b/compiler/rustc_fs_util/src/lib.rs
index 87e97c746..63998bb6b 100644
--- a/compiler/rustc_fs_util/src/lib.rs
+++ b/compiler/rustc_fs_util/src/lib.rs
@@ -1,3 +1,6 @@
+#![deny(rustc::untranslatable_diagnostic)]
+#![deny(rustc::diagnostic_outside_of_impl)]
+
use std::ffi::CString;
use std::fs;
use std::io;
diff --git a/compiler/rustc_graphviz/src/lib.rs b/compiler/rustc_graphviz/src/lib.rs
index 6eaff5c2f..3c1bb5532 100644
--- a/compiler/rustc_graphviz/src/lib.rs
+++ b/compiler/rustc_graphviz/src/lib.rs
@@ -273,6 +273,8 @@
html_root_url = "https://doc.rust-lang.org/nightly/nightly-rustc/",
test(attr(allow(unused_variables), deny(warnings)))
)]
+#![deny(rustc::untranslatable_diagnostic)]
+#![deny(rustc::diagnostic_outside_of_impl)]
use LabelText::*;
diff --git a/compiler/rustc_hir/Cargo.toml b/compiler/rustc_hir/Cargo.toml
index 69ad623b7..129f8d235 100644
--- a/compiler/rustc_hir/Cargo.toml
+++ b/compiler/rustc_hir/Cargo.toml
@@ -4,7 +4,6 @@ version = "0.0.0"
edition = "2021"
[lib]
-doctest = false
[dependencies]
rustc_arena = { path = "../rustc_arena" }
diff --git a/compiler/rustc_hir/src/def.rs b/compiler/rustc_hir/src/def.rs
index be5b7eccb..4ef4aad90 100644
--- a/compiler/rustc_hir/src/def.rs
+++ b/compiler/rustc_hir/src/def.rs
@@ -45,8 +45,6 @@ pub enum NonMacroAttrKind {
/// Single-segment custom attribute registered by a derive macro
/// but used before that derive macro was expanded (deprecated).
DeriveHelperCompat,
- /// Single-segment custom attribute registered with `#[register_attr]`.
- Registered,
}
/// What kind of definition something is; e.g., `mod` vs `struct`.
@@ -111,6 +109,8 @@ pub enum DefKind {
InlineConst,
/// Opaque type, aka `impl Trait`.
OpaqueTy,
+ /// A return-position `impl Trait` in a trait definition
+ ImplTraitPlaceholder,
Field,
/// Lifetime parameter: the `'a` in `struct Foo<'a> { ... }`
LifetimeParam,
@@ -140,6 +140,7 @@ impl DefKind {
panic!("impossible struct constructor")
}
DefKind::OpaqueTy => "opaque type",
+ DefKind::ImplTraitPlaceholder => "opaque type in trait",
DefKind::TyAlias => "type alias",
DefKind::TraitAlias => "trait alias",
DefKind::AssocTy => "associated type",
@@ -219,7 +220,8 @@ impl DefKind {
| DefKind::Use
| DefKind::ForeignMod
| DefKind::GlobalAsm
- | DefKind::Impl => None,
+ | DefKind::Impl
+ | DefKind::ImplTraitPlaceholder => None,
}
}
@@ -256,6 +258,7 @@ impl DefKind {
| DefKind::Use
| DefKind::ForeignMod
| DefKind::OpaqueTy
+ | DefKind::ImplTraitPlaceholder
| DefKind::Impl
| DefKind::Field
| DefKind::TyParam
@@ -310,72 +313,76 @@ pub enum Res<Id = hir::HirId> {
///
/// **Belongs to the type namespace.**
PrimTy(hir::PrimTy),
- /// The `Self` type, optionally with the [`DefId`] of the trait it belongs to and
- /// optionally with the [`DefId`] of the item introducing the `Self` type alias.
+
+ /// The `Self` type, as used within a trait.
+ ///
+ /// **Belongs to the type namespace.**
+ ///
+ /// See the examples on [`Res::SelfTyAlias`] for details.
+ SelfTyParam {
+ /// The trait this `Self` is a generic parameter for.
+ trait_: DefId,
+ },
+
+ /// The `Self` type, as used somewhere other than within a trait.
///
/// **Belongs to the type namespace.**
///
/// Examples:
/// ```
- /// struct Bar(Box<Self>);
- /// // `Res::SelfTy { trait_: None, alias_of: Some(Bar) }`
+ /// struct Bar(Box<Self>); // SelfTyAlias
///
/// trait Foo {
- /// fn foo() -> Box<Self>;
- /// // `Res::SelfTy { trait_: Some(Foo), alias_of: None }`
+ /// fn foo() -> Box<Self>; // SelfTyParam
/// }
///
/// impl Bar {
/// fn blah() {
- /// let _: Self;
- /// // `Res::SelfTy { trait_: None, alias_of: Some(::{impl#0}) }`
+ /// let _: Self; // SelfTyAlias
/// }
/// }
///
/// impl Foo for Bar {
- /// fn foo() -> Box<Self> {
- /// // `Res::SelfTy { trait_: Some(Foo), alias_of: Some(::{impl#1}) }`
- /// let _: Self;
- /// // `Res::SelfTy { trait_: Some(Foo), alias_of: Some(::{impl#1}) }`
+ /// fn foo() -> Box<Self> { // SelfTyAlias
+ /// let _: Self; // SelfTyAlias
///
/// todo!()
/// }
/// }
/// ```
- ///
/// *See also [`Res::SelfCtor`].*
///
- /// -----
- ///
- /// HACK(min_const_generics): self types also have an optional requirement to **not** mention
- /// any generic parameters to allow the following with `min_const_generics`:
- /// ```
- /// # struct Foo;
- /// impl Foo { fn test() -> [u8; std::mem::size_of::<Self>()] { todo!() } }
- ///
- /// struct Bar([u8; baz::<Self>()]);
- /// const fn baz<T>() -> usize { 10 }
- /// ```
- /// We do however allow `Self` in repeat expression even if it is generic to not break code
- /// which already works on stable while causing the `const_evaluatable_unchecked` future compat lint:
- /// ```
- /// fn foo<T>() {
- /// let _bar = [1_u8; std::mem::size_of::<*mut T>()];
- /// }
- /// ```
- // FIXME(generic_const_exprs): Remove this bodge once that feature is stable.
- SelfTy {
- /// The trait this `Self` is a generic arg for.
- trait_: Option<DefId>,
+ SelfTyAlias {
/// The item introducing the `Self` type alias. Can be used in the `type_of` query
- /// to get the underlying type. Additionally whether the `Self` type is disallowed
- /// from mentioning generics (i.e. when used in an anonymous constant).
- alias_to: Option<(DefId, bool)>,
+ /// to get the underlying type.
+ alias_to: DefId,
+
+ /// Whether the `Self` type is disallowed from mentioning generics (i.e. when used in an
+ /// anonymous constant).
+ ///
+ /// HACK(min_const_generics): self types also have an optional requirement to **not**
+ /// mention any generic parameters to allow the following with `min_const_generics`:
+ /// ```
+ /// # struct Foo;
+ /// impl Foo { fn test() -> [u8; std::mem::size_of::<Self>()] { todo!() } }
+ ///
+ /// struct Bar([u8; baz::<Self>()]);
+ /// const fn baz<T>() -> usize { 10 }
+ /// ```
+ /// We do however allow `Self` in repeat expression even if it is generic to not break code
+ /// which already works on stable while causing the `const_evaluatable_unchecked` future
+ /// compat lint:
+ /// ```
+ /// fn foo<T>() {
+ /// let _bar = [1_u8; std::mem::size_of::<*mut T>()];
+ /// }
+ /// ```
+ // FIXME(generic_const_exprs): Remove this bodge once that feature is stable.
+ forbid_generic: bool,
+
+ /// Is this within an `impl Foo for Bar`?
+ is_trait_impl: bool,
},
- /// A tool attribute module; e.g., the `rustfmt` in `#[rustfmt::skip]`.
- ///
- /// **Belongs to the type namespace.**
- ToolMod,
// Value namespace
/// The `Self` constructor, along with the [`DefId`]
@@ -383,13 +390,19 @@ pub enum Res<Id = hir::HirId> {
///
/// **Belongs to the value namespace.**
///
- /// *See also [`Res::SelfTy`].*
+ /// *See also [`Res::SelfTyParam`] and [`Res::SelfTyAlias`].*
SelfCtor(DefId),
+
/// A local variable or function parameter.
///
/// **Belongs to the value namespace.**
Local(Id),
+ /// A tool attribute module; e.g., the `rustfmt` in `#[rustfmt::skip]`.
+ ///
+ /// **Belongs to the type namespace.**
+ ToolMod,
+
// Macro namespace
/// An attribute that is *not* implemented via macro.
/// E.g., `#[inline]` and `#[rustfmt::skip]`, which are essentially directives,
@@ -451,11 +464,21 @@ impl PartialRes {
pub fn unresolved_segments(&self) -> usize {
self.unresolved_segments
}
+
+ #[inline]
+ pub fn full_res(&self) -> Option<Res<NodeId>> {
+ (self.unresolved_segments == 0).then_some(self.base_res)
+ }
+
+ #[inline]
+ pub fn expect_full_res(&self) -> Res<NodeId> {
+ self.full_res().expect("unexpected unresolved segments")
+ }
}
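The new `full_res`/`expect_full_res` accessors are thin sugar over the two existing fields: a `PartialRes` only yields a complete resolution when no trailing segments are left unresolved. A minimal sketch of that pattern, with the struct reduced to the two relevant fields:

    #[derive(Clone, Copy)]
    struct PartialRes<R: Copy> {
        base_res: R,
        unresolved_segments: usize,
    }

    impl<R: Copy> PartialRes<R> {
        // Some(base_res) only when the whole path resolved, None otherwise.
        fn full_res(&self) -> Option<R> {
            (self.unresolved_segments == 0).then_some(self.base_res)
        }
    }

    fn main() {
        let partial = PartialRes { base_res: "Res::Def(..)", unresolved_segments: 1 };
        assert_eq!(partial.full_res(), None);
        let full = PartialRes { base_res: "Res::Def(..)", unresolved_segments: 0 };
        assert_eq!(full.full_res(), Some("Res::Def(..)"));
    }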
/// Different kinds of symbols can coexist even if they share the same textual name.
/// Therefore, they each have a separate universe (known as a "namespace").
-#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
+#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
pub enum Namespace {
/// The type namespace includes `struct`s, `enum`s, `union`s, `trait`s, and `mod`s
/// (and, by extension, crates).
@@ -564,15 +587,11 @@ impl NonMacroAttrKind {
NonMacroAttrKind::DeriveHelper | NonMacroAttrKind::DeriveHelperCompat => {
"derive helper attribute"
}
- NonMacroAttrKind::Registered => "explicitly registered attribute",
}
}
pub fn article(self) -> &'static str {
- match self {
- NonMacroAttrKind::Registered => "an",
- _ => "a",
- }
+ "a"
}
/// Users of some attributes cannot mark them as used, so they are considered always used.
@@ -581,7 +600,7 @@ impl NonMacroAttrKind {
NonMacroAttrKind::Tool
| NonMacroAttrKind::DeriveHelper
| NonMacroAttrKind::DeriveHelperCompat => true,
- NonMacroAttrKind::Builtin(..) | NonMacroAttrKind::Registered => false,
+ NonMacroAttrKind::Builtin(..) => false,
}
}
}
@@ -603,7 +622,8 @@ impl<Id> Res<Id> {
Res::Local(..)
| Res::PrimTy(..)
- | Res::SelfTy { .. }
+ | Res::SelfTyParam { .. }
+ | Res::SelfTyAlias { .. }
| Res::SelfCtor(..)
| Res::ToolMod
| Res::NonMacroAttr(..)
@@ -626,7 +646,7 @@ impl<Id> Res<Id> {
Res::SelfCtor(..) => "self constructor",
Res::PrimTy(..) => "builtin type",
Res::Local(..) => "local variable",
- Res::SelfTy { .. } => "self type",
+ Res::SelfTyParam { .. } | Res::SelfTyAlias { .. } => "self type",
Res::ToolMod => "tool module",
Res::NonMacroAttr(attr_kind) => attr_kind.descr(),
Res::Err => "unresolved item",
@@ -649,7 +669,10 @@ impl<Id> Res<Id> {
Res::SelfCtor(id) => Res::SelfCtor(id),
Res::PrimTy(id) => Res::PrimTy(id),
Res::Local(id) => Res::Local(map(id)),
- Res::SelfTy { trait_, alias_to } => Res::SelfTy { trait_, alias_to },
+ Res::SelfTyParam { trait_ } => Res::SelfTyParam { trait_ },
+ Res::SelfTyAlias { alias_to, forbid_generic, is_trait_impl } => {
+ Res::SelfTyAlias { alias_to, forbid_generic, is_trait_impl }
+ }
Res::ToolMod => Res::ToolMod,
Res::NonMacroAttr(attr_kind) => Res::NonMacroAttr(attr_kind),
Res::Err => Res::Err,
@@ -662,7 +685,10 @@ impl<Id> Res<Id> {
Res::SelfCtor(id) => Res::SelfCtor(id),
Res::PrimTy(id) => Res::PrimTy(id),
Res::Local(id) => Res::Local(map(id)?),
- Res::SelfTy { trait_, alias_to } => Res::SelfTy { trait_, alias_to },
+ Res::SelfTyParam { trait_ } => Res::SelfTyParam { trait_ },
+ Res::SelfTyAlias { alias_to, forbid_generic, is_trait_impl } => {
+ Res::SelfTyAlias { alias_to, forbid_generic, is_trait_impl }
+ }
Res::ToolMod => Res::ToolMod,
Res::NonMacroAttr(attr_kind) => Res::NonMacroAttr(attr_kind),
Res::Err => Res::Err,
@@ -689,7 +715,9 @@ impl<Id> Res<Id> {
pub fn ns(&self) -> Option<Namespace> {
match self {
Res::Def(kind, ..) => kind.ns(),
- Res::PrimTy(..) | Res::SelfTy { .. } | Res::ToolMod => Some(Namespace::TypeNS),
+ Res::PrimTy(..) | Res::SelfTyParam { .. } | Res::SelfTyAlias { .. } | Res::ToolMod => {
+ Some(Namespace::TypeNS)
+ }
Res::SelfCtor(..) | Res::Local(..) => Some(Namespace::ValueNS),
Res::NonMacroAttr(..) => Some(Namespace::MacroNS),
Res::Err => None,
diff --git a/compiler/rustc_hir/src/definitions.rs b/compiler/rustc_hir/src/definitions.rs
index c2c551e78..d85ac960f 100644
--- a/compiler/rustc_hir/src/definitions.rs
+++ b/compiler/rustc_hir/src/definitions.rs
@@ -15,7 +15,6 @@ use rustc_span::symbol::{kw, sym, Symbol};
use std::fmt::{self, Write};
use std::hash::Hash;
-use tracing::debug;
/// The `DefPathTable` maps `DefIndex`es to `DefKey`s and vice versa.
/// Internally the `DefPathTable` holds a tree of `DefKey`s, where each `DefKey`
diff --git a/compiler/rustc_hir/src/errors.rs b/compiler/rustc_hir/src/errors.rs
new file mode 100644
index 000000000..e593ed104
--- /dev/null
+++ b/compiler/rustc_hir/src/errors.rs
@@ -0,0 +1,10 @@
+use crate::LangItem;
+
+#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash, Encodable, Decodable)]
+pub struct LangItemError(pub LangItem);
+
+impl ToString for LangItemError {
+ fn to_string(&self) -> String {
+ format!("requires `{}` lang_item", self.0.name())
+ }
+}
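The new `LangItemError` wrapper implements `ToString` directly; an equivalent shape that some callers may find more conventional is to implement `Display`, which yields `to_string()` through the blanket impl. A self-contained sketch of that alternative, using a string stand-in for `LangItem` (this is an illustration, not what the diff itself does):

    use std::fmt;

    // Reduced stand-in for `LangItemError(LangItem)`; the payload here is just a name.
    struct LangItemError(&'static str);

    impl fmt::Display for LangItemError {
        fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
            write!(f, "requires `{}` lang_item", self.0)
        }
    }

    fn main() {
        // Callers of the changed `LanguageItems::require` (see the
        // rustc_hir/src/lang_items.rs hunk below) can recover the old string
        // message the same way: `.map_err(|e| e.to_string())`.
        let err = LangItemError("sized");
        assert_eq!(err.to_string(), "requires `sized` lang_item");
    }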
diff --git a/compiler/rustc_hir/src/hir.rs b/compiler/rustc_hir/src/hir.rs
index 617433a98..ef00c1ffc 100644
--- a/compiler/rustc_hir/src/hir.rs
+++ b/compiler/rustc_hir/src/hir.rs
@@ -1,13 +1,13 @@
use crate::def::{CtorKind, DefKind, Res};
use crate::def_id::DefId;
-pub(crate) use crate::hir_id::{HirId, ItemLocalId};
+pub(crate) use crate::hir_id::{HirId, ItemLocalId, OwnerId};
use crate::intravisit::FnKind;
use crate::LangItem;
use rustc_ast as ast;
use rustc_ast::util::parser::ExprPrecedence;
use rustc_ast::{Attribute, FloatTy, IntTy, Label, LitKind, TraitObjectSyntax, UintTy};
-pub use rustc_ast::{BorrowKind, ImplPolarity, IsAuto};
+pub use rustc_ast::{BindingAnnotation, BorrowKind, ByRef, ImplPolarity, IsAuto};
pub use rustc_ast::{CaptureBy, Movability, Mutability};
use rustc_ast::{InlineAsmOptions, InlineAsmTemplatePiece};
use rustc_data_structures::fingerprint::Fingerprint;
@@ -139,11 +139,10 @@ impl LifetimeName {
match self {
LifetimeName::ImplicitObjectLifetimeDefault | LifetimeName::Infer => true,
- // It might seem surprising that `Fresh` counts as
- // *not* elided -- but this is because, as far as the code
- // in the compiler is concerned -- `Fresh` variants act
- // equivalently to "some fresh name". They correspond to
- // early-bound regions on an impl, in other words.
+ // It might seem surprising that `Fresh` counts as not *elided*
+ // -- but this is because, as far as the code in the compiler is
+ // concerned -- `Fresh` variants act equivalently to "some fresh name".
+ // They correspond to early-bound regions on an impl, in other words.
LifetimeName::Error | LifetimeName::Param(..) | LifetimeName::Static => false,
}
}
@@ -202,13 +201,8 @@ impl Path<'_> {
pub struct PathSegment<'hir> {
/// The identifier portion of this path segment.
pub ident: Ident,
- // `id` and `res` are optional. We currently only use these in save-analysis,
- // any path segments without these will not have save-analysis info and
- // therefore will not have 'jump to def' in IDEs, but otherwise will not be
- // affected. (In general, we don't bother to get the defs for synthesized
- // segments, only for segments which have come from the AST).
- pub hir_id: Option<HirId>,
- pub res: Option<Res>,
+ pub hir_id: HirId,
+ pub res: Res,
/// Type/lifetime parameters attached to this path. They come in
/// two flavors: `Path<A,B,C>` and `Path(A,B) -> C`. Note that
@@ -226,12 +220,12 @@ pub struct PathSegment<'hir> {
impl<'hir> PathSegment<'hir> {
/// Converts an identifier to the corresponding segment.
- pub fn from_ident(ident: Ident) -> PathSegment<'hir> {
- PathSegment { ident, hir_id: None, res: None, infer_args: true, args: None }
+ pub fn new(ident: Ident, hir_id: HirId, res: Res) -> PathSegment<'hir> {
+ PathSegment { ident, hir_id, res, infer_args: true, args: None }
}
pub fn invalid() -> Self {
- Self::from_ident(Ident::empty())
+ Self::new(Ident::empty(), HirId::INVALID, Res::Err)
}
pub fn args(&self) -> &GenericArgs<'hir> {
@@ -264,8 +258,8 @@ impl InferArg {
#[derive(Debug, HashStable_Generic)]
pub enum GenericArg<'hir> {
- Lifetime(Lifetime),
- Type(Ty<'hir>),
+ Lifetime(&'hir Lifetime),
+ Type(&'hir Ty<'hir>),
Const(ConstArg),
Infer(InferArg),
}
@@ -280,7 +274,7 @@ impl GenericArg<'_> {
}
}
- pub fn id(&self) -> HirId {
+ pub fn hir_id(&self) -> HirId {
match self {
GenericArg::Lifetime(l) => l.hir_id,
GenericArg::Type(t) => t.hir_id,
@@ -305,9 +299,9 @@ impl GenericArg<'_> {
pub fn to_ord(&self) -> ast::ParamKindOrd {
match self {
GenericArg::Lifetime(_) => ast::ParamKindOrd::Lifetime,
- GenericArg::Type(_) => ast::ParamKindOrd::Type,
- GenericArg::Const(_) => ast::ParamKindOrd::Const,
- GenericArg::Infer(_) => ast::ParamKindOrd::Infer,
+ GenericArg::Type(_) | GenericArg::Const(_) | GenericArg::Infer(_) => {
+ ast::ParamKindOrd::TypeOrConst
+ }
}
}
@@ -435,7 +429,7 @@ pub enum GenericBound<'hir> {
Trait(PolyTraitRef<'hir>, TraitBoundModifier),
// FIXME(davidtwco): Introduce `PolyTraitRef::LangItem`
LangItemTrait(LangItem, Span, HirId, &'hir GenericArgs<'hir>),
- Outlives(Lifetime),
+ Outlives(&'hir Lifetime),
}
impl GenericBound<'_> {
@@ -581,8 +575,7 @@ impl<'hir> Generics<'hir> {
if self.has_where_clause_predicates {
self.predicates
.iter()
- .filter(|p| p.in_where_clause())
- .last()
+ .rfind(|&p| p.in_where_clause())
.map_or(end, |p| p.span())
.shrink_to_hi()
.to(end)
@@ -738,6 +731,7 @@ pub enum PredicateOrigin {
/// A type bound (e.g., `for<'c> Foo: Send + Clone + 'c`).
#[derive(Debug, HashStable_Generic)]
pub struct WhereBoundPredicate<'hir> {
+ pub hir_id: HirId,
pub span: Span,
/// Origin of the predicate.
pub origin: PredicateOrigin,
@@ -761,7 +755,7 @@ impl<'hir> WhereBoundPredicate<'hir> {
pub struct WhereRegionPredicate<'hir> {
pub span: Span,
pub in_where_clause: bool,
- pub lifetime: Lifetime,
+ pub lifetime: &'hir Lifetime,
pub bounds: GenericBounds<'hir>,
}
@@ -778,7 +772,6 @@ impl<'hir> WhereRegionPredicate<'hir> {
/// An equality predicate (e.g., `T = int`); currently unsupported.
#[derive(Debug, HashStable_Generic)]
pub struct WhereEqPredicate<'hir> {
- pub hir_id: HirId,
pub span: Span,
pub lhs_ty: &'hir Ty<'hir>,
pub rhs_ty: &'hir Ty<'hir>,
@@ -841,7 +834,16 @@ impl<'tcx> OwnerNodes<'tcx> {
impl fmt::Debug for OwnerNodes<'_> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("OwnerNodes")
+ // Do not print all the pointers to all the nodes, as it would be unreadable.
.field("node", &self.nodes[ItemLocalId::from_u32(0)])
+ .field(
+ "parents",
+ &self
+ .nodes
+ .iter_enumerated()
+ .map(|(id, parented_node)| (id, parented_node.as_ref().map(|node| node.parent)))
+ .collect::<Vec<_>>(),
+ )
.field("bodies", &self.bodies)
.field("local_id_to_def_id", &self.local_id_to_def_id)
.field("hash_without_bodies", &self.hash_without_bodies)
@@ -1050,30 +1052,6 @@ pub struct PatField<'hir> {
pub span: Span,
}
-/// Explicit binding annotations given in the HIR for a binding. Note
-/// that this is not the final binding *mode* that we infer after type
-/// inference.
-#[derive(Copy, Clone, PartialEq, Encodable, Debug, HashStable_Generic)]
-pub enum BindingAnnotation {
- /// No binding annotation given: this means that the final binding mode
- /// will depend on whether we have skipped through a `&` reference
- /// when matching. For example, the `x` in `Some(x)` will have binding
- /// mode `None`; if you do `let Some(x) = &Some(22)`, it will
- /// ultimately be inferred to be by-reference.
- ///
- /// Note that implicit reference skipping is not implemented yet (#42640).
- Unannotated,
-
- /// Annotated with `mut x` -- could be either ref or not, similar to `None`.
- Mutable,
-
- /// Annotated as `ref`, like `ref x`
- Ref,
-
- /// Annotated as `ref mut x`.
- RefMut,
-}
-
#[derive(Copy, Clone, PartialEq, Encodable, Debug, HashStable_Generic)]
pub enum RangeEnd {
Included,
@@ -1089,6 +1067,35 @@ impl fmt::Display for RangeEnd {
}
}
+// Equivalent to `Option<usize>`. That type takes up 16 bytes on 64-bit, but
+// this type only takes up 4 bytes, at the cost of being restricted to a
+// maximum value of `u32::MAX - 1`. In practice, this is more than enough.
+#[derive(Clone, Copy, PartialEq, Eq, Hash, HashStable_Generic)]
+pub struct DotDotPos(u32);
+
+impl DotDotPos {
+ // Panics if n >= u32::MAX.
+ pub fn new(n: Option<usize>) -> Self {
+ match n {
+ Some(n) => {
+ assert!(n < u32::MAX as usize);
+ Self(n as u32)
+ }
+ None => Self(u32::MAX),
+ }
+ }
+
+ pub fn as_opt_usize(&self) -> Option<usize> {
+ if self.0 == u32::MAX { None } else { Some(self.0 as usize) }
+ }
+}
+
+impl fmt::Debug for DotDotPos {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ self.as_opt_usize().fmt(f)
+ }
+}
+
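A quick, self-contained check of the `DotDotPos` packing introduced above (the type is copied here in reduced form so the example runs on its own): the position of a `..` fragment round-trips through four bytes, and `None` is encoded as `u32::MAX`.

    #[derive(Clone, Copy, PartialEq, Eq, Debug)]
    struct DotDotPos(u32);

    impl DotDotPos {
        fn new(n: Option<usize>) -> Self {
            match n {
                Some(n) => {
                    assert!(n < u32::MAX as usize);
                    Self(n as u32)
                }
                None => Self(u32::MAX),
            }
        }
        fn as_opt_usize(&self) -> Option<usize> {
            if self.0 == u32::MAX { None } else { Some(self.0 as usize) }
        }
    }

    fn main() {
        // `..` at position 2, as in `Variant(x, y, .., z)`.
        assert_eq!(DotDotPos::new(Some(2)).as_opt_usize(), Some(2));
        // No `..` present.
        assert_eq!(DotDotPos::new(None).as_opt_usize(), None);
        // 4 bytes instead of the 16 an `Option<usize>` would take on 64-bit.
        assert_eq!(std::mem::size_of::<DotDotPos>(), 4);
    }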
#[derive(Debug, HashStable_Generic)]
pub enum PatKind<'hir> {
/// Represents a wildcard pattern (i.e., `_`).
@@ -1105,9 +1112,9 @@ pub enum PatKind<'hir> {
Struct(QPath<'hir>, &'hir [PatField<'hir>], bool),
/// A tuple struct/variant pattern `Variant(x, y, .., z)`.
- /// If the `..` pattern fragment is present, then `Option<usize>` denotes its position.
+ /// If the `..` pattern fragment is present, then `DotDotPos` denotes its position.
/// `0 <= position <= subpats.len()`
- TupleStruct(QPath<'hir>, &'hir [Pat<'hir>], Option<usize>),
+ TupleStruct(QPath<'hir>, &'hir [Pat<'hir>], DotDotPos),
/// An or-pattern `A | B | C`.
/// Invariant: `pats.len() >= 2`.
@@ -1119,7 +1126,7 @@ pub enum PatKind<'hir> {
/// A tuple pattern (e.g., `(a, b)`).
/// If the `..` pattern fragment is present, then `Option<usize>` denotes its position.
/// `0 <= position <= subpats.len()`
- Tuple(&'hir [Pat<'hir>], Option<usize>),
+ Tuple(&'hir [Pat<'hir>], DotDotPos),
/// A `box` pattern.
Box(&'hir Pat<'hir>),
@@ -1322,7 +1329,7 @@ pub enum StmtKind<'hir> {
Semi(&'hir Expr<'hir>),
}
-/// Represents a `let` statement (i.e., `let <pat>:<ty> = <expr>;`).
+/// Represents a `let` statement (i.e., `let <pat>:<ty> = <init>;`).
#[derive(Debug, HashStable_Generic)]
pub struct Local<'hir> {
pub pat: &'hir Pat<'hir>,
@@ -1439,7 +1446,7 @@ pub struct BodyId {
#[derive(Debug, HashStable_Generic)]
pub struct Body<'hir> {
pub params: &'hir [Param<'hir>],
- pub value: Expr<'hir>,
+ pub value: &'hir Expr<'hir>,
pub generator_kind: Option<GeneratorKind>,
}
@@ -1626,7 +1633,7 @@ pub struct AnonConst {
}
/// An expression.
-#[derive(Debug)]
+#[derive(Debug, HashStable_Generic)]
pub struct Expr<'hir> {
pub hir_id: HirId,
pub kind: ExprKind<'hir>,
@@ -1882,11 +1889,11 @@ pub enum ExprKind<'hir> {
///
/// The `PathSegment` represents the method name and its generic arguments
/// (within the angle brackets).
- /// The first element of the `&[Expr]` is the expression that evaluates
+ /// The `&Expr` is the expression that evaluates
/// to the object on which the method is being called (the receiver),
- /// and the remaining elements are the rest of the arguments.
+ /// and the `&[Expr]` is the rest of the arguments.
/// Thus, `x.foo::<Bar, Baz>(a, b, c, d)` is represented as
- /// `ExprKind::MethodCall(PathSegment { foo, [Bar, Baz] }, [x, a, b, c, d], span)`.
+ /// `ExprKind::MethodCall(PathSegment { foo, [Bar, Baz] }, x, [a, b, c, d], span)`.
/// The final `Span` represents the span of the function and arguments
/// (e.g. `foo::<Bar, Baz>(a, b, c, d)` in `x.foo::<Bar, Baz>(a, b, c, d)`
///
@@ -1894,7 +1901,7 @@ pub enum ExprKind<'hir> {
/// the `hir_id` of the `MethodCall` node itself.
///
/// [`type_dependent_def_id`]: ../../rustc_middle/ty/struct.TypeckResults.html#method.type_dependent_def_id
- MethodCall(&'hir PathSegment<'hir>, &'hir [Expr<'hir>], Span),
+ MethodCall(&'hir PathSegment<'hir>, &'hir Expr<'hir>, &'hir [Expr<'hir>], Span),
/// A tuple (e.g., `(a, b, c, d)`).
Tup(&'hir [Expr<'hir>]),
/// A binary operation (e.g., `a + b`, `a * b`).
@@ -2200,14 +2207,14 @@ pub struct FnSig<'hir> {
// so it can be fetched later.
#[derive(Copy, Clone, PartialEq, Eq, Encodable, Decodable, Debug, HashStable_Generic)]
pub struct TraitItemId {
- pub def_id: LocalDefId,
+ pub owner_id: OwnerId,
}
impl TraitItemId {
#[inline]
pub fn hir_id(&self) -> HirId {
// Items are always HIR owners.
- HirId::make_owner(self.def_id)
+ HirId::make_owner(self.owner_id.def_id)
}
}
@@ -2218,7 +2225,7 @@ impl TraitItemId {
#[derive(Debug, HashStable_Generic)]
pub struct TraitItem<'hir> {
pub ident: Ident,
- pub def_id: LocalDefId,
+ pub owner_id: OwnerId,
pub generics: &'hir Generics<'hir>,
pub kind: TraitItemKind<'hir>,
pub span: Span,
@@ -2229,11 +2236,11 @@ impl TraitItem<'_> {
#[inline]
pub fn hir_id(&self) -> HirId {
// Items are always HIR owners.
- HirId::make_owner(self.def_id)
+ HirId::make_owner(self.owner_id.def_id)
}
pub fn trait_item_id(&self) -> TraitItemId {
- TraitItemId { def_id: self.def_id }
+ TraitItemId { owner_id: self.owner_id }
}
}
@@ -2264,14 +2271,14 @@ pub enum TraitItemKind<'hir> {
// so it can be fetched later.
#[derive(Copy, Clone, PartialEq, Eq, Encodable, Decodable, Debug, HashStable_Generic)]
pub struct ImplItemId {
- pub def_id: LocalDefId,
+ pub owner_id: OwnerId,
}
impl ImplItemId {
#[inline]
pub fn hir_id(&self) -> HirId {
// Items are always HIR owners.
- HirId::make_owner(self.def_id)
+ HirId::make_owner(self.owner_id.def_id)
}
}
@@ -2279,7 +2286,7 @@ impl ImplItemId {
#[derive(Debug, HashStable_Generic)]
pub struct ImplItem<'hir> {
pub ident: Ident,
- pub def_id: LocalDefId,
+ pub owner_id: OwnerId,
pub generics: &'hir Generics<'hir>,
pub kind: ImplItemKind<'hir>,
pub defaultness: Defaultness,
@@ -2291,11 +2298,11 @@ impl ImplItem<'_> {
#[inline]
pub fn hir_id(&self) -> HirId {
// Items are always HIR owners.
- HirId::make_owner(self.def_id)
+ HirId::make_owner(self.owner_id.def_id)
}
pub fn impl_item_id(&self) -> ImplItemId {
- ImplItemId { def_id: self.def_id }
+ ImplItemId { owner_id: self.owner_id }
}
}
@@ -2308,7 +2315,7 @@ pub enum ImplItemKind<'hir> {
/// An associated function implementation with the given signature and body.
Fn(FnSig<'hir>, BodyId),
/// An associated type.
- TyAlias(&'hir Ty<'hir>),
+ Type(&'hir Ty<'hir>),
}
// The name of the associated type for `Fn` return types.
@@ -2380,7 +2387,7 @@ impl TypeBinding<'_> {
}
}
-#[derive(Debug)]
+#[derive(Debug, HashStable_Generic)]
pub struct Ty<'hir> {
pub hir_id: HirId,
pub kind: TyKind<'hir>,
@@ -2397,11 +2404,44 @@ impl<'hir> Ty<'hir> {
return None;
};
match path.res {
- Res::Def(DefKind::TyParam, def_id)
- | Res::SelfTy { trait_: Some(def_id), alias_to: None } => Some((def_id, segment.ident)),
+ Res::Def(DefKind::TyParam, def_id) | Res::SelfTyParam { trait_: def_id } => {
+ Some((def_id, segment.ident))
+ }
_ => None,
}
}
+
+ pub fn peel_refs(&self) -> &Self {
+ let mut final_ty = self;
+ while let TyKind::Rptr(_, MutTy { ty, .. }) = &final_ty.kind {
+ final_ty = &ty;
+ }
+ final_ty
+ }
+
+ pub fn find_self_aliases(&self) -> Vec<Span> {
+ use crate::intravisit::Visitor;
+ struct MyVisitor(Vec<Span>);
+ impl<'v> Visitor<'v> for MyVisitor {
+ fn visit_ty(&mut self, t: &'v Ty<'v>) {
+ if matches!(
+ &t.kind,
+ TyKind::Path(QPath::Resolved(
+ _,
+ Path { res: crate::def::Res::SelfTyAlias { .. }, .. },
+ ))
+ ) {
+ self.0.push(t.span);
+ return;
+ }
+ crate::intravisit::walk_ty(self, t);
+ }
+ }
+
+ let mut my_visitor = MyVisitor(vec![]);
+ my_visitor.visit_ty(self);
+ my_visitor.0
+ }
}
/// Not represented directly in the AST; referred to by name through a `ty_path`.
@@ -2506,6 +2546,7 @@ pub struct OpaqueTy<'hir> {
pub generics: &'hir Generics<'hir>,
pub bounds: GenericBounds<'hir>,
pub origin: OpaqueTyOrigin,
+ pub in_trait: bool,
}
/// From whence the opaque type came.
@@ -2529,7 +2570,7 @@ pub enum TyKind<'hir> {
/// A raw pointer (i.e., `*const T` or `*mut T`).
Ptr(MutTy<'hir>),
/// A reference (i.e., `&'a T` or `&'a mut T`).
- Rptr(Lifetime, MutTy<'hir>),
+ Rptr(&'hir Lifetime, MutTy<'hir>),
/// A bare function (e.g., `fn(usize) -> bool`).
BareFn(&'hir BareFnTy<'hir>),
/// The never type (`!`).
@@ -2545,10 +2586,12 @@ pub enum TyKind<'hir> {
///
/// The generic argument list contains the lifetimes (and in the future
/// possibly parameters) that are actually bound on the `impl Trait`.
- OpaqueDef(ItemId, &'hir [GenericArg<'hir>]),
+ ///
+ /// The last parameter specifies whether this opaque appears in a trait definition.
+ OpaqueDef(ItemId, &'hir [GenericArg<'hir>], bool),
/// A trait object type `Bound1 + Bound2 + Bound3`
/// where `Bound` is a trait or a lifetime.
- TraitObject(&'hir [PolyTraitRef<'hir>], Lifetime, TraitObjectSyntax),
+ TraitObject(&'hir [PolyTraitRef<'hir>], &'hir Lifetime, TraitObjectSyntax),
/// Unused for now.
Typeof(AnonConst),
/// `TyKind::Infer` means the type should be inferred instead of it having been
@@ -2562,23 +2605,23 @@ pub enum TyKind<'hir> {
pub enum InlineAsmOperand<'hir> {
In {
reg: InlineAsmRegOrRegClass,
- expr: Expr<'hir>,
+ expr: &'hir Expr<'hir>,
},
Out {
reg: InlineAsmRegOrRegClass,
late: bool,
- expr: Option<Expr<'hir>>,
+ expr: Option<&'hir Expr<'hir>>,
},
InOut {
reg: InlineAsmRegOrRegClass,
late: bool,
- expr: Expr<'hir>,
+ expr: &'hir Expr<'hir>,
},
SplitInOut {
reg: InlineAsmRegOrRegClass,
late: bool,
- in_expr: Expr<'hir>,
- out_expr: Option<Expr<'hir>>,
+ in_expr: &'hir Expr<'hir>,
+ out_expr: Option<&'hir Expr<'hir>>,
},
Const {
anon_const: AnonConst,
@@ -2643,7 +2686,7 @@ pub struct FnDecl<'hir> {
}
/// Represents what type of implicit self a function has, if any.
-#[derive(Copy, Clone, Encodable, Decodable, Debug, HashStable_Generic)]
+#[derive(Copy, Clone, PartialEq, Eq, Encodable, Decodable, Debug, HashStable_Generic)]
pub enum ImplicitSelfKind {
/// Represents a `fn x(self);`.
Imm,
@@ -2871,14 +2914,14 @@ impl<'hir> VariantData<'hir> {
// so it can be fetched later.
#[derive(Copy, Clone, PartialEq, Eq, Encodable, Decodable, Debug, Hash, HashStable_Generic)]
pub struct ItemId {
- pub def_id: LocalDefId,
+ pub owner_id: OwnerId,
}
impl ItemId {
#[inline]
pub fn hir_id(&self) -> HirId {
// Items are always HIR owners.
- HirId::make_owner(self.def_id)
+ HirId::make_owner(self.owner_id.def_id)
}
}
@@ -2888,7 +2931,7 @@ impl ItemId {
#[derive(Debug, HashStable_Generic)]
pub struct Item<'hir> {
pub ident: Ident,
- pub def_id: LocalDefId,
+ pub owner_id: OwnerId,
pub kind: ItemKind<'hir>,
pub span: Span,
pub vis_span: Span,
@@ -2898,11 +2941,11 @@ impl Item<'_> {
#[inline]
pub fn hir_id(&self) -> HirId {
// Items are always HIR owners.
- HirId::make_owner(self.def_id)
+ HirId::make_owner(self.owner_id.def_id)
}
pub fn item_id(&self) -> ItemId {
- ItemId { def_id: self.def_id }
+ ItemId { owner_id: self.owner_id }
}
}
@@ -2992,7 +3035,7 @@ pub enum ItemKind<'hir> {
/// A MBE macro definition (`macro_rules!` or `macro`).
Macro(ast::MacroDef, MacroKind),
/// A module.
- Mod(Mod<'hir>),
+ Mod(&'hir Mod<'hir>),
/// An external module, e.g. `extern { .. }`.
ForeignMod { abi: Abi, items: &'hir [ForeignItemRef] },
/// Module-level inline assembly (from `global_asm!`).
@@ -3115,14 +3158,14 @@ pub enum AssocItemKind {
// so it can be fetched later.
#[derive(Copy, Clone, PartialEq, Eq, Encodable, Decodable, Debug, HashStable_Generic)]
pub struct ForeignItemId {
- pub def_id: LocalDefId,
+ pub owner_id: OwnerId,
}
impl ForeignItemId {
#[inline]
pub fn hir_id(&self) -> HirId {
// Items are always HIR owners.
- HirId::make_owner(self.def_id)
+ HirId::make_owner(self.owner_id.def_id)
}
}
@@ -3143,7 +3186,7 @@ pub struct ForeignItemRef {
pub struct ForeignItem<'hir> {
pub ident: Ident,
pub kind: ForeignItemKind<'hir>,
- pub def_id: LocalDefId,
+ pub owner_id: OwnerId,
pub span: Span,
pub vis_span: Span,
}
@@ -3152,11 +3195,11 @@ impl ForeignItem<'_> {
#[inline]
pub fn hir_id(&self) -> HirId {
// Items are always HIR owners.
- HirId::make_owner(self.def_id)
+ HirId::make_owner(self.owner_id.def_id)
}
pub fn foreign_item_id(&self) -> ForeignItemId {
- ForeignItemId { def_id: self.def_id }
+ ForeignItemId { owner_id: self.owner_id }
}
}
@@ -3217,7 +3260,7 @@ impl<'hir> OwnerNode<'hir> {
}
}
- pub fn fn_decl(&self) -> Option<&FnDecl<'hir>> {
+ pub fn fn_decl(self) -> Option<&'hir FnDecl<'hir>> {
match self {
OwnerNode::TraitItem(TraitItem { kind: TraitItemKind::Fn(fn_sig, _), .. })
| OwnerNode::ImplItem(ImplItem { kind: ImplItemKind::Fn(fn_sig, _), .. })
@@ -3246,12 +3289,12 @@ impl<'hir> OwnerNode<'hir> {
Node::generics(self.into())
}
- pub fn def_id(self) -> LocalDefId {
+ pub fn def_id(self) -> OwnerId {
match self {
- OwnerNode::Item(Item { def_id, .. })
- | OwnerNode::TraitItem(TraitItem { def_id, .. })
- | OwnerNode::ImplItem(ImplItem { def_id, .. })
- | OwnerNode::ForeignItem(ForeignItem { def_id, .. }) => *def_id,
+ OwnerNode::Item(Item { owner_id, .. })
+ | OwnerNode::TraitItem(TraitItem { owner_id, .. })
+ | OwnerNode::ImplItem(ImplItem { owner_id, .. })
+ | OwnerNode::ForeignItem(ForeignItem { owner_id, .. }) => *owner_id,
OwnerNode::Crate(..) => crate::CRATE_HIR_ID.owner,
}
}
@@ -3332,12 +3375,14 @@ pub enum Node<'hir> {
Field(&'hir FieldDef<'hir>),
AnonConst(&'hir AnonConst),
Expr(&'hir Expr<'hir>),
+ ExprField(&'hir ExprField<'hir>),
Stmt(&'hir Stmt<'hir>),
PathSegment(&'hir PathSegment<'hir>),
Ty(&'hir Ty<'hir>),
TypeBinding(&'hir TypeBinding<'hir>),
TraitRef(&'hir TraitRef<'hir>),
Pat(&'hir Pat<'hir>),
+ PatField(&'hir PatField<'hir>),
Arm(&'hir Arm<'hir>),
Block(&'hir Block<'hir>),
Local(&'hir Local<'hir>),
@@ -3388,6 +3433,8 @@ impl<'hir> Node<'hir> {
| Node::Block(..)
| Node::Ctor(..)
| Node::Pat(..)
+ | Node::PatField(..)
+ | Node::ExprField(..)
| Node::Arm(..)
| Node::Local(..)
| Node::Crate(..)
@@ -3397,19 +3444,20 @@ impl<'hir> Node<'hir> {
}
}
- pub fn fn_decl(&self) -> Option<&'hir FnDecl<'hir>> {
+ pub fn fn_decl(self) -> Option<&'hir FnDecl<'hir>> {
match self {
Node::TraitItem(TraitItem { kind: TraitItemKind::Fn(fn_sig, _), .. })
| Node::ImplItem(ImplItem { kind: ImplItemKind::Fn(fn_sig, _), .. })
| Node::Item(Item { kind: ItemKind::Fn(fn_sig, _, _), .. }) => Some(fn_sig.decl),
- Node::ForeignItem(ForeignItem { kind: ForeignItemKind::Fn(fn_decl, _, _), .. }) => {
+ Node::Expr(Expr { kind: ExprKind::Closure(Closure { fn_decl, .. }), .. })
+ | Node::ForeignItem(ForeignItem { kind: ForeignItemKind::Fn(fn_decl, _, _), .. }) => {
Some(fn_decl)
}
_ => None,
}
}
- pub fn fn_sig(&self) -> Option<&'hir FnSig<'hir>> {
+ pub fn fn_sig(self) -> Option<&'hir FnSig<'hir>> {
match self {
Node::TraitItem(TraitItem { kind: TraitItemKind::Fn(fn_sig, _), .. })
| Node::ImplItem(ImplItem { kind: ImplItemKind::Fn(fn_sig, _), .. })
@@ -3490,17 +3538,35 @@ impl<'hir> Node<'hir> {
#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
mod size_asserts {
use super::*;
- // These are in alphabetical order, which is easy to maintain.
- rustc_data_structures::static_assert_size!(Block<'static>, 48);
- rustc_data_structures::static_assert_size!(Expr<'static>, 56);
- rustc_data_structures::static_assert_size!(ForeignItem<'static>, 72);
- rustc_data_structures::static_assert_size!(GenericBound<'_>, 48);
- rustc_data_structures::static_assert_size!(Generics<'static>, 56);
- rustc_data_structures::static_assert_size!(ImplItem<'static>, 88);
- rustc_data_structures::static_assert_size!(Impl<'static>, 80);
- rustc_data_structures::static_assert_size!(Item<'static>, 80);
- rustc_data_structures::static_assert_size!(Pat<'static>, 88);
- rustc_data_structures::static_assert_size!(QPath<'static>, 24);
- rustc_data_structures::static_assert_size!(TraitItem<'static>, 96);
- rustc_data_structures::static_assert_size!(Ty<'static>, 72);
+ // tidy-alphabetical-start
+ static_assert_size!(Block<'_>, 48);
+ static_assert_size!(Body<'_>, 32);
+ static_assert_size!(Expr<'_>, 64);
+ static_assert_size!(ExprKind<'_>, 48);
+ static_assert_size!(FnDecl<'_>, 40);
+ static_assert_size!(ForeignItem<'_>, 72);
+ static_assert_size!(ForeignItemKind<'_>, 40);
+ static_assert_size!(GenericArg<'_>, 24);
+ static_assert_size!(GenericBound<'_>, 48);
+ static_assert_size!(Generics<'_>, 56);
+ static_assert_size!(Impl<'_>, 80);
+ static_assert_size!(ImplItem<'_>, 80);
+ static_assert_size!(ImplItemKind<'_>, 32);
+ static_assert_size!(Item<'_>, 80);
+ static_assert_size!(ItemKind<'_>, 48);
+ static_assert_size!(Local<'_>, 64);
+ static_assert_size!(Param<'_>, 32);
+ static_assert_size!(Pat<'_>, 72);
+ static_assert_size!(Path<'_>, 40);
+ static_assert_size!(PathSegment<'_>, 48);
+ static_assert_size!(PatKind<'_>, 48);
+ static_assert_size!(QPath<'_>, 24);
+ static_assert_size!(Res, 12);
+ static_assert_size!(Stmt<'_>, 32);
+ static_assert_size!(StmtKind<'_>, 16);
+ static_assert_size!(TraitItem<'_>, 88);
+ static_assert_size!(TraitItemKind<'_>, 48);
+ static_assert_size!(Ty<'_>, 48);
+ static_assert_size!(TyKind<'_>, 32);
+ // tidy-alphabetical-end
}
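The rewritten size assertions call `static_assert_size!` unqualified, so the macro is assumed to be in scope at the crate level rather than referenced through `rustc_data_structures::`. One common way such a compile-time size check can be written, shown here only as a sketch rather than rustc's exact macro:

    // Sketch of a compile-time size assertion in the spirit of `static_assert_size!`.
    macro_rules! static_assert_size {
        ($ty:ty, $size:expr) => {
            // Fails to compile unless `size_of::<$ty>()` equals `$size`.
            const _: [(); $size] = [(); ::std::mem::size_of::<$ty>()];
        };
    }

    static_assert_size!(u64, 8);
    static_assert_size!([u16; 4], 8);

    fn main() {}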
diff --git a/compiler/rustc_hir/src/hir_id.rs b/compiler/rustc_hir/src/hir_id.rs
index 346ac9e96..752f760ea 100644
--- a/compiler/rustc_hir/src/hir_id.rs
+++ b/compiler/rustc_hir/src/hir_id.rs
@@ -1,6 +1,43 @@
-use crate::def_id::{LocalDefId, CRATE_DEF_ID};
+use crate::def_id::{DefId, LocalDefId, CRATE_DEF_ID};
+use rustc_data_structures::stable_hasher::{HashStable, StableHasher, ToStableHashKey};
+use rustc_span::{def_id::DefPathHash, HashStableContext};
use std::fmt;
+#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
+#[derive(Encodable, Decodable)]
+pub struct OwnerId {
+ pub def_id: LocalDefId,
+}
+
+impl From<OwnerId> for HirId {
+ fn from(owner: OwnerId) -> HirId {
+ HirId { owner, local_id: ItemLocalId::from_u32(0) }
+ }
+}
+
+impl OwnerId {
+ #[inline]
+ pub fn to_def_id(self) -> DefId {
+ self.def_id.to_def_id()
+ }
+}
+
+impl<CTX: HashStableContext> HashStable<CTX> for OwnerId {
+ #[inline]
+ fn hash_stable(&self, hcx: &mut CTX, hasher: &mut StableHasher) {
+ self.to_stable_hash_key(hcx).hash_stable(hcx, hasher);
+ }
+}
+
+impl<CTX: HashStableContext> ToStableHashKey<CTX> for OwnerId {
+ type KeyType = DefPathHash;
+
+ #[inline]
+ fn to_stable_hash_key(&self, hcx: &CTX) -> DefPathHash {
+ hcx.def_path_hash(self.to_def_id())
+ }
+}
+
/// Uniquely identifies a node in the HIR of the current crate. It is
/// composed of the `owner`, which is the `LocalDefId` of the directly enclosing
/// `hir::Item`, `hir::TraitItem`, or `hir::ImplItem` (i.e., the closest "item-like"),
@@ -15,19 +52,23 @@ use std::fmt;
#[derive(Encodable, Decodable, HashStable_Generic)]
#[rustc_pass_by_value]
pub struct HirId {
- pub owner: LocalDefId,
+ pub owner: OwnerId,
pub local_id: ItemLocalId,
}
impl HirId {
+ /// Signals a local id which should never be used.
+ pub const INVALID: HirId =
+ HirId { owner: OwnerId { def_id: CRATE_DEF_ID }, local_id: ItemLocalId::INVALID };
+
#[inline]
- pub fn expect_owner(self) -> LocalDefId {
+ pub fn expect_owner(self) -> OwnerId {
assert_eq!(self.local_id.index(), 0);
self.owner
}
#[inline]
- pub fn as_owner(self) -> Option<LocalDefId> {
+ pub fn as_owner(self) -> Option<OwnerId> {
if self.local_id.index() == 0 { Some(self.owner) } else { None }
}
@@ -38,11 +79,14 @@ impl HirId {
#[inline]
pub fn make_owner(owner: LocalDefId) -> Self {
- Self { owner, local_id: ItemLocalId::from_u32(0) }
+ Self { owner: OwnerId { def_id: owner }, local_id: ItemLocalId::from_u32(0) }
}
pub fn index(self) -> (usize, usize) {
- (rustc_index::vec::Idx::index(self.owner), rustc_index::vec::Idx::index(self.local_id))
+ (
+ rustc_index::vec::Idx::index(self.owner.def_id),
+ rustc_index::vec::Idx::index(self.local_id),
+ )
}
}
@@ -64,8 +108,13 @@ impl PartialOrd for HirId {
}
}
-rustc_data_structures::define_id_collections!(HirIdMap, HirIdSet, HirId);
-rustc_data_structures::define_id_collections!(ItemLocalMap, ItemLocalSet, ItemLocalId);
+rustc_data_structures::define_stable_id_collections!(HirIdMap, HirIdSet, HirIdMapEntry, HirId);
+rustc_data_structures::define_id_collections!(
+ ItemLocalMap,
+ ItemLocalSet,
+ ItemLocalMapEntry,
+ ItemLocalId
+);
rustc_index::newtype_index! {
/// An `ItemLocalId` uniquely identifies something within a given "item-like";
@@ -86,4 +135,7 @@ impl ItemLocalId {
}
/// The `HirId` corresponding to `CRATE_NODE_ID` and `CRATE_DEF_ID`.
-pub const CRATE_HIR_ID: HirId = HirId { owner: CRATE_DEF_ID, local_id: ItemLocalId::from_u32(0) };
+pub const CRATE_HIR_ID: HirId =
+ HirId { owner: OwnerId { def_id: CRATE_DEF_ID }, local_id: ItemLocalId::from_u32(0) };
+
+pub const CRATE_OWNER_ID: OwnerId = OwnerId { def_id: CRATE_DEF_ID };
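A reduced sketch of the relation the new `OwnerId` type establishes: a `HirId` is an owner plus an item-local index, and converting an `OwnerId` into a `HirId` always picks local index 0 (the owner node itself). The types below are simplified stand-ins, not the rustc definitions.

    #[derive(Clone, Copy, PartialEq, Eq, Debug)]
    struct LocalDefId(u32);

    #[derive(Clone, Copy, PartialEq, Eq, Debug)]
    struct OwnerId {
        def_id: LocalDefId,
    }

    #[derive(Clone, Copy, PartialEq, Eq, Debug)]
    struct HirId {
        owner: OwnerId,
        local_id: u32,
    }

    impl From<OwnerId> for HirId {
        // Mirrors the new `From<OwnerId> for HirId`: the owner is local_id 0 of itself.
        fn from(owner: OwnerId) -> HirId {
            HirId { owner, local_id: 0 }
        }
    }

    fn main() {
        let owner = OwnerId { def_id: LocalDefId(7) };
        assert_eq!(HirId::from(owner), HirId { owner, local_id: 0 });
    }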
diff --git a/compiler/rustc_hir/src/intravisit.rs b/compiler/rustc_hir/src/intravisit.rs
index e676acebe..be77e6fd3 100644
--- a/compiler/rustc_hir/src/intravisit.rs
+++ b/compiler/rustc_hir/src/intravisit.rs
@@ -298,7 +298,7 @@ pub trait Visitor<'v>: Sized {
fn visit_id(&mut self, _hir_id: HirId) {
// Nothing to do.
}
- fn visit_name(&mut self, _span: Span, _name: Symbol) {
+ fn visit_name(&mut self, _name: Symbol) {
// Nothing to do.
}
fn visit_ident(&mut self, ident: Ident) {
@@ -325,6 +325,9 @@ pub trait Visitor<'v>: Sized {
fn visit_pat(&mut self, p: &'v Pat<'v>) {
walk_pat(self, p)
}
+ fn visit_pat_field(&mut self, f: &'v PatField<'v>) {
+ walk_pat_field(self, f)
+ }
fn visit_array_length(&mut self, len: &'v ArrayLen) {
walk_array_len(self, len)
}
@@ -337,6 +340,9 @@ pub trait Visitor<'v>: Sized {
fn visit_let_expr(&mut self, lex: &'v Let<'v>) {
walk_let_expr(self, lex)
}
+ fn visit_expr_field(&mut self, field: &'v ExprField<'v>) {
+ walk_expr_field(self, field)
+ }
fn visit_ty(&mut self, t: &'v Ty<'v>) {
walk_ty(self, t)
}
@@ -355,8 +361,8 @@ pub trait Visitor<'v>: Sized {
fn visit_fn_decl(&mut self, fd: &'v FnDecl<'v>) {
walk_fn_decl(self, fd)
}
- fn visit_fn(&mut self, fk: FnKind<'v>, fd: &'v FnDecl<'v>, b: BodyId, s: Span, id: HirId) {
- walk_fn(self, fk, fd, b, s, id)
+ fn visit_fn(&mut self, fk: FnKind<'v>, fd: &'v FnDecl<'v>, b: BodyId, _: Span, id: HirId) {
+ walk_fn(self, fk, fd, b, id)
}
fn visit_use(&mut self, path: &'v Path<'v>, hir_id: HirId) {
walk_use(self, path, hir_id)
@@ -382,33 +388,20 @@ pub trait Visitor<'v>: Sized {
fn visit_param_bound(&mut self, bounds: &'v GenericBound<'v>) {
walk_param_bound(self, bounds)
}
- fn visit_poly_trait_ref(&mut self, t: &'v PolyTraitRef<'v>, m: TraitBoundModifier) {
- walk_poly_trait_ref(self, t, m)
- }
- fn visit_variant_data(
- &mut self,
- s: &'v VariantData<'v>,
- _: Symbol,
- _: &'v Generics<'v>,
- _parent_id: HirId,
- _: Span,
- ) {
+ fn visit_poly_trait_ref(&mut self, t: &'v PolyTraitRef<'v>) {
+ walk_poly_trait_ref(self, t)
+ }
+ fn visit_variant_data(&mut self, s: &'v VariantData<'v>) {
walk_struct_def(self, s)
}
fn visit_field_def(&mut self, s: &'v FieldDef<'v>) {
walk_field_def(self, s)
}
- fn visit_enum_def(
- &mut self,
- enum_definition: &'v EnumDef<'v>,
- generics: &'v Generics<'v>,
- item_id: HirId,
- _: Span,
- ) {
- walk_enum_def(self, enum_definition, generics, item_id)
+ fn visit_enum_def(&mut self, enum_definition: &'v EnumDef<'v>, item_id: HirId) {
+ walk_enum_def(self, enum_definition, item_id)
}
- fn visit_variant(&mut self, v: &'v Variant<'v>, g: &'v Generics<'v>, item_id: HirId) {
- walk_variant(self, v, g, item_id)
+ fn visit_variant(&mut self, v: &'v Variant<'v>) {
+ walk_variant(self, v)
}
fn visit_label(&mut self, label: &'v Label) {
walk_label(self, label)
@@ -427,17 +420,18 @@ pub trait Visitor<'v>: Sized {
fn visit_lifetime(&mut self, lifetime: &'v Lifetime) {
walk_lifetime(self, lifetime)
}
- fn visit_qpath(&mut self, qpath: &'v QPath<'v>, id: HirId, span: Span) {
- walk_qpath(self, qpath, id, span)
+ // The span is that of the surrounding type/pattern/expr/whatever.
+ fn visit_qpath(&mut self, qpath: &'v QPath<'v>, id: HirId, _span: Span) {
+ walk_qpath(self, qpath, id)
}
fn visit_path(&mut self, path: &'v Path<'v>, _id: HirId) {
walk_path(self, path)
}
- fn visit_path_segment(&mut self, path_span: Span, path_segment: &'v PathSegment<'v>) {
- walk_path_segment(self, path_span, path_segment)
+ fn visit_path_segment(&mut self, path_segment: &'v PathSegment<'v>) {
+ walk_path_segment(self, path_segment)
}
- fn visit_generic_args(&mut self, path_span: Span, generic_args: &'v GenericArgs<'v>) {
- walk_generic_args(self, path_span, generic_args)
+ fn visit_generic_args(&mut self, generic_args: &'v GenericArgs<'v>) {
+ walk_generic_args(self, generic_args)
}
fn visit_assoc_type_binding(&mut self, type_binding: &'v TypeBinding<'v>) {
walk_assoc_type_binding(self, type_binding)
@@ -479,7 +473,7 @@ pub fn walk_local<'v, V: Visitor<'v>>(visitor: &mut V, local: &'v Local<'v>) {
}
pub fn walk_ident<'v, V: Visitor<'v>>(visitor: &mut V, ident: Ident) {
- visitor.visit_name(ident.span, ident.name);
+ visitor.visit_name(ident.name);
}
pub fn walk_label<'v, V: Visitor<'v>>(visitor: &mut V, label: &'v Label) {
@@ -501,11 +495,7 @@ pub fn walk_lifetime<'v, V: Visitor<'v>>(visitor: &mut V, lifetime: &'v Lifetime
}
}
-pub fn walk_poly_trait_ref<'v, V: Visitor<'v>>(
- visitor: &mut V,
- trait_ref: &'v PolyTraitRef<'v>,
- _modifier: TraitBoundModifier,
-) {
+pub fn walk_poly_trait_ref<'v, V: Visitor<'v>>(visitor: &mut V, trait_ref: &'v PolyTraitRef<'v>) {
walk_list!(visitor, visit_generic_param, trait_ref.bound_generic_params);
visitor.visit_trait_ref(&trait_ref.trait_ref);
}
@@ -526,7 +516,7 @@ pub fn walk_item<'v, V: Visitor<'v>>(visitor: &mut V, item: &'v Item<'v>) {
ItemKind::ExternCrate(orig_name) => {
visitor.visit_id(item.hir_id());
if let Some(orig_name) = orig_name {
- visitor.visit_name(item.span, orig_name);
+ visitor.visit_name(orig_name);
}
}
ItemKind::Use(ref path, _) => {
@@ -572,7 +562,7 @@ pub fn walk_item<'v, V: Visitor<'v>>(visitor: &mut V, item: &'v Item<'v>) {
ItemKind::Enum(ref enum_definition, ref generics) => {
visitor.visit_generics(generics);
// `visit_enum_def()` takes care of visiting the `Item`'s `HirId`.
- visitor.visit_enum_def(enum_definition, generics, item.hir_id(), item.span)
+ visitor.visit_enum_def(enum_definition, item.hir_id())
}
ItemKind::Impl(Impl {
unsafety: _,
@@ -595,13 +585,7 @@ pub fn walk_item<'v, V: Visitor<'v>>(visitor: &mut V, item: &'v Item<'v>) {
| ItemKind::Union(ref struct_definition, ref generics) => {
visitor.visit_generics(generics);
visitor.visit_id(item.hir_id());
- visitor.visit_variant_data(
- struct_definition,
- item.ident.name,
- generics,
- item.hir_id(),
- item.span,
- );
+ visitor.visit_variant_data(struct_definition);
}
ItemKind::Trait(.., ref generics, bounds, trait_item_refs) => {
visitor.visit_id(item.hir_id());
@@ -649,28 +633,16 @@ pub fn walk_use<'v, V: Visitor<'v>>(visitor: &mut V, path: &'v Path<'v>, hir_id:
pub fn walk_enum_def<'v, V: Visitor<'v>>(
visitor: &mut V,
enum_definition: &'v EnumDef<'v>,
- generics: &'v Generics<'v>,
item_id: HirId,
) {
visitor.visit_id(item_id);
- walk_list!(visitor, visit_variant, enum_definition.variants, generics, item_id);
+ walk_list!(visitor, visit_variant, enum_definition.variants);
}
-pub fn walk_variant<'v, V: Visitor<'v>>(
- visitor: &mut V,
- variant: &'v Variant<'v>,
- generics: &'v Generics<'v>,
- parent_item_id: HirId,
-) {
+pub fn walk_variant<'v, V: Visitor<'v>>(visitor: &mut V, variant: &'v Variant<'v>) {
visitor.visit_ident(variant.ident);
visitor.visit_id(variant.id);
- visitor.visit_variant_data(
- &variant.data,
- variant.ident.name,
- generics,
- parent_item_id,
- variant.span,
- );
+ visitor.visit_variant_data(&variant.data);
walk_list!(visitor, visit_anon_const, &variant.disr_expr);
}
@@ -695,7 +667,7 @@ pub fn walk_ty<'v, V: Visitor<'v>>(visitor: &mut V, typ: &'v Ty<'v>) {
TyKind::Path(ref qpath) => {
visitor.visit_qpath(qpath, typ.hir_id, typ.span);
}
- TyKind::OpaqueDef(item_id, lifetimes) => {
+ TyKind::OpaqueDef(item_id, lifetimes, _in_trait) => {
visitor.visit_nested_item(item_id);
walk_list!(visitor, visit_generic_arg, lifetimes);
}
@@ -705,7 +677,7 @@ pub fn walk_ty<'v, V: Visitor<'v>>(visitor: &mut V, typ: &'v Ty<'v>) {
}
TyKind::TraitObject(bounds, ref lifetime, _syntax) => {
for bound in bounds {
- visitor.visit_poly_trait_ref(bound, TraitBoundModifier::None);
+ visitor.visit_poly_trait_ref(bound);
}
visitor.visit_lifetime(lifetime);
}
@@ -718,12 +690,7 @@ pub fn walk_inf<'v, V: Visitor<'v>>(visitor: &mut V, inf: &'v InferArg) {
visitor.visit_id(inf.hir_id);
}
-pub fn walk_qpath<'v, V: Visitor<'v>>(
- visitor: &mut V,
- qpath: &'v QPath<'v>,
- id: HirId,
- span: Span,
-) {
+pub fn walk_qpath<'v, V: Visitor<'v>>(visitor: &mut V, qpath: &'v QPath<'v>, id: HirId) {
match *qpath {
QPath::Resolved(ref maybe_qself, ref path) => {
walk_list!(visitor, visit_ty, maybe_qself);
@@ -731,7 +698,7 @@ pub fn walk_qpath<'v, V: Visitor<'v>>(
}
QPath::TypeRelative(ref qself, ref segment) => {
visitor.visit_ty(qself);
- visitor.visit_path_segment(span, segment);
+ visitor.visit_path_segment(segment);
}
QPath::LangItem(..) => {}
}
@@ -739,27 +706,19 @@ pub fn walk_qpath<'v, V: Visitor<'v>>(
pub fn walk_path<'v, V: Visitor<'v>>(visitor: &mut V, path: &'v Path<'v>) {
for segment in path.segments {
- visitor.visit_path_segment(path.span, segment);
+ visitor.visit_path_segment(segment);
}
}
-pub fn walk_path_segment<'v, V: Visitor<'v>>(
- visitor: &mut V,
- path_span: Span,
- segment: &'v PathSegment<'v>,
-) {
+pub fn walk_path_segment<'v, V: Visitor<'v>>(visitor: &mut V, segment: &'v PathSegment<'v>) {
visitor.visit_ident(segment.ident);
- walk_list!(visitor, visit_id, segment.hir_id);
+ visitor.visit_id(segment.hir_id);
if let Some(ref args) = segment.args {
- visitor.visit_generic_args(path_span, args);
+ visitor.visit_generic_args(args);
}
}
-pub fn walk_generic_args<'v, V: Visitor<'v>>(
- visitor: &mut V,
- _path_span: Span,
- generic_args: &'v GenericArgs<'v>,
-) {
+pub fn walk_generic_args<'v, V: Visitor<'v>>(visitor: &mut V, generic_args: &'v GenericArgs<'v>) {
walk_list!(visitor, visit_generic_arg, generic_args.args);
walk_list!(visitor, visit_assoc_type_binding, generic_args.bindings);
}
@@ -770,7 +729,7 @@ pub fn walk_assoc_type_binding<'v, V: Visitor<'v>>(
) {
visitor.visit_id(type_binding.hir_id);
visitor.visit_ident(type_binding.ident);
- visitor.visit_generic_args(type_binding.span, type_binding.gen_args);
+ visitor.visit_generic_args(type_binding.gen_args);
match type_binding.kind {
TypeBindingKind::Equality { ref term } => match term {
Term::Ty(ref ty) => visitor.visit_ty(ty),
@@ -792,11 +751,7 @@ pub fn walk_pat<'v, V: Visitor<'v>>(visitor: &mut V, pattern: &'v Pat<'v>) {
}
PatKind::Struct(ref qpath, fields, _) => {
visitor.visit_qpath(qpath, pattern.hir_id, pattern.span);
- for field in fields {
- visitor.visit_id(field.hir_id);
- visitor.visit_ident(field.ident);
- visitor.visit_pat(&field.pat)
- }
+ walk_list!(visitor, visit_pat_field, fields);
}
PatKind::Or(pats) => walk_list!(visitor, visit_pat, pats),
PatKind::Tuple(tuple_elements, _) => {
@@ -823,6 +778,12 @@ pub fn walk_pat<'v, V: Visitor<'v>>(visitor: &mut V, pattern: &'v Pat<'v>) {
}
}
+pub fn walk_pat_field<'v, V: Visitor<'v>>(visitor: &mut V, field: &'v PatField<'v>) {
+ visitor.visit_id(field.hir_id);
+ visitor.visit_ident(field.ident);
+ visitor.visit_pat(&field.pat)
+}
+
pub fn walk_foreign_item<'v, V: Visitor<'v>>(visitor: &mut V, foreign_item: &'v ForeignItem<'v>) {
visitor.visit_id(foreign_item.hir_id());
visitor.visit_ident(foreign_item.ident);
@@ -842,12 +803,12 @@ pub fn walk_foreign_item<'v, V: Visitor<'v>>(visitor: &mut V, foreign_item: &'v
pub fn walk_param_bound<'v, V: Visitor<'v>>(visitor: &mut V, bound: &'v GenericBound<'v>) {
match *bound {
- GenericBound::Trait(ref typ, modifier) => {
- visitor.visit_poly_trait_ref(typ, modifier);
+ GenericBound::Trait(ref typ, _modifier) => {
+ visitor.visit_poly_trait_ref(typ);
}
- GenericBound::LangItemTrait(_, span, hir_id, args) => {
+ GenericBound::LangItemTrait(_, _span, hir_id, args) => {
visitor.visit_id(hir_id);
- visitor.visit_generic_args(span, args);
+ visitor.visit_generic_args(args);
}
GenericBound::Outlives(ref lifetime) => visitor.visit_lifetime(lifetime),
}
@@ -886,23 +847,28 @@ pub fn walk_where_predicate<'v, V: Visitor<'v>>(
) {
match *predicate {
WherePredicate::BoundPredicate(WhereBoundPredicate {
+ hir_id,
ref bounded_ty,
bounds,
bound_generic_params,
- ..
+ origin: _,
+ span: _,
}) => {
+ visitor.visit_id(hir_id);
visitor.visit_ty(bounded_ty);
walk_list!(visitor, visit_param_bound, bounds);
walk_list!(visitor, visit_generic_param, bound_generic_params);
}
- WherePredicate::RegionPredicate(WhereRegionPredicate { ref lifetime, bounds, .. }) => {
+ WherePredicate::RegionPredicate(WhereRegionPredicate {
+ ref lifetime,
+ bounds,
+ span: _,
+ in_where_clause: _,
+ }) => {
visitor.visit_lifetime(lifetime);
walk_list!(visitor, visit_param_bound, bounds);
}
- WherePredicate::EqPredicate(WhereEqPredicate {
- hir_id, ref lhs_ty, ref rhs_ty, ..
- }) => {
- visitor.visit_id(hir_id);
+ WherePredicate::EqPredicate(WhereEqPredicate { ref lhs_ty, ref rhs_ty, span: _ }) => {
visitor.visit_ty(lhs_ty);
visitor.visit_ty(rhs_ty);
}
@@ -936,7 +902,6 @@ pub fn walk_fn<'v, V: Visitor<'v>>(
function_kind: FnKind<'v>,
function_declaration: &'v FnDecl<'v>,
body_id: BodyId,
- _span: Span,
id: HirId,
) {
visitor.visit_id(id);
@@ -947,7 +912,7 @@ pub fn walk_fn<'v, V: Visitor<'v>>(
pub fn walk_trait_item<'v, V: Visitor<'v>>(visitor: &mut V, trait_item: &'v TraitItem<'v>) {
// N.B., deliberately force a compilation error if/when new fields are added.
- let TraitItem { ident, generics, ref defaultness, ref kind, span, def_id: _ } = *trait_item;
+ let TraitItem { ident, generics, ref defaultness, ref kind, span, owner_id: _ } = *trait_item;
let hir_id = trait_item.hir_id();
visitor.visit_ident(ident);
visitor.visit_generics(&generics);
@@ -987,7 +952,7 @@ pub fn walk_trait_item_ref<'v, V: Visitor<'v>>(visitor: &mut V, trait_item_ref:
pub fn walk_impl_item<'v, V: Visitor<'v>>(visitor: &mut V, impl_item: &'v ImplItem<'v>) {
// N.B., deliberately force a compilation error if/when new fields are added.
let ImplItem {
- def_id: _,
+ owner_id: _,
ident,
ref generics,
ref kind,
@@ -1014,7 +979,7 @@ pub fn walk_impl_item<'v, V: Visitor<'v>>(visitor: &mut V, impl_item: &'v ImplIt
impl_item.hir_id(),
);
}
- ImplItemKind::TyAlias(ref ty) => {
+ ImplItemKind::Type(ref ty) => {
visitor.visit_id(impl_item.hir_id());
visitor.visit_ty(ty);
}
@@ -1090,6 +1055,12 @@ pub fn walk_let_expr<'v, V: Visitor<'v>>(visitor: &mut V, let_expr: &'v Let<'v>)
walk_list!(visitor, visit_ty, let_expr.ty);
}
+pub fn walk_expr_field<'v, V: Visitor<'v>>(visitor: &mut V, field: &'v ExprField<'v>) {
+ visitor.visit_id(field.hir_id);
+ visitor.visit_ident(field.ident);
+ visitor.visit_expr(&field.expr)
+}
+
pub fn walk_expr<'v, V: Visitor<'v>>(visitor: &mut V, expression: &'v Expr<'v>) {
visitor.visit_id(expression.hir_id);
match expression.kind {
@@ -1104,11 +1075,7 @@ pub fn walk_expr<'v, V: Visitor<'v>>(visitor: &mut V, expression: &'v Expr<'v>)
}
ExprKind::Struct(ref qpath, fields, ref optional_base) => {
visitor.visit_qpath(qpath, expression.hir_id, expression.span);
- for field in fields {
- visitor.visit_id(field.hir_id);
- visitor.visit_ident(field.ident);
- visitor.visit_expr(&field.expr)
- }
+ walk_list!(visitor, visit_expr_field, fields);
walk_list!(visitor, visit_expr, optional_base);
}
ExprKind::Tup(subexpressions) => {
@@ -1118,8 +1085,9 @@ pub fn walk_expr<'v, V: Visitor<'v>>(visitor: &mut V, expression: &'v Expr<'v>)
visitor.visit_expr(callee_expression);
walk_list!(visitor, visit_expr, arguments);
}
- ExprKind::MethodCall(ref segment, arguments, _) => {
- visitor.visit_path_segment(expression.span, segment);
+ ExprKind::MethodCall(ref segment, receiver, arguments, _) => {
+ visitor.visit_path_segment(segment);
+ visitor.visit_expr(receiver);
walk_list!(visitor, visit_expr, arguments);
}
ExprKind::Binary(_, ref left_expression, ref right_expression) => {
diff --git a/compiler/rustc_hir/src/lang_items.rs b/compiler/rustc_hir/src/lang_items.rs
index c337be12a..ca615a491 100644
--- a/compiler/rustc_hir/src/lang_items.rs
+++ b/compiler/rustc_hir/src/lang_items.rs
@@ -8,6 +8,7 @@
//! * Functions called by the compiler itself.
use crate::def_id::DefId;
+use crate::errors::LangItemError;
use crate::{MethodKind, Target};
use rustc_ast as ast;
@@ -115,9 +116,9 @@ macro_rules! language_item_table {
/// Requires that a given `LangItem` was bound and returns the corresponding `DefId`.
/// If it wasn't bound, e.g. due to a missing `#[lang = "<it.name()>"]`,
- /// returns an error message as a string.
- pub fn require(&self, it: LangItem) -> Result<DefId, String> {
- self.items[it as usize].ok_or_else(|| format!("requires `{}` lang_item", it.name()))
+ /// returns an error encapsulating the `LangItem`.
+ pub fn require(&self, it: LangItem) -> Result<DefId, LangItemError> {
+ self.items[it as usize].ok_or_else(|| LangItemError(it))
}
/// Returns the [`DefId`]s of all lang items in a group.
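A minimal, hypothetical sketch (standalone; not part of this diff and not compiler code) of the design change in `require` above: returning a structured `LangItemError` instead of a pre-formatted `String` lets each caller decide how to render a missing lang item.

// Hypothetical standalone analogue; the names mirror the diff but nothing here is rustc code.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum LangItem {
    Sized,
    Copy,
}

impl LangItem {
    fn name(self) -> &'static str {
        match self {
            LangItem::Sized => "sized",
            LangItem::Copy => "copy",
        }
    }
}

#[derive(Debug, Clone, Copy)]
struct DefId(u32);

#[derive(Debug)]
struct LangItemError(LangItem);

struct LanguageItems {
    items: Vec<Option<DefId>>,
}

impl LanguageItems {
    // No message formatting happens here any more; the error carries the item itself.
    fn require(&self, it: LangItem) -> Result<DefId, LangItemError> {
        self.items[it as usize].ok_or(LangItemError(it))
    }
}

fn main() {
    let items = LanguageItems { items: vec![Some(DefId(0)), None] };
    match items.require(LangItem::Copy) {
        Ok(def_id) => println!("found {def_id:?}"),
        // The caller now owns the wording (or can feed the error into a diagnostic).
        Err(LangItemError(it)) => eprintln!("requires `{}` lang_item", it.name()),
    }
}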
@@ -192,7 +193,8 @@ language_item_table! {
DispatchFromDyn, sym::dispatch_from_dyn, dispatch_from_dyn_trait, Target::Trait, GenericRequirement::Minimum(1);
// language items relating to transmutability
- TransmuteTrait, sym::transmute_trait, transmute_trait, Target::Trait, GenericRequirement::Exact(6);
+ TransmuteOpts, sym::transmute_opts, transmute_opts, Target::Struct, GenericRequirement::Exact(0);
+ TransmuteTrait, sym::transmute_trait, transmute_trait, Target::Trait, GenericRequirement::Exact(3);
Add(Op), sym::add, add_trait, Target::Trait, GenericRequirement::Exact(1);
Sub(Op), sym::sub, sub_trait, Target::Trait, GenericRequirement::Exact(1);
@@ -236,7 +238,6 @@ language_item_table! {
Future, sym::future_trait, future_trait, Target::Trait, GenericRequirement::Exact(0);
GeneratorState, sym::generator_state, gen_state, Target::Enum, GenericRequirement::None;
Generator, sym::generator, gen_trait, Target::Trait, GenericRequirement::Minimum(1);
- GeneratorReturn, sym::generator_return, generator_return, Target::AssocTy, GenericRequirement::None;
Unpin, sym::unpin, unpin_trait, Target::Trait, GenericRequirement::None;
Pin, sym::pin, pin_type, Target::Struct, GenericRequirement::None;
@@ -267,8 +268,6 @@ language_item_table! {
DropInPlace, sym::drop_in_place, drop_in_place_fn, Target::Fn, GenericRequirement::Minimum(1);
Oom, sym::oom, oom, Target::Fn, GenericRequirement::None;
AllocLayout, sym::alloc_layout, alloc_layout, Target::Struct, GenericRequirement::None;
- ConstEvalSelect, sym::const_eval_select, const_eval_select, Target::Fn, GenericRequirement::Exact(4);
- ConstConstEvalSelect, sym::const_eval_select_ct,const_eval_select_ct, Target::Fn, GenericRequirement::Exact(4);
Start, sym::start, start_fn, Target::Fn, GenericRequirement::Exact(1);
@@ -290,6 +289,8 @@ language_item_table! {
Try, sym::Try, try_trait, Target::Trait, GenericRequirement::None;
+ Tuple, sym::tuple_trait, tuple_trait, Target::Trait, GenericRequirement::Exact(0);
+
SliceLen, sym::slice_len_fn, slice_len_fn, Target::Method(MethodKind::Inherent), GenericRequirement::None;
// Language items from AST lowering
diff --git a/compiler/rustc_hir/src/lib.rs b/compiler/rustc_hir/src/lib.rs
index 0f9e6fa7b..1c4aa420c 100644
--- a/compiler/rustc_hir/src/lib.rs
+++ b/compiler/rustc_hir/src/lib.rs
@@ -4,18 +4,22 @@
#![feature(associated_type_defaults)]
#![feature(closure_track_caller)]
-#![feature(const_btree_new)]
-#![feature(let_else)]
+#![feature(const_btree_len)]
#![feature(once_cell)]
#![feature(min_specialization)]
#![feature(never_type)]
#![feature(rustc_attrs)]
#![recursion_limit = "256"]
+#![deny(rustc::untranslatable_diagnostic)]
+#![deny(rustc::diagnostic_outside_of_impl)]
#[macro_use]
extern crate rustc_macros;
#[macro_use]
+extern crate tracing;
+
+#[macro_use]
extern crate rustc_data_structures;
extern crate self as rustc_hir;
@@ -25,6 +29,7 @@ pub mod def;
pub mod def_path_hash_map;
pub mod definitions;
pub mod diagnostic_items;
+pub mod errors;
pub use rustc_span::def_id;
mod hir;
pub mod hir_id;
diff --git a/compiler/rustc_hir/src/pat_util.rs b/compiler/rustc_hir/src/pat_util.rs
index 93112199b..0c1819bb0 100644
--- a/compiler/rustc_hir/src/pat_util.rs
+++ b/compiler/rustc_hir/src/pat_util.rs
@@ -1,6 +1,6 @@
use crate::def::{CtorOf, DefKind, Res};
use crate::def_id::DefId;
-use crate::hir::{self, HirId, PatKind};
+use crate::hir::{self, BindingAnnotation, ByRef, HirId, PatKind};
use rustc_data_structures::fx::FxHashSet;
use rustc_span::hygiene::DesugaringKind;
use rustc_span::symbol::Ident;
@@ -35,7 +35,7 @@ pub trait EnumerateAndAdjustIterator {
fn enumerate_and_adjust(
self,
expected_len: usize,
- gap_pos: Option<usize>,
+ gap_pos: hir::DotDotPos,
) -> EnumerateAndAdjust<Self>
where
Self: Sized;
@@ -45,7 +45,7 @@ impl<T: ExactSizeIterator> EnumerateAndAdjustIterator for T {
fn enumerate_and_adjust(
self,
expected_len: usize,
- gap_pos: Option<usize>,
+ gap_pos: hir::DotDotPos,
) -> EnumerateAndAdjust<Self>
where
Self: Sized,
@@ -53,7 +53,7 @@ impl<T: ExactSizeIterator> EnumerateAndAdjustIterator for T {
let actual_len = self.len();
EnumerateAndAdjust {
enumerate: self.enumerate(),
- gap_pos: gap_pos.unwrap_or(expected_len),
+ gap_pos: gap_pos.as_opt_usize().unwrap_or(expected_len),
gap_len: expected_len - actual_len,
}
}
@@ -93,12 +93,7 @@ impl hir::Pat<'_> {
pub fn simple_ident(&self) -> Option<Ident> {
match self.kind {
- PatKind::Binding(
- hir::BindingAnnotation::Unannotated | hir::BindingAnnotation::Mutable,
- _,
- ident,
- None,
- ) => Some(ident),
+ PatKind::Binding(BindingAnnotation(ByRef::No, _), _, ident, None) => Some(ident),
_ => None,
}
}
@@ -135,11 +130,11 @@ impl hir::Pat<'_> {
pub fn contains_explicit_ref_binding(&self) -> Option<hir::Mutability> {
let mut result = None;
self.each_binding(|annotation, _, _, _| match annotation {
- hir::BindingAnnotation::Ref => match result {
+ hir::BindingAnnotation::REF => match result {
None | Some(hir::Mutability::Not) => result = Some(hir::Mutability::Not),
_ => {}
},
- hir::BindingAnnotation::RefMut => result = Some(hir::Mutability::Mut),
+ hir::BindingAnnotation::REF_MUT => result = Some(hir::Mutability::Mut),
_ => {}
});
result
diff --git a/compiler/rustc_hir/src/stable_hash_impls.rs b/compiler/rustc_hir/src/stable_hash_impls.rs
index 8ccd59e8e..23423e8f3 100644
--- a/compiler/rustc_hir/src/stable_hash_impls.rs
+++ b/compiler/rustc_hir/src/stable_hash_impls.rs
@@ -1,8 +1,7 @@
use rustc_data_structures::stable_hasher::{HashStable, StableHasher, ToStableHashKey};
use crate::hir::{
- AttributeMap, BodyId, Crate, Expr, ForeignItemId, ImplItemId, ItemId, OwnerNodes, TraitItemId,
- Ty,
+ AttributeMap, BodyId, Crate, ForeignItemId, ImplItemId, ItemId, OwnerNodes, TraitItemId,
};
use crate::hir_id::{HirId, ItemLocalId};
use rustc_span::def_id::DefPathHash;
@@ -14,8 +13,6 @@ pub trait HashStableContext:
rustc_ast::HashStableContext + rustc_target::HashStableContext
{
fn hash_body_id(&mut self, _: BodyId, hasher: &mut StableHasher);
- fn hash_hir_expr(&mut self, _: &Expr<'_>, hasher: &mut StableHasher);
- fn hash_hir_ty(&mut self, _: &Ty<'_>, hasher: &mut StableHasher);
}
impl<HirCtx: crate::HashStableContext> ToStableHashKey<HirCtx> for HirId {
@@ -23,7 +20,7 @@ impl<HirCtx: crate::HashStableContext> ToStableHashKey<HirCtx> for HirId {
#[inline]
fn to_stable_hash_key(&self, hcx: &HirCtx) -> (DefPathHash, ItemLocalId) {
- let def_path_hash = self.owner.to_stable_hash_key(hcx);
+ let def_path_hash = self.owner.def_id.to_stable_hash_key(hcx);
(def_path_hash, self.local_id)
}
}
@@ -52,7 +49,7 @@ impl<HirCtx: crate::HashStableContext> ToStableHashKey<HirCtx> for ItemId {
#[inline]
fn to_stable_hash_key(&self, hcx: &HirCtx) -> DefPathHash {
- self.def_id.to_stable_hash_key(hcx)
+ self.owner_id.def_id.to_stable_hash_key(hcx)
}
}
@@ -61,7 +58,7 @@ impl<HirCtx: crate::HashStableContext> ToStableHashKey<HirCtx> for TraitItemId {
#[inline]
fn to_stable_hash_key(&self, hcx: &HirCtx) -> DefPathHash {
- self.def_id.to_stable_hash_key(hcx)
+ self.owner_id.def_id.to_stable_hash_key(hcx)
}
}
@@ -70,7 +67,7 @@ impl<HirCtx: crate::HashStableContext> ToStableHashKey<HirCtx> for ImplItemId {
#[inline]
fn to_stable_hash_key(&self, hcx: &HirCtx) -> DefPathHash {
- self.def_id.to_stable_hash_key(hcx)
+ self.owner_id.def_id.to_stable_hash_key(hcx)
}
}
@@ -79,7 +76,7 @@ impl<HirCtx: crate::HashStableContext> ToStableHashKey<HirCtx> for ForeignItemId
#[inline]
fn to_stable_hash_key(&self, hcx: &HirCtx) -> DefPathHash {
- self.def_id.to_stable_hash_key(hcx)
+ self.owner_id.def_id.to_stable_hash_key(hcx)
}
}
@@ -96,18 +93,6 @@ impl<HirCtx: crate::HashStableContext> HashStable<HirCtx> for BodyId {
// want to pick up on a reference changing its target, so we hash the NodeIds
// in "DefPath Mode".
-impl<HirCtx: crate::HashStableContext> HashStable<HirCtx> for Expr<'_> {
- fn hash_stable(&self, hcx: &mut HirCtx, hasher: &mut StableHasher) {
- hcx.hash_hir_expr(self, hasher)
- }
-}
-
-impl<HirCtx: crate::HashStableContext> HashStable<HirCtx> for Ty<'_> {
- fn hash_stable(&self, hcx: &mut HirCtx, hasher: &mut StableHasher) {
- hcx.hash_hir_ty(self, hasher)
- }
-}
-
impl<'tcx, HirCtx: crate::HashStableContext> HashStable<HirCtx> for OwnerNodes<'tcx> {
fn hash_stable(&self, hcx: &mut HirCtx, hasher: &mut StableHasher) {
// We ignore the `nodes` and `bodies` fields since these refer to information included in
diff --git a/compiler/rustc_hir/src/target.rs b/compiler/rustc_hir/src/target.rs
index 6236dea10..5917d5e34 100644
--- a/compiler/rustc_hir/src/target.rs
+++ b/compiler/rustc_hir/src/target.rs
@@ -36,6 +36,7 @@ pub enum Target {
GlobalAsm,
TyAlias,
OpaqueTy,
+ ImplTraitPlaceholder,
Enum,
Variant,
Struct,
@@ -56,6 +57,8 @@ pub enum Target {
GenericParam(GenericParamKind),
MacroDef,
Param,
+ PatField,
+ ExprField,
}
impl Display for Target {
@@ -77,7 +80,13 @@ impl Target {
ItemKind::ForeignMod { .. } => Target::ForeignMod,
ItemKind::GlobalAsm(..) => Target::GlobalAsm,
ItemKind::TyAlias(..) => Target::TyAlias,
- ItemKind::OpaqueTy(..) => Target::OpaqueTy,
+ ItemKind::OpaqueTy(ref opaque) => {
+ if opaque.in_trait {
+ Target::ImplTraitPlaceholder
+ } else {
+ Target::OpaqueTy
+ }
+ }
ItemKind::Enum(..) => Target::Enum,
ItemKind::Struct(..) => Target::Struct,
ItemKind::Union(..) => Target::Union,
@@ -101,6 +110,7 @@ impl Target {
DefKind::GlobalAsm => Target::GlobalAsm,
DefKind::TyAlias => Target::TyAlias,
DefKind::OpaqueTy => Target::OpaqueTy,
+ DefKind::ImplTraitPlaceholder => Target::ImplTraitPlaceholder,
DefKind::Enum => Target::Enum,
DefKind::Struct => Target::Struct,
DefKind::Union => Target::Union,
@@ -155,6 +165,7 @@ impl Target {
Target::GlobalAsm => "global asm",
Target::TyAlias => "type alias",
Target::OpaqueTy => "opaque type",
+ Target::ImplTraitPlaceholder => "opaque type in trait",
Target::Enum => "enum",
Target::Variant => "enum variant",
Target::Struct => "struct",
@@ -183,6 +194,8 @@ impl Target {
},
Target::MacroDef => "macro def",
Target::Param => "function param",
+ Target::PatField => "pattern field",
+ Target::ExprField => "struct field",
}
}
}
diff --git a/compiler/rustc_hir/src/weak_lang_items.rs b/compiler/rustc_hir/src/weak_lang_items.rs
index b6a85c047..da9c9c121 100644
--- a/compiler/rustc_hir/src/weak_lang_items.rs
+++ b/compiler/rustc_hir/src/weak_lang_items.rs
@@ -18,6 +18,12 @@ pub static WEAK_ITEMS_REFS: LazyLock<FxIndexMap<Symbol, LangItem>> = LazyLock::n
map
});
+pub static WEAK_ITEMS_SYMBOLS: LazyLock<FxIndexMap<LangItem, Symbol>> = LazyLock::new(|| {
+ let mut map = FxIndexMap::default();
+ $(map.insert(LangItem::$item, sym::$sym);)*
+ map
+});
+
pub fn link_name(attrs: &[ast::Attribute]) -> Option<Symbol>
{
lang_items::extract(attrs).and_then(|(name, _)| {
diff --git a/compiler/rustc_hir_analysis/Cargo.toml b/compiler/rustc_hir_analysis/Cargo.toml
new file mode 100644
index 000000000..0761d8cdb
--- /dev/null
+++ b/compiler/rustc_hir_analysis/Cargo.toml
@@ -0,0 +1,32 @@
+[package]
+name = "rustc_hir_analysis"
+version = "0.0.0"
+edition = "2021"
+
+[lib]
+test = false
+doctest = false
+
+[dependencies]
+rustc_arena = { path = "../rustc_arena" }
+tracing = "0.1"
+rustc_macros = { path = "../rustc_macros" }
+rustc_middle = { path = "../rustc_middle" }
+rustc_attr = { path = "../rustc_attr" }
+rustc_data_structures = { path = "../rustc_data_structures" }
+rustc_errors = { path = "../rustc_errors" }
+rustc_graphviz = { path = "../rustc_graphviz" }
+rustc_hir = { path = "../rustc_hir" }
+rustc_hir_pretty = { path = "../rustc_hir_pretty" }
+rustc_target = { path = "../rustc_target" }
+rustc_session = { path = "../rustc_session" }
+smallvec = { version = "1.8.1", features = ["union", "may_dangle"] }
+rustc_ast = { path = "../rustc_ast" }
+rustc_span = { path = "../rustc_span" }
+rustc_index = { path = "../rustc_index" }
+rustc_infer = { path = "../rustc_infer" }
+rustc_trait_selection = { path = "../rustc_trait_selection" }
+rustc_lint = { path = "../rustc_lint" }
+rustc_serialize = { path = "../rustc_serialize" }
+rustc_type_ir = { path = "../rustc_type_ir" }
+rustc_feature = { path = "../rustc_feature" }
diff --git a/compiler/rustc_typeck/README.md b/compiler/rustc_hir_analysis/README.md
index b61dbd8c9..b61dbd8c9 100644
--- a/compiler/rustc_typeck/README.md
+++ b/compiler/rustc_hir_analysis/README.md
diff --git a/compiler/rustc_hir_analysis/src/astconv/errors.rs b/compiler/rustc_hir_analysis/src/astconv/errors.rs
new file mode 100644
index 000000000..a9152bdc5
--- /dev/null
+++ b/compiler/rustc_hir_analysis/src/astconv/errors.rs
@@ -0,0 +1,411 @@
+use crate::astconv::AstConv;
+use crate::errors::{ManualImplementation, MissingTypeParams};
+use rustc_data_structures::fx::FxHashMap;
+use rustc_errors::{pluralize, struct_span_err, Applicability, ErrorGuaranteed};
+use rustc_hir as hir;
+use rustc_hir::def_id::DefId;
+use rustc_middle::ty;
+use rustc_session::parse::feature_err;
+use rustc_span::lev_distance::find_best_match_for_name;
+use rustc_span::symbol::{sym, Ident};
+use rustc_span::{Span, Symbol, DUMMY_SP};
+
+use std::collections::BTreeSet;
+
+impl<'o, 'tcx> dyn AstConv<'tcx> + 'o {
+ /// On missing type parameters, emit an E0393 error and provide a structured suggestion using
+ /// the type parameter's name as a placeholder.
+ pub(crate) fn complain_about_missing_type_params(
+ &self,
+ missing_type_params: Vec<Symbol>,
+ def_id: DefId,
+ span: Span,
+ empty_generic_args: bool,
+ ) {
+ if missing_type_params.is_empty() {
+ return;
+ }
+
+ self.tcx().sess.emit_err(MissingTypeParams {
+ span,
+ def_span: self.tcx().def_span(def_id),
+ span_snippet: self.tcx().sess.source_map().span_to_snippet(span).ok(),
+ missing_type_params,
+ empty_generic_args,
+ });
+ }
+
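A minimal, hypothetical example (assumed; not from this diff) of the E0393 situation this helper reports: a defaulted type parameter that references `Self` cannot be left implicit in a trait object, and the suggestion fills the parameter's name in as a placeholder.

trait A<T = Self> {}

// error[E0393]: the type parameter `T` must be explicitly specified
// fn f(_: &dyn A) {}

fn g(_: &dyn A<u8>) {} // fine once `T` is spelled out

fn main() {}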
+ /// When the code is using the `Fn` traits directly, instead of the `Fn(A) -> B` syntax, emit
+ /// an error and attempt to build a reasonable structured suggestion.
+ pub(crate) fn complain_about_internal_fn_trait(
+ &self,
+ span: Span,
+ trait_def_id: DefId,
+ trait_segment: &'_ hir::PathSegment<'_>,
+ is_impl: bool,
+ ) {
+ if self.tcx().features().unboxed_closures {
+ return;
+ }
+
+ let trait_def = self.tcx().trait_def(trait_def_id);
+ if !trait_def.paren_sugar {
+ if trait_segment.args().parenthesized {
+ // For now, require that parenthetical notation be used only with `Fn()` etc.
+ let mut err = feature_err(
+ &self.tcx().sess.parse_sess,
+ sym::unboxed_closures,
+ span,
+ "parenthetical notation is only stable when used with `Fn`-family traits",
+ );
+ err.emit();
+ }
+
+ return;
+ }
+
+ let sess = self.tcx().sess;
+
+ if !trait_segment.args().parenthesized {
+ // For now, require that parenthetical notation be used only with `Fn()` etc.
+ let mut err = feature_err(
+ &sess.parse_sess,
+ sym::unboxed_closures,
+ span,
+ "the precise format of `Fn`-family traits' type parameters is subject to change",
+ );
+ // Do not suggest the other syntax if we are in trait impl:
+ // the desugaring would contain an associated type constraint.
+ if !is_impl {
+ let args = trait_segment
+ .args
+ .as_ref()
+ .and_then(|args| args.args.get(0))
+ .and_then(|arg| match arg {
+ hir::GenericArg::Type(ty) => match ty.kind {
+ hir::TyKind::Tup(t) => t
+ .iter()
+ .map(|e| sess.source_map().span_to_snippet(e.span))
+ .collect::<Result<Vec<_>, _>>()
+ .map(|a| a.join(", ")),
+ _ => sess.source_map().span_to_snippet(ty.span),
+ }
+ .map(|s| format!("({})", s))
+ .ok(),
+ _ => None,
+ })
+ .unwrap_or_else(|| "()".to_string());
+ let ret = trait_segment
+ .args()
+ .bindings
+ .iter()
+ .find_map(|b| match (b.ident.name == sym::Output, &b.kind) {
+ (true, hir::TypeBindingKind::Equality { term }) => {
+ let span = match term {
+ hir::Term::Ty(ty) => ty.span,
+ hir::Term::Const(c) => self.tcx().hir().span(c.hir_id),
+ };
+ sess.source_map().span_to_snippet(span).ok()
+ }
+ _ => None,
+ })
+ .unwrap_or_else(|| "()".to_string());
+ err.span_suggestion(
+ span,
+ "use parenthetical notation instead",
+ format!("{}{} -> {}", trait_segment.ident, args, ret),
+ Applicability::MaybeIncorrect,
+ );
+ }
+ err.emit();
+ }
+
+ if is_impl {
+ let trait_name = self.tcx().def_path_str(trait_def_id);
+ self.tcx().sess.emit_err(ManualImplementation { span, trait_name });
+ }
+ }
+
+ pub(crate) fn complain_about_assoc_type_not_found<I>(
+ &self,
+ all_candidates: impl Fn() -> I,
+ ty_param_name: &str,
+ assoc_name: Ident,
+ span: Span,
+ ) -> ErrorGuaranteed
+ where
+ I: Iterator<Item = ty::PolyTraitRef<'tcx>>,
+ {
+ // The fallback span is needed because `assoc_name` might be an `Fn()`'s `Output` without a
+ // valid span, so we point at the whole path segment instead.
+ let span = if assoc_name.span != DUMMY_SP { assoc_name.span } else { span };
+ let mut err = struct_span_err!(
+ self.tcx().sess,
+ span,
+ E0220,
+ "associated type `{}` not found for `{}`",
+ assoc_name,
+ ty_param_name
+ );
+
+ let all_candidate_names: Vec<_> = all_candidates()
+ .flat_map(|r| self.tcx().associated_items(r.def_id()).in_definition_order())
+ .filter_map(
+ |item| if item.kind == ty::AssocKind::Type { Some(item.name) } else { None },
+ )
+ .collect();
+
+ if let (Some(suggested_name), true) = (
+ find_best_match_for_name(&all_candidate_names, assoc_name.name, None),
+ assoc_name.span != DUMMY_SP,
+ ) {
+ err.span_suggestion(
+ assoc_name.span,
+ "there is an associated type with a similar name",
+ suggested_name,
+ Applicability::MaybeIncorrect,
+ );
+ return err.emit();
+ }
+
+ // If we didn't find a good item in the supertraits (or couldn't get
+ // the supertraits), like in ItemCtxt, then look more generally from
+ // all visible traits. If there's one clear winner, just suggest that.
+
+ let visible_traits: Vec<_> = self
+ .tcx()
+ .all_traits()
+ .filter(|trait_def_id| {
+ let viz = self.tcx().visibility(*trait_def_id);
+ if let Some(def_id) = self.item_def_id() {
+ viz.is_accessible_from(def_id, self.tcx())
+ } else {
+ viz.is_visible_locally()
+ }
+ })
+ .collect();
+
+ let wider_candidate_names: Vec<_> = visible_traits
+ .iter()
+ .flat_map(|trait_def_id| {
+ self.tcx().associated_items(*trait_def_id).in_definition_order()
+ })
+ .filter_map(
+ |item| if item.kind == ty::AssocKind::Type { Some(item.name) } else { None },
+ )
+ .collect();
+
+ if let (Some(suggested_name), true) = (
+ find_best_match_for_name(&wider_candidate_names, assoc_name.name, None),
+ assoc_name.span != DUMMY_SP,
+ ) {
+ if let [best_trait] = visible_traits
+ .iter()
+ .filter(|trait_def_id| {
+ self.tcx()
+ .associated_items(*trait_def_id)
+ .filter_by_name_unhygienic(suggested_name)
+ .any(|item| item.kind == ty::AssocKind::Type)
+ })
+ .collect::<Vec<_>>()[..]
+ {
+ err.span_label(
+ assoc_name.span,
+ format!(
+ "there is a similarly named associated type `{suggested_name}` in the trait `{}`",
+ self.tcx().def_path_str(*best_trait)
+ ),
+ );
+ return err.emit();
+ }
+ }
+
+ err.span_label(span, format!("associated type `{}` not found", assoc_name));
+ err.emit()
+ }
+
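A minimal, hypothetical example (assumed; not from this diff) of the E0220 path above: a misspelled associated type in a bound is not found, which is exactly the case where the "similarly named associated type" suggestion applies.

// error[E0220]: associated type `Itme` not found for `Iterator`
// (a natural candidate for the "similar name" suggestion: `Item`)
// fn sum_first<I: Iterator<Itme = u32>>(it: I) -> u32 { ... }

fn sum_first<I: Iterator<Item = u32>>(mut it: I) -> u32 {
    it.next().unwrap_or(0)
}

fn main() {
    assert_eq!(sum_first([1u32, 2, 3].into_iter()), 1);
}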
+ /// When there are any missing associated types, emit an E0191 error and attempt to supply a
+ /// reasonable suggestion on how to write it. When multiple associated types in the same trait
+ /// bound have the same name (because they come from different supertraits), we instead emit a
+ /// generic note suggesting the use of a `where` clause to constrain them.
+ pub(crate) fn complain_about_missing_associated_types(
+ &self,
+ associated_types: FxHashMap<Span, BTreeSet<DefId>>,
+ potential_assoc_types: Vec<Span>,
+ trait_bounds: &[hir::PolyTraitRef<'_>],
+ ) {
+ if associated_types.values().all(|v| v.is_empty()) {
+ return;
+ }
+ let tcx = self.tcx();
+ // FIXME: Marked `mut` so that we can replace the spans further below with a more
+ // appropriate one, but this should be handled earlier in the span assignment.
+ let mut associated_types: FxHashMap<Span, Vec<_>> = associated_types
+ .into_iter()
+ .map(|(span, def_ids)| {
+ (span, def_ids.into_iter().map(|did| tcx.associated_item(did)).collect())
+ })
+ .collect();
+ let mut names = vec![];
+
+ // Account for things like `dyn Foo + 'a`, like in tests `issue-22434.rs` and
+ // `issue-22560.rs`.
+ let mut trait_bound_spans: Vec<Span> = vec![];
+ for (span, items) in &associated_types {
+ if !items.is_empty() {
+ trait_bound_spans.push(*span);
+ }
+ for assoc_item in items {
+ let trait_def_id = assoc_item.container_id(tcx);
+ names.push(format!(
+ "`{}` (from trait `{}`)",
+ assoc_item.name,
+ tcx.def_path_str(trait_def_id),
+ ));
+ }
+ }
+ if let ([], [bound]) = (&potential_assoc_types[..], &trait_bounds) {
+ match bound.trait_ref.path.segments {
+ // FIXME: `trait_ref.path.span` can point to a full path with multiple
+ // segments, even though `trait_ref.path.segments` is of length `1`. Work
+ // around that bug here, even though it should be fixed elsewhere.
+ // This would otherwise cause an invalid suggestion. For an example, look at
+ // `src/test/ui/issues/issue-28344.rs` where instead of the following:
+ //
+ // error[E0191]: the value of the associated type `Output`
+ // (from trait `std::ops::BitXor`) must be specified
+ // --> $DIR/issue-28344.rs:4:17
+ // |
+ // LL | let x: u8 = BitXor::bitor(0 as u8, 0 as u8);
+ // | ^^^^^^ help: specify the associated type:
+ // | `BitXor<Output = Type>`
+ //
+ // we would output:
+ //
+ // error[E0191]: the value of the associated type `Output`
+ // (from trait `std::ops::BitXor`) must be specified
+ // --> $DIR/issue-28344.rs:4:17
+ // |
+ // LL | let x: u8 = BitXor::bitor(0 as u8, 0 as u8);
+ // | ^^^^^^^^^^^^^ help: specify the associated type:
+ // | `BitXor::bitor<Output = Type>`
+ [segment] if segment.args.is_none() => {
+ trait_bound_spans = vec![segment.ident.span];
+ associated_types = associated_types
+ .into_iter()
+ .map(|(_, items)| (segment.ident.span, items))
+ .collect();
+ }
+ _ => {}
+ }
+ }
+ names.sort();
+ trait_bound_spans.sort();
+ let mut err = struct_span_err!(
+ tcx.sess,
+ trait_bound_spans,
+ E0191,
+ "the value of the associated type{} {} must be specified",
+ pluralize!(names.len()),
+ names.join(", "),
+ );
+ let mut suggestions = vec![];
+ let mut types_count = 0;
+ let mut where_constraints = vec![];
+ let mut already_has_generics_args_suggestion = false;
+ for (span, assoc_items) in &associated_types {
+ let mut names: FxHashMap<_, usize> = FxHashMap::default();
+ for item in assoc_items {
+ types_count += 1;
+ *names.entry(item.name).or_insert(0) += 1;
+ }
+ let mut dupes = false;
+ for item in assoc_items {
+ let prefix = if names[&item.name] > 1 {
+ let trait_def_id = item.container_id(tcx);
+ dupes = true;
+ format!("{}::", tcx.def_path_str(trait_def_id))
+ } else {
+ String::new()
+ };
+ if let Some(sp) = tcx.hir().span_if_local(item.def_id) {
+ err.span_label(sp, format!("`{}{}` defined here", prefix, item.name));
+ }
+ }
+ if potential_assoc_types.len() == assoc_items.len() {
+ // When the number of missing associated types equals the number of extra type
+ // arguments present, a suggestion to replace the generic args with associated
+ // types has already been emitted.
+ already_has_generics_args_suggestion = true;
+ } else if let (Ok(snippet), false) =
+ (tcx.sess.source_map().span_to_snippet(*span), dupes)
+ {
+ let types: Vec<_> =
+ assoc_items.iter().map(|item| format!("{} = Type", item.name)).collect();
+ let code = if snippet.ends_with('>') {
+ // The user wrote `Trait<'a>` or similar and we don't have a type we can
+ // suggest, but at least we can clue them to the correct syntax
+ // `Trait<'a, Item = Type>` while accounting for the `<'a>` in the
+ // suggestion.
+ format!("{}, {}>", &snippet[..snippet.len() - 1], types.join(", "))
+ } else {
+ // The user wrote `Iterator`, so we don't have a type we can suggest, but at
+ // least we can clue them to the correct syntax `Iterator<Item = Type>`.
+ format!("{}<{}>", snippet, types.join(", "))
+ };
+ suggestions.push((*span, code));
+ } else if dupes {
+ where_constraints.push(*span);
+ }
+ }
+ let where_msg = "consider introducing a new type parameter, adding `where` constraints \
+ using the fully-qualified path to the associated types";
+ if !where_constraints.is_empty() && suggestions.is_empty() {
+ // If there are duplicate associated type names and a single trait bound, do not
+ // use a structured suggestion: it means that multiple supertraits declare an
+ // associated type with the same name.
+ err.help(where_msg);
+ }
+ if suggestions.len() != 1 || already_has_generics_args_suggestion {
+ // We don't need this label if there's an inline suggestion; show it otherwise.
+ for (span, assoc_items) in &associated_types {
+ let mut names: FxHashMap<_, usize> = FxHashMap::default();
+ for item in assoc_items {
+ types_count += 1;
+ *names.entry(item.name).or_insert(0) += 1;
+ }
+ let mut label = vec![];
+ for item in assoc_items {
+ let postfix = if names[&item.name] > 1 {
+ let trait_def_id = item.container_id(tcx);
+ format!(" (from trait `{}`)", tcx.def_path_str(trait_def_id))
+ } else {
+ String::new()
+ };
+ label.push(format!("`{}`{}", item.name, postfix));
+ }
+ if !label.is_empty() {
+ err.span_label(
+ *span,
+ format!(
+ "associated type{} {} must be specified",
+ pluralize!(label.len()),
+ label.join(", "),
+ ),
+ );
+ }
+ }
+ }
+ if !suggestions.is_empty() {
+ err.multipart_suggestion(
+ &format!("specify the associated type{}", pluralize!(types_count)),
+ suggestions,
+ Applicability::HasPlaceholders,
+ );
+ if !where_constraints.is_empty() {
+ err.span_help(where_constraints, where_msg);
+ }
+ }
+ err.emit();
+ }
+}
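A minimal, hypothetical example (assumed; not from this diff) of the E0191 case handled by `complain_about_missing_associated_types`: a trait object must fix every associated type of the trait, and the structured suggestion inserts placeholder bindings such as `Bar = Type`.

trait Trait {
    type Bar;
}

// error[E0191]: the value of the associated type `Bar` (from trait `Trait`)
// must be specified
// type Broken = dyn Trait;

type Works = dyn Trait<Bar = u32>; // compiles once the associated type is given

fn main() {}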
diff --git a/compiler/rustc_hir_analysis/src/astconv/generics.rs b/compiler/rustc_hir_analysis/src/astconv/generics.rs
new file mode 100644
index 000000000..47915b4bd
--- /dev/null
+++ b/compiler/rustc_hir_analysis/src/astconv/generics.rs
@@ -0,0 +1,662 @@
+use super::IsMethodCall;
+use crate::astconv::{
+ AstConv, CreateSubstsForGenericArgsCtxt, ExplicitLateBound, GenericArgCountMismatch,
+ GenericArgCountResult, GenericArgPosition,
+};
+use crate::errors::AssocTypeBindingNotAllowed;
+use crate::structured_errors::{GenericArgsInfo, StructuredDiagnostic, WrongNumberOfGenericArgs};
+use rustc_ast::ast::ParamKindOrd;
+use rustc_errors::{struct_span_err, Applicability, Diagnostic, MultiSpan};
+use rustc_hir as hir;
+use rustc_hir::def::{DefKind, Res};
+use rustc_hir::def_id::DefId;
+use rustc_hir::GenericArg;
+use rustc_infer::infer::TyCtxtInferExt;
+use rustc_middle::ty::{
+ self, subst, subst::SubstsRef, GenericParamDef, GenericParamDefKind, IsSuggestable, Ty, TyCtxt,
+};
+use rustc_session::lint::builtin::LATE_BOUND_LIFETIME_ARGUMENTS;
+use rustc_span::{symbol::kw, Span};
+use smallvec::SmallVec;
+
+impl<'o, 'tcx> dyn AstConv<'tcx> + 'o {
+ /// Report an error that a generic argument did not match the generic parameter that was
+ /// expected.
+ fn generic_arg_mismatch_err(
+ tcx: TyCtxt<'_>,
+ arg: &GenericArg<'_>,
+ param: &GenericParamDef,
+ possible_ordering_error: bool,
+ help: Option<&str>,
+ ) {
+ let sess = tcx.sess;
+ let mut err = struct_span_err!(
+ sess,
+ arg.span(),
+ E0747,
+ "{} provided when a {} was expected",
+ arg.descr(),
+ param.kind.descr(),
+ );
+
+ if let GenericParamDefKind::Const { .. } = param.kind {
+ if matches!(arg, GenericArg::Type(hir::Ty { kind: hir::TyKind::Infer, .. })) {
+ err.help("const arguments cannot yet be inferred with `_`");
+ if sess.is_nightly_build() {
+ err.help(
+ "add `#![feature(generic_arg_infer)]` to the crate attributes to enable",
+ );
+ }
+ }
+ }
+
+ let add_braces_suggestion = |arg: &GenericArg<'_>, err: &mut Diagnostic| {
+ let suggestions = vec![
+ (arg.span().shrink_to_lo(), String::from("{ ")),
+ (arg.span().shrink_to_hi(), String::from(" }")),
+ ];
+ err.multipart_suggestion(
+ "if this generic argument was intended as a const parameter, \
+ surround it with braces",
+ suggestions,
+ Applicability::MaybeIncorrect,
+ );
+ };
+
+ // Specific suggestion set for diagnostics
+ match (arg, &param.kind) {
+ (
+ GenericArg::Type(hir::Ty {
+ kind: hir::TyKind::Path(rustc_hir::QPath::Resolved(_, path)),
+ ..
+ }),
+ GenericParamDefKind::Const { .. },
+ ) => match path.res {
+ Res::Err => {
+ add_braces_suggestion(arg, &mut err);
+ err.set_primary_message(
+ "unresolved item provided when a constant was expected",
+ )
+ .emit();
+ return;
+ }
+ Res::Def(DefKind::TyParam, src_def_id) => {
+ if let Some(param_local_id) = param.def_id.as_local() {
+ let param_name = tcx.hir().ty_param_name(param_local_id);
+ let infcx = tcx.infer_ctxt().build();
+ let param_type =
+ infcx.resolve_numeric_literals_with_default(tcx.type_of(param.def_id));
+ if param_type.is_suggestable(tcx, false) {
+ err.span_suggestion(
+ tcx.def_span(src_def_id),
+ "consider changing this type parameter to be a `const` generic",
+ format!("const {}: {}", param_name, param_type),
+ Applicability::MaybeIncorrect,
+ );
+ };
+ }
+ }
+ _ => add_braces_suggestion(arg, &mut err),
+ },
+ (
+ GenericArg::Type(hir::Ty { kind: hir::TyKind::Path(_), .. }),
+ GenericParamDefKind::Const { .. },
+ ) => add_braces_suggestion(arg, &mut err),
+ (
+ GenericArg::Type(hir::Ty { kind: hir::TyKind::Array(_, len), .. }),
+ GenericParamDefKind::Const { .. },
+ ) if tcx.type_of(param.def_id) == tcx.types.usize => {
+ let snippet = sess.source_map().span_to_snippet(tcx.hir().span(len.hir_id()));
+ if let Ok(snippet) = snippet {
+ err.span_suggestion(
+ arg.span(),
+ "array type provided where a `usize` was expected, try",
+ format!("{{ {} }}", snippet),
+ Applicability::MaybeIncorrect,
+ );
+ }
+ }
+ (GenericArg::Const(cnst), GenericParamDefKind::Type { .. }) => {
+ let body = tcx.hir().body(cnst.value.body);
+ if let rustc_hir::ExprKind::Path(rustc_hir::QPath::Resolved(_, path)) =
+ body.value.kind
+ {
+ if let Res::Def(DefKind::Fn { .. }, id) = path.res {
+ err.help(&format!(
+ "`{}` is a function item, not a type",
+ tcx.item_name(id)
+ ));
+ err.help("function item types cannot be named directly");
+ }
+ }
+ }
+ _ => {}
+ }
+
+ let kind_ord = param.kind.to_ord();
+ let arg_ord = arg.to_ord();
+
+ // This note is only true when generic parameters are strictly ordered by their kind.
+ if possible_ordering_error && kind_ord.cmp(&arg_ord) != core::cmp::Ordering::Equal {
+ let (first, last) = if kind_ord < arg_ord {
+ (param.kind.descr(), arg.descr())
+ } else {
+ (arg.descr(), param.kind.descr())
+ };
+ err.note(&format!("{} arguments must be provided before {} arguments", first, last));
+ if let Some(help) = help {
+ err.help(help);
+ }
+ }
+
+ err.emit();
+ }
+
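A minimal, hypothetical example (assumed; not from this diff) of the mismatch `generic_arg_mismatch_err` reports: a type argument where a lifetime was expected yields E0747, together with the ordering note built above.

struct S<'a, T>(&'a T);

// error[E0747]: type provided when a lifetime was expected
// note: lifetime arguments must be provided before type arguments
// type Wrong = S<i32, 'static>;

type Right = S<'static, i32>; // lifetime first, then the type

fn main() {}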
+ /// Creates the relevant generic argument substitutions
+ /// corresponding to a set of generic parameters. This is a
+ /// rather complex function. Let us try to explain the role
+ /// of each of its parameters:
+ ///
+ /// To start, we are given the `def_id` of the thing we are
+ /// creating the substitutions for, and a partial set of
+ /// substitutions `parent_substs`. In general, the substitutions
+ /// for an item begin with substitutions for all the "parents" of
+ /// that item -- e.g., for a method it might include the
+ /// parameters from the impl.
+ ///
+ /// Therefore, the method begins by walking down these parents,
+ /// starting with the outermost parent and proceeding inwards until
+ /// it reaches `def_id`. For each parent `P`, it will check `parent_substs`
+ /// first to see if the parent's substitutions are listed in there. If so,
+ /// we can append those and move on. Otherwise, it invokes the
+ /// three callback functions:
+ ///
+ /// - `args_for_def_id`: given the `DefId` `P`, supplies back the
+ /// generic arguments that were given to that parent from within
+ /// the path; so e.g., if you have `<T as Foo>::Bar`, the `DefId`
+ /// might refer to the trait `Foo`, and the arguments might be
+ /// `[T]`. The boolean value indicates whether to infer values
+ /// for arguments whose values were not explicitly provided.
+ /// - `provided_kind`: given the generic parameter and the value from `args_for_def_id`,
+ /// instantiate a `GenericArg`.
+ /// - `inferred_kind`: if no argument was provided for a parameter, and inference is enabled,
+ /// this callback creates a suitable inference variable.
+ pub fn create_substs_for_generic_args<'a>(
+ tcx: TyCtxt<'tcx>,
+ def_id: DefId,
+ parent_substs: &[subst::GenericArg<'tcx>],
+ has_self: bool,
+ self_ty: Option<Ty<'tcx>>,
+ arg_count: &GenericArgCountResult,
+ ctx: &mut impl CreateSubstsForGenericArgsCtxt<'a, 'tcx>,
+ ) -> SubstsRef<'tcx> {
+ // Collect the segments of the path; we need to substitute arguments
+ // for parameters throughout the entire path (wherever there are
+ // generic parameters).
+ let mut parent_defs = tcx.generics_of(def_id);
+ let count = parent_defs.count();
+ let mut stack = vec![(def_id, parent_defs)];
+ while let Some(def_id) = parent_defs.parent {
+ parent_defs = tcx.generics_of(def_id);
+ stack.push((def_id, parent_defs));
+ }
+
+ // We manually build up the substitution, rather than using convenience
+ // methods in `subst.rs`, so that we can iterate over the arguments and
+ // parameters in lock-step linearly, instead of trying to match each pair.
+ let mut substs: SmallVec<[subst::GenericArg<'tcx>; 8]> = SmallVec::with_capacity(count);
+ // Iterate over each segment of the path.
+ while let Some((def_id, defs)) = stack.pop() {
+ let mut params = defs.params.iter().peekable();
+
+ // If we have already computed substitutions for parents, we can use those directly.
+ while let Some(&param) = params.peek() {
+ if let Some(&kind) = parent_substs.get(param.index as usize) {
+ substs.push(kind);
+ params.next();
+ } else {
+ break;
+ }
+ }
+
+ // `Self` is handled first, unless it's been handled in `parent_substs`.
+ if has_self {
+ if let Some(&param) = params.peek() {
+ if param.index == 0 {
+ if let GenericParamDefKind::Type { .. } = param.kind {
+ substs.push(
+ self_ty
+ .map(|ty| ty.into())
+ .unwrap_or_else(|| ctx.inferred_kind(None, param, true)),
+ );
+ params.next();
+ }
+ }
+ }
+ }
+
+ // Check whether this segment takes generic arguments and the user has provided any.
+ let (generic_args, infer_args) = ctx.args_for_def_id(def_id);
+
+ let args_iter = generic_args.iter().flat_map(|generic_args| generic_args.args.iter());
+ let mut args = args_iter.clone().peekable();
+
+ // If we encounter a type or const when we expect a lifetime, we infer the lifetimes.
+ // If we later encounter a lifetime, we know that the arguments were provided in the
+ // wrong order. `force_infer_lt` records the type or const that forced lifetimes to be
+ // inferred, so we can use it for diagnostics later.
+ let mut force_infer_lt = None;
+
+ loop {
+ // We're going to iterate through the generic arguments that the user
+ // provided, matching them with the generic parameters we expect.
+ // Mismatches can occur as a result of elided lifetimes, or for malformed
+ // input. We try to handle both sensibly.
+ match (args.peek(), params.peek()) {
+ (Some(&arg), Some(&param)) => {
+ match (arg, &param.kind, arg_count.explicit_late_bound) {
+ (GenericArg::Lifetime(_), GenericParamDefKind::Lifetime, _)
+ | (
+ GenericArg::Type(_) | GenericArg::Infer(_),
+ GenericParamDefKind::Type { .. },
+ _,
+ )
+ | (
+ GenericArg::Const(_) | GenericArg::Infer(_),
+ GenericParamDefKind::Const { .. },
+ _,
+ ) => {
+ substs.push(ctx.provided_kind(param, arg));
+ args.next();
+ params.next();
+ }
+ (
+ GenericArg::Infer(_) | GenericArg::Type(_) | GenericArg::Const(_),
+ GenericParamDefKind::Lifetime,
+ _,
+ ) => {
+ // We expected a lifetime argument, but got a type or const
+ // argument. That means we're inferring the lifetimes.
+ substs.push(ctx.inferred_kind(None, param, infer_args));
+ force_infer_lt = Some((arg, param));
+ params.next();
+ }
+ (GenericArg::Lifetime(_), _, ExplicitLateBound::Yes) => {
+ // We've come across a lifetime when we expected something else in
+ // the presence of explicit late bounds. This is most likely
+ // due to the presence of the explicit bound so we're just going to
+ // ignore it.
+ args.next();
+ }
+ (_, _, _) => {
+ // We expected one kind of parameter, but the user provided
+ // another. This is an error. However, if we already know that
+ // the arguments don't match up with the parameters, we won't issue
+ // an additional error, as the user already knows what's wrong.
+ if arg_count.correct.is_ok() {
+ // We're going to iterate over the parameters to sort them out, and
+ // show that order to the user as a possible order for the parameters
+ let mut param_types_present = defs
+ .params
+ .iter()
+ .map(|param| (param.kind.to_ord(), param.clone()))
+ .collect::<Vec<(ParamKindOrd, GenericParamDef)>>();
+ param_types_present.sort_by_key(|(ord, _)| *ord);
+ let (mut param_types_present, ordered_params): (
+ Vec<ParamKindOrd>,
+ Vec<GenericParamDef>,
+ ) = param_types_present.into_iter().unzip();
+ param_types_present.dedup();
+
+ Self::generic_arg_mismatch_err(
+ tcx,
+ arg,
+ param,
+ !args_iter.clone().is_sorted_by_key(|arg| arg.to_ord()),
+ Some(&format!(
+ "reorder the arguments: {}: `<{}>`",
+ param_types_present
+ .into_iter()
+ .map(|ord| format!("{}s", ord))
+ .collect::<Vec<String>>()
+ .join(", then "),
+ ordered_params
+ .into_iter()
+ .filter_map(|param| {
+ if param.name == kw::SelfUpper {
+ None
+ } else {
+ Some(param.name.to_string())
+ }
+ })
+ .collect::<Vec<String>>()
+ .join(", ")
+ )),
+ );
+ }
+
+ // We've reported the error, but we want to make sure that this
+ // problem doesn't bubble down and create additional, irrelevant
+ // errors. In this case, we're simply going to ignore the argument
+ // and any following arguments. The rest of the parameters will be
+ // inferred.
+ while args.next().is_some() {}
+ }
+ }
+ }
+
+ (Some(&arg), None) => {
+ // We should never be able to reach this point with well-formed input.
+ // There are three situations in which we can encounter this issue.
+ //
+ // 1. The number of arguments is incorrect. In this case, an error
+ // will already have been emitted, and we can ignore it.
+ // 2. There are late-bound lifetime parameters present, yet the
+ // lifetime arguments have also been explicitly specified by the
+ // user.
+ // 3. We've inferred some lifetimes, which have been provided later (i.e.
+ // after a type or const). We want to throw an error in this case.
+
+ if arg_count.correct.is_ok()
+ && arg_count.explicit_late_bound == ExplicitLateBound::No
+ {
+ let kind = arg.descr();
+ assert_eq!(kind, "lifetime");
+ let (provided_arg, param) =
+ force_infer_lt.expect("lifetimes ought to have been inferred");
+ Self::generic_arg_mismatch_err(tcx, provided_arg, param, false, None);
+ }
+
+ break;
+ }
+
+ (None, Some(&param)) => {
+ // If there are fewer arguments than parameters, it means
+ // we're inferring the remaining arguments.
+ substs.push(ctx.inferred_kind(Some(&substs), param, infer_args));
+ params.next();
+ }
+
+ (None, None) => break,
+ }
+ }
+ }
+
+ tcx.intern_substs(&substs)
+ }
+
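A much-simplified, hypothetical sketch (standalone; not compiler code, with all diagnostics elided) of the lock-step matching `create_substs_for_generic_args` performs: provided arguments and expected parameters are walked together, a type or const showing up where a lifetime is expected makes the lifetime inferred, and anything left unprovided is inferred at the end.

#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum Kind { Lifetime, Type, Const }

#[derive(Debug, Clone)]
enum Subst { Provided(Kind, &'static str), Inferred(Kind) }

fn create_substs(params: &[Kind], args: &[(Kind, &'static str)]) -> Vec<Subst> {
    let mut substs = Vec::with_capacity(params.len());
    let mut args = args.iter().peekable();
    let mut params = params.iter().peekable();

    loop {
        match (args.peek(), params.peek()) {
            (Some(&&(arg_kind, name)), Some(&&param_kind)) => {
                if arg_kind == param_kind {
                    // Kinds line up: take the provided argument.
                    substs.push(Subst::Provided(arg_kind, name));
                    args.next();
                    params.next();
                } else if param_kind == Kind::Lifetime {
                    // A type/const showed up where a lifetime was expected:
                    // infer the lifetime and keep the argument for the next parameter.
                    substs.push(Subst::Inferred(Kind::Lifetime));
                    params.next();
                } else {
                    // Genuine mismatch: report (elided here) and ignore the remaining args;
                    // the rest of the parameters will be inferred below.
                    while args.next().is_some() {}
                }
            }
            // Fewer arguments than parameters: infer the rest.
            (None, Some(&&param_kind)) => {
                substs.push(Subst::Inferred(param_kind));
                params.next();
            }
            // Extra arguments with no parameters left, or both exhausted: stop.
            _ => break,
        }
    }
    substs
}

fn main() {
    // e.g. something like `Foo<'a, T, const N: usize>` referenced as `Foo<u32>`:
    let params = [Kind::Lifetime, Kind::Type, Kind::Const];
    let args = [(Kind::Type, "u32")];
    println!("{:?}", create_substs(&params, &args));
}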
+ /// Checks that the correct number of generic arguments have been provided.
+ /// Used specifically for function calls.
+ pub fn check_generic_arg_count_for_call(
+ tcx: TyCtxt<'_>,
+ span: Span,
+ def_id: DefId,
+ generics: &ty::Generics,
+ seg: &hir::PathSegment<'_>,
+ is_method_call: IsMethodCall,
+ ) -> GenericArgCountResult {
+ let empty_args = hir::GenericArgs::none();
+ let gen_args = seg.args.unwrap_or(&empty_args);
+ let gen_pos = if is_method_call == IsMethodCall::Yes {
+ GenericArgPosition::MethodCall
+ } else {
+ GenericArgPosition::Value
+ };
+ let has_self = generics.parent.is_none() && generics.has_self;
+
+ Self::check_generic_arg_count(
+ tcx,
+ span,
+ def_id,
+ seg,
+ generics,
+ gen_args,
+ gen_pos,
+ has_self,
+ seg.infer_args,
+ )
+ }
+
+ /// Checks that the correct number of generic arguments have been provided.
+ /// This is used both for datatypes and function calls.
+ #[instrument(skip(tcx, gen_pos), level = "debug")]
+ pub(crate) fn check_generic_arg_count(
+ tcx: TyCtxt<'_>,
+ span: Span,
+ def_id: DefId,
+ seg: &hir::PathSegment<'_>,
+ gen_params: &ty::Generics,
+ gen_args: &hir::GenericArgs<'_>,
+ gen_pos: GenericArgPosition,
+ has_self: bool,
+ infer_args: bool,
+ ) -> GenericArgCountResult {
+ let default_counts = gen_params.own_defaults();
+ let param_counts = gen_params.own_counts();
+
+ // Subtracting from param count to ensure type params synthesized from `impl Trait`
+ // cannot be explicitly specified.
+ let synth_type_param_count = gen_params
+ .params
+ .iter()
+ .filter(|param| {
+ matches!(param.kind, ty::GenericParamDefKind::Type { synthetic: true, .. })
+ })
+ .count();
+ let named_type_param_count =
+ param_counts.types - has_self as usize - synth_type_param_count;
+ let infer_lifetimes =
+ (gen_pos != GenericArgPosition::Type || infer_args) && !gen_args.has_lifetime_params();
+
+ if gen_pos != GenericArgPosition::Type && let Some(b) = gen_args.bindings.first() {
+ Self::prohibit_assoc_ty_binding(tcx, b.span);
+ }
+
+ let explicit_late_bound =
+ Self::prohibit_explicit_late_bound_lifetimes(tcx, gen_params, gen_args, gen_pos);
+
+ let mut invalid_args = vec![];
+
+ let mut check_lifetime_args =
+ |min_expected_args: usize,
+ max_expected_args: usize,
+ provided_args: usize,
+ late_bounds_ignore: bool| {
+ if (min_expected_args..=max_expected_args).contains(&provided_args) {
+ return Ok(());
+ }
+
+ if late_bounds_ignore {
+ return Ok(());
+ }
+
+ if provided_args > max_expected_args {
+ invalid_args.extend(
+ gen_args.args[max_expected_args..provided_args]
+ .iter()
+ .map(|arg| arg.span()),
+ );
+ };
+
+ let gen_args_info = if provided_args > min_expected_args {
+ invalid_args.extend(
+ gen_args.args[min_expected_args..provided_args]
+ .iter()
+ .map(|arg| arg.span()),
+ );
+ let num_redundant_args = provided_args - min_expected_args;
+ GenericArgsInfo::ExcessLifetimes { num_redundant_args }
+ } else {
+ let num_missing_args = min_expected_args - provided_args;
+ GenericArgsInfo::MissingLifetimes { num_missing_args }
+ };
+
+ let reported = WrongNumberOfGenericArgs::new(
+ tcx,
+ gen_args_info,
+ seg,
+ gen_params,
+ has_self as usize,
+ gen_args,
+ def_id,
+ )
+ .diagnostic()
+ .emit();
+
+ Err(reported)
+ };
+
+ let min_expected_lifetime_args = if infer_lifetimes { 0 } else { param_counts.lifetimes };
+ let max_expected_lifetime_args = param_counts.lifetimes;
+ let num_provided_lifetime_args = gen_args.num_lifetime_params();
+
+ let lifetimes_correct = check_lifetime_args(
+ min_expected_lifetime_args,
+ max_expected_lifetime_args,
+ num_provided_lifetime_args,
+ explicit_late_bound == ExplicitLateBound::Yes,
+ );
+
+ let mut check_types_and_consts = |expected_min,
+ expected_max,
+ expected_max_with_synth,
+ provided,
+ params_offset,
+ args_offset| {
+ debug!(
+ ?expected_min,
+ ?expected_max,
+ ?provided,
+ ?params_offset,
+ ?args_offset,
+ "check_types_and_consts"
+ );
+ if (expected_min..=expected_max).contains(&provided) {
+ return Ok(());
+ }
+
+ let num_default_params = expected_max - expected_min;
+
+ let gen_args_info = if provided > expected_max {
+ invalid_args.extend(
+ gen_args.args[args_offset + expected_max..args_offset + provided]
+ .iter()
+ .map(|arg| arg.span()),
+ );
+ let num_redundant_args = provided - expected_max;
+
+ // Provide extra note if synthetic arguments like `impl Trait` are specified.
+ let synth_provided = provided <= expected_max_with_synth;
+
+ GenericArgsInfo::ExcessTypesOrConsts {
+ num_redundant_args,
+ num_default_params,
+ args_offset,
+ synth_provided,
+ }
+ } else {
+ let num_missing_args = expected_max - provided;
+
+ GenericArgsInfo::MissingTypesOrConsts {
+ num_missing_args,
+ num_default_params,
+ args_offset,
+ }
+ };
+
+ debug!(?gen_args_info);
+
+ let reported = WrongNumberOfGenericArgs::new(
+ tcx,
+ gen_args_info,
+ seg,
+ gen_params,
+ params_offset,
+ gen_args,
+ def_id,
+ )
+ .diagnostic()
+ .emit_unless(gen_args.has_err());
+
+ Err(reported)
+ };
+
+ let args_correct = {
+ let expected_min = if infer_args {
+ 0
+ } else {
+ param_counts.consts + named_type_param_count
+ - default_counts.types
+ - default_counts.consts
+ };
+ debug!(?expected_min);
+ debug!(arg_counts.lifetimes=?gen_args.num_lifetime_params());
+
+ check_types_and_consts(
+ expected_min,
+ param_counts.consts + named_type_param_count,
+ param_counts.consts + named_type_param_count + synth_type_param_count,
+ gen_args.num_generic_params(),
+ param_counts.lifetimes + has_self as usize,
+ gen_args.num_lifetime_params(),
+ )
+ };
+
+ GenericArgCountResult {
+ explicit_late_bound,
+ correct: lifetimes_correct.and(args_correct).map_err(|reported| {
+ GenericArgCountMismatch { reported: Some(reported), invalid_args }
+ }),
+ }
+ }
+
+ /// Emits an error regarding forbidden type binding associations
+ pub fn prohibit_assoc_ty_binding(tcx: TyCtxt<'_>, span: Span) {
+ tcx.sess.emit_err(AssocTypeBindingNotAllowed { span });
+ }
+
+ /// Prohibits explicit lifetime arguments if late-bound lifetime parameters
+ /// are present. This is used both for datatypes and function calls.
+ pub(crate) fn prohibit_explicit_late_bound_lifetimes(
+ tcx: TyCtxt<'_>,
+ def: &ty::Generics,
+ args: &hir::GenericArgs<'_>,
+ position: GenericArgPosition,
+ ) -> ExplicitLateBound {
+ let param_counts = def.own_counts();
+ let infer_lifetimes = position != GenericArgPosition::Type && !args.has_lifetime_params();
+
+ if infer_lifetimes {
+ return ExplicitLateBound::No;
+ }
+
+ if let Some(span_late) = def.has_late_bound_regions {
+ let msg = "cannot specify lifetime arguments explicitly \
+ if late bound lifetime parameters are present";
+ let note = "the late bound lifetime parameter is introduced here";
+ let span = args.args[0].span();
+
+ if position == GenericArgPosition::Value
+ && args.num_lifetime_params() != param_counts.lifetimes
+ {
+ let mut err = tcx.sess.struct_span_err(span, msg);
+ err.span_note(span_late, note);
+ err.emit();
+ } else {
+ let mut multispan = MultiSpan::from_span(span);
+ multispan.push_span_label(span_late, note);
+ tcx.struct_span_lint_hir(
+ LATE_BOUND_LIFETIME_ARGUMENTS,
+ args.args[0].hir_id(),
+ multispan,
+ msg,
+ |lint| lint,
+ );
+ }
+
+ ExplicitLateBound::Yes
+ } else {
+ ExplicitLateBound::No
+ }
+ }
+}
diff --git a/compiler/rustc_hir_analysis/src/astconv/mod.rs b/compiler/rustc_hir_analysis/src/astconv/mod.rs
new file mode 100644
index 000000000..38f195dab
--- /dev/null
+++ b/compiler/rustc_hir_analysis/src/astconv/mod.rs
@@ -0,0 +1,3136 @@
+//! Conversion from AST representation of types to the `ty.rs` representation.
+//! The main routine here is `ast_ty_to_ty()`; each use is parameterized by an
+//! instance of `AstConv`.
+
+mod errors;
+mod generics;
+
+use crate::bounds::Bounds;
+use crate::collect::HirPlaceholderCollector;
+use crate::errors::{
+ AmbiguousLifetimeBound, MultipleRelaxedDefaultBounds, TraitObjectDeclaredWithNoTraits,
+ TypeofReservedKeywordUsed, ValueOfAssociatedStructAlreadySpecified,
+};
+use crate::middle::resolve_lifetime as rl;
+use crate::require_c_abi_if_c_variadic;
+use rustc_ast::TraitObjectSyntax;
+use rustc_data_structures::fx::{FxHashMap, FxHashSet};
+use rustc_errors::{
+ struct_span_err, Applicability, Diagnostic, DiagnosticBuilder, ErrorGuaranteed, FatalError,
+ MultiSpan,
+};
+use rustc_hir as hir;
+use rustc_hir::def::{CtorOf, DefKind, Namespace, Res};
+use rustc_hir::def_id::{DefId, LocalDefId};
+use rustc_hir::intravisit::{walk_generics, Visitor as _};
+use rustc_hir::lang_items::LangItem;
+use rustc_hir::{GenericArg, GenericArgs, OpaqueTyOrigin};
+use rustc_middle::middle::stability::AllowUnstable;
+use rustc_middle::ty::subst::{self, GenericArgKind, InternalSubsts, SubstsRef};
+use rustc_middle::ty::DynKind;
+use rustc_middle::ty::GenericParamDefKind;
+use rustc_middle::ty::{
+ self, Const, DefIdTree, EarlyBinder, IsSuggestable, Ty, TyCtxt, TypeVisitable,
+};
+use rustc_session::lint::builtin::{AMBIGUOUS_ASSOCIATED_ITEMS, BARE_TRAIT_OBJECTS};
+use rustc_span::edition::Edition;
+use rustc_span::lev_distance::find_best_match_for_name;
+use rustc_span::symbol::{kw, Ident, Symbol};
+use rustc_span::{sym, Span};
+use rustc_target::spec::abi;
+use rustc_trait_selection::traits;
+use rustc_trait_selection::traits::astconv_object_safety_violations;
+use rustc_trait_selection::traits::error_reporting::{
+ report_object_safety_error, suggestions::NextTypeParamName,
+};
+use rustc_trait_selection::traits::wf::object_region_bounds;
+
+use smallvec::{smallvec, SmallVec};
+use std::collections::BTreeSet;
+use std::slice;
+
+#[derive(Debug)]
+pub struct PathSeg(pub DefId, pub usize);
+
+pub trait AstConv<'tcx> {
+ fn tcx<'a>(&'a self) -> TyCtxt<'tcx>;
+
+ fn item_def_id(&self) -> Option<DefId>;
+
+ /// Returns predicates in scope of the form `X: Foo<T>`, where `X`
+ /// is a type parameter `X` with the given id `def_id` and T
+ /// matches `assoc_name`. This is a subset of the full set of
+ /// predicates.
+ ///
+ /// This is used for one specific purpose: resolving "short-hand"
+ /// associated type references like `T::Item`. In principle, we
+ /// would do that by first getting the full set of predicates in
+ /// scope and then filtering down to find those that apply to `T`,
+ /// but this can lead to cycle errors. The problem is that we have
+ /// to do this resolution *in order to create the predicates in
+ /// the first place*. Hence, we have this "special pass".
+ fn get_type_parameter_bounds(
+ &self,
+ span: Span,
+ def_id: DefId,
+ assoc_name: Ident,
+ ) -> ty::GenericPredicates<'tcx>;
+
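A minimal, hypothetical example (assumed; not from this diff) of the short-hand associated type paths this method exists to resolve: `T::Item` below can be resolved from the bounds declared on `T` alone, without computing the full predicate set.

fn first<T: Iterator>(mut it: T) -> Option<T::Item> {
    // `T::Item` is a short-hand path; only the `Iterator` bound on `T`
    // is needed to resolve it.
    it.next()
}

fn main() {
    assert_eq!(first(1..4), Some(1));
}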
+ /// Returns the lifetime to use when a lifetime is omitted (and not elided).
+ fn re_infer(&self, param: Option<&ty::GenericParamDef>, span: Span)
+ -> Option<ty::Region<'tcx>>;
+
+ /// Returns the type to use when a type is omitted.
+ fn ty_infer(&self, param: Option<&ty::GenericParamDef>, span: Span) -> Ty<'tcx>;
+
+ /// Returns `true` if `_` is allowed in type signatures in the current context.
+ fn allow_ty_infer(&self) -> bool;
+
+ /// Returns the const to use when a const is omitted.
+ fn ct_infer(
+ &self,
+ ty: Ty<'tcx>,
+ param: Option<&ty::GenericParamDef>,
+ span: Span,
+ ) -> Const<'tcx>;
+
+ /// Projecting an associated type from a (potentially)
+ /// higher-ranked trait reference is more complicated, because of
+ /// the possibility of late-bound regions appearing in the
+ /// associated type binding. This is not legal in function
+ /// signatures for that reason. In a function body, we can always
+ /// handle it because we can use inference variables to remove the
+ /// late-bound regions.
+ fn projected_ty_from_poly_trait_ref(
+ &self,
+ span: Span,
+ item_def_id: DefId,
+ item_segment: &hir::PathSegment<'_>,
+ poly_trait_ref: ty::PolyTraitRef<'tcx>,
+ ) -> Ty<'tcx>;
+
+ /// Normalize an associated type coming from the user.
+ fn normalize_ty(&self, span: Span, ty: Ty<'tcx>) -> Ty<'tcx>;
+
+ /// Invoked when we encounter an error from some prior pass
+ /// (e.g., resolve) that is translated into a ty-error. This is
+ /// used to help suppress derived errors typeck might otherwise
+ /// report.
+ fn set_tainted_by_errors(&self);
+
+ fn record_ty(&self, hir_id: hir::HirId, ty: Ty<'tcx>, span: Span);
+}
+
+#[derive(Debug)]
+struct ConvertedBinding<'a, 'tcx> {
+ hir_id: hir::HirId,
+ item_name: Ident,
+ kind: ConvertedBindingKind<'a, 'tcx>,
+ gen_args: &'a GenericArgs<'a>,
+ span: Span,
+}
+
+#[derive(Debug)]
+enum ConvertedBindingKind<'a, 'tcx> {
+ Equality(ty::Term<'tcx>),
+ Constraint(&'a [hir::GenericBound<'a>]),
+}
+
+/// New-typed boolean indicating whether explicit late-bound lifetimes
+/// are present in a set of generic arguments.
+///
+/// For example if we have some method `fn f<'a>(&'a self)` implemented
+/// for some type `T`, although `f` is generic in the lifetime `'a`, `'a`
+/// is late-bound so should not be provided explicitly. Thus, if `f` is
+/// instantiated with some generic arguments providing `'a` explicitly,
+/// we taint those arguments with `ExplicitLateBound::Yes` so that we
+/// can provide an appropriate diagnostic later.
+#[derive(Copy, Clone, PartialEq, Debug)]
+pub enum ExplicitLateBound {
+ Yes,
+ No,
+}
+
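A minimal, hypothetical example (assumed; not from this diff) of what `ExplicitLateBound::Yes` tracks: `'a` and `'b` below are late-bound, so naming them explicitly at the call site trips the `late_bound_lifetime_arguments` lint rather than being honored.

struct S;

impl S {
    fn late<'a, 'b>(self, _: &'a u8, _: &'b u8) {}
}

fn main() {
    // warning: cannot specify lifetime arguments explicitly
    //          if late bound lifetime parameters are present
    S.late::<'static>(&0, &0);

    S.late(&0, &0); // fine: the lifetimes are chosen at the call site
}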
+#[derive(Copy, Clone, PartialEq)]
+pub enum IsMethodCall {
+ Yes,
+ No,
+}
+
+/// Denotes the "position" of a generic argument, indicating if it is a generic type,
+/// generic function or generic method call.
+#[derive(Copy, Clone, PartialEq)]
+pub(crate) enum GenericArgPosition {
+ Type,
+ Value, // e.g., functions
+ MethodCall,
+}
+
+/// A marker denoting that the generic arguments that were
+/// provided did not match the respective generic parameters.
+#[derive(Clone, Default, Debug)]
+pub struct GenericArgCountMismatch {
+ /// Indicates whether a fatal error was reported (`Some`), or just a lint (`None`).
+ pub reported: Option<ErrorGuaranteed>,
+ /// A list of spans of arguments provided that were not valid.
+ pub invalid_args: Vec<Span>,
+}
+
+/// Decorates the result of a generic argument count mismatch
+/// check with whether explicit late bounds were provided.
+#[derive(Clone, Debug)]
+pub struct GenericArgCountResult {
+ pub explicit_late_bound: ExplicitLateBound,
+ pub correct: Result<(), GenericArgCountMismatch>,
+}
+
+pub trait CreateSubstsForGenericArgsCtxt<'a, 'tcx> {
+ fn args_for_def_id(&mut self, def_id: DefId) -> (Option<&'a GenericArgs<'a>>, bool);
+
+ fn provided_kind(
+ &mut self,
+ param: &ty::GenericParamDef,
+ arg: &GenericArg<'_>,
+ ) -> subst::GenericArg<'tcx>;
+
+ fn inferred_kind(
+ &mut self,
+ substs: Option<&[subst::GenericArg<'tcx>]>,
+ param: &ty::GenericParamDef,
+ infer_args: bool,
+ ) -> subst::GenericArg<'tcx>;
+}
+
+impl<'o, 'tcx> dyn AstConv<'tcx> + 'o {
+ #[instrument(level = "debug", skip(self), ret)]
+ pub fn ast_region_to_region(
+ &self,
+ lifetime: &hir::Lifetime,
+ def: Option<&ty::GenericParamDef>,
+ ) -> ty::Region<'tcx> {
+ let tcx = self.tcx();
+ let lifetime_name = |def_id| tcx.hir().name(tcx.hir().local_def_id_to_hir_id(def_id));
+
+ match tcx.named_region(lifetime.hir_id) {
+ Some(rl::Region::Static) => tcx.lifetimes.re_static,
+
+ Some(rl::Region::LateBound(debruijn, index, def_id)) => {
+ let name = lifetime_name(def_id.expect_local());
+ let br = ty::BoundRegion {
+ var: ty::BoundVar::from_u32(index),
+ kind: ty::BrNamed(def_id, name),
+ };
+ tcx.mk_region(ty::ReLateBound(debruijn, br))
+ }
+
+ Some(rl::Region::EarlyBound(def_id)) => {
+ let name = tcx.hir().ty_param_name(def_id.expect_local());
+ let item_def_id = tcx.hir().ty_param_owner(def_id.expect_local());
+ let generics = tcx.generics_of(item_def_id);
+ let index = generics.param_def_id_to_index[&def_id];
+ tcx.mk_region(ty::ReEarlyBound(ty::EarlyBoundRegion { def_id, index, name }))
+ }
+
+ Some(rl::Region::Free(scope, id)) => {
+ let name = lifetime_name(id.expect_local());
+ tcx.mk_region(ty::ReFree(ty::FreeRegion {
+ scope,
+ bound_region: ty::BrNamed(id, name),
+ }))
+
+ // (*) -- not late-bound, won't change
+ }
+
+ None => {
+ self.re_infer(def, lifetime.span).unwrap_or_else(|| {
+ debug!(?lifetime, "unelided lifetime in signature");
+
+ // This indicates an illegal lifetime
+ // elision. `resolve_lifetime` should have
+ // reported an error in this case -- but if
+ // not, let's error out.
+ tcx.sess.delay_span_bug(lifetime.span, "unelided lifetime in signature");
+
+ // Supply some dummy value. We don't have an
+ // `re_error`, annoyingly, so use `'static`.
+ tcx.lifetimes.re_static
+ })
+ }
+ }
+ }
+
+ /// Given a path `path` that refers to an item `I` with the declared generics `decl_generics`,
+ /// returns an appropriate set of substitutions for this particular reference to `I`.
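+ ///
+ /// For example (an illustrative sketch; `Foo` is a made-up type):
+ ///
+ /// ```ignore (illustrative)
+ /// struct Foo<T, U = u32>(T, U);
+ /// type A = Foo<i32>; // the substitutions for this reference are `[i32, u32]`
+ /// ```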
+ pub fn ast_path_substs_for_ty(
+ &self,
+ span: Span,
+ def_id: DefId,
+ item_segment: &hir::PathSegment<'_>,
+ ) -> SubstsRef<'tcx> {
+ let (substs, _) = self.create_substs_for_ast_path(
+ span,
+ def_id,
+ &[],
+ item_segment,
+ item_segment.args(),
+ item_segment.infer_args,
+ None,
+ None,
+ );
+ if let Some(b) = item_segment.args().bindings.first() {
+ Self::prohibit_assoc_ty_binding(self.tcx(), b.span);
+ }
+
+ substs
+ }
+
+ /// Given the type/lifetime/const arguments provided to some path (along with
+ /// an implicit `Self`, if this is a trait reference), returns the complete
+ /// set of substitutions. This may involve applying defaulted type parameters.
+ /// Constraints on associated types are created separately, by `create_assoc_bindings_for_generic_args`.
+ ///
+ /// Example:
+ ///
+ /// ```ignore (illustrative)
+ /// T: std::ops::Index<usize, Output = u32>
+ /// // ^1 ^^^^^^^^^^^^^^2 ^^^^3 ^^^^^^^^^^^4
+ /// ```
+ ///
+ /// 1. The `self_ty` here would refer to the type `T`.
+ /// 2. The path in question is the path to the trait `std::ops::Index`,
+ /// which will have been resolved to a `def_id`
+ /// 3. The `generic_args` contains info on the `<...>` contents. The `usize` type
+ /// parameters are returned in the `SubstsRef`, the associated type bindings like
+ /// `Output = u32` are returned from `create_assoc_bindings_for_generic_args`.
+ ///
+ /// Note that the type listing given here is *exactly* what the user provided.
+ ///
+ /// For (generic) associated types
+ ///
+ /// ```ignore (illustrative)
+ /// <Vec<u8> as Iterable<u8>>::Iter::<'a>
+ /// ```
+ ///
+ /// Here, the parent substs are the substs for the parent trait:
+ /// `[Vec<u8>, u8]`, and `generic_args` are the arguments for the associated
+ /// type itself: `['a]`. The returned `SubstsRef` concatenates these two
+ /// lists: `[Vec<u8>, u8, 'a]`.
+ #[instrument(level = "debug", skip(self, span), ret)]
+ fn create_substs_for_ast_path<'a>(
+ &self,
+ span: Span,
+ def_id: DefId,
+ parent_substs: &[subst::GenericArg<'tcx>],
+ seg: &hir::PathSegment<'_>,
+ generic_args: &'a hir::GenericArgs<'_>,
+ infer_args: bool,
+ self_ty: Option<Ty<'tcx>>,
+ constness: Option<ty::BoundConstness>,
+ ) -> (SubstsRef<'tcx>, GenericArgCountResult) {
+ // If the type is parameterized by this region, then replace this
+ // region with the current anon region binding (in other words,
+ // whatever & would get replaced with).
+
+ let tcx = self.tcx();
+ let generics = tcx.generics_of(def_id);
+ debug!("generics: {:?}", generics);
+
+ if generics.has_self {
+ if generics.parent.is_some() {
+ // The parent is a trait so it should have at least one subst
+ // for the `Self` type.
+ assert!(!parent_substs.is_empty())
+ } else {
+ // This item (presumably a trait) needs a self-type.
+ assert!(self_ty.is_some());
+ }
+ } else {
+ assert!(self_ty.is_none() && parent_substs.is_empty());
+ }
+
+ let arg_count = Self::check_generic_arg_count(
+ tcx,
+ span,
+ def_id,
+ seg,
+ generics,
+ generic_args,
+ GenericArgPosition::Type,
+ self_ty.is_some(),
+ infer_args,
+ );
+
+ // Skip processing if the type has no generic parameters.
+ // Traits always have `Self` as a generic parameter, which means they will not return early
+ // here and so associated type bindings will be handled regardless of whether there are any
+ // non-`Self` generic parameters.
+ if generics.params.is_empty() {
+ return (tcx.intern_substs(parent_substs), arg_count);
+ }
+
+ struct SubstsForAstPathCtxt<'a, 'tcx> {
+ astconv: &'a (dyn AstConv<'tcx> + 'a),
+ def_id: DefId,
+ generic_args: &'a GenericArgs<'a>,
+ span: Span,
+ inferred_params: Vec<Span>,
+ infer_args: bool,
+ }
+
+ impl<'a, 'tcx> CreateSubstsForGenericArgsCtxt<'a, 'tcx> for SubstsForAstPathCtxt<'a, 'tcx> {
+ fn args_for_def_id(&mut self, did: DefId) -> (Option<&'a GenericArgs<'a>>, bool) {
+ if did == self.def_id {
+ (Some(self.generic_args), self.infer_args)
+ } else {
+ // The last component of this tuple is unimportant.
+ (None, false)
+ }
+ }
+
+ fn provided_kind(
+ &mut self,
+ param: &ty::GenericParamDef,
+ arg: &GenericArg<'_>,
+ ) -> subst::GenericArg<'tcx> {
+ let tcx = self.astconv.tcx();
+
+ let mut handle_ty_args = |has_default, ty: &hir::Ty<'_>| {
+ if has_default {
+ tcx.check_optional_stability(
+ param.def_id,
+ Some(arg.hir_id()),
+ arg.span(),
+ None,
+ AllowUnstable::No,
+ |_, _| {
+ // Default generic parameters may not be marked
+ // with stability attributes, i.e. when the
+ // default parameter was defined at the same time
+ // as the rest of the type. As such, we ignore missing
+ // stability attributes.
+ },
+ );
+ }
+ if let (hir::TyKind::Infer, false) = (&ty.kind, self.astconv.allow_ty_infer()) {
+ self.inferred_params.push(ty.span);
+ tcx.ty_error().into()
+ } else {
+ self.astconv.ast_ty_to_ty(ty).into()
+ }
+ };
+
+ match (&param.kind, arg) {
+ (GenericParamDefKind::Lifetime, GenericArg::Lifetime(lt)) => {
+ self.astconv.ast_region_to_region(lt, Some(param)).into()
+ }
+ (&GenericParamDefKind::Type { has_default, .. }, GenericArg::Type(ty)) => {
+ handle_ty_args(has_default, ty)
+ }
+ (&GenericParamDefKind::Type { has_default, .. }, GenericArg::Infer(inf)) => {
+ handle_ty_args(has_default, &inf.to_ty())
+ }
+ (GenericParamDefKind::Const { .. }, GenericArg::Const(ct)) => {
+ ty::Const::from_opt_const_arg_anon_const(
+ tcx,
+ ty::WithOptConstParam {
+ did: tcx.hir().local_def_id(ct.value.hir_id),
+ const_param_did: Some(param.def_id),
+ },
+ )
+ .into()
+ }
+ (&GenericParamDefKind::Const { .. }, hir::GenericArg::Infer(inf)) => {
+ let ty = tcx.at(self.span).type_of(param.def_id);
+ if self.astconv.allow_ty_infer() {
+ self.astconv.ct_infer(ty, Some(param), inf.span).into()
+ } else {
+ self.inferred_params.push(inf.span);
+ tcx.const_error(ty).into()
+ }
+ }
+ _ => unreachable!(),
+ }
+ }
+
+ fn inferred_kind(
+ &mut self,
+ substs: Option<&[subst::GenericArg<'tcx>]>,
+ param: &ty::GenericParamDef,
+ infer_args: bool,
+ ) -> subst::GenericArg<'tcx> {
+ let tcx = self.astconv.tcx();
+ match param.kind {
+ GenericParamDefKind::Lifetime => self
+ .astconv
+ .re_infer(Some(param), self.span)
+ .unwrap_or_else(|| {
+ debug!(?param, "unelided lifetime in signature");
+
+ // This indicates an illegal lifetime in a non-assoc-trait position
+ tcx.sess.delay_span_bug(self.span, "unelided lifetime in signature");
+
+ // Supply some dummy value. We don't have an
+ // `re_error`, annoyingly, so use `'static`.
+ tcx.lifetimes.re_static
+ })
+ .into(),
+ GenericParamDefKind::Type { has_default, .. } => {
+ if !infer_args && has_default {
+ // No type parameter provided, but a default exists.
+ let substs = substs.unwrap();
+ if substs.iter().any(|arg| match arg.unpack() {
+ GenericArgKind::Type(ty) => ty.references_error(),
+ _ => false,
+ }) {
+ // Avoid ICE #86756 when type error recovery goes awry.
+ return tcx.ty_error().into();
+ }
+ self.astconv
+ .normalize_ty(
+ self.span,
+ EarlyBinder(tcx.at(self.span).type_of(param.def_id))
+ .subst(tcx, substs),
+ )
+ .into()
+ } else if infer_args {
+ self.astconv.ty_infer(Some(param), self.span).into()
+ } else {
+ // We've already errored above about the mismatch.
+ tcx.ty_error().into()
+ }
+ }
+ GenericParamDefKind::Const { has_default } => {
+ let ty = tcx.at(self.span).type_of(param.def_id);
+ if !infer_args && has_default {
+ tcx.bound_const_param_default(param.def_id)
+ .subst(tcx, substs.unwrap())
+ .into()
+ } else {
+ if infer_args {
+ self.astconv.ct_infer(ty, Some(param), self.span).into()
+ } else {
+ // We've already errored above about the mismatch.
+ tcx.const_error(ty).into()
+ }
+ }
+ }
+ }
+ }
+ }
+
+ let mut substs_ctx = SubstsForAstPathCtxt {
+ astconv: self,
+ def_id,
+ span,
+ generic_args,
+ inferred_params: vec![],
+ infer_args,
+ };
+ let substs = Self::create_substs_for_generic_args(
+ tcx,
+ def_id,
+ parent_substs,
+ self_ty.is_some(),
+ self_ty,
+ &arg_count,
+ &mut substs_ctx,
+ );
+
+ if let Some(ty::BoundConstness::ConstIfConst) = constness
+ && generics.has_self && !tcx.has_attr(def_id, sym::const_trait)
+ {
+ tcx.sess.emit_err(crate::errors::ConstBoundForNonConstTrait { span } );
+ }
+
+ (substs, arg_count)
+ }
+
+ fn create_assoc_bindings_for_generic_args<'a>(
+ &self,
+ generic_args: &'a hir::GenericArgs<'_>,
+ ) -> Vec<ConvertedBinding<'a, 'tcx>> {
+ // Convert associated-type bindings or constraints into a separate vector.
+ // Example: Given this:
+ //
+ // T: Iterator<Item = u32>
+ //
+ // The `T` is passed in as a self-type; the `Item = u32` is
+ // not a "type parameter" of the `Iterator` trait, but rather
+ // a restriction on `<T as Iterator>::Item`, so it is passed
+ // back separately.
+ let assoc_bindings = generic_args
+ .bindings
+ .iter()
+ .map(|binding| {
+ let kind = match binding.kind {
+ hir::TypeBindingKind::Equality { ref term } => match term {
+ hir::Term::Ty(ref ty) => {
+ ConvertedBindingKind::Equality(self.ast_ty_to_ty(ty).into())
+ }
+ hir::Term::Const(ref c) => {
+ let local_did = self.tcx().hir().local_def_id(c.hir_id);
+ let c = Const::from_anon_const(self.tcx(), local_did);
+ ConvertedBindingKind::Equality(c.into())
+ }
+ },
+ hir::TypeBindingKind::Constraint { ref bounds } => {
+ ConvertedBindingKind::Constraint(bounds)
+ }
+ };
+ ConvertedBinding {
+ hir_id: binding.hir_id,
+ item_name: binding.ident,
+ kind,
+ gen_args: binding.gen_args,
+ span: binding.span,
+ }
+ })
+ .collect();
+
+ assoc_bindings
+ }
+
+ pub fn create_substs_for_associated_item(
+ &self,
+ span: Span,
+ item_def_id: DefId,
+ item_segment: &hir::PathSegment<'_>,
+ parent_substs: SubstsRef<'tcx>,
+ ) -> SubstsRef<'tcx> {
+ debug!(
+ "create_substs_for_associated_item(span: {:?}, item_def_id: {:?}, item_segment: {:?}",
+ span, item_def_id, item_segment
+ );
+ let (args, _) = self.create_substs_for_ast_path(
+ span,
+ item_def_id,
+ parent_substs,
+ item_segment,
+ item_segment.args(),
+ item_segment.infer_args,
+ None,
+ None,
+ );
+
+ if let Some(b) = item_segment.args().bindings.first() {
+ Self::prohibit_assoc_ty_binding(self.tcx(), b.span);
+ }
+
+ args
+ }
+
+ /// Instantiates the path for the given trait reference, assuming that it's
+ /// bound to a valid trait type. Returns the lowered `ty::TraitRef` for the
+ /// defining trait, instantiated with the substitutions from the path. The
+ /// path _cannot_ refer to anything other than a trait.
+ ///
+ /// Associated type bindings like `Foo<T = X>` are not allowed here; they are
+ /// reported as errors via `prohibit_assoc_ty_binding`.
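+ ///
+ /// For example (an illustrative sketch; `Foo` is a made-up type):
+ ///
+ /// ```ignore (illustrative)
+ /// impl std::ops::Index<usize> for Foo { /* ... */ }
+ /// //   ^^^^^^^^^^^^^^^^^^^^^^ with `self_ty = Foo`, this trait reference is
+ /// //   lowered to the `ty::TraitRef` for `Foo: Index<usize>`
+ /// ```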
+ pub fn instantiate_mono_trait_ref(
+ &self,
+ trait_ref: &hir::TraitRef<'_>,
+ self_ty: Ty<'tcx>,
+ constness: ty::BoundConstness,
+ ) -> ty::TraitRef<'tcx> {
+ self.prohibit_generics(trait_ref.path.segments.split_last().unwrap().1.iter(), |_| {});
+
+ self.ast_path_to_mono_trait_ref(
+ trait_ref.path.span,
+ trait_ref.trait_def_id().unwrap_or_else(|| FatalError.raise()),
+ self_ty,
+ trait_ref.path.segments.last().unwrap(),
+ true,
+ Some(constness),
+ )
+ }
+
+ fn instantiate_poly_trait_ref_inner(
+ &self,
+ hir_id: hir::HirId,
+ span: Span,
+ binding_span: Option<Span>,
+ constness: ty::BoundConstness,
+ bounds: &mut Bounds<'tcx>,
+ speculative: bool,
+ trait_ref_span: Span,
+ trait_def_id: DefId,
+ trait_segment: &hir::PathSegment<'_>,
+ args: &GenericArgs<'_>,
+ infer_args: bool,
+ self_ty: Ty<'tcx>,
+ ) -> GenericArgCountResult {
+ let (substs, arg_count) = self.create_substs_for_ast_path(
+ trait_ref_span,
+ trait_def_id,
+ &[],
+ trait_segment,
+ args,
+ infer_args,
+ Some(self_ty),
+ Some(constness),
+ );
+
+ let tcx = self.tcx();
+ let bound_vars = tcx.late_bound_vars(hir_id);
+ debug!(?bound_vars);
+
+ let assoc_bindings = self.create_assoc_bindings_for_generic_args(args);
+
+ let poly_trait_ref =
+ ty::Binder::bind_with_vars(ty::TraitRef::new(trait_def_id, substs), bound_vars);
+
+ debug!(?poly_trait_ref, ?assoc_bindings);
+ bounds.trait_bounds.push((poly_trait_ref, span, constness));
+
+ let mut dup_bindings = FxHashMap::default();
+ for binding in &assoc_bindings {
+ // Specify type to assert that error was already reported in `Err` case.
+ let _: Result<_, ErrorGuaranteed> = self.add_predicates_for_ast_type_binding(
+ hir_id,
+ poly_trait_ref,
+ binding,
+ bounds,
+ speculative,
+ &mut dup_bindings,
+ binding_span.unwrap_or(binding.span),
+ constness,
+ );
+ // Okay to ignore `Err` because of `ErrorGuaranteed` (see above).
+ }
+
+ arg_count
+ }
+
+ /// Given a trait bound like `Debug`, applies that trait bound to the given self-type to construct
+ /// a full trait reference. The resulting trait reference is returned. This may also generate
+ /// auxiliary bounds, which are added to `bounds`.
+ ///
+ /// Example:
+ ///
+ /// ```ignore (illustrative)
+ /// poly_trait_ref = Iterator<Item = u32>
+ /// self_ty = Foo
+ /// ```
+ ///
+ /// this would return `Foo: Iterator` and add `<Foo as Iterator>::Item = u32` into `bounds`.
+ ///
+ /// **A note on binders:** against our usual convention, there is an implied binder around
+ /// the `self_ty` and `poly_trait_ref` parameters here. So they may reference bound regions.
+ /// If for example you had `for<'a> Foo<'a>: Bar<'a>`, then the `self_ty` would be `Foo<'a>`
+ /// where `'a` is a bound region at depth 0. Similarly, the `poly_trait_ref` would be
+ /// `Bar<'a>`. The returned poly-trait-ref will have this binder instantiated explicitly,
+ /// however.
+ #[instrument(level = "debug", skip(self, span, constness, bounds, speculative))]
+ pub(crate) fn instantiate_poly_trait_ref(
+ &self,
+ trait_ref: &hir::TraitRef<'_>,
+ span: Span,
+ constness: ty::BoundConstness,
+ self_ty: Ty<'tcx>,
+ bounds: &mut Bounds<'tcx>,
+ speculative: bool,
+ ) -> GenericArgCountResult {
+ let hir_id = trait_ref.hir_ref_id;
+ let binding_span = None;
+ let trait_ref_span = trait_ref.path.span;
+ let trait_def_id = trait_ref.trait_def_id().unwrap_or_else(|| FatalError.raise());
+ let trait_segment = trait_ref.path.segments.last().unwrap();
+ let args = trait_segment.args();
+ let infer_args = trait_segment.infer_args;
+
+ self.prohibit_generics(trait_ref.path.segments.split_last().unwrap().1.iter(), |_| {});
+ self.complain_about_internal_fn_trait(span, trait_def_id, trait_segment, false);
+
+ self.instantiate_poly_trait_ref_inner(
+ hir_id,
+ span,
+ binding_span,
+ constness,
+ bounds,
+ speculative,
+ trait_ref_span,
+ trait_def_id,
+ trait_segment,
+ args,
+ infer_args,
+ self_ty,
+ )
+ }
+
+ pub(crate) fn instantiate_lang_item_trait_ref(
+ &self,
+ lang_item: hir::LangItem,
+ span: Span,
+ hir_id: hir::HirId,
+ args: &GenericArgs<'_>,
+ self_ty: Ty<'tcx>,
+ bounds: &mut Bounds<'tcx>,
+ ) {
+ let binding_span = Some(span);
+ let constness = ty::BoundConstness::NotConst;
+ let speculative = false;
+ let trait_ref_span = span;
+ let trait_def_id = self.tcx().require_lang_item(lang_item, Some(span));
+ let trait_segment = &hir::PathSegment::invalid();
+ let infer_args = false;
+
+ self.instantiate_poly_trait_ref_inner(
+ hir_id,
+ span,
+ binding_span,
+ constness,
+ bounds,
+ speculative,
+ trait_ref_span,
+ trait_def_id,
+ trait_segment,
+ args,
+ infer_args,
+ self_ty,
+ );
+ }
+
+ fn ast_path_to_mono_trait_ref(
+ &self,
+ span: Span,
+ trait_def_id: DefId,
+ self_ty: Ty<'tcx>,
+ trait_segment: &hir::PathSegment<'_>,
+ is_impl: bool,
+ constness: Option<ty::BoundConstness>,
+ ) -> ty::TraitRef<'tcx> {
+ let (substs, _) = self.create_substs_for_ast_trait_ref(
+ span,
+ trait_def_id,
+ self_ty,
+ trait_segment,
+ is_impl,
+ constness,
+ );
+ if let Some(b) = trait_segment.args().bindings.first() {
+ Self::prohibit_assoc_ty_binding(self.tcx(), b.span);
+ }
+ ty::TraitRef::new(trait_def_id, substs)
+ }
+
+ #[instrument(level = "debug", skip(self, span))]
+ fn create_substs_for_ast_trait_ref<'a>(
+ &self,
+ span: Span,
+ trait_def_id: DefId,
+ self_ty: Ty<'tcx>,
+ trait_segment: &'a hir::PathSegment<'a>,
+ is_impl: bool,
+ constness: Option<ty::BoundConstness>,
+ ) -> (SubstsRef<'tcx>, GenericArgCountResult) {
+ self.complain_about_internal_fn_trait(span, trait_def_id, trait_segment, is_impl);
+
+ self.create_substs_for_ast_path(
+ span,
+ trait_def_id,
+ &[],
+ trait_segment,
+ trait_segment.args(),
+ trait_segment.infer_args,
+ Some(self_ty),
+ constness,
+ )
+ }
+
+ fn trait_defines_associated_type_named(&self, trait_def_id: DefId, assoc_name: Ident) -> bool {
+ self.tcx()
+ .associated_items(trait_def_id)
+ .find_by_name_and_kind(self.tcx(), assoc_name, ty::AssocKind::Type, trait_def_id)
+ .is_some()
+ }
+ fn trait_defines_associated_const_named(&self, trait_def_id: DefId, assoc_name: Ident) -> bool {
+ self.tcx()
+ .associated_items(trait_def_id)
+ .find_by_name_and_kind(self.tcx(), assoc_name, ty::AssocKind::Const, trait_def_id)
+ .is_some()
+ }
+
+ // Sets `implicitly_sized` on `Bounds` to `Some(span)` if necessary.
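+ //
+ // For example (an illustrative sketch; `f` and `g` are made up):
+ //
+ //     fn f<T>(x: T) {}          // `T` gets an implicit `T: Sized` bound
+ //     fn g<T: ?Sized>(x: &T) {} // the `?Sized` bound relaxes it, so nothing is added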
+ pub(crate) fn add_implicitly_sized<'hir>(
+ &self,
+ bounds: &mut Bounds<'hir>,
+ ast_bounds: &'hir [hir::GenericBound<'hir>],
+ self_ty_where_predicates: Option<(hir::HirId, &'hir [hir::WherePredicate<'hir>])>,
+ span: Span,
+ ) {
+ let tcx = self.tcx();
+
+ // Try to find an unbound in bounds.
+ let mut unbound = None;
+ let mut search_bounds = |ast_bounds: &'hir [hir::GenericBound<'hir>]| {
+ for ab in ast_bounds {
+ if let hir::GenericBound::Trait(ptr, hir::TraitBoundModifier::Maybe) = ab {
+ if unbound.is_none() {
+ unbound = Some(&ptr.trait_ref);
+ } else {
+ tcx.sess.emit_err(MultipleRelaxedDefaultBounds { span });
+ }
+ }
+ }
+ };
+ search_bounds(ast_bounds);
+ if let Some((self_ty, where_clause)) = self_ty_where_predicates {
+ let self_ty_def_id = tcx.hir().local_def_id(self_ty).to_def_id();
+ for clause in where_clause {
+ if let hir::WherePredicate::BoundPredicate(pred) = clause {
+ if pred.is_param_bound(self_ty_def_id) {
+ search_bounds(pred.bounds);
+ }
+ }
+ }
+ }
+
+ let sized_def_id = tcx.lang_items().require(LangItem::Sized);
+ match (&sized_def_id, unbound) {
+ (Ok(sized_def_id), Some(tpb))
+ if tpb.path.res == Res::Def(DefKind::Trait, *sized_def_id) =>
+ {
+ // There was in fact a `?Sized` bound, return without doing anything
+ return;
+ }
+ (_, Some(_)) => {
+ // There was a `?Trait` bound, but it was not `?Sized`; warn.
+ tcx.sess.span_warn(
+ span,
+ "default bound relaxed for a type parameter, but \
+ this does nothing because the given bound is not \
+ a default; only `?Sized` is supported",
+ );
+ // Otherwise, add implicitly sized if `Sized` is available.
+ }
+ _ => {
+ // There was no `?Sized` bound; add implicitly sized if `Sized` is available.
+ }
+ }
+ if sized_def_id.is_err() {
+ // No lang item for `Sized`, so we can't add it as a bound.
+ return;
+ }
+ bounds.implicitly_sized = Some(span);
+ }
+
+ /// This helper takes a *converted* parameter type (`param_ty`)
+ /// and an *unconverted* list of bounds:
+ ///
+ /// ```text
+ /// fn foo<T: Debug>
+ /// ^ ^^^^^ `ast_bounds` parameter, in HIR form
+ /// |
+ /// `param_ty`, in ty form
+ /// ```
+ ///
+ /// It adds these `ast_bounds` into the `bounds` structure.
+ ///
+ /// **A note on binders:** there is an implied binder around
+ /// `param_ty` and `ast_bounds`. See `instantiate_poly_trait_ref`
+ /// for more details.
+ #[instrument(level = "debug", skip(self, ast_bounds, bounds))]
+ pub(crate) fn add_bounds<'hir, I: Iterator<Item = &'hir hir::GenericBound<'hir>>>(
+ &self,
+ param_ty: Ty<'tcx>,
+ ast_bounds: I,
+ bounds: &mut Bounds<'tcx>,
+ bound_vars: &'tcx ty::List<ty::BoundVariableKind>,
+ ) {
+ for ast_bound in ast_bounds {
+ match ast_bound {
+ hir::GenericBound::Trait(poly_trait_ref, modifier) => {
+ let constness = match modifier {
+ hir::TraitBoundModifier::MaybeConst => ty::BoundConstness::ConstIfConst,
+ hir::TraitBoundModifier::None => ty::BoundConstness::NotConst,
+ hir::TraitBoundModifier::Maybe => continue,
+ };
+
+ let _ = self.instantiate_poly_trait_ref(
+ &poly_trait_ref.trait_ref,
+ poly_trait_ref.span,
+ constness,
+ param_ty,
+ bounds,
+ false,
+ );
+ }
+ &hir::GenericBound::LangItemTrait(lang_item, span, hir_id, args) => {
+ self.instantiate_lang_item_trait_ref(
+ lang_item, span, hir_id, args, param_ty, bounds,
+ );
+ }
+ hir::GenericBound::Outlives(lifetime) => {
+ let region = self.ast_region_to_region(lifetime, None);
+ bounds
+ .region_bounds
+ .push((ty::Binder::bind_with_vars(region, bound_vars), lifetime.span));
+ }
+ }
+ }
+ }
+
+ /// Translates a list of bounds from the HIR into the `Bounds` data structure.
+ /// The self-type for the bounds is given by `param_ty`.
+ ///
+ /// Example:
+ ///
+ /// ```ignore (illustrative)
+ /// fn foo<T: Bar + Baz>() { }
+ /// // ^ ^^^^^^^^^ ast_bounds
+ /// // param_ty
+ /// ```
+ ///
+ /// Whether `param_ty` should additionally be considered `Sized` unless there is an
+ /// explicit `?Sized` bound is not decided here: that would be the case in the example
+ /// above, but not in supertrait listings like `trait Foo: Bar + Baz`, and it is handled
+ /// separately by `add_implicitly_sized`.
+ pub(crate) fn compute_bounds(
+ &self,
+ param_ty: Ty<'tcx>,
+ ast_bounds: &[hir::GenericBound<'_>],
+ ) -> Bounds<'tcx> {
+ self.compute_bounds_inner(param_ty, ast_bounds)
+ }
+
+ /// Convert the bounds in `ast_bounds` that refer to traits which define an associated type
+ /// named `assoc_name` into ty::Bounds. Ignore the rest.
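+ ///
+ /// For example (an illustrative sketch):
+ ///
+ /// ```ignore (illustrative)
+ /// T: Iterator + Clone
+ /// // with `assoc_name` == `Item`, only the `Iterator` bound is kept, since
+ /// // `Clone` may not define an associated type named `Item`
+ /// ```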
+ pub(crate) fn compute_bounds_that_match_assoc_type(
+ &self,
+ param_ty: Ty<'tcx>,
+ ast_bounds: &[hir::GenericBound<'_>],
+ assoc_name: Ident,
+ ) -> Bounds<'tcx> {
+ let mut result = Vec::new();
+
+ for ast_bound in ast_bounds {
+ if let Some(trait_ref) = ast_bound.trait_ref()
+ && let Some(trait_did) = trait_ref.trait_def_id()
+ && self.tcx().trait_may_define_assoc_type(trait_did, assoc_name)
+ {
+ result.push(ast_bound.clone());
+ }
+ }
+
+ self.compute_bounds_inner(param_ty, &result)
+ }
+
+ fn compute_bounds_inner(
+ &self,
+ param_ty: Ty<'tcx>,
+ ast_bounds: &[hir::GenericBound<'_>],
+ ) -> Bounds<'tcx> {
+ let mut bounds = Bounds::default();
+
+ self.add_bounds(param_ty, ast_bounds.iter(), &mut bounds, ty::List::empty());
+ debug!(?bounds);
+
+ bounds
+ }
+
+ /// Given an HIR binding like `Item = Foo` or `Item: Foo`, pushes the corresponding predicates
+ /// onto `bounds`.
+ ///
+ /// **A note on binders:** given something like `T: for<'a> Iterator<Item = &'a u32>`, the
+ /// `trait_ref` here will be `for<'a> T: Iterator`. The `binding` data however is from *inside*
+ /// the binder (e.g., `&'a u32`) and hence may reference bound regions.
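+ ///
+ /// For example (an illustrative sketch):
+ ///
+ /// ```ignore (illustrative)
+ /// T: Iterator<Item = u32>  // pushes the projection predicate `<T as Iterator>::Item == u32`
+ /// T: Iterator<Item: Debug> // pushes the bound `<T as Iterator>::Item: Debug`
+ /// ```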
+ #[instrument(level = "debug", skip(self, bounds, speculative, dup_bindings, path_span))]
+ fn add_predicates_for_ast_type_binding(
+ &self,
+ hir_ref_id: hir::HirId,
+ trait_ref: ty::PolyTraitRef<'tcx>,
+ binding: &ConvertedBinding<'_, 'tcx>,
+ bounds: &mut Bounds<'tcx>,
+ speculative: bool,
+ dup_bindings: &mut FxHashMap<DefId, Span>,
+ path_span: Span,
+ constness: ty::BoundConstness,
+ ) -> Result<(), ErrorGuaranteed> {
+ // Given something like `U: SomeTrait<T = X>`, we want to produce a
+ // predicate like `<U as SomeTrait>::T = X`. This is somewhat
+ // subtle in the event that `T` is defined in a supertrait of
+ // `SomeTrait`, because in that case we need to upcast.
+ //
+ // That is, consider this case:
+ //
+ // ```
+ // trait SubTrait: SuperTrait<i32> { }
+ // trait SuperTrait<A> { type T; }
+ //
+ // ... B: SubTrait<T = foo> ...
+ // ```
+ //
+ // We want to produce `<B as SuperTrait<i32>>::T == foo`.
+
+ let tcx = self.tcx();
+
+ let candidate =
+ if self.trait_defines_associated_type_named(trait_ref.def_id(), binding.item_name) {
+ // Simple case: X is defined in the current trait.
+ trait_ref
+ } else {
+ // Otherwise, we have to walk through the supertraits to find
+ // those that do.
+ self.one_bound_for_assoc_type(
+ || traits::supertraits(tcx, trait_ref),
+ || trait_ref.print_only_trait_path().to_string(),
+ binding.item_name,
+ path_span,
+ || match binding.kind {
+ ConvertedBindingKind::Equality(ty) => Some(ty.to_string()),
+ _ => None,
+ },
+ )?
+ };
+
+ let (assoc_ident, def_scope) =
+ tcx.adjust_ident_and_get_scope(binding.item_name, candidate.def_id(), hir_ref_id);
+
+ // We have already adjusted the item name above, so compare with `ident.normalize_to_macros_2_0()` instead
+ // of calling `filter_by_name_and_kind`.
+ let find_item_of_kind = |kind| {
+ tcx.associated_items(candidate.def_id())
+ .filter_by_name_unhygienic(assoc_ident.name)
+ .find(|i| i.kind == kind && i.ident(tcx).normalize_to_macros_2_0() == assoc_ident)
+ };
+ let assoc_item = find_item_of_kind(ty::AssocKind::Type)
+ .or_else(|| find_item_of_kind(ty::AssocKind::Const))
+ .expect("missing associated type");
+
+ if !assoc_item.visibility(tcx).is_accessible_from(def_scope, tcx) {
+ tcx.sess
+ .struct_span_err(
+ binding.span,
+ &format!("{} `{}` is private", assoc_item.kind, binding.item_name),
+ )
+ .span_label(binding.span, &format!("private {}", assoc_item.kind))
+ .emit();
+ }
+ tcx.check_stability(assoc_item.def_id, Some(hir_ref_id), binding.span, None);
+
+ if !speculative {
+ dup_bindings
+ .entry(assoc_item.def_id)
+ .and_modify(|prev_span| {
+ self.tcx().sess.emit_err(ValueOfAssociatedStructAlreadySpecified {
+ span: binding.span,
+ prev_span: *prev_span,
+ item_name: binding.item_name,
+ def_path: tcx.def_path_str(assoc_item.container_id(tcx)),
+ });
+ })
+ .or_insert(binding.span);
+ }
+
+ // Include substitutions for generic parameters of associated types
+ let projection_ty = candidate.map_bound(|trait_ref| {
+ let ident = Ident::new(assoc_item.name, binding.item_name.span);
+ let item_segment = hir::PathSegment {
+ ident,
+ hir_id: binding.hir_id,
+ res: Res::Err,
+ args: Some(binding.gen_args),
+ infer_args: false,
+ };
+
+ let substs_trait_ref_and_assoc_item = self.create_substs_for_associated_item(
+ path_span,
+ assoc_item.def_id,
+ &item_segment,
+ trait_ref.substs,
+ );
+
+ debug!(?substs_trait_ref_and_assoc_item);
+
+ ty::ProjectionTy {
+ item_def_id: assoc_item.def_id,
+ substs: substs_trait_ref_and_assoc_item,
+ }
+ });
+
+ if !speculative {
+ // Find any late-bound regions declared in `ty` that are not
+ // declared in the trait-ref or assoc_item. These are not well-formed.
+ //
+ // Example:
+ //
+ // for<'a> <T as Iterator>::Item = &'a str // <-- 'a is bad
+ // for<'a> <T as FnMut<(&'a u32,)>>::Output = &'a str // <-- 'a is ok
+ if let ConvertedBindingKind::Equality(ty) = binding.kind {
+ let late_bound_in_trait_ref =
+ tcx.collect_constrained_late_bound_regions(&projection_ty);
+ let late_bound_in_ty =
+ tcx.collect_referenced_late_bound_regions(&trait_ref.rebind(ty));
+ debug!(?late_bound_in_trait_ref);
+ debug!(?late_bound_in_ty);
+
+ // FIXME: point at the type params that don't have appropriate lifetimes:
+ // struct S1<F: for<'a> Fn(&i32, &i32) -> &'a i32>(F);
+ // ---- ---- ^^^^^^^
+ self.validate_late_bound_regions(
+ late_bound_in_trait_ref,
+ late_bound_in_ty,
+ |br_name| {
+ struct_span_err!(
+ tcx.sess,
+ binding.span,
+ E0582,
+ "binding for associated type `{}` references {}, \
+ which does not appear in the trait input types",
+ binding.item_name,
+ br_name
+ )
+ },
+ );
+ }
+ }
+
+ match binding.kind {
+ ConvertedBindingKind::Equality(mut term) => {
+ // "Desugar" a constraint like `T: Iterator<Item = u32>` to
+ // the "projection predicate" for:
+ //
+ // `<T as Iterator>::Item = u32`
+ let assoc_item_def_id = projection_ty.skip_binder().item_def_id;
+ let def_kind = tcx.def_kind(assoc_item_def_id);
+ match (def_kind, term.unpack()) {
+ (hir::def::DefKind::AssocTy, ty::TermKind::Ty(_))
+ | (hir::def::DefKind::AssocConst, ty::TermKind::Const(_)) => (),
+ (_, _) => {
+ let got = if let Some(_) = term.ty() { "type" } else { "constant" };
+ let expected = def_kind.descr(assoc_item_def_id);
+ tcx.sess
+ .struct_span_err(
+ binding.span,
+ &format!("expected {expected} bound, found {got}"),
+ )
+ .span_note(
+ tcx.def_span(assoc_item_def_id),
+ &format!("{expected} defined here"),
+ )
+ .emit();
+ term = match def_kind {
+ hir::def::DefKind::AssocTy => tcx.ty_error().into(),
+ hir::def::DefKind::AssocConst => tcx
+ .const_error(
+ tcx.bound_type_of(assoc_item_def_id)
+ .subst(tcx, projection_ty.skip_binder().substs),
+ )
+ .into(),
+ _ => unreachable!(),
+ };
+ }
+ }
+ bounds.projection_bounds.push((
+ projection_ty.map_bound(|projection_ty| ty::ProjectionPredicate {
+ projection_ty,
+ term: term,
+ }),
+ binding.span,
+ ));
+ }
+ ConvertedBindingKind::Constraint(ast_bounds) => {
+ // "Desugar" a constraint like `T: Iterator<Item: Debug>` to
+ //
+ // `<T as Iterator>::Item: Debug`
+ //
+ // Calling `skip_binder` is okay, because `add_bounds` expects the `param_ty`
+ // parameter to have a skipped binder.
+ let param_ty = tcx.mk_ty(ty::Projection(projection_ty.skip_binder()));
+ self.add_bounds(param_ty, ast_bounds.iter(), bounds, candidate.bound_vars());
+ }
+ }
+ Ok(())
+ }
+
+ fn ast_path_to_ty(
+ &self,
+ span: Span,
+ did: DefId,
+ item_segment: &hir::PathSegment<'_>,
+ ) -> Ty<'tcx> {
+ let substs = self.ast_path_substs_for_ty(span, did, item_segment);
+ self.normalize_ty(
+ span,
+ EarlyBinder(self.tcx().at(span).type_of(did)).subst(self.tcx(), substs),
+ )
+ }
+
+ fn conv_object_ty_poly_trait_ref(
+ &self,
+ span: Span,
+ trait_bounds: &[hir::PolyTraitRef<'_>],
+ lifetime: &hir::Lifetime,
+ borrowed: bool,
+ representation: DynKind,
+ ) -> Ty<'tcx> {
+ let tcx = self.tcx();
+
+ let mut bounds = Bounds::default();
+ let mut potential_assoc_types = Vec::new();
+ let dummy_self = self.tcx().types.trait_object_dummy_self;
+ for trait_bound in trait_bounds.iter().rev() {
+ if let GenericArgCountResult {
+ correct:
+ Err(GenericArgCountMismatch { invalid_args: cur_potential_assoc_types, .. }),
+ ..
+ } = self.instantiate_poly_trait_ref(
+ &trait_bound.trait_ref,
+ trait_bound.span,
+ ty::BoundConstness::NotConst,
+ dummy_self,
+ &mut bounds,
+ false,
+ ) {
+ potential_assoc_types.extend(cur_potential_assoc_types);
+ }
+ }
+
+ // Expand trait aliases recursively and check that only one regular (non-auto) trait
+ // is used and no 'maybe' bounds are used.
+ let expanded_traits =
+ traits::expand_trait_aliases(tcx, bounds.trait_bounds.iter().map(|&(a, b, _)| (a, b)));
+ let (mut auto_traits, regular_traits): (Vec<_>, Vec<_>) = expanded_traits
+ .filter(|i| i.trait_ref().self_ty().skip_binder() == dummy_self)
+ .partition(|i| tcx.trait_is_auto(i.trait_ref().def_id()));
+ if regular_traits.len() > 1 {
+ let first_trait = &regular_traits[0];
+ let additional_trait = &regular_traits[1];
+ let mut err = struct_span_err!(
+ tcx.sess,
+ additional_trait.bottom().1,
+ E0225,
+ "only auto traits can be used as additional traits in a trait object"
+ );
+ additional_trait.label_with_exp_info(
+ &mut err,
+ "additional non-auto trait",
+ "additional use",
+ );
+ first_trait.label_with_exp_info(&mut err, "first non-auto trait", "first use");
+ err.help(&format!(
+ "consider creating a new trait with all of these as supertraits and using that \
+ trait here instead: `trait NewTrait: {} {{}}`",
+ regular_traits
+ .iter()
+ .map(|t| t.trait_ref().print_only_trait_path().to_string())
+ .collect::<Vec<_>>()
+ .join(" + "),
+ ));
+ err.note(
+ "auto-traits like `Send` and `Sync` are traits that have special properties; \
+ for more information on them, visit \
+ <https://doc.rust-lang.org/reference/special-types-and-traits.html#auto-traits>",
+ );
+ err.emit();
+ }
+
+ if regular_traits.is_empty() && auto_traits.is_empty() {
+ let trait_alias_span = bounds
+ .trait_bounds
+ .iter()
+ .map(|&(trait_ref, _, _)| trait_ref.def_id())
+ .find(|&trait_ref| tcx.is_trait_alias(trait_ref))
+ .map(|trait_ref| tcx.def_span(trait_ref));
+ tcx.sess.emit_err(TraitObjectDeclaredWithNoTraits { span, trait_alias_span });
+ return tcx.ty_error();
+ }
+
+ // Check that there are no gross object safety violations;
+ // most importantly, that the supertraits don't contain `Self`,
+ // to avoid ICEs.
+ for item in &regular_traits {
+ let object_safety_violations =
+ astconv_object_safety_violations(tcx, item.trait_ref().def_id());
+ if !object_safety_violations.is_empty() {
+ report_object_safety_error(
+ tcx,
+ span,
+ item.trait_ref().def_id(),
+ &object_safety_violations,
+ )
+ .emit();
+ return tcx.ty_error();
+ }
+ }
+
+ // Use a `BTreeSet` to keep output in a more consistent order.
+ let mut associated_types: FxHashMap<Span, BTreeSet<DefId>> = FxHashMap::default();
+
+ let regular_traits_refs_spans = bounds
+ .trait_bounds
+ .into_iter()
+ .filter(|(trait_ref, _, _)| !tcx.trait_is_auto(trait_ref.def_id()));
+
+ for (base_trait_ref, span, constness) in regular_traits_refs_spans {
+ assert_eq!(constness, ty::BoundConstness::NotConst);
+
+ for obligation in traits::elaborate_trait_ref(tcx, base_trait_ref) {
+ debug!(
+ "conv_object_ty_poly_trait_ref: observing object predicate `{:?}`",
+ obligation.predicate
+ );
+
+ let bound_predicate = obligation.predicate.kind();
+ match bound_predicate.skip_binder() {
+ ty::PredicateKind::Trait(pred) => {
+ let pred = bound_predicate.rebind(pred);
+ associated_types.entry(span).or_default().extend(
+ tcx.associated_items(pred.def_id())
+ .in_definition_order()
+ .filter(|item| item.kind == ty::AssocKind::Type)
+ .map(|item| item.def_id),
+ );
+ }
+ ty::PredicateKind::Projection(pred) => {
+ let pred = bound_predicate.rebind(pred);
+ // A `Self` within the original bound will be substituted with a
+ // `trait_object_dummy_self`, so check for that.
+ let references_self = match pred.skip_binder().term.unpack() {
+ ty::TermKind::Ty(ty) => ty.walk().any(|arg| arg == dummy_self.into()),
+ ty::TermKind::Const(c) => {
+ c.ty().walk().any(|arg| arg == dummy_self.into())
+ }
+ };
+
+ // If the projection output contains `Self`, force the user to
+ // elaborate it explicitly to avoid a lot of complexity.
+ //
+ // The "classically useful" case is the following:
+ // ```
+ // trait MyTrait: FnMut() -> <Self as MyTrait>::MyOutput {
+ // type MyOutput;
+ // }
+ // ```
+ //
+ // Here, the user could theoretically write `dyn MyTrait<Output = X>`,
+ // but actually supporting that would "expand" to an infinitely-long type
+ // `fix $ τ → dyn MyTrait<MyOutput = X, Output = <τ as MyTrait>::MyOutput>`.
+ //
+ // Instead, we force the user to write
+ // `dyn MyTrait<MyOutput = X, Output = X>`, which is uglier but works. See
+ // the discussion in #56288 for alternatives.
+ if !references_self {
+ // Include projections defined on supertraits.
+ bounds.projection_bounds.push((pred, span));
+ }
+ }
+ _ => (),
+ }
+ }
+ }
+
+ for (projection_bound, _) in &bounds.projection_bounds {
+ for def_ids in associated_types.values_mut() {
+ def_ids.remove(&projection_bound.projection_def_id());
+ }
+ }
+
+ self.complain_about_missing_associated_types(
+ associated_types,
+ potential_assoc_types,
+ trait_bounds,
+ );
+
+ // De-duplicate auto traits so that, e.g., `dyn Trait + Send + Send` is the same as
+ // `dyn Trait + Send`.
+ // We remove duplicates by inserting into a `FxHashSet` to avoid re-ordering
+ // the bounds
+ let mut duplicates = FxHashSet::default();
+ auto_traits.retain(|i| duplicates.insert(i.trait_ref().def_id()));
+ debug!("regular_traits: {:?}", regular_traits);
+ debug!("auto_traits: {:?}", auto_traits);
+
+ // Erase the `dummy_self` (`trait_object_dummy_self`) used above.
+ let existential_trait_refs = regular_traits.iter().map(|i| {
+ i.trait_ref().map_bound(|trait_ref: ty::TraitRef<'tcx>| {
+ assert_eq!(trait_ref.self_ty(), dummy_self);
+
+ // Verify that `dummy_self` did not leak inside default type parameters. This
+ // could not be done at path creation, since we need to see through trait aliases.
+ let mut missing_type_params = vec![];
+ let mut references_self = false;
+ let generics = tcx.generics_of(trait_ref.def_id);
+ let substs: Vec<_> = trait_ref
+ .substs
+ .iter()
+ .enumerate()
+ .skip(1) // Remove `Self` for `ExistentialPredicate`.
+ .map(|(index, arg)| {
+ if arg == dummy_self.into() {
+ let param = &generics.params[index];
+ missing_type_params.push(param.name);
+ return tcx.ty_error().into();
+ } else if arg.walk().any(|arg| arg == dummy_self.into()) {
+ references_self = true;
+ return tcx.ty_error().into();
+ }
+ arg
+ })
+ .collect();
+ let substs = tcx.intern_substs(&substs[..]);
+
+ let span = i.bottom().1;
+ let empty_generic_args = trait_bounds.iter().any(|hir_bound| {
+ hir_bound.trait_ref.path.res == Res::Def(DefKind::Trait, trait_ref.def_id)
+ && hir_bound.span.contains(span)
+ });
+ self.complain_about_missing_type_params(
+ missing_type_params,
+ trait_ref.def_id,
+ span,
+ empty_generic_args,
+ );
+
+ if references_self {
+ let def_id = i.bottom().0.def_id();
+ let mut err = struct_span_err!(
+ tcx.sess,
+ i.bottom().1,
+ E0038,
+ "the {} `{}` cannot be made into an object",
+ tcx.def_kind(def_id).descr(def_id),
+ tcx.item_name(def_id),
+ );
+ err.note(
+ rustc_middle::traits::ObjectSafetyViolation::SupertraitSelf(smallvec![])
+ .error_msg(),
+ );
+ err.emit();
+ }
+
+ ty::ExistentialTraitRef { def_id: trait_ref.def_id, substs }
+ })
+ });
+
+ let existential_projections = bounds.projection_bounds.iter().map(|(bound, _)| {
+ bound.map_bound(|mut b| {
+ assert_eq!(b.projection_ty.self_ty(), dummy_self);
+
+ // Like for trait refs, verify that `dummy_self` did not leak inside default type
+ // parameters.
+ let references_self = b.projection_ty.substs.iter().skip(1).any(|arg| {
+ if arg.walk().any(|arg| arg == dummy_self.into()) {
+ return true;
+ }
+ false
+ });
+ if references_self {
+ tcx.sess
+ .delay_span_bug(span, "trait object projection bounds reference `Self`");
+ let substs: Vec<_> = b
+ .projection_ty
+ .substs
+ .iter()
+ .map(|arg| {
+ if arg.walk().any(|arg| arg == dummy_self.into()) {
+ return tcx.ty_error().into();
+ }
+ arg
+ })
+ .collect();
+ b.projection_ty.substs = tcx.intern_substs(&substs[..]);
+ }
+
+ ty::ExistentialProjection::erase_self_ty(tcx, b)
+ })
+ });
+
+ let regular_trait_predicates = existential_trait_refs
+ .map(|trait_ref| trait_ref.map_bound(ty::ExistentialPredicate::Trait));
+ let auto_trait_predicates = auto_traits.into_iter().map(|trait_ref| {
+ ty::Binder::dummy(ty::ExistentialPredicate::AutoTrait(trait_ref.trait_ref().def_id()))
+ });
+ // N.b. principal, projections, auto traits
+ // FIXME: This is actually wrong with multiple principals in regards to symbol mangling
+ let mut v = regular_trait_predicates
+ .chain(
+ existential_projections.map(|x| x.map_bound(ty::ExistentialPredicate::Projection)),
+ )
+ .chain(auto_trait_predicates)
+ .collect::<SmallVec<[_; 8]>>();
+ v.sort_by(|a, b| a.skip_binder().stable_cmp(tcx, &b.skip_binder()));
+ v.dedup();
+ let existential_predicates = tcx.mk_poly_existential_predicates(v.into_iter());
+
+ // Use explicitly-specified region bound.
+ let region_bound = if !lifetime.is_elided() {
+ self.ast_region_to_region(lifetime, None)
+ } else {
+ self.compute_object_lifetime_bound(span, existential_predicates).unwrap_or_else(|| {
+ if tcx.named_region(lifetime.hir_id).is_some() {
+ self.ast_region_to_region(lifetime, None)
+ } else {
+ self.re_infer(None, span).unwrap_or_else(|| {
+ let mut err = struct_span_err!(
+ tcx.sess,
+ span,
+ E0228,
+ "the lifetime bound for this object type cannot be deduced \
+ from context; please supply an explicit bound"
+ );
+ if borrowed {
+ // We will have already emitted an error E0106 complaining about a
+ // missing named lifetime in `&dyn Trait`, so we elide this one.
+ err.delay_as_bug();
+ } else {
+ err.emit();
+ }
+ tcx.lifetimes.re_static
+ })
+ }
+ })
+ };
+ debug!("region_bound: {:?}", region_bound);
+
+ let ty = tcx.mk_dynamic(existential_predicates, region_bound, representation);
+ debug!("trait_object_type: {:?}", ty);
+ ty
+ }
+
+ fn report_ambiguous_associated_type(
+ &self,
+ span: Span,
+ type_str: &str,
+ trait_str: &str,
+ name: Symbol,
+ ) -> ErrorGuaranteed {
+ let mut err = struct_span_err!(self.tcx().sess, span, E0223, "ambiguous associated type");
+ if self
+ .tcx()
+ .resolutions(())
+ .confused_type_with_std_module
+ .keys()
+ .any(|full_span| full_span.contains(span))
+ {
+ err.span_suggestion(
+ span.shrink_to_lo(),
+ "you are looking for the module in `std`, not the primitive type",
+ "std::",
+ Applicability::MachineApplicable,
+ );
+ } else {
+ err.span_suggestion(
+ span,
+ "use fully-qualified syntax",
+ format!("<{} as {}>::{}", type_str, trait_str, name),
+ Applicability::HasPlaceholders,
+ );
+ }
+ err.emit()
+ }
+
+ // Search for a bound on a type parameter which includes the associated item
+ // given by `assoc_name`. `ty_param_def_id` is the `DefId` of the type parameter.
+ // This function will fail if there are no suitable bounds or there is
+ // any ambiguity.
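+ //
+ // For example (an illustrative sketch; `f` is made up):
+ //
+ //     fn f<T: Iterator>(x: T::Item) {}
+ //     // resolving `T::Item` searches the bounds of `T` for the unique bound
+ //     // whose trait defines an associated type named `Item`, here `T: Iterator`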
+ fn find_bound_for_assoc_item(
+ &self,
+ ty_param_def_id: LocalDefId,
+ assoc_name: Ident,
+ span: Span,
+ ) -> Result<ty::PolyTraitRef<'tcx>, ErrorGuaranteed> {
+ let tcx = self.tcx();
+
+ debug!(
+ "find_bound_for_assoc_item(ty_param_def_id={:?}, assoc_name={:?}, span={:?})",
+ ty_param_def_id, assoc_name, span,
+ );
+
+ let predicates = &self
+ .get_type_parameter_bounds(span, ty_param_def_id.to_def_id(), assoc_name)
+ .predicates;
+
+ debug!("find_bound_for_assoc_item: predicates={:#?}", predicates);
+
+ let param_name = tcx.hir().ty_param_name(ty_param_def_id);
+ self.one_bound_for_assoc_type(
+ || {
+ traits::transitive_bounds_that_define_assoc_type(
+ tcx,
+ predicates.iter().filter_map(|(p, _)| {
+ Some(p.to_opt_poly_trait_pred()?.map_bound(|t| t.trait_ref))
+ }),
+ assoc_name,
+ )
+ },
+ || param_name.to_string(),
+ assoc_name,
+ span,
+ || None,
+ )
+ }
+
+ // Checks that `bounds` contains exactly one element and reports appropriate
+ // errors otherwise.
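+ //
+ // For example (an illustrative sketch; `A` and `B` are made-up traits):
+ //
+ //     trait A { type Out; }
+ //     trait B { type Out; }
+ //     fn f<T: A + B>(x: T::Out) {} // ambiguous (E0221): both bounds define `Out`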
+ #[instrument(level = "debug", skip(self, all_candidates, ty_param_name, is_equality), ret)]
+ fn one_bound_for_assoc_type<I>(
+ &self,
+ all_candidates: impl Fn() -> I,
+ ty_param_name: impl Fn() -> String,
+ assoc_name: Ident,
+ span: Span,
+ is_equality: impl Fn() -> Option<String>,
+ ) -> Result<ty::PolyTraitRef<'tcx>, ErrorGuaranteed>
+ where
+ I: Iterator<Item = ty::PolyTraitRef<'tcx>>,
+ {
+ let mut matching_candidates = all_candidates()
+ .filter(|r| self.trait_defines_associated_type_named(r.def_id(), assoc_name));
+ let mut const_candidates = all_candidates()
+ .filter(|r| self.trait_defines_associated_const_named(r.def_id(), assoc_name));
+
+ let (bound, next_cand) = match (matching_candidates.next(), const_candidates.next()) {
+ (Some(bound), _) => (bound, matching_candidates.next()),
+ (None, Some(bound)) => (bound, const_candidates.next()),
+ (None, None) => {
+ let reported = self.complain_about_assoc_type_not_found(
+ all_candidates,
+ &ty_param_name(),
+ assoc_name,
+ span,
+ );
+ return Err(reported);
+ }
+ };
+ debug!(?bound);
+
+ if let Some(bound2) = next_cand {
+ debug!(?bound2);
+
+ let is_equality = is_equality();
+ let bounds = IntoIterator::into_iter([bound, bound2]).chain(matching_candidates);
+ let mut err = if is_equality.is_some() {
+ // More specific Error Index entry.
+ struct_span_err!(
+ self.tcx().sess,
+ span,
+ E0222,
+ "ambiguous associated type `{}` in bounds of `{}`",
+ assoc_name,
+ ty_param_name()
+ )
+ } else {
+ struct_span_err!(
+ self.tcx().sess,
+ span,
+ E0221,
+ "ambiguous associated type `{}` in bounds of `{}`",
+ assoc_name,
+ ty_param_name()
+ )
+ };
+ err.span_label(span, format!("ambiguous associated type `{}`", assoc_name));
+
+ let mut where_bounds = vec![];
+ for bound in bounds {
+ let bound_id = bound.def_id();
+ let bound_span = self
+ .tcx()
+ .associated_items(bound_id)
+ .find_by_name_and_kind(self.tcx(), assoc_name, ty::AssocKind::Type, bound_id)
+ .and_then(|item| self.tcx().hir().span_if_local(item.def_id));
+
+ if let Some(bound_span) = bound_span {
+ err.span_label(
+ bound_span,
+ format!(
+ "ambiguous `{}` from `{}`",
+ assoc_name,
+ bound.print_only_trait_path(),
+ ),
+ );
+ if let Some(constraint) = &is_equality {
+ where_bounds.push(format!(
+ " T: {trait}::{assoc} = {constraint}",
+ trait=bound.print_only_trait_path(),
+ assoc=assoc_name,
+ constraint=constraint,
+ ));
+ } else {
+ err.span_suggestion_verbose(
+ span.with_hi(assoc_name.span.lo()),
+ "use fully qualified syntax to disambiguate",
+ format!(
+ "<{} as {}>::",
+ ty_param_name(),
+ bound.print_only_trait_path(),
+ ),
+ Applicability::MaybeIncorrect,
+ );
+ }
+ } else {
+ err.note(&format!(
+ "associated type `{}` could derive from `{}`",
+ ty_param_name(),
+ bound.print_only_trait_path(),
+ ));
+ }
+ }
+ if !where_bounds.is_empty() {
+ err.help(&format!(
+ "consider introducing a new type parameter `T` and adding `where` constraints:\
+ \n where\n T: {},\n{}",
+ ty_param_name(),
+ where_bounds.join(",\n"),
+ ));
+ }
+ let reported = err.emit();
+ if !where_bounds.is_empty() {
+ return Err(reported);
+ }
+ }
+
+ Ok(bound)
+ }
+
+ // Create a type from a path to an associated type.
+ // For a path `A::B::C::D`, `qself_ty` and `qself_def` are the type and def for `A::B::C`
+ // and `assoc_segment` is the path segment for `D`. We return a type and a def for
+ // the whole path.
+ // Will fail except for `T::A` and `Self::A`; i.e., if `qself_ty`/`qself_def` are not a type
+ // parameter or `Self`.
+ // NOTE: When this function starts resolving `Trait::AssocTy` successfully
+ // it should also start reporting the `BARE_TRAIT_OBJECTS` lint.
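+ //
+ // For example (an illustrative sketch; `f` is made up):
+ //
+ //     fn f<T: Iterator>() -> T::Item { todo!() }
+ //     //                     ^^^^^^^ `qself_ty` is `T`, `assoc_segment` is `Item`;
+ //     //                     the result is the projection type `<T as Iterator>::Item`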
+ #[instrument(level = "debug", skip(self, hir_ref_id, span, qself, assoc_segment), fields(assoc_ident=?assoc_segment.ident), ret)]
+ pub fn associated_path_to_ty(
+ &self,
+ hir_ref_id: hir::HirId,
+ span: Span,
+ qself_ty: Ty<'tcx>,
+ qself: &hir::Ty<'_>,
+ assoc_segment: &hir::PathSegment<'_>,
+ permit_variants: bool,
+ ) -> Result<(Ty<'tcx>, DefKind, DefId), ErrorGuaranteed> {
+ let tcx = self.tcx();
+ let assoc_ident = assoc_segment.ident;
+ let qself_res = if let hir::TyKind::Path(hir::QPath::Resolved(_, ref path)) = qself.kind {
+ path.res
+ } else {
+ Res::Err
+ };
+
+ // Check if we have an enum variant.
+ let mut variant_resolution = None;
+ if let ty::Adt(adt_def, _) = qself_ty.kind() {
+ if adt_def.is_enum() {
+ let variant_def = adt_def
+ .variants()
+ .iter()
+ .find(|vd| tcx.hygienic_eq(assoc_ident, vd.ident(tcx), adt_def.did()));
+ if let Some(variant_def) = variant_def {
+ if permit_variants {
+ tcx.check_stability(variant_def.def_id, Some(hir_ref_id), span, None);
+ self.prohibit_generics(slice::from_ref(assoc_segment).iter(), |err| {
+ err.note("enum variants can't have type parameters");
+ let type_name = tcx.item_name(adt_def.did());
+ let msg = format!(
+ "you might have meant to specify type parameters on enum \
+ `{type_name}`"
+ );
+ let Some(args) = assoc_segment.args else { return; };
+ // Get the span of the generics args *including* the leading `::`.
+ let args_span = assoc_segment.ident.span.shrink_to_hi().to(args.span_ext);
+ if tcx.generics_of(adt_def.did()).count() == 0 {
+ // FIXME(estebank): we could also verify that the arguments being
+ // used work for the `enum`, instead of just looking if it takes *any*.
+ err.span_suggestion_verbose(
+ args_span,
+ &format!("{type_name} doesn't have generic parameters"),
+ "",
+ Applicability::MachineApplicable,
+ );
+ return;
+ }
+ let Ok(snippet) = tcx.sess.source_map().span_to_snippet(args_span) else {
+ err.note(&msg);
+ return;
+ };
+ let (qself_sugg_span, is_self) = if let hir::TyKind::Path(
+ hir::QPath::Resolved(_, ref path)
+ ) = qself.kind {
+ // If the path segment already has type params, we want to overwrite
+ // them.
+ match &path.segments[..] {
+ // `segment` is the previous to last element on the path,
+ // which would normally be the `enum` itself, while the last
+ // `_` `PathSegment` corresponds to the variant.
+ [.., hir::PathSegment {
+ ident,
+ args,
+ res: Res::Def(DefKind::Enum, _),
+ ..
+ }, _] => (
+ // We need to include the `::` in `Type::Variant::<Args>`
+ // to point the span to `::<Args>`, not just `<Args>`.
+ ident.span.shrink_to_hi().to(args.map_or(
+ ident.span.shrink_to_hi(),
+ |a| a.span_ext)),
+ false,
+ ),
+ [segment] => (
+ // We need to include the `::` in `Type::Variant::<Args>`
+ // to point the span to `::<Args>`, not just `<Args>`.
+ segment.ident.span.shrink_to_hi().to(segment.args.map_or(
+ segment.ident.span.shrink_to_hi(),
+ |a| a.span_ext)),
+ kw::SelfUpper == segment.ident.name,
+ ),
+ _ => {
+ err.note(&msg);
+ return;
+ }
+ }
+ } else {
+ err.note(&msg);
+ return;
+ };
+ let suggestion = vec![
+ if is_self {
+ // Account for people writing `Self::Variant::<Args>`, where
+ // `Self` is the enum, and suggest replacing `Self` with the
+ // appropriate type: `Type::<Args>::Variant`.
+ (qself.span, format!("{type_name}{snippet}"))
+ } else {
+ (qself_sugg_span, snippet)
+ },
+ (args_span, String::new()),
+ ];
+ err.multipart_suggestion_verbose(
+ &msg,
+ suggestion,
+ Applicability::MaybeIncorrect,
+ );
+ });
+ return Ok((qself_ty, DefKind::Variant, variant_def.def_id));
+ } else {
+ variant_resolution = Some(variant_def.def_id);
+ }
+ }
+ }
+ }
+
+ // Find the type of the associated item, and the trait where the associated
+ // item is declared.
+ let bound = match (&qself_ty.kind(), qself_res) {
+ (_, Res::SelfTyAlias { alias_to: impl_def_id, is_trait_impl: true, .. }) => {
+ // `Self` in an impl of a trait -- we have a concrete self type and a
+ // trait reference.
+ let Some(trait_ref) = tcx.impl_trait_ref(impl_def_id) else {
+ // A cycle error occurred, most likely.
+ let guar = tcx.sess.delay_span_bug(span, "expected cycle error");
+ return Err(guar);
+ };
+
+ self.one_bound_for_assoc_type(
+ || traits::supertraits(tcx, ty::Binder::dummy(trait_ref)),
+ || "Self".to_string(),
+ assoc_ident,
+ span,
+ || None,
+ )?
+ }
+ (
+ &ty::Param(_),
+ Res::SelfTyParam { trait_: param_did } | Res::Def(DefKind::TyParam, param_did),
+ ) => self.find_bound_for_assoc_item(param_did.expect_local(), assoc_ident, span)?,
+ _ => {
+ let reported = if variant_resolution.is_some() {
+ // Variant in type position
+ let msg = format!("expected type, found variant `{}`", assoc_ident);
+ tcx.sess.span_err(span, &msg)
+ } else if qself_ty.is_enum() {
+ let mut err = struct_span_err!(
+ tcx.sess,
+ assoc_ident.span,
+ E0599,
+ "no variant named `{}` found for enum `{}`",
+ assoc_ident,
+ qself_ty,
+ );
+
+ let adt_def = qself_ty.ty_adt_def().expect("enum is not an ADT");
+ if let Some(suggested_name) = find_best_match_for_name(
+ &adt_def
+ .variants()
+ .iter()
+ .map(|variant| variant.name)
+ .collect::<Vec<Symbol>>(),
+ assoc_ident.name,
+ None,
+ ) {
+ err.span_suggestion(
+ assoc_ident.span,
+ "there is a variant with a similar name",
+ suggested_name,
+ Applicability::MaybeIncorrect,
+ );
+ } else {
+ err.span_label(
+ assoc_ident.span,
+ format!("variant not found in `{}`", qself_ty),
+ );
+ }
+
+ if let Some(sp) = tcx.hir().span_if_local(adt_def.did()) {
+ err.span_label(sp, format!("variant `{}` not found here", assoc_ident));
+ }
+
+ err.emit()
+ } else if let Some(reported) = qself_ty.error_reported() {
+ reported
+ } else {
+ // Don't print `TyErr` to the user.
+ self.report_ambiguous_associated_type(
+ span,
+ &qself_ty.to_string(),
+ "Trait",
+ assoc_ident.name,
+ )
+ };
+ return Err(reported);
+ }
+ };
+
+ let trait_did = bound.def_id();
+ let (assoc_ident, def_scope) =
+ tcx.adjust_ident_and_get_scope(assoc_ident, trait_did, hir_ref_id);
+
+ // We have already adjusted the item name above, so compare with `ident.normalize_to_macros_2_0()` instead
+ // of calling `filter_by_name_and_kind`.
+ let item = tcx.associated_items(trait_did).in_definition_order().find(|i| {
+ i.kind.namespace() == Namespace::TypeNS
+ && i.ident(tcx).normalize_to_macros_2_0() == assoc_ident
+ });
+ // Assume that if it's not matched, there must be a const defined with the same name
+ // but it was used in a type position.
+ let Some(item) = item else {
+ let msg = format!("found associated const `{assoc_ident}` when type was expected");
+ let guar = tcx.sess.struct_span_err(span, &msg).emit();
+ return Err(guar);
+ };
+
+ let ty = self.projected_ty_from_poly_trait_ref(span, item.def_id, assoc_segment, bound);
+ let ty = self.normalize_ty(span, ty);
+
+ let kind = DefKind::AssocTy;
+ if !item.visibility(tcx).is_accessible_from(def_scope, tcx) {
+ let kind = kind.descr(item.def_id);
+ let msg = format!("{} `{}` is private", kind, assoc_ident);
+ tcx.sess
+ .struct_span_err(span, &msg)
+ .span_label(span, &format!("private {}", kind))
+ .emit();
+ }
+ tcx.check_stability(item.def_id, Some(hir_ref_id), span, None);
+
+ if let Some(variant_def_id) = variant_resolution {
+ tcx.struct_span_lint_hir(
+ AMBIGUOUS_ASSOCIATED_ITEMS,
+ hir_ref_id,
+ span,
+ "ambiguous associated item",
+ |lint| {
+ let mut could_refer_to = |kind: DefKind, def_id, also| {
+ let note_msg = format!(
+ "`{}` could{} refer to the {} defined here",
+ assoc_ident,
+ also,
+ kind.descr(def_id)
+ );
+ lint.span_note(tcx.def_span(def_id), &note_msg);
+ };
+
+ could_refer_to(DefKind::Variant, variant_def_id, "");
+ could_refer_to(kind, item.def_id, " also");
+
+ lint.span_suggestion(
+ span,
+ "use fully-qualified syntax",
+ format!("<{} as {}>::{}", qself_ty, tcx.item_name(trait_did), assoc_ident),
+ Applicability::MachineApplicable,
+ );
+
+ lint
+ },
+ );
+ }
+ Ok((ty, kind, item.def_id))
+ }
+
+ fn qpath_to_ty(
+ &self,
+ span: Span,
+ opt_self_ty: Option<Ty<'tcx>>,
+ item_def_id: DefId,
+ trait_segment: &hir::PathSegment<'_>,
+ item_segment: &hir::PathSegment<'_>,
+ constness: ty::BoundConstness,
+ ) -> Ty<'tcx> {
+ let tcx = self.tcx();
+
+ let trait_def_id = tcx.parent(item_def_id);
+
+ debug!("qpath_to_ty: trait_def_id={:?}", trait_def_id);
+
+ let Some(self_ty) = opt_self_ty else {
+ let path_str = tcx.def_path_str(trait_def_id);
+
+ let def_id = self.item_def_id();
+
+ debug!("qpath_to_ty: self.item_def_id()={:?}", def_id);
+
+ let parent_def_id = def_id
+ .and_then(|def_id| {
+ def_id.as_local().map(|def_id| tcx.hir().local_def_id_to_hir_id(def_id))
+ })
+ .map(|hir_id| tcx.hir().get_parent_item(hir_id).to_def_id());
+
+ debug!("qpath_to_ty: parent_def_id={:?}", parent_def_id);
+
+ // If the trait in the segment is the same as the trait defining the item,
+ // use the `<Self as ..>` syntax in the error.
+ let is_part_of_self_trait_constraints = def_id == Some(trait_def_id);
+ let is_part_of_fn_in_self_trait = parent_def_id == Some(trait_def_id);
+
+ let type_name = if is_part_of_self_trait_constraints || is_part_of_fn_in_self_trait {
+ "Self"
+ } else {
+ "Type"
+ };
+
+ self.report_ambiguous_associated_type(
+ span,
+ type_name,
+ &path_str,
+ item_segment.ident.name,
+ );
+ return tcx.ty_error();
+ };
+
+ debug!("qpath_to_ty: self_type={:?}", self_ty);
+
+ let trait_ref = self.ast_path_to_mono_trait_ref(
+ span,
+ trait_def_id,
+ self_ty,
+ trait_segment,
+ false,
+ Some(constness),
+ );
+
+ let item_substs = self.create_substs_for_associated_item(
+ span,
+ item_def_id,
+ item_segment,
+ trait_ref.substs,
+ );
+
+ debug!("qpath_to_ty: trait_ref={:?}", trait_ref);
+
+ self.normalize_ty(span, tcx.mk_projection(item_def_id, item_substs))
+ }
+
+ pub fn prohibit_generics<'a>(
+ &self,
+ segments: impl Iterator<Item = &'a hir::PathSegment<'a>> + Clone,
+ extend: impl Fn(&mut Diagnostic),
+ ) -> bool {
+ let args = segments.clone().flat_map(|segment| segment.args().args);
+
+ let (lt, ty, ct, inf) =
+ args.clone().fold((false, false, false, false), |(lt, ty, ct, inf), arg| match arg {
+ hir::GenericArg::Lifetime(_) => (true, ty, ct, inf),
+ hir::GenericArg::Type(_) => (lt, true, ct, inf),
+ hir::GenericArg::Const(_) => (lt, ty, true, inf),
+ hir::GenericArg::Infer(_) => (lt, ty, ct, true),
+ });
+ let mut emitted = false;
+ if lt || ty || ct || inf {
+ let types_and_spans: Vec<_> = segments
+ .clone()
+ .flat_map(|segment| {
+ if segment.args().args.is_empty() {
+ None
+ } else {
+ Some((
+ match segment.res {
+ Res::PrimTy(ty) => format!("{} `{}`", segment.res.descr(), ty.name()),
+ Res::Def(_, def_id)
+ if let Some(name) = self.tcx().opt_item_name(def_id) => {
+ format!("{} `{name}`", segment.res.descr())
+ }
+ Res::Err => "this type".to_string(),
+ _ => segment.res.descr().to_string(),
+ },
+ segment.ident.span,
+ ))
+ }
+ })
+ .collect();
+ let this_type = match &types_and_spans[..] {
+ [.., _, (last, _)] => format!(
+ "{} and {last}",
+ types_and_spans[..types_and_spans.len() - 1]
+ .iter()
+ .map(|(x, _)| x.as_str())
+ .intersperse(&", ")
+ .collect::<String>()
+ ),
+ [(only, _)] => only.to_string(),
+ [] => "this type".to_string(),
+ };
+
+ let arg_spans: Vec<Span> = args.map(|arg| arg.span()).collect();
+
+ let mut kinds = Vec::with_capacity(4);
+ if lt {
+ kinds.push("lifetime");
+ }
+ if ty {
+ kinds.push("type");
+ }
+ if ct {
+ kinds.push("const");
+ }
+ if inf {
+ kinds.push("generic");
+ }
+ let (kind, s) = match kinds[..] {
+ [.., _, last] => (
+ format!(
+ "{} and {last}",
+ kinds[..kinds.len() - 1]
+ .iter()
+ .map(|&x| x)
+ .intersperse(", ")
+ .collect::<String>()
+ ),
+ "s",
+ ),
+ [only] => (format!("{only}"), ""),
+ [] => unreachable!(),
+ };
+ let last_span = *arg_spans.last().unwrap();
+ let span: MultiSpan = arg_spans.into();
+ let mut err = struct_span_err!(
+ self.tcx().sess,
+ span,
+ E0109,
+ "{kind} arguments are not allowed on {this_type}",
+ );
+ err.span_label(last_span, format!("{kind} argument{s} not allowed"));
+ for (what, span) in types_and_spans {
+ err.span_label(span, format!("not allowed on {what}"));
+ }
+ extend(&mut err);
+ err.emit();
+ emitted = true;
+ }
+
+ for segment in segments {
+ // Only emit the first error to avoid overloading the user with error messages.
+ if let Some(b) = segment.args().bindings.first() {
+ Self::prohibit_assoc_ty_binding(self.tcx(), b.span);
+ return true;
+ }
+ }
+ emitted
+ }
+
+ // FIXME(eddyb, varkor) handle type paths here too, not just value ones.
+ pub fn def_ids_for_value_path_segments(
+ &self,
+ segments: &[hir::PathSegment<'_>],
+ self_ty: Option<Ty<'tcx>>,
+ kind: DefKind,
+ def_id: DefId,
+ ) -> Vec<PathSeg> {
+ // We need to extract the type parameters supplied by the user in
+ // the path `path`. Due to the current setup, this is a bit of a
+        // tricky process; the problem is that resolve only tells us the
+ // end-point of the path resolution, and not the intermediate steps.
+ // Luckily, we can (at least for now) deduce the intermediate steps
+ // just from the end-point.
+ //
+        // There are basically four cases to consider:
+ //
+ // 1. Reference to a constructor of a struct:
+ //
+ // struct Foo<T>(...)
+ //
+ // In this case, the parameters are declared in the type space.
+ //
+ // 2. Reference to a constructor of an enum variant:
+ //
+ // enum E<T> { Foo(...) }
+ //
+ // In this case, the parameters are defined in the type space,
+ // but may be specified either on the type or the variant.
+ //
+ // 3. Reference to a fn item or a free constant:
+ //
+ // fn foo<T>() { }
+ //
+ // In this case, the path will again always have the form
+ // `a::b::foo::<T>` where only the final segment should have
+ // type parameters. However, in this case, those parameters are
+ // declared on a value, and hence are in the `FnSpace`.
+ //
+ // 4. Reference to a method or an associated constant:
+ //
+ // impl<A> SomeStruct<A> {
+ // fn foo<B>(...)
+ // }
+ //
+ // Here we can have a path like
+ // `a::b::SomeStruct::<A>::foo::<B>`, in which case parameters
+ // may appear in two places. The penultimate segment,
+ // `SomeStruct::<A>`, contains parameters in TypeSpace, and the
+        //    final segment, `foo::<B>`, contains parameters in fn space.
+ //
+ // The first step then is to categorize the segments appropriately.
+
+ let tcx = self.tcx();
+
+ assert!(!segments.is_empty());
+ let last = segments.len() - 1;
+
+ let mut path_segs = vec![];
+
+ match kind {
+ // Case 1. Reference to a struct constructor.
+ DefKind::Ctor(CtorOf::Struct, ..) => {
+ // Everything but the final segment should have no
+ // parameters at all.
+ let generics = tcx.generics_of(def_id);
+ // Variant and struct constructors use the
+ // generics of their parent type definition.
+ let generics_def_id = generics.parent.unwrap_or(def_id);
+ path_segs.push(PathSeg(generics_def_id, last));
+ }
+
+ // Case 2. Reference to a variant constructor.
+ DefKind::Ctor(CtorOf::Variant, ..) | DefKind::Variant => {
+ let adt_def = self_ty.map(|t| t.ty_adt_def().unwrap());
+ let (generics_def_id, index) = if let Some(adt_def) = adt_def {
+ debug_assert!(adt_def.is_enum());
+ (adt_def.did(), last)
+ } else if last >= 1 && segments[last - 1].args.is_some() {
+ // Everything but the penultimate segment should have no
+ // parameters at all.
+ let mut def_id = def_id;
+
+ // `DefKind::Ctor` -> `DefKind::Variant`
+ if let DefKind::Ctor(..) = kind {
+ def_id = tcx.parent(def_id);
+ }
+
+ // `DefKind::Variant` -> `DefKind::Enum`
+ let enum_def_id = tcx.parent(def_id);
+ (enum_def_id, last - 1)
+ } else {
+ // FIXME: lint here recommending `Enum::<...>::Variant` form
+ // instead of `Enum::Variant::<...>` form.
+
+ // Everything but the final segment should have no
+ // parameters at all.
+ let generics = tcx.generics_of(def_id);
+ // Variant and struct constructors use the
+ // generics of their parent type definition.
+ (generics.parent.unwrap_or(def_id), last)
+ };
+ path_segs.push(PathSeg(generics_def_id, index));
+ }
+
+ // Case 3. Reference to a top-level value.
+ DefKind::Fn | DefKind::Const | DefKind::ConstParam | DefKind::Static(_) => {
+ path_segs.push(PathSeg(def_id, last));
+ }
+
+ // Case 4. Reference to a method or associated const.
+ DefKind::AssocFn | DefKind::AssocConst => {
+ if segments.len() >= 2 {
+ let generics = tcx.generics_of(def_id);
+ path_segs.push(PathSeg(generics.parent.unwrap(), last - 1));
+ }
+ path_segs.push(PathSeg(def_id, last));
+ }
+
+ kind => bug!("unexpected definition kind {:?} for {:?}", kind, def_id),
+ }
+
+ debug!("path_segs = {:?}", path_segs);
+
+ path_segs
+ }
+
+ // Check a type `Path` and convert it to a `Ty`.
+ pub fn res_to_ty(
+ &self,
+ opt_self_ty: Option<Ty<'tcx>>,
+ path: &hir::Path<'_>,
+ permit_variants: bool,
+ ) -> Ty<'tcx> {
+ let tcx = self.tcx();
+
+ debug!(
+ "res_to_ty(res={:?}, opt_self_ty={:?}, path_segments={:?})",
+ path.res, opt_self_ty, path.segments
+ );
+
+ let span = path.span;
+ match path.res {
+ Res::Def(DefKind::OpaqueTy | DefKind::ImplTraitPlaceholder, did) => {
+ // Check for desugared `impl Trait`.
+ assert!(ty::is_impl_trait_defn(tcx, did).is_none());
+ let item_segment = path.segments.split_last().unwrap();
+ self.prohibit_generics(item_segment.1.iter(), |err| {
+ err.note("`impl Trait` types can't have type parameters");
+ });
+ let substs = self.ast_path_substs_for_ty(span, did, item_segment.0);
+ self.normalize_ty(span, tcx.mk_opaque(did, substs))
+ }
+ Res::Def(
+ DefKind::Enum
+ | DefKind::TyAlias
+ | DefKind::Struct
+ | DefKind::Union
+ | DefKind::ForeignTy,
+ did,
+ ) => {
+ assert_eq!(opt_self_ty, None);
+ self.prohibit_generics(path.segments.split_last().unwrap().1.iter(), |_| {});
+ self.ast_path_to_ty(span, did, path.segments.last().unwrap())
+ }
+ Res::Def(kind @ DefKind::Variant, def_id) if permit_variants => {
+ // Convert "variant type" as if it were a real type.
+                // The resulting `Ty` is the type of the variant's enum for now.
+ assert_eq!(opt_self_ty, None);
+
+ let path_segs =
+ self.def_ids_for_value_path_segments(path.segments, None, kind, def_id);
+ let generic_segs: FxHashSet<_> =
+ path_segs.iter().map(|PathSeg(_, index)| index).collect();
+ self.prohibit_generics(
+ path.segments.iter().enumerate().filter_map(|(index, seg)| {
+ if !generic_segs.contains(&index) { Some(seg) } else { None }
+ }),
+ |err| {
+ err.note("enum variants can't have type parameters");
+ },
+ );
+
+ let PathSeg(def_id, index) = path_segs.last().unwrap();
+ self.ast_path_to_ty(span, *def_id, &path.segments[*index])
+ }
+ Res::Def(DefKind::TyParam, def_id) => {
+ assert_eq!(opt_self_ty, None);
+ self.prohibit_generics(path.segments.iter(), |err| {
+ if let Some(span) = tcx.def_ident_span(def_id) {
+ let name = tcx.item_name(def_id);
+ err.span_note(span, &format!("type parameter `{name}` defined here"));
+ }
+ });
+
+ let def_id = def_id.expect_local();
+ let item_def_id = tcx.hir().ty_param_owner(def_id);
+ let generics = tcx.generics_of(item_def_id);
+ let index = generics.param_def_id_to_index[&def_id.to_def_id()];
+ tcx.mk_ty_param(index, tcx.hir().ty_param_name(def_id))
+ }
+ Res::SelfTyParam { .. } => {
+ // `Self` in trait or type alias.
+ assert_eq!(opt_self_ty, None);
+ self.prohibit_generics(path.segments.iter(), |err| {
+ if let [hir::PathSegment { args: Some(args), ident, .. }] = &path.segments[..] {
+ err.span_suggestion_verbose(
+ ident.span.shrink_to_hi().to(args.span_ext),
+ "the `Self` type doesn't accept type parameters",
+ "",
+ Applicability::MaybeIncorrect,
+ );
+ }
+ });
+ tcx.types.self_param
+ }
+ Res::SelfTyAlias { alias_to: def_id, forbid_generic, .. } => {
+ // `Self` in impl (we know the concrete type).
+ assert_eq!(opt_self_ty, None);
+ // Try to evaluate any array length constants.
+ let ty = tcx.at(span).type_of(def_id);
+ let span_of_impl = tcx.span_of_impl(def_id);
+ self.prohibit_generics(path.segments.iter(), |err| {
+ let def_id = match *ty.kind() {
+ ty::Adt(self_def, _) => self_def.did(),
+ _ => return,
+ };
+
+ let type_name = tcx.item_name(def_id);
+ let span_of_ty = tcx.def_ident_span(def_id);
+ let generics = tcx.generics_of(def_id).count();
+
+ let msg = format!("`Self` is of type `{ty}`");
+ if let (Ok(i_sp), Some(t_sp)) = (span_of_impl, span_of_ty) {
+ let mut span: MultiSpan = vec![t_sp].into();
+ span.push_span_label(
+ i_sp,
+ &format!("`Self` is on type `{type_name}` in this `impl`"),
+ );
+ let mut postfix = "";
+ if generics == 0 {
+ postfix = ", which doesn't have generic parameters";
+ }
+ span.push_span_label(
+ t_sp,
+ &format!("`Self` corresponds to this type{postfix}"),
+ );
+ err.span_note(span, &msg);
+ } else {
+ err.note(&msg);
+ }
+ for segment in path.segments {
+ if let Some(args) = segment.args && segment.ident.name == kw::SelfUpper {
+ if generics == 0 {
+                                // FIXME(estebank): we could also verify that the arguments
+                                // being used work for the `enum`, instead of just checking
+                                // whether it takes *any*.
+ err.span_suggestion_verbose(
+ segment.ident.span.shrink_to_hi().to(args.span_ext),
+ "the `Self` type doesn't accept type parameters",
+ "",
+ Applicability::MachineApplicable,
+ );
+ return;
+ } else {
+ err.span_suggestion_verbose(
+ segment.ident.span,
+ format!(
+ "the `Self` type doesn't accept type parameters, use the \
+ concrete type's name `{type_name}` instead if you want to \
+ specify its type parameters"
+ ),
+ type_name,
+ Applicability::MaybeIncorrect,
+ );
+ }
+ }
+ }
+ });
+ // HACK(min_const_generics): Forbid generic `Self` types
+ // here as we can't easily do that during nameres.
+ //
+ // We do this before normalization as we otherwise allow
+ // ```rust
+ // trait AlwaysApplicable { type Assoc; }
+ // impl<T: ?Sized> AlwaysApplicable for T { type Assoc = usize; }
+ //
+ // trait BindsParam<T> {
+ // type ArrayTy;
+ // }
+ // impl<T> BindsParam<T> for <T as AlwaysApplicable>::Assoc {
+ // type ArrayTy = [u8; Self::MAX];
+ // }
+ // ```
+ // Note that the normalization happens in the param env of
+ // the anon const, which is empty. This is why the
+ // `AlwaysApplicable` impl needs a `T: ?Sized` bound for
+ // this to compile if we were to normalize here.
+ if forbid_generic && ty.needs_subst() {
+ let mut err = tcx.sess.struct_span_err(
+ path.span,
+ "generic `Self` types are currently not permitted in anonymous constants",
+ );
+ if let Some(hir::Node::Item(&hir::Item {
+ kind: hir::ItemKind::Impl(ref impl_),
+ ..
+ })) = tcx.hir().get_if_local(def_id)
+ {
+ err.span_note(impl_.self_ty.span, "not a concrete type");
+ }
+ err.emit();
+ tcx.ty_error()
+ } else {
+ self.normalize_ty(span, ty)
+ }
+ }
+ Res::Def(DefKind::AssocTy, def_id) => {
+ debug_assert!(path.segments.len() >= 2);
+ self.prohibit_generics(path.segments[..path.segments.len() - 2].iter(), |_| {});
+                // HACK: until we support `<Type as ~const Trait>`, assume all of them are `~const`.
+ let constness = if tcx.has_attr(tcx.parent(def_id), sym::const_trait) {
+ ty::BoundConstness::ConstIfConst
+ } else {
+ ty::BoundConstness::NotConst
+ };
+ self.qpath_to_ty(
+ span,
+ opt_self_ty,
+ def_id,
+ &path.segments[path.segments.len() - 2],
+ path.segments.last().unwrap(),
+ constness,
+ )
+ }
+ Res::PrimTy(prim_ty) => {
+ assert_eq!(opt_self_ty, None);
+ self.prohibit_generics(path.segments.iter(), |err| {
+ let name = prim_ty.name_str();
+ for segment in path.segments {
+ if let Some(args) = segment.args {
+ err.span_suggestion_verbose(
+ segment.ident.span.shrink_to_hi().to(args.span_ext),
+ &format!("primitive type `{name}` doesn't have generic parameters"),
+ "",
+ Applicability::MaybeIncorrect,
+ );
+ }
+ }
+ });
+ match prim_ty {
+ hir::PrimTy::Bool => tcx.types.bool,
+ hir::PrimTy::Char => tcx.types.char,
+ hir::PrimTy::Int(it) => tcx.mk_mach_int(ty::int_ty(it)),
+ hir::PrimTy::Uint(uit) => tcx.mk_mach_uint(ty::uint_ty(uit)),
+ hir::PrimTy::Float(ft) => tcx.mk_mach_float(ty::float_ty(ft)),
+ hir::PrimTy::Str => tcx.types.str_,
+ }
+ }
+ Res::Err => {
+ self.set_tainted_by_errors();
+ self.tcx().ty_error()
+ }
+ _ => span_bug!(span, "unexpected resolution: {:?}", path.res),
+ }
+ }
+
+ /// Parses the programmer's textual representation of a type into our
+ /// internal notion of a type.
+ pub fn ast_ty_to_ty(&self, ast_ty: &hir::Ty<'_>) -> Ty<'tcx> {
+ self.ast_ty_to_ty_inner(ast_ty, false, false)
+ }
+
+ /// Parses the programmer's textual representation of a type into our
+ /// internal notion of a type. This is meant to be used within a path.
+ pub fn ast_ty_to_ty_in_path(&self, ast_ty: &hir::Ty<'_>) -> Ty<'tcx> {
+ self.ast_ty_to_ty_inner(ast_ty, false, true)
+ }
+
+ /// Turns a `hir::Ty` into a `Ty`. For diagnostics' purposes we keep track of whether trait
+ /// objects are borrowed like `&dyn Trait` to avoid emitting redundant errors.
+ #[instrument(level = "debug", skip(self), ret)]
+ fn ast_ty_to_ty_inner(&self, ast_ty: &hir::Ty<'_>, borrowed: bool, in_path: bool) -> Ty<'tcx> {
+ let tcx = self.tcx();
+
+ let result_ty = match ast_ty.kind {
+ hir::TyKind::Slice(ref ty) => tcx.mk_slice(self.ast_ty_to_ty(ty)),
+ hir::TyKind::Ptr(ref mt) => {
+ tcx.mk_ptr(ty::TypeAndMut { ty: self.ast_ty_to_ty(mt.ty), mutbl: mt.mutbl })
+ }
+ hir::TyKind::Rptr(ref region, ref mt) => {
+ let r = self.ast_region_to_region(region, None);
+ debug!(?r);
+ let t = self.ast_ty_to_ty_inner(mt.ty, true, false);
+ tcx.mk_ref(r, ty::TypeAndMut { ty: t, mutbl: mt.mutbl })
+ }
+ hir::TyKind::Never => tcx.types.never,
+ hir::TyKind::Tup(fields) => tcx.mk_tup(fields.iter().map(|t| self.ast_ty_to_ty(t))),
+ hir::TyKind::BareFn(bf) => {
+ require_c_abi_if_c_variadic(tcx, bf.decl, bf.abi, ast_ty.span);
+
+ tcx.mk_fn_ptr(self.ty_of_fn(
+ ast_ty.hir_id,
+ bf.unsafety,
+ bf.abi,
+ bf.decl,
+ None,
+ Some(ast_ty),
+ ))
+ }
+ hir::TyKind::TraitObject(bounds, ref lifetime, repr) => {
+ self.maybe_lint_bare_trait(ast_ty, in_path);
+ let repr = match repr {
+ TraitObjectSyntax::Dyn | TraitObjectSyntax::None => ty::Dyn,
+ TraitObjectSyntax::DynStar => ty::DynStar,
+ };
+ self.conv_object_ty_poly_trait_ref(ast_ty.span, bounds, lifetime, borrowed, repr)
+ }
+ hir::TyKind::Path(hir::QPath::Resolved(ref maybe_qself, ref path)) => {
+ debug!(?maybe_qself, ?path);
+ let opt_self_ty = maybe_qself.as_ref().map(|qself| self.ast_ty_to_ty(qself));
+ self.res_to_ty(opt_self_ty, path, false)
+ }
+ hir::TyKind::OpaqueDef(item_id, lifetimes, in_trait) => {
+ let opaque_ty = tcx.hir().item(item_id);
+ let def_id = item_id.owner_id.to_def_id();
+
+ match opaque_ty.kind {
+ hir::ItemKind::OpaqueTy(hir::OpaqueTy { origin, .. }) => {
+ self.impl_trait_ty_to_ty(def_id, lifetimes, origin, in_trait)
+ }
+ ref i => bug!("`impl Trait` pointed to non-opaque type?? {:#?}", i),
+ }
+ }
+ hir::TyKind::Path(hir::QPath::TypeRelative(ref qself, ref segment)) => {
+ debug!(?qself, ?segment);
+ let ty = self.ast_ty_to_ty_inner(qself, false, true);
+ self.associated_path_to_ty(ast_ty.hir_id, ast_ty.span, ty, qself, segment, false)
+ .map(|(ty, _, _)| ty)
+ .unwrap_or_else(|_| tcx.ty_error())
+ }
+ hir::TyKind::Path(hir::QPath::LangItem(lang_item, span, _)) => {
+ let def_id = tcx.require_lang_item(lang_item, Some(span));
+ let (substs, _) = self.create_substs_for_ast_path(
+ span,
+ def_id,
+ &[],
+ &hir::PathSegment::invalid(),
+ &GenericArgs::none(),
+ true,
+ None,
+ None,
+ );
+ EarlyBinder(self.normalize_ty(span, tcx.at(span).type_of(def_id)))
+ .subst(tcx, substs)
+ }
+ hir::TyKind::Array(ref ty, ref length) => {
+ let length = match length {
+ &hir::ArrayLen::Infer(_, span) => self.ct_infer(tcx.types.usize, None, span),
+ hir::ArrayLen::Body(constant) => {
+ let length_def_id = tcx.hir().local_def_id(constant.hir_id);
+ ty::Const::from_anon_const(tcx, length_def_id)
+ }
+ };
+
+ let array_ty = tcx.mk_ty(ty::Array(self.ast_ty_to_ty(ty), length));
+ self.normalize_ty(ast_ty.span, array_ty)
+ }
+ hir::TyKind::Typeof(ref e) => {
+ let ty_erased = tcx.type_of(tcx.hir().local_def_id(e.hir_id));
+ let ty = tcx.fold_regions(ty_erased, |r, _| {
+ if r.is_erased() { tcx.lifetimes.re_static } else { r }
+ });
+ let span = ast_ty.span;
+ tcx.sess.emit_err(TypeofReservedKeywordUsed {
+ span,
+ ty,
+ opt_sugg: Some((span, Applicability::MachineApplicable))
+ .filter(|_| ty.is_suggestable(tcx, false)),
+ });
+
+ ty
+ }
+ hir::TyKind::Infer => {
+ // Infer also appears as the type of arguments or return
+ // values in an ExprKind::Closure, or as
+ // the type of local variables. Both of these cases are
+ // handled specially and will not descend into this routine.
+ self.ty_infer(None, ast_ty.span)
+ }
+ hir::TyKind::Err => tcx.ty_error(),
+ };
+
+ self.record_ty(ast_ty.hir_id, result_ty, ast_ty.span);
+ result_ty
+ }
+
+ #[instrument(level = "debug", skip(self), ret)]
+ fn impl_trait_ty_to_ty(
+ &self,
+ def_id: DefId,
+ lifetimes: &[hir::GenericArg<'_>],
+ origin: OpaqueTyOrigin,
+ in_trait: bool,
+ ) -> Ty<'tcx> {
+ debug!("impl_trait_ty_to_ty(def_id={:?}, lifetimes={:?})", def_id, lifetimes);
+ let tcx = self.tcx();
+
+ let generics = tcx.generics_of(def_id);
+
+ debug!("impl_trait_ty_to_ty: generics={:?}", generics);
+ let substs = InternalSubsts::for_item(tcx, def_id, |param, _| {
+ if let Some(i) = (param.index as usize).checked_sub(generics.parent_count) {
+ // Our own parameters are the resolved lifetimes.
+ if let GenericParamDefKind::Lifetime = param.kind {
+ if let hir::GenericArg::Lifetime(lifetime) = &lifetimes[i] {
+ self.ast_region_to_region(lifetime, None).into()
+ } else {
+ bug!()
+ }
+ } else {
+ bug!()
+ }
+ } else {
+ match param.kind {
+ // For RPIT (return position impl trait), only lifetimes
+ // mentioned in the impl Trait predicate are captured by
+ // the opaque type, so the lifetime parameters from the
+ // parent item need to be replaced with `'static`.
+ //
+ // For `impl Trait` in the types of statics, constants,
+                    // locals, and type aliases, all parent lifetimes are
+                    // captured, so the identity substs can be used.
+ GenericParamDefKind::Lifetime
+ if matches!(
+ origin,
+ hir::OpaqueTyOrigin::FnReturn(..) | hir::OpaqueTyOrigin::AsyncFn(..)
+ ) =>
+ {
+ tcx.lifetimes.re_static.into()
+ }
+ _ => tcx.mk_param_from_def(param),
+ }
+ }
+ });
+ debug!("impl_trait_ty_to_ty: substs={:?}", substs);
+
+ if in_trait { tcx.mk_projection(def_id, substs) } else { tcx.mk_opaque(def_id, substs) }
+ }
+
+ pub fn ty_of_arg(&self, ty: &hir::Ty<'_>, expected_ty: Option<Ty<'tcx>>) -> Ty<'tcx> {
+ match ty.kind {
+ hir::TyKind::Infer if expected_ty.is_some() => {
+ self.record_ty(ty.hir_id, expected_ty.unwrap(), ty.span);
+ expected_ty.unwrap()
+ }
+ _ => self.ast_ty_to_ty(ty),
+ }
+ }
+
+ #[instrument(level = "debug", skip(self, hir_id, unsafety, abi, decl, generics, hir_ty), ret)]
+ pub fn ty_of_fn(
+ &self,
+ hir_id: hir::HirId,
+ unsafety: hir::Unsafety,
+ abi: abi::Abi,
+ decl: &hir::FnDecl<'_>,
+ generics: Option<&hir::Generics<'_>>,
+ hir_ty: Option<&hir::Ty<'_>>,
+ ) -> ty::PolyFnSig<'tcx> {
+ let tcx = self.tcx();
+ let bound_vars = tcx.late_bound_vars(hir_id);
+ debug!(?bound_vars);
+
+ // We proactively collect all the inferred type params to emit a single error per fn def.
+ let mut visitor = HirPlaceholderCollector::default();
+ let mut infer_replacements = vec![];
+
+ if let Some(generics) = generics {
+ walk_generics(&mut visitor, generics);
+ }
+
+ let input_tys: Vec<_> = decl
+ .inputs
+ .iter()
+ .enumerate()
+ .map(|(i, a)| {
+ if let hir::TyKind::Infer = a.kind && !self.allow_ty_infer() {
+ if let Some(suggested_ty) =
+ self.suggest_trait_fn_ty_for_impl_fn_infer(hir_id, Some(i))
+ {
+ infer_replacements.push((a.span, suggested_ty.to_string()));
+ return suggested_ty;
+ }
+ }
+
+ // Only visit the type looking for `_` if we didn't fix the type above
+ visitor.visit_ty(a);
+ self.ty_of_arg(a, None)
+ })
+ .collect();
+
+ let output_ty = match decl.output {
+ hir::FnRetTy::Return(output) => {
+ if let hir::TyKind::Infer = output.kind
+ && !self.allow_ty_infer()
+ && let Some(suggested_ty) =
+ self.suggest_trait_fn_ty_for_impl_fn_infer(hir_id, None)
+ {
+ infer_replacements.push((output.span, suggested_ty.to_string()));
+ suggested_ty
+ } else {
+ visitor.visit_ty(output);
+ self.ast_ty_to_ty(output)
+ }
+ }
+ hir::FnRetTy::DefaultReturn(..) => tcx.mk_unit(),
+ };
+
+ debug!(?output_ty);
+
+ let fn_ty = tcx.mk_fn_sig(input_tys.into_iter(), output_ty, decl.c_variadic, unsafety, abi);
+ let bare_fn_ty = ty::Binder::bind_with_vars(fn_ty, bound_vars);
+
+ if !self.allow_ty_infer() && !(visitor.0.is_empty() && infer_replacements.is_empty()) {
+ // We always collect the spans for placeholder types when evaluating `fn`s, but we
+ // only want to emit an error complaining about them if infer types (`_`) are not
+ // allowed. `allow_ty_infer` gates this behavior. We check for the presence of
+ // `ident_span` to not emit an error twice when we have `fn foo(_: fn() -> _)`.
+
+ let mut diag = crate::collect::placeholder_type_error_diag(
+ tcx,
+ generics,
+ visitor.0,
+ infer_replacements.iter().map(|(s, _)| *s).collect(),
+ true,
+ hir_ty,
+ "function",
+ );
+
+ if !infer_replacements.is_empty() {
+ diag.multipart_suggestion(
+ &format!(
+ "try replacing `_` with the type{} in the corresponding trait method signature",
+ rustc_errors::pluralize!(infer_replacements.len()),
+ ),
+ infer_replacements,
+ Applicability::MachineApplicable,
+ );
+ }
+
+ diag.emit();
+ }
+
+ // Find any late-bound regions declared in return type that do
+ // not appear in the arguments. These are not well-formed.
+ //
+ // Example:
+ // for<'a> fn() -> &'a str <-- 'a is bad
+ // for<'a> fn(&'a String) -> &'a str <-- 'a is ok
+ let inputs = bare_fn_ty.inputs();
+ let late_bound_in_args =
+ tcx.collect_constrained_late_bound_regions(&inputs.map_bound(|i| i.to_owned()));
+ let output = bare_fn_ty.output();
+ let late_bound_in_ret = tcx.collect_referenced_late_bound_regions(&output);
+
+ self.validate_late_bound_regions(late_bound_in_args, late_bound_in_ret, |br_name| {
+ struct_span_err!(
+ tcx.sess,
+ decl.output.span(),
+ E0581,
+ "return type references {}, which is not constrained by the fn input types",
+ br_name
+ )
+ });
+
+ bare_fn_ty
+ }
+
+    /// Given a `fn_hir_id` for an impl function, suggest the type that is found on the
+    /// corresponding function in the trait that the impl implements, if it exists.
+    /// If `arg_idx` is `Some`, it corresponds to an input type index; otherwise it
+    /// corresponds to the return type.
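+    ///
+    /// A rough illustration (hypothetical trait and impl):
+    ///
+    /// ```ignore (illustrative)
+    /// trait Greet { fn greet(&self, name: String) -> String; }
+    /// struct S;
+    /// impl Greet for S {
+    ///     // `_` is not allowed here; for the parameter this helper suggests the
+    ///     // trait's `String`, and likewise `String` for the return type.
+    ///     fn greet(&self, name: _) -> _ { name }
+    /// }
+    /// ```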
+ fn suggest_trait_fn_ty_for_impl_fn_infer(
+ &self,
+ fn_hir_id: hir::HirId,
+ arg_idx: Option<usize>,
+ ) -> Option<Ty<'tcx>> {
+ let tcx = self.tcx();
+ let hir = tcx.hir();
+
+ let hir::Node::ImplItem(hir::ImplItem { kind: hir::ImplItemKind::Fn(..), ident, .. }) =
+ hir.get(fn_hir_id) else { return None };
+ let hir::Node::Item(hir::Item { kind: hir::ItemKind::Impl(i), .. }) =
+ hir.get(hir.get_parent_node(fn_hir_id)) else { bug!("ImplItem should have Impl parent") };
+
+ let trait_ref = self.instantiate_mono_trait_ref(
+ i.of_trait.as_ref()?,
+ self.ast_ty_to_ty(i.self_ty),
+ ty::BoundConstness::NotConst,
+ );
+
+ let assoc = tcx.associated_items(trait_ref.def_id).find_by_name_and_kind(
+ tcx,
+ *ident,
+ ty::AssocKind::Fn,
+ trait_ref.def_id,
+ )?;
+
+ let fn_sig = tcx.bound_fn_sig(assoc.def_id).subst(
+ tcx,
+ trait_ref.substs.extend_to(tcx, assoc.def_id, |param, _| tcx.mk_param_from_def(param)),
+ );
+
+ let ty = if let Some(arg_idx) = arg_idx { fn_sig.input(arg_idx) } else { fn_sig.output() };
+
+ Some(tcx.liberate_late_bound_regions(fn_hir_id.expect_owner().to_def_id(), ty))
+ }
+
+ fn validate_late_bound_regions(
+ &self,
+ constrained_regions: FxHashSet<ty::BoundRegionKind>,
+ referenced_regions: FxHashSet<ty::BoundRegionKind>,
+ generate_err: impl Fn(&str) -> DiagnosticBuilder<'tcx, ErrorGuaranteed>,
+ ) {
+ for br in referenced_regions.difference(&constrained_regions) {
+ let br_name = match *br {
+ ty::BrNamed(_, kw::UnderscoreLifetime) | ty::BrAnon(_) | ty::BrEnv => {
+ "an anonymous lifetime".to_string()
+ }
+ ty::BrNamed(_, name) => format!("lifetime `{}`", name),
+ };
+
+ let mut err = generate_err(&br_name);
+
+ if let ty::BrNamed(_, kw::UnderscoreLifetime) | ty::BrAnon(_) = *br {
+ // The only way for an anonymous lifetime to wind up
+ // in the return type but **also** be unconstrained is
+ // if it only appears in "associated types" in the
+ // input. See #47511 and #62200 for examples. In this case,
+                // though, we can easily give a hint that ought to be
+ // relevant.
+ err.note(
+ "lifetimes appearing in an associated or opaque type are not considered constrained",
+ );
+ err.note("consider introducing a named lifetime parameter");
+ }
+
+ err.emit();
+ }
+ }
+
+ /// Given the bounds on an object, determines what single region bound (if any) we can
+ /// use to summarize this type. The basic idea is that we will use the bound the user
+ /// provided, if they provided one, and otherwise search the supertypes of trait bounds
+ /// for region bounds. It may be that we can derive no bound at all, in which case
+ /// we return `None`.
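+    ///
+    /// A minimal sketch (`Bounded` is a hypothetical trait):
+    ///
+    /// ```ignore (illustrative)
+    /// trait Bounded: 'static {}
+    /// type A = Box<dyn std::fmt::Debug>; // nothing derived -> `None`, default applies
+    /// type B = Box<dyn Bounded>;         // derived bound is `'static`
+    /// ```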
+ fn compute_object_lifetime_bound(
+ &self,
+ span: Span,
+ existential_predicates: &'tcx ty::List<ty::Binder<'tcx, ty::ExistentialPredicate<'tcx>>>,
+ ) -> Option<ty::Region<'tcx>> // if None, use the default
+ {
+ let tcx = self.tcx();
+
+ debug!("compute_opt_region_bound(existential_predicates={:?})", existential_predicates);
+
+ // No explicit region bound specified. Therefore, examine trait
+ // bounds and see if we can derive region bounds from those.
+ let derived_region_bounds = object_region_bounds(tcx, existential_predicates);
+
+ // If there are no derived region bounds, then report back that we
+ // can find no region bound. The caller will use the default.
+ if derived_region_bounds.is_empty() {
+ return None;
+ }
+
+ // If any of the derived region bounds are 'static, that is always
+ // the best choice.
+ if derived_region_bounds.iter().any(|r| r.is_static()) {
+ return Some(tcx.lifetimes.re_static);
+ }
+
+ // Determine whether there is exactly one unique region in the set
+ // of derived region bounds. If so, use that. Otherwise, report an
+ // error.
+ let r = derived_region_bounds[0];
+ if derived_region_bounds[1..].iter().any(|r1| r != *r1) {
+ tcx.sess.emit_err(AmbiguousLifetimeBound { span });
+ }
+ Some(r)
+ }
+
+    /// Make sure that we are in a position to suggest the blanket implementation before emitting it.
+ fn maybe_lint_blanket_trait_impl(&self, self_ty: &hir::Ty<'_>, diag: &mut Diagnostic) {
+ let tcx = self.tcx();
+ let parent_id = tcx.hir().get_parent_item(self_ty.hir_id).def_id;
+ if let hir::Node::Item(hir::Item {
+ kind:
+ hir::ItemKind::Impl(hir::Impl {
+ self_ty: impl_self_ty, of_trait: Some(of_trait_ref), generics, ..
+ }),
+ ..
+ }) = tcx.hir().get_by_def_id(parent_id) && self_ty.hir_id == impl_self_ty.hir_id
+ {
+ if !of_trait_ref.trait_def_id().map_or(false, |def_id| def_id.is_local()) {
+ return;
+ }
+ let of_trait_span = of_trait_ref.path.span;
+            // Avoid `unwrap`, which would abort compilation if a source snippet is unavailable.
+ let Ok(impl_trait_name) = tcx.sess.source_map().span_to_snippet(self_ty.span) else { return; };
+ let Ok(of_trait_name) = tcx.sess.source_map().span_to_snippet(of_trait_span) else { return; };
+ // check if the trait has generics, to make a correct suggestion
+ let param_name = generics.params.next_type_param_name(None);
+
+ let add_generic_sugg = if let Some(span) = generics.span_for_param_suggestion() {
+ (span, format!(", {}: {}", param_name, impl_trait_name))
+ } else {
+ (generics.span, format!("<{}: {}>", param_name, impl_trait_name))
+ };
+ diag.multipart_suggestion(
+ format!("alternatively use a blanket \
+ implementation to implement `{of_trait_name}` for \
+ all types that also implement `{impl_trait_name}`"),
+ vec![
+ (self_ty.span, param_name),
+ add_generic_sugg,
+ ],
+ Applicability::MaybeIncorrect,
+ );
+ }
+ }
+
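+    /// Lint (before edition 2021) or reject (edition 2021 and later) bare trait
+    /// object types written without `dyn`.
+    ///
+    /// A minimal illustration (hypothetical trait):
+    ///
+    /// ```ignore (illustrative)
+    /// trait Speak {}
+    /// // Edition 2021: hard error E0782 with a suggestion to write `&dyn Speak`;
+    /// // earlier editions: the `bare_trait_objects` lint fires instead.
+    /// fn talk(s: &Speak) {}
+    /// ```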
+ fn maybe_lint_bare_trait(&self, self_ty: &hir::Ty<'_>, in_path: bool) {
+ let tcx = self.tcx();
+ if let hir::TyKind::TraitObject([poly_trait_ref, ..], _, TraitObjectSyntax::None) =
+ self_ty.kind
+ {
+ let needs_bracket = in_path
+ && !tcx
+ .sess
+ .source_map()
+ .span_to_prev_source(self_ty.span)
+ .ok()
+ .map_or(false, |s| s.trim_end().ends_with('<'));
+
+ let is_global = poly_trait_ref.trait_ref.path.is_global();
+
+ let mut sugg = Vec::from_iter([(
+ self_ty.span.shrink_to_lo(),
+ format!(
+ "{}dyn {}",
+ if needs_bracket { "<" } else { "" },
+ if is_global { "(" } else { "" },
+ ),
+ )]);
+
+ if is_global || needs_bracket {
+ sugg.push((
+ self_ty.span.shrink_to_hi(),
+ format!(
+ "{}{}",
+ if is_global { ")" } else { "" },
+ if needs_bracket { ">" } else { "" },
+ ),
+ ));
+ }
+
+ if self_ty.span.edition() >= Edition::Edition2021 {
+ let msg = "trait objects must include the `dyn` keyword";
+ let label = "add `dyn` keyword before this trait";
+ let mut diag =
+ rustc_errors::struct_span_err!(tcx.sess, self_ty.span, E0782, "{}", msg);
+ diag.multipart_suggestion_verbose(label, sugg, Applicability::MachineApplicable);
+                // check if the impl trait that we are considering is an impl of a local trait
+ self.maybe_lint_blanket_trait_impl(&self_ty, &mut diag);
+ diag.emit();
+ } else {
+ let msg = "trait objects without an explicit `dyn` are deprecated";
+ tcx.struct_span_lint_hir(
+ BARE_TRAIT_OBJECTS,
+ self_ty.hir_id,
+ self_ty.span,
+ msg,
+ |lint| {
+ lint.multipart_suggestion_verbose(
+ "use `dyn`",
+ sugg,
+ Applicability::MachineApplicable,
+ );
+ self.maybe_lint_blanket_trait_impl(&self_ty, lint);
+ lint
+ },
+ );
+ }
+ }
+ }
+}
diff --git a/compiler/rustc_typeck/src/bounds.rs b/compiler/rustc_hir_analysis/src/bounds.rs
index 6a28bb16a..6a28bb16a 100644
--- a/compiler/rustc_typeck/src/bounds.rs
+++ b/compiler/rustc_hir_analysis/src/bounds.rs
diff --git a/compiler/rustc_hir_analysis/src/check/check.rs b/compiler/rustc_hir_analysis/src/check/check.rs
new file mode 100644
index 000000000..b70ac0205
--- /dev/null
+++ b/compiler/rustc_hir_analysis/src/check/check.rs
@@ -0,0 +1,1443 @@
+use crate::check::intrinsicck::InlineAsmCtxt;
+
+use super::compare_method::check_type_bounds;
+use super::compare_method::{compare_impl_method, compare_ty_impl};
+use super::*;
+use rustc_attr as attr;
+use rustc_errors::{Applicability, ErrorGuaranteed, MultiSpan};
+use rustc_hir as hir;
+use rustc_hir::def::{DefKind, Res};
+use rustc_hir::def_id::{DefId, LocalDefId};
+use rustc_hir::intravisit::Visitor;
+use rustc_hir::{ItemKind, Node, PathSegment};
+use rustc_infer::infer::outlives::env::OutlivesEnvironment;
+use rustc_infer::infer::{DefiningAnchor, RegionVariableOrigin, TyCtxtInferExt};
+use rustc_infer::traits::Obligation;
+use rustc_lint::builtin::REPR_TRANSPARENT_EXTERNAL_PRIVATE_FIELDS;
+use rustc_middle::hir::nested_filter;
+use rustc_middle::middle::stability::EvalResult;
+use rustc_middle::ty::layout::{LayoutError, MAX_SIMD_LANES};
+use rustc_middle::ty::subst::GenericArgKind;
+use rustc_middle::ty::util::{Discr, IntTypeExt};
+use rustc_middle::ty::{
+ self, ParamEnv, ToPredicate, Ty, TyCtxt, TypeSuperVisitable, TypeVisitable,
+};
+use rustc_session::lint::builtin::{UNINHABITED_STATIC, UNSUPPORTED_CALLING_CONVENTIONS};
+use rustc_span::symbol::sym;
+use rustc_span::{self, Span};
+use rustc_target::spec::abi::Abi;
+use rustc_trait_selection::traits::error_reporting::TypeErrCtxtExt as _;
+use rustc_trait_selection::traits::{self, ObligationCtxt};
+
+use std::ops::ControlFlow;
+
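+/// Check that `abi` is supported on the current target, and reject ABIs that are
+/// only valid on function pointers (currently `"C-cmse-nonsecure-call"`) when
+/// used on function definitions.
+///
+/// For example (illustrative):
+///
+/// ```ignore (illustrative)
+/// // error[E0781]: this ABI is only allowed on function pointers
+/// extern "C-cmse-nonsecure-call" fn not_a_fn_pointer() {}
+/// ```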
+pub fn check_abi(tcx: TyCtxt<'_>, hir_id: hir::HirId, span: Span, abi: Abi) {
+ match tcx.sess.target.is_abi_supported(abi) {
+ Some(true) => (),
+ Some(false) => {
+ struct_span_err!(
+ tcx.sess,
+ span,
+ E0570,
+ "`{abi}` is not a supported ABI for the current target",
+ )
+ .emit();
+ }
+ None => {
+ tcx.struct_span_lint_hir(
+ UNSUPPORTED_CALLING_CONVENTIONS,
+ hir_id,
+ span,
+ "use of calling convention not supported on this target",
+ |lint| lint,
+ );
+ }
+ }
+
+ // This ABI is only allowed on function pointers
+ if abi == Abi::CCmseNonSecureCall {
+ struct_span_err!(
+ tcx.sess,
+ span,
+ E0781,
+ "the `\"C-cmse-nonsecure-call\"` ABI is only allowed on function pointers"
+ )
+ .emit();
+ }
+}
+
+fn check_struct(tcx: TyCtxt<'_>, def_id: LocalDefId) {
+ let def = tcx.adt_def(def_id);
+ let span = tcx.def_span(def_id);
+ def.destructor(tcx); // force the destructor to be evaluated
+
+ if def.repr().simd() {
+ check_simd(tcx, span, def_id);
+ }
+
+ check_transparent(tcx, span, def);
+ check_packed(tcx, span, def);
+}
+
+fn check_union(tcx: TyCtxt<'_>, def_id: LocalDefId) {
+ let def = tcx.adt_def(def_id);
+ let span = tcx.def_span(def_id);
+ def.destructor(tcx); // force the destructor to be evaluated
+ check_transparent(tcx, span, def);
+ check_union_fields(tcx, span, def_id);
+ check_packed(tcx, span, def);
+}
+
+/// Check that the fields of the `union` do not need dropping.
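+///
+/// A minimal illustration of both sides of this check (hypothetical types):
+///
+/// ```ignore (illustrative)
+/// union Rejected {
+///     s: String, // error[E0740]: unions cannot contain fields that may need dropping
+/// }
+/// union Accepted {
+///     s: std::mem::ManuallyDrop<String>, // ok: `ManuallyDrop<_>` never needs dropping
+///     n: u32,                            // ok: `u32` is `Copy`
+/// }
+/// ```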
+fn check_union_fields(tcx: TyCtxt<'_>, span: Span, item_def_id: LocalDefId) -> bool {
+ let item_type = tcx.type_of(item_def_id);
+ if let ty::Adt(def, substs) = item_type.kind() {
+ assert!(def.is_union());
+
+ fn allowed_union_field<'tcx>(
+ ty: Ty<'tcx>,
+ tcx: TyCtxt<'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ span: Span,
+ ) -> bool {
+ // We don't just accept all !needs_drop fields, due to semver concerns.
+ match ty.kind() {
+ ty::Ref(..) => true, // references never drop (even mutable refs, which are non-Copy and hence fail the later check)
+ ty::Tuple(tys) => {
+ // allow tuples of allowed types
+ tys.iter().all(|ty| allowed_union_field(ty, tcx, param_env, span))
+ }
+ ty::Array(elem, _len) => {
+ // Like `Copy`, we do *not* special-case length 0.
+ allowed_union_field(*elem, tcx, param_env, span)
+ }
+ _ => {
+ // Fallback case: allow `ManuallyDrop` and things that are `Copy`.
+ ty.ty_adt_def().is_some_and(|adt_def| adt_def.is_manually_drop())
+ || ty.is_copy_modulo_regions(tcx, param_env)
+ }
+ }
+ }
+
+ let param_env = tcx.param_env(item_def_id);
+ for field in &def.non_enum_variant().fields {
+ let field_ty = field.ty(tcx, substs);
+
+ if !allowed_union_field(field_ty, tcx, param_env, span) {
+ let (field_span, ty_span) = match tcx.hir().get_if_local(field.did) {
+ // We are currently checking the type this field came from, so it must be local.
+ Some(Node::Field(field)) => (field.span, field.ty.span),
+ _ => unreachable!("mir field has to correspond to hir field"),
+ };
+ struct_span_err!(
+ tcx.sess,
+ field_span,
+ E0740,
+ "unions cannot contain fields that may need dropping"
+ )
+ .note(
+ "a type is guaranteed not to need dropping \
+ when it implements `Copy`, or when it is the special `ManuallyDrop<_>` type",
+ )
+ .multipart_suggestion_verbose(
+ "when the type does not implement `Copy`, \
+ wrap it inside a `ManuallyDrop<_>` and ensure it is manually dropped",
+ vec![
+ (ty_span.shrink_to_lo(), "std::mem::ManuallyDrop<".into()),
+ (ty_span.shrink_to_hi(), ">".into()),
+ ],
+ Applicability::MaybeIncorrect,
+ )
+ .emit();
+ return false;
+ } else if field_ty.needs_drop(tcx, param_env) {
+ // This should never happen. But we can get here e.g. in case of name resolution errors.
+ tcx.sess.delay_span_bug(span, "we should never accept maybe-dropping union fields");
+ }
+ }
+ } else {
+ span_bug!(span, "unions must be ty::Adt, but got {:?}", item_type.kind());
+ }
+ true
+}
+
+/// Check that a `static` is inhabited.
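+///
+/// An illustrative case (hypothetical items): an `extern` static of an uninhabited
+/// type triggers the lint emitted below.
+///
+/// ```ignore (illustrative)
+/// enum Void {}
+/// extern "C" {
+///     static VOID: Void; // lint: static of uninhabited type
+/// }
+/// ```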
+fn check_static_inhabited<'tcx>(tcx: TyCtxt<'tcx>, def_id: LocalDefId) {
+ // Make sure statics are inhabited.
+ // Other parts of the compiler assume that there are no uninhabited places. In principle it
+ // would be enough to check this for `extern` statics, as statics with an initializer will
+ // have UB during initialization if they are uninhabited, but there also seems to be no good
+ // reason to allow any statics to be uninhabited.
+ let ty = tcx.type_of(def_id);
+ let span = tcx.def_span(def_id);
+ let layout = match tcx.layout_of(ParamEnv::reveal_all().and(ty)) {
+ Ok(l) => l,
+ // Foreign statics that overflow their allowed size should emit an error
+ Err(LayoutError::SizeOverflow(_))
+ if {
+ let node = tcx.hir().get_by_def_id(def_id);
+ matches!(
+ node,
+ hir::Node::ForeignItem(hir::ForeignItem {
+ kind: hir::ForeignItemKind::Static(..),
+ ..
+ })
+ )
+ } =>
+ {
+ tcx.sess
+ .struct_span_err(span, "extern static is too large for the current architecture")
+ .emit();
+ return;
+ }
+ // Generic statics are rejected, but we still reach this case.
+ Err(e) => {
+ tcx.sess.delay_span_bug(span, &e.to_string());
+ return;
+ }
+ };
+ if layout.abi.is_uninhabited() {
+ tcx.struct_span_lint_hir(
+ UNINHABITED_STATIC,
+ tcx.hir().local_def_id_to_hir_id(def_id),
+ span,
+ "static of uninhabited type",
+ |lint| {
+ lint
+ .note("uninhabited statics cannot be initialized, and any access would be an immediate error")
+ },
+ );
+ }
+}
+
+/// Checks that an opaque type does not contain cycles and does not use `Self` or `T::Foo`
+/// projections that would result in "inheriting lifetimes".
+fn check_opaque<'tcx>(tcx: TyCtxt<'tcx>, id: hir::ItemId) {
+ let item = tcx.hir().item(id);
+ let hir::ItemKind::OpaqueTy(hir::OpaqueTy { origin, .. }) = item.kind else {
+ tcx.sess.delay_span_bug(tcx.hir().span(id.hir_id()), "expected opaque item");
+ return;
+ };
+
+ // HACK(jynelson): trying to infer the type of `impl trait` breaks documenting
+ // `async-std` (and `pub async fn` in general).
+ // Since rustdoc doesn't care about the concrete type behind `impl Trait`, just don't look at it!
+ // See https://github.com/rust-lang/rust/issues/75100
+ if tcx.sess.opts.actually_rustdoc {
+ return;
+ }
+
+ let substs = InternalSubsts::identity_for_item(tcx, item.owner_id.to_def_id());
+ let span = tcx.def_span(item.owner_id.def_id);
+
+ check_opaque_for_inheriting_lifetimes(tcx, item.owner_id.def_id, span);
+ if tcx.type_of(item.owner_id.def_id).references_error() {
+ return;
+ }
+ if check_opaque_for_cycles(tcx, item.owner_id.def_id, substs, span, &origin).is_err() {
+ return;
+ }
+ check_opaque_meets_bounds(tcx, item.owner_id.def_id, substs, span, &origin);
+}
+/// Checks that an opaque type does not use `Self` or `T::Foo` projections that would result
+/// in "inheriting lifetimes".
+#[instrument(level = "debug", skip(tcx, span))]
+pub(super) fn check_opaque_for_inheriting_lifetimes<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ def_id: LocalDefId,
+ span: Span,
+) {
+ let item = tcx.hir().expect_item(def_id);
+ debug!(?item, ?span);
+
+ struct FoundParentLifetime;
+ struct FindParentLifetimeVisitor<'tcx>(&'tcx ty::Generics);
+ impl<'tcx> ty::visit::TypeVisitor<'tcx> for FindParentLifetimeVisitor<'tcx> {
+ type BreakTy = FoundParentLifetime;
+
+ fn visit_region(&mut self, r: ty::Region<'tcx>) -> ControlFlow<Self::BreakTy> {
+ debug!("FindParentLifetimeVisitor: r={:?}", r);
+ if let ty::ReEarlyBound(ty::EarlyBoundRegion { index, .. }) = *r {
+ if index < self.0.parent_count as u32 {
+ return ControlFlow::Break(FoundParentLifetime);
+ } else {
+ return ControlFlow::CONTINUE;
+ }
+ }
+
+ r.super_visit_with(self)
+ }
+
+ fn visit_const(&mut self, c: ty::Const<'tcx>) -> ControlFlow<Self::BreakTy> {
+ if let ty::ConstKind::Unevaluated(..) = c.kind() {
+ // FIXME(#72219) We currently don't detect lifetimes within substs
+ // which would violate this check. Even though the particular substitution is not used
+ // within the const, this should still be fixed.
+ return ControlFlow::CONTINUE;
+ }
+ c.super_visit_with(self)
+ }
+ }
+
+ struct ProhibitOpaqueVisitor<'tcx> {
+ tcx: TyCtxt<'tcx>,
+ opaque_identity_ty: Ty<'tcx>,
+ generics: &'tcx ty::Generics,
+ selftys: Vec<(Span, Option<String>)>,
+ }
+
+ impl<'tcx> ty::visit::TypeVisitor<'tcx> for ProhibitOpaqueVisitor<'tcx> {
+ type BreakTy = Ty<'tcx>;
+
+ fn visit_ty(&mut self, t: Ty<'tcx>) -> ControlFlow<Self::BreakTy> {
+ debug!("check_opaque_for_inheriting_lifetimes: (visit_ty) t={:?}", t);
+ if t == self.opaque_identity_ty {
+ ControlFlow::CONTINUE
+ } else {
+ t.super_visit_with(&mut FindParentLifetimeVisitor(self.generics))
+ .map_break(|FoundParentLifetime| t)
+ }
+ }
+ }
+
+ impl<'tcx> Visitor<'tcx> for ProhibitOpaqueVisitor<'tcx> {
+ type NestedFilter = nested_filter::OnlyBodies;
+
+ fn nested_visit_map(&mut self) -> Self::Map {
+ self.tcx.hir()
+ }
+
+ fn visit_ty(&mut self, arg: &'tcx hir::Ty<'tcx>) {
+ match arg.kind {
+ hir::TyKind::Path(hir::QPath::Resolved(None, path)) => match &path.segments {
+ [PathSegment { res: Res::SelfTyParam { .. }, .. }] => {
+ let impl_ty_name = None;
+ self.selftys.push((path.span, impl_ty_name));
+ }
+ [PathSegment { res: Res::SelfTyAlias { alias_to: def_id, .. }, .. }] => {
+ let impl_ty_name = Some(self.tcx.def_path_str(*def_id));
+ self.selftys.push((path.span, impl_ty_name));
+ }
+ _ => {}
+ },
+ _ => {}
+ }
+ hir::intravisit::walk_ty(self, arg);
+ }
+ }
+
+ if let ItemKind::OpaqueTy(hir::OpaqueTy {
+ origin: hir::OpaqueTyOrigin::AsyncFn(..) | hir::OpaqueTyOrigin::FnReturn(..),
+ ..
+ }) = item.kind
+ {
+ let mut visitor = ProhibitOpaqueVisitor {
+ opaque_identity_ty: tcx.mk_opaque(
+ def_id.to_def_id(),
+ InternalSubsts::identity_for_item(tcx, def_id.to_def_id()),
+ ),
+ generics: tcx.generics_of(def_id),
+ tcx,
+ selftys: vec![],
+ };
+ let prohibit_opaque = tcx
+ .explicit_item_bounds(def_id)
+ .iter()
+ .try_for_each(|(predicate, _)| predicate.visit_with(&mut visitor));
+ debug!(
+ "check_opaque_for_inheriting_lifetimes: prohibit_opaque={:?}, visitor.opaque_identity_ty={:?}, visitor.generics={:?}",
+ prohibit_opaque, visitor.opaque_identity_ty, visitor.generics
+ );
+
+ if let Some(ty) = prohibit_opaque.break_value() {
+ visitor.visit_item(&item);
+ let is_async = match item.kind {
+ ItemKind::OpaqueTy(hir::OpaqueTy { origin, .. }) => {
+ matches!(origin, hir::OpaqueTyOrigin::AsyncFn(..))
+ }
+ _ => unreachable!(),
+ };
+
+ let mut err = struct_span_err!(
+ tcx.sess,
+ span,
+ E0760,
+ "`{}` return type cannot contain a projection or `Self` that references lifetimes from \
+ a parent scope",
+ if is_async { "async fn" } else { "impl Trait" },
+ );
+
+ for (span, name) in visitor.selftys {
+ err.span_suggestion(
+ span,
+ "consider spelling out the type instead",
+ name.unwrap_or_else(|| format!("{:?}", ty)),
+ Applicability::MaybeIncorrect,
+ );
+ }
+ err.emit();
+ }
+ }
+}
+
+/// Checks that an opaque type does not contain cycles.
+pub(super) fn check_opaque_for_cycles<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ def_id: LocalDefId,
+ substs: SubstsRef<'tcx>,
+ span: Span,
+ origin: &hir::OpaqueTyOrigin,
+) -> Result<(), ErrorGuaranteed> {
+ if tcx.try_expand_impl_trait_type(def_id.to_def_id(), substs).is_err() {
+ let reported = match origin {
+ hir::OpaqueTyOrigin::AsyncFn(..) => async_opaque_type_cycle_error(tcx, span),
+ _ => opaque_type_cycle_error(tcx, def_id, span),
+ };
+ Err(reported)
+ } else {
+ Ok(())
+ }
+}
+
+/// Check that the concrete type behind `impl Trait` actually implements `Trait`.
+///
+/// This is mostly checked at the places that specify the opaque type, but we
+/// check those cases in the `param_env` of that function, which may have
+/// bounds not on this opaque type:
+///
+/// ```ignore (illustrative)
+/// type X<T> = impl Clone;
+/// fn f<T: Clone>(t: T) -> X<T> {
+/// t
+/// }
+/// ```
+///
+/// Without this check the above code is incorrectly accepted: we would ICE if
+/// someone tried, for example, to clone an `Option<X<&mut ()>>`.
+#[instrument(level = "debug", skip(tcx))]
+fn check_opaque_meets_bounds<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ def_id: LocalDefId,
+ substs: SubstsRef<'tcx>,
+ span: Span,
+ origin: &hir::OpaqueTyOrigin,
+) {
+ let hir_id = tcx.hir().local_def_id_to_hir_id(def_id);
+ let defining_use_anchor = match *origin {
+ hir::OpaqueTyOrigin::FnReturn(did) | hir::OpaqueTyOrigin::AsyncFn(did) => did,
+ hir::OpaqueTyOrigin::TyAlias => def_id,
+ };
+ let param_env = tcx.param_env(defining_use_anchor);
+
+ let infcx = tcx
+ .infer_ctxt()
+ .with_opaque_type_inference(DefiningAnchor::Bind(defining_use_anchor))
+ .build();
+ let ocx = ObligationCtxt::new(&infcx);
+ let opaque_ty = tcx.mk_opaque(def_id.to_def_id(), substs);
+
+ // `ReErased` regions appear in the "parent_substs" of closures/generators.
+ // We're ignoring them here and replacing them with fresh region variables.
+ // See tests in ui/type-alias-impl-trait/closure_{parent_substs,wf_outlives}.rs.
+ //
+ // FIXME: Consider wrapping the hidden type in an existential `Binder` and instantiating it
+ // here rather than using ReErased.
+ let hidden_ty = tcx.bound_type_of(def_id.to_def_id()).subst(tcx, substs);
+ let hidden_ty = tcx.fold_regions(hidden_ty, |re, _dbi| match re.kind() {
+ ty::ReErased => infcx.next_region_var(RegionVariableOrigin::MiscVariable(span)),
+ _ => re,
+ });
+
+ let misc_cause = traits::ObligationCause::misc(span, hir_id);
+
+ match infcx.at(&misc_cause, param_env).eq(opaque_ty, hidden_ty) {
+ Ok(infer_ok) => ocx.register_infer_ok_obligations(infer_ok),
+ Err(ty_err) => {
+ tcx.sess.delay_span_bug(
+ span,
+ &format!("could not unify `{hidden_ty}` with revealed type:\n{ty_err}"),
+ );
+ }
+ }
+
+ // Additionally require the hidden type to be well-formed with only the generics of the opaque type.
+ // Defining use functions may have more bounds than the opaque type, which is ok, as long as the
+ // hidden type is well formed even without those bounds.
+ let predicate =
+ ty::Binder::dummy(ty::PredicateKind::WellFormed(hidden_ty.into())).to_predicate(tcx);
+ ocx.register_obligation(Obligation::new(misc_cause, param_env, predicate));
+
+ // Check that all obligations are satisfied by the implementation's
+ // version.
+ let errors = ocx.select_all_or_error();
+ if !errors.is_empty() {
+ infcx.err_ctxt().report_fulfillment_errors(&errors, None, false);
+ }
+ match origin {
+ // Checked when type checking the function containing them.
+ hir::OpaqueTyOrigin::FnReturn(..) | hir::OpaqueTyOrigin::AsyncFn(..) => {}
+ // Can have different predicates to their defining use
+ hir::OpaqueTyOrigin::TyAlias => {
+ let outlives_environment = OutlivesEnvironment::new(param_env);
+ infcx.check_region_obligations_and_report_errors(
+ defining_use_anchor,
+ &outlives_environment,
+ );
+ }
+ }
+ // Clean up after ourselves
+ let _ = infcx.inner.borrow_mut().opaque_type_storage.take_opaque_types();
+}
+
+fn check_item_type<'tcx>(tcx: TyCtxt<'tcx>, id: hir::ItemId) {
+ debug!(
+ "check_item_type(it.def_id={:?}, it.name={})",
+ id.owner_id,
+ tcx.def_path_str(id.owner_id.to_def_id())
+ );
+ let _indenter = indenter();
+ match tcx.def_kind(id.owner_id) {
+ DefKind::Static(..) => {
+ tcx.ensure().typeck(id.owner_id.def_id);
+ maybe_check_static_with_link_section(tcx, id.owner_id.def_id);
+ check_static_inhabited(tcx, id.owner_id.def_id);
+ }
+ DefKind::Const => {
+ tcx.ensure().typeck(id.owner_id.def_id);
+ }
+ DefKind::Enum => {
+ let item = tcx.hir().item(id);
+ let hir::ItemKind::Enum(ref enum_definition, _) = item.kind else {
+ return;
+ };
+ check_enum(tcx, &enum_definition.variants, item.owner_id.def_id);
+ }
+ DefKind::Fn => {} // entirely within check_item_body
+ DefKind::Impl => {
+ let it = tcx.hir().item(id);
+ let hir::ItemKind::Impl(ref impl_) = it.kind else {
+ return;
+ };
+ debug!("ItemKind::Impl {} with id {:?}", it.ident, it.owner_id);
+ if let Some(impl_trait_ref) = tcx.impl_trait_ref(it.owner_id) {
+ check_impl_items_against_trait(
+ tcx,
+ it.span,
+ it.owner_id.def_id,
+ impl_trait_ref,
+ &impl_.items,
+ );
+ check_on_unimplemented(tcx, it);
+ }
+ }
+ DefKind::Trait => {
+ let it = tcx.hir().item(id);
+ let hir::ItemKind::Trait(_, _, _, _, ref items) = it.kind else {
+ return;
+ };
+ check_on_unimplemented(tcx, it);
+
+ for item in items.iter() {
+ let item = tcx.hir().trait_item(item.id);
+ match item.kind {
+ hir::TraitItemKind::Fn(ref sig, _) => {
+ let abi = sig.header.abi;
+ fn_maybe_err(tcx, item.ident.span, abi);
+ }
+ hir::TraitItemKind::Type(.., Some(default)) => {
+ let assoc_item = tcx.associated_item(item.owner_id);
+ let trait_substs =
+ InternalSubsts::identity_for_item(tcx, it.owner_id.to_def_id());
+ let _: Result<_, rustc_errors::ErrorGuaranteed> = check_type_bounds(
+ tcx,
+ assoc_item,
+ assoc_item,
+ default.span,
+ ty::TraitRef { def_id: it.owner_id.to_def_id(), substs: trait_substs },
+ );
+ }
+ _ => {}
+ }
+ }
+ }
+ DefKind::Struct => {
+ check_struct(tcx, id.owner_id.def_id);
+ }
+ DefKind::Union => {
+ check_union(tcx, id.owner_id.def_id);
+ }
+ DefKind::OpaqueTy => {
+ check_opaque(tcx, id);
+ }
+ DefKind::ImplTraitPlaceholder => {
+ let parent = tcx.impl_trait_in_trait_parent(id.owner_id.to_def_id());
+ // Only check the validity of this opaque type if the function has a default body
+ if let hir::Node::TraitItem(hir::TraitItem {
+ kind: hir::TraitItemKind::Fn(_, hir::TraitFn::Provided(_)),
+ ..
+ }) = tcx.hir().get_by_def_id(parent.expect_local())
+ {
+ check_opaque(tcx, id);
+ }
+ }
+ DefKind::TyAlias => {
+ let pty_ty = tcx.type_of(id.owner_id);
+ let generics = tcx.generics_of(id.owner_id);
+ check_type_params_are_used(tcx, &generics, pty_ty);
+ }
+ DefKind::ForeignMod => {
+ let it = tcx.hir().item(id);
+ let hir::ItemKind::ForeignMod { abi, items } = it.kind else {
+ return;
+ };
+ check_abi(tcx, it.hir_id(), it.span, abi);
+
+ if abi == Abi::RustIntrinsic {
+ for item in items {
+ let item = tcx.hir().foreign_item(item.id);
+ intrinsic::check_intrinsic_type(tcx, item);
+ }
+ } else if abi == Abi::PlatformIntrinsic {
+ for item in items {
+ let item = tcx.hir().foreign_item(item.id);
+ intrinsic::check_platform_intrinsic_type(tcx, item);
+ }
+ } else {
+ for item in items {
+ let def_id = item.id.owner_id.def_id;
+ let generics = tcx.generics_of(def_id);
+ let own_counts = generics.own_counts();
+ if generics.params.len() - own_counts.lifetimes != 0 {
+ let (kinds, kinds_pl, egs) = match (own_counts.types, own_counts.consts) {
+ (_, 0) => ("type", "types", Some("u32")),
+ // We don't specify an example value, because we can't generate
+ // a valid value for any type.
+ (0, _) => ("const", "consts", None),
+ _ => ("type or const", "types or consts", None),
+ };
+ struct_span_err!(
+ tcx.sess,
+ item.span,
+ E0044,
+ "foreign items may not have {kinds} parameters",
+ )
+ .span_label(item.span, &format!("can't have {kinds} parameters"))
+ .help(
+ // FIXME: once we start storing spans for type arguments, turn this
+ // into a suggestion.
+ &format!(
+ "replace the {} parameters with concrete {}{}",
+ kinds,
+ kinds_pl,
+ egs.map(|egs| format!(" like `{}`", egs)).unwrap_or_default(),
+ ),
+ )
+ .emit();
+ }
+
+ let item = tcx.hir().foreign_item(item.id);
+ match item.kind {
+ hir::ForeignItemKind::Fn(ref fn_decl, _, _) => {
+ require_c_abi_if_c_variadic(tcx, fn_decl, abi, item.span);
+ }
+ hir::ForeignItemKind::Static(..) => {
+ check_static_inhabited(tcx, def_id);
+ }
+ _ => {}
+ }
+ }
+ }
+ }
+ DefKind::GlobalAsm => {
+ let it = tcx.hir().item(id);
+ let hir::ItemKind::GlobalAsm(asm) = it.kind else { span_bug!(it.span, "DefKind::GlobalAsm but got {:#?}", it) };
+ InlineAsmCtxt::new_global_asm(tcx).check_asm(asm, id.hir_id());
+ }
+ _ => {}
+ }
+}
+
+pub(super) fn check_on_unimplemented(tcx: TyCtxt<'_>, item: &hir::Item<'_>) {
+ // an error would be reported if this fails.
+ let _ = traits::OnUnimplementedDirective::of_item(tcx, item.owner_id.to_def_id());
+}
+
+pub(super) fn check_specialization_validity<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ trait_def: &ty::TraitDef,
+ trait_item: &ty::AssocItem,
+ impl_id: DefId,
+ impl_item: &hir::ImplItemRef,
+) {
+ let Ok(ancestors) = trait_def.ancestors(tcx, impl_id) else { return };
+ let mut ancestor_impls = ancestors.skip(1).filter_map(|parent| {
+ if parent.is_from_trait() {
+ None
+ } else {
+ Some((parent, parent.item(tcx, trait_item.def_id)))
+ }
+ });
+
+ let opt_result = ancestor_impls.find_map(|(parent_impl, parent_item)| {
+ match parent_item {
+ // Parent impl exists, and contains the parent item we're trying to specialize, but
+ // doesn't mark it `default`.
+ Some(parent_item) if traits::impl_item_is_final(tcx, &parent_item) => {
+ Some(Err(parent_impl.def_id()))
+ }
+
+ // Parent impl contains item and makes it specializable.
+ Some(_) => Some(Ok(())),
+
+ // Parent impl doesn't mention the item. This means it's inherited from the
+ // grandparent. In that case, if parent is a `default impl`, inherited items use the
+ // "defaultness" from the grandparent, else they are final.
+ None => {
+ if tcx.impl_defaultness(parent_impl.def_id()).is_default() {
+ None
+ } else {
+ Some(Err(parent_impl.def_id()))
+ }
+ }
+ }
+ });
+
+ // If `opt_result` is `None`, we have only encountered `default impl`s that don't contain the
+    // item. This is allowed; the item isn't actually getting specialized here.
+ let result = opt_result.unwrap_or(Ok(()));
+
+ if let Err(parent_impl) = result {
+ report_forbidden_specialization(tcx, impl_item, parent_impl);
+ }
+}
+
+fn check_impl_items_against_trait<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ full_impl_span: Span,
+ impl_id: LocalDefId,
+ impl_trait_ref: ty::TraitRef<'tcx>,
+ impl_item_refs: &[hir::ImplItemRef],
+) {
+ // If the trait reference itself is erroneous (so the compilation is going
+ // to fail), skip checking the items here -- the `impl_item` table in `tcx`
+ // isn't populated for such impls.
+ if impl_trait_ref.references_error() {
+ return;
+ }
+
+ // Negative impls are not expected to have any items
+ match tcx.impl_polarity(impl_id) {
+ ty::ImplPolarity::Reservation | ty::ImplPolarity::Positive => {}
+ ty::ImplPolarity::Negative => {
+ if let [first_item_ref, ..] = impl_item_refs {
+ let first_item_span = tcx.hir().impl_item(first_item_ref.id).span;
+ struct_span_err!(
+ tcx.sess,
+ first_item_span,
+ E0749,
+ "negative impls cannot have any items"
+ )
+ .emit();
+ }
+ return;
+ }
+ }
+
+ let trait_def = tcx.trait_def(impl_trait_ref.def_id);
+
+ for impl_item in impl_item_refs {
+ let ty_impl_item = tcx.associated_item(impl_item.id.owner_id);
+ let ty_trait_item = if let Some(trait_item_id) = ty_impl_item.trait_item_def_id {
+ tcx.associated_item(trait_item_id)
+ } else {
+ // Checked in `associated_item`.
+ tcx.sess.delay_span_bug(impl_item.span, "missing associated item in trait");
+ continue;
+ };
+ let impl_item_full = tcx.hir().impl_item(impl_item.id);
+ match impl_item_full.kind {
+ hir::ImplItemKind::Const(..) => {
+ let _ = tcx.compare_assoc_const_impl_item_with_trait_item((
+ impl_item.id.owner_id.def_id,
+ ty_impl_item.trait_item_def_id.unwrap(),
+ ));
+ }
+ hir::ImplItemKind::Fn(..) => {
+ let opt_trait_span = tcx.hir().span_if_local(ty_trait_item.def_id);
+ compare_impl_method(
+ tcx,
+ &ty_impl_item,
+ &ty_trait_item,
+ impl_trait_ref,
+ opt_trait_span,
+ );
+ }
+ hir::ImplItemKind::Type(impl_ty) => {
+ let opt_trait_span = tcx.hir().span_if_local(ty_trait_item.def_id);
+ compare_ty_impl(
+ tcx,
+ &ty_impl_item,
+ impl_ty.span,
+ &ty_trait_item,
+ impl_trait_ref,
+ opt_trait_span,
+ );
+ }
+ }
+
+ check_specialization_validity(
+ tcx,
+ trait_def,
+ &ty_trait_item,
+ impl_id.to_def_id(),
+ impl_item,
+ );
+ }
+
+ if let Ok(ancestors) = trait_def.ancestors(tcx, impl_id.to_def_id()) {
+ // Check for missing items from trait
+ let mut missing_items = Vec::new();
+
+ let mut must_implement_one_of: Option<&[Ident]> =
+ trait_def.must_implement_one_of.as_deref();
+
+ for &trait_item_id in tcx.associated_item_def_ids(impl_trait_ref.def_id) {
+ let is_implemented = ancestors
+ .leaf_def(tcx, trait_item_id)
+ .map_or(false, |node_item| node_item.item.defaultness(tcx).has_value());
+
+ if !is_implemented && tcx.impl_defaultness(impl_id).is_final() {
+ missing_items.push(tcx.associated_item(trait_item_id));
+ }
+
+ // true if this item is specifically implemented in this impl
+ let is_implemented_here = ancestors
+ .leaf_def(tcx, trait_item_id)
+ .map_or(false, |node_item| !node_item.defining_node.is_from_trait());
+
+ if !is_implemented_here {
+ match tcx.eval_default_body_stability(trait_item_id, full_impl_span) {
+ EvalResult::Deny { feature, reason, issue, .. } => default_body_is_unstable(
+ tcx,
+ full_impl_span,
+ trait_item_id,
+ feature,
+ reason,
+ issue,
+ ),
+
+ // Unmarked default bodies are considered stable (at least for now).
+ EvalResult::Allow | EvalResult::Unmarked => {}
+ }
+ }
+
+ if let Some(required_items) = &must_implement_one_of {
+ if is_implemented_here {
+ let trait_item = tcx.associated_item(trait_item_id);
+ if required_items.contains(&trait_item.ident(tcx)) {
+ must_implement_one_of = None;
+ }
+ }
+ }
+ }
+
+ if !missing_items.is_empty() {
+ missing_items_err(tcx, tcx.def_span(impl_id), &missing_items, full_impl_span);
+ }
+
+ if let Some(missing_items) = must_implement_one_of {
+ let attr_span = tcx
+ .get_attr(impl_trait_ref.def_id, sym::rustc_must_implement_one_of)
+ .map(|attr| attr.span);
+
+ missing_items_must_implement_one_of_err(
+ tcx,
+ tcx.def_span(impl_id),
+ missing_items,
+ attr_span,
+ );
+ }
+ }
+}
+
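+/// Check the well-formedness of a `#[repr(simd)]` struct: it must be non-empty,
+/// homogeneous, have at most `MAX_SIMD_LANES` elements, and its element type must
+/// be a primitive scalar (or an array of such scalars); generic element types are
+/// deferred to monomorphization.
+///
+/// Illustrative examples (hypothetical types):
+///
+/// ```ignore (illustrative)
+/// #[repr(simd)] struct Fine(f32, f32, f32, f32);
+/// #[repr(simd)] struct Empty();           // error[E0075]: SIMD vector cannot be empty
+/// #[repr(simd)] struct Mixed(u8, u16);    // error[E0076]: SIMD vector should be homogeneous
+/// #[repr(simd)] struct Chars(char, char); // error[E0077]: not a primitive scalar
+/// ```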
+pub fn check_simd(tcx: TyCtxt<'_>, sp: Span, def_id: LocalDefId) {
+ let t = tcx.type_of(def_id);
+ if let ty::Adt(def, substs) = t.kind()
+ && def.is_struct()
+ {
+ let fields = &def.non_enum_variant().fields;
+ if fields.is_empty() {
+ struct_span_err!(tcx.sess, sp, E0075, "SIMD vector cannot be empty").emit();
+ return;
+ }
+ let e = fields[0].ty(tcx, substs);
+ if !fields.iter().all(|f| f.ty(tcx, substs) == e) {
+ struct_span_err!(tcx.sess, sp, E0076, "SIMD vector should be homogeneous")
+ .span_label(sp, "SIMD elements must have the same type")
+ .emit();
+ return;
+ }
+
+ let len = if let ty::Array(_ty, c) = e.kind() {
+ c.try_eval_usize(tcx, tcx.param_env(def.did()))
+ } else {
+ Some(fields.len() as u64)
+ };
+ if let Some(len) = len {
+ if len == 0 {
+ struct_span_err!(tcx.sess, sp, E0075, "SIMD vector cannot be empty").emit();
+ return;
+ } else if len > MAX_SIMD_LANES {
+ struct_span_err!(
+ tcx.sess,
+ sp,
+ E0075,
+ "SIMD vector cannot have more than {MAX_SIMD_LANES} elements",
+ )
+ .emit();
+ return;
+ }
+ }
+
+ // Check that we use types valid for use in the lanes of a SIMD "vector register"
+ // These are scalar types which directly match a "machine" type
+ // Yes: Integers, floats, "thin" pointers
+ // No: char, "fat" pointers, compound types
+ match e.kind() {
+ ty::Param(_) => (), // pass struct<T>(T, T, T, T) through, let monomorphization catch errors
+ ty::Int(_) | ty::Uint(_) | ty::Float(_) | ty::RawPtr(_) => (), // struct(u8, u8, u8, u8) is ok
+ ty::Array(t, _) if matches!(t.kind(), ty::Param(_)) => (), // pass struct<T>([T; N]) through, let monomorphization catch errors
+ ty::Array(t, _clen)
+ if matches!(
+ t.kind(),
+ ty::Int(_) | ty::Uint(_) | ty::Float(_) | ty::RawPtr(_)
+ ) =>
+ { /* struct([f32; 4]) is ok */ }
+ _ => {
+ struct_span_err!(
+ tcx.sess,
+ sp,
+ E0077,
+ "SIMD vector element type should be a \
+ primitive scalar (integer/float/pointer) type"
+ )
+ .emit();
+ return;
+ }
+ }
+ }
+}
+
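+/// Checks `#[repr(packed)]` types: conflicting packed hints are rejected (E0634),
+/// combining `packed` with `align` is rejected (E0587), and a packed type may not
+/// transitively contain a `#[repr(align)]` type (E0588). An illustrative example
+/// (hypothetical type name) rejected by the packed/align conflict check:
+///
+/// ```compile_fail,E0587
+/// #[repr(packed, align(8))]
+/// struct Conflicting(u32);
+/// ```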
+pub(super) fn check_packed(tcx: TyCtxt<'_>, sp: Span, def: ty::AdtDef<'_>) {
+ let repr = def.repr();
+ if repr.packed() {
+ for attr in tcx.get_attrs(def.did(), sym::repr) {
+ for r in attr::parse_repr_attr(&tcx.sess, attr) {
+ if let attr::ReprPacked(pack) = r
+ && let Some(repr_pack) = repr.pack
+ && pack as u64 != repr_pack.bytes()
+ {
+ struct_span_err!(
+ tcx.sess,
+ sp,
+ E0634,
+ "type has conflicting packed representation hints"
+ )
+ .emit();
+ }
+ }
+ }
+ if repr.align.is_some() {
+ struct_span_err!(
+ tcx.sess,
+ sp,
+ E0587,
+ "type has conflicting packed and align representation hints"
+ )
+ .emit();
+ } else {
+ if let Some(def_spans) = check_packed_inner(tcx, def.did(), &mut vec![]) {
+ let mut err = struct_span_err!(
+ tcx.sess,
+ sp,
+ E0588,
+ "packed type cannot transitively contain a `#[repr(align)]` type"
+ );
+
+ err.span_note(
+ tcx.def_span(def_spans[0].0),
+ &format!(
+ "`{}` has a `#[repr(align)]` attribute",
+ tcx.item_name(def_spans[0].0)
+ ),
+ );
+
+ if def_spans.len() > 2 {
+ let mut first = true;
+ for (adt_def, span) in def_spans.iter().skip(1).rev() {
+ let ident = tcx.item_name(*adt_def);
+ err.span_note(
+ *span,
+ &if first {
+ format!(
+ "`{}` contains a field of type `{}`",
+ tcx.type_of(def.did()),
+ ident
+ )
+ } else {
+ format!("...which contains a field of type `{ident}`")
+ },
+ );
+ first = false;
+ }
+ }
+
+ err.emit();
+ }
+ }
+ }
+}
+
+pub(super) fn check_packed_inner(
+ tcx: TyCtxt<'_>,
+ def_id: DefId,
+ stack: &mut Vec<DefId>,
+) -> Option<Vec<(DefId, Span)>> {
+ if let ty::Adt(def, substs) = tcx.type_of(def_id).kind() {
+ if def.is_struct() || def.is_union() {
+ if def.repr().align.is_some() {
+ return Some(vec![(def.did(), DUMMY_SP)]);
+ }
+
+ stack.push(def_id);
+ for field in &def.non_enum_variant().fields {
+ if let ty::Adt(def, _) = field.ty(tcx, substs).kind()
+ && !stack.contains(&def.did())
+ && let Some(mut defs) = check_packed_inner(tcx, def.did(), stack)
+ {
+ defs.push((def.did(), field.ident(tcx).span));
+ return Some(defs);
+ }
+ }
+ stack.pop();
+ }
+ }
+
+ None
+}
+
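+/// Checks `#[repr(transparent)]` types: there must be at most one non-zero-sized
+/// field, and every zero-sized field must have alignment 1 (E0691). An illustrative
+/// example (hypothetical type names) rejected by the alignment check:
+///
+/// ```compile_fail,E0691
+/// #[repr(align(32))]
+/// struct ForceAlign32;
+///
+/// #[repr(transparent)]
+/// struct Wrapper(f32, ForceAlign32); // zero-sized field has alignment larger than 1
+/// ```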
+pub(super) fn check_transparent<'tcx>(tcx: TyCtxt<'tcx>, sp: Span, adt: ty::AdtDef<'tcx>) {
+ if !adt.repr().transparent() {
+ return;
+ }
+
+ if adt.is_union() && !tcx.features().transparent_unions {
+ feature_err(
+ &tcx.sess.parse_sess,
+ sym::transparent_unions,
+ sp,
+ "transparent unions are unstable",
+ )
+ .emit();
+ }
+
+ if adt.variants().len() != 1 {
+ bad_variant_count(tcx, adt, sp, adt.did());
+ if adt.variants().is_empty() {
+ // Don't bother checking the fields. No variants (and thus no fields) exist.
+ return;
+ }
+ }
+
+ // For each field, figure out if it's known to be a ZST and align(1), with "known"
+ // respecting #[non_exhaustive] attributes.
+ let field_infos = adt.all_fields().map(|field| {
+ let ty = field.ty(tcx, InternalSubsts::identity_for_item(tcx, field.did));
+ let param_env = tcx.param_env(field.did);
+ let layout = tcx.layout_of(param_env.and(ty));
+ // We are currently checking the type this field came from, so it must be local
+ let span = tcx.hir().span_if_local(field.did).unwrap();
+ let zst = layout.map_or(false, |layout| layout.is_zst());
+ let align1 = layout.map_or(false, |layout| layout.align.abi.bytes() == 1);
+ if !zst {
+ return (span, zst, align1, None);
+ }
+
+ fn check_non_exhaustive<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ t: Ty<'tcx>,
+ ) -> ControlFlow<(&'static str, DefId, SubstsRef<'tcx>, bool)> {
+ match t.kind() {
+ ty::Tuple(list) => list.iter().try_for_each(|t| check_non_exhaustive(tcx, t)),
+ ty::Array(ty, _) => check_non_exhaustive(tcx, *ty),
+ ty::Adt(def, subst) => {
+ if !def.did().is_local() {
+ let non_exhaustive = def.is_variant_list_non_exhaustive()
+ || def
+ .variants()
+ .iter()
+ .any(ty::VariantDef::is_field_list_non_exhaustive);
+ let has_priv = def.all_fields().any(|f| !f.vis.is_public());
+ if non_exhaustive || has_priv {
+ return ControlFlow::Break((
+ def.descr(),
+ def.did(),
+ subst,
+ non_exhaustive,
+ ));
+ }
+ }
+ def.all_fields()
+ .map(|field| field.ty(tcx, subst))
+ .try_for_each(|t| check_non_exhaustive(tcx, t))
+ }
+ _ => ControlFlow::Continue(()),
+ }
+ }
+
+ (span, zst, align1, check_non_exhaustive(tcx, ty).break_value())
+ });
+
+ let non_zst_fields = field_infos
+ .clone()
+ .filter_map(|(span, zst, _align1, _non_exhaustive)| if !zst { Some(span) } else { None });
+ let non_zst_count = non_zst_fields.clone().count();
+ if non_zst_count >= 2 {
+ bad_non_zero_sized_fields(tcx, adt, non_zst_count, non_zst_fields, sp);
+ }
+ let incompatible_zst_fields =
+ field_infos.clone().filter(|(_, _, _, opt)| opt.is_some()).count();
+ let incompat = incompatible_zst_fields + non_zst_count >= 2 && non_zst_count < 2;
+ for (span, zst, align1, non_exhaustive) in field_infos {
+ if zst && !align1 {
+ struct_span_err!(
+ tcx.sess,
+ span,
+ E0691,
+ "zero-sized field in transparent {} has alignment larger than 1",
+ adt.descr(),
+ )
+ .span_label(span, "has alignment larger than 1")
+ .emit();
+ }
+ if incompat && let Some((descr, def_id, substs, non_exhaustive)) = non_exhaustive {
+ tcx.struct_span_lint_hir(
+ REPR_TRANSPARENT_EXTERNAL_PRIVATE_FIELDS,
+ tcx.hir().local_def_id_to_hir_id(adt.did().expect_local()),
+ span,
+ "zero-sized fields in `repr(transparent)` cannot contain external non-exhaustive types",
+ |lint| {
+ let note = if non_exhaustive {
+ "is marked with `#[non_exhaustive]`"
+ } else {
+ "contains private fields"
+ };
+ let field_ty = tcx.def_path_str_with_substs(def_id, substs);
+ lint
+ .note(format!("this {descr} contains `{field_ty}`, which {note}, \
+ and makes it not a breaking change to become non-zero-sized in the future."))
+ },
+ )
+ }
+ }
+}
+
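+/// Checks an enum definition: a zero-variant enum may not carry a `#[repr]` attribute
+/// (E0084), 128-bit discriminant representations are feature-gated, explicit
+/// discriminants combined with non-unit variants require an explicit integer `#[repr]`
+/// (E0732), and duplicate discriminants are rejected (E0081). For illustration
+/// (hypothetical enum name):
+///
+/// ```compile_fail,E0084
+/// #[repr(i32)]
+/// enum NightsWatch {} // zero-variant enum cannot have a representation hint
+/// ```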
+#[allow(trivial_numeric_casts)]
+fn check_enum<'tcx>(tcx: TyCtxt<'tcx>, vs: &'tcx [hir::Variant<'tcx>], def_id: LocalDefId) {
+ let def = tcx.adt_def(def_id);
+ let sp = tcx.def_span(def_id);
+ def.destructor(tcx); // force the destructor to be evaluated
+
+ if vs.is_empty() {
+ if let Some(attr) = tcx.get_attrs(def_id.to_def_id(), sym::repr).next() {
+ struct_span_err!(
+ tcx.sess,
+ attr.span,
+ E0084,
+ "unsupported representation for zero-variant enum"
+ )
+ .span_label(sp, "zero-variant enum")
+ .emit();
+ }
+ }
+
+ let repr_type_ty = def.repr().discr_type().to_ty(tcx);
+ if repr_type_ty == tcx.types.i128 || repr_type_ty == tcx.types.u128 {
+ if !tcx.features().repr128 {
+ feature_err(
+ &tcx.sess.parse_sess,
+ sym::repr128,
+ sp,
+ "repr with 128-bit type is unstable",
+ )
+ .emit();
+ }
+ }
+
+ for v in vs {
+ if let Some(ref e) = v.disr_expr {
+ tcx.ensure().typeck(tcx.hir().local_def_id(e.hir_id));
+ }
+ }
+
+ if tcx.adt_def(def_id).repr().int.is_none() {
+ let is_unit = |var: &hir::Variant<'_>| matches!(var.data, hir::VariantData::Unit(..));
+
+ let has_disr = |var: &hir::Variant<'_>| var.disr_expr.is_some();
+ let has_non_units = vs.iter().any(|var| !is_unit(var));
+ let disr_units = vs.iter().any(|var| is_unit(&var) && has_disr(&var));
+ let disr_non_unit = vs.iter().any(|var| !is_unit(&var) && has_disr(&var));
+
+ if disr_non_unit || (disr_units && has_non_units) {
+ let mut err =
+ struct_span_err!(tcx.sess, sp, E0732, "`#[repr(inttype)]` must be specified");
+ err.emit();
+ }
+ }
+
+ detect_discriminant_duplicate(tcx, def.discriminants(tcx).collect(), vs, sp);
+
+ check_transparent(tcx, sp, def);
+}
+
+/// Part of enum check. Given the discriminants of an enum, errors if two or more discriminants are equal (E0081).
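+///
+/// For illustration (hypothetical enum), both variants below resolve to the same value:
+///
+/// ```compile_fail,E0081
+/// enum Enum {
+///     P = 3,
+///     X = 3, // error: discriminant value `3` assigned more than once
+/// }
+/// ```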
+fn detect_discriminant_duplicate<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ mut discrs: Vec<(VariantIdx, Discr<'tcx>)>,
+ vs: &'tcx [hir::Variant<'tcx>],
+ self_span: Span,
+) {
+ // Helper closure to reduce duplicate code. This gets called every time we detect a duplicate.
+ // Here `idx` is the index of the duplicate discriminant's variant in `vs` (declaration order).
+ let report = |dis: Discr<'tcx>, idx: usize, err: &mut Diagnostic| {
+ let var = &vs[idx]; // HIR for the duplicate discriminant
+ let (span, display_discr) = match var.disr_expr {
+ Some(ref expr) => {
+ // In the case the discriminant is both a duplicate and overflowed, let the user know
+ if let hir::ExprKind::Lit(lit) = &tcx.hir().body(expr.body).value.kind
+ && let rustc_ast::LitKind::Int(lit_value, _int_kind) = &lit.node
+ && *lit_value != dis.val
+ {
+ (tcx.hir().span(expr.hir_id), format!("`{dis}` (overflowed from `{lit_value}`)"))
+ // Otherwise, format the value as-is
+ } else {
+ (tcx.hir().span(expr.hir_id), format!("`{dis}`"))
+ }
+ }
+ None => {
+ // At this point we know this discriminant is a duplicate, and was not explicitly
+ // assigned by the user. Iterate backwards to fetch the HIR for the last explicitly
+ // assigned discriminant, and let the user know that it was the starting point of the
+ // increment and how many steps from there led to the duplicate.
+ if let Some((n, hir::Variant { span, ident, .. })) =
+ vs[..idx].iter().rev().enumerate().find(|v| v.1.disr_expr.is_some())
+ {
+ let ve_ident = var.ident;
+ let n = n + 1;
+ let sp = if n > 1 { "variants" } else { "variant" };
+
+ err.span_label(
+ *span,
+ format!("discriminant for `{ve_ident}` incremented from this startpoint (`{ident}` + {n} {sp} later => `{ve_ident}` = {dis})"),
+ );
+ }
+
+ (vs[idx].span, format!("`{dis}`"))
+ }
+ };
+
+ err.span_label(span, format!("{display_discr} assigned here"));
+ };
+
+ // Here we loop through the discriminants, comparing each discriminant to another.
+ // When a duplicate is detected, we instantiate an error and point to both
+ // initial and duplicate value. The duplicate discriminant is then discarded by swapping
+ // it with the last element and decrementing the `vec.len` (which is why we have to evaluate
+ // `discrs.len()` anew every iteration, and why this could be tricky to do in a functional
+ // style as we are mutating `discrs` on the fly).
+ let mut i = 0;
+ while i < discrs.len() {
+ let hir_var_i_idx = discrs[i].0.index();
+ let mut error: Option<DiagnosticBuilder<'_, _>> = None;
+
+ let mut o = i + 1;
+ while o < discrs.len() {
+ let hir_var_o_idx = discrs[o].0.index();
+
+ if discrs[i].1.val == discrs[o].1.val {
+ let err = error.get_or_insert_with(|| {
+ let mut ret = struct_span_err!(
+ tcx.sess,
+ self_span,
+ E0081,
+ "discriminant value `{}` assigned more than once",
+ discrs[i].1,
+ );
+
+ report(discrs[i].1, hir_var_i_idx, &mut ret);
+
+ ret
+ });
+
+ report(discrs[o].1, hir_var_o_idx, err);
+
+ // Safe to unwrap here, as we wouldn't reach this point if `discrs` was empty
+ discrs[o] = *discrs.last().unwrap();
+ discrs.pop();
+ } else {
+ o += 1;
+ }
+ }
+
+ if let Some(mut e) = error {
+ e.emit();
+ }
+
+ i += 1;
+ }
+}
+
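+/// Checks that every type parameter declared on the item is actually used in its type.
+/// An illustrative (hypothetical) type alias rejected by this check:
+///
+/// ```compile_fail,E0091
+/// type Foo<T> = u32; // error[E0091]: type parameter `T` is unused
+/// ```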
+pub(super) fn check_type_params_are_used<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ generics: &ty::Generics,
+ ty: Ty<'tcx>,
+) {
+ debug!("check_type_params_are_used(generics={:?}, ty={:?})", generics, ty);
+
+ assert_eq!(generics.parent, None);
+
+ if generics.own_counts().types == 0 {
+ return;
+ }
+
+ let mut params_used = BitSet::new_empty(generics.params.len());
+
+ if ty.references_error() {
+ // If there is already another error, do not emit
+ // an error for not using a type parameter.
+ assert!(tcx.sess.has_errors().is_some());
+ return;
+ }
+
+ for leaf in ty.walk() {
+ if let GenericArgKind::Type(leaf_ty) = leaf.unpack()
+ && let ty::Param(param) = leaf_ty.kind()
+ {
+ debug!("found use of ty param {:?}", param);
+ params_used.insert(param.index);
+ }
+ }
+
+ for param in &generics.params {
+ if !params_used.contains(param.index)
+ && let ty::GenericParamDefKind::Type { .. } = param.kind
+ {
+ let span = tcx.def_span(param.def_id);
+ struct_span_err!(
+ tcx.sess,
+ span,
+ E0091,
+ "type parameter `{}` is unused",
+ param.name,
+ )
+ .span_label(span, "unused type parameter")
+ .emit();
+ }
+ }
+}
+
+pub(super) fn check_mod_item_types(tcx: TyCtxt<'_>, module_def_id: LocalDefId) {
+ let module = tcx.hir_module_items(module_def_id);
+ for id in module.items() {
+ check_item_type(tcx, id);
+ }
+}
+
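+/// An illustrative (hypothetical) recursive `async fn` that triggers this error:
+///
+/// ```compile_fail,E0733,edition2018
+/// async fn recurse(n: usize) {
+///     if n > 0 {
+///         recurse(n - 1).await;
+///     }
+/// }
+/// ```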
+fn async_opaque_type_cycle_error(tcx: TyCtxt<'_>, span: Span) -> ErrorGuaranteed {
+ struct_span_err!(tcx.sess, span, E0733, "recursion in an `async fn` requires boxing")
+ .span_label(span, "recursive `async fn`")
+ .note("a recursive `async fn` must be rewritten to return a boxed `dyn Future`")
+ .note(
+ "consider using the `async_recursion` crate: https://crates.io/crates/async_recursion",
+ )
+ .emit()
+}
+
+/// Emit an error for recursive opaque types.
+///
+/// If this is a return `impl Trait`, find the item's return expressions and point at them. For
+/// direct recursion this is enough, but for indirect recursion also point at the last intermediary
+/// `impl Trait`.
+///
+/// If all the return expressions evaluate to `!`, then we explain that the error will go away
+/// after changing it. This can happen when a user uses `panic!()` or similar as a placeholder.
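+///
+/// An illustrative example (hypothetical function) that expands to a recursive opaque
+/// type and is reported with E0720:
+///
+/// ```compile_fail,E0720
+/// fn make_recursive_type() -> impl Sized {
+///     [make_recursive_type(), make_recursive_type()]
+/// }
+/// ```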
+fn opaque_type_cycle_error(tcx: TyCtxt<'_>, def_id: LocalDefId, span: Span) -> ErrorGuaranteed {
+ let mut err = struct_span_err!(tcx.sess, span, E0720, "cannot resolve opaque type");
+
+ let mut label = false;
+ if let Some((def_id, visitor)) = get_owner_return_paths(tcx, def_id) {
+ let typeck_results = tcx.typeck(def_id);
+ if visitor
+ .returns
+ .iter()
+ .filter_map(|expr| typeck_results.node_type_opt(expr.hir_id))
+ .all(|ty| matches!(ty.kind(), ty::Never))
+ {
+ let spans = visitor
+ .returns
+ .iter()
+ .filter(|expr| typeck_results.node_type_opt(expr.hir_id).is_some())
+ .map(|expr| expr.span)
+ .collect::<Vec<Span>>();
+ let span_len = spans.len();
+ if span_len == 1 {
+ err.span_label(spans[0], "this returned value is of `!` type");
+ } else {
+ let mut multispan: MultiSpan = spans.clone().into();
+ for span in spans {
+ multispan.push_span_label(span, "this returned value is of `!` type");
+ }
+ err.span_note(multispan, "these returned values have a concrete \"never\" type");
+ }
+ err.help("this error will resolve once the item's body returns a concrete type");
+ } else {
+ let mut seen = FxHashSet::default();
+ seen.insert(span);
+ err.span_label(span, "recursive opaque type");
+ label = true;
+ for (sp, ty) in visitor
+ .returns
+ .iter()
+ .filter_map(|e| typeck_results.node_type_opt(e.hir_id).map(|t| (e.span, t)))
+ .filter(|(_, ty)| !matches!(ty.kind(), ty::Never))
+ {
+ struct OpaqueTypeCollector(Vec<DefId>);
+ impl<'tcx> ty::visit::TypeVisitor<'tcx> for OpaqueTypeCollector {
+ fn visit_ty(&mut self, t: Ty<'tcx>) -> ControlFlow<Self::BreakTy> {
+ match *t.kind() {
+ ty::Opaque(def, _) => {
+ self.0.push(def);
+ ControlFlow::CONTINUE
+ }
+ _ => t.super_visit_with(self),
+ }
+ }
+ }
+ let mut visitor = OpaqueTypeCollector(vec![]);
+ ty.visit_with(&mut visitor);
+ for def_id in visitor.0 {
+ let ty_span = tcx.def_span(def_id);
+ if !seen.contains(&ty_span) {
+ err.span_label(ty_span, &format!("returning this opaque type `{ty}`"));
+ seen.insert(ty_span);
+ }
+ err.span_label(sp, &format!("returning here with type `{ty}`"));
+ }
+ }
+ }
+ }
+ if !label {
+ err.span_label(span, "cannot resolve opaque type");
+ }
+ err.emit()
+}
diff --git a/compiler/rustc_hir_analysis/src/check/compare_method.rs b/compiler/rustc_hir_analysis/src/check/compare_method.rs
new file mode 100644
index 000000000..32f66b06f
--- /dev/null
+++ b/compiler/rustc_hir_analysis/src/check/compare_method.rs
@@ -0,0 +1,1825 @@
+use super::potentially_plural_count;
+use crate::errors::LifetimesOrBoundsMismatchOnTrait;
+use hir::def_id::{DefId, LocalDefId};
+use rustc_data_structures::fx::{FxHashMap, FxHashSet};
+use rustc_errors::{pluralize, struct_span_err, Applicability, DiagnosticId, ErrorGuaranteed};
+use rustc_hir as hir;
+use rustc_hir::def::{DefKind, Res};
+use rustc_hir::intravisit;
+use rustc_hir::{GenericParamKind, ImplItemKind, TraitItemKind};
+use rustc_infer::infer::outlives::env::OutlivesEnvironment;
+use rustc_infer::infer::type_variable::{TypeVariableOrigin, TypeVariableOriginKind};
+use rustc_infer::infer::{self, TyCtxtInferExt};
+use rustc_infer::traits::util;
+use rustc_middle::ty::error::{ExpectedFound, TypeError};
+use rustc_middle::ty::util::ExplicitSelf;
+use rustc_middle::ty::InternalSubsts;
+use rustc_middle::ty::{
+ self, AssocItem, DefIdTree, Ty, TypeFoldable, TypeFolder, TypeSuperFoldable, TypeVisitable,
+};
+use rustc_middle::ty::{GenericParamDefKind, ToPredicate, TyCtxt};
+use rustc_span::Span;
+use rustc_trait_selection::traits::error_reporting::TypeErrCtxtExt;
+use rustc_trait_selection::traits::outlives_bounds::InferCtxtExt as _;
+use rustc_trait_selection::traits::{
+ self, ObligationCause, ObligationCauseCode, ObligationCtxt, Reveal,
+};
+use std::iter;
+
+/// Checks that a method from an impl conforms to the signature of
+/// the same method as declared in the trait.
+///
+/// # Parameters
+///
+/// - `impl_m`: the method in the impl that we are checking
+/// - `trait_m`: the corresponding method declared in the trait
+/// - `impl_trait_ref`: the `TraitRef` corresponding to the trait implementation
+/// - `trait_item_span`: the span of the trait method definition, if it is local
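+///
+/// A signature mismatch between the impl and the trait is reported as E0053, e.g.
+/// (illustrative types):
+///
+/// ```compile_fail,E0053
+/// trait Foo {
+///     fn foo(x: u16);
+/// }
+///
+/// struct Bar;
+///
+/// impl Foo for Bar {
+///     fn foo(x: i16) {} // expected `u16`, found `i16`
+/// }
+/// ```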
+pub(crate) fn compare_impl_method<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ impl_m: &ty::AssocItem,
+ trait_m: &ty::AssocItem,
+ impl_trait_ref: ty::TraitRef<'tcx>,
+ trait_item_span: Option<Span>,
+) {
+ debug!("compare_impl_method(impl_trait_ref={:?})", impl_trait_ref);
+
+ let impl_m_span = tcx.def_span(impl_m.def_id);
+
+ if let Err(_) = compare_self_type(tcx, impl_m, impl_m_span, trait_m, impl_trait_ref) {
+ return;
+ }
+
+ if let Err(_) = compare_number_of_generics(tcx, impl_m, impl_m_span, trait_m, trait_item_span) {
+ return;
+ }
+
+ if let Err(_) = compare_generic_param_kinds(tcx, impl_m, trait_m) {
+ return;
+ }
+
+ if let Err(_) =
+ compare_number_of_method_arguments(tcx, impl_m, impl_m_span, trait_m, trait_item_span)
+ {
+ return;
+ }
+
+ if let Err(_) = compare_synthetic_generics(tcx, impl_m, trait_m) {
+ return;
+ }
+
+ if let Err(_) = compare_predicate_entailment(tcx, impl_m, impl_m_span, trait_m, impl_trait_ref)
+ {
+ return;
+ }
+}
+
+/// This function is best explained by example. Consider a trait:
+///
+/// trait Trait<'t, T> {
+/// // `trait_m`
+/// fn method<'a, M>(t: &'t T, m: &'a M) -> Self;
+/// }
+///
+/// And an impl:
+///
+/// impl<'i, 'j, U> Trait<'j, &'i U> for Foo {
+/// // `impl_m`
+/// fn method<'b, N>(t: &'j &'i U, m: &'b N) -> Foo;
+/// }
+///
+/// We wish to decide if those two method types are compatible.
+/// For this we have to show that, assuming the bounds of the impl hold, the
+/// bounds of `trait_m` imply the bounds of `impl_m`.
+///
+/// We start out with `trait_to_impl_substs`, that maps the trait
+/// type parameters to impl type parameters. This is taken from the
+/// impl trait reference:
+///
+/// trait_to_impl_substs = {'t => 'j, T => &'i U, Self => Foo}
+///
+/// We create a mapping `dummy_substs` that maps from the impl type
+/// parameters to fresh types and regions. For type parameters,
+/// this is the identity transform, but we could as well use any
+/// placeholder types. For regions, we convert from bound to free
+/// regions (Note: but only early-bound regions, i.e., those
+/// declared on the impl or used in type parameter bounds).
+///
+/// impl_to_placeholder_substs = {'i => 'i0, U => U0, N => N0 }
+///
+/// Now we can apply `placeholder_substs` to the type of the impl method
+/// to yield a new function type in terms of our fresh, placeholder
+/// types:
+///
+/// <'b> fn(t: &'i0 U0, m: &'b N0) -> Foo
+///
+/// We now want to extract and substitute the type of the *trait*
+/// method and compare it. To do so, we must create a compound
+/// substitution by combining `trait_to_impl_substs` and
+/// `impl_to_placeholder_substs`, and also adding a mapping for the method
+/// type parameters. We extend the mapping to also include
+/// the method parameters.
+///
+/// trait_to_placeholder_substs = { T => &'i0 U0, Self => Foo, M => N0 }
+///
+/// Applying this to the trait method type yields:
+///
+/// <'a> fn(t: &'i0 U0, m: &'a N0) -> Foo
+///
+/// This type is the same except for the name of the bound region (`'a`
+/// vs `'b`). However, the normal subtyping rules on fn types handle
+/// this kind of equivalency just fine.
+///
+/// We now use these substitutions to ensure that all declared bounds are
+/// satisfied by the implementation's method.
+///
+/// We do this by creating a parameter environment which contains a
+/// substitution corresponding to `impl_to_placeholder_substs`. We then build
+/// `trait_to_placeholder_substs` and use it to convert the predicates contained
+/// in the `trait_m` generics to the placeholder form.
+///
+/// Finally we register each of these predicates as an obligation and check that
+/// they hold.
+#[instrument(level = "debug", skip(tcx, impl_m_span, impl_trait_ref))]
+fn compare_predicate_entailment<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ impl_m: &AssocItem,
+ impl_m_span: Span,
+ trait_m: &AssocItem,
+ impl_trait_ref: ty::TraitRef<'tcx>,
+) -> Result<(), ErrorGuaranteed> {
+ let trait_to_impl_substs = impl_trait_ref.substs;
+
+ // This node-id should be used for the `body_id` field on each
+ // `ObligationCause` (and the `FnCtxt`).
+ //
+ // FIXME(@lcnr): remove that after removing `cause.body_id` from
+ // obligations.
+ let impl_m_hir_id = tcx.hir().local_def_id_to_hir_id(impl_m.def_id.expect_local());
+ // We sometimes modify the span further down.
+ let mut cause = ObligationCause::new(
+ impl_m_span,
+ impl_m_hir_id,
+ ObligationCauseCode::CompareImplItemObligation {
+ impl_item_def_id: impl_m.def_id.expect_local(),
+ trait_item_def_id: trait_m.def_id,
+ kind: impl_m.kind,
+ },
+ );
+
+ // Create mapping from impl to placeholder.
+ let impl_to_placeholder_substs = InternalSubsts::identity_for_item(tcx, impl_m.def_id);
+
+ // Create mapping from trait to placeholder.
+ let trait_to_placeholder_substs =
+ impl_to_placeholder_substs.rebase_onto(tcx, impl_m.container_id(tcx), trait_to_impl_substs);
+ debug!("compare_impl_method: trait_to_placeholder_substs={:?}", trait_to_placeholder_substs);
+
+ let impl_m_generics = tcx.generics_of(impl_m.def_id);
+ let trait_m_generics = tcx.generics_of(trait_m.def_id);
+ let impl_m_predicates = tcx.predicates_of(impl_m.def_id);
+ let trait_m_predicates = tcx.predicates_of(trait_m.def_id);
+
+ // Check region bounds.
+ check_region_bounds_on_impl_item(tcx, impl_m, trait_m, &trait_m_generics, &impl_m_generics)?;
+
+ // Create obligations for each predicate declared by the impl
+ // definition in the context of the trait's parameter
+ // environment. We can't just use `impl_env.caller_bounds`,
+ // however, because we want to replace all late-bound regions with
+ // region variables.
+ let impl_predicates = tcx.predicates_of(impl_m_predicates.parent.unwrap());
+ let mut hybrid_preds = impl_predicates.instantiate_identity(tcx);
+
+ debug!("compare_impl_method: impl_bounds={:?}", hybrid_preds);
+
+ // This is the only tricky bit of the new way we check implementation methods
+ // We need to build a set of predicates where only the method-level bounds
+ // are from the trait and we assume all other bounds from the implementation
+ // to be previously satisfied.
+ //
+ // We then register the obligations from the impl_m and check to see
+ // if all constraints hold.
+ hybrid_preds
+ .predicates
+ .extend(trait_m_predicates.instantiate_own(tcx, trait_to_placeholder_substs).predicates);
+
+ // Construct trait parameter environment and then shift it into the placeholder viewpoint.
+ // The key step here is to update the caller_bounds's predicates to be
+ // the new hybrid bounds we computed.
+ let normalize_cause = traits::ObligationCause::misc(impl_m_span, impl_m_hir_id);
+ let param_env = ty::ParamEnv::new(
+ tcx.intern_predicates(&hybrid_preds.predicates),
+ Reveal::UserFacing,
+ hir::Constness::NotConst,
+ );
+ let param_env = traits::normalize_param_env_or_error(tcx, param_env, normalize_cause);
+
+ let infcx = &tcx.infer_ctxt().build();
+ let ocx = ObligationCtxt::new(infcx);
+
+ debug!("compare_impl_method: caller_bounds={:?}", param_env.caller_bounds());
+
+ let mut selcx = traits::SelectionContext::new(&infcx);
+ let impl_m_own_bounds = impl_m_predicates.instantiate_own(tcx, impl_to_placeholder_substs);
+ for (predicate, span) in iter::zip(impl_m_own_bounds.predicates, impl_m_own_bounds.spans) {
+ let normalize_cause = traits::ObligationCause::misc(span, impl_m_hir_id);
+ let traits::Normalized { value: predicate, obligations } =
+ traits::normalize(&mut selcx, param_env, normalize_cause, predicate);
+
+ ocx.register_obligations(obligations);
+ let cause = ObligationCause::new(
+ span,
+ impl_m_hir_id,
+ ObligationCauseCode::CompareImplItemObligation {
+ impl_item_def_id: impl_m.def_id.expect_local(),
+ trait_item_def_id: trait_m.def_id,
+ kind: impl_m.kind,
+ },
+ );
+ ocx.register_obligation(traits::Obligation::new(cause, param_env, predicate));
+ }
+
+ // We now need to check that the signature of the impl method is
+ // compatible with that of the trait method. We do this by
+ // checking that `impl_fty <: trait_fty`.
+ //
+ // FIXME. Unfortunately, this doesn't quite work right now because
+ // associated type normalization is not integrated into subtype
+ // checks. For the comparison to be valid, we need to
+ // normalize the associated types in the impl/trait methods
+ // first. However, because function types bind regions, just
+ // calling `normalize_associated_types_in` would have no effect on
+ // any associated types appearing in the fn arguments or return
+ // type.
+
+ // Compute placeholder form of impl and trait method tys.
+ let tcx = infcx.tcx;
+
+ let mut wf_tys = FxHashSet::default();
+
+ let impl_sig = infcx.replace_bound_vars_with_fresh_vars(
+ impl_m_span,
+ infer::HigherRankedType,
+ tcx.fn_sig(impl_m.def_id),
+ );
+
+ let norm_cause = ObligationCause::misc(impl_m_span, impl_m_hir_id);
+ let impl_sig = ocx.normalize(norm_cause.clone(), param_env, impl_sig);
+ let impl_fty = tcx.mk_fn_ptr(ty::Binder::dummy(impl_sig));
+ debug!("compare_impl_method: impl_fty={:?}", impl_fty);
+
+ let trait_sig = tcx.bound_fn_sig(trait_m.def_id).subst(tcx, trait_to_placeholder_substs);
+ let trait_sig = tcx.liberate_late_bound_regions(impl_m.def_id, trait_sig);
+
+ // Next, add all inputs and output as well-formed tys. Importantly,
+ // we have to do this before normalization, since the normalized ty may
+ // not contain the input parameters. See issue #87748.
+ wf_tys.extend(trait_sig.inputs_and_output.iter());
+ let trait_sig = ocx.normalize(norm_cause, param_env, trait_sig);
+ // We also have to add the normalized trait signature
+ // as we don't normalize during implied bounds computation.
+ wf_tys.extend(trait_sig.inputs_and_output.iter());
+ let trait_fty = tcx.mk_fn_ptr(ty::Binder::dummy(trait_sig));
+
+ debug!("compare_impl_method: trait_fty={:?}", trait_fty);
+
+ // FIXME: We'd want to keep more accurate spans than "the method signature" when
+ // processing the comparison between the trait and impl fn, but we sadly lose them
+ // and point at the whole signature when a trait bound or specific input or output
+ // type would be more appropriate. In other places we have a `Vec<Span>`
+ // corresponding to their `Vec<Predicate>`, but we don't have that here.
+ // Fixing this would improve the output of test `issue-83765.rs`.
+ let mut result = infcx
+ .at(&cause, param_env)
+ .sup(trait_fty, impl_fty)
+ .map(|infer_ok| ocx.register_infer_ok_obligations(infer_ok));
+
+ // HACK(RPITIT): #101614. When we are trying to infer the hidden types for
+ // RPITITs, we need to equate the output tys instead of just subtyping. If
+ // we just use `sup` above, we'll end up with `&'static str <: _#1t`, which causes
+ // us to infer `_#1t = &'_#2r str`, where `'_#2r` is unconstrained, which gets
+ // fixed up to `ReEmpty`, and which is certainly not what we want.
+ if trait_fty.has_infer_types() {
+ result = result.and_then(|()| {
+ infcx
+ .at(&cause, param_env)
+ .eq(trait_sig.output(), impl_sig.output())
+ .map(|infer_ok| ocx.register_infer_ok_obligations(infer_ok))
+ });
+ }
+
+ if let Err(terr) = result {
+ debug!("sub_types failed: impl ty {:?}, trait ty {:?}", impl_fty, trait_fty);
+
+ let (impl_err_span, trait_err_span) =
+ extract_spans_for_error_reporting(&infcx, terr, &cause, impl_m, trait_m);
+
+ cause.span = impl_err_span;
+
+ let mut diag = struct_span_err!(
+ tcx.sess,
+ cause.span(),
+ E0053,
+ "method `{}` has an incompatible type for trait",
+ trait_m.name
+ );
+ match &terr {
+ TypeError::ArgumentMutability(0) | TypeError::ArgumentSorts(_, 0)
+ if trait_m.fn_has_self_parameter =>
+ {
+ let ty = trait_sig.inputs()[0];
+ let sugg = match ExplicitSelf::determine(ty, |_| ty == impl_trait_ref.self_ty()) {
+ ExplicitSelf::ByValue => "self".to_owned(),
+ ExplicitSelf::ByReference(_, hir::Mutability::Not) => "&self".to_owned(),
+ ExplicitSelf::ByReference(_, hir::Mutability::Mut) => "&mut self".to_owned(),
+ _ => format!("self: {ty}"),
+ };
+
+ // When the `impl` receiver is an arbitrary self type, like `self: Box<Self>`, the
+ // span points only at the type `Box<Self>`, but we want to cover the whole
+ // argument pattern and type.
+ let span = match tcx.hir().expect_impl_item(impl_m.def_id.expect_local()).kind {
+ ImplItemKind::Fn(ref sig, body) => tcx
+ .hir()
+ .body_param_names(body)
+ .zip(sig.decl.inputs.iter())
+ .map(|(param, ty)| param.span.to(ty.span))
+ .next()
+ .unwrap_or(impl_err_span),
+ _ => bug!("{:?} is not a method", impl_m),
+ };
+
+ diag.span_suggestion(
+ span,
+ "change the self-receiver type to match the trait",
+ sugg,
+ Applicability::MachineApplicable,
+ );
+ }
+ TypeError::ArgumentMutability(i) | TypeError::ArgumentSorts(_, i) => {
+ if trait_sig.inputs().len() == *i {
+ // Suggestion to change output type. We do not suggest in `async` functions
+ // to avoid complex logic or incorrect output.
+ match tcx.hir().expect_impl_item(impl_m.def_id.expect_local()).kind {
+ ImplItemKind::Fn(ref sig, _)
+ if sig.header.asyncness == hir::IsAsync::NotAsync =>
+ {
+ let msg = "change the output type to match the trait";
+ let ap = Applicability::MachineApplicable;
+ match sig.decl.output {
+ hir::FnRetTy::DefaultReturn(sp) => {
+ let sugg = format!("-> {} ", trait_sig.output());
+ diag.span_suggestion_verbose(sp, msg, sugg, ap);
+ }
+ hir::FnRetTy::Return(hir_ty) => {
+ let sugg = trait_sig.output();
+ diag.span_suggestion(hir_ty.span, msg, sugg, ap);
+ }
+ };
+ }
+ _ => {}
+ };
+ } else if let Some(trait_ty) = trait_sig.inputs().get(*i) {
+ diag.span_suggestion(
+ impl_err_span,
+ "change the parameter type to match the trait",
+ trait_ty,
+ Applicability::MachineApplicable,
+ );
+ }
+ }
+ _ => {}
+ }
+
+ infcx.err_ctxt().note_type_err(
+ &mut diag,
+ &cause,
+ trait_err_span.map(|sp| (sp, "type in trait".to_owned())),
+ Some(infer::ValuePairs::Terms(ExpectedFound {
+ expected: trait_fty.into(),
+ found: impl_fty.into(),
+ })),
+ terr,
+ false,
+ false,
+ );
+
+ return Err(diag.emit());
+ }
+
+ // Check that all obligations are satisfied by the implementation's
+ // version.
+ let errors = ocx.select_all_or_error();
+ if !errors.is_empty() {
+ let reported = infcx.err_ctxt().report_fulfillment_errors(&errors, None, false);
+ return Err(reported);
+ }
+
+ // Finally, resolve all regions. This catches wily misuses of
+ // lifetime parameters.
+ let outlives_environment = OutlivesEnvironment::with_bounds(
+ param_env,
+ Some(infcx),
+ infcx.implied_bounds_tys(param_env, impl_m_hir_id, wf_tys),
+ );
+ infcx.check_region_obligations_and_report_errors(
+ impl_m.def_id.expect_local(),
+ &outlives_environment,
+ );
+
+ Ok(())
+}
+
+pub fn collect_trait_impl_trait_tys<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ def_id: DefId,
+) -> Result<&'tcx FxHashMap<DefId, Ty<'tcx>>, ErrorGuaranteed> {
+ let impl_m = tcx.opt_associated_item(def_id).unwrap();
+ let trait_m = tcx.opt_associated_item(impl_m.trait_item_def_id.unwrap()).unwrap();
+ let impl_trait_ref = tcx.impl_trait_ref(impl_m.impl_container(tcx).unwrap()).unwrap();
+ let param_env = tcx.param_env(def_id);
+
+ let trait_to_impl_substs = impl_trait_ref.substs;
+
+ let impl_m_hir_id = tcx.hir().local_def_id_to_hir_id(impl_m.def_id.expect_local());
+ let return_span = tcx.hir().fn_decl_by_hir_id(impl_m_hir_id).unwrap().output.span();
+ let cause = ObligationCause::new(
+ return_span,
+ impl_m_hir_id,
+ ObligationCauseCode::CompareImplItemObligation {
+ impl_item_def_id: impl_m.def_id.expect_local(),
+ trait_item_def_id: trait_m.def_id,
+ kind: impl_m.kind,
+ },
+ );
+
+ // Create mapping from impl to placeholder.
+ let impl_to_placeholder_substs = InternalSubsts::identity_for_item(tcx, impl_m.def_id);
+
+ // Create mapping from trait to placeholder.
+ let trait_to_placeholder_substs =
+ impl_to_placeholder_substs.rebase_onto(tcx, impl_m.container_id(tcx), trait_to_impl_substs);
+
+ let infcx = &tcx.infer_ctxt().build();
+ let ocx = ObligationCtxt::new(infcx);
+
+ let norm_cause = ObligationCause::misc(return_span, impl_m_hir_id);
+ let impl_sig = ocx.normalize(
+ norm_cause.clone(),
+ param_env,
+ infcx.replace_bound_vars_with_fresh_vars(
+ return_span,
+ infer::HigherRankedType,
+ tcx.fn_sig(impl_m.def_id),
+ ),
+ );
+ let impl_return_ty = impl_sig.output();
+
+ let mut collector = ImplTraitInTraitCollector::new(&ocx, return_span, param_env, impl_m_hir_id);
+ let unnormalized_trait_sig = tcx
+ .liberate_late_bound_regions(
+ impl_m.def_id,
+ tcx.bound_fn_sig(trait_m.def_id).subst(tcx, trait_to_placeholder_substs),
+ )
+ .fold_with(&mut collector);
+ let trait_sig = ocx.normalize(norm_cause.clone(), param_env, unnormalized_trait_sig);
+ let trait_return_ty = trait_sig.output();
+
+ let wf_tys = FxHashSet::from_iter(
+ unnormalized_trait_sig.inputs_and_output.iter().chain(trait_sig.inputs_and_output.iter()),
+ );
+
+ match infcx.at(&cause, param_env).eq(trait_return_ty, impl_return_ty) {
+ Ok(infer::InferOk { value: (), obligations }) => {
+ ocx.register_obligations(obligations);
+ }
+ Err(terr) => {
+ let mut diag = struct_span_err!(
+ tcx.sess,
+ cause.span(),
+ E0053,
+ "method `{}` has an incompatible return type for trait",
+ trait_m.name
+ );
+ let hir = tcx.hir();
+ infcx.err_ctxt().note_type_err(
+ &mut diag,
+ &cause,
+ hir.get_if_local(impl_m.def_id)
+ .and_then(|node| node.fn_decl())
+ .map(|decl| (decl.output.span(), "return type in trait".to_owned())),
+ Some(infer::ValuePairs::Terms(ExpectedFound {
+ expected: trait_return_ty.into(),
+ found: impl_return_ty.into(),
+ })),
+ terr,
+ false,
+ false,
+ );
+ return Err(diag.emit());
+ }
+ }
+
+ // Unify the whole function signature. We need to do this to fully infer
+ // the lifetimes of the return type, but do this after unifying just the
+ // return types, since we want to avoid duplicating errors from
+ // `compare_predicate_entailment`.
+ match infcx
+ .at(&cause, param_env)
+ .eq(tcx.mk_fn_ptr(ty::Binder::dummy(trait_sig)), tcx.mk_fn_ptr(ty::Binder::dummy(impl_sig)))
+ {
+ Ok(infer::InferOk { value: (), obligations }) => {
+ ocx.register_obligations(obligations);
+ }
+ Err(terr) => {
+ let guar = tcx.sess.delay_span_bug(
+ return_span,
+ format!("could not unify `{trait_sig}` and `{impl_sig}`: {terr:?}"),
+ );
+ return Err(guar);
+ }
+ }
+
+ // Check that all obligations are satisfied by the implementation's
+ // RPITs.
+ let errors = ocx.select_all_or_error();
+ if !errors.is_empty() {
+ let reported = infcx.err_ctxt().report_fulfillment_errors(&errors, None, false);
+ return Err(reported);
+ }
+
+ // Finally, resolve all regions. This catches wily misuses of
+ // lifetime parameters.
+ let outlives_environment = OutlivesEnvironment::with_bounds(
+ param_env,
+ Some(infcx),
+ infcx.implied_bounds_tys(param_env, impl_m_hir_id, wf_tys),
+ );
+ infcx.check_region_obligations_and_report_errors(
+ impl_m.def_id.expect_local(),
+ &outlives_environment,
+ );
+
+ let mut collected_tys = FxHashMap::default();
+ for (def_id, (ty, substs)) in collector.types {
+ match infcx.fully_resolve(ty) {
+ Ok(ty) => {
+ // `ty` contains free regions that we created earlier while liberating the
+ // trait fn signature. However, projection normalization expects `ty` to
+ // contain `def_id`'s early-bound regions.
+ let id_substs = InternalSubsts::identity_for_item(tcx, def_id);
+ debug!(?id_substs, ?substs);
+ let map: FxHashMap<ty::GenericArg<'tcx>, ty::GenericArg<'tcx>> =
+ std::iter::zip(substs, id_substs).collect();
+ debug!(?map);
+
+ // NOTE(compiler-errors): RPITITs, like all other RPITs, have early-bound
+ // region substs that are synthesized during AST lowering. These are substs
+ // that are appended to the parent substs (trait and trait method). However,
+ // we're trying to infer the unsubstituted type value of the RPITIT inside
+ // the *impl*, so we can later use the impl's method substs to normalize
+ // an RPITIT to a concrete type (`confirm_impl_trait_in_trait_candidate`).
+ //
+ // Due to the design of RPITITs, during AST lowering, we have no idea that
+ // an impl method corresponds to a trait method with RPITITs in it. Therefore,
+ // we don't have a list of early-bound region substs for the RPITIT in the impl.
+ // Since early region parameters are index-based, we can't just rebase these
+ // (trait method) early-bound region substs onto the impl, and there's no
+ // guarantee that the indices from the trait substs and impl substs line up.
+ // So to fix this, we subtract the number of trait substs and add the number of
+ // impl substs to *renumber* these early-bound regions to their corresponding
+ // indices in the impl's substitutions list.
+ //
+ // Also, we only need to account for a difference in trait and impl substs,
+ // since we previously enforce that the trait method and impl method have the
+ // same generics.
+ let num_trait_substs = trait_to_impl_substs.len();
+ let num_impl_substs = tcx.generics_of(impl_m.container_id(tcx)).params.len();
+ let ty = tcx.fold_regions(ty, |region, _| {
+ let (ty::ReFree(_) | ty::ReEarlyBound(_)) = region.kind() else { return region; };
+ let Some(ty::ReEarlyBound(e)) = map.get(&region.into()).map(|r| r.expect_region().kind())
+ else {
+ tcx
+ .sess
+ .delay_span_bug(
+ return_span,
+ "expected ReFree to map to ReEarlyBound"
+ );
+ return tcx.lifetimes.re_static;
+ };
+ tcx.mk_region(ty::ReEarlyBound(ty::EarlyBoundRegion {
+ def_id: e.def_id,
+ name: e.name,
+ index: (e.index as usize - num_trait_substs + num_impl_substs) as u32,
+ }))
+ });
+ debug!(%ty);
+ collected_tys.insert(def_id, ty);
+ }
+ Err(err) => {
+ tcx.sess.delay_span_bug(
+ return_span,
+ format!("could not fully resolve: {ty} => {err:?}"),
+ );
+ collected_tys.insert(def_id, tcx.ty_error());
+ }
+ }
+ }
+
+ Ok(&*tcx.arena.alloc(collected_tys))
+}
+
+struct ImplTraitInTraitCollector<'a, 'tcx> {
+ ocx: &'a ObligationCtxt<'a, 'tcx>,
+ types: FxHashMap<DefId, (Ty<'tcx>, ty::SubstsRef<'tcx>)>,
+ span: Span,
+ param_env: ty::ParamEnv<'tcx>,
+ body_id: hir::HirId,
+}
+
+impl<'a, 'tcx> ImplTraitInTraitCollector<'a, 'tcx> {
+ fn new(
+ ocx: &'a ObligationCtxt<'a, 'tcx>,
+ span: Span,
+ param_env: ty::ParamEnv<'tcx>,
+ body_id: hir::HirId,
+ ) -> Self {
+ ImplTraitInTraitCollector { ocx, types: FxHashMap::default(), span, param_env, body_id }
+ }
+}
+
+impl<'tcx> TypeFolder<'tcx> for ImplTraitInTraitCollector<'_, 'tcx> {
+ fn tcx<'a>(&'a self) -> TyCtxt<'tcx> {
+ self.ocx.infcx.tcx
+ }
+
+ fn fold_ty(&mut self, ty: Ty<'tcx>) -> Ty<'tcx> {
+ if let ty::Projection(proj) = ty.kind()
+ && self.tcx().def_kind(proj.item_def_id) == DefKind::ImplTraitPlaceholder
+ {
+ if let Some((ty, _)) = self.types.get(&proj.item_def_id) {
+ return *ty;
+ }
+ //FIXME(RPITIT): Deny nested RPITIT in substs too
+ if proj.substs.has_escaping_bound_vars() {
+ bug!("FIXME(RPITIT): error here");
+ }
+ // Replace with infer var
+ let infer_ty = self.ocx.infcx.next_ty_var(TypeVariableOrigin {
+ span: self.span,
+ kind: TypeVariableOriginKind::MiscVariable,
+ });
+ self.types.insert(proj.item_def_id, (infer_ty, proj.substs));
+ // Recurse into bounds
+ for (pred, pred_span) in self.tcx().bound_explicit_item_bounds(proj.item_def_id).subst_iter_copied(self.tcx(), proj.substs) {
+ let pred = pred.fold_with(self);
+ let pred = self.ocx.normalize(
+ ObligationCause::misc(self.span, self.body_id),
+ self.param_env,
+ pred,
+ );
+
+ self.ocx.register_obligation(traits::Obligation::new(
+ ObligationCause::new(
+ self.span,
+ self.body_id,
+ ObligationCauseCode::BindingObligation(proj.item_def_id, pred_span),
+ ),
+ self.param_env,
+ pred,
+ ));
+ }
+ infer_ty
+ } else {
+ ty.super_fold_with(self)
+ }
+ }
+}
+
+fn check_region_bounds_on_impl_item<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ impl_m: &ty::AssocItem,
+ trait_m: &ty::AssocItem,
+ trait_generics: &ty::Generics,
+ impl_generics: &ty::Generics,
+) -> Result<(), ErrorGuaranteed> {
+ let trait_params = trait_generics.own_counts().lifetimes;
+ let impl_params = impl_generics.own_counts().lifetimes;
+
+ debug!(
+ "check_region_bounds_on_impl_item: \
+ trait_generics={:?} \
+ impl_generics={:?}",
+ trait_generics, impl_generics
+ );
+
+ // Must have same number of early-bound lifetime parameters.
+ // Unfortunately, if the user screws up the bounds, then this
+ // will change classification between early and late. E.g.,
+ // if in trait we have `<'a,'b:'a>`, and in impl we just have
+ // `<'a,'b>`, then we have 2 early-bound lifetime parameters
+ // in trait but 0 in the impl. But if we report "expected 2
+ // but found 0" it's confusing, because it looks like there
+ // are zero. Since I don't quite know how to phrase things at
+ // the moment, give a kind of vague error message.
+ if trait_params != impl_params {
+ let span = tcx
+ .hir()
+ .get_generics(impl_m.def_id.expect_local())
+ .expect("expected impl item to have generics or else we can't compare them")
+ .span;
+ let generics_span = if let Some(local_def_id) = trait_m.def_id.as_local() {
+ Some(
+ tcx.hir()
+ .get_generics(local_def_id)
+ .expect("expected trait item to have generics or else we can't compare them")
+ .span,
+ )
+ } else {
+ None
+ };
+
+ let reported = tcx.sess.emit_err(LifetimesOrBoundsMismatchOnTrait {
+ span,
+ item_kind: assoc_item_kind_str(impl_m),
+ ident: impl_m.ident(tcx),
+ generics_span,
+ });
+ return Err(reported);
+ }
+
+ Ok(())
+}
+
+#[instrument(level = "debug", skip(infcx))]
+fn extract_spans_for_error_reporting<'tcx>(
+ infcx: &infer::InferCtxt<'tcx>,
+ terr: TypeError<'_>,
+ cause: &ObligationCause<'tcx>,
+ impl_m: &ty::AssocItem,
+ trait_m: &ty::AssocItem,
+) -> (Span, Option<Span>) {
+ let tcx = infcx.tcx;
+ let mut impl_args = match tcx.hir().expect_impl_item(impl_m.def_id.expect_local()).kind {
+ ImplItemKind::Fn(ref sig, _) => {
+ sig.decl.inputs.iter().map(|t| t.span).chain(iter::once(sig.decl.output.span()))
+ }
+ _ => bug!("{:?} is not a method", impl_m),
+ };
+ let trait_args =
+ trait_m.def_id.as_local().map(|def_id| match tcx.hir().expect_trait_item(def_id).kind {
+ TraitItemKind::Fn(ref sig, _) => {
+ sig.decl.inputs.iter().map(|t| t.span).chain(iter::once(sig.decl.output.span()))
+ }
+ _ => bug!("{:?} is not a TraitItemKind::Fn", trait_m),
+ });
+
+ match terr {
+ TypeError::ArgumentMutability(i) => {
+ (impl_args.nth(i).unwrap(), trait_args.and_then(|mut args| args.nth(i)))
+ }
+ TypeError::ArgumentSorts(ExpectedFound { .. }, i) => {
+ (impl_args.nth(i).unwrap(), trait_args.and_then(|mut args| args.nth(i)))
+ }
+ _ => (cause.span(), tcx.hir().span_if_local(trait_m.def_id)),
+ }
+}
+
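+/// A mismatch in the presence of a `self` receiver between the trait and the impl is
+/// reported as E0185/E0186, e.g. (illustrative types):
+///
+/// ```compile_fail,E0186
+/// trait Foo {
+///     fn foo(&self);
+/// }
+///
+/// struct Bar;
+///
+/// impl Foo for Bar {
+///     fn foo() {} // `&self` declared in the trait, but not in the impl
+/// }
+/// ```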
+fn compare_self_type<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ impl_m: &ty::AssocItem,
+ impl_m_span: Span,
+ trait_m: &ty::AssocItem,
+ impl_trait_ref: ty::TraitRef<'tcx>,
+) -> Result<(), ErrorGuaranteed> {
+ // Try to give more informative error messages about self typing
+ // mismatches. Note that any mismatch will also be detected
+ // below, where we construct a canonical function type that
+ // includes the self parameter as a normal parameter. It's just
+ // that the error messages you get out of this code are a bit more
+ // inscrutable, particularly for cases where one method has no
+ // self.
+
+ let self_string = |method: &ty::AssocItem| {
+ let untransformed_self_ty = match method.container {
+ ty::ImplContainer => impl_trait_ref.self_ty(),
+ ty::TraitContainer => tcx.types.self_param,
+ };
+ let self_arg_ty = tcx.fn_sig(method.def_id).input(0);
+ let param_env = ty::ParamEnv::reveal_all();
+
+ let infcx = tcx.infer_ctxt().build();
+ let self_arg_ty = tcx.liberate_late_bound_regions(method.def_id, self_arg_ty);
+ let can_eq_self = |ty| infcx.can_eq(param_env, untransformed_self_ty, ty).is_ok();
+ match ExplicitSelf::determine(self_arg_ty, can_eq_self) {
+ ExplicitSelf::ByValue => "self".to_owned(),
+ ExplicitSelf::ByReference(_, hir::Mutability::Not) => "&self".to_owned(),
+ ExplicitSelf::ByReference(_, hir::Mutability::Mut) => "&mut self".to_owned(),
+ _ => format!("self: {self_arg_ty}"),
+ }
+ };
+
+ match (trait_m.fn_has_self_parameter, impl_m.fn_has_self_parameter) {
+ (false, false) | (true, true) => {}
+
+ (false, true) => {
+ let self_descr = self_string(impl_m);
+ let mut err = struct_span_err!(
+ tcx.sess,
+ impl_m_span,
+ E0185,
+ "method `{}` has a `{}` declaration in the impl, but not in the trait",
+ trait_m.name,
+ self_descr
+ );
+ err.span_label(impl_m_span, format!("`{self_descr}` used in impl"));
+ if let Some(span) = tcx.hir().span_if_local(trait_m.def_id) {
+ err.span_label(span, format!("trait method declared without `{self_descr}`"));
+ } else {
+ err.note_trait_signature(trait_m.name, trait_m.signature(tcx));
+ }
+ let reported = err.emit();
+ return Err(reported);
+ }
+
+ (true, false) => {
+ let self_descr = self_string(trait_m);
+ let mut err = struct_span_err!(
+ tcx.sess,
+ impl_m_span,
+ E0186,
+ "method `{}` has a `{}` declaration in the trait, but not in the impl",
+ trait_m.name,
+ self_descr
+ );
+ err.span_label(impl_m_span, format!("expected `{self_descr}` in impl"));
+ if let Some(span) = tcx.hir().span_if_local(trait_m.def_id) {
+ err.span_label(span, format!("`{self_descr}` used in trait"));
+ } else {
+ err.note_trait_signature(trait_m.name, trait_m.signature(tcx));
+ }
+ let reported = err.emit();
+ return Err(reported);
+ }
+ }
+
+ Ok(())
+}
+
+/// Checks that the number of generics on a given assoc item in a trait impl is the same
+/// as the number of generics on the respective assoc item in the trait definition.
+///
+/// For example, the errors in the following code are emitted here:
+/// ```
+/// trait Trait {
+/// fn foo();
+/// type Assoc<T>;
+/// }
+///
+/// impl Trait for () {
+/// fn foo<T>() {}
+/// //~^ error
+/// type Assoc = u32;
+/// //~^ error
+/// }
+/// ```
+///
+/// Notably this does not error on `foo<T>` implemented as `foo<const N: u8>` or
+/// `foo<const N: u8>` implemented as `foo<const N: u32>`. This is handled in
+/// [`compare_generic_param_kinds`]. This function also does not handle lifetime parameters
+fn compare_number_of_generics<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ impl_: &ty::AssocItem,
+ _impl_span: Span,
+ trait_: &ty::AssocItem,
+ trait_span: Option<Span>,
+) -> Result<(), ErrorGuaranteed> {
+ let trait_own_counts = tcx.generics_of(trait_.def_id).own_counts();
+ let impl_own_counts = tcx.generics_of(impl_.def_id).own_counts();
+
+ // This avoids us erroring on `foo<T>` implemented as `foo<const N: u8>` as this is implemented
+ // in `compare_generic_param_kinds` which will give a nicer error message than something like:
+ // "expected 1 type parameter, found 0 type parameters"
+ if (trait_own_counts.types + trait_own_counts.consts)
+ == (impl_own_counts.types + impl_own_counts.consts)
+ {
+ return Ok(());
+ }
+
+ let matchings = [
+ ("type", trait_own_counts.types, impl_own_counts.types),
+ ("const", trait_own_counts.consts, impl_own_counts.consts),
+ ];
+
+ let item_kind = assoc_item_kind_str(impl_);
+
+ let mut err_occurred = None;
+ for (kind, trait_count, impl_count) in matchings {
+ if impl_count != trait_count {
+ let arg_spans = |kind: ty::AssocKind, generics: &hir::Generics<'_>| {
+ let mut spans = generics
+ .params
+ .iter()
+ .filter(|p| match p.kind {
+ hir::GenericParamKind::Lifetime {
+ kind: hir::LifetimeParamKind::Elided,
+ } => {
+ // A fn can have an arbitrary number of extra elided lifetimes for the
+ // same signature.
+ !matches!(kind, ty::AssocKind::Fn)
+ }
+ _ => true,
+ })
+ .map(|p| p.span)
+ .collect::<Vec<Span>>();
+ if spans.is_empty() {
+ spans = vec![generics.span]
+ }
+ spans
+ };
+ let (trait_spans, impl_trait_spans) = if let Some(def_id) = trait_.def_id.as_local() {
+ let trait_item = tcx.hir().expect_trait_item(def_id);
+ let arg_spans: Vec<Span> = arg_spans(trait_.kind, trait_item.generics);
+ let impl_trait_spans: Vec<Span> = trait_item
+ .generics
+ .params
+ .iter()
+ .filter_map(|p| match p.kind {
+ GenericParamKind::Type { synthetic: true, .. } => Some(p.span),
+ _ => None,
+ })
+ .collect();
+ (Some(arg_spans), impl_trait_spans)
+ } else {
+ (trait_span.map(|s| vec![s]), vec![])
+ };
+
+ let impl_item = tcx.hir().expect_impl_item(impl_.def_id.expect_local());
+ let impl_item_impl_trait_spans: Vec<Span> = impl_item
+ .generics
+ .params
+ .iter()
+ .filter_map(|p| match p.kind {
+ GenericParamKind::Type { synthetic: true, .. } => Some(p.span),
+ _ => None,
+ })
+ .collect();
+ let spans = arg_spans(impl_.kind, impl_item.generics);
+ let span = spans.first().copied();
+
+ let mut err = tcx.sess.struct_span_err_with_code(
+ spans,
+ &format!(
+ "{} `{}` has {} {kind} parameter{} but its trait \
+ declaration has {} {kind} parameter{}",
+ item_kind,
+ trait_.name,
+ impl_count,
+ pluralize!(impl_count),
+ trait_count,
+ pluralize!(trait_count),
+ kind = kind,
+ ),
+ DiagnosticId::Error("E0049".into()),
+ );
+
+ let mut suffix = None;
+
+ if let Some(spans) = trait_spans {
+ let mut spans = spans.iter();
+ if let Some(span) = spans.next() {
+ err.span_label(
+ *span,
+ format!(
+ "expected {} {} parameter{}",
+ trait_count,
+ kind,
+ pluralize!(trait_count),
+ ),
+ );
+ }
+ for span in spans {
+ err.span_label(*span, "");
+ }
+ } else {
+ suffix = Some(format!(", expected {trait_count}"));
+ }
+
+ if let Some(span) = span {
+ err.span_label(
+ span,
+ format!(
+ "found {} {} parameter{}{}",
+ impl_count,
+ kind,
+ pluralize!(impl_count),
+ suffix.unwrap_or_else(String::new),
+ ),
+ );
+ }
+
+ for span in impl_trait_spans.iter().chain(impl_item_impl_trait_spans.iter()) {
+ err.span_label(*span, "`impl Trait` introduces an implicit type parameter");
+ }
+
+ let reported = err.emit();
+ err_occurred = Some(reported);
+ }
+ }
+
+ if let Some(reported) = err_occurred { Err(reported) } else { Ok(()) }
+}
+
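+/// An argument-count mismatch between the impl method and the trait method is reported
+/// as E0050, e.g. (illustrative types):
+///
+/// ```compile_fail,E0050
+/// trait Foo {
+///     fn foo(&self, x: u8) -> bool;
+/// }
+///
+/// struct Bar;
+///
+/// impl Foo for Bar {
+///     fn foo(&self) -> bool { true } // expected 2 parameters, found 1
+/// }
+/// ```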
+fn compare_number_of_method_arguments<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ impl_m: &ty::AssocItem,
+ impl_m_span: Span,
+ trait_m: &ty::AssocItem,
+ trait_item_span: Option<Span>,
+) -> Result<(), ErrorGuaranteed> {
+ let impl_m_fty = tcx.fn_sig(impl_m.def_id);
+ let trait_m_fty = tcx.fn_sig(trait_m.def_id);
+ let trait_number_args = trait_m_fty.inputs().skip_binder().len();
+ let impl_number_args = impl_m_fty.inputs().skip_binder().len();
+ if trait_number_args != impl_number_args {
+ let trait_span = if let Some(def_id) = trait_m.def_id.as_local() {
+ match tcx.hir().expect_trait_item(def_id).kind {
+ TraitItemKind::Fn(ref trait_m_sig, _) => {
+ let pos = if trait_number_args > 0 { trait_number_args - 1 } else { 0 };
+ if let Some(arg) = trait_m_sig.decl.inputs.get(pos) {
+ Some(if pos == 0 {
+ arg.span
+ } else {
+ arg.span.with_lo(trait_m_sig.decl.inputs[0].span.lo())
+ })
+ } else {
+ trait_item_span
+ }
+ }
+ _ => bug!("{:?} is not a method", impl_m),
+ }
+ } else {
+ trait_item_span
+ };
+ let impl_span = match tcx.hir().expect_impl_item(impl_m.def_id.expect_local()).kind {
+ ImplItemKind::Fn(ref impl_m_sig, _) => {
+ let pos = if impl_number_args > 0 { impl_number_args - 1 } else { 0 };
+ if let Some(arg) = impl_m_sig.decl.inputs.get(pos) {
+ if pos == 0 {
+ arg.span
+ } else {
+ arg.span.with_lo(impl_m_sig.decl.inputs[0].span.lo())
+ }
+ } else {
+ impl_m_span
+ }
+ }
+ _ => bug!("{:?} is not a method", impl_m),
+ };
+ let mut err = struct_span_err!(
+ tcx.sess,
+ impl_span,
+ E0050,
+ "method `{}` has {} but the declaration in trait `{}` has {}",
+ trait_m.name,
+ potentially_plural_count(impl_number_args, "parameter"),
+ tcx.def_path_str(trait_m.def_id),
+ trait_number_args
+ );
+ if let Some(trait_span) = trait_span {
+ err.span_label(
+ trait_span,
+ format!(
+ "trait requires {}",
+ potentially_plural_count(trait_number_args, "parameter")
+ ),
+ );
+ } else {
+ err.note_trait_signature(trait_m.name, trait_m.signature(tcx));
+ }
+ err.span_label(
+ impl_span,
+ format!(
+ "expected {}, found {}",
+ potentially_plural_count(trait_number_args, "parameter"),
+ impl_number_args
+ ),
+ );
+ let reported = err.emit();
+ return Err(reported);
+ }
+
+ Ok(())
+}
+
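+/// A mismatch between `impl Trait` in argument position and an explicit generic
+/// parameter is reported as E0643, e.g. (illustrative types):
+///
+/// ```compile_fail,E0643
+/// trait Foo {
+///     fn foo(&self, _: &impl Iterator);
+/// }
+///
+/// impl Foo for () {
+///     fn foo<T: Iterator>(&self, _: &T) {} // expected `impl Trait`, found generic parameter
+/// }
+/// ```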
+fn compare_synthetic_generics<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ impl_m: &ty::AssocItem,
+ trait_m: &ty::AssocItem,
+) -> Result<(), ErrorGuaranteed> {
+ // FIXME(chrisvittal) Clean up this function, list of FIXME items:
+ // 1. Better messages for the span labels
+ // 2. Explanation as to what is going on
+ // If we get here, we already have the same number of generics, so the zip will
+ // be okay.
+ let mut error_found = None;
+ let impl_m_generics = tcx.generics_of(impl_m.def_id);
+ let trait_m_generics = tcx.generics_of(trait_m.def_id);
+ let impl_m_type_params = impl_m_generics.params.iter().filter_map(|param| match param.kind {
+ GenericParamDefKind::Type { synthetic, .. } => Some((param.def_id, synthetic)),
+ GenericParamDefKind::Lifetime | GenericParamDefKind::Const { .. } => None,
+ });
+ let trait_m_type_params = trait_m_generics.params.iter().filter_map(|param| match param.kind {
+ GenericParamDefKind::Type { synthetic, .. } => Some((param.def_id, synthetic)),
+ GenericParamDefKind::Lifetime | GenericParamDefKind::Const { .. } => None,
+ });
+ for ((impl_def_id, impl_synthetic), (trait_def_id, trait_synthetic)) in
+ iter::zip(impl_m_type_params, trait_m_type_params)
+ {
+ if impl_synthetic != trait_synthetic {
+ let impl_def_id = impl_def_id.expect_local();
+ let impl_span = tcx.def_span(impl_def_id);
+ let trait_span = tcx.def_span(trait_def_id);
+ let mut err = struct_span_err!(
+ tcx.sess,
+ impl_span,
+ E0643,
+ "method `{}` has incompatible signature for trait",
+ trait_m.name
+ );
+ err.span_label(trait_span, "declaration in trait here");
+ match (impl_synthetic, trait_synthetic) {
+ // The case where the impl method uses `impl Trait` but the trait method uses
+ // explicit generics
+ (true, false) => {
+ err.span_label(impl_span, "expected generic parameter, found `impl Trait`");
+ (|| {
+ // try taking the name from the trait impl
+ // FIXME: this is obviously suboptimal since the name can already be used
+ // as another generic argument
+ let new_name = tcx.opt_item_name(trait_def_id)?;
+ let trait_m = trait_m.def_id.as_local()?;
+ let trait_m = tcx.hir().expect_trait_item(trait_m);
+
+ let impl_m = impl_m.def_id.as_local()?;
+ let impl_m = tcx.hir().expect_impl_item(impl_m);
+
+ // in case there are no generics, take the spot between the function name
+ // and the opening paren of the argument list
+ let new_generics_span = tcx.def_ident_span(impl_def_id)?.shrink_to_hi();
+ // in case there are generics, just replace them
+ let generics_span =
+ impl_m.generics.span.substitute_dummy(new_generics_span);
+ // replace with the generics from the trait
+ let new_generics =
+ tcx.sess.source_map().span_to_snippet(trait_m.generics.span).ok()?;
+
+ err.multipart_suggestion(
+ "try changing the `impl Trait` argument to a generic parameter",
+ vec![
+ // replace `impl Trait` with `T`
+ (impl_span, new_name.to_string()),
+ // replace impl method generics with trait method generics
+ // This isn't quite right, as users might have changed the names
+ // of the generics, but it works for the common case
+ (generics_span, new_generics),
+ ],
+ Applicability::MaybeIncorrect,
+ );
+ Some(())
+ })();
+ }
+ // The case where the trait method uses `impl Trait`, but the impl method uses
+ // explicit generics.
+ (false, true) => {
+ err.span_label(impl_span, "expected `impl Trait`, found generic parameter");
+ (|| {
+ let impl_m = impl_m.def_id.as_local()?;
+ let impl_m = tcx.hir().expect_impl_item(impl_m);
+ let input_tys = match impl_m.kind {
+ hir::ImplItemKind::Fn(ref sig, _) => sig.decl.inputs,
+ _ => unreachable!(),
+ };
+ struct Visitor(Option<Span>, hir::def_id::LocalDefId);
+ impl<'v> intravisit::Visitor<'v> for Visitor {
+ fn visit_ty(&mut self, ty: &'v hir::Ty<'v>) {
+ intravisit::walk_ty(self, ty);
+ if let hir::TyKind::Path(hir::QPath::Resolved(None, ref path)) =
+ ty.kind
+ && let Res::Def(DefKind::TyParam, def_id) = path.res
+ && def_id == self.1.to_def_id()
+ {
+ self.0 = Some(ty.span);
+ }
+ }
+ }
+ let mut visitor = Visitor(None, impl_def_id);
+ for ty in input_tys {
+ intravisit::Visitor::visit_ty(&mut visitor, ty);
+ }
+ let span = visitor.0?;
+
+ let bounds = impl_m.generics.bounds_for_param(impl_def_id).next()?.bounds;
+ let bounds = bounds.first()?.span().to(bounds.last()?.span());
+ let bounds = tcx.sess.source_map().span_to_snippet(bounds).ok()?;
+
+ err.multipart_suggestion(
+ "try removing the generic parameter and using `impl Trait` instead",
+ vec![
+ // delete generic parameters
+ (impl_m.generics.span, String::new()),
+ // replace param usage with `impl Trait`
+ (span, format!("impl {bounds}")),
+ ],
+ Applicability::MaybeIncorrect,
+ );
+ Some(())
+ })();
+ }
+ _ => unreachable!(),
+ }
+ let reported = err.emit();
+ error_found = Some(reported);
+ }
+ }
+ if let Some(reported) = error_found { Err(reported) } else { Ok(()) }
+}
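
A hypothetical pair that trips the E0643 branch above, with the impl using argument-position `impl Trait` where the trait declares an explicit generic parameter (the opposite direction is handled symmetrically):

```
trait Stream {
    fn poll<T: Copy>(&self, item: T);
}

impl Stream for () {
    // error[E0643]: method `poll` has incompatible signature for trait
    fn poll(&self, item: impl Copy) {}
}
```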
+
+/// Checks that all parameters in the generics of a given assoc item in a trait impl have
+/// the same kind as the respective generic parameter in the trait def.
+///
+/// For example all 4 errors in the following code are emitted here:
+/// ```
+/// trait Foo {
+/// fn foo<const N: u8>();
+/// type bar<const N: u8>;
+/// fn baz<const N: u32>();
+/// type blah<T>;
+/// }
+///
+/// impl Foo for () {
+/// fn foo<const N: u64>() {}
+/// //~^ error
+/// type bar<const N: u64> = u32;
+/// //~^ error
+/// fn baz<T>() {}
+/// //~^ error
+/// type blah<const N: i64> = u32;
+/// //~^ error
+/// }
+/// ```
+///
+/// This function does not handle lifetime parameters
+fn compare_generic_param_kinds<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ impl_item: &ty::AssocItem,
+ trait_item: &ty::AssocItem,
+) -> Result<(), ErrorGuaranteed> {
+ assert_eq!(impl_item.kind, trait_item.kind);
+
+ let ty_const_params_of = |def_id| {
+ tcx.generics_of(def_id).params.iter().filter(|param| {
+ matches!(
+ param.kind,
+ GenericParamDefKind::Const { .. } | GenericParamDefKind::Type { .. }
+ )
+ })
+ };
+
+ for (param_impl, param_trait) in
+ iter::zip(ty_const_params_of(impl_item.def_id), ty_const_params_of(trait_item.def_id))
+ {
+ use GenericParamDefKind::*;
+ if match (&param_impl.kind, &param_trait.kind) {
+ (Const { .. }, Const { .. })
+ if tcx.type_of(param_impl.def_id) != tcx.type_of(param_trait.def_id) =>
+ {
+ true
+ }
+ (Const { .. }, Type { .. }) | (Type { .. }, Const { .. }) => true,
+ // this is exhaustive so that anyone adding new generic param kinds knows
+ // to make sure this error is reported for them.
+ (Const { .. }, Const { .. }) | (Type { .. }, Type { .. }) => false,
+ (Lifetime { .. }, _) | (_, Lifetime { .. }) => unreachable!(),
+ } {
+ let param_impl_span = tcx.def_span(param_impl.def_id);
+ let param_trait_span = tcx.def_span(param_trait.def_id);
+
+ let mut err = struct_span_err!(
+ tcx.sess,
+ param_impl_span,
+ E0053,
+ "{} `{}` has an incompatible generic parameter for trait `{}`",
+ assoc_item_kind_str(&impl_item),
+ trait_item.name,
+ &tcx.def_path_str(tcx.parent(trait_item.def_id))
+ );
+
+ let make_param_message = |prefix: &str, param: &ty::GenericParamDef| match param.kind {
+ Const { .. } => {
+ format!("{} const parameter of type `{}`", prefix, tcx.type_of(param.def_id))
+ }
+ Type { .. } => format!("{} type parameter", prefix),
+ Lifetime { .. } => unreachable!(),
+ };
+
+ let trait_header_span = tcx.def_ident_span(tcx.parent(trait_item.def_id)).unwrap();
+ err.span_label(trait_header_span, "");
+ err.span_label(param_trait_span, make_param_message("expected", param_trait));
+
+ let impl_header_span = tcx.def_span(tcx.parent(impl_item.def_id));
+ err.span_label(impl_header_span, "");
+ err.span_label(param_impl_span, make_param_message("found", param_impl));
+
+ let reported = err.emit();
+ return Err(reported);
+ }
+ }
+
+ Ok(())
+}
+
+/// Use `tcx.compare_assoc_const_impl_item_with_trait_item` instead
+pub(crate) fn raw_compare_const_impl<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ (impl_const_item_def, trait_const_item_def): (LocalDefId, DefId),
+) -> Result<(), ErrorGuaranteed> {
+ let impl_const_item = tcx.associated_item(impl_const_item_def);
+ let trait_const_item = tcx.associated_item(trait_const_item_def);
+ let impl_trait_ref = tcx.impl_trait_ref(impl_const_item.container_id(tcx)).unwrap();
+ debug!("compare_const_impl(impl_trait_ref={:?})", impl_trait_ref);
+
+ let impl_c_span = tcx.def_span(impl_const_item_def.to_def_id());
+
+ let infcx = tcx.infer_ctxt().build();
+ let param_env = tcx.param_env(impl_const_item_def.to_def_id());
+ let ocx = ObligationCtxt::new(&infcx);
+
+ // The below is for the most part highly similar to the procedure
+ // for methods above. It is simpler in many respects, especially
+ // because we shouldn't really have to deal with lifetimes or
+ // predicates. In fact some of this should probably be put into
+ // shared functions because of DRY violations...
+ let trait_to_impl_substs = impl_trait_ref.substs;
+
+ // Create a parameter environment that represents the implementation's
+ // associated const.
+ let impl_c_hir_id = tcx.hir().local_def_id_to_hir_id(impl_const_item_def);
+
+ // Compute placeholder form of impl and trait const tys.
+ let impl_ty = tcx.type_of(impl_const_item_def.to_def_id());
+ let trait_ty = tcx.bound_type_of(trait_const_item_def).subst(tcx, trait_to_impl_substs);
+ let mut cause = ObligationCause::new(
+ impl_c_span,
+ impl_c_hir_id,
+ ObligationCauseCode::CompareImplItemObligation {
+ impl_item_def_id: impl_const_item_def,
+ trait_item_def_id: trait_const_item_def,
+ kind: impl_const_item.kind,
+ },
+ );
+
+ // There is no "body" here, so just pass dummy id.
+ let impl_ty = ocx.normalize(cause.clone(), param_env, impl_ty);
+
+ debug!("compare_const_impl: impl_ty={:?}", impl_ty);
+
+ let trait_ty = ocx.normalize(cause.clone(), param_env, trait_ty);
+
+ debug!("compare_const_impl: trait_ty={:?}", trait_ty);
+
+ let err = infcx
+ .at(&cause, param_env)
+ .sup(trait_ty, impl_ty)
+ .map(|ok| ocx.register_infer_ok_obligations(ok));
+
+ if let Err(terr) = err {
+ debug!(
+ "checking associated const for compatibility: impl ty {:?}, trait ty {:?}",
+ impl_ty, trait_ty
+ );
+
+ // Locate the Span containing just the type of the offending impl
+ match tcx.hir().expect_impl_item(impl_const_item_def).kind {
+ ImplItemKind::Const(ref ty, _) => cause.span = ty.span,
+ _ => bug!("{:?} is not an impl const", impl_const_item),
+ }
+
+ let mut diag = struct_span_err!(
+ tcx.sess,
+ cause.span,
+ E0326,
+ "implemented const `{}` has an incompatible type for trait",
+ trait_const_item.name
+ );
+
+ let trait_c_span = trait_const_item_def.as_local().map(|trait_c_def_id| {
+ // Add a label to the Span containing just the type of the const
+ match tcx.hir().expect_trait_item(trait_c_def_id).kind {
+ TraitItemKind::Const(ref ty, _) => ty.span,
+ _ => bug!("{:?} is not a trait const", trait_const_item),
+ }
+ });
+
+ infcx.err_ctxt().note_type_err(
+ &mut diag,
+ &cause,
+ trait_c_span.map(|span| (span, "type in trait".to_owned())),
+ Some(infer::ValuePairs::Terms(ExpectedFound {
+ expected: trait_ty.into(),
+ found: impl_ty.into(),
+ })),
+ terr,
+ false,
+ false,
+ );
+ return Err(diag.emit());
+ };
+
+ // Check that all obligations are satisfied by the implementation's
+ // version.
+ let errors = ocx.select_all_or_error();
+ if !errors.is_empty() {
+ return Err(infcx.err_ctxt().report_fulfillment_errors(&errors, None, false));
+ }
+
+ // FIXME return `ErrorReported` if region obligations error?
+ let outlives_environment = OutlivesEnvironment::new(param_env);
+ infcx.check_region_obligations_and_report_errors(impl_const_item_def, &outlives_environment);
+ Ok(())
+}
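
For the associated-const comparison, a minimal sketch of the mismatch that ends up in the E0326 branch; the diagnostic span is narrowed to the type of the offending impl const as computed above:

```
trait Answer {
    const VALUE: u32;
}

impl Answer for () {
    // error[E0326]: implemented const `VALUE` has an incompatible type for trait
    const VALUE: i32 = 42;
}
```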
+
+pub(crate) fn compare_ty_impl<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ impl_ty: &ty::AssocItem,
+ impl_ty_span: Span,
+ trait_ty: &ty::AssocItem,
+ impl_trait_ref: ty::TraitRef<'tcx>,
+ trait_item_span: Option<Span>,
+) {
+ debug!("compare_impl_type(impl_trait_ref={:?})", impl_trait_ref);
+
+ let _: Result<(), ErrorGuaranteed> = (|| {
+ compare_number_of_generics(tcx, impl_ty, impl_ty_span, trait_ty, trait_item_span)?;
+
+ compare_generic_param_kinds(tcx, impl_ty, trait_ty)?;
+
+ let sp = tcx.def_span(impl_ty.def_id);
+ compare_type_predicate_entailment(tcx, impl_ty, sp, trait_ty, impl_trait_ref)?;
+
+ check_type_bounds(tcx, trait_ty, impl_ty, impl_ty_span, impl_trait_ref)
+ })();
+}
+
+/// The equivalent of [compare_predicate_entailment], but for associated types
+/// instead of associated functions.
+fn compare_type_predicate_entailment<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ impl_ty: &ty::AssocItem,
+ impl_ty_span: Span,
+ trait_ty: &ty::AssocItem,
+ impl_trait_ref: ty::TraitRef<'tcx>,
+) -> Result<(), ErrorGuaranteed> {
+ let impl_substs = InternalSubsts::identity_for_item(tcx, impl_ty.def_id);
+ let trait_to_impl_substs =
+ impl_substs.rebase_onto(tcx, impl_ty.container_id(tcx), impl_trait_ref.substs);
+
+ let impl_ty_generics = tcx.generics_of(impl_ty.def_id);
+ let trait_ty_generics = tcx.generics_of(trait_ty.def_id);
+ let impl_ty_predicates = tcx.predicates_of(impl_ty.def_id);
+ let trait_ty_predicates = tcx.predicates_of(trait_ty.def_id);
+
+ check_region_bounds_on_impl_item(
+ tcx,
+ impl_ty,
+ trait_ty,
+ &trait_ty_generics,
+ &impl_ty_generics,
+ )?;
+
+ let impl_ty_own_bounds = impl_ty_predicates.instantiate_own(tcx, impl_substs);
+
+ if impl_ty_own_bounds.is_empty() {
+ // Nothing to check.
+ return Ok(());
+ }
+
+ // This `HirId` should be used for the `body_id` field on each
+ // `ObligationCause` (and the `FnCtxt`). This is what
+ // `regionck_item` expects.
+ let impl_ty_hir_id = tcx.hir().local_def_id_to_hir_id(impl_ty.def_id.expect_local());
+ debug!("compare_type_predicate_entailment: trait_to_impl_substs={:?}", trait_to_impl_substs);
+
+ // The predicates declared by the impl definition, the trait and the
+ // associated type in the trait are assumed.
+ let impl_predicates = tcx.predicates_of(impl_ty_predicates.parent.unwrap());
+ let mut hybrid_preds = impl_predicates.instantiate_identity(tcx);
+ hybrid_preds
+ .predicates
+ .extend(trait_ty_predicates.instantiate_own(tcx, trait_to_impl_substs).predicates);
+
+ debug!("compare_type_predicate_entailment: bounds={:?}", hybrid_preds);
+
+ let normalize_cause = traits::ObligationCause::misc(impl_ty_span, impl_ty_hir_id);
+ let param_env = ty::ParamEnv::new(
+ tcx.intern_predicates(&hybrid_preds.predicates),
+ Reveal::UserFacing,
+ hir::Constness::NotConst,
+ );
+ let param_env = traits::normalize_param_env_or_error(tcx, param_env, normalize_cause);
+ let infcx = tcx.infer_ctxt().build();
+ let ocx = ObligationCtxt::new(&infcx);
+
+ debug!("compare_type_predicate_entailment: caller_bounds={:?}", param_env.caller_bounds());
+
+ let mut selcx = traits::SelectionContext::new(&infcx);
+
+ assert_eq!(impl_ty_own_bounds.predicates.len(), impl_ty_own_bounds.spans.len());
+ for (span, predicate) in std::iter::zip(impl_ty_own_bounds.spans, impl_ty_own_bounds.predicates)
+ {
+ let cause = ObligationCause::misc(span, impl_ty_hir_id);
+ let traits::Normalized { value: predicate, obligations } =
+ traits::normalize(&mut selcx, param_env, cause, predicate);
+
+ let cause = ObligationCause::new(
+ span,
+ impl_ty_hir_id,
+ ObligationCauseCode::CompareImplItemObligation {
+ impl_item_def_id: impl_ty.def_id.expect_local(),
+ trait_item_def_id: trait_ty.def_id,
+ kind: impl_ty.kind,
+ },
+ );
+ ocx.register_obligations(obligations);
+ ocx.register_obligation(traits::Obligation::new(cause, param_env, predicate));
+ }
+
+ // Check that all obligations are satisfied by the implementation's
+ // version.
+ let errors = ocx.select_all_or_error();
+ if !errors.is_empty() {
+ let reported = infcx.err_ctxt().report_fulfillment_errors(&errors, None, false);
+ return Err(reported);
+ }
+
+ // Finally, resolve all regions. This catches wily misuses of
+ // lifetime parameters.
+ let outlives_environment = OutlivesEnvironment::new(param_env);
+ infcx.check_region_obligations_and_report_errors(
+ impl_ty.def_id.expect_local(),
+ &outlives_environment,
+ );
+
+ Ok(())
+}
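
A sketch of what this entailment check is likely to reject for generic associated types: an impl that attaches a where clause the trait does not declare. The resulting fulfillment error is reported against the `CompareImplItemObligation` cause registered above (typically rendered as "impl has stricter requirements than trait"). This example is illustrative, not taken from the patch:

```
trait Map {
    type Output<T>;
}

impl Map for () {
    // the extra `T: Clone` is not implied by the trait's predicates,
    // so the registered obligation cannot be discharged
    type Output<T> = Vec<T> where T: Clone;
}
```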
+
+/// Validate that `ProjectionCandidate`s created for this associated type will
+/// be valid.
+///
+/// Usually given
+///
+/// trait X { type Y: Copy } impl X for T { type Y = S; }
+///
+/// We are able to normalize `<T as X>::Y` to `S`, and so when we check the
+/// impl is well-formed we have to prove `S: Copy`.
+///
+/// For default associated types the normalization is not possible (the value
+/// from the impl could be overridden). We also can't normalize generic
+/// associated types (yet) because they contain bound parameters.
+#[instrument(level = "debug", skip(tcx))]
+pub fn check_type_bounds<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ trait_ty: &ty::AssocItem,
+ impl_ty: &ty::AssocItem,
+ impl_ty_span: Span,
+ impl_trait_ref: ty::TraitRef<'tcx>,
+) -> Result<(), ErrorGuaranteed> {
+ // Given
+ //
+ // impl<A, B> Foo<u32> for (A, B) {
+ // type Bar<C> =...
+ // }
+ //
+ // - `impl_trait_ref` would be `<(A, B) as Foo<u32>>`
+ // - `impl_ty_substs` would be `[A, B, ^0.0]` (`^0.0` here is the bound var with db 0 and index 0)
+ // - `rebased_substs` would be `[(A, B), u32, ^0.0]`, combining the substs from
+ // the *trait* with the generic associated type parameters (as bound vars).
+ //
+ // A note regarding the use of bound vars here:
+ // Imagine as an example
+ // ```
+ // trait Family {
+ // type Member<C: Eq>;
+ // }
+ //
+ // impl Family for VecFamily {
+ // type Member<C: Eq> = i32;
+ // }
+ // ```
+ // Here, we would generate
+ // ```notrust
+ // forall<C> { Normalize(<VecFamily as Family>::Member<C> => i32) }
+ // ```
+ // when we really would like to generate
+ // ```notrust
+ // forall<C> { Normalize(<VecFamily as Family>::Member<C> => i32) :- Implemented(C: Eq) }
+ // ```
+ // But, this is probably fine, because although the first clause can be used with types C that
+ // do not implement Eq, for it to cause some kind of problem, there would have to be a
+ // VecFamily::Member<X> for some type X where !(X: Eq), that appears in the value of type
+ // Member<C: Eq> = .... That type would fail a well-formedness check that we ought to be doing
+ // elsewhere, which would check that any <T as Family>::Member<X> meets the bounds declared in
+ // the trait (notably, that X: Eq and T: Family).
+ let defs: &ty::Generics = tcx.generics_of(impl_ty.def_id);
+ let mut substs = smallvec::SmallVec::with_capacity(defs.count());
+ if let Some(def_id) = defs.parent {
+ let parent_defs = tcx.generics_of(def_id);
+ InternalSubsts::fill_item(&mut substs, tcx, parent_defs, &mut |param, _| {
+ tcx.mk_param_from_def(param)
+ });
+ }
+ let mut bound_vars: smallvec::SmallVec<[ty::BoundVariableKind; 8]> =
+ smallvec::SmallVec::with_capacity(defs.count());
+ InternalSubsts::fill_single(&mut substs, defs, &mut |param, _| match param.kind {
+ GenericParamDefKind::Type { .. } => {
+ let kind = ty::BoundTyKind::Param(param.name);
+ let bound_var = ty::BoundVariableKind::Ty(kind);
+ bound_vars.push(bound_var);
+ tcx.mk_ty(ty::Bound(
+ ty::INNERMOST,
+ ty::BoundTy { var: ty::BoundVar::from_usize(bound_vars.len() - 1), kind },
+ ))
+ .into()
+ }
+ GenericParamDefKind::Lifetime => {
+ let kind = ty::BoundRegionKind::BrNamed(param.def_id, param.name);
+ let bound_var = ty::BoundVariableKind::Region(kind);
+ bound_vars.push(bound_var);
+ tcx.mk_region(ty::ReLateBound(
+ ty::INNERMOST,
+ ty::BoundRegion { var: ty::BoundVar::from_usize(bound_vars.len() - 1), kind },
+ ))
+ .into()
+ }
+ GenericParamDefKind::Const { .. } => {
+ let bound_var = ty::BoundVariableKind::Const;
+ bound_vars.push(bound_var);
+ tcx.mk_const(ty::ConstS {
+ ty: tcx.type_of(param.def_id),
+ kind: ty::ConstKind::Bound(
+ ty::INNERMOST,
+ ty::BoundVar::from_usize(bound_vars.len() - 1),
+ ),
+ })
+ .into()
+ }
+ });
+ let bound_vars = tcx.mk_bound_variable_kinds(bound_vars.into_iter());
+ let impl_ty_substs = tcx.intern_substs(&substs);
+ let container_id = impl_ty.container_id(tcx);
+
+ let rebased_substs = impl_ty_substs.rebase_onto(tcx, container_id, impl_trait_ref.substs);
+ let impl_ty_value = tcx.type_of(impl_ty.def_id);
+
+ let param_env = tcx.param_env(impl_ty.def_id);
+
+ // When checking something like
+ //
+ // trait X { type Y: PartialEq<<Self as X>::Y> }
+ // impl X for T { default type Y = S; }
+ //
+ // We will have to prove the bound S: PartialEq<<T as X>::Y>. In this case
+ // we want <T as X>::Y to normalize to S. This is valid because we are
+ // checking the default value specifically here. Add this equality to the
+ // ParamEnv for normalization specifically.
+ let normalize_param_env = {
+ let mut predicates = param_env.caller_bounds().iter().collect::<Vec<_>>();
+ match impl_ty_value.kind() {
+ ty::Projection(proj)
+ if proj.item_def_id == trait_ty.def_id && proj.substs == rebased_substs =>
+ {
+ // Don't include this predicate if the projected type is
+ // exactly the same as the projection. This can occur in
+ // (somewhat dubious) code like this:
+ //
+ // impl<T> X for T where T: X { type Y = <T as X>::Y; }
+ }
+ _ => predicates.push(
+ ty::Binder::bind_with_vars(
+ ty::ProjectionPredicate {
+ projection_ty: ty::ProjectionTy {
+ item_def_id: trait_ty.def_id,
+ substs: rebased_substs,
+ },
+ term: impl_ty_value.into(),
+ },
+ bound_vars,
+ )
+ .to_predicate(tcx),
+ ),
+ };
+ ty::ParamEnv::new(
+ tcx.intern_predicates(&predicates),
+ Reveal::UserFacing,
+ param_env.constness(),
+ )
+ };
+ debug!(?normalize_param_env);
+
+ let impl_ty_hir_id = tcx.hir().local_def_id_to_hir_id(impl_ty.def_id.expect_local());
+ let impl_ty_substs = InternalSubsts::identity_for_item(tcx, impl_ty.def_id);
+ let rebased_substs = impl_ty_substs.rebase_onto(tcx, container_id, impl_trait_ref.substs);
+
+ let infcx = tcx.infer_ctxt().build();
+ let ocx = ObligationCtxt::new(&infcx);
+
+ let assumed_wf_types =
+ ocx.assumed_wf_types(param_env, impl_ty_span, impl_ty.def_id.expect_local());
+
+ let mut selcx = traits::SelectionContext::new(&infcx);
+ let normalize_cause = ObligationCause::new(
+ impl_ty_span,
+ impl_ty_hir_id,
+ ObligationCauseCode::CheckAssociatedTypeBounds {
+ impl_item_def_id: impl_ty.def_id.expect_local(),
+ trait_item_def_id: trait_ty.def_id,
+ },
+ );
+ let mk_cause = |span: Span| {
+ let code = if span.is_dummy() {
+ traits::ItemObligation(trait_ty.def_id)
+ } else {
+ traits::BindingObligation(trait_ty.def_id, span)
+ };
+ ObligationCause::new(impl_ty_span, impl_ty_hir_id, code)
+ };
+
+ let obligations = tcx
+ .bound_explicit_item_bounds(trait_ty.def_id)
+ .subst_iter_copied(tcx, rebased_substs)
+ .map(|(concrete_ty_bound, span)| {
+ debug!("check_type_bounds: concrete_ty_bound = {:?}", concrete_ty_bound);
+ traits::Obligation::new(mk_cause(span), param_env, concrete_ty_bound)
+ })
+ .collect();
+ debug!("check_type_bounds: item_bounds={:?}", obligations);
+
+ for mut obligation in util::elaborate_obligations(tcx, obligations) {
+ let traits::Normalized { value: normalized_predicate, obligations } = traits::normalize(
+ &mut selcx,
+ normalize_param_env,
+ normalize_cause.clone(),
+ obligation.predicate,
+ );
+ debug!("compare_projection_bounds: normalized predicate = {:?}", normalized_predicate);
+ obligation.predicate = normalized_predicate;
+
+ ocx.register_obligations(obligations);
+ ocx.register_obligation(obligation);
+ }
+ // Check that all obligations are satisfied by the implementation's
+ // version.
+ let errors = ocx.select_all_or_error();
+ if !errors.is_empty() {
+ let reported = infcx.err_ctxt().report_fulfillment_errors(&errors, None, false);
+ return Err(reported);
+ }
+
+ // Finally, resolve all regions. This catches wily misuses of
+ // lifetime parameters.
+ let implied_bounds = infcx.implied_bounds_tys(param_env, impl_ty_hir_id, assumed_wf_types);
+ let outlives_environment =
+ OutlivesEnvironment::with_bounds(param_env, Some(&infcx), implied_bounds);
+
+ infcx.check_region_obligations_and_report_errors(
+ impl_ty.def_id.expect_local(),
+ &outlives_environment,
+ );
+
+ let constraints = infcx.inner.borrow_mut().opaque_type_storage.take_opaque_types();
+ for (key, value) in constraints {
+ infcx
+ .err_ctxt()
+ .report_mismatched_types(
+ &ObligationCause::misc(
+ value.hidden_type.span,
+ tcx.hir().local_def_id_to_hir_id(impl_ty.def_id.expect_local()),
+ ),
+ tcx.mk_opaque(key.def_id.to_def_id(), key.substs),
+ value.hidden_type.ty,
+ TypeError::Mismatch,
+ )
+ .emit();
+ }
+
+ Ok(())
+}
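
Concretely, `check_type_bounds` is what rejects an impl whose chosen type does not satisfy the bounds the trait places on the associated type, for example:

```
trait X {
    type Y: Copy;
}

struct NotCopy;

impl X for () {
    // error[E0277]: the trait bound `NotCopy: Copy` is not satisfied
    type Y = NotCopy;
}
```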
+
+fn assoc_item_kind_str(impl_item: &ty::AssocItem) -> &'static str {
+ match impl_item.kind {
+ ty::AssocKind::Const => "const",
+ ty::AssocKind::Fn => "method",
+ ty::AssocKind::Type => "type",
+ }
+}
diff --git a/compiler/rustc_typeck/src/check/dropck.rs b/compiler/rustc_hir_analysis/src/check/dropck.rs
index 321064ec0..a74016e22 100644
--- a/compiler/rustc_typeck/src/check/dropck.rs
+++ b/compiler/rustc_hir_analysis/src/check/dropck.rs
@@ -1,4 +1,4 @@
-// FIXME(@lcnr): Move this module out of `rustc_typeck`.
+// FIXME(@lcnr): Move this module out of `rustc_hir_analysis`.
//
// We don't do any drop checking during hir typeck.
use crate::hir::def_id::{DefId, LocalDefId};
@@ -144,6 +144,8 @@ fn ensure_drop_predicates_are_implied_by_item_defn<'tcx>(
let assumptions_in_impl_context = generic_assumptions.instantiate(tcx, &self_to_impl_substs);
let assumptions_in_impl_context = assumptions_in_impl_context.predicates;
+ debug!(?assumptions_in_impl_context, ?dtor_predicates.predicates);
+
let self_param_env = tcx.param_env(self_type_did);
// An earlier version of this code attempted to do this checking
@@ -182,13 +184,7 @@ fn ensure_drop_predicates_are_implied_by_item_defn<'tcx>(
let p = p.kind();
match (predicate.skip_binder(), p.skip_binder()) {
(ty::PredicateKind::Trait(a), ty::PredicateKind::Trait(b)) => {
- // Since struct predicates cannot have ~const, project the impl predicate
- // onto one that ignores the constness. This is equivalent to saying that
- // we match a `Trait` bound on the struct with a `Trait` or `~const Trait`
- // in the impl.
- let non_const_a =
- ty::TraitPredicate { constness: ty::BoundConstness::NotConst, ..a };
- relator.relate(predicate.rebind(non_const_a), p.rebind(b)).is_ok()
+ relator.relate(predicate.rebind(a), p.rebind(b)).is_ok()
}
(ty::PredicateKind::Projection(a), ty::PredicateKind::Projection(b)) => {
relator.relate(predicate.rebind(a), p.rebind(b)).is_ok()
@@ -196,7 +192,7 @@ fn ensure_drop_predicates_are_implied_by_item_defn<'tcx>(
(
ty::PredicateKind::ConstEvaluatable(a),
ty::PredicateKind::ConstEvaluatable(b),
- ) => tcx.try_unify_abstract_consts(self_param_env.and((a, b))),
+ ) => relator.relate(predicate.rebind(a), predicate.rebind(b)).is_ok(),
(
ty::PredicateKind::TypeOutlives(ty::OutlivesPredicate(ty_a, lt_a)),
ty::PredicateKind::TypeOutlives(ty::OutlivesPredicate(ty_b, lt_b)),
diff --git a/compiler/rustc_typeck/src/check/intrinsic.rs b/compiler/rustc_hir_analysis/src/check/intrinsic.rs
index 3f2a0da8d..609095c9c 100644
--- a/compiler/rustc_typeck/src/check/intrinsic.rs
+++ b/compiler/rustc_hir_analysis/src/check/intrinsic.rs
@@ -7,10 +7,10 @@ use crate::errors::{
};
use crate::require_same_types;
-use rustc_errors::struct_span_err;
+use hir::def_id::DefId;
+use rustc_errors::{struct_span_err, DiagnosticMessage};
use rustc_hir as hir;
use rustc_middle::traits::{ObligationCause, ObligationCauseCode};
-use rustc_middle::ty::subst::Subst;
use rustc_middle::ty::{self, TyCtxt};
use rustc_span::symbol::{kw, sym, Symbol};
use rustc_target::spec::abi::Abi;
@@ -26,7 +26,7 @@ fn equate_intrinsic_type<'tcx>(
) {
let (own_counts, span) = match &it.kind {
hir::ForeignItemKind::Fn(.., generics) => {
- let own_counts = tcx.generics_of(it.def_id.to_def_id()).own_counts();
+ let own_counts = tcx.generics_of(it.owner_id.to_def_id()).own_counts();
(own_counts, generics.span)
}
_ => {
@@ -57,18 +57,25 @@ fn equate_intrinsic_type<'tcx>(
{
let fty = tcx.mk_fn_ptr(sig);
let cause = ObligationCause::new(it.span, it.hir_id(), ObligationCauseCode::IntrinsicType);
- require_same_types(tcx, &cause, tcx.mk_fn_ptr(tcx.fn_sig(it.def_id)), fty);
+ require_same_types(tcx, &cause, tcx.mk_fn_ptr(tcx.fn_sig(it.owner_id)), fty);
}
}
/// Returns the unsafety of the given intrinsic.
-pub fn intrinsic_operation_unsafety(intrinsic: Symbol) -> hir::Unsafety {
- match intrinsic {
+pub fn intrinsic_operation_unsafety(tcx: TyCtxt<'_>, intrinsic_id: DefId) -> hir::Unsafety {
+ let has_safe_attr = match tcx.has_attr(intrinsic_id, sym::rustc_safe_intrinsic) {
+ true => hir::Unsafety::Normal,
+ false => hir::Unsafety::Unsafe,
+ };
+ let is_in_list = match tcx.item_name(intrinsic_id) {
// When adding a new intrinsic to this list,
// it's usually worth updating that intrinsic's documentation
// to note that it's safe to call, since
// safe extern fns are otherwise unprecedented.
sym::abort
+ | sym::assert_inhabited
+ | sym::assert_zero_valid
+ | sym::assert_uninit_valid
| sym::size_of
| sym::min_align_of
| sym::needs_drop
@@ -92,8 +99,7 @@ pub fn intrinsic_operation_unsafety(intrinsic: Symbol) -> hir::Unsafety {
| sym::type_id
| sym::likely
| sym::unlikely
- | sym::ptr_guaranteed_eq
- | sym::ptr_guaranteed_ne
+ | sym::ptr_guaranteed_cmp
| sym::minnumf32
| sym::minnumf64
| sym::maxnumf32
@@ -102,16 +108,29 @@ pub fn intrinsic_operation_unsafety(intrinsic: Symbol) -> hir::Unsafety {
| sym::type_name
| sym::forget
| sym::black_box
- | sym::variant_count => hir::Unsafety::Normal,
+ | sym::variant_count
+ | sym::ptr_mask => hir::Unsafety::Normal,
_ => hir::Unsafety::Unsafe,
+ };
+
+ if has_safe_attr != is_in_list {
+ tcx.sess.struct_span_err(
+ tcx.def_span(intrinsic_id),
+ DiagnosticMessage::Str(format!(
+ "intrinsic safety mismatch between list of intrinsics within the compiler and core library intrinsics for intrinsic `{}`",
+ tcx.item_name(intrinsic_id)
+ ))).emit();
}
+
+ is_in_list
}
/// Remember to add all intrinsics here, in `compiler/rustc_codegen_llvm/src/intrinsic.rs`,
/// and in `library/core/src/intrinsics.rs`.
pub fn check_intrinsic_type(tcx: TyCtxt<'_>, it: &hir::ForeignItem<'_>) {
let param = |n| tcx.mk_ty_param(n, Symbol::intern(&format!("P{}", n)));
- let intrinsic_name = tcx.item_name(it.def_id.to_def_id());
+ let intrinsic_id = it.owner_id.to_def_id();
+ let intrinsic_name = tcx.item_name(intrinsic_id);
let name_str = intrinsic_name.as_str();
let bound_vars = tcx.mk_bound_variable_kinds(
@@ -158,7 +177,7 @@ pub fn check_intrinsic_type(tcx: TyCtxt<'_>, it: &hir::ForeignItem<'_>) {
};
(n_tps, 0, inputs, output, hir::Unsafety::Unsafe)
} else {
- let unsafety = intrinsic_operation_unsafety(intrinsic_name);
+ let unsafety = intrinsic_operation_unsafety(tcx, intrinsic_id);
let (n_tps, inputs, output) = match intrinsic_name {
sym::abort => (0, Vec::new(), tcx.types.never),
sym::unreachable => (0, Vec::new(), tcx.types.never),
@@ -200,6 +219,15 @@ pub fn check_intrinsic_type(tcx: TyCtxt<'_>, it: &hir::ForeignItem<'_>) {
],
tcx.mk_ptr(ty::TypeAndMut { ty: param(0), mutbl: hir::Mutability::Not }),
),
+ sym::ptr_mask => (
+ 1,
+ vec![
+ tcx.mk_ptr(ty::TypeAndMut { ty: param(0), mutbl: hir::Mutability::Not }),
+ tcx.types.usize,
+ ],
+ tcx.mk_ptr(ty::TypeAndMut { ty: param(0), mutbl: hir::Mutability::Not }),
+ ),
+
sym::copy | sym::copy_nonoverlapping => (
1,
vec![
@@ -289,8 +317,8 @@ pub fn check_intrinsic_type(tcx: TyCtxt<'_>, it: &hir::ForeignItem<'_>) {
(1, vec![param(0), param(0)], tcx.intern_tup(&[param(0), tcx.types.bool]))
}
- sym::ptr_guaranteed_eq | sym::ptr_guaranteed_ne => {
- (1, vec![tcx.mk_imm_ptr(param(0)), tcx.mk_imm_ptr(param(0))], tcx.types.bool)
+ sym::ptr_guaranteed_cmp => {
+ (1, vec![tcx.mk_imm_ptr(param(0)), tcx.mk_imm_ptr(param(0))], tcx.types.u8)
}
sym::const_allocate => {
@@ -465,7 +493,11 @@ pub fn check_platform_intrinsic_type(tcx: TyCtxt<'_>, it: &hir::ForeignItem<'_>)
sym::simd_scatter => (3, vec![param(0), param(1), param(2)], tcx.mk_unit()),
sym::simd_insert => (2, vec![param(0), tcx.types.u32, param(1)], param(0)),
sym::simd_extract => (2, vec![param(0), tcx.types.u32], param(1)),
- sym::simd_cast | sym::simd_as => (2, vec![param(0)], param(1)),
+ sym::simd_cast
+ | sym::simd_as
+ | sym::simd_cast_ptr
+ | sym::simd_expose_addr
+ | sym::simd_from_exposed_addr => (2, vec![param(0)], param(1)),
sym::simd_bitmask => (2, vec![param(0)], param(1)),
sym::simd_select | sym::simd_select_bitmask => {
(2, vec![param(0), param(1), param(1)], param(1))
diff --git a/compiler/rustc_hir_analysis/src/check/intrinsicck.rs b/compiler/rustc_hir_analysis/src/check/intrinsicck.rs
new file mode 100644
index 000000000..17c4d0d48
--- /dev/null
+++ b/compiler/rustc_hir_analysis/src/check/intrinsicck.rs
@@ -0,0 +1,437 @@
+use rustc_ast::InlineAsmTemplatePiece;
+use rustc_data_structures::fx::FxHashSet;
+use rustc_hir as hir;
+use rustc_middle::ty::{self, Article, FloatTy, IntTy, Ty, TyCtxt, TypeVisitable, UintTy};
+use rustc_session::lint;
+use rustc_span::{Symbol, DUMMY_SP};
+use rustc_target::asm::{InlineAsmReg, InlineAsmRegClass, InlineAsmRegOrRegClass, InlineAsmType};
+
+pub struct InlineAsmCtxt<'a, 'tcx> {
+ tcx: TyCtxt<'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ get_operand_ty: Box<dyn Fn(&'tcx hir::Expr<'tcx>) -> Ty<'tcx> + 'a>,
+}
+
+impl<'a, 'tcx> InlineAsmCtxt<'a, 'tcx> {
+ pub fn new_global_asm(tcx: TyCtxt<'tcx>) -> Self {
+ InlineAsmCtxt {
+ tcx,
+ param_env: ty::ParamEnv::empty(),
+ get_operand_ty: Box::new(|e| bug!("asm operand in global asm: {e:?}")),
+ }
+ }
+
+ pub fn new_in_fn(
+ tcx: TyCtxt<'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ get_operand_ty: impl Fn(&'tcx hir::Expr<'tcx>) -> Ty<'tcx> + 'a,
+ ) -> Self {
+ InlineAsmCtxt { tcx, param_env, get_operand_ty: Box::new(get_operand_ty) }
+ }
+
+ // FIXME(compiler-errors): This could use `<$ty as Pointee>::Metadata == ()`
+ fn is_thin_ptr_ty(&self, ty: Ty<'tcx>) -> bool {
+ // Type still may have region variables, but `Sized` does not depend
+ // on those, so just erase them before querying.
+ if ty.is_sized(self.tcx, self.param_env) {
+ return true;
+ }
+ if let ty::Foreign(..) = ty.kind() {
+ return true;
+ }
+ false
+ }
+
+ fn check_asm_operand_type(
+ &self,
+ idx: usize,
+ reg: InlineAsmRegOrRegClass,
+ expr: &'tcx hir::Expr<'tcx>,
+ template: &[InlineAsmTemplatePiece],
+ is_input: bool,
+ tied_input: Option<(&'tcx hir::Expr<'tcx>, Option<InlineAsmType>)>,
+ target_features: &FxHashSet<Symbol>,
+ ) -> Option<InlineAsmType> {
+ let ty = (self.get_operand_ty)(expr);
+ if ty.has_non_region_infer() {
+ bug!("inference variable in asm operand ty: {:?} {:?}", expr, ty);
+ }
+ let asm_ty_isize = match self.tcx.sess.target.pointer_width {
+ 16 => InlineAsmType::I16,
+ 32 => InlineAsmType::I32,
+ 64 => InlineAsmType::I64,
+ _ => unreachable!(),
+ };
+
+ let asm_ty = match *ty.kind() {
+ // `!` is allowed for input but not for output (issue #87802)
+ ty::Never if is_input => return None,
+ ty::Error(_) => return None,
+ ty::Int(IntTy::I8) | ty::Uint(UintTy::U8) => Some(InlineAsmType::I8),
+ ty::Int(IntTy::I16) | ty::Uint(UintTy::U16) => Some(InlineAsmType::I16),
+ ty::Int(IntTy::I32) | ty::Uint(UintTy::U32) => Some(InlineAsmType::I32),
+ ty::Int(IntTy::I64) | ty::Uint(UintTy::U64) => Some(InlineAsmType::I64),
+ ty::Int(IntTy::I128) | ty::Uint(UintTy::U128) => Some(InlineAsmType::I128),
+ ty::Int(IntTy::Isize) | ty::Uint(UintTy::Usize) => Some(asm_ty_isize),
+ ty::Float(FloatTy::F32) => Some(InlineAsmType::F32),
+ ty::Float(FloatTy::F64) => Some(InlineAsmType::F64),
+ ty::FnPtr(_) => Some(asm_ty_isize),
+ ty::RawPtr(ty::TypeAndMut { ty, mutbl: _ }) if self.is_thin_ptr_ty(ty) => {
+ Some(asm_ty_isize)
+ }
+ ty::Adt(adt, substs) if adt.repr().simd() => {
+ let fields = &adt.non_enum_variant().fields;
+ let elem_ty = fields[0].ty(self.tcx, substs);
+ match elem_ty.kind() {
+ ty::Never | ty::Error(_) => return None,
+ ty::Int(IntTy::I8) | ty::Uint(UintTy::U8) => {
+ Some(InlineAsmType::VecI8(fields.len() as u64))
+ }
+ ty::Int(IntTy::I16) | ty::Uint(UintTy::U16) => {
+ Some(InlineAsmType::VecI16(fields.len() as u64))
+ }
+ ty::Int(IntTy::I32) | ty::Uint(UintTy::U32) => {
+ Some(InlineAsmType::VecI32(fields.len() as u64))
+ }
+ ty::Int(IntTy::I64) | ty::Uint(UintTy::U64) => {
+ Some(InlineAsmType::VecI64(fields.len() as u64))
+ }
+ ty::Int(IntTy::I128) | ty::Uint(UintTy::U128) => {
+ Some(InlineAsmType::VecI128(fields.len() as u64))
+ }
+ ty::Int(IntTy::Isize) | ty::Uint(UintTy::Usize) => {
+ Some(match self.tcx.sess.target.pointer_width {
+ 16 => InlineAsmType::VecI16(fields.len() as u64),
+ 32 => InlineAsmType::VecI32(fields.len() as u64),
+ 64 => InlineAsmType::VecI64(fields.len() as u64),
+ _ => unreachable!(),
+ })
+ }
+ ty::Float(FloatTy::F32) => Some(InlineAsmType::VecF32(fields.len() as u64)),
+ ty::Float(FloatTy::F64) => Some(InlineAsmType::VecF64(fields.len() as u64)),
+ _ => None,
+ }
+ }
+ ty::Infer(_) => unreachable!(),
+ _ => None,
+ };
+ let Some(asm_ty) = asm_ty else {
+ let msg = &format!("cannot use value of type `{ty}` for inline assembly");
+ let mut err = self.tcx.sess.struct_span_err(expr.span, msg);
+ err.note(
+ "only integers, floats, SIMD vectors, pointers and function pointers \
+ can be used as arguments for inline assembly",
+ );
+ err.emit();
+ return None;
+ };
+
+ // Check that the type implements Copy. The only case where this can
+ // possibly fail is for SIMD types which don't #[derive(Copy)].
+ if !ty.is_copy_modulo_regions(self.tcx, self.param_env) {
+ let msg = "arguments for inline assembly must be copyable";
+ let mut err = self.tcx.sess.struct_span_err(expr.span, msg);
+ err.note(&format!("`{ty}` does not implement the Copy trait"));
+ err.emit();
+ }
+
+ // Ideally we wouldn't need to do this, but LLVM's register allocator
+ // really doesn't like it when tied operands have different types.
+ //
+ // This is purely an LLVM limitation, but we have to live with it since
+ // there is no way to hide this with implicit conversions.
+ //
+ // For the purposes of this check we only look at the `InlineAsmType`,
+ // which means that pointers and integers are treated as identical (modulo
+ // size).
+ if let Some((in_expr, Some(in_asm_ty))) = tied_input {
+ if in_asm_ty != asm_ty {
+ let msg = "incompatible types for asm inout argument";
+ let mut err = self.tcx.sess.struct_span_err(vec![in_expr.span, expr.span], msg);
+
+ let in_expr_ty = (self.get_operand_ty)(in_expr);
+ err.span_label(in_expr.span, &format!("type `{in_expr_ty}`"));
+ err.span_label(expr.span, &format!("type `{ty}`"));
+ err.note(
+ "asm inout arguments must have the same type, \
+ unless they are both pointers or integers of the same size",
+ );
+ err.emit();
+ }
+
+ // All of the later checks have already been done on the input, so
+ // let's not emit errors and warnings twice.
+ return Some(asm_ty);
+ }
+
+ // Check the type against the list of types supported by the selected
+ // register class.
+ let asm_arch = self.tcx.sess.asm_arch.unwrap();
+ let reg_class = reg.reg_class();
+ let supported_tys = reg_class.supported_types(asm_arch);
+ let Some((_, feature)) = supported_tys.iter().find(|&&(t, _)| t == asm_ty) else {
+ let msg = &format!("type `{ty}` cannot be used with this register class");
+ let mut err = self.tcx.sess.struct_span_err(expr.span, msg);
+ let supported_tys: Vec<_> =
+ supported_tys.iter().map(|(t, _)| t.to_string()).collect();
+ err.note(&format!(
+ "register class `{}` supports these types: {}",
+ reg_class.name(),
+ supported_tys.join(", "),
+ ));
+ if let Some(suggest) = reg_class.suggest_class(asm_arch, asm_ty) {
+ err.help(&format!(
+ "consider using the `{}` register class instead",
+ suggest.name()
+ ));
+ }
+ err.emit();
+ return Some(asm_ty);
+ };
+
+ // Check whether the selected type requires a target feature. Note that
+ // this is different from the feature check we did earlier. While the
+ // previous check checked that this register class is usable at all
+ // with the currently enabled features, some types may only be usable
+ // with a register class when a certain feature is enabled. We check
+ // this here since it depends on the results of typeck.
+ //
+ // Also note that this check isn't run when the operand type is never
+ // (!). In that case we still need the earlier check to verify that the
+ // register class is usable at all.
+ if let Some(feature) = feature {
+ if !target_features.contains(&feature) {
+ let msg = &format!("`{}` target feature is not enabled", feature);
+ let mut err = self.tcx.sess.struct_span_err(expr.span, msg);
+ err.note(&format!(
+ "this is required to use type `{}` with register class `{}`",
+ ty,
+ reg_class.name(),
+ ));
+ err.emit();
+ return Some(asm_ty);
+ }
+ }
+
+ // Check whether a modifier is suggested for using this type.
+ if let Some((suggested_modifier, suggested_result)) =
+ reg_class.suggest_modifier(asm_arch, asm_ty)
+ {
+ // Search for any use of this operand without a modifier and emit
+ // the suggestion for them.
+ let mut spans = vec![];
+ for piece in template {
+ if let &InlineAsmTemplatePiece::Placeholder { operand_idx, modifier, span } = piece
+ {
+ if operand_idx == idx && modifier.is_none() {
+ spans.push(span);
+ }
+ }
+ }
+ if !spans.is_empty() {
+ let (default_modifier, default_result) =
+ reg_class.default_modifier(asm_arch).unwrap();
+ self.tcx.struct_span_lint_hir(
+ lint::builtin::ASM_SUB_REGISTER,
+ expr.hir_id,
+ spans,
+ "formatting may not be suitable for sub-register argument",
+ |lint| {
+ lint.span_label(expr.span, "for this argument");
+ lint.help(&format!(
+ "use `{{{idx}:{suggested_modifier}}}` to have the register formatted as `{suggested_result}`",
+ ));
+ lint.help(&format!(
+ "or use `{{{idx}:{default_modifier}}}` to keep the default formatting of `{default_result}`",
+ ));
+ lint
+ },
+ );
+ }
+ }
+
+ Some(asm_ty)
+ }
+
+ pub fn check_asm(&self, asm: &hir::InlineAsm<'tcx>, enclosing_id: hir::HirId) {
+ let hir = self.tcx.hir();
+ let enclosing_def_id = hir.local_def_id(enclosing_id).to_def_id();
+ let target_features = self.tcx.asm_target_features(enclosing_def_id);
+ let Some(asm_arch) = self.tcx.sess.asm_arch else {
+ self.tcx.sess.delay_span_bug(DUMMY_SP, "target architecture does not support asm");
+ return;
+ };
+ for (idx, (op, op_sp)) in asm.operands.iter().enumerate() {
+ // Validate register classes against currently enabled target
+ // features. We check that at least one type is available for
+ // the enabled features.
+ //
+ // We ignore target feature requirements for clobbers: if the
+ // feature is disabled then the compiler doesn't care what we
+ // do with the registers.
+ //
+ // Note that this is only possible for explicit register
+ // operands, which cannot be used in the asm string.
+ if let Some(reg) = op.reg() {
+ // Some explicit registers cannot be used depending on the
+ // target. Reject those here.
+ if let InlineAsmRegOrRegClass::Reg(reg) = reg {
+ if let InlineAsmReg::Err = reg {
+ // `validate` will panic on `Err`, as an error must
+ // already have been reported.
+ continue;
+ }
+ if let Err(msg) = reg.validate(
+ asm_arch,
+ self.tcx.sess.relocation_model(),
+ &target_features,
+ &self.tcx.sess.target,
+ op.is_clobber(),
+ ) {
+ let msg = format!("cannot use register `{}`: {}", reg.name(), msg);
+ self.tcx.sess.struct_span_err(*op_sp, &msg).emit();
+ continue;
+ }
+ }
+
+ if !op.is_clobber() {
+ let mut missing_required_features = vec![];
+ let reg_class = reg.reg_class();
+ if let InlineAsmRegClass::Err = reg_class {
+ continue;
+ }
+ for &(_, feature) in reg_class.supported_types(asm_arch) {
+ match feature {
+ Some(feature) => {
+ if target_features.contains(&feature) {
+ missing_required_features.clear();
+ break;
+ } else {
+ missing_required_features.push(feature);
+ }
+ }
+ None => {
+ missing_required_features.clear();
+ break;
+ }
+ }
+ }
+
+ // These are primitive strs, so an unstable sort is fine here
+ missing_required_features.sort_unstable();
+ missing_required_features.dedup();
+ match &missing_required_features[..] {
+ [] => {}
+ [feature] => {
+ let msg = format!(
+ "register class `{}` requires the `{}` target feature",
+ reg_class.name(),
+ feature
+ );
+ self.tcx.sess.struct_span_err(*op_sp, &msg).emit();
+ // register isn't enabled, don't do more checks
+ continue;
+ }
+ features => {
+ let msg = format!(
+ "register class `{}` requires at least one of the following target features: {}",
+ reg_class.name(),
+ features
+ .iter()
+ .map(|f| f.as_str())
+ .intersperse(", ")
+ .collect::<String>(),
+ );
+ self.tcx.sess.struct_span_err(*op_sp, &msg).emit();
+ // register isn't enabled, don't do more checks
+ continue;
+ }
+ }
+ }
+ }
+
+ match *op {
+ hir::InlineAsmOperand::In { reg, ref expr } => {
+ self.check_asm_operand_type(
+ idx,
+ reg,
+ expr,
+ asm.template,
+ true,
+ None,
+ &target_features,
+ );
+ }
+ hir::InlineAsmOperand::Out { reg, late: _, ref expr } => {
+ if let Some(expr) = expr {
+ self.check_asm_operand_type(
+ idx,
+ reg,
+ expr,
+ asm.template,
+ false,
+ None,
+ &target_features,
+ );
+ }
+ }
+ hir::InlineAsmOperand::InOut { reg, late: _, ref expr } => {
+ self.check_asm_operand_type(
+ idx,
+ reg,
+ expr,
+ asm.template,
+ false,
+ None,
+ &target_features,
+ );
+ }
+ hir::InlineAsmOperand::SplitInOut { reg, late: _, ref in_expr, ref out_expr } => {
+ let in_ty = self.check_asm_operand_type(
+ idx,
+ reg,
+ in_expr,
+ asm.template,
+ true,
+ None,
+ &target_features,
+ );
+ if let Some(out_expr) = out_expr {
+ self.check_asm_operand_type(
+ idx,
+ reg,
+ out_expr,
+ asm.template,
+ false,
+ Some((in_expr, in_ty)),
+ &target_features,
+ );
+ }
+ }
+ // No special checking is needed for these:
+ // - Typeck has checked that Const operands are integers.
+ // - AST lowering guarantees that SymStatic points to a static.
+ hir::InlineAsmOperand::Const { .. } | hir::InlineAsmOperand::SymStatic { .. } => {}
+ // Check that sym actually points to a function. Later passes
+ // depend on this.
+ hir::InlineAsmOperand::SymFn { anon_const } => {
+ let ty = self.tcx.typeck_body(anon_const.body).node_type(anon_const.hir_id);
+ match ty.kind() {
+ ty::Never | ty::Error(_) => {}
+ ty::FnDef(..) => {}
+ _ => {
+ let mut err =
+ self.tcx.sess.struct_span_err(*op_sp, "invalid `sym` operand");
+ err.span_label(
+ self.tcx.hir().span(anon_const.body.hir_id),
+ &format!("is {} `{}`", ty.kind().article(), ty),
+ );
+ err.help("`sym` operands must refer to either a function or a static");
+ err.emit();
+ }
+ };
+ }
+ }
+ }
+ }
+}
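
A rough example of an operand `check_asm_operand_type` rejects, assuming an asm-capable target such as x86_64 (a `String` is an ADT, so `asm_ty` resolves to `None`):

```
use std::arch::asm;

fn main() {
    let s = String::from("hello");
    unsafe {
        // error: cannot use value of type `String` for inline assembly
        asm!("/* {} */", in(reg) s);
    }
}
```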
diff --git a/compiler/rustc_hir_analysis/src/check/mod.rs b/compiler/rustc_hir_analysis/src/check/mod.rs
new file mode 100644
index 000000000..2e7b10257
--- /dev/null
+++ b/compiler/rustc_hir_analysis/src/check/mod.rs
@@ -0,0 +1,515 @@
+/*!
+
+# typeck: check phase
+
+Within the check phase of type check, we check each item one at a time
+(bodies of function expressions are checked as part of the containing
+function). Inference is used to supply types wherever they are unknown.
+
+By far the most complex case is checking the body of a function. This
+can be broken down into several distinct phases:
+
+- gather: creates type variables to represent the type of each local
+ variable and pattern binding.
+
+- main: the main pass does the lion's share of the work: it
+ determines the types of all expressions, resolves
+ methods, checks for most invalid conditions, and so forth. In
+ some cases, where a type is unknown, it may create a type or region
+ variable and use that as the type of an expression.
+
+ In the process of checking, various constraints will be placed on
+ these type variables through the subtyping relationships requested
+ through the `demand` module. The `infer` module is in charge
+ of resolving those constraints.
+
+- regionck: after main is complete, the regionck pass goes over all
+ types looking for regions and making sure that they did not escape
+ into places where they are not in scope. This may also influence the
+ final assignments of the various region variables if there is some
+ flexibility.
+
+- writeback: writes the final types within a function body, replacing
+ type variables with their final inferred types. These final types
+ are written into the `tcx.node_types` table, which should *never* contain
+ any reference to a type variable.
+
+## Intermediate types
+
+While type checking a function, the intermediate types for the
+expressions, blocks, and so forth contained within the function are
+stored in `fcx.node_types` and `fcx.node_substs`. These types
+may contain unresolved type variables. After type checking is
+complete, the functions in the writeback module are used to take the
+types from this table, resolve them, and then write them into their
+permanent home in the type context `tcx`.
+
+This means that during inferencing you should use `fcx.write_ty()`
+and `fcx.expr_ty()` / `fcx.node_ty()` to write/obtain the types of
+nodes within the function.
+
+The types of top-level items, which never contain unbound type
+variables, are stored directly into the `tcx` typeck_results.
+
+N.B., a type variable is not the same thing as a type parameter. A
+type variable is an instance of a type parameter. That is,
+given a generic function `fn foo<T>(t: T)`, while checking the
+function `foo`, the type `ty_param(0)` refers to the type `T`, which
+is treated in abstract. However, when `foo()` is called, `T` will be
+substituted for a fresh type variable `N`. This variable will
+eventually be resolved to some concrete type (which might itself be
+a type parameter).
+
+*/
+
+mod check;
+mod compare_method;
+pub mod dropck;
+pub mod intrinsic;
+pub mod intrinsicck;
+mod region;
+pub mod wfcheck;
+
+pub use check::check_abi;
+
+use check::check_mod_item_types;
+use rustc_data_structures::fx::{FxHashMap, FxHashSet};
+use rustc_errors::{pluralize, struct_span_err, Applicability, Diagnostic, DiagnosticBuilder};
+use rustc_hir as hir;
+use rustc_hir::def_id::{DefId, LocalDefId};
+use rustc_hir::intravisit::Visitor;
+use rustc_index::bit_set::BitSet;
+use rustc_middle::ty::query::Providers;
+use rustc_middle::ty::{self, Ty, TyCtxt};
+use rustc_middle::ty::{InternalSubsts, SubstsRef};
+use rustc_session::parse::feature_err;
+use rustc_span::source_map::DUMMY_SP;
+use rustc_span::symbol::{kw, Ident};
+use rustc_span::{self, BytePos, Span, Symbol};
+use rustc_target::abi::VariantIdx;
+use rustc_target::spec::abi::Abi;
+use rustc_trait_selection::traits::error_reporting::suggestions::ReturnsVisitor;
+use std::num::NonZeroU32;
+
+use crate::require_c_abi_if_c_variadic;
+use crate::util::common::indenter;
+
+use self::compare_method::collect_trait_impl_trait_tys;
+use self::region::region_scope_tree;
+
+pub fn provide(providers: &mut Providers) {
+ wfcheck::provide(providers);
+ *providers = Providers {
+ adt_destructor,
+ check_mod_item_types,
+ region_scope_tree,
+ collect_trait_impl_trait_tys,
+ compare_assoc_const_impl_item_with_trait_item: compare_method::raw_compare_const_impl,
+ ..*providers
+ };
+}
+
+fn adt_destructor(tcx: TyCtxt<'_>, def_id: DefId) -> Option<ty::Destructor> {
+ tcx.calculate_dtor(def_id, dropck::check_drop_impl)
+}
+
+/// Given a `DefId` for an opaque type in return position, find its parent item's return
+/// expressions.
+fn get_owner_return_paths<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ def_id: LocalDefId,
+) -> Option<(LocalDefId, ReturnsVisitor<'tcx>)> {
+ let hir_id = tcx.hir().local_def_id_to_hir_id(def_id);
+ let parent_id = tcx.hir().get_parent_item(hir_id).def_id;
+ tcx.hir().find_by_def_id(parent_id).and_then(|node| node.body_id()).map(|body_id| {
+ let body = tcx.hir().body(body_id);
+ let mut visitor = ReturnsVisitor::default();
+ visitor.visit_body(body);
+ (parent_id, visitor)
+ })
+}
+
+/// Forbid defining intrinsics in Rust code,
+/// as they must always be defined by the compiler.
+// FIXME: Move this to a more appropriate place.
+pub fn fn_maybe_err(tcx: TyCtxt<'_>, sp: Span, abi: Abi) {
+ if let Abi::RustIntrinsic | Abi::PlatformIntrinsic = abi {
+ tcx.sess.span_err(sp, "intrinsic must be in `extern \"rust-intrinsic\" { ... }` block");
+ }
+}
+
+fn maybe_check_static_with_link_section(tcx: TyCtxt<'_>, id: LocalDefId) {
+ // Only restricted on wasm target for now
+ if !tcx.sess.target.is_like_wasm {
+ return;
+ }
+
+ // If `#[link_section]` is missing, then nothing to verify
+ let attrs = tcx.codegen_fn_attrs(id);
+ if attrs.link_section.is_none() {
+ return;
+ }
+
+ // For the wasm32 target, statics with `#[link_section]` are placed into custom
+ // sections of the final output file, but this isn't like custom sections of
+ // other executable formats. Namely, we can only embed a list of bytes,
+ // nothing with provenance (pointers to anything else). If any provenance
+ // shows up, reject it here.
+ // `#[link_section]` may contain arbitrary, or even undefined bytes, but it is
+ // the consumer's responsibility to ensure all bytes that have been read
+ // have defined values.
+ if let Ok(alloc) = tcx.eval_static_initializer(id.to_def_id())
+ && alloc.inner().provenance().len() != 0
+ {
+ let msg = "statics with a custom `#[link_section]` must be a \
+ simple list of bytes on the wasm target with no \
+ extra levels of indirection such as references";
+ tcx.sess.span_err(tcx.def_span(id), msg);
+ }
+}
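
For illustration only (hypothetical statics, wasm32 target assumed): an initializer that carries provenance is what this check rejects, while a plain byte payload is accepted:

```
static FORTY_TWO: u32 = 42;

#[link_section = ".custom"]
static OK: [u8; 4] = [1, 2, 3, 4]; // plain bytes: accepted

#[link_section = ".custom"]
static BAD: &u32 = &FORTY_TWO; // pointer in the initializer: rejected on wasm
```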
+
+fn report_forbidden_specialization(
+ tcx: TyCtxt<'_>,
+ impl_item: &hir::ImplItemRef,
+ parent_impl: DefId,
+) {
+ let mut err = struct_span_err!(
+ tcx.sess,
+ impl_item.span,
+ E0520,
+ "`{}` specializes an item from a parent `impl`, but \
+ that item is not marked `default`",
+ impl_item.ident
+ );
+ err.span_label(impl_item.span, format!("cannot specialize default item `{}`", impl_item.ident));
+
+ match tcx.span_of_impl(parent_impl) {
+ Ok(span) => {
+ err.span_label(span, "parent `impl` is here");
+ err.note(&format!(
+ "to specialize, `{}` in the parent `impl` must be marked `default`",
+ impl_item.ident
+ ));
+ }
+ Err(cname) => {
+ err.note(&format!("parent implementation is in crate `{cname}`"));
+ }
+ }
+
+ err.emit();
+}
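
The situation `report_forbidden_specialization` describes, sketched under the unstable specialization feature:

```
#![feature(specialization)]

trait Tr {
    fn f(&self) {}
}

impl<T> Tr for T {
    fn f(&self) {} // parent item, not marked `default`
}

impl Tr for u8 {
    // error[E0520]: `f` specializes an item from a parent `impl`,
    // but that item is not marked `default`
    fn f(&self) {}
}
```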
+
+fn missing_items_err(
+ tcx: TyCtxt<'_>,
+ impl_span: Span,
+ missing_items: &[&ty::AssocItem],
+ full_impl_span: Span,
+) {
+ let missing_items_msg = missing_items
+ .iter()
+ .map(|trait_item| trait_item.name.to_string())
+ .collect::<Vec<_>>()
+ .join("`, `");
+
+ let mut err = struct_span_err!(
+ tcx.sess,
+ impl_span,
+ E0046,
+ "not all trait items implemented, missing: `{missing_items_msg}`",
+ );
+ err.span_label(impl_span, format!("missing `{missing_items_msg}` in implementation"));
+
+ // `Span` before impl block closing brace.
+ let hi = full_impl_span.hi() - BytePos(1);
+ // Point at the place right before the closing brace of the relevant `impl` to suggest
+ // adding the associated item at the end of its body.
+ let sugg_sp = full_impl_span.with_lo(hi).with_hi(hi);
+ // Obtain the level of indentation ending in `sugg_sp`.
+ let padding =
+ tcx.sess.source_map().indentation_before(sugg_sp).unwrap_or_else(|| String::new());
+
+ for trait_item in missing_items {
+ let snippet = suggestion_signature(trait_item, tcx);
+ let code = format!("{}{}\n{}", padding, snippet, padding);
+ let msg = format!("implement the missing item: `{snippet}`");
+ let appl = Applicability::HasPlaceholders;
+ if let Some(span) = tcx.hir().span_if_local(trait_item.def_id) {
+ err.span_label(span, format!("`{}` from trait", trait_item.name));
+ err.tool_only_span_suggestion(sugg_sp, &msg, code, appl);
+ } else {
+ err.span_suggestion_hidden(sugg_sp, &msg, code, appl);
+ }
+ }
+ err.emit();
+}
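
For reference, the shape of code that reaches `missing_items_err`, together with the kind of placeholder `suggestion_signature` produces for the structured suggestion (exact rendering may differ):

```
trait Shape {
    fn area(&self) -> f64;
}

// error[E0046]: not all trait items implemented, missing: `area`
impl Shape for () {}

// suggested insertion before the closing brace:
//     fn area(&self) -> f64 { todo!() }
```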
+
+fn missing_items_must_implement_one_of_err(
+ tcx: TyCtxt<'_>,
+ impl_span: Span,
+ missing_items: &[Ident],
+ annotation_span: Option<Span>,
+) {
+ let missing_items_msg =
+ missing_items.iter().map(Ident::to_string).collect::<Vec<_>>().join("`, `");
+
+ let mut err = struct_span_err!(
+ tcx.sess,
+ impl_span,
+ E0046,
+ "not all trait items implemented, missing one of: `{missing_items_msg}`",
+ );
+ err.span_label(impl_span, format!("missing one of `{missing_items_msg}` in implementation"));
+
+ if let Some(annotation_span) = annotation_span {
+ err.span_note(annotation_span, "required because of this annotation");
+ }
+
+ err.emit();
+}
+
+fn default_body_is_unstable(
+ tcx: TyCtxt<'_>,
+ impl_span: Span,
+ item_did: DefId,
+ feature: Symbol,
+ reason: Option<Symbol>,
+ issue: Option<NonZeroU32>,
+) {
+ let missing_item_name = &tcx.associated_item(item_did).name;
+ let use_of_unstable_library_feature_note = match reason {
+ Some(r) => format!("use of unstable library feature '{feature}': {r}"),
+ None => format!("use of unstable library feature '{feature}'"),
+ };
+
+ let mut err = struct_span_err!(
+ tcx.sess,
+ impl_span,
+ E0046,
+ "not all trait items implemented, missing: `{missing_item_name}`",
+ );
+ err.note(format!("default implementation of `{missing_item_name}` is unstable"));
+ err.note(use_of_unstable_library_feature_note);
+ rustc_session::parse::add_feature_diagnostics_for_issue(
+ &mut err,
+ &tcx.sess.parse_sess,
+ feature,
+ rustc_feature::GateIssue::Library(issue),
+ );
+ err.emit();
+}
+
+/// Re-sugar `ty::GenericPredicates` in a way suitable to be used in structured suggestions.
+fn bounds_from_generic_predicates<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ predicates: ty::GenericPredicates<'tcx>,
+) -> (String, String) {
+ let mut types: FxHashMap<Ty<'tcx>, Vec<DefId>> = FxHashMap::default();
+ let mut projections = vec![];
+ for (predicate, _) in predicates.predicates {
+ debug!("predicate {:?}", predicate);
+ let bound_predicate = predicate.kind();
+ match bound_predicate.skip_binder() {
+ ty::PredicateKind::Trait(trait_predicate) => {
+ let entry = types.entry(trait_predicate.self_ty()).or_default();
+ let def_id = trait_predicate.def_id();
+ if Some(def_id) != tcx.lang_items().sized_trait() {
+ // Type params are `Sized` by default, so do not add that restriction to the list
+ // if it is a positive requirement.
+ entry.push(trait_predicate.def_id());
+ }
+ }
+ ty::PredicateKind::Projection(projection_pred) => {
+ projections.push(bound_predicate.rebind(projection_pred));
+ }
+ _ => {}
+ }
+ }
+ let generics = if types.is_empty() {
+ "".to_string()
+ } else {
+ format!(
+ "<{}>",
+ types
+ .keys()
+ .filter_map(|t| match t.kind() {
+ ty::Param(_) => Some(t.to_string()),
+ // Avoid suggesting the following:
+ // fn foo<T, <T as Trait>::Bar>(_: T) where T: Trait, <T as Trait>::Bar: Other {}
+ _ => None,
+ })
+ .collect::<Vec<_>>()
+ .join(", ")
+ )
+ };
+ let mut where_clauses = vec![];
+ for (ty, bounds) in types {
+ where_clauses
+ .extend(bounds.into_iter().map(|bound| format!("{}: {}", ty, tcx.def_path_str(bound))));
+ }
+ for projection in &projections {
+ let p = projection.skip_binder();
+ // FIXME: this is not currently supported syntax, we should be looking at the `types` and
+ // insert the associated types where they correspond, but for now let's be "lazy" and
+ // propose this instead of the following valid resugaring:
+ // `T: Trait, Trait::Assoc = K` → `T: Trait<Assoc = K>`
+ where_clauses.push(format!(
+ "{} = {}",
+ tcx.def_path_str(p.projection_ty.item_def_id),
+ p.term,
+ ));
+ }
+ let where_clauses = if where_clauses.is_empty() {
+ String::new()
+ } else {
+ format!(" where {}", where_clauses.join(", "))
+ };
+ (generics, where_clauses)
+}
+
+/// Return placeholder code for the given function.
+fn fn_sig_suggestion<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ sig: ty::FnSig<'tcx>,
+ ident: Ident,
+ predicates: ty::GenericPredicates<'tcx>,
+ assoc: &ty::AssocItem,
+) -> String {
+ let args = sig
+ .inputs()
+ .iter()
+ .enumerate()
+ .map(|(i, ty)| {
+ Some(match ty.kind() {
+ ty::Param(_) if assoc.fn_has_self_parameter && i == 0 => "self".to_string(),
+ ty::Ref(reg, ref_ty, mutability) if i == 0 => {
+ let reg = format!("{reg} ");
+ let reg = match &reg[..] {
+ "'_ " | " " => "",
+ reg => reg,
+ };
+ if assoc.fn_has_self_parameter {
+ match ref_ty.kind() {
+ ty::Param(param) if param.name == kw::SelfUpper => {
+ format!("&{}{}self", reg, mutability.prefix_str())
+ }
+
+ _ => format!("self: {ty}"),
+ }
+ } else {
+ format!("_: {ty}")
+ }
+ }
+ _ => {
+ if assoc.fn_has_self_parameter && i == 0 {
+ format!("self: {ty}")
+ } else {
+ format!("_: {ty}")
+ }
+ }
+ })
+ })
+ .chain(std::iter::once(if sig.c_variadic { Some("...".to_string()) } else { None }))
+ .flatten()
+ .collect::<Vec<String>>()
+ .join(", ");
+ let output = sig.output();
+ let output = if !output.is_unit() { format!(" -> {output}") } else { String::new() };
+
+ let unsafety = sig.unsafety.prefix_str();
+ let (generics, where_clauses) = bounds_from_generic_predicates(tcx, predicates);
+
+ // FIXME: this is not entirely correct, as the lifetimes from borrowed params will
+ // not be present in the `fn` definition, nor will we account for renamed
+ // lifetimes between the `impl` and the `trait`, but this should be good enough to
+ // fill in a significant portion of the missing code, and other subsequent
+ // suggestions can help the user fix the code.
+ format!("{unsafety}fn {ident}{generics}({args}){output}{where_clauses} {{ todo!() }}")
+}
+
+pub fn ty_kind_suggestion(ty: Ty<'_>) -> Option<&'static str> {
+ Some(match ty.kind() {
+ ty::Bool => "true",
+ ty::Char => "'a'",
+ ty::Int(_) | ty::Uint(_) => "42",
+ ty::Float(_) => "3.14159",
+ ty::Error(_) | ty::Never => return None,
+ _ => "value",
+ })
+}
+
+/// Return placeholder code for the given associated item.
+/// Similar to `ty::AssocItem::suggestion`, but appropriate for use as the code snippet of a
+/// structured suggestion.
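+///
+/// For example, a missing `const LEN: usize;` is rendered roughly as
+/// `const LEN: usize = 42;`, and a missing `type Output;` as `type Output = Type;`.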
+fn suggestion_signature(assoc: &ty::AssocItem, tcx: TyCtxt<'_>) -> String {
+ match assoc.kind {
+ ty::AssocKind::Fn => {
+ // We skip the binder here because the binder would deanonymize all
+ // late-bound regions, and we don't want method signatures to show up
+ // as `for<'r> fn(&'r MyType)`. Pretty-printing handles late-bound
+ // regions just fine, showing `fn(&MyType)`.
+ fn_sig_suggestion(
+ tcx,
+ tcx.fn_sig(assoc.def_id).skip_binder(),
+ assoc.ident(tcx),
+ tcx.predicates_of(assoc.def_id),
+ assoc,
+ )
+ }
+ ty::AssocKind::Type => format!("type {} = Type;", assoc.name),
+ ty::AssocKind::Const => {
+ let ty = tcx.type_of(assoc.def_id);
+ let val = ty_kind_suggestion(ty).unwrap_or("value");
+ format!("const {}: {} = {};", assoc.name, ty, val)
+ }
+ }
+}
+
+/// Emit an error when encountering two or more variants in a transparent enum.
+fn bad_variant_count<'tcx>(tcx: TyCtxt<'tcx>, adt: ty::AdtDef<'tcx>, sp: Span, did: DefId) {
+ let variant_spans: Vec<_> = adt
+ .variants()
+ .iter()
+ .map(|variant| tcx.hir().span_if_local(variant.def_id).unwrap())
+ .collect();
+ let msg = format!("needs exactly one variant, but has {}", adt.variants().len(),);
+ let mut err = struct_span_err!(tcx.sess, sp, E0731, "transparent enum {msg}");
+ err.span_label(sp, &msg);
+ if let [start @ .., end] = &*variant_spans {
+ for variant_span in start {
+ err.span_label(*variant_span, "");
+ }
+ err.span_label(*end, &format!("too many variants in `{}`", tcx.def_path_str(did)));
+ }
+ err.emit();
+}
+
+/// Emit an error when encountering two or more non-zero-sized fields in a transparent
+/// enum.
+fn bad_non_zero_sized_fields<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ adt: ty::AdtDef<'tcx>,
+ field_count: usize,
+ field_spans: impl Iterator<Item = Span>,
+ sp: Span,
+) {
+ let msg = format!("needs at most one non-zero-sized field, but has {field_count}");
+ let mut err = struct_span_err!(
+ tcx.sess,
+ sp,
+ E0690,
+ "{}transparent {} {}",
+ if adt.is_enum() { "the variant of a " } else { "" },
+ adt.descr(),
+ msg,
+ );
+ err.span_label(sp, &msg);
+ for sp in field_spans {
+ err.span_label(sp, "this field is non-zero-sized");
+ }
+ err.emit();
+}
+
+// FIXME: Consider moving this method to a more fitting place.
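+// For example, `potentially_plural_count(1, "argument")` yields `1 argument`, while
+// `potentially_plural_count(3, "argument")` yields `3 arguments`.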
+pub fn potentially_plural_count(count: usize, word: &str) -> String {
+ format!("{} {}{}", count, word, pluralize!(count))
+}
diff --git a/compiler/rustc_typeck/src/check/region.rs b/compiler/rustc_hir_analysis/src/check/region.rs
index 0081e9049..ff32329e4 100644
--- a/compiler/rustc_typeck/src/check/region.rs
+++ b/compiler/rustc_hir_analysis/src/check/region.rs
@@ -126,6 +126,29 @@ fn resolve_block<'tcx>(visitor: &mut RegionResolutionVisitor<'tcx>, blk: &'tcx h
for (i, statement) in blk.stmts.iter().enumerate() {
match statement.kind {
+ hir::StmtKind::Local(hir::Local { els: Some(els), .. }) => {
+ // Let-else has a special lexical structure for variables.
+ // First we take a checkpoint of the current scope context here.
+ let mut prev_cx = visitor.cx;
+
+ visitor.enter_scope(Scope {
+ id: blk.hir_id.local_id,
+ data: ScopeData::Remainder(FirstStatementIndex::new(i)),
+ });
+ visitor.cx.var_parent = visitor.cx.parent;
+ visitor.visit_stmt(statement);
+ // We need to back out temporarily to the last enclosing scope
+ // for the `else` block, so that even the temporaries receiving
+ // extended lifetimes will be dropped inside this block.
+ // We are visiting the `else` block in this order so that
+ // the sequence of visits agrees with the order in the default
+ // `hir::intravisit` visitor.
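+ // For example, in `let Some(x) = opt else { return; };` the binding `x` is
+ // only in scope for the remainder of the enclosing block, and the `else`
+ // block (which cannot refer to `x`) is treated as a terminating scope of
+ // its own.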
+ mem::swap(&mut prev_cx, &mut visitor.cx);
+ visitor.terminating_scopes.insert(els.hir_id.local_id);
+ visitor.visit_block(els);
+ // From now on, we continue normally.
+ visitor.cx = prev_cx;
+ }
hir::StmtKind::Local(..) | hir::StmtKind::Item(..) => {
// Each declaration introduces a subscope for bindings
// introduced by the declaration; this subscope covers a
@@ -138,10 +161,10 @@ fn resolve_block<'tcx>(visitor: &mut RegionResolutionVisitor<'tcx>, blk: &'tcx h
data: ScopeData::Remainder(FirstStatementIndex::new(i)),
});
visitor.cx.var_parent = visitor.cx.parent;
+ visitor.visit_stmt(statement)
}
- hir::StmtKind::Expr(..) | hir::StmtKind::Semi(..) => {}
+ hir::StmtKind::Expr(..) | hir::StmtKind::Semi(..) => visitor.visit_stmt(statement),
}
- visitor.visit_stmt(statement)
}
walk_list!(visitor, visit_expr, &blk.expr);
}
@@ -229,9 +252,13 @@ fn resolve_expr<'tcx>(visitor: &mut RegionResolutionVisitor<'tcx>, expr: &'tcx h
) => {
// For shortcircuiting operators, mark the RHS as a terminating
// scope since it only executes conditionally.
- terminating(r.hir_id.local_id);
- }
+ // `Let` expressions (in a let-chain) shouldn't be terminating, as their temporaries
+ // should live beyond the immediate expression
+ if !matches!(r.kind, hir::ExprKind::Let(_)) {
+ terminating(r.hir_id.local_id);
+ }
+ }
hir::ExprKind::If(_, ref then, Some(ref otherwise)) => {
terminating(then.hir_id.local_id);
terminating(otherwise.hir_id.local_id);
@@ -460,7 +487,6 @@ fn resolve_local<'tcx>(
visitor: &mut RegionResolutionVisitor<'tcx>,
pat: Option<&'tcx hir::Pat<'tcx>>,
init: Option<&'tcx hir::Expr<'tcx>>,
- els: Option<&'tcx hir::Block<'tcx>>,
) {
debug!("resolve_local(pat={:?}, init={:?})", pat, init);
@@ -547,9 +573,6 @@ fn resolve_local<'tcx>(
if let Some(pat) = pat {
visitor.visit_pat(pat);
}
- if let Some(els) = els {
- visitor.visit_block(els);
- }
/// Returns `true` if `pat` matches the `P&` non-terminal.
///
@@ -587,8 +610,7 @@ fn resolve_local<'tcx>(
// & expression, and its lifetime would be extended to the end of the block (due
// to a different rule, not the below code).
match pat.kind {
- PatKind::Binding(hir::BindingAnnotation::Ref, ..)
- | PatKind::Binding(hir::BindingAnnotation::RefMut, ..) => true,
+ PatKind::Binding(hir::BindingAnnotation(hir::ByRef::Yes, _), ..) => true,
PatKind::Struct(_, ref field_pats, _) => {
field_pats.iter().any(|fp| is_binding_pat(&fp.pat))
@@ -607,10 +629,7 @@ fn resolve_local<'tcx>(
PatKind::Box(ref subpat) => is_binding_pat(&subpat),
PatKind::Ref(_, _)
- | PatKind::Binding(
- hir::BindingAnnotation::Unannotated | hir::BindingAnnotation::Mutable,
- ..,
- )
+ | PatKind::Binding(hir::BindingAnnotation(hir::ByRef::No, _), ..)
| PatKind::Wild
| PatKind::Path(_)
| PatKind::Lit(_)
@@ -770,7 +789,7 @@ impl<'tcx> Visitor<'tcx> for RegionResolutionVisitor<'tcx> {
// (i.e., `'static`), which means that after `g` returns, it drops,
// and all the associated destruction scope rules apply.
self.cx.var_parent = None;
- resolve_local(self, None, Some(&body.value), None);
+ resolve_local(self, None, Some(&body.value));
}
if body.generator_kind.is_some() {
@@ -797,7 +816,7 @@ impl<'tcx> Visitor<'tcx> for RegionResolutionVisitor<'tcx> {
resolve_expr(self, ex);
}
fn visit_local(&mut self, l: &'tcx Local<'tcx>) {
- resolve_local(self, Some(&l.pat), l.init, l.els)
+ resolve_local(self, Some(&l.pat), l.init)
}
}
diff --git a/compiler/rustc_typeck/src/check/wfcheck.rs b/compiler/rustc_hir_analysis/src/check/wfcheck.rs
index d0334cd0d..a23575004 100644
--- a/compiler/rustc_typeck/src/check/wfcheck.rs
+++ b/compiler/rustc_hir_analysis/src/check/wfcheck.rs
@@ -1,5 +1,5 @@
-use crate::check::regionck::OutlivesEnvironmentExt;
use crate::constrained_generic_params::{identify_constrained_generic_params, Parameter};
+use hir::def::DefKind;
use rustc_ast as ast;
use rustc_data_structures::fx::{FxHashMap, FxHashSet};
use rustc_errors::{pluralize, struct_span_err, Applicability, DiagnosticBuilder, ErrorGuaranteed};
@@ -10,22 +10,21 @@ use rustc_hir::ItemKind;
use rustc_infer::infer::outlives::env::{OutlivesEnvironment, RegionBoundPairs};
use rustc_infer::infer::outlives::obligations::TypeOutlives;
use rustc_infer::infer::{self, InferCtxt, TyCtxtInferExt};
-use rustc_infer::traits::Normalized;
+use rustc_middle::mir::ConstraintCategory;
use rustc_middle::ty::query::Providers;
-use rustc_middle::ty::subst::{GenericArgKind, InternalSubsts, Subst};
use rustc_middle::ty::trait_def::TraitSpecializationKind;
use rustc_middle::ty::{
self, AdtKind, DefIdTree, GenericParamDefKind, ToPredicate, Ty, TyCtxt, TypeFoldable,
TypeSuperVisitable, TypeVisitable, TypeVisitor,
};
+use rustc_middle::ty::{GenericArgKind, InternalSubsts};
use rustc_session::parse::feature_err;
use rustc_span::symbol::{sym, Ident, Symbol};
use rustc_span::{Span, DUMMY_SP};
use rustc_trait_selection::autoderef::Autoderef;
-use rustc_trait_selection::traits::error_reporting::InferCtxtExt;
+use rustc_trait_selection::traits::error_reporting::TypeErrCtxtExt;
+use rustc_trait_selection::traits::outlives_bounds::InferCtxtExt as _;
use rustc_trait_selection::traits::query::evaluate_obligation::InferCtxtExt as _;
-use rustc_trait_selection::traits::query::normalize::AtExt;
-use rustc_trait_selection::traits::query::NoSolution;
use rustc_trait_selection::traits::{
self, ObligationCause, ObligationCauseCode, ObligationCtxt, WellFormedLoc,
};
@@ -72,9 +71,11 @@ impl<'tcx> WfCheckingCtxt<'_, 'tcx> {
) {
let cause =
traits::ObligationCause::new(span, self.body_id, ObligationCauseCode::WellFormed(loc));
+ // For a type to be WF, we do not need to check whether its const trait predicates are satisfied.
+ let param_env = self.param_env.without_const();
self.ocx.register_obligation(traits::Obligation::new(
cause,
- self.param_env,
+ param_env,
ty::Binder::dummy(ty::PredicateKind::WellFormed(arg)).to_predicate(self.tcx()),
));
}
@@ -86,31 +87,35 @@ pub(super) fn enter_wf_checking_ctxt<'tcx, F>(
body_def_id: LocalDefId,
f: F,
) where
- F: for<'a> FnOnce(&WfCheckingCtxt<'a, 'tcx>) -> FxHashSet<Ty<'tcx>>,
+ F: for<'a> FnOnce(&WfCheckingCtxt<'a, 'tcx>),
{
let param_env = tcx.param_env(body_def_id);
let body_id = tcx.hir().local_def_id_to_hir_id(body_def_id);
- tcx.infer_ctxt().enter(|ref infcx| {
- let ocx = ObligationCtxt::new(infcx);
- let mut wfcx = WfCheckingCtxt { ocx, span, body_id, param_env };
+ let infcx = &tcx.infer_ctxt().build();
+ let ocx = ObligationCtxt::new(infcx);
- if !tcx.features().trivial_bounds {
- wfcx.check_false_global_bounds()
- }
- let wf_tys = f(&mut wfcx);
- let errors = wfcx.select_all_or_error();
- if !errors.is_empty() {
- infcx.report_fulfillment_errors(&errors, None, false);
- return;
- }
+ let assumed_wf_types = ocx.assumed_wf_types(param_env, span, body_def_id);
- let mut outlives_environment = OutlivesEnvironment::new(param_env);
- outlives_environment.add_implied_bounds(infcx, wf_tys, body_id);
- infcx.check_region_obligations_and_report_errors(body_def_id, &outlives_environment);
- })
+ let mut wfcx = WfCheckingCtxt { ocx, span, body_id, param_env };
+
+ if !tcx.features().trivial_bounds {
+ wfcx.check_false_global_bounds()
+ }
+ f(&mut wfcx);
+ let errors = wfcx.select_all_or_error();
+ if !errors.is_empty() {
+ infcx.err_ctxt().report_fulfillment_errors(&errors, None, false);
+ return;
+ }
+
+ let implied_bounds = infcx.implied_bounds_tys(param_env, body_id, assumed_wf_types);
+ let outlives_environment =
+ OutlivesEnvironment::with_bounds(param_env, Some(infcx), implied_bounds);
+
+ infcx.check_region_obligations_and_report_errors(body_def_id, &outlives_environment);
}
-fn check_well_formed(tcx: TyCtxt<'_>, def_id: LocalDefId) {
+fn check_well_formed(tcx: TyCtxt<'_>, def_id: hir::OwnerId) {
let node = tcx.hir().expect_owner(def_id);
match node {
hir::OwnerNode::Crate(_) => {}
@@ -142,10 +147,10 @@ fn check_well_formed(tcx: TyCtxt<'_>, def_id: LocalDefId) {
/// the types first.
#[instrument(skip(tcx), level = "debug")]
fn check_item<'tcx>(tcx: TyCtxt<'tcx>, item: &'tcx hir::Item<'tcx>) {
- let def_id = item.def_id;
+ let def_id = item.owner_id.def_id;
debug!(
- ?item.def_id,
+ ?item.owner_id,
item.name = ? tcx.def_path_str(def_id.to_def_id())
);
@@ -169,7 +174,7 @@ fn check_item<'tcx>(tcx: TyCtxt<'tcx>, item: &'tcx hir::Item<'tcx>) {
// for `T`
hir::ItemKind::Impl(ref impl_) => {
let is_auto = tcx
- .impl_trait_ref(item.def_id)
+ .impl_trait_ref(def_id)
.map_or(false, |trait_ref| tcx.trait_is_auto(trait_ref.def_id));
if let (hir::Defaultness::Default { .. }, true) = (impl_.defaultness, is_auto) {
let sp = impl_.of_trait.as_ref().map_or(item.span, |t| t.path.span);
@@ -205,13 +210,13 @@ fn check_item<'tcx>(tcx: TyCtxt<'tcx>, item: &'tcx hir::Item<'tcx>) {
}
}
hir::ItemKind::Fn(ref sig, ..) => {
- check_item_fn(tcx, item.def_id, item.ident, item.span, sig.decl);
+ check_item_fn(tcx, def_id, item.ident, item.span, sig.decl);
}
hir::ItemKind::Static(ty, ..) => {
- check_item_type(tcx, item.def_id, ty.span, false);
+ check_item_type(tcx, def_id, ty.span, false);
}
hir::ItemKind::Const(ty, ..) => {
- check_item_type(tcx, item.def_id, ty.span, false);
+ check_item_type(tcx, def_id, ty.span, false);
}
hir::ItemKind::Struct(ref struct_def, ref ast_generics) => {
check_type_defn(tcx, item, false, |wfcx| vec![wfcx.non_enum_variant(struct_def)]);
@@ -241,24 +246,24 @@ fn check_item<'tcx>(tcx: TyCtxt<'tcx>, item: &'tcx hir::Item<'tcx>) {
}
fn check_foreign_item(tcx: TyCtxt<'_>, item: &hir::ForeignItem<'_>) {
- let def_id = item.def_id;
+ let def_id = item.owner_id.def_id;
debug!(
- ?item.def_id,
+ ?item.owner_id,
item.name = ? tcx.def_path_str(def_id.to_def_id())
);
match item.kind {
hir::ForeignItemKind::Fn(decl, ..) => {
- check_item_fn(tcx, item.def_id, item.ident, item.span, decl)
+ check_item_fn(tcx, def_id, item.ident, item.span, decl)
}
- hir::ForeignItemKind::Static(ty, ..) => check_item_type(tcx, item.def_id, ty.span, true),
+ hir::ForeignItemKind::Static(ty, ..) => check_item_type(tcx, def_id, ty.span, true),
hir::ForeignItemKind::Type => (),
}
}
fn check_trait_item(tcx: TyCtxt<'_>, trait_item: &hir::TraitItem<'_>) {
- let def_id = trait_item.def_id;
+ let def_id = trait_item.owner_id.def_id;
let (method_sig, span) = match trait_item.kind {
hir::TraitItemKind::Fn(ref sig, _) => (Some(sig), trait_item.span),
@@ -266,11 +271,11 @@ fn check_trait_item(tcx: TyCtxt<'_>, trait_item: &hir::TraitItem<'_>) {
_ => (None, trait_item.span),
};
check_object_unsafe_self_trait_by_name(tcx, trait_item);
- check_associated_item(tcx, trait_item.def_id, span, method_sig);
+ check_associated_item(tcx, def_id, span, method_sig);
let encl_trait_def_id = tcx.local_parent(def_id);
let encl_trait = tcx.hir().expect_item(encl_trait_def_id);
- let encl_trait_def_id = encl_trait.def_id.to_def_id();
+ let encl_trait_def_id = encl_trait.owner_id.to_def_id();
let fn_lang_item_name = if Some(encl_trait_def_id) == tcx.lang_items().fn_trait() {
Some("fn")
} else if Some(encl_trait_def_id) == tcx.lang_items().fn_mut_trait() {
@@ -343,7 +348,7 @@ fn check_gat_where_clauses(tcx: TyCtxt<'_>, associated_items: &[hir::TraitItemRe
loop {
let mut should_continue = false;
for gat_item in associated_items {
- let gat_def_id = gat_item.id.def_id;
+ let gat_def_id = gat_item.id.owner_id;
let gat_item = tcx.associated_item(gat_def_id);
// If this item is not an assoc ty, or has no substs, then it's not a GAT
if gat_item.kind != ty::AssocKind::Type {
@@ -360,7 +365,7 @@ fn check_gat_where_clauses(tcx: TyCtxt<'_>, associated_items: &[hir::TraitItemRe
// constrains the GAT with individually.
let mut new_required_bounds: Option<FxHashSet<ty::Predicate<'_>>> = None;
for item in associated_items {
- let item_def_id = item.id.def_id;
+ let item_def_id = item.id.owner_id;
// Skip our own GAT, since it does not constrain itself at all.
if item_def_id == gat_def_id {
continue;
@@ -383,11 +388,11 @@ fn check_gat_where_clauses(tcx: TyCtxt<'_>, associated_items: &[hir::TraitItemRe
tcx,
param_env,
item_hir_id,
- sig.output(),
+ sig.inputs_and_output,
// We also assume that all of the function signature's parameter types
// are well formed.
&sig.inputs().iter().copied().collect(),
- gat_def_id,
+ gat_def_id.def_id,
gat_generics,
)
}
@@ -410,7 +415,7 @@ fn check_gat_where_clauses(tcx: TyCtxt<'_>, associated_items: &[hir::TraitItemRe
.copied()
.collect::<Vec<_>>(),
&FxHashSet::default(),
- gat_def_id,
+ gat_def_id.def_id,
gat_generics,
)
}
@@ -450,7 +455,7 @@ fn check_gat_where_clauses(tcx: TyCtxt<'_>, associated_items: &[hir::TraitItemRe
}
for (gat_def_id, required_bounds) in required_bounds_by_item {
- let gat_item_hir = tcx.hir().expect_trait_item(gat_def_id);
+ let gat_item_hir = tcx.hir().expect_trait_item(gat_def_id.def_id);
debug!(?required_bounds);
let param_env = tcx.param_env(gat_def_id);
let gat_hir = gat_item_hir.hir_id();
@@ -658,7 +663,7 @@ fn ty_known_to_outlive<'tcx>(
resolve_regions_with_wf_tys(tcx, id, param_env, &wf_tys, |infcx, region_bound_pairs| {
let origin = infer::RelateParamBound(DUMMY_SP, ty, None);
let outlives = &mut TypeOutlives::new(infcx, tcx, region_bound_pairs, None, param_env);
- outlives.type_must_outlive(origin, ty, region);
+ outlives.type_must_outlive(origin, ty, region, ConstraintCategory::BoringNoLocation);
})
}
@@ -676,7 +681,12 @@ fn region_known_to_outlive<'tcx>(
use rustc_infer::infer::outlives::obligations::TypeOutlivesDelegate;
let origin = infer::RelateRegionParamBound(DUMMY_SP);
// `region_a: region_b` -> `region_b <= region_a`
- infcx.push_sub_region_constraint(origin, region_b, region_a);
+ infcx.push_sub_region_constraint(
+ origin,
+ region_b,
+ region_a,
+ ConstraintCategory::BoringNoLocation,
+ );
})
}
@@ -688,26 +698,32 @@ fn resolve_regions_with_wf_tys<'tcx>(
id: hir::HirId,
param_env: ty::ParamEnv<'tcx>,
wf_tys: &FxHashSet<Ty<'tcx>>,
- add_constraints: impl for<'a> FnOnce(&'a InferCtxt<'a, 'tcx>, &'a RegionBoundPairs<'tcx>),
+ add_constraints: impl for<'a> FnOnce(&'a InferCtxt<'tcx>, &'a RegionBoundPairs<'tcx>),
) -> bool {
// Unfortunately, we have to use a new `InferCtxt` each call, because
// region constraints get added and solved there and we need to test each
// call individually.
- tcx.infer_ctxt().enter(|infcx| {
- let mut outlives_environment = OutlivesEnvironment::new(param_env);
- outlives_environment.add_implied_bounds(&infcx, wf_tys.clone(), id);
- let region_bound_pairs = outlives_environment.region_bound_pairs();
+ let infcx = tcx.infer_ctxt().build();
+ let outlives_environment = OutlivesEnvironment::with_bounds(
+ param_env,
+ Some(&infcx),
+ infcx.implied_bounds_tys(param_env, id, wf_tys.clone()),
+ );
+ let region_bound_pairs = outlives_environment.region_bound_pairs();
- add_constraints(&infcx, region_bound_pairs);
+ add_constraints(&infcx, region_bound_pairs);
- let errors = infcx.resolve_regions(&outlives_environment);
+ infcx.process_registered_region_obligations(
+ outlives_environment.region_bound_pairs(),
+ param_env,
+ );
+ let errors = infcx.resolve_regions(&outlives_environment);
- debug!(?errors, "errors");
+ debug!(?errors, "errors");
- // If we were able to prove that the type outlives the region without
- // an error, it must be because of the implied or explicit bounds...
- errors.is_empty()
- })
+ // If we were able to prove that the type outlives the region without
+ // an error, it must be because of the implied or explicit bounds...
+ errors.is_empty()
}
/// TypeVisitor that looks for uses of GATs like
@@ -761,7 +777,7 @@ impl<'tcx> TypeVisitor<'tcx> for GATSubstCollector<'tcx> {
fn could_be_self(trait_def_id: LocalDefId, ty: &hir::Ty<'_>) -> bool {
match ty.kind {
hir::TyKind::TraitObject([trait_ref], ..) => match trait_ref.trait_ref.path.segments {
- [s] => s.res.and_then(|r| r.opt_def_id()) == Some(trait_def_id.to_def_id()),
+ [s] => s.res.opt_def_id() == Some(trait_def_id.to_def_id()),
_ => false,
},
_ => false,
@@ -772,9 +788,9 @@ fn could_be_self(trait_def_id: LocalDefId, ty: &hir::Ty<'_>) -> bool {
/// When this is done, suggest using `Self` instead.
fn check_object_unsafe_self_trait_by_name(tcx: TyCtxt<'_>, item: &hir::TraitItem<'_>) {
let (trait_name, trait_def_id) =
- match tcx.hir().get_by_def_id(tcx.hir().get_parent_item(item.hir_id())) {
+ match tcx.hir().get_by_def_id(tcx.hir().get_parent_item(item.hir_id()).def_id) {
hir::Node::Item(item) => match item.kind {
- hir::ItemKind::Trait(..) => (item.ident, item.def_id),
+ hir::ItemKind::Trait(..) => (item.ident, item.owner_id),
_ => return,
},
_ => return,
@@ -782,18 +798,18 @@ fn check_object_unsafe_self_trait_by_name(tcx: TyCtxt<'_>, item: &hir::TraitItem
let mut trait_should_be_self = vec![];
match &item.kind {
hir::TraitItemKind::Const(ty, _) | hir::TraitItemKind::Type(_, Some(ty))
- if could_be_self(trait_def_id, ty) =>
+ if could_be_self(trait_def_id.def_id, ty) =>
{
trait_should_be_self.push(ty.span)
}
hir::TraitItemKind::Fn(sig, _) => {
for ty in sig.decl.inputs {
- if could_be_self(trait_def_id, ty) {
+ if could_be_self(trait_def_id.def_id, ty) {
trait_should_be_self.push(ty.span);
}
}
match sig.decl.output {
- hir::FnRetTy::Return(ty) if could_be_self(trait_def_id, ty) => {
+ hir::FnRetTy::Return(ty) if could_be_self(trait_def_id.def_id, ty) => {
trait_should_be_self.push(ty.span);
}
_ => {}
@@ -822,16 +838,14 @@ fn check_object_unsafe_self_trait_by_name(tcx: TyCtxt<'_>, item: &hir::TraitItem
}
fn check_impl_item(tcx: TyCtxt<'_>, impl_item: &hir::ImplItem<'_>) {
- let def_id = impl_item.def_id;
-
let (method_sig, span) = match impl_item.kind {
hir::ImplItemKind::Fn(ref sig, _) => (Some(sig), impl_item.span),
// Constrain binding and overflow error spans to `<Ty>` in `type foo = <Ty>`.
- hir::ImplItemKind::TyAlias(ty) if ty.span != DUMMY_SP => (None, ty.span),
+ hir::ImplItemKind::Type(ty) if ty.span != DUMMY_SP => (None, ty.span),
_ => (None, impl_item.span),
};
- check_associated_item(tcx, def_id, span, method_sig);
+ check_associated_item(tcx, impl_item.owner_id.def_id, span, method_sig);
}
fn check_param_wf(tcx: TyCtxt<'_>, param: &hir::GenericParam<'_>) {
@@ -965,7 +979,7 @@ fn check_param_wf(tcx: TyCtxt<'_>, param: &hir::GenericParam<'_>) {
}
}
-#[tracing::instrument(level = "debug", skip(tcx, span, sig_if_method))]
+#[instrument(level = "debug", skip(tcx, span, sig_if_method))]
fn check_associated_item(
tcx: TyCtxt<'_>,
item_id: LocalDefId,
@@ -976,15 +990,9 @@ fn check_associated_item(
enter_wf_checking_ctxt(tcx, span, item_id, |wfcx| {
let item = tcx.associated_item(item_id);
- let (mut implied_bounds, self_ty) = match item.container {
- ty::TraitContainer => (FxHashSet::default(), tcx.types.self_param),
- ty::ImplContainer => {
- let def_id = item.container_id(tcx);
- (
- impl_implied_bounds(tcx, wfcx.param_env, def_id.expect_local(), span),
- tcx.type_of(def_id),
- )
- }
+ let self_ty = match item.container {
+ ty::TraitContainer => tcx.types.self_param,
+ ty::ImplContainer => tcx.type_of(item.container_id(tcx)),
};
match item.kind {
@@ -1002,7 +1010,6 @@ fn check_associated_item(
sig,
hir_sig.decl,
item.def_id.expect_local(),
- &mut implied_bounds,
);
check_method_receiver(wfcx, hir_sig, item, self_ty);
}
@@ -1017,8 +1024,6 @@ fn check_associated_item(
}
}
}
-
- implied_bounds
})
}
@@ -1040,9 +1045,11 @@ fn check_type_defn<'tcx, F>(
) where
F: FnMut(&WfCheckingCtxt<'_, 'tcx>) -> Vec<AdtVariant<'tcx>>,
{
- enter_wf_checking_ctxt(tcx, item.span, item.def_id, |wfcx| {
+ let _ = tcx.representability(item.owner_id.def_id);
+
+ enter_wf_checking_ctxt(tcx, item.span, item.owner_id.def_id, |wfcx| {
let variants = lookup_fields(wfcx);
- let packed = tcx.adt_def(item.def_id).repr().packed();
+ let packed = tcx.adt_def(item.owner_id).repr().packed();
for variant in &variants {
// All field types must be well-formed.
@@ -1066,7 +1073,7 @@ fn check_type_defn<'tcx, F>(
// Just treat unresolved type expression as if it needs drop.
true
} else {
- ty.needs_drop(tcx, tcx.param_env(item.def_id))
+ ty.needs_drop(tcx, tcx.param_env(item.owner_id))
}
}
};
@@ -1098,8 +1105,6 @@ fn check_type_defn<'tcx, F>(
// Explicit `enum` discriminant values must const-evaluate successfully.
if let Some(discr_def_id) = variant.explicit_discr {
- let discr_substs = InternalSubsts::identity_for_item(tcx, discr_def_id.to_def_id());
-
let cause = traits::ObligationCause::new(
tcx.def_span(discr_def_id),
wfcx.body_id,
@@ -1108,31 +1113,28 @@ fn check_type_defn<'tcx, F>(
wfcx.register_obligation(traits::Obligation::new(
cause,
wfcx.param_env,
- ty::Binder::dummy(ty::PredicateKind::ConstEvaluatable(ty::Unevaluated::new(
- ty::WithOptConstParam::unknown(discr_def_id.to_def_id()),
- discr_substs,
- )))
+ ty::Binder::dummy(ty::PredicateKind::ConstEvaluatable(
+ ty::Const::from_anon_const(tcx, discr_def_id),
+ ))
.to_predicate(tcx),
));
}
}
- check_where_clauses(wfcx, item.span, item.def_id);
-
- // No implied bounds in a struct definition.
- FxHashSet::default()
+ check_where_clauses(wfcx, item.span, item.owner_id.def_id);
});
}
#[instrument(skip(tcx, item))]
fn check_trait(tcx: TyCtxt<'_>, item: &hir::Item<'_>) {
- debug!(?item.def_id);
+ debug!(?item.owner_id);
- let trait_def = tcx.trait_def(item.def_id);
+ let def_id = item.owner_id.def_id;
+ let trait_def = tcx.trait_def(def_id);
if trait_def.is_marker
|| matches!(trait_def.specialization_kind, TraitSpecializationKind::Marker)
{
- for associated_def_id in &*tcx.associated_item_def_ids(item.def_id) {
+ for associated_def_id in &*tcx.associated_item_def_ids(def_id) {
struct_span_err!(
tcx.sess,
tcx.def_span(*associated_def_id),
@@ -1143,10 +1145,8 @@ fn check_trait(tcx: TyCtxt<'_>, item: &hir::Item<'_>) {
}
}
- enter_wf_checking_ctxt(tcx, item.span, item.def_id, |wfcx| {
- check_where_clauses(wfcx, item.span, item.def_id);
-
- FxHashSet::default()
+ enter_wf_checking_ctxt(tcx, item.span, def_id, |wfcx| {
+ check_where_clauses(wfcx, item.span, def_id)
});
// Only check traits, don't check trait aliases
@@ -1186,9 +1186,7 @@ fn check_item_fn(
) {
enter_wf_checking_ctxt(tcx, span, def_id, |wfcx| {
let sig = tcx.fn_sig(def_id);
- let mut implied_bounds = FxHashSet::default();
- check_fn_or_method(wfcx, ident.span, sig, decl, def_id, &mut implied_bounds);
- implied_bounds
+ check_fn_or_method(wfcx, ident.span, sig, decl, def_id);
})
}
@@ -1231,13 +1229,10 @@ fn check_item_type(tcx: TyCtxt<'_>, item_id: LocalDefId, ty_span: Span, allow_fo
tcx.require_lang_item(LangItem::Sync, Some(ty_span)),
);
}
-
- // No implied bounds in a const, etc.
- FxHashSet::default()
});
}
-#[tracing::instrument(level = "debug", skip(tcx, ast_self_ty, ast_trait_ref))]
+#[instrument(level = "debug", skip(tcx, ast_self_ty, ast_trait_ref))]
fn check_impl<'tcx>(
tcx: TyCtxt<'tcx>,
item: &'tcx hir::Item<'tcx>,
@@ -1245,13 +1240,13 @@ fn check_impl<'tcx>(
ast_trait_ref: &Option<hir::TraitRef<'_>>,
constness: hir::Constness,
) {
- enter_wf_checking_ctxt(tcx, item.span, item.def_id, |wfcx| {
+ enter_wf_checking_ctxt(tcx, item.span, item.owner_id.def_id, |wfcx| {
match *ast_trait_ref {
Some(ref ast_trait_ref) => {
// `#[rustc_reservation_impl]` impls are not real impls and
// therefore don't need to be WF (the trait's `Self: Trait` predicate
// won't hold).
- let trait_ref = tcx.impl_trait_ref(item.def_id).unwrap();
+ let trait_ref = tcx.impl_trait_ref(item.owner_id).unwrap();
let trait_ref = wfcx.normalize(ast_trait_ref.path.span, None, trait_ref);
let trait_pred = ty::TraitPredicate {
trait_ref,
@@ -1273,19 +1268,21 @@ fn check_impl<'tcx>(
wfcx.register_obligations(obligations);
}
None => {
- let self_ty = tcx.type_of(item.def_id);
- let self_ty = wfcx.normalize(item.span, None, self_ty);
+ let self_ty = tcx.type_of(item.owner_id);
+ let self_ty = wfcx.normalize(
+ item.span,
+ Some(WellFormedLoc::Ty(item.hir_id().expect_owner().def_id)),
+ self_ty,
+ );
wfcx.register_wf_obligation(
ast_self_ty.span,
- Some(WellFormedLoc::Ty(item.hir_id().expect_owner())),
+ Some(WellFormedLoc::Ty(item.hir_id().expect_owner().def_id)),
self_ty.into(),
);
}
}
- check_where_clauses(wfcx, item.span, item.def_id);
-
- impl_implied_bounds(tcx, wfcx.param_env, item.def_id, item.span)
+ check_where_clauses(wfcx, item.span, item.owner_id.def_id);
});
}
@@ -1321,7 +1318,11 @@ fn check_where_clauses<'tcx>(wfcx: &WfCheckingCtxt<'_, 'tcx>, span: Span, def_id
// parameter includes another (e.g., `<T, U = T>`). In those cases, we can't
// be sure if it will error or not as user might always specify the other.
if !ty.needs_subst() {
- wfcx.register_wf_obligation(tcx.def_span(param.def_id), None, ty.into());
+ wfcx.register_wf_obligation(
+ tcx.def_span(param.def_id),
+ Some(WellFormedLoc::Ty(param.def_id.expect_local())),
+ ty.into(),
+ );
}
}
}
@@ -1426,9 +1427,7 @@ fn check_where_clauses<'tcx>(wfcx: &WfCheckingCtxt<'_, 'tcx>, span: Span, def_id
let substituted_pred = predicates.rebind(pred).subst(tcx, substs);
// Don't check non-defaulted params, dependent defaults (including lifetimes)
// or preds with multiple params.
- if substituted_pred.has_param_types_or_consts()
- || param_count.params.len() > 1
- || has_region
+ if substituted_pred.has_non_region_param() || param_count.params.len() > 1 || has_region
{
None
} else if predicates.0.predicates.iter().any(|&(p, _)| p == substituted_pred) {
@@ -1465,21 +1464,26 @@ fn check_where_clauses<'tcx>(wfcx: &WfCheckingCtxt<'_, 'tcx>, span: Span, def_id
assert_eq!(predicates.predicates.len(), predicates.spans.len());
let wf_obligations =
iter::zip(&predicates.predicates, &predicates.spans).flat_map(|(&p, &sp)| {
- traits::wf::predicate_obligations(infcx, wfcx.param_env, wfcx.body_id, p, sp)
+ traits::wf::predicate_obligations(
+ infcx,
+ wfcx.param_env.without_const(),
+ wfcx.body_id,
+ p,
+ sp,
+ )
});
let obligations: Vec<_> = wf_obligations.chain(default_obligations).collect();
wfcx.register_obligations(obligations);
}
-#[tracing::instrument(level = "debug", skip(wfcx, span, hir_decl))]
+#[instrument(level = "debug", skip(wfcx, span, hir_decl))]
fn check_fn_or_method<'tcx>(
wfcx: &WfCheckingCtxt<'_, 'tcx>,
span: Span,
sig: ty::PolyFnSig<'tcx>,
hir_decl: &hir::FnDecl<'_>,
def_id: LocalDefId,
- implied_bounds: &mut FxHashSet<Ty<'tcx>>,
) {
let tcx = wfcx.tcx();
let sig = tcx.liberate_late_bound_regions(def_id.to_def_id(), sig);
@@ -1521,23 +1525,66 @@ fn check_fn_or_method<'tcx>(
);
}
- implied_bounds.extend(sig.inputs());
-
- wfcx.register_wf_obligation(hir_decl.output.span(), None, sig.output().into());
+ wfcx.register_wf_obligation(
+ hir_decl.output.span(),
+ Some(WellFormedLoc::Param {
+ function: def_id,
+ param_idx: sig.inputs().len().try_into().unwrap(),
+ }),
+ sig.output().into(),
+ );
- // FIXME(#27579) return types should not be implied bounds
- implied_bounds.insert(sig.output());
+ check_where_clauses(wfcx, span, def_id);
- debug!(?implied_bounds);
+ check_return_position_impl_trait_in_trait_bounds(
+ tcx,
+ wfcx,
+ def_id,
+ sig.output(),
+ hir_decl.output.span(),
+ );
+}
- check_where_clauses(wfcx, span, def_id);
+/// Basically `check_associated_type_bounds`, but separated for now and should be
+/// deduplicated when RPITITs get lowered into real associated items.
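+///
+/// For example (illustrative), for `trait Foo { fn bar(&self) -> impl std::fmt::Debug; }`
+/// the `Debug` bound written on the opaque return type is checked for
+/// well-formedness here while `bar` itself is being checked.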
+fn check_return_position_impl_trait_in_trait_bounds<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ wfcx: &WfCheckingCtxt<'_, 'tcx>,
+ fn_def_id: LocalDefId,
+ fn_output: Ty<'tcx>,
+ span: Span,
+) {
+ if let Some(assoc_item) = tcx.opt_associated_item(fn_def_id.to_def_id())
+ && assoc_item.container == ty::AssocItemContainer::TraitContainer
+ {
+ for arg in fn_output.walk() {
+ if let ty::GenericArgKind::Type(ty) = arg.unpack()
+ && let ty::Projection(proj) = ty.kind()
+ && tcx.def_kind(proj.item_def_id) == DefKind::ImplTraitPlaceholder
+ && tcx.impl_trait_in_trait_parent(proj.item_def_id) == fn_def_id.to_def_id()
+ {
+ let bounds = wfcx.tcx().explicit_item_bounds(proj.item_def_id);
+ let wf_obligations = bounds.iter().flat_map(|&(bound, bound_span)| {
+ let normalized_bound = wfcx.normalize(span, None, bound);
+ traits::wf::predicate_obligations(
+ wfcx.infcx,
+ wfcx.param_env,
+ wfcx.body_id,
+ normalized_bound,
+ bound_span,
+ )
+ });
+ wfcx.register_obligations(wf_obligations);
+ }
+ }
+ }
}
const HELP_FOR_SELF_TYPE: &str = "consider changing to `self`, `&self`, `&mut self`, `self: Box<Self>`, \
`self: Rc<Self>`, `self: Arc<Self>`, or `self: Pin<P>` (where P is one \
of the previous types except `Self`)";
-#[tracing::instrument(level = "debug", skip(wfcx))]
+#[instrument(level = "debug", skip(wfcx))]
fn check_method_receiver<'tcx>(
wfcx: &WfCheckingCtxt<'_, 'tcx>,
fn_sig: &hir::FnSig<'_>,
@@ -1629,7 +1676,7 @@ fn receiver_is_valid<'tcx>(
// `self: Self` is always valid.
if can_eq_self(receiver_ty) {
if let Err(err) = wfcx.equate_types(&cause, wfcx.param_env, self_ty, receiver_ty) {
- infcx.report_mismatched_types(&cause, self_ty, receiver_ty, err).emit();
+ infcx.err_ctxt().report_mismatched_types(&cause, self_ty, receiver_ty, err).emit();
}
return true;
}
@@ -1661,7 +1708,10 @@ fn receiver_is_valid<'tcx>(
if let Err(err) =
wfcx.equate_types(&cause, wfcx.param_env, self_ty, potential_self_ty)
{
- infcx.report_mismatched_types(&cause, self_ty, potential_self_ty, err).emit();
+ infcx
+ .err_ctxt()
+ .report_mismatched_types(&cause, self_ty, potential_self_ty, err)
+ .emit();
}
break;
@@ -1728,14 +1778,14 @@ fn check_variances_for_type_defn<'tcx>(
item: &hir::Item<'tcx>,
hir_generics: &hir::Generics<'_>,
) {
- let ty = tcx.type_of(item.def_id);
+ let ty = tcx.type_of(item.owner_id);
if tcx.has_error_field(ty) {
return;
}
- let ty_predicates = tcx.predicates_of(item.def_id);
+ let ty_predicates = tcx.predicates_of(item.owner_id);
assert_eq!(ty_predicates.parent, None);
- let variances = tcx.variances_of(item.def_id);
+ let variances = tcx.variances_of(item.owner_id);
let mut constrained_parameters: FxHashSet<_> = variances
.iter()
@@ -1748,7 +1798,7 @@ fn check_variances_for_type_defn<'tcx>(
// Lazily calculated because it is only needed in case of an error.
let explicitly_bounded_params = LazyCell::new(|| {
- let icx = crate::collect::ItemCtxt::new(tcx, item.def_id.to_def_id());
+ let icx = crate::collect::ItemCtxt::new(tcx, item.owner_id.to_def_id());
hir_generics
.predicates
.iter()
@@ -1817,6 +1867,7 @@ fn report_bivariance(
impl<'tcx> WfCheckingCtxt<'_, 'tcx> {
/// Feature gates RFC 2056 -- trivial bounds, checking for global bounds that
/// aren't true.
+ #[instrument(level = "debug", skip(self))]
fn check_false_global_bounds(&mut self) {
let tcx = self.ocx.infcx.tcx;
let mut span = self.span;
@@ -1868,10 +1919,10 @@ impl<'tcx> WfCheckingCtxt<'_, 'tcx> {
fn check_mod_type_wf(tcx: TyCtxt<'_>, module: LocalDefId) {
let items = tcx.hir_module_items(module);
- items.par_items(|item| tcx.ensure().check_well_formed(item.def_id));
- items.par_impl_items(|item| tcx.ensure().check_well_formed(item.def_id));
- items.par_trait_items(|item| tcx.ensure().check_well_formed(item.def_id));
- items.par_foreign_items(|item| tcx.ensure().check_well_formed(item.def_id));
+ items.par_items(|item| tcx.ensure().check_well_formed(item.owner_id));
+ items.par_impl_items(|item| tcx.ensure().check_well_formed(item.owner_id));
+ items.par_trait_items(|item| tcx.ensure().check_well_formed(item.owner_id));
+ items.par_foreign_items(|item| tcx.ensure().check_well_formed(item.owner_id));
}
///////////////////////////////////////////////////////////////////////////
@@ -1924,40 +1975,6 @@ impl<'a, 'tcx> WfCheckingCtxt<'a, 'tcx> {
}
}
-pub fn impl_implied_bounds<'tcx>(
- tcx: TyCtxt<'tcx>,
- param_env: ty::ParamEnv<'tcx>,
- impl_def_id: LocalDefId,
- span: Span,
-) -> FxHashSet<Ty<'tcx>> {
- // We completely ignore any obligations caused by normalizing the types
- // we assume to be well formed. Considering that the user of the implied
- // bounds will also normalize them, we leave it to them to emit errors
- // which should result in better causes and spans.
- tcx.infer_ctxt().enter(|infcx| {
- let cause = ObligationCause::misc(span, tcx.hir().local_def_id_to_hir_id(impl_def_id));
- match tcx.impl_trait_ref(impl_def_id) {
- Some(trait_ref) => {
- // Trait impl: take implied bounds from all types that
- // appear in the trait reference.
- match infcx.at(&cause, param_env).normalize(trait_ref) {
- Ok(Normalized { value, obligations: _ }) => value.substs.types().collect(),
- Err(NoSolution) => FxHashSet::default(),
- }
- }
-
- None => {
- // Inherent impl: take implied bounds from the `self` type.
- let self_ty = tcx.type_of(impl_def_id);
- match infcx.at(&cause, param_env).normalize(self_ty) {
- Ok(Normalized { value, obligations: _ }) => FxHashSet::from_iter([value]),
- Err(NoSolution) => FxHashSet::default(),
- }
- }
- }
- })
-}
-
fn error_392(
tcx: TyCtxt<'_>,
span: Span,
diff --git a/compiler/rustc_hir_analysis/src/check_unused.rs b/compiler/rustc_hir_analysis/src/check_unused.rs
new file mode 100644
index 000000000..d0c317334
--- /dev/null
+++ b/compiler/rustc_hir_analysis/src/check_unused.rs
@@ -0,0 +1,192 @@
+use crate::errors::{ExternCrateNotIdiomatic, UnusedExternCrate};
+use rustc_data_structures::fx::FxHashMap;
+use rustc_data_structures::unord::UnordSet;
+use rustc_hir as hir;
+use rustc_hir::def::DefKind;
+use rustc_hir::def_id::{DefId, LocalDefId};
+use rustc_middle::ty::TyCtxt;
+use rustc_session::lint;
+use rustc_span::{Span, Symbol};
+
+pub fn check_crate(tcx: TyCtxt<'_>) {
+ let mut used_trait_imports: UnordSet<LocalDefId> = Default::default();
+
+ for item_def_id in tcx.hir().body_owners() {
+ let imports = tcx.used_trait_imports(item_def_id);
+ debug!("GatherVisitor: item_def_id={:?} with imports {:#?}", item_def_id, imports);
+ used_trait_imports.extend(imports.items().copied());
+ }
+
+ for &id in tcx.maybe_unused_trait_imports(()) {
+ debug_assert_eq!(tcx.def_kind(id), DefKind::Use);
+ if tcx.visibility(id).is_public() {
+ continue;
+ }
+ if used_trait_imports.contains(&id) {
+ continue;
+ }
+ let item = tcx.hir().expect_item(id);
+ if item.span.is_dummy() {
+ continue;
+ }
+ let hir::ItemKind::Use(path, _) = item.kind else { unreachable!() };
+ let msg = if let Ok(snippet) = tcx.sess.source_map().span_to_snippet(path.span) {
+ format!("unused import: `{}`", snippet)
+ } else {
+ "unused import".to_owned()
+ };
+ tcx.struct_span_lint_hir(
+ lint::builtin::UNUSED_IMPORTS,
+ item.hir_id(),
+ path.span,
+ msg,
+ |lint| lint,
+ );
+ }
+
+ unused_crates_lint(tcx);
+}
+
+fn unused_crates_lint(tcx: TyCtxt<'_>) {
+ let lint = lint::builtin::UNUSED_EXTERN_CRATES;
+
+ // Collect first the crates that are completely unused. These we
+ // can always suggest removing (no matter which edition we are
+ // in).
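+ // For example (illustrative), an `extern crate foo;` whose crate is never
+ // referenced anywhere gets a plain removal suggestion here, while a *used*
+ // one is handled further down and, on the 2018 edition, suggested to be
+ // rewritten as `use foo;`.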
+ let unused_extern_crates: FxHashMap<LocalDefId, Span> = tcx
+ .maybe_unused_extern_crates(())
+ .iter()
+ .filter(|&&(def_id, _)| {
+ // The `def_id` here actually was calculated during resolution (at least
+ // at the time of this writing) and is being shipped to us via a side
+ // channel of the tcx. There may have been extra expansion phases,
+ // however, which ended up removing the `def_id` *after* expansion.
+ //
+ // As a result we need to verify that `def_id` is indeed still valid for
+ // our AST and actually present in the HIR map. If it's not there then
+ // there's safely nothing to warn about, and otherwise we carry on with
+ // our execution.
+ //
+ // Note that if we carry through to the `extern_mod_stmt_cnum` query
+ // below it'll cause a panic because `def_id` is actually bogus at this
+ // point in time otherwise.
+ if tcx.hir().find(tcx.hir().local_def_id_to_hir_id(def_id)).is_none() {
+ return false;
+ }
+ true
+ })
+ .filter(|&&(def_id, _)| {
+ tcx.extern_mod_stmt_cnum(def_id).map_or(true, |cnum| {
+ !tcx.is_compiler_builtins(cnum)
+ && !tcx.is_panic_runtime(cnum)
+ && !tcx.has_global_allocator(cnum)
+ && !tcx.has_panic_handler(cnum)
+ })
+ })
+ .cloned()
+ .collect();
+
+ // Collect all the extern crates (in a reliable order).
+ let mut crates_to_lint = vec![];
+
+ for id in tcx.hir().items() {
+ if matches!(tcx.def_kind(id.owner_id), DefKind::ExternCrate) {
+ let item = tcx.hir().item(id);
+ if let hir::ItemKind::ExternCrate(orig_name) = item.kind {
+ crates_to_lint.push(ExternCrateToLint {
+ def_id: item.owner_id.to_def_id(),
+ span: item.span,
+ orig_name,
+ warn_if_unused: !item.ident.as_str().starts_with('_'),
+ });
+ }
+ }
+ }
+
+ let extern_prelude = &tcx.resolutions(()).extern_prelude;
+
+ for extern_crate in &crates_to_lint {
+ let def_id = extern_crate.def_id.expect_local();
+ let item = tcx.hir().expect_item(def_id);
+
+ // If the crate is fully unused, we suggest removing it altogether.
+ // We do this in any edition.
+ if extern_crate.warn_if_unused {
+ if let Some(&span) = unused_extern_crates.get(&def_id) {
+ // Removal suggestion span needs to include attributes (Issue #54400)
+ let id = tcx.hir().local_def_id_to_hir_id(def_id);
+ let span_with_attrs = tcx
+ .hir()
+ .attrs(id)
+ .iter()
+ .map(|attr| attr.span)
+ .fold(span, |acc, attr_span| acc.to(attr_span));
+
+ tcx.emit_spanned_lint(lint, id, span, UnusedExternCrate { span: span_with_attrs });
+ continue;
+ }
+ }
+
+ // If we are not in the Rust 2018 edition, then we don't make any further
+ // suggestions.
+ if !tcx.sess.rust_2018() {
+ continue;
+ }
+
+ // If the extern crate isn't in the extern prelude,
+ // there is no way it can be written as a `use`.
+ let orig_name = extern_crate.orig_name.unwrap_or(item.ident.name);
+ if !extern_prelude.get(&orig_name).map_or(false, |from_item| !from_item) {
+ continue;
+ }
+
+ // If the extern crate is renamed, then we cannot suggest replacing it with a use as this
+ // would not insert the new name into the prelude, where other imports in the crate may be
+ // expecting it.
+ if extern_crate.orig_name.is_some() {
+ continue;
+ }
+
+ let id = tcx.hir().local_def_id_to_hir_id(def_id);
+ // If the extern crate has any attributes, they may have funky
+ // semantics we can't faithfully represent using `use` (most
+ // notably `#[macro_use]`). Ignore it.
+ if !tcx.hir().attrs(id).is_empty() {
+ continue;
+ }
+
+ let base_replacement = match extern_crate.orig_name {
+ Some(orig_name) => format!("use {} as {};", orig_name, item.ident.name),
+ None => format!("use {};", item.ident.name),
+ };
+ let vis = tcx.sess.source_map().span_to_snippet(item.vis_span).unwrap_or_default();
+ let add_vis = |to| if vis.is_empty() { to } else { format!("{} {}", vis, to) };
+ tcx.emit_spanned_lint(
+ lint,
+ id,
+ extern_crate.span,
+ ExternCrateNotIdiomatic {
+ span: extern_crate.span,
+ msg_code: add_vis("use".to_string()),
+ suggestion_code: add_vis(base_replacement),
+ },
+ );
+ }
+}
+
+struct ExternCrateToLint {
+ /// `DefId` of the extern crate
+ def_id: DefId,
+
+ /// span from the item
+ span: Span,
+
+ /// if `Some`, then this is renamed (`extern crate orig_name as
+ /// crate_name`), and -- perhaps surprisingly -- this stores the
+ /// *original* name (`item.name` will contain the new name)
+ orig_name: Option<Symbol>,
+
+ /// if `false`, the original name started with `_`, so we shouldn't lint
+ /// about it going unused (but we should still emit idiom lints).
+ warn_if_unused: bool,
+}
diff --git a/compiler/rustc_hir_analysis/src/coherence/builtin.rs b/compiler/rustc_hir_analysis/src/coherence/builtin.rs
new file mode 100644
index 000000000..b6c91d425
--- /dev/null
+++ b/compiler/rustc_hir_analysis/src/coherence/builtin.rs
@@ -0,0 +1,572 @@
+//! Check properties that are required by built-in traits and set
+//! up data structures required by type-checking/codegen.
+
+use crate::errors::{CopyImplOnNonAdt, CopyImplOnTypeWithDtor, DropImplOnWrongItem};
+use rustc_errors::{struct_span_err, MultiSpan};
+use rustc_hir as hir;
+use rustc_hir::def_id::{DefId, LocalDefId};
+use rustc_hir::lang_items::LangItem;
+use rustc_hir::ItemKind;
+use rustc_infer::infer;
+use rustc_infer::infer::outlives::env::OutlivesEnvironment;
+use rustc_infer::infer::TyCtxtInferExt;
+use rustc_middle::ty::adjustment::CoerceUnsizedInfo;
+use rustc_middle::ty::{self, suggest_constraining_type_params, Ty, TyCtxt, TypeVisitable};
+use rustc_trait_selection::traits::error_reporting::TypeErrCtxtExt;
+use rustc_trait_selection::traits::misc::{can_type_implement_copy, CopyImplementationError};
+use rustc_trait_selection::traits::predicate_for_trait_def;
+use rustc_trait_selection::traits::{self, ObligationCause};
+use std::collections::BTreeMap;
+
+pub fn check_trait(tcx: TyCtxt<'_>, trait_def_id: DefId) {
+ let lang_items = tcx.lang_items();
+ Checker { tcx, trait_def_id }
+ .check(lang_items.drop_trait(), visit_implementation_of_drop)
+ .check(lang_items.copy_trait(), visit_implementation_of_copy)
+ .check(lang_items.coerce_unsized_trait(), visit_implementation_of_coerce_unsized)
+ .check(lang_items.dispatch_from_dyn_trait(), visit_implementation_of_dispatch_from_dyn);
+}
+
+struct Checker<'tcx> {
+ tcx: TyCtxt<'tcx>,
+ trait_def_id: DefId,
+}
+
+impl<'tcx> Checker<'tcx> {
+ fn check<F>(&self, trait_def_id: Option<DefId>, mut f: F) -> &Self
+ where
+ F: FnMut(TyCtxt<'tcx>, LocalDefId),
+ {
+ if Some(self.trait_def_id) == trait_def_id {
+ for &impl_def_id in self.tcx.hir().trait_impls(self.trait_def_id) {
+ f(self.tcx, impl_def_id);
+ }
+ }
+ self
+ }
+}
+
+fn visit_implementation_of_drop(tcx: TyCtxt<'_>, impl_did: LocalDefId) {
+ // Destructors only work on local ADT types.
+ match tcx.type_of(impl_did).kind() {
+ ty::Adt(def, _) if def.did().is_local() => return,
+ ty::Error(_) => return,
+ _ => {}
+ }
+
+ let sp = match tcx.hir().expect_item(impl_did).kind {
+ ItemKind::Impl(ref impl_) => impl_.self_ty.span,
+ _ => bug!("expected Drop impl item"),
+ };
+
+ tcx.sess.emit_err(DropImplOnWrongItem { span: sp });
+}
+
+fn visit_implementation_of_copy(tcx: TyCtxt<'_>, impl_did: LocalDefId) {
+ debug!("visit_implementation_of_copy: impl_did={:?}", impl_did);
+
+ let impl_hir_id = tcx.hir().local_def_id_to_hir_id(impl_did);
+
+ let self_type = tcx.type_of(impl_did);
+ debug!("visit_implementation_of_copy: self_type={:?} (bound)", self_type);
+
+ let param_env = tcx.param_env(impl_did);
+ assert!(!self_type.has_escaping_bound_vars());
+
+ debug!("visit_implementation_of_copy: self_type={:?} (free)", self_type);
+
+ let span = match tcx.hir().expect_item(impl_did).kind {
+ ItemKind::Impl(hir::Impl { polarity: hir::ImplPolarity::Negative(_), .. }) => return,
+ ItemKind::Impl(impl_) => impl_.self_ty.span,
+ _ => bug!("expected Copy impl item"),
+ };
+
+ let cause = traits::ObligationCause::misc(span, impl_hir_id);
+ match can_type_implement_copy(tcx, param_env, self_type, cause) {
+ Ok(()) => {}
+ Err(CopyImplementationError::InfrigingFields(fields)) => {
+ let mut err = struct_span_err!(
+ tcx.sess,
+ span,
+ E0204,
+ "the trait `Copy` may not be implemented for this type"
+ );
+
+ // We'll try to suggest constraining type parameters to fulfill the requirements of
+ // their `Copy` implementation.
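+ // For example (illustrative), `struct Foo<T>(T);` with a blanket
+ // `impl<T> Copy for Foo<T> {}` fails here because the field of type `T`
+ // is not `Copy`, and the machinery below suggests adding a `T: Copy` bound.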
+ let mut errors: BTreeMap<_, Vec<_>> = Default::default();
+ let mut bounds = vec![];
+
+ for (field, ty) in fields {
+ let field_span = tcx.def_span(field.did);
+ let field_ty_span = match tcx.hir().get_if_local(field.did) {
+ Some(hir::Node::Field(field_def)) => field_def.ty.span,
+ _ => field_span,
+ };
+ err.span_label(field_span, "this field does not implement `Copy`");
+ // Spin up a new FulfillmentContext, so we can get the _precise_ reason
+ // why this field does not implement Copy. This is useful because sometimes
+ // it is not immediately clear why Copy is not implemented for a field, since
+ // all we point at is the field itself.
+ let infcx = tcx.infer_ctxt().ignoring_regions().build();
+ for error in traits::fully_solve_bound(
+ &infcx,
+ traits::ObligationCause::dummy_with_span(field_ty_span),
+ param_env,
+ ty,
+ tcx.lang_items().copy_trait().unwrap(),
+ ) {
+ let error_predicate = error.obligation.predicate;
+ // Only note if it's not the root obligation, otherwise it's trivial and
+ // should be self-explanatory (i.e. a field literally doesn't implement Copy).
+
+ // FIXME: This error could be more descriptive, especially if the error_predicate
+ // contains a foreign type or if it's a deeply nested type...
+ if error_predicate != error.root_obligation.predicate {
+ errors
+ .entry((ty.to_string(), error_predicate.to_string()))
+ .or_default()
+ .push(error.obligation.cause.span);
+ }
+ if let ty::PredicateKind::Trait(ty::TraitPredicate {
+ trait_ref,
+ polarity: ty::ImplPolarity::Positive,
+ ..
+ }) = error_predicate.kind().skip_binder()
+ {
+ let ty = trait_ref.self_ty();
+ if let ty::Param(_) = ty.kind() {
+ bounds.push((
+ format!("{ty}"),
+ trait_ref.print_only_trait_path().to_string(),
+ Some(trait_ref.def_id),
+ ));
+ }
+ }
+ }
+ }
+ for ((ty, error_predicate), spans) in errors {
+ let span: MultiSpan = spans.into();
+ err.span_note(
+ span,
+ &format!("the `Copy` impl for `{}` requires that `{}`", ty, error_predicate),
+ );
+ }
+ suggest_constraining_type_params(
+ tcx,
+ tcx.hir().get_generics(impl_did).expect("impls always have generics"),
+ &mut err,
+ bounds.iter().map(|(param, constraint, def_id)| {
+ (param.as_str(), constraint.as_str(), *def_id)
+ }),
+ );
+ err.emit();
+ }
+ Err(CopyImplementationError::NotAnAdt) => {
+ tcx.sess.emit_err(CopyImplOnNonAdt { span });
+ }
+ Err(CopyImplementationError::HasDestructor) => {
+ tcx.sess.emit_err(CopyImplOnTypeWithDtor { span });
+ }
+ }
+}
+
+fn visit_implementation_of_coerce_unsized<'tcx>(tcx: TyCtxt<'tcx>, impl_did: LocalDefId) {
+ debug!("visit_implementation_of_coerce_unsized: impl_did={:?}", impl_did);
+
+ // Just compute this for the side-effects, in particular reporting
+ // errors; other parts of the code may, of course, demand it for the
+ // info it computes.
+ let span = tcx.def_span(impl_did);
+ tcx.at(span).coerce_unsized_info(impl_did);
+}
+
+fn visit_implementation_of_dispatch_from_dyn<'tcx>(tcx: TyCtxt<'tcx>, impl_did: LocalDefId) {
+ debug!("visit_implementation_of_dispatch_from_dyn: impl_did={:?}", impl_did);
+
+ let impl_hir_id = tcx.hir().local_def_id_to_hir_id(impl_did);
+ let span = tcx.hir().span(impl_hir_id);
+
+ let dispatch_from_dyn_trait = tcx.require_lang_item(LangItem::DispatchFromDyn, Some(span));
+
+ let source = tcx.type_of(impl_did);
+ assert!(!source.has_escaping_bound_vars());
+ let target = {
+ let trait_ref = tcx.impl_trait_ref(impl_did).unwrap();
+ assert_eq!(trait_ref.def_id, dispatch_from_dyn_trait);
+
+ trait_ref.substs.type_at(1)
+ };
+
+ debug!("visit_implementation_of_dispatch_from_dyn: {:?} -> {:?}", source, target);
+
+ let param_env = tcx.param_env(impl_did);
+
+ let create_err = |msg: &str| struct_span_err!(tcx.sess, span, E0378, "{}", msg);
+
+ let infcx = tcx.infer_ctxt().build();
+ let cause = ObligationCause::misc(span, impl_hir_id);
+
+ use rustc_type_ir::sty::TyKind::*;
+ match (source.kind(), target.kind()) {
+ (&Ref(r_a, _, mutbl_a), Ref(r_b, _, mutbl_b))
+ if infcx.at(&cause, param_env).eq(r_a, *r_b).is_ok() && mutbl_a == *mutbl_b => {}
+ (&RawPtr(tm_a), &RawPtr(tm_b)) if tm_a.mutbl == tm_b.mutbl => (),
+ (&Adt(def_a, substs_a), &Adt(def_b, substs_b))
+ if def_a.is_struct() && def_b.is_struct() =>
+ {
+ if def_a != def_b {
+ let source_path = tcx.def_path_str(def_a.did());
+ let target_path = tcx.def_path_str(def_b.did());
+
+ create_err(&format!(
+ "the trait `DispatchFromDyn` may only be implemented \
+ for a coercion between structures with the same \
+ definition; expected `{}`, found `{}`",
+ source_path, target_path,
+ ))
+ .emit();
+
+ return;
+ }
+
+ if def_a.repr().c() || def_a.repr().packed() {
+ create_err(
+ "structs implementing `DispatchFromDyn` may not have \
+ `#[repr(packed)]` or `#[repr(C)]`",
+ )
+ .emit();
+ }
+
+ let fields = &def_a.non_enum_variant().fields;
+
+ let coerced_fields = fields
+ .iter()
+ .filter(|field| {
+ let ty_a = field.ty(tcx, substs_a);
+ let ty_b = field.ty(tcx, substs_b);
+
+ if let Ok(layout) = tcx.layout_of(param_env.and(ty_a)) {
+ if layout.is_zst() && layout.align.abi.bytes() == 1 {
+ // ignore ZST fields with alignment of 1 byte
+ return false;
+ }
+ }
+
+ if let Ok(ok) = infcx.at(&cause, param_env).eq(ty_a, ty_b) {
+ if ok.obligations.is_empty() {
+ create_err(
+ "the trait `DispatchFromDyn` may only be implemented \
+ for structs containing the field being coerced, \
+ ZST fields with 1 byte alignment, and nothing else",
+ )
+ .note(&format!(
+ "extra field `{}` of type `{}` is not allowed",
+ field.name, ty_a,
+ ))
+ .emit();
+
+ return false;
+ }
+ }
+
+ return true;
+ })
+ .collect::<Vec<_>>();
+
+ if coerced_fields.is_empty() {
+ create_err(
+ "the trait `DispatchFromDyn` may only be implemented \
+ for a coercion between structures with a single field \
+ being coerced, none found",
+ )
+ .emit();
+ } else if coerced_fields.len() > 1 {
+ create_err("implementing the `DispatchFromDyn` trait requires multiple coercions")
+ .note(
+ "the trait `DispatchFromDyn` may only be implemented \
+ for a coercion between structures with a single field \
+ being coerced",
+ )
+ .note(&format!(
+ "currently, {} fields need coercions: {}",
+ coerced_fields.len(),
+ coerced_fields
+ .iter()
+ .map(|field| {
+ format!(
+ "`{}` (`{}` to `{}`)",
+ field.name,
+ field.ty(tcx, substs_a),
+ field.ty(tcx, substs_b),
+ )
+ })
+ .collect::<Vec<_>>()
+ .join(", ")
+ ))
+ .emit();
+ } else {
+ let errors = traits::fully_solve_obligations(
+ &infcx,
+ coerced_fields.into_iter().map(|field| {
+ predicate_for_trait_def(
+ tcx,
+ param_env,
+ cause.clone(),
+ dispatch_from_dyn_trait,
+ 0,
+ field.ty(tcx, substs_a),
+ &[field.ty(tcx, substs_b).into()],
+ )
+ }),
+ );
+ if !errors.is_empty() {
+ infcx.err_ctxt().report_fulfillment_errors(&errors, None, false);
+ }
+
+ // Finally, resolve all regions.
+ let outlives_env = OutlivesEnvironment::new(param_env);
+ infcx.check_region_obligations_and_report_errors(impl_did, &outlives_env);
+ }
+ }
+ _ => {
+ create_err(
+ "the trait `DispatchFromDyn` may only be implemented \
+ for a coercion between structures",
+ )
+ .emit();
+ }
+ }
+}
+
+pub fn coerce_unsized_info<'tcx>(tcx: TyCtxt<'tcx>, impl_did: DefId) -> CoerceUnsizedInfo {
+ debug!("compute_coerce_unsized_info(impl_did={:?})", impl_did);
+
+ // this provider should only get invoked for local def-ids
+ let impl_did = impl_did.expect_local();
+ let span = tcx.def_span(impl_did);
+
+ let coerce_unsized_trait = tcx.require_lang_item(LangItem::CoerceUnsized, Some(span));
+
+ let unsize_trait = tcx.lang_items().require(LangItem::Unsize).unwrap_or_else(|err| {
+ tcx.sess.fatal(&format!("`CoerceUnsized` implementation {}", err.to_string()));
+ });
+
+ let source = tcx.type_of(impl_did);
+ let trait_ref = tcx.impl_trait_ref(impl_did).unwrap();
+ assert_eq!(trait_ref.def_id, coerce_unsized_trait);
+ let target = trait_ref.substs.type_at(1);
+ debug!("visit_implementation_of_coerce_unsized: {:?} -> {:?} (bound)", source, target);
+
+ let param_env = tcx.param_env(impl_did);
+ assert!(!source.has_escaping_bound_vars());
+
+ let err_info = CoerceUnsizedInfo { custom_kind: None };
+
+ debug!("visit_implementation_of_coerce_unsized: {:?} -> {:?} (free)", source, target);
+
+ let infcx = tcx.infer_ctxt().build();
+ let impl_hir_id = tcx.hir().local_def_id_to_hir_id(impl_did);
+ let cause = ObligationCause::misc(span, impl_hir_id);
+ let check_mutbl = |mt_a: ty::TypeAndMut<'tcx>,
+ mt_b: ty::TypeAndMut<'tcx>,
+ mk_ptr: &dyn Fn(Ty<'tcx>) -> Ty<'tcx>| {
+ if (mt_a.mutbl, mt_b.mutbl) == (hir::Mutability::Not, hir::Mutability::Mut) {
+ infcx
+ .err_ctxt()
+ .report_mismatched_types(
+ &cause,
+ mk_ptr(mt_b.ty),
+ target,
+ ty::error::TypeError::Mutability,
+ )
+ .emit();
+ }
+ (mt_a.ty, mt_b.ty, unsize_trait, None)
+ };
+ let (source, target, trait_def_id, kind) = match (source.kind(), target.kind()) {
+ (&ty::Ref(r_a, ty_a, mutbl_a), &ty::Ref(r_b, ty_b, mutbl_b)) => {
+ infcx.sub_regions(infer::RelateObjectBound(span), r_b, r_a);
+ let mt_a = ty::TypeAndMut { ty: ty_a, mutbl: mutbl_a };
+ let mt_b = ty::TypeAndMut { ty: ty_b, mutbl: mutbl_b };
+ check_mutbl(mt_a, mt_b, &|ty| tcx.mk_imm_ref(r_b, ty))
+ }
+
+ (&ty::Ref(_, ty_a, mutbl_a), &ty::RawPtr(mt_b)) => {
+ let mt_a = ty::TypeAndMut { ty: ty_a, mutbl: mutbl_a };
+ check_mutbl(mt_a, mt_b, &|ty| tcx.mk_imm_ptr(ty))
+ }
+
+ (&ty::RawPtr(mt_a), &ty::RawPtr(mt_b)) => check_mutbl(mt_a, mt_b, &|ty| tcx.mk_imm_ptr(ty)),
+
+ (&ty::Adt(def_a, substs_a), &ty::Adt(def_b, substs_b))
+ if def_a.is_struct() && def_b.is_struct() =>
+ {
+ if def_a != def_b {
+ let source_path = tcx.def_path_str(def_a.did());
+ let target_path = tcx.def_path_str(def_b.did());
+ struct_span_err!(
+ tcx.sess,
+ span,
+ E0377,
+ "the trait `CoerceUnsized` may only be implemented \
+ for a coercion between structures with the same \
+ definition; expected `{}`, found `{}`",
+ source_path,
+ target_path
+ )
+ .emit();
+ return err_info;
+ }
+
+ // Here we are considering a case of converting
+ // `S<P0...Pn>` to `S<Q0...Qn>`. As an example, let's imagine a struct `Foo<T, U>`,
+ // which acts like a pointer to `U`, but carries along some extra data of type `T`:
+ //
+ // struct Foo<T, U> {
+ // extra: T,
+ // ptr: *mut U,
+ // }
+ //
+ // We might have an impl that allows (e.g.) `Foo<T, [i32; 3]>` to be unsized
+ // to `Foo<T, [i32]>`. That impl would look like:
+ //
+ // impl<T, U: Unsize<V>, V> CoerceUnsized<Foo<T, V>> for Foo<T, U> {}
+ //
+ // Here `U = [i32; 3]` and `V = [i32]`. At runtime,
+ // when this coercion occurs, we would be changing the
+ // field `ptr` from a thin pointer of type `*mut [i32;
+ // 3]` to a fat pointer of type `*mut [i32]` (with
+ // extra data `3`). **The purpose of this check is to
+ // make sure that we know how to do this conversion.**
+ //
+ // To check if this impl is legal, we would walk down
+ // the fields of `Foo` and consider their types with
+ // both substitutions. We are looking to find that
+ // exactly one (non-phantom) field has changed its
+ // type, which we will expect to be the pointer that
+ // is becoming fat (we could probably generalize this
+ // to multiple thin pointers of the same type becoming
+ // fat, but we don't). In this case:
+ //
+ // - `extra` has type `T` before and type `T` after
+ // - `ptr` has type `*mut U` before and type `*mut V` after
+ //
+ // Since just one field changed, we would then check
+ // that `*mut U: CoerceUnsized<*mut V>` is implemented
+ // (in other words, that we know how to do this
+ // conversion). This will work out because `U:
+ // Unsize<V>`, and we have a builtin rule that `*mut
+ // U` can be coerced to `*mut V` if `U: Unsize<V>`.
+ let fields = &def_a.non_enum_variant().fields;
+ let diff_fields = fields
+ .iter()
+ .enumerate()
+ .filter_map(|(i, f)| {
+ let (a, b) = (f.ty(tcx, substs_a), f.ty(tcx, substs_b));
+
+ if tcx.type_of(f.did).is_phantom_data() {
+ // Ignore PhantomData fields
+ return None;
+ }
+
+ // Ignore fields that aren't changed; it may
+ // be that we could get away with subtyping or
+ // something more accepting, but we use
+ // equality because we want to be able to
+ // perform this check without computing
+ // variance where possible. (This is because
+ // we may have to evaluate constraint
+ // expressions in the course of execution.)
+ // See e.g., #41936.
+ if let Ok(ok) = infcx.at(&cause, param_env).eq(a, b) {
+ if ok.obligations.is_empty() {
+ return None;
+ }
+ }
+
+ // Collect up all fields that were significantly changed
+ // i.e., those that contain T in coerce_unsized T -> U
+ Some((i, a, b))
+ })
+ .collect::<Vec<_>>();
+
+ if diff_fields.is_empty() {
+ struct_span_err!(
+ tcx.sess,
+ span,
+ E0374,
+ "the trait `CoerceUnsized` may only be implemented \
+ for a coercion between structures with one field \
+ being coerced, none found"
+ )
+ .emit();
+ return err_info;
+ } else if diff_fields.len() > 1 {
+ let item = tcx.hir().expect_item(impl_did);
+ let span =
+ if let ItemKind::Impl(hir::Impl { of_trait: Some(ref t), .. }) = item.kind {
+ t.path.span
+ } else {
+ tcx.def_span(impl_did)
+ };
+
+ struct_span_err!(
+ tcx.sess,
+ span,
+ E0375,
+ "implementing the trait \
+ `CoerceUnsized` requires multiple \
+ coercions"
+ )
+ .note(
+ "`CoerceUnsized` may only be implemented for \
+ a coercion between structures with one field being coerced",
+ )
+ .note(&format!(
+ "currently, {} fields need coercions: {}",
+ diff_fields.len(),
+ diff_fields
+ .iter()
+ .map(|&(i, a, b)| { format!("`{}` (`{}` to `{}`)", fields[i].name, a, b) })
+ .collect::<Vec<_>>()
+ .join(", ")
+ ))
+ .span_label(span, "requires multiple coercions")
+ .emit();
+ return err_info;
+ }
+
+ let (i, a, b) = diff_fields[0];
+ let kind = ty::adjustment::CustomCoerceUnsized::Struct(i);
+ (a, b, coerce_unsized_trait, Some(kind))
+ }
+
+ _ => {
+ struct_span_err!(
+ tcx.sess,
+ span,
+ E0376,
+ "the trait `CoerceUnsized` may only be implemented \
+ for a coercion between structures"
+ )
+ .emit();
+ return err_info;
+ }
+ };
+
+ // Register an obligation for `A: Trait<B>`.
+ let cause = traits::ObligationCause::misc(span, impl_hir_id);
+ let predicate =
+ predicate_for_trait_def(tcx, param_env, cause, trait_def_id, 0, source, &[target.into()]);
+ let errors = traits::fully_solve_obligation(&infcx, predicate);
+ if !errors.is_empty() {
+ infcx.err_ctxt().report_fulfillment_errors(&errors, None, false);
+ }
+
+ // Finally, resolve all regions.
+ let outlives_env = OutlivesEnvironment::new(param_env);
+ infcx.check_region_obligations_and_report_errors(impl_did, &outlives_env);
+
+ CoerceUnsizedInfo { custom_kind: kind }
+}
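For illustration, the worked example from the long comment above, written out as a compilable sketch: exactly one non-`PhantomData` field (`ptr`) changes type, so the impl reduces to checking `*mut U: CoerceUnsized<*mut V>`. Assumes a nightly toolchain with the `coerce_unsized` and `unsize` feature gates.

#![feature(coerce_unsized, unsize)]

use std::marker::Unsize;
use std::ops::CoerceUnsized;

struct Foo<T, U: ?Sized> {
    extra: T,    // same type on both sides, so it is ignored by the check
    ptr: *mut U, // the single coerced field: `*mut [i32; 3]` -> `*mut [i32]`
}

impl<T, U: Unsize<V>, V: ?Sized> CoerceUnsized<Foo<T, V>> for Foo<T, U> {}

fn main() {
    let mut data = [1, 2, 3];
    let thin: Foo<u8, [i32; 3]> = Foo { extra: 0, ptr: &mut data };
    let _fat: Foo<u8, [i32]> = thin; // the unsizing coercion this impl enables
}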
diff --git a/compiler/rustc_typeck/src/coherence/inherent_impls.rs b/compiler/rustc_hir_analysis/src/coherence/inherent_impls.rs
index 52aad636f..2890c149b 100644
--- a/compiler/rustc_typeck/src/coherence/inherent_impls.rs
+++ b/compiler/rustc_hir_analysis/src/coherence/inherent_impls.rs
@@ -58,7 +58,7 @@ const ADD_ATTR: &str =
impl<'tcx> InherentCollect<'tcx> {
fn check_def_id(&mut self, item: &hir::Item<'_>, self_ty: Ty<'tcx>, def_id: DefId) {
- let impl_def_id = item.def_id;
+ let impl_def_id = item.owner_id;
if let Some(def_id) = def_id.as_local() {
// Add the implementation to the mapping from implementation to base
// type def ID, if there is a base type for this implementation and
@@ -89,7 +89,7 @@ impl<'tcx> InherentCollect<'tcx> {
for impl_item in items {
if !self
.tcx
- .has_attr(impl_item.id.def_id.to_def_id(), sym::rustc_allow_incoherent_impl)
+ .has_attr(impl_item.id.owner_id.to_def_id(), sym::rustc_allow_incoherent_impl)
{
struct_span_err!(
self.tcx.sess,
@@ -105,7 +105,7 @@ impl<'tcx> InherentCollect<'tcx> {
}
if let Some(simp) = simplify_type(self.tcx, self_ty, TreatParams::AsInfer) {
- self.impls_map.incoherent_impls.entry(simp).or_default().push(impl_def_id);
+ self.impls_map.incoherent_impls.entry(simp).or_default().push(impl_def_id.def_id);
} else {
bug!("unexpected self type: {:?}", self_ty);
}
@@ -135,7 +135,7 @@ impl<'tcx> InherentCollect<'tcx> {
for item in items {
if !self
.tcx
- .has_attr(item.id.def_id.to_def_id(), sym::rustc_allow_incoherent_impl)
+ .has_attr(item.id.owner_id.to_def_id(), sym::rustc_allow_incoherent_impl)
{
struct_span_err!(
self.tcx.sess,
@@ -177,7 +177,7 @@ impl<'tcx> InherentCollect<'tcx> {
}
fn check_item(&mut self, id: hir::ItemId) {
- if !matches!(self.tcx.def_kind(id.def_id), DefKind::Impl) {
+ if !matches!(self.tcx.def_kind(id.owner_id), DefKind::Impl) {
return;
}
@@ -186,7 +186,7 @@ impl<'tcx> InherentCollect<'tcx> {
return;
};
- let self_ty = self.tcx.type_of(item.def_id);
+ let self_ty = self.tcx.type_of(item.owner_id);
match *self_ty.kind() {
ty::Adt(def, _) => {
self.check_def_id(item, self_ty, def.did());
@@ -220,7 +220,9 @@ impl<'tcx> InherentCollect<'tcx> {
| ty::Ref(..)
| ty::Never
| ty::FnPtr(_)
- | ty::Tuple(..) => self.check_primitive_impl(item.def_id, self_ty, items, ty.span),
+ | ty::Tuple(..) => {
+ self.check_primitive_impl(item.owner_id.def_id, self_ty, items, ty.span)
+ }
ty::Projection(..) | ty::Opaque(..) | ty::Param(_) => {
let mut err = struct_span_err!(
self.tcx.sess,
@@ -241,7 +243,7 @@ impl<'tcx> InherentCollect<'tcx> {
| ty::Bound(..)
| ty::Placeholder(_)
| ty::Infer(_) => {
- bug!("unexpected impl self type of impl: {:?} {:?}", item.def_id, self_ty);
+ bug!("unexpected impl self type of impl: {:?} {:?}", item.owner_id, self_ty);
}
ty::Error(_) => {}
}
diff --git a/compiler/rustc_typeck/src/coherence/inherent_impls_overlap.rs b/compiler/rustc_hir_analysis/src/coherence/inherent_impls_overlap.rs
index 03e076bf5..972769eb1 100644
--- a/compiler/rustc_typeck/src/coherence/inherent_impls_overlap.rs
+++ b/compiler/rustc_hir_analysis/src/coherence/inherent_impls_overlap.rs
@@ -58,6 +58,37 @@ impl<'tcx> InherentOverlapChecker<'tcx> {
== item2.ident(self.tcx).normalize_to_macros_2_0()
}
+ fn check_for_duplicate_items_in_impl(&self, impl_: DefId) {
+ let impl_items = self.tcx.associated_items(impl_);
+
+ let mut seen_items = FxHashMap::default();
+ for impl_item in impl_items.in_definition_order() {
+ let span = self.tcx.def_span(impl_item.def_id);
+ let ident = impl_item.ident(self.tcx);
+
+ let norm_ident = ident.normalize_to_macros_2_0();
+ match seen_items.entry(norm_ident) {
+ Entry::Occupied(entry) => {
+ let former = entry.get();
+ let mut err = struct_span_err!(
+ self.tcx.sess,
+ span,
+ E0592,
+ "duplicate definitions with name `{}`",
+ ident,
+ );
+ err.span_label(span, format!("duplicate definitions for `{}`", ident));
+ err.span_label(*former, format!("other definition for `{}`", ident));
+
+ err.emit();
+ }
+ Entry::Vacant(entry) => {
+ entry.insert(span);
+ }
+ }
+ }
+ }
+
fn check_for_common_items_in_impls(
&self,
impl1: DefId,
@@ -117,29 +148,22 @@ impl<'tcx> InherentOverlapChecker<'tcx> {
// inherent impls without warning.
SkipLeakCheck::Yes,
overlap_mode,
- |overlap| {
- self.check_for_common_items_in_impls(impl1_def_id, impl2_def_id, overlap);
- false
- },
- || true,
- );
+ )
+ .map_or(true, |overlap| {
+ self.check_for_common_items_in_impls(impl1_def_id, impl2_def_id, overlap);
+ false
+ });
}
fn check_item(&mut self, id: hir::ItemId) {
- let def_kind = self.tcx.def_kind(id.def_id);
+ let def_kind = self.tcx.def_kind(id.owner_id);
if !matches!(def_kind, DefKind::Enum | DefKind::Struct | DefKind::Trait | DefKind::Union) {
return;
}
- let impls = self.tcx.inherent_impls(id.def_id);
+ let impls = self.tcx.inherent_impls(id.owner_id);
- // If there is only one inherent impl block,
- // there is nothing to overlap check it with
- if impls.len() <= 1 {
- return;
- }
-
- let overlap_mode = OverlapMode::get(self.tcx, id.def_id.to_def_id());
+ let overlap_mode = OverlapMode::get(self.tcx, id.owner_id.to_def_id());
let impls_items = impls
.iter()
@@ -152,6 +176,8 @@ impl<'tcx> InherentOverlapChecker<'tcx> {
const ALLOCATING_ALGO_THRESHOLD: usize = 500;
if impls.len() < ALLOCATING_ALGO_THRESHOLD {
for (i, &(&impl1_def_id, impl_items1)) in impls_items.iter().enumerate() {
+ self.check_for_duplicate_items_in_impl(impl1_def_id);
+
for &(&impl2_def_id, impl_items2) in &impls_items[(i + 1)..] {
if self.impls_have_common_items(impl_items1, impl_items2) {
self.check_for_overlapping_inherent_impls(
@@ -290,6 +316,8 @@ impl<'tcx> InherentOverlapChecker<'tcx> {
impl_blocks.sort_unstable();
for (i, &impl1_items_idx) in impl_blocks.iter().enumerate() {
let &(&impl1_def_id, impl_items1) = &impls_items[impl1_items_idx];
+ self.check_for_duplicate_items_in_impl(impl1_def_id);
+
for &impl2_items_idx in impl_blocks[(i + 1)..].iter() {
let &(&impl2_def_id, impl_items2) = &impls_items[impl2_items_idx];
if self.impls_have_common_items(impl_items1, impl_items2) {
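For illustration, the two user-level situations this file reports as E0592, with invented names; the offending lines are commented out so the sketch itself compiles on stable.

struct S;

impl S {
    fn get(&self) -> u32 { 0 }
    // A second `fn get` in this same block is what the new
    // `check_for_duplicate_items_in_impl` reports:
    // fn get(&self) -> u32 { 1 } // duplicate definitions with name `get`
}

impl S {
    // The same name in a different inherent impl block is the pre-existing
    // overlap case handled by `check_for_common_items_in_impls`:
    // fn get(&self) -> u32 { 2 } // duplicate definitions with name `get`
}

fn main() {
    let _ = S.get();
}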
diff --git a/compiler/rustc_typeck/src/coherence/mod.rs b/compiler/rustc_hir_analysis/src/coherence/mod.rs
index ae9ebe590..ae9ebe590 100644
--- a/compiler/rustc_typeck/src/coherence/mod.rs
+++ b/compiler/rustc_hir_analysis/src/coherence/mod.rs
diff --git a/compiler/rustc_typeck/src/coherence/orphan.rs b/compiler/rustc_hir_analysis/src/coherence/orphan.rs
index 1608550aa..bb45c3823 100644
--- a/compiler/rustc_typeck/src/coherence/orphan.rs
+++ b/compiler/rustc_hir_analysis/src/coherence/orphan.rs
@@ -2,10 +2,9 @@
//! crate or pertains to a type defined in this crate.
use rustc_data_structures::fx::FxHashSet;
-use rustc_errors::struct_span_err;
+use rustc_errors::{struct_span_err, DelayDm};
use rustc_errors::{Diagnostic, ErrorGuaranteed};
use rustc_hir as hir;
-use rustc_infer::infer::TyCtxtInferExt;
use rustc_middle::ty::subst::GenericArgKind;
use rustc_middle::ty::subst::InternalSubsts;
use rustc_middle::ty::util::IgnoreRegions;
@@ -43,7 +42,7 @@ fn do_orphan_check_impl<'tcx>(
) -> Result<(), ErrorGuaranteed> {
let trait_def_id = trait_ref.def_id;
- let item = tcx.hir().item(hir::ItemId { def_id });
+ let item = tcx.hir().expect_item(def_id);
let hir::ItemKind::Impl(ref impl_) = item.kind else {
bug!("{:?} is not an impl: {:?}", def_id, item);
};
@@ -102,7 +101,7 @@ fn do_orphan_check_impl<'tcx>(
span_bug!(sp, "opaque type not found, but `has_opaque_types` is set")
}
- match traits::orphan_check(tcx, item.def_id.to_def_id()) {
+ match traits::orphan_check(tcx, item.owner_id.to_def_id()) {
Ok(()) => {}
Err(err) => emit_orphan_check_error(
tcx,
@@ -229,12 +228,8 @@ fn emit_orphan_check_error<'tcx>(
"only traits defined in the current crate {msg}"
);
err.span_label(sp, "impl doesn't use only types from inside the current crate");
- for (ty, is_target_ty) in &tys {
- let mut ty = *ty;
- tcx.infer_ctxt().enter(|infcx| {
- // Remove the lifetimes unnecessary for this error.
- ty = infcx.freshen(ty);
- });
+ for &(mut ty, is_target_ty) in &tys {
+ ty = tcx.erase_regions(ty);
ty = match ty.kind() {
// Remove the type arguments from the output, as they are not relevant.
// You can think of this as the reverse of `resolve_vars_if_possible`.
@@ -264,7 +259,7 @@ fn emit_orphan_check_error<'tcx>(
};
let msg = format!("{} is not defined in the current crate{}", ty, postfix);
- if *is_target_ty {
+ if is_target_ty {
// Point at `D<A>` in `impl<A, B> for C<B> in D<A>`
err.span_label(self_ty_span, &msg);
} else {
@@ -417,30 +412,31 @@ fn lint_auto_trait_impl<'tcx>(
lint::builtin::SUSPICIOUS_AUTO_TRAIT_IMPLS,
tcx.hir().local_def_id_to_hir_id(impl_def_id),
tcx.def_span(impl_def_id),
- |err| {
- let item_span = tcx.def_span(self_type_did);
- let self_descr = tcx.def_kind(self_type_did).descr(self_type_did);
- let mut err = err.build(&format!(
+ DelayDm(|| {
+ format!(
"cross-crate traits with a default impl, like `{}`, \
should not be specialized",
tcx.def_path_str(trait_ref.def_id),
- ));
+ )
+ }),
+ |lint| {
+ let item_span = tcx.def_span(self_type_did);
+ let self_descr = tcx.def_kind(self_type_did).descr(self_type_did);
match arg {
ty::util::NotUniqueParam::DuplicateParam(arg) => {
- err.note(&format!("`{}` is mentioned multiple times", arg));
+ lint.note(&format!("`{}` is mentioned multiple times", arg));
}
ty::util::NotUniqueParam::NotParam(arg) => {
- err.note(&format!("`{}` is not a generic parameter", arg));
+ lint.note(&format!("`{}` is not a generic parameter", arg));
}
}
- err.span_note(
+ lint.span_note(
item_span,
&format!(
"try using the same sequence of generic parameters as the {} definition",
self_descr,
),
- );
- err.emit();
+ )
},
);
}
diff --git a/compiler/rustc_hir_analysis/src/coherence/unsafety.rs b/compiler/rustc_hir_analysis/src/coherence/unsafety.rs
new file mode 100644
index 000000000..a34815b45
--- /dev/null
+++ b/compiler/rustc_hir_analysis/src/coherence/unsafety.rs
@@ -0,0 +1,96 @@
+//! Unsafety checker: checks whether each trait impl uses `unsafe` correctly,
+//! i.e. that the `unsafe` keyword on the impl matches what the trait's own
+//! unsafety (and attributes such as `#[may_dangle]`) requires.
+
+use rustc_errors::struct_span_err;
+use rustc_hir as hir;
+use rustc_hir::def::DefKind;
+use rustc_hir::Unsafety;
+use rustc_middle::ty::TyCtxt;
+use rustc_span::def_id::LocalDefId;
+
+pub(super) fn check_item(tcx: TyCtxt<'_>, def_id: LocalDefId) {
+ debug_assert!(matches!(tcx.def_kind(def_id), DefKind::Impl));
+ let item = tcx.hir().expect_item(def_id);
+ let hir::ItemKind::Impl(ref impl_) = item.kind else { bug!() };
+
+ if let Some(trait_ref) = tcx.impl_trait_ref(item.owner_id) {
+ let trait_def = tcx.trait_def(trait_ref.def_id);
+ let unsafe_attr =
+ impl_.generics.params.iter().find(|p| p.pure_wrt_drop).map(|_| "may_dangle");
+ match (trait_def.unsafety, unsafe_attr, impl_.unsafety, impl_.polarity) {
+ (Unsafety::Normal, None, Unsafety::Unsafe, hir::ImplPolarity::Positive) => {
+ struct_span_err!(
+ tcx.sess,
+ item.span,
+ E0199,
+ "implementing the trait `{}` is not unsafe",
+ trait_ref.print_only_trait_path()
+ )
+ .span_suggestion_verbose(
+ item.span.with_hi(item.span.lo() + rustc_span::BytePos(7)),
+ "remove `unsafe` from this trait implementation",
+ "",
+ rustc_errors::Applicability::MachineApplicable,
+ )
+ .emit();
+ }
+
+ (Unsafety::Unsafe, _, Unsafety::Normal, hir::ImplPolarity::Positive) => {
+ struct_span_err!(
+ tcx.sess,
+ item.span,
+ E0200,
+ "the trait `{}` requires an `unsafe impl` declaration",
+ trait_ref.print_only_trait_path()
+ )
+ .note(format!(
+ "the trait `{}` enforces invariants that the compiler can't check. \
+ Review the trait documentation and make sure this implementation \
+ upholds those invariants before adding the `unsafe` keyword",
+ trait_ref.print_only_trait_path()
+ ))
+ .span_suggestion_verbose(
+ item.span.shrink_to_lo(),
+ "add `unsafe` to this trait implementation",
+ "unsafe ",
+ rustc_errors::Applicability::MaybeIncorrect,
+ )
+ .emit();
+ }
+
+ (Unsafety::Normal, Some(attr_name), Unsafety::Normal, hir::ImplPolarity::Positive) => {
+ struct_span_err!(
+ tcx.sess,
+ item.span,
+ E0569,
+ "requires an `unsafe impl` declaration due to `#[{}]` attribute",
+ attr_name
+ )
+ .note(format!(
+ "the trait `{}` enforces invariants that the compiler can't check. \
+ Review the trait documentation and make sure this implementation \
+ upholds those invariants before adding the `unsafe` keyword",
+ trait_ref.print_only_trait_path()
+ ))
+ .span_suggestion_verbose(
+ item.span.shrink_to_lo(),
+ "add `unsafe` to this trait implementation",
+ "unsafe ",
+ rustc_errors::Applicability::MaybeIncorrect,
+ )
+ .emit();
+ }
+
+ (_, _, Unsafety::Unsafe, hir::ImplPolarity::Negative(_)) => {
+ // Reported in AST validation
+ tcx.sess.delay_span_bug(item.span, "unsafe negative impl");
+ }
+ (_, _, Unsafety::Normal, hir::ImplPolarity::Negative(_))
+ | (Unsafety::Unsafe, _, Unsafety::Unsafe, hir::ImplPolarity::Positive)
+ | (Unsafety::Normal, Some(_), Unsafety::Unsafe, hir::ImplPolarity::Positive)
+ | (Unsafety::Normal, None, Unsafety::Normal, _) => {
+ // OK
+ }
+ }
+ }
+}
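For illustration, the main cases this new checker distinguishes, sketched with invented trait and type names; the rejected forms are left in comments so the sketch compiles on stable.

unsafe trait Zeroable {} // an `unsafe trait`: implementors promise extra invariants
trait Plain {}

struct S;

unsafe impl Zeroable for S {} // OK: `unsafe impl` matches the unsafe trait
impl Plain for S {}           // OK: safe trait, safe impl

// `impl Zeroable for S {}` would be E0200 (missing `unsafe impl`), and
// `unsafe impl Plain for S {}` would be E0199 (`unsafe` where none is needed);
// the `#[may_dangle]` / E0569 case additionally requires nightly.

fn main() {}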
diff --git a/compiler/rustc_hir_analysis/src/collect.rs b/compiler/rustc_hir_analysis/src/collect.rs
new file mode 100644
index 000000000..346d2e2fc
--- /dev/null
+++ b/compiler/rustc_hir_analysis/src/collect.rs
@@ -0,0 +1,2263 @@
+//! "Collection" is the process of determining the type and other external
+//! details of each item in Rust. Collection is specifically concerned
+//! with *inter-procedural* things -- for example, for a function
+//! definition, collection will figure out the type and signature of the
+//! function, but it will not visit the *body* of the function in any way,
+//! nor examine type annotations on local variables (that's the job of
+//! type *checking*).
+//!
+//! Collecting is ultimately defined by a bundle of queries that
+//! inquire after various facts about the items in the crate (e.g.,
+//! `type_of`, `generics_of`, `predicates_of`, etc). See the `provide` function
+//! for the full set.
+//!
+//! At present, however, we do run collection across all items in the
+//! crate as a kind of pass. This should eventually be factored away.
+
+use crate::astconv::AstConv;
+use crate::check::intrinsic::intrinsic_operation_unsafety;
+use crate::errors;
+use rustc_ast as ast;
+use rustc_ast::{MetaItemKind, NestedMetaItem};
+use rustc_attr::{list_contains_name, InlineAttr, InstructionSetAttr, OptimizeAttr};
+use rustc_data_structures::captures::Captures;
+use rustc_data_structures::fx::{FxHashMap, FxHashSet};
+use rustc_errors::{struct_span_err, Applicability, DiagnosticBuilder, ErrorGuaranteed, StashKey};
+use rustc_hir as hir;
+use rustc_hir::def::CtorKind;
+use rustc_hir::def_id::{DefId, LocalDefId, LOCAL_CRATE};
+use rustc_hir::intravisit::{self, Visitor};
+use rustc_hir::weak_lang_items;
+use rustc_hir::{GenericParamKind, Node};
+use rustc_middle::hir::nested_filter;
+use rustc_middle::middle::codegen_fn_attrs::{CodegenFnAttrFlags, CodegenFnAttrs};
+use rustc_middle::mir::mono::Linkage;
+use rustc_middle::ty::query::Providers;
+use rustc_middle::ty::util::{Discr, IntTypeExt};
+use rustc_middle::ty::ReprOptions;
+use rustc_middle::ty::{self, AdtKind, Const, DefIdTree, IsSuggestable, Ty, TyCtxt};
+use rustc_session::lint;
+use rustc_session::parse::feature_err;
+use rustc_span::symbol::{kw, sym, Ident, Symbol};
+use rustc_span::Span;
+use rustc_target::spec::{abi, SanitizerSet};
+use rustc_trait_selection::traits::error_reporting::suggestions::NextTypeParamName;
+use std::iter;
+
+mod generics_of;
+mod item_bounds;
+mod lifetimes;
+mod predicates_of;
+mod type_of;
+
+///////////////////////////////////////////////////////////////////////////
+// Main entry point
+
+fn collect_mod_item_types(tcx: TyCtxt<'_>, module_def_id: LocalDefId) {
+ tcx.hir().visit_item_likes_in_module(module_def_id, &mut CollectItemTypesVisitor { tcx });
+}
+
+pub fn provide(providers: &mut Providers) {
+ lifetimes::provide(providers);
+ *providers = Providers {
+ opt_const_param_of: type_of::opt_const_param_of,
+ type_of: type_of::type_of,
+ item_bounds: item_bounds::item_bounds,
+ explicit_item_bounds: item_bounds::explicit_item_bounds,
+ generics_of: generics_of::generics_of,
+ predicates_of: predicates_of::predicates_of,
+ predicates_defined_on,
+ explicit_predicates_of: predicates_of::explicit_predicates_of,
+ super_predicates_of: predicates_of::super_predicates_of,
+ super_predicates_that_define_assoc_type:
+ predicates_of::super_predicates_that_define_assoc_type,
+ trait_explicit_predicates_and_bounds: predicates_of::trait_explicit_predicates_and_bounds,
+ type_param_predicates: predicates_of::type_param_predicates,
+ trait_def,
+ adt_def,
+ fn_sig,
+ impl_trait_ref,
+ impl_polarity,
+ is_foreign_item,
+ generator_kind,
+ codegen_fn_attrs,
+ asm_target_features,
+ collect_mod_item_types,
+ should_inherit_track_caller,
+ ..*providers
+ };
+}
+
+///////////////////////////////////////////////////////////////////////////
+
+/// Context specific to some particular item. This is what implements
+/// [`AstConv`].
+///
+/// # `ItemCtxt` vs `FnCtxt`
+///
+/// `ItemCtxt` is primarily used to type-check item signatures and lower them
+/// from HIR to their [`ty::Ty`] representation, which is exposed using [`AstConv`].
+/// It's also used for the bodies of items like structs where the body (the fields)
+/// are just signatures.
+///
+/// This is in contrast to `FnCtxt`, which is used to type-check bodies of
+/// functions, closures, and `const`s -- anywhere that expressions and statements show up.
+///
+/// An important thing to note is that `ItemCtxt` does no inference -- it has no [`InferCtxt`] --
+/// while `FnCtxt` does do inference.
+///
+/// [`InferCtxt`]: rustc_infer::infer::InferCtxt
+///
+/// # Trait predicates
+///
+/// `ItemCtxt` has information about the predicates that are defined
+/// on the trait. Unfortunately, this predicate information is
+/// available in various different forms at various points in the
+/// process. So we can't just store a pointer to e.g., the AST or the
+/// parsed ty form, we have to be more flexible. To this end, the
+/// `ItemCtxt` is parameterized by a `DefId` that it uses to satisfy
+/// `get_type_parameter_bounds` requests, drawing the information from
+/// the AST (`hir::Generics`), recursively.
+pub struct ItemCtxt<'tcx> {
+ tcx: TyCtxt<'tcx>,
+ item_def_id: DefId,
+}
+
+///////////////////////////////////////////////////////////////////////////
+
+#[derive(Default)]
+pub(crate) struct HirPlaceholderCollector(pub(crate) Vec<Span>);
+
+impl<'v> Visitor<'v> for HirPlaceholderCollector {
+ fn visit_ty(&mut self, t: &'v hir::Ty<'v>) {
+ if let hir::TyKind::Infer = t.kind {
+ self.0.push(t.span);
+ }
+ intravisit::walk_ty(self, t)
+ }
+ fn visit_generic_arg(&mut self, generic_arg: &'v hir::GenericArg<'v>) {
+ match generic_arg {
+ hir::GenericArg::Infer(inf) => {
+ self.0.push(inf.span);
+ intravisit::walk_inf(self, inf);
+ }
+ hir::GenericArg::Type(t) => self.visit_ty(t),
+ _ => {}
+ }
+ }
+ fn visit_array_length(&mut self, length: &'v hir::ArrayLen) {
+ if let &hir::ArrayLen::Infer(_, span) = length {
+ self.0.push(span);
+ }
+ intravisit::walk_array_len(self, length)
+ }
+}
+
+struct CollectItemTypesVisitor<'tcx> {
+ tcx: TyCtxt<'tcx>,
+}
+
+/// If there are any placeholder types (`_`), emit an error explaining that this is not allowed
+/// and suggest adding type parameters in the appropriate place, taking into consideration any and
+/// all already existing generic type parameters to avoid suggesting a name that is already in use.
+pub(crate) fn placeholder_type_error<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ generics: Option<&hir::Generics<'_>>,
+ placeholder_types: Vec<Span>,
+ suggest: bool,
+ hir_ty: Option<&hir::Ty<'_>>,
+ kind: &'static str,
+) {
+ if placeholder_types.is_empty() {
+ return;
+ }
+
+ placeholder_type_error_diag(tcx, generics, placeholder_types, vec![], suggest, hir_ty, kind)
+ .emit();
+}
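For illustration, the user-level pattern this helper diagnoses and the shape of the fix its suggestion points toward; the name `Wrapper` is invented, and the rejected form is commented out so the sketch compiles on stable.

// `struct Wrapper(_);` is rejected with E0121 ("the placeholder `_` is not
// allowed within types on item signatures ..."); the multipart suggestion
// rewrites it to use a type parameter instead:
struct Wrapper<T>(T);

fn main() {
    let _w = Wrapper(42u8);
}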
+
+pub(crate) fn placeholder_type_error_diag<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ generics: Option<&hir::Generics<'_>>,
+ placeholder_types: Vec<Span>,
+ additional_spans: Vec<Span>,
+ suggest: bool,
+ hir_ty: Option<&hir::Ty<'_>>,
+ kind: &'static str,
+) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> {
+ if placeholder_types.is_empty() {
+ return bad_placeholder(tcx, additional_spans, kind);
+ }
+
+ let params = generics.map(|g| g.params).unwrap_or_default();
+ let type_name = params.next_type_param_name(None);
+ let mut sugg: Vec<_> =
+ placeholder_types.iter().map(|sp| (*sp, (*type_name).to_string())).collect();
+
+ if let Some(generics) = generics {
+ if let Some(arg) = params.iter().find(|arg| {
+ matches!(arg.name, hir::ParamName::Plain(Ident { name: kw::Underscore, .. }))
+ }) {
+ // Account for `_` already present in cases like `struct S<_>(_);` and suggest
+ // `struct S<T>(T);` instead of `struct S<_, T>(T);`.
+ sugg.push((arg.span, (*type_name).to_string()));
+ } else if let Some(span) = generics.span_for_param_suggestion() {
+ // Account for bounds, we want `fn foo<T: E, K>(_: K)` not `fn foo<T, K: E>(_: K)`.
+ sugg.push((span, format!(", {}", type_name)));
+ } else {
+ sugg.push((generics.span, format!("<{}>", type_name)));
+ }
+ }
+
+ let mut err =
+ bad_placeholder(tcx, placeholder_types.into_iter().chain(additional_spans).collect(), kind);
+
+ // Suggest, but only if it is not a function in const or static
+ if suggest {
+ let mut is_fn = false;
+ let mut is_const_or_static = false;
+
+ if let Some(hir_ty) = hir_ty && let hir::TyKind::BareFn(_) = hir_ty.kind {
+ is_fn = true;
+
+ // Check if parent is const or static
+ let parent_id = tcx.hir().get_parent_node(hir_ty.hir_id);
+ let parent_node = tcx.hir().get(parent_id);
+
+ is_const_or_static = matches!(
+ parent_node,
+ Node::Item(&hir::Item {
+ kind: hir::ItemKind::Const(..) | hir::ItemKind::Static(..),
+ ..
+ }) | Node::TraitItem(&hir::TraitItem {
+ kind: hir::TraitItemKind::Const(..),
+ ..
+ }) | Node::ImplItem(&hir::ImplItem { kind: hir::ImplItemKind::Const(..), .. })
+ );
+ }
+
+ // if function is wrapped around a const or static,
+ // then don't show the suggestion
+ if !(is_fn && is_const_or_static) {
+ err.multipart_suggestion(
+ "use type parameters instead",
+ sugg,
+ Applicability::HasPlaceholders,
+ );
+ }
+ }
+
+ err
+}
+
+fn reject_placeholder_type_signatures_in_item<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ item: &'tcx hir::Item<'tcx>,
+) {
+ let (generics, suggest) = match &item.kind {
+ hir::ItemKind::Union(_, generics)
+ | hir::ItemKind::Enum(_, generics)
+ | hir::ItemKind::TraitAlias(generics, _)
+ | hir::ItemKind::Trait(_, _, generics, ..)
+ | hir::ItemKind::Impl(hir::Impl { generics, .. })
+ | hir::ItemKind::Struct(_, generics) => (generics, true),
+ hir::ItemKind::OpaqueTy(hir::OpaqueTy { generics, .. })
+ | hir::ItemKind::TyAlias(_, generics) => (generics, false),
+ // `static`, `fn` and `const` are handled elsewhere to suggest appropriate type.
+ _ => return,
+ };
+
+ let mut visitor = HirPlaceholderCollector::default();
+ visitor.visit_item(item);
+
+ placeholder_type_error(tcx, Some(generics), visitor.0, suggest, None, item.kind.descr());
+}
+
+impl<'tcx> Visitor<'tcx> for CollectItemTypesVisitor<'tcx> {
+ type NestedFilter = nested_filter::OnlyBodies;
+
+ fn nested_visit_map(&mut self) -> Self::Map {
+ self.tcx.hir()
+ }
+
+ fn visit_item(&mut self, item: &'tcx hir::Item<'tcx>) {
+ convert_item(self.tcx, item.item_id());
+ reject_placeholder_type_signatures_in_item(self.tcx, item);
+ intravisit::walk_item(self, item);
+ }
+
+ fn visit_generics(&mut self, generics: &'tcx hir::Generics<'tcx>) {
+ for param in generics.params {
+ match param.kind {
+ hir::GenericParamKind::Lifetime { .. } => {}
+ hir::GenericParamKind::Type { default: Some(_), .. } => {
+ let def_id = self.tcx.hir().local_def_id(param.hir_id);
+ self.tcx.ensure().type_of(def_id);
+ }
+ hir::GenericParamKind::Type { .. } => {}
+ hir::GenericParamKind::Const { default, .. } => {
+ let def_id = self.tcx.hir().local_def_id(param.hir_id);
+ self.tcx.ensure().type_of(def_id);
+ if let Some(default) = default {
+ let default_def_id = self.tcx.hir().local_def_id(default.hir_id);
+ // need to store default and type of default
+ self.tcx.ensure().type_of(default_def_id);
+ self.tcx.ensure().const_param_default(def_id);
+ }
+ }
+ }
+ }
+ intravisit::walk_generics(self, generics);
+ }
+
+ fn visit_expr(&mut self, expr: &'tcx hir::Expr<'tcx>) {
+ if let hir::ExprKind::Closure { .. } = expr.kind {
+ let def_id = self.tcx.hir().local_def_id(expr.hir_id);
+ self.tcx.ensure().generics_of(def_id);
+ // We do not call `type_of` for closures here as that
+ // depends on typecheck and would therefore hide
+ // any further errors in case one typeck fails.
+ }
+ intravisit::walk_expr(self, expr);
+ }
+
+ fn visit_trait_item(&mut self, trait_item: &'tcx hir::TraitItem<'tcx>) {
+ convert_trait_item(self.tcx, trait_item.trait_item_id());
+ intravisit::walk_trait_item(self, trait_item);
+ }
+
+ fn visit_impl_item(&mut self, impl_item: &'tcx hir::ImplItem<'tcx>) {
+ convert_impl_item(self.tcx, impl_item.impl_item_id());
+ intravisit::walk_impl_item(self, impl_item);
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////
+// Utility types and common code for the above passes.
+
+fn bad_placeholder<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ mut spans: Vec<Span>,
+ kind: &'static str,
+) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> {
+ let kind = if kind.ends_with('s') { format!("{}es", kind) } else { format!("{}s", kind) };
+
+ spans.sort();
+ let mut err = struct_span_err!(
+ tcx.sess,
+ spans.clone(),
+ E0121,
+ "the placeholder `_` is not allowed within types on item signatures for {}",
+ kind
+ );
+ for span in spans {
+ err.span_label(span, "not allowed in type signatures");
+ }
+ err
+}
+
+impl<'tcx> ItemCtxt<'tcx> {
+ pub fn new(tcx: TyCtxt<'tcx>, item_def_id: DefId) -> ItemCtxt<'tcx> {
+ ItemCtxt { tcx, item_def_id }
+ }
+
+ pub fn to_ty(&self, ast_ty: &hir::Ty<'_>) -> Ty<'tcx> {
+ <dyn AstConv<'_>>::ast_ty_to_ty(self, ast_ty)
+ }
+
+ pub fn hir_id(&self) -> hir::HirId {
+ self.tcx.hir().local_def_id_to_hir_id(self.item_def_id.expect_local())
+ }
+
+ pub fn node(&self) -> hir::Node<'tcx> {
+ self.tcx.hir().get(self.hir_id())
+ }
+}
+
+impl<'tcx> AstConv<'tcx> for ItemCtxt<'tcx> {
+ fn tcx(&self) -> TyCtxt<'tcx> {
+ self.tcx
+ }
+
+ fn item_def_id(&self) -> Option<DefId> {
+ Some(self.item_def_id)
+ }
+
+ fn get_type_parameter_bounds(
+ &self,
+ span: Span,
+ def_id: DefId,
+ assoc_name: Ident,
+ ) -> ty::GenericPredicates<'tcx> {
+ self.tcx.at(span).type_param_predicates((
+ self.item_def_id,
+ def_id.expect_local(),
+ assoc_name,
+ ))
+ }
+
+ fn re_infer(&self, _: Option<&ty::GenericParamDef>, _: Span) -> Option<ty::Region<'tcx>> {
+ None
+ }
+
+ fn allow_ty_infer(&self) -> bool {
+ false
+ }
+
+ fn ty_infer(&self, _: Option<&ty::GenericParamDef>, span: Span) -> Ty<'tcx> {
+ self.tcx().ty_error_with_message(span, "bad placeholder type")
+ }
+
+ fn ct_infer(&self, ty: Ty<'tcx>, _: Option<&ty::GenericParamDef>, span: Span) -> Const<'tcx> {
+ let ty = self.tcx.fold_regions(ty, |r, _| match *r {
+ ty::ReErased => self.tcx.lifetimes.re_static,
+ _ => r,
+ });
+ self.tcx().const_error_with_message(ty, span, "bad placeholder constant")
+ }
+
+ fn projected_ty_from_poly_trait_ref(
+ &self,
+ span: Span,
+ item_def_id: DefId,
+ item_segment: &hir::PathSegment<'_>,
+ poly_trait_ref: ty::PolyTraitRef<'tcx>,
+ ) -> Ty<'tcx> {
+ if let Some(trait_ref) = poly_trait_ref.no_bound_vars() {
+ let item_substs = <dyn AstConv<'tcx>>::create_substs_for_associated_item(
+ self,
+ span,
+ item_def_id,
+ item_segment,
+ trait_ref.substs,
+ );
+ self.tcx().mk_projection(item_def_id, item_substs)
+ } else {
+ // The trait reference has late-bound regions here, so we can't build the
+ // projection directly; report an error and suggest a fully qualified path.
+ let mut err = struct_span_err!(
+ self.tcx().sess,
+ span,
+ E0212,
+ "cannot use the associated type of a trait \
+ with uninferred generic parameters"
+ );
+
+ match self.node() {
+ hir::Node::Field(_) | hir::Node::Ctor(_) | hir::Node::Variant(_) => {
+ let item = self
+ .tcx
+ .hir()
+ .expect_item(self.tcx.hir().get_parent_item(self.hir_id()).def_id);
+ match &item.kind {
+ hir::ItemKind::Enum(_, generics)
+ | hir::ItemKind::Struct(_, generics)
+ | hir::ItemKind::Union(_, generics) => {
+ let lt_name = get_new_lifetime_name(self.tcx, poly_trait_ref, generics);
+ let (lt_sp, sugg) = match generics.params {
+ [] => (generics.span, format!("<{}>", lt_name)),
+ [bound, ..] => {
+ (bound.span.shrink_to_lo(), format!("{}, ", lt_name))
+ }
+ };
+ let suggestions = vec![
+ (lt_sp, sugg),
+ (
+ span.with_hi(item_segment.ident.span.lo()),
+ format!(
+ "{}::",
+ // Replace the existing lifetimes with a new named lifetime.
+ self.tcx.replace_late_bound_regions_uncached(
+ poly_trait_ref,
+ |_| {
+ self.tcx.mk_region(ty::ReEarlyBound(
+ ty::EarlyBoundRegion {
+ def_id: item_def_id,
+ index: 0,
+ name: Symbol::intern(&lt_name),
+ },
+ ))
+ }
+ ),
+ ),
+ ),
+ ];
+ err.multipart_suggestion(
+ "use a fully qualified path with explicit lifetimes",
+ suggestions,
+ Applicability::MaybeIncorrect,
+ );
+ }
+ _ => {}
+ }
+ }
+ hir::Node::Item(hir::Item {
+ kind:
+ hir::ItemKind::Struct(..) | hir::ItemKind::Enum(..) | hir::ItemKind::Union(..),
+ ..
+ }) => {}
+ hir::Node::Item(_)
+ | hir::Node::ForeignItem(_)
+ | hir::Node::TraitItem(_)
+ | hir::Node::ImplItem(_) => {
+ err.span_suggestion_verbose(
+ span.with_hi(item_segment.ident.span.lo()),
+ "use a fully qualified path with inferred lifetimes",
+ format!(
+ "{}::",
+ // Erase named lt, we want `<A as B<'_>::C`, not `<A as B<'a>::C`.
+ self.tcx.anonymize_late_bound_regions(poly_trait_ref).skip_binder(),
+ ),
+ Applicability::MaybeIncorrect,
+ );
+ }
+ _ => {}
+ }
+ err.emit();
+ self.tcx().ty_error()
+ }
+ }
+
+ fn normalize_ty(&self, _span: Span, ty: Ty<'tcx>) -> Ty<'tcx> {
+ // Types in item signatures are not normalized to avoid undue dependencies.
+ ty
+ }
+
+ fn set_tainted_by_errors(&self) {
+ // There's no obvious place to track this, so just let it go.
+ }
+
+ fn record_ty(&self, _hir_id: hir::HirId, _ty: Ty<'tcx>, _span: Span) {
+ // There's no place to record types from signatures?
+ }
+}
+
+/// Synthesize a new lifetime name that doesn't clash with any of the lifetimes already present.
+fn get_new_lifetime_name<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ poly_trait_ref: ty::PolyTraitRef<'tcx>,
+ generics: &hir::Generics<'tcx>,
+) -> String {
+ let existing_lifetimes = tcx
+ .collect_referenced_late_bound_regions(&poly_trait_ref)
+ .into_iter()
+ .filter_map(|lt| {
+ if let ty::BoundRegionKind::BrNamed(_, name) = lt {
+ Some(name.as_str().to_string())
+ } else {
+ None
+ }
+ })
+ .chain(generics.params.iter().filter_map(|param| {
+ if let hir::GenericParamKind::Lifetime { .. } = &param.kind {
+ Some(param.name.ident().as_str().to_string())
+ } else {
+ None
+ }
+ }))
+ .collect::<FxHashSet<String>>();
+
+ let a_to_z_repeat_n = |n| {
+ (b'a'..=b'z').map(move |c| {
+ let mut s = '\''.to_string();
+ s.extend(std::iter::repeat(char::from(c)).take(n));
+ s
+ })
+ };
+
+ // If all single char lifetime names are present, we wrap around and double the chars.
+ (1..).flat_map(a_to_z_repeat_n).find(|lt| !existing_lifetimes.contains(lt.as_str())).unwrap()
+}
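For illustration, the candidate sequence this helper walks through, reproduced as a standalone snippet using only the standard library (the helper additionally skips names that are already in scope).

fn main() {
    // Same generator as above: `'a` .. `'z`, then `'aa` .. `'zz`, and so on.
    let a_to_z_repeat_n = |n: usize| {
        (b'a'..=b'z').map(move |c| {
            let mut s = '\''.to_string();
            s.extend(std::iter::repeat(char::from(c)).take(n));
            s
        })
    };

    let names: Vec<String> = (1..).flat_map(a_to_z_repeat_n).take(28).collect();
    assert_eq!(names[0], "'a");
    assert_eq!(names[25], "'z");
    assert_eq!(names[26], "'aa");
    assert_eq!(names[27], "'bb");
}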
+
+fn convert_item(tcx: TyCtxt<'_>, item_id: hir::ItemId) {
+ let it = tcx.hir().item(item_id);
+ debug!("convert: item {} with id {}", it.ident, it.hir_id());
+ let def_id = item_id.owner_id.def_id;
+
+ match it.kind {
+ // These don't define types.
+ hir::ItemKind::ExternCrate(_)
+ | hir::ItemKind::Use(..)
+ | hir::ItemKind::Macro(..)
+ | hir::ItemKind::Mod(_)
+ | hir::ItemKind::GlobalAsm(_) => {}
+ hir::ItemKind::ForeignMod { items, .. } => {
+ for item in items {
+ let item = tcx.hir().foreign_item(item.id);
+ tcx.ensure().generics_of(item.owner_id);
+ tcx.ensure().type_of(item.owner_id);
+ tcx.ensure().predicates_of(item.owner_id);
+ match item.kind {
+ hir::ForeignItemKind::Fn(..) => tcx.ensure().fn_sig(item.owner_id),
+ hir::ForeignItemKind::Static(..) => {
+ let mut visitor = HirPlaceholderCollector::default();
+ visitor.visit_foreign_item(item);
+ placeholder_type_error(
+ tcx,
+ None,
+ visitor.0,
+ false,
+ None,
+ "static variable",
+ );
+ }
+ _ => (),
+ }
+ }
+ }
+ hir::ItemKind::Enum(ref enum_definition, _) => {
+ tcx.ensure().generics_of(def_id);
+ tcx.ensure().type_of(def_id);
+ tcx.ensure().predicates_of(def_id);
+ convert_enum_variant_types(tcx, def_id.to_def_id(), enum_definition.variants);
+ }
+ hir::ItemKind::Impl { .. } => {
+ tcx.ensure().generics_of(def_id);
+ tcx.ensure().type_of(def_id);
+ tcx.ensure().impl_trait_ref(def_id);
+ tcx.ensure().predicates_of(def_id);
+ }
+ hir::ItemKind::Trait(..) => {
+ tcx.ensure().generics_of(def_id);
+ tcx.ensure().trait_def(def_id);
+ tcx.at(it.span).super_predicates_of(def_id);
+ tcx.ensure().predicates_of(def_id);
+ }
+ hir::ItemKind::TraitAlias(..) => {
+ tcx.ensure().generics_of(def_id);
+ tcx.at(it.span).super_predicates_of(def_id);
+ tcx.ensure().predicates_of(def_id);
+ }
+ hir::ItemKind::Struct(ref struct_def, _) | hir::ItemKind::Union(ref struct_def, _) => {
+ tcx.ensure().generics_of(def_id);
+ tcx.ensure().type_of(def_id);
+ tcx.ensure().predicates_of(def_id);
+
+ for f in struct_def.fields() {
+ let def_id = tcx.hir().local_def_id(f.hir_id);
+ tcx.ensure().generics_of(def_id);
+ tcx.ensure().type_of(def_id);
+ tcx.ensure().predicates_of(def_id);
+ }
+
+ if let Some(ctor_hir_id) = struct_def.ctor_hir_id() {
+ convert_variant_ctor(tcx, ctor_hir_id);
+ }
+ }
+
+ // Desugared from `impl Trait`, so visited by the function's return type.
+ hir::ItemKind::OpaqueTy(hir::OpaqueTy {
+ origin: hir::OpaqueTyOrigin::FnReturn(..) | hir::OpaqueTyOrigin::AsyncFn(..),
+ ..
+ }) => {}
+
+ // Don't call `type_of` on opaque types, since that depends on type
+ // checking function bodies. `check_item_type` ensures that it's called
+ // instead.
+ hir::ItemKind::OpaqueTy(..) => {
+ tcx.ensure().generics_of(def_id);
+ tcx.ensure().predicates_of(def_id);
+ tcx.ensure().explicit_item_bounds(def_id);
+ }
+ hir::ItemKind::TyAlias(..)
+ | hir::ItemKind::Static(..)
+ | hir::ItemKind::Const(..)
+ | hir::ItemKind::Fn(..) => {
+ tcx.ensure().generics_of(def_id);
+ tcx.ensure().type_of(def_id);
+ tcx.ensure().predicates_of(def_id);
+ match it.kind {
+ hir::ItemKind::Fn(..) => tcx.ensure().fn_sig(def_id),
+ hir::ItemKind::OpaqueTy(..) => tcx.ensure().item_bounds(def_id),
+ hir::ItemKind::Const(ty, ..) | hir::ItemKind::Static(ty, ..) => {
+ if !is_suggestable_infer_ty(ty) {
+ let mut visitor = HirPlaceholderCollector::default();
+ visitor.visit_item(it);
+ placeholder_type_error(tcx, None, visitor.0, false, None, it.kind.descr());
+ }
+ }
+ _ => (),
+ }
+ }
+ }
+}
+
+fn convert_trait_item(tcx: TyCtxt<'_>, trait_item_id: hir::TraitItemId) {
+ let trait_item = tcx.hir().trait_item(trait_item_id);
+ let def_id = trait_item_id.owner_id;
+ tcx.ensure().generics_of(def_id);
+
+ match trait_item.kind {
+ hir::TraitItemKind::Fn(..) => {
+ tcx.ensure().type_of(def_id);
+ tcx.ensure().fn_sig(def_id);
+ }
+
+ hir::TraitItemKind::Const(.., Some(_)) => {
+ tcx.ensure().type_of(def_id);
+ }
+
+ hir::TraitItemKind::Const(hir_ty, _) => {
+ tcx.ensure().type_of(def_id);
+ // Account for `const C: _;`.
+ let mut visitor = HirPlaceholderCollector::default();
+ visitor.visit_trait_item(trait_item);
+ if !tcx.sess.diagnostic().has_stashed_diagnostic(hir_ty.span, StashKey::ItemNoType) {
+ placeholder_type_error(tcx, None, visitor.0, false, None, "constant");
+ }
+ }
+
+ hir::TraitItemKind::Type(_, Some(_)) => {
+ tcx.ensure().item_bounds(def_id);
+ tcx.ensure().type_of(def_id);
+ // Account for `type T = _;`.
+ let mut visitor = HirPlaceholderCollector::default();
+ visitor.visit_trait_item(trait_item);
+ placeholder_type_error(tcx, None, visitor.0, false, None, "associated type");
+ }
+
+ hir::TraitItemKind::Type(_, None) => {
+ tcx.ensure().item_bounds(def_id);
+ // #74612: Visit and try to find bad placeholders
+ // even if there is no concrete type.
+ let mut visitor = HirPlaceholderCollector::default();
+ visitor.visit_trait_item(trait_item);
+
+ placeholder_type_error(tcx, None, visitor.0, false, None, "associated type");
+ }
+ };
+
+ tcx.ensure().predicates_of(def_id);
+}
+
+fn convert_impl_item(tcx: TyCtxt<'_>, impl_item_id: hir::ImplItemId) {
+ let def_id = impl_item_id.owner_id;
+ tcx.ensure().generics_of(def_id);
+ tcx.ensure().type_of(def_id);
+ tcx.ensure().predicates_of(def_id);
+ let impl_item = tcx.hir().impl_item(impl_item_id);
+ match impl_item.kind {
+ hir::ImplItemKind::Fn(..) => {
+ tcx.ensure().fn_sig(def_id);
+ }
+ hir::ImplItemKind::Type(_) => {
+ // Account for `type T = _;`
+ let mut visitor = HirPlaceholderCollector::default();
+ visitor.visit_impl_item(impl_item);
+
+ placeholder_type_error(tcx, None, visitor.0, false, None, "associated type");
+ }
+ hir::ImplItemKind::Const(..) => {}
+ }
+}
+
+fn convert_variant_ctor(tcx: TyCtxt<'_>, ctor_id: hir::HirId) {
+ let def_id = tcx.hir().local_def_id(ctor_id);
+ tcx.ensure().generics_of(def_id);
+ tcx.ensure().type_of(def_id);
+ tcx.ensure().predicates_of(def_id);
+}
+
+fn convert_enum_variant_types(tcx: TyCtxt<'_>, def_id: DefId, variants: &[hir::Variant<'_>]) {
+ let def = tcx.adt_def(def_id);
+ let repr_type = def.repr().discr_type();
+ let initial = repr_type.initial_discriminant(tcx);
+ let mut prev_discr = None::<Discr<'_>>;
+
+ // fill the discriminant values and field types
+ for variant in variants {
+ let wrapped_discr = prev_discr.map_or(initial, |d| d.wrap_incr(tcx));
+ prev_discr = Some(
+ if let Some(ref e) = variant.disr_expr {
+ let expr_did = tcx.hir().local_def_id(e.hir_id);
+ def.eval_explicit_discr(tcx, expr_did.to_def_id())
+ } else if let Some(discr) = repr_type.disr_incr(tcx, prev_discr) {
+ Some(discr)
+ } else {
+ struct_span_err!(tcx.sess, variant.span, E0370, "enum discriminant overflowed")
+ .span_label(
+ variant.span,
+ format!("overflowed on value after {}", prev_discr.unwrap()),
+ )
+ .note(&format!(
+ "explicitly set `{} = {}` if that is the desired outcome",
+ variant.ident, wrapped_discr
+ ))
+ .emit();
+ None
+ }
+ .unwrap_or(wrapped_discr),
+ );
+
+ for f in variant.data.fields() {
+ let def_id = tcx.hir().local_def_id(f.hir_id);
+ tcx.ensure().generics_of(def_id);
+ tcx.ensure().type_of(def_id);
+ tcx.ensure().predicates_of(def_id);
+ }
+
+ // Convert the ctor, if any. This also registers the variant as
+ // an item.
+ if let Some(ctor_hir_id) = variant.data.ctor_hir_id() {
+ convert_variant_ctor(tcx, ctor_hir_id);
+ }
+ }
+}
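For illustration, the discriminant-overflow situation (E0370) handled above, with an invented enum; the overflowing variant is commented out so the sketch compiles on stable.

#[repr(u8)]
enum Status {
    Start = 254,
    Next,        // implicit: previous discriminant + 1 = 255
    // Overflow, // E0370: enum discriminant overflowed; the note would suggest
    //           // explicitly writing `Overflow = 0` if wrapping is intended
}

fn main() {
    assert_eq!(Status::Next as u8, 255);
    let _ = Status::Start;
}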
+
+fn convert_variant(
+ tcx: TyCtxt<'_>,
+ variant_did: Option<LocalDefId>,
+ ctor_did: Option<LocalDefId>,
+ ident: Ident,
+ discr: ty::VariantDiscr,
+ def: &hir::VariantData<'_>,
+ adt_kind: ty::AdtKind,
+ parent_did: LocalDefId,
+) -> ty::VariantDef {
+ let mut seen_fields: FxHashMap<Ident, Span> = Default::default();
+ let fields = def
+ .fields()
+ .iter()
+ .map(|f| {
+ let fid = tcx.hir().local_def_id(f.hir_id);
+ let dup_span = seen_fields.get(&f.ident.normalize_to_macros_2_0()).cloned();
+ if let Some(prev_span) = dup_span {
+ tcx.sess.emit_err(errors::FieldAlreadyDeclared {
+ field_name: f.ident,
+ span: f.span,
+ prev_span,
+ });
+ } else {
+ seen_fields.insert(f.ident.normalize_to_macros_2_0(), f.span);
+ }
+
+ ty::FieldDef { did: fid.to_def_id(), name: f.ident.name, vis: tcx.visibility(fid) }
+ })
+ .collect();
+ let recovered = match def {
+ hir::VariantData::Struct(_, r) => *r,
+ _ => false,
+ };
+ ty::VariantDef::new(
+ ident.name,
+ variant_did.map(LocalDefId::to_def_id),
+ ctor_did.map(LocalDefId::to_def_id),
+ discr,
+ fields,
+ CtorKind::from_hir(def),
+ adt_kind,
+ parent_did.to_def_id(),
+ recovered,
+ adt_kind == AdtKind::Struct && tcx.has_attr(parent_did.to_def_id(), sym::non_exhaustive)
+ || variant_did.map_or(false, |variant_did| {
+ tcx.has_attr(variant_did.to_def_id(), sym::non_exhaustive)
+ }),
+ )
+}
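For illustration, the duplicate-field case that `convert_variant` reports through `FieldAlreadyDeclared`, with an invented struct; the offending line is commented out so the sketch compiles on stable.

struct Point {
    x: u32,
    y: u32,
    // x: u32, // rejected: field `x` is already declared
}

fn main() {
    let _p = Point { x: 1, y: 2 };
}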
+
+fn adt_def<'tcx>(tcx: TyCtxt<'tcx>, def_id: DefId) -> ty::AdtDef<'tcx> {
+ use rustc_hir::*;
+
+ let def_id = def_id.expect_local();
+ let hir_id = tcx.hir().local_def_id_to_hir_id(def_id);
+ let Node::Item(item) = tcx.hir().get(hir_id) else {
+ bug!();
+ };
+
+ let repr = ReprOptions::new(tcx, def_id.to_def_id());
+ let (kind, variants) = match item.kind {
+ ItemKind::Enum(ref def, _) => {
+ let mut distance_from_explicit = 0;
+ let variants = def
+ .variants
+ .iter()
+ .map(|v| {
+ let variant_did = Some(tcx.hir().local_def_id(v.id));
+ let ctor_did =
+ v.data.ctor_hir_id().map(|hir_id| tcx.hir().local_def_id(hir_id));
+
+ let discr = if let Some(ref e) = v.disr_expr {
+ distance_from_explicit = 0;
+ ty::VariantDiscr::Explicit(tcx.hir().local_def_id(e.hir_id).to_def_id())
+ } else {
+ ty::VariantDiscr::Relative(distance_from_explicit)
+ };
+ distance_from_explicit += 1;
+
+ convert_variant(
+ tcx,
+ variant_did,
+ ctor_did,
+ v.ident,
+ discr,
+ &v.data,
+ AdtKind::Enum,
+ def_id,
+ )
+ })
+ .collect();
+
+ (AdtKind::Enum, variants)
+ }
+ ItemKind::Struct(ref def, _) => {
+ let variant_did = None::<LocalDefId>;
+ let ctor_did = def.ctor_hir_id().map(|hir_id| tcx.hir().local_def_id(hir_id));
+
+ let variants = std::iter::once(convert_variant(
+ tcx,
+ variant_did,
+ ctor_did,
+ item.ident,
+ ty::VariantDiscr::Relative(0),
+ def,
+ AdtKind::Struct,
+ def_id,
+ ))
+ .collect();
+
+ (AdtKind::Struct, variants)
+ }
+ ItemKind::Union(ref def, _) => {
+ let variant_did = None;
+ let ctor_did = def.ctor_hir_id().map(|hir_id| tcx.hir().local_def_id(hir_id));
+
+ let variants = std::iter::once(convert_variant(
+ tcx,
+ variant_did,
+ ctor_did,
+ item.ident,
+ ty::VariantDiscr::Relative(0),
+ def,
+ AdtKind::Union,
+ def_id,
+ ))
+ .collect();
+
+ (AdtKind::Union, variants)
+ }
+ _ => bug!(),
+ };
+ tcx.alloc_adt_def(def_id.to_def_id(), kind, variants, repr)
+}
+
+fn trait_def(tcx: TyCtxt<'_>, def_id: DefId) -> ty::TraitDef {
+ let item = tcx.hir().expect_item(def_id.expect_local());
+
+ let (is_auto, unsafety, items) = match item.kind {
+ hir::ItemKind::Trait(is_auto, unsafety, .., items) => {
+ (is_auto == hir::IsAuto::Yes, unsafety, items)
+ }
+ hir::ItemKind::TraitAlias(..) => (false, hir::Unsafety::Normal, &[][..]),
+ _ => span_bug!(item.span, "trait_def_of_item invoked on non-trait"),
+ };
+
+ let paren_sugar = tcx.has_attr(def_id, sym::rustc_paren_sugar);
+ if paren_sugar && !tcx.features().unboxed_closures {
+ tcx.sess
+ .struct_span_err(
+ item.span,
+ "the `#[rustc_paren_sugar]` attribute is a temporary means of controlling \
+ which traits can use parenthetical notation",
+ )
+ .help("add `#![feature(unboxed_closures)]` to the crate attributes to use it")
+ .emit();
+ }
+
+ let is_marker = tcx.has_attr(def_id, sym::marker);
+ let skip_array_during_method_dispatch =
+ tcx.has_attr(def_id, sym::rustc_skip_array_during_method_dispatch);
+ let spec_kind = if tcx.has_attr(def_id, sym::rustc_unsafe_specialization_marker) {
+ ty::trait_def::TraitSpecializationKind::Marker
+ } else if tcx.has_attr(def_id, sym::rustc_specialization_trait) {
+ ty::trait_def::TraitSpecializationKind::AlwaysApplicable
+ } else {
+ ty::trait_def::TraitSpecializationKind::None
+ };
+ let must_implement_one_of = tcx
+ .get_attr(def_id, sym::rustc_must_implement_one_of)
+ // Check that there are at least 2 arguments of `#[rustc_must_implement_one_of]`
+ // and that they are all identifiers
+ .and_then(|attr| match attr.meta_item_list() {
+ Some(items) if items.len() < 2 => {
+ tcx.sess
+ .struct_span_err(
+ attr.span,
+ "the `#[rustc_must_implement_one_of]` attribute must be \
+ used with at least 2 args",
+ )
+ .emit();
+
+ None
+ }
+ Some(items) => items
+ .into_iter()
+ .map(|item| item.ident().ok_or(item.span()))
+ .collect::<Result<Box<[_]>, _>>()
+ .map_err(|span| {
+ tcx.sess
+ .struct_span_err(span, "must be a name of an associated function")
+ .emit();
+ })
+ .ok()
+ .zip(Some(attr.span)),
+ // Error is reported by `rustc_attr!`
+ None => None,
+ })
+ // Check that all arguments of `#[rustc_must_implement_one_of]` reference
+ // functions in the trait with default implementations
+ .and_then(|(list, attr_span)| {
+ let errors = list.iter().filter_map(|ident| {
+ let item = items.iter().find(|item| item.ident == *ident);
+
+ match item {
+ Some(item) if matches!(item.kind, hir::AssocItemKind::Fn { .. }) => {
+ if !tcx.impl_defaultness(item.id.owner_id).has_value() {
+ tcx.sess
+ .struct_span_err(
+ item.span,
+ "This function doesn't have a default implementation",
+ )
+ .span_note(attr_span, "required by this annotation")
+ .emit();
+
+ return Some(());
+ }
+
+ return None;
+ }
+ Some(item) => {
+ tcx.sess
+ .struct_span_err(item.span, "Not a function")
+ .span_note(attr_span, "required by this annotation")
+ .note(
+ "All `#[rustc_must_implement_one_of]` arguments \
+ must be associated function names",
+ )
+ .emit();
+ }
+ None => {
+ tcx.sess
+ .struct_span_err(ident.span, "Function not found in this trait")
+ .emit();
+ }
+ }
+
+ Some(())
+ });
+
+ (errors.count() == 0).then_some(list)
+ })
+ // Check for duplicates
+ .and_then(|list| {
+ let mut set: FxHashMap<Symbol, Span> = FxHashMap::default();
+ let mut no_dups = true;
+
+ for ident in &*list {
+ if let Some(dup) = set.insert(ident.name, ident.span) {
+ tcx.sess
+ .struct_span_err(vec![dup, ident.span], "Function names are duplicated")
+ .note(
+ "All `#[rustc_must_implement_one_of]` arguments \
+ must be unique",
+ )
+ .emit();
+
+ no_dups = false;
+ }
+ }
+
+ no_dups.then_some(list)
+ });
+
+ ty::TraitDef::new(
+ def_id,
+ unsafety,
+ paren_sugar,
+ is_auto,
+ is_marker,
+ skip_array_during_method_dispatch,
+ spec_kind,
+ must_implement_one_of,
+ )
+}
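For illustration, how the `#[rustc_must_implement_one_of]` attribute validated above is meant to be used: at least two names, each naming an associated function with a default body. Trait and type names are invented, and the sketch assumes a nightly toolchain with the internal `rustc_attrs` feature gate.

#![feature(rustc_attrs)]

#[rustc_must_implement_one_of(eq, ne)]
trait MyEq {
    fn eq(&self, other: &Self) -> bool {
        !self.ne(other)
    }
    fn ne(&self, other: &Self) -> bool {
        !self.eq(other)
    }
}

struct Unit;

// Overriding one of the two listed functions satisfies the requirement.
impl MyEq for Unit {
    fn eq(&self, _other: &Unit) -> bool {
        true
    }
}

fn main() {
    assert!(Unit.eq(&Unit));
    assert!(!Unit.ne(&Unit));
}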
+
+fn are_suggestable_generic_args(generic_args: &[hir::GenericArg<'_>]) -> bool {
+ generic_args.iter().any(|arg| match arg {
+ hir::GenericArg::Type(ty) => is_suggestable_infer_ty(ty),
+ hir::GenericArg::Infer(_) => true,
+ _ => false,
+ })
+}
+
+/// Whether `ty` is a type with `_` placeholders that can be inferred. Used in diagnostics only to
+/// use inference to provide suggestions for the appropriate type if possible.
+fn is_suggestable_infer_ty(ty: &hir::Ty<'_>) -> bool {
+ debug!(?ty);
+ use hir::TyKind::*;
+ match &ty.kind {
+ Infer => true,
+ Slice(ty) => is_suggestable_infer_ty(ty),
+ Array(ty, length) => {
+ is_suggestable_infer_ty(ty) || matches!(length, hir::ArrayLen::Infer(_, _))
+ }
+ Tup(tys) => tys.iter().any(is_suggestable_infer_ty),
+ Ptr(mut_ty) | Rptr(_, mut_ty) => is_suggestable_infer_ty(mut_ty.ty),
+ OpaqueDef(_, generic_args, _) => are_suggestable_generic_args(generic_args),
+ Path(hir::QPath::TypeRelative(ty, segment)) => {
+ is_suggestable_infer_ty(ty) || are_suggestable_generic_args(segment.args().args)
+ }
+ Path(hir::QPath::Resolved(ty_opt, hir::Path { segments, .. })) => {
+ ty_opt.map_or(false, is_suggestable_infer_ty)
+ || segments.iter().any(|segment| are_suggestable_generic_args(segment.args().args))
+ }
+ _ => false,
+ }
+}
+
+pub fn get_infer_ret_ty<'hir>(output: &'hir hir::FnRetTy<'hir>) -> Option<&'hir hir::Ty<'hir>> {
+ if let hir::FnRetTy::Return(ty) = output {
+ if is_suggestable_infer_ty(ty) {
+ return Some(&*ty);
+ }
+ }
+ None
+}
+
+#[instrument(level = "debug", skip(tcx))]
+fn fn_sig(tcx: TyCtxt<'_>, def_id: DefId) -> ty::PolyFnSig<'_> {
+ use rustc_hir::Node::*;
+ use rustc_hir::*;
+
+ let def_id = def_id.expect_local();
+ let hir_id = tcx.hir().local_def_id_to_hir_id(def_id);
+
+ let icx = ItemCtxt::new(tcx, def_id.to_def_id());
+
+ match tcx.hir().get(hir_id) {
+ TraitItem(hir::TraitItem {
+ kind: TraitItemKind::Fn(sig, TraitFn::Provided(_)),
+ generics,
+ ..
+ })
+ | Item(hir::Item { kind: ItemKind::Fn(sig, generics, _), .. }) => {
+ infer_return_ty_for_fn_sig(tcx, sig, generics, def_id, &icx)
+ }
+
+ ImplItem(hir::ImplItem { kind: ImplItemKind::Fn(sig, _), generics, .. }) => {
+ // Do not try to infer the return type for an impl method coming from a trait
+ if let Item(hir::Item { kind: ItemKind::Impl(i), .. }) =
+ tcx.hir().get(tcx.hir().get_parent_node(hir_id))
+ && i.of_trait.is_some()
+ {
+ <dyn AstConv<'_>>::ty_of_fn(
+ &icx,
+ hir_id,
+ sig.header.unsafety,
+ sig.header.abi,
+ sig.decl,
+ Some(generics),
+ None,
+ )
+ } else {
+ infer_return_ty_for_fn_sig(tcx, sig, generics, def_id, &icx)
+ }
+ }
+
+ TraitItem(hir::TraitItem {
+ kind: TraitItemKind::Fn(FnSig { header, decl, span: _ }, _),
+ generics,
+ ..
+ }) => <dyn AstConv<'_>>::ty_of_fn(
+ &icx,
+ hir_id,
+ header.unsafety,
+ header.abi,
+ decl,
+ Some(generics),
+ None,
+ ),
+
+ ForeignItem(&hir::ForeignItem { kind: ForeignItemKind::Fn(fn_decl, _, _), .. }) => {
+ let abi = tcx.hir().get_foreign_abi(hir_id);
+ compute_sig_of_foreign_fn_decl(tcx, def_id.to_def_id(), fn_decl, abi)
+ }
+
+ Ctor(data) | Variant(hir::Variant { data, .. }) if data.ctor_hir_id().is_some() => {
+ let ty = tcx.type_of(tcx.hir().get_parent_item(hir_id));
+ let inputs =
+ data.fields().iter().map(|f| tcx.type_of(tcx.hir().local_def_id(f.hir_id)));
+ ty::Binder::dummy(tcx.mk_fn_sig(
+ inputs,
+ ty,
+ false,
+ hir::Unsafety::Normal,
+ abi::Abi::Rust,
+ ))
+ }
+
+ Expr(&hir::Expr { kind: hir::ExprKind::Closure { .. }, .. }) => {
+ // Closure signatures are not like other function
+ // signatures and cannot be accessed through `fn_sig`. For
+ // example, a closure signature excludes the `self`
+ // argument. In any case they are embedded within the
+ // closure type as part of the `ClosureSubsts`.
+ //
+ // To get the signature of a closure, you should use the
+ // `sig` method on the `ClosureSubsts`:
+ //
+ // substs.as_closure().sig(def_id, tcx)
+ bug!(
+ "to get the signature of a closure, use `substs.as_closure().sig()` not `fn_sig()`",
+ );
+ }
+
+ x => {
+ bug!("unexpected sort of node in fn_sig(): {:?}", x);
+ }
+ }
+}
+
+fn infer_return_ty_for_fn_sig<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ sig: &hir::FnSig<'_>,
+ generics: &hir::Generics<'_>,
+ def_id: LocalDefId,
+ icx: &ItemCtxt<'tcx>,
+) -> ty::PolyFnSig<'tcx> {
+ let hir_id = tcx.hir().local_def_id_to_hir_id(def_id);
+
+ match get_infer_ret_ty(&sig.decl.output) {
+ Some(ty) => {
+ let fn_sig = tcx.typeck(def_id).liberated_fn_sigs()[hir_id];
+ // Typeck doesn't expect erased regions to be returned from `type_of`.
+ let fn_sig = tcx.fold_regions(fn_sig, |r, _| match *r {
+ ty::ReErased => tcx.lifetimes.re_static,
+ _ => r,
+ });
+ let fn_sig = ty::Binder::dummy(fn_sig);
+
+ let mut visitor = HirPlaceholderCollector::default();
+ visitor.visit_ty(ty);
+ let mut diag = bad_placeholder(tcx, visitor.0, "return type");
+ let ret_ty = fn_sig.skip_binder().output();
+ if ret_ty.is_suggestable(tcx, false) {
+ diag.span_suggestion(
+ ty.span,
+ "replace with the correct return type",
+ ret_ty,
+ Applicability::MachineApplicable,
+ );
+ } else if matches!(ret_ty.kind(), ty::FnDef(..)) {
+ let fn_sig = ret_ty.fn_sig(tcx);
+ if fn_sig
+ .skip_binder()
+ .inputs_and_output
+ .iter()
+ .all(|t| t.is_suggestable(tcx, false))
+ {
+ diag.span_suggestion(
+ ty.span,
+ "replace with the correct return type",
+ fn_sig,
+ Applicability::MachineApplicable,
+ );
+ }
+ } else if ret_ty.is_closure() {
+ // We're dealing with a closure, so we should suggest using `impl Fn` or trait bounds
+ // to prevent the user from getting a papercut while trying to use the unique closure
+ // syntax (e.g. `[closure@src/lib.rs:2:5: 2:9]`).
+ diag.help("consider using an `Fn`, `FnMut`, or `FnOnce` trait bound");
+ diag.note("for more information on `Fn` traits and closure types, see https://doc.rust-lang.org/book/ch13-01-closures.html");
+ }
+ diag.emit();
+
+ fn_sig
+ }
+ None => <dyn AstConv<'_>>::ty_of_fn(
+ icx,
+ hir_id,
+ sig.header.unsafety,
+ sig.header.abi,
+ sig.decl,
+ Some(generics),
+ None,
+ ),
+ }
+}
+
+fn impl_trait_ref(tcx: TyCtxt<'_>, def_id: DefId) -> Option<ty::TraitRef<'_>> {
+ let icx = ItemCtxt::new(tcx, def_id);
+ let item = tcx.hir().expect_item(def_id.expect_local());
+ match item.kind {
+ hir::ItemKind::Impl(ref impl_) => impl_.of_trait.as_ref().map(|ast_trait_ref| {
+ let selfty = tcx.type_of(def_id);
+ <dyn AstConv<'_>>::instantiate_mono_trait_ref(
+ &icx,
+ ast_trait_ref,
+ selfty,
+ check_impl_constness(tcx, impl_.constness, ast_trait_ref),
+ )
+ }),
+ _ => bug!(),
+ }
+}
+
+fn check_impl_constness(
+ tcx: TyCtxt<'_>,
+ constness: hir::Constness,
+ ast_trait_ref: &hir::TraitRef<'_>,
+) -> ty::BoundConstness {
+ match constness {
+ hir::Constness::Const => {
+ if let Some(trait_def_id) = ast_trait_ref.trait_def_id() && !tcx.has_attr(trait_def_id, sym::const_trait) {
+ let trait_name = tcx.item_name(trait_def_id).to_string();
+ tcx.sess.emit_err(errors::ConstImplForNonConstTrait {
+ trait_ref_span: ast_trait_ref.path.span,
+ trait_name,
+ local_trait_span: trait_def_id.as_local().map(|_| tcx.def_span(trait_def_id).shrink_to_lo()),
+ marking: (),
+ adding: (),
+ });
+ ty::BoundConstness::NotConst
+ } else {
+ ty::BoundConstness::ConstIfConst
+ }
+ },
+ hir::Constness::NotConst => ty::BoundConstness::NotConst,
+ }
+}
+
+fn impl_polarity(tcx: TyCtxt<'_>, def_id: DefId) -> ty::ImplPolarity {
+ let is_rustc_reservation = tcx.has_attr(def_id, sym::rustc_reservation_impl);
+ let item = tcx.hir().expect_item(def_id.expect_local());
+ match &item.kind {
+ hir::ItemKind::Impl(hir::Impl {
+ polarity: hir::ImplPolarity::Negative(span),
+ of_trait,
+ ..
+ }) => {
+ if is_rustc_reservation {
+ let span = span.to(of_trait.as_ref().map_or(*span, |t| t.path.span));
+ tcx.sess.span_err(span, "reservation impls can't be negative");
+ }
+ ty::ImplPolarity::Negative
+ }
+ hir::ItemKind::Impl(hir::Impl {
+ polarity: hir::ImplPolarity::Positive,
+ of_trait: None,
+ ..
+ }) => {
+ if is_rustc_reservation {
+ tcx.sess.span_err(item.span, "reservation impls can't be inherent");
+ }
+ ty::ImplPolarity::Positive
+ }
+ hir::ItemKind::Impl(hir::Impl {
+ polarity: hir::ImplPolarity::Positive,
+ of_trait: Some(_),
+ ..
+ }) => {
+ if is_rustc_reservation {
+ ty::ImplPolarity::Reservation
+ } else {
+ ty::ImplPolarity::Positive
+ }
+ }
+ item => bug!("impl_polarity: {:?} not an impl", item),
+ }
+}
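
For readers skimming the classification above, a minimal sketch of the negative case follows (illustrative only, nightly `feature(negative_impls)`, not part of this patch; the type name is made up).

// Illustrative only -- not part of this patch. A negative impl, which
// `impl_polarity` classifies as `ImplPolarity::Negative`; an ordinary
// `impl Trait for Type` is `Positive`, and the reservation case relies on
// the internal `#[rustc_reservation_impl]` attribute (not shown).
#![feature(negative_impls)]

struct Guard;

// Opt `Guard` out of the `Send` auto trait.
impl !Send for Guard {}

fn main() {}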
+
+/// Returns the early-bound lifetimes declared in this generics
+/// listing. For anything other than fns/methods, this is just all
+/// the lifetimes that are declared. For fns or methods, we have to
+/// screen out those that do not appear in any where-clauses etc using
+/// `resolve_lifetime::early_bound_lifetimes`.
+fn early_bound_lifetimes_from_generics<'a, 'tcx: 'a>(
+ tcx: TyCtxt<'tcx>,
+ generics: &'a hir::Generics<'a>,
+) -> impl Iterator<Item = &'a hir::GenericParam<'a>> + Captures<'tcx> {
+ generics.params.iter().filter(move |param| match param.kind {
+ GenericParamKind::Lifetime { .. } => !tcx.is_late_bound(param.hir_id),
+ _ => false,
+ })
+}
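
A minimal sketch of the distinction this helper screens for (illustrative only, not part of this patch; the names are made up): a lifetime used only in the signature stays late-bound, while one mentioned in a bound becomes early-bound.

// Illustrative only. `'l` is late-bound: it appears only in the argument
// type, so it is bound anew at each call site. `'e` is early-bound: it
// appears in the `T: 'e` bound, so it belongs to the item's generics --
// exactly the case `early_bound_lifetimes_from_generics` keeps.
fn demo<'l, 'e, T: 'e>(_x: &'l u32, y: &'e T) -> &'e T {
    y
}

fn main() {
    let v = 7u32;
    println!("{}", demo(&1, &v));
}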
+
+/// Returns a list of type predicates for the definition with ID `def_id`, including inferred
+/// lifetime constraints. This includes all predicates returned by `explicit_predicates_of`, plus
+/// inferred constraints concerning which regions outlive other regions.
+#[instrument(level = "debug", skip(tcx))]
+fn predicates_defined_on(tcx: TyCtxt<'_>, def_id: DefId) -> ty::GenericPredicates<'_> {
+ let mut result = tcx.explicit_predicates_of(def_id);
+ debug!("predicates_defined_on: explicit_predicates_of({:?}) = {:?}", def_id, result,);
+ let inferred_outlives = tcx.inferred_outlives_of(def_id);
+ if !inferred_outlives.is_empty() {
+ debug!(
+ "predicates_defined_on: inferred_outlives_of({:?}) = {:?}",
+ def_id, inferred_outlives,
+ );
+ if result.predicates.is_empty() {
+ result.predicates = inferred_outlives;
+ } else {
+ result.predicates = tcx
+ .arena
+ .alloc_from_iter(result.predicates.iter().chain(inferred_outlives).copied());
+ }
+ }
+
+ debug!("predicates_defined_on({:?}) = {:?}", def_id, result);
+ result
+}
+
+fn compute_sig_of_foreign_fn_decl<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ def_id: DefId,
+ decl: &'tcx hir::FnDecl<'tcx>,
+ abi: abi::Abi,
+) -> ty::PolyFnSig<'tcx> {
+ let unsafety = if abi == abi::Abi::RustIntrinsic {
+ intrinsic_operation_unsafety(tcx, def_id)
+ } else {
+ hir::Unsafety::Unsafe
+ };
+ let hir_id = tcx.hir().local_def_id_to_hir_id(def_id.expect_local());
+ let fty = <dyn AstConv<'_>>::ty_of_fn(
+ &ItemCtxt::new(tcx, def_id),
+ hir_id,
+ unsafety,
+ abi,
+ decl,
+ None,
+ None,
+ );
+
+ // Feature gate SIMD types in FFI, since I am not sure that the
+ // ABIs are handled at all correctly. -huonw
+ if abi != abi::Abi::RustIntrinsic
+ && abi != abi::Abi::PlatformIntrinsic
+ && !tcx.features().simd_ffi
+ {
+ let check = |ast_ty: &hir::Ty<'_>, ty: Ty<'_>| {
+ if ty.is_simd() {
+ let snip = tcx
+ .sess
+ .source_map()
+ .span_to_snippet(ast_ty.span)
+ .map_or_else(|_| String::new(), |s| format!(" `{}`", s));
+ tcx.sess
+ .struct_span_err(
+ ast_ty.span,
+ &format!(
+ "use of SIMD type{} in FFI is highly experimental and \
+ may result in invalid code",
+ snip
+ ),
+ )
+ .help("add `#![feature(simd_ffi)]` to the crate attributes to enable")
+ .emit();
+ }
+ };
+ for (input, ty) in iter::zip(decl.inputs, fty.inputs().skip_binder()) {
+ check(input, *ty)
+ }
+ if let hir::FnRetTy::Return(ref ty) = decl.output {
+ check(ty, fty.output().skip_binder())
+ }
+ }
+
+ fty
+}
+
+fn is_foreign_item(tcx: TyCtxt<'_>, def_id: DefId) -> bool {
+ match tcx.hir().get_if_local(def_id) {
+ Some(Node::ForeignItem(..)) => true,
+ Some(_) => false,
+ _ => bug!("is_foreign_item applied to non-local def-id {:?}", def_id),
+ }
+}
+
+fn generator_kind(tcx: TyCtxt<'_>, def_id: DefId) -> Option<hir::GeneratorKind> {
+ match tcx.hir().get_if_local(def_id) {
+ Some(Node::Expr(&rustc_hir::Expr {
+ kind: rustc_hir::ExprKind::Closure(&rustc_hir::Closure { body, .. }),
+ ..
+ })) => tcx.hir().body(body).generator_kind(),
+ Some(_) => None,
+ _ => bug!("generator_kind applied to non-local def-id {:?}", def_id),
+ }
+}
+
+fn from_target_feature(
+ tcx: TyCtxt<'_>,
+ attr: &ast::Attribute,
+ supported_target_features: &FxHashMap<String, Option<Symbol>>,
+ target_features: &mut Vec<Symbol>,
+) {
+ let Some(list) = attr.meta_item_list() else { return };
+ let bad_item = |span| {
+ let msg = "malformed `target_feature` attribute input";
+ let code = "enable = \"..\"";
+ tcx.sess
+ .struct_span_err(span, msg)
+ .span_suggestion(span, "must be of the form", code, Applicability::HasPlaceholders)
+ .emit();
+ };
+ let rust_features = tcx.features();
+ for item in list {
+ // Only `enable = ...` is accepted in the meta-item list.
+ if !item.has_name(sym::enable) {
+ bad_item(item.span());
+ continue;
+ }
+
+ // Must be of the form `enable = "..."` (a string).
+ let Some(value) = item.value_str() else {
+ bad_item(item.span());
+ continue;
+ };
+
+ // We allow comma separation to enable multiple features.
+ target_features.extend(value.as_str().split(',').filter_map(|feature| {
+ let Some(feature_gate) = supported_target_features.get(feature) else {
+ let msg =
+ format!("the feature named `{}` is not valid for this target", feature);
+ let mut err = tcx.sess.struct_span_err(item.span(), &msg);
+ err.span_label(
+ item.span(),
+ format!("`{}` is not valid for this target", feature),
+ );
+ if let Some(stripped) = feature.strip_prefix('+') {
+ let valid = supported_target_features.contains_key(stripped);
+ if valid {
+ err.help("consider removing the leading `+` in the feature name");
+ }
+ }
+ err.emit();
+ return None;
+ };
+
+ // Only allow features whose feature gates have been enabled.
+ let allowed = match feature_gate.as_ref().copied() {
+ Some(sym::arm_target_feature) => rust_features.arm_target_feature,
+ Some(sym::hexagon_target_feature) => rust_features.hexagon_target_feature,
+ Some(sym::powerpc_target_feature) => rust_features.powerpc_target_feature,
+ Some(sym::mips_target_feature) => rust_features.mips_target_feature,
+ Some(sym::riscv_target_feature) => rust_features.riscv_target_feature,
+ Some(sym::avx512_target_feature) => rust_features.avx512_target_feature,
+ Some(sym::sse4a_target_feature) => rust_features.sse4a_target_feature,
+ Some(sym::tbm_target_feature) => rust_features.tbm_target_feature,
+ Some(sym::wasm_target_feature) => rust_features.wasm_target_feature,
+ Some(sym::cmpxchg16b_target_feature) => rust_features.cmpxchg16b_target_feature,
+ Some(sym::movbe_target_feature) => rust_features.movbe_target_feature,
+ Some(sym::rtm_target_feature) => rust_features.rtm_target_feature,
+ Some(sym::f16c_target_feature) => rust_features.f16c_target_feature,
+ Some(sym::ermsb_target_feature) => rust_features.ermsb_target_feature,
+ Some(sym::bpf_target_feature) => rust_features.bpf_target_feature,
+ Some(sym::aarch64_ver_target_feature) => rust_features.aarch64_ver_target_feature,
+ Some(name) => bug!("unknown target feature gate {}", name),
+ None => true,
+ };
+ if !allowed {
+ feature_err(
+ &tcx.sess.parse_sess,
+ feature_gate.unwrap(),
+ item.span(),
+ &format!("the target feature `{}` is currently unstable", feature),
+ )
+ .emit();
+ }
+ Some(Symbol::intern(feature))
+ }));
+ }
+}
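
For orientation, the attribute shape this function parses looks like the following user code (an illustrative sketch, not part of this patch; it assumes an x86-64 target and the function name is made up).

// Illustrative only: `#[target_feature]` takes `enable = "..."` with a
// comma-separated feature list, which `from_target_feature` splits and
// validates against the target's supported features.
#[target_feature(enable = "avx2,fma")]
unsafe fn sum_wide(xs: &[f32]) -> f32 {
    xs.iter().sum()
}

fn main() {
    // Only call the function if the CPU actually has the features.
    if is_x86_feature_detected!("avx2") && is_x86_feature_detected!("fma") {
        println!("{}", unsafe { sum_wide(&[1.0, 2.0, 3.0]) });
    }
}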
+
+fn linkage_by_name(tcx: TyCtxt<'_>, def_id: LocalDefId, name: &str) -> Linkage {
+ use rustc_middle::mir::mono::Linkage::*;
+
+ // Use the names from src/llvm/docs/LangRef.rst here. Most types are only
+ // applicable to variable declarations and may not really make sense for
+    // Rust code in the first place, but allow them anyway and trust that the
+ // user knows what they're doing. Who knows, unanticipated use cases may pop
+ // up in the future.
+ //
+ // ghost, dllimport, dllexport and linkonce_odr_autohide are not supported
+    // and don't have to be; LLVM treats them as no-ops.
+ match name {
+ "appending" => Appending,
+ "available_externally" => AvailableExternally,
+ "common" => Common,
+ "extern_weak" => ExternalWeak,
+ "external" => External,
+ "internal" => Internal,
+ "linkonce" => LinkOnceAny,
+ "linkonce_odr" => LinkOnceODR,
+ "private" => Private,
+ "weak" => WeakAny,
+ "weak_odr" => WeakODR,
+ _ => tcx.sess.span_fatal(tcx.def_span(def_id), "invalid linkage specified"),
+ }
+}
+
+fn codegen_fn_attrs(tcx: TyCtxt<'_>, did: DefId) -> CodegenFnAttrs {
+ if cfg!(debug_assertions) {
+ let def_kind = tcx.def_kind(did);
+ assert!(
+ def_kind.has_codegen_attrs(),
+ "unexpected `def_kind` in `codegen_fn_attrs`: {def_kind:?}",
+ );
+ }
+
+ let did = did.expect_local();
+ let attrs = tcx.hir().attrs(tcx.hir().local_def_id_to_hir_id(did));
+ let mut codegen_fn_attrs = CodegenFnAttrs::new();
+ if tcx.should_inherit_track_caller(did) {
+ codegen_fn_attrs.flags |= CodegenFnAttrFlags::TRACK_CALLER;
+ }
+
+ let supported_target_features = tcx.supported_target_features(LOCAL_CRATE);
+
+ let mut inline_span = None;
+ let mut link_ordinal_span = None;
+ let mut no_sanitize_span = None;
+ for attr in attrs.iter() {
+ if attr.has_name(sym::cold) {
+ codegen_fn_attrs.flags |= CodegenFnAttrFlags::COLD;
+ } else if attr.has_name(sym::rustc_allocator) {
+ codegen_fn_attrs.flags |= CodegenFnAttrFlags::ALLOCATOR;
+ } else if attr.has_name(sym::ffi_returns_twice) {
+ if tcx.is_foreign_item(did) {
+ codegen_fn_attrs.flags |= CodegenFnAttrFlags::FFI_RETURNS_TWICE;
+ } else {
+                // `#[ffi_returns_twice]` is only allowed on `extern fn`s.
+ struct_span_err!(
+ tcx.sess,
+ attr.span,
+ E0724,
+ "`#[ffi_returns_twice]` may only be used on foreign functions"
+ )
+ .emit();
+ }
+ } else if attr.has_name(sym::ffi_pure) {
+ if tcx.is_foreign_item(did) {
+ if attrs.iter().any(|a| a.has_name(sym::ffi_const)) {
+ // `#[ffi_const]` functions cannot be `#[ffi_pure]`
+ struct_span_err!(
+ tcx.sess,
+ attr.span,
+ E0757,
+ "`#[ffi_const]` function cannot be `#[ffi_pure]`"
+ )
+ .emit();
+ } else {
+ codegen_fn_attrs.flags |= CodegenFnAttrFlags::FFI_PURE;
+ }
+ } else {
+ // `#[ffi_pure]` is only allowed on foreign functions
+ struct_span_err!(
+ tcx.sess,
+ attr.span,
+ E0755,
+ "`#[ffi_pure]` may only be used on foreign functions"
+ )
+ .emit();
+ }
+ } else if attr.has_name(sym::ffi_const) {
+ if tcx.is_foreign_item(did) {
+ codegen_fn_attrs.flags |= CodegenFnAttrFlags::FFI_CONST;
+ } else {
+ // `#[ffi_const]` is only allowed on foreign functions
+ struct_span_err!(
+ tcx.sess,
+ attr.span,
+ E0756,
+ "`#[ffi_const]` may only be used on foreign functions"
+ )
+ .emit();
+ }
+ } else if attr.has_name(sym::rustc_nounwind) {
+ codegen_fn_attrs.flags |= CodegenFnAttrFlags::NEVER_UNWIND;
+ } else if attr.has_name(sym::rustc_reallocator) {
+ codegen_fn_attrs.flags |= CodegenFnAttrFlags::REALLOCATOR;
+ } else if attr.has_name(sym::rustc_deallocator) {
+ codegen_fn_attrs.flags |= CodegenFnAttrFlags::DEALLOCATOR;
+ } else if attr.has_name(sym::rustc_allocator_zeroed) {
+ codegen_fn_attrs.flags |= CodegenFnAttrFlags::ALLOCATOR_ZEROED;
+ } else if attr.has_name(sym::naked) {
+ codegen_fn_attrs.flags |= CodegenFnAttrFlags::NAKED;
+ } else if attr.has_name(sym::no_mangle) {
+ codegen_fn_attrs.flags |= CodegenFnAttrFlags::NO_MANGLE;
+ } else if attr.has_name(sym::no_coverage) {
+ codegen_fn_attrs.flags |= CodegenFnAttrFlags::NO_COVERAGE;
+ } else if attr.has_name(sym::rustc_std_internal_symbol) {
+ codegen_fn_attrs.flags |= CodegenFnAttrFlags::RUSTC_STD_INTERNAL_SYMBOL;
+ } else if attr.has_name(sym::used) {
+ let inner = attr.meta_item_list();
+ match inner.as_deref() {
+ Some([item]) if item.has_name(sym::linker) => {
+ if !tcx.features().used_with_arg {
+ feature_err(
+ &tcx.sess.parse_sess,
+ sym::used_with_arg,
+ attr.span,
+ "`#[used(linker)]` is currently unstable",
+ )
+ .emit();
+ }
+ codegen_fn_attrs.flags |= CodegenFnAttrFlags::USED_LINKER;
+ }
+ Some([item]) if item.has_name(sym::compiler) => {
+ if !tcx.features().used_with_arg {
+ feature_err(
+ &tcx.sess.parse_sess,
+ sym::used_with_arg,
+ attr.span,
+ "`#[used(compiler)]` is currently unstable",
+ )
+ .emit();
+ }
+ codegen_fn_attrs.flags |= CodegenFnAttrFlags::USED;
+ }
+ Some(_) => {
+ tcx.sess.emit_err(errors::ExpectedUsedSymbol { span: attr.span });
+ }
+ None => {
+ // Unfortunately, unconditionally using `llvm.used` causes
+ // issues in handling `.init_array` with the gold linker,
+                    // but using `llvm.compiler.used` caused a nontrivial amount
+ // of unintentional ecosystem breakage -- particularly on
+ // Mach-O targets.
+ //
+ // As a result, we emit `llvm.compiler.used` only on ELF
+ // targets. This is somewhat ad-hoc, but actually follows
+ // our pre-LLVM 13 behavior (prior to the ecosystem
+ // breakage), and seems to match `clang`'s behavior as well
+ // (both before and after LLVM 13), possibly because they
+ // have similar compatibility concerns to us. See
+ // https://github.com/rust-lang/rust/issues/47384#issuecomment-1019080146
+ // and following comments for some discussion of this, as
+ // well as the comments in `rustc_codegen_llvm` where these
+ // flags are handled.
+ //
+ // Anyway, to be clear: this is still up in the air
+ // somewhat, and is subject to change in the future (which
+ // is a good thing, because this would ideally be a bit
+ // more firmed up).
+ let is_like_elf = !(tcx.sess.target.is_like_osx
+ || tcx.sess.target.is_like_windows
+ || tcx.sess.target.is_like_wasm);
+ codegen_fn_attrs.flags |= if is_like_elf {
+ CodegenFnAttrFlags::USED
+ } else {
+ CodegenFnAttrFlags::USED_LINKER
+ };
+ }
+ }
+ } else if attr.has_name(sym::cmse_nonsecure_entry) {
+ if !matches!(tcx.fn_sig(did).abi(), abi::Abi::C { .. }) {
+ struct_span_err!(
+ tcx.sess,
+ attr.span,
+ E0776,
+ "`#[cmse_nonsecure_entry]` requires C ABI"
+ )
+ .emit();
+ }
+ if !tcx.sess.target.llvm_target.contains("thumbv8m") {
+ struct_span_err!(tcx.sess, attr.span, E0775, "`#[cmse_nonsecure_entry]` is only valid for targets with the TrustZone-M extension")
+ .emit();
+ }
+ codegen_fn_attrs.flags |= CodegenFnAttrFlags::CMSE_NONSECURE_ENTRY;
+ } else if attr.has_name(sym::thread_local) {
+ codegen_fn_attrs.flags |= CodegenFnAttrFlags::THREAD_LOCAL;
+ } else if attr.has_name(sym::track_caller) {
+ if !tcx.is_closure(did.to_def_id()) && tcx.fn_sig(did).abi() != abi::Abi::Rust {
+ struct_span_err!(tcx.sess, attr.span, E0737, "`#[track_caller]` requires Rust ABI")
+ .emit();
+ }
+ if tcx.is_closure(did.to_def_id()) && !tcx.features().closure_track_caller {
+ feature_err(
+ &tcx.sess.parse_sess,
+ sym::closure_track_caller,
+ attr.span,
+ "`#[track_caller]` on closures is currently unstable",
+ )
+ .emit();
+ }
+ codegen_fn_attrs.flags |= CodegenFnAttrFlags::TRACK_CALLER;
+ } else if attr.has_name(sym::export_name) {
+ if let Some(s) = attr.value_str() {
+ if s.as_str().contains('\0') {
+ // `#[export_name = ...]` will be converted to a null-terminated string,
+ // so it may not contain any null characters.
+ struct_span_err!(
+ tcx.sess,
+ attr.span,
+ E0648,
+ "`export_name` may not contain null characters"
+ )
+ .emit();
+ }
+ codegen_fn_attrs.export_name = Some(s);
+ }
+ } else if attr.has_name(sym::target_feature) {
+ if !tcx.is_closure(did.to_def_id())
+ && tcx.fn_sig(did).unsafety() == hir::Unsafety::Normal
+ {
+ if tcx.sess.target.is_like_wasm || tcx.sess.opts.actually_rustdoc {
+ // The `#[target_feature]` attribute is allowed on
+ // WebAssembly targets on all functions, including safe
+ // ones. Other targets require that `#[target_feature]` is
+ // only applied to unsafe functions (pending the
+ // `target_feature_11` feature) because on most targets
+ // execution of instructions that are not supported is
+                    // execution of instructions that are not supported is
+                    // considered undefined behavior. For WebAssembly, which is a
+                    // 100% safe target at execution time, it's not possible to
+                    // execute undefined instructions, and even if a future
+                    // feature was added in some form for this, it would be a
+                    // deterministic trap. There is no undefined behavior when
+                    // executing WebAssembly, so `#[target_feature]` is allowed
+ //
+ // Note that this is also allowed if `actually_rustdoc` so
+ // if a target is documenting some wasm-specific code then
+ // it's not spuriously denied.
+ } else if !tcx.features().target_feature_11 {
+ let mut err = feature_err(
+ &tcx.sess.parse_sess,
+ sym::target_feature_11,
+ attr.span,
+ "`#[target_feature(..)]` can only be applied to `unsafe` functions",
+ );
+ err.span_label(tcx.def_span(did), "not an `unsafe` function");
+ err.emit();
+ } else {
+ check_target_feature_trait_unsafe(tcx, did, attr.span);
+ }
+ }
+ from_target_feature(
+ tcx,
+ attr,
+ supported_target_features,
+ &mut codegen_fn_attrs.target_features,
+ );
+ } else if attr.has_name(sym::linkage) {
+ if let Some(val) = attr.value_str() {
+ codegen_fn_attrs.linkage = Some(linkage_by_name(tcx, did, val.as_str()));
+ }
+ } else if attr.has_name(sym::link_section) {
+ if let Some(val) = attr.value_str() {
+ if val.as_str().bytes().any(|b| b == 0) {
+ let msg = format!(
+ "illegal null byte in link_section \
+ value: `{}`",
+ &val
+ );
+ tcx.sess.span_err(attr.span, &msg);
+ } else {
+ codegen_fn_attrs.link_section = Some(val);
+ }
+ }
+ } else if attr.has_name(sym::link_name) {
+ codegen_fn_attrs.link_name = attr.value_str();
+ } else if attr.has_name(sym::link_ordinal) {
+ link_ordinal_span = Some(attr.span);
+ if let ordinal @ Some(_) = check_link_ordinal(tcx, attr) {
+ codegen_fn_attrs.link_ordinal = ordinal;
+ }
+ } else if attr.has_name(sym::no_sanitize) {
+ no_sanitize_span = Some(attr.span);
+ if let Some(list) = attr.meta_item_list() {
+ for item in list.iter() {
+ if item.has_name(sym::address) {
+ codegen_fn_attrs.no_sanitize |= SanitizerSet::ADDRESS;
+ } else if item.has_name(sym::cfi) {
+ codegen_fn_attrs.no_sanitize |= SanitizerSet::CFI;
+ } else if item.has_name(sym::memory) {
+ codegen_fn_attrs.no_sanitize |= SanitizerSet::MEMORY;
+ } else if item.has_name(sym::memtag) {
+ codegen_fn_attrs.no_sanitize |= SanitizerSet::MEMTAG;
+ } else if item.has_name(sym::shadow_call_stack) {
+ codegen_fn_attrs.no_sanitize |= SanitizerSet::SHADOWCALLSTACK;
+ } else if item.has_name(sym::thread) {
+ codegen_fn_attrs.no_sanitize |= SanitizerSet::THREAD;
+ } else if item.has_name(sym::hwaddress) {
+ codegen_fn_attrs.no_sanitize |= SanitizerSet::HWADDRESS;
+ } else {
+ tcx.sess
+ .struct_span_err(item.span(), "invalid argument for `no_sanitize`")
+ .note("expected one of: `address`, `cfi`, `hwaddress`, `memory`, `memtag`, `shadow-call-stack`, or `thread`")
+ .emit();
+ }
+ }
+ }
+ } else if attr.has_name(sym::instruction_set) {
+ codegen_fn_attrs.instruction_set = match attr.meta_kind() {
+ Some(MetaItemKind::List(ref items)) => match items.as_slice() {
+ [NestedMetaItem::MetaItem(set)] => {
+ let segments =
+ set.path.segments.iter().map(|x| x.ident.name).collect::<Vec<_>>();
+ match segments.as_slice() {
+ [sym::arm, sym::a32] | [sym::arm, sym::t32] => {
+ if !tcx.sess.target.has_thumb_interworking {
+ struct_span_err!(
+ tcx.sess.diagnostic(),
+ attr.span,
+ E0779,
+ "target does not support `#[instruction_set]`"
+ )
+ .emit();
+ None
+ } else if segments[1] == sym::a32 {
+ Some(InstructionSetAttr::ArmA32)
+ } else if segments[1] == sym::t32 {
+ Some(InstructionSetAttr::ArmT32)
+ } else {
+ unreachable!()
+ }
+ }
+ _ => {
+ struct_span_err!(
+ tcx.sess.diagnostic(),
+ attr.span,
+ E0779,
+ "invalid instruction set specified",
+ )
+ .emit();
+ None
+ }
+ }
+ }
+ [] => {
+ struct_span_err!(
+ tcx.sess.diagnostic(),
+ attr.span,
+ E0778,
+ "`#[instruction_set]` requires an argument"
+ )
+ .emit();
+ None
+ }
+ _ => {
+ struct_span_err!(
+ tcx.sess.diagnostic(),
+ attr.span,
+ E0779,
+ "cannot specify more than one instruction set"
+ )
+ .emit();
+ None
+ }
+ },
+ _ => {
+ struct_span_err!(
+ tcx.sess.diagnostic(),
+ attr.span,
+ E0778,
+ "must specify an instruction set"
+ )
+ .emit();
+ None
+ }
+ };
+ } else if attr.has_name(sym::repr) {
+ codegen_fn_attrs.alignment = match attr.meta_item_list() {
+ Some(items) => match items.as_slice() {
+ [item] => match item.name_value_literal() {
+ Some((sym::align, literal)) => {
+ let alignment = rustc_attr::parse_alignment(&literal.kind);
+
+ match alignment {
+ Ok(align) => Some(align),
+ Err(msg) => {
+ struct_span_err!(
+ tcx.sess.diagnostic(),
+ attr.span,
+ E0589,
+ "invalid `repr(align)` attribute: {}",
+ msg
+ )
+ .emit();
+
+ None
+ }
+ }
+ }
+ _ => None,
+ },
+ [] => None,
+ _ => None,
+ },
+ None => None,
+ };
+ }
+ }
+
+ codegen_fn_attrs.inline = attrs.iter().fold(InlineAttr::None, |ia, attr| {
+ if !attr.has_name(sym::inline) {
+ return ia;
+ }
+ match attr.meta_kind() {
+ Some(MetaItemKind::Word) => InlineAttr::Hint,
+ Some(MetaItemKind::List(ref items)) => {
+ inline_span = Some(attr.span);
+ if items.len() != 1 {
+ struct_span_err!(
+ tcx.sess.diagnostic(),
+ attr.span,
+ E0534,
+ "expected one argument"
+ )
+ .emit();
+ InlineAttr::None
+ } else if list_contains_name(&items, sym::always) {
+ InlineAttr::Always
+ } else if list_contains_name(&items, sym::never) {
+ InlineAttr::Never
+ } else {
+ struct_span_err!(
+ tcx.sess.diagnostic(),
+ items[0].span(),
+ E0535,
+ "invalid argument"
+ )
+ .help("valid inline arguments are `always` and `never`")
+ .emit();
+
+ InlineAttr::None
+ }
+ }
+ Some(MetaItemKind::NameValue(_)) => ia,
+ None => ia,
+ }
+ });
+
+ codegen_fn_attrs.optimize = attrs.iter().fold(OptimizeAttr::None, |ia, attr| {
+ if !attr.has_name(sym::optimize) {
+ return ia;
+ }
+ let err = |sp, s| struct_span_err!(tcx.sess.diagnostic(), sp, E0722, "{}", s).emit();
+ match attr.meta_kind() {
+ Some(MetaItemKind::Word) => {
+ err(attr.span, "expected one argument");
+ ia
+ }
+ Some(MetaItemKind::List(ref items)) => {
+ inline_span = Some(attr.span);
+ if items.len() != 1 {
+ err(attr.span, "expected one argument");
+ OptimizeAttr::None
+ } else if list_contains_name(&items, sym::size) {
+ OptimizeAttr::Size
+ } else if list_contains_name(&items, sym::speed) {
+ OptimizeAttr::Speed
+ } else {
+ err(items[0].span(), "invalid argument");
+ OptimizeAttr::None
+ }
+ }
+ Some(MetaItemKind::NameValue(_)) => ia,
+ None => ia,
+ }
+ });
+
+ // #73631: closures inherit `#[target_feature]` annotations
+ if tcx.features().target_feature_11 && tcx.is_closure(did.to_def_id()) {
+ let owner_id = tcx.parent(did.to_def_id());
+ if tcx.def_kind(owner_id).has_codegen_attrs() {
+ codegen_fn_attrs
+ .target_features
+ .extend(tcx.codegen_fn_attrs(owner_id).target_features.iter().copied());
+ }
+ }
+
+ // If a function uses #[target_feature] it can't be inlined into general
+ // purpose functions as they wouldn't have the right target features
+ // enabled. For that reason we also forbid #[inline(always)] as it can't be
+ // respected.
+ if !codegen_fn_attrs.target_features.is_empty() {
+ if codegen_fn_attrs.inline == InlineAttr::Always {
+ if let Some(span) = inline_span {
+ tcx.sess.span_err(
+ span,
+ "cannot use `#[inline(always)]` with \
+ `#[target_feature]`",
+ );
+ }
+ }
+ }
+
+ if !codegen_fn_attrs.no_sanitize.is_empty() {
+ if codegen_fn_attrs.inline == InlineAttr::Always {
+ if let (Some(no_sanitize_span), Some(inline_span)) = (no_sanitize_span, inline_span) {
+ let hir_id = tcx.hir().local_def_id_to_hir_id(did);
+ tcx.struct_span_lint_hir(
+ lint::builtin::INLINE_NO_SANITIZE,
+ hir_id,
+ no_sanitize_span,
+ "`no_sanitize` will have no effect after inlining",
+ |lint| lint.span_note(inline_span, "inlining requested here"),
+ )
+ }
+ }
+ }
+
+ // Weak lang items have the same semantics as "std internal" symbols in the
+ // sense that they're preserved through all our LTO passes and only
+ // strippable by the linker.
+ //
+ // Additionally weak lang items have predetermined symbol names.
+ if tcx.is_weak_lang_item(did.to_def_id()) {
+ codegen_fn_attrs.flags |= CodegenFnAttrFlags::RUSTC_STD_INTERNAL_SYMBOL;
+ }
+ if let Some(name) = weak_lang_items::link_name(attrs) {
+ codegen_fn_attrs.export_name = Some(name);
+ codegen_fn_attrs.link_name = Some(name);
+ }
+ check_link_name_xor_ordinal(tcx, &codegen_fn_attrs, link_ordinal_span);
+
+ // Internal symbols to the standard library all have no_mangle semantics in
+ // that they have defined symbol names present in the function name. This
+ // also applies to weak symbols where they all have known symbol names.
+ if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::RUSTC_STD_INTERNAL_SYMBOL) {
+ codegen_fn_attrs.flags |= CodegenFnAttrFlags::NO_MANGLE;
+ }
+
+ // Any linkage to LLVM intrinsics for now forcibly marks them all as never
+ // unwinds since LLVM sometimes can't handle codegen which `invoke`s
+ // intrinsic functions.
+ if let Some(name) = &codegen_fn_attrs.link_name {
+ if name.as_str().starts_with("llvm.") {
+ codegen_fn_attrs.flags |= CodegenFnAttrFlags::NEVER_UNWIND;
+ }
+ }
+
+ codegen_fn_attrs
+}
+
+/// Computes the set of target features used in a function for the purposes of
+/// inline assembly.
+fn asm_target_features<'tcx>(tcx: TyCtxt<'tcx>, did: DefId) -> &'tcx FxHashSet<Symbol> {
+ let mut target_features = tcx.sess.unstable_target_features.clone();
+ if tcx.def_kind(did).has_codegen_attrs() {
+ let attrs = tcx.codegen_fn_attrs(did);
+ target_features.extend(&attrs.target_features);
+ match attrs.instruction_set {
+ None => {}
+ Some(InstructionSetAttr::ArmA32) => {
+ target_features.remove(&sym::thumb_mode);
+ }
+ Some(InstructionSetAttr::ArmT32) => {
+ target_features.insert(sym::thumb_mode);
+ }
+ }
+ }
+
+ tcx.arena.alloc(target_features)
+}
+
+/// Checks if the provided DefId is a method in a trait impl for a trait which has track_caller
+/// applied to the method prototype.
+fn should_inherit_track_caller(tcx: TyCtxt<'_>, def_id: DefId) -> bool {
+ if let Some(impl_item) = tcx.opt_associated_item(def_id)
+ && let ty::AssocItemContainer::ImplContainer = impl_item.container
+ && let Some(trait_item) = impl_item.trait_item_def_id
+ {
+ return tcx
+ .codegen_fn_attrs(trait_item)
+ .flags
+ .intersects(CodegenFnAttrFlags::TRACK_CALLER);
+ }
+
+ false
+}
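
The behaviour being detected, sketched as user code (illustrative only, not part of this patch; the trait and type names are made up): `#[track_caller]` on the trait prototype is inherited by the impl, so `Location::caller()` inside the impl reports the original call site.

// Illustrative only: the impl method inherits `#[track_caller]` from the
// trait prototype, which is what `should_inherit_track_caller` checks for.
use std::panic::Location;

trait Fail {
    #[track_caller]
    fn fail(&self);
}

struct S;

impl Fail for S {
    fn fail(&self) {
        // Prints the location of the `s.fail()` call below, not this line.
        println!("called from {}", Location::caller());
    }
}

fn main() {
    let s = S;
    s.fail();
}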
+
+fn check_link_ordinal(tcx: TyCtxt<'_>, attr: &ast::Attribute) -> Option<u16> {
+ use rustc_ast::{Lit, LitIntType, LitKind};
+ if !tcx.features().raw_dylib && tcx.sess.target.arch == "x86" {
+ feature_err(
+ &tcx.sess.parse_sess,
+ sym::raw_dylib,
+ attr.span,
+ "`#[link_ordinal]` is unstable on x86",
+ )
+ .emit();
+ }
+ let meta_item_list = attr.meta_item_list();
+ let meta_item_list: Option<&[ast::NestedMetaItem]> = meta_item_list.as_ref().map(Vec::as_ref);
+ let sole_meta_list = match meta_item_list {
+ Some([item]) => item.literal(),
+ Some(_) => {
+ tcx.sess
+ .struct_span_err(attr.span, "incorrect number of arguments to `#[link_ordinal]`")
+ .note("the attribute requires exactly one argument")
+ .emit();
+ return None;
+ }
+ _ => None,
+ };
+ if let Some(Lit { kind: LitKind::Int(ordinal, LitIntType::Unsuffixed), .. }) = sole_meta_list {
+ // According to the table at https://docs.microsoft.com/en-us/windows/win32/debug/pe-format#import-header,
+ // the ordinal must fit into 16 bits. Similarly, the Ordinal field in COFFShortExport (defined
+ // in llvm/include/llvm/Object/COFFImportFile.h), which we use to communicate import information
+        // to LLVM for `#[link(kind = "raw-dylib")]`, is also defined to be uint16_t.
+ //
+ // FIXME: should we allow an ordinal of 0? The MSVC toolchain has inconsistent support for this:
+ // both LINK.EXE and LIB.EXE signal errors and abort when given a .DEF file that specifies
+ // a zero ordinal. However, llvm-dlltool is perfectly happy to generate an import library
+ // for such a .DEF file, and MSVC's LINK.EXE is also perfectly happy to consume an import
+ // library produced by LLVM with an ordinal of 0, and it generates an .EXE. (I don't know yet
+ // if the resulting EXE runs, as I haven't yet built the necessary DLL -- see earlier comment
+ // about LINK.EXE failing.)
+ if *ordinal <= u16::MAX as u128 {
+ Some(*ordinal as u16)
+ } else {
+ let msg = format!("ordinal value in `link_ordinal` is too large: `{}`", &ordinal);
+ tcx.sess
+ .struct_span_err(attr.span, &msg)
+ .note("the value may not exceed `u16::MAX`")
+ .emit();
+ None
+ }
+ } else {
+ tcx.sess
+ .struct_span_err(attr.span, "illegal ordinal format in `link_ordinal`")
+ .note("an unsuffixed integer value, e.g., `1`, is expected")
+ .emit();
+ None
+ }
+}
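
For reference, the attribute shape validated here looks roughly like this (a Windows-only sketch with a hypothetical DLL name and ordinal, not part of this patch).

// Illustrative only: `#[link_ordinal]` takes exactly one unsuffixed integer
// argument, and `check_link_ordinal` rejects values that do not fit in a u16.
#[link(name = "vendor_widget", kind = "raw-dylib")]
extern "system" {
    #[link_ordinal(42)]
    fn widget_init();
}

fn main() {
    // Actually calling `widget_init` would require the DLL to exist; this
    // sketch only shows the attribute placement.
}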
+
+fn check_link_name_xor_ordinal(
+ tcx: TyCtxt<'_>,
+ codegen_fn_attrs: &CodegenFnAttrs,
+ inline_span: Option<Span>,
+) {
+ if codegen_fn_attrs.link_name.is_none() || codegen_fn_attrs.link_ordinal.is_none() {
+ return;
+ }
+ let msg = "cannot use `#[link_name]` with `#[link_ordinal]`";
+ if let Some(span) = inline_span {
+ tcx.sess.span_err(span, msg);
+ } else {
+ tcx.sess.err(msg);
+ }
+}
+
+/// Checks that the function annotated with `#[target_feature]` is not a safe
+/// trait method implementation, reporting an error if it is.
+fn check_target_feature_trait_unsafe(tcx: TyCtxt<'_>, id: LocalDefId, attr_span: Span) {
+ let hir_id = tcx.hir().local_def_id_to_hir_id(id);
+ let node = tcx.hir().get(hir_id);
+ if let Node::ImplItem(hir::ImplItem { kind: hir::ImplItemKind::Fn(..), .. }) = node {
+ let parent_id = tcx.hir().get_parent_item(hir_id);
+ let parent_item = tcx.hir().expect_item(parent_id.def_id);
+ if let hir::ItemKind::Impl(hir::Impl { of_trait: Some(_), .. }) = parent_item.kind {
+ tcx.sess
+ .struct_span_err(
+ attr_span,
+ "`#[target_feature(..)]` cannot be applied to safe trait method",
+ )
+ .span_label(attr_span, "cannot be applied to safe trait method")
+ .span_label(tcx.def_span(id), "not an `unsafe` function")
+ .emit();
+ }
+ }
+}
diff --git a/compiler/rustc_hir_analysis/src/collect/generics_of.rs b/compiler/rustc_hir_analysis/src/collect/generics_of.rs
new file mode 100644
index 000000000..c7777a946
--- /dev/null
+++ b/compiler/rustc_hir_analysis/src/collect/generics_of.rs
@@ -0,0 +1,481 @@
+use crate::middle::resolve_lifetime as rl;
+use hir::{
+ intravisit::{self, Visitor},
+ GenericParamKind, HirId, Node,
+};
+use rustc_hir as hir;
+use rustc_hir::def::DefKind;
+use rustc_hir::def_id::DefId;
+use rustc_middle::ty::{self, TyCtxt};
+use rustc_session::lint;
+use rustc_span::symbol::{kw, Symbol};
+use rustc_span::Span;
+
+pub(super) fn generics_of(tcx: TyCtxt<'_>, def_id: DefId) -> ty::Generics {
+ use rustc_hir::*;
+
+ let hir_id = tcx.hir().local_def_id_to_hir_id(def_id.expect_local());
+
+ let node = tcx.hir().get(hir_id);
+ let parent_def_id = match node {
+ Node::ImplItem(_)
+ | Node::TraitItem(_)
+ | Node::Variant(_)
+ | Node::Ctor(..)
+ | Node::Field(_) => {
+ let parent_id = tcx.hir().get_parent_item(hir_id);
+ Some(parent_id.to_def_id())
+ }
+ // FIXME(#43408) always enable this once `lazy_normalization` is
+ // stable enough and does not need a feature gate anymore.
+ Node::AnonConst(_) => {
+ let parent_def_id = tcx.hir().get_parent_item(hir_id);
+
+ let mut in_param_ty = false;
+ for (_parent, node) in tcx.hir().parent_iter(hir_id) {
+ if let Some(generics) = node.generics() {
+ let mut visitor = AnonConstInParamTyDetector {
+ in_param_ty: false,
+ found_anon_const_in_param_ty: false,
+ ct: hir_id,
+ };
+
+ visitor.visit_generics(generics);
+ in_param_ty = visitor.found_anon_const_in_param_ty;
+ break;
+ }
+ }
+
+ if in_param_ty {
+ // We do not allow generic parameters in anon consts if we are inside
+ // of a const parameter type, e.g. `struct Foo<const N: usize, const M: [u8; N]>` is not allowed.
+ None
+ } else if tcx.lazy_normalization() {
+ if let Some(param_id) = tcx.hir().opt_const_param_default_param_hir_id(hir_id) {
+ // If the def_id we are calling generics_of on is an anon ct default i.e:
+ //
+ // struct Foo<const N: usize = { .. }>;
+ // ^^^ ^ ^^^^^^ def id of this anon const
+ // ^ ^ param_id
+ // ^ parent_def_id
+ //
+ // then we only want to return generics for params to the left of `N`. If we don't do that we
+ // end up with that const looking like: `ty::ConstKind::Unevaluated(def_id, substs: [N#0])`.
+ //
+ // This causes ICEs (#86580) when building the substs for Foo in `fn foo() -> Foo { .. }` as
+ // we substitute the defaults with the partially built substs when we build the substs. Subst'ing
+ // the `N#0` on the unevaluated const indexes into the empty substs we're in the process of building.
+ //
+ // We fix this by having this function return the parent's generics ourselves and truncating the
+ // generics to only include non-forward declared params (with the exception of the `Self` ty)
+ //
+ // For the above code example that means we want `substs: []`
+ // For the following struct def we want `substs: [N#0]` when generics_of is called on
+ // the def id of the `{ N + 1 }` anon const
+ // struct Foo<const N: usize, const M: usize = { N + 1 }>;
+ //
+ // This has some implications for how we get the predicates available to the anon const
+ // see `explicit_predicates_of` for more information on this
+ let generics = tcx.generics_of(parent_def_id.to_def_id());
+ let param_def = tcx.hir().local_def_id(param_id).to_def_id();
+ let param_def_idx = generics.param_def_id_to_index[&param_def];
+ // In the above example this would be .params[..N#0]
+ let params = generics.params[..param_def_idx as usize].to_owned();
+ let param_def_id_to_index =
+ params.iter().map(|param| (param.def_id, param.index)).collect();
+
+ return ty::Generics {
+ // we set the parent of these generics to be our parent's parent so that we
+                    // don't end up with substs: [N, M, N] for the const default on a struct like this:
+ // struct Foo<const N: usize, const M: usize = { ... }>;
+ parent: generics.parent,
+ parent_count: generics.parent_count,
+ params,
+ param_def_id_to_index,
+ has_self: generics.has_self,
+ has_late_bound_regions: generics.has_late_bound_regions,
+ };
+ }
+
+ // HACK(eddyb) this provides the correct generics when
+ // `feature(generic_const_expressions)` is enabled, so that const expressions
+ // used with const generics, e.g. `Foo<{N+1}>`, can work at all.
+ //
+ // Note that we do not supply the parent generics when using
+ // `min_const_generics`.
+ Some(parent_def_id.to_def_id())
+ } else {
+ let parent_node = tcx.hir().get(tcx.hir().get_parent_node(hir_id));
+ match parent_node {
+ // HACK(eddyb) this provides the correct generics for repeat
+ // expressions' count (i.e. `N` in `[x; N]`), and explicit
+ // `enum` discriminants (i.e. `D` in `enum Foo { Bar = D }`),
+ // as they shouldn't be able to cause query cycle errors.
+ Node::Expr(&Expr { kind: ExprKind::Repeat(_, ref constant), .. })
+ if constant.hir_id() == hir_id =>
+ {
+ Some(parent_def_id.to_def_id())
+ }
+ Node::Variant(Variant { disr_expr: Some(ref constant), .. })
+ if constant.hir_id == hir_id =>
+ {
+ Some(parent_def_id.to_def_id())
+ }
+ Node::Expr(&Expr { kind: ExprKind::ConstBlock(_), .. }) => {
+ Some(tcx.typeck_root_def_id(def_id))
+ }
+ // Exclude `GlobalAsm` here which cannot have generics.
+ Node::Expr(&Expr { kind: ExprKind::InlineAsm(asm), .. })
+ if asm.operands.iter().any(|(op, _op_sp)| match op {
+ hir::InlineAsmOperand::Const { anon_const }
+ | hir::InlineAsmOperand::SymFn { anon_const } => {
+ anon_const.hir_id == hir_id
+ }
+ _ => false,
+ }) =>
+ {
+ Some(parent_def_id.to_def_id())
+ }
+ _ => None,
+ }
+ }
+ }
+ Node::Expr(&hir::Expr { kind: hir::ExprKind::Closure { .. }, .. }) => {
+ Some(tcx.typeck_root_def_id(def_id))
+ }
+ Node::Item(item) => match item.kind {
+ ItemKind::OpaqueTy(hir::OpaqueTy {
+ origin:
+ hir::OpaqueTyOrigin::FnReturn(fn_def_id) | hir::OpaqueTyOrigin::AsyncFn(fn_def_id),
+ in_trait,
+ ..
+ }) => {
+ if in_trait {
+ assert!(matches!(tcx.def_kind(fn_def_id), DefKind::AssocFn))
+ } else {
+ assert!(matches!(tcx.def_kind(fn_def_id), DefKind::AssocFn | DefKind::Fn))
+ }
+ Some(fn_def_id.to_def_id())
+ }
+ ItemKind::OpaqueTy(hir::OpaqueTy { origin: hir::OpaqueTyOrigin::TyAlias, .. }) => {
+ let parent_id = tcx.hir().get_parent_item(hir_id);
+ assert_ne!(parent_id, hir::CRATE_OWNER_ID);
+ debug!("generics_of: parent of opaque ty {:?} is {:?}", def_id, parent_id);
+ // Opaque types are always nested within another item, and
+ // inherit the generics of the item.
+ Some(parent_id.to_def_id())
+ }
+ _ => None,
+ },
+ _ => None,
+ };
+
+ enum Defaults {
+ Allowed,
+ // See #36887
+ FutureCompatDisallowed,
+ Deny,
+ }
+
+ let no_generics = hir::Generics::empty();
+ let ast_generics = node.generics().unwrap_or(&no_generics);
+ let (opt_self, allow_defaults) = match node {
+ Node::Item(item) => {
+ match item.kind {
+ ItemKind::Trait(..) | ItemKind::TraitAlias(..) => {
+ // Add in the self type parameter.
+ //
+ // Something of a hack: use the node id for the trait, also as
+ // the node id for the Self type parameter.
+ let opt_self = Some(ty::GenericParamDef {
+ index: 0,
+ name: kw::SelfUpper,
+ def_id,
+ pure_wrt_drop: false,
+ kind: ty::GenericParamDefKind::Type {
+ has_default: false,
+ synthetic: false,
+ },
+ });
+
+ (opt_self, Defaults::Allowed)
+ }
+ ItemKind::TyAlias(..)
+ | ItemKind::Enum(..)
+ | ItemKind::Struct(..)
+ | ItemKind::OpaqueTy(..)
+ | ItemKind::Union(..) => (None, Defaults::Allowed),
+ _ => (None, Defaults::FutureCompatDisallowed),
+ }
+ }
+
+ // GATs
+ Node::TraitItem(item) if matches!(item.kind, TraitItemKind::Type(..)) => {
+ (None, Defaults::Deny)
+ }
+ Node::ImplItem(item) if matches!(item.kind, ImplItemKind::Type(..)) => {
+ (None, Defaults::Deny)
+ }
+
+ _ => (None, Defaults::FutureCompatDisallowed),
+ };
+
+ let has_self = opt_self.is_some();
+ let mut parent_has_self = false;
+ let mut own_start = has_self as u32;
+ let parent_count = parent_def_id.map_or(0, |def_id| {
+ let generics = tcx.generics_of(def_id);
+ assert!(!has_self);
+ parent_has_self = generics.has_self;
+ own_start = generics.count() as u32;
+ generics.parent_count + generics.params.len()
+ });
+
+ let mut params: Vec<_> = Vec::with_capacity(ast_generics.params.len() + has_self as usize);
+
+ if let Some(opt_self) = opt_self {
+ params.push(opt_self);
+ }
+
+ let early_lifetimes = super::early_bound_lifetimes_from_generics(tcx, ast_generics);
+ params.extend(early_lifetimes.enumerate().map(|(i, param)| ty::GenericParamDef {
+ name: param.name.ident().name,
+ index: own_start + i as u32,
+ def_id: tcx.hir().local_def_id(param.hir_id).to_def_id(),
+ pure_wrt_drop: param.pure_wrt_drop,
+ kind: ty::GenericParamDefKind::Lifetime,
+ }));
+
+ // Now create the real type and const parameters.
+ let type_start = own_start - has_self as u32 + params.len() as u32;
+ let mut i = 0;
+ let mut next_index = || {
+ let prev = i;
+ i += 1;
+ prev as u32 + type_start
+ };
+
+ const TYPE_DEFAULT_NOT_ALLOWED: &'static str = "defaults for type parameters are only allowed in \
+ `struct`, `enum`, `type`, or `trait` definitions";
+
+ params.extend(ast_generics.params.iter().filter_map(|param| match param.kind {
+ GenericParamKind::Lifetime { .. } => None,
+ GenericParamKind::Type { ref default, synthetic, .. } => {
+ if default.is_some() {
+ match allow_defaults {
+ Defaults::Allowed => {}
+ Defaults::FutureCompatDisallowed
+ if tcx.features().default_type_parameter_fallback => {}
+ Defaults::FutureCompatDisallowed => {
+ tcx.struct_span_lint_hir(
+ lint::builtin::INVALID_TYPE_PARAM_DEFAULT,
+ param.hir_id,
+ param.span,
+ TYPE_DEFAULT_NOT_ALLOWED,
+ |lint| lint,
+ );
+ }
+ Defaults::Deny => {
+ tcx.sess.span_err(param.span, TYPE_DEFAULT_NOT_ALLOWED);
+ }
+ }
+ }
+
+ let kind = ty::GenericParamDefKind::Type { has_default: default.is_some(), synthetic };
+
+ Some(ty::GenericParamDef {
+ index: next_index(),
+ name: param.name.ident().name,
+ def_id: tcx.hir().local_def_id(param.hir_id).to_def_id(),
+ pure_wrt_drop: param.pure_wrt_drop,
+ kind,
+ })
+ }
+ GenericParamKind::Const { default, .. } => {
+ if !matches!(allow_defaults, Defaults::Allowed) && default.is_some() {
+ tcx.sess.span_err(
+ param.span,
+ "defaults for const parameters are only allowed in \
+ `struct`, `enum`, `type`, or `trait` definitions",
+ );
+ }
+
+ Some(ty::GenericParamDef {
+ index: next_index(),
+ name: param.name.ident().name,
+ def_id: tcx.hir().local_def_id(param.hir_id).to_def_id(),
+ pure_wrt_drop: param.pure_wrt_drop,
+ kind: ty::GenericParamDefKind::Const { has_default: default.is_some() },
+ })
+ }
+ }));
+
+ // provide junk type parameter defs - the only place that
+ // cares about anything but the length is instantiation,
+ // and we don't do that for closures.
+ if let Node::Expr(&hir::Expr {
+ kind: hir::ExprKind::Closure(hir::Closure { movability: gen, .. }),
+ ..
+ }) = node
+ {
+ let dummy_args = if gen.is_some() {
+ &["<resume_ty>", "<yield_ty>", "<return_ty>", "<witness>", "<upvars>"][..]
+ } else {
+ &["<closure_kind>", "<closure_signature>", "<upvars>"][..]
+ };
+
+ params.extend(dummy_args.iter().map(|&arg| ty::GenericParamDef {
+ index: next_index(),
+ name: Symbol::intern(arg),
+ def_id,
+ pure_wrt_drop: false,
+ kind: ty::GenericParamDefKind::Type { has_default: false, synthetic: false },
+ }));
+ }
+
+ // provide junk type parameter defs for const blocks.
+ if let Node::AnonConst(_) = node {
+ let parent_node = tcx.hir().get(tcx.hir().get_parent_node(hir_id));
+ if let Node::Expr(&Expr { kind: ExprKind::ConstBlock(_), .. }) = parent_node {
+ params.push(ty::GenericParamDef {
+ index: next_index(),
+ name: Symbol::intern("<const_ty>"),
+ def_id,
+ pure_wrt_drop: false,
+ kind: ty::GenericParamDefKind::Type { has_default: false, synthetic: false },
+ });
+ }
+ }
+
+ let param_def_id_to_index = params.iter().map(|param| (param.def_id, param.index)).collect();
+
+ ty::Generics {
+ parent: parent_def_id,
+ parent_count,
+ params,
+ param_def_id_to_index,
+ has_self: has_self || parent_has_self,
+ has_late_bound_regions: has_late_bound_regions(tcx, node),
+ }
+}
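
To make the discussion of anon-const defaults above concrete, here is the kind of declaration it refers to (a nightly-only sketch under `feature(generic_const_exprs)`, not part of this patch).

// Illustrative only (nightly): a const parameter default that refers to an
// earlier parameter. The generics computed for the `{ N + 1 }` anon const must
// expose only the parameters to its left, as explained in `generics_of` above.
#![feature(generic_const_exprs)]
#![allow(incomplete_features)]

struct Foo<const N: usize, const M: usize = { N + 1 }>;

fn main() {
    let _explicit: Foo<2, 3> = Foo; // both parameters given, default unused
    let _defaulted: Foo<2> = Foo;   // M falls back to { N + 1 } = 3
}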
+
+fn has_late_bound_regions<'tcx>(tcx: TyCtxt<'tcx>, node: Node<'tcx>) -> Option<Span> {
+ struct LateBoundRegionsDetector<'tcx> {
+ tcx: TyCtxt<'tcx>,
+ outer_index: ty::DebruijnIndex,
+ has_late_bound_regions: Option<Span>,
+ }
+
+ impl<'tcx> Visitor<'tcx> for LateBoundRegionsDetector<'tcx> {
+ fn visit_ty(&mut self, ty: &'tcx hir::Ty<'tcx>) {
+ if self.has_late_bound_regions.is_some() {
+ return;
+ }
+ match ty.kind {
+ hir::TyKind::BareFn(..) => {
+ self.outer_index.shift_in(1);
+ intravisit::walk_ty(self, ty);
+ self.outer_index.shift_out(1);
+ }
+ _ => intravisit::walk_ty(self, ty),
+ }
+ }
+
+ fn visit_poly_trait_ref(&mut self, tr: &'tcx hir::PolyTraitRef<'tcx>) {
+ if self.has_late_bound_regions.is_some() {
+ return;
+ }
+ self.outer_index.shift_in(1);
+ intravisit::walk_poly_trait_ref(self, tr);
+ self.outer_index.shift_out(1);
+ }
+
+ fn visit_lifetime(&mut self, lt: &'tcx hir::Lifetime) {
+ if self.has_late_bound_regions.is_some() {
+ return;
+ }
+
+ match self.tcx.named_region(lt.hir_id) {
+ Some(rl::Region::Static | rl::Region::EarlyBound(..)) => {}
+ Some(rl::Region::LateBound(debruijn, _, _)) if debruijn < self.outer_index => {}
+ Some(rl::Region::LateBound(..) | rl::Region::Free(..)) | None => {
+ self.has_late_bound_regions = Some(lt.span);
+ }
+ }
+ }
+ }
+
+ fn has_late_bound_regions<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ generics: &'tcx hir::Generics<'tcx>,
+ decl: &'tcx hir::FnDecl<'tcx>,
+ ) -> Option<Span> {
+ let mut visitor = LateBoundRegionsDetector {
+ tcx,
+ outer_index: ty::INNERMOST,
+ has_late_bound_regions: None,
+ };
+ for param in generics.params {
+ if let GenericParamKind::Lifetime { .. } = param.kind {
+ if tcx.is_late_bound(param.hir_id) {
+ return Some(param.span);
+ }
+ }
+ }
+ visitor.visit_fn_decl(decl);
+ visitor.has_late_bound_regions
+ }
+
+ match node {
+ Node::TraitItem(item) => match item.kind {
+ hir::TraitItemKind::Fn(ref sig, _) => {
+ has_late_bound_regions(tcx, &item.generics, sig.decl)
+ }
+ _ => None,
+ },
+ Node::ImplItem(item) => match item.kind {
+ hir::ImplItemKind::Fn(ref sig, _) => {
+ has_late_bound_regions(tcx, &item.generics, sig.decl)
+ }
+ _ => None,
+ },
+ Node::ForeignItem(item) => match item.kind {
+ hir::ForeignItemKind::Fn(fn_decl, _, ref generics) => {
+ has_late_bound_regions(tcx, generics, fn_decl)
+ }
+ _ => None,
+ },
+ Node::Item(item) => match item.kind {
+ hir::ItemKind::Fn(ref sig, .., ref generics, _) => {
+ has_late_bound_regions(tcx, generics, sig.decl)
+ }
+ _ => None,
+ },
+ _ => None,
+ }
+}
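
A small sketch of the kind of region this detector is looking for (illustrative only, not part of this patch): a lifetime bound by a `for<'a>` binder inside a type, rather than by the item's own generics.

// Illustrative only: `'a` is late-bound -- it is introduced by the `for<'a>`
// binder on the function-pointer type, which is the kind of region this
// visitor walks HIR types to find.
fn apply(f: for<'a> fn(&'a str) -> &'a str, s: &str) -> &str {
    f(s)
}

fn main() {
    fn first_word(s: &str) -> &str {
        s.split_whitespace().next().unwrap_or("")
    }
    println!("{}", apply(first_word, "hello world"));
}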
+
+struct AnonConstInParamTyDetector {
+ in_param_ty: bool,
+ found_anon_const_in_param_ty: bool,
+ ct: HirId,
+}
+
+impl<'v> Visitor<'v> for AnonConstInParamTyDetector {
+ fn visit_generic_param(&mut self, p: &'v hir::GenericParam<'v>) {
+ if let GenericParamKind::Const { ty, default: _ } = p.kind {
+ let prev = self.in_param_ty;
+ self.in_param_ty = true;
+ self.visit_ty(ty);
+ self.in_param_ty = prev;
+ }
+ }
+
+ fn visit_anon_const(&mut self, c: &'v hir::AnonConst) {
+ if self.in_param_ty && self.ct == c.hir_id {
+ self.found_anon_const_in_param_ty = true;
+ } else {
+ intravisit::walk_anon_const(self, c)
+ }
+ }
+}
diff --git a/compiler/rustc_typeck/src/collect/item_bounds.rs b/compiler/rustc_hir_analysis/src/collect/item_bounds.rs
index 0d2b75d33..0d34a8bfe 100644
--- a/compiler/rustc_typeck/src/collect/item_bounds.rs
+++ b/compiler/rustc_hir_analysis/src/collect/item_bounds.rs
@@ -53,20 +53,28 @@ fn associated_type_bounds<'tcx>(
/// impl trait it isn't possible to write a suitable predicate on the
/// containing function and for type-alias impl trait we don't have a backwards
/// compatibility issue.
+#[instrument(level = "trace", skip(tcx), ret)]
fn opaque_type_bounds<'tcx>(
tcx: TyCtxt<'tcx>,
opaque_def_id: DefId,
ast_bounds: &'tcx [hir::GenericBound<'tcx>],
span: Span,
+ in_trait: bool,
) -> &'tcx [(ty::Predicate<'tcx>, Span)] {
ty::print::with_no_queries!({
- let item_ty =
- tcx.mk_opaque(opaque_def_id, InternalSubsts::identity_for_item(tcx, opaque_def_id));
+ let substs = InternalSubsts::identity_for_item(tcx, opaque_def_id);
+ let item_ty = if in_trait {
+ tcx.mk_projection(opaque_def_id, substs)
+ } else {
+ tcx.mk_opaque(opaque_def_id, substs)
+ };
let icx = ItemCtxt::new(tcx, opaque_def_id);
let mut bounds = <dyn AstConv<'_>>::compute_bounds(&icx, item_ty, ast_bounds);
// Opaque types are implicitly sized unless a `?Sized` bound is found
<dyn AstConv<'_>>::add_implicitly_sized(&icx, &mut bounds, ast_bounds, None, span);
+ debug!(?bounds);
+
tcx.arena.alloc_from_iter(bounds.predicates(tcx, item_ty))
})
}
@@ -83,10 +91,10 @@ pub(super) fn explicit_item_bounds(
..
}) => associated_type_bounds(tcx, def_id, bounds, *span),
hir::Node::Item(hir::Item {
- kind: hir::ItemKind::OpaqueTy(hir::OpaqueTy { bounds, .. }),
+ kind: hir::ItemKind::OpaqueTy(hir::OpaqueTy { bounds, in_trait, .. }),
span,
..
- }) => opaque_type_bounds(tcx, def_id, bounds, *span),
+ }) => opaque_type_bounds(tcx, def_id, bounds, *span, *in_trait),
_ => bug!("item_bounds called on {:?}", def_id),
}
}
diff --git a/compiler/rustc_hir_analysis/src/collect/lifetimes.rs b/compiler/rustc_hir_analysis/src/collect/lifetimes.rs
new file mode 100644
index 000000000..3f263a6de
--- /dev/null
+++ b/compiler/rustc_hir_analysis/src/collect/lifetimes.rs
@@ -0,0 +1,1888 @@
+//! Resolution of early vs late bound lifetimes.
+//!
+//! Name resolution for lifetimes is performed on the AST and embedded into HIR. From this
+//! information, typechecking needs to transform the lifetime parameters into bound lifetimes.
+//! Lifetimes can be early-bound or late-bound. Construction of typechecking terms needs to visit
+//! the types in HIR to identify late-bound lifetimes and assign their Debruijn indices. This file
+//! is also responsible for assigning their semantics to implicit lifetimes in trait objects.
+
+use rustc_ast::walk_list;
+use rustc_data_structures::fx::{FxHashSet, FxIndexMap, FxIndexSet};
+use rustc_errors::struct_span_err;
+use rustc_hir as hir;
+use rustc_hir::def::{DefKind, Res};
+use rustc_hir::def_id::LocalDefId;
+use rustc_hir::intravisit::{self, Visitor};
+use rustc_hir::{GenericArg, GenericParam, GenericParamKind, HirIdMap, LifetimeName, Node};
+use rustc_middle::bug;
+use rustc_middle::hir::map::Map;
+use rustc_middle::hir::nested_filter;
+use rustc_middle::middle::resolve_lifetime::*;
+use rustc_middle::ty::{self, DefIdTree, TyCtxt};
+use rustc_span::def_id::DefId;
+use rustc_span::symbol::{sym, Ident};
+use rustc_span::Span;
+use std::fmt;
+
+trait RegionExt {
+ fn early(hir_map: Map<'_>, param: &GenericParam<'_>) -> (LocalDefId, Region);
+
+ fn late(index: u32, hir_map: Map<'_>, param: &GenericParam<'_>) -> (LocalDefId, Region);
+
+ fn id(&self) -> Option<DefId>;
+
+ fn shifted(self, amount: u32) -> Region;
+}
+
+impl RegionExt for Region {
+ fn early(hir_map: Map<'_>, param: &GenericParam<'_>) -> (LocalDefId, Region) {
+ let def_id = hir_map.local_def_id(param.hir_id);
+ debug!("Region::early: def_id={:?}", def_id);
+ (def_id, Region::EarlyBound(def_id.to_def_id()))
+ }
+
+ fn late(idx: u32, hir_map: Map<'_>, param: &GenericParam<'_>) -> (LocalDefId, Region) {
+ let depth = ty::INNERMOST;
+ let def_id = hir_map.local_def_id(param.hir_id);
+ debug!(
+ "Region::late: idx={:?}, param={:?} depth={:?} def_id={:?}",
+ idx, param, depth, def_id,
+ );
+ (def_id, Region::LateBound(depth, idx, def_id.to_def_id()))
+ }
+
+ fn id(&self) -> Option<DefId> {
+ match *self {
+ Region::Static => None,
+
+ Region::EarlyBound(id) | Region::LateBound(_, _, id) | Region::Free(_, id) => Some(id),
+ }
+ }
+
+ fn shifted(self, amount: u32) -> Region {
+ match self {
+ Region::LateBound(debruijn, idx, id) => {
+ Region::LateBound(debruijn.shifted_in(amount), idx, id)
+ }
+ _ => self,
+ }
+ }
+}
+
+/// Maps the id of each lifetime reference to the lifetime decl
+/// that it corresponds to.
+///
+/// FIXME. This struct gets converted to a `ResolveLifetimes` for
+/// actual use. It has the same data, but indexed by `LocalDefId`. This
+/// is silly.
+#[derive(Debug, Default)]
+struct NamedRegionMap {
+ // maps from every use of a named (not anonymous) lifetime to a
+ // `Region` describing how that region is bound
+ defs: HirIdMap<Region>,
+
+ // Maps relevant hir items to the bound vars on them. These include:
+ // - function defs
+ // - function pointers
+ // - closures
+ // - trait refs
+ // - bound types (like `T` in `for<'a> T<'a>: Foo`)
+ late_bound_vars: HirIdMap<Vec<ty::BoundVariableKind>>,
+}
+
+struct LifetimeContext<'a, 'tcx> {
+ tcx: TyCtxt<'tcx>,
+ map: &'a mut NamedRegionMap,
+ scope: ScopeRef<'a>,
+
+ /// Indicates that we only care about the definition of a trait. This should
+ /// be false if the `Item` we are resolving lifetimes for is not a trait or
+    /// we eventually need lifetimes resolved for trait items.
+ trait_definition_only: bool,
+}
+
+#[derive(Debug)]
+enum Scope<'a> {
+ /// Declares lifetimes, and each can be early-bound or late-bound.
+ /// The `DebruijnIndex` of late-bound lifetimes starts at `1` and
+ /// it should be shifted by the number of `Binder`s in between the
+ /// declaration `Binder` and the location it's referenced from.
+ Binder {
+ /// We use an IndexMap here because we want these lifetimes in order
+ /// for diagnostics.
+ lifetimes: FxIndexMap<LocalDefId, Region>,
+
+ scope_type: BinderScopeType,
+
+ /// The late bound vars for a given item are stored by `HirId` to be
+ /// queried later. However, if we enter an elision scope, we have to
+ /// later append the elided bound vars to the list and need to know what
+ /// to append to.
+ hir_id: hir::HirId,
+
+ s: ScopeRef<'a>,
+
+ /// If this binder comes from a where clause, specify how it was created.
+ /// This is used to diagnose inaccessible lifetimes in APIT:
+ /// ```ignore (illustrative)
+ /// fn foo(x: impl for<'a> Trait<'a, Assoc = impl Copy + 'a>) {}
+ /// ```
+ where_bound_origin: Option<hir::PredicateOrigin>,
+ },
+
+ /// Lifetimes introduced by a fn are scoped to the call-site for that fn,
+ /// if this is a fn body, otherwise the original definitions are used.
+ /// Unspecified lifetimes are inferred, unless an elision scope is nested,
+ /// e.g., `(&T, fn(&T) -> &T);` becomes `(&'_ T, for<'a> fn(&'a T) -> &'a T)`.
+ Body {
+ id: hir::BodyId,
+ s: ScopeRef<'a>,
+ },
+
+ /// A scope which either determines unspecified lifetimes or errors
+ /// on them (e.g., due to ambiguity).
+ Elision {
+ s: ScopeRef<'a>,
+ },
+
+ /// Use a specific lifetime (if `Some`) or leave it unset (to be
+ /// inferred in a function body or potentially error outside one),
+ /// for the default choice of lifetime in a trait object type.
+ ObjectLifetimeDefault {
+ lifetime: Option<Region>,
+ s: ScopeRef<'a>,
+ },
+
+ /// When we have nested trait refs, we concatenate late bound vars for inner
+ /// trait refs from outer ones. But we also need to include any HRTB
+ /// lifetimes encountered when identifying the trait that an associated type
+ /// is declared on.
+ Supertrait {
+ lifetimes: Vec<ty::BoundVariableKind>,
+ s: ScopeRef<'a>,
+ },
+
+ TraitRefBoundary {
+ s: ScopeRef<'a>,
+ },
+
+ Root,
+}
+
+#[derive(Copy, Clone, Debug)]
+enum BinderScopeType {
+ /// Any non-concatenating binder scopes.
+ Normal,
+ /// Within a syntactic trait ref, there may be multiple poly trait refs that
+ /// are nested (under the `associated_type_bounds` feature). The binders of
+ /// the inner poly trait refs are extended from the outer poly trait refs
+ /// and don't increase the late bound depth. If you had
+ /// `T: for<'a> Foo<Bar: for<'b> Baz<'a, 'b>>`, then the `for<'b>` scope
+    /// would be `Concatenating`. This is also used in trait refs in where clauses
+ /// where we have two binders `for<> T: for<> Foo` (I've intentionally left
+ /// out any lifetimes because they aren't needed to show the two scopes).
+ /// The inner `for<>` has a scope of `Concatenating`.
+ Concatenating,
+}
+
+// A helper struct for debugging scopes without printing parent scopes
+struct TruncatedScopeDebug<'a>(&'a Scope<'a>);
+
+impl<'a> fmt::Debug for TruncatedScopeDebug<'a> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match self.0 {
+ Scope::Binder { lifetimes, scope_type, hir_id, where_bound_origin, s: _ } => f
+ .debug_struct("Binder")
+ .field("lifetimes", lifetimes)
+ .field("scope_type", scope_type)
+ .field("hir_id", hir_id)
+ .field("where_bound_origin", where_bound_origin)
+ .field("s", &"..")
+ .finish(),
+ Scope::Body { id, s: _ } => {
+ f.debug_struct("Body").field("id", id).field("s", &"..").finish()
+ }
+ Scope::Elision { s: _ } => f.debug_struct("Elision").field("s", &"..").finish(),
+ Scope::ObjectLifetimeDefault { lifetime, s: _ } => f
+ .debug_struct("ObjectLifetimeDefault")
+ .field("lifetime", lifetime)
+ .field("s", &"..")
+ .finish(),
+ Scope::Supertrait { lifetimes, s: _ } => f
+ .debug_struct("Supertrait")
+ .field("lifetimes", lifetimes)
+ .field("s", &"..")
+ .finish(),
+ Scope::TraitRefBoundary { s: _ } => f.debug_struct("TraitRefBoundary").finish(),
+ Scope::Root => f.debug_struct("Root").finish(),
+ }
+ }
+}
+
+type ScopeRef<'a> = &'a Scope<'a>;
+
+const ROOT_SCOPE: ScopeRef<'static> = &Scope::Root;
+
+pub(crate) fn provide(providers: &mut ty::query::Providers) {
+ *providers = ty::query::Providers {
+ resolve_lifetimes_trait_definition,
+ resolve_lifetimes,
+
+ named_region_map: |tcx, id| resolve_lifetimes_for(tcx, id).defs.get(&id),
+ is_late_bound_map,
+ object_lifetime_default,
+ late_bound_vars_map: |tcx, id| resolve_lifetimes_for(tcx, id).late_bound_vars.get(&id),
+
+ ..*providers
+ };
+}
+
+/// Like `resolve_lifetimes`, but does not resolve lifetimes for trait items.
+/// Also does not generate any diagnostics.
+///
+/// This is ultimately a subset of the `resolve_lifetimes` work. It effectively
+/// resolves lifetimes only within the trait "header" -- that is, the trait
+/// and supertrait list. In contrast, `resolve_lifetimes` resolves all the
+/// lifetimes within the trait and its items. There is room to refactor this,
+/// for example to resolve lifetimes for each trait item in separate queries,
+/// but it's convenient to do the entire trait at once because the lifetimes
+/// from the trait definition are in scope within the trait items as well.
+///
+/// The reason for this separate call is to resolve what would otherwise
+/// be a cycle. Consider this example:
+///
+/// ```ignore UNSOLVED (maybe @jackh726 knows what lifetime parameter to give Sub)
+/// trait Base<'a> {
+/// type BaseItem;
+/// }
+/// trait Sub<'b>: for<'a> Base<'a> {
+/// type SubItem: Sub<BaseItem = &'b u32>;
+/// }
+/// ```
+///
+/// When we resolve `Sub` and all its items, we also have to resolve `Sub<BaseItem = &'b u32>`.
+/// To figure out the index of `'b`, we have to know about the supertraits
+/// of `Sub` so that we can determine that the `for<'a>` will be in scope.
+/// (This is because we -- currently at least -- flatten all the late-bound
+/// lifetimes into a single binder.) This requires us to resolve the
+/// *trait definition* of `Sub`; basically just enough lifetime information
+/// to look at the supertraits.
+#[instrument(level = "debug", skip(tcx))]
+fn resolve_lifetimes_trait_definition(
+ tcx: TyCtxt<'_>,
+ local_def_id: LocalDefId,
+) -> ResolveLifetimes {
+ convert_named_region_map(do_resolve(tcx, local_def_id, true))
+}
+
+/// Computes the `ResolveLifetimes` map that contains data for an entire `Item`.
+/// You should not read the result of this query directly, but rather use
+/// `named_region_map`, `is_late_bound_map`, etc.
+#[instrument(level = "debug", skip(tcx))]
+fn resolve_lifetimes(tcx: TyCtxt<'_>, local_def_id: LocalDefId) -> ResolveLifetimes {
+ convert_named_region_map(do_resolve(tcx, local_def_id, false))
+}
+
+fn do_resolve(
+ tcx: TyCtxt<'_>,
+ local_def_id: LocalDefId,
+ trait_definition_only: bool,
+) -> NamedRegionMap {
+ let item = tcx.hir().expect_item(local_def_id);
+ let mut named_region_map =
+ NamedRegionMap { defs: Default::default(), late_bound_vars: Default::default() };
+ let mut visitor = LifetimeContext {
+ tcx,
+ map: &mut named_region_map,
+ scope: ROOT_SCOPE,
+ trait_definition_only,
+ };
+ visitor.visit_item(item);
+
+ named_region_map
+}
+
+fn convert_named_region_map(named_region_map: NamedRegionMap) -> ResolveLifetimes {
+ let mut rl = ResolveLifetimes::default();
+
+ for (hir_id, v) in named_region_map.defs {
+ let map = rl.defs.entry(hir_id.owner).or_default();
+ map.insert(hir_id.local_id, v);
+ }
+ for (hir_id, v) in named_region_map.late_bound_vars {
+ let map = rl.late_bound_vars.entry(hir_id.owner).or_default();
+ map.insert(hir_id.local_id, v);
+ }
+
+ debug!(?rl.defs);
+ debug!(?rl.late_bound_vars);
+ rl
+}
+
+/// Given any owner (struct, trait, trait method, etc.), performs lifetime resolution.
+/// There are two important things this does.
+/// First, we have to resolve lifetimes for
+/// the entire *`Item`* that contains this owner, because that's the largest "scope"
+/// where we can have relevant lifetimes.
+/// Second, if we are asking for lifetimes in a trait *definition*, we use `resolve_lifetimes_trait_definition`
+/// instead of `resolve_lifetimes`, which does not descend into the trait items and does not emit diagnostics.
+/// This allows us to avoid cycles. Importantly, if we ask for lifetimes of an item whose owner is
+/// not the trait itself (such as a trait method or an associated type), then we just use the regular
+/// `resolve_lifetimes`.
+fn resolve_lifetimes_for<'tcx>(tcx: TyCtxt<'tcx>, def_id: hir::OwnerId) -> &'tcx ResolveLifetimes {
+ let item_id = item_for(tcx, def_id.def_id);
+ let local_def_id = item_id.owner_id.def_id;
+ if item_id.owner_id == def_id {
+ let item = tcx.hir().item(item_id);
+ match item.kind {
+ hir::ItemKind::Trait(..) => tcx.resolve_lifetimes_trait_definition(local_def_id),
+ _ => tcx.resolve_lifetimes(local_def_id),
+ }
+ } else {
+ tcx.resolve_lifetimes(local_def_id)
+ }
+}
+
+/// Finds the `Item` that contains the given `LocalDefId`
+fn item_for(tcx: TyCtxt<'_>, local_def_id: LocalDefId) -> hir::ItemId {
+ match tcx.hir().find_by_def_id(local_def_id) {
+ Some(Node::Item(item)) => {
+ return item.item_id();
+ }
+ _ => {}
+ }
+ let item = {
+ let hir_id = tcx.hir().local_def_id_to_hir_id(local_def_id);
+ let mut parent_iter = tcx.hir().parent_iter(hir_id);
+ loop {
+ let node = parent_iter.next().map(|n| n.1);
+ match node {
+ Some(hir::Node::Item(item)) => break item.item_id(),
+ Some(hir::Node::Crate(_)) | None => bug!("{:?} has no parent Item", local_def_id),
+ _ => {}
+ }
+ }
+ };
+ item
+}
+
+fn late_region_as_bound_region<'tcx>(tcx: TyCtxt<'tcx>, region: &Region) -> ty::BoundVariableKind {
+ match region {
+ Region::LateBound(_, _, def_id) => {
+ let name = tcx.hir().name(tcx.hir().local_def_id_to_hir_id(def_id.expect_local()));
+ ty::BoundVariableKind::Region(ty::BrNamed(*def_id, name))
+ }
+ _ => bug!("{:?} is not a late region", region),
+ }
+}
+
+impl<'a, 'tcx> LifetimeContext<'a, 'tcx> {
+ /// Returns the binders in scope and the type of `Binder` that should be created for a poly trait ref.
+ fn poly_trait_ref_binder_info(&mut self) -> (Vec<ty::BoundVariableKind>, BinderScopeType) {
+ let mut scope = self.scope;
+ let mut supertrait_lifetimes = vec![];
+ loop {
+ match scope {
+ Scope::Body { .. } | Scope::Root => {
+ break (vec![], BinderScopeType::Normal);
+ }
+
+ Scope::Elision { s, .. } | Scope::ObjectLifetimeDefault { s, .. } => {
+ scope = s;
+ }
+
+ Scope::Supertrait { s, lifetimes } => {
+ supertrait_lifetimes = lifetimes.clone();
+ scope = s;
+ }
+
+ Scope::TraitRefBoundary { .. } => {
+ // We should only see super trait lifetimes if there is a `Binder` above
+ assert!(supertrait_lifetimes.is_empty());
+ break (vec![], BinderScopeType::Normal);
+ }
+
+ Scope::Binder { hir_id, .. } => {
+ // Nested poly trait refs have the binders concatenated
+ let mut full_binders =
+ self.map.late_bound_vars.entry(*hir_id).or_default().clone();
+ full_binders.extend(supertrait_lifetimes.into_iter());
+ break (full_binders, BinderScopeType::Concatenating);
+ }
+ }
+ }
+ }
+}
+impl<'a, 'tcx> Visitor<'tcx> for LifetimeContext<'a, 'tcx> {
+ type NestedFilter = nested_filter::All;
+
+ fn nested_visit_map(&mut self) -> Self::Map {
+ self.tcx.hir()
+ }
+
+ // We want to nest trait/impl items in their parent, but nothing else.
+ fn visit_nested_item(&mut self, _: hir::ItemId) {}
+
+ fn visit_trait_item_ref(&mut self, ii: &'tcx hir::TraitItemRef) {
+ if !self.trait_definition_only {
+ intravisit::walk_trait_item_ref(self, ii)
+ }
+ }
+
+ fn visit_nested_body(&mut self, body: hir::BodyId) {
+ let body = self.tcx.hir().body(body);
+ self.with(Scope::Body { id: body.id(), s: self.scope }, |this| {
+ this.visit_body(body);
+ });
+ }
+
+ fn visit_expr(&mut self, e: &'tcx hir::Expr<'tcx>) {
+ if let hir::ExprKind::Closure(hir::Closure {
+ binder, bound_generic_params, fn_decl, ..
+ }) = e.kind
+ {
+ if let &hir::ClosureBinder::For { span: for_sp, .. } = binder {
+ fn span_of_infer(ty: &hir::Ty<'_>) -> Option<Span> {
+ struct V(Option<Span>);
+
+ impl<'v> Visitor<'v> for V {
+ fn visit_ty(&mut self, t: &'v hir::Ty<'v>) {
+ match t.kind {
+ _ if self.0.is_some() => (),
+ hir::TyKind::Infer => {
+ self.0 = Some(t.span);
+ }
+ _ => intravisit::walk_ty(self, t),
+ }
+ }
+ }
+
+ let mut v = V(None);
+ v.visit_ty(ty);
+ v.0
+ }
+
+ let infer_in_rt_sp = match fn_decl.output {
+ hir::FnRetTy::DefaultReturn(sp) => Some(sp),
+ hir::FnRetTy::Return(ty) => span_of_infer(ty),
+ };
+
+ let infer_spans = fn_decl
+ .inputs
+ .into_iter()
+ .filter_map(span_of_infer)
+ .chain(infer_in_rt_sp)
+ .collect::<Vec<_>>();
+
+ if !infer_spans.is_empty() {
+ self.tcx.sess
+ .struct_span_err(
+ infer_spans,
+ "implicit types in closure signatures are forbidden when `for<...>` is present",
+ )
+ .span_label(for_sp, "`for<...>` is here")
+ .emit();
+ }
+ }
+
+ let (lifetimes, binders): (FxIndexMap<LocalDefId, Region>, Vec<_>) =
+ bound_generic_params
+ .iter()
+ .filter(|param| matches!(param.kind, GenericParamKind::Lifetime { .. }))
+ .enumerate()
+ .map(|(late_bound_idx, param)| {
+ let pair = Region::late(late_bound_idx as u32, self.tcx.hir(), param);
+ let r = late_region_as_bound_region(self.tcx, &pair.1);
+ (pair, r)
+ })
+ .unzip();
+
+ self.record_late_bound_vars(e.hir_id, binders);
+ let scope = Scope::Binder {
+ hir_id: e.hir_id,
+ lifetimes,
+ s: self.scope,
+ scope_type: BinderScopeType::Normal,
+ where_bound_origin: None,
+ };
+
+ self.with(scope, |this| {
+ // a closure has no bounds, so everything
+ // contained within is scoped within its binder.
+ intravisit::walk_expr(this, e)
+ });
+ } else {
+ intravisit::walk_expr(self, e)
+ }
+ }
+
+ #[instrument(level = "debug", skip(self))]
+ fn visit_item(&mut self, item: &'tcx hir::Item<'tcx>) {
+ match &item.kind {
+ hir::ItemKind::Impl(hir::Impl { of_trait, .. }) => {
+ if let Some(of_trait) = of_trait {
+ self.record_late_bound_vars(of_trait.hir_ref_id, Vec::default());
+ }
+ }
+ _ => {}
+ }
+ match item.kind {
+ hir::ItemKind::Fn(_, ref generics, _) => {
+ self.visit_early_late(item.hir_id(), generics, |this| {
+ intravisit::walk_item(this, item);
+ });
+ }
+
+ hir::ItemKind::ExternCrate(_)
+ | hir::ItemKind::Use(..)
+ | hir::ItemKind::Macro(..)
+ | hir::ItemKind::Mod(..)
+ | hir::ItemKind::ForeignMod { .. }
+ | hir::ItemKind::GlobalAsm(..) => {
+ // These sorts of items have no lifetime parameters at all.
+ intravisit::walk_item(self, item);
+ }
+ hir::ItemKind::Static(..) | hir::ItemKind::Const(..) => {
+ // No lifetime parameters, but implied 'static.
+ self.with(Scope::Elision { s: self.scope }, |this| {
+ intravisit::walk_item(this, item)
+ });
+ }
+ hir::ItemKind::OpaqueTy(hir::OpaqueTy { .. }) => {
+ // Opaque types are visited when we visit the
+ // `TyKind::OpaqueDef`, so that they have the lifetimes from
+ // their parent opaque_ty in scope.
+ //
+ // The core idea here is that since OpaqueTys are generated with the impl Trait as
+ // their owner, we can keep going until we find the Item that owns that. We then
+ // conservatively add all resolved lifetimes. Otherwise we run into problems in
+ // cases like `type Foo<'a> = impl Bar<As = impl Baz + 'a>`.
+ for (_hir_id, node) in self.tcx.hir().parent_iter(item.owner_id.into()) {
+ match node {
+ hir::Node::Item(parent_item) => {
+ let resolved_lifetimes: &ResolveLifetimes = self.tcx.resolve_lifetimes(
+ item_for(self.tcx, parent_item.owner_id.def_id).owner_id.def_id,
+ );
+ // We need to add *all* deps, since opaque tys may want them from *us*
+ for (&owner, defs) in resolved_lifetimes.defs.iter() {
+ defs.iter().for_each(|(&local_id, region)| {
+ self.map.defs.insert(hir::HirId { owner, local_id }, *region);
+ });
+ }
+ for (&owner, late_bound_vars) in
+ resolved_lifetimes.late_bound_vars.iter()
+ {
+ late_bound_vars.iter().for_each(|(&local_id, late_bound_vars)| {
+ self.record_late_bound_vars(
+ hir::HirId { owner, local_id },
+ late_bound_vars.clone(),
+ );
+ });
+ }
+ break;
+ }
+ hir::Node::Crate(_) => bug!("OpaqueTy not contained in an Item"),
+ _ => {}
+ }
+ }
+ }
+ hir::ItemKind::TyAlias(_, ref generics)
+ | hir::ItemKind::Enum(_, ref generics)
+ | hir::ItemKind::Struct(_, ref generics)
+ | hir::ItemKind::Union(_, ref generics)
+ | hir::ItemKind::Trait(_, _, ref generics, ..)
+ | hir::ItemKind::TraitAlias(ref generics, ..)
+ | hir::ItemKind::Impl(hir::Impl { ref generics, .. }) => {
+ // These kinds of items have only early-bound lifetime parameters.
+ let lifetimes = generics
+ .params
+ .iter()
+ .filter_map(|param| match param.kind {
+ GenericParamKind::Lifetime { .. } => {
+ Some(Region::early(self.tcx.hir(), param))
+ }
+ GenericParamKind::Type { .. } | GenericParamKind::Const { .. } => None,
+ })
+ .collect();
+ self.record_late_bound_vars(item.hir_id(), vec![]);
+ let scope = Scope::Binder {
+ hir_id: item.hir_id(),
+ lifetimes,
+ scope_type: BinderScopeType::Normal,
+ s: ROOT_SCOPE,
+ where_bound_origin: None,
+ };
+ self.with(scope, |this| {
+ let scope = Scope::TraitRefBoundary { s: this.scope };
+ this.with(scope, |this| {
+ intravisit::walk_item(this, item);
+ });
+ });
+ }
+ }
+ }
+
+ fn visit_foreign_item(&mut self, item: &'tcx hir::ForeignItem<'tcx>) {
+ match item.kind {
+ hir::ForeignItemKind::Fn(_, _, ref generics) => {
+ self.visit_early_late(item.hir_id(), generics, |this| {
+ intravisit::walk_foreign_item(this, item);
+ })
+ }
+ hir::ForeignItemKind::Static(..) => {
+ intravisit::walk_foreign_item(self, item);
+ }
+ hir::ForeignItemKind::Type => {
+ intravisit::walk_foreign_item(self, item);
+ }
+ }
+ }
+
+ #[instrument(level = "debug", skip(self))]
+ fn visit_ty(&mut self, ty: &'tcx hir::Ty<'tcx>) {
+ match ty.kind {
+ hir::TyKind::BareFn(ref c) => {
+ let (lifetimes, binders): (FxIndexMap<LocalDefId, Region>, Vec<_>) = c
+ .generic_params
+ .iter()
+ .filter(|param| matches!(param.kind, GenericParamKind::Lifetime { .. }))
+ .enumerate()
+ .map(|(late_bound_idx, param)| {
+ let pair = Region::late(late_bound_idx as u32, self.tcx.hir(), param);
+ let r = late_region_as_bound_region(self.tcx, &pair.1);
+ (pair, r)
+ })
+ .unzip();
+ self.record_late_bound_vars(ty.hir_id, binders);
+ let scope = Scope::Binder {
+ hir_id: ty.hir_id,
+ lifetimes,
+ s: self.scope,
+ scope_type: BinderScopeType::Normal,
+ where_bound_origin: None,
+ };
+ self.with(scope, |this| {
+ // a bare fn has no bounds, so everything
+ // contained within is scoped within its binder.
+ intravisit::walk_ty(this, ty);
+ });
+ }
+ hir::TyKind::TraitObject(bounds, ref lifetime, _) => {
+ debug!(?bounds, ?lifetime, "TraitObject");
+ let scope = Scope::TraitRefBoundary { s: self.scope };
+ self.with(scope, |this| {
+ for bound in bounds {
+ this.visit_poly_trait_ref(bound);
+ }
+ });
+ match lifetime.name {
+ LifetimeName::ImplicitObjectLifetimeDefault => {
+ // If the user does not write *anything*, we
+ // use the object lifetime defaulting
+ // rules. So e.g., `Box<dyn Debug>` becomes
+ // `Box<dyn Debug + 'static>`.
+ self.resolve_object_lifetime_default(lifetime)
+ }
+ LifetimeName::Infer => {
+ // If the user writes `'_`, we use the *ordinary* elision
+ // rules. So the `'_` in e.g., `Box<dyn Debug + '_>` will be
+ // resolved the same as the `'_` in `&'_ Foo`.
+ //
+ // cc #48468
+ }
+ LifetimeName::Param(..) | LifetimeName::Static => {
+ // If the user wrote an explicit name, use that.
+ self.visit_lifetime(lifetime);
+ }
+ LifetimeName::Error => {}
+ }
+ }
+ hir::TyKind::Rptr(ref lifetime_ref, ref mt) => {
+ self.visit_lifetime(lifetime_ref);
+ let scope = Scope::ObjectLifetimeDefault {
+ lifetime: self.map.defs.get(&lifetime_ref.hir_id).cloned(),
+ s: self.scope,
+ };
+ self.with(scope, |this| this.visit_ty(&mt.ty));
+ }
+ hir::TyKind::OpaqueDef(item_id, lifetimes, _in_trait) => {
+ // Resolve the lifetimes in the bounds to the lifetime defs in the generics.
+ // `fn foo<'a>() -> impl MyTrait<'a> { ... }` desugars to
+ // `type MyAnonTy<'b> = impl MyTrait<'b>;`
+ // ^ ^ this gets resolved in the scope of
+ // the opaque_ty generics
+ let opaque_ty = self.tcx.hir().item(item_id);
+ let (generics, bounds) = match opaque_ty.kind {
+ hir::ItemKind::OpaqueTy(hir::OpaqueTy {
+ origin: hir::OpaqueTyOrigin::TyAlias,
+ ..
+ }) => {
+ intravisit::walk_ty(self, ty);
+
+ // Elided lifetimes are not allowed in non-return
+ // position impl Trait
+ let scope = Scope::TraitRefBoundary { s: self.scope };
+ self.with(scope, |this| {
+ let scope = Scope::Elision { s: this.scope };
+ this.with(scope, |this| {
+ intravisit::walk_item(this, opaque_ty);
+ })
+ });
+
+ return;
+ }
+ hir::ItemKind::OpaqueTy(hir::OpaqueTy {
+ origin: hir::OpaqueTyOrigin::FnReturn(..) | hir::OpaqueTyOrigin::AsyncFn(..),
+ ref generics,
+ bounds,
+ ..
+ }) => (generics, bounds),
+ ref i => bug!("`impl Trait` pointed to non-opaque type?? {:#?}", i),
+ };
+
+ // Resolve the lifetimes that are applied to the opaque type.
+ // These are resolved in the current scope.
+ // `fn foo<'a>() -> impl MyTrait<'a> { ... }` desugars to
+ // `fn foo<'a>() -> MyAnonTy<'a> { ... }`
+ // ^ ^this gets resolved in the current scope
+ for lifetime in lifetimes {
+ let hir::GenericArg::Lifetime(lifetime) = lifetime else {
+ continue
+ };
+ self.visit_lifetime(lifetime);
+
+ // Check for predicates like `impl for<'a> Trait<impl OtherTrait<'a>>`
+ // and ban them. Type variables instantiated inside binders aren't
+ // well-supported at the moment, so this doesn't work.
+ // In the future, this should be fixed and this error should be removed.
+ let def = self.map.defs.get(&lifetime.hir_id).cloned();
+ let Some(Region::LateBound(_, _, def_id)) = def else {
+ continue
+ };
+ let Some(def_id) = def_id.as_local() else {
+ continue
+ };
+ let hir_id = self.tcx.hir().local_def_id_to_hir_id(def_id);
+ // Ensure that the parent of the def is an item, not HRTB
+ let parent_id = self.tcx.hir().get_parent_node(hir_id);
+ if !parent_id.is_owner() {
+ if !self.trait_definition_only {
+ struct_span_err!(
+ self.tcx.sess,
+ lifetime.span,
+ E0657,
+ "`impl Trait` can only capture lifetimes \
+ bound at the fn or impl level"
+ )
+ .emit();
+ }
+ self.uninsert_lifetime_on_error(lifetime, def.unwrap());
+ }
+ if let hir::Node::Item(hir::Item {
+ kind: hir::ItemKind::OpaqueTy { .. }, ..
+ }) = self.tcx.hir().get(parent_id)
+ {
+ if !self.trait_definition_only {
+ let mut err = self.tcx.sess.struct_span_err(
+ lifetime.span,
+ "higher kinded lifetime bounds on nested opaque types are not supported yet",
+ );
+ err.span_note(self.tcx.def_span(def_id), "lifetime declared here");
+ err.emit();
+ }
+ self.uninsert_lifetime_on_error(lifetime, def.unwrap());
+ }
+ }
+
+ // We want to start our early-bound indices at the end of the parent scope,
+ // not including any parent `impl Trait`s.
+ let mut lifetimes = FxIndexMap::default();
+ debug!(?generics.params);
+ for param in generics.params {
+ match param.kind {
+ GenericParamKind::Lifetime { .. } => {
+ let (def_id, reg) = Region::early(self.tcx.hir(), &param);
+ lifetimes.insert(def_id, reg);
+ }
+ GenericParamKind::Type { .. } | GenericParamKind::Const { .. } => {}
+ }
+ }
+ self.record_late_bound_vars(ty.hir_id, vec![]);
+
+ let scope = Scope::Binder {
+ hir_id: ty.hir_id,
+ lifetimes,
+ s: self.scope,
+ scope_type: BinderScopeType::Normal,
+ where_bound_origin: None,
+ };
+ self.with(scope, |this| {
+ let scope = Scope::TraitRefBoundary { s: this.scope };
+ this.with(scope, |this| {
+ this.visit_generics(generics);
+ for bound in bounds {
+ this.visit_param_bound(bound);
+ }
+ })
+ });
+ }
+ _ => intravisit::walk_ty(self, ty),
+ }
+ }
+
+ #[instrument(level = "debug", skip(self))]
+ fn visit_trait_item(&mut self, trait_item: &'tcx hir::TraitItem<'tcx>) {
+ use self::hir::TraitItemKind::*;
+ match trait_item.kind {
+ Fn(_, _) => {
+ self.visit_early_late(trait_item.hir_id(), &trait_item.generics, |this| {
+ intravisit::walk_trait_item(this, trait_item)
+ });
+ }
+ Type(bounds, ref ty) => {
+ let generics = &trait_item.generics;
+ let lifetimes = generics
+ .params
+ .iter()
+ .filter_map(|param| match param.kind {
+ GenericParamKind::Lifetime { .. } => {
+ Some(Region::early(self.tcx.hir(), param))
+ }
+ GenericParamKind::Type { .. } | GenericParamKind::Const { .. } => None,
+ })
+ .collect();
+ self.record_late_bound_vars(trait_item.hir_id(), vec![]);
+ let scope = Scope::Binder {
+ hir_id: trait_item.hir_id(),
+ lifetimes,
+ s: self.scope,
+ scope_type: BinderScopeType::Normal,
+ where_bound_origin: None,
+ };
+ self.with(scope, |this| {
+ let scope = Scope::TraitRefBoundary { s: this.scope };
+ this.with(scope, |this| {
+ this.visit_generics(generics);
+ for bound in bounds {
+ this.visit_param_bound(bound);
+ }
+ if let Some(ty) = ty {
+ this.visit_ty(ty);
+ }
+ })
+ });
+ }
+ Const(_, _) => {
+ // Only methods and types support generics.
+ assert!(trait_item.generics.params.is_empty());
+ intravisit::walk_trait_item(self, trait_item);
+ }
+ }
+ }
+
+ #[instrument(level = "debug", skip(self))]
+ fn visit_impl_item(&mut self, impl_item: &'tcx hir::ImplItem<'tcx>) {
+ use self::hir::ImplItemKind::*;
+ match impl_item.kind {
+ Fn(..) => self.visit_early_late(impl_item.hir_id(), &impl_item.generics, |this| {
+ intravisit::walk_impl_item(this, impl_item)
+ }),
+ Type(ref ty) => {
+ let generics = &impl_item.generics;
+ let lifetimes: FxIndexMap<LocalDefId, Region> = generics
+ .params
+ .iter()
+ .filter_map(|param| match param.kind {
+ GenericParamKind::Lifetime { .. } => {
+ Some(Region::early(self.tcx.hir(), param))
+ }
+ GenericParamKind::Const { .. } | GenericParamKind::Type { .. } => None,
+ })
+ .collect();
+ self.record_late_bound_vars(impl_item.hir_id(), vec![]);
+ let scope = Scope::Binder {
+ hir_id: impl_item.hir_id(),
+ lifetimes,
+ s: self.scope,
+ scope_type: BinderScopeType::Normal,
+ where_bound_origin: None,
+ };
+ self.with(scope, |this| {
+ let scope = Scope::TraitRefBoundary { s: this.scope };
+ this.with(scope, |this| {
+ this.visit_generics(generics);
+ this.visit_ty(ty);
+ })
+ });
+ }
+ Const(_, _) => {
+ // Only methods and types support generics.
+ assert!(impl_item.generics.params.is_empty());
+ intravisit::walk_impl_item(self, impl_item);
+ }
+ }
+ }
+
+ #[instrument(level = "debug", skip(self))]
+ fn visit_lifetime(&mut self, lifetime_ref: &'tcx hir::Lifetime) {
+ match lifetime_ref.name {
+ hir::LifetimeName::Static => self.insert_lifetime(lifetime_ref, Region::Static),
+ hir::LifetimeName::Param(param_def_id, _) => {
+ self.resolve_lifetime_ref(param_def_id, lifetime_ref)
+ }
+ // If we've already reported an error, just ignore `lifetime_ref`.
+ hir::LifetimeName::Error => {}
+ // Those will be resolved by typechecking.
+ hir::LifetimeName::ImplicitObjectLifetimeDefault | hir::LifetimeName::Infer => {}
+ }
+ }
+
+ fn visit_path(&mut self, path: &'tcx hir::Path<'tcx>, _: hir::HirId) {
+ for (i, segment) in path.segments.iter().enumerate() {
+ let depth = path.segments.len() - i - 1;
+ if let Some(ref args) = segment.args {
+ self.visit_segment_args(path.res, depth, args);
+ }
+ }
+ }
+
+ fn visit_fn(
+ &mut self,
+ fk: intravisit::FnKind<'tcx>,
+ fd: &'tcx hir::FnDecl<'tcx>,
+ body_id: hir::BodyId,
+ _: Span,
+ _: hir::HirId,
+ ) {
+ let output = match fd.output {
+ hir::FnRetTy::DefaultReturn(_) => None,
+ hir::FnRetTy::Return(ref ty) => Some(&**ty),
+ };
+ self.visit_fn_like_elision(&fd.inputs, output, matches!(fk, intravisit::FnKind::Closure));
+ intravisit::walk_fn_kind(self, fk);
+ self.visit_nested_body(body_id)
+ }
+
+ fn visit_generics(&mut self, generics: &'tcx hir::Generics<'tcx>) {
+ let scope = Scope::TraitRefBoundary { s: self.scope };
+ self.with(scope, |this| {
+ for param in generics.params {
+ match param.kind {
+ GenericParamKind::Lifetime { .. } => {}
+ GenericParamKind::Type { ref default, .. } => {
+ if let Some(ref ty) = default {
+ this.visit_ty(&ty);
+ }
+ }
+ GenericParamKind::Const { ref ty, default } => {
+ this.visit_ty(&ty);
+ if let Some(default) = default {
+ this.visit_body(this.tcx.hir().body(default.body));
+ }
+ }
+ }
+ }
+ for predicate in generics.predicates {
+ match predicate {
+ &hir::WherePredicate::BoundPredicate(hir::WhereBoundPredicate {
+ hir_id,
+ ref bounded_ty,
+ bounds,
+ ref bound_generic_params,
+ origin,
+ ..
+ }) => {
+ let lifetimes: FxIndexMap<LocalDefId, Region> =
+ bound_generic_params
+ .iter()
+ .filter(|param| {
+ matches!(param.kind, GenericParamKind::Lifetime { .. })
+ })
+ .enumerate()
+ .map(|(late_bound_idx, param)| {
+ Region::late(late_bound_idx as u32, this.tcx.hir(), param)
+ })
+ .collect();
+ let binders: Vec<_> =
+ lifetimes
+ .iter()
+ .map(|(_, region)| {
+ late_region_as_bound_region(this.tcx, region)
+ })
+ .collect();
+ this.record_late_bound_vars(hir_id, binders.clone());
+ // Even if there are no lifetimes defined here, we still wrap it in a binder
+ // scope. If there happens to be a nested poly trait ref (an error), that
+ // will be `Concatenating` anyways, so we don't have to worry about the depth
+ // being wrong.
+ let scope = Scope::Binder {
+ hir_id,
+ lifetimes,
+ s: this.scope,
+ scope_type: BinderScopeType::Normal,
+ where_bound_origin: Some(origin),
+ };
+ this.with(scope, |this| {
+ this.visit_ty(&bounded_ty);
+ walk_list!(this, visit_param_bound, bounds);
+ })
+ }
+ &hir::WherePredicate::RegionPredicate(hir::WhereRegionPredicate {
+ ref lifetime,
+ bounds,
+ ..
+ }) => {
+ this.visit_lifetime(lifetime);
+ walk_list!(this, visit_param_bound, bounds);
+
+ if lifetime.name != hir::LifetimeName::Static {
+ for bound in bounds {
+ let hir::GenericBound::Outlives(ref lt) = bound else {
+ continue;
+ };
+ if lt.name != hir::LifetimeName::Static {
+ continue;
+ }
+ this.insert_lifetime(lt, Region::Static);
+ this.tcx
+ .sess
+ .struct_span_warn(
+ lifetime.span,
+ &format!(
+ "unnecessary lifetime parameter `{}`",
+ lifetime.name.ident(),
+ ),
+ )
+ .help(&format!(
+ "you can use the `'static` lifetime directly, in place of `{}`",
+ lifetime.name.ident(),
+ ))
+ .emit();
+ }
+ }
+ }
+ &hir::WherePredicate::EqPredicate(hir::WhereEqPredicate {
+ ref lhs_ty,
+ ref rhs_ty,
+ ..
+ }) => {
+ this.visit_ty(lhs_ty);
+ this.visit_ty(rhs_ty);
+ }
+ }
+ }
+ })
+ }
+
+ fn visit_param_bound(&mut self, bound: &'tcx hir::GenericBound<'tcx>) {
+ match bound {
+ hir::GenericBound::LangItemTrait(_, _, hir_id, _) => {
+ // FIXME(jackh726): This is pretty weird. `LangItemTrait` doesn't go
+ // through the regular poly trait ref code, so we don't get another
+ // chance to introduce a binder. For now, I'm keeping the existing logic
+ // of "if there isn't a Binder scope above us, add one", but I
+ // imagine there's a better way to go about this.
+ let (binders, scope_type) = self.poly_trait_ref_binder_info();
+
+ self.record_late_bound_vars(*hir_id, binders);
+ let scope = Scope::Binder {
+ hir_id: *hir_id,
+ lifetimes: FxIndexMap::default(),
+ s: self.scope,
+ scope_type,
+ where_bound_origin: None,
+ };
+ self.with(scope, |this| {
+ intravisit::walk_param_bound(this, bound);
+ });
+ }
+ _ => intravisit::walk_param_bound(self, bound),
+ }
+ }
+
+ fn visit_poly_trait_ref(&mut self, trait_ref: &'tcx hir::PolyTraitRef<'tcx>) {
+ debug!("visit_poly_trait_ref(trait_ref={:?})", trait_ref);
+
+ let (mut binders, scope_type) = self.poly_trait_ref_binder_info();
+
+ let initial_bound_vars = binders.len() as u32;
+ let mut lifetimes: FxIndexMap<LocalDefId, Region> = FxIndexMap::default();
+ let binders_iter = trait_ref
+ .bound_generic_params
+ .iter()
+ .filter(|param| matches!(param.kind, GenericParamKind::Lifetime { .. }))
+ .enumerate()
+ .map(|(late_bound_idx, param)| {
+ let pair =
+ Region::late(initial_bound_vars + late_bound_idx as u32, self.tcx.hir(), param);
+ let r = late_region_as_bound_region(self.tcx, &pair.1);
+ lifetimes.insert(pair.0, pair.1);
+ r
+ });
+ binders.extend(binders_iter);
+
+ debug!(?binders);
+ self.record_late_bound_vars(trait_ref.trait_ref.hir_ref_id, binders);
+
+ // Always introduce a scope here, even if this is in a where clause and
+ // we introduced the binders around the bounded Ty. In that case, we
+ // just reuse the concatenation functionality also present in nested trait
+ // refs.
+ let scope = Scope::Binder {
+ hir_id: trait_ref.trait_ref.hir_ref_id,
+ lifetimes,
+ s: self.scope,
+ scope_type,
+ where_bound_origin: None,
+ };
+ self.with(scope, |this| {
+ walk_list!(this, visit_generic_param, trait_ref.bound_generic_params);
+ this.visit_trait_ref(&trait_ref.trait_ref);
+ });
+ }
+}
+
+fn object_lifetime_default<'tcx>(tcx: TyCtxt<'tcx>, param_def_id: DefId) -> ObjectLifetimeDefault {
+ debug_assert_eq!(tcx.def_kind(param_def_id), DefKind::TyParam);
+ let param_def_id = param_def_id.expect_local();
+ let parent_def_id = tcx.local_parent(param_def_id);
+ let generics = tcx.hir().get_generics(parent_def_id).unwrap();
+ let param_hir_id = tcx.local_def_id_to_hir_id(param_def_id);
+ let param = generics.params.iter().find(|p| p.hir_id == param_hir_id).unwrap();
+
+ // Scan the bounds and where-clauses on parameters to extract bounds
+ // of the form `T:'a` so as to determine the `ObjectLifetimeDefault`
+ // for each type parameter.
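+ // For example (illustrative): given
+ //
+ // ```rust
+ // struct S<'a, T: 'a>(&'a T);
+ // ```
+ //
+ // the `T: 'a` bound makes `'a` the object lifetime default for `T`, so a use
+ // like `S<'x, dyn Debug>` is read as `S<'x, dyn Debug + 'x>`; without such a
+ // bound the default would be `'static`.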
+ match param.kind {
+ GenericParamKind::Type { .. } => {
+ let mut set = Set1::Empty;
+
+ // Look for `type: ...` where clauses.
+ for bound in generics.bounds_for_param(param_def_id) {
+ // Ignore `for<'a> type: ...` as they can change what
+ // lifetimes mean (although we could "just" handle it).
+ if !bound.bound_generic_params.is_empty() {
+ continue;
+ }
+
+ for bound in bound.bounds {
+ if let hir::GenericBound::Outlives(ref lifetime) = *bound {
+ set.insert(lifetime.name.normalize_to_macros_2_0());
+ }
+ }
+ }
+
+ match set {
+ Set1::Empty => ObjectLifetimeDefault::Empty,
+ Set1::One(hir::LifetimeName::Static) => ObjectLifetimeDefault::Static,
+ Set1::One(hir::LifetimeName::Param(param_def_id, _)) => {
+ ObjectLifetimeDefault::Param(param_def_id.to_def_id())
+ }
+ _ => ObjectLifetimeDefault::Ambiguous,
+ }
+ }
+ _ => {
+ bug!("object_lifetime_default_raw must only be called on a type parameter")
+ }
+ }
+}
+
+impl<'a, 'tcx> LifetimeContext<'a, 'tcx> {
+ fn with<F>(&mut self, wrap_scope: Scope<'_>, f: F)
+ where
+ F: for<'b> FnOnce(&mut LifetimeContext<'b, 'tcx>),
+ {
+ let LifetimeContext { tcx, map, .. } = self;
+ let mut this = LifetimeContext {
+ tcx: *tcx,
+ map,
+ scope: &wrap_scope,
+ trait_definition_only: self.trait_definition_only,
+ };
+ let span = debug_span!("scope", scope = ?TruncatedScopeDebug(&this.scope));
+ {
+ let _enter = span.enter();
+ f(&mut this);
+ }
+ }
+
+ fn record_late_bound_vars(&mut self, hir_id: hir::HirId, binder: Vec<ty::BoundVariableKind>) {
+ if let Some(old) = self.map.late_bound_vars.insert(hir_id, binder) {
+ bug!(
+ "overwrote bound vars for {hir_id:?}:\nold={old:?}\nnew={:?}",
+ self.map.late_bound_vars[&hir_id]
+ )
+ }
+ }
+
+ /// Adds a scope for this fn's or method's generics, then handles the recursive walk over the contents with `walk`.
+ ///
+ /// Handles visiting fns and methods. These are a bit complicated because we must distinguish
+ /// early- vs late-bound lifetime parameters. We do this by checking which lifetimes appear
+ /// within type bounds; those are early bound lifetimes, and the rest are late bound.
+ ///
+ /// For example:
+ ///
+ /// ```ignore (illustrative)
+ /// fn foo<'a, 'b, 'c, T: Trait<'b>>(...)
+ /// ```
+ ///
+ /// Here `'a` and `'c` are late bound but `'b` is early bound. Note that early- and late-bound
+ /// lifetimes may be interspersed together.
+ ///
+ /// If early bound lifetimes are present, we separate them into their own list (and likewise
+ /// for late bound). They will be numbered sequentially, starting from the lowest index that is
+ /// already in scope (for a fn item, that will be 0, but for a method it might not be). Late
+ /// bound lifetimes are resolved by name and associated with a binder ID (`binder_id`), so the
+ /// ordering is not important there.
+ fn visit_early_late<F>(
+ &mut self,
+ hir_id: hir::HirId,
+ generics: &'tcx hir::Generics<'tcx>,
+ walk: F,
+ ) where
+ F: for<'b, 'c> FnOnce(&'b mut LifetimeContext<'c, 'tcx>),
+ {
+ let mut named_late_bound_vars = 0;
+ let lifetimes: FxIndexMap<LocalDefId, Region> = generics
+ .params
+ .iter()
+ .filter_map(|param| match param.kind {
+ GenericParamKind::Lifetime { .. } => {
+ if self.tcx.is_late_bound(param.hir_id) {
+ let late_bound_idx = named_late_bound_vars;
+ named_late_bound_vars += 1;
+ Some(Region::late(late_bound_idx, self.tcx.hir(), param))
+ } else {
+ Some(Region::early(self.tcx.hir(), param))
+ }
+ }
+ GenericParamKind::Type { .. } | GenericParamKind::Const { .. } => None,
+ })
+ .collect();
+
+ let binders: Vec<_> = generics
+ .params
+ .iter()
+ .filter(|param| {
+ matches!(param.kind, GenericParamKind::Lifetime { .. })
+ && self.tcx.is_late_bound(param.hir_id)
+ })
+ .enumerate()
+ .map(|(late_bound_idx, param)| {
+ let pair = Region::late(late_bound_idx as u32, self.tcx.hir(), param);
+ late_region_as_bound_region(self.tcx, &pair.1)
+ })
+ .collect();
+ self.record_late_bound_vars(hir_id, binders);
+ let scope = Scope::Binder {
+ hir_id,
+ lifetimes,
+ s: self.scope,
+ scope_type: BinderScopeType::Normal,
+ where_bound_origin: None,
+ };
+ self.with(scope, walk);
+ }
+
+ #[instrument(level = "debug", skip(self))]
+ fn resolve_lifetime_ref(
+ &mut self,
+ region_def_id: LocalDefId,
+ lifetime_ref: &'tcx hir::Lifetime,
+ ) {
+ // Walk up the scope chain, tracking the number of fn scopes
+ // that we pass through, until we find a lifetime with the
+ // given name or we run out of scopes.
+ let mut late_depth = 0;
+ let mut scope = self.scope;
+ let mut outermost_body = None;
+ let result = loop {
+ match *scope {
+ Scope::Body { id, s } => {
+ outermost_body = Some(id);
+ scope = s;
+ }
+
+ Scope::Root => {
+ break None;
+ }
+
+ Scope::Binder { ref lifetimes, scope_type, s, where_bound_origin, .. } => {
+ if let Some(&def) = lifetimes.get(&region_def_id) {
+ break Some(def.shifted(late_depth));
+ }
+ match scope_type {
+ BinderScopeType::Normal => late_depth += 1,
+ BinderScopeType::Concatenating => {}
+ }
+ // Fresh lifetimes in APIT used to be allowed in async fns and forbidden in
+ // regular fns.
+ if let Some(hir::PredicateOrigin::ImplTrait) = where_bound_origin
+ && let hir::LifetimeName::Param(_, hir::ParamName::Fresh) = lifetime_ref.name
+ && let hir::IsAsync::NotAsync = self.tcx.asyncness(lifetime_ref.hir_id.owner.def_id)
+ && !self.tcx.features().anonymous_lifetime_in_impl_trait
+ {
+ let mut diag = rustc_session::parse::feature_err(
+ &self.tcx.sess.parse_sess,
+ sym::anonymous_lifetime_in_impl_trait,
+ lifetime_ref.span,
+ "anonymous lifetimes in `impl Trait` are unstable",
+ );
+
+ if let Some(generics) = self.tcx.hir().get_generics(lifetime_ref.hir_id.owner.def_id) {
+     let new_param_sugg = match generics.span_for_param_suggestion() {
+         Some(_) => (
+             self.tcx
+                 .sess
+                 .source_map()
+                 .span_through_char(generics.span, '<')
+                 .shrink_to_hi(),
+             "'a, ".to_owned(),
+         ),
+         None => (generics.span, "<'a>".to_owned()),
+     };
+
+     let multi_sugg_vec =
+         vec![(lifetime_ref.span.shrink_to_hi(), "'a ".to_owned()), new_param_sugg];
+
+     diag.span_label(lifetime_ref.span, "expected named lifetime parameter");
+     diag.multipart_suggestion(
+         "consider introducing a named lifetime parameter",
+         multi_sugg_vec,
+         rustc_errors::Applicability::MaybeIncorrect,
+     );
+ }
+
+ diag.emit();
+ return;
+ }
+ scope = s;
+ }
+
+ Scope::Elision { s, .. }
+ | Scope::ObjectLifetimeDefault { s, .. }
+ | Scope::Supertrait { s, .. }
+ | Scope::TraitRefBoundary { s, .. } => {
+ scope = s;
+ }
+ }
+ };
+
+ if let Some(mut def) = result {
+ if let Region::EarlyBound(..) = def {
+ // Do not free early-bound regions, only late-bound ones.
+ } else if let Some(body_id) = outermost_body {
+ let fn_id = self.tcx.hir().body_owner(body_id);
+ match self.tcx.hir().get(fn_id) {
+ Node::Item(&hir::Item { kind: hir::ItemKind::Fn(..), .. })
+ | Node::TraitItem(&hir::TraitItem {
+ kind: hir::TraitItemKind::Fn(..), ..
+ })
+ | Node::ImplItem(&hir::ImplItem { kind: hir::ImplItemKind::Fn(..), .. }) => {
+ let scope = self.tcx.hir().local_def_id(fn_id);
+ def = Region::Free(scope.to_def_id(), def.id().unwrap());
+ }
+ _ => {}
+ }
+ }
+
+ self.insert_lifetime(lifetime_ref, def);
+ return;
+ }
+
+ // We may fail to resolve higher-ranked lifetimes that are mentioned by APIT.
+ // AST-based resolution does not care about impl-trait desugaring, which is the
+ // responsibility of lowering. This may create a mismatch between the resolution
+ // the AST found (`region_def_id`), which points to the HRTB, and what HIR allows.
+ // ```
+ // fn foo(x: impl for<'a> Trait<'a, Assoc = impl Copy + 'a>) {}
+ // ```
+ //
+ // In such a case, walk back through the binders to diagnose it properly.
+ let mut scope = self.scope;
+ loop {
+ match *scope {
+ Scope::Binder {
+ where_bound_origin: Some(hir::PredicateOrigin::ImplTrait), ..
+ } => {
+ let mut err = self.tcx.sess.struct_span_err(
+ lifetime_ref.span,
+ "`impl Trait` can only mention lifetimes bound at the fn or impl level",
+ );
+ err.span_note(self.tcx.def_span(region_def_id), "lifetime declared here");
+ err.emit();
+ return;
+ }
+ Scope::Root => break,
+ Scope::Binder { s, .. }
+ | Scope::Body { s, .. }
+ | Scope::Elision { s, .. }
+ | Scope::ObjectLifetimeDefault { s, .. }
+ | Scope::Supertrait { s, .. }
+ | Scope::TraitRefBoundary { s, .. } => {
+ scope = s;
+ }
+ }
+ }
+
+ self.tcx.sess.delay_span_bug(
+ lifetime_ref.span,
+ &format!("Could not resolve {:?} in scope {:#?}", lifetime_ref, self.scope,),
+ );
+ }
+
+ #[instrument(level = "debug", skip(self))]
+ fn visit_segment_args(
+ &mut self,
+ res: Res,
+ depth: usize,
+ generic_args: &'tcx hir::GenericArgs<'tcx>,
+ ) {
+ if generic_args.parenthesized {
+ self.visit_fn_like_elision(
+ generic_args.inputs(),
+ Some(generic_args.bindings[0].ty()),
+ false,
+ );
+ return;
+ }
+
+ for arg in generic_args.args {
+ if let hir::GenericArg::Lifetime(lt) = arg {
+ self.visit_lifetime(lt);
+ }
+ }
+
+ // Figure out if this is a type/trait segment,
+ // which requires object lifetime defaults.
+ let type_def_id = match res {
+ Res::Def(DefKind::AssocTy, def_id) if depth == 1 => Some(self.tcx.parent(def_id)),
+ Res::Def(DefKind::Variant, def_id) if depth == 0 => Some(self.tcx.parent(def_id)),
+ Res::Def(
+ DefKind::Struct
+ | DefKind::Union
+ | DefKind::Enum
+ | DefKind::TyAlias
+ | DefKind::Trait,
+ def_id,
+ ) if depth == 0 => Some(def_id),
+ _ => None,
+ };
+
+ debug!(?type_def_id);
+
+ // Compute a vector of defaults, one for each type parameter,
+ // per the rules given in RFCs 599 and 1156. Example:
+ //
+ // ```rust
+ // struct Foo<'a, T: 'a, U> { }
+ // ```
+ //
+ // If you have `Foo<'x, dyn Bar, dyn Baz>`, we want to default
+ // `dyn Bar` to `dyn Bar + 'x` (because of the `T: 'a` bound)
+ // and `dyn Baz` to `dyn Baz + 'static` (because there is no
+ // such bound).
+ //
+ // Therefore, we would compute `object_lifetime_defaults` to a
+ // vector like `['x, 'static]`. Note that the vector only
+ // includes type parameters.
+ let object_lifetime_defaults = type_def_id.map_or_else(Vec::new, |def_id| {
+ let in_body = {
+ let mut scope = self.scope;
+ loop {
+ match *scope {
+ Scope::Root => break false,
+
+ Scope::Body { .. } => break true,
+
+ Scope::Binder { s, .. }
+ | Scope::Elision { s, .. }
+ | Scope::ObjectLifetimeDefault { s, .. }
+ | Scope::Supertrait { s, .. }
+ | Scope::TraitRefBoundary { s, .. } => {
+ scope = s;
+ }
+ }
+ }
+ };
+
+ let map = &self.map;
+ let generics = self.tcx.generics_of(def_id);
+
+ // `type_def_id` points to an item, so there is nothing to inherit generics from.
+ debug_assert_eq!(generics.parent_count, 0);
+
+ let set_to_region = |set: ObjectLifetimeDefault| match set {
+ ObjectLifetimeDefault::Empty => {
+ if in_body {
+ None
+ } else {
+ Some(Region::Static)
+ }
+ }
+ ObjectLifetimeDefault::Static => Some(Region::Static),
+ ObjectLifetimeDefault::Param(param_def_id) => {
+ // This index can be used with `generic_args` since `parent_count == 0`.
+ let index = generics.param_def_id_to_index[&param_def_id] as usize;
+ generic_args.args.get(index).and_then(|arg| match arg {
+ GenericArg::Lifetime(lt) => map.defs.get(&lt.hir_id).copied(),
+ _ => None,
+ })
+ }
+ ObjectLifetimeDefault::Ambiguous => None,
+ };
+ generics
+ .params
+ .iter()
+ .filter_map(|param| {
+ match self.tcx.def_kind(param.def_id) {
+ // Generic consts don't impose any constraints.
+ //
+ // We still store a dummy value here to allow generic parameters
+ // in an arbitrary order.
+ DefKind::ConstParam => Some(ObjectLifetimeDefault::Empty),
+ DefKind::TyParam => Some(self.tcx.object_lifetime_default(param.def_id)),
+ // We may also get a `Trait` or `TraitAlias` because of how the generic
+ // `Self` parameter works. Ignore it because it can't have a meaningful
+ // lifetime default.
+ DefKind::LifetimeParam | DefKind::Trait | DefKind::TraitAlias => None,
+ dk => bug!("unexpected def_kind {:?}", dk),
+ }
+ })
+ .map(set_to_region)
+ .collect()
+ });
+
+ debug!(?object_lifetime_defaults);
+
+ let mut i = 0;
+ for arg in generic_args.args {
+ match arg {
+ GenericArg::Lifetime(_) => {}
+ GenericArg::Type(ty) => {
+ if let Some(&lt) = object_lifetime_defaults.get(i) {
+ let scope = Scope::ObjectLifetimeDefault { lifetime: lt, s: self.scope };
+ self.with(scope, |this| this.visit_ty(ty));
+ } else {
+ self.visit_ty(ty);
+ }
+ i += 1;
+ }
+ GenericArg::Const(ct) => {
+ self.visit_anon_const(&ct.value);
+ i += 1;
+ }
+ GenericArg::Infer(inf) => {
+ self.visit_id(inf.hir_id);
+ i += 1;
+ }
+ }
+ }
+
+ // Hack: when resolving the type `XX` in a binding like `dyn
+ // Foo<'b, Item = XX>`, the current object-lifetime default
+ // would be to examine the trait `Foo` to check whether it has
+ // a lifetime bound declared on `Item`. e.g., if `Foo` is
+ // declared like so, then the default object lifetime bound in
+ // `XX` should be `'b`:
+ //
+ // ```rust
+ // trait Foo<'a> {
+ // type Item: 'a;
+ // }
+ // ```
+ //
+ // but if we just have `type Item;`, then it would be
+ // `'static`. However, we don't get all of this logic correct.
+ //
+ // Instead, we do something hacky: if there are no lifetime parameters
+ // to the trait, then we simply use a default object lifetime
+ // bound of `'static`, because there is no other possibility. On the other hand,
+ // if there ARE lifetime parameters, then we require the user to give an
+ // explicit bound for now.
+ //
+ // This is intended to leave room for us to implement the
+ // correct behavior in the future.
+ let has_lifetime_parameter =
+ generic_args.args.iter().any(|arg| matches!(arg, GenericArg::Lifetime(_)));
+
+ // Resolve lifetimes found in the bindings, so either in the type `XX` in `Item = XX` or
+ // in the trait ref `YY<...>` in `Item: YY<...>`.
+ for binding in generic_args.bindings {
+ let scope = Scope::ObjectLifetimeDefault {
+ lifetime: if has_lifetime_parameter { None } else { Some(Region::Static) },
+ s: self.scope,
+ };
+ if let Some(type_def_id) = type_def_id {
+ let lifetimes = LifetimeContext::supertrait_hrtb_lifetimes(
+ self.tcx,
+ type_def_id,
+ binding.ident,
+ );
+ self.with(scope, |this| {
+ let scope = Scope::Supertrait {
+ lifetimes: lifetimes.unwrap_or_default(),
+ s: this.scope,
+ };
+ this.with(scope, |this| this.visit_assoc_type_binding(binding));
+ });
+ } else {
+ self.with(scope, |this| this.visit_assoc_type_binding(binding));
+ }
+ }
+ }
+
+ /// Returns all the late-bound vars that come into scope from supertrait HRTBs, based on the
+ /// associated type name and starting trait.
+ /// For example, imagine we have
+ /// ```ignore (illustrative)
+ /// trait Foo<'a, 'b> {
+ /// type As;
+ /// }
+ /// trait Bar<'b>: for<'a> Foo<'a, 'b> {}
+ /// trait Baz: for<'b> Bar<'b> {}
+ /// ```
+ /// In this case, if we wanted the supertrait HRTB lifetimes for `As` starting
+ /// from the trait `Baz`, we would return `Some(['b, 'a])`.
+ fn supertrait_hrtb_lifetimes(
+ tcx: TyCtxt<'tcx>,
+ def_id: DefId,
+ assoc_name: Ident,
+ ) -> Option<Vec<ty::BoundVariableKind>> {
+ let trait_defines_associated_type_named = |trait_def_id: DefId| {
+ tcx.associated_items(trait_def_id)
+ .find_by_name_and_kind(tcx, assoc_name, ty::AssocKind::Type, trait_def_id)
+ .is_some()
+ };
+
+ use smallvec::{smallvec, SmallVec};
+ let mut stack: SmallVec<[(DefId, SmallVec<[ty::BoundVariableKind; 8]>); 8]> =
+ smallvec![(def_id, smallvec![])];
+ let mut visited: FxHashSet<DefId> = FxHashSet::default();
+ loop {
+ let Some((def_id, bound_vars)) = stack.pop() else {
+ break None;
+ };
+ // See issue #83753. If someone writes an associated type on a non-trait, just treat it as
+ // there being no supertrait HRTBs.
+ match tcx.def_kind(def_id) {
+ DefKind::Trait | DefKind::TraitAlias | DefKind::Impl => {}
+ _ => break None,
+ }
+
+ if trait_defines_associated_type_named(def_id) {
+ break Some(bound_vars.into_iter().collect());
+ }
+ let predicates =
+ tcx.super_predicates_that_define_assoc_type((def_id, Some(assoc_name)));
+ let obligations = predicates.predicates.iter().filter_map(|&(pred, _)| {
+ let bound_predicate = pred.kind();
+ match bound_predicate.skip_binder() {
+ ty::PredicateKind::Trait(data) => {
+ // The order here needs to match what we would get from `subst_supertrait`
+ let pred_bound_vars = bound_predicate.bound_vars();
+ let mut all_bound_vars = bound_vars.clone();
+ all_bound_vars.extend(pred_bound_vars.iter());
+ let super_def_id = data.trait_ref.def_id;
+ Some((super_def_id, all_bound_vars))
+ }
+ _ => None,
+ }
+ });
+
+ let obligations = obligations.filter(|o| visited.insert(o.0));
+ stack.extend(obligations);
+ }
+ }
+
+ #[instrument(level = "debug", skip(self))]
+ fn visit_fn_like_elision(
+ &mut self,
+ inputs: &'tcx [hir::Ty<'tcx>],
+ output: Option<&'tcx hir::Ty<'tcx>>,
+ in_closure: bool,
+ ) {
+ self.with(Scope::Elision { s: self.scope }, |this| {
+ for input in inputs {
+ this.visit_ty(input);
+ }
+ if !in_closure && let Some(output) = output {
+ this.visit_ty(output);
+ }
+ });
+ if in_closure && let Some(output) = output {
+ self.visit_ty(output);
+ }
+ }
+
+ fn resolve_object_lifetime_default(&mut self, lifetime_ref: &'tcx hir::Lifetime) {
+ debug!("resolve_object_lifetime_default(lifetime_ref={:?})", lifetime_ref);
+ let mut late_depth = 0;
+ let mut scope = self.scope;
+ let lifetime = loop {
+ match *scope {
+ Scope::Binder { s, scope_type, .. } => {
+ match scope_type {
+ BinderScopeType::Normal => late_depth += 1,
+ BinderScopeType::Concatenating => {}
+ }
+ scope = s;
+ }
+
+ Scope::Root | Scope::Elision { .. } => break Region::Static,
+
+ Scope::Body { .. } | Scope::ObjectLifetimeDefault { lifetime: None, .. } => return,
+
+ Scope::ObjectLifetimeDefault { lifetime: Some(l), .. } => break l,
+
+ Scope::Supertrait { s, .. } | Scope::TraitRefBoundary { s, .. } => {
+ scope = s;
+ }
+ }
+ };
+ self.insert_lifetime(lifetime_ref, lifetime.shifted(late_depth));
+ }
+
+ #[instrument(level = "debug", skip(self))]
+ fn insert_lifetime(&mut self, lifetime_ref: &'tcx hir::Lifetime, def: Region) {
+ debug!(
+ node = ?self.tcx.hir().node_to_string(lifetime_ref.hir_id),
+ span = ?self.tcx.sess.source_map().span_to_diagnostic_string(lifetime_ref.span)
+ );
+ self.map.defs.insert(lifetime_ref.hir_id, def);
+ }
+
+ /// Sometimes we resolve a lifetime, but later find that it is an
+ /// error (esp. around impl trait). In that case, we remove the
+ /// entry into `map.defs` so as not to confuse later code.
+ fn uninsert_lifetime_on_error(&mut self, lifetime_ref: &'tcx hir::Lifetime, bad_def: Region) {
+ let old_value = self.map.defs.remove(&lifetime_ref.hir_id);
+ assert_eq!(old_value, Some(bad_def));
+ }
+}
+
+/// Detects late-bound lifetimes and inserts them into
+/// `late_bound`.
+///
+/// A region declared on a fn is **late-bound** if:
+/// - it is constrained by an argument type;
+/// - it does not appear in a where-clause.
+///
+/// "Constrained" basically means that it appears in any type but
+/// not amongst the inputs to a projection. In other words, `<&'a
+/// T as Trait<'b>>::Foo` does not constrain `'a` or `'b`.
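+///
+/// For example (illustrative):
+/// ```ignore (illustrative)
+/// fn foo<'a, 'b, T: Trait<'b>>(x: &'a T) { /* ... */ }
+/// ```
+/// Here `'a` is late-bound: it is constrained by the argument type and does not
+/// appear in a where-clause. `'b` is early-bound because it appears in the bound
+/// `T: Trait<'b>`.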
+fn is_late_bound_map(tcx: TyCtxt<'_>, def_id: LocalDefId) -> Option<&FxIndexSet<LocalDefId>> {
+ let hir_id = tcx.hir().local_def_id_to_hir_id(def_id);
+ let decl = tcx.hir().fn_decl_by_hir_id(hir_id)?;
+ let generics = tcx.hir().get_generics(def_id)?;
+
+ let mut late_bound = FxIndexSet::default();
+
+ let mut constrained_by_input = ConstrainedCollector::default();
+ for arg_ty in decl.inputs {
+ constrained_by_input.visit_ty(arg_ty);
+ }
+
+ let mut appears_in_output = AllCollector::default();
+ intravisit::walk_fn_ret_ty(&mut appears_in_output, &decl.output);
+
+ debug!(?constrained_by_input.regions);
+
+ // Walk the lifetimes that appear in where clauses.
+ //
+ // Subtle point: because we disallow nested bindings, we can just
+ // ignore binders here and scrape up all names we see.
+ let mut appears_in_where_clause = AllCollector::default();
+ appears_in_where_clause.visit_generics(generics);
+ debug!(?appears_in_where_clause.regions);
+
+ // Late bound regions are those that:
+ // - appear in the inputs
+ // - do not appear in the where-clauses
+ // - are not implicitly captured by `impl Trait`
+ for param in generics.params {
+ match param.kind {
+ hir::GenericParamKind::Lifetime { .. } => { /* fall through */ }
+
+ // Neither types nor consts are late-bound.
+ hir::GenericParamKind::Type { .. } | hir::GenericParamKind::Const { .. } => continue,
+ }
+
+ let param_def_id = tcx.hir().local_def_id(param.hir_id);
+
+ // appears in the where clauses? early-bound.
+ if appears_in_where_clause.regions.contains(&param_def_id) {
+ continue;
+ }
+
+ // does not appear in the inputs, but appears in the return type? early-bound.
+ if !constrained_by_input.regions.contains(&param_def_id)
+ && appears_in_output.regions.contains(&param_def_id)
+ {
+ continue;
+ }
+
+ debug!("lifetime {:?} with id {:?} is late-bound", param.name.ident(), param.hir_id);
+
+ let inserted = late_bound.insert(param_def_id);
+ assert!(inserted, "visited lifetime {:?} twice", param.hir_id);
+ }
+
+ debug!(?late_bound);
+ return Some(tcx.arena.alloc(late_bound));
+
+ #[derive(Default)]
+ struct ConstrainedCollector {
+ regions: FxHashSet<LocalDefId>,
+ }
+
+ impl<'v> Visitor<'v> for ConstrainedCollector {
+ fn visit_ty(&mut self, ty: &'v hir::Ty<'v>) {
+ match ty.kind {
+ hir::TyKind::Path(
+ hir::QPath::Resolved(Some(_), _) | hir::QPath::TypeRelative(..),
+ ) => {
+ // ignore lifetimes appearing in associated type
+ // projections, as they are not *constrained*
+ // (defined above)
+ }
+
+ hir::TyKind::Path(hir::QPath::Resolved(None, ref path)) => {
+ // consider only the lifetimes on the final
+ // segment; I am not sure it's even currently
+ // valid to have them elsewhere, but even if it
+ // is, those would be potentially inputs to
+ // projections
+ if let Some(last_segment) = path.segments.last() {
+ self.visit_path_segment(last_segment);
+ }
+ }
+
+ _ => {
+ intravisit::walk_ty(self, ty);
+ }
+ }
+ }
+
+ fn visit_lifetime(&mut self, lifetime_ref: &'v hir::Lifetime) {
+ if let hir::LifetimeName::Param(def_id, _) = lifetime_ref.name {
+ self.regions.insert(def_id);
+ }
+ }
+ }
+
+ #[derive(Default)]
+ struct AllCollector {
+ regions: FxHashSet<LocalDefId>,
+ }
+
+ impl<'v> Visitor<'v> for AllCollector {
+ fn visit_lifetime(&mut self, lifetime_ref: &'v hir::Lifetime) {
+ if let hir::LifetimeName::Param(def_id, _) = lifetime_ref.name {
+ self.regions.insert(def_id);
+ }
+ }
+ }
+}
diff --git a/compiler/rustc_hir_analysis/src/collect/predicates_of.rs b/compiler/rustc_hir_analysis/src/collect/predicates_of.rs
new file mode 100644
index 000000000..2e84e1d01
--- /dev/null
+++ b/compiler/rustc_hir_analysis/src/collect/predicates_of.rs
@@ -0,0 +1,707 @@
+use crate::astconv::AstConv;
+use crate::bounds::Bounds;
+use crate::collect::ItemCtxt;
+use crate::constrained_generic_params as cgp;
+use hir::{HirId, Node};
+use rustc_data_structures::fx::FxIndexSet;
+use rustc_hir as hir;
+use rustc_hir::def::DefKind;
+use rustc_hir::def_id::{DefId, LocalDefId};
+use rustc_hir::intravisit::{self, Visitor};
+use rustc_middle::ty::subst::InternalSubsts;
+use rustc_middle::ty::ToPredicate;
+use rustc_middle::ty::{self, Ty, TyCtxt};
+use rustc_span::symbol::{sym, Ident};
+use rustc_span::{Span, DUMMY_SP};
+
+#[derive(Debug)]
+struct OnlySelfBounds(bool);
+
+/// Returns a list of all type predicates (explicit and implicit) for the definition with
+/// ID `def_id`. This includes all predicates returned by `predicates_defined_on`, plus
+/// `Self: Trait` predicates for traits.
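+///
+/// For example (illustrative):
+/// ```ignore (illustrative)
+/// trait Foo<T: Clone> {}
+/// // `predicates_of(Foo)` includes both the user-written `T: Clone` and the
+/// // implicit `Self: Foo<T>`.
+/// ```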
+pub(super) fn predicates_of(tcx: TyCtxt<'_>, def_id: DefId) -> ty::GenericPredicates<'_> {
+ let mut result = tcx.predicates_defined_on(def_id);
+
+ if tcx.is_trait(def_id) {
+ // For traits, add `Self: Trait` predicate. This is
+ // not part of the predicates that a user writes, but it
+ // is something that one must prove in order to invoke a
+ // method or project an associated type.
+ //
+ // In the chalk setup, this predicate is not part of the
+ // "predicates" for a trait item. But it is useful in
+ // rustc because if you directly (e.g.) invoke a trait
+ // method like `Trait::method(...)`, you must naturally
+ // prove that the trait applies to the types that were
+ // used, and adding the predicate into this list ensures
+ // that this is done.
+ //
+ // We use a DUMMY_SP here as a way to signal trait bounds that come
+ // from the trait itself that *shouldn't* be shown as the source of
+ // an obligation and instead be skipped. Otherwise we'd use
+ // `tcx.def_span(def_id);`
+
+ let constness = if tcx.has_attr(def_id, sym::const_trait) {
+ ty::BoundConstness::ConstIfConst
+ } else {
+ ty::BoundConstness::NotConst
+ };
+
+ let span = rustc_span::DUMMY_SP;
+ result.predicates =
+ tcx.arena.alloc_from_iter(result.predicates.iter().copied().chain(std::iter::once((
+ ty::TraitRef::identity(tcx, def_id).with_constness(constness).to_predicate(tcx),
+ span,
+ ))));
+ }
+ debug!("predicates_of(def_id={:?}) = {:?}", def_id, result);
+ result
+}
+
+/// Returns a list of user-specified type predicates for the definition with ID `def_id`.
+/// N.B., this does not include any implied/inferred constraints.
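+///
+/// For example (illustrative):
+/// ```ignore (illustrative)
+/// fn f<T: Clone>(x: T) where T: Send {}
+/// // The explicit predicates for `f` include `T: Clone` and `T: Send`.
+/// ```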
+#[instrument(level = "trace", skip(tcx), ret)]
+fn gather_explicit_predicates_of(tcx: TyCtxt<'_>, def_id: DefId) -> ty::GenericPredicates<'_> {
+ use rustc_hir::*;
+
+ let hir_id = tcx.hir().local_def_id_to_hir_id(def_id.expect_local());
+ let node = tcx.hir().get(hir_id);
+
+ let mut is_trait = None;
+ let mut is_default_impl_trait = None;
+
+ let icx = ItemCtxt::new(tcx, def_id);
+
+ const NO_GENERICS: &hir::Generics<'_> = hir::Generics::empty();
+
+ // We use an `IndexSet` to preserve the order of insertion.
+ // Preserving the order of insertion is important here so as not to break UI tests.
+ let mut predicates: FxIndexSet<(ty::Predicate<'_>, Span)> = FxIndexSet::default();
+
+ let ast_generics = match node {
+ Node::TraitItem(item) => item.generics,
+
+ Node::ImplItem(item) => item.generics,
+
+ Node::Item(item) => {
+ match item.kind {
+ ItemKind::Impl(ref impl_) => {
+ if impl_.defaultness.is_default() {
+ is_default_impl_trait = tcx.impl_trait_ref(def_id).map(ty::Binder::dummy);
+ }
+ &impl_.generics
+ }
+ ItemKind::Fn(.., ref generics, _)
+ | ItemKind::TyAlias(_, ref generics)
+ | ItemKind::Enum(_, ref generics)
+ | ItemKind::Struct(_, ref generics)
+ | ItemKind::Union(_, ref generics) => *generics,
+
+ ItemKind::Trait(_, _, ref generics, ..) => {
+ is_trait = Some(ty::TraitRef::identity(tcx, def_id));
+ *generics
+ }
+ ItemKind::TraitAlias(ref generics, _) => {
+ is_trait = Some(ty::TraitRef::identity(tcx, def_id));
+ *generics
+ }
+ ItemKind::OpaqueTy(OpaqueTy {
+ origin: hir::OpaqueTyOrigin::AsyncFn(..) | hir::OpaqueTyOrigin::FnReturn(..),
+ ..
+ }) => {
+ // return-position impl trait
+ //
+ // We don't inherit predicates from the parent here:
+ // If we have, say `fn f<'a, T: 'a>() -> impl Sized {}`
+ // then the return type is `f::<'static, T>::{{opaque}}`.
+ //
+ // If we inherited the predicates of `f` then we would
+ // require that `T: 'static` to show that the return
+ // type is well-formed.
+ //
+ // The only way to have something with this opaque type
+ // is from the return type of the containing function,
+ // which will ensure that the function's predicates
+ // hold.
+ return ty::GenericPredicates { parent: None, predicates: &[] };
+ }
+ ItemKind::OpaqueTy(OpaqueTy {
+ ref generics,
+ origin: hir::OpaqueTyOrigin::TyAlias,
+ ..
+ }) => {
+ // type-alias impl trait
+ generics
+ }
+
+ _ => NO_GENERICS,
+ }
+ }
+
+ Node::ForeignItem(item) => match item.kind {
+ ForeignItemKind::Static(..) => NO_GENERICS,
+ ForeignItemKind::Fn(_, _, ref generics) => *generics,
+ ForeignItemKind::Type => NO_GENERICS,
+ },
+
+ _ => NO_GENERICS,
+ };
+
+ let generics = tcx.generics_of(def_id);
+ let parent_count = generics.parent_count as u32;
+ let has_own_self = generics.has_self && parent_count == 0;
+
+ // Below we'll consider the bounds on the type parameters (including `Self`)
+ // and the explicit where-clauses, but to get the full set of predicates
+ // on a trait we need to add in the supertrait bounds and bounds found on
+ // associated types.
+ if let Some(_trait_ref) = is_trait {
+ predicates.extend(tcx.super_predicates_of(def_id).predicates.iter().cloned());
+ }
+
+ // In default impls, we can assume that the self type implements
+ // the trait. So in:
+ //
+ // default impl Foo for Bar { .. }
+ //
+    // we add a default where clause `Bar: Foo`. We do a similar thing for traits
+ // (see below). Recall that a default impl is not itself an impl, but rather a
+ // set of defaults that can be incorporated into another impl.
+ if let Some(trait_ref) = is_default_impl_trait {
+ predicates.insert((trait_ref.without_const().to_predicate(tcx), tcx.def_span(def_id)));
+ }
+
+ // Collect the region predicates that were declared inline as
+ // well. In the case of parameters declared on a fn or method, we
+ // have to be careful to only iterate over early-bound regions.
+ let mut index = parent_count
+ + has_own_self as u32
+ + super::early_bound_lifetimes_from_generics(tcx, ast_generics).count() as u32;
+
+ trace!(?predicates);
+ trace!(?ast_generics);
+
+ // Collect the predicates that were written inline by the user on each
+ // type parameter (e.g., `<T: Foo>`).
+ for param in ast_generics.params {
+ match param.kind {
+ // We already dealt with early bound lifetimes above.
+ GenericParamKind::Lifetime { .. } => (),
+ GenericParamKind::Type { .. } => {
+ let name = param.name.ident().name;
+ let param_ty = ty::ParamTy::new(index, name).to_ty(tcx);
+ index += 1;
+
+ let mut bounds = Bounds::default();
+ // Params are implicitly sized unless a `?Sized` bound is found
+ <dyn AstConv<'_>>::add_implicitly_sized(
+ &icx,
+ &mut bounds,
+ &[],
+ Some((param.hir_id, ast_generics.predicates)),
+ param.span,
+ );
+ trace!(?bounds);
+ predicates.extend(bounds.predicates(tcx, param_ty));
+ trace!(?predicates);
+ }
+ GenericParamKind::Const { .. } => {
+ // Bounds on const parameters are currently not possible.
+ index += 1;
+ }
+ }
+ }
+
+ trace!(?predicates);
+ // Add in the bounds that appear in the where-clause.
+ for predicate in ast_generics.predicates {
+ match predicate {
+ hir::WherePredicate::BoundPredicate(bound_pred) => {
+ let ty = icx.to_ty(bound_pred.bounded_ty);
+ let bound_vars = icx.tcx.late_bound_vars(bound_pred.hir_id);
+
+ // Keep the type around in a dummy predicate, in case of no bounds.
+ // That way, `where Ty:` is not a complete noop (see #53696) and `Ty`
+ // is still checked for WF.
+ if bound_pred.bounds.is_empty() {
+ if let ty::Param(_) = ty.kind() {
+ // This is a `where T:`, which can be in the HIR from the
+ // transformation that moves `?Sized` to `T`'s declaration.
+ // We can skip the predicate because type parameters are
+                        // trivially WF, but we also *should* skip it, to avoid
+                        // exposing users who never wrote `where Type:,` themselves
+                        // to compiler/tooling bugs from not handling WF predicates.
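+                        //
+                        // Illustrative (not part of the original change): in
+                        // `fn f<T>() where T:, {}` the empty bound list on the
+                        // bare parameter `T` is dropped here, whereas in
+                        // `fn f<T>() where Vec<T>:, {}` the `else` branch below
+                        // keeps a `WellFormed(Vec<T>)` predicate so `Vec<T>` is
+                        // still checked.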
+ } else {
+ let span = bound_pred.bounded_ty.span;
+ let predicate = ty::Binder::bind_with_vars(
+ ty::PredicateKind::WellFormed(ty.into()),
+ bound_vars,
+ );
+ predicates.insert((predicate.to_predicate(tcx), span));
+ }
+ }
+
+ let mut bounds = Bounds::default();
+ <dyn AstConv<'_>>::add_bounds(
+ &icx,
+ ty,
+ bound_pred.bounds.iter(),
+ &mut bounds,
+ bound_vars,
+ );
+ predicates.extend(bounds.predicates(tcx, ty));
+ }
+
+ hir::WherePredicate::RegionPredicate(region_pred) => {
+ let r1 = <dyn AstConv<'_>>::ast_region_to_region(&icx, &region_pred.lifetime, None);
+ predicates.extend(region_pred.bounds.iter().map(|bound| {
+ let (r2, span) = match bound {
+ hir::GenericBound::Outlives(lt) => {
+ (<dyn AstConv<'_>>::ast_region_to_region(&icx, lt, None), lt.span)
+ }
+ _ => bug!(),
+ };
+ let pred = ty::Binder::dummy(ty::PredicateKind::RegionOutlives(
+ ty::OutlivesPredicate(r1, r2),
+ ))
+ .to_predicate(icx.tcx);
+
+ (pred, span)
+ }))
+ }
+
+ hir::WherePredicate::EqPredicate(..) => {
+ // FIXME(#20041)
+ }
+ }
+ }
+
+ if tcx.features().generic_const_exprs {
+ predicates.extend(const_evaluatable_predicates_of(tcx, def_id.expect_local()));
+ }
+
+ let mut predicates: Vec<_> = predicates.into_iter().collect();
+
+ // Subtle: before we store the predicates into the tcx, we
+ // sort them so that predicates like `T: Foo<Item=U>` come
+ // before uses of `U`. This avoids false ambiguity errors
+ // in trait checking. See `setup_constraining_predicates`
+ // for details.
+ if let Node::Item(&Item { kind: ItemKind::Impl { .. }, .. }) = node {
+ let self_ty = tcx.type_of(def_id);
+ let trait_ref = tcx.impl_trait_ref(def_id);
+ cgp::setup_constraining_predicates(
+ tcx,
+ &mut predicates,
+ trait_ref,
+ &mut cgp::parameters_for_impl(self_ty, trait_ref),
+ );
+ }
+
+ ty::GenericPredicates {
+ parent: generics.parent,
+ predicates: tcx.arena.alloc_from_iter(predicates),
+ }
+}
+
+fn const_evaluatable_predicates_of<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ def_id: LocalDefId,
+) -> FxIndexSet<(ty::Predicate<'tcx>, Span)> {
+ struct ConstCollector<'tcx> {
+ tcx: TyCtxt<'tcx>,
+ preds: FxIndexSet<(ty::Predicate<'tcx>, Span)>,
+ }
+
+ impl<'tcx> intravisit::Visitor<'tcx> for ConstCollector<'tcx> {
+ fn visit_anon_const(&mut self, c: &'tcx hir::AnonConst) {
+ let def_id = self.tcx.hir().local_def_id(c.hir_id);
+ let ct = ty::Const::from_anon_const(self.tcx, def_id);
+ if let ty::ConstKind::Unevaluated(_) = ct.kind() {
+ let span = self.tcx.hir().span(c.hir_id);
+ self.preds.insert((
+ ty::Binder::dummy(ty::PredicateKind::ConstEvaluatable(ct))
+ .to_predicate(self.tcx),
+ span,
+ ));
+ }
+ }
+
+ fn visit_const_param_default(&mut self, _param: HirId, _ct: &'tcx hir::AnonConst) {
+ // Do not look into const param defaults,
+ // these get checked when they are actually instantiated.
+ //
+ // We do not want the following to error:
+ //
+ // struct Foo<const N: usize, const M: usize = { N + 1 }>;
+ // struct Bar<const N: usize>(Foo<N, 3>);
+ }
+ }
+
+ let hir_id = tcx.hir().local_def_id_to_hir_id(def_id);
+ let node = tcx.hir().get(hir_id);
+
+ let mut collector = ConstCollector { tcx, preds: FxIndexSet::default() };
+ if let hir::Node::Item(item) = node && let hir::ItemKind::Impl(ref impl_) = item.kind {
+ if let Some(of_trait) = &impl_.of_trait {
+ debug!("const_evaluatable_predicates_of({:?}): visit impl trait_ref", def_id);
+ collector.visit_trait_ref(of_trait);
+ }
+
+ debug!("const_evaluatable_predicates_of({:?}): visit_self_ty", def_id);
+ collector.visit_ty(impl_.self_ty);
+ }
+
+ if let Some(generics) = node.generics() {
+ debug!("const_evaluatable_predicates_of({:?}): visit_generics", def_id);
+ collector.visit_generics(generics);
+ }
+
+ if let Some(fn_sig) = tcx.hir().fn_sig_by_hir_id(hir_id) {
+ debug!("const_evaluatable_predicates_of({:?}): visit_fn_decl", def_id);
+ collector.visit_fn_decl(fn_sig.decl);
+ }
+ debug!("const_evaluatable_predicates_of({:?}) = {:?}", def_id, collector.preds);
+
+ collector.preds
+}
+
+pub(super) fn trait_explicit_predicates_and_bounds(
+ tcx: TyCtxt<'_>,
+ def_id: LocalDefId,
+) -> ty::GenericPredicates<'_> {
+ assert_eq!(tcx.def_kind(def_id), DefKind::Trait);
+ gather_explicit_predicates_of(tcx, def_id.to_def_id())
+}
+
+pub(super) fn explicit_predicates_of<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ def_id: DefId,
+) -> ty::GenericPredicates<'tcx> {
+ let def_kind = tcx.def_kind(def_id);
+ if let DefKind::Trait = def_kind {
+ // Remove bounds on associated types from the predicates, they will be
+ // returned by `explicit_item_bounds`.
+ let predicates_and_bounds = tcx.trait_explicit_predicates_and_bounds(def_id.expect_local());
+ let trait_identity_substs = InternalSubsts::identity_for_item(tcx, def_id);
+
+ let is_assoc_item_ty = |ty: Ty<'tcx>| {
+ // For a predicate from a where clause to become a bound on an
+ // associated type:
+ // * It must use the identity substs of the item.
+ // * Since any generic parameters on the item are not in scope,
+ // this means that the item is not a GAT, and its identity
+ // substs are the same as the trait's.
+ // * It must be an associated type for this trait (*not* a
+ // supertrait).
+ if let ty::Projection(projection) = ty.kind() {
+ projection.substs == trait_identity_substs
+ && tcx.associated_item(projection.item_def_id).container_id(tcx) == def_id
+ } else {
+ false
+ }
+ };
+
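+        // Illustrative (not part of the original change): in
+        // `trait Foo { type Assoc; }`, a clause `where Self::Assoc: Clone`
+        // projects an associated type of `Foo` itself using the trait's
+        // identity substs, so `is_assoc_item_ty` matches and the predicate is
+        // filtered out below, to be surfaced via `explicit_item_bounds` instead.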
+ let predicates: Vec<_> = predicates_and_bounds
+ .predicates
+ .iter()
+ .copied()
+ .filter(|(pred, _)| match pred.kind().skip_binder() {
+ ty::PredicateKind::Trait(tr) => !is_assoc_item_ty(tr.self_ty()),
+ ty::PredicateKind::Projection(proj) => {
+ !is_assoc_item_ty(proj.projection_ty.self_ty())
+ }
+ ty::PredicateKind::TypeOutlives(outlives) => !is_assoc_item_ty(outlives.0),
+ _ => true,
+ })
+ .collect();
+ if predicates.len() == predicates_and_bounds.predicates.len() {
+ predicates_and_bounds
+ } else {
+ ty::GenericPredicates {
+ parent: predicates_and_bounds.parent,
+ predicates: tcx.arena.alloc_slice(&predicates),
+ }
+ }
+ } else {
+ if matches!(def_kind, DefKind::AnonConst) && tcx.lazy_normalization() {
+ let hir_id = tcx.hir().local_def_id_to_hir_id(def_id.expect_local());
+ if tcx.hir().opt_const_param_default_param_hir_id(hir_id).is_some() {
+ // In `generics_of` we set the generics' parent to be our parent's parent which means that
+            // we lose out on the predicates of our actual parent if we don't return those predicates here.
+            // (See the comment in `generics_of` for more information on why the parent shenanigans are necessary.)
+ //
+ // struct Foo<T, const N: usize = { <T as Trait>::ASSOC }>(T) where T: Trait;
+ // ^^^ ^^^^^^^^^^^^^^^^^^^^^^^ the def id we are calling
+ // ^^^ explicit_predicates_of on
+            //                          parent item we don't have set as the
+ // parent of generics returned by `generics_of`
+ //
+ // In the above code we want the anon const to have predicates in its param env for `T: Trait`
+ let item_def_id = tcx.hir().get_parent_item(hir_id);
+ // In the above code example we would be calling `explicit_predicates_of(Foo)` here
+ return tcx.explicit_predicates_of(item_def_id);
+ }
+ }
+ gather_explicit_predicates_of(tcx, def_id)
+ }
+}
+
+/// Ensures that the super-predicates of the trait with a `DefId`
+/// of `trait_def_id` are converted and stored. This also ensures that
+/// the transitive super-predicates are converted.
+pub(super) fn super_predicates_of(
+ tcx: TyCtxt<'_>,
+ trait_def_id: DefId,
+) -> ty::GenericPredicates<'_> {
+ tcx.super_predicates_that_define_assoc_type((trait_def_id, None))
+}
+
+/// Ensures that the super-predicates of the trait with a `DefId`
+/// of `trait_def_id` are converted and stored. This also ensures that
+/// the transitive super-predicates are converted.
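+///
+/// Illustrative example (not part of the original change): for
+///
+/// ```ignore (illustrative)
+/// trait Foo: Bar + Baz<Item = u32> {}
+/// ```
+///
+/// the computed super-predicates are roughly `Self: Bar`, `Self: Baz`, and the
+/// projection `<Self as Baz>::Item == u32`; when `assoc_name` is `Some(Item)`,
+/// only the bounds whose trait may define an associated type named `Item` are
+/// converted.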
+pub(super) fn super_predicates_that_define_assoc_type(
+ tcx: TyCtxt<'_>,
+ (trait_def_id, assoc_name): (DefId, Option<Ident>),
+) -> ty::GenericPredicates<'_> {
+ if trait_def_id.is_local() {
+ debug!("local trait");
+ let trait_hir_id = tcx.hir().local_def_id_to_hir_id(trait_def_id.expect_local());
+
+ let Node::Item(item) = tcx.hir().get(trait_hir_id) else {
+ bug!("trait_node_id {} is not an item", trait_hir_id);
+ };
+
+ let (generics, bounds) = match item.kind {
+ hir::ItemKind::Trait(.., ref generics, ref supertraits, _) => (generics, supertraits),
+ hir::ItemKind::TraitAlias(ref generics, ref supertraits) => (generics, supertraits),
+ _ => span_bug!(item.span, "super_predicates invoked on non-trait"),
+ };
+
+ let icx = ItemCtxt::new(tcx, trait_def_id);
+
+ // Convert the bounds that follow the colon, e.g., `Bar + Zed` in `trait Foo: Bar + Zed`.
+ let self_param_ty = tcx.types.self_param;
+ let superbounds1 = if let Some(assoc_name) = assoc_name {
+ <dyn AstConv<'_>>::compute_bounds_that_match_assoc_type(
+ &icx,
+ self_param_ty,
+ bounds,
+ assoc_name,
+ )
+ } else {
+ <dyn AstConv<'_>>::compute_bounds(&icx, self_param_ty, bounds)
+ };
+
+ let superbounds1 = superbounds1.predicates(tcx, self_param_ty);
+
+ // Convert any explicit superbounds in the where-clause,
+ // e.g., `trait Foo where Self: Bar`.
+ // In the case of trait aliases, however, we include all bounds in the where-clause,
+ // so e.g., `trait Foo = where u32: PartialEq<Self>` would include `u32: PartialEq<Self>`
+ // as one of its "superpredicates".
+ let is_trait_alias = tcx.is_trait_alias(trait_def_id);
+ let superbounds2 = icx.type_parameter_bounds_in_generics(
+ generics,
+ item.hir_id(),
+ self_param_ty,
+ OnlySelfBounds(!is_trait_alias),
+ assoc_name,
+ );
+
+ // Combine the two lists to form the complete set of superbounds:
+ let superbounds = &*tcx.arena.alloc_from_iter(superbounds1.into_iter().chain(superbounds2));
+ debug!(?superbounds);
+
+ // Now require that immediate supertraits are converted,
+ // which will, in turn, reach indirect supertraits.
+ if assoc_name.is_none() {
+ for &(pred, span) in superbounds {
+ debug!("superbound: {:?}", pred);
+ if let ty::PredicateKind::Trait(bound) = pred.kind().skip_binder() {
+ tcx.at(span).super_predicates_of(bound.def_id());
+ }
+ }
+ }
+
+ ty::GenericPredicates { parent: None, predicates: superbounds }
+ } else {
+ // if `assoc_name` is None, then the query should've been redirected to an
+ // external provider
+ assert!(assoc_name.is_some());
+ tcx.super_predicates_of(trait_def_id)
+ }
+}
+
+/// Returns the predicates defined on `item_def_id` of the form
+/// `X: Foo` where `X` is the type parameter `def_id`.
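+///
+/// Illustrative example (not part of the original change): for
+///
+/// ```ignore (illustrative)
+/// fn f<T: Clone>(x: T) where T: Default {}
+/// ```
+///
+/// querying the parameter `T` of `f` yields the bounds written on `T`
+/// (here `T: Clone` and `T: Default`), restricted to those traits that may
+/// define an associated item named `assoc_name`.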
+#[instrument(level = "trace", skip(tcx))]
+pub(super) fn type_param_predicates(
+ tcx: TyCtxt<'_>,
+ (item_def_id, def_id, assoc_name): (DefId, LocalDefId, Ident),
+) -> ty::GenericPredicates<'_> {
+ use rustc_hir::*;
+
+ // In the AST, bounds can derive from two places. Either
+ // written inline like `<T: Foo>` or in a where-clause like
+ // `where T: Foo`.
+
+ let param_id = tcx.hir().local_def_id_to_hir_id(def_id);
+ let param_owner = tcx.hir().ty_param_owner(def_id);
+ let generics = tcx.generics_of(param_owner);
+ let index = generics.param_def_id_to_index[&def_id.to_def_id()];
+ let ty = tcx.mk_ty_param(index, tcx.hir().ty_param_name(def_id));
+
+ // Don't look for bounds where the type parameter isn't in scope.
+ let parent = if item_def_id == param_owner.to_def_id() {
+ None
+ } else {
+ tcx.generics_of(item_def_id).parent
+ };
+
+ let mut result = parent
+ .map(|parent| {
+ let icx = ItemCtxt::new(tcx, parent);
+ icx.get_type_parameter_bounds(DUMMY_SP, def_id.to_def_id(), assoc_name)
+ })
+ .unwrap_or_default();
+ let mut extend = None;
+
+ let item_hir_id = tcx.hir().local_def_id_to_hir_id(item_def_id.expect_local());
+ let ast_generics = match tcx.hir().get(item_hir_id) {
+ Node::TraitItem(item) => &item.generics,
+
+ Node::ImplItem(item) => &item.generics,
+
+ Node::Item(item) => {
+ match item.kind {
+ ItemKind::Fn(.., ref generics, _)
+ | ItemKind::Impl(hir::Impl { ref generics, .. })
+ | ItemKind::TyAlias(_, ref generics)
+ | ItemKind::OpaqueTy(OpaqueTy {
+ ref generics,
+ origin: hir::OpaqueTyOrigin::TyAlias,
+ ..
+ })
+ | ItemKind::Enum(_, ref generics)
+ | ItemKind::Struct(_, ref generics)
+ | ItemKind::Union(_, ref generics) => generics,
+ ItemKind::Trait(_, _, ref generics, ..) => {
+ // Implied `Self: Trait` and supertrait bounds.
+ if param_id == item_hir_id {
+ let identity_trait_ref = ty::TraitRef::identity(tcx, item_def_id);
+ extend =
+ Some((identity_trait_ref.without_const().to_predicate(tcx), item.span));
+ }
+ generics
+ }
+ _ => return result,
+ }
+ }
+
+ Node::ForeignItem(item) => match item.kind {
+ ForeignItemKind::Fn(_, _, ref generics) => generics,
+ _ => return result,
+ },
+
+ _ => return result,
+ };
+
+ let icx = ItemCtxt::new(tcx, item_def_id);
+ let extra_predicates = extend.into_iter().chain(
+ icx.type_parameter_bounds_in_generics(
+ ast_generics,
+ param_id,
+ ty,
+ OnlySelfBounds(true),
+ Some(assoc_name),
+ )
+ .into_iter()
+ .filter(|(predicate, _)| match predicate.kind().skip_binder() {
+ ty::PredicateKind::Trait(data) => data.self_ty().is_param(index),
+ _ => false,
+ }),
+ );
+ result.predicates =
+ tcx.arena.alloc_from_iter(result.predicates.iter().copied().chain(extra_predicates));
+ result
+}
+
+impl<'tcx> ItemCtxt<'tcx> {
+ /// Finds bounds from `hir::Generics`. This requires scanning through the
+ /// AST. We do this to avoid having to convert *all* the bounds, which
+    /// would create artificial cycles. Instead, we only convert the
+    /// bounds for a type parameter `X` if `X::Foo` is used.
+ #[instrument(level = "trace", skip(self, ast_generics))]
+ fn type_parameter_bounds_in_generics(
+ &self,
+ ast_generics: &'tcx hir::Generics<'tcx>,
+ param_id: hir::HirId,
+ ty: Ty<'tcx>,
+ only_self_bounds: OnlySelfBounds,
+ assoc_name: Option<Ident>,
+ ) -> Vec<(ty::Predicate<'tcx>, Span)> {
+ let param_def_id = self.tcx.hir().local_def_id(param_id).to_def_id();
+ trace!(?param_def_id);
+ ast_generics
+ .predicates
+ .iter()
+ .filter_map(|wp| match *wp {
+ hir::WherePredicate::BoundPredicate(ref bp) => Some(bp),
+ _ => None,
+ })
+ .flat_map(|bp| {
+ let bt = if bp.is_param_bound(param_def_id) {
+ Some(ty)
+ } else if !only_self_bounds.0 {
+ Some(self.to_ty(bp.bounded_ty))
+ } else {
+ None
+ };
+ let bvars = self.tcx.late_bound_vars(bp.hir_id);
+
+ bp.bounds.iter().filter_map(move |b| bt.map(|bt| (bt, b, bvars))).filter(
+ |(_, b, _)| match assoc_name {
+ Some(assoc_name) => self.bound_defines_assoc_item(b, assoc_name),
+ None => true,
+ },
+ )
+ })
+ .flat_map(|(bt, b, bvars)| predicates_from_bound(self, bt, b, bvars))
+ .collect()
+ }
+
+ #[instrument(level = "trace", skip(self))]
+ fn bound_defines_assoc_item(&self, b: &hir::GenericBound<'_>, assoc_name: Ident) -> bool {
+ match b {
+ hir::GenericBound::Trait(poly_trait_ref, _) => {
+ let trait_ref = &poly_trait_ref.trait_ref;
+ if let Some(trait_did) = trait_ref.trait_def_id() {
+ self.tcx.trait_may_define_assoc_type(trait_did, assoc_name)
+ } else {
+ false
+ }
+ }
+ _ => false,
+ }
+ }
+}
+
+/// Converts a specific `GenericBound` from the AST into a set of
+/// predicates that apply to the self type. A vector is returned
+/// because this can be anywhere from zero predicates (`T: ?Sized` adds no
+/// predicates) to one (`T: Foo`) to many (`T: Bar<X = i32>` adds `T: Bar`
+/// and `<T as Bar>::X == i32`).
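+///
+/// For example (illustrative, not part of the original change):
+///
+/// ```ignore (illustrative)
+/// fn f<T>(x: T) where T: Bar<X = i32> {}
+/// // the single bound `Bar<X = i32>` on `T` expands to the two predicates
+/// // `T: Bar` and `<T as Bar>::X == i32`
+/// ```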
+fn predicates_from_bound<'tcx>(
+ astconv: &dyn AstConv<'tcx>,
+ param_ty: Ty<'tcx>,
+ bound: &'tcx hir::GenericBound<'tcx>,
+ bound_vars: &'tcx ty::List<ty::BoundVariableKind>,
+) -> Vec<(ty::Predicate<'tcx>, Span)> {
+ let mut bounds = Bounds::default();
+ astconv.add_bounds(param_ty, [bound].into_iter(), &mut bounds, bound_vars);
+ bounds.predicates(astconv.tcx(), param_ty).collect()
+}
diff --git a/compiler/rustc_typeck/src/collect/type_of.rs b/compiler/rustc_hir_analysis/src/collect/type_of.rs
index 534ddfa95..c29a645eb 100644
--- a/compiler/rustc_typeck/src/collect/type_of.rs
+++ b/compiler/rustc_hir_analysis/src/collect/type_of.rs
@@ -1,6 +1,5 @@
use rustc_errors::{Applicability, StashKey};
use rustc_hir as hir;
-use rustc_hir::def::Res;
use rustc_hir::def_id::{DefId, LocalDefId};
use rustc_hir::intravisit;
use rustc_hir::intravisit::Visitor;
@@ -19,7 +18,6 @@ use crate::errors::UnconstrainedOpaqueType;
/// Computes the relevant generic parameter for a potential generic const argument.
///
/// This should be called using the query `tcx.opt_const_param_of`.
-#[instrument(level = "debug", skip(tcx))]
pub(super) fn opt_const_param_of(tcx: TyCtxt<'_>, def_id: LocalDefId) -> Option<DefId> {
use hir::*;
let hir_id = tcx.hir().local_def_id_to_hir_id(def_id);
@@ -67,8 +65,8 @@ pub(super) fn opt_const_param_of(tcx: TyCtxt<'_>, def_id: LocalDefId) -> Option<
let ty = item_ctxt.ast_ty_to_ty(hir_ty);
// Iterate through the generics of the projection to find the one that corresponds to
- // the def_id that this query was called with. We filter to only const args here as a
- // precaution for if it's ever allowed to elide lifetimes in GAT's. It currently isn't
+ // the def_id that this query was called with. We filter to only type and const args here
+        // as a precaution in case it's ever allowed to elide lifetimes in GATs. It currently isn't
// but it can't hurt to be safe ^^
if let ty::Projection(projection) = ty.kind() {
let generics = tcx.generics_of(projection.item_def_id);
@@ -79,7 +77,7 @@ pub(super) fn opt_const_param_of(tcx: TyCtxt<'_>, def_id: LocalDefId) -> Option<
args.args
.iter()
.filter(|arg| arg.is_ty_or_const())
- .position(|arg| arg.id() == hir_id)
+ .position(|arg| arg.hir_id() == hir_id)
})
.unwrap_or_else(|| {
bug!("no arg matching AnonConst in segment");
@@ -112,7 +110,7 @@ pub(super) fn opt_const_param_of(tcx: TyCtxt<'_>, def_id: LocalDefId) -> Option<
args.args
.iter()
.filter(|arg| arg.is_ty_or_const())
- .position(|arg| arg.id() == hir_id)
+ .position(|arg| arg.hir_id() == hir_id)
})
.unwrap_or_else(|| {
bug!("no arg matching AnonConst in segment");
@@ -166,7 +164,7 @@ pub(super) fn opt_const_param_of(tcx: TyCtxt<'_>, def_id: LocalDefId) -> Option<
args.args
.iter()
.filter(|arg| arg.is_ty_or_const())
- .position(|arg| arg.id() == hir_id)
+ .position(|arg| arg.hir_id() == hir_id)
.map(|index| (index, seg)).or_else(|| args.bindings
.iter()
.filter_map(TypeBinding::opt_const)
@@ -180,15 +178,12 @@ pub(super) fn opt_const_param_of(tcx: TyCtxt<'_>, def_id: LocalDefId) -> Option<
return None;
};
- // Try to use the segment resolution if it is valid, otherwise we
- // default to the path resolution.
- let res = segment.res.filter(|&r| r != Res::Err).unwrap_or(path.res);
- let generics = match tcx.res_generics_def_id(res) {
+ let generics = match tcx.res_generics_def_id(segment.res) {
Some(def_id) => tcx.generics_of(def_id),
None => {
tcx.sess.delay_span_bug(
tcx.def_span(def_id),
- &format!("unexpected anon const res {:?} in path: {:?}", res, path),
+ &format!("unexpected anon const res {:?} in path: {:?}", segment.res, path),
);
return None;
}
@@ -229,7 +224,7 @@ fn get_path_containing_arg_in_pat<'hir>(
.iter()
.filter_map(|seg| seg.args)
.flat_map(|args| args.args)
- .any(|arg| arg.id() == arg_id)
+ .any(|arg| arg.hir_id() == arg_id)
};
let mut arg_path = None;
pat.walk(|pat| match pat.kind {
@@ -289,7 +284,7 @@ pub(super) fn type_of(tcx: TyCtxt<'_>, def_id: DefId) -> Ty<'_> {
icx.to_ty(ty)
}
}
- ImplItemKind::TyAlias(ty) => {
+ ImplItemKind::Type(ty) => {
if tcx.impl_trait_ref(tcx.hir().get_parent_item(hir_id)).is_none() {
check_feature_inherent_assoc_ty(tcx, item.span);
}
@@ -324,7 +319,15 @@ pub(super) fn type_of(tcx: TyCtxt<'_>, def_id: DefId) -> Ty<'_> {
}
}
ItemKind::TyAlias(self_ty, _) => icx.to_ty(self_ty),
- ItemKind::Impl(hir::Impl { self_ty, .. }) => icx.to_ty(*self_ty),
+ ItemKind::Impl(hir::Impl { self_ty, .. }) => {
+ match self_ty.find_self_aliases() {
+ spans if spans.len() > 0 => {
+ tcx.sess.emit_err(crate::errors::SelfInImplSelf { span: spans.into(), note: (), });
+ tcx.ty_error()
+ },
+ _ => icx.to_ty(*self_ty),
+ }
+ },
ItemKind::Fn(..) => {
let substs = InternalSubsts::identity_for_item(tcx, def_id.to_def_id());
tcx.mk_fn_def(def_id.to_def_id(), substs)
@@ -338,7 +341,15 @@ pub(super) fn type_of(tcx: TyCtxt<'_>, def_id: DefId) -> Ty<'_> {
find_opaque_ty_constraints_for_tait(tcx, def_id)
}
// Opaque types desugared from `impl Trait`.
- ItemKind::OpaqueTy(OpaqueTy { origin: hir::OpaqueTyOrigin::FnReturn(owner) | hir::OpaqueTyOrigin::AsyncFn(owner), .. }) => {
+ ItemKind::OpaqueTy(OpaqueTy {
+ origin:
+ hir::OpaqueTyOrigin::FnReturn(owner) | hir::OpaqueTyOrigin::AsyncFn(owner),
+ in_trait,
+ ..
+ }) => {
+ if in_trait {
+ assert!(tcx.impl_defaultness(owner).has_value());
+ }
find_opaque_ty_constraints_for_rpit(tcx, def_id, owner)
}
ItemKind::Trait(..)
@@ -379,7 +390,9 @@ pub(super) fn type_of(tcx: TyCtxt<'_>, def_id: DefId) -> Ty<'_> {
Node::Field(field) => icx.to_ty(field.ty),
- Node::Expr(&Expr { kind: ExprKind::Closure{..}, .. }) => tcx.typeck(def_id).node_type(hir_id),
+ Node::Expr(&Expr { kind: ExprKind::Closure { .. }, .. }) => {
+ tcx.typeck(def_id).node_type(hir_id)
+ }
Node::AnonConst(_) if let Some(param) = tcx.opt_const_param_of(def_id) => {
// We defer to `type_of` of the corresponding parameter
@@ -411,40 +424,93 @@ pub(super) fn type_of(tcx: TyCtxt<'_>, def_id: DefId) -> Ty<'_> {
| Node::Item(&Item { kind: ItemKind::GlobalAsm(asm), .. })
if asm.operands.iter().any(|(op, _op_sp)| match op {
hir::InlineAsmOperand::Const { anon_const }
- | hir::InlineAsmOperand::SymFn { anon_const } => anon_const.hir_id == hir_id,
+ | hir::InlineAsmOperand::SymFn { anon_const } => {
+ anon_const.hir_id == hir_id
+ }
_ => false,
}) =>
{
tcx.typeck(def_id).node_type(hir_id)
}
- Node::Variant(Variant { disr_expr: Some(ref e), .. }) if e.hir_id == hir_id => tcx
- .adt_def(tcx.hir().get_parent_item(hir_id))
- .repr()
- .discr_type()
- .to_ty(tcx),
+ Node::Variant(Variant { disr_expr: Some(ref e), .. }) if e.hir_id == hir_id => {
+ tcx.adt_def(tcx.hir().get_parent_item(hir_id)).repr().discr_type().to_ty(tcx)
+ }
- Node::TypeBinding(binding @ &TypeBinding { hir_id: binding_id, .. })
- if let Node::TraitRef(trait_ref) = tcx.hir().get(
- tcx.hir().get_parent_node(binding_id)
- ) =>
+ Node::TypeBinding(
+ binding @ &TypeBinding {
+ hir_id: binding_id,
+ kind: TypeBindingKind::Equality { term: Term::Const(ref e) },
+ ..
+ },
+ ) if let Node::TraitRef(trait_ref) =
+ tcx.hir().get(tcx.hir().get_parent_node(binding_id))
+ && e.hir_id == hir_id =>
{
- let Some(trait_def_id) = trait_ref.trait_def_id() else {
- return tcx.ty_error_with_message(DUMMY_SP, "Could not find trait");
- };
- let assoc_items = tcx.associated_items(trait_def_id);
- let assoc_item = assoc_items.find_by_name_and_kind(
- tcx, binding.ident, ty::AssocKind::Const, def_id.to_def_id(),
- );
- if let Some(assoc_item) = assoc_item {
- tcx.type_of(assoc_item.def_id)
- } else {
- // FIXME(associated_const_equality): add a useful error message here.
- tcx.ty_error_with_message(
- DUMMY_SP,
- "Could not find associated const on trait",
- )
- }
+ let Some(trait_def_id) = trait_ref.trait_def_id() else {
+ return tcx.ty_error_with_message(DUMMY_SP, "Could not find trait");
+ };
+ let assoc_items = tcx.associated_items(trait_def_id);
+ let assoc_item = assoc_items.find_by_name_and_kind(
+ tcx,
+ binding.ident,
+ ty::AssocKind::Const,
+ def_id.to_def_id(),
+ );
+ if let Some(assoc_item) = assoc_item {
+ tcx.type_of(assoc_item.def_id)
+ } else {
+ // FIXME(associated_const_equality): add a useful error message here.
+ tcx.ty_error_with_message(
+ DUMMY_SP,
+ "Could not find associated const on trait",
+ )
+ }
+ }
+
+ Node::TypeBinding(
+ binding @ &TypeBinding { hir_id: binding_id, gen_args, ref kind, .. },
+ ) if let Node::TraitRef(trait_ref) =
+ tcx.hir().get(tcx.hir().get_parent_node(binding_id))
+ && let Some((idx, _)) =
+ gen_args.args.iter().enumerate().find(|(_, arg)| {
+ if let GenericArg::Const(ct) = arg {
+ ct.value.hir_id == hir_id
+ } else {
+ false
+ }
+ }) =>
+ {
+ let Some(trait_def_id) = trait_ref.trait_def_id() else {
+ return tcx.ty_error_with_message(DUMMY_SP, "Could not find trait");
+ };
+ let assoc_items = tcx.associated_items(trait_def_id);
+ let assoc_item = assoc_items.find_by_name_and_kind(
+ tcx,
+ binding.ident,
+ match kind {
+                            // I think `<A: T>` type bindings require that `A` is a type
+ TypeBindingKind::Constraint { .. }
+ | TypeBindingKind::Equality { term: Term::Ty(..) } => {
+ ty::AssocKind::Type
+ }
+ TypeBindingKind::Equality { term: Term::Const(..) } => {
+ ty::AssocKind::Const
+ }
+ },
+ def_id.to_def_id(),
+ );
+            if let Some(param) = assoc_item
+                .map(|item| &tcx.generics_of(item.def_id).params[idx])
+                .filter(|param| param.kind.is_ty_or_const())
+ {
+ tcx.type_of(param.def_id)
+ } else {
+ // FIXME(associated_const_equality): add a useful error message here.
+ tcx.ty_error_with_message(
+ DUMMY_SP,
+ "Could not find associated const on trait",
+ )
+ }
}
Node::GenericParam(&GenericParam {
@@ -453,8 +519,7 @@ pub(super) fn type_of(tcx: TyCtxt<'_>, def_id: DefId) -> Ty<'_> {
..
}) if ct.hir_id == hir_id => tcx.type_of(tcx.hir().local_def_id(param_hir_id)),
- x =>
- tcx.ty_error_with_message(
+ x => tcx.ty_error_with_message(
DUMMY_SP,
&format!("unexpected const parent in type_of(): {x:?}"),
),
@@ -508,6 +573,11 @@ fn find_opaque_ty_constraints_for_tait(tcx: TyCtxt<'_>, def_id: LocalDefId) -> T
/// checked against it (we also carry the span of that first
/// type).
found: Option<ty::OpaqueHiddenType<'tcx>>,
+
+ /// In the presence of dead code, typeck may figure out a hidden type
+    /// while borrowck will not. We collect these cases here and check at
+ /// the end that we actually found a type that matches (modulo regions).
+ typeck_types: Vec<ty::OpaqueHiddenType<'tcx>>,
}
impl ConstraintLocator<'_> {
@@ -534,18 +604,23 @@ fn find_opaque_ty_constraints_for_tait(tcx: TyCtxt<'_>, def_id: LocalDefId) -> T
self.found = Some(ty::OpaqueHiddenType { span: DUMMY_SP, ty: self.tcx.ty_error() });
return;
}
- if !tables.concrete_opaque_types.contains_key(&self.def_id) {
+ let Some(&typeck_hidden_ty) = tables.concrete_opaque_types.get(&self.def_id) else {
debug!("no constraints in typeck results");
return;
+ };
+ if self.typeck_types.iter().all(|prev| prev.ty != typeck_hidden_ty.ty) {
+ self.typeck_types.push(typeck_hidden_ty);
}
+
// Use borrowck to get the type with unerased regions.
let concrete_opaque_types = &self.tcx.mir_borrowck(item_def_id).concrete_opaque_types;
debug!(?concrete_opaque_types);
if let Some(&concrete_type) = concrete_opaque_types.get(&self.def_id) {
debug!(?concrete_type, "found constraint");
- if let Some(prev) = self.found {
- if concrete_type.ty != prev.ty && !(concrete_type, prev).references_error() {
+ if let Some(prev) = &mut self.found {
+ if concrete_type.ty != prev.ty && !(concrete_type, prev.ty).references_error() {
prev.report_mismatch(&concrete_type, self.tcx);
+ prev.ty = self.tcx.ty_error();
}
} else {
self.found = Some(concrete_type);
@@ -568,31 +643,31 @@ fn find_opaque_ty_constraints_for_tait(tcx: TyCtxt<'_>, def_id: LocalDefId) -> T
intravisit::walk_expr(self, ex);
}
fn visit_item(&mut self, it: &'tcx Item<'tcx>) {
- trace!(?it.def_id);
+ trace!(?it.owner_id);
// The opaque type itself or its children are not within its reveal scope.
- if it.def_id != self.def_id {
- self.check(it.def_id);
+ if it.owner_id.def_id != self.def_id {
+ self.check(it.owner_id.def_id);
intravisit::walk_item(self, it);
}
}
fn visit_impl_item(&mut self, it: &'tcx ImplItem<'tcx>) {
- trace!(?it.def_id);
+ trace!(?it.owner_id);
// The opaque type itself or its children are not within its reveal scope.
- if it.def_id != self.def_id {
- self.check(it.def_id);
+ if it.owner_id.def_id != self.def_id {
+ self.check(it.owner_id.def_id);
intravisit::walk_impl_item(self, it);
}
}
fn visit_trait_item(&mut self, it: &'tcx TraitItem<'tcx>) {
- trace!(?it.def_id);
- self.check(it.def_id);
+ trace!(?it.owner_id);
+ self.check(it.owner_id.def_id);
intravisit::walk_trait_item(self, it);
}
}
let hir_id = tcx.hir().local_def_id_to_hir_id(def_id);
let scope = tcx.hir().get_defining_scope(hir_id);
- let mut locator = ConstraintLocator { def_id: def_id, tcx, found: None };
+ let mut locator = ConstraintLocator { def_id: def_id, tcx, found: None, typeck_types: vec![] };
debug!(?scope);
@@ -622,16 +697,32 @@ fn find_opaque_ty_constraints_for_tait(tcx: TyCtxt<'_>, def_id: LocalDefId) -> T
}
}
- match locator.found {
- Some(hidden) => hidden.ty,
- None => {
- tcx.sess.emit_err(UnconstrainedOpaqueType {
- span: tcx.def_span(def_id),
- name: tcx.item_name(tcx.local_parent(def_id).to_def_id()),
- });
- tcx.ty_error()
+ let Some(hidden) = locator.found else {
+ tcx.sess.emit_err(UnconstrainedOpaqueType {
+ span: tcx.def_span(def_id),
+ name: tcx.item_name(tcx.local_parent(def_id).to_def_id()),
+ what: match tcx.hir().get(scope) {
+ _ if scope == hir::CRATE_HIR_ID => "module",
+ Node::Item(hir::Item { kind: hir::ItemKind::Mod(_), .. }) => "module",
+ Node::Item(hir::Item { kind: hir::ItemKind::Impl(_), .. }) => "impl",
+ _ => "item",
+ },
+ });
+ return tcx.ty_error();
+ };
+
+ // Only check against typeck if we didn't already error
+ if !hidden.ty.references_error() {
+ for concrete_type in locator.typeck_types {
+ if tcx.erase_regions(concrete_type.ty) != tcx.erase_regions(hidden.ty)
+ && !(concrete_type, hidden).references_error()
+ {
+ hidden.report_mismatch(&concrete_type, tcx);
+ }
}
}
+
+ hidden.ty
}
fn find_opaque_ty_constraints_for_rpit(
@@ -687,24 +778,24 @@ fn find_opaque_ty_constraints_for_rpit(
intravisit::walk_expr(self, ex);
}
fn visit_item(&mut self, it: &'tcx Item<'tcx>) {
- trace!(?it.def_id);
+ trace!(?it.owner_id);
// The opaque type itself or its children are not within its reveal scope.
- if it.def_id != self.def_id {
- self.check(it.def_id);
+ if it.owner_id.def_id != self.def_id {
+ self.check(it.owner_id.def_id);
intravisit::walk_item(self, it);
}
}
fn visit_impl_item(&mut self, it: &'tcx ImplItem<'tcx>) {
- trace!(?it.def_id);
+ trace!(?it.owner_id);
// The opaque type itself or its children are not within its reveal scope.
- if it.def_id != self.def_id {
- self.check(it.def_id);
+ if it.owner_id.def_id != self.def_id {
+ self.check(it.owner_id.def_id);
intravisit::walk_impl_item(self, it);
}
}
fn visit_trait_item(&mut self, it: &'tcx TraitItem<'tcx>) {
- trace!(?it.def_id);
- self.check(it.def_id);
+ trace!(?it.owner_id);
+ self.check(it.owner_id.def_id);
intravisit::walk_trait_item(self, it);
}
}
@@ -732,20 +823,15 @@ fn find_opaque_ty_constraints_for_rpit(
// the `concrete_opaque_types` table.
tcx.ty_error()
} else {
- table
- .concrete_opaque_types
- .get(&def_id)
- .copied()
- .unwrap_or_else(|| {
- // We failed to resolve the opaque type or it
- // resolves to itself. We interpret this as the
- // no values of the hidden type ever being constructed,
- // so we can just make the hidden type be `!`.
- // For backwards compatibility reasons, we fall back to
- // `()` until we the diverging default is changed.
- Some(tcx.mk_diverging_default())
- })
- .expect("RPIT always have a hidden type from typeck")
+ table.concrete_opaque_types.get(&def_id).map(|ty| ty.ty).unwrap_or_else(|| {
+ // We failed to resolve the opaque type or it
+                // resolves to itself. We interpret this as meaning that
+                // no values of the hidden type are ever constructed,
+ // so we can just make the hidden type be `!`.
+ // For backwards compatibility reasons, we fall back to
+                // `()` until the diverging default is changed.
+ tcx.mk_diverging_default()
+ })
}
})
}
@@ -801,6 +887,9 @@ fn infer_placeholder_type<'a>(
match tcx.sess.diagnostic().steal_diagnostic(span, StashKey::ItemNoType) {
Some(mut err) => {
if !ty.references_error() {
+ // Only suggest adding `:` if it was missing (and suggested by parsing diagnostic)
+ let colon = if span == item_ident.span.shrink_to_hi() { ":" } else { "" };
+
// The parser provided a sub-optimal `HasPlaceholders` suggestion for the type.
// We are typeck and have the real type, so remove that and suggest the actual type.
// FIXME(eddyb) this looks like it should be functionality on `Diagnostic`.
@@ -816,7 +905,7 @@ fn infer_placeholder_type<'a>(
err.span_suggestion(
span,
&format!("provide a type for the {item}", item = kind),
- format!("{}: {}", item_ident, sugg_ty),
+ format!("{colon} {sugg_ty}"),
Applicability::MachineApplicable,
);
} else {
diff --git a/compiler/rustc_typeck/src/constrained_generic_params.rs b/compiler/rustc_hir_analysis/src/constrained_generic_params.rs
index 8428e4664..213b89fc7 100644
--- a/compiler/rustc_typeck/src/constrained_generic_params.rs
+++ b/compiler/rustc_hir_analysis/src/constrained_generic_params.rs
@@ -114,9 +114,9 @@ pub fn identify_constrained_generic_params<'tcx>(
/// ```
/// The impl's predicates are collected from left to right. Ignoring
/// the implicit `Sized` bounds, these are
-/// * T: Debug
-/// * U: Iterator
-/// * <U as Iterator>::Item = T -- a desugared ProjectionPredicate
+/// * `T: Debug`
+/// * `U: Iterator`
+/// * `<U as Iterator>::Item = T` -- a desugared ProjectionPredicate
///
/// When we, for example, try to go over the trait-reference
/// `IntoIter<u32> as Trait`, we substitute the impl parameters with fresh
@@ -132,12 +132,16 @@ pub fn identify_constrained_generic_params<'tcx>(
///
/// We *do* have to be somewhat careful when projection targets contain
/// projections themselves, for example in
+///
+/// ```ignore (illustrative)
/// impl<S,U,V,W> Trait for U where
/// /* 0 */ S: Iterator<Item = U>,
/// /* - */ U: Iterator,
/// /* 1 */ <U as Iterator>::Item: ToOwned<Owned=(W,<V as Iterator>::Item)>
/// /* 2 */ W: Iterator<Item = V>
/// /* 3 */ V: Debug
+/// ```
+///
/// we have to evaluate the projections in the order I wrote them:
/// `V: Debug` requires `V` to be evaluated. The only projection that
/// *determines* `V` is 2 (1 contains it, but *does not determine it*,
diff --git a/compiler/rustc_hir_analysis/src/errors.rs b/compiler/rustc_hir_analysis/src/errors.rs
new file mode 100644
index 000000000..d5b1a7ce1
--- /dev/null
+++ b/compiler/rustc_hir_analysis/src/errors.rs
@@ -0,0 +1,282 @@
+//! Errors emitted by `rustc_hir_analysis`.
+
+use rustc_errors::{error_code, Applicability, DiagnosticBuilder, ErrorGuaranteed, Handler};
+use rustc_errors::{IntoDiagnostic, MultiSpan};
+use rustc_macros::{Diagnostic, LintDiagnostic};
+use rustc_middle::ty::Ty;
+use rustc_span::{symbol::Ident, Span, Symbol};
+
+#[derive(Diagnostic)]
+#[diag(hir_analysis_unrecognized_atomic_operation, code = "E0092")]
+pub struct UnrecognizedAtomicOperation<'a> {
+ #[primary_span]
+ #[label]
+ pub span: Span,
+ pub op: &'a str,
+}
+
+#[derive(Diagnostic)]
+#[diag(hir_analysis_wrong_number_of_generic_arguments_to_intrinsic, code = "E0094")]
+pub struct WrongNumberOfGenericArgumentsToIntrinsic<'a> {
+ #[primary_span]
+ #[label]
+ pub span: Span,
+ pub found: usize,
+ pub expected: usize,
+ pub descr: &'a str,
+}
+
+#[derive(Diagnostic)]
+#[diag(hir_analysis_unrecognized_intrinsic_function, code = "E0093")]
+pub struct UnrecognizedIntrinsicFunction {
+ #[primary_span]
+ #[label]
+ pub span: Span,
+ pub name: Symbol,
+}
+
+#[derive(Diagnostic)]
+#[diag(hir_analysis_lifetimes_or_bounds_mismatch_on_trait, code = "E0195")]
+pub struct LifetimesOrBoundsMismatchOnTrait {
+ #[primary_span]
+ #[label]
+ pub span: Span,
+ #[label(generics_label)]
+ pub generics_span: Option<Span>,
+ pub item_kind: &'static str,
+ pub ident: Ident,
+}
+
+#[derive(Diagnostic)]
+#[diag(hir_analysis_drop_impl_on_wrong_item, code = "E0120")]
+pub struct DropImplOnWrongItem {
+ #[primary_span]
+ #[label]
+ pub span: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(hir_analysis_field_already_declared, code = "E0124")]
+pub struct FieldAlreadyDeclared {
+ pub field_name: Ident,
+ #[primary_span]
+ #[label]
+ pub span: Span,
+ #[label(previous_decl_label)]
+ pub prev_span: Span,
+}
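+
+// Illustrative usage (not part of the original change): a `#[derive(Diagnostic)]`
+// struct such as `FieldAlreadyDeclared` above is emitted through the session,
+// e.g. `tcx.sess.emit_err(crate::errors::FieldAlreadyDeclared { field_name, span, prev_span })`;
+// the `#[diag(...)]` slug selects the corresponding Fluent message.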
+
+#[derive(Diagnostic)]
+#[diag(hir_analysis_copy_impl_on_type_with_dtor, code = "E0184")]
+pub struct CopyImplOnTypeWithDtor {
+ #[primary_span]
+ #[label]
+ pub span: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(hir_analysis_multiple_relaxed_default_bounds, code = "E0203")]
+pub struct MultipleRelaxedDefaultBounds {
+ #[primary_span]
+ pub span: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(hir_analysis_copy_impl_on_non_adt, code = "E0206")]
+pub struct CopyImplOnNonAdt {
+ #[primary_span]
+ #[label]
+ pub span: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(hir_analysis_trait_object_declared_with_no_traits, code = "E0224")]
+pub struct TraitObjectDeclaredWithNoTraits {
+ #[primary_span]
+ pub span: Span,
+ #[label(alias_span)]
+ pub trait_alias_span: Option<Span>,
+}
+
+#[derive(Diagnostic)]
+#[diag(hir_analysis_ambiguous_lifetime_bound, code = "E0227")]
+pub struct AmbiguousLifetimeBound {
+ #[primary_span]
+ pub span: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(hir_analysis_assoc_type_binding_not_allowed, code = "E0229")]
+pub struct AssocTypeBindingNotAllowed {
+ #[primary_span]
+ #[label]
+ pub span: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(hir_analysis_typeof_reserved_keyword_used, code = "E0516")]
+pub struct TypeofReservedKeywordUsed<'tcx> {
+ pub ty: Ty<'tcx>,
+ #[primary_span]
+ #[label]
+ pub span: Span,
+ #[suggestion_verbose(code = "{ty}")]
+ pub opt_sugg: Option<(Span, Applicability)>,
+}
+
+#[derive(Diagnostic)]
+#[diag(hir_analysis_value_of_associated_struct_already_specified, code = "E0719")]
+pub struct ValueOfAssociatedStructAlreadySpecified {
+ #[primary_span]
+ #[label]
+ pub span: Span,
+ #[label(previous_bound_label)]
+ pub prev_span: Span,
+ pub item_name: Ident,
+ pub def_path: String,
+}
+
+#[derive(Diagnostic)]
+#[diag(hir_analysis_unconstrained_opaque_type)]
+#[note]
+pub struct UnconstrainedOpaqueType {
+ #[primary_span]
+ pub span: Span,
+ pub name: Symbol,
+ pub what: &'static str,
+}
+
+pub struct MissingTypeParams {
+ pub span: Span,
+ pub def_span: Span,
+ pub span_snippet: Option<String>,
+ pub missing_type_params: Vec<Symbol>,
+ pub empty_generic_args: bool,
+}
+
+// Manual implementation of `IntoDiagnostic` to be able to call `span_to_snippet`.
+impl<'a> IntoDiagnostic<'a> for MissingTypeParams {
+ fn into_diagnostic(self, handler: &'a Handler) -> DiagnosticBuilder<'a, ErrorGuaranteed> {
+ let mut err = handler.struct_span_err_with_code(
+ self.span,
+ rustc_errors::fluent::hir_analysis_missing_type_params,
+ error_code!(E0393),
+ );
+ err.set_arg("parameterCount", self.missing_type_params.len());
+ err.set_arg(
+ "parameters",
+ self.missing_type_params
+ .iter()
+ .map(|n| format!("`{}`", n))
+ .collect::<Vec<_>>()
+ .join(", "),
+ );
+
+ err.span_label(self.def_span, rustc_errors::fluent::label);
+
+ let mut suggested = false;
+ // Don't suggest setting the type params if there are some already: the order is
+ // tricky to get right and the user will already know what the syntax is.
+ if let Some(snippet) = self.span_snippet && self.empty_generic_args {
+ if snippet.ends_with('>') {
+ // The user wrote `Trait<'a, T>` or similar. To provide an accurate suggestion
+ // we would have to preserve the right order. For now, as clearly the user is
+ // aware of the syntax, we do nothing.
+ } else {
+ // The user wrote `Iterator`, so we don't have a type we can suggest, but at
+ // least we can clue them to the correct syntax `Iterator<Type>`.
+ err.span_suggestion(
+ self.span,
+ rustc_errors::fluent::suggestion,
+ format!(
+ "{}<{}>",
+ snippet,
+ self.missing_type_params
+ .iter()
+ .map(|n| n.to_string())
+ .collect::<Vec<_>>()
+ .join(", ")
+ ),
+ Applicability::HasPlaceholders,
+ );
+ suggested = true;
+ }
+ }
+ if !suggested {
+ err.span_label(self.span, rustc_errors::fluent::no_suggestion_label);
+ }
+
+ err.note(rustc_errors::fluent::note);
+ err
+ }
+}
+
+#[derive(Diagnostic)]
+#[diag(hir_analysis_manual_implementation, code = "E0183")]
+#[help]
+pub struct ManualImplementation {
+ #[primary_span]
+ #[label]
+ pub span: Span,
+ pub trait_name: String,
+}
+
+#[derive(Diagnostic)]
+#[diag(hir_analysis_substs_on_overridden_impl)]
+pub struct SubstsOnOverriddenImpl {
+ #[primary_span]
+ pub span: Span,
+}
+
+#[derive(LintDiagnostic)]
+#[diag(hir_analysis_unused_extern_crate)]
+pub struct UnusedExternCrate {
+ #[suggestion(applicability = "machine-applicable", code = "")]
+ pub span: Span,
+}
+
+#[derive(LintDiagnostic)]
+#[diag(hir_analysis_extern_crate_not_idiomatic)]
+pub struct ExternCrateNotIdiomatic {
+ #[suggestion_short(applicability = "machine-applicable", code = "{suggestion_code}")]
+ pub span: Span,
+ pub msg_code: String,
+ pub suggestion_code: String,
+}
+
+#[derive(Diagnostic)]
+#[diag(hir_analysis_expected_used_symbol)]
+pub struct ExpectedUsedSymbol {
+ #[primary_span]
+ pub span: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(hir_analysis_const_impl_for_non_const_trait)]
+pub struct ConstImplForNonConstTrait {
+ #[primary_span]
+ pub trait_ref_span: Span,
+ pub trait_name: String,
+ #[suggestion(applicability = "machine-applicable", code = "#[const_trait]")]
+ pub local_trait_span: Option<Span>,
+ #[note]
+ pub marking: (),
+ #[note(adding)]
+ pub adding: (),
+}
+
+#[derive(Diagnostic)]
+#[diag(hir_analysis_const_bound_for_non_const_trait)]
+pub struct ConstBoundForNonConstTrait {
+ #[primary_span]
+ pub span: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(hir_analysis_self_in_impl_self)]
+pub struct SelfInImplSelf {
+ #[primary_span]
+ pub span: MultiSpan,
+ #[note]
+ pub note: (),
+}
diff --git a/compiler/rustc_typeck/src/hir_wf_check.rs b/compiler/rustc_hir_analysis/src/hir_wf_check.rs
index 55c7a15f9..b0fdfcf38 100644
--- a/compiler/rustc_typeck/src/hir_wf_check.rs
+++ b/compiler/rustc_hir_analysis/src/hir_wf_check.rs
@@ -3,11 +3,10 @@ use rustc_hir as hir;
use rustc_hir::intravisit::{self, Visitor};
use rustc_hir::{ForeignItem, ForeignItemKind, HirId};
use rustc_infer::infer::TyCtxtInferExt;
-use rustc_infer::traits::TraitEngine;
use rustc_infer::traits::{ObligationCause, WellFormedLoc};
use rustc_middle::ty::query::Providers;
use rustc_middle::ty::{self, Region, ToPredicate, TyCtxt, TypeFoldable, TypeFolder};
-use rustc_trait_selection::traits::{self, TraitEngineExt};
+use rustc_trait_selection::traits;
pub fn provide(providers: &mut Providers) {
*providers = Providers { diagnostic_hir_wf_check, ..*providers };
@@ -65,41 +64,36 @@ fn diagnostic_hir_wf_check<'tcx>(
impl<'tcx> Visitor<'tcx> for HirWfCheck<'tcx> {
fn visit_ty(&mut self, ty: &'tcx hir::Ty<'tcx>) {
- self.tcx.infer_ctxt().enter(|infcx| {
- let mut fulfill = <dyn TraitEngine<'tcx>>::new(self.tcx);
- let tcx_ty =
- self.icx.to_ty(ty).fold_with(&mut EraseAllBoundRegions { tcx: self.tcx });
- let cause = traits::ObligationCause::new(
- ty.span,
- self.hir_id,
- traits::ObligationCauseCode::WellFormed(None),
- );
- fulfill.register_predicate_obligation(
- &infcx,
- traits::Obligation::new(
- cause,
- self.param_env,
- ty::Binder::dummy(ty::PredicateKind::WellFormed(tcx_ty.into()))
- .to_predicate(self.tcx),
- ),
- );
-
- let errors = fulfill.select_all_or_error(&infcx);
- if !errors.is_empty() {
- debug!("Wf-check got errors for {:?}: {:?}", ty, errors);
- for error in errors {
- if error.obligation.predicate == self.predicate {
- // Save the cause from the greatest depth - this corresponds
- // to picking more-specific types (e.g. `MyStruct<u8>`)
- // over less-specific types (e.g. `Option<MyStruct<u8>>`)
- if self.depth >= self.cause_depth {
- self.cause = Some(error.obligation.cause);
- self.cause_depth = self.depth
- }
+ let infcx = self.tcx.infer_ctxt().build();
+ let tcx_ty = self.icx.to_ty(ty).fold_with(&mut EraseAllBoundRegions { tcx: self.tcx });
+ let cause = traits::ObligationCause::new(
+ ty.span,
+ self.hir_id,
+ traits::ObligationCauseCode::WellFormed(None),
+ );
+ let errors = traits::fully_solve_obligation(
+ &infcx,
+ traits::Obligation::new(
+ cause,
+ self.param_env,
+ ty::Binder::dummy(ty::PredicateKind::WellFormed(tcx_ty.into()))
+ .to_predicate(self.tcx),
+ ),
+ );
+ if !errors.is_empty() {
+ debug!("Wf-check got errors for {:?}: {:?}", ty, errors);
+ for error in errors {
+ if error.obligation.predicate == self.predicate {
+ // Save the cause from the greatest depth - this corresponds
+ // to picking more-specific types (e.g. `MyStruct<u8>`)
+ // over less-specific types (e.g. `Option<MyStruct<u8>>`)
+ if self.depth >= self.cause_depth {
+ self.cause = Some(error.obligation.cause);
+ self.cause_depth = self.depth
}
}
}
- });
+ }
self.depth += 1;
intravisit::walk_ty(self, ty);
self.depth -= 1;
@@ -123,7 +117,7 @@ fn diagnostic_hir_wf_check<'tcx>(
let ty = match loc {
WellFormedLoc::Ty(_) => match hir.get(hir_id) {
hir::Node::ImplItem(item) => match item.kind {
- hir::ImplItemKind::TyAlias(ty) => Some(ty),
+ hir::ImplItemKind::Type(ty) => Some(ty),
hir::ImplItemKind::Const(ty, _) => Some(ty),
ref item => bug!("Unexpected ImplItem {:?}", item),
},
@@ -144,6 +138,10 @@ fn diagnostic_hir_wf_check<'tcx>(
hir::Node::ForeignItem(ForeignItem {
kind: ForeignItemKind::Static(ty, _), ..
}) => Some(*ty),
+ hir::Node::GenericParam(hir::GenericParam {
+ kind: hir::GenericParamKind::Type { default: Some(ty), .. },
+ ..
+ }) => Some(*ty),
ref node => bug!("Unexpected node {:?}", node),
},
WellFormedLoc::Param { function: _, param_idx } => {
diff --git a/compiler/rustc_typeck/src/impl_wf_check.rs b/compiler/rustc_hir_analysis/src/impl_wf_check.rs
index 9fee1eaae..136f61999 100644
--- a/compiler/rustc_typeck/src/impl_wf_check.rs
+++ b/compiler/rustc_hir_analysis/src/impl_wf_check.rs
@@ -11,7 +11,7 @@
use crate::constrained_generic_params as cgp;
use min_specialization::check_min_specialization;
-use rustc_data_structures::fx::{FxHashMap, FxHashSet};
+use rustc_data_structures::fx::FxHashSet;
use rustc_errors::struct_span_err;
use rustc_hir::def::DefKind;
use rustc_hir::def_id::LocalDefId;
@@ -19,8 +19,6 @@ use rustc_middle::ty::query::Providers;
use rustc_middle::ty::{self, TyCtxt, TypeVisitable};
use rustc_span::{Span, Symbol};
-use std::collections::hash_map::Entry::{Occupied, Vacant};
-
mod min_specialization;
/// Checks that all the type/lifetime parameters on an impl also
@@ -57,11 +55,10 @@ fn check_mod_impl_wf(tcx: TyCtxt<'_>, module_def_id: LocalDefId) {
let min_specialization = tcx.features().min_specialization;
let module = tcx.hir_module_items(module_def_id);
for id in module.items() {
- if matches!(tcx.def_kind(id.def_id), DefKind::Impl) {
- enforce_impl_params_are_constrained(tcx, id.def_id);
- enforce_impl_items_are_distinct(tcx, id.def_id);
+ if matches!(tcx.def_kind(id.owner_id), DefKind::Impl) {
+ enforce_impl_params_are_constrained(tcx, id.owner_id.def_id);
if min_specialization {
- check_min_specialization(tcx, id.def_id);
+ check_min_specialization(tcx, id.owner_id.def_id);
}
}
}
@@ -194,35 +191,3 @@ fn report_unused_parameter(tcx: TyCtxt<'_>, span: Span, kind: &str, name: Symbol
}
err.emit();
}
-
-/// Enforce that we do not have two items in an impl with the same name.
-fn enforce_impl_items_are_distinct(tcx: TyCtxt<'_>, impl_def_id: LocalDefId) {
- let mut seen_type_items = FxHashMap::default();
- let mut seen_value_items = FxHashMap::default();
- for &impl_item_ref in tcx.associated_item_def_ids(impl_def_id) {
- let impl_item = tcx.associated_item(impl_item_ref);
- let seen_items = match impl_item.kind {
- ty::AssocKind::Type => &mut seen_type_items,
- _ => &mut seen_value_items,
- };
- let span = tcx.def_span(impl_item_ref);
- let ident = impl_item.ident(tcx);
- match seen_items.entry(ident.normalize_to_macros_2_0()) {
- Occupied(entry) => {
- let mut err = struct_span_err!(
- tcx.sess,
- span,
- E0201,
- "duplicate definitions with name `{}`:",
- ident
- );
- err.span_label(*entry.get(), format!("previous definition of `{}` here", ident));
- err.span_label(span, "duplicate definition");
- err.emit();
- }
- Vacant(entry) => {
- entry.insert(span);
- }
- }
- }
-}
diff --git a/compiler/rustc_typeck/src/impl_wf_check/min_specialization.rs b/compiler/rustc_hir_analysis/src/impl_wf_check/min_specialization.rs
index 74abb71a1..e806e9487 100644
--- a/compiler/rustc_typeck/src/impl_wf_check/min_specialization.rs
+++ b/compiler/rustc_hir_analysis/src/impl_wf_check/min_specialization.rs
@@ -65,27 +65,25 @@
//! cause use after frees with purely safe code in the same way as specializing
//! on traits with methods can.
-use crate::check::regionck::OutlivesEnvironmentExt;
-use crate::check::wfcheck::impl_implied_bounds;
use crate::constrained_generic_params as cgp;
use crate::errors::SubstsOnOverriddenImpl;
use rustc_data_structures::fx::FxHashSet;
use rustc_hir::def_id::{DefId, LocalDefId};
use rustc_infer::infer::outlives::env::OutlivesEnvironment;
-use rustc_infer::infer::{InferCtxt, TyCtxtInferExt};
+use rustc_infer::infer::TyCtxtInferExt;
use rustc_infer::traits::specialization_graph::Node;
use rustc_middle::ty::subst::{GenericArg, InternalSubsts, SubstsRef};
use rustc_middle::ty::trait_def::TraitSpecializationKind;
use rustc_middle::ty::{self, TyCtxt, TypeVisitable};
use rustc_span::Span;
-use rustc_trait_selection::traits::{self, translate_substs, wf};
+use rustc_trait_selection::traits::error_reporting::TypeErrCtxtExt;
+use rustc_trait_selection::traits::outlives_bounds::InferCtxtExt as _;
+use rustc_trait_selection::traits::{self, translate_substs, wf, ObligationCtxt};
pub(super) fn check_min_specialization(tcx: TyCtxt<'_>, impl_def_id: LocalDefId) {
if let Some(node) = parent_specialization_node(tcx, impl_def_id) {
- tcx.infer_ctxt().enter(|infcx| {
- check_always_applicable(&infcx, impl_def_id, node);
- });
+ check_always_applicable(tcx, impl_def_id, node);
}
}
@@ -105,16 +103,14 @@ fn parent_specialization_node(tcx: TyCtxt<'_>, impl1_def_id: LocalDefId) -> Opti
}
/// Check that `impl1` is a sound specialization
-fn check_always_applicable(infcx: &InferCtxt<'_, '_>, impl1_def_id: LocalDefId, impl2_node: Node) {
- if let Some((impl1_substs, impl2_substs)) = get_impl_substs(infcx, impl1_def_id, impl2_node) {
+fn check_always_applicable(tcx: TyCtxt<'_>, impl1_def_id: LocalDefId, impl2_node: Node) {
+ if let Some((impl1_substs, impl2_substs)) = get_impl_substs(tcx, impl1_def_id, impl2_node) {
let impl2_def_id = impl2_node.def_id();
debug!(
"check_always_applicable(\nimpl1_def_id={:?},\nimpl2_def_id={:?},\nimpl2_substs={:?}\n)",
impl1_def_id, impl2_def_id, impl2_substs
);
- let tcx = infcx.tcx;
-
let parent_substs = if impl2_node.is_from_trait() {
impl2_substs.to_vec()
} else {
@@ -124,7 +120,7 @@ fn check_always_applicable(infcx: &InferCtxt<'_, '_>, impl1_def_id: LocalDefId,
let span = tcx.def_span(impl1_def_id);
check_static_lifetimes(tcx, &parent_substs, span);
check_duplicate_params(tcx, impl1_substs, &parent_substs, span);
- check_predicates(infcx, impl1_def_id, impl1_substs, impl2_node, impl2_substs, span);
+ check_predicates(tcx, impl1_def_id, impl1_substs, impl2_node, impl2_substs, span);
}
}
@@ -134,30 +130,37 @@ fn check_always_applicable(infcx: &InferCtxt<'_, '_>, impl1_def_id: LocalDefId,
///
/// Example
///
+/// ```ignore (illustrative)
/// impl<A, B> Foo<A> for B { /* impl2 */ }
/// impl<C> Foo<Vec<C>> for C { /* impl1 */ }
+/// ```
///
/// Would return `S1 = [C]` and `S2 = [Vec<C>, C]`.
fn get_impl_substs<'tcx>(
- infcx: &InferCtxt<'_, 'tcx>,
+ tcx: TyCtxt<'tcx>,
impl1_def_id: LocalDefId,
impl2_node: Node,
) -> Option<(SubstsRef<'tcx>, SubstsRef<'tcx>)> {
- let tcx = infcx.tcx;
+ let infcx = &tcx.infer_ctxt().build();
+ let ocx = ObligationCtxt::new(infcx);
let param_env = tcx.param_env(impl1_def_id);
+ let impl1_hir_id = tcx.hir().local_def_id_to_hir_id(impl1_def_id);
+
+ let assumed_wf_types =
+ ocx.assumed_wf_types(param_env, tcx.def_span(impl1_def_id), impl1_def_id);
let impl1_substs = InternalSubsts::identity_for_item(tcx, impl1_def_id.to_def_id());
let impl2_substs =
translate_substs(infcx, param_env, impl1_def_id.to_def_id(), impl1_substs, impl2_node);
- let mut outlives_env = OutlivesEnvironment::new(param_env);
- let implied_bounds =
- impl_implied_bounds(infcx.tcx, param_env, impl1_def_id, tcx.def_span(impl1_def_id));
- outlives_env.add_implied_bounds(
- infcx,
- implied_bounds,
- tcx.hir().local_def_id_to_hir_id(impl1_def_id),
- );
+ let errors = ocx.select_all_or_error();
+ if !errors.is_empty() {
+ ocx.infcx.err_ctxt().report_fulfillment_errors(&errors, None, false);
+ return None;
+ }
+
+ let implied_bounds = infcx.implied_bounds_tys(param_env, impl1_hir_id, assumed_wf_types);
+ let outlives_env = OutlivesEnvironment::with_bounds(param_env, Some(infcx), implied_bounds);
infcx.check_region_obligations_and_report_errors(impl1_def_id, &outlives_env);
let Ok(impl2_substs) = infcx.fully_resolve(impl2_substs) else {
let span = tcx.def_span(impl1_def_id);
@@ -224,13 +227,17 @@ fn unconstrained_parent_impl_substs<'tcx>(
///
/// For example forbid the following:
///
+/// ```ignore (illustrative)
/// impl<A> Tr for A { }
/// impl<B> Tr for (B, B) { }
+/// ```
///
/// Note that we only consider the unconstrained parameters of the base impl:
///
+/// ```ignore (illustrative)
/// impl<S, I: IntoIterator<Item = S>> Tr<S> for I { }
/// impl<T> Tr<T> for Vec<T> { }
+/// ```
///
/// The substs for the parent impl here are `[T, Vec<T>]`, which repeats `T`,
/// but `S` is constrained in the parent impl, so `parent_substs` is only
@@ -255,8 +262,10 @@ fn check_duplicate_params<'tcx>(
///
/// For example forbid the following:
///
+/// ```ignore (illustrative)
/// impl<A> Tr for A { }
/// impl Tr for &'static i32 { }
+/// ```
fn check_static_lifetimes<'tcx>(
tcx: TyCtxt<'tcx>,
parent_substs: &Vec<GenericArg<'tcx>>,
@@ -279,14 +288,13 @@ fn check_static_lifetimes<'tcx>(
/// * a well-formed predicate of a type argument of the trait being implemented,
/// including the `Self`-type.
fn check_predicates<'tcx>(
- infcx: &InferCtxt<'_, 'tcx>,
+ tcx: TyCtxt<'tcx>,
impl1_def_id: LocalDefId,
impl1_substs: SubstsRef<'tcx>,
impl2_node: Node,
impl2_substs: SubstsRef<'tcx>,
span: Span,
) {
- let tcx = infcx.tcx;
let instantiated = tcx.predicates_of(impl1_def_id).instantiate(tcx, impl1_substs);
let impl1_predicates: Vec<_> = traits::elaborate_predicates_with_span(
tcx,
@@ -343,19 +351,21 @@ fn check_predicates<'tcx>(
// Include the well-formed predicates of the type parameters of the impl.
for arg in tcx.impl_trait_ref(impl1_def_id).unwrap().substs {
- if let Some(obligations) = wf::obligations(
+ let infcx = &tcx.infer_ctxt().build();
+ let obligations = wf::obligations(
infcx,
tcx.param_env(impl1_def_id),
tcx.hir().local_def_id_to_hir_id(impl1_def_id),
0,
arg,
span,
- ) {
- impl2_predicates.extend(
- traits::elaborate_obligations(tcx, obligations)
- .map(|obligation| obligation.predicate),
- )
- }
+ )
+ .unwrap();
+
+ assert!(!obligations.needs_infer());
+ impl2_predicates.extend(
+ traits::elaborate_obligations(tcx, obligations).map(|obligation| obligation.predicate),
+ )
}
impl2_predicates.extend(
traits::elaborate_predicates_with_span(tcx, always_applicable_traits)
@@ -418,13 +428,10 @@ fn trait_predicate_kind<'tcx>(
predicate: ty::Predicate<'tcx>,
) -> Option<TraitSpecializationKind> {
match predicate.kind().skip_binder() {
- ty::PredicateKind::Trait(ty::TraitPredicate {
- trait_ref,
- constness: ty::BoundConstness::NotConst,
- polarity: _,
- }) => Some(tcx.trait_def(trait_ref.def_id).specialization_kind),
- ty::PredicateKind::Trait(_)
- | ty::PredicateKind::RegionOutlives(_)
+ ty::PredicateKind::Trait(ty::TraitPredicate { trait_ref, constness: _, polarity: _ }) => {
+ Some(tcx.trait_def(trait_ref.def_id).specialization_kind)
+ }
+ ty::PredicateKind::RegionOutlives(_)
| ty::PredicateKind::TypeOutlives(_)
| ty::PredicateKind::Projection(_)
| ty::PredicateKind::WellFormed(_)
diff --git a/compiler/rustc_hir_analysis/src/lib.rs b/compiler/rustc_hir_analysis/src/lib.rs
new file mode 100644
index 000000000..525cd2419
--- /dev/null
+++ b/compiler/rustc_hir_analysis/src/lib.rs
@@ -0,0 +1,552 @@
+/*!
+
+# typeck
+
+The type checker is responsible for:
+
+1. Determining the type of each expression.
+2. Resolving methods and traits.
+3. Guaranteeing that most type rules are met. ("Most?", you say, "why most?"
+ Well, dear reader, read on.)
+
+The main entry point is [`check_crate()`]. Type checking operates in
+several major phases:
+
+1. The collect phase first passes over all items and determines their
+ type, without examining their "innards".
+
+2. Variance inference then runs to compute the variance of each parameter.
+
+3. Coherence checks for overlapping or orphaned impls.
+
+4. Finally, the check phase checks function bodies and so forth.
+ Within the check phase, we check each function body one at a time
+ (bodies of function expressions are checked as part of the
+ containing function). Inference is used to supply types wherever
+ they are unknown. The actual checking of a function itself has
+ several phases (check, regionck, writeback), as discussed in the
+ documentation for the [`check`] module.
+
+The type checker is organized into various submodules, which are documented
+independently:
+
+- astconv: converts the AST representation of types
+ into the `ty` representation.
+
+- collect: computes the types of each top-level item and enters them into
+ the `tcx.types` table for later use.
+
+- coherence: enforces coherence rules, builds some tables.
+
+- variance: variance inference
+
+- outlives: outlives inference
+
+- check: walks over function bodies and type checks them, inferring types for
+ local variables, type parameters, etc as necessary.
+
+- infer: finds the types to use for each type variable such that
+ all subtyping and assignment constraints are met. In essence, the check
+ module specifies the constraints, and the infer module solves them.
+
+## Note
+
+This API is completely unstable and subject to change.
+
+*/
+
+#![allow(rustc::potential_query_instability)]
+#![doc(html_root_url = "https://doc.rust-lang.org/nightly/nightly-rustc/")]
+#![feature(box_patterns)]
+#![feature(control_flow_enum)]
+#![feature(drain_filter)]
+#![feature(hash_drain_filter)]
+#![feature(if_let_guard)]
+#![feature(is_sorted)]
+#![feature(iter_intersperse)]
+#![feature(let_chains)]
+#![feature(min_specialization)]
+#![feature(never_type)]
+#![feature(once_cell)]
+#![feature(slice_partition_dedup)]
+#![feature(try_blocks)]
+#![feature(is_some_and)]
+#![feature(type_alias_impl_trait)]
+#![recursion_limit = "256"]
+
+#[macro_use]
+extern crate tracing;
+
+#[macro_use]
+extern crate rustc_middle;
+
+// These are used by Clippy.
+pub mod check;
+
+pub mod astconv;
+mod bounds;
+mod check_unused;
+mod coherence;
+// FIXME: This module shouldn't be public.
+pub mod collect;
+mod constrained_generic_params;
+mod errors;
+pub mod hir_wf_check;
+mod impl_wf_check;
+mod outlives;
+pub mod structured_errors;
+mod variance;
+
+use rustc_errors::{struct_span_err, ErrorGuaranteed};
+use rustc_hir as hir;
+use rustc_hir::def_id::DefId;
+use rustc_hir::{Node, CRATE_HIR_ID};
+use rustc_infer::infer::{InferOk, TyCtxtInferExt};
+use rustc_middle::middle;
+use rustc_middle::ty::query::Providers;
+use rustc_middle::ty::{self, Ty, TyCtxt};
+use rustc_middle::util;
+use rustc_session::config::EntryFnType;
+use rustc_span::{symbol::sym, Span, DUMMY_SP};
+use rustc_target::spec::abi::Abi;
+use rustc_trait_selection::traits::error_reporting::TypeErrCtxtExt as _;
+use rustc_trait_selection::traits::{self, ObligationCause, ObligationCauseCode};
+
+use std::iter;
+
+use astconv::AstConv;
+use bounds::Bounds;
+
+fn require_c_abi_if_c_variadic(tcx: TyCtxt<'_>, decl: &hir::FnDecl<'_>, abi: Abi, span: Span) {
+ match (decl.c_variadic, abi) {
+ // The function has the correct calling convention, or isn't a "C-variadic" function.
+ (false, _) | (true, Abi::C { .. }) | (true, Abi::Cdecl { .. }) => {}
+ // The function is a "C-variadic" function with an incorrect calling convention.
+ (true, _) => {
+ let mut err = struct_span_err!(
+ tcx.sess,
+ span,
+ E0045,
+ "C-variadic function must have C or cdecl calling convention"
+ );
+ err.span_label(span, "C-variadics require C or cdecl calling convention").emit();
+ }
+ }
+}
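+
+// Illustrative user code for the check above (a sketch, not from this crate):
+// a C-variadic declaration is accepted only under a C-compatible ABI.
+//
+//     extern "C" {
+//         fn ok(fmt: *const u8, ...); // accepted: C calling convention
+//     }
+//     extern "Rust" {
+//         fn bad(fmt: *const u8, ...); // error[E0045]: C-variadic function must have
+//                                      // C or cdecl calling convention
+//     }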
+
+fn require_same_types<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ cause: &ObligationCause<'tcx>,
+ expected: Ty<'tcx>,
+ actual: Ty<'tcx>,
+) -> bool {
+ let infcx = &tcx.infer_ctxt().build();
+ let param_env = ty::ParamEnv::empty();
+ let errors = match infcx.at(cause, param_env).eq(expected, actual) {
+ Ok(InferOk { obligations, .. }) => traits::fully_solve_obligations(infcx, obligations),
+ Err(err) => {
+ infcx.err_ctxt().report_mismatched_types(cause, expected, actual, err).emit();
+ return false;
+ }
+ };
+
+ match &errors[..] {
+ [] => true,
+ errors => {
+ infcx.err_ctxt().report_fulfillment_errors(errors, None, false);
+ false
+ }
+ }
+}
+
+fn check_main_fn_ty(tcx: TyCtxt<'_>, main_def_id: DefId) {
+ let main_fnsig = tcx.fn_sig(main_def_id);
+ let main_span = tcx.def_span(main_def_id);
+
+ fn main_fn_diagnostics_hir_id(tcx: TyCtxt<'_>, def_id: DefId, sp: Span) -> hir::HirId {
+ if let Some(local_def_id) = def_id.as_local() {
+ let hir_id = tcx.hir().local_def_id_to_hir_id(local_def_id);
+ let hir_type = tcx.type_of(local_def_id);
+ if !matches!(hir_type.kind(), ty::FnDef(..)) {
+ span_bug!(sp, "main has a non-function type: found `{}`", hir_type);
+ }
+ hir_id
+ } else {
+ CRATE_HIR_ID
+ }
+ }
+
+ fn main_fn_generics_params_span(tcx: TyCtxt<'_>, def_id: DefId) -> Option<Span> {
+ if !def_id.is_local() {
+ return None;
+ }
+ let hir_id = tcx.hir().local_def_id_to_hir_id(def_id.expect_local());
+ match tcx.hir().find(hir_id) {
+ Some(Node::Item(hir::Item { kind: hir::ItemKind::Fn(_, ref generics, _), .. })) => {
+ if !generics.params.is_empty() {
+ Some(generics.span)
+ } else {
+ None
+ }
+ }
+ _ => {
+ span_bug!(tcx.def_span(def_id), "main has a non-function type");
+ }
+ }
+ }
+
+ fn main_fn_where_clauses_span(tcx: TyCtxt<'_>, def_id: DefId) -> Option<Span> {
+ if !def_id.is_local() {
+ return None;
+ }
+ let hir_id = tcx.hir().local_def_id_to_hir_id(def_id.expect_local());
+ match tcx.hir().find(hir_id) {
+ Some(Node::Item(hir::Item { kind: hir::ItemKind::Fn(_, ref generics, _), .. })) => {
+ Some(generics.where_clause_span)
+ }
+ _ => {
+ span_bug!(tcx.def_span(def_id), "main has a non-function type");
+ }
+ }
+ }
+
+ fn main_fn_asyncness_span(tcx: TyCtxt<'_>, def_id: DefId) -> Option<Span> {
+ if !def_id.is_local() {
+ return None;
+ }
+ Some(tcx.def_span(def_id))
+ }
+
+ fn main_fn_return_type_span(tcx: TyCtxt<'_>, def_id: DefId) -> Option<Span> {
+ if !def_id.is_local() {
+ return None;
+ }
+ let hir_id = tcx.hir().local_def_id_to_hir_id(def_id.expect_local());
+ match tcx.hir().find(hir_id) {
+ Some(Node::Item(hir::Item { kind: hir::ItemKind::Fn(ref fn_sig, _, _), .. })) => {
+ Some(fn_sig.decl.output.span())
+ }
+ _ => {
+ span_bug!(tcx.def_span(def_id), "main has a non-function type");
+ }
+ }
+ }
+
+ let mut error = false;
+ let main_diagnostics_hir_id = main_fn_diagnostics_hir_id(tcx, main_def_id, main_span);
+ let main_fn_generics = tcx.generics_of(main_def_id);
+ let main_fn_predicates = tcx.predicates_of(main_def_id);
+ if main_fn_generics.count() != 0 || !main_fnsig.bound_vars().is_empty() {
+ let generics_param_span = main_fn_generics_params_span(tcx, main_def_id);
+ let msg = "`main` function is not allowed to have generic \
+ parameters";
+ let mut diag =
+ struct_span_err!(tcx.sess, generics_param_span.unwrap_or(main_span), E0131, "{}", msg);
+ if let Some(generics_param_span) = generics_param_span {
+ let label = "`main` cannot have generic parameters";
+ diag.span_label(generics_param_span, label);
+ }
+ diag.emit();
+ error = true;
+ } else if !main_fn_predicates.predicates.is_empty() {
+ // generics may bring in implicit predicates, so we skip this check if generics is present.
+ let generics_where_clauses_span = main_fn_where_clauses_span(tcx, main_def_id);
+ let mut diag = struct_span_err!(
+ tcx.sess,
+ generics_where_clauses_span.unwrap_or(main_span),
+ E0646,
+ "`main` function is not allowed to have a `where` clause"
+ );
+ if let Some(generics_where_clauses_span) = generics_where_clauses_span {
+ diag.span_label(generics_where_clauses_span, "`main` cannot have a `where` clause");
+ }
+ diag.emit();
+ error = true;
+ }
+
+ let main_asyncness = tcx.asyncness(main_def_id);
+ if let hir::IsAsync::Async = main_asyncness {
+ let mut diag = struct_span_err!(
+ tcx.sess,
+ main_span,
+ E0752,
+ "`main` function is not allowed to be `async`"
+ );
+ let asyncness_span = main_fn_asyncness_span(tcx, main_def_id);
+ if let Some(asyncness_span) = asyncness_span {
+ diag.span_label(asyncness_span, "`main` function is not allowed to be `async`");
+ }
+ diag.emit();
+ error = true;
+ }
+
+ for attr in tcx.get_attrs(main_def_id, sym::track_caller) {
+ tcx.sess
+ .struct_span_err(attr.span, "`main` function is not allowed to be `#[track_caller]`")
+ .span_label(main_span, "`main` function is not allowed to be `#[track_caller]`")
+ .emit();
+ error = true;
+ }
+
+ if error {
+ return;
+ }
+
+ let expected_return_type;
+ if let Some(term_did) = tcx.lang_items().termination() {
+ let return_ty = main_fnsig.output();
+ let return_ty_span = main_fn_return_type_span(tcx, main_def_id).unwrap_or(main_span);
+ if !return_ty.bound_vars().is_empty() {
+ let msg = "`main` function return type is not allowed to have generic \
+ parameters";
+ struct_span_err!(tcx.sess, return_ty_span, E0131, "{}", msg).emit();
+ error = true;
+ }
+ let return_ty = return_ty.skip_binder();
+ let infcx = tcx.infer_ctxt().build();
+ // Main should have no WC, so empty param env is OK here.
+ let param_env = ty::ParamEnv::empty();
+ let cause = traits::ObligationCause::new(
+ return_ty_span,
+ main_diagnostics_hir_id,
+ ObligationCauseCode::MainFunctionType,
+ );
+ let ocx = traits::ObligationCtxt::new(&infcx);
+ let norm_return_ty = ocx.normalize(cause.clone(), param_env, return_ty);
+ ocx.register_bound(cause, param_env, norm_return_ty, term_did);
+ let errors = ocx.select_all_or_error();
+ if !errors.is_empty() {
+ infcx.err_ctxt().report_fulfillment_errors(&errors, None, false);
+ error = true;
+ }
+ // now we can take the return type of the given main function
+ expected_return_type = main_fnsig.output();
+ } else {
+ // standard () main return type
+ expected_return_type = ty::Binder::dummy(tcx.mk_unit());
+ }
+
+ if error {
+ return;
+ }
+
+ let se_ty = tcx.mk_fn_ptr(expected_return_type.map_bound(|expected_return_type| {
+ tcx.mk_fn_sig(iter::empty(), expected_return_type, false, hir::Unsafety::Normal, Abi::Rust)
+ }));
+
+ require_same_types(
+ tcx,
+ &ObligationCause::new(
+ main_span,
+ main_diagnostics_hir_id,
+ ObligationCauseCode::MainFunctionType,
+ ),
+ se_ty,
+ tcx.mk_fn_ptr(main_fnsig),
+ );
+}
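+
+// Illustrative user-code `main` signatures rejected by the checks above (a sketch):
+//
+//     fn main<T>() {}              // error[E0131]: generic parameters
+//     fn main() where u8: Copy {}  // error[E0646]: `where` clause
+//     async fn main() {}           // error[E0752]: `async`
+//
+// A plain `fn main() {}` passes these checks, as does any `main` whose return
+// type implements the `Termination` lang item (e.g. `Result<(), std::io::Error>`).
+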
+fn check_start_fn_ty(tcx: TyCtxt<'_>, start_def_id: DefId) {
+ let start_def_id = start_def_id.expect_local();
+ let start_id = tcx.hir().local_def_id_to_hir_id(start_def_id);
+ let start_span = tcx.def_span(start_def_id);
+ let start_t = tcx.type_of(start_def_id);
+ match start_t.kind() {
+ ty::FnDef(..) => {
+ if let Some(Node::Item(it)) = tcx.hir().find(start_id) {
+ if let hir::ItemKind::Fn(ref sig, ref generics, _) = it.kind {
+ let mut error = false;
+ if !generics.params.is_empty() {
+ struct_span_err!(
+ tcx.sess,
+ generics.span,
+ E0132,
+ "start function is not allowed to have type parameters"
+ )
+ .span_label(generics.span, "start function cannot have type parameters")
+ .emit();
+ error = true;
+ }
+ if generics.has_where_clause_predicates {
+ struct_span_err!(
+ tcx.sess,
+ generics.where_clause_span,
+ E0647,
+ "start function is not allowed to have a `where` clause"
+ )
+ .span_label(
+ generics.where_clause_span,
+ "start function cannot have a `where` clause",
+ )
+ .emit();
+ error = true;
+ }
+ if let hir::IsAsync::Async = sig.header.asyncness {
+ let span = tcx.def_span(it.owner_id);
+ struct_span_err!(
+ tcx.sess,
+ span,
+ E0752,
+ "`start` is not allowed to be `async`"
+ )
+ .span_label(span, "`start` is not allowed to be `async`")
+ .emit();
+ error = true;
+ }
+
+ let attrs = tcx.hir().attrs(start_id);
+ for attr in attrs {
+ if attr.has_name(sym::track_caller) {
+ tcx.sess
+ .struct_span_err(
+ attr.span,
+ "`start` is not allowed to be `#[track_caller]`",
+ )
+ .span_label(
+ start_span,
+ "`start` is not allowed to be `#[track_caller]`",
+ )
+ .emit();
+ error = true;
+ }
+ }
+
+ if error {
+ return;
+ }
+ }
+ }
+
+ let se_ty = tcx.mk_fn_ptr(ty::Binder::dummy(tcx.mk_fn_sig(
+ [tcx.types.isize, tcx.mk_imm_ptr(tcx.mk_imm_ptr(tcx.types.u8))].iter().cloned(),
+ tcx.types.isize,
+ false,
+ hir::Unsafety::Normal,
+ Abi::Rust,
+ )));
+
+ require_same_types(
+ tcx,
+ &ObligationCause::new(start_span, start_id, ObligationCauseCode::StartFunctionType),
+ se_ty,
+ tcx.mk_fn_ptr(tcx.fn_sig(start_def_id)),
+ );
+ }
+ _ => {
+ span_bug!(start_span, "start has a non-function type: found `{}`", start_t);
+ }
+ }
+}
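+
+// Illustrative user-code `#[start]` function with the expected signature
+// `fn(isize, *const *const u8) -> isize` (a sketch; requires `#![feature(start)]`):
+//
+//     #[start]
+//     fn entry(_argc: isize, _argv: *const *const u8) -> isize {
+//         0
+//     }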
+
+fn check_for_entry_fn(tcx: TyCtxt<'_>) {
+ match tcx.entry_fn(()) {
+ Some((def_id, EntryFnType::Main { .. })) => check_main_fn_ty(tcx, def_id),
+ Some((def_id, EntryFnType::Start)) => check_start_fn_ty(tcx, def_id),
+ _ => {}
+ }
+}
+
+pub fn provide(providers: &mut Providers) {
+ collect::provide(providers);
+ coherence::provide(providers);
+ check::provide(providers);
+ variance::provide(providers);
+ outlives::provide(providers);
+ impl_wf_check::provide(providers);
+ hir_wf_check::provide(providers);
+}
+
+pub fn check_crate(tcx: TyCtxt<'_>) -> Result<(), ErrorGuaranteed> {
+ let _prof_timer = tcx.sess.timer("type_check_crate");
+
+ // This ensures that later parts of type checking can assume that items
+ // have valid types and will not error.
+ // FIXME(matthewjasper) We shouldn't need to use `track_errors`.
+ tcx.sess.track_errors(|| {
+ tcx.sess.time("type_collecting", || {
+ tcx.hir().for_each_module(|module| tcx.ensure().collect_mod_item_types(module))
+ });
+ })?;
+
+ if tcx.features().rustc_attrs {
+ tcx.sess.track_errors(|| {
+ tcx.sess.time("outlives_testing", || outlives::test::test_inferred_outlives(tcx));
+ })?;
+ }
+
+ tcx.sess.track_errors(|| {
+ tcx.sess.time("impl_wf_inference", || {
+ tcx.hir().for_each_module(|module| tcx.ensure().check_mod_impl_wf(module))
+ });
+ })?;
+
+ tcx.sess.track_errors(|| {
+ tcx.sess.time("coherence_checking", || {
+ for &trait_def_id in tcx.all_local_trait_impls(()).keys() {
+ tcx.ensure().coherent_trait(trait_def_id);
+ }
+
+ // these queries are executed for side-effects (error reporting):
+ tcx.ensure().crate_inherent_impls(());
+ tcx.ensure().crate_inherent_impls_overlap_check(());
+ });
+ })?;
+
+ if tcx.features().rustc_attrs {
+ tcx.sess.track_errors(|| {
+ tcx.sess.time("variance_testing", || variance::test::test_variance(tcx));
+ })?;
+ }
+
+ tcx.sess.track_errors(|| {
+ tcx.sess.time("wf_checking", || {
+ tcx.hir().par_for_each_module(|module| tcx.ensure().check_mod_type_wf(module))
+ });
+ })?;
+
+ // NOTE: This is copy/pasted in librustdoc/core.rs and should be kept in sync.
+ tcx.sess.time("item_types_checking", || {
+ tcx.hir().for_each_module(|module| tcx.ensure().check_mod_item_types(module))
+ });
+
+ tcx.sess.time("item_bodies_checking", || tcx.typeck_item_bodies(()));
+
+ check_unused::check_crate(tcx);
+ check_for_entry_fn(tcx);
+
+ if let Some(reported) = tcx.sess.has_errors() { Err(reported) } else { Ok(()) }
+}
+
+/// A quasi-deprecated helper used in rustdoc and clippy to get
+/// the type from a HIR node.
+pub fn hir_ty_to_ty<'tcx>(tcx: TyCtxt<'tcx>, hir_ty: &hir::Ty<'_>) -> Ty<'tcx> {
+ // In case there are any projections, etc., find the "environment"
+ // def-ID that will be used to determine the traits/predicates in
+ // scope. This is derived from the enclosing item-like thing.
+ let env_def_id = tcx.hir().get_parent_item(hir_ty.hir_id);
+ let item_cx = self::collect::ItemCtxt::new(tcx, env_def_id.to_def_id());
+ <dyn AstConv<'_>>::ast_ty_to_ty(&item_cx, hir_ty)
+}
+
+pub fn hir_trait_to_predicates<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ hir_trait: &hir::TraitRef<'_>,
+ self_ty: Ty<'tcx>,
+) -> Bounds<'tcx> {
+ // In case there are any projections, etc., find the "environment"
+ // def-ID that will be used to determine the traits/predicates in
+ // scope. This is derived from the enclosing item-like thing.
+ let env_def_id = tcx.hir().get_parent_item(hir_trait.hir_ref_id);
+ let item_cx = self::collect::ItemCtxt::new(tcx, env_def_id.to_def_id());
+ let mut bounds = Bounds::default();
+ let _ = <dyn AstConv<'_>>::instantiate_poly_trait_ref(
+ &item_cx,
+ hir_trait,
+ DUMMY_SP,
+ ty::BoundConstness::NotConst,
+ self_ty,
+ &mut bounds,
+ true,
+ );
+
+ bounds
+}
diff --git a/compiler/rustc_typeck/src/outlives/explicit.rs b/compiler/rustc_hir_analysis/src/outlives/explicit.rs
index 7534482cc..7534482cc 100644
--- a/compiler/rustc_typeck/src/outlives/explicit.rs
+++ b/compiler/rustc_hir_analysis/src/outlives/explicit.rs
diff --git a/compiler/rustc_typeck/src/outlives/implicit_infer.rs b/compiler/rustc_hir_analysis/src/outlives/implicit_infer.rs
index 3b779280e..90c6edb65 100644
--- a/compiler/rustc_typeck/src/outlives/implicit_infer.rs
+++ b/compiler/rustc_hir_analysis/src/outlives/implicit_infer.rs
@@ -1,8 +1,8 @@
use rustc_data_structures::fx::FxHashMap;
use rustc_hir::def::DefKind;
use rustc_hir::def_id::DefId;
-use rustc_middle::ty::subst::{GenericArg, GenericArgKind, Subst};
use rustc_middle::ty::{self, DefIdTree, Ty, TyCtxt};
+use rustc_middle::ty::{GenericArg, GenericArgKind};
use rustc_span::Span;
use super::explicit::ExplicitPredicatesMap;
@@ -29,7 +29,7 @@ pub(super) fn infer_predicates<'tcx>(
// Visit all the crates and infer predicates
for id in tcx.hir().items() {
- let item_did = id.def_id;
+ let item_did = id.owner_id;
debug!("InferVisitor::visit_item(item={:?})", item_did);
diff --git a/compiler/rustc_hir_analysis/src/outlives/mod.rs b/compiler/rustc_hir_analysis/src/outlives/mod.rs
new file mode 100644
index 000000000..e50c26765
--- /dev/null
+++ b/compiler/rustc_hir_analysis/src/outlives/mod.rs
@@ -0,0 +1,129 @@
+use hir::Node;
+use rustc_hir as hir;
+use rustc_hir::def_id::DefId;
+use rustc_middle::ty::query::Providers;
+use rustc_middle::ty::subst::GenericArgKind;
+use rustc_middle::ty::{self, CratePredicatesMap, ToPredicate, TyCtxt};
+use rustc_span::symbol::sym;
+use rustc_span::Span;
+
+mod explicit;
+mod implicit_infer;
+/// Code to write unit tests for outlives.
+pub mod test;
+mod utils;
+
+pub fn provide(providers: &mut Providers) {
+ *providers = Providers { inferred_outlives_of, inferred_outlives_crate, ..*providers };
+}
+
+fn inferred_outlives_of(tcx: TyCtxt<'_>, item_def_id: DefId) -> &[(ty::Predicate<'_>, Span)] {
+ let id = tcx.hir().local_def_id_to_hir_id(item_def_id.expect_local());
+
+ if matches!(tcx.def_kind(item_def_id), hir::def::DefKind::AnonConst) && tcx.lazy_normalization()
+ {
+ if tcx.hir().opt_const_param_default_param_hir_id(id).is_some() {
+ // In `generics_of` we set the generics' parent to be our parent's parent, which means that
+ // we lose out on the predicates of our actual parent if we don't return those predicates here.
+ // (See the comment in `generics_of` for more information on why the parent shenanigans are necessary.)
+ //
+ // struct Foo<'a, 'b, const N: usize = { ... }>(&'a &'b ());
+ // ^^^ ^^^^^^^ the def id we are calling
+ // ^^^ inferred_outlives_of on
+ // parent item we don't have set as the
+ // parent of generics returned by `generics_of`
+ //
+ // In the above code we want the anon const to have predicates in its param env for `'b: 'a`
+ let item_def_id = tcx.hir().get_parent_item(id);
+ // In the above code example we would be calling `inferred_outlives_of(Foo)` here
+ return tcx.inferred_outlives_of(item_def_id);
+ }
+ }
+
+ match tcx.hir().get(id) {
+ Node::Item(item) => match item.kind {
+ hir::ItemKind::Struct(..) | hir::ItemKind::Enum(..) | hir::ItemKind::Union(..) => {
+ let crate_map = tcx.inferred_outlives_crate(());
+
+ let predicates = crate_map.predicates.get(&item_def_id).copied().unwrap_or(&[]);
+
+ if tcx.has_attr(item_def_id, sym::rustc_outlives) {
+ let mut pred: Vec<String> = predicates
+ .iter()
+ .map(|(out_pred, _)| match out_pred.kind().skip_binder() {
+ ty::PredicateKind::RegionOutlives(p) => p.to_string(),
+ ty::PredicateKind::TypeOutlives(p) => p.to_string(),
+ err => bug!("unexpected predicate {:?}", err),
+ })
+ .collect();
+ pred.sort();
+
+ let span = tcx.def_span(item_def_id);
+ let mut err = tcx.sess.struct_span_err(span, "rustc_outlives");
+ for p in &pred {
+ err.note(p);
+ }
+ err.emit();
+ }
+
+ debug!("inferred_outlives_of({:?}) = {:?}", item_def_id, predicates);
+
+ predicates
+ }
+
+ _ => &[],
+ },
+
+ _ => &[],
+ }
+}
+
+fn inferred_outlives_crate(tcx: TyCtxt<'_>, (): ()) -> CratePredicatesMap<'_> {
+ // Compute a map from each struct/enum/union S to the **explicit**
+ // outlives predicates (`T: 'a`, `'a: 'b`) that the user wrote.
+ // Typically there won't be many of these, except in older code where
+ // they were mandatory. Nonetheless, we have to ensure that every such
+ // predicate is satisfied, so they form a kind of base set of requirements
+ // for the type.
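+ //
+ // For example (illustrative): `struct Ref<'a, T: 'a>(&'a T);` carries the
+ // explicit predicate `T: 'a`, and `struct Nested<'a, 'b: 'a>(&'a &'b u32);`
+ // carries the explicit predicate `'b: 'a`.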
+
+ // Compute the inferred predicates
+ let global_inferred_outlives = implicit_infer::infer_predicates(tcx);
+
+ // Convert the inferred predicates into the "collected" form the
+ // global data structure expects.
+ //
+ // FIXME -- consider correcting impedance mismatch in some way,
+ // probably by updating the global data structure.
+ let predicates = global_inferred_outlives
+ .iter()
+ .map(|(&def_id, set)| {
+ let predicates = &*tcx.arena.alloc_from_iter(set.0.iter().filter_map(
+ |(ty::OutlivesPredicate(kind1, region2), &span)| {
+ match kind1.unpack() {
+ GenericArgKind::Type(ty1) => Some((
+ ty::Binder::dummy(ty::PredicateKind::TypeOutlives(
+ ty::OutlivesPredicate(ty1, *region2),
+ ))
+ .to_predicate(tcx),
+ span,
+ )),
+ GenericArgKind::Lifetime(region1) => Some((
+ ty::Binder::dummy(ty::PredicateKind::RegionOutlives(
+ ty::OutlivesPredicate(region1, *region2),
+ ))
+ .to_predicate(tcx),
+ span,
+ )),
+ GenericArgKind::Const(_) => {
+ // Generic consts don't impose any constraints.
+ None
+ }
+ }
+ },
+ ));
+ (def_id, predicates)
+ })
+ .collect();
+
+ ty::CratePredicatesMap { predicates }
+}
diff --git a/compiler/rustc_hir_analysis/src/outlives/test.rs b/compiler/rustc_hir_analysis/src/outlives/test.rs
new file mode 100644
index 000000000..fa2ac5659
--- /dev/null
+++ b/compiler/rustc_hir_analysis/src/outlives/test.rs
@@ -0,0 +1,21 @@
+use rustc_errors::struct_span_err;
+use rustc_middle::ty::TyCtxt;
+use rustc_span::symbol::sym;
+
+pub fn test_inferred_outlives(tcx: TyCtxt<'_>) {
+ for id in tcx.hir().items() {
+ // For unit testing: check for a special "rustc_outlives"
+ // attribute and report an error with various results if found.
+ if tcx.has_attr(id.owner_id.to_def_id(), sym::rustc_outlives) {
+ let inferred_outlives_of = tcx.inferred_outlives_of(id.owner_id);
+ struct_span_err!(
+ tcx.sess,
+ tcx.def_span(id.owner_id),
+ E0640,
+ "{:?}",
+ inferred_outlives_of
+ )
+ .emit();
+ }
+ }
+}
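+
+// Illustrative use of this test hook from a test crate (a sketch; requires
+// `#![feature(rustc_attrs)]`):
+//
+//     #[rustc_outlives]
+//     struct Foo<'a, T> {
+//         field: &'a T, // the E0640 error above lists the inferred `T: 'a`
+//     }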
diff --git a/compiler/rustc_hir_analysis/src/outlives/utils.rs b/compiler/rustc_hir_analysis/src/outlives/utils.rs
new file mode 100644
index 000000000..0409c7081
--- /dev/null
+++ b/compiler/rustc_hir_analysis/src/outlives/utils.rs
@@ -0,0 +1,186 @@
+use rustc_infer::infer::outlives::components::{push_outlives_components, Component};
+use rustc_middle::ty::subst::{GenericArg, GenericArgKind};
+use rustc_middle::ty::{self, Region, Ty, TyCtxt};
+use rustc_span::Span;
+use smallvec::smallvec;
+use std::collections::BTreeMap;
+
+/// Tracks the `T: 'a` or `'a: 'b` predicates that we have inferred
+/// must be added to the struct header.
+pub(crate) type RequiredPredicates<'tcx> =
+ BTreeMap<ty::OutlivesPredicate<GenericArg<'tcx>, ty::Region<'tcx>>, Span>;
+
+/// Given a requirement `T: 'a` or `'b: 'a`, deduce the
+/// outlives components and add them to `required_predicates`.
+pub(crate) fn insert_outlives_predicate<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ kind: GenericArg<'tcx>,
+ outlived_region: Region<'tcx>,
+ span: Span,
+ required_predicates: &mut RequiredPredicates<'tcx>,
+) {
+ // If the `'a` region is bound within the field type itself, we
+ // don't want to propagate this constraint to the header.
+ if !is_free_region(outlived_region) {
+ return;
+ }
+
+ match kind.unpack() {
+ GenericArgKind::Type(ty) => {
+ // `T: 'outlived_region` for some type `T`
+ // But T could be a lot of things:
+ // e.g., if `T = &'b u32`, then `'b: 'outlived_region` is
+ // what we want to add.
+ //
+ // Or if within `struct Foo<U>` you had `T = Vec<U>`, then
+ // we would want to add `U: 'outlived_region`
+ let mut components = smallvec![];
+ push_outlives_components(tcx, ty, &mut components);
+ for component in components {
+ match component {
+ Component::Region(r) => {
+ // This would arise from something like:
+ //
+ // ```
+ // struct Foo<'a, 'b> {
+ // x: &'a &'b u32
+ // }
+ // ```
+ //
+ // Here `outlived_region = 'a` and `kind = &'b
+ // u32`. Decomposing `&'b u32` into
+ // components would yield `'b`, and we add the
+ // where clause that `'b: 'a`.
+ insert_outlives_predicate(
+ tcx,
+ r.into(),
+ outlived_region,
+ span,
+ required_predicates,
+ );
+ }
+
+ Component::Param(param_ty) => {
+ // param_ty: ty::ParamTy
+ // This would arise from something like:
+ //
+ // ```
+ // struct Foo<'a, U> {
+ // x: &'a Vec<U>
+ // }
+ // ```
+ //
+ // Here `outlived_region = 'a` and `kind =
+ // Vec<U>`. Decomposing `Vec<U>` into
+ // components would yield `U`, and we add the
+ // where clause that `U: 'a`.
+ let ty: Ty<'tcx> = param_ty.to_ty(tcx);
+ required_predicates
+ .entry(ty::OutlivesPredicate(ty.into(), outlived_region))
+ .or_insert(span);
+ }
+
+ Component::Projection(proj_ty) => {
+ // This would arise from something like:
+ //
+ // ```
+ // struct Foo<'a, T: Iterator> {
+ // x: &'a <T as Iterator>::Item
+ // }
+ // ```
+ //
+ // Here we want to add an explicit `where <T as Iterator>::Item: 'a`.
+ let ty: Ty<'tcx> = tcx.mk_projection(proj_ty.item_def_id, proj_ty.substs);
+ required_predicates
+ .entry(ty::OutlivesPredicate(ty.into(), outlived_region))
+ .or_insert(span);
+ }
+
+ Component::Opaque(def_id, substs) => {
+ // This would arise from something like:
+ //
+ // ```rust
+ // type Opaque<T> = impl Sized;
+ // fn defining<T>() -> Opaque<T> {}
+ // struct Ss<'a, T>(&'a Opaque<T>);
+ // ```
+ //
+ // Here we want to have an implied bound `Opaque<T>: 'a`
+
+ let ty = tcx.mk_opaque(def_id, substs);
+ required_predicates
+ .entry(ty::OutlivesPredicate(ty.into(), outlived_region))
+ .or_insert(span);
+ }
+
+ Component::EscapingProjection(_) => {
+ // As above, but the projection involves
+ // late-bound regions. Therefore, the WF
+ // requirement is not checked in type definition
+ // but at fn call site, so ignore it.
+ //
+ // ```
+ // struct Foo<'a, T: Iterator> {
+ // x: for<'b> fn(<&'b T as Iterator>::Item)
+ // // ^^^^^^^^^^^^^^^^^^^^^^^^^
+ // }
+ // ```
+ //
+ // Since `'b` is not in scope on `Foo`, we can't
+ // do anything here, so we ignore it.
+ }
+
+ Component::UnresolvedInferenceVariable(_) => bug!("not using infcx"),
+ }
+ }
+ }
+
+ GenericArgKind::Lifetime(r) => {
+ if !is_free_region(r) {
+ return;
+ }
+ required_predicates.entry(ty::OutlivesPredicate(kind, outlived_region)).or_insert(span);
+ }
+
+ GenericArgKind::Const(_) => {
+ // Generic consts don't impose any constraints.
+ }
+ }
+}
+
+fn is_free_region(region: Region<'_>) -> bool {
+ // First, screen for regions that might appear in a type header.
+ match *region {
+ // These correspond to `T: 'a` relationships:
+ //
+ // struct Foo<'a, T> {
+ // field: &'a T, // this would generate a ReEarlyBound referencing `'a`
+ // }
+ //
+ // We care about these, so fall through.
+ ty::ReEarlyBound(_) => true,
+
+ // These correspond to `T: 'static` relationships which can be
+ // rather surprising.
+ //
+ // struct Foo<'a, T> {
+ // field: &'static T, // this would generate a ReStatic
+ // }
+ ty::ReStatic => false,
+
+ // Late-bound regions can appear in `fn` types:
+ //
+ // struct Foo<T> {
+ // field: for<'b> fn(&'b T) // e.g., 'b here
+ // }
+ //
+ // The type above might generate a `T: 'b` bound, but we can
+ // ignore it. We can't put it on the struct header anyway.
+ ty::ReLateBound(..) => false,
+
+ // These regions don't appear in types from type declarations:
+ ty::ReErased | ty::ReVar(..) | ty::RePlaceholder(..) | ty::ReFree(..) => {
+ bug!("unexpected region in outlives inference: {:?}", region);
+ }
+ }
+}
diff --git a/compiler/rustc_typeck/src/structured_errors.rs b/compiler/rustc_hir_analysis/src/structured_errors.rs
index 0b46fce17..0b46fce17 100644
--- a/compiler/rustc_typeck/src/structured_errors.rs
+++ b/compiler/rustc_hir_analysis/src/structured_errors.rs
diff --git a/compiler/rustc_typeck/src/structured_errors/missing_cast_for_variadic_arg.rs b/compiler/rustc_hir_analysis/src/structured_errors/missing_cast_for_variadic_arg.rs
index 324df313e..324df313e 100644
--- a/compiler/rustc_typeck/src/structured_errors/missing_cast_for_variadic_arg.rs
+++ b/compiler/rustc_hir_analysis/src/structured_errors/missing_cast_for_variadic_arg.rs
diff --git a/compiler/rustc_typeck/src/structured_errors/sized_unsized_cast.rs b/compiler/rustc_hir_analysis/src/structured_errors/sized_unsized_cast.rs
index bb6088054..bb6088054 100644
--- a/compiler/rustc_typeck/src/structured_errors/sized_unsized_cast.rs
+++ b/compiler/rustc_hir_analysis/src/structured_errors/sized_unsized_cast.rs
diff --git a/compiler/rustc_typeck/src/structured_errors/wrong_number_of_generic_args.rs b/compiler/rustc_hir_analysis/src/structured_errors/wrong_number_of_generic_args.rs
index 99729391e..435912464 100644
--- a/compiler/rustc_typeck/src/structured_errors/wrong_number_of_generic_args.rs
+++ b/compiler/rustc_hir_analysis/src/structured_errors/wrong_number_of_generic_args.rs
@@ -4,7 +4,6 @@ use rustc_errors::{
MultiSpan,
};
use rustc_hir as hir;
-use rustc_middle::hir::map::fn_sig;
use rustc_middle::ty::{self as ty, AssocItems, AssocKind, TyCtxt};
use rustc_session::Session;
use rustc_span::def_id::DefId;
@@ -292,62 +291,60 @@ impl<'a, 'tcx> WrongNumberOfGenericArgs<'a, 'tcx> {
// Creates lifetime name suggestions from the lifetime parameter names
fn get_lifetime_args_suggestions_from_param_names(
&self,
- path_hir_id: Option<hir::HirId>,
+ path_hir_id: hir::HirId,
num_params_to_take: usize,
) -> String {
debug!(?path_hir_id);
- if let Some(path_hir_id) = path_hir_id {
- let mut ret = Vec::new();
- for (id, node) in self.tcx.hir().parent_iter(path_hir_id) {
- debug!(?id);
- let params = if let Some(generics) = node.generics() {
- generics.params
- } else if let hir::Node::Ty(ty) = node
- && let hir::TyKind::BareFn(bare_fn) = ty.kind
- {
- bare_fn.generic_params
- } else {
- &[]
- };
- ret.extend(params.iter().filter_map(|p| {
- let hir::GenericParamKind::Lifetime { kind: hir::LifetimeParamKind::Explicit }
- = p.kind
- else { return None };
- let hir::ParamName::Plain(name) = p.name else { return None };
- Some(name.to_string())
- }));
- // Suggest `'static` when in const/static item-like.
- if let hir::Node::Item(hir::Item {
- kind: hir::ItemKind::Static { .. } | hir::ItemKind::Const { .. },
- ..
- })
- | hir::Node::TraitItem(hir::TraitItem {
- kind: hir::TraitItemKind::Const { .. },
- ..
- })
- | hir::Node::ImplItem(hir::ImplItem {
- kind: hir::ImplItemKind::Const { .. },
- ..
- })
- | hir::Node::ForeignItem(hir::ForeignItem {
- kind: hir::ForeignItemKind::Static { .. },
- ..
- })
- | hir::Node::AnonConst(..) = node
- {
- ret.extend(
- std::iter::repeat("'static".to_owned())
- .take(num_params_to_take.saturating_sub(ret.len())),
- );
- }
- if ret.len() >= num_params_to_take {
- return ret[..num_params_to_take].join(", ");
- }
- // We cannot refer to lifetimes defined in an outer function.
- if let hir::Node::Item(_) = node {
- break;
- }
+ let mut ret = Vec::new();
+ for (id, node) in self.tcx.hir().parent_iter(path_hir_id) {
+ debug!(?id);
+ let params = if let Some(generics) = node.generics() {
+ generics.params
+ } else if let hir::Node::Ty(ty) = node
+ && let hir::TyKind::BareFn(bare_fn) = ty.kind
+ {
+ bare_fn.generic_params
+ } else {
+ &[]
+ };
+ ret.extend(params.iter().filter_map(|p| {
+ let hir::GenericParamKind::Lifetime { kind: hir::LifetimeParamKind::Explicit }
+ = p.kind
+ else { return None };
+ let hir::ParamName::Plain(name) = p.name else { return None };
+ Some(name.to_string())
+ }));
+ // Suggest `'static` when in const/static item-like.
+ if let hir::Node::Item(hir::Item {
+ kind: hir::ItemKind::Static { .. } | hir::ItemKind::Const { .. },
+ ..
+ })
+ | hir::Node::TraitItem(hir::TraitItem {
+ kind: hir::TraitItemKind::Const { .. },
+ ..
+ })
+ | hir::Node::ImplItem(hir::ImplItem {
+ kind: hir::ImplItemKind::Const { .. },
+ ..
+ })
+ | hir::Node::ForeignItem(hir::ForeignItem {
+ kind: hir::ForeignItemKind::Static { .. },
+ ..
+ })
+ | hir::Node::AnonConst(..) = node
+ {
+ ret.extend(
+ std::iter::repeat("'static".to_owned())
+ .take(num_params_to_take.saturating_sub(ret.len())),
+ );
+ }
+ if ret.len() >= num_params_to_take {
+ return ret[..num_params_to_take].join(", ");
+ }
+ // We cannot refer to lifetimes defined in an outer function.
+ if let hir::Node::Item(_) = node {
+ break;
}
}
@@ -368,7 +365,7 @@ impl<'a, 'tcx> WrongNumberOfGenericArgs<'a, 'tcx> {
&self,
num_params_to_take: usize,
) -> String {
- let fn_sig = self.tcx.hir().get_if_local(self.def_id).and_then(fn_sig);
+ let fn_sig = self.tcx.hir().get_if_local(self.def_id).and_then(hir::Node::fn_sig);
let is_used_in_input = |def_id| {
fn_sig.map_or(false, |fn_sig| {
fn_sig.decl.inputs.iter().any(|ty| match ty.kind {
@@ -524,6 +521,7 @@ impl<'a, 'tcx> WrongNumberOfGenericArgs<'a, 'tcx> {
if self.not_enough_args_provided() {
self.suggest_adding_args(err);
} else if self.too_many_args_provided() {
+ self.suggest_moving_args_from_assoc_fn_to_trait(err);
self.suggest_removing_args_or_generics(err);
} else {
unreachable!();
@@ -654,6 +652,144 @@ impl<'a, 'tcx> WrongNumberOfGenericArgs<'a, 'tcx> {
}
}
+ /// Suggests moving redundant argument(s) of an associated function to the
+ /// trait it belongs to.
+ ///
+ /// ```compile_fail
+ /// Into::into::<Option<_>>(42) // suggests considering `Into::<Option<_>>::into(42)`
+ /// ```
+ fn suggest_moving_args_from_assoc_fn_to_trait(&self, err: &mut Diagnostic) {
+ let trait_ = match self.tcx.trait_of_item(self.def_id) {
+ Some(def_id) => def_id,
+ None => return,
+ };
+
+ // Skip the suggestion when the associated function is itself generic, since it is
+ // unclear how to split the provided parameters between those to suggest to the
+ // trait and those to remain on the associated function.
+ let num_assoc_fn_expected_args =
+ self.num_expected_type_or_const_args() + self.num_expected_lifetime_args();
+ if num_assoc_fn_expected_args > 0 {
+ return;
+ }
+
+ let num_assoc_fn_excess_args =
+ self.num_excess_type_or_const_args() + self.num_excess_lifetime_args();
+
+ let trait_generics = self.tcx.generics_of(trait_);
+ let num_trait_generics_except_self =
+ trait_generics.count() - if trait_generics.has_self { 1 } else { 0 };
+
+ let msg = format!(
+ "consider moving {these} generic argument{s} to the `{name}` trait, which takes up to {num} argument{s}",
+ these = pluralize!("this", num_assoc_fn_excess_args),
+ s = pluralize!(num_assoc_fn_excess_args),
+ name = self.tcx.item_name(trait_),
+ num = num_trait_generics_except_self,
+ );
+
+ if let Some(parent_node) = self.tcx.hir().find_parent_node(self.path_segment.hir_id)
+ && let Some(parent_node) = self.tcx.hir().find(parent_node)
+ && let hir::Node::Expr(expr) = parent_node {
+ match expr.kind {
+ hir::ExprKind::Path(ref qpath) => {
+ self.suggest_moving_args_from_assoc_fn_to_trait_for_qualified_path(
+ err,
+ qpath,
+ msg,
+ num_assoc_fn_excess_args,
+ num_trait_generics_except_self
+ )
+ },
+ hir::ExprKind::MethodCall(..) => {
+ self.suggest_moving_args_from_assoc_fn_to_trait_for_method_call(
+ err,
+ trait_,
+ expr,
+ msg,
+ num_assoc_fn_excess_args,
+ num_trait_generics_except_self
+ )
+ },
+ _ => return,
+ }
+ }
+ }
+
+ fn suggest_moving_args_from_assoc_fn_to_trait_for_qualified_path(
+ &self,
+ err: &mut Diagnostic,
+ qpath: &'tcx hir::QPath<'tcx>,
+ msg: String,
+ num_assoc_fn_excess_args: usize,
+ num_trait_generics_except_self: usize,
+ ) {
+ if let hir::QPath::Resolved(_, path) = qpath
+ && let Some(trait_path_segment) = path.segments.get(0) {
+ let num_generic_args_supplied_to_trait = trait_path_segment.args().num_generic_params();
+
+ if num_assoc_fn_excess_args == num_trait_generics_except_self - num_generic_args_supplied_to_trait {
+ if let Some(span) = self.gen_args.span_ext()
+ && let Ok(snippet) = self.tcx.sess.source_map().span_to_snippet(span) {
+ let sugg = vec![
+ (self.path_segment.ident.span, format!("{}::{}", snippet, self.path_segment.ident)),
+ (span.with_lo(self.path_segment.ident.span.hi()), "".to_owned())
+ ];
+
+ err.multipart_suggestion(
+ msg,
+ sugg,
+ Applicability::MaybeIncorrect
+ );
+ }
+ }
+ }
+ }
+
+ fn suggest_moving_args_from_assoc_fn_to_trait_for_method_call(
+ &self,
+ err: &mut Diagnostic,
+ trait_def_id: DefId,
+ expr: &'tcx hir::Expr<'tcx>,
+ msg: String,
+ num_assoc_fn_excess_args: usize,
+ num_trait_generics_except_self: usize,
+ ) {
+ let sm = self.tcx.sess.source_map();
+ let hir::ExprKind::MethodCall(_, rcvr, args, _) = expr.kind else { return; };
+ if num_assoc_fn_excess_args != num_trait_generics_except_self {
+ return;
+ }
+ let Some(gen_args) = self.gen_args.span_ext() else { return; };
+ let Ok(generics) = sm.span_to_snippet(gen_args) else { return; };
+ let Ok(rcvr) = sm.span_to_snippet(
+ rcvr.span.find_ancestor_inside(expr.span).unwrap_or(rcvr.span)
+ ) else { return; };
+ let Ok(rest) =
+ (match args {
+ [] => Ok(String::new()),
+ [arg] => sm.span_to_snippet(
+ arg.span.find_ancestor_inside(expr.span).unwrap_or(arg.span),
+ ),
+ [first, .., last] => {
+ let first_span =
+ first.span.find_ancestor_inside(expr.span).unwrap_or(first.span);
+ let last_span =
+ last.span.find_ancestor_inside(expr.span).unwrap_or(last.span);
+ sm.span_to_snippet(first_span.to(last_span))
+ }
+ }) else { return; };
+ let comma = if args.len() > 0 { ", " } else { "" };
+ let trait_path = self.tcx.def_path_str(trait_def_id);
+ let method_name = self.tcx.item_name(self.def_id);
+ err.span_suggestion(
+ expr.span,
+ msg,
+ format!("{trait_path}::{generics}::{method_name}({rcvr}{comma}{rest})"),
+ Applicability::MaybeIncorrect,
+ );
+ }
+
/// Suggests to remove redundant argument(s):
///
/// ```text
@@ -763,16 +899,13 @@ impl<'a, 'tcx> WrongNumberOfGenericArgs<'a, 'tcx> {
// If there is a single unbound associated type and a single excess generic param
// suggest replacing the generic param with the associated type bound
if provided_args_matches_unbound_traits && !unbound_types.is_empty() {
- let mut suggestions = vec![];
let unused_generics = &self.gen_args.args[self.num_expected_type_or_const_args()..];
- for (potential, name) in iter::zip(unused_generics, &unbound_types) {
- if let Ok(snippet) = self.tcx.sess.source_map().span_to_snippet(potential.span()) {
- suggestions.push((potential.span(), format!("{} = {}", name, snippet)));
- }
- }
+ let suggestions = iter::zip(unused_generics, &unbound_types)
+ .map(|(potential, name)| (potential.span().shrink_to_lo(), format!("{name} = ")))
+ .collect::<Vec<_>>();
if !suggestions.is_empty() {
- err.multipart_suggestion(
+ err.multipart_suggestion_verbose(
&format!(
"replace the generic bound{s} with the associated type{s}",
s = pluralize!(unbound_types.len())
diff --git a/compiler/rustc_typeck/src/variance/constraints.rs b/compiler/rustc_hir_analysis/src/variance/constraints.rs
index d79450e1a..eaf0310d5 100644
--- a/compiler/rustc_typeck/src/variance/constraints.rs
+++ b/compiler/rustc_hir_analysis/src/variance/constraints.rs
@@ -257,7 +257,7 @@ impl<'a, 'tcx> ConstraintContext<'a, 'tcx> {
self.add_constraints_from_invariant_substs(current, substs, variance);
}
- ty::Dynamic(data, r) => {
+ ty::Dynamic(data, r, _) => {
// The type `Foo<T+'a>` is contravariant w/r/t `'a`:
let contra = self.contravariant(variance);
self.add_constraints_from_region(current, r, contra);
@@ -271,11 +271,11 @@ impl<'a, 'tcx> ConstraintContext<'a, 'tcx> {
}
for projection in data.projection_bounds() {
- match projection.skip_binder().term {
- ty::Term::Ty(ty) => {
+ match projection.skip_binder().term.unpack() {
+ ty::TermKind::Ty(ty) => {
self.add_constraints_from_ty(current, ty, self.invariant);
}
- ty::Term::Const(c) => {
+ ty::TermKind::Const(c) => {
self.add_constraints_from_const(current, c, self.invariant)
}
}
@@ -411,11 +411,7 @@ impl<'a, 'tcx> ConstraintContext<'a, 'tcx> {
// way early-bound regions do, so we skip them here.
}
- ty::ReFree(..)
- | ty::ReVar(..)
- | ty::RePlaceholder(..)
- | ty::ReEmpty(_)
- | ty::ReErased => {
+ ty::ReFree(..) | ty::ReVar(..) | ty::RePlaceholder(..) | ty::ReErased => {
// We don't expect to see anything but 'static or bound
// regions when visiting member types or method types.
bug!(
diff --git a/compiler/rustc_typeck/src/variance/mod.rs b/compiler/rustc_hir_analysis/src/variance/mod.rs
index 82103c5a0..82103c5a0 100644
--- a/compiler/rustc_typeck/src/variance/mod.rs
+++ b/compiler/rustc_hir_analysis/src/variance/mod.rs
diff --git a/compiler/rustc_typeck/src/variance/solve.rs b/compiler/rustc_hir_analysis/src/variance/solve.rs
index 97aca621a..97aca621a 100644
--- a/compiler/rustc_typeck/src/variance/solve.rs
+++ b/compiler/rustc_hir_analysis/src/variance/solve.rs
diff --git a/compiler/rustc_typeck/src/variance/terms.rs b/compiler/rustc_hir_analysis/src/variance/terms.rs
index 1f763011e..1f763011e 100644
--- a/compiler/rustc_typeck/src/variance/terms.rs
+++ b/compiler/rustc_hir_analysis/src/variance/terms.rs
diff --git a/compiler/rustc_hir_analysis/src/variance/test.rs b/compiler/rustc_hir_analysis/src/variance/test.rs
new file mode 100644
index 000000000..83ed3e44b
--- /dev/null
+++ b/compiler/rustc_hir_analysis/src/variance/test.rs
@@ -0,0 +1,15 @@
+use rustc_errors::struct_span_err;
+use rustc_middle::ty::TyCtxt;
+use rustc_span::symbol::sym;
+
+pub fn test_variance(tcx: TyCtxt<'_>) {
+ // For unit testing: check for a special "rustc_variance"
+ // attribute and report an error with various results if found.
+ for id in tcx.hir().items() {
+ if tcx.has_attr(id.owner_id.to_def_id(), sym::rustc_variance) {
+ let variances_of = tcx.variances_of(id.owner_id);
+ struct_span_err!(tcx.sess, tcx.def_span(id.owner_id), E0208, "{:?}", variances_of)
+ .emit();
+ }
+ }
+}
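+
+// Illustrative use of this test hook from a test crate (a sketch; requires
+// `#![feature(rustc_attrs)]`):
+//
+//     #[rustc_variance]
+//     struct Wrap<'a, T> {
+//         field: &'a T, // the E0208 error above lists the computed variances
+//     }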
diff --git a/compiler/rustc_typeck/src/variance/xform.rs b/compiler/rustc_hir_analysis/src/variance/xform.rs
index 027f0859f..027f0859f 100644
--- a/compiler/rustc_typeck/src/variance/xform.rs
+++ b/compiler/rustc_hir_analysis/src/variance/xform.rs
diff --git a/compiler/rustc_hir_pretty/Cargo.toml b/compiler/rustc_hir_pretty/Cargo.toml
index 46a8e7dee..1ea7be1ae 100644
--- a/compiler/rustc_hir_pretty/Cargo.toml
+++ b/compiler/rustc_hir_pretty/Cargo.toml
@@ -4,7 +4,6 @@ version = "0.0.0"
edition = "2021"
[lib]
-doctest = false
[dependencies]
rustc_ast_pretty = { path = "../rustc_ast_pretty" }
diff --git a/compiler/rustc_hir_pretty/src/lib.rs b/compiler/rustc_hir_pretty/src/lib.rs
index e0179bd3e..da27554a2 100644
--- a/compiler/rustc_hir_pretty/src/lib.rs
+++ b/compiler/rustc_hir_pretty/src/lib.rs
@@ -1,4 +1,6 @@
#![recursion_limit = "256"]
+#![deny(rustc::untranslatable_diagnostic)]
+#![deny(rustc::diagnostic_outside_of_impl)]
use rustc_ast as ast;
use rustc_ast::util::parser::{self, AssocOp, Fixity};
@@ -7,7 +9,9 @@ use rustc_ast_pretty::pp::{self, Breaks};
use rustc_ast_pretty::pprust::{Comments, PrintState};
use rustc_hir as hir;
use rustc_hir::LifetimeParamKind;
-use rustc_hir::{GenericArg, GenericParam, GenericParamKind, Node, Term};
+use rustc_hir::{
+ BindingAnnotation, ByRef, GenericArg, GenericParam, GenericParamKind, Mutability, Node, Term,
+};
use rustc_hir::{GenericBound, PatKind, RangeEnd, TraitBoundModifier};
use rustc_span::source_map::SourceMap;
use rustc_span::symbol::{kw, Ident, IdentPrinter, Symbol};
@@ -83,12 +87,14 @@ impl<'a> State<'a> {
Node::Variant(a) => self.print_variant(a),
Node::AnonConst(a) => self.print_anon_const(a),
Node::Expr(a) => self.print_expr(a),
+ Node::ExprField(a) => self.print_expr_field(&a),
Node::Stmt(a) => self.print_stmt(a),
Node::PathSegment(a) => self.print_path_segment(a),
Node::Ty(a) => self.print_type(a),
Node::TypeBinding(a) => self.print_type_binding(a),
Node::TraitRef(a) => self.print_trait_ref(a),
Node::Pat(a) => self.print_pat(a),
+ Node::PatField(a) => self.print_patfield(&a),
Node::Arm(a) => self.print_arm(a),
Node::Infer(_) => self.word("_"),
Node::Block(a) => {
@@ -881,7 +887,7 @@ impl<'a> State<'a> {
self.end(); // need to close a box
self.ann.nested(self, Nested::Body(body));
}
- hir::ImplItemKind::TyAlias(ty) => {
+ hir::ImplItemKind::Type(ty) => {
self.print_associated_type(ii.ident, ii.generics, None, Some(ty));
}
}
@@ -911,6 +917,10 @@ impl<'a> State<'a> {
if let Some(els) = els {
self.nbsp();
self.word_space("else");
+ // containing cbox, will be closed by print-block at `}`
+ self.cbox(0);
+ // head-box, will be closed by print-block after `{`
+ self.ibox(0);
self.print_block(els);
}
@@ -1123,20 +1133,7 @@ impl<'a> State<'a> {
) {
self.print_qpath(qpath, true);
self.word("{");
- self.commasep_cmnt(
- Consistent,
- fields,
- |s, field| {
- s.ibox(INDENT_UNIT);
- if !field.is_shorthand {
- s.print_ident(field.ident);
- s.word_space(":");
- }
- s.print_expr(field.expr);
- s.end()
- },
- |f| f.span,
- );
+ self.commasep_cmnt(Consistent, fields, |s, field| s.print_expr_field(field), |f| f.span);
if let Some(expr) = wth {
self.ibox(INDENT_UNIT);
if !fields.is_empty() {
@@ -1153,6 +1150,20 @@ impl<'a> State<'a> {
self.word("}");
}
+ fn print_expr_field(&mut self, field: &hir::ExprField<'_>) {
+ if self.attrs(field.hir_id).is_empty() {
+ self.space();
+ }
+ self.cbox(INDENT_UNIT);
+ self.print_outer_attributes(&self.attrs(field.hir_id));
+ if !field.is_shorthand {
+ self.print_ident(field.ident);
+ self.word_space(":");
+ }
+ self.print_expr(&field.expr);
+ self.end()
+ }
+
fn print_expr_tup(&mut self, exprs: &[hir::Expr<'_>]) {
self.popen();
self.commasep_exprs(Inconsistent, exprs);
@@ -1172,15 +1183,20 @@ impl<'a> State<'a> {
self.print_call_post(args)
}
- fn print_expr_method_call(&mut self, segment: &hir::PathSegment<'_>, args: &[hir::Expr<'_>]) {
- let base_args = &args[1..];
- self.print_expr_maybe_paren(&args[0], parser::PREC_POSTFIX);
+ fn print_expr_method_call(
+ &mut self,
+ segment: &hir::PathSegment<'_>,
+ receiver: &hir::Expr<'_>,
+ args: &[hir::Expr<'_>],
+ ) {
+ let base_args = args;
+ self.print_expr_maybe_paren(&receiver, parser::PREC_POSTFIX);
self.word(".");
self.print_ident(segment.ident);
let generic_args = segment.args();
if !generic_args.args.is_empty() || !generic_args.bindings.is_empty() {
- self.print_generic_args(generic_args, segment.infer_args, true);
+ self.print_generic_args(generic_args, true);
}
self.print_call_post(base_args)
@@ -1240,7 +1256,7 @@ impl<'a> State<'a> {
fn print_literal(&mut self, lit: &hir::Lit) {
self.maybe_print_comment(lit.span.lo());
- self.word(lit.node.to_lit_token().to_string())
+ self.word(lit.node.to_token_lit().to_string())
}
fn print_inline_asm(&mut self, asm: &hir::InlineAsm<'_>) {
@@ -1385,8 +1401,8 @@ impl<'a> State<'a> {
hir::ExprKind::Call(func, args) => {
self.print_expr_call(func, args);
}
- hir::ExprKind::MethodCall(segment, args, _) => {
- self.print_expr_method_call(segment, args);
+ hir::ExprKind::MethodCall(segment, receiver, args, _) => {
+ self.print_expr_method_call(segment, receiver, args);
}
hir::ExprKind::Binary(op, lhs, rhs) => {
self.print_expr_binary(op, lhs, rhs);
@@ -1583,7 +1599,7 @@ impl<'a> State<'a> {
}
if segment.ident.name != kw::PathRoot {
self.print_ident(segment.ident);
- self.print_generic_args(segment.args(), segment.infer_args, colons_before_params);
+ self.print_generic_args(segment.args(), colons_before_params);
}
}
}
@@ -1591,7 +1607,7 @@ impl<'a> State<'a> {
pub fn print_path_segment(&mut self, segment: &hir::PathSegment<'_>) {
if segment.ident.name != kw::PathRoot {
self.print_ident(segment.ident);
- self.print_generic_args(segment.args(), segment.infer_args, false);
+ self.print_generic_args(segment.args(), false);
}
}
@@ -1610,11 +1626,7 @@ impl<'a> State<'a> {
}
if segment.ident.name != kw::PathRoot {
self.print_ident(segment.ident);
- self.print_generic_args(
- segment.args(),
- segment.infer_args,
- colons_before_params,
- );
+ self.print_generic_args(segment.args(), colons_before_params);
}
}
@@ -1622,11 +1634,7 @@ impl<'a> State<'a> {
self.word("::");
let item_segment = path.segments.last().unwrap();
self.print_ident(item_segment.ident);
- self.print_generic_args(
- item_segment.args(),
- item_segment.infer_args,
- colons_before_params,
- )
+ self.print_generic_args(item_segment.args(), colons_before_params)
}
hir::QPath::TypeRelative(qself, item_segment) => {
// If we've got a compound-qualified-path, let's push an additional pair of angle
@@ -1642,11 +1650,7 @@ impl<'a> State<'a> {
self.word("::");
self.print_ident(item_segment.ident);
- self.print_generic_args(
- item_segment.args(),
- item_segment.infer_args,
- colons_before_params,
- )
+ self.print_generic_args(item_segment.args(), colons_before_params)
}
hir::QPath::LangItem(lang_item, span, _) => {
self.word("#[lang = \"");
@@ -1659,7 +1663,6 @@ impl<'a> State<'a> {
fn print_generic_args(
&mut self,
generic_args: &hir::GenericArgs<'_>,
- infer_args: bool,
colons_before_params: bool,
) {
if generic_args.parenthesized {
@@ -1684,7 +1687,11 @@ impl<'a> State<'a> {
let mut nonelided_generic_args: bool = false;
let elide_lifetimes = generic_args.args.iter().all(|arg| match arg {
- GenericArg::Lifetime(lt) => lt.is_elided(),
+ GenericArg::Lifetime(lt) if lt.is_elided() => true,
+ GenericArg::Lifetime(_) => {
+ nonelided_generic_args = true;
+ false
+ }
_ => {
nonelided_generic_args = true;
true
@@ -1706,13 +1713,6 @@ impl<'a> State<'a> {
);
}
- // FIXME(eddyb): this would leak into error messages (e.g.,
- // "non-exhaustive patterns: `Some::<..>(_)` not covered").
- if infer_args && false {
- start_or_comma(self);
- self.word("..");
- }
-
for binding in generic_args.bindings {
start_or_comma(self);
self.print_type_binding(binding);
@@ -1726,7 +1726,7 @@ impl<'a> State<'a> {
pub fn print_type_binding(&mut self, binding: &hir::TypeBinding<'_>) {
self.print_ident(binding.ident);
- self.print_generic_args(binding.gen_args, false, false);
+ self.print_generic_args(binding.gen_args, false);
self.space();
match binding.kind {
hir::TypeBindingKind::Equality { ref term } => {
@@ -1749,20 +1749,12 @@ impl<'a> State<'a> {
// is that it doesn't matter
match pat.kind {
PatKind::Wild => self.word("_"),
- PatKind::Binding(binding_mode, _, ident, sub) => {
- match binding_mode {
- hir::BindingAnnotation::Ref => {
- self.word_nbsp("ref");
- self.print_mutability(hir::Mutability::Not, false);
- }
- hir::BindingAnnotation::RefMut => {
- self.word_nbsp("ref");
- self.print_mutability(hir::Mutability::Mut, false);
- }
- hir::BindingAnnotation::Unannotated => {}
- hir::BindingAnnotation::Mutable => {
- self.word_nbsp("mut");
- }
+ PatKind::Binding(BindingAnnotation(by_ref, mutbl), _, ident, sub) => {
+ if by_ref == ByRef::Yes {
+ self.word_nbsp("ref");
+ }
+ if mutbl == Mutability::Mut {
+ self.word_nbsp("mut");
}
self.print_ident(ident);
if let Some(p) = sub {
@@ -1773,7 +1765,8 @@ impl<'a> State<'a> {
PatKind::TupleStruct(ref qpath, elts, ddpos) => {
self.print_qpath(qpath, true);
self.popen();
- if let Some(ddpos) = ddpos {
+ if let Some(ddpos) = ddpos.as_opt_usize() {
+ let ddpos = ddpos as usize;
self.commasep(Inconsistent, &elts[..ddpos], |s, p| s.print_pat(p));
if ddpos != 0 {
self.word_space(",");
@@ -1799,20 +1792,7 @@ impl<'a> State<'a> {
if !empty {
self.space();
}
- self.commasep_cmnt(
- Consistent,
- fields,
- |s, f| {
- s.cbox(INDENT_UNIT);
- if !f.is_shorthand {
- s.print_ident(f.ident);
- s.word_nbsp(":");
- }
- s.print_pat(f.pat);
- s.end()
- },
- |f| f.pat.span,
- );
+ self.commasep_cmnt(Consistent, &fields, |s, f| s.print_patfield(f), |f| f.pat.span);
if etc {
if !fields.is_empty() {
self.word_space(",");
@@ -1829,7 +1809,7 @@ impl<'a> State<'a> {
}
PatKind::Tuple(elts, ddpos) => {
self.popen();
- if let Some(ddpos) = ddpos {
+ if let Some(ddpos) = ddpos.as_opt_usize() {
self.commasep(Inconsistent, &elts[..ddpos], |s, p| s.print_pat(p));
if ddpos != 0 {
self.word_space(",");
@@ -1907,6 +1887,20 @@ impl<'a> State<'a> {
self.ann.post(self, AnnNode::Pat(pat))
}
+ pub fn print_patfield(&mut self, field: &hir::PatField<'_>) {
+ if self.attrs(field.hir_id).is_empty() {
+ self.space();
+ }
+ self.cbox(INDENT_UNIT);
+ self.print_outer_attributes(&self.attrs(field.hir_id));
+ if !field.is_shorthand {
+ self.print_ident(field.ident);
+ self.word_nbsp(":");
+ }
+ self.print_pat(field.pat);
+ self.end();
+ }
+
pub fn print_param(&mut self, arg: &hir::Param<'_>) {
self.print_outer_attributes(self.attrs(arg.hir_id));
self.print_pat(arg.pat);
@@ -2403,9 +2397,9 @@ fn contains_exterior_struct_lit(value: &hir::Expr<'_>) -> bool {
contains_exterior_struct_lit(x)
}
- hir::ExprKind::MethodCall(.., exprs, _) => {
+ hir::ExprKind::MethodCall(_, receiver, ..) => {
// `X { y: 1 }.bar(...)`
- contains_exterior_struct_lit(&exprs[0])
+ contains_exterior_struct_lit(receiver)
}
_ => false,
diff --git a/compiler/rustc_hir_typeck/Cargo.toml b/compiler/rustc_hir_typeck/Cargo.toml
new file mode 100644
index 000000000..093f9bb84
--- /dev/null
+++ b/compiler/rustc_hir_typeck/Cargo.toml
@@ -0,0 +1,28 @@
+[package]
+name = "rustc_hir_typeck"
+version = "0.1.0"
+edition = "2021"
+
+# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
+
+[dependencies]
+smallvec = { version = "1.8.1", features = ["union", "may_dangle"] }
+tracing = "0.1"
+rustc_ast = { path = "../rustc_ast" }
+rustc_data_structures = { path = "../rustc_data_structures" }
+rustc_errors = { path = "../rustc_errors" }
+rustc_graphviz = { path = "../rustc_graphviz" }
+rustc_index = { path = "../rustc_index" }
+rustc_infer = { path = "../rustc_infer" }
+rustc_hir = { path = "../rustc_hir" }
+rustc_hir_analysis = { path = "../rustc_hir_analysis" }
+rustc_hir_pretty = { path = "../rustc_hir_pretty" }
+rustc_lint = { path = "../rustc_lint" }
+rustc_middle = { path = "../rustc_middle" }
+rustc_macros = { path = "../rustc_macros" }
+rustc_serialize = { path = "../rustc_serialize" }
+rustc_session = { path = "../rustc_session" }
+rustc_span = { path = "../rustc_span" }
+rustc_target = { path = "../rustc_target" }
+rustc_trait_selection = { path = "../rustc_trait_selection" }
+rustc_type_ir = { path = "../rustc_type_ir" }
diff --git a/compiler/rustc_typeck/src/check/_match.rs b/compiler/rustc_hir_typeck/src/_match.rs
index 1b13c98e4..2b15d4dcd 100644
--- a/compiler/rustc_typeck/src/check/_match.rs
+++ b/compiler/rustc_hir_typeck/src/_match.rs
@@ -1,10 +1,10 @@
-use crate::check::coercion::{AsCoercionSite, CoerceMany};
-use crate::check::{Diverges, Expectation, FnCtxt, Needs};
+use crate::coercion::{AsCoercionSite, CoerceMany};
+use crate::{Diverges, Expectation, FnCtxt, Needs};
use rustc_errors::{Applicability, MultiSpan};
use rustc_hir::{self as hir, ExprKind};
use rustc_infer::infer::type_variable::{TypeVariableOrigin, TypeVariableOriginKind};
use rustc_infer::traits::Obligation;
-use rustc_middle::ty::{self, ToPredicate, Ty, TypeVisitable};
+use rustc_middle::ty::{self, ToPredicate, Ty};
use rustc_span::Span;
use rustc_trait_selection::traits::query::evaluate_obligation::InferCtxtExt;
use rustc_trait_selection::traits::{
@@ -12,7 +12,7 @@ use rustc_trait_selection::traits::{
};
impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
- #[instrument(skip(self), level = "debug")]
+ #[instrument(skip(self), level = "debug", ret)]
pub fn check_match(
&self,
expr: &'tcx hir::Expr<'tcx>,
@@ -94,7 +94,9 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
let arm_ty = self.check_expr_with_expectation(&arm.body, expected);
all_arms_diverge &= self.diverges.get();
- let opt_suggest_box_span = self.opt_suggest_box_span(arm_ty, orig_expected);
+ let opt_suggest_box_span = prior_arm.and_then(|(_, prior_arm_ty, _)| {
+ self.opt_suggest_box_span(prior_arm_ty, arm_ty, orig_expected)
+ });
let (arm_block_id, arm_span) = if let hir::ExprKind::Block(blk, _) = arm.body.kind {
(Some(blk.hir_id), self.find_block_span(blk))
@@ -135,9 +137,13 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
Some(&arm.body),
arm_ty,
Some(&mut |err| {
- let Some(ret) = self.ret_type_span else {
- return;
- };
+ let Some(ret) = self
+ .tcx
+ .hir()
+ .find_by_def_id(self.body_id.owner.def_id)
+ .and_then(|owner| owner.fn_decl())
+ .map(|decl| decl.output.span())
+ else { return; };
let Expectation::IsLast(stmt) = orig_expected else {
return
};
@@ -210,9 +216,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
// We won't diverge unless the scrutinee or all arms diverge.
self.diverges.set(scrut_diverges | all_arms_diverge);
- let match_ty = coercion.complete(self);
- debug!(?match_ty);
- match_ty
+ coercion.complete(self)
}
/// When the previously checked expression (the scrutinee) diverges,
@@ -255,7 +259,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
err.help("consider adding an `else` block that evaluates to the expected type");
error = true;
},
- ret_reason.is_none(),
+ false,
);
error
}
@@ -468,53 +472,80 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
}
}
- // When we have a `match` as a tail expression in a `fn` with a returned `impl Trait`
- // we check if the different arms would work with boxed trait objects instead and
- // provide a structured suggestion in that case.
+ /// When we have a `match` as a tail expression in a `fn` with a returned `impl Trait`,
+ /// we check if the different arms would work with boxed trait objects instead and
+ /// provide a structured suggestion in that case.
pub(crate) fn opt_suggest_box_span(
&self,
- outer_ty: Ty<'tcx>,
+ first_ty: Ty<'tcx>,
+ second_ty: Ty<'tcx>,
orig_expected: Expectation<'tcx>,
) -> Option<Span> {
+ // FIXME(compiler-errors): This really shouldn't need to be done during the
+ // "good" path of typeck, but here we are.
match orig_expected {
- Expectation::ExpectHasType(expected)
- if self.in_tail_expr
- && self.ret_coercion.as_ref()?.borrow().merged_ty().has_opaque_types()
- && self.can_coerce(outer_ty, expected) =>
- {
- let obligations = self.fulfillment_cx.borrow().pending_obligations();
- let mut suggest_box = !obligations.is_empty();
- for o in obligations {
- match o.predicate.kind().skip_binder() {
- ty::PredicateKind::Trait(t) => {
- let pred =
- ty::Binder::dummy(ty::PredicateKind::Trait(ty::TraitPredicate {
- trait_ref: ty::TraitRef {
- def_id: t.def_id(),
- substs: self.tcx.mk_substs_trait(outer_ty, &[]),
- },
- constness: t.constness,
- polarity: t.polarity,
- }));
- let obl = Obligation::new(
- o.cause.clone(),
- self.param_env,
- pred.to_predicate(self.tcx),
- );
- suggest_box &= self.predicate_must_hold_modulo_regions(&obl);
- if !suggest_box {
- // We've encountered some obligation that didn't hold, so the
- // return expression can't just be boxed. We don't need to
- // evaluate the rest of the obligations.
- break;
+ Expectation::ExpectHasType(expected) => {
+ let TypeVariableOrigin {
+ span,
+ kind: TypeVariableOriginKind::OpaqueTypeInference(rpit_def_id),
+ ..
+ } = self.type_var_origin(expected)? else { return None; };
+
+ let sig = *self
+ .typeck_results
+ .borrow()
+ .liberated_fn_sigs()
+ .get(hir::HirId::make_owner(self.body_id.owner.def_id))?;
+
+ let substs = sig.output().walk().find_map(|arg| {
+ if let ty::GenericArgKind::Type(ty) = arg.unpack()
+ && let ty::Opaque(def_id, substs) = *ty.kind()
+ && def_id == rpit_def_id
+ {
+ Some(substs)
+ } else {
+ None
+ }
+ })?;
+ let opaque_ty = self.tcx.mk_opaque(rpit_def_id, substs);
+
+ if !self.can_coerce(first_ty, expected) || !self.can_coerce(second_ty, expected) {
+ return None;
+ }
+
+ for ty in [first_ty, second_ty] {
+ for (pred, _) in self
+ .tcx
+ .bound_explicit_item_bounds(rpit_def_id)
+ .subst_iter_copied(self.tcx, substs)
+ {
+ let pred = match pred.kind().skip_binder() {
+ ty::PredicateKind::Trait(mut trait_pred) => {
+ assert_eq!(trait_pred.trait_ref.self_ty(), opaque_ty);
+ trait_pred.trait_ref.substs =
+ self.tcx.mk_substs_trait(ty, &trait_pred.trait_ref.substs[1..]);
+ pred.kind().rebind(trait_pred).to_predicate(self.tcx)
}
+ ty::PredicateKind::Projection(mut proj_pred) => {
+ assert_eq!(proj_pred.projection_ty.self_ty(), opaque_ty);
+ proj_pred.projection_ty.substs = self
+ .tcx
+ .mk_substs_trait(ty, &proj_pred.projection_ty.substs[1..]);
+ pred.kind().rebind(proj_pred).to_predicate(self.tcx)
+ }
+ _ => continue,
+ };
+ if !self.predicate_must_hold_modulo_regions(&Obligation::new(
+ ObligationCause::misc(span, self.body_id),
+ self.param_env,
+ pred,
+ )) {
+ return None;
}
- _ => {}
}
}
- // If all the obligations hold (or there are no obligations) the tail expression
- // we can suggest to return a boxed trait object instead of an opaque type.
- if suggest_box { self.ret_type_span } else { None }
+
+ Some(span)
}
_ => None,
}
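The rewritten `opt_suggest_box_span` now takes the previous arm's type and the current arm's type, checks both against the expectation and against the return-position `impl Trait`'s explicit item bounds, and only then returns the span tied to the opaque type's inference origin. A hedged sketch of the user-facing pattern this diagnostic targets, together with the boxed rewrite its structured suggestion leads to (the `describe`/`Display` names are illustrative, not taken from the patch):

    use std::fmt::Display;

    // Arms with different concrete types cannot both satisfy one `impl Display`
    // return type, so the suggestion is to box; the rewritten form compiles.
    fn describe(flag: bool) -> Box<dyn Display> {
        match flag {
            true => Box::new(42_i32),
            false => Box::new("forty-two"),
        }
    }

    fn main() {
        println!("{}", describe(false));
    }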
diff --git a/compiler/rustc_typeck/src/check/autoderef.rs b/compiler/rustc_hir_typeck/src/autoderef.rs
index 59c366ad7..59c366ad7 100644
--- a/compiler/rustc_typeck/src/check/autoderef.rs
+++ b/compiler/rustc_hir_typeck/src/autoderef.rs
diff --git a/compiler/rustc_hir_typeck/src/callee.rs b/compiler/rustc_hir_typeck/src/callee.rs
new file mode 100644
index 000000000..1b33f2f02
--- /dev/null
+++ b/compiler/rustc_hir_typeck/src/callee.rs
@@ -0,0 +1,831 @@
+use super::method::probe::{IsSuggestion, Mode, ProbeScope};
+use super::method::MethodCallee;
+use super::{Expectation, FnCtxt, TupleArgumentsFlag};
+
+use crate::type_error_struct;
+use rustc_ast::util::parser::PREC_POSTFIX;
+use rustc_errors::{struct_span_err, Applicability, Diagnostic, StashKey};
+use rustc_hir as hir;
+use rustc_hir::def::{self, Namespace, Res};
+use rustc_hir::def_id::DefId;
+use rustc_infer::{
+ infer,
+ traits::{self, Obligation},
+};
+use rustc_infer::{
+ infer::type_variable::{TypeVariableOrigin, TypeVariableOriginKind},
+ traits::ObligationCause,
+};
+use rustc_middle::ty::adjustment::{
+ Adjust, Adjustment, AllowTwoPhase, AutoBorrow, AutoBorrowMutability,
+};
+use rustc_middle::ty::SubstsRef;
+use rustc_middle::ty::{self, Ty, TyCtxt, TypeVisitable};
+use rustc_span::def_id::LocalDefId;
+use rustc_span::symbol::{sym, Ident};
+use rustc_span::Span;
+use rustc_target::spec::abi;
+use rustc_trait_selection::autoderef::Autoderef;
+use rustc_trait_selection::infer::InferCtxtExt as _;
+use rustc_trait_selection::traits::error_reporting::DefIdOrName;
+use rustc_trait_selection::traits::query::evaluate_obligation::InferCtxtExt as _;
+
+use std::iter;
+
+/// Checks that it is legal to call methods of the trait corresponding
+/// to `trait_id` (this only cares about the trait, not the specific
+/// method that is called).
+pub fn check_legal_trait_for_method_call(
+ tcx: TyCtxt<'_>,
+ span: Span,
+ receiver: Option<Span>,
+ expr_span: Span,
+ trait_id: DefId,
+) {
+ if tcx.lang_items().drop_trait() == Some(trait_id) {
+ let mut err = struct_span_err!(tcx.sess, span, E0040, "explicit use of destructor method");
+ err.span_label(span, "explicit destructor calls not allowed");
+
+ let (sp, suggestion) = receiver
+ .and_then(|s| tcx.sess.source_map().span_to_snippet(s).ok())
+ .filter(|snippet| !snippet.is_empty())
+ .map(|snippet| (expr_span, format!("drop({snippet})")))
+ .unwrap_or_else(|| (span, "drop".to_string()));
+
+ err.span_suggestion(
+ sp,
+ "consider using `drop` function",
+ suggestion,
+ Applicability::MaybeIncorrect,
+ );
+
+ err.emit();
+ }
+}
+
+#[derive(Debug)]
+enum CallStep<'tcx> {
+ Builtin(Ty<'tcx>),
+ DeferredClosure(LocalDefId, ty::FnSig<'tcx>),
+ /// E.g., enum variant constructors.
+ Overloaded(MethodCallee<'tcx>),
+}
+
+impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
+ pub fn check_call(
+ &self,
+ call_expr: &'tcx hir::Expr<'tcx>,
+ callee_expr: &'tcx hir::Expr<'tcx>,
+ arg_exprs: &'tcx [hir::Expr<'tcx>],
+ expected: Expectation<'tcx>,
+ ) -> Ty<'tcx> {
+ let original_callee_ty = match &callee_expr.kind {
+ hir::ExprKind::Path(hir::QPath::Resolved(..) | hir::QPath::TypeRelative(..)) => self
+ .check_expr_with_expectation_and_args(
+ callee_expr,
+ Expectation::NoExpectation,
+ arg_exprs,
+ ),
+ _ => self.check_expr(callee_expr),
+ };
+
+ let expr_ty = self.structurally_resolved_type(call_expr.span, original_callee_ty);
+
+ let mut autoderef = self.autoderef(callee_expr.span, expr_ty);
+ let mut result = None;
+ while result.is_none() && autoderef.next().is_some() {
+ result = self.try_overloaded_call_step(call_expr, callee_expr, arg_exprs, &autoderef);
+ }
+ self.register_predicates(autoderef.into_obligations());
+
+ let output = match result {
+ None => {
+ // this will report an error since original_callee_ty is not a fn
+ self.confirm_builtin_call(
+ call_expr,
+ callee_expr,
+ original_callee_ty,
+ arg_exprs,
+ expected,
+ )
+ }
+
+ Some(CallStep::Builtin(callee_ty)) => {
+ self.confirm_builtin_call(call_expr, callee_expr, callee_ty, arg_exprs, expected)
+ }
+
+ Some(CallStep::DeferredClosure(def_id, fn_sig)) => {
+ self.confirm_deferred_closure_call(call_expr, arg_exprs, expected, def_id, fn_sig)
+ }
+
+ Some(CallStep::Overloaded(method_callee)) => {
+ self.confirm_overloaded_call(call_expr, arg_exprs, expected, method_callee)
+ }
+ };
+
+ // we must check that the return type of called functions is WF:
+ self.register_wf_obligation(output.into(), call_expr.span, traits::WellFormed(None));
+
+ output
+ }
+
+ fn try_overloaded_call_step(
+ &self,
+ call_expr: &'tcx hir::Expr<'tcx>,
+ callee_expr: &'tcx hir::Expr<'tcx>,
+ arg_exprs: &'tcx [hir::Expr<'tcx>],
+ autoderef: &Autoderef<'a, 'tcx>,
+ ) -> Option<CallStep<'tcx>> {
+ let adjusted_ty =
+ self.structurally_resolved_type(autoderef.span(), autoderef.final_ty(false));
+ debug!(
+ "try_overloaded_call_step(call_expr={:?}, adjusted_ty={:?})",
+ call_expr, adjusted_ty
+ );
+
+ // If the callee is a bare function or a closure, then we're all set.
+ match *adjusted_ty.kind() {
+ ty::FnDef(..) | ty::FnPtr(_) => {
+ let adjustments = self.adjust_steps(autoderef);
+ self.apply_adjustments(callee_expr, adjustments);
+ return Some(CallStep::Builtin(adjusted_ty));
+ }
+
+ ty::Closure(def_id, substs) => {
+ let def_id = def_id.expect_local();
+
+ // Check whether this is a call to a closure where we
+ // haven't yet decided on whether the closure is fn vs
+ // fnmut vs fnonce. If so, we have to defer further processing.
+ if self.closure_kind(substs).is_none() {
+ let closure_sig = substs.as_closure().sig();
+ let closure_sig = self.replace_bound_vars_with_fresh_vars(
+ call_expr.span,
+ infer::FnCall,
+ closure_sig,
+ );
+ let adjustments = self.adjust_steps(autoderef);
+ self.record_deferred_call_resolution(
+ def_id,
+ DeferredCallResolution {
+ call_expr,
+ callee_expr,
+ adjusted_ty,
+ adjustments,
+ fn_sig: closure_sig,
+ closure_substs: substs,
+ },
+ );
+ return Some(CallStep::DeferredClosure(def_id, closure_sig));
+ }
+ }
+
+ // Hack: we know that there are traits implementing Fn for &F
+ // where F:Fn and so forth. In the particular case of types
+ // like `x: &mut FnMut()`, if there is a call `x()`, we would
+ // normally translate to `FnMut::call_mut(&mut x, ())`, but
+ // that winds up requiring `mut x: &mut FnMut()`. A little
+ // over the top. The simplest fix by far is to just ignore
+ // this case and deref again, so we wind up with
+ // `FnMut::call_mut(&mut *x, ())`.
+ ty::Ref(..) if autoderef.step_count() == 0 => {
+ return None;
+ }
+
+ ty::Error(_) => {
+ return None;
+ }
+
+ _ => {}
+ }
+
+ // Now, we look for the implementation of a Fn trait on the object's type.
+ // We first do it with the explicit instruction to look for an impl of
+ // `Fn<Tuple>`, with the tuple `Tuple` having an arity corresponding
+ // to the number of call parameters.
+ // If that fails (or_else branch), we try again without specifying the
+ // shape of the tuple (hence the None). This allows us to detect that an Fn
+ // trait is implemented, and to use this information for diagnostics.
+ self.try_overloaded_call_traits(call_expr, adjusted_ty, Some(arg_exprs))
+ .or_else(|| self.try_overloaded_call_traits(call_expr, adjusted_ty, None))
+ .map(|(autoref, method)| {
+ let mut adjustments = self.adjust_steps(autoderef);
+ adjustments.extend(autoref);
+ self.apply_adjustments(callee_expr, adjustments);
+ CallStep::Overloaded(method)
+ })
+ }
+
+ fn try_overloaded_call_traits(
+ &self,
+ call_expr: &hir::Expr<'_>,
+ adjusted_ty: Ty<'tcx>,
+ opt_arg_exprs: Option<&'tcx [hir::Expr<'tcx>]>,
+ ) -> Option<(Option<Adjustment<'tcx>>, MethodCallee<'tcx>)> {
+ // Try the options that are least restrictive on the caller first.
+ for (opt_trait_def_id, method_name, borrow) in [
+ (self.tcx.lang_items().fn_trait(), Ident::with_dummy_span(sym::call), true),
+ (self.tcx.lang_items().fn_mut_trait(), Ident::with_dummy_span(sym::call_mut), true),
+ (self.tcx.lang_items().fn_once_trait(), Ident::with_dummy_span(sym::call_once), false),
+ ] {
+ let Some(trait_def_id) = opt_trait_def_id else { continue };
+
+ let opt_input_types = opt_arg_exprs.map(|arg_exprs| {
+ [self.tcx.mk_tup(arg_exprs.iter().map(|e| {
+ self.next_ty_var(TypeVariableOrigin {
+ kind: TypeVariableOriginKind::TypeInference,
+ span: e.span,
+ })
+ }))]
+ });
+ let opt_input_types = opt_input_types.as_ref().map(AsRef::as_ref);
+
+ if let Some(ok) = self.lookup_method_in_trait(
+ call_expr.span,
+ method_name,
+ trait_def_id,
+ adjusted_ty,
+ opt_input_types,
+ ) {
+ let method = self.register_infer_ok_obligations(ok);
+ let mut autoref = None;
+ if borrow {
+ // Check for &self vs &mut self in the method signature. Since this is either
+ // the Fn or FnMut trait, it should be one of those.
+ let ty::Ref(region, _, mutbl) = method.sig.inputs()[0].kind() else {
+ // The `fn`/`fn_mut` lang item is ill-formed, which should have
+ // caused an error elsewhere.
+ self.tcx
+ .sess
+ .delay_span_bug(call_expr.span, "input to call/call_mut is not a ref?");
+ return None;
+ };
+
+ let mutbl = match mutbl {
+ hir::Mutability::Not => AutoBorrowMutability::Not,
+ hir::Mutability::Mut => AutoBorrowMutability::Mut {
+ // For initial two-phase borrow
+ // deployment, conservatively omit
+ // overloaded function call ops.
+ allow_two_phase_borrow: AllowTwoPhase::No,
+ },
+ };
+ autoref = Some(Adjustment {
+ kind: Adjust::Borrow(AutoBorrow::Ref(*region, mutbl)),
+ target: method.sig.inputs()[0],
+ });
+ }
+ return Some((autoref, method));
+ }
+ }
+
+ None
+ }
+
+ /// Give appropriate suggestion when encountering `||{/* not callable */}()`, where the
+ /// likely intention is to call the closure; suggest `(||{})()`. (#55851)
+ fn identify_bad_closure_def_and_call(
+ &self,
+ err: &mut Diagnostic,
+ hir_id: hir::HirId,
+ callee_node: &hir::ExprKind<'_>,
+ callee_span: Span,
+ ) {
+ let hir = self.tcx.hir();
+ let parent_hir_id = hir.get_parent_node(hir_id);
+ let parent_node = hir.get(parent_hir_id);
+ if let (
+ hir::Node::Expr(hir::Expr {
+ kind: hir::ExprKind::Closure(&hir::Closure { fn_decl_span, body, .. }),
+ ..
+ }),
+ hir::ExprKind::Block(..),
+ ) = (parent_node, callee_node)
+ {
+ let fn_decl_span = if hir.body(body).generator_kind
+ == Some(hir::GeneratorKind::Async(hir::AsyncGeneratorKind::Closure))
+ {
+ // Actually need to unwrap a few more layers of HIR to get to
+ // the _real_ closure...
+ let async_closure = hir.get_parent_node(hir.get_parent_node(parent_hir_id));
+ if let hir::Node::Expr(hir::Expr {
+ kind: hir::ExprKind::Closure(&hir::Closure { fn_decl_span, .. }),
+ ..
+ }) = hir.get(async_closure)
+ {
+ fn_decl_span
+ } else {
+ return;
+ }
+ } else {
+ fn_decl_span
+ };
+
+ let start = fn_decl_span.shrink_to_lo();
+ let end = callee_span.shrink_to_hi();
+ err.multipart_suggestion(
+ "if you meant to create this closure and immediately call it, surround the \
+ closure with parentheses",
+ vec![(start, "(".to_string()), (end, ")".to_string())],
+ Applicability::MaybeIncorrect,
+ );
+ }
+ }
+
+ /// Give appropriate suggestion when encountering `[("a", 0) ("b", 1)]`, where the
+ /// likely intention is to create an array containing tuples.
+ fn maybe_suggest_bad_array_definition(
+ &self,
+ err: &mut Diagnostic,
+ call_expr: &'tcx hir::Expr<'tcx>,
+ callee_expr: &'tcx hir::Expr<'tcx>,
+ ) -> bool {
+ let hir_id = self.tcx.hir().get_parent_node(call_expr.hir_id);
+ let parent_node = self.tcx.hir().get(hir_id);
+ if let (
+ hir::Node::Expr(hir::Expr { kind: hir::ExprKind::Array(_), .. }),
+ hir::ExprKind::Tup(exp),
+ hir::ExprKind::Call(_, args),
+ ) = (parent_node, &callee_expr.kind, &call_expr.kind)
+ && args.len() == exp.len()
+ {
+ let start = callee_expr.span.shrink_to_hi();
+ err.span_suggestion(
+ start,
+ "consider separating array elements with a comma",
+ ",",
+ Applicability::MaybeIncorrect,
+ );
+ return true;
+ }
+ false
+ }
+
+ fn confirm_builtin_call(
+ &self,
+ call_expr: &'tcx hir::Expr<'tcx>,
+ callee_expr: &'tcx hir::Expr<'tcx>,
+ callee_ty: Ty<'tcx>,
+ arg_exprs: &'tcx [hir::Expr<'tcx>],
+ expected: Expectation<'tcx>,
+ ) -> Ty<'tcx> {
+ let (fn_sig, def_id) = match *callee_ty.kind() {
+ ty::FnDef(def_id, subst) => {
+ let fn_sig = self.tcx.bound_fn_sig(def_id).subst(self.tcx, subst);
+
+ // Unit testing: function items annotated with
+ // `#[rustc_evaluate_where_clauses]` trigger special output
+ // to let us test the trait evaluation system.
+ if self.tcx.has_attr(def_id, sym::rustc_evaluate_where_clauses) {
+ let predicates = self.tcx.predicates_of(def_id);
+ let predicates = predicates.instantiate(self.tcx, subst);
+ for (predicate, predicate_span) in
+ predicates.predicates.iter().zip(&predicates.spans)
+ {
+ let obligation = Obligation::new(
+ ObligationCause::dummy_with_span(callee_expr.span),
+ self.param_env,
+ *predicate,
+ );
+ let result = self.evaluate_obligation(&obligation);
+ self.tcx
+ .sess
+ .struct_span_err(
+ callee_expr.span,
+ &format!("evaluate({:?}) = {:?}", predicate, result),
+ )
+ .span_label(*predicate_span, "predicate")
+ .emit();
+ }
+ }
+ (fn_sig, Some(def_id))
+ }
+ ty::FnPtr(sig) => (sig, None),
+ _ => {
+ if let hir::ExprKind::Path(hir::QPath::Resolved(_, path)) = &callee_expr.kind
+ && let [segment] = path.segments
+ && let Some(mut diag) = self
+ .tcx
+ .sess
+ .diagnostic()
+ .steal_diagnostic(segment.ident.span, StashKey::CallIntoMethod)
+ {
+ // Try suggesting `foo(a)` -> `a.foo()` if possible.
+ if let Some(ty) =
+ self.suggest_call_as_method(
+ &mut diag,
+ segment,
+ arg_exprs,
+ call_expr,
+ expected
+ )
+ {
+ diag.emit();
+ return ty;
+ } else {
+ diag.emit();
+ }
+ }
+
+ self.report_invalid_callee(call_expr, callee_expr, callee_ty, arg_exprs);
+
+ // This is the "default" function signature, used in case of error.
+ // In that case, we check each argument against "error" in order to
+ // set up all the node type bindings.
+ (
+ ty::Binder::dummy(self.tcx.mk_fn_sig(
+ self.err_args(arg_exprs.len()).into_iter(),
+ self.tcx.ty_error(),
+ false,
+ hir::Unsafety::Normal,
+ abi::Abi::Rust,
+ )),
+ None,
+ )
+ }
+ };
+
+ // Replace any late-bound regions that appear in the function
+ // signature with region variables. We also have to
+ // renormalize the associated types at this point, since they
+ // previously appeared within a `Binder<>` and hence would not
+ // have been normalized before.
+ let fn_sig = self.replace_bound_vars_with_fresh_vars(call_expr.span, infer::FnCall, fn_sig);
+ let fn_sig = self.normalize_associated_types_in(call_expr.span, fn_sig);
+
+ // Call the generic checker.
+ let expected_arg_tys = self.expected_inputs_for_expected_output(
+ call_expr.span,
+ expected,
+ fn_sig.output(),
+ fn_sig.inputs(),
+ );
+ self.check_argument_types(
+ call_expr.span,
+ call_expr,
+ fn_sig.inputs(),
+ expected_arg_tys,
+ arg_exprs,
+ fn_sig.c_variadic,
+ TupleArgumentsFlag::DontTupleArguments,
+ def_id,
+ );
+
+ fn_sig.output()
+ }
+
+ /// Attempts to reinterpret `method(rcvr, args...)` as `rcvr.method(args...)`
+ /// and suggests the fix if the method probe is successful.
+ fn suggest_call_as_method(
+ &self,
+ diag: &mut Diagnostic,
+ segment: &'tcx hir::PathSegment<'tcx>,
+ arg_exprs: &'tcx [hir::Expr<'tcx>],
+ call_expr: &'tcx hir::Expr<'tcx>,
+ expected: Expectation<'tcx>,
+ ) -> Option<Ty<'tcx>> {
+ if let [callee_expr, rest @ ..] = arg_exprs {
+ let callee_ty = self.check_expr(callee_expr);
+ // First, do a probe with `IsSuggestion(true)` to avoid emitting
+ // any strange errors. If it's successful, then we'll do a true
+ // method lookup.
+ let Ok(pick) = self
+ .probe_for_name(
+ call_expr.span,
+ Mode::MethodCall,
+ segment.ident,
+ IsSuggestion(true),
+ callee_ty,
+ call_expr.hir_id,
+ // We didn't record the in scope traits during late resolution
+ // so we need to probe AllTraits unfortunately
+ ProbeScope::AllTraits,
+ ) else {
+ return None;
+ };
+
+ let pick = self.confirm_method(
+ call_expr.span,
+ callee_expr,
+ call_expr,
+ callee_ty,
+ pick,
+ segment,
+ );
+ if pick.illegal_sized_bound.is_some() {
+ return None;
+ }
+
+ let up_to_rcvr_span = segment.ident.span.until(callee_expr.span);
+ let rest_span = callee_expr.span.shrink_to_hi().to(call_expr.span.shrink_to_hi());
+ let rest_snippet = if let Some(first) = rest.first() {
+ self.tcx
+ .sess
+ .source_map()
+ .span_to_snippet(first.span.to(call_expr.span.shrink_to_hi()))
+ } else {
+ Ok(")".to_string())
+ };
+
+ if let Ok(rest_snippet) = rest_snippet {
+ let sugg = if callee_expr.precedence().order() >= PREC_POSTFIX {
+ vec![
+ (up_to_rcvr_span, "".to_string()),
+ (rest_span, format!(".{}({rest_snippet}", segment.ident)),
+ ]
+ } else {
+ vec![
+ (up_to_rcvr_span, "(".to_string()),
+ (rest_span, format!(").{}({rest_snippet}", segment.ident)),
+ ]
+ };
+ let self_ty = self.resolve_vars_if_possible(pick.callee.sig.inputs()[0]);
+ diag.multipart_suggestion(
+ format!(
+ "use the `.` operator to call the method `{}{}` on `{self_ty}`",
+ self.tcx
+ .associated_item(pick.callee.def_id)
+ .trait_container(self.tcx)
+ .map_or_else(
+ || String::new(),
+ |trait_def_id| self.tcx.def_path_str(trait_def_id) + "::"
+ ),
+ segment.ident
+ ),
+ sugg,
+ Applicability::MaybeIncorrect,
+ );
+
+ // Let's check the method fully now
+ let return_ty = self.check_method_argument_types(
+ segment.ident.span,
+ call_expr,
+ Ok(pick.callee),
+ rest,
+ TupleArgumentsFlag::DontTupleArguments,
+ expected,
+ );
+
+ return Some(return_ty);
+ }
+ }
+
+ None
+ }
+
+ fn report_invalid_callee(
+ &self,
+ call_expr: &'tcx hir::Expr<'tcx>,
+ callee_expr: &'tcx hir::Expr<'tcx>,
+ callee_ty: Ty<'tcx>,
+ arg_exprs: &'tcx [hir::Expr<'tcx>],
+ ) {
+ let mut unit_variant = None;
+ if let hir::ExprKind::Path(qpath) = &callee_expr.kind
+ && let Res::Def(def::DefKind::Ctor(kind, def::CtorKind::Const), _)
+ = self.typeck_results.borrow().qpath_res(qpath, callee_expr.hir_id)
+ // Only suggest removing parens if there are no arguments
+ && arg_exprs.is_empty()
+ {
+ let descr = match kind {
+ def::CtorOf::Struct => "struct",
+ def::CtorOf::Variant => "enum variant",
+ };
+ let removal_span = callee_expr.span.shrink_to_hi().to(call_expr.span.shrink_to_hi());
+ unit_variant = Some((removal_span, descr, rustc_hir_pretty::qpath_to_string(qpath)));
+ }
+
+ let callee_ty = self.resolve_vars_if_possible(callee_ty);
+ let mut err = type_error_struct!(
+ self.tcx.sess,
+ callee_expr.span,
+ callee_ty,
+ E0618,
+ "expected function, found {}",
+ match &unit_variant {
+ Some((_, kind, path)) => format!("{kind} `{path}`"),
+ None => format!("`{callee_ty}`"),
+ }
+ );
+
+ self.identify_bad_closure_def_and_call(
+ &mut err,
+ call_expr.hir_id,
+ &callee_expr.kind,
+ callee_expr.span,
+ );
+
+ if let Some((removal_span, kind, path)) = &unit_variant {
+ err.span_suggestion_verbose(
+ *removal_span,
+ &format!(
+ "`{path}` is a unit {kind}, and does not take parentheses to be constructed",
+ ),
+ "",
+ Applicability::MachineApplicable,
+ );
+ }
+
+ let mut inner_callee_path = None;
+ let def = match callee_expr.kind {
+ hir::ExprKind::Path(ref qpath) => {
+ self.typeck_results.borrow().qpath_res(qpath, callee_expr.hir_id)
+ }
+ hir::ExprKind::Call(ref inner_callee, _) => {
+ // If the call spans more than one line and the callee kind is
+ // itself another `ExprCall`, that's a clue that we might just be
+ // missing a semicolon (Issue #51055)
+ let call_is_multiline = self.tcx.sess.source_map().is_multiline(call_expr.span);
+ if call_is_multiline {
+ err.span_suggestion(
+ callee_expr.span.shrink_to_hi(),
+ "consider using a semicolon here",
+ ";",
+ Applicability::MaybeIncorrect,
+ );
+ }
+ if let hir::ExprKind::Path(ref inner_qpath) = inner_callee.kind {
+ inner_callee_path = Some(inner_qpath);
+ self.typeck_results.borrow().qpath_res(inner_qpath, inner_callee.hir_id)
+ } else {
+ Res::Err
+ }
+ }
+ _ => Res::Err,
+ };
+
+ if !self.maybe_suggest_bad_array_definition(&mut err, call_expr, callee_expr) {
+ if let Some((maybe_def, output_ty, _)) =
+ self.extract_callable_info(callee_expr, callee_ty)
+ && !self.type_is_sized_modulo_regions(self.param_env, output_ty, callee_expr.span)
+ {
+ let descr = match maybe_def {
+ DefIdOrName::DefId(def_id) => self.tcx.def_kind(def_id).descr(def_id),
+ DefIdOrName::Name(name) => name,
+ };
+ err.span_label(
+ callee_expr.span,
+ format!("this {descr} returns an unsized value `{output_ty}`, so it cannot be called")
+ );
+ if let DefIdOrName::DefId(def_id) = maybe_def
+ && let Some(def_span) = self.tcx.hir().span_if_local(def_id)
+ {
+ err.span_label(def_span, "the callable type is defined here");
+ }
+ } else {
+ err.span_label(call_expr.span, "call expression requires function");
+ }
+ }
+
+ if let Some(span) = self.tcx.hir().res_span(def) {
+ let callee_ty = callee_ty.to_string();
+ let label = match (unit_variant, inner_callee_path) {
+ (Some((_, kind, path)), _) => Some(format!("{kind} `{path}` defined here")),
+ (_, Some(hir::QPath::Resolved(_, path))) => self
+ .tcx
+ .sess
+ .source_map()
+ .span_to_snippet(path.span)
+ .ok()
+ .map(|p| format!("`{p}` defined here returns `{callee_ty}`")),
+ _ => {
+ match def {
+ // Emit a different diagnostic for local variables, as they are not
+ // type definitions themselves, but rather variables *of* that type.
+ Res::Local(hir_id) => Some(format!(
+ "`{}` has type `{}`",
+ self.tcx.hir().name(hir_id),
+ callee_ty
+ )),
+ Res::Def(kind, def_id) if kind.ns() == Some(Namespace::ValueNS) => {
+ Some(format!("`{}` defined here", self.tcx.def_path_str(def_id),))
+ }
+ _ => Some(format!("`{callee_ty}` defined here")),
+ }
+ }
+ };
+ if let Some(label) = label {
+ err.span_label(span, label);
+ }
+ }
+ err.emit();
+ }
+
+ fn confirm_deferred_closure_call(
+ &self,
+ call_expr: &'tcx hir::Expr<'tcx>,
+ arg_exprs: &'tcx [hir::Expr<'tcx>],
+ expected: Expectation<'tcx>,
+ closure_def_id: LocalDefId,
+ fn_sig: ty::FnSig<'tcx>,
+ ) -> Ty<'tcx> {
+ // `fn_sig` is the *signature* of the closure being called. We
+ // don't know the full details yet (`Fn` vs `FnMut` etc), but we
+ // do know the types expected for each argument and the return
+ // type.
+
+ let expected_arg_tys = self.expected_inputs_for_expected_output(
+ call_expr.span,
+ expected,
+ fn_sig.output(),
+ fn_sig.inputs(),
+ );
+
+ self.check_argument_types(
+ call_expr.span,
+ call_expr,
+ fn_sig.inputs(),
+ expected_arg_tys,
+ arg_exprs,
+ fn_sig.c_variadic,
+ TupleArgumentsFlag::TupleArguments,
+ Some(closure_def_id.to_def_id()),
+ );
+
+ fn_sig.output()
+ }
+
+ fn confirm_overloaded_call(
+ &self,
+ call_expr: &'tcx hir::Expr<'tcx>,
+ arg_exprs: &'tcx [hir::Expr<'tcx>],
+ expected: Expectation<'tcx>,
+ method_callee: MethodCallee<'tcx>,
+ ) -> Ty<'tcx> {
+ let output_type = self.check_method_argument_types(
+ call_expr.span,
+ call_expr,
+ Ok(method_callee),
+ arg_exprs,
+ TupleArgumentsFlag::TupleArguments,
+ expected,
+ );
+
+ self.write_method_call(call_expr.hir_id, method_callee);
+ output_type
+ }
+}
+
+#[derive(Debug)]
+pub struct DeferredCallResolution<'tcx> {
+ call_expr: &'tcx hir::Expr<'tcx>,
+ callee_expr: &'tcx hir::Expr<'tcx>,
+ adjusted_ty: Ty<'tcx>,
+ adjustments: Vec<Adjustment<'tcx>>,
+ fn_sig: ty::FnSig<'tcx>,
+ closure_substs: SubstsRef<'tcx>,
+}
+
+impl<'a, 'tcx> DeferredCallResolution<'tcx> {
+ pub fn resolve(self, fcx: &FnCtxt<'a, 'tcx>) {
+ debug!("DeferredCallResolution::resolve() {:?}", self);
+
+ // we should not be invoked until the closure kind has been
+ // determined by upvar inference
+ assert!(fcx.closure_kind(self.closure_substs).is_some());
+
+ // We may now know enough to figure out fn vs fnmut etc.
+ match fcx.try_overloaded_call_traits(self.call_expr, self.adjusted_ty, None) {
+ Some((autoref, method_callee)) => {
+ // One problem is that when we get here, we are going
+ // to have a newly instantiated function signature
+ // from the call trait. This has to be reconciled with
+ // the older function signature we had before. In
+ // principle we *should* be able to just equate the fn_sigs(), but we
+ // can't because of the annoying need for a TypeTrace.
+ // (This always bites me, should find a way to
+ // refactor it.)
+ let method_sig = method_callee.sig;
+
+ debug!("attempt_resolution: method_callee={:?}", method_callee);
+
+ for (method_arg_ty, self_arg_ty) in
+ iter::zip(method_sig.inputs().iter().skip(1), self.fn_sig.inputs())
+ {
+ fcx.demand_eqtype(self.call_expr.span, *self_arg_ty, *method_arg_ty);
+ }
+
+ fcx.demand_eqtype(self.call_expr.span, method_sig.output(), self.fn_sig.output());
+
+ let mut adjustments = self.adjustments;
+ adjustments.extend(autoref);
+ fcx.apply_adjustments(self.callee_expr, adjustments);
+
+ fcx.write_method_call(self.call_expr.hir_id, method_callee);
+ }
+ None => {
+ // This can happen if `#![no_core]` is used and the `fn/fn_mut/fn_once`
+ // lang items are not defined (issue #86238).
+ let mut err = fcx.inh.tcx.sess.struct_span_err(
+ self.call_expr.span,
+ "failed to find an overloaded call trait for closure call",
+ );
+ err.help(
+ "make sure the `fn`/`fn_mut`/`fn_once` lang items are defined \
+ and have associated `call`/`call_mut`/`call_once` functions",
+ );
+ err.emit();
+ }
+ }
+ }
+}
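Among the diagnostics gathered in `callee.rs`, `identify_bad_closure_def_and_call` covers the `||{ /* ... */ }()` pitfall by suggesting parentheses around the closure. A minimal sketch of the before/after, assuming the usual parse in which a trailing `()` calls the block's value rather than the closure (which is what produces the "expected function" error):

    fn main() {
        // `let x = || { 40 + 2 }();` would declare a closure whose *body* tries to
        // call `{ 40 + 2 }`, so the compiler reports "expected function" and
        // suggests wrapping the closure. The parenthesized form compiles:
        let x = (|| 40 + 2)();
        assert_eq!(x, 42);
    }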
diff --git a/compiler/rustc_typeck/src/check/cast.rs b/compiler/rustc_hir_typeck/src/cast.rs
index 7aaddc2bd..d1dab0540 100644
--- a/compiler/rustc_typeck/src/check/cast.rs
+++ b/compiler/rustc_hir_typeck/src/cast.rs
@@ -30,35 +30,38 @@
use super::FnCtxt;
-use crate::hir::def_id::DefId;
use crate::type_error_struct;
-use rustc_errors::{struct_span_err, Applicability, DiagnosticBuilder, ErrorGuaranteed};
+use rustc_errors::{struct_span_err, Applicability, DelayDm, DiagnosticBuilder, ErrorGuaranteed};
use rustc_hir as hir;
-use rustc_hir::lang_items::LangItem;
use rustc_middle::mir::Mutability;
use rustc_middle::ty::adjustment::AllowTwoPhase;
use rustc_middle::ty::cast::{CastKind, CastTy};
use rustc_middle::ty::error::TypeError;
use rustc_middle::ty::subst::SubstsRef;
-use rustc_middle::ty::{self, Ty, TypeAndMut, TypeVisitable};
+use rustc_middle::ty::{self, Ty, TypeAndMut, TypeVisitable, VariantDef};
use rustc_session::lint;
use rustc_session::Session;
+use rustc_span::def_id::{DefId, LOCAL_CRATE};
use rustc_span::symbol::sym;
use rustc_span::Span;
use rustc_trait_selection::infer::InferCtxtExt;
-use rustc_trait_selection::traits;
use rustc_trait_selection::traits::error_reporting::report_object_safety_error;
/// Reifies a cast check to be checked once we have full type information for
/// a function context.
#[derive(Debug)]
pub struct CastCheck<'tcx> {
+ /// The expression whose value is being cast
expr: &'tcx hir::Expr<'tcx>,
+ /// The source type for the cast expression
expr_ty: Ty<'tcx>,
expr_span: Span,
+ /// The target type. That is, the type we are casting to.
cast_ty: Ty<'tcx>,
cast_span: Span,
span: Span,
+ /// Whether the cast is made in a const context or not.
+ pub constness: hir::Constness,
}
/// The kind of pointer and associated metadata (thin, length or vtable) - we
@@ -96,13 +99,13 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
return Err(reported);
}
- if self.type_is_known_to_be_sized_modulo_regions(t, span) {
+ if self.type_is_sized_modulo_regions(self.param_env, t, span) {
return Ok(Some(PointerKind::Thin));
}
Ok(match *t.kind() {
ty::Slice(_) | ty::Str => Some(PointerKind::Length),
- ty::Dynamic(ref tty, ..) => Some(PointerKind::VTable(tty.principal_def_id())),
+ ty::Dynamic(ref tty, _, ty::Dyn) => Some(PointerKind::VTable(tty.principal_def_id())),
ty::Adt(def, substs) if def.is_struct() => match def.non_enum_variant().fields.last() {
None => Some(PointerKind::Thin),
Some(f) => {
@@ -139,6 +142,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
| ty::Generator(..)
| ty::Adt(..)
| ty::Never
+ | ty::Dynamic(_, _, ty::DynStar)
| ty::Error(_) => {
let reported = self
.tcx
@@ -173,6 +177,7 @@ pub enum CastError {
/// or "a length". If this argument is None, then the metadata is unknown, for example,
/// when we're typechecking a type parameter with a ?Sized bound.
IntToFatCast(Option<&'static str>),
+ ForeignNonExhaustiveAdt,
}
impl From<ErrorGuaranteed> for CastError {
@@ -207,15 +212,16 @@ impl<'a, 'tcx> CastCheck<'tcx> {
cast_ty: Ty<'tcx>,
cast_span: Span,
span: Span,
+ constness: hir::Constness,
) -> Result<CastCheck<'tcx>, ErrorGuaranteed> {
let expr_span = expr.span.find_ancestor_inside(span).unwrap_or(expr.span);
- let check = CastCheck { expr, expr_ty, expr_span, cast_ty, cast_span, span };
+ let check = CastCheck { expr, expr_ty, expr_span, cast_ty, cast_span, span, constness };
// For better error messages, check for some obviously unsized
// cases now. We do a more thorough check at the end, once
// inference is more completely known.
match cast_ty.kind() {
- ty::Dynamic(..) | ty::Slice(..) => {
+ ty::Dynamic(_, _, ty::Dyn) | ty::Slice(..) => {
let reported = check.report_cast_to_unsized_type(fcx);
Err(reported)
}
@@ -523,7 +529,9 @@ impl<'a, 'tcx> CastCheck<'tcx> {
err.emit();
}
CastError::SizedUnsizedCast => {
- use crate::structured_errors::{SizedUnsizedCast, StructuredDiagnostic};
+ use rustc_hir_analysis::structured_errors::{
+ SizedUnsizedCast, StructuredDiagnostic,
+ };
SizedUnsizedCast {
sess: &fcx.tcx.sess,
@@ -591,6 +599,17 @@ impl<'a, 'tcx> CastCheck<'tcx> {
}
err.emit();
}
+ CastError::ForeignNonExhaustiveAdt => {
+ make_invalid_casting_error(
+ fcx.tcx.sess,
+ self.span,
+ self.expr_ty,
+ self.cast_ty,
+ fcx,
+ )
+ .note("cannot cast an enum with a non-exhaustive variant when it's defined in another crate")
+ .emit();
+ }
}
}
@@ -670,19 +689,25 @@ impl<'a, 'tcx> CastCheck<'tcx> {
} else {
("", lint::builtin::TRIVIAL_CASTS)
};
- fcx.tcx.struct_span_lint_hir(lint, self.expr.hir_id, self.span, |err| {
- err.build(&format!(
- "trivial {}cast: `{}` as `{}`",
- adjective,
- fcx.ty_to_string(t_expr),
- fcx.ty_to_string(t_cast)
- ))
- .help(&format!(
- "cast can be replaced by coercion; this might \
- require {type_asc_or}a temporary variable"
- ))
- .emit();
- });
+ fcx.tcx.struct_span_lint_hir(
+ lint,
+ self.expr.hir_id,
+ self.span,
+ DelayDm(|| {
+ format!(
+ "trivial {}cast: `{}` as `{}`",
+ adjective,
+ fcx.ty_to_string(t_expr),
+ fcx.ty_to_string(t_cast)
+ )
+ }),
+ |lint| {
+ lint.help(format!(
+ "cast can be replaced by coercion; this might \
+ require {type_asc_or}a temporary variable"
+ ))
+ },
+ );
}
#[instrument(skip(fcx), level = "debug")]
@@ -692,7 +717,7 @@ impl<'a, 'tcx> CastCheck<'tcx> {
debug!("check_cast({}, {:?} as {:?})", self.expr.hir_id, self.expr_ty, self.cast_ty);
- if !fcx.type_is_known_to_be_sized_modulo_regions(self.cast_ty, self.span)
+ if !fcx.type_is_sized_modulo_regions(fcx.param_env, self.cast_ty, self.span)
&& !self.cast_ty.has_infer_types()
{
self.report_cast_to_unsized_type(fcx);
@@ -789,6 +814,14 @@ impl<'a, 'tcx> CastCheck<'tcx> {
_ => return Err(CastError::NonScalar),
};
+ if let ty::Adt(adt_def, _) = *self.expr_ty.kind() {
+ if adt_def.did().krate != LOCAL_CRATE {
+ if adt_def.variants().iter().any(VariantDef::is_field_list_non_exhaustive) {
+ return Err(CastError::ForeignNonExhaustiveAdt);
+ }
+ }
+ }
+
match (t_from, t_cast) {
// These types have invariants! can't cast into them.
(_, Int(CEnum) | FnPtr) => Err(CastError::NonScalar),
@@ -835,6 +868,14 @@ impl<'a, 'tcx> CastCheck<'tcx> {
(Int(Char) | Int(Bool), Int(_)) => Ok(CastKind::PrimIntCast),
(Int(_) | Float, Int(_) | Float) => Ok(CastKind::NumericCast),
+
+ (_, DynStar) | (DynStar, _) => {
+ if fcx.tcx.features().dyn_star {
+ bug!("should be handled by `try_coerce`")
+ } else {
+ Err(CastError::IllegalCast)
+ }
+ }
}
}
@@ -976,12 +1017,12 @@ impl<'a, 'tcx> CastCheck<'tcx> {
lint::builtin::CENUM_IMPL_DROP_CAST,
self.expr.hir_id,
self.span,
- |err| {
- err.build(&format!(
- "cannot cast enum `{}` into integer `{}` because it implements `Drop`",
- self.expr_ty, self.cast_ty
- ))
- .emit();
+ DelayDm(|| format!(
+ "cannot cast enum `{}` into integer `{}` because it implements `Drop`",
+ self.expr_ty, self.cast_ty
+ )),
+ |lint| {
+ lint
},
);
}
@@ -992,12 +1033,11 @@ impl<'a, 'tcx> CastCheck<'tcx> {
lint::builtin::LOSSY_PROVENANCE_CASTS,
self.expr.hir_id,
self.span,
- |err| {
- let mut err = err.build(&format!(
+ DelayDm(|| format!(
"under strict provenance it is considered bad style to cast pointer `{}` to integer `{}`",
self.expr_ty, self.cast_ty
- ));
-
+ )),
+ |lint| {
let msg = "use `.addr()` to obtain the address of a pointer";
let expr_prec = self.expr.precedence().order();
@@ -1016,9 +1056,9 @@ impl<'a, 'tcx> CastCheck<'tcx> {
(cast_span, format!(").addr(){scalar_cast}")),
];
- err.multipart_suggestion(msg, suggestions, Applicability::MaybeIncorrect);
+ lint.multipart_suggestion(msg, suggestions, Applicability::MaybeIncorrect);
} else {
- err.span_suggestion(
+ lint.span_suggestion(
cast_span,
msg,
format!(".addr(){scalar_cast}"),
@@ -1026,12 +1066,12 @@ impl<'a, 'tcx> CastCheck<'tcx> {
);
}
- err.help(
+ lint.help(
"if you can't comply with strict provenance and need to expose the pointer \
provenance you can use `.expose_addr()` instead"
);
- err.emit();
+ lint
},
);
}
@@ -1041,32 +1081,25 @@ impl<'a, 'tcx> CastCheck<'tcx> {
lint::builtin::FUZZY_PROVENANCE_CASTS,
self.expr.hir_id,
self.span,
- |err| {
- let mut err = err.build(&format!(
- "strict provenance disallows casting integer `{}` to pointer `{}`",
- self.expr_ty, self.cast_ty
- ));
+ DelayDm(|| format!(
+ "strict provenance disallows casting integer `{}` to pointer `{}`",
+ self.expr_ty, self.cast_ty
+ )),
+ |lint| {
let msg = "use `.with_addr()` to adjust a valid pointer in the same allocation, to this address";
let suggestions = vec![
(self.expr_span.shrink_to_lo(), String::from("(...).with_addr(")),
(self.expr_span.shrink_to_hi().to(self.cast_span), String::from(")")),
];
- err.multipart_suggestion(msg, suggestions, Applicability::MaybeIncorrect);
- err.help(
+ lint.multipart_suggestion(msg, suggestions, Applicability::MaybeIncorrect);
+ lint.help(
"if you can't comply with strict provenance and don't have a pointer with \
the correct provenance you can use `std::ptr::from_exposed_addr()` instead"
);
- err.emit();
+ lint
},
);
}
}
-
-impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
- fn type_is_known_to_be_sized_modulo_regions(&self, ty: Ty<'tcx>, span: Span) -> bool {
- let lang_item = self.tcx.require_lang_item(LangItem::Sized, None);
- traits::type_known_to_meet_bound_modulo_regions(self, self.param_env, ty, lang_item, span)
- }
-}
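The cast lints above now build their messages through `DelayDm`, but their substance is unchanged; for example, the trivial-cast help still says the cast "can be replaced by coercion". A hedged sketch of what that lint flags and of the coercion-based rewrite it recommends (the lint is allow-by-default, so both forms compile):

    #![warn(trivial_casts)]

    fn main() {
        let x: u32 = 7;
        // Trivial cast: `&u32` already coerces to `*const u32`, so `as` adds nothing.
        let _a = &x as *const u32;
        // The suggested alternative: let coercion do the work, e.g. via an annotated binding.
        let _b: *const u32 = &x;
    }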
diff --git a/compiler/rustc_hir_typeck/src/check.rs b/compiler/rustc_hir_typeck/src/check.rs
new file mode 100644
index 000000000..7f76364e1
--- /dev/null
+++ b/compiler/rustc_hir_typeck/src/check.rs
@@ -0,0 +1,324 @@
+use crate::coercion::CoerceMany;
+use crate::gather_locals::GatherLocalsVisitor;
+use crate::{FnCtxt, Inherited};
+use crate::{GeneratorTypes, UnsafetyState};
+use rustc_hir as hir;
+use rustc_hir::def::DefKind;
+use rustc_hir::intravisit::Visitor;
+use rustc_hir::lang_items::LangItem;
+use rustc_hir::{ImplicitSelfKind, ItemKind, Node};
+use rustc_hir_analysis::check::fn_maybe_err;
+use rustc_infer::infer::type_variable::{TypeVariableOrigin, TypeVariableOriginKind};
+use rustc_infer::infer::RegionVariableOrigin;
+use rustc_middle::ty::{self, Ty, TyCtxt};
+use rustc_span::def_id::LocalDefId;
+use rustc_target::spec::abi::Abi;
+use rustc_trait_selection::traits;
+use std::cell::RefCell;
+
+/// Helper used for fns and closures. Does the grungy work of checking a function
+/// body and returns the function context used for that purpose, since in the case of a fn item
+/// there is still a bit more to do.
+///
+/// * ...
+/// * inherited: other fields inherited from the enclosing fn (if any)
+#[instrument(skip(inherited, body), level = "debug")]
+pub(super) fn check_fn<'a, 'tcx>(
+ inherited: &'a Inherited<'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ fn_sig: ty::FnSig<'tcx>,
+ decl: &'tcx hir::FnDecl<'tcx>,
+ fn_id: hir::HirId,
+ body: &'tcx hir::Body<'tcx>,
+ can_be_generator: Option<hir::Movability>,
+ return_type_pre_known: bool,
+) -> (FnCtxt<'a, 'tcx>, Option<GeneratorTypes<'tcx>>) {
+ // Create the function context. This is either derived from scratch or,
+ // in the case of closures, based on the outer context.
+ let mut fcx = FnCtxt::new(inherited, param_env, body.value.hir_id);
+ fcx.ps.set(UnsafetyState::function(fn_sig.unsafety, fn_id));
+ fcx.return_type_pre_known = return_type_pre_known;
+
+ let tcx = fcx.tcx;
+ let hir = tcx.hir();
+
+ let declared_ret_ty = fn_sig.output();
+
+ let ret_ty =
+ fcx.register_infer_ok_obligations(fcx.infcx.replace_opaque_types_with_inference_vars(
+ declared_ret_ty,
+ body.value.hir_id,
+ decl.output.span(),
+ param_env,
+ ));
+ // If we replaced declared_ret_ty with infer vars, then we must be inferring
+ // an opaque type, so set a flag so we can improve diagnostics.
+ fcx.return_type_has_opaque = ret_ty != declared_ret_ty;
+
+ fcx.ret_coercion = Some(RefCell::new(CoerceMany::new(ret_ty)));
+
+ let span = body.value.span;
+
+ fn_maybe_err(tcx, span, fn_sig.abi);
+
+ if fn_sig.abi == Abi::RustCall {
+ let expected_args = if let ImplicitSelfKind::None = decl.implicit_self { 1 } else { 2 };
+
+ let err = || {
+ let item = match tcx.hir().get(fn_id) {
+ Node::Item(hir::Item { kind: ItemKind::Fn(header, ..), .. }) => Some(header),
+ Node::ImplItem(hir::ImplItem {
+ kind: hir::ImplItemKind::Fn(header, ..), ..
+ }) => Some(header),
+ Node::TraitItem(hir::TraitItem {
+ kind: hir::TraitItemKind::Fn(header, ..),
+ ..
+ }) => Some(header),
+ // Closures are RustCall, but they tuple their arguments, so shouldn't be checked
+ Node::Expr(hir::Expr { kind: hir::ExprKind::Closure { .. }, .. }) => None,
+ node => bug!("Item being checked wasn't a function/closure: {:?}", node),
+ };
+
+ if let Some(header) = item {
+ tcx.sess.span_err(header.span, "functions with the \"rust-call\" ABI must take a single non-self argument that is a tuple");
+ }
+ };
+
+ if fn_sig.inputs().len() != expected_args {
+ err()
+ } else {
+ // FIXME(CraftSpider) Add a check on parameter expansion, so we don't just make the ICE happen later on
+ // This will probably require wide-scale changes to support a TupleKind obligation
+ // We can't resolve this without knowing the type of the param
+ if !matches!(fn_sig.inputs()[expected_args - 1].kind(), ty::Tuple(_) | ty::Param(_)) {
+ err()
+ }
+ }
+ }
+
+ if body.generator_kind.is_some() && can_be_generator.is_some() {
+ let yield_ty = fcx
+ .next_ty_var(TypeVariableOrigin { kind: TypeVariableOriginKind::TypeInference, span });
+ fcx.require_type_is_sized(yield_ty, span, traits::SizedYieldType);
+
+ // Resume type defaults to `()` if the generator has no argument.
+ let resume_ty = fn_sig.inputs().get(0).copied().unwrap_or_else(|| tcx.mk_unit());
+
+ fcx.resume_yield_tys = Some((resume_ty, yield_ty));
+ }
+
+ GatherLocalsVisitor::new(&fcx).visit_body(body);
+
+ // C-variadic fns also have a `VaList` input that's not listed in `fn_sig`
+ // (as it's created inside the body itself, not passed in from outside).
+ let maybe_va_list = if fn_sig.c_variadic {
+ let span = body.params.last().unwrap().span;
+ let va_list_did = tcx.require_lang_item(LangItem::VaList, Some(span));
+ let region = fcx.next_region_var(RegionVariableOrigin::MiscVariable(span));
+
+ Some(tcx.bound_type_of(va_list_did).subst(tcx, &[region.into()]))
+ } else {
+ None
+ };
+
+ // Add formal parameters.
+ let inputs_hir = hir.fn_decl_by_hir_id(fn_id).map(|decl| &decl.inputs);
+ let inputs_fn = fn_sig.inputs().iter().copied();
+ for (idx, (param_ty, param)) in inputs_fn.chain(maybe_va_list).zip(body.params).enumerate() {
+ // Check the pattern.
+ let ty_span = try { inputs_hir?.get(idx)?.span };
+ fcx.check_pat_top(&param.pat, param_ty, ty_span, false);
+
+ // Check that argument is Sized.
+ // The check for a non-trivial pattern is a hack to avoid duplicate warnings
+ // for simple cases like `fn foo(x: Trait)`,
+ // where we would error once on the parameter as a whole, and once on the binding `x`.
+ if param.pat.simple_ident().is_none() && !tcx.features().unsized_fn_params {
+ fcx.require_type_is_sized(param_ty, param.pat.span, traits::SizedArgumentType(ty_span));
+ }
+
+ fcx.write_ty(param.hir_id, param_ty);
+ }
+
+ inherited.typeck_results.borrow_mut().liberated_fn_sigs_mut().insert(fn_id, fn_sig);
+
+ fcx.in_tail_expr = true;
+ if let ty::Dynamic(..) = declared_ret_ty.kind() {
+ // FIXME: We need to verify that the return type is `Sized` after the return expression has
+ // been evaluated so that we have types available for all the nodes being returned, but that
+ // requires the coerced evaluated type to be stored. Moving `check_return_expr` before this
+ // causes unsized errors caused by the `declared_ret_ty` to point at the return expression,
+ // while keeping the current ordering we will ignore the tail expression's type because we
+ // don't know it yet. We can't do `check_expr_kind` while keeping `check_return_expr`
+ // because we will trigger "unreachable expression" lints unconditionally.
+ // Because of all of this, we perform a crude check for the simplest `!Sized` case that a
+ // newcomer might hit, returning a bare trait, and in that case we populate the tail
+ // expression's type so that the suggestion will be correct, but ignore all other
+ // possible cases.
+ fcx.check_expr(&body.value);
+ fcx.require_type_is_sized(declared_ret_ty, decl.output.span(), traits::SizedReturnType);
+ } else {
+ fcx.require_type_is_sized(declared_ret_ty, decl.output.span(), traits::SizedReturnType);
+ fcx.check_return_expr(&body.value, false);
+ }
+ fcx.in_tail_expr = false;
+
+ // We insert the deferred_generator_interiors entry after visiting the body.
+ // This ensures that all nested generators appear before the entry of this generator.
+ // resolve_generator_interiors relies on this property.
+ let gen_ty = if let (Some(_), Some(gen_kind)) = (can_be_generator, body.generator_kind) {
+ let interior = fcx
+ .next_ty_var(TypeVariableOrigin { kind: TypeVariableOriginKind::MiscVariable, span });
+ fcx.deferred_generator_interiors.borrow_mut().push((body.id(), interior, gen_kind));
+
+ let (resume_ty, yield_ty) = fcx.resume_yield_tys.unwrap();
+ Some(GeneratorTypes {
+ resume_ty,
+ yield_ty,
+ interior,
+ movability: can_be_generator.unwrap(),
+ })
+ } else {
+ None
+ };
+
+ // Finalize the return check by taking the LUB of the return types
+ // we saw and assigning it to the expected return type. This isn't
+ // really expected to fail, since the coercions would have failed
+ // earlier when trying to find a LUB.
+ let coercion = fcx.ret_coercion.take().unwrap().into_inner();
+ let mut actual_return_ty = coercion.complete(&fcx);
+ debug!("actual_return_ty = {:?}", actual_return_ty);
+ if let ty::Dynamic(..) = declared_ret_ty.kind() {
+ // We have special-cased the case where the function is declared
+ // `-> dyn Foo` and we don't actually relate it to the
+ // `fcx.ret_coercion`, so just substitute a type variable.
+ actual_return_ty =
+ fcx.next_ty_var(TypeVariableOrigin { kind: TypeVariableOriginKind::DynReturnFn, span });
+ debug!("actual_return_ty replaced with {:?}", actual_return_ty);
+ }
+
+ // HACK(oli-obk, compiler-errors): We should be comparing this against
+ // `declared_ret_ty`, but then anything uninferred would be inferred to
+ // the opaque type itself. That again would cause writeback to assume
+ // we have a recursive call site and do the sadly stabilized fallback to `()`.
+ fcx.demand_suptype(span, ret_ty, actual_return_ty);
+
+ // Check that a function marked as `#[panic_handler]` has signature `fn(&PanicInfo) -> !`
+ if let Some(panic_impl_did) = tcx.lang_items().panic_impl()
+ && panic_impl_did == hir.local_def_id(fn_id).to_def_id()
+ {
+ check_panic_info_fn(tcx, panic_impl_did.expect_local(), fn_sig, decl, declared_ret_ty);
+ }
+
+ // Check that a function marked as `#[alloc_error_handler]` has signature `fn(Layout) -> !`
+ if let Some(alloc_error_handler_did) = tcx.lang_items().oom()
+ && alloc_error_handler_did == hir.local_def_id(fn_id).to_def_id()
+ {
+ check_alloc_error_fn(tcx, alloc_error_handler_did.expect_local(), fn_sig, decl, declared_ret_ty);
+ }
+
+ (fcx, gen_ty)
+}
+
+fn check_panic_info_fn(
+ tcx: TyCtxt<'_>,
+ fn_id: LocalDefId,
+ fn_sig: ty::FnSig<'_>,
+ decl: &hir::FnDecl<'_>,
+ declared_ret_ty: Ty<'_>,
+) {
+ let Some(panic_info_did) = tcx.lang_items().panic_info() else {
+ tcx.sess.err("language item required, but not found: `panic_info`");
+ return;
+ };
+
+ if *declared_ret_ty.kind() != ty::Never {
+ tcx.sess.span_err(decl.output.span(), "return type should be `!`");
+ }
+
+ let inputs = fn_sig.inputs();
+ if inputs.len() != 1 {
+ tcx.sess.span_err(tcx.def_span(fn_id), "function should have one argument");
+ return;
+ }
+
+ let arg_is_panic_info = match *inputs[0].kind() {
+ ty::Ref(region, ty, mutbl) => match *ty.kind() {
+ ty::Adt(ref adt, _) => {
+ adt.did() == panic_info_did && mutbl == hir::Mutability::Not && !region.is_static()
+ }
+ _ => false,
+ },
+ _ => false,
+ };
+
+ if !arg_is_panic_info {
+ tcx.sess.span_err(decl.inputs[0].span, "argument should be `&PanicInfo`");
+ }
+
+ let DefKind::Fn = tcx.def_kind(fn_id) else {
+ let span = tcx.def_span(fn_id);
+ tcx.sess.span_err(span, "should be a function");
+ return;
+ };
+
+ let generic_counts = tcx.generics_of(fn_id).own_counts();
+ if generic_counts.types != 0 {
+ let span = tcx.def_span(fn_id);
+ tcx.sess.span_err(span, "should have no type parameters");
+ }
+ if generic_counts.consts != 0 {
+ let span = tcx.def_span(fn_id);
+ tcx.sess.span_err(span, "should have no const parameters");
+ }
+}
+
+fn check_alloc_error_fn(
+ tcx: TyCtxt<'_>,
+ fn_id: LocalDefId,
+ fn_sig: ty::FnSig<'_>,
+ decl: &hir::FnDecl<'_>,
+ declared_ret_ty: Ty<'_>,
+) {
+ let Some(alloc_layout_did) = tcx.lang_items().alloc_layout() else {
+ tcx.sess.err("language item required, but not found: `alloc_layout`");
+ return;
+ };
+
+ if *declared_ret_ty.kind() != ty::Never {
+ tcx.sess.span_err(decl.output.span(), "return type should be `!`");
+ }
+
+ let inputs = fn_sig.inputs();
+ if inputs.len() != 1 {
+ tcx.sess.span_err(tcx.def_span(fn_id), "function should have one argument");
+ return;
+ }
+
+ let arg_is_alloc_layout = match inputs[0].kind() {
+ ty::Adt(ref adt, _) => adt.did() == alloc_layout_did,
+ _ => false,
+ };
+
+ if !arg_is_alloc_layout {
+ tcx.sess.span_err(decl.inputs[0].span, "argument should be `Layout`");
+ }
+
+ let DefKind::Fn = tcx.def_kind(fn_id) else {
+ let span = tcx.def_span(fn_id);
+ tcx.sess.span_err(span, "`#[alloc_error_handler]` should be a function");
+ return;
+ };
+
+ let generic_counts = tcx.generics_of(fn_id).own_counts();
+ if generic_counts.types != 0 {
+ let span = tcx.def_span(fn_id);
+ tcx.sess.span_err(span, "`#[alloc_error_handler]` function should have no type parameters");
+ }
+ if generic_counts.consts != 0 {
+ let span = tcx.def_span(fn_id);
+ tcx.sess
+ .span_err(span, "`#[alloc_error_handler]` function should have no const parameters");
+ }
+}
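The new `check.rs` defines `check_panic_info_fn` and `check_alloc_error_fn` alongside `check_fn`; they enforce the shapes `fn(&PanicInfo) -> !` and `fn(Layout) -> !` respectively. A hedged sketch of the first shape as it would appear in a `#![no_std]` library (illustrative only; a real crate supplies its own handler body):

    #![no_std]

    use core::panic::PanicInfo;

    // The checks require exactly one argument of type `&PanicInfo` (a shared,
    // non-`'static` reference), a `!` return type, and a plain `fn` with no
    // type or const parameters.
    #[panic_handler]
    fn panic(_info: &PanicInfo) -> ! {
        loop {}
    }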
diff --git a/compiler/rustc_hir_typeck/src/closure.rs b/compiler/rustc_hir_typeck/src/closure.rs
new file mode 100644
index 000000000..a5a45f75e
--- /dev/null
+++ b/compiler/rustc_hir_typeck/src/closure.rs
@@ -0,0 +1,824 @@
+//! Code for type-checking closure expressions.
+
+use super::{check_fn, Expectation, FnCtxt, GeneratorTypes};
+
+use hir::def::DefKind;
+use rustc_hir as hir;
+use rustc_hir::def_id::DefId;
+use rustc_hir::lang_items::LangItem;
+use rustc_hir_analysis::astconv::AstConv;
+use rustc_infer::infer::type_variable::{TypeVariableOrigin, TypeVariableOriginKind};
+use rustc_infer::infer::LateBoundRegionConversionTime;
+use rustc_infer::infer::{InferOk, InferResult};
+use rustc_middle::ty::subst::InternalSubsts;
+use rustc_middle::ty::visit::TypeVisitable;
+use rustc_middle::ty::{self, Ty};
+use rustc_span::source_map::Span;
+use rustc_target::spec::abi::Abi;
+use rustc_trait_selection::traits::error_reporting::ArgKind;
+use rustc_trait_selection::traits::error_reporting::InferCtxtExt as _;
+use std::cmp;
+use std::iter;
+
+/// What signature do we *expect* the closure to have from context?
+#[derive(Debug)]
+struct ExpectedSig<'tcx> {
+ /// Span that gave us this expectation, if we know that.
+ cause_span: Option<Span>,
+ sig: ty::PolyFnSig<'tcx>,
+}
+
+struct ClosureSignatures<'tcx> {
+ /// The signature users of the closure see.
+ bound_sig: ty::PolyFnSig<'tcx>,
+ /// The signature within the function body.
+ /// This mostly differs in the sense that lifetimes are now early bound and any
+ /// opaque types from the signature expectation are overridden in case there are
+ /// explicit hidden types written by the user in the closure signature.
+ liberated_sig: ty::FnSig<'tcx>,
+}
+
+impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
+ #[instrument(skip(self, expr, _capture, decl, body_id), level = "debug")]
+ pub fn check_expr_closure(
+ &self,
+ expr: &hir::Expr<'_>,
+ _capture: hir::CaptureBy,
+ decl: &'tcx hir::FnDecl<'tcx>,
+ body_id: hir::BodyId,
+ gen: Option<hir::Movability>,
+ expected: Expectation<'tcx>,
+ ) -> Ty<'tcx> {
+ trace!("decl = {:#?}", decl);
+ trace!("expr = {:#?}", expr);
+
+ // It's always helpful for inference if we know the kind of
+ // closure sooner rather than later, so first examine the expected
+ // type, and see if we can glean a closure kind from there.
+ let (expected_sig, expected_kind) = match expected.to_option(self) {
+ Some(ty) => self.deduce_expectations_from_expected_type(ty),
+ None => (None, None),
+ };
+ let body = self.tcx.hir().body(body_id);
+ self.check_closure(expr, expected_kind, decl, body, gen, expected_sig)
+ }
+
+ #[instrument(skip(self, expr, body, decl), level = "debug", ret)]
+ fn check_closure(
+ &self,
+ expr: &hir::Expr<'_>,
+ opt_kind: Option<ty::ClosureKind>,
+ decl: &'tcx hir::FnDecl<'tcx>,
+ body: &'tcx hir::Body<'tcx>,
+ gen: Option<hir::Movability>,
+ expected_sig: Option<ExpectedSig<'tcx>>,
+ ) -> Ty<'tcx> {
+ trace!("decl = {:#?}", decl);
+ let expr_def_id = self.tcx.hir().local_def_id(expr.hir_id);
+ debug!(?expr_def_id);
+
+ let ClosureSignatures { bound_sig, liberated_sig } =
+ self.sig_of_closure(expr.hir_id, expr_def_id.to_def_id(), decl, body, expected_sig);
+
+ debug!(?bound_sig, ?liberated_sig);
+
+ let return_type_pre_known = !liberated_sig.output().is_ty_infer();
+
+ let generator_types = check_fn(
+ self,
+ self.param_env.without_const(),
+ liberated_sig,
+ decl,
+ expr.hir_id,
+ body,
+ gen,
+ return_type_pre_known,
+ )
+ .1;
+
+ let parent_substs = InternalSubsts::identity_for_item(
+ self.tcx,
+ self.tcx.typeck_root_def_id(expr_def_id.to_def_id()),
+ );
+
+ let tupled_upvars_ty = self.next_ty_var(TypeVariableOrigin {
+ kind: TypeVariableOriginKind::ClosureSynthetic,
+ span: self.tcx.hir().span(expr.hir_id),
+ });
+
+ if let Some(GeneratorTypes { resume_ty, yield_ty, interior, movability }) = generator_types
+ {
+ let generator_substs = ty::GeneratorSubsts::new(
+ self.tcx,
+ ty::GeneratorSubstsParts {
+ parent_substs,
+ resume_ty,
+ yield_ty,
+ return_ty: liberated_sig.output(),
+ witness: interior,
+ tupled_upvars_ty,
+ },
+ );
+
+ return self.tcx.mk_generator(
+ expr_def_id.to_def_id(),
+ generator_substs.substs,
+ movability,
+ );
+ }
+
+ // Tuple up the arguments and insert the resulting function type into
+ // the `closures` table.
+ let sig = bound_sig.map_bound(|sig| {
+ self.tcx.mk_fn_sig(
+ iter::once(self.tcx.intern_tup(sig.inputs())),
+ sig.output(),
+ sig.c_variadic,
+ sig.unsafety,
+ sig.abi,
+ )
+ });
+
+ debug!(?sig, ?opt_kind);
+
+ let closure_kind_ty = match opt_kind {
+ Some(kind) => kind.to_ty(self.tcx),
+
+ // Create a type variable (for now) to represent the closure kind.
+ // It will be unified during the upvar inference phase (`upvar.rs`)
+ None => self.next_ty_var(TypeVariableOrigin {
+ // FIXME(eddyb) distinguish closure kind inference variables from the rest.
+ kind: TypeVariableOriginKind::ClosureSynthetic,
+ span: expr.span,
+ }),
+ };
+
+ let closure_substs = ty::ClosureSubsts::new(
+ self.tcx,
+ ty::ClosureSubstsParts {
+ parent_substs,
+ closure_kind_ty,
+ closure_sig_as_fn_ptr_ty: self.tcx.mk_fn_ptr(sig),
+ tupled_upvars_ty,
+ },
+ );
+
+ self.tcx.mk_closure(expr_def_id.to_def_id(), closure_substs.substs)
+ }
+
+ /// Given the expected type, figures out what it can infer about this closure we
+ /// are about to type check:
+ #[instrument(skip(self), level = "debug")]
+ fn deduce_expectations_from_expected_type(
+ &self,
+ expected_ty: Ty<'tcx>,
+ ) -> (Option<ExpectedSig<'tcx>>, Option<ty::ClosureKind>) {
+ match *expected_ty.kind() {
+ ty::Opaque(def_id, substs) => {
+ let bounds = self.tcx.bound_explicit_item_bounds(def_id);
+ let sig =
+ bounds.subst_iter_copied(self.tcx, substs).find_map(|(pred, span)| match pred
+ .kind()
+ .skip_binder()
+ {
+ ty::PredicateKind::Projection(proj_predicate) => self
+ .deduce_sig_from_projection(
+ Some(span),
+ pred.kind().rebind(proj_predicate),
+ ),
+ _ => None,
+ });
+
+ let kind = bounds
+ .0
+ .iter()
+ .filter_map(|(pred, _)| match pred.kind().skip_binder() {
+ ty::PredicateKind::Trait(tp) => {
+ self.tcx.fn_trait_kind_from_lang_item(tp.def_id())
+ }
+ _ => None,
+ })
+ .fold(None, |best, cur| Some(best.map_or(cur, |best| cmp::min(best, cur))));
+ trace!(?sig, ?kind);
+ (sig, kind)
+ }
+ ty::Dynamic(ref object_type, ..) => {
+ let sig = object_type.projection_bounds().find_map(|pb| {
+ let pb = pb.with_self_ty(self.tcx, self.tcx.types.trait_object_dummy_self);
+ self.deduce_sig_from_projection(None, pb)
+ });
+ let kind = object_type
+ .principal_def_id()
+ .and_then(|did| self.tcx.fn_trait_kind_from_lang_item(did));
+ (sig, kind)
+ }
+ ty::Infer(ty::TyVar(vid)) => self.deduce_expectations_from_obligations(vid),
+ ty::FnPtr(sig) => {
+ let expected_sig = ExpectedSig { cause_span: None, sig };
+ (Some(expected_sig), Some(ty::ClosureKind::Fn))
+ }
+ _ => (None, None),
+ }
+ }
+
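For orientation, a standalone (illustrative, not compiler-internal) example of the three expectation sources matched above: a function pointer, a `dyn Fn` object, and an `impl Fn` opaque type. In each case the closure's parameter type is deduced rather than written.

```rust
fn expects_fn_ptr(f: fn(u32) -> u32) -> u32 {
    f(1) // expectation is a `ty::FnPtr`
}

fn expects_dyn(f: &dyn Fn(u32) -> u32) -> u32 {
    f(2) // expectation is a `ty::Dynamic` with an `Fn` principal
}

fn expects_impl() -> impl Fn(u32) -> u32 {
    |x| x + 1 // expectation is a `ty::Opaque` bounded by `Fn(u32) -> u32`
}

fn main() {
    // None of the closures spell out their `u32` parameter type;
    // it is deduced from the expected type as described above.
    assert_eq!(expects_fn_ptr(|x| x * 2), 2);
    assert_eq!(expects_dyn(&|x| x * 2), 4);
    assert_eq!(expects_impl()(3), 4);
}
```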
+ fn deduce_expectations_from_obligations(
+ &self,
+ expected_vid: ty::TyVid,
+ ) -> (Option<ExpectedSig<'tcx>>, Option<ty::ClosureKind>) {
+ let expected_sig =
+ self.obligations_for_self_ty(expected_vid).find_map(|(_, obligation)| {
+ debug!(?obligation.predicate);
+
+ let bound_predicate = obligation.predicate.kind();
+ if let ty::PredicateKind::Projection(proj_predicate) =
+ obligation.predicate.kind().skip_binder()
+ {
+ // Given a Projection predicate, we can potentially infer
+ // the complete signature.
+ self.deduce_sig_from_projection(
+ Some(obligation.cause.span),
+ bound_predicate.rebind(proj_predicate),
+ )
+ } else {
+ None
+ }
+ });
+
+ // Even if we can't infer the full signature, we may be able to
+ // infer the kind. This can occur when we elaborate a predicate
+ // like `F : Fn<A>`. Note that due to subtyping we could encounter
+ // many viable options, so pick the most restrictive.
+ let expected_kind = self
+ .obligations_for_self_ty(expected_vid)
+ .filter_map(|(tr, _)| self.tcx.fn_trait_kind_from_lang_item(tr.def_id()))
+ .fold(None, |best, cur| Some(best.map_or(cur, |best| cmp::min(best, cur))));
+
+ (expected_sig, expected_kind)
+ }
+
+ /// Given a projection like "<F as Fn(X)>::Output == Y", we can deduce
+ /// everything we need to know about a closure or generator.
+ ///
+ /// The `cause_span` should be the span that caused us to
+ /// have this expected signature, or `None` if we can't readily
+ /// know that.
+ #[instrument(level = "debug", skip(self, cause_span), ret)]
+ fn deduce_sig_from_projection(
+ &self,
+ cause_span: Option<Span>,
+ projection: ty::PolyProjectionPredicate<'tcx>,
+ ) -> Option<ExpectedSig<'tcx>> {
+ let tcx = self.tcx;
+
+ let trait_def_id = projection.trait_def_id(tcx);
+
+ let is_fn = tcx.fn_trait_kind_from_lang_item(trait_def_id).is_some();
+ let gen_trait = tcx.require_lang_item(LangItem::Generator, cause_span);
+ let is_gen = gen_trait == trait_def_id;
+ if !is_fn && !is_gen {
+ debug!("not fn or generator");
+ return None;
+ }
+
+ if is_gen {
+ // Check that we deduce the signature from the `<_ as std::ops::Generator>::Return`
+ // associated item and not the `Yield` type.
+ let return_assoc_item = self.tcx.associated_item_def_ids(gen_trait)[1];
+ if return_assoc_item != projection.projection_def_id() {
+ debug!("not return assoc item of generator");
+ return None;
+ }
+ }
+
+ let input_tys = if is_fn {
+ let arg_param_ty = projection.skip_binder().projection_ty.substs.type_at(1);
+ let arg_param_ty = self.resolve_vars_if_possible(arg_param_ty);
+ debug!(?arg_param_ty);
+
+ match arg_param_ty.kind() {
+ &ty::Tuple(tys) => tys,
+ _ => return None,
+ }
+ } else {
+ // Generators with a `()` resume type may be defined with 0 or 1 explicit arguments,
+ // otherwise they must have exactly 1 argument. For now, though, just give up in this case.
+ return None;
+ };
+
+ // Since this is a return parameter type it is safe to unwrap.
+ let ret_param_ty = projection.skip_binder().term.ty().unwrap();
+ let ret_param_ty = self.resolve_vars_if_possible(ret_param_ty);
+ debug!(?ret_param_ty);
+
+ let sig = projection.rebind(self.tcx.mk_fn_sig(
+ input_tys.iter(),
+ ret_param_ty,
+ false,
+ hir::Unsafety::Normal,
+ Abi::Rust,
+ ));
+
+ Some(ExpectedSig { cause_span, sig })
+ }
+
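Concretely (an illustrative example, not compiler code): an `Fn`-family where-clause elaborates to a projection of the form `<F as FnOnce<(Args,)>>::Output == Ret`, and this function reads the tupled argument types and the return type off that projection.

```rust
// The bound on `F` carries `<F as FnOnce<(u32,)>>::Output == String`, from
// which the expected closure signature `fn(u32) -> String` is deduced.
fn call_with<F>(f: F) -> String
where
    F: FnOnce(u32) -> String,
{
    f(7)
}

fn main() {
    // Neither the parameter type nor the return type is written on the closure.
    assert_eq!(call_with(|n| n.to_string()), "7");
}
```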
+ fn sig_of_closure(
+ &self,
+ hir_id: hir::HirId,
+ expr_def_id: DefId,
+ decl: &hir::FnDecl<'_>,
+ body: &hir::Body<'_>,
+ expected_sig: Option<ExpectedSig<'tcx>>,
+ ) -> ClosureSignatures<'tcx> {
+ if let Some(e) = expected_sig {
+ self.sig_of_closure_with_expectation(hir_id, expr_def_id, decl, body, e)
+ } else {
+ self.sig_of_closure_no_expectation(hir_id, expr_def_id, decl, body)
+ }
+ }
+
+ /// If there is no expected signature, then we will convert the
+ /// types that the user gave into a signature.
+ #[instrument(skip(self, hir_id, expr_def_id, decl, body), level = "debug")]
+ fn sig_of_closure_no_expectation(
+ &self,
+ hir_id: hir::HirId,
+ expr_def_id: DefId,
+ decl: &hir::FnDecl<'_>,
+ body: &hir::Body<'_>,
+ ) -> ClosureSignatures<'tcx> {
+ let bound_sig = self.supplied_sig_of_closure(hir_id, expr_def_id, decl, body);
+
+ self.closure_sigs(expr_def_id, body, bound_sig)
+ }
+
+ /// Invoked to compute the signature of a closure expression. This
+ /// combines any user-provided type annotations (e.g., `|x: u32|
+ /// -> u32 { .. }`) with the expected signature.
+ ///
+ /// The approach is as follows:
+ ///
+ /// - Let `S` be the (higher-ranked) signature that we derive from the user's annotations.
+ /// - Let `E` be the (higher-ranked) signature that we derive from the expectations, if any.
+ /// - If we have no expectation `E`, then the signature of the closure is `S`.
+ /// - Otherwise, the signature of the closure is `E`. Moreover:
+ /// - Skolemize the late-bound regions in `E`, yielding `E'`.
+ /// - Instantiate all the late-bound regions bound in the closure within `S`
+ /// with fresh (existential) variables, yielding `S'`
+ /// - Require that `E' = S'`
+ /// - We could use some kind of subtyping relationship here,
+ /// I imagine, but equality is easier and works fine for
+ /// our purposes.
+ ///
+ /// The key intuition here is that the user's types must be valid
+ /// from "the inside" of the closure, but the expectation
+ /// ultimately drives the overall signature.
+ ///
+ /// # Examples
+ ///
+ /// ```ignore (illustrative)
+ /// fn with_closure<F>(_: F)
+ /// where F: Fn(&u32) -> &u32 { .. }
+ ///
+ /// with_closure(|x: &u32| { ... })
+ /// ```
+ ///
+ /// Here:
+ /// - E would be `fn(&u32) -> &u32`.
+ /// - S would be `fn(&u32) -> ?T`
+ /// - E' is `&'!0 u32 -> &'!0 u32`
+ /// - S' is `&'?0 u32 -> ?T`
+ ///
+ /// S' can be unified with E' with `['?0 = '!0, ?T = &'!0 u32]`.
+ ///
+ /// # Arguments
+ ///
+ /// - `expr_def_id`: the `DefId` of the closure expression
+ /// - `decl`: the HIR declaration of the closure
+ /// - `body`: the body of the closure
+ /// - `expected_sig`: the expected signature (if any). Note that
+ /// this is missing a binder: that is, there may be late-bound
+ /// regions with depth 1, which are then bound by the closure.
+ #[instrument(skip(self, hir_id, expr_def_id, decl, body), level = "debug")]
+ fn sig_of_closure_with_expectation(
+ &self,
+ hir_id: hir::HirId,
+ expr_def_id: DefId,
+ decl: &hir::FnDecl<'_>,
+ body: &hir::Body<'_>,
+ expected_sig: ExpectedSig<'tcx>,
+ ) -> ClosureSignatures<'tcx> {
+ // Watch out for some surprises and just ignore the
+ // expectation if things don't seem to match up with what we
+ // expect.
+ if expected_sig.sig.c_variadic() != decl.c_variadic {
+ return self.sig_of_closure_no_expectation(hir_id, expr_def_id, decl, body);
+ } else if expected_sig.sig.skip_binder().inputs_and_output.len() != decl.inputs.len() + 1 {
+ return self.sig_of_closure_with_mismatched_number_of_arguments(
+ expr_def_id,
+ decl,
+ body,
+ expected_sig,
+ );
+ }
+
+ // Create a `PolyFnSig`. Note the oddity that late bound
+ // regions appearing free in `expected_sig` are now bound up
+ // in this binder we are creating.
+ assert!(!expected_sig.sig.skip_binder().has_vars_bound_above(ty::INNERMOST));
+ let bound_sig = expected_sig.sig.map_bound(|sig| {
+ self.tcx.mk_fn_sig(
+ sig.inputs().iter().cloned(),
+ sig.output(),
+ sig.c_variadic,
+ hir::Unsafety::Normal,
+ Abi::RustCall,
+ )
+ });
+
+ // `deduce_expectations_from_expected_type` introduces
+ // late-bound lifetimes defined elsewhere, which we now
+ // anonymize away, so as not to confuse the user.
+ let bound_sig = self.tcx.anonymize_late_bound_regions(bound_sig);
+
+ let closure_sigs = self.closure_sigs(expr_def_id, body, bound_sig);
+
+ // Up till this point, we have ignored the annotations that the user
+ // gave. This function will check that they unify successfully.
+ // Along the way, it also writes out entries for types that the user
+ // wrote into our typeck results, which are then later used by the privacy
+ // check.
+ match self.merge_supplied_sig_with_expectation(
+ hir_id,
+ expr_def_id,
+ decl,
+ body,
+ closure_sigs,
+ ) {
+ Ok(infer_ok) => self.register_infer_ok_obligations(infer_ok),
+ Err(_) => self.sig_of_closure_no_expectation(hir_id, expr_def_id, decl, body),
+ }
+ }
+
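A runnable version of the `with_closure` sketch from the doc comment above (the function body and values are made up for the example): the user-written `&u32` is checked "from the inside", while the higher-ranked `Fn(&u32) -> &u32` expectation supplies the lifetimes and the elided return type.

```rust
fn with_closure<F>(f: F) -> u32
where
    F: Fn(&u32) -> &u32,
{
    *f(&41) + 1
}

fn main() {
    // S (from the annotation) is `fn(&u32) -> ?T`; E (from the expectation)
    // is `for<'a> fn(&'a u32) -> &'a u32`. Merging them gives the closure its
    // full signature, including the return type the user never wrote.
    assert_eq!(with_closure(|x: &u32| x), 42);
}
```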
+ fn sig_of_closure_with_mismatched_number_of_arguments(
+ &self,
+ expr_def_id: DefId,
+ decl: &hir::FnDecl<'_>,
+ body: &hir::Body<'_>,
+ expected_sig: ExpectedSig<'tcx>,
+ ) -> ClosureSignatures<'tcx> {
+ let hir = self.tcx.hir();
+ let expr_map_node = hir.get_if_local(expr_def_id).unwrap();
+ let expected_args: Vec<_> = expected_sig
+ .sig
+ .skip_binder()
+ .inputs()
+ .iter()
+ .map(|ty| ArgKind::from_expected_ty(*ty, None))
+ .collect();
+ let (closure_span, found_args) = match self.get_fn_like_arguments(expr_map_node) {
+ Some((sp, args)) => (Some(sp), args),
+ None => (None, Vec::new()),
+ };
+ let expected_span =
+ expected_sig.cause_span.unwrap_or_else(|| hir.span_if_local(expr_def_id).unwrap());
+ self.report_arg_count_mismatch(
+ expected_span,
+ closure_span,
+ expected_args,
+ found_args,
+ true,
+ )
+ .emit();
+
+ let error_sig = self.error_sig_of_closure(decl);
+
+ self.closure_sigs(expr_def_id, body, error_sig)
+ }
+
+ /// Enforce the user's types against the expectation. See
+ /// `sig_of_closure_with_expectation` for details on the overall
+ /// strategy.
+ #[instrument(level = "debug", skip(self, hir_id, expr_def_id, decl, body, expected_sigs))]
+ fn merge_supplied_sig_with_expectation(
+ &self,
+ hir_id: hir::HirId,
+ expr_def_id: DefId,
+ decl: &hir::FnDecl<'_>,
+ body: &hir::Body<'_>,
+ mut expected_sigs: ClosureSignatures<'tcx>,
+ ) -> InferResult<'tcx, ClosureSignatures<'tcx>> {
+ // Get the signature S that the user gave.
+ //
+ // (See comment on `sig_of_closure_with_expectation` for the
+ // meaning of these letters.)
+ let supplied_sig = self.supplied_sig_of_closure(hir_id, expr_def_id, decl, body);
+
+ debug!(?supplied_sig);
+
+ // FIXME(#45727): As discussed in [this comment][c1], naively
+ // forcing equality here actually results in suboptimal error
+ // messages in some cases. For now, if there would have been
+ // an obvious error, we fall back to declaring the type of the
+ // closure to be the one the user gave, which allows other
+ // error message code to trigger.
+ //
+ // However, I think [there is potential to do even better
+ // here][c2], since in *this* code we have the precise span of
+ // the type parameter in question in hand when we report the
+ // error.
+ //
+ // [c1]: https://github.com/rust-lang/rust/pull/45072#issuecomment-341089706
+ // [c2]: https://github.com/rust-lang/rust/pull/45072#issuecomment-341096796
+ self.commit_if_ok(|_| {
+ let mut all_obligations = vec![];
+ let inputs: Vec<_> = iter::zip(
+ decl.inputs,
+ supplied_sig.inputs().skip_binder(), // binder moved to (*) below
+ )
+ .map(|(hir_ty, &supplied_ty)| {
+ // Instantiate (this part of..) S to S', i.e., with fresh variables.
+ self.replace_bound_vars_with_fresh_vars(
+ hir_ty.span,
+ LateBoundRegionConversionTime::FnCall,
+ // (*) binder moved to here
+ supplied_sig.inputs().rebind(supplied_ty),
+ )
+ })
+ .collect();
+
+ // The liberated version of this signature should be a subtype
+ // of the liberated form of the expectation.
+ for ((hir_ty, &supplied_ty), expected_ty) in iter::zip(
+ iter::zip(decl.inputs, &inputs),
+ expected_sigs.liberated_sig.inputs(), // `liberated_sig` is E'.
+ ) {
+ // Check that E' = S'.
+ let cause = self.misc(hir_ty.span);
+ let InferOk { value: (), obligations } =
+ self.at(&cause, self.param_env).eq(*expected_ty, supplied_ty)?;
+ all_obligations.extend(obligations);
+ }
+
+ let supplied_output_ty = self.replace_bound_vars_with_fresh_vars(
+ decl.output.span(),
+ LateBoundRegionConversionTime::FnCall,
+ supplied_sig.output(),
+ );
+ let cause = &self.misc(decl.output.span());
+ let InferOk { value: (), obligations } = self
+ .at(cause, self.param_env)
+ .eq(expected_sigs.liberated_sig.output(), supplied_output_ty)?;
+ all_obligations.extend(obligations);
+
+ let inputs = inputs.into_iter().map(|ty| self.resolve_vars_if_possible(ty));
+
+ expected_sigs.liberated_sig = self.tcx.mk_fn_sig(
+ inputs,
+ supplied_output_ty,
+ expected_sigs.liberated_sig.c_variadic,
+ hir::Unsafety::Normal,
+ Abi::RustCall,
+ );
+
+ Ok(InferOk { value: expected_sigs, obligations: all_obligations })
+ })
+ }
+
+ /// If there is no expected signature, then we will convert the
+ /// types that the user gave into a signature.
+ ///
+ /// Also, record this closure signature for later.
+ #[instrument(skip(self, decl, body), level = "debug", ret)]
+ fn supplied_sig_of_closure(
+ &self,
+ hir_id: hir::HirId,
+ expr_def_id: DefId,
+ decl: &hir::FnDecl<'_>,
+ body: &hir::Body<'_>,
+ ) -> ty::PolyFnSig<'tcx> {
+ let astconv: &dyn AstConv<'_> = self;
+
+ trace!("decl = {:#?}", decl);
+ debug!(?body.generator_kind);
+
+ let bound_vars = self.tcx.late_bound_vars(hir_id);
+
+ // First, convert the types that the user supplied (if any).
+ let supplied_arguments = decl.inputs.iter().map(|a| astconv.ast_ty_to_ty(a));
+ let supplied_return = match decl.output {
+ hir::FnRetTy::Return(ref output) => astconv.ast_ty_to_ty(&output),
+ hir::FnRetTy::DefaultReturn(_) => match body.generator_kind {
+ // In the case of the async block that we create for a function body,
+ // we expect the return type of the block to match that of the enclosing
+ // function.
+ Some(hir::GeneratorKind::Async(hir::AsyncGeneratorKind::Fn)) => {
+ debug!("closure is async fn body");
+ self.deduce_future_output_from_obligations(expr_def_id, body.id().hir_id)
+ .unwrap_or_else(|| {
+ // AFAIK, deducing the future output
+ // always succeeds *except* in error cases
+ // like #65159. I'd like to return Error
+ // here, but I can't because I can't
+ // easily (and locally) prove that we
+ // *have* reported an
+ // error. --nikomatsakis
+ astconv.ty_infer(None, decl.output.span())
+ })
+ }
+
+ _ => astconv.ty_infer(None, decl.output.span()),
+ },
+ };
+
+ let result = ty::Binder::bind_with_vars(
+ self.tcx.mk_fn_sig(
+ supplied_arguments,
+ supplied_return,
+ decl.c_variadic,
+ hir::Unsafety::Normal,
+ Abi::RustCall,
+ ),
+ bound_vars,
+ );
+ // Astconv can't normalize inputs or outputs with escaping bound vars,
+ // so normalize them here, after we've wrapped them in a binder.
+ let result = self.normalize_associated_types_in(self.tcx.hir().span(hir_id), result);
+
+ let c_result = self.inh.infcx.canonicalize_response(result);
+ self.typeck_results.borrow_mut().user_provided_sigs.insert(expr_def_id, c_result);
+
+ result
+ }
+
+ /// Invoked when we are translating the generator that results
+ /// from desugaring an `async fn`. Returns the "sugared" return
+ /// type of the `async fn` -- that is, the return type that the
+ /// user specified. The "desugared" return type is an `impl
+ /// Future<Output = T>`, so we do this by searching through the
+ /// obligations to extract the `T`.
+ #[instrument(skip(self), level = "debug", ret)]
+ fn deduce_future_output_from_obligations(
+ &self,
+ expr_def_id: DefId,
+ body_id: hir::HirId,
+ ) -> Option<Ty<'tcx>> {
+ let ret_coercion = self.ret_coercion.as_ref().unwrap_or_else(|| {
+ span_bug!(self.tcx.def_span(expr_def_id), "async fn generator outside of a fn")
+ });
+
+ let ret_ty = ret_coercion.borrow().expected_ty();
+ let ret_ty = self.inh.infcx.shallow_resolve(ret_ty);
+
+ let get_future_output = |predicate: ty::Predicate<'tcx>, span| {
+ // Search for a pending obligation like
+ //
+ // `<R as Future>::Output = T`
+ //
+ // where R is the return type we are expecting. This type `T`
+ // will be our output.
+ let bound_predicate = predicate.kind();
+ if let ty::PredicateKind::Projection(proj_predicate) = bound_predicate.skip_binder() {
+ self.deduce_future_output_from_projection(
+ span,
+ bound_predicate.rebind(proj_predicate),
+ )
+ } else {
+ None
+ }
+ };
+
+ let output_ty = match *ret_ty.kind() {
+ ty::Infer(ty::TyVar(ret_vid)) => {
+ self.obligations_for_self_ty(ret_vid).find_map(|(_, obligation)| {
+ get_future_output(obligation.predicate, obligation.cause.span)
+ })?
+ }
+ ty::Opaque(def_id, substs) => self
+ .tcx
+ .bound_explicit_item_bounds(def_id)
+ .subst_iter_copied(self.tcx, substs)
+ .find_map(|(p, s)| get_future_output(p, s))?,
+ ty::Error(_) => return None,
+ ty::Projection(proj)
+ if self.tcx.def_kind(proj.item_def_id) == DefKind::ImplTraitPlaceholder =>
+ {
+ self.tcx
+ .bound_explicit_item_bounds(proj.item_def_id)
+ .subst_iter_copied(self.tcx, proj.substs)
+ .find_map(|(p, s)| get_future_output(p, s))?
+ }
+ _ => span_bug!(
+ self.tcx.def_span(expr_def_id),
+ "async fn generator return type not an inference variable: {ret_ty}"
+ ),
+ };
+
+ // async fns that have opaque types in their return type need to redo the conversion to inference variables
+ // as they fetch the still-opaque version from the signature.
+ let InferOk { value: output_ty, obligations } = self
+ .replace_opaque_types_with_inference_vars(
+ output_ty,
+ body_id,
+ self.tcx.def_span(expr_def_id),
+ self.param_env,
+ );
+ self.register_predicates(obligations);
+
+ Some(output_ty)
+ }
+
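For context (illustrative, not part of the diff), this deduction is what lets the body of an `async fn` be checked against the written return type instead of the desugared `impl Future`:

```rust
// `add_one` desugars to a function returning `impl Future<Output = u32>`
// whose body is a generator; the "sugared" return type `u32` is recovered
// from the `<_ as Future>::Output = u32` obligation as described above,
// so `x + 1` is checked against `u32`.
async fn add_one(x: u32) -> u32 {
    x + 1
}

fn main() {
    // Constructing the future is enough for the illustration; driving it
    // would need an executor, which the standard library does not provide.
    let _future = add_one(41);
}
```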
+ /// Given a projection like
+ ///
+ /// `<X as Future>::Output = T`
+ ///
+ /// where `X` is some type that has no late-bound regions, returns
+ /// `Some(T)`. If the projection is for some other trait, returns
+ /// `None`.
+ fn deduce_future_output_from_projection(
+ &self,
+ cause_span: Span,
+ predicate: ty::PolyProjectionPredicate<'tcx>,
+ ) -> Option<Ty<'tcx>> {
+ debug!("deduce_future_output_from_projection(predicate={:?})", predicate);
+
+ // We do not expect any bound regions in our predicate, so
+ // skip past the bound vars.
+ let Some(predicate) = predicate.no_bound_vars() else {
+ debug!("deduce_future_output_from_projection: has late-bound regions");
+ return None;
+ };
+
+ // Check that this is a projection from the `Future` trait.
+ let trait_def_id = predicate.projection_ty.trait_def_id(self.tcx);
+ let future_trait = self.tcx.require_lang_item(LangItem::Future, Some(cause_span));
+ if trait_def_id != future_trait {
+ debug!("deduce_future_output_from_projection: not a future");
+ return None;
+ }
+
+ // The `Future` trait has only one associated item, `Output`,
+ // so check that this is what we see.
+ let output_assoc_item = self.tcx.associated_item_def_ids(future_trait)[0];
+ if output_assoc_item != predicate.projection_ty.item_def_id {
+ span_bug!(
+ cause_span,
+ "projecting associated item `{:?}` from future, which is not Output `{:?}`",
+ predicate.projection_ty.item_def_id,
+ output_assoc_item,
+ );
+ }
+
+ // Extract the type from the projection. Note that there can
+ // be no bound variables in this type because the "self type"
+ // does not have any regions in it.
+ let output_ty = self.resolve_vars_if_possible(predicate.term);
+ debug!("deduce_future_output_from_projection: output_ty={:?}", output_ty);
+ // This is a projection of the `Future::Output` associated type, so it will always be a type.
+ Some(output_ty.ty().unwrap())
+ }
+
+ /// Converts the types that the user supplied, so that any errors in them
+ /// are reported, but returns a signature where all parameters are of
+ /// type `TyErr`.
+ fn error_sig_of_closure(&self, decl: &hir::FnDecl<'_>) -> ty::PolyFnSig<'tcx> {
+ let astconv: &dyn AstConv<'_> = self;
+
+ let supplied_arguments = decl.inputs.iter().map(|a| {
+ // Convert the types that the user supplied (if any), but ignore them.
+ astconv.ast_ty_to_ty(a);
+ self.tcx.ty_error()
+ });
+
+ if let hir::FnRetTy::Return(ref output) = decl.output {
+ astconv.ast_ty_to_ty(&output);
+ }
+
+ let result = ty::Binder::dummy(self.tcx.mk_fn_sig(
+ supplied_arguments,
+ self.tcx.ty_error(),
+ decl.c_variadic,
+ hir::Unsafety::Normal,
+ Abi::RustCall,
+ ));
+
+ debug!("supplied_sig_of_closure: result={:?}", result);
+
+ result
+ }
+
+ fn closure_sigs(
+ &self,
+ expr_def_id: DefId,
+ body: &hir::Body<'_>,
+ bound_sig: ty::PolyFnSig<'tcx>,
+ ) -> ClosureSignatures<'tcx> {
+ let liberated_sig = self.tcx().liberate_late_bound_regions(expr_def_id, bound_sig);
+ let liberated_sig = self.inh.normalize_associated_types_in(
+ body.value.span,
+ body.value.hir_id,
+ self.param_env,
+ liberated_sig,
+ );
+ ClosureSignatures { bound_sig, liberated_sig }
+ }
+}
diff --git a/compiler/rustc_typeck/src/check/coercion.rs b/compiler/rustc_hir_typeck/src/coercion.rs
index 2ed5f569b..86597a703 100644
--- a/compiler/rustc_typeck/src/check/coercion.rs
+++ b/compiler/rustc_hir_typeck/src/coercion.rs
@@ -35,13 +35,15 @@
//! // and are then unable to coerce `&7i32` to `&mut i32`.
//! ```
-use crate::astconv::AstConv;
-use crate::check::FnCtxt;
+use crate::FnCtxt;
use rustc_errors::{
- struct_span_err, Applicability, Diagnostic, DiagnosticBuilder, ErrorGuaranteed,
+ struct_span_err, Applicability, Diagnostic, DiagnosticBuilder, ErrorGuaranteed, MultiSpan,
};
use rustc_hir as hir;
use rustc_hir::def_id::DefId;
+use rustc_hir::intravisit::{self, Visitor};
+use rustc_hir::Expr;
+use rustc_hir_analysis::astconv::AstConv;
use rustc_infer::infer::type_variable::{TypeVariableOrigin, TypeVariableOriginKind};
use rustc_infer::infer::{Coercion, InferOk, InferResult};
use rustc_infer::traits::{Obligation, TraitEngine, TraitEngineExt};
@@ -59,7 +61,7 @@ use rustc_span::symbol::sym;
use rustc_span::{self, BytePos, DesugaringKind, Span};
use rustc_target::spec::abi::Abi;
use rustc_trait_selection::infer::InferCtxtExt as _;
-use rustc_trait_selection::traits::error_reporting::InferCtxtExt as _;
+use rustc_trait_selection::traits::error_reporting::TypeErrCtxtExt as _;
use rustc_trait_selection::traits::{self, ObligationCause, ObligationCauseCode};
use smallvec::{smallvec, SmallVec};
@@ -87,6 +89,19 @@ impl<'a, 'tcx> Deref for Coerce<'a, 'tcx> {
type CoerceResult<'tcx> = InferResult<'tcx, (Vec<Adjustment<'tcx>>, Ty<'tcx>)>;
+struct CollectRetsVisitor<'tcx> {
+ ret_exprs: Vec<&'tcx hir::Expr<'tcx>>,
+}
+
+impl<'tcx> Visitor<'tcx> for CollectRetsVisitor<'tcx> {
+ fn visit_expr(&mut self, expr: &'tcx Expr<'tcx>) {
+ if let hir::ExprKind::Ret(_) = expr.kind {
+ self.ret_exprs.push(expr);
+ }
+ intravisit::walk_expr(self, expr);
+ }
+}
+
/// Coercing a mutable reference to an immutable one works, while
/// coercing `&T` to `&mut T` should be forbidden.
fn coerce_mutbls<'tcx>(
@@ -201,6 +216,9 @@ impl<'f, 'tcx> Coerce<'f, 'tcx> {
ty::Ref(r_b, _, mutbl_b) => {
return self.coerce_borrowed_pointer(a, b, r_b, mutbl_b);
}
+ ty::Dynamic(predicates, region, ty::DynStar) if self.tcx.features().dyn_star => {
+ return self.coerce_dyn_star(a, b, predicates, region);
+ }
_ => {}
}
@@ -687,7 +705,12 @@ impl<'f, 'tcx> Coerce<'f, 'tcx> {
// Object safety violations or miscellaneous.
Err(err) => {
- self.report_selection_error(obligation.clone(), &obligation, &err, false);
+ self.err_ctxt().report_selection_error(
+ obligation.clone(),
+ &obligation,
+ &err,
+ false,
+ );
// Treat this like an obligation and follow through
// with the unsizing - the lack of a coercion should
// be silent, as it causes a type mismatch later.
@@ -725,6 +748,63 @@ impl<'f, 'tcx> Coerce<'f, 'tcx> {
Ok(coercion)
}
+ fn coerce_dyn_star(
+ &self,
+ a: Ty<'tcx>,
+ b: Ty<'tcx>,
+ predicates: &'tcx ty::List<ty::Binder<'tcx, ty::ExistentialPredicate<'tcx>>>,
+ b_region: ty::Region<'tcx>,
+ ) -> CoerceResult<'tcx> {
+ if !self.tcx.features().dyn_star {
+ return Err(TypeError::Mismatch);
+ }
+
+ if let ty::Dynamic(a_data, _, _) = a.kind()
+ && let ty::Dynamic(b_data, _, _) = b.kind()
+ {
+ if a_data.principal_def_id() == b_data.principal_def_id() {
+ return self.unify_and(a, b, |_| vec![]);
+ } else if !self.tcx().features().trait_upcasting {
+ let mut err = feature_err(
+ &self.tcx.sess.parse_sess,
+ sym::trait_upcasting,
+ self.cause.span,
+ &format!(
+ "cannot cast `{a}` to `{b}`, trait upcasting coercion is experimental"
+ ),
+ );
+ err.emit();
+ }
+ }
+
+ // Check the obligations of the cast -- for example, when casting
+ // `usize` to `dyn* Clone + 'static`:
+ let obligations = predicates
+ .iter()
+ .map(|predicate| {
+ // For each existential predicate (e.g., `?Self: Clone`) substitute
+ // the type of the expression (e.g., `usize` in our example above)
+ // and then require that the resulting predicate (e.g., `usize: Clone`)
+ // holds (it does).
+ let predicate = predicate.with_self_ty(self.tcx, a);
+ Obligation::new(self.cause.clone(), self.param_env, predicate)
+ })
+ // Enforce the region bound (e.g., `usize: 'static`, in our example).
+ .chain([Obligation::new(
+ self.cause.clone(),
+ self.param_env,
+ self.tcx.mk_predicate(ty::Binder::dummy(ty::PredicateKind::TypeOutlives(
+ ty::OutlivesPredicate(a, b_region),
+ ))),
+ )])
+ .collect();
+
+ Ok(InferOk {
+ value: (vec![Adjustment { kind: Adjust::DynStar, target: b }], b),
+ obligations,
+ })
+ }
+
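A heavily hedged sketch of the kind of program this new coercion enables. `dyn*` is an incomplete, nightly-only feature at this point, so the exact surface syntax and whether this compiles on a given nightly may differ:

```rust
#![feature(dyn_star)]
#![allow(incomplete_features)]

use std::fmt::Debug;

fn main() {
    // `usize` is pointer-sized; the coercion site registers and discharges
    // the obligations `usize: Debug` and `usize: 'static`, mirroring the
    // comments in `coerce_dyn_star` above.
    let _value: dyn* Debug = 42usize;
}
```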
fn coerce_from_safe_fn<F, G>(
&self,
a: Ty<'tcx>,
@@ -1464,23 +1544,29 @@ impl<'tcx, 'exprs, E: AsCoercionSite> CoerceMany<'tcx, 'exprs, E> {
}
}
Err(coercion_error) => {
+ // Mark that we've failed to coerce the types here to suppress
+ // any superfluous errors we might encounter while trying to
+ // emit or provide suggestions on how to fix the initial error.
+ fcx.set_tainted_by_errors();
let (expected, found) = if label_expression_as_expected {
// In the case where this is a "forced unit", like
// `break`, we want to call the `()` "expected"
// since it is implied by the syntax.
// (Note: not all force-units work this way.)
- (expression_ty, self.final_ty.unwrap_or(self.expected_ty))
+ (expression_ty, self.merged_ty())
} else {
// Otherwise, the "expected" type for error
// reporting is the current unification type,
// which is basically the LUB of the expressions
// we've seen so far (combined with the expected
// type)
- (self.final_ty.unwrap_or(self.expected_ty), expression_ty)
+ (self.merged_ty(), expression_ty)
};
+ let (expected, found) = fcx.resolve_vars_if_possible((expected, found));
let mut err;
let mut unsized_return = false;
+ let mut visitor = CollectRetsVisitor { ret_exprs: vec![] };
match *cause.code() {
ObligationCauseCode::ReturnNoExpression => {
err = struct_span_err!(
@@ -1506,6 +1592,10 @@ impl<'tcx, 'exprs, E: AsCoercionSite> CoerceMany<'tcx, 'exprs, E> {
if !fcx.tcx.features().unsized_locals {
unsized_return = self.is_return_ty_unsized(fcx, blk_id);
}
+ if let Some(expression) = expression
+ && let hir::ExprKind::Loop(loop_blk, ..) = expression.kind {
+ intravisit::walk_block(&mut visitor, loop_blk);
+ }
}
ObligationCauseCode::ReturnValue(id) => {
err = self.report_return_mismatched_types(
@@ -1524,7 +1614,7 @@ impl<'tcx, 'exprs, E: AsCoercionSite> CoerceMany<'tcx, 'exprs, E> {
}
}
_ => {
- err = fcx.report_mismatched_types(
+ err = fcx.err_ctxt().report_mismatched_types(
cause,
expected,
found,
@@ -1551,12 +1641,47 @@ impl<'tcx, 'exprs, E: AsCoercionSite> CoerceMany<'tcx, 'exprs, E> {
);
}
+ if visitor.ret_exprs.len() > 0 && let Some(expr) = expression {
+ self.note_unreachable_loop_return(&mut err, &expr, &visitor.ret_exprs);
+ }
err.emit_unless(unsized_return);
self.final_ty = Some(fcx.tcx.ty_error());
}
}
}
+ fn note_unreachable_loop_return(
+ &self,
+ err: &mut Diagnostic,
+ expr: &hir::Expr<'tcx>,
+ ret_exprs: &Vec<&'tcx hir::Expr<'tcx>>,
+ ) {
+ let hir::ExprKind::Loop(_, _, _, loop_span) = expr.kind else { return;};
+ let mut span: MultiSpan = vec![loop_span].into();
+ span.push_span_label(loop_span, "this might have zero elements to iterate on");
+ const MAXITER: usize = 3;
+ let iter = ret_exprs.iter().take(MAXITER);
+ for ret_expr in iter {
+ span.push_span_label(
+ ret_expr.span,
+ "if the loop doesn't execute, this value would never get returned",
+ );
+ }
+ err.span_note(
+ span,
+ "the function expects a value to always be returned, but loops might run zero times",
+ );
+ if MAXITER < ret_exprs.len() {
+ err.note(&format!(
+ "if the loop doesn't execute, {} other values would never get returned",
+ ret_exprs.len() - MAXITER
+ ));
+ }
+ err.help(
+ "return a value for the case when the loop has zero elements to iterate on, or \
+ consider changing the return type to account for that possibility",
+ );
+ }
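The shape of code the new note targets, shown as an illustrative (compiling) example: the only `return` sits inside a loop that may run zero times, so a fallback value after the loop is needed; removing the trailing `-1` reproduces the mismatch together with the new note and help.

```rust
fn first_positive(xs: &[i32]) -> i32 {
    for &x in xs {
        if x > 0 {
            // Without the `-1` below, this `return` would be the only
            // value-producing exit, and the new note would point here and
            // at the loop, which "might have zero elements to iterate on".
            return x;
        }
    }
    -1
}

fn main() {
    assert_eq!(first_positive(&[-3, 2, 5]), 2);
    assert_eq!(first_positive(&[]), -1);
}
```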
fn report_return_mismatched_types<'a>(
&self,
@@ -1569,7 +1694,7 @@ impl<'tcx, 'exprs, E: AsCoercionSite> CoerceMany<'tcx, 'exprs, E> {
expression: Option<&'tcx hir::Expr<'tcx>>,
blk_id: Option<hir::HirId>,
) -> DiagnosticBuilder<'a, ErrorGuaranteed> {
- let mut err = fcx.report_mismatched_types(cause, expected, found, ty_err);
+ let mut err = fcx.err_ctxt().report_mismatched_types(cause, expected, found, ty_err);
let mut pointing_at_return_type = false;
let mut fn_output = None;
@@ -1623,7 +1748,7 @@ impl<'tcx, 'exprs, E: AsCoercionSite> CoerceMany<'tcx, 'exprs, E> {
expected,
found,
can_suggest,
- fcx.tcx.hir().local_def_id_to_hir_id(fcx.tcx.hir().get_parent_item(id)),
+ fcx.tcx.hir().get_parent_item(id).into(),
);
}
if !pointing_at_return_type {
@@ -1632,7 +1757,7 @@ impl<'tcx, 'exprs, E: AsCoercionSite> CoerceMany<'tcx, 'exprs, E> {
}
let parent_id = fcx.tcx.hir().get_parent_item(id);
- let parent_item = fcx.tcx.hir().get_by_def_id(parent_id);
+ let parent_item = fcx.tcx.hir().get_by_def_id(parent_id.def_id);
if let (Some(expr), Some(_), Some((fn_decl, _, _))) =
(expression, blk_id, fcx.get_node_fn_decl(parent_item))
@@ -1644,13 +1769,34 @@ impl<'tcx, 'exprs, E: AsCoercionSite> CoerceMany<'tcx, 'exprs, E> {
expected,
found,
id,
- fcx.tcx.hir().local_def_id_to_hir_id(parent_id),
+ parent_id.into(),
+ );
+ }
+
+ let ret_coercion_span = fcx.ret_coercion_span.get();
+
+ if let Some(sp) = ret_coercion_span
+ // If the closure has an explicit return type annotation, or if
+ // the closure's return type has been inferred from outside
+ // requirements (such as an Fn* trait bound), then a type error
+ // may occur at the first return expression we see in the closure
+ // (if it conflicts with the declared return type). Skip adding a
+ // note in this case, since it would be incorrect.
+ && !fcx.return_type_pre_known
+ {
+ err.span_note(
+ sp,
+ &format!(
+ "return type inferred to be `{}` here",
+ expected
+ ),
);
}
- if let (Some(sp), Some(fn_output)) = (fcx.ret_coercion_span.get(), fn_output) {
+ if let (Some(sp), Some(fn_output)) = (ret_coercion_span, fn_output) {
self.add_impl_trait_explanation(&mut err, cause, fcx, expected, sp, fn_output);
}
+
err
}
diff --git a/compiler/rustc_typeck/src/check/demand.rs b/compiler/rustc_hir_typeck/src/demand.rs
index 4de48dc5b..16febfc46 100644
--- a/compiler/rustc_typeck/src/check/demand.rs
+++ b/compiler/rustc_hir_typeck/src/demand.rs
@@ -1,21 +1,20 @@
-use crate::check::FnCtxt;
-use rustc_infer::infer::InferOk;
-use rustc_middle::middle::stability::EvalResult;
-use rustc_trait_selection::infer::InferCtxtExt as _;
-use rustc_trait_selection::traits::ObligationCause;
-
+use crate::FnCtxt;
use rustc_ast::util::parser::PREC_POSTFIX;
use rustc_errors::{Applicability, Diagnostic, DiagnosticBuilder, ErrorGuaranteed};
use rustc_hir as hir;
use rustc_hir::lang_items::LangItem;
use rustc_hir::{is_range_literal, Node};
+use rustc_infer::infer::InferOk;
use rustc_middle::lint::in_external_macro;
+use rustc_middle::middle::stability::EvalResult;
use rustc_middle::ty::adjustment::AllowTwoPhase;
use rustc_middle::ty::error::{ExpectedFound, TypeError};
use rustc_middle::ty::print::with_no_trimmed_paths;
use rustc_middle::ty::{self, Article, AssocItem, Ty, TypeAndMut};
use rustc_span::symbol::{sym, Symbol};
use rustc_span::{BytePos, Span};
+use rustc_trait_selection::infer::InferCtxtExt as _;
+use rustc_trait_selection::traits::ObligationCause;
use super::method::probe;
@@ -32,20 +31,22 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
error: Option<TypeError<'tcx>>,
) {
self.annotate_expected_due_to_let_ty(err, expr, error);
- self.suggest_deref_ref_or_into(err, expr, expected, expr_ty, expected_ty_expr);
- self.suggest_compatible_variants(err, expr, expected, expr_ty);
- self.suggest_non_zero_new_unwrap(err, expr, expected, expr_ty);
- if self.suggest_calling_boxed_future_when_appropriate(err, expr, expected, expr_ty) {
- return;
- }
- self.suggest_no_capture_closure(err, expected, expr_ty);
- self.suggest_boxing_when_appropriate(err, expr, expected, expr_ty);
- self.suggest_missing_parentheses(err, expr);
- self.suggest_block_to_brackets_peeling_refs(err, expr, expr_ty, expected);
+
+ // Use `||` to give these suggestions a precedence
+ let _ = self.suggest_missing_parentheses(err, expr)
+ || self.suggest_deref_ref_or_into(err, expr, expected, expr_ty, expected_ty_expr)
+ || self.suggest_compatible_variants(err, expr, expected, expr_ty)
+ || self.suggest_non_zero_new_unwrap(err, expr, expected, expr_ty)
+ || self.suggest_calling_boxed_future_when_appropriate(err, expr, expected, expr_ty)
+ || self.suggest_no_capture_closure(err, expected, expr_ty)
+ || self.suggest_boxing_when_appropriate(err, expr, expected, expr_ty)
+ || self.suggest_block_to_brackets_peeling_refs(err, expr, expr_ty, expected)
+ || self.suggest_copied_or_cloned(err, expr, expr_ty, expected)
+ || self.suggest_into(err, expr, expr_ty, expected);
+
self.note_type_is_not_clone(err, expected, expr_ty, expr);
self.note_need_for_fn_pointer(err, expected, expr_ty);
self.note_internal_mutation_in_method(err, expr, expected, expr_ty);
- self.report_closure_inferred_return_type(err, expected);
}
// Requires that the two types unify, and prints an error message if
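A short plain-Rust illustration (not compiler code) of the short-circuiting the `||` chain above relies on: once one `suggest_*` helper returns `true`, the lower-priority helpers after it are never run.

```rust
fn try_suggestion(name: &str, applies: bool) -> bool {
    println!("considered: {name}");
    applies
}

fn main() {
    // Prints "considered: parentheses" and "considered: deref" only;
    // the third candidate is skipped because `||` short-circuits.
    let _ = try_suggestion("parentheses", false)
        || try_suggestion("deref", true)
        || try_suggestion("compatible variants", false);
}
```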
@@ -77,7 +78,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
self.register_predicates(obligations);
None
}
- Err(e) => Some(self.report_mismatched_types(&cause, expected, actual, e)),
+ Err(e) => Some(self.err_ctxt().report_mismatched_types(&cause, expected, actual, e)),
}
}
@@ -107,7 +108,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
self.register_predicates(obligations);
None
}
- Err(e) => Some(self.report_mismatched_types(cause, expected, actual, e)),
+ Err(e) => Some(self.err_ctxt().report_mismatched_types(cause, expected, actual, e)),
}
}
@@ -131,7 +132,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
///
/// N.B., this code relies on `self.diverges` to be accurate. In particular, assignments to `!`
/// will be permitted if the diverges flag is currently "always".
- #[tracing::instrument(level = "debug", skip(self, expr, expected_ty_expr, allow_two_phase))]
+ #[instrument(level = "debug", skip(self, expr, expected_ty_expr, allow_two_phase))]
pub fn demand_coerce_diag(
&self,
expr: &hir::Expr<'tcx>,
@@ -151,7 +152,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
let expr = expr.peel_drop_temps();
let cause = self.misc(expr.span);
let expr_ty = self.resolve_vars_with_obligations(checked_ty);
- let mut err = self.report_mismatched_types(&cause, expected, expr_ty, e.clone());
+ let mut err = self.err_ctxt().report_mismatched_types(&cause, expected, expr_ty, e.clone());
let is_insufficiently_polymorphic =
matches!(e, TypeError::RegionsInsufficientlyPolymorphic(..));
@@ -286,7 +287,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
expr: &hir::Expr<'_>,
expected: Ty<'tcx>,
expr_ty: Ty<'tcx>,
- ) {
+ ) -> bool {
if let ty::Adt(expected_adt, substs) = expected.kind() {
if let hir::ExprKind::Field(base, ident) = expr.kind {
let base_ty = self.typeck_results.borrow().expr_ty(base);
@@ -299,7 +300,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
"",
Applicability::MaybeIncorrect,
);
- return
+ return true;
}
}
@@ -338,7 +339,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
} else if self.tcx.is_diagnostic_item(sym::Option, expected_adt.did()) {
vec!["None", "Some(())"]
} else {
- return;
+ return false;
};
if let Some(indent) =
self.tcx.sess.source_map().indentation_before(span.shrink_to_lo())
@@ -358,7 +359,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
Applicability::MaybeIncorrect,
);
}
- return;
+ return true;
}
}
}
@@ -375,7 +376,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
let field_is_local = sole_field.did.is_local();
let field_is_accessible =
- sole_field.vis.is_accessible_from(expr.hir_id.owner.to_def_id(), self.tcx)
+ sole_field.vis.is_accessible_from(expr.hir_id.owner.def_id, self.tcx)
// Skip suggestions for unstable public fields (for example `Pin::pointer`)
&& matches!(self.tcx.eval_stability(sole_field.did, None, expr.span, None), EvalResult::Allow | EvalResult::Unmarked);
@@ -417,6 +418,16 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
hir::def::CtorKind::Const => unreachable!(),
};
+ // Suggest constructor as deep into the block tree as possible.
+ // This fixes https://github.com/rust-lang/rust/issues/101065,
+ // and also just helps make the most minimal suggestions.
+ let mut expr = expr;
+ while let hir::ExprKind::Block(block, _) = &expr.kind
+ && let Some(expr_) = &block.expr
+ {
+ expr = expr_
+ }
+
vec![
(expr.span.shrink_to_lo(), format!("{prefix}{variant}{open}")),
(expr.span.shrink_to_hi(), close.to_owned()),
@@ -435,6 +446,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
suggestions_for(&**variant, *ctor_kind, *field_name),
Applicability::MaybeIncorrect,
);
+ return true;
}
_ => {
// More than one matching variant.
@@ -450,9 +462,12 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
),
Applicability::MaybeIncorrect,
);
+ return true;
}
}
}
+
+ false
}
fn suggest_non_zero_new_unwrap(
@@ -461,19 +476,19 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
expr: &hir::Expr<'_>,
expected: Ty<'tcx>,
expr_ty: Ty<'tcx>,
- ) {
+ ) -> bool {
let tcx = self.tcx;
let (adt, unwrap) = match expected.kind() {
// In case Option<NonZero*> is wanted, but * is provided, suggest calling new
ty::Adt(adt, substs) if tcx.is_diagnostic_item(sym::Option, adt.did()) => {
// Unwrap option
- let ty::Adt(adt, _) = substs.type_at(0).kind() else { return };
+ let ty::Adt(adt, _) = substs.type_at(0).kind() else { return false; };
(adt, "")
}
// In case NonZero* is wanted, but * is provided also add `.unwrap()` to satisfy types
ty::Adt(adt, _) => (adt, ".unwrap()"),
- _ => return,
+ _ => return false,
};
let map = [
@@ -492,7 +507,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
let Some((s, _)) = map
.iter()
.find(|&&(s, t)| self.tcx.is_diagnostic_item(s, adt.did()) && self.can_coerce(expr_ty, t))
- else { return };
+ else { return false; };
let path = self.tcx.def_path_str(adt.non_enum_variant().def_id);
@@ -504,6 +519,8 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
],
Applicability::MaybeIncorrect,
);
+
+ true
}
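The suggestion assembled above, written out as the code a user ends up with (an illustrative example): the plain integer is wrapped in `NonZeroU32::new(...)`, with `.unwrap()` appended when a bare `NonZeroU32` rather than `Option<NonZeroU32>` is expected.

```rust
use std::num::NonZeroU32;

fn takes_non_zero(n: NonZeroU32) -> u32 {
    n.get()
}

fn takes_optional(n: Option<NonZeroU32>) -> u32 {
    n.map_or(0, NonZeroU32::get)
}

fn main() {
    // Bare `NonZeroU32` expected: wrap and append `.unwrap()`.
    assert_eq!(takes_non_zero(NonZeroU32::new(5).unwrap()), 5);
    // `Option<NonZeroU32>` expected: wrapping alone suffices.
    assert_eq!(takes_optional(NonZeroU32::new(5)), 5);
}
```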
pub fn get_conversion_methods(
@@ -513,24 +530,29 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
checked_ty: Ty<'tcx>,
hir_id: hir::HirId,
) -> Vec<AssocItem> {
- let mut methods =
- self.probe_for_return_type(span, probe::Mode::MethodCall, expected, checked_ty, hir_id);
- methods.retain(|m| {
- self.has_only_self_parameter(m)
- && self
- .tcx
- // This special internal attribute is used to permit
- // "identity-like" conversion methods to be suggested here.
- //
- // FIXME (#46459 and #46460): ideally
- // `std::convert::Into::into` and `std::borrow:ToOwned` would
- // also be `#[rustc_conversion_suggestion]`, if not for
- // method-probing false-positives and -negatives (respectively).
- //
- // FIXME? Other potential candidate methods: `as_ref` and
- // `as_mut`?
- .has_attr(m.def_id, sym::rustc_conversion_suggestion)
- });
+ let methods = self.probe_for_return_type(
+ span,
+ probe::Mode::MethodCall,
+ expected,
+ checked_ty,
+ hir_id,
+ |m| {
+ self.has_only_self_parameter(m)
+ && self
+ .tcx
+ // This special internal attribute is used to permit
+ // "identity-like" conversion methods to be suggested here.
+ //
+ // FIXME (#46459 and #46460): ideally
+ // `std::convert::Into::into` and `std::borrow:ToOwned` would
+ // also be `#[rustc_conversion_suggestion]`, if not for
+ // method-probing false-positives and -negatives (respectively).
+ //
+ // FIXME? Other potential candidate methods: `as_ref` and
+ // `as_mut`?
+ .has_attr(m.def_id, sym::rustc_conversion_suggestion)
+ },
+ );
methods
}
@@ -590,7 +612,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
let closure_params_len = closure_fn_decl.inputs.len();
let (
Some(Node::Expr(hir::Expr {
- kind: hir::ExprKind::MethodCall(method_path, method_expr, _),
+ kind: hir::ExprKind::MethodCall(method_path, receiver, ..),
..
})),
1,
@@ -598,14 +620,16 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
return None;
};
- let self_ty = self.typeck_results.borrow().expr_ty(&method_expr[0]);
- let self_ty = format!("{:?}", self_ty);
+ let self_ty = self.typeck_results.borrow().expr_ty(receiver);
let name = method_path.ident.name;
- let is_as_ref_able = (self_ty.starts_with("&std::option::Option")
- || self_ty.starts_with("&std::result::Result")
- || self_ty.starts_with("std::option::Option")
- || self_ty.starts_with("std::result::Result"))
- && (name == sym::map || name == sym::and_then);
+ let is_as_ref_able = match self_ty.peel_refs().kind() {
+ ty::Adt(def, _) => {
+ (self.tcx.is_diagnostic_item(sym::Option, def.did())
+ || self.tcx.is_diagnostic_item(sym::Result, def.did()))
+ && (name == sym::map || name == sym::and_then)
+ }
+ _ => false,
+ };
match (is_as_ref_able, self.sess().source_map().span_to_snippet(method_path.ident.span)) {
(true, Ok(src)) => {
let suggestion = format!("as_ref().{}", src);
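Hedged illustration of the call shape the reworked `is_as_ref_able` check looks for (an `Option` or `Result` receiver with `map`/`and_then`), written as the code the `as_ref().…` suggestion produces; the function is made up for the example.

```rust
fn name_len(name: &Option<String>) -> Option<usize> {
    // Borrowing with `as_ref()` before `map` keeps the `String` inside the
    // borrowed `Option`, which is what the suggestion spliced together
    // above inserts in front of the original `map`/`and_then` call.
    name.as_ref().map(|s| s.len())
}

fn main() {
    assert_eq!(name_len(&Some("rust".to_owned())), Some(4));
    assert_eq!(name_len(&None), None);
}
```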
@@ -637,11 +661,9 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
}?;
match hir.find(hir.get_parent_node(expr.hir_id))? {
- Node::Expr(hir::Expr { kind: hir::ExprKind::Struct(_, fields, ..), .. }) => {
- for field in *fields {
- if field.ident.name == local.name && field.is_shorthand {
- return Some(local.name);
- }
+ Node::ExprField(field) => {
+ if field.ident.name == local.name && field.is_shorthand {
+ return Some(local.name);
}
}
_ => {}
@@ -697,7 +719,14 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
expr: &hir::Expr<'tcx>,
checked_ty: Ty<'tcx>,
expected: Ty<'tcx>,
- ) -> Option<(Span, String, String, Applicability, bool /* verbose */)> {
+ ) -> Option<(
+ Span,
+ String,
+ String,
+ Applicability,
+ bool, /* verbose */
+ bool, /* suggest `&` or `&mut` type annotation */
+ )> {
let sess = self.sess();
let sp = expr.span;
@@ -729,6 +758,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
String::new(),
Applicability::MachineApplicable,
true,
+ false,
));
}
}
@@ -743,6 +773,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
"b".to_string(),
Applicability::MachineApplicable,
true,
+ false,
));
}
}
@@ -767,22 +798,21 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
};
if self.can_coerce(ref_ty, expected) {
let mut sugg_sp = sp;
- if let hir::ExprKind::MethodCall(ref segment, ref args, _) = expr.kind {
+ if let hir::ExprKind::MethodCall(ref segment, receiver, args, _) = expr.kind {
let clone_trait =
self.tcx.require_lang_item(LangItem::Clone, Some(segment.ident.span));
- if let ([arg], Some(true), sym::clone) = (
- &args[..],
- self.typeck_results.borrow().type_dependent_def_id(expr.hir_id).map(
+ if args.is_empty()
+ && self.typeck_results.borrow().type_dependent_def_id(expr.hir_id).map(
|did| {
let ai = self.tcx.associated_item(did);
ai.trait_container(self.tcx) == Some(clone_trait)
},
- ),
- segment.ident.name,
- ) {
+ ) == Some(true)
+ && segment.ident.name == sym::clone
+ {
// If this expression had a clone call when suggesting borrowing
// we want to suggest removing it because it'd now be unnecessary.
- sugg_sp = arg.span;
+ sugg_sp = receiver.span;
}
}
if let Ok(src) = sm.span_to_snippet(sugg_sp) {
@@ -793,7 +823,6 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
_ if is_range_literal(expr) => true,
_ => false,
};
- let sugg_expr = if needs_parens { format!("({src})") } else { src };
if let Some(sugg) = self.can_use_as_ref(expr) {
return Some((
@@ -802,6 +831,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
sugg.2,
Applicability::MachineApplicable,
false,
+ false,
));
}
@@ -821,6 +851,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
}
}
+ let sugg_expr = if needs_parens { format!("({src})") } else { src };
return Some(match mutability {
hir::Mutability::Mut => (
sp,
@@ -828,6 +859,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
format!("{prefix}&mut {sugg_expr}"),
Applicability::MachineApplicable,
false,
+ false,
),
hir::Mutability::Not => (
sp,
@@ -835,6 +867,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
format!("{prefix}&{sugg_expr}"),
Applicability::MachineApplicable,
false,
+ false,
),
});
}
@@ -864,6 +897,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
String::new(),
Applicability::MachineApplicable,
true,
+ true
));
}
return None;
@@ -877,6 +911,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
String::new(),
Applicability::MachineApplicable,
true,
+ true,
));
}
}
@@ -943,6 +978,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
src,
applicability,
true,
+ false,
));
}
}
@@ -983,6 +1019,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
Applicability::MachineApplicable
},
true,
+ false,
));
}
@@ -1034,6 +1071,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
suggestion,
Applicability::MachineApplicable,
true,
+ false,
));
}
}
@@ -1072,21 +1110,16 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
let mut sugg = vec![];
- if let Some(hir::Node::Expr(hir::Expr {
- kind: hir::ExprKind::Struct(_, fields, _), ..
- })) = self.tcx.hir().find(self.tcx.hir().get_parent_node(expr.hir_id))
+ if let Some(hir::Node::ExprField(field)) =
+ self.tcx.hir().find(self.tcx.hir().get_parent_node(expr.hir_id))
{
// `expr` is a literal field for a struct, only suggest if appropriate
- match (*fields)
- .iter()
- .find(|field| field.expr.hir_id == expr.hir_id && field.is_shorthand)
- {
+ if field.is_shorthand {
// This is a field literal
- Some(field) => {
- sugg.push((field.ident.span.shrink_to_lo(), format!("{}: ", field.ident)));
- }
+ sugg.push((field.ident.span.shrink_to_lo(), format!("{}: ", field.ident)));
+ } else {
// Likely a field was meant, but this field wasn't found. Do not suggest anything.
- None => return false,
+ return false;
}
};
@@ -1418,25 +1451,4 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
_ => false,
}
}
-
- // Report the type inferred by the return statement.
- fn report_closure_inferred_return_type(&self, err: &mut Diagnostic, expected: Ty<'tcx>) {
- if let Some(sp) = self.ret_coercion_span.get()
- // If the closure has an explicit return type annotation, or if
- // the closure's return type has been inferred from outside
- // requirements (such as an Fn* trait bound), then a type error
- // may occur at the first return expression we see in the closure
- // (if it conflicts with the declared return type). Skip adding a
- // note in this case, since it would be incorrect.
- && !self.return_type_pre_known
- {
- err.span_note(
- sp,
- &format!(
- "return type inferred to be `{}` here",
- self.resolve_vars_if_possible(expected)
- ),
- );
- }
- }
}
diff --git a/compiler/rustc_typeck/src/check/diverges.rs b/compiler/rustc_hir_typeck/src/diverges.rs
index 963a93a95..963a93a95 100644
--- a/compiler/rustc_typeck/src/check/diverges.rs
+++ b/compiler/rustc_hir_typeck/src/diverges.rs
diff --git a/compiler/rustc_hir_typeck/src/errors.rs b/compiler/rustc_hir_typeck/src/errors.rs
new file mode 100644
index 000000000..175037f9b
--- /dev/null
+++ b/compiler/rustc_hir_typeck/src/errors.rs
@@ -0,0 +1,126 @@
+//! Errors emitted by `rustc_hir_typeck`.
+use rustc_macros::{Diagnostic, Subdiagnostic};
+use rustc_middle::ty::Ty;
+use rustc_span::{symbol::Ident, Span};
+
+#[derive(Diagnostic)]
+#[diag(hir_analysis_field_multiply_specified_in_initializer, code = "E0062")]
+pub struct FieldMultiplySpecifiedInInitializer {
+ #[primary_span]
+ #[label]
+ pub span: Span,
+ #[label(previous_use_label)]
+ pub prev_span: Span,
+ pub ident: Ident,
+}
+
+#[derive(Diagnostic)]
+#[diag(hir_analysis_return_stmt_outside_of_fn_body, code = "E0572")]
+pub struct ReturnStmtOutsideOfFnBody {
+ #[primary_span]
+ pub span: Span,
+ #[label(encl_body_label)]
+ pub encl_body_span: Option<Span>,
+ #[label(encl_fn_label)]
+ pub encl_fn_span: Option<Span>,
+}
+
+#[derive(Diagnostic)]
+#[diag(hir_analysis_yield_expr_outside_of_generator, code = "E0627")]
+pub struct YieldExprOutsideOfGenerator {
+ #[primary_span]
+ pub span: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(hir_analysis_struct_expr_non_exhaustive, code = "E0639")]
+pub struct StructExprNonExhaustive {
+ #[primary_span]
+ pub span: Span,
+ pub what: &'static str,
+}
+
+#[derive(Diagnostic)]
+#[diag(hir_analysis_method_call_on_unknown_type, code = "E0699")]
+pub struct MethodCallOnUnknownType {
+ #[primary_span]
+ pub span: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(hir_analysis_functional_record_update_on_non_struct, code = "E0436")]
+pub struct FunctionalRecordUpdateOnNonStruct {
+ #[primary_span]
+ pub span: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(hir_analysis_address_of_temporary_taken, code = "E0745")]
+pub struct AddressOfTemporaryTaken {
+ #[primary_span]
+ #[label]
+ pub span: Span,
+}
+
+#[derive(Subdiagnostic)]
+pub enum AddReturnTypeSuggestion {
+ #[suggestion(
+ hir_analysis_add_return_type_add,
+ code = "-> {found} ",
+ applicability = "machine-applicable"
+ )]
+ Add {
+ #[primary_span]
+ span: Span,
+ found: String,
+ },
+ #[suggestion(
+ hir_analysis_add_return_type_missing_here,
+ code = "-> _ ",
+ applicability = "has-placeholders"
+ )]
+ MissingHere {
+ #[primary_span]
+ span: Span,
+ },
+}
+
+#[derive(Subdiagnostic)]
+pub enum ExpectedReturnTypeLabel<'tcx> {
+ #[label(hir_analysis_expected_default_return_type)]
+ Unit {
+ #[primary_span]
+ span: Span,
+ },
+ #[label(hir_analysis_expected_return_type)]
+ Other {
+ #[primary_span]
+ span: Span,
+ expected: Ty<'tcx>,
+ },
+}
+
+#[derive(Diagnostic)]
+#[diag(hir_analysis_missing_parentheses_in_range, code = "E0689")]
+pub struct MissingParentheseInRange {
+ #[primary_span]
+ #[label(hir_analysis_missing_parentheses_in_range)]
+ pub span: Span,
+ pub ty_str: String,
+ pub method_name: String,
+ #[subdiagnostic]
+ pub add_missing_parentheses: Option<AddMissingParenthesesInRange>,
+}
+
+#[derive(Subdiagnostic)]
+#[multipart_suggestion_verbose(
+ hir_analysis_add_missing_parentheses_in_range,
+ applicability = "maybe-incorrect"
+)]
+pub struct AddMissingParenthesesInRange {
+ pub func_name: String,
+ #[suggestion_part(code = "(")]
+ pub left: Span,
+ #[suggestion_part(code = ")")]
+ pub right: Span,
+}
diff --git a/compiler/rustc_typeck/src/check/expectation.rs b/compiler/rustc_hir_typeck/src/expectation.rs
index e9e810344..e9e810344 100644
--- a/compiler/rustc_typeck/src/check/expectation.rs
+++ b/compiler/rustc_hir_typeck/src/expectation.rs
diff --git a/compiler/rustc_hir_typeck/src/expr.rs b/compiler/rustc_hir_typeck/src/expr.rs
new file mode 100644
index 000000000..9fde62a81
--- /dev/null
+++ b/compiler/rustc_hir_typeck/src/expr.rs
@@ -0,0 +1,2896 @@
+//! Type checking expressions.
+//!
+//! See `mod.rs` for more context on type checking in general.
+
+use crate::cast;
+use crate::coercion::CoerceMany;
+use crate::coercion::DynamicCoerceMany;
+use crate::errors::{AddressOfTemporaryTaken, ReturnStmtOutsideOfFnBody, StructExprNonExhaustive};
+use crate::errors::{
+ FieldMultiplySpecifiedInInitializer, FunctionalRecordUpdateOnNonStruct,
+ YieldExprOutsideOfGenerator,
+};
+use crate::fatally_break_rust;
+use crate::method::SelfSource;
+use crate::type_error_struct;
+use crate::Expectation::{self, ExpectCastableToType, ExpectHasType, NoExpectation};
+use crate::{
+ report_unexpected_variant_res, BreakableCtxt, Diverges, FnCtxt, Needs,
+ TupleArgumentsFlag::DontTupleArguments,
+};
+use rustc_ast as ast;
+use rustc_data_structures::fx::FxHashMap;
+use rustc_data_structures::stack::ensure_sufficient_stack;
+use rustc_errors::{
+ pluralize, struct_span_err, Applicability, Diagnostic, DiagnosticBuilder, DiagnosticId,
+ ErrorGuaranteed, StashKey,
+};
+use rustc_hir as hir;
+use rustc_hir::def::{CtorKind, DefKind, Res};
+use rustc_hir::def_id::DefId;
+use rustc_hir::intravisit::Visitor;
+use rustc_hir::lang_items::LangItem;
+use rustc_hir::{Closure, ExprKind, HirId, QPath};
+use rustc_hir_analysis::astconv::AstConv as _;
+use rustc_hir_analysis::check::ty_kind_suggestion;
+use rustc_infer::infer;
+use rustc_infer::infer::type_variable::{TypeVariableOrigin, TypeVariableOriginKind};
+use rustc_infer::infer::InferOk;
+use rustc_infer::traits::ObligationCause;
+use rustc_middle::middle::stability;
+use rustc_middle::ty::adjustment::{Adjust, Adjustment, AllowTwoPhase};
+use rustc_middle::ty::error::TypeError::FieldMisMatch;
+use rustc_middle::ty::subst::SubstsRef;
+use rustc_middle::ty::{self, AdtKind, Ty, TypeVisitable};
+use rustc_session::errors::ExprParenthesesNeeded;
+use rustc_session::parse::feature_err;
+use rustc_span::hygiene::DesugaringKind;
+use rustc_span::lev_distance::find_best_match_for_name;
+use rustc_span::source_map::{Span, Spanned};
+use rustc_span::symbol::{kw, sym, Ident, Symbol};
+use rustc_target::spec::abi::Abi::RustIntrinsic;
+use rustc_trait_selection::infer::InferCtxtExt;
+use rustc_trait_selection::traits::{self, ObligationCauseCode};
+
+impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
+ fn check_expr_eq_type(&self, expr: &'tcx hir::Expr<'tcx>, expected: Ty<'tcx>) {
+ let ty = self.check_expr_with_hint(expr, expected);
+ self.demand_eqtype(expr.span, expected, ty);
+ }
+
+ pub fn check_expr_has_type_or_error(
+ &self,
+ expr: &'tcx hir::Expr<'tcx>,
+ expected: Ty<'tcx>,
+ extend_err: impl FnMut(&mut Diagnostic),
+ ) -> Ty<'tcx> {
+ self.check_expr_meets_expectation_or_error(expr, ExpectHasType(expected), extend_err)
+ }
+
+ fn check_expr_meets_expectation_or_error(
+ &self,
+ expr: &'tcx hir::Expr<'tcx>,
+ expected: Expectation<'tcx>,
+ mut extend_err: impl FnMut(&mut Diagnostic),
+ ) -> Ty<'tcx> {
+ let expected_ty = expected.to_option(&self).unwrap_or(self.tcx.types.bool);
+ let mut ty = self.check_expr_with_expectation(expr, expected);
+
+ // While we don't allow *arbitrary* coercions here, we *do* allow
+ // coercions from ! to `expected`.
+ if ty.is_never() {
+ if let Some(adjustments) = self.typeck_results.borrow().adjustments().get(expr.hir_id) {
+ self.tcx().sess.delay_span_bug(
+ expr.span,
+ "expression with never type wound up being adjusted",
+ );
+ return if let [Adjustment { kind: Adjust::NeverToAny, target }] = &adjustments[..] {
+ target.to_owned()
+ } else {
+ self.tcx().ty_error()
+ };
+ }
+
+ let adj_ty = self.next_ty_var(TypeVariableOrigin {
+ kind: TypeVariableOriginKind::AdjustmentType,
+ span: expr.span,
+ });
+ self.apply_adjustments(
+ expr,
+ vec![Adjustment { kind: Adjust::NeverToAny, target: adj_ty }],
+ );
+ ty = adj_ty;
+ }
+
+ if let Some(mut err) = self.demand_suptype_diag(expr.span, expected_ty, ty) {
+ let expr = expr.peel_drop_temps();
+ self.suggest_deref_ref_or_into(&mut err, expr, expected_ty, ty, None);
+ extend_err(&mut err);
+ err.emit();
+ }
+ ty
+ }
+
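Illustrative of the `!`-to-expected allowance in the comment above (the example is not from the compiler's test suite): a diverging condition is adjusted to the expected `bool` instead of being rejected.

```rust
#![allow(unreachable_code)]

fn bail() -> ! {
    std::process::exit(0)
}

fn main() {
    let ready = true;
    if ready {
        println!("ready");
    } else if bail() {
        // The `bail()` condition has type `!`; it is adjusted (`NeverToAny`)
        // to the expected `bool` rather than reported as a type mismatch.
        println!("unreachable");
    }
}
```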
+ pub(super) fn check_expr_coercable_to_type(
+ &self,
+ expr: &'tcx hir::Expr<'tcx>,
+ expected: Ty<'tcx>,
+ expected_ty_expr: Option<&'tcx hir::Expr<'tcx>>,
+ ) -> Ty<'tcx> {
+ let ty = self.check_expr_with_hint(expr, expected);
+ // these checks don't need two-phase borrows
+ self.demand_coerce(expr, ty, expected, expected_ty_expr, AllowTwoPhase::No)
+ }
+
+ pub(super) fn check_expr_with_hint(
+ &self,
+ expr: &'tcx hir::Expr<'tcx>,
+ expected: Ty<'tcx>,
+ ) -> Ty<'tcx> {
+ self.check_expr_with_expectation(expr, ExpectHasType(expected))
+ }
+
+ fn check_expr_with_expectation_and_needs(
+ &self,
+ expr: &'tcx hir::Expr<'tcx>,
+ expected: Expectation<'tcx>,
+ needs: Needs,
+ ) -> Ty<'tcx> {
+ let ty = self.check_expr_with_expectation(expr, expected);
+
+ // If the expression is used in a place where a mutable place is required,
+ // e.g. the LHS of an assignment, perform the conversion.
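+ // (For instance, for an assignment through an overloaded deref, any recorded
+ // `Deref` adjustments on the place must become `DerefMut` ones.)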
+ if let Needs::MutPlace = needs {
+ self.convert_place_derefs_to_mutable(expr);
+ }
+
+ ty
+ }
+
+ pub(super) fn check_expr(&self, expr: &'tcx hir::Expr<'tcx>) -> Ty<'tcx> {
+ self.check_expr_with_expectation(expr, NoExpectation)
+ }
+
+ pub(super) fn check_expr_with_needs(
+ &self,
+ expr: &'tcx hir::Expr<'tcx>,
+ needs: Needs,
+ ) -> Ty<'tcx> {
+ self.check_expr_with_expectation_and_needs(expr, NoExpectation, needs)
+ }
+
+ /// Invariant:
+ /// If an expression has any sub-expressions that result in a type error,
+ /// inspecting that expression's type with `ty.references_error()` will return
+ /// true. Likewise, if an expression is known to diverge, inspecting its
+ /// type with `ty::type_is_bot` will return true (n.b.: since Rust is
+ /// strict, `_|_` can appear in the type of an expression that does not,
+ /// itself, diverge: for example, `fn() -> _|_`).
+ /// Note that inspecting a type's structure *directly* may expose the fact
+ /// that there are actually multiple representations for `Error`, so avoid
+ /// that when `Error` needs to be handled differently.
+ #[instrument(skip(self, expr), level = "debug")]
+ pub(super) fn check_expr_with_expectation(
+ &self,
+ expr: &'tcx hir::Expr<'tcx>,
+ expected: Expectation<'tcx>,
+ ) -> Ty<'tcx> {
+ self.check_expr_with_expectation_and_args(expr, expected, &[])
+ }
+
+ /// Same as `check_expr_with_expectation`, but allows us to pass in the arguments of a
+ /// `ExprKind::Call` when evaluating its callee when it is an `ExprKind::Path`.
+ pub(super) fn check_expr_with_expectation_and_args(
+ &self,
+ expr: &'tcx hir::Expr<'tcx>,
+ expected: Expectation<'tcx>,
+ args: &'tcx [hir::Expr<'tcx>],
+ ) -> Ty<'tcx> {
+ if self.tcx().sess.verbose() {
+ // make this code only run with -Zverbose because it is probably slow
+ if let Ok(lint_str) = self.tcx.sess.source_map().span_to_snippet(expr.span) {
+ if !lint_str.contains('\n') {
+ debug!("expr text: {lint_str}");
+ } else {
+ let mut lines = lint_str.lines();
+ if let Some(line0) = lines.next() {
+ let remaining_lines = lines.count();
+ debug!("expr text: {line0}");
+ debug!("expr text: ...(and {remaining_lines} more lines)");
+ }
+ }
+ }
+ }
+
+ // True if `expr` is a `Try::from_ok(())` that is a result of desugaring a try block
+ // without the final expr (e.g. `try { return; }`). We don't want to generate an
+ // unreachable_code lint for it since warnings for autogenerated code are confusing.
+ let is_try_block_generated_unit_expr = match expr.kind {
+ ExprKind::Call(_, args) if expr.span.is_desugaring(DesugaringKind::TryBlock) => {
+ args.len() == 1 && args[0].span.is_desugaring(DesugaringKind::TryBlock)
+ }
+
+ _ => false,
+ };
+
+ // Warn for expressions after diverging siblings.
+ if !is_try_block_generated_unit_expr {
+ self.warn_if_unreachable(expr.hir_id, expr.span, "expression");
+ }
+
+ // Hide the outer diverging and has_errors flags.
+ let old_diverges = self.diverges.replace(Diverges::Maybe);
+ let old_has_errors = self.has_errors.replace(false);
+
+ let ty = ensure_sufficient_stack(|| match &expr.kind {
+ hir::ExprKind::Path(
+ qpath @ hir::QPath::Resolved(..) | qpath @ hir::QPath::TypeRelative(..),
+ ) => self.check_expr_path(qpath, expr, args),
+ _ => self.check_expr_kind(expr, expected),
+ });
+
+ // Warn for non-block expressions with diverging children.
+ match expr.kind {
+ ExprKind::Block(..)
+ | ExprKind::If(..)
+ | ExprKind::Let(..)
+ | ExprKind::Loop(..)
+ | ExprKind::Match(..) => {}
+ // If `expr` is a result of desugaring the try block and is an ok-wrapped
+ // diverging expression (e.g. it arose from desugaring of `try { return }`),
+ // we skip issuing a warning because it is autogenerated code.
+ ExprKind::Call(..) if expr.span.is_desugaring(DesugaringKind::TryBlock) => {}
+ ExprKind::Call(callee, _) => self.warn_if_unreachable(expr.hir_id, callee.span, "call"),
+ ExprKind::MethodCall(segment, ..) => {
+ self.warn_if_unreachable(expr.hir_id, segment.ident.span, "call")
+ }
+ _ => self.warn_if_unreachable(expr.hir_id, expr.span, "expression"),
+ }
+
+ // Any expression that produces a value of type `!` must have diverged
+ if ty.is_never() {
+ self.diverges.set(self.diverges.get() | Diverges::always(expr.span));
+ }
+
+ // Record the type, which applies its effects.
+ // We need to do this after the warning above, so that
+ // we don't warn for the diverging expression itself.
+ self.write_ty(expr.hir_id, ty);
+
+ // Combine the diverging and has_error flags.
+ self.diverges.set(self.diverges.get() | old_diverges);
+ self.has_errors.set(self.has_errors.get() | old_has_errors);
+
+ debug!("type of {} is...", self.tcx.hir().node_to_string(expr.hir_id));
+ debug!("... {:?}, expected is {:?}", ty, expected);
+
+ ty
+ }
+
+ #[instrument(skip(self, expr), level = "debug")]
+ fn check_expr_kind(
+ &self,
+ expr: &'tcx hir::Expr<'tcx>,
+ expected: Expectation<'tcx>,
+ ) -> Ty<'tcx> {
+ trace!("expr={:#?}", expr);
+
+ let tcx = self.tcx;
+ match expr.kind {
+ ExprKind::Box(subexpr) => self.check_expr_box(subexpr, expected),
+ ExprKind::Lit(ref lit) => self.check_lit(&lit, expected),
+ ExprKind::Binary(op, lhs, rhs) => self.check_binop(expr, op, lhs, rhs, expected),
+ ExprKind::Assign(lhs, rhs, span) => {
+ self.check_expr_assign(expr, expected, lhs, rhs, span)
+ }
+ ExprKind::AssignOp(op, lhs, rhs) => {
+ self.check_binop_assign(expr, op, lhs, rhs, expected)
+ }
+ ExprKind::Unary(unop, oprnd) => self.check_expr_unary(unop, oprnd, expected, expr),
+ ExprKind::AddrOf(kind, mutbl, oprnd) => {
+ self.check_expr_addr_of(kind, mutbl, oprnd, expected, expr)
+ }
+ ExprKind::Path(QPath::LangItem(lang_item, _, hir_id)) => {
+ self.check_lang_item_path(lang_item, expr, hir_id)
+ }
+ ExprKind::Path(ref qpath) => self.check_expr_path(qpath, expr, &[]),
+ ExprKind::InlineAsm(asm) => {
+ // We defer some asm checks as we may not have resolved the input and output types yet (they may still be infer vars).
+ self.deferred_asm_checks.borrow_mut().push((asm, expr.hir_id));
+ self.check_expr_asm(asm)
+ }
+ ExprKind::Break(destination, ref expr_opt) => {
+ self.check_expr_break(destination, expr_opt.as_deref(), expr)
+ }
+ ExprKind::Continue(destination) => {
+ if destination.target_id.is_ok() {
+ tcx.types.never
+ } else {
+ // There was an error; make type-check fail.
+ tcx.ty_error()
+ }
+ }
+ ExprKind::Ret(ref expr_opt) => self.check_expr_return(expr_opt.as_deref(), expr),
+ ExprKind::Let(let_expr) => self.check_expr_let(let_expr),
+ ExprKind::Loop(body, _, source, _) => {
+ self.check_expr_loop(body, source, expected, expr)
+ }
+ ExprKind::Match(discrim, arms, match_src) => {
+ self.check_match(expr, &discrim, arms, expected, match_src)
+ }
+ ExprKind::Closure(&Closure { capture_clause, fn_decl, body, movability, .. }) => {
+ self.check_expr_closure(expr, capture_clause, &fn_decl, body, movability, expected)
+ }
+ ExprKind::Block(body, _) => self.check_block_with_expected(&body, expected),
+ ExprKind::Call(callee, args) => self.check_call(expr, &callee, args, expected),
+ ExprKind::MethodCall(segment, receiver, args, _) => {
+ self.check_method_call(expr, segment, receiver, args, expected)
+ }
+ ExprKind::Cast(e, t) => self.check_expr_cast(e, t, expr),
+ ExprKind::Type(e, t) => {
+ let ty = self.to_ty_saving_user_provided_ty(&t);
+ self.check_expr_eq_type(&e, ty);
+ ty
+ }
+ ExprKind::If(cond, then_expr, opt_else_expr) => {
+ self.check_then_else(cond, then_expr, opt_else_expr, expr.span, expected)
+ }
+ ExprKind::DropTemps(e) => self.check_expr_with_expectation(e, expected),
+ ExprKind::Array(args) => self.check_expr_array(args, expected, expr),
+ ExprKind::ConstBlock(ref anon_const) => {
+ self.check_expr_const_block(anon_const, expected, expr)
+ }
+ ExprKind::Repeat(element, ref count) => {
+ self.check_expr_repeat(element, count, expected, expr)
+ }
+ ExprKind::Tup(elts) => self.check_expr_tuple(elts, expected, expr),
+ ExprKind::Struct(qpath, fields, ref base_expr) => {
+ self.check_expr_struct(expr, expected, qpath, fields, base_expr)
+ }
+ ExprKind::Field(base, field) => self.check_field(expr, &base, field),
+ ExprKind::Index(base, idx) => self.check_expr_index(base, idx, expr),
+ ExprKind::Yield(value, ref src) => self.check_expr_yield(value, expr, src),
+ hir::ExprKind::Err => tcx.ty_error(),
+ }
+ }
+
+ fn check_expr_box(&self, expr: &'tcx hir::Expr<'tcx>, expected: Expectation<'tcx>) -> Ty<'tcx> {
+ let expected_inner = expected.to_option(self).map_or(NoExpectation, |ty| match ty.kind() {
+ ty::Adt(def, _) if def.is_box() => Expectation::rvalue_hint(self, ty.boxed_ty()),
+ _ => NoExpectation,
+ });
+ let referent_ty = self.check_expr_with_expectation(expr, expected_inner);
+ self.require_type_is_sized(referent_ty, expr.span, traits::SizedBoxType);
+ self.tcx.mk_box(referent_ty)
+ }
+
+ fn check_expr_unary(
+ &self,
+ unop: hir::UnOp,
+ oprnd: &'tcx hir::Expr<'tcx>,
+ expected: Expectation<'tcx>,
+ expr: &'tcx hir::Expr<'tcx>,
+ ) -> Ty<'tcx> {
+ let tcx = self.tcx;
+ let expected_inner = match unop {
+ hir::UnOp::Not | hir::UnOp::Neg => expected,
+ hir::UnOp::Deref => NoExpectation,
+ };
+ let mut oprnd_t = self.check_expr_with_expectation(&oprnd, expected_inner);
+
+ if !oprnd_t.references_error() {
+ oprnd_t = self.structurally_resolved_type(expr.span, oprnd_t);
+ match unop {
+ hir::UnOp::Deref => {
+ if let Some(ty) = self.lookup_derefing(expr, oprnd, oprnd_t) {
+ oprnd_t = ty;
+ } else {
+ let mut err = type_error_struct!(
+ tcx.sess,
+ expr.span,
+ oprnd_t,
+ E0614,
+ "type `{oprnd_t}` cannot be dereferenced",
+ );
+ let sp = tcx.sess.source_map().start_point(expr.span);
+ if let Some(sp) =
+ tcx.sess.parse_sess.ambiguous_block_expr_parse.borrow().get(&sp)
+ {
+ err.subdiagnostic(ExprParenthesesNeeded::surrounding(*sp));
+ }
+ err.emit();
+ oprnd_t = tcx.ty_error();
+ }
+ }
+ hir::UnOp::Not => {
+ let result = self.check_user_unop(expr, oprnd_t, unop, expected_inner);
+ // If it's builtin, we can reuse the type; this helps inference.
+ if !(oprnd_t.is_integral() || *oprnd_t.kind() == ty::Bool) {
+ oprnd_t = result;
+ }
+ }
+ hir::UnOp::Neg => {
+ let result = self.check_user_unop(expr, oprnd_t, unop, expected_inner);
+ // If it's builtin, we can reuse the type; this helps inference.
+ if !oprnd_t.is_numeric() {
+ oprnd_t = result;
+ }
+ }
+ }
+ }
+ oprnd_t
+ }
+
+ fn check_expr_addr_of(
+ &self,
+ kind: hir::BorrowKind,
+ mutbl: hir::Mutability,
+ oprnd: &'tcx hir::Expr<'tcx>,
+ expected: Expectation<'tcx>,
+ expr: &'tcx hir::Expr<'tcx>,
+ ) -> Ty<'tcx> {
+ let hint = expected.only_has_type(self).map_or(NoExpectation, |ty| {
+ match ty.kind() {
+ ty::Ref(_, ty, _) | ty::RawPtr(ty::TypeAndMut { ty, .. }) => {
+ if oprnd.is_syntactic_place_expr() {
+ // Places may legitimately have unsized types.
+ // For example, dereferences of a fat pointer and
+ // the last field of a struct can be unsized.
+ ExpectHasType(*ty)
+ } else {
+ Expectation::rvalue_hint(self, *ty)
+ }
+ }
+ _ => NoExpectation,
+ }
+ });
+ let ty =
+ self.check_expr_with_expectation_and_needs(&oprnd, hint, Needs::maybe_mut_place(mutbl));
+
+ let tm = ty::TypeAndMut { ty, mutbl };
+ match kind {
+ _ if tm.ty.references_error() => self.tcx.ty_error(),
+ hir::BorrowKind::Raw => {
+ self.check_named_place_expr(oprnd);
+ self.tcx.mk_ptr(tm)
+ }
+ hir::BorrowKind::Ref => {
+ // Note: at this point, we cannot say what the best lifetime
+ // is to use for the resulting pointer. We want to use the
+ // shortest lifetime possible so as to avoid spurious borrowck
+ // errors. Moreover, the longest lifetime will depend on the
+ // precise details of the value whose address is being taken
+ // (and how long it is valid), which we don't know yet until
+ // type inference is complete.
+ //
+ // Therefore, here we simply generate a region variable. The
+ // region inferencer will then select a suitable value.
+ // Finally, borrowck will infer the value of the region again,
+ // this time with enough precision to check that the value
+ // whose address was taken can actually be made to live as long
+ // as it needs to live.
+ let region = self.next_region_var(infer::AddrOfRegion(expr.span));
+ self.tcx.mk_ref(region, tm)
+ }
+ }
+ }
+
+ /// Does this expression refer to a place that either:
+ /// * Is based on a local or static.
+ /// * Contains a dereference.
+ ///
+ /// Note that the adjustments for the children of `expr` should already
+ /// have been resolved.
+ fn check_named_place_expr(&self, oprnd: &'tcx hir::Expr<'tcx>) {
+ let is_named = oprnd.is_place_expr(|base| {
+ // Allow raw borrows if there are any deref adjustments.
+ //
+ // const VAL: (i32,) = (0,);
+ // const REF: &(i32,) = &(0,);
+ //
+ // &raw const VAL.0; // ERROR
+ // &raw const REF.0; // OK, same as &raw const (*REF).0;
+ //
+ // This is maybe too permissive, since it allows
+ // `let u = &raw const Box::new((1,)).0`, which creates an
+ // immediately dangling raw pointer.
+ self.typeck_results
+ .borrow()
+ .adjustments()
+ .get(base.hir_id)
+ .map_or(false, |x| x.iter().any(|adj| matches!(adj.kind, Adjust::Deref(_))))
+ });
+ if !is_named {
+ self.tcx.sess.emit_err(AddressOfTemporaryTaken { span: oprnd.span });
+ }
+ }
+
+ fn check_lang_item_path(
+ &self,
+ lang_item: hir::LangItem,
+ expr: &'tcx hir::Expr<'tcx>,
+ hir_id: Option<hir::HirId>,
+ ) -> Ty<'tcx> {
+ self.resolve_lang_item_path(lang_item, expr.span, expr.hir_id, hir_id).1
+ }
+
+ pub(crate) fn check_expr_path(
+ &self,
+ qpath: &'tcx hir::QPath<'tcx>,
+ expr: &'tcx hir::Expr<'tcx>,
+ args: &'tcx [hir::Expr<'tcx>],
+ ) -> Ty<'tcx> {
+ let tcx = self.tcx;
+ let (res, opt_ty, segs) =
+ self.resolve_ty_and_res_fully_qualified_call(qpath, expr.hir_id, expr.span);
+ let ty = match res {
+ Res::Err => {
+ self.set_tainted_by_errors();
+ tcx.ty_error()
+ }
+ Res::Def(DefKind::Ctor(_, CtorKind::Fictive), _) => {
+ report_unexpected_variant_res(tcx, res, qpath, expr.span);
+ tcx.ty_error()
+ }
+ _ => self.instantiate_value_path(segs, opt_ty, res, expr.span, expr.hir_id).0,
+ };
+
+ if let ty::FnDef(did, ..) = *ty.kind() {
+ let fn_sig = ty.fn_sig(tcx);
+ if tcx.fn_sig(did).abi() == RustIntrinsic && tcx.item_name(did) == sym::transmute {
+ let from = fn_sig.inputs().skip_binder()[0];
+ let to = fn_sig.output().skip_binder();
+ // We defer the transmute to the end of typeck, once all inference vars have
+ // been resolved or we errored. This is important as we can only check transmute
+ // on concrete types, but the output type may not be known yet (it would only
+ // be known if explicitly specified via turbofish).
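+ // Illustrative example: in `let n = transmute(x);` the target type is still
+ // an inference variable at this point and only becomes known once inference
+ // has finished.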
+ self.deferred_transmute_checks.borrow_mut().push((from, to, expr.hir_id));
+ }
+ if !tcx.features().unsized_fn_params {
+ // We want to remove some Sized bounds from std functions,
+ // but don't want to expose the removal to stable Rust.
+ // i.e., we don't want to allow
+ //
+ // ```rust
+ // drop as fn(str);
+ // ```
+ //
+ // to work in stable even if the Sized bound on `drop` is relaxed.
+ for i in 0..fn_sig.inputs().skip_binder().len() {
+ // We just want to check sizedness, so instead of introducing
+ // placeholder lifetimes with probing, we just replace higher lifetimes
+ // with fresh vars.
+ let span = args.get(i).map(|a| a.span).unwrap_or(expr.span);
+ let input = self.replace_bound_vars_with_fresh_vars(
+ span,
+ infer::LateBoundRegionConversionTime::FnCall,
+ fn_sig.input(i),
+ );
+ self.require_type_is_sized_deferred(
+ input,
+ span,
+ traits::SizedArgumentType(None),
+ );
+ }
+ }
+ // Here we want to prevent struct constructors from returning unsized types.
+ // There were two cases where this happened: fn pointer coercion on stable
+ // and regular function calls in the presence of `unsized_locals`.
+ // Also, as we just want to check sizedness, instead of introducing
+ // placeholder lifetimes with probing, we just replace higher lifetimes
+ // with fresh vars.
+ let output = self.replace_bound_vars_with_fresh_vars(
+ expr.span,
+ infer::LateBoundRegionConversionTime::FnCall,
+ fn_sig.output(),
+ );
+ self.require_type_is_sized_deferred(output, expr.span, traits::SizedReturnType);
+ }
+
+ // We always require that the type provided as the value for
+ // a type parameter outlives the moment of instantiation.
+ let substs = self.typeck_results.borrow().node_substs(expr.hir_id);
+ self.add_wf_bounds(substs, expr);
+
+ ty
+ }
+
+ fn check_expr_break(
+ &self,
+ destination: hir::Destination,
+ expr_opt: Option<&'tcx hir::Expr<'tcx>>,
+ expr: &'tcx hir::Expr<'tcx>,
+ ) -> Ty<'tcx> {
+ let tcx = self.tcx;
+ if let Ok(target_id) = destination.target_id {
+ let (e_ty, cause);
+ if let Some(e) = expr_opt {
+ // If this is a break with a value, we need to type-check
+ // the expression. Get an expected type from the loop context.
+ let opt_coerce_to = {
+ // We should release `enclosing_breakables` before the `check_expr_with_hint`
+ // below, so can't move this block of code to the enclosing scope and share
+ // `ctxt` with the second `enclosing_breakables` borrow below.
+ let mut enclosing_breakables = self.enclosing_breakables.borrow_mut();
+ match enclosing_breakables.opt_find_breakable(target_id) {
+ Some(ctxt) => ctxt.coerce.as_ref().map(|coerce| coerce.expected_ty()),
+ None => {
+ // Avoid ICE when `break` is inside a closure (#65383).
+ return tcx.ty_error_with_message(
+ expr.span,
+ "break was outside loop, but no error was emitted",
+ );
+ }
+ }
+ };
+
+ // If the loop context is not a `loop { }`, then a `break` with
+ // a value is illegal, and `opt_coerce_to` will be `None`.
+ // Just set the expectation to the error type in that case.
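+ // Illustrative example: `while cond { break 5; }` -- a `break` with a value
+ // is only accepted inside a plain `loop`, so no coercion target exists here.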
+ let coerce_to = opt_coerce_to.unwrap_or_else(|| tcx.ty_error());
+
+ // Recurse without `enclosing_breakables` borrowed.
+ e_ty = self.check_expr_with_hint(e, coerce_to);
+ cause = self.misc(e.span);
+ } else {
+ // Otherwise, this is a break *without* a value. That's
+ // always legal, and is equivalent to `break ()`.
+ e_ty = tcx.mk_unit();
+ cause = self.misc(expr.span);
+ }
+
+ // Now that we have type-checked `expr_opt`, borrow
+ // the `enclosing_loops` field and let's coerce the
+ // type of `expr_opt` into what is expected.
+ let mut enclosing_breakables = self.enclosing_breakables.borrow_mut();
+ let Some(ctxt) = enclosing_breakables.opt_find_breakable(target_id) else {
+ // Avoid ICE when `break` is inside a closure (#65383).
+ return tcx.ty_error_with_message(
+ expr.span,
+ "break was outside loop, but no error was emitted",
+ );
+ };
+
+ if let Some(ref mut coerce) = ctxt.coerce {
+ if let Some(ref e) = expr_opt {
+ coerce.coerce(self, &cause, e, e_ty);
+ } else {
+ assert!(e_ty.is_unit());
+ let ty = coerce.expected_ty();
+ coerce.coerce_forced_unit(
+ self,
+ &cause,
+ &mut |mut err| {
+ self.suggest_mismatched_types_on_tail(
+ &mut err, expr, ty, e_ty, target_id,
+ );
+ if let Some(val) = ty_kind_suggestion(ty) {
+ let label = destination
+ .label
+ .map(|l| format!(" {}", l.ident))
+ .unwrap_or_else(String::new);
+ err.span_suggestion(
+ expr.span,
+ "give it a value of the expected type",
+ format!("break{label} {val}"),
+ Applicability::HasPlaceholders,
+ );
+ }
+ },
+ false,
+ );
+ }
+ } else {
+ // If `ctxt.coerce` is `None`, we can just ignore
+ // the type of the expression. This is because
+ // either this was a break *without* a value, in
+ // which case it is always a legal type (`()`), or
+ // else an error would have been flagged by the
+ // `loops` pass for using break with an expression
+ // where you are not supposed to.
+ assert!(expr_opt.is_none() || self.tcx.sess.has_errors().is_some());
+ }
+
+ // If we encountered a `break`, then (no surprise) it may be possible to break from the
+ // loop... unless the value being returned from the loop diverges itself, e.g.
+ // `break return 5` or `break loop {}`.
+ ctxt.may_break |= !self.diverges.get().is_always();
+
+ // the type of a `break` is always `!`, since it diverges
+ tcx.types.never
+ } else {
+ // Otherwise, we failed to find the enclosing loop;
+ // this can only happen if the `break` was not
+ // inside a loop at all, which is caught by the
+ // loop-checking pass.
+ let err = self.tcx.ty_error_with_message(
+ expr.span,
+ "break was outside loop, but no error was emitted",
+ );
+
+ // We still need to assign a type to the inner expression to
+ // prevent the ICE in #43162.
+ if let Some(e) = expr_opt {
+ self.check_expr_with_hint(e, err);
+
+ // ... except when we try to 'break rust;'.
+ // ICE this expression in particular (see #43162).
+ if let ExprKind::Path(QPath::Resolved(_, path)) = e.kind {
+ if path.segments.len() == 1 && path.segments[0].ident.name == sym::rust {
+ fatally_break_rust(self.tcx.sess);
+ }
+ }
+ }
+
+ // There was an error; make type-check fail.
+ err
+ }
+ }
+
+ fn check_expr_return(
+ &self,
+ expr_opt: Option<&'tcx hir::Expr<'tcx>>,
+ expr: &'tcx hir::Expr<'tcx>,
+ ) -> Ty<'tcx> {
+ if self.ret_coercion.is_none() {
+ let mut err = ReturnStmtOutsideOfFnBody {
+ span: expr.span,
+ encl_body_span: None,
+ encl_fn_span: None,
+ };
+
+ let encl_item_id = self.tcx.hir().get_parent_item(expr.hir_id);
+
+ if let Some(hir::Node::Item(hir::Item {
+ kind: hir::ItemKind::Fn(..),
+ span: encl_fn_span,
+ ..
+ }))
+ | Some(hir::Node::TraitItem(hir::TraitItem {
+ kind: hir::TraitItemKind::Fn(_, hir::TraitFn::Provided(_)),
+ span: encl_fn_span,
+ ..
+ }))
+ | Some(hir::Node::ImplItem(hir::ImplItem {
+ kind: hir::ImplItemKind::Fn(..),
+ span: encl_fn_span,
+ ..
+ })) = self.tcx.hir().find_by_def_id(encl_item_id.def_id)
+ {
+ // We are inside a function body, so reporting "return statement
+ // outside of function body" needs an explanation.
+
+ let encl_body_owner_id = self.tcx.hir().enclosing_body_owner(expr.hir_id);
+
+ // If this didn't hold, we would not have to report an error in
+ // the first place.
+ assert_ne!(encl_item_id.def_id, encl_body_owner_id);
+
+ let encl_body_id = self.tcx.hir().body_owned_by(encl_body_owner_id);
+ let encl_body = self.tcx.hir().body(encl_body_id);
+
+ err.encl_body_span = Some(encl_body.value.span);
+ err.encl_fn_span = Some(*encl_fn_span);
+ }
+
+ self.tcx.sess.emit_err(err);
+
+ if let Some(e) = expr_opt {
+ // We still have to type-check `e` (issue #86188), but calling
+ // `check_return_expr` only works inside fn bodies.
+ self.check_expr(e);
+ }
+ } else if let Some(e) = expr_opt {
+ if self.ret_coercion_span.get().is_none() {
+ self.ret_coercion_span.set(Some(e.span));
+ }
+ self.check_return_expr(e, true);
+ } else {
+ let mut coercion = self.ret_coercion.as_ref().unwrap().borrow_mut();
+ if self.ret_coercion_span.get().is_none() {
+ self.ret_coercion_span.set(Some(expr.span));
+ }
+ let cause = self.cause(expr.span, ObligationCauseCode::ReturnNoExpression);
+ if let Some((fn_decl, _)) = self.get_fn_decl(expr.hir_id) {
+ coercion.coerce_forced_unit(
+ self,
+ &cause,
+ &mut |db| {
+ let span = fn_decl.output.span();
+ if let Ok(snippet) = self.tcx.sess.source_map().span_to_snippet(span) {
+ db.span_label(
+ span,
+ format!("expected `{snippet}` because of this return type"),
+ );
+ }
+ },
+ true,
+ );
+ } else {
+ coercion.coerce_forced_unit(self, &cause, &mut |_| (), true);
+ }
+ }
+ self.tcx.types.never
+ }
+
+ /// `explicit_return` is `true` if we're checking an explicit `return expr`,
+ /// and `false` if we're checking a trailing expression.
+ pub(super) fn check_return_expr(
+ &self,
+ return_expr: &'tcx hir::Expr<'tcx>,
+ explicit_return: bool,
+ ) {
+ let ret_coercion = self.ret_coercion.as_ref().unwrap_or_else(|| {
+ span_bug!(return_expr.span, "check_return_expr called outside fn body")
+ });
+
+ let ret_ty = ret_coercion.borrow().expected_ty();
+ let return_expr_ty = self.check_expr_with_hint(return_expr, ret_ty);
+ let mut span = return_expr.span;
+ // Use the span of the trailing expression for our cause,
+ // not the span of the entire function
+ if !explicit_return {
+ if let ExprKind::Block(body, _) = return_expr.kind && let Some(last_expr) = body.expr {
+ span = last_expr.span;
+ }
+ }
+ ret_coercion.borrow_mut().coerce(
+ self,
+ &self.cause(span, ObligationCauseCode::ReturnValue(return_expr.hir_id)),
+ return_expr,
+ return_expr_ty,
+ );
+
+ if self.return_type_has_opaque {
+ // Point any obligations that were registered due to opaque type
+ // inference at the return expression.
+ self.select_obligations_where_possible(false, |errors| {
+ self.point_at_return_for_opaque_ty_error(errors, span, return_expr_ty);
+ });
+ }
+ }
+
+ fn point_at_return_for_opaque_ty_error(
+ &self,
+ errors: &mut Vec<traits::FulfillmentError<'tcx>>,
+ span: Span,
+ return_expr_ty: Ty<'tcx>,
+ ) {
+ // Don't point at the whole block if it's empty
+ if span == self.tcx.hir().span(self.body_id) {
+ return;
+ }
+ for err in errors {
+ let cause = &mut err.obligation.cause;
+ if let ObligationCauseCode::OpaqueReturnType(None) = cause.code() {
+ let new_cause = ObligationCause::new(
+ cause.span,
+ cause.body_id,
+ ObligationCauseCode::OpaqueReturnType(Some((return_expr_ty, span))),
+ );
+ *cause = new_cause;
+ }
+ }
+ }
+
+ pub(crate) fn check_lhs_assignable(
+ &self,
+ lhs: &'tcx hir::Expr<'tcx>,
+ err_code: &'static str,
+ op_span: Span,
+ adjust_err: impl FnOnce(&mut Diagnostic),
+ ) {
+ if lhs.is_syntactic_place_expr() {
+ return;
+ }
+
+ // FIXME: Make this use Diagnostic once error codes can be dynamically set.
+ let mut err = self.tcx.sess.struct_span_err_with_code(
+ op_span,
+ "invalid left-hand side of assignment",
+ DiagnosticId::Error(err_code.into()),
+ );
+ err.span_label(lhs.span, "cannot assign to this expression");
+
+ self.comes_from_while_condition(lhs.hir_id, |expr| {
+ err.span_suggestion_verbose(
+ expr.span.shrink_to_lo(),
+ "you might have meant to use pattern destructuring",
+ "let ",
+ Applicability::MachineApplicable,
+ );
+ });
+
+ adjust_err(&mut err);
+
+ err.emit();
+ }
+
+ // Check if an expression `original_expr_id` comes from the condition of a while loop,
+ // as opposed to the body of a while loop, which we can naively check by iterating
+ // parents until we find a loop...
+ pub(super) fn comes_from_while_condition(
+ &self,
+ original_expr_id: HirId,
+ then: impl FnOnce(&hir::Expr<'_>),
+ ) {
+ let mut parent = self.tcx.hir().get_parent_node(original_expr_id);
+ while let Some(node) = self.tcx.hir().find(parent) {
+ match node {
+ hir::Node::Expr(hir::Expr {
+ kind:
+ hir::ExprKind::Loop(
+ hir::Block {
+ expr:
+ Some(hir::Expr {
+ kind:
+ hir::ExprKind::Match(expr, ..) | hir::ExprKind::If(expr, ..),
+ ..
+ }),
+ ..
+ },
+ _,
+ hir::LoopSource::While,
+ _,
+ ),
+ ..
+ }) => {
+ // Check if our original expression is a child of the condition of a while loop
+ let expr_is_ancestor = std::iter::successors(Some(original_expr_id), |id| {
+ self.tcx.hir().find_parent_node(*id)
+ })
+ .take_while(|id| *id != parent)
+ .any(|id| id == expr.hir_id);
+ // if it is, then we have a situation like `while Some(0) = value.get(0) {`,
+ // where `while let` was more likely intended.
+ if expr_is_ancestor {
+ then(expr);
+ }
+ break;
+ }
+ hir::Node::Item(_)
+ | hir::Node::ImplItem(_)
+ | hir::Node::TraitItem(_)
+ | hir::Node::Crate(_) => break,
+ _ => {
+ parent = self.tcx.hir().get_parent_node(parent);
+ }
+ }
+ }
+ }
+
+ // A generic function for checking the 'then' and 'else' clauses in an 'if'
+ // or 'if-else' expression.
+ fn check_then_else(
+ &self,
+ cond_expr: &'tcx hir::Expr<'tcx>,
+ then_expr: &'tcx hir::Expr<'tcx>,
+ opt_else_expr: Option<&'tcx hir::Expr<'tcx>>,
+ sp: Span,
+ orig_expected: Expectation<'tcx>,
+ ) -> Ty<'tcx> {
+ let cond_ty = self.check_expr_has_type_or_error(cond_expr, self.tcx.types.bool, |_| {});
+
+ self.warn_if_unreachable(
+ cond_expr.hir_id,
+ then_expr.span,
+ "block in `if` or `while` expression",
+ );
+
+ let cond_diverges = self.diverges.get();
+ self.diverges.set(Diverges::Maybe);
+
+ let expected = orig_expected.adjust_for_branches(self);
+ let then_ty = self.check_expr_with_expectation(then_expr, expected);
+ let then_diverges = self.diverges.get();
+ self.diverges.set(Diverges::Maybe);
+
+ // We've already taken the expected type's preferences
+ // into account when typing the `then` branch. To figure
+ // out the initial shot at a LUB, we thus only consider
+ // `expected` if it represents a *hard* constraint
+ // (`only_has_type`); otherwise, we just go with a
+ // fresh type variable.
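+ // Illustrative example: in `let x: &dyn std::fmt::Debug = if c { &1u8 } else { &2u16 };`
+ // the hard `&dyn Debug` expectation is the coercion target and both branches
+ // coerce to it; without the annotation a fresh type variable is used and the
+ // branches must instead agree via LUB.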
+ let coerce_to_ty = expected.coercion_target_type(self, sp);
+ let mut coerce: DynamicCoerceMany<'_> = CoerceMany::new(coerce_to_ty);
+
+ coerce.coerce(self, &self.misc(sp), then_expr, then_ty);
+
+ if let Some(else_expr) = opt_else_expr {
+ let else_ty = self.check_expr_with_expectation(else_expr, expected);
+ let else_diverges = self.diverges.get();
+
+ let opt_suggest_box_span = self.opt_suggest_box_span(then_ty, else_ty, orig_expected);
+ let if_cause = self.if_cause(
+ sp,
+ cond_expr.span,
+ then_expr,
+ else_expr,
+ then_ty,
+ else_ty,
+ opt_suggest_box_span,
+ );
+
+ coerce.coerce(self, &if_cause, else_expr, else_ty);
+
+ // We won't diverge unless both branches do (or the condition does).
+ self.diverges.set(cond_diverges | then_diverges & else_diverges);
+ } else {
+ self.if_fallback_coercion(sp, then_expr, &mut coerce);
+
+ // If the condition is false we can't diverge.
+ self.diverges.set(cond_diverges);
+ }
+
+ let result_ty = coerce.complete(self);
+ if cond_ty.references_error() { self.tcx.ty_error() } else { result_ty }
+ }
+
+ /// Type check assignment expression `expr` of form `lhs = rhs`.
+ /// The expected type is `()` and is passed to the function for the purposes of diagnostics.
+ fn check_expr_assign(
+ &self,
+ expr: &'tcx hir::Expr<'tcx>,
+ expected: Expectation<'tcx>,
+ lhs: &'tcx hir::Expr<'tcx>,
+ rhs: &'tcx hir::Expr<'tcx>,
+ span: Span,
+ ) -> Ty<'tcx> {
+ let expected_ty = expected.coercion_target_type(self, expr.span);
+ if expected_ty == self.tcx.types.bool {
+ // The expected type is `bool` but this will result in `()` so we can reasonably
+ // say that the user intended to write `lhs == rhs` instead of `lhs = rhs`.
+ // The likely cause of this is `if foo = bar { .. }`.
+ let actual_ty = self.tcx.mk_unit();
+ let mut err = self.demand_suptype_diag(expr.span, expected_ty, actual_ty).unwrap();
+ let lhs_ty = self.check_expr(&lhs);
+ let rhs_ty = self.check_expr(&rhs);
+ let (applicability, eq) = if self.can_coerce(rhs_ty, lhs_ty) {
+ (Applicability::MachineApplicable, true)
+ } else if let ExprKind::Binary(
+ Spanned { node: hir::BinOpKind::And | hir::BinOpKind::Or, .. },
+ _,
+ rhs_expr,
+ ) = lhs.kind
+ {
+ // e.g. `if x == 1 && y = 2 { .. }`, which parses as `(x == 1 && y) = 2`;
+ // the `=` in `y = 2` was likely meant to be `==`.
+ let actual_lhs_ty = self.check_expr(&rhs_expr);
+ (Applicability::MaybeIncorrect, self.can_coerce(rhs_ty, actual_lhs_ty))
+ } else if let ExprKind::Binary(
+ Spanned { node: hir::BinOpKind::And | hir::BinOpKind::Or, .. },
+ lhs_expr,
+ _,
+ ) = rhs.kind
+ {
+ // e.g. `if x = 1 && y == 2 { .. }`, which parses as `x = (1 && y == 2)`;
+ // the `=` in `x = 1` was likely meant to be `==`.
+ let actual_rhs_ty = self.check_expr(&lhs_expr);
+ (Applicability::MaybeIncorrect, self.can_coerce(actual_rhs_ty, lhs_ty))
+ } else {
+ (Applicability::MaybeIncorrect, false)
+ };
+ if !lhs.is_syntactic_place_expr()
+ && lhs.is_approximately_pattern()
+ && !matches!(lhs.kind, hir::ExprKind::Lit(_))
+ {
+ // Do not suggest `if let x = y` as `==` is way more likely to be the intention.
+ let hir = self.tcx.hir();
+ if let hir::Node::Expr(hir::Expr { kind: ExprKind::If { .. }, .. }) =
+ hir.get(hir.get_parent_node(hir.get_parent_node(expr.hir_id)))
+ {
+ err.span_suggestion_verbose(
+ expr.span.shrink_to_lo(),
+ "you might have meant to use pattern matching",
+ "let ",
+ applicability,
+ );
+ };
+ }
+ if eq {
+ err.span_suggestion_verbose(
+ span.shrink_to_hi(),
+ "you might have meant to compare for equality",
+ '=',
+ applicability,
+ );
+ }
+
+ // If the assignment expression itself is ill-formed, don't
+ // bother emitting another error
+ if lhs_ty.references_error() || rhs_ty.references_error() {
+ err.delay_as_bug()
+ } else {
+ err.emit();
+ }
+ return self.tcx.ty_error();
+ }
+
+ let lhs_ty = self.check_expr_with_needs(&lhs, Needs::MutPlace);
+
+ let suggest_deref_binop = |err: &mut Diagnostic, rhs_ty: Ty<'tcx>| {
+ if let Some(lhs_deref_ty) = self.deref_once_mutably_for_diagnostic(lhs_ty) {
+ // Can only assign if the type is sized, so if `DerefMut` yields a type that is
+ // unsized, do not suggest dereferencing it.
+ let lhs_deref_ty_is_sized = self
+ .infcx
+ .type_implements_trait(
+ self.tcx.lang_items().sized_trait().unwrap(),
+ lhs_deref_ty,
+ ty::List::empty(),
+ self.param_env,
+ )
+ .may_apply();
+ if lhs_deref_ty_is_sized && self.can_coerce(rhs_ty, lhs_deref_ty) {
+ err.span_suggestion_verbose(
+ lhs.span.shrink_to_lo(),
+ "consider dereferencing here to assign to the mutably borrowed value",
+ "*",
+ Applicability::MachineApplicable,
+ );
+ }
+ }
+ };
+
+ // This is (basically) inlined `check_expr_coercable_to_type`, but we want
+ // to suggest an additional fixup here in `suggest_deref_binop`.
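+ // Illustrative example: for `x = 1` where `x: &mut i32`, the extra fixup
+ // suggests assigning through the reference instead: `*x = 1`.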
+ let rhs_ty = self.check_expr_with_hint(&rhs, lhs_ty);
+ if let (_, Some(mut diag)) =
+ self.demand_coerce_diag(rhs, rhs_ty, lhs_ty, Some(lhs), AllowTwoPhase::No)
+ {
+ suggest_deref_binop(&mut diag, rhs_ty);
+ diag.emit();
+ }
+
+ self.check_lhs_assignable(lhs, "E0070", span, |err| {
+ if let Some(rhs_ty) = self.typeck_results.borrow().expr_ty_opt(rhs) {
+ suggest_deref_binop(err, rhs_ty);
+ }
+ });
+
+ self.require_type_is_sized(lhs_ty, lhs.span, traits::AssignmentLhsSized);
+
+ if lhs_ty.references_error() || rhs_ty.references_error() {
+ self.tcx.ty_error()
+ } else {
+ self.tcx.mk_unit()
+ }
+ }
+
+ pub(super) fn check_expr_let(&self, let_expr: &'tcx hir::Let<'tcx>) -> Ty<'tcx> {
+ // For `let` statements, this is done in `check_stmt`.
+ let init = let_expr.init;
+ self.warn_if_unreachable(init.hir_id, init.span, "block in `let` expression");
+ // Otherwise, check exactly as a `let` statement.
+ self.check_decl(let_expr.into());
+ // But return `bool`, since this is a boolean expression.
+ self.tcx.types.bool
+ }
+
+ fn check_expr_loop(
+ &self,
+ body: &'tcx hir::Block<'tcx>,
+ source: hir::LoopSource,
+ expected: Expectation<'tcx>,
+ expr: &'tcx hir::Expr<'tcx>,
+ ) -> Ty<'tcx> {
+ let coerce = match source {
+ // you can only use break with a value from a normal `loop { }`
+ hir::LoopSource::Loop => {
+ let coerce_to = expected.coercion_target_type(self, body.span);
+ Some(CoerceMany::new(coerce_to))
+ }
+
+ hir::LoopSource::While | hir::LoopSource::ForLoop => None,
+ };
+
+ let ctxt = BreakableCtxt {
+ coerce,
+ may_break: false, // Will get updated if/when we find a `break`.
+ };
+
+ let (ctxt, ()) = self.with_breakable_ctxt(expr.hir_id, ctxt, || {
+ self.check_block_no_value(&body);
+ });
+
+ if ctxt.may_break {
+ // No way to know whether it's diverging because
+ // of a `break` or an outer `break` or `return`.
+ self.diverges.set(Diverges::Maybe);
+ }
+
+ // If we permit `break` with a value, then the result type is
+ // the LUB of the breaks (possibly `!` if there are none); else, it
+ // is `()`. This makes sense because infinite loops
+ // (which would have type `!`) are only possible iff we
+ // permit `break` with a value [1].
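+ // Illustrative example: `let x = loop { break 42 };` gives `x: i32` (the LUB
+ // of the `break` values), while a `while` or `for` loop always has type `()`.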
+ if ctxt.coerce.is_none() && !ctxt.may_break {
+ // [1]
+ self.tcx.sess.delay_span_bug(body.span, "no coercion, but loop may not break");
+ }
+ ctxt.coerce.map(|c| c.complete(self)).unwrap_or_else(|| self.tcx.mk_unit())
+ }
+
+ /// Checks a method call.
+ fn check_method_call(
+ &self,
+ expr: &'tcx hir::Expr<'tcx>,
+ segment: &hir::PathSegment<'_>,
+ rcvr: &'tcx hir::Expr<'tcx>,
+ args: &'tcx [hir::Expr<'tcx>],
+ expected: Expectation<'tcx>,
+ ) -> Ty<'tcx> {
+ let rcvr_t = self.check_expr(&rcvr);
+ // no need to check for bot/err -- callee does that
+ let rcvr_t = self.structurally_resolved_type(rcvr.span, rcvr_t);
+ let span = segment.ident.span;
+
+ let method = match self.lookup_method(rcvr_t, segment, span, expr, rcvr, args) {
+ Ok(method) => {
+ // We could add a "consider `foo::<params>`" suggestion here, but I wasn't able to
+ // trigger this codepath causing `structurally_resolved_type` to emit an error.
+
+ self.write_method_call(expr.hir_id, method);
+ Ok(method)
+ }
+ Err(error) => {
+ if segment.ident.name != kw::Empty {
+ if let Some(mut err) = self.report_method_error(
+ span,
+ rcvr_t,
+ segment.ident,
+ SelfSource::MethodCall(rcvr),
+ error,
+ Some((rcvr, args)),
+ ) {
+ err.emit();
+ }
+ }
+ Err(())
+ }
+ };
+
+ // Call the generic checker.
+ self.check_method_argument_types(span, expr, method, &args, DontTupleArguments, expected)
+ }
+
+ fn check_expr_cast(
+ &self,
+ e: &'tcx hir::Expr<'tcx>,
+ t: &'tcx hir::Ty<'tcx>,
+ expr: &'tcx hir::Expr<'tcx>,
+ ) -> Ty<'tcx> {
+ // Find the type of `e`. Supply hints based on the type we are casting to,
+ // if appropriate.
+ let t_cast = self.to_ty_saving_user_provided_ty(t);
+ let t_cast = self.resolve_vars_if_possible(t_cast);
+ let t_expr = self.check_expr_with_expectation(e, ExpectCastableToType(t_cast));
+ let t_expr = self.resolve_vars_if_possible(t_expr);
+
+ // Eagerly check for some obvious errors.
+ if t_expr.references_error() || t_cast.references_error() {
+ self.tcx.ty_error()
+ } else {
+ // Defer other checks until we're done type checking.
+ let mut deferred_cast_checks = self.deferred_cast_checks.borrow_mut();
+ match cast::CastCheck::new(
+ self,
+ e,
+ t_expr,
+ t_cast,
+ t.span,
+ expr.span,
+ self.param_env.constness(),
+ ) {
+ Ok(cast_check) => {
+ debug!(
+ "check_expr_cast: deferring cast from {:?} to {:?}: {:?}",
+ t_cast, t_expr, cast_check,
+ );
+ deferred_cast_checks.push(cast_check);
+ t_cast
+ }
+ Err(_) => self.tcx.ty_error(),
+ }
+ }
+ }
+
+ fn check_expr_array(
+ &self,
+ args: &'tcx [hir::Expr<'tcx>],
+ expected: Expectation<'tcx>,
+ expr: &'tcx hir::Expr<'tcx>,
+ ) -> Ty<'tcx> {
+ let element_ty = if !args.is_empty() {
+ let coerce_to = expected
+ .to_option(self)
+ .and_then(|uty| match *uty.kind() {
+ ty::Array(ty, _) | ty::Slice(ty) => Some(ty),
+ _ => None,
+ })
+ .unwrap_or_else(|| {
+ self.next_ty_var(TypeVariableOrigin {
+ kind: TypeVariableOriginKind::TypeInference,
+ span: expr.span,
+ })
+ });
+ let mut coerce = CoerceMany::with_coercion_sites(coerce_to, args);
+ assert_eq!(self.diverges.get(), Diverges::Maybe);
+ for e in args {
+ let e_ty = self.check_expr_with_hint(e, coerce_to);
+ let cause = self.misc(e.span);
+ coerce.coerce(self, &cause, e, e_ty);
+ }
+ coerce.complete(self)
+ } else {
+ self.next_ty_var(TypeVariableOrigin {
+ kind: TypeVariableOriginKind::TypeInference,
+ span: expr.span,
+ })
+ };
+ let array_len = args.len() as u64;
+ self.suggest_array_len(expr, array_len);
+ self.tcx.mk_array(element_ty, array_len)
+ }
+
+ fn suggest_array_len(&self, expr: &'tcx hir::Expr<'tcx>, array_len: u64) {
+ let parent_node = self.tcx.hir().parent_iter(expr.hir_id).find(|(_, node)| {
+ !matches!(node, hir::Node::Expr(hir::Expr { kind: hir::ExprKind::AddrOf(..), .. }))
+ });
+ let Some((_,
+ hir::Node::Local(hir::Local { ty: Some(ty), .. })
+ | hir::Node::Item(hir::Item { kind: hir::ItemKind::Const(ty, _), .. }))
+ ) = parent_node else {
+ return
+ };
+ if let hir::TyKind::Array(_, length) = ty.peel_refs().kind
+ && let hir::ArrayLen::Body(hir::AnonConst { hir_id, .. }) = length
+ && let Some(span) = self.tcx.hir().opt_span(hir_id)
+ {
+ match self.tcx.sess.diagnostic().steal_diagnostic(span, StashKey::UnderscoreForArrayLengths) {
+ Some(mut err) => {
+ err.span_suggestion(
+ span,
+ "consider specifying the array length",
+ array_len,
+ Applicability::MaybeIncorrect,
+ );
+ err.emit();
+ }
+ None => ()
+ }
+ }
+ }
+
+ fn check_expr_const_block(
+ &self,
+ anon_const: &'tcx hir::AnonConst,
+ expected: Expectation<'tcx>,
+ _expr: &'tcx hir::Expr<'tcx>,
+ ) -> Ty<'tcx> {
+ let body = self.tcx.hir().body(anon_const.body);
+
+ // Create a new function context.
+ let fcx = FnCtxt::new(self, self.param_env.with_const(), body.value.hir_id);
+ crate::GatherLocalsVisitor::new(&fcx).visit_body(body);
+
+ let ty = fcx.check_expr_with_expectation(&body.value, expected);
+ fcx.require_type_is_sized(ty, body.value.span, traits::ConstSized);
+ fcx.write_ty(anon_const.hir_id, ty);
+ ty
+ }
+
+ fn check_expr_repeat(
+ &self,
+ element: &'tcx hir::Expr<'tcx>,
+ count: &'tcx hir::ArrayLen,
+ expected: Expectation<'tcx>,
+ expr: &'tcx hir::Expr<'tcx>,
+ ) -> Ty<'tcx> {
+ let tcx = self.tcx;
+ let count = self.array_length_to_const(count);
+ if let Some(count) = count.try_eval_usize(tcx, self.param_env) {
+ self.suggest_array_len(expr, count);
+ }
+
+ let uty = match expected {
+ ExpectHasType(uty) => match *uty.kind() {
+ ty::Array(ty, _) | ty::Slice(ty) => Some(ty),
+ _ => None,
+ },
+ _ => None,
+ };
+
+ let (element_ty, t) = match uty {
+ Some(uty) => {
+ self.check_expr_coercable_to_type(&element, uty, None);
+ (uty, uty)
+ }
+ None => {
+ let ty = self.next_ty_var(TypeVariableOrigin {
+ kind: TypeVariableOriginKind::MiscVariable,
+ span: element.span,
+ });
+ let element_ty = self.check_expr_has_type_or_error(&element, ty, |_| {});
+ (element_ty, ty)
+ }
+ };
+
+ if element_ty.references_error() {
+ return tcx.ty_error();
+ }
+
+ self.check_repeat_element_needs_copy_bound(element, count, element_ty);
+
+ tcx.mk_ty(ty::Array(t, count))
+ }
+
+ fn check_repeat_element_needs_copy_bound(
+ &self,
+ element: &hir::Expr<'_>,
+ count: ty::Const<'tcx>,
+ element_ty: Ty<'tcx>,
+ ) {
+ let tcx = self.tcx;
+ // Actual constants as the repeat element get inserted repeatedly instead of getting copied via Copy.
+ match &element.kind {
+ hir::ExprKind::ConstBlock(..) => return,
+ hir::ExprKind::Path(qpath) => {
+ let res = self.typeck_results.borrow().qpath_res(qpath, element.hir_id);
+ if let Res::Def(DefKind::Const | DefKind::AssocConst | DefKind::AnonConst, _) = res
+ {
+ return;
+ }
+ }
+ _ => {}
+ }
+ // If someone calls a const fn, they can extract that call out into a separate constant (or a const
+ // block in the future), so we check for that in order to mention it in the diagnostic. This does not affect typeck.
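+ // Illustrative (hypothetical names): `[make_thing(); 8]` with a non-`Copy`
+ // element type can be rewritten as `const ELEM: Thing = make_thing(); [ELEM; 8]`,
+ // since a path to a constant is inserted repeatedly instead of copied.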
+ let is_const_fn = match element.kind {
+ hir::ExprKind::Call(func, _args) => match *self.node_ty(func.hir_id).kind() {
+ ty::FnDef(def_id, _) => tcx.is_const_fn(def_id),
+ _ => false,
+ },
+ _ => false,
+ };
+
+ // If the length is 0, we don't create any elements, so we don't copy any. If the length is 1, we
+ // don't copy that one element; we move it. Only check for `Copy` if the length is larger.
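+ // Illustrative example: `[x; 0]` and `[x; 1]` do not require a `Copy` bound on
+ // the element type here, while `[x; 2]` (or an unevaluatable length) does.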
+ if count.try_eval_usize(tcx, self.param_env).map_or(true, |len| len > 1) {
+ let lang_item = self.tcx.require_lang_item(LangItem::Copy, None);
+ let code = traits::ObligationCauseCode::RepeatElementCopy { is_const_fn };
+ self.require_type_meets(element_ty, element.span, code, lang_item);
+ }
+ }
+
+ fn check_expr_tuple(
+ &self,
+ elts: &'tcx [hir::Expr<'tcx>],
+ expected: Expectation<'tcx>,
+ expr: &'tcx hir::Expr<'tcx>,
+ ) -> Ty<'tcx> {
+ let flds = expected.only_has_type(self).and_then(|ty| {
+ let ty = self.resolve_vars_with_obligations(ty);
+ match ty.kind() {
+ ty::Tuple(flds) => Some(&flds[..]),
+ _ => None,
+ }
+ });
+
+ let elt_ts_iter = elts.iter().enumerate().map(|(i, e)| match flds {
+ Some(fs) if i < fs.len() => {
+ let ety = fs[i];
+ self.check_expr_coercable_to_type(&e, ety, None);
+ ety
+ }
+ _ => self.check_expr_with_expectation(&e, NoExpectation),
+ });
+ let tuple = self.tcx.mk_tup(elt_ts_iter);
+ if tuple.references_error() {
+ self.tcx.ty_error()
+ } else {
+ self.require_type_is_sized(tuple, expr.span, traits::TupleInitializerSized);
+ tuple
+ }
+ }
+
+ fn check_expr_struct(
+ &self,
+ expr: &hir::Expr<'_>,
+ expected: Expectation<'tcx>,
+ qpath: &QPath<'_>,
+ fields: &'tcx [hir::ExprField<'tcx>],
+ base_expr: &'tcx Option<&'tcx hir::Expr<'tcx>>,
+ ) -> Ty<'tcx> {
+ // Find the relevant variant
+ let Some((variant, adt_ty)) = self.check_struct_path(qpath, expr.hir_id) else {
+ self.check_struct_fields_on_error(fields, base_expr);
+ return self.tcx.ty_error();
+ };
+
+ // Prohibit struct expressions when non-exhaustive flag is set.
+ let adt = adt_ty.ty_adt_def().expect("`check_struct_path` returned non-ADT type");
+ if !adt.did().is_local() && variant.is_field_list_non_exhaustive() {
+ self.tcx
+ .sess
+ .emit_err(StructExprNonExhaustive { span: expr.span, what: adt.variant_descr() });
+ }
+
+ self.check_expr_struct_fields(
+ adt_ty,
+ expected,
+ expr.hir_id,
+ qpath.span(),
+ variant,
+ fields,
+ base_expr,
+ expr.span,
+ );
+
+ self.require_type_is_sized(adt_ty, expr.span, traits::StructInitializerSized);
+ adt_ty
+ }
+
+ fn check_expr_struct_fields(
+ &self,
+ adt_ty: Ty<'tcx>,
+ expected: Expectation<'tcx>,
+ expr_id: hir::HirId,
+ span: Span,
+ variant: &'tcx ty::VariantDef,
+ ast_fields: &'tcx [hir::ExprField<'tcx>],
+ base_expr: &'tcx Option<&'tcx hir::Expr<'tcx>>,
+ expr_span: Span,
+ ) {
+ let tcx = self.tcx;
+
+ let expected_inputs =
+ self.expected_inputs_for_expected_output(span, expected, adt_ty, &[adt_ty]);
+ let adt_ty_hint = if let Some(expected_inputs) = expected_inputs {
+ expected_inputs.get(0).cloned().unwrap_or(adt_ty)
+ } else {
+ adt_ty
+ };
+ // Re-link the regions that EIfEO (`expected_inputs_for_expected_output`) can erase.
+ self.demand_eqtype(span, adt_ty_hint, adt_ty);
+
+ let ty::Adt(adt, substs) = adt_ty.kind() else {
+ span_bug!(span, "non-ADT passed to check_expr_struct_fields");
+ };
+ let adt_kind = adt.adt_kind();
+
+ let mut remaining_fields = variant
+ .fields
+ .iter()
+ .enumerate()
+ .map(|(i, field)| (field.ident(tcx).normalize_to_macros_2_0(), (i, field)))
+ .collect::<FxHashMap<_, _>>();
+
+ let mut seen_fields = FxHashMap::default();
+
+ let mut error_happened = false;
+
+ // Type-check each field.
+ for (idx, field) in ast_fields.iter().enumerate() {
+ let ident = tcx.adjust_ident(field.ident, variant.def_id);
+ let field_type = if let Some((i, v_field)) = remaining_fields.remove(&ident) {
+ seen_fields.insert(ident, field.span);
+ self.write_field_index(field.hir_id, i);
+
+ // We don't look at stability attributes on
+ // struct-like enums (yet...), but it's definitely not
+ // a bug to have constructed one.
+ if adt_kind != AdtKind::Enum {
+ tcx.check_stability(v_field.did, Some(expr_id), field.span, None);
+ }
+
+ self.field_ty(field.span, v_field, substs)
+ } else {
+ error_happened = true;
+ if let Some(prev_span) = seen_fields.get(&ident) {
+ tcx.sess.emit_err(FieldMultiplySpecifiedInInitializer {
+ span: field.ident.span,
+ prev_span: *prev_span,
+ ident,
+ });
+ } else {
+ self.report_unknown_field(
+ adt_ty,
+ variant,
+ field,
+ ast_fields,
+ adt.variant_descr(),
+ expr_span,
+ );
+ }
+
+ tcx.ty_error()
+ };
+
+ // Make sure to give a type to the field even if there's
+ // an error, so we can continue type-checking.
+ let ty = self.check_expr_with_hint(&field.expr, field_type);
+ let (_, diag) =
+ self.demand_coerce_diag(&field.expr, ty, field_type, None, AllowTwoPhase::No);
+
+ if let Some(mut diag) = diag {
+ if idx == ast_fields.len() - 1 && remaining_fields.is_empty() {
+ self.suggest_fru_from_range(field, variant, substs, &mut diag);
+ }
+ diag.emit();
+ }
+ }
+
+ // Make sure the programmer specified the correct number of fields.
+ if adt_kind == AdtKind::Union {
+ if ast_fields.len() != 1 {
+ struct_span_err!(
+ tcx.sess,
+ span,
+ E0784,
+ "union expressions should have exactly one field",
+ )
+ .emit();
+ }
+ }
+
+ // If check_expr_struct_fields hit an error, do not attempt to populate
+ // the fields with the base_expr. This could cause us to hit errors later
+ // when certain fields are assumed to exist that in fact do not.
+ if error_happened {
+ return;
+ }
+
+ if let Some(base_expr) = base_expr {
+ // FIXME: We are currently creating two branches here in order to maintain
+ // consistency. But they should be merged as much as possible.
+ let fru_tys = if self.tcx.features().type_changing_struct_update {
+ if adt.is_struct() {
+ // Make some fresh substitutions for our ADT type.
+ let fresh_substs = self.fresh_substs_for_item(base_expr.span, adt.did());
+ // We do subtyping on the FRU fields first, so we can
+ // learn exactly what types the base expr needs to be
+ // constrained to in order to be compatible with the struct
+ // type we expect from the expectation value.
+ let fru_tys = variant
+ .fields
+ .iter()
+ .map(|f| {
+ let fru_ty = self.normalize_associated_types_in(
+ expr_span,
+ self.field_ty(base_expr.span, f, fresh_substs),
+ );
+ let ident = self.tcx.adjust_ident(f.ident(self.tcx), variant.def_id);
+ if let Some(_) = remaining_fields.remove(&ident) {
+ let target_ty = self.field_ty(base_expr.span, f, substs);
+ let cause = self.misc(base_expr.span);
+ match self.at(&cause, self.param_env).sup(target_ty, fru_ty) {
+ Ok(InferOk { obligations, value: () }) => {
+ self.register_predicates(obligations)
+ }
+ Err(_) => {
+ // This should never happen, since we're just subtyping the
+ // remaining_fields, but it's fine to emit this, I guess.
+ self.err_ctxt()
+ .report_mismatched_types(
+ &cause,
+ target_ty,
+ fru_ty,
+ FieldMisMatch(variant.name, ident.name),
+ )
+ .emit();
+ }
+ }
+ }
+ self.resolve_vars_if_possible(fru_ty)
+ })
+ .collect();
+ // The use of fresh substs that we have subtyped against
+ // our base ADT type's fields allows us to guide inference
+ // along so that, e.g.
+ // ```
+ // MyStruct<'a, F1, F2, const C: usize> {
+ // f: F1,
+ // // Other fields that reference `'a`, `F2`, and `C`
+ // }
+ //
+ // let x = MyStruct {
+ // f: 1usize,
+ // ..other_struct
+ // };
+ // ```
+ // will have the `other_struct` expression constrained to
+ // `MyStruct<'a, _, F2, C>`, as opposed to just `_`...
+ // This is important to allow coercions to happen in
+ // `other_struct` itself. See `coerce-in-base-expr.rs`.
+ let fresh_base_ty = self.tcx.mk_adt(*adt, fresh_substs);
+ self.check_expr_has_type_or_error(
+ base_expr,
+ self.resolve_vars_if_possible(fresh_base_ty),
+ |_| {},
+ );
+ fru_tys
+ } else {
+ // Check the base_expr, regardless of a bad expected adt_ty, so we can get
+ // type errors on that expression, too.
+ self.check_expr(base_expr);
+ self.tcx
+ .sess
+ .emit_err(FunctionalRecordUpdateOnNonStruct { span: base_expr.span });
+ return;
+ }
+ } else {
+ self.check_expr_has_type_or_error(base_expr, adt_ty, |_| {
+ let base_ty = self.typeck_results.borrow().expr_ty(*base_expr);
+ let same_adt = match (adt_ty.kind(), base_ty.kind()) {
+ (ty::Adt(adt, _), ty::Adt(base_adt, _)) if adt == base_adt => true,
+ _ => false,
+ };
+ if self.tcx.sess.is_nightly_build() && same_adt {
+ feature_err(
+ &self.tcx.sess.parse_sess,
+ sym::type_changing_struct_update,
+ base_expr.span,
+ "type changing struct updating is experimental",
+ )
+ .emit();
+ }
+ });
+ match adt_ty.kind() {
+ ty::Adt(adt, substs) if adt.is_struct() => variant
+ .fields
+ .iter()
+ .map(|f| {
+ self.normalize_associated_types_in(expr_span, f.ty(self.tcx, substs))
+ })
+ .collect(),
+ _ => {
+ self.tcx
+ .sess
+ .emit_err(FunctionalRecordUpdateOnNonStruct { span: base_expr.span });
+ return;
+ }
+ }
+ };
+ self.typeck_results.borrow_mut().fru_field_types_mut().insert(expr_id, fru_tys);
+ } else if adt_kind != AdtKind::Union && !remaining_fields.is_empty() {
+ debug!(?remaining_fields);
+ let private_fields: Vec<&ty::FieldDef> = variant
+ .fields
+ .iter()
+ .filter(|field| !field.vis.is_accessible_from(tcx.parent_module(expr_id), tcx))
+ .collect();
+
+ if !private_fields.is_empty() {
+ self.report_private_fields(adt_ty, span, private_fields, ast_fields);
+ } else {
+ self.report_missing_fields(
+ adt_ty,
+ span,
+ remaining_fields,
+ variant,
+ ast_fields,
+ substs,
+ );
+ }
+ }
+ }
+
+ fn check_struct_fields_on_error(
+ &self,
+ fields: &'tcx [hir::ExprField<'tcx>],
+ base_expr: &'tcx Option<&'tcx hir::Expr<'tcx>>,
+ ) {
+ for field in fields {
+ self.check_expr(&field.expr);
+ }
+ if let Some(base) = *base_expr {
+ self.check_expr(&base);
+ }
+ }
+
+ /// Report an error for a struct field expression when there are fields which aren't provided.
+ ///
+ /// ```text
+ /// error: missing field `you_can_use_this_field` in initializer of `foo::Foo`
+ /// --> src/main.rs:8:5
+ /// |
+ /// 8 | foo::Foo {};
+ /// | ^^^^^^^^ missing `you_can_use_this_field`
+ ///
+ /// error: aborting due to previous error
+ /// ```
+ fn report_missing_fields(
+ &self,
+ adt_ty: Ty<'tcx>,
+ span: Span,
+ remaining_fields: FxHashMap<Ident, (usize, &ty::FieldDef)>,
+ variant: &'tcx ty::VariantDef,
+ ast_fields: &'tcx [hir::ExprField<'tcx>],
+ substs: SubstsRef<'tcx>,
+ ) {
+ let len = remaining_fields.len();
+
+ let mut displayable_field_names: Vec<&str> =
+ remaining_fields.keys().map(|ident| ident.as_str()).collect();
+ // sorting &str primitives here, sort_unstable is ok
+ displayable_field_names.sort_unstable();
+
+ let mut truncated_fields_error = String::new();
+ let remaining_fields_names = match &displayable_field_names[..] {
+ [field1] => format!("`{}`", field1),
+ [field1, field2] => format!("`{field1}` and `{field2}`"),
+ [field1, field2, field3] => format!("`{field1}`, `{field2}` and `{field3}`"),
+ _ => {
+ truncated_fields_error =
+ format!(" and {} other field{}", len - 3, pluralize!(len - 3));
+ displayable_field_names
+ .iter()
+ .take(3)
+ .map(|n| format!("`{n}`"))
+ .collect::<Vec<_>>()
+ .join(", ")
+ }
+ };
+
+ let mut err = struct_span_err!(
+ self.tcx.sess,
+ span,
+ E0063,
+ "missing field{} {}{} in initializer of `{}`",
+ pluralize!(len),
+ remaining_fields_names,
+ truncated_fields_error,
+ adt_ty
+ );
+ err.span_label(span, format!("missing {remaining_fields_names}{truncated_fields_error}"));
+
+ if let Some(last) = ast_fields.last() {
+ self.suggest_fru_from_range(last, variant, substs, &mut err);
+ }
+
+ err.emit();
+ }
+
+ /// If the last field is a range literal, but it isn't supposed to be, then they probably
+ /// meant to use functional update syntax.
+ fn suggest_fru_from_range(
+ &self,
+ last_expr_field: &hir::ExprField<'tcx>,
+ variant: &ty::VariantDef,
+ substs: SubstsRef<'tcx>,
+ err: &mut Diagnostic,
+ ) {
+ // I don't use 'is_range_literal' because only double-sided, half-open ranges count.
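+ // Illustrative (hypothetical type): `Foo { x: 0, y: 1..Default::default() }`
+ // where the user meant `Foo { x: 0, y: 1, ..Default::default() }` -- suggest
+ // inserting the missing comma.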
+ if let ExprKind::Struct(
+ QPath::LangItem(LangItem::Range, ..),
+ &[ref range_start, ref range_end],
+ _,
+ ) = last_expr_field.expr.kind
+ && let variant_field =
+ variant.fields.iter().find(|field| field.ident(self.tcx) == last_expr_field.ident)
+ && let range_def_id = self.tcx.lang_items().range_struct()
+ && variant_field
+ .and_then(|field| field.ty(self.tcx, substs).ty_adt_def())
+ .map(|adt| adt.did())
+ != range_def_id
+ {
+ let instead = self
+ .tcx
+ .sess
+ .source_map()
+ .span_to_snippet(range_end.expr.span)
+ .map(|s| format!(" from `{s}`"))
+ .unwrap_or_default();
+ err.span_suggestion(
+ range_start.span.shrink_to_hi(),
+ &format!("to set the remaining fields{instead}, separate the last named field with a comma"),
+ ",",
+ Applicability::MaybeIncorrect,
+ );
+ }
+ }
+
+ /// Report an error for a struct field expression when there are invisible fields.
+ ///
+ /// ```text
+ /// error: cannot construct `Foo` with struct literal syntax due to private fields
+ /// --> src/main.rs:8:5
+ /// |
+ /// 8 | foo::Foo {};
+ /// | ^^^^^^^^
+ ///
+ /// error: aborting due to previous error
+ /// ```
+ fn report_private_fields(
+ &self,
+ adt_ty: Ty<'tcx>,
+ span: Span,
+ private_fields: Vec<&ty::FieldDef>,
+ used_fields: &'tcx [hir::ExprField<'tcx>],
+ ) {
+ let mut err = self.tcx.sess.struct_span_err(
+ span,
+ &format!(
+ "cannot construct `{adt_ty}` with struct literal syntax due to private fields",
+ ),
+ );
+ let (used_private_fields, remaining_private_fields): (
+ Vec<(Symbol, Span, bool)>,
+ Vec<(Symbol, Span, bool)>,
+ ) = private_fields
+ .iter()
+ .map(|field| {
+ match used_fields.iter().find(|used_field| field.name == used_field.ident.name) {
+ Some(used_field) => (field.name, used_field.span, true),
+ None => (field.name, self.tcx.def_span(field.did), false),
+ }
+ })
+ .partition(|field| field.2);
+ err.span_labels(used_private_fields.iter().map(|(_, span, _)| *span), "private field");
+ if !remaining_private_fields.is_empty() {
+ let remaining_private_fields_len = remaining_private_fields.len();
+ let names = match &remaining_private_fields
+ .iter()
+ .map(|(name, _, _)| name)
+ .collect::<Vec<_>>()[..]
+ {
+ _ if remaining_private_fields_len > 6 => String::new(),
+ [name] => format!("`{name}` "),
+ [names @ .., last] => {
+ let names = names.iter().map(|name| format!("`{name}`")).collect::<Vec<_>>();
+ format!("{} and `{last}` ", names.join(", "))
+ }
+ [] => unreachable!(),
+ };
+ err.note(format!(
+ "... and other private field{s} {names}that {were} not provided",
+ s = pluralize!(remaining_private_fields_len),
+ were = pluralize!("was", remaining_private_fields_len),
+ ));
+ }
+ err.emit();
+ }
+
+ fn report_unknown_field(
+ &self,
+ ty: Ty<'tcx>,
+ variant: &'tcx ty::VariantDef,
+ field: &hir::ExprField<'_>,
+ skip_fields: &[hir::ExprField<'_>],
+ kind_name: &str,
+ expr_span: Span,
+ ) {
+ if variant.is_recovered() {
+ self.set_tainted_by_errors();
+ return;
+ }
+ let mut err = self.err_ctxt().type_error_struct_with_diag(
+ field.ident.span,
+ |actual| match ty.kind() {
+ ty::Adt(adt, ..) if adt.is_enum() => struct_span_err!(
+ self.tcx.sess,
+ field.ident.span,
+ E0559,
+ "{} `{}::{}` has no field named `{}`",
+ kind_name,
+ actual,
+ variant.name,
+ field.ident
+ ),
+ _ => struct_span_err!(
+ self.tcx.sess,
+ field.ident.span,
+ E0560,
+ "{} `{}` has no field named `{}`",
+ kind_name,
+ actual,
+ field.ident
+ ),
+ },
+ ty,
+ );
+
+ let variant_ident_span = self.tcx.def_ident_span(variant.def_id).unwrap();
+ match variant.ctor_kind {
+ CtorKind::Fn => match ty.kind() {
+ ty::Adt(adt, ..) if adt.is_enum() => {
+ err.span_label(
+ variant_ident_span,
+ format!(
+ "`{adt}::{variant}` defined here",
+ adt = ty,
+ variant = variant.name,
+ ),
+ );
+ err.span_label(field.ident.span, "field does not exist");
+ err.span_suggestion_verbose(
+ expr_span,
+ &format!(
+ "`{adt}::{variant}` is a tuple {kind_name}, use the appropriate syntax",
+ adt = ty,
+ variant = variant.name,
+ ),
+ format!(
+ "{adt}::{variant}(/* fields */)",
+ adt = ty,
+ variant = variant.name,
+ ),
+ Applicability::HasPlaceholders,
+ );
+ }
+ _ => {
+ err.span_label(variant_ident_span, format!("`{adt}` defined here", adt = ty));
+ err.span_label(field.ident.span, "field does not exist");
+ err.span_suggestion_verbose(
+ expr_span,
+ &format!(
+ "`{adt}` is a tuple {kind_name}, use the appropriate syntax",
+ adt = ty,
+ kind_name = kind_name,
+ ),
+ format!("{adt}(/* fields */)", adt = ty),
+ Applicability::HasPlaceholders,
+ );
+ }
+ },
+ _ => {
+ // prevent all specified fields from being suggested
+ let skip_fields = skip_fields.iter().map(|x| x.ident.name);
+ if let Some(field_name) = self.suggest_field_name(
+ variant,
+ field.ident.name,
+ skip_fields.collect(),
+ expr_span,
+ ) {
+ err.span_suggestion(
+ field.ident.span,
+ "a field with a similar name exists",
+ field_name,
+ Applicability::MaybeIncorrect,
+ );
+ } else {
+ match ty.kind() {
+ ty::Adt(adt, ..) => {
+ if adt.is_enum() {
+ err.span_label(
+ field.ident.span,
+ format!("`{}::{}` does not have this field", ty, variant.name),
+ );
+ } else {
+ err.span_label(
+ field.ident.span,
+ format!("`{ty}` does not have this field"),
+ );
+ }
+ let available_field_names =
+ self.available_field_names(variant, expr_span);
+ if !available_field_names.is_empty() {
+ err.note(&format!(
+ "available fields are: {}",
+ self.name_series_display(available_field_names)
+ ));
+ }
+ }
+ _ => bug!("non-ADT passed to report_unknown_field"),
+ }
+ };
+ }
+ }
+ err.emit();
+ }
+
+ // Return a hint about the closest match in field names
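+ // (an edit-distance match via `find_best_match_for_name`), skipping fields that were
+ // already provided, private fields of non-local types, and unstable fields.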
+ fn suggest_field_name(
+ &self,
+ variant: &'tcx ty::VariantDef,
+ field: Symbol,
+ skip: Vec<Symbol>,
+ // The span where stability will be checked
+ span: Span,
+ ) -> Option<Symbol> {
+ let names = variant
+ .fields
+ .iter()
+ .filter_map(|field| {
+ // ignore already set fields and private fields from non-local crates
+ // and unstable fields.
+ if skip.iter().any(|&x| x == field.name)
+ || (!variant.def_id.is_local() && !field.vis.is_public())
+ || matches!(
+ self.tcx.eval_stability(field.did, None, span, None),
+ stability::EvalResult::Deny { .. }
+ )
+ {
+ None
+ } else {
+ Some(field.name)
+ }
+ })
+ .collect::<Vec<Symbol>>();
+
+ find_best_match_for_name(&names, field, None)
+ }
+
+ fn available_field_names(
+ &self,
+ variant: &'tcx ty::VariantDef,
+ access_span: Span,
+ ) -> Vec<Symbol> {
+ variant
+ .fields
+ .iter()
+ .filter(|field| {
+ let def_scope = self
+ .tcx
+ .adjust_ident_and_get_scope(field.ident(self.tcx), variant.def_id, self.body_id)
+ .1;
+ field.vis.is_accessible_from(def_scope, self.tcx)
+ && !matches!(
+ self.tcx.eval_stability(field.did, None, access_span, None),
+ stability::EvalResult::Deny { .. }
+ )
+ })
+ .filter(|field| !self.tcx.is_doc_hidden(field.did))
+ .map(|field| field.name)
+ .collect()
+ }
+
+ fn name_series_display(&self, names: Vec<Symbol>) -> String {
+ // dynamic limit, to never omit just one field
+ let limit = if names.len() == 6 { 6 } else { 5 };
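+ // e.g. 6 fields are printed in full, while 7 fields are shown as 5 names
+ // followed by "... and 2 others".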
+ let mut display =
+ names.iter().take(limit).map(|n| format!("`{}`", n)).collect::<Vec<_>>().join(", ");
+ if names.len() > limit {
+ display = format!("{} ... and {} others", display, names.len() - limit);
+ }
+ display
+ }
+
+ // Check field access expressions
+ fn check_field(
+ &self,
+ expr: &'tcx hir::Expr<'tcx>,
+ base: &'tcx hir::Expr<'tcx>,
+ field: Ident,
+ ) -> Ty<'tcx> {
+ debug!("check_field(expr: {:?}, base: {:?}, field: {:?})", expr, base, field);
+ let base_ty = self.check_expr(base);
+ let base_ty = self.structurally_resolved_type(base.span, base_ty);
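+ // Track any private field that matches, so that E0616 can be reported below if no
+ // accessible field with this name is found while auto-dereferencing.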
+ let mut private_candidate = None;
+ let mut autoderef = self.autoderef(expr.span, base_ty);
+ while let Some((deref_base_ty, _)) = autoderef.next() {
+ debug!("deref_base_ty: {:?}", deref_base_ty);
+ match deref_base_ty.kind() {
+ ty::Adt(base_def, substs) if !base_def.is_enum() => {
+ debug!("struct named {:?}", deref_base_ty);
+ let (ident, def_scope) =
+ self.tcx.adjust_ident_and_get_scope(field, base_def.did(), self.body_id);
+ let fields = &base_def.non_enum_variant().fields;
+ if let Some(index) = fields
+ .iter()
+ .position(|f| f.ident(self.tcx).normalize_to_macros_2_0() == ident)
+ {
+ let field = &fields[index];
+ let field_ty = self.field_ty(expr.span, field, substs);
+ // Save the index of all fields regardless of their visibility in case
+ // of error recovery.
+ self.write_field_index(expr.hir_id, index);
+ let adjustments = self.adjust_steps(&autoderef);
+ if field.vis.is_accessible_from(def_scope, self.tcx) {
+ self.apply_adjustments(base, adjustments);
+ self.register_predicates(autoderef.into_obligations());
+
+ self.tcx.check_stability(field.did, Some(expr.hir_id), expr.span, None);
+ return field_ty;
+ }
+ private_candidate = Some((adjustments, base_def.did(), field_ty));
+ }
+ }
+ ty::Tuple(tys) => {
+ let fstr = field.as_str();
+ if let Ok(index) = fstr.parse::<usize>() {
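+ // Only accept the canonical decimal spelling of the index, rejecting spellings
+ // with a leading `+` or leading zeros that `parse` would otherwise accept.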
+ if fstr == index.to_string() {
+ if let Some(&field_ty) = tys.get(index) {
+ let adjustments = self.adjust_steps(&autoderef);
+ self.apply_adjustments(base, adjustments);
+ self.register_predicates(autoderef.into_obligations());
+
+ self.write_field_index(expr.hir_id, index);
+ return field_ty;
+ }
+ }
+ }
+ }
+ _ => {}
+ }
+ }
+ self.structurally_resolved_type(autoderef.span(), autoderef.final_ty(false));
+
+ if let Some((adjustments, did, field_ty)) = private_candidate {
+ // (#90483) apply adjustments to prevent `ExprUseVisitor` from
+ // creating an erroneous projection.
+ self.apply_adjustments(base, adjustments);
+ self.ban_private_field_access(expr, base_ty, field, did);
+ return field_ty;
+ }
+
+ if field.name == kw::Empty {
+ } else if self.method_exists(field, base_ty, expr.hir_id, true) {
+ self.ban_take_value_of_method(expr, base_ty, field);
+ } else if !base_ty.is_primitive_ty() {
+ self.ban_nonexisting_field(field, base, expr, base_ty);
+ } else {
+ let field_name = field.to_string();
+ let mut err = type_error_struct!(
+ self.tcx().sess,
+ field.span,
+ base_ty,
+ E0610,
+ "`{base_ty}` is a primitive type and therefore doesn't have fields",
+ );
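+ // Heuristic for expressions like `2.e5` or `2.f32`: the "field" is accepted if it
+ // looks like the tail of a float literal (an optional exponent part followed by an
+ // optional `f32`/`f64` suffix), in which case inserting a `0` after the `.` would
+ // turn the whole expression into a valid float literal.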
+ let is_valid_suffix = |field: &str| {
+ if field == "f32" || field == "f64" {
+ return true;
+ }
+ let mut chars = field.chars().peekable();
+ match chars.peek() {
+ Some('e') | Some('E') => {
+ chars.next();
+ if let Some(c) = chars.peek()
+ && !c.is_numeric() && *c != '-' && *c != '+'
+ {
+ return false;
+ }
+ while let Some(c) = chars.peek() {
+ if !c.is_numeric() {
+ break;
+ }
+ chars.next();
+ }
+ }
+ _ => (),
+ }
+ let suffix = chars.collect::<String>();
+ suffix.is_empty() || suffix == "f32" || suffix == "f64"
+ };
+ let maybe_partial_suffix = |field: &str| -> Option<&str> {
+ let first_chars = ['f', 'l'];
+ if field.len() >= 1
+ && field.to_lowercase().starts_with(first_chars)
+ && field[1..].chars().all(|c| c.is_ascii_digit())
+ {
+ if field.to_lowercase().starts_with(['f']) { Some("f32") } else { Some("f64") }
+ } else {
+ None
+ }
+ };
+ if let ty::Infer(ty::IntVar(_)) = base_ty.kind()
+ && let ExprKind::Lit(Spanned {
+ node: ast::LitKind::Int(_, ast::LitIntType::Unsuffixed),
+ ..
+ }) = base.kind
+ && !base.span.from_expansion()
+ {
+ if is_valid_suffix(&field_name) {
+ err.span_suggestion_verbose(
+ field.span.shrink_to_lo(),
+ "if intended to be a floating point literal, consider adding a `0` after the period",
+ '0',
+ Applicability::MaybeIncorrect,
+ );
+ } else if let Some(correct_suffix) = maybe_partial_suffix(&field_name) {
+ err.span_suggestion_verbose(
+ field.span,
+ format!("if intended to be a floating point literal, consider adding a `0` after the period and a `{correct_suffix}` suffix"),
+ format!("0{correct_suffix}"),
+ Applicability::MaybeIncorrect,
+ );
+ }
+ }
+ err.emit();
+ }
+
+ self.tcx().ty_error()
+ }
+
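+ /// If the base expression is an `impl Future` whose `Output` type has the requested
+ /// field, label the access and suggest appending `.await` to the base expression;
+ /// otherwise fall back to a plain "field not found" label.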
+ fn suggest_await_on_field_access(
+ &self,
+ err: &mut Diagnostic,
+ field_ident: Ident,
+ base: &'tcx hir::Expr<'tcx>,
+ ty: Ty<'tcx>,
+ ) {
+ let output_ty = match self.get_impl_future_output_ty(ty) {
+ Some(output_ty) => self.resolve_vars_if_possible(output_ty),
+ _ => return,
+ };
+ let mut add_label = true;
+ if let ty::Adt(def, _) = output_ty.skip_binder().kind() {
+ // no field access on enum type
+ if !def.is_enum() {
+ if def
+ .non_enum_variant()
+ .fields
+ .iter()
+ .any(|field| field.ident(self.tcx) == field_ident)
+ {
+ add_label = false;
+ err.span_label(
+ field_ident.span,
+ "field not available in `impl Future`, but it is available in its `Output`",
+ );
+ err.span_suggestion_verbose(
+ base.span.shrink_to_hi(),
+ "consider `await`ing on the `Future` and access the field of its `Output`",
+ ".await",
+ Applicability::MaybeIncorrect,
+ );
+ }
+ }
+ }
+ if add_label {
+ err.span_label(field_ident.span, &format!("field not found in `{ty}`"));
+ }
+ }
+
+ fn ban_nonexisting_field(
+ &self,
+ ident: Ident,
+ base: &'tcx hir::Expr<'tcx>,
+ expr: &'tcx hir::Expr<'tcx>,
+ base_ty: Ty<'tcx>,
+ ) {
+ debug!(
+ "ban_nonexisting_field: field={:?}, base={:?}, expr={:?}, base_ty={:?}",
+ ident, base, expr, base_ty
+ );
+ let mut err = self.no_such_field_err(ident, base_ty, base.hir_id);
+
+ match *base_ty.peel_refs().kind() {
+ ty::Array(_, len) => {
+ self.maybe_suggest_array_indexing(&mut err, expr, base, ident, len);
+ }
+ ty::RawPtr(..) => {
+ self.suggest_first_deref_field(&mut err, expr, base, ident);
+ }
+ ty::Adt(def, _) if !def.is_enum() => {
+ self.suggest_fields_on_recordish(&mut err, def, ident, expr.span);
+ }
+ ty::Param(param_ty) => {
+ self.point_at_param_definition(&mut err, param_ty);
+ }
+ ty::Opaque(_, _) => {
+ self.suggest_await_on_field_access(&mut err, ident, base, base_ty.peel_refs());
+ }
+ _ => {}
+ }
+
+ self.suggest_fn_call(&mut err, base, base_ty, |output_ty| {
+ if let ty::Adt(def, _) = output_ty.kind() && !def.is_enum() {
+ def.non_enum_variant().fields.iter().any(|field| {
+ field.ident(self.tcx) == ident
+ && field.vis.is_accessible_from(expr.hir_id.owner.def_id, self.tcx)
+ })
+ } else if let ty::Tuple(tys) = output_ty.kind()
+ && let Ok(idx) = ident.as_str().parse::<usize>()
+ {
+ idx < tys.len()
+ } else {
+ false
+ }
+ });
+
+ if ident.name == kw::Await {
+ // We know by construction that `<expr>.await` is either on Rust 2015
+ // or results in `ExprKind::Await`. Suggest switching the edition to 2018.
+ err.note("to `.await` a `Future`, switch to Rust 2018 or later");
+ err.help_use_latest_edition();
+ }
+
+ err.emit();
+ }
+
+ fn ban_private_field_access(
+ &self,
+ expr: &hir::Expr<'_>,
+ expr_t: Ty<'tcx>,
+ field: Ident,
+ base_did: DefId,
+ ) {
+ let struct_path = self.tcx().def_path_str(base_did);
+ let kind_name = self.tcx().def_kind(base_did).descr(base_did);
+ let mut err = struct_span_err!(
+ self.tcx().sess,
+ field.span,
+ E0616,
+ "field `{field}` of {kind_name} `{struct_path}` is private",
+ );
+ err.span_label(field.span, "private field");
+ // Also check if an accessible method exists, which is often what is meant.
+ if self.method_exists(field, expr_t, expr.hir_id, false) && !self.expr_in_place(expr.hir_id)
+ {
+ self.suggest_method_call(
+ &mut err,
+ &format!("a method `{field}` also exists, call it with parentheses"),
+ field,
+ expr_t,
+ expr,
+ None,
+ );
+ }
+ err.emit();
+ }
+
+ fn ban_take_value_of_method(&self, expr: &hir::Expr<'_>, expr_t: Ty<'tcx>, field: Ident) {
+ let mut err = type_error_struct!(
+ self.tcx().sess,
+ field.span,
+ expr_t,
+ E0615,
+ "attempted to take value of method `{field}` on type `{expr_t}`",
+ );
+ err.span_label(field.span, "method, not a field");
+ let expr_is_call =
+ if let hir::Node::Expr(hir::Expr { kind: ExprKind::Call(callee, _args), .. }) =
+ self.tcx.hir().get(self.tcx.hir().get_parent_node(expr.hir_id))
+ {
+ expr.hir_id == callee.hir_id
+ } else {
+ false
+ };
+ let expr_snippet =
+ self.tcx.sess.source_map().span_to_snippet(expr.span).unwrap_or_default();
+ let is_wrapped = expr_snippet.starts_with('(') && expr_snippet.ends_with(')');
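+ // When the expression is wrapped in parentheses, these are the byte positions just
+ // inside them (`(` and `)` are one byte each); they are used both to remove the
+ // parentheses and to target their contents.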
+ let after_open = expr.span.lo() + rustc_span::BytePos(1);
+ let before_close = expr.span.hi() - rustc_span::BytePos(1);
+
+ if expr_is_call && is_wrapped {
+ err.multipart_suggestion(
+ "remove wrapping parentheses to call the method",
+ vec![
+ (expr.span.with_hi(after_open), String::new()),
+ (expr.span.with_lo(before_close), String::new()),
+ ],
+ Applicability::MachineApplicable,
+ );
+ } else if !self.expr_in_place(expr.hir_id) {
+ // Suggest call parentheses inside the wrapping parentheses
+ let span = if is_wrapped {
+ expr.span.with_lo(after_open).with_hi(before_close)
+ } else {
+ expr.span
+ };
+ self.suggest_method_call(
+ &mut err,
+ "use parentheses to call the method",
+ field,
+ expr_t,
+ expr,
+ Some(span),
+ );
+ } else if let ty::RawPtr(ty_and_mut) = expr_t.kind()
+ && let ty::Adt(adt_def, _) = ty_and_mut.ty.kind()
+ && let ExprKind::Field(base_expr, _) = expr.kind
+ && adt_def.variants().len() == 1
+ && adt_def
+ .variants()
+ .iter()
+ .next()
+ .unwrap()
+ .fields
+ .iter()
+ .any(|f| f.ident(self.tcx) == field)
+ {
+ err.multipart_suggestion(
+ "to access the field, dereference first",
+ vec![
+ (base_expr.span.shrink_to_lo(), "(*".to_string()),
+ (base_expr.span.shrink_to_hi(), ")".to_string()),
+ ],
+ Applicability::MaybeIncorrect,
+ );
+ } else {
+ err.help("methods are immutable and cannot be assigned to");
+ }
+
+ err.emit();
+ }
+
+ fn point_at_param_definition(&self, err: &mut Diagnostic, param: ty::ParamTy) {
+ let generics = self.tcx.generics_of(self.body_id.owner.to_def_id());
+ let generic_param = generics.type_param(&param, self.tcx);
+ if let ty::GenericParamDefKind::Type { synthetic: true, .. } = generic_param.kind {
+ return;
+ }
+ let param_def_id = generic_param.def_id;
+ let param_hir_id = match param_def_id.as_local() {
+ Some(x) => self.tcx.hir().local_def_id_to_hir_id(x),
+ None => return,
+ };
+ let param_span = self.tcx.hir().span(param_hir_id);
+ let param_name = self.tcx.hir().ty_param_name(param_def_id.expect_local());
+
+ err.span_label(param_span, &format!("type parameter '{param_name}' declared here"));
+ }
+
+ fn suggest_fields_on_recordish(
+ &self,
+ err: &mut Diagnostic,
+ def: ty::AdtDef<'tcx>,
+ field: Ident,
+ access_span: Span,
+ ) {
+ if let Some(suggested_field_name) =
+ self.suggest_field_name(def.non_enum_variant(), field.name, vec![], access_span)
+ {
+ err.span_suggestion(
+ field.span,
+ "a field with a similar name exists",
+ suggested_field_name,
+ Applicability::MaybeIncorrect,
+ );
+ } else {
+ err.span_label(field.span, "unknown field");
+ let struct_variant_def = def.non_enum_variant();
+ let field_names = self.available_field_names(struct_variant_def, access_span);
+ if !field_names.is_empty() {
+ err.note(&format!(
+ "available fields are: {}",
+ self.name_series_display(field_names),
+ ));
+ }
+ }
+ }
+
+ fn maybe_suggest_array_indexing(
+ &self,
+ err: &mut Diagnostic,
+ expr: &hir::Expr<'_>,
+ base: &hir::Expr<'_>,
+ field: Ident,
+ len: ty::Const<'tcx>,
+ ) {
+ if let (Some(len), Ok(user_index)) =
+ (len.try_eval_usize(self.tcx, self.param_env), field.as_str().parse::<u64>())
+ && let Ok(base) = self.tcx.sess.source_map().span_to_snippet(base.span)
+ {
+ let help = "instead of using tuple indexing, use array indexing";
+ let suggestion = format!("{base}[{field}]");
+ let applicability = if len < user_index {
+ Applicability::MachineApplicable
+ } else {
+ Applicability::MaybeIncorrect
+ };
+ err.span_suggestion(expr.span, help, suggestion, applicability);
+ }
+ }
+
+ fn suggest_first_deref_field(
+ &self,
+ err: &mut Diagnostic,
+ expr: &hir::Expr<'_>,
+ base: &hir::Expr<'_>,
+ field: Ident,
+ ) {
+ if let Ok(base) = self.tcx.sess.source_map().span_to_snippet(base.span) {
+ let msg = format!("`{base}` is a raw pointer; try dereferencing it");
+ let suggestion = format!("(*{base}).{field}");
+ err.span_suggestion(expr.span, &msg, suggestion, Applicability::MaybeIncorrect);
+ }
+ }
+
+ fn no_such_field_err(
+ &self,
+ field: Ident,
+ expr_t: Ty<'tcx>,
+ id: HirId,
+ ) -> DiagnosticBuilder<'_, ErrorGuaranteed> {
+ let span = field.span;
+ debug!("no_such_field_err(span: {:?}, field: {:?}, expr_t: {:?})", span, field, expr_t);
+
+ let mut err = type_error_struct!(
+ self.tcx().sess,
+ field.span,
+ expr_t,
+ E0609,
+ "no field `{field}` on type `{expr_t}`",
+ );
+
+ // try to add a suggestion in case the field is a nested field of a field of the Adt
+ let mod_id = self.tcx.parent_module(id).to_def_id();
+ if let Some((fields, substs)) =
+ self.get_field_candidates_considering_privacy(span, expr_t, mod_id)
+ {
+ let candidate_fields: Vec<_> = fields
+ .filter_map(|candidate_field| {
+ self.check_for_nested_field_satisfying(
+ span,
+ &|candidate_field, _| candidate_field.ident(self.tcx()) == field,
+ candidate_field,
+ substs,
+ vec![],
+ mod_id,
+ )
+ })
+ .map(|mut field_path| {
+ field_path.pop();
+ field_path
+ .iter()
+ .map(|id| id.name.to_ident_string())
+ .collect::<Vec<String>>()
+ .join(".")
+ })
+ .collect::<Vec<_>>();
+
+ let len = candidate_fields.len();
+ if len > 0 {
+ err.span_suggestions(
+ field.span.shrink_to_lo(),
+ format!(
+ "{} of the expressions' fields {} a field of the same name",
+ if len > 1 { "some" } else { "one" },
+ if len > 1 { "have" } else { "has" },
+ ),
+ candidate_fields.iter().map(|path| format!("{path}.")),
+ Applicability::MaybeIncorrect,
+ );
+ }
+ }
+ err
+ }
+
+ pub(crate) fn get_field_candidates_considering_privacy(
+ &self,
+ span: Span,
+ base_ty: Ty<'tcx>,
+ mod_id: DefId,
+ ) -> Option<(impl Iterator<Item = &'tcx ty::FieldDef> + 'tcx, SubstsRef<'tcx>)> {
+ debug!("get_field_candidates(span: {:?}, base_t: {:?}", span, base_ty);
+
+ for (base_t, _) in self.autoderef(span, base_ty) {
+ match base_t.kind() {
+ ty::Adt(base_def, substs) if !base_def.is_enum() => {
+ let tcx = self.tcx;
+ let fields = &base_def.non_enum_variant().fields;
+ // Some structs, e.g. some that impl `Deref`, have all private fields
+ // because you're expected to deref them to access the _real_ fields.
+ // This, for example, will help us suggest accessing a field through a `Box<T>`.
+ if fields.iter().all(|field| !field.vis.is_accessible_from(mod_id, tcx)) {
+ continue;
+ }
+ return Some((
+ fields
+ .iter()
+ .filter(move |field| field.vis.is_accessible_from(mod_id, tcx))
+ // For compile-time reasons, put a limit on the number of fields we search
+ .take(100),
+ substs,
+ ));
+ }
+ _ => {}
+ }
+ }
+ None
+ }
+
+ /// This method is called after we have encountered a missing field error to recursively
+ /// search for the field.
+ pub(crate) fn check_for_nested_field_satisfying(
+ &self,
+ span: Span,
+ matches: &impl Fn(&ty::FieldDef, Ty<'tcx>) -> bool,
+ candidate_field: &ty::FieldDef,
+ subst: SubstsRef<'tcx>,
+ mut field_path: Vec<Ident>,
+ mod_id: DefId,
+ ) -> Option<Vec<Ident>> {
+ debug!(
+ "check_for_nested_field_satisfying(span: {:?}, candidate_field: {:?}, field_path: {:?}",
+ span, candidate_field, field_path
+ );
+
+ if field_path.len() > 3 {
+ // For compile-time reasons and to avoid infinite recursion we only check for fields
+ // up to a depth of three
+ None
+ } else {
+ field_path.push(candidate_field.ident(self.tcx).normalize_to_macros_2_0());
+ let field_ty = candidate_field.ty(self.tcx, subst);
+ if matches(candidate_field, field_ty) {
+ return Some(field_path);
+ } else if let Some((nested_fields, subst)) =
+ self.get_field_candidates_considering_privacy(span, field_ty, mod_id)
+ {
+ // recursively search fields of `candidate_field` if it's a ty::Adt
+ for field in nested_fields {
+ if let Some(field_path) = self.check_for_nested_field_satisfying(
+ span,
+ matches,
+ field,
+ subst,
+ field_path.clone(),
+ mod_id,
+ ) {
+ return Some(field_path);
+ }
+ }
+ }
+ None
+ }
+ }
+
+ fn check_expr_index(
+ &self,
+ base: &'tcx hir::Expr<'tcx>,
+ idx: &'tcx hir::Expr<'tcx>,
+ expr: &'tcx hir::Expr<'tcx>,
+ ) -> Ty<'tcx> {
+ let base_t = self.check_expr(&base);
+ let idx_t = self.check_expr(&idx);
+
+ if base_t.references_error() {
+ base_t
+ } else if idx_t.references_error() {
+ idx_t
+ } else {
+ let base_t = self.structurally_resolved_type(base.span, base_t);
+ match self.lookup_indexing(expr, base, base_t, idx, idx_t) {
+ Some((index_ty, element_ty)) => {
+ // two-phase not needed because index_ty is never mutable
+ self.demand_coerce(idx, idx_t, index_ty, None, AllowTwoPhase::No);
+ self.select_obligations_where_possible(false, |errors| {
+ self.point_at_index_if_possible(errors, idx.span)
+ });
+ element_ty
+ }
+ None => {
+ let mut err = type_error_struct!(
+ self.tcx.sess,
+ expr.span,
+ base_t,
+ E0608,
+ "cannot index into a value of type `{base_t}`",
+ );
+ // Try to give some advice about indexing tuples.
+ if let ty::Tuple(..) = base_t.kind() {
+ let mut needs_note = true;
+ // If the index is an integer, we can show the actual
+ // fixed expression:
+ if let ExprKind::Lit(ref lit) = idx.kind {
+ if let ast::LitKind::Int(i, ast::LitIntType::Unsuffixed) = lit.node {
+ let snip = self.tcx.sess.source_map().span_to_snippet(base.span);
+ if let Ok(snip) = snip {
+ err.span_suggestion(
+ expr.span,
+ "to access tuple elements, use",
+ format!("{snip}.{i}"),
+ Applicability::MachineApplicable,
+ );
+ needs_note = false;
+ }
+ }
+ }
+ if needs_note {
+ err.help(
+ "to access tuple elements, use tuple indexing \
+ syntax (e.g., `tuple.0`)",
+ );
+ }
+ }
+ err.emit();
+ self.tcx.ty_error()
+ }
+ }
+ }
+ }
+
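+ /// Point `SliceIndex` fulfillment errors at the index expression (`span`) instead
+ /// of their original span.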
+ fn point_at_index_if_possible(
+ &self,
+ errors: &mut Vec<traits::FulfillmentError<'tcx>>,
+ span: Span,
+ ) {
+ for error in errors {
+ match error.obligation.predicate.kind().skip_binder() {
+ ty::PredicateKind::Trait(predicate)
+ if self.tcx.is_diagnostic_item(sym::SliceIndex, predicate.trait_ref.def_id) => {
+ }
+ _ => continue,
+ }
+ error.obligation.cause.span = span;
+ }
+ }
+
+ fn check_expr_yield(
+ &self,
+ value: &'tcx hir::Expr<'tcx>,
+ expr: &'tcx hir::Expr<'tcx>,
+ src: &'tcx hir::YieldSource,
+ ) -> Ty<'tcx> {
+ match self.resume_yield_tys {
+ Some((resume_ty, yield_ty)) => {
+ self.check_expr_coercable_to_type(&value, yield_ty, None);
+
+ resume_ty
+ }
+ // Given that this `yield` expression was generated as a result of lowering a `.await`,
+ // we know that the yield type must be `()`; however, the context won't contain this
+ // information. Hence, we check the source of the yield expression here and check its
+ // value's type against `()` (this check should always hold).
+ None if src.is_await() => {
+ self.check_expr_coercable_to_type(&value, self.tcx.mk_unit(), None);
+ self.tcx.mk_unit()
+ }
+ _ => {
+ self.tcx.sess.emit_err(YieldExprOutsideOfGenerator { span: expr.span });
+ // Avoid expressions without types during writeback (#78653).
+ self.check_expr(value);
+ self.tcx.mk_unit()
+ }
+ }
+ }
+
+ fn check_expr_asm_operand(&self, expr: &'tcx hir::Expr<'tcx>, is_input: bool) {
+ let needs = if is_input { Needs::None } else { Needs::MutPlace };
+ let ty = self.check_expr_with_needs(expr, needs);
+ self.require_type_is_sized(ty, expr.span, traits::InlineAsmSized);
+
+ if !is_input && !expr.is_syntactic_place_expr() {
+ let mut err = self.tcx.sess.struct_span_err(expr.span, "invalid asm output");
+ err.span_label(expr.span, "cannot assign to this expression");
+ err.emit();
+ }
+
+ // If this is an input value, we require its type to be fully resolved
+ // at this point. This allows us to provide helpful coercions which help
+ // pass the type candidate list in a later pass.
+ //
+ // We don't require output types to be resolved at this point, which
+ // allows them to be inferred based on how they are used later in the
+ // function.
+ if is_input {
+ let ty = self.structurally_resolved_type(expr.span, ty);
+ match *ty.kind() {
+ ty::FnDef(..) => {
+ let fnptr_ty = self.tcx.mk_fn_ptr(ty.fn_sig(self.tcx));
+ self.demand_coerce(expr, ty, fnptr_ty, None, AllowTwoPhase::No);
+ }
+ ty::Ref(_, base_ty, mutbl) => {
+ let ptr_ty = self.tcx.mk_ptr(ty::TypeAndMut { ty: base_ty, mutbl });
+ self.demand_coerce(expr, ty, ptr_ty, None, AllowTwoPhase::No);
+ }
+ _ => {}
+ }
+ }
+ }
+
+ fn check_expr_asm(&self, asm: &'tcx hir::InlineAsm<'tcx>) -> Ty<'tcx> {
+ for (op, _op_sp) in asm.operands {
+ match op {
+ hir::InlineAsmOperand::In { expr, .. } => {
+ self.check_expr_asm_operand(expr, true);
+ }
+ hir::InlineAsmOperand::Out { expr: Some(expr), .. }
+ | hir::InlineAsmOperand::InOut { expr, .. } => {
+ self.check_expr_asm_operand(expr, false);
+ }
+ hir::InlineAsmOperand::Out { expr: None, .. } => {}
+ hir::InlineAsmOperand::SplitInOut { in_expr, out_expr, .. } => {
+ self.check_expr_asm_operand(in_expr, true);
+ if let Some(out_expr) = out_expr {
+ self.check_expr_asm_operand(out_expr, false);
+ }
+ }
+ // `AnonConst`s have their own body and are type-checked separately.
+ // As they don't flow into the type system we don't need them to
+ // be well-formed.
+ hir::InlineAsmOperand::Const { .. } | hir::InlineAsmOperand::SymFn { .. } => {}
+ hir::InlineAsmOperand::SymStatic { .. } => {}
+ }
+ }
+ if asm.options.contains(ast::InlineAsmOptions::NORETURN) {
+ self.tcx.types.never
+ } else {
+ self.tcx.mk_unit()
+ }
+ }
+}
diff --git a/compiler/rustc_typeck/src/expr_use_visitor.rs b/compiler/rustc_hir_typeck/src/expr_use_visitor.rs
index 74a5b6e42..fce2a5888 100644
--- a/compiler/rustc_typeck/src/expr_use_visitor.rs
+++ b/compiler/rustc_hir_typeck/src/expr_use_visitor.rs
@@ -89,15 +89,6 @@ enum ConsumeMode {
Move,
}
-#[derive(Copy, Clone, PartialEq, Debug)]
-pub enum MutateMode {
- Init,
- /// Example: `x = y`
- JustWrite,
- /// Example: `x += y`
- WriteAndRead,
-}
-
/// The ExprUseVisitor type
///
/// This is the code that actually walks the tree.
@@ -134,7 +125,7 @@ impl<'a, 'tcx> ExprUseVisitor<'a, 'tcx> {
/// - `typeck_results` --- typeck results for the code being analyzed
pub fn new(
delegate: &'a mut (dyn Delegate<'tcx> + 'a),
- infcx: &'a InferCtxt<'a, 'tcx>,
+ infcx: &'a InferCtxt<'tcx>,
body_owner: LocalDefId,
param_env: ty::ParamEnv<'tcx>,
typeck_results: &'a ty::TypeckResults<'tcx>,
@@ -233,8 +224,9 @@ impl<'a, 'tcx> ExprUseVisitor<'a, 'tcx> {
self.consume_exprs(args);
}
- hir::ExprKind::MethodCall(.., args, _) => {
+ hir::ExprKind::MethodCall(.., receiver, args, _) => {
// callee.m(args)
+ self.consume_expr(receiver);
self.consume_exprs(args);
}
@@ -497,7 +489,7 @@ impl<'a, 'tcx> ExprUseVisitor<'a, 'tcx> {
let expr_place = return_if_err!(self.mc.cat_expr(expr));
f(self);
if let Some(els) = els {
- // borrowing because we need to test the descriminant
+ // borrowing because we need to test the discriminant
self.maybe_read_scrutinee(expr, expr_place.clone(), from_ref(pat).iter());
self.walk_block(els)
}
@@ -582,7 +574,9 @@ impl<'a, 'tcx> ExprUseVisitor<'a, 'tcx> {
for adjustment in adjustments {
debug!("walk_adjustment expr={:?} adj={:?}", expr, adjustment);
match adjustment.kind {
- adjustment::Adjust::NeverToAny | adjustment::Adjust::Pointer(_) => {
+ adjustment::Adjust::NeverToAny
+ | adjustment::Adjust::Pointer(_)
+ | adjustment::Adjust::DynStar => {
// Creating a closure/fn-pointer or unsizing consumes
// the input and stores it into the resulting rvalue.
self.delegate_consume(&place_with_id, place_with_id.hir_id);
diff --git a/compiler/rustc_typeck/src/check/fallback.rs b/compiler/rustc_hir_typeck/src/fallback.rs
index 4059b3403..747ecb036 100644
--- a/compiler/rustc_typeck/src/check/fallback.rs
+++ b/compiler/rustc_hir_typeck/src/fallback.rs
@@ -1,4 +1,4 @@
-use crate::check::FnCtxt;
+use crate::FnCtxt;
use rustc_data_structures::{
fx::{FxHashMap, FxHashSet},
graph::WithSuccessors,
@@ -72,7 +72,7 @@ impl<'tcx> FnCtxt<'_, 'tcx> {
//
// - Unconstrained ints are replaced with `i32`.
//
- // - Unconstrained floats are replaced with with `f64`.
+ // - Unconstrained floats are replaced with `f64`.
//
// - Non-numerics may get replaced with `()` or `!`, depending on
// how they were categorized by `calculate_diverging_fallback`
diff --git a/compiler/rustc_typeck/src/check/fn_ctxt/_impl.rs b/compiler/rustc_hir_typeck/src/fn_ctxt/_impl.rs
index 3a8093345..6a1cffe3e 100644
--- a/compiler/rustc_typeck/src/check/fn_ctxt/_impl.rs
+++ b/compiler/rustc_hir_typeck/src/fn_ctxt/_impl.rs
@@ -1,12 +1,7 @@
-use crate::astconv::{
- AstConv, CreateSubstsForGenericArgsCtxt, ExplicitLateBound, GenericArgCountMismatch,
- GenericArgCountResult, IsMethodCall, PathSeg,
-};
-use crate::check::callee::{self, DeferredCallResolution};
-use crate::check::method::{self, MethodCallee, SelfSource};
-use crate::check::rvalue_scopes;
-use crate::check::{BreakableCtxt, Diverges, Expectation, FnCtxt, LocalTy};
-
+use crate::callee::{self, DeferredCallResolution};
+use crate::method::{self, MethodCallee, SelfSource};
+use crate::rvalue_scopes;
+use crate::{BreakableCtxt, Diverges, Expectation, FnCtxt, LocalTy};
use rustc_data_structures::captures::Captures;
use rustc_data_structures::fx::FxHashSet;
use rustc_errors::{Applicability, Diagnostic, ErrorGuaranteed, MultiSpan};
@@ -15,26 +10,28 @@ use rustc_hir::def::{CtorOf, DefKind, Res};
use rustc_hir::def_id::DefId;
use rustc_hir::lang_items::LangItem;
use rustc_hir::{ExprKind, GenericArg, Node, QPath};
+use rustc_hir_analysis::astconv::{
+ AstConv, CreateSubstsForGenericArgsCtxt, ExplicitLateBound, GenericArgCountMismatch,
+ GenericArgCountResult, IsMethodCall, PathSeg,
+};
use rustc_infer::infer::canonical::{Canonical, OriginalQueryValues, QueryResponse};
use rustc_infer::infer::error_reporting::TypeAnnotationNeeded::E0282;
use rustc_infer::infer::{InferOk, InferResult};
use rustc_middle::ty::adjustment::{Adjust, Adjustment, AutoBorrow, AutoBorrowMutability};
use rustc_middle::ty::fold::TypeFoldable;
-use rustc_middle::ty::subst::{
- self, GenericArgKind, InternalSubsts, Subst, SubstsRef, UserSelfTy, UserSubsts,
-};
use rustc_middle::ty::visit::TypeVisitable;
use rustc_middle::ty::{
self, AdtKind, CanonicalUserType, DefIdTree, EarlyBinder, GenericParamDefKind, ToPolyTraitRef,
ToPredicate, Ty, UserType,
};
+use rustc_middle::ty::{GenericArgKind, InternalSubsts, SubstsRef, UserSelfTy, UserSubsts};
use rustc_session::lint;
use rustc_span::def_id::LocalDefId;
use rustc_span::hygiene::DesugaringKind;
use rustc_span::symbol::{kw, sym, Ident};
use rustc_span::{Span, DUMMY_SP};
use rustc_trait_selection::infer::InferCtxtExt as _;
-use rustc_trait_selection::traits::error_reporting::InferCtxtExt as _;
+use rustc_trait_selection::traits::error_reporting::TypeErrCtxtExt as _;
use rustc_trait_selection::traits::{
self, ObligationCause, ObligationCauseCode, TraitEngine, TraitEngineExt,
};
@@ -60,17 +57,20 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
debug!("warn_if_unreachable: id={:?} span={:?} kind={}", id, span, kind);
- self.tcx().struct_span_lint_hir(lint::builtin::UNREACHABLE_CODE, id, span, |lint| {
- let msg = format!("unreachable {}", kind);
- lint.build(&msg)
- .span_label(span, &msg)
- .span_label(
+ let msg = format!("unreachable {}", kind);
+ self.tcx().struct_span_lint_hir(
+ lint::builtin::UNREACHABLE_CODE,
+ id,
+ span,
+ &msg,
+ |lint| {
+ lint.span_label(span, &msg).span_label(
orig_span,
custom_note
.unwrap_or("any code following this expression is unreachable"),
)
- .emit();
- })
+ },
+ )
}
}
}
@@ -83,21 +83,21 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
self.resolve_vars_with_obligations_and_mutate_fulfillment(ty, |_| {})
}
- #[instrument(skip(self, mutate_fulfillment_errors), level = "debug")]
+ #[instrument(skip(self, mutate_fulfillment_errors), level = "debug", ret)]
pub(in super::super) fn resolve_vars_with_obligations_and_mutate_fulfillment(
&self,
mut ty: Ty<'tcx>,
mutate_fulfillment_errors: impl Fn(&mut Vec<traits::FulfillmentError<'tcx>>),
) -> Ty<'tcx> {
// No Infer()? Nothing needs doing.
- if !ty.has_infer_types_or_consts() {
+ if !ty.has_non_region_infer() {
debug!("no inference var, nothing needs doing");
return ty;
}
// If `ty` is a type variable, see whether we already know what it is.
ty = self.resolve_vars_if_possible(ty);
- if !ty.has_infer_types_or_consts() {
+ if !ty.has_non_region_infer() {
debug!(?ty);
return ty;
}
@@ -107,10 +107,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
// indirect dependencies that don't seem worth tracking
// precisely.
self.select_obligations_where_possible(false, mutate_fulfillment_errors);
- ty = self.resolve_vars_if_possible(ty);
-
- debug!(?ty);
- ty
+ self.resolve_vars_if_possible(ty)
}
pub(in super::super) fn record_deferred_call_resolution(
@@ -412,7 +409,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
rhs_span: opt_input_expr.map(|expr| expr.span),
is_lit: opt_input_expr
.map_or(false, |expr| matches!(expr.kind, ExprKind::Lit(_))),
- output_pred: None,
+ output_ty: None,
},
),
self.param_env,
@@ -492,21 +489,16 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
pub fn array_length_to_const(&self, length: &hir::ArrayLen) -> ty::Const<'tcx> {
match length {
&hir::ArrayLen::Infer(_, span) => self.ct_infer(self.tcx.types.usize, None, span),
- hir::ArrayLen::Body(anon_const) => self.to_const(anon_const),
+ hir::ArrayLen::Body(anon_const) => {
+ let const_def_id = self.tcx.hir().local_def_id(anon_const.hir_id);
+ let span = self.tcx.hir().span(anon_const.hir_id);
+ let c = ty::Const::from_anon_const(self.tcx, const_def_id);
+ self.register_wf_obligation(c.into(), span, ObligationCauseCode::WellFormed(None));
+ self.normalize_associated_types_in(span, c)
+ }
}
}
- pub fn to_const(&self, ast_c: &hir::AnonConst) -> ty::Const<'tcx> {
- let const_def_id = self.tcx.hir().local_def_id(ast_c.hir_id);
- let c = ty::Const::from_anon_const(self.tcx, const_def_id);
- self.register_wf_obligation(
- c.into(),
- self.tcx.hir().span(ast_c.hir_id),
- ObligationCauseCode::WellFormed(None),
- );
- c
- }
-
pub fn const_arg_to_const(
&self,
ast_c: &hir::AnonConst,
@@ -565,7 +557,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
/// Registers an obligation for checking later, during regionck, that `arg` is well-formed.
pub fn register_wf_obligation(
&self,
- arg: subst::GenericArg<'tcx>,
+ arg: ty::GenericArg<'tcx>,
span: Span,
code: traits::ObligationCauseCode<'tcx>,
) {
@@ -610,18 +602,17 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
let mut generators = self.deferred_generator_interiors.borrow_mut();
for (body_id, interior, kind) in generators.drain(..) {
self.select_obligations_where_possible(false, |_| {});
- crate::check::generator_interior::resolve_interior(
- self, def_id, body_id, interior, kind,
- );
+ crate::generator_interior::resolve_interior(self, def_id, body_id, interior, kind);
}
}
#[instrument(skip(self), level = "debug")]
pub(in super::super) fn select_all_obligations_or_error(&self) {
- let errors = self.fulfillment_cx.borrow_mut().select_all_or_error(&self);
+ let mut errors = self.fulfillment_cx.borrow_mut().select_all_or_error(&self);
if !errors.is_empty() {
- self.report_fulfillment_errors(&errors, self.inh.body_id, false);
+ self.adjust_fulfillment_errors_for_expr_obligation(&mut errors);
+ self.err_ctxt().report_fulfillment_errors(&errors, self.inh.body_id, false);
}
}
@@ -634,7 +625,12 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
let mut result = self.fulfillment_cx.borrow_mut().select_where_possible(self);
if !result.is_empty() {
mutate_fulfillment_errors(&mut result);
- self.report_fulfillment_errors(&result, self.inh.body_id, fallback_has_occurred);
+ self.adjust_fulfillment_errors_for_expr_obligation(&mut result);
+ self.err_ctxt().report_fulfillment_errors(
+ &result,
+ self.inh.body_id,
+ fallback_has_occurred,
+ );
}
}
@@ -831,23 +827,25 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
let ty = item_ty.subst(self.tcx, substs);
self.write_resolution(hir_id, Ok((def_kind, def_id)));
- self.add_required_obligations_with_code(
- span,
- def_id,
- &substs,
- match lang_item {
- hir::LangItem::IntoFutureIntoFuture => {
- ObligationCauseCode::AwaitableExpr(expr_hir_id)
- }
- hir::LangItem::IteratorNext | hir::LangItem::IntoIterIntoIter => {
- ObligationCauseCode::ForLoopIterator
- }
- hir::LangItem::TryTraitFromOutput
- | hir::LangItem::TryTraitFromResidual
- | hir::LangItem::TryTraitBranch => ObligationCauseCode::QuestionMark,
- _ => traits::ItemObligation(def_id),
- },
- );
+
+ let code = match lang_item {
+ hir::LangItem::IntoFutureIntoFuture => {
+ Some(ObligationCauseCode::AwaitableExpr(expr_hir_id))
+ }
+ hir::LangItem::IteratorNext | hir::LangItem::IntoIterIntoIter => {
+ Some(ObligationCauseCode::ForLoopIterator)
+ }
+ hir::LangItem::TryTraitFromOutput
+ | hir::LangItem::TryTraitFromResidual
+ | hir::LangItem::TryTraitBranch => Some(ObligationCauseCode::QuestionMark),
+ _ => None,
+ };
+ if let Some(code) = code {
+ self.add_required_obligations_with_code(span, def_id, substs, move |_, _| code.clone());
+ } else {
+ self.add_required_obligations_for_hir(span, def_id, substs, hir_id);
+ }
+
(Res::Def(def_kind, def_id), ty)
}
@@ -986,7 +984,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
if found != self.tcx.types.unit {
return;
}
- if let ExprKind::MethodCall(path_segment, [rcvr, ..], _) = expr.kind {
+ if let ExprKind::MethodCall(path_segment, rcvr, ..) = expr.kind {
if self
.typeck_results
.borrow()
@@ -1273,7 +1271,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
&mut self,
param: &ty::GenericParamDef,
arg: &GenericArg<'_>,
- ) -> subst::GenericArg<'tcx> {
+ ) -> ty::GenericArg<'tcx> {
match (&param.kind, arg) {
(GenericParamDefKind::Lifetime, GenericArg::Lifetime(lt)) => {
<dyn AstConv<'_>>::ast_region_to_region(self.fcx, lt, Some(param)).into()
@@ -1297,10 +1295,10 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
fn inferred_kind(
&mut self,
- substs: Option<&[subst::GenericArg<'tcx>]>,
+ substs: Option<&[ty::GenericArg<'tcx>]>,
param: &ty::GenericParamDef,
infer_args: bool,
- ) -> subst::GenericArg<'tcx> {
+ ) -> ty::GenericArg<'tcx> {
let tcx = self.fcx.tcx();
match param.kind {
GenericParamDefKind::Lifetime => {
@@ -1359,7 +1357,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
// First, store the "user substs" for later.
self.write_user_type_annotation_from_substs(hir_id, def_id, substs, user_self_ty);
- self.add_required_obligations(span, def_id, &substs);
+ self.add_required_obligations_for_hir(span, def_id, &substs, hir_id);
// Substitute the values for the type parameters into the type of
// the referenced item.
@@ -1396,35 +1394,66 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
}
/// Add all the obligations that are required, substituting and normalized appropriately.
- pub(crate) fn add_required_obligations(
+ pub(crate) fn add_required_obligations_for_hir(
&self,
span: Span,
def_id: DefId,
- substs: &SubstsRef<'tcx>,
+ substs: SubstsRef<'tcx>,
+ hir_id: hir::HirId,
) {
- self.add_required_obligations_with_code(
- span,
- def_id,
- substs,
- traits::ItemObligation(def_id),
- )
+ self.add_required_obligations_with_code(span, def_id, substs, |idx, span| {
+ if span.is_dummy() {
+ ObligationCauseCode::ExprItemObligation(def_id, hir_id, idx)
+ } else {
+ ObligationCauseCode::ExprBindingObligation(def_id, span, hir_id, idx)
+ }
+ })
}
- #[tracing::instrument(level = "debug", skip(self, span, def_id, substs))]
+ #[instrument(level = "debug", skip(self, code, span, substs))]
fn add_required_obligations_with_code(
&self,
span: Span,
def_id: DefId,
- substs: &SubstsRef<'tcx>,
- code: ObligationCauseCode<'tcx>,
+ substs: SubstsRef<'tcx>,
+ code: impl Fn(usize, Span) -> ObligationCauseCode<'tcx>,
) {
+ let param_env = self.param_env;
+
+ let remap = match self.tcx.def_kind(def_id) {
+ // Associated consts have `Self: ~const Trait` bounds that should be satisfiable when
+ // `Self: Trait` is satisfied because it does not matter whether the impl is `const`.
+ // Therefore we have to remap the param env here to be non-const.
+ hir::def::DefKind::AssocConst => true,
+ hir::def::DefKind::AssocFn
+ if self.tcx.def_kind(self.tcx.parent(def_id)) == hir::def::DefKind::Trait =>
+ {
+ // N.B.: All callsites to this function involve checking a path expression.
+ //
+ // When instantiating a trait method as a function item, it does not actually matter whether
+ // the trait is `const` or not, or whether `where T: ~const Tr` needs to be satisfied as
+ // `const`. If we were to introduce instantiating trait methods as `const fn`s, we would
+ // check that after this, either via a bound `where F: ~const FnOnce` or when coercing to a
+ // `const fn` pointer.
+ //
+ // FIXME(fee1-dead) FIXME(const_trait_impl): update this doc when trait methods can satisfy
+ // `~const FnOnce` or can be coerced to `const fn` pointer.
+ true
+ }
+ _ => false,
+ };
let (bounds, _) = self.instantiate_bounds(span, def_id, &substs);
- for obligation in traits::predicates_for_generics(
- traits::ObligationCause::new(span, self.body_id, code),
- self.param_env,
+ for mut obligation in traits::predicates_for_generics(
+ |idx, predicate_span| {
+ traits::ObligationCause::new(span, self.body_id, code(idx, predicate_span))
+ },
+ param_env,
bounds,
) {
+ if remap {
+ obligation = obligation.without_const(self.tcx);
+ }
self.register_predicate(obligation);
}
}
@@ -1438,7 +1467,8 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
ty
} else {
if !self.is_tainted_by_errors() {
- self.emit_inference_failure_err((**self).body_id, sp, ty.into(), E0282, true)
+ self.err_ctxt()
+ .emit_inference_failure_err((**self).body_id, sp, ty.into(), E0282, true)
.emit();
}
let err = self.tcx.ty_error();
diff --git a/compiler/rustc_typeck/src/check/fn_ctxt/arg_matrix.rs b/compiler/rustc_hir_typeck/src/fn_ctxt/arg_matrix.rs
index 7602f2550..fc83994ca 100644
--- a/compiler/rustc_typeck/src/check/fn_ctxt/arg_matrix.rs
+++ b/compiler/rustc_hir_typeck/src/fn_ctxt/arg_matrix.rs
@@ -130,14 +130,17 @@ impl<'tcx> ArgMatrix<'tcx> {
let ai = &self.expected_indices;
let ii = &self.provided_indices;
+ // Issue 100478: when we end the iteration,
+ // `next_unmatched_idx` will point to the index of the first unmatched entry
+ let mut next_unmatched_idx = 0;
for i in 0..cmp::max(ai.len(), ii.len()) {
- // If we eliminate the last row, any left-over inputs are considered missing
+ // If we eliminate the last row, any left-over arguments are considered missing
if i >= mat.len() {
- return Some(Issue::Missing(i));
+ return Some(Issue::Missing(next_unmatched_idx));
}
- // If we eliminate the last column, any left-over arguments are extra
+ // If we eliminate the last column, any left-over inputs are extra
if mat[i].len() == 0 {
- return Some(Issue::Extra(i));
+ return Some(Issue::Extra(next_unmatched_idx));
}
// Make sure we don't pass the bounds of our matrix
@@ -145,6 +148,7 @@ impl<'tcx> ArgMatrix<'tcx> {
let is_input = i < ii.len();
if is_arg && is_input && matches!(mat[i][i], Compatibility::Compatible) {
// This is a satisfied input, so move along
+ next_unmatched_idx += 1;
continue;
}
@@ -163,7 +167,7 @@ impl<'tcx> ArgMatrix<'tcx> {
if is_input {
for j in 0..ai.len() {
// If we find at least one argument that could satisfy this input
- // this argument isn't useless
+ // this input isn't useless
if matches!(mat[i][j], Compatibility::Compatible) {
useless = false;
break;
@@ -232,8 +236,8 @@ impl<'tcx> ArgMatrix<'tcx> {
if matches!(c, Compatibility::Compatible) { Some(i) } else { None }
})
.collect();
- if compat.len() != 1 {
- // this could go into multiple slots, don't bother exploring both
+ if compat.len() < 1 {
+ // try to find a cycle even when this could go into multiple slots, see #101097
is_cycle = false;
break;
}
@@ -309,7 +313,8 @@ impl<'tcx> ArgMatrix<'tcx> {
}
while !self.provided_indices.is_empty() || !self.expected_indices.is_empty() {
- match self.find_issue() {
+ let res = self.find_issue();
+ match res {
Some(Issue::Invalid(idx)) => {
let compatibility = self.compatibility_matrix[idx][idx].clone();
let input_idx = self.provided_indices[idx];
@@ -364,7 +369,9 @@ impl<'tcx> ArgMatrix<'tcx> {
None => {
// We didn't find any issues, so we need to push the algorithm forward
// First, eliminate any arguments that currently satisfy their inputs
- for (inp, arg) in self.eliminate_satisfied() {
+ let eliminated = self.eliminate_satisfied();
+ assert!(!eliminated.is_empty(), "didn't eliminate any index in this round");
+ for (inp, arg) in eliminated {
matched_inputs[arg] = Some(inp);
}
}
diff --git a/compiler/rustc_hir_typeck/src/fn_ctxt/checks.rs b/compiler/rustc_hir_typeck/src/fn_ctxt/checks.rs
new file mode 100644
index 000000000..8e0fcb56c
--- /dev/null
+++ b/compiler/rustc_hir_typeck/src/fn_ctxt/checks.rs
@@ -0,0 +1,2236 @@
+use crate::coercion::CoerceMany;
+use crate::fn_ctxt::arg_matrix::{ArgMatrix, Compatibility, Error, ExpectedIdx, ProvidedIdx};
+use crate::gather_locals::Declaration;
+use crate::method::MethodCallee;
+use crate::Expectation::*;
+use crate::TupleArgumentsFlag::*;
+use crate::{
+ struct_span_err, BreakableCtxt, Diverges, Expectation, FnCtxt, LocalTy, Needs,
+ TupleArgumentsFlag,
+};
+use rustc_ast as ast;
+use rustc_data_structures::fx::FxHashSet;
+use rustc_errors::{pluralize, Applicability, Diagnostic, DiagnosticId, MultiSpan};
+use rustc_hir as hir;
+use rustc_hir::def::{CtorOf, DefKind, Res};
+use rustc_hir::def_id::DefId;
+use rustc_hir::{ExprKind, Node, QPath};
+use rustc_hir_analysis::astconv::AstConv;
+use rustc_hir_analysis::check::intrinsicck::InlineAsmCtxt;
+use rustc_hir_analysis::check::potentially_plural_count;
+use rustc_hir_analysis::structured_errors::StructuredDiagnostic;
+use rustc_index::vec::IndexVec;
+use rustc_infer::infer::error_reporting::{FailureCode, ObligationCauseExt};
+use rustc_infer::infer::type_variable::{TypeVariableOrigin, TypeVariableOriginKind};
+use rustc_infer::infer::InferOk;
+use rustc_infer::infer::TypeTrace;
+use rustc_middle::ty::adjustment::AllowTwoPhase;
+use rustc_middle::ty::visit::TypeVisitable;
+use rustc_middle::ty::{self, DefIdTree, IsSuggestable, Ty, TypeSuperVisitable, TypeVisitor};
+use rustc_session::Session;
+use rustc_span::symbol::Ident;
+use rustc_span::{self, sym, Span};
+use rustc_trait_selection::traits::{self, ObligationCauseCode, SelectionContext};
+
+use std::iter;
+use std::mem;
+use std::ops::ControlFlow;
+use std::slice;
+
+impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
+ pub(in super::super) fn check_casts(&mut self) {
+ // don't hold the borrow to deferred_cast_checks while checking to avoid borrow checker errors
+ // when writing to `self.param_env`.
+ let mut deferred_cast_checks = mem::take(&mut *self.deferred_cast_checks.borrow_mut());
+
+ debug!("FnCtxt::check_casts: {} deferred checks", deferred_cast_checks.len());
+ for cast in deferred_cast_checks.drain(..) {
+ let prev_env = self.param_env;
+ self.param_env = self.param_env.with_constness(cast.constness);
+
+ cast.check(self);
+
+ self.param_env = prev_env;
+ }
+
+ *self.deferred_cast_checks.borrow_mut() = deferred_cast_checks;
+ }
+
+ pub(in super::super) fn check_transmutes(&self) {
+ let mut deferred_transmute_checks = self.deferred_transmute_checks.borrow_mut();
+ debug!("FnCtxt::check_transmutes: {} deferred checks", deferred_transmute_checks.len());
+ for (from, to, hir_id) in deferred_transmute_checks.drain(..) {
+ self.check_transmute(from, to, hir_id);
+ }
+ }
+
+ pub(in super::super) fn check_asms(&self) {
+ let mut deferred_asm_checks = self.deferred_asm_checks.borrow_mut();
+ debug!("FnCtxt::check_asm: {} deferred checks", deferred_asm_checks.len());
+ for (asm, hir_id) in deferred_asm_checks.drain(..) {
+ let enclosing_id = self.tcx.hir().enclosing_body_owner(hir_id);
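+ // Operand types handed to the inline-asm checker must be fully inferred and have
+ // their regions erased; if inference variables remain, an error must already have
+ // been reported, so fall back to the error type.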
+ let get_operand_ty = |expr| {
+ let ty = self.typeck_results.borrow().expr_ty_adjusted(expr);
+ let ty = self.resolve_vars_if_possible(ty);
+ if ty.has_non_region_infer() {
+ assert!(self.is_tainted_by_errors());
+ self.tcx.ty_error()
+ } else {
+ self.tcx.erase_regions(ty)
+ }
+ };
+ InlineAsmCtxt::new_in_fn(self.tcx, self.param_env, get_operand_ty)
+ .check_asm(asm, self.tcx.hir().local_def_id_to_hir_id(enclosing_id));
+ }
+ }
+
+ pub(in super::super) fn check_method_argument_types(
+ &self,
+ sp: Span,
+ expr: &'tcx hir::Expr<'tcx>,
+ method: Result<MethodCallee<'tcx>, ()>,
+ args_no_rcvr: &'tcx [hir::Expr<'tcx>],
+ tuple_arguments: TupleArgumentsFlag,
+ expected: Expectation<'tcx>,
+ ) -> Ty<'tcx> {
+ let has_error = match method {
+ Ok(method) => method.substs.references_error() || method.sig.references_error(),
+ Err(_) => true,
+ };
+ if has_error {
+ let err_inputs = self.err_args(args_no_rcvr.len());
+
+ let err_inputs = match tuple_arguments {
+ DontTupleArguments => err_inputs,
+ TupleArguments => vec![self.tcx.intern_tup(&err_inputs)],
+ };
+
+ self.check_argument_types(
+ sp,
+ expr,
+ &err_inputs,
+ None,
+ args_no_rcvr,
+ false,
+ tuple_arguments,
+ method.ok().map(|method| method.def_id),
+ );
+ return self.tcx.ty_error();
+ }
+
+ let method = method.unwrap();
+ // HACK(eddyb) ignore self in the definition (see above).
+ let expected_input_tys = self.expected_inputs_for_expected_output(
+ sp,
+ expected,
+ method.sig.output(),
+ &method.sig.inputs()[1..],
+ );
+ self.check_argument_types(
+ sp,
+ expr,
+ &method.sig.inputs()[1..],
+ expected_input_tys,
+ args_no_rcvr,
+ method.sig.c_variadic,
+ tuple_arguments,
+ Some(method.def_id),
+ );
+ method.sig.output()
+ }
+
+ /// Generic function that factors out common logic from function calls,
+ /// method calls and overloaded operators.
+ pub(in super::super) fn check_argument_types(
+ &self,
+ // Span enclosing the call site
+ call_span: Span,
+ // Expression of the call site
+ call_expr: &'tcx hir::Expr<'tcx>,
+ // Types (as defined in the *signature* of the target function)
+ formal_input_tys: &[Ty<'tcx>],
+ // More specific expected types, after unifying with caller output types
+ expected_input_tys: Option<Vec<Ty<'tcx>>>,
+ // The expressions for each provided argument
+ provided_args: &'tcx [hir::Expr<'tcx>],
+ // Whether the function is variadic, for example when imported from C
+ c_variadic: bool,
+ // Whether the arguments have been bundled in a tuple (ex: closures)
+ tuple_arguments: TupleArgumentsFlag,
+ // The DefId for the function being called, for better error messages
+ fn_def_id: Option<DefId>,
+ ) {
+ let tcx = self.tcx;
+
+ // Conceptually, we've got some number of expected inputs, and some number of provided arguments
+ // and we can form a grid of whether each argument could satisfy a given input:
+ // in1 | in2 | in3 | ...
+ // arg1 ? | | |
+ // arg2 | ? | |
+ // arg3 | | ? |
+ // ...
+ // Initially, we just check the diagonal, because in the case of correct code
+ // these are the only checks that matter
+ // However, in the unhappy path, we'll fill in this whole grid to attempt to provide
+ // better error messages about invalid method calls.
+
+ // All the input types from the fn signature must outlive the call
+ // so as to validate implied bounds.
+ for (&fn_input_ty, arg_expr) in iter::zip(formal_input_tys, provided_args) {
+ self.register_wf_obligation(fn_input_ty.into(), arg_expr.span, traits::MiscObligation);
+ }
+
+ let mut err_code = "E0061";
+
+ // If the arguments should be wrapped in a tuple (ex: closures), unwrap them here
+ let (formal_input_tys, expected_input_tys) = if tuple_arguments == TupleArguments {
+ let tuple_type = self.structurally_resolved_type(call_span, formal_input_tys[0]);
+ match tuple_type.kind() {
+ // We expected a tuple and got a tuple
+ ty::Tuple(arg_types) => {
+ // Argument length differs
+ if arg_types.len() != provided_args.len() {
+ err_code = "E0057";
+ }
+ let expected_input_tys = match expected_input_tys {
+ Some(expected_input_tys) => match expected_input_tys.get(0) {
+ Some(ty) => match ty.kind() {
+ ty::Tuple(tys) => Some(tys.iter().collect()),
+ _ => None,
+ },
+ None => None,
+ },
+ None => None,
+ };
+ (arg_types.iter().collect(), expected_input_tys)
+ }
+ _ => {
+ // Otherwise, there's a mismatch, so clear out what we're expecting, and set
+ // our input types to err_args so we don't blow up the error messages
+ struct_span_err!(
+ tcx.sess,
+ call_span,
+ E0059,
+ "cannot use call notation; the first type parameter \
+ for the function trait is neither a tuple nor unit"
+ )
+ .emit();
+ (self.err_args(provided_args.len()), None)
+ }
+ }
+ } else {
+ (formal_input_tys.to_vec(), expected_input_tys)
+ };
+
+ // If there are no external expectations at the call site, just use the types from the function defn
+ let expected_input_tys = if let Some(expected_input_tys) = expected_input_tys {
+ assert_eq!(expected_input_tys.len(), formal_input_tys.len());
+ expected_input_tys
+ } else {
+ formal_input_tys.clone()
+ };
+
+ let minimum_input_count = expected_input_tys.len();
+ let provided_arg_count = provided_args.len();
+
+ let is_const_eval_select = matches!(fn_def_id, Some(def_id) if
+ self.tcx.def_kind(def_id) == hir::def::DefKind::Fn
+ && self.tcx.is_intrinsic(def_id)
+ && self.tcx.item_name(def_id) == sym::const_eval_select);
+
+ // We introduce a helper function to demand that a given argument satisfy a given input
+ // This is more complicated than just checking type equality, as arguments could be coerced
+ // This version writes those types back so further type checking uses the narrowed types
+ let demand_compatible = |idx| {
+ let formal_input_ty: Ty<'tcx> = formal_input_tys[idx];
+ let expected_input_ty: Ty<'tcx> = expected_input_tys[idx];
+ let provided_arg = &provided_args[idx];
+
+ debug!("checking argument {}: {:?} = {:?}", idx, provided_arg, formal_input_ty);
+
+ // We're on the happy path here, so we'll do a more involved check and write back types
+ // To check compatibility, we'll do 3 things:
+ // 1. Unify the provided argument with the expected type
+ let expectation = Expectation::rvalue_hint(self, expected_input_ty);
+
+ let checked_ty = self.check_expr_with_expectation(provided_arg, expectation);
+
+ // 2. Coerce to the most detailed type that could be coerced
+ // to, which is `expected_ty` if `rvalue_hint` returns an
+ // `ExpectHasType(expected_ty)`, or the `formal_ty` otherwise.
+ let coerced_ty = expectation.only_has_type(self).unwrap_or(formal_input_ty);
+
+ // Cause selection errors caused by resolving a single argument to point at the
+ // argument and not the call. This lets us customize the span pointed to in the
+ // fulfillment error to be more accurate.
+ let coerced_ty = self.resolve_vars_with_obligations(coerced_ty);
+
+ let coerce_error = self
+ .try_coerce(provided_arg, checked_ty, coerced_ty, AllowTwoPhase::Yes, None)
+ .err();
+
+ if coerce_error.is_some() {
+ return Compatibility::Incompatible(coerce_error);
+ }
+
+ // Check that the second and third arguments of `const_eval_select` are `FnDef`s, and additionally that
+ // the second argument is a `const fn`. The first argument must be a tuple, but this is already expressed
+ // in the function signature (`F: FnOnce<ARG>`), so I did not bother to add another check here.
+ //
+ // This check is here because there is currently no way to express a trait bound for `FnDef` types only.
+ if is_const_eval_select && (1..=2).contains(&idx) {
+ if let ty::FnDef(def_id, _) = checked_ty.kind() {
+ if idx == 1 && !self.tcx.is_const_fn_raw(*def_id) {
+ self.tcx
+ .sess
+ .struct_span_err(provided_arg.span, "this argument must be a `const fn`")
+ .help("consult the documentation on `const_eval_select` for more information")
+ .emit();
+ }
+ } else {
+ self.tcx
+ .sess
+ .struct_span_err(provided_arg.span, "this argument must be a function item")
+ .note(format!("expected a function item, found {checked_ty}"))
+ .help(
+ "consult the documentation on `const_eval_select` for more information",
+ )
+ .emit();
+ }
+ }
+
+ // 3. Check if the formal type is a supertype of the checked one
+ // and register any such obligations for future type checks
+ let supertype_error = self
+ .at(&self.misc(provided_arg.span), self.param_env)
+ .sup(formal_input_ty, coerced_ty);
+ let subtyping_error = match supertype_error {
+ Ok(InferOk { obligations, value: () }) => {
+ self.register_predicates(obligations);
+ None
+ }
+ Err(err) => Some(err),
+ };
+
+ // If neither check failed, the types are compatible
+ match subtyping_error {
+ None => Compatibility::Compatible,
+ Some(_) => Compatibility::Incompatible(subtyping_error),
+ }
+ };
+
+ // To start, we only care "along the diagonal", where we expect every
+ // provided arg to be in the right spot
+ let mut compatibility_diagonal =
+ vec![Compatibility::Incompatible(None); provided_args.len()];
+
+ // Keep track of whether we *could possibly* be satisfied, i.e. whether we're on the happy path.
+ // If the wrong number of arguments were supplied, we CAN'T be satisfied.
+ // If the function is c_variadic, the supplied argument count must be >= the minimum count from the
+ // signature; otherwise, the counts need to be identical, because Rust doesn't currently support
+ // variadic functions.
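+ // For illustration (hypothetical signatures): `fn f(x: u32)` called as `f()` or `f(1, 2)`
+ // can never be satisfied, while a C-variadic declaration like
+ // `fn printf(fmt: *const c_char, ...)` only needs at least its one declared argument.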
+ let mut call_appears_satisfied = if c_variadic {
+ provided_arg_count >= minimum_input_count
+ } else {
+ provided_arg_count == minimum_input_count
+ };
+
+ // Check the arguments.
+ // We do this in a pretty awful way: first we type-check any arguments
+ // that are not closures, then we type-check the closures. This is so
+ // that we have more information about the types of arguments when we
+ // type-check the functions. This isn't really the right way to do this.
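+ // For example (purely illustrative): in a call like `foo(0u32, |x| x + 1)` where
+ // `fn foo<T>(seed: T, f: impl Fn(T) -> T)`, checking the non-closure argument `0u32` first
+ // pins `T = u32`, giving the closure parameter a concrete type before its body is checked.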
+ for check_closures in [false, true] {
+ // More awful hacks: before we check argument types, try to do
+ // an "opportunistic" trait resolution of any trait bounds on
+ // the call. This helps coercions.
+ if check_closures {
+ self.select_obligations_where_possible(false, |_| {})
+ }
+
+ // Check each argument, to satisfy the input it was provided for
+ // Visually, we're traveling down the diagonal of the compatibility matrix
+ for (idx, arg) in provided_args.iter().enumerate() {
+ // Warn only for the first loop (the "no closures" one).
+ // Closure arguments themselves can't be diverging, but
+ // a previous argument can, e.g., `foo(panic!(), || {})`.
+ if !check_closures {
+ self.warn_if_unreachable(arg.hir_id, arg.span, "expression");
+ }
+
+ // For C-variadic functions, we don't have a declared type for all of
+ // the arguments, hence we only do our usual type checking on
+ // the arguments whose types we do know. However, we *can* check
+ // for unreachable expressions (see above).
+ // FIXME: the unreachable warning currently isn't emitted
+ if idx >= minimum_input_count {
+ continue;
+ }
+
+ let is_closure = matches!(arg.kind, ExprKind::Closure { .. });
+ if is_closure != check_closures {
+ continue;
+ }
+
+ let compatible = demand_compatible(idx);
+ let is_compatible = matches!(compatible, Compatibility::Compatible);
+ compatibility_diagonal[idx] = compatible;
+
+ if !is_compatible {
+ call_appears_satisfied = false;
+ }
+ }
+ }
+
+ if c_variadic && provided_arg_count < minimum_input_count {
+ err_code = "E0060";
+ }
+
+ for arg in provided_args.iter().skip(minimum_input_count) {
+ // Make sure we've checked this expr at least once.
+ let arg_ty = self.check_expr(&arg);
+
+ // If the function is c-style variadic, we skipped a bunch of arguments
+ // so we need to check those, and write out the types
+ // Ideally this would be folded into the above, for uniform style
+ // but c-variadic is already a corner case
+ if c_variadic {
+ fn variadic_error<'tcx>(
+ sess: &'tcx Session,
+ span: Span,
+ ty: Ty<'tcx>,
+ cast_ty: &str,
+ ) {
+ use rustc_hir_analysis::structured_errors::MissingCastForVariadicArg;
+
+ MissingCastForVariadicArg { sess, span, ty, cast_ty }.diagnostic().emit();
+ }
+
+ // There are a few types which get autopromoted when passed via varargs
+ // in C but we just error out instead and require explicit casts.
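+ // For example, C's default argument promotions widen `float` to `double` and small integer
+ // types to `int`, so passing an `f32` to a (hypothetical) `printf`-style vararg here requires
+ // an explicit `as c_double` cast rather than silent promotion.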
+ let arg_ty = self.structurally_resolved_type(arg.span, arg_ty);
+ match arg_ty.kind() {
+ ty::Float(ty::FloatTy::F32) => {
+ variadic_error(tcx.sess, arg.span, arg_ty, "c_double");
+ }
+ ty::Int(ty::IntTy::I8 | ty::IntTy::I16) | ty::Bool => {
+ variadic_error(tcx.sess, arg.span, arg_ty, "c_int");
+ }
+ ty::Uint(ty::UintTy::U8 | ty::UintTy::U16) => {
+ variadic_error(tcx.sess, arg.span, arg_ty, "c_uint");
+ }
+ ty::FnDef(..) => {
+ let ptr_ty = self.tcx.mk_fn_ptr(arg_ty.fn_sig(self.tcx));
+ let ptr_ty = self.resolve_vars_if_possible(ptr_ty);
+ variadic_error(tcx.sess, arg.span, arg_ty, &ptr_ty.to_string());
+ }
+ _ => {}
+ }
+ }
+ }
+
+ if !call_appears_satisfied {
+ let compatibility_diagonal = IndexVec::from_raw(compatibility_diagonal);
+ let provided_args = IndexVec::from_iter(provided_args.iter().take(if c_variadic {
+ minimum_input_count
+ } else {
+ provided_arg_count
+ }));
+ debug_assert_eq!(
+ formal_input_tys.len(),
+ expected_input_tys.len(),
+ "expected formal_input_tys to be the same size as expected_input_tys"
+ );
+ let formal_and_expected_inputs = IndexVec::from_iter(
+ formal_input_tys
+ .iter()
+ .copied()
+ .zip(expected_input_tys.iter().copied())
+ .map(|vars| self.resolve_vars_if_possible(vars)),
+ );
+
+ self.report_arg_errors(
+ compatibility_diagonal,
+ formal_and_expected_inputs,
+ provided_args,
+ c_variadic,
+ err_code,
+ fn_def_id,
+ call_span,
+ call_expr,
+ );
+ }
+ }
+
+ fn report_arg_errors(
+ &self,
+ compatibility_diagonal: IndexVec<ProvidedIdx, Compatibility<'tcx>>,
+ formal_and_expected_inputs: IndexVec<ExpectedIdx, (Ty<'tcx>, Ty<'tcx>)>,
+ provided_args: IndexVec<ProvidedIdx, &'tcx hir::Expr<'tcx>>,
+ c_variadic: bool,
+ err_code: &str,
+ fn_def_id: Option<DefId>,
+ call_span: Span,
+ call_expr: &hir::Expr<'tcx>,
+ ) {
+ // Next, let's construct the error
+ let (error_span, full_call_span, ctor_of, is_method) = match &call_expr.kind {
+ hir::ExprKind::Call(
+ hir::Expr { hir_id, span, kind: hir::ExprKind::Path(qpath), .. },
+ _,
+ ) => {
+ if let Res::Def(DefKind::Ctor(of, _), _) =
+ self.typeck_results.borrow().qpath_res(qpath, *hir_id)
+ {
+ (call_span, *span, Some(of), false)
+ } else {
+ (call_span, *span, None, false)
+ }
+ }
+ hir::ExprKind::Call(hir::Expr { span, .. }, _) => (call_span, *span, None, false),
+ hir::ExprKind::MethodCall(path_segment, _, _, span) => {
+ let ident_span = path_segment.ident.span;
+ let ident_span = if let Some(args) = path_segment.args {
+ ident_span.with_hi(args.span_ext.hi())
+ } else {
+ ident_span
+ };
+ // methods are never ctors
+ (*span, ident_span, None, true)
+ }
+ k => span_bug!(call_span, "checking argument types on a non-call: `{:?}`", k),
+ };
+ let args_span = error_span.trim_start(full_call_span).unwrap_or(error_span);
+ let call_name = match ctor_of {
+ Some(CtorOf::Struct) => "struct",
+ Some(CtorOf::Variant) => "enum variant",
+ None => "function",
+ };
+
+ // Don't print if it has error types or is just plain `_`
+ fn has_error_or_infer<'tcx>(tys: impl IntoIterator<Item = Ty<'tcx>>) -> bool {
+ tys.into_iter().any(|ty| ty.references_error() || ty.is_ty_var())
+ }
+
+ self.set_tainted_by_errors();
+ let tcx = self.tcx;
+
+ // Get the argument span in the context of the call span so that
+ // suggestions and labels are (more) correct when an arg is a
+ // macro invocation.
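+ // e.g., if an argument is produced by a (hypothetical) `some_macro!(..)` invocation, we'd
+ // rather label the invocation site inside the call than a span inside the macro's expansion.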
+ let normalize_span = |span: Span| -> Span {
+ let normalized_span = span.find_ancestor_inside(error_span).unwrap_or(span);
+ // Sometimes macros mess up the spans, so do not normalize the
+ // arg span to equal the error span, because that's less useful
+ // than pointing out the arg expr in the wrong context.
+ if normalized_span.source_equal(error_span) { span } else { normalized_span }
+ };
+
+ // Precompute the provided types and spans, since that's all we typically need for below
+ let provided_arg_tys: IndexVec<ProvidedIdx, (Ty<'tcx>, Span)> = provided_args
+ .iter()
+ .map(|expr| {
+ let ty = self
+ .typeck_results
+ .borrow()
+ .expr_ty_adjusted_opt(*expr)
+ .unwrap_or_else(|| tcx.ty_error());
+ (self.resolve_vars_if_possible(ty), normalize_span(expr.span))
+ })
+ .collect();
+ let callee_expr = match &call_expr.peel_blocks().kind {
+ hir::ExprKind::Call(callee, _) => Some(*callee),
+ hir::ExprKind::MethodCall(_, receiver, ..) => {
+ if let Some((DefKind::AssocFn, def_id)) =
+ self.typeck_results.borrow().type_dependent_def(call_expr.hir_id)
+ && let Some(assoc) = tcx.opt_associated_item(def_id)
+ && assoc.fn_has_self_parameter
+ {
+ Some(*receiver)
+ } else {
+ None
+ }
+ }
+ _ => None,
+ };
+ let callee_ty = callee_expr
+ .and_then(|callee_expr| self.typeck_results.borrow().expr_ty_adjusted_opt(callee_expr));
+
+ // A "softer" version of the `demand_compatible`, which checks types without persisting them,
+ // and treats error types differently
+ // This will allow us to "probe" for other argument orders that would likely have been correct
+ let check_compatible = |provided_idx: ProvidedIdx, expected_idx: ExpectedIdx| {
+ if provided_idx.as_usize() == expected_idx.as_usize() {
+ return compatibility_diagonal[provided_idx].clone();
+ }
+
+ let (formal_input_ty, expected_input_ty) = formal_and_expected_inputs[expected_idx];
+ // If either is an error type, we defy the usual convention and consider them to *not* be
+ // coercible. This prevents our error message heuristic from trying to pass errors into
+ // every argument.
+ if (formal_input_ty, expected_input_ty).references_error() {
+ return Compatibility::Incompatible(None);
+ }
+
+ let (arg_ty, arg_span) = provided_arg_tys[provided_idx];
+
+ let expectation = Expectation::rvalue_hint(self, expected_input_ty);
+ let coerced_ty = expectation.only_has_type(self).unwrap_or(formal_input_ty);
+ let can_coerce = self.can_coerce(arg_ty, coerced_ty);
+ if !can_coerce {
+ return Compatibility::Incompatible(Some(ty::error::TypeError::Sorts(
+ ty::error::ExpectedFound::new(true, coerced_ty, arg_ty),
+ )));
+ }
+
+ // Using probe here, since we don't want this subtyping to affect inference.
+ let subtyping_error = self.probe(|_| {
+ self.at(&self.misc(arg_span), self.param_env).sup(formal_input_ty, coerced_ty).err()
+ });
+
+ // Same as above: if either the coerce type or the checked type is an error type,
+ // consider them *not* compatible.
+ let references_error = (coerced_ty, arg_ty).references_error();
+ match (references_error, subtyping_error) {
+ (false, None) => Compatibility::Compatible,
+ (_, subtyping_error) => Compatibility::Incompatible(subtyping_error),
+ }
+ };
+
+ // The algorithm here is inspired by Levenshtein distance and longest common subsequence.
+ // We'll try to detect 4 different types of mistakes:
+ // - An extra parameter has been provided that doesn't satisfy *any* of the other inputs
+ // - An input is missing, which isn't satisfied by *any* of the other arguments
+ // - Some number of arguments have been provided in the wrong order
+ // - A type is straight-up invalid
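+ // For illustration (hypothetical signature, not from this crate): with `fn f(a: u32, b: &str)`,
+ // the call `f("x", 1)` is two swapped arguments, `f(1)` is missing an input, `f(1, "x", true)`
+ // has an extra argument, and `f(1, 2)` passes a value of the wrong type for `b`.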
+
+ // First, let's find the errors
+ let (mut errors, matched_inputs) =
+ ArgMatrix::new(provided_args.len(), formal_and_expected_inputs.len(), check_compatible)
+ .find_errors();
+
+ // First, check if we just need to wrap some arguments in a tuple.
+ if let Some((mismatch_idx, terr)) =
+ compatibility_diagonal.iter().enumerate().find_map(|(i, c)| {
+ if let Compatibility::Incompatible(Some(terr)) = c {
+ Some((i, *terr))
+ } else {
+ None
+ }
+ })
+ {
+ // Is the first bad expected argument a tuple?
+ // Do we have as many extra provided arguments as the tuple's length?
+ // If so, we might have just forgotten to wrap some args in a tuple.
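+ // For example (hypothetical signature): `fn f(t: (u32, u32), s: &str)` called as
+ // `f(1, 2, "x")` should get the suggestion to write `f((1, 2), "x")` instead.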
+ if let Some(ty::Tuple(tys)) =
+ formal_and_expected_inputs.get(mismatch_idx.into()).map(|tys| tys.1.kind())
+ // If the tuple is unit, we're not actually wrapping any arguments.
+ && !tys.is_empty()
+ && provided_arg_tys.len() == formal_and_expected_inputs.len() - 1 + tys.len()
+ {
+ // Wrap up the N provided arguments starting at this position in a tuple.
+ let provided_as_tuple = tcx.mk_tup(
+ provided_arg_tys.iter().map(|(ty, _)| *ty).skip(mismatch_idx).take(tys.len()),
+ );
+
+ let mut satisfied = true;
+ // Check if the newly wrapped tuple + rest of the arguments are compatible.
+ for ((_, expected_ty), provided_ty) in std::iter::zip(
+ formal_and_expected_inputs.iter().skip(mismatch_idx),
+ [provided_as_tuple].into_iter().chain(
+ provided_arg_tys.iter().map(|(ty, _)| *ty).skip(mismatch_idx + tys.len()),
+ ),
+ ) {
+ if !self.can_coerce(provided_ty, *expected_ty) {
+ satisfied = false;
+ break;
+ }
+ }
+
+ // If they're compatible, suggest wrapping the arguments in a tuple, and we're done!
+ // Take some care with spans, so we don't suggest wrapping a macro's
+ // innards in parentheses, for example.
+ if satisfied
+ && let Some((_, lo)) =
+ provided_arg_tys.get(ProvidedIdx::from_usize(mismatch_idx))
+ && let Some((_, hi)) =
+ provided_arg_tys.get(ProvidedIdx::from_usize(mismatch_idx + tys.len() - 1))
+ {
+ let mut err;
+ if tys.len() == 1 {
+ // A tuple wrap suggestion actually occurs within,
+ // so don't do anything special here.
+ err = self.err_ctxt().report_and_explain_type_error(
+ TypeTrace::types(
+ &self.misc(*lo),
+ true,
+ formal_and_expected_inputs[mismatch_idx.into()].1,
+ provided_arg_tys[mismatch_idx.into()].0,
+ ),
+ terr,
+ );
+ err.span_label(
+ full_call_span,
+ format!("arguments to this {} are incorrect", call_name),
+ );
+ } else {
+ err = tcx.sess.struct_span_err_with_code(
+ full_call_span,
+ &format!(
+ "this {} takes {}{} but {} {} supplied",
+ call_name,
+ if c_variadic { "at least " } else { "" },
+ potentially_plural_count(
+ formal_and_expected_inputs.len(),
+ "argument"
+ ),
+ potentially_plural_count(provided_args.len(), "argument"),
+ pluralize!("was", provided_args.len())
+ ),
+ DiagnosticId::Error(err_code.to_owned()),
+ );
+ err.multipart_suggestion_verbose(
+ "wrap these arguments in parentheses to construct a tuple",
+ vec![
+ (lo.shrink_to_lo(), "(".to_string()),
+ (hi.shrink_to_hi(), ")".to_string()),
+ ],
+ Applicability::MachineApplicable,
+ );
+ };
+ self.label_fn_like(
+ &mut err,
+ fn_def_id,
+ callee_ty,
+ Some(mismatch_idx),
+ is_method,
+ );
+ err.emit();
+ return;
+ }
+ }
+ }
+
+ // Okay, so here's where it gets complicated about what errors
+ // we emit and how.
+ // There are 3 different "types" of errors we might encounter.
+ // 1) Missing/extra/swapped arguments
+ // 2) Valid but incorrect arguments
+ // 3) Invalid arguments
+ // - Currently this seems to only come up with `CyclicTy`
+ //
+ // We first need to go through, remove the errors from (3), and emit those
+ // as their own error, particularly since their error code and
+ // message are special. As far as we can tell, we *must* emit these
+ // here (vs somewhere prior to this function) since the arguments
+ // become invalid *because* of how they get used in the function.
+ // It is what it is.
+
+ if errors.is_empty() {
+ if cfg!(debug_assertions) {
+ span_bug!(error_span, "expected errors from argument matrix");
+ } else {
+ tcx.sess
+ .struct_span_err(
+ error_span,
+ "argument type mismatch was detected, \
+ but rustc had trouble determining where",
+ )
+ .note(
+ "we would appreciate a bug report: \
+ https://github.com/rust-lang/rust/issues/new",
+ )
+ .emit();
+ }
+ return;
+ }
+
+ errors.drain_filter(|error| {
+ let Error::Invalid(provided_idx, expected_idx, Compatibility::Incompatible(Some(e))) = error else { return false };
+ let (provided_ty, provided_span) = provided_arg_tys[*provided_idx];
+ let (expected_ty, _) = formal_and_expected_inputs[*expected_idx];
+ let cause = &self.misc(provided_span);
+ let trace = TypeTrace::types(cause, true, expected_ty, provided_ty);
+ if !matches!(trace.cause.as_failure_code(*e), FailureCode::Error0308(_)) {
+ self.err_ctxt().report_and_explain_type_error(trace, *e).emit();
+ return true;
+ }
+ false
+ });
+
+ // We're done if we found errors, but we already emitted them.
+ if errors.is_empty() {
+ return;
+ }
+
+ // Okay, now that we've emitted the special errors separately, we
+ // are only left with missing/extra/swapped and mismatched arguments, both of
+ // which can be collated pretty easily if needed.
+
+ // Next special case: if there is only one "Incompatible" error, just emit that
+ if let [
+ Error::Invalid(provided_idx, expected_idx, Compatibility::Incompatible(Some(err))),
+ ] = &errors[..]
+ {
+ let (formal_ty, expected_ty) = formal_and_expected_inputs[*expected_idx];
+ let (provided_ty, provided_arg_span) = provided_arg_tys[*provided_idx];
+ let cause = &self.misc(provided_arg_span);
+ let trace = TypeTrace::types(cause, true, expected_ty, provided_ty);
+ let mut err = self.err_ctxt().report_and_explain_type_error(trace, *err);
+ self.emit_coerce_suggestions(
+ &mut err,
+ &provided_args[*provided_idx],
+ provided_ty,
+ Expectation::rvalue_hint(self, expected_ty)
+ .only_has_type(self)
+ .unwrap_or(formal_ty),
+ None,
+ None,
+ );
+ err.span_label(
+ full_call_span,
+ format!("arguments to this {} are incorrect", call_name),
+ );
+ // Call out where the function is defined
+ self.label_fn_like(
+ &mut err,
+ fn_def_id,
+ callee_ty,
+ Some(expected_idx.as_usize()),
+ is_method,
+ );
+ err.emit();
+ return;
+ }
+
+ let mut err = if formal_and_expected_inputs.len() == provided_args.len() {
+ struct_span_err!(
+ tcx.sess,
+ full_call_span,
+ E0308,
+ "arguments to this {} are incorrect",
+ call_name,
+ )
+ } else {
+ tcx.sess.struct_span_err_with_code(
+ full_call_span,
+ &format!(
+ "this {} takes {}{} but {} {} supplied",
+ call_name,
+ if c_variadic { "at least " } else { "" },
+ potentially_plural_count(formal_and_expected_inputs.len(), "argument"),
+ potentially_plural_count(provided_args.len(), "argument"),
+ pluralize!("was", provided_args.len())
+ ),
+ DiagnosticId::Error(err_code.to_owned()),
+ )
+ };
+
+ // As we encounter issues, keep track of what we want to provide for the suggestion
+ let mut labels = vec![];
+ // If there is a single error, we give a specific suggestion; otherwise, we change to
+ // "did you mean" with the suggested function call
+ enum SuggestionText {
+ None,
+ Provide(bool),
+ Remove(bool),
+ Swap,
+ Reorder,
+ DidYouMean,
+ }
+ let mut suggestion_text = SuggestionText::None;
+
+ let mut errors = errors.into_iter().peekable();
+ while let Some(error) = errors.next() {
+ match error {
+ Error::Invalid(provided_idx, expected_idx, compatibility) => {
+ let (formal_ty, expected_ty) = formal_and_expected_inputs[expected_idx];
+ let (provided_ty, provided_span) = provided_arg_tys[provided_idx];
+ if let Compatibility::Incompatible(error) = compatibility {
+ let cause = &self.misc(provided_span);
+ let trace = TypeTrace::types(cause, true, expected_ty, provided_ty);
+ if let Some(e) = error {
+ self.err_ctxt().note_type_err(
+ &mut err,
+ &trace.cause,
+ None,
+ Some(trace.values),
+ e,
+ false,
+ true,
+ );
+ }
+ }
+
+ self.emit_coerce_suggestions(
+ &mut err,
+ &provided_args[provided_idx],
+ provided_ty,
+ Expectation::rvalue_hint(self, expected_ty)
+ .only_has_type(self)
+ .unwrap_or(formal_ty),
+ None,
+ None,
+ );
+ }
+ Error::Extra(arg_idx) => {
+ let (provided_ty, provided_span) = provided_arg_tys[arg_idx];
+ let provided_ty_name = if !has_error_or_infer([provided_ty]) {
+ // FIXME: not suggestable, use something else
+ format!(" of type `{}`", provided_ty)
+ } else {
+ "".to_string()
+ };
+ labels
+ .push((provided_span, format!("argument{} unexpected", provided_ty_name)));
+ suggestion_text = match suggestion_text {
+ SuggestionText::None => SuggestionText::Remove(false),
+ SuggestionText::Remove(_) => SuggestionText::Remove(true),
+ _ => SuggestionText::DidYouMean,
+ };
+ }
+ Error::Missing(expected_idx) => {
+ // If there are multiple missing arguments adjacent to each other,
+ // then we can provide a single error.
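+ // e.g., for a hypothetical `fn f(a: u32, b: u32, c: u32)`, calling `f()` is reported once as
+ // "three arguments of type `u32`, `u32`, and `u32` are missing" rather than as three errors.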
+
+ let mut missing_idxs = vec![expected_idx];
+ while let Some(e) = errors.next_if(|e| {
+ matches!(e, Error::Missing(next_expected_idx)
+ if *next_expected_idx == *missing_idxs.last().unwrap() + 1)
+ }) {
+ match e {
+ Error::Missing(expected_idx) => missing_idxs.push(expected_idx),
+ _ => unreachable!(),
+ }
+ }
+
+ // NOTE: Because we might be re-arranging arguments, might have extra
+ // arguments, etc., it's hard to *really* know where we should provide
+ // this error label, so as a heuristic, we point to the provided arg, or
+ // to the call if the missing input's index is past the provided args.
+ match &missing_idxs[..] {
+ &[expected_idx] => {
+ let (_, input_ty) = formal_and_expected_inputs[expected_idx];
+ let span = if let Some((_, arg_span)) =
+ provided_arg_tys.get(expected_idx.to_provided_idx())
+ {
+ *arg_span
+ } else {
+ args_span
+ };
+ let rendered = if !has_error_or_infer([input_ty]) {
+ format!(" of type `{}`", input_ty)
+ } else {
+ "".to_string()
+ };
+ labels.push((span, format!("an argument{} is missing", rendered)));
+ suggestion_text = match suggestion_text {
+ SuggestionText::None => SuggestionText::Provide(false),
+ SuggestionText::Provide(_) => SuggestionText::Provide(true),
+ _ => SuggestionText::DidYouMean,
+ };
+ }
+ &[first_idx, second_idx] => {
+ let (_, first_expected_ty) = formal_and_expected_inputs[first_idx];
+ let (_, second_expected_ty) = formal_and_expected_inputs[second_idx];
+ let span = if let (Some((_, first_span)), Some((_, second_span))) = (
+ provided_arg_tys.get(first_idx.to_provided_idx()),
+ provided_arg_tys.get(second_idx.to_provided_idx()),
+ ) {
+ first_span.to(*second_span)
+ } else {
+ args_span
+ };
+ let rendered =
+ if !has_error_or_infer([first_expected_ty, second_expected_ty]) {
+ format!(
+ " of type `{}` and `{}`",
+ first_expected_ty, second_expected_ty
+ )
+ } else {
+ "".to_string()
+ };
+ labels.push((span, format!("two arguments{} are missing", rendered)));
+ suggestion_text = match suggestion_text {
+ SuggestionText::None | SuggestionText::Provide(_) => {
+ SuggestionText::Provide(true)
+ }
+ _ => SuggestionText::DidYouMean,
+ };
+ }
+ &[first_idx, second_idx, third_idx] => {
+ let (_, first_expected_ty) = formal_and_expected_inputs[first_idx];
+ let (_, second_expected_ty) = formal_and_expected_inputs[second_idx];
+ let (_, third_expected_ty) = formal_and_expected_inputs[third_idx];
+ let span = if let (Some((_, first_span)), Some((_, third_span))) = (
+ provided_arg_tys.get(first_idx.to_provided_idx()),
+ provided_arg_tys.get(third_idx.to_provided_idx()),
+ ) {
+ first_span.to(*third_span)
+ } else {
+ args_span
+ };
+ let rendered = if !has_error_or_infer([
+ first_expected_ty,
+ second_expected_ty,
+ third_expected_ty,
+ ]) {
+ format!(
+ " of type `{}`, `{}`, and `{}`",
+ first_expected_ty, second_expected_ty, third_expected_ty
+ )
+ } else {
+ "".to_string()
+ };
+ labels.push((span, format!("three arguments{} are missing", rendered)));
+ suggestion_text = match suggestion_text {
+ SuggestionText::None | SuggestionText::Provide(_) => {
+ SuggestionText::Provide(true)
+ }
+ _ => SuggestionText::DidYouMean,
+ };
+ }
+ missing_idxs => {
+ let first_idx = *missing_idxs.first().unwrap();
+ let last_idx = *missing_idxs.last().unwrap();
+ // NOTE: Because we might be re-arranging arguments, might have extra arguments, etc.,
+ // it's hard to *really* know where we should provide this error label, so this is a
+ // decent heuristic.
+ let span = if let (Some((_, first_span)), Some((_, last_span))) = (
+ provided_arg_tys.get(first_idx.to_provided_idx()),
+ provided_arg_tys.get(last_idx.to_provided_idx()),
+ ) {
+ first_span.to(*last_span)
+ } else {
+ args_span
+ };
+ labels.push((span, format!("multiple arguments are missing")));
+ suggestion_text = match suggestion_text {
+ SuggestionText::None | SuggestionText::Provide(_) => {
+ SuggestionText::Provide(true)
+ }
+ _ => SuggestionText::DidYouMean,
+ };
+ }
+ }
+ }
+ Error::Swap(
+ first_provided_idx,
+ second_provided_idx,
+ first_expected_idx,
+ second_expected_idx,
+ ) => {
+ let (first_provided_ty, first_span) = provided_arg_tys[first_provided_idx];
+ let (_, first_expected_ty) = formal_and_expected_inputs[first_expected_idx];
+ let first_provided_ty_name = if !has_error_or_infer([first_provided_ty]) {
+ format!(", found `{}`", first_provided_ty)
+ } else {
+ String::new()
+ };
+ labels.push((
+ first_span,
+ format!("expected `{}`{}", first_expected_ty, first_provided_ty_name),
+ ));
+
+ let (second_provided_ty, second_span) = provided_arg_tys[second_provided_idx];
+ let (_, second_expected_ty) = formal_and_expected_inputs[second_expected_idx];
+ let second_provided_ty_name = if !has_error_or_infer([second_provided_ty]) {
+ format!(", found `{}`", second_provided_ty)
+ } else {
+ String::new()
+ };
+ labels.push((
+ second_span,
+ format!("expected `{}`{}", second_expected_ty, second_provided_ty_name),
+ ));
+
+ suggestion_text = match suggestion_text {
+ SuggestionText::None => SuggestionText::Swap,
+ _ => SuggestionText::DidYouMean,
+ };
+ }
+ Error::Permutation(args) => {
+ for (dst_arg, dest_input) in args {
+ let (_, expected_ty) = formal_and_expected_inputs[dst_arg];
+ let (provided_ty, provided_span) = provided_arg_tys[dest_input];
+ let provided_ty_name = if !has_error_or_infer([provided_ty]) {
+ format!(", found `{}`", provided_ty)
+ } else {
+ String::new()
+ };
+ labels.push((
+ provided_span,
+ format!("expected `{}`{}", expected_ty, provided_ty_name),
+ ));
+ }
+
+ suggestion_text = match suggestion_text {
+ SuggestionText::None => SuggestionText::Reorder,
+ _ => SuggestionText::DidYouMean,
+ };
+ }
+ }
+ }
+
+ // If we have at most 5 things to say, it would be useful to call out exactly what's wrong
+ if labels.len() <= 5 {
+ for (span, label) in labels {
+ err.span_label(span, label);
+ }
+ }
+
+ // Call out where the function is defined
+ self.label_fn_like(&mut err, fn_def_id, callee_ty, None, is_method);
+
+ // And add a suggestion block for all of the parameters
+ let suggestion_text = match suggestion_text {
+ SuggestionText::None => None,
+ SuggestionText::Provide(plural) => {
+ Some(format!("provide the argument{}", if plural { "s" } else { "" }))
+ }
+ SuggestionText::Remove(plural) => {
+ Some(format!("remove the extra argument{}", if plural { "s" } else { "" }))
+ }
+ SuggestionText::Swap => Some("swap these arguments".to_string()),
+ SuggestionText::Reorder => Some("reorder these arguments".to_string()),
+ SuggestionText::DidYouMean => Some("did you mean".to_string()),
+ };
+ if let Some(suggestion_text) = suggestion_text {
+ let source_map = self.sess().source_map();
+ let (mut suggestion, suggestion_span) =
+ if let Some(call_span) = full_call_span.find_ancestor_inside(error_span) {
+ ("(".to_string(), call_span.shrink_to_hi().to(error_span.shrink_to_hi()))
+ } else {
+ (
+ format!(
+ "{}(",
+ source_map.span_to_snippet(full_call_span).unwrap_or_else(|_| {
+ fn_def_id.map_or("".to_string(), |fn_def_id| {
+ tcx.item_name(fn_def_id).to_string()
+ })
+ })
+ ),
+ error_span,
+ )
+ };
+ let mut needs_comma = false;
+ for (expected_idx, provided_idx) in matched_inputs.iter_enumerated() {
+ if needs_comma {
+ suggestion += ", ";
+ } else {
+ needs_comma = true;
+ }
+ let suggestion_text = if let Some(provided_idx) = provided_idx
+ && let (_, provided_span) = provided_arg_tys[*provided_idx]
+ && let Ok(arg_text) = source_map.span_to_snippet(provided_span)
+ {
+ arg_text
+ } else {
+ // Propose a placeholder of the correct type
+ let (_, expected_ty) = formal_and_expected_inputs[expected_idx];
+ if expected_ty.is_unit() {
+ "()".to_string()
+ } else if expected_ty.is_suggestable(tcx, false) {
+ format!("/* {} */", expected_ty)
+ } else {
+ "/* value */".to_string()
+ }
+ };
+ suggestion += &suggestion_text;
+ }
+ suggestion += ")";
+ err.span_suggestion_verbose(
+ suggestion_span,
+ &suggestion_text,
+ suggestion,
+ Applicability::HasPlaceholders,
+ );
+ }
+
+ err.emit();
+ }
+
+ // AST fragment checking
+ pub(in super::super) fn check_lit(
+ &self,
+ lit: &hir::Lit,
+ expected: Expectation<'tcx>,
+ ) -> Ty<'tcx> {
+ let tcx = self.tcx;
+
+ match lit.node {
+ ast::LitKind::Str(..) => tcx.mk_static_str(),
+ ast::LitKind::ByteStr(ref v) => {
+ tcx.mk_imm_ref(tcx.lifetimes.re_static, tcx.mk_array(tcx.types.u8, v.len() as u64))
+ }
+ ast::LitKind::Byte(_) => tcx.types.u8,
+ ast::LitKind::Char(_) => tcx.types.char,
+ ast::LitKind::Int(_, ast::LitIntType::Signed(t)) => tcx.mk_mach_int(ty::int_ty(t)),
+ ast::LitKind::Int(_, ast::LitIntType::Unsigned(t)) => tcx.mk_mach_uint(ty::uint_ty(t)),
+ ast::LitKind::Int(_, ast::LitIntType::Unsuffixed) => {
+ let opt_ty = expected.to_option(self).and_then(|ty| match ty.kind() {
+ ty::Int(_) | ty::Uint(_) => Some(ty),
+ ty::Char => Some(tcx.types.u8),
+ ty::RawPtr(..) => Some(tcx.types.usize),
+ ty::FnDef(..) | ty::FnPtr(_) => Some(tcx.types.usize),
+ _ => None,
+ });
+ opt_ty.unwrap_or_else(|| self.next_int_var())
+ }
+ ast::LitKind::Float(_, ast::LitFloatType::Suffixed(t)) => {
+ tcx.mk_mach_float(ty::float_ty(t))
+ }
+ ast::LitKind::Float(_, ast::LitFloatType::Unsuffixed) => {
+ let opt_ty = expected.to_option(self).and_then(|ty| match ty.kind() {
+ ty::Float(_) => Some(ty),
+ _ => None,
+ });
+ opt_ty.unwrap_or_else(|| self.next_float_var())
+ }
+ ast::LitKind::Bool(_) => tcx.types.bool,
+ ast::LitKind::Err => tcx.ty_error(),
+ }
+ }
+
+ pub fn check_struct_path(
+ &self,
+ qpath: &QPath<'_>,
+ hir_id: hir::HirId,
+ ) -> Option<(&'tcx ty::VariantDef, Ty<'tcx>)> {
+ let path_span = qpath.span();
+ let (def, ty) = self.finish_resolving_struct_path(qpath, path_span, hir_id);
+ let variant = match def {
+ Res::Err => {
+ self.set_tainted_by_errors();
+ return None;
+ }
+ Res::Def(DefKind::Variant, _) => match ty.kind() {
+ ty::Adt(adt, substs) => Some((adt.variant_of_res(def), adt.did(), substs)),
+ _ => bug!("unexpected type: {:?}", ty),
+ },
+ Res::Def(DefKind::Struct | DefKind::Union | DefKind::TyAlias | DefKind::AssocTy, _)
+ | Res::SelfTyParam { .. }
+ | Res::SelfTyAlias { .. } => match ty.kind() {
+ ty::Adt(adt, substs) if !adt.is_enum() => {
+ Some((adt.non_enum_variant(), adt.did(), substs))
+ }
+ _ => None,
+ },
+ _ => bug!("unexpected definition: {:?}", def),
+ };
+
+ if let Some((variant, did, substs)) = variant {
+ debug!("check_struct_path: did={:?} substs={:?}", did, substs);
+ self.write_user_type_annotation_from_substs(hir_id, did, substs, None);
+
+ // Check bounds on type arguments used in the path.
+ self.add_required_obligations_for_hir(path_span, did, substs, hir_id);
+
+ Some((variant, ty))
+ } else {
+ match ty.kind() {
+ ty::Error(_) => {
+ // E0071 might be caused by a spelling error, which will have
+ // already caused an error message and probably a suggestion
+ // elsewhere. Refrain from emitting more unhelpful errors here
+ // (issue #88844).
+ }
+ _ => {
+ struct_span_err!(
+ self.tcx.sess,
+ path_span,
+ E0071,
+ "expected struct, variant or union type, found {}",
+ ty.sort_string(self.tcx)
+ )
+ .span_label(path_span, "not a struct")
+ .emit();
+ }
+ }
+ None
+ }
+ }
+
+ pub fn check_decl_initializer(
+ &self,
+ hir_id: hir::HirId,
+ pat: &'tcx hir::Pat<'tcx>,
+ init: &'tcx hir::Expr<'tcx>,
+ ) -> Ty<'tcx> {
+ // FIXME(tschottdorf): `contains_explicit_ref_binding()` must be removed
+ // for #42640 (default match binding modes).
+ //
+ // See #44848.
+ let ref_bindings = pat.contains_explicit_ref_binding();
+
+ let local_ty = self.local_ty(init.span, hir_id).revealed_ty;
+ if let Some(m) = ref_bindings {
+ // Somewhat subtle: if we have a `ref` binding in the pattern,
+ // we want to avoid introducing coercions for the RHS. This is
+ // both because it helps preserve sanity and, in the case of
+ // ref mut, for soundness (issue #23116). In particular, in
+ // the latter case, we need to be clear that the type of the
+ // referent for the reference that results is *equal to* the
+ // type of the place it is referencing, and not some
+ // supertype thereof.
+ let init_ty = self.check_expr_with_needs(init, Needs::maybe_mut_place(m));
+ self.demand_eqtype(init.span, local_ty, init_ty);
+ init_ty
+ } else {
+ self.check_expr_coercable_to_type(init, local_ty, None)
+ }
+ }
+
+ pub(in super::super) fn check_decl(&self, decl: Declaration<'tcx>) {
+ // Determine and write the type which we'll check the pattern against.
+ let decl_ty = self.local_ty(decl.span, decl.hir_id).decl_ty;
+ self.write_ty(decl.hir_id, decl_ty);
+
+ // Type check the initializer.
+ if let Some(ref init) = decl.init {
+ let init_ty = self.check_decl_initializer(decl.hir_id, decl.pat, &init);
+ self.overwrite_local_ty_if_err(decl.hir_id, decl.pat, decl_ty, init_ty);
+ }
+
+ // Does the expected pattern type originate from an expression and what is the span?
+ let (origin_expr, ty_span) = match (decl.ty, decl.init) {
+ (Some(ty), _) => (false, Some(ty.span)), // Bias towards the explicit user type.
+ (_, Some(init)) => {
+ (true, Some(init.span.find_ancestor_inside(decl.span).unwrap_or(init.span)))
+ } // No explicit type; so use the scrutinee.
+ _ => (false, None), // We have `let $pat;`, so the expected type is unconstrained.
+ };
+
+ // Type check the pattern. Override if necessary to avoid knock-on errors.
+ self.check_pat_top(&decl.pat, decl_ty, ty_span, origin_expr);
+ let pat_ty = self.node_ty(decl.pat.hir_id);
+ self.overwrite_local_ty_if_err(decl.hir_id, decl.pat, decl_ty, pat_ty);
+
+ if let Some(blk) = decl.els {
+ let previous_diverges = self.diverges.get();
+ let else_ty = self.check_block_with_expected(blk, NoExpectation);
+ let cause = self.cause(blk.span, ObligationCauseCode::LetElse);
+ if let Some(mut err) =
+ self.demand_eqtype_with_origin(&cause, self.tcx.types.never, else_ty)
+ {
+ err.emit();
+ }
+ self.diverges.set(previous_diverges);
+ }
+ }
+
+ /// Type check a `let` statement.
+ pub fn check_decl_local(&self, local: &'tcx hir::Local<'tcx>) {
+ self.check_decl(local.into());
+ }
+
+ pub fn check_stmt(&self, stmt: &'tcx hir::Stmt<'tcx>, is_last: bool) {
+ // Don't do all the complex logic below for `DeclItem`.
+ match stmt.kind {
+ hir::StmtKind::Item(..) => return,
+ hir::StmtKind::Local(..) | hir::StmtKind::Expr(..) | hir::StmtKind::Semi(..) => {}
+ }
+
+ self.warn_if_unreachable(stmt.hir_id, stmt.span, "statement");
+
+ // Hide the outer diverging and `has_errors` flags.
+ let old_diverges = self.diverges.replace(Diverges::Maybe);
+ let old_has_errors = self.has_errors.replace(false);
+
+ match stmt.kind {
+ hir::StmtKind::Local(l) => {
+ self.check_decl_local(l);
+ }
+ // Ignore for now.
+ hir::StmtKind::Item(_) => {}
+ hir::StmtKind::Expr(ref expr) => {
+ // Check with expected type of `()`.
+ self.check_expr_has_type_or_error(&expr, self.tcx.mk_unit(), |err| {
+ if expr.can_have_side_effects() {
+ self.suggest_semicolon_at_end(expr.span, err);
+ }
+ });
+ }
+ hir::StmtKind::Semi(ref expr) => {
+ // All of this is equivalent to calling `check_expr`, but it is inlined out here
+ // in order to capture the fact that this `match` is the last statement in its
+ // function. This is done for better suggestions to remove the `;`.
+ let expectation = match expr.kind {
+ hir::ExprKind::Match(..) if is_last => IsLast(stmt.span),
+ _ => NoExpectation,
+ };
+ self.check_expr_with_expectation(expr, expectation);
+ }
+ }
+
+ // Combine the diverging and `has_error` flags.
+ self.diverges.set(self.diverges.get() | old_diverges);
+ self.has_errors.set(self.has_errors.get() | old_has_errors);
+ }
+
+ pub fn check_block_no_value(&self, blk: &'tcx hir::Block<'tcx>) {
+ let unit = self.tcx.mk_unit();
+ let ty = self.check_block_with_expected(blk, ExpectHasType(unit));
+
+ // if the block produces a `!` value, that can always be
+ // (effectively) coerced to unit.
+ if !ty.is_never() {
+ self.demand_suptype(blk.span, unit, ty);
+ }
+ }
+
+ pub(in super::super) fn check_block_with_expected(
+ &self,
+ blk: &'tcx hir::Block<'tcx>,
+ expected: Expectation<'tcx>,
+ ) -> Ty<'tcx> {
+ let prev = self.ps.replace(self.ps.get().recurse(blk));
+
+ // In some cases, blocks have just one exit, but other blocks
+ // can be targeted by multiple breaks. This can happen both
+ // with labeled blocks as well as when we desugar
+ // a `try { ... }` expression.
+ //
+ // Example 1:
+ //
+ // 'a: { if true { break 'a Err(()); } Ok(()) }
+ //
+ // Here we would wind up with two coercions, one from
+ // `Err(())` and the other from the tail expression
+ // `Ok(())`. If the tail expression is omitted, that's a
+ // "forced unit" -- unless the block diverges, in which
+ // case we can ignore the tail expression (e.g., `'a: {
+ // break 'a 22; }` would not force the type of the block
+ // to be `()`).
+ let tail_expr = blk.expr.as_ref();
+ let coerce_to_ty = expected.coercion_target_type(self, blk.span);
+ let coerce = if blk.targeted_by_break {
+ CoerceMany::new(coerce_to_ty)
+ } else {
+ let tail_expr: &[&hir::Expr<'_>] = match tail_expr {
+ Some(e) => slice::from_ref(e),
+ None => &[],
+ };
+ CoerceMany::with_coercion_sites(coerce_to_ty, tail_expr)
+ };
+
+ let prev_diverges = self.diverges.get();
+ let ctxt = BreakableCtxt { coerce: Some(coerce), may_break: false };
+
+ let (ctxt, ()) = self.with_breakable_ctxt(blk.hir_id, ctxt, || {
+ for (pos, s) in blk.stmts.iter().enumerate() {
+ self.check_stmt(s, blk.stmts.len() - 1 == pos);
+ }
+
+ // check the tail expression **without** holding the
+ // `enclosing_breakables` lock below.
+ let tail_expr_ty = tail_expr.map(|t| self.check_expr_with_expectation(t, expected));
+
+ let mut enclosing_breakables = self.enclosing_breakables.borrow_mut();
+ let ctxt = enclosing_breakables.find_breakable(blk.hir_id);
+ let coerce = ctxt.coerce.as_mut().unwrap();
+ if let Some(tail_expr_ty) = tail_expr_ty {
+ let tail_expr = tail_expr.unwrap();
+ let span = self.get_expr_coercion_span(tail_expr);
+ let cause = self.cause(span, ObligationCauseCode::BlockTailExpression(blk.hir_id));
+ let ty_for_diagnostic = coerce.merged_ty();
+ // We use `coerce_inner` here because we want to augment the error with a
+ // suggestion to wrap the block in square brackets if it might have
+ // been intended as array syntax.
+ coerce.coerce_inner(
+ self,
+ &cause,
+ Some(tail_expr),
+ tail_expr_ty,
+ Some(&mut |diag: &mut Diagnostic| {
+ self.suggest_block_to_brackets(diag, blk, tail_expr_ty, ty_for_diagnostic);
+ }),
+ false,
+ );
+ } else {
+ // Subtle: if there is no explicit tail expression,
+ // that is typically equivalent to a tail expression
+ // of `()` -- except if the block diverges. In that
+ // case, there is no value supplied from the tail
+ // expression (assuming there are no other breaks,
+ // this implies that the type of the block will be
+ // `!`).
+ //
+ // #41425 -- label the implicit `()` as being the
+ // "found type" here, rather than the "expected type".
+ if !self.diverges.get().is_always() {
+ // #50009 -- Do not point at the entire fn block span, point at the return type
+ // span, as it is the cause of the requirement, and
+ // `consider_hint_about_removing_semicolon` will point at the last expression
+ // if it were a relevant part of the error. This improves usability in editors
+ // that highlight errors inline.
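+ // e.g., for a hypothetical `fn f() -> i32 { 1; }`, we point at `-> i32` rather than the
+ // whole body, and the semicolon-removal suggestion can then point at the trailing `1;`.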
+ let mut sp = blk.span;
+ let mut fn_span = None;
+ if let Some((decl, ident)) = self.get_parent_fn_decl(blk.hir_id) {
+ let ret_sp = decl.output.span();
+ if let Some(block_sp) = self.parent_item_span(blk.hir_id) {
+ // HACK: in some cases (`ui/liveness/liveness-issue-2163.rs`) the
+ // output would otherwise be incorrect and even misleading. Make sure
+ // the span we're aiming at corresponds to a `fn` body.
+ if block_sp == blk.span {
+ sp = ret_sp;
+ fn_span = Some(ident.span);
+ }
+ }
+ }
+ coerce.coerce_forced_unit(
+ self,
+ &self.misc(sp),
+ &mut |err| {
+ if let Some(expected_ty) = expected.only_has_type(self) {
+ if !self.consider_removing_semicolon(blk, expected_ty, err) {
+ self.err_ctxt().consider_returning_binding(
+ blk,
+ expected_ty,
+ err,
+ );
+ }
+ if expected_ty == self.tcx.types.bool {
+ // If this is caused by a missing `let` in a `while let`,
+ // silence this redundant error, as we already emit E0070.
+
+ // Our block must have the shape `assign-desugar local; assignment`
+ if let Some(hir::Node::Block(hir::Block {
+ stmts:
+ [
+ hir::Stmt {
+ kind:
+ hir::StmtKind::Local(hir::Local {
+ source:
+ hir::LocalSource::AssignDesugar(_),
+ ..
+ }),
+ ..
+ },
+ hir::Stmt {
+ kind:
+ hir::StmtKind::Expr(hir::Expr {
+ kind: hir::ExprKind::Assign(..),
+ ..
+ }),
+ ..
+ },
+ ],
+ ..
+ })) = self.tcx.hir().find(blk.hir_id)
+ {
+ self.comes_from_while_condition(blk.hir_id, |_| {
+ err.downgrade_to_delayed_bug();
+ })
+ }
+ }
+ }
+ if let Some(fn_span) = fn_span {
+ err.span_label(
+ fn_span,
+ "implicitly returns `()` as its body has no tail or `return` \
+ expression",
+ );
+ }
+ },
+ false,
+ );
+ }
+ }
+ });
+
+ if ctxt.may_break {
+ // If we can break from the block, then the block's exit is always reachable
+ // (... as long as the entry is reachable) - regardless of the tail of the block.
+ self.diverges.set(prev_diverges);
+ }
+
+ let mut ty = ctxt.coerce.unwrap().complete(self);
+
+ if self.has_errors.get() || ty.references_error() {
+ ty = self.tcx.ty_error()
+ }
+
+ self.write_ty(blk.hir_id, ty);
+
+ self.ps.set(prev);
+ ty
+ }
+
+ fn parent_item_span(&self, id: hir::HirId) -> Option<Span> {
+ let node = self.tcx.hir().get_by_def_id(self.tcx.hir().get_parent_item(id).def_id);
+ match node {
+ Node::Item(&hir::Item { kind: hir::ItemKind::Fn(_, _, body_id), .. })
+ | Node::ImplItem(&hir::ImplItem { kind: hir::ImplItemKind::Fn(_, body_id), .. }) => {
+ let body = self.tcx.hir().body(body_id);
+ if let ExprKind::Block(block, _) = &body.value.kind {
+ return Some(block.span);
+ }
+ }
+ _ => {}
+ }
+ None
+ }
+
+ /// Given a function block's `HirId`, returns the enclosing function's `FnDecl` and `Ident` if they exist, or `None` otherwise.
+ fn get_parent_fn_decl(&self, blk_id: hir::HirId) -> Option<(&'tcx hir::FnDecl<'tcx>, Ident)> {
+ let parent = self.tcx.hir().get_by_def_id(self.tcx.hir().get_parent_item(blk_id).def_id);
+ self.get_node_fn_decl(parent).map(|(fn_decl, ident, _)| (fn_decl, ident))
+ }
+
+ /// If `expr` is a `match` expression that has only one non-`!` arm, use that arm's tail
+ /// expression's `Span`, otherwise return `expr.span`. This is done to give better errors
+ /// when given code like the following:
+ /// ```text
+ /// if false { return 0i32; } else { 1u32 }
+ /// // ^^^^ point at this instead of the whole `if` expression
+ /// ```
+ fn get_expr_coercion_span(&self, expr: &hir::Expr<'_>) -> rustc_span::Span {
+ let check_in_progress = |elem: &hir::Expr<'_>| {
+ self.typeck_results.borrow().node_type_opt(elem.hir_id).filter(|ty| !ty.is_never()).map(
+ |_| match elem.kind {
+ // Point at the tail expression when possible.
+ hir::ExprKind::Block(block, _) => block.expr.map_or(block.span, |e| e.span),
+ _ => elem.span,
+ },
+ )
+ };
+
+ if let hir::ExprKind::If(_, _, Some(el)) = expr.kind {
+ if let Some(rslt) = check_in_progress(el) {
+ return rslt;
+ }
+ }
+
+ if let hir::ExprKind::Match(_, arms, _) = expr.kind {
+ let mut iter = arms.iter().filter_map(|arm| check_in_progress(arm.body));
+ if let Some(span) = iter.next() {
+ if iter.next().is_none() {
+ return span;
+ }
+ }
+ }
+
+ expr.span
+ }
+
+ fn overwrite_local_ty_if_err(
+ &self,
+ hir_id: hir::HirId,
+ pat: &'tcx hir::Pat<'tcx>,
+ decl_ty: Ty<'tcx>,
+ ty: Ty<'tcx>,
+ ) {
+ if ty.references_error() {
+ // Override the types everywhere with `err()` to avoid knock-on errors.
+ self.write_ty(hir_id, ty);
+ self.write_ty(pat.hir_id, ty);
+ let local_ty = LocalTy { decl_ty, revealed_ty: ty };
+ self.locals.borrow_mut().insert(hir_id, local_ty);
+ self.locals.borrow_mut().insert(pat.hir_id, local_ty);
+ }
+ }
+
+ // Finish resolving a path in a struct expression or pattern `S::A { .. }` if necessary.
+ // The newly resolved definition is written into `type_dependent_defs`.
+ fn finish_resolving_struct_path(
+ &self,
+ qpath: &QPath<'_>,
+ path_span: Span,
+ hir_id: hir::HirId,
+ ) -> (Res, Ty<'tcx>) {
+ match *qpath {
+ QPath::Resolved(ref maybe_qself, ref path) => {
+ let self_ty = maybe_qself.as_ref().map(|qself| self.to_ty(qself));
+ let ty = <dyn AstConv<'_>>::res_to_ty(self, self_ty, path, true);
+ (path.res, ty)
+ }
+ QPath::TypeRelative(ref qself, ref segment) => {
+ let ty = self.to_ty(qself);
+
+ let result = <dyn AstConv<'_>>::associated_path_to_ty(
+ self, hir_id, path_span, ty, qself, segment, true,
+ );
+ let ty = result.map(|(ty, _, _)| ty).unwrap_or_else(|_| self.tcx().ty_error());
+ let result = result.map(|(_, kind, def_id)| (kind, def_id));
+
+ // Write back the new resolution.
+ self.write_resolution(hir_id, result);
+
+ (result.map_or(Res::Err, |(kind, def_id)| Res::Def(kind, def_id)), ty)
+ }
+ QPath::LangItem(lang_item, span, id) => {
+ self.resolve_lang_item_path(lang_item, span, hir_id, id)
+ }
+ }
+ }
+
+ /// Given a vector of fulfillment errors, try to adjust the spans of the
+ /// errors to more accurately point at the cause of the failure.
+ ///
+ /// This applies to calls, methods, and struct expressions. This will also
+ /// try to deduplicate errors that are due to the same cause but might
+ /// have been created with different [`ObligationCause`][traits::ObligationCause]s.
+ pub(super) fn adjust_fulfillment_errors_for_expr_obligation(
+ &self,
+ errors: &mut Vec<traits::FulfillmentError<'tcx>>,
+ ) {
+ // Store a mapping from `(Span, Predicate) -> ObligationCause`, so that
+ // other errors that have the same span and predicate can also get fixed,
+ // even if their `ObligationCauseCode` isn't an `Expr*Obligation` kind.
+ // This is important since if we adjust one span but not the other, then
+ // we will have "duplicated" the error on the UI side.
+ let mut remap_cause = FxHashSet::default();
+ let mut not_adjusted = vec![];
+
+ for error in errors {
+ let before_span = error.obligation.cause.span;
+ if self.adjust_fulfillment_error_for_expr_obligation(error)
+ || before_span != error.obligation.cause.span
+ {
+ // Store both the predicate and the predicate *without constness*
+ // since sometimes we instantiate and check both of these in a
+ // method call, for example.
+ remap_cause.insert((
+ before_span,
+ error.obligation.predicate,
+ error.obligation.cause.clone(),
+ ));
+ remap_cause.insert((
+ before_span,
+ error.obligation.predicate.without_const(self.tcx),
+ error.obligation.cause.clone(),
+ ));
+ } else {
+ // If it failed to be adjusted once around, it may be adjusted
+ // via the "remap cause" mapping the second time...
+ not_adjusted.push(error);
+ }
+ }
+
+ for error in not_adjusted {
+ for (span, predicate, cause) in &remap_cause {
+ if *predicate == error.obligation.predicate
+ && span.contains(error.obligation.cause.span)
+ {
+ error.obligation.cause = cause.clone();
+ continue;
+ }
+ }
+ }
+ }
+
+ fn adjust_fulfillment_error_for_expr_obligation(
+ &self,
+ error: &mut traits::FulfillmentError<'tcx>,
+ ) -> bool {
+ let (traits::ExprItemObligation(def_id, hir_id, idx) | traits::ExprBindingObligation(def_id, _, hir_id, idx))
+ = *error.obligation.cause.code().peel_derives() else { return false; };
+ let hir = self.tcx.hir();
+ let hir::Node::Expr(expr) = hir.get(hir_id) else { return false; };
+
+ // Skip over mentioning async lang item
+ if Some(def_id) == self.tcx.lang_items().from_generator_fn()
+ && error.obligation.cause.span.desugaring_kind()
+ == Some(rustc_span::DesugaringKind::Async)
+ {
+ return false;
+ }
+
+ let Some(unsubstituted_pred) =
+ self.tcx.predicates_of(def_id).instantiate_identity(self.tcx).predicates.into_iter().nth(idx)
+ else { return false; };
+
+ let generics = self.tcx.generics_of(def_id);
+ let predicate_substs = match unsubstituted_pred.kind().skip_binder() {
+ ty::PredicateKind::Trait(pred) => pred.trait_ref.substs,
+ ty::PredicateKind::Projection(pred) => pred.projection_ty.substs,
+ _ => ty::List::empty(),
+ };
+
+ let find_param_matching = |matches: &dyn Fn(&ty::ParamTy) -> bool| {
+ predicate_substs.types().find_map(|ty| {
+ ty.walk().find_map(|arg| {
+ if let ty::GenericArgKind::Type(ty) = arg.unpack()
+ && let ty::Param(param_ty) = ty.kind()
+ && matches(param_ty)
+ {
+ Some(arg)
+ } else {
+ None
+ }
+ })
+ })
+ };
+
+ // Prefer generics that are local to the fn item, since these are likely
+ // to be the cause of the unsatisfied predicate.
+ let mut param_to_point_at = find_param_matching(&|param_ty| {
+ self.tcx.parent(generics.type_param(param_ty, self.tcx).def_id) == def_id
+ });
+ // Fall back to generic that isn't local to the fn item. This will come
+ // from a trait or impl, for example.
+ let mut fallback_param_to_point_at = find_param_matching(&|param_ty| {
+ self.tcx.parent(generics.type_param(param_ty, self.tcx).def_id) != def_id
+ && param_ty.name != rustc_span::symbol::kw::SelfUpper
+ });
+ // Finally, the `Self` parameter is possibly the reason that the predicate
+ // is unsatisfied. This is less likely to be true for methods, because the
+ // method probe already (at least partially) checks that the predicates due
+ // to the `Self` type hold.
+ let mut self_param_to_point_at =
+ find_param_matching(&|param_ty| param_ty.name == rustc_span::symbol::kw::SelfUpper);
+
+ // However, for ambiguity-related errors, we actually want to look
+ // for a parameter that is the source of the inference type left
+ // over in this predicate.
+ if let traits::FulfillmentErrorCode::CodeAmbiguity = error.code {
+ fallback_param_to_point_at = None;
+ self_param_to_point_at = None;
+ param_to_point_at =
+ self.find_ambiguous_parameter_in(def_id, error.root_obligation.predicate);
+ }
+
+ if self.closure_span_overlaps_error(error, expr.span) {
+ return false;
+ }
+
+ match &expr.kind {
+ hir::ExprKind::Path(qpath) => {
+ if let hir::Node::Expr(hir::Expr {
+ kind: hir::ExprKind::Call(callee, args),
+ hir_id: call_hir_id,
+ span: call_span,
+ ..
+ }) = hir.get(hir.get_parent_node(expr.hir_id))
+ && callee.hir_id == expr.hir_id
+ {
+ if self.closure_span_overlaps_error(error, *call_span) {
+ return false;
+ }
+
+ for param in
+ [param_to_point_at, fallback_param_to_point_at, self_param_to_point_at]
+ .into_iter()
+ .flatten()
+ {
+ if self.point_at_arg_if_possible(
+ error,
+ def_id,
+ param,
+ *call_hir_id,
+ callee.span,
+ None,
+ args,
+ )
+ {
+ return true;
+ }
+ }
+ }
+ // Notably, we only point to params that are local to the
+ // item we're checking, since those are the ones we are able
+ // to look in the final `hir::PathSegment` for. Everything else
+ // would require a deeper search into the `qpath` than I think
+ // is worthwhile.
+ if let Some(param_to_point_at) = param_to_point_at
+ && self.point_at_path_if_possible(error, def_id, param_to_point_at, qpath)
+ {
+ return true;
+ }
+ }
+ hir::ExprKind::MethodCall(segment, receiver, args, ..) => {
+ for param in [param_to_point_at, fallback_param_to_point_at, self_param_to_point_at]
+ .into_iter()
+ .flatten()
+ {
+ if self.point_at_arg_if_possible(
+ error,
+ def_id,
+ param,
+ hir_id,
+ segment.ident.span,
+ Some(receiver),
+ args,
+ ) {
+ return true;
+ }
+ }
+ if let Some(param_to_point_at) = param_to_point_at
+ && self.point_at_generic_if_possible(error, def_id, param_to_point_at, segment)
+ {
+ return true;
+ }
+ }
+ hir::ExprKind::Struct(qpath, fields, ..) => {
+ if let Res::Def(DefKind::Struct | DefKind::Variant, variant_def_id) =
+ self.typeck_results.borrow().qpath_res(qpath, hir_id)
+ {
+ for param in
+ [param_to_point_at, fallback_param_to_point_at, self_param_to_point_at]
+ {
+ if let Some(param) = param
+ && self.point_at_field_if_possible(
+ error,
+ def_id,
+ param,
+ variant_def_id,
+ fields,
+ )
+ {
+ return true;
+ }
+ }
+ }
+ if let Some(param_to_point_at) = param_to_point_at
+ && self.point_at_path_if_possible(error, def_id, param_to_point_at, qpath)
+ {
+ return true;
+ }
+ }
+ _ => {}
+ }
+
+ false
+ }
+
+ fn closure_span_overlaps_error(
+ &self,
+ error: &traits::FulfillmentError<'tcx>,
+ span: Span,
+ ) -> bool {
+ if let traits::FulfillmentErrorCode::CodeSelectionError(
+ traits::SelectionError::OutputTypeParameterMismatch(_, expected, _),
+ ) = error.code
+ && let ty::Closure(def_id, _) | ty::Generator(def_id, ..) = expected.skip_binder().self_ty().kind()
+ && span.overlaps(self.tcx.def_span(*def_id))
+ {
+ true
+ } else {
+ false
+ }
+ }
+
+ fn point_at_arg_if_possible(
+ &self,
+ error: &mut traits::FulfillmentError<'tcx>,
+ def_id: DefId,
+ param_to_point_at: ty::GenericArg<'tcx>,
+ call_hir_id: hir::HirId,
+ callee_span: Span,
+ receiver: Option<&'tcx hir::Expr<'tcx>>,
+ args: &'tcx [hir::Expr<'tcx>],
+ ) -> bool {
+ let sig = self.tcx.fn_sig(def_id).skip_binder();
+ let args_referencing_param: Vec<_> = sig
+ .inputs()
+ .iter()
+ .enumerate()
+ .filter(|(_, ty)| find_param_in_ty(**ty, param_to_point_at))
+ .collect();
+ // If there's exactly one argument that references the given generic, great!
+ if let [(idx, _)] = args_referencing_param.as_slice()
+ && let Some(arg) = receiver
+ .map_or(args.get(*idx), |rcvr| if *idx == 0 { Some(rcvr) } else { args.get(*idx - 1) }) {
+ error.obligation.cause.span = arg.span.find_ancestor_in_same_ctxt(error.obligation.cause.span).unwrap_or(arg.span);
+ error.obligation.cause.map_code(|parent_code| {
+ ObligationCauseCode::FunctionArgumentObligation {
+ arg_hir_id: arg.hir_id,
+ call_hir_id,
+ parent_code,
+ }
+ });
+ return true;
+ } else if args_referencing_param.len() > 0 {
+ // If more than one argument applies, then point to the callee span at least...
+ // We have a chance to fix this up further in `point_at_generic_if_possible`.
+ error.obligation.cause.span = callee_span;
+ }
+
+ false
+ }
+
+ fn point_at_field_if_possible(
+ &self,
+ error: &mut traits::FulfillmentError<'tcx>,
+ def_id: DefId,
+ param_to_point_at: ty::GenericArg<'tcx>,
+ variant_def_id: DefId,
+ expr_fields: &[hir::ExprField<'tcx>],
+ ) -> bool {
+ let def = self.tcx.adt_def(def_id);
+
+ let identity_substs = ty::InternalSubsts::identity_for_item(self.tcx, def_id);
+ let fields_referencing_param: Vec<_> = def
+ .variant_with_id(variant_def_id)
+ .fields
+ .iter()
+ .filter(|field| {
+ let field_ty = field.ty(self.tcx, identity_substs);
+ find_param_in_ty(field_ty, param_to_point_at)
+ })
+ .collect();
+
+ if let [field] = fields_referencing_param.as_slice() {
+ for expr_field in expr_fields {
+ // Look for the ExprField that matches the field, using the
+ // same rules that check_expr_struct uses for macro hygiene.
+ if self.tcx.adjust_ident(expr_field.ident, variant_def_id) == field.ident(self.tcx)
+ {
+ error.obligation.cause.span = expr_field
+ .expr
+ .span
+ .find_ancestor_in_same_ctxt(error.obligation.cause.span)
+ .unwrap_or(expr_field.span);
+ return true;
+ }
+ }
+ }
+
+ false
+ }
+
+ fn point_at_path_if_possible(
+ &self,
+ error: &mut traits::FulfillmentError<'tcx>,
+ def_id: DefId,
+ param: ty::GenericArg<'tcx>,
+ qpath: &QPath<'tcx>,
+ ) -> bool {
+ match qpath {
+ hir::QPath::Resolved(_, path) => {
+ if let Some(segment) = path.segments.last()
+ && self.point_at_generic_if_possible(error, def_id, param, segment)
+ {
+ return true;
+ }
+ }
+ hir::QPath::TypeRelative(_, segment) => {
+ if self.point_at_generic_if_possible(error, def_id, param, segment) {
+ return true;
+ }
+ }
+ _ => {}
+ }
+
+ false
+ }
+
+ fn point_at_generic_if_possible(
+ &self,
+ error: &mut traits::FulfillmentError<'tcx>,
+ def_id: DefId,
+ param_to_point_at: ty::GenericArg<'tcx>,
+ segment: &hir::PathSegment<'tcx>,
+ ) -> bool {
+ let own_substs = self
+ .tcx
+ .generics_of(def_id)
+ .own_substs(ty::InternalSubsts::identity_for_item(self.tcx, def_id));
+ let Some((index, _)) = own_substs
+ .iter()
+ .filter(|arg| matches!(arg.unpack(), ty::GenericArgKind::Type(_)))
+ .enumerate()
+ .find(|(_, arg)| **arg == param_to_point_at) else { return false };
+ let Some(arg) = segment
+ .args()
+ .args
+ .iter()
+ .filter(|arg| matches!(arg, hir::GenericArg::Type(_)))
+ .nth(index) else { return false; };
+ error.obligation.cause.span = arg
+ .span()
+ .find_ancestor_in_same_ctxt(error.obligation.cause.span)
+ .unwrap_or(arg.span());
+ true
+ }
+
+ fn find_ambiguous_parameter_in<T: TypeVisitable<'tcx>>(
+ &self,
+ item_def_id: DefId,
+ t: T,
+ ) -> Option<ty::GenericArg<'tcx>> {
+ struct FindAmbiguousParameter<'a, 'tcx>(&'a FnCtxt<'a, 'tcx>, DefId);
+ impl<'tcx> TypeVisitor<'tcx> for FindAmbiguousParameter<'_, 'tcx> {
+ type BreakTy = ty::GenericArg<'tcx>;
+ fn visit_ty(&mut self, ty: Ty<'tcx>) -> std::ops::ControlFlow<Self::BreakTy> {
+ if let Some(origin) = self.0.type_var_origin(ty)
+ && let TypeVariableOriginKind::TypeParameterDefinition(_, Some(def_id)) =
+ origin.kind
+ && let generics = self.0.tcx.generics_of(self.1)
+ && let Some(index) = generics.param_def_id_to_index(self.0.tcx, def_id)
+ && let Some(subst) = ty::InternalSubsts::identity_for_item(self.0.tcx, self.1)
+ .get(index as usize)
+ {
+ ControlFlow::Break(*subst)
+ } else {
+ ty.super_visit_with(self)
+ }
+ }
+ }
+ t.visit_with(&mut FindAmbiguousParameter(self, item_def_id)).break_value()
+ }
+
+ fn label_fn_like(
+ &self,
+ err: &mut Diagnostic,
+ callable_def_id: Option<DefId>,
+ callee_ty: Option<Ty<'tcx>>,
+ // A specific argument should be labeled, instead of all of them
+ expected_idx: Option<usize>,
+ is_method: bool,
+ ) {
+ let Some(mut def_id) = callable_def_id else {
+ return;
+ };
+
+ if let Some(assoc_item) = self.tcx.opt_associated_item(def_id)
+ // Possibly points at either impl or trait item, so try to get it
+ // to point to trait item, then get the parent.
+ // This parent might be an impl in the case of an inherent function,
+ // but the next check will fail.
+ && let maybe_trait_item_def_id = assoc_item.trait_item_def_id.unwrap_or(def_id)
+ && let maybe_trait_def_id = self.tcx.parent(maybe_trait_item_def_id)
+ // Just an easy way to check "trait_def_id == Fn/FnMut/FnOnce"
+ && let Some(call_kind) = ty::ClosureKind::from_def_id(self.tcx, maybe_trait_def_id)
+ && let Some(callee_ty) = callee_ty
+ {
+ let callee_ty = callee_ty.peel_refs();
+ match *callee_ty.kind() {
+ ty::Param(param) => {
+ let param =
+ self.tcx.generics_of(self.body_id.owner).type_param(&param, self.tcx);
+ if param.kind.is_synthetic() {
+ // if it's `impl Fn() -> ..` then just fall down to the def-id based logic
+ def_id = param.def_id;
+ } else {
+ // Otherwise, find the predicate that makes this generic callable,
+ // and point at that.
+ let instantiated = self
+ .tcx
+ .explicit_predicates_of(self.body_id.owner)
+ .instantiate_identity(self.tcx);
+ // FIXME(compiler-errors): This could be problematic if something has two
+ // fn-like predicates with different args, but callable types really never
+ // do that, so it's OK.
+ for (predicate, span) in
+ std::iter::zip(instantiated.predicates, instantiated.spans)
+ {
+ if let ty::PredicateKind::Trait(pred) = predicate.kind().skip_binder()
+ && pred.self_ty().peel_refs() == callee_ty
+ && ty::ClosureKind::from_def_id(self.tcx, pred.def_id()).is_some()
+ {
+ err.span_note(span, "callable defined here");
+ return;
+ }
+ }
+ }
+ }
+ ty::Opaque(new_def_id, _)
+ | ty::Closure(new_def_id, _)
+ | ty::FnDef(new_def_id, _) => {
+ def_id = new_def_id;
+ }
+ _ => {
+ // Look for a user-provided impl of a `Fn` trait, and point to it.
+ let new_def_id = self.probe(|_| {
+ let trait_ref = ty::TraitRef::new(
+ call_kind.to_def_id(self.tcx),
+ self.tcx.mk_substs(
+ [
+ ty::GenericArg::from(callee_ty),
+ self.next_ty_var(TypeVariableOrigin {
+ kind: TypeVariableOriginKind::MiscVariable,
+ span: rustc_span::DUMMY_SP,
+ })
+ .into(),
+ ]
+ .into_iter(),
+ ),
+ );
+ let obligation = traits::Obligation::new(
+ traits::ObligationCause::dummy(),
+ self.param_env,
+ ty::Binder::dummy(ty::TraitPredicate {
+ trait_ref,
+ constness: ty::BoundConstness::NotConst,
+ polarity: ty::ImplPolarity::Positive,
+ }),
+ );
+ match SelectionContext::new(&self).select(&obligation) {
+ Ok(Some(traits::ImplSource::UserDefined(impl_source))) => {
+ Some(impl_source.impl_def_id)
+ }
+ _ => None,
+ }
+ });
+ if let Some(new_def_id) = new_def_id {
+ def_id = new_def_id;
+ } else {
+ return;
+ }
+ }
+ }
+ }
+
+ if let Some(def_span) = self.tcx.def_ident_span(def_id) && !def_span.is_dummy() {
+ let mut spans: MultiSpan = def_span.into();
+
+ let params = self
+ .tcx
+ .hir()
+ .get_if_local(def_id)
+ .and_then(|node| node.body_id())
+ .into_iter()
+ .flat_map(|id| self.tcx.hir().body(id).params)
+ .skip(if is_method { 1 } else { 0 });
+
+ for (_, param) in params
+ .into_iter()
+ .enumerate()
+ .filter(|(idx, _)| expected_idx.map_or(true, |expected_idx| expected_idx == *idx))
+ {
+ spans.push_span_label(param.span, "");
+ }
+
+ let def_kind = self.tcx.def_kind(def_id);
+ err.span_note(spans, &format!("{} defined here", def_kind.descr(def_id)));
+ } else if let Some(hir::Node::Expr(e)) = self.tcx.hir().get_if_local(def_id)
+ && let hir::ExprKind::Closure(hir::Closure { body, .. }) = &e.kind
+ {
+ let param = expected_idx
+ .and_then(|expected_idx| self.tcx.hir().body(*body).params.get(expected_idx));
+ let (kind, span) = if let Some(param) = param {
+ ("closure parameter", param.span)
+ } else {
+ ("closure", self.tcx.def_span(def_id))
+ };
+ err.span_note(span, &format!("{} defined here", kind));
+ } else {
+ let def_kind = self.tcx.def_kind(def_id);
+ err.span_note(
+ self.tcx.def_span(def_id),
+ &format!("{} defined here", def_kind.descr(def_id)),
+ );
+ }
+ }
+}
+
+fn find_param_in_ty<'tcx>(ty: Ty<'tcx>, param_to_point_at: ty::GenericArg<'tcx>) -> bool {
+ let mut walk = ty.walk();
+ while let Some(arg) = walk.next() {
+ if arg == param_to_point_at {
+ return true;
+ } else if let ty::GenericArgKind::Type(ty) = arg.unpack()
+ && let ty::Projection(..) = ty.kind()
+ {
+ // This logic may seem a bit strange, but typically when
+ // we have a projection type in a function signature, the
+ // argument that's being passed into that signature is
+ // not actually constraining that projection's substs in
+ // a meaningful way. So we skip it, and see improvements
+ // in some UI tests.
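+            // For instance (hypothetical signature), given
+            // `fn take<T: Iterator>(iter: T, item: <T as Iterator>::Item)`,
+            // an argument passed for `item` rarely pins down `T` itself, so
+            // pointing at it would not help the user.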
+ walk.skip_current_subtree();
+ }
+ }
+ false
+}
diff --git a/compiler/rustc_hir_typeck/src/fn_ctxt/mod.rs b/compiler/rustc_hir_typeck/src/fn_ctxt/mod.rs
new file mode 100644
index 000000000..0c600daf4
--- /dev/null
+++ b/compiler/rustc_hir_typeck/src/fn_ctxt/mod.rs
@@ -0,0 +1,312 @@
+mod _impl;
+mod arg_matrix;
+mod checks;
+mod suggestions;
+
+pub use _impl::*;
+pub use suggestions::*;
+
+use crate::coercion::DynamicCoerceMany;
+use crate::{Diverges, EnclosingBreakables, Inherited, UnsafetyState};
+use rustc_hir as hir;
+use rustc_hir::def_id::DefId;
+use rustc_hir_analysis::astconv::AstConv;
+use rustc_infer::infer;
+use rustc_infer::infer::error_reporting::TypeErrCtxt;
+use rustc_infer::infer::type_variable::{TypeVariableOrigin, TypeVariableOriginKind};
+use rustc_middle::infer::unify_key::{ConstVariableOrigin, ConstVariableOriginKind};
+use rustc_middle::ty::subst::GenericArgKind;
+use rustc_middle::ty::visit::TypeVisitable;
+use rustc_middle::ty::{self, Const, Ty, TyCtxt};
+use rustc_session::Session;
+use rustc_span::symbol::Ident;
+use rustc_span::{self, Span};
+use rustc_trait_selection::traits::{ObligationCause, ObligationCauseCode};
+
+use std::cell::{Cell, RefCell};
+use std::ops::Deref;
+
+/// The `FnCtxt` stores type-checking context needed to type-check bodies of
+/// functions, closures, and `const`s, including performing type inference
+/// with [`InferCtxt`].
+///
+/// This is in contrast to [`ItemCtxt`], which is used to type-check item *signatures*
+/// and thus does not perform type inference.
+///
+/// See [`ItemCtxt`]'s docs for more.
+///
+/// [`ItemCtxt`]: rustc_hir_analysis::collect::ItemCtxt
+/// [`InferCtxt`]: infer::InferCtxt
+pub struct FnCtxt<'a, 'tcx> {
+ pub(super) body_id: hir::HirId,
+
+ /// The parameter environment used for proving trait obligations
+ /// in this function. This can change when we descend into
+ /// closures (as they bring new things into scope), hence it is
+ /// not part of `Inherited` (as of the time of this writing,
+ /// closures do not yet change the environment, but they will
+ /// eventually).
+ pub(super) param_env: ty::ParamEnv<'tcx>,
+
+ /// Number of errors that had been reported when we started
+ /// checking this function. On exit, if we find that *more* errors
+ /// have been reported, we will skip regionck and other work that
+ /// expects the types within the function to be consistent.
+ // FIXME(matthewjasper) This should not exist, and it's not correct
+ // if type checking is run in parallel.
+ err_count_on_creation: usize,
+
+ /// If `Some`, this stores coercion information for returned
+ /// expressions. If `None`, this is in a context where return is
+ /// inappropriate, such as a const expression.
+ ///
+ /// This is a `RefCell<DynamicCoerceMany>`, which means that we
+ /// can track all the return expressions and then use them to
+ /// compute a useful coercion from the set, similar to a match
+ /// expression or other branching context. You can use methods
+ /// like `expected_ty` to access the declared return type (if
+ /// any).
+ pub(super) ret_coercion: Option<RefCell<DynamicCoerceMany<'tcx>>>,
+
+    /// Used exclusively to reduce the cost of the advanced evaluation used for
+ /// more helpful diagnostics.
+ pub(super) in_tail_expr: bool,
+
+ /// First span of a return site that we find. Used in error messages.
+ pub(super) ret_coercion_span: Cell<Option<Span>>,
+
+ pub(super) resume_yield_tys: Option<(Ty<'tcx>, Ty<'tcx>)>,
+
+ pub(super) ps: Cell<UnsafetyState>,
+
+ /// Whether the last checked node generates a divergence (e.g.,
+ /// `return` will set this to `Always`). In general, when entering
+ /// an expression or other node in the tree, the initial value
+ /// indicates whether prior parts of the containing expression may
+ /// have diverged. It is then typically set to `Maybe` (and the
+ /// old value remembered) for processing the subparts of the
+ /// current expression. As each subpart is processed, they may set
+ /// the flag to `Always`, etc. Finally, at the end, we take the
+ /// result and "union" it with the original value, so that when we
+ /// return the flag indicates if any subpart of the parent
+ /// expression (up to and including this part) has diverged. So,
+    /// if you read it after evaluating a subexpression `X`, the value
+    /// you get indicates whether any subexpression evaluated up to and
+    /// including `X` diverged.
+ ///
+ /// We currently use this flag only for diagnostic purposes:
+ ///
+ /// - To warn about unreachable code: if, after processing a
+ /// sub-expression but before we have applied the effects of the
+ /// current node, we see that the flag is set to `Always`, we
+ /// can issue a warning. This corresponds to something like
+ /// `foo(return)`; we warn on the `foo()` expression. (We then
+ /// update the flag to `WarnedAlways` to suppress duplicate
+ /// reports.) Similarly, if we traverse to a fresh statement (or
+ /// tail expression) from an `Always` setting, we will issue a
+ /// warning. This corresponds to something like `{return;
+ /// foo();}` or `{return; 22}`, where we would warn on the
+ /// `foo()` or `22`.
+ ///
+ /// An expression represents dead code if, after checking it,
+ /// the diverges flag is set to something other than `Maybe`.
+ pub(super) diverges: Cell<Diverges>,
+
+ /// Whether any child nodes have any type errors.
+ pub(super) has_errors: Cell<bool>,
+
+ pub(super) enclosing_breakables: RefCell<EnclosingBreakables<'tcx>>,
+
+ pub(super) inh: &'a Inherited<'tcx>,
+
+ /// True if the function or closure's return type is known before
+ /// entering the function/closure, i.e. if the return type is
+ /// either given explicitly or inferred from, say, an `Fn*` trait
+ /// bound. Used for diagnostic purposes only.
+ pub(super) return_type_pre_known: bool,
+
+    /// True if the return type contains an opaque type.
+ pub(super) return_type_has_opaque: bool,
+}
+
+impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
+ pub fn new(
+ inh: &'a Inherited<'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ body_id: hir::HirId,
+ ) -> FnCtxt<'a, 'tcx> {
+ FnCtxt {
+ body_id,
+ param_env,
+ err_count_on_creation: inh.tcx.sess.err_count(),
+ ret_coercion: None,
+ in_tail_expr: false,
+ ret_coercion_span: Cell::new(None),
+ resume_yield_tys: None,
+ ps: Cell::new(UnsafetyState::function(hir::Unsafety::Normal, hir::CRATE_HIR_ID)),
+ diverges: Cell::new(Diverges::Maybe),
+ has_errors: Cell::new(false),
+ enclosing_breakables: RefCell::new(EnclosingBreakables {
+ stack: Vec::new(),
+ by_id: Default::default(),
+ }),
+ inh,
+ return_type_pre_known: true,
+ return_type_has_opaque: false,
+ }
+ }
+
+ pub fn cause(&self, span: Span, code: ObligationCauseCode<'tcx>) -> ObligationCause<'tcx> {
+ ObligationCause::new(span, self.body_id, code)
+ }
+
+ pub fn misc(&self, span: Span) -> ObligationCause<'tcx> {
+ self.cause(span, ObligationCauseCode::MiscObligation)
+ }
+
+ pub fn sess(&self) -> &Session {
+ &self.tcx.sess
+ }
+
+    /// Creates a `TypeErrCtxt` with a reference to the in-progress
+ /// `TypeckResults` which is used for diagnostics.
+ /// Use [`InferCtxt::err_ctxt`] to start one without a `TypeckResults`.
+ ///
+ /// [`InferCtxt::err_ctxt`]: infer::InferCtxt::err_ctxt
+ pub fn err_ctxt(&'a self) -> TypeErrCtxt<'a, 'tcx> {
+ TypeErrCtxt { infcx: &self.infcx, typeck_results: Some(self.typeck_results.borrow()) }
+ }
+
+ pub fn errors_reported_since_creation(&self) -> bool {
+ self.tcx.sess.err_count() > self.err_count_on_creation
+ }
+}
+
+impl<'a, 'tcx> Deref for FnCtxt<'a, 'tcx> {
+ type Target = Inherited<'tcx>;
+ fn deref(&self) -> &Self::Target {
+ &self.inh
+ }
+}
+
+impl<'a, 'tcx> AstConv<'tcx> for FnCtxt<'a, 'tcx> {
+ fn tcx<'b>(&'b self) -> TyCtxt<'tcx> {
+ self.tcx
+ }
+
+ fn item_def_id(&self) -> Option<DefId> {
+ None
+ }
+
+ fn get_type_parameter_bounds(
+ &self,
+ _: Span,
+ def_id: DefId,
+ _: Ident,
+ ) -> ty::GenericPredicates<'tcx> {
+ let tcx = self.tcx;
+ let item_def_id = tcx.hir().ty_param_owner(def_id.expect_local());
+ let generics = tcx.generics_of(item_def_id);
+ let index = generics.param_def_id_to_index[&def_id];
+ ty::GenericPredicates {
+ parent: None,
+ predicates: tcx.arena.alloc_from_iter(
+ self.param_env.caller_bounds().iter().filter_map(|predicate| {
+ match predicate.kind().skip_binder() {
+ ty::PredicateKind::Trait(data) if data.self_ty().is_param(index) => {
+ // HACK(eddyb) should get the original `Span`.
+ let span = tcx.def_span(def_id);
+ Some((predicate, span))
+ }
+ _ => None,
+ }
+ }),
+ ),
+ }
+ }
+
+ fn re_infer(&self, def: Option<&ty::GenericParamDef>, span: Span) -> Option<ty::Region<'tcx>> {
+ let v = match def {
+ Some(def) => infer::EarlyBoundRegion(span, def.name),
+ None => infer::MiscVariable(span),
+ };
+ Some(self.next_region_var(v))
+ }
+
+ fn allow_ty_infer(&self) -> bool {
+ true
+ }
+
+ fn ty_infer(&self, param: Option<&ty::GenericParamDef>, span: Span) -> Ty<'tcx> {
+ if let Some(param) = param {
+ if let GenericArgKind::Type(ty) = self.var_for_def(span, param).unpack() {
+ return ty;
+ }
+ unreachable!()
+ } else {
+ self.next_ty_var(TypeVariableOrigin {
+ kind: TypeVariableOriginKind::TypeInference,
+ span,
+ })
+ }
+ }
+
+ fn ct_infer(
+ &self,
+ ty: Ty<'tcx>,
+ param: Option<&ty::GenericParamDef>,
+ span: Span,
+ ) -> Const<'tcx> {
+ if let Some(param) = param {
+ if let GenericArgKind::Const(ct) = self.var_for_def(span, param).unpack() {
+ return ct;
+ }
+ unreachable!()
+ } else {
+ self.next_const_var(
+ ty,
+ ConstVariableOrigin { kind: ConstVariableOriginKind::ConstInference, span },
+ )
+ }
+ }
+
+ fn projected_ty_from_poly_trait_ref(
+ &self,
+ span: Span,
+ item_def_id: DefId,
+ item_segment: &hir::PathSegment<'_>,
+ poly_trait_ref: ty::PolyTraitRef<'tcx>,
+ ) -> Ty<'tcx> {
+ let trait_ref = self.replace_bound_vars_with_fresh_vars(
+ span,
+ infer::LateBoundRegionConversionTime::AssocTypeProjection(item_def_id),
+ poly_trait_ref,
+ );
+
+ let item_substs = <dyn AstConv<'tcx>>::create_substs_for_associated_item(
+ self,
+ span,
+ item_def_id,
+ item_segment,
+ trait_ref.substs,
+ );
+
+ self.tcx().mk_projection(item_def_id, item_substs)
+ }
+
+ fn normalize_ty(&self, span: Span, ty: Ty<'tcx>) -> Ty<'tcx> {
+ if ty.has_escaping_bound_vars() {
+ ty // FIXME: normalization and escaping regions
+ } else {
+ self.normalize_associated_types_in(span, ty)
+ }
+ }
+
+ fn set_tainted_by_errors(&self) {
+ self.infcx.set_tainted_by_errors()
+ }
+
+ fn record_ty(&self, hir_id: hir::HirId, ty: Ty<'tcx>, _span: Span) {
+ self.write_ty(hir_id, ty)
+ }
+}
diff --git a/compiler/rustc_hir_typeck/src/fn_ctxt/suggestions.rs b/compiler/rustc_hir_typeck/src/fn_ctxt/suggestions.rs
new file mode 100644
index 000000000..4db9c56f9
--- /dev/null
+++ b/compiler/rustc_hir_typeck/src/fn_ctxt/suggestions.rs
@@ -0,0 +1,1250 @@
+use super::FnCtxt;
+
+use crate::errors::{AddReturnTypeSuggestion, ExpectedReturnTypeLabel};
+use rustc_ast::util::parser::{ExprPrecedence, PREC_POSTFIX};
+use rustc_errors::{Applicability, Diagnostic, MultiSpan};
+use rustc_hir as hir;
+use rustc_hir::def::{CtorOf, DefKind};
+use rustc_hir::lang_items::LangItem;
+use rustc_hir::{
+ Expr, ExprKind, GenericBound, Node, Path, QPath, Stmt, StmtKind, TyKind, WherePredicate,
+};
+use rustc_hir_analysis::astconv::AstConv;
+use rustc_infer::infer::{self, TyCtxtInferExt};
+use rustc_infer::traits::{self, StatementAsExpression};
+use rustc_middle::lint::in_external_macro;
+use rustc_middle::ty::{self, Binder, IsSuggestable, ToPredicate, Ty};
+use rustc_session::errors::ExprParenthesesNeeded;
+use rustc_span::symbol::sym;
+use rustc_span::Span;
+use rustc_trait_selection::infer::InferCtxtExt;
+use rustc_trait_selection::traits::error_reporting::DefIdOrName;
+use rustc_trait_selection::traits::query::evaluate_obligation::InferCtxtExt as _;
+
+impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
+ pub(in super::super) fn suggest_semicolon_at_end(&self, span: Span, err: &mut Diagnostic) {
+ err.span_suggestion_short(
+ span.shrink_to_hi(),
+ "consider using a semicolon here",
+ ";",
+ Applicability::MachineApplicable,
+ );
+ }
+
+ /// On implicit return expressions with mismatched types, provides the following suggestions:
+ ///
+ /// - Points out the method's return type as the reason for the expected type.
+ /// - Possible missing semicolon.
+ /// - Possible missing return type if the return type is the default, and not `fn main()`.
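+    ///
+    /// A minimal (illustrative) tail-expression mismatch that exercises these suggestions:
+    ///
+    /// ```compile_fail,E0308
+    /// fn foo() -> String {
+    ///     "foo" // expected `String`, found `&str`; the `-> String` return type is pointed at
+    /// }
+    /// ```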
+ pub fn suggest_mismatched_types_on_tail(
+ &self,
+ err: &mut Diagnostic,
+ expr: &'tcx hir::Expr<'tcx>,
+ expected: Ty<'tcx>,
+ found: Ty<'tcx>,
+ blk_id: hir::HirId,
+ ) -> bool {
+ let expr = expr.peel_drop_temps();
+ self.suggest_missing_semicolon(err, expr, expected, false);
+ let mut pointing_at_return_type = false;
+ if let Some((fn_decl, can_suggest)) = self.get_fn_decl(blk_id) {
+ let fn_id = self.tcx.hir().get_return_block(blk_id).unwrap();
+ pointing_at_return_type = self.suggest_missing_return_type(
+ err,
+ &fn_decl,
+ expected,
+ found,
+ can_suggest,
+ fn_id,
+ );
+ self.suggest_missing_break_or_return_expr(
+ err, expr, &fn_decl, expected, found, blk_id, fn_id,
+ );
+ }
+ pointing_at_return_type
+ }
+
+    /// When encountering an fn-like type, try accessing the output of the type
+    /// and suggest calling it if it satisfies a predicate (i.e. if the
+    /// output has a method or a field):
+ /// ```compile_fail,E0308
+ /// fn foo(x: usize) -> usize { x }
+ /// let x: usize = foo; // suggest calling the `foo` function: `foo(42)`
+ /// ```
+ pub(crate) fn suggest_fn_call(
+ &self,
+ err: &mut Diagnostic,
+ expr: &hir::Expr<'_>,
+ found: Ty<'tcx>,
+ can_satisfy: impl FnOnce(Ty<'tcx>) -> bool,
+ ) -> bool {
+ let Some((def_id_or_name, output, inputs)) = self.extract_callable_info(expr, found)
+ else { return false; };
+ if can_satisfy(output) {
+ let (sugg_call, mut applicability) = match inputs.len() {
+ 0 => ("".to_string(), Applicability::MachineApplicable),
+ 1..=4 => (
+ inputs
+ .iter()
+ .map(|ty| {
+ if ty.is_suggestable(self.tcx, false) {
+ format!("/* {ty} */")
+ } else {
+ "/* value */".to_string()
+ }
+ })
+ .collect::<Vec<_>>()
+ .join(", "),
+ Applicability::HasPlaceholders,
+ ),
+ _ => ("/* ... */".to_string(), Applicability::HasPlaceholders),
+ };
+
+ let msg = match def_id_or_name {
+ DefIdOrName::DefId(def_id) => match self.tcx.def_kind(def_id) {
+ DefKind::Ctor(CtorOf::Struct, _) => "construct this tuple struct".to_string(),
+ DefKind::Ctor(CtorOf::Variant, _) => "construct this tuple variant".to_string(),
+ kind => format!("call this {}", kind.descr(def_id)),
+ },
+ DefIdOrName::Name(name) => format!("call this {name}"),
+ };
+
+ let sugg = match expr.kind {
+ hir::ExprKind::Call(..)
+ | hir::ExprKind::Path(..)
+ | hir::ExprKind::Index(..)
+ | hir::ExprKind::Lit(..) => {
+ vec![(expr.span.shrink_to_hi(), format!("({sugg_call})"))]
+ }
+ hir::ExprKind::Closure { .. } => {
+ // Might be `{ expr } || { bool }`
+ applicability = Applicability::MaybeIncorrect;
+ vec![
+ (expr.span.shrink_to_lo(), "(".to_string()),
+ (expr.span.shrink_to_hi(), format!(")({sugg_call})")),
+ ]
+ }
+ _ => {
+ vec![
+ (expr.span.shrink_to_lo(), "(".to_string()),
+ (expr.span.shrink_to_hi(), format!(")({sugg_call})")),
+ ]
+ }
+ };
+
+ err.multipart_suggestion_verbose(
+ format!("use parentheses to {msg}"),
+ sugg,
+ applicability,
+ );
+ return true;
+ }
+ false
+ }
+
+ /// Extracts information about a callable type for diagnostics. This is a
+ /// heuristic -- it doesn't necessarily mean that a type is always callable,
+ /// because the callable type must also be well-formed to be called.
+ pub(in super::super) fn extract_callable_info(
+ &self,
+ expr: &Expr<'_>,
+ found: Ty<'tcx>,
+ ) -> Option<(DefIdOrName, Ty<'tcx>, Vec<Ty<'tcx>>)> {
+ // Autoderef is useful here because sometimes we box callables, etc.
+ let Some((def_id_or_name, output, inputs)) = self.autoderef(expr.span, found).silence_errors().find_map(|(found, _)| {
+ match *found.kind() {
+ ty::FnPtr(fn_sig) =>
+ Some((DefIdOrName::Name("function pointer"), fn_sig.output(), fn_sig.inputs())),
+ ty::FnDef(def_id, _) => {
+ let fn_sig = found.fn_sig(self.tcx);
+ Some((DefIdOrName::DefId(def_id), fn_sig.output(), fn_sig.inputs()))
+ }
+ ty::Closure(def_id, substs) => {
+ let fn_sig = substs.as_closure().sig();
+ Some((DefIdOrName::DefId(def_id), fn_sig.output(), fn_sig.inputs().map_bound(|inputs| &inputs[1..])))
+ }
+ ty::Opaque(def_id, substs) => {
+ self.tcx.bound_item_bounds(def_id).subst(self.tcx, substs).iter().find_map(|pred| {
+ if let ty::PredicateKind::Projection(proj) = pred.kind().skip_binder()
+ && Some(proj.projection_ty.item_def_id) == self.tcx.lang_items().fn_once_output()
+ // args tuple will always be substs[1]
+ && let ty::Tuple(args) = proj.projection_ty.substs.type_at(1).kind()
+ {
+ Some((
+ DefIdOrName::DefId(def_id),
+ pred.kind().rebind(proj.term.ty().unwrap()),
+ pred.kind().rebind(args.as_slice()),
+ ))
+ } else {
+ None
+ }
+ })
+ }
+ ty::Dynamic(data, _, ty::Dyn) => {
+ data.iter().find_map(|pred| {
+ if let ty::ExistentialPredicate::Projection(proj) = pred.skip_binder()
+ && Some(proj.item_def_id) == self.tcx.lang_items().fn_once_output()
+ // for existential projection, substs are shifted over by 1
+ && let ty::Tuple(args) = proj.substs.type_at(0).kind()
+ {
+ Some((
+ DefIdOrName::Name("trait object"),
+ pred.rebind(proj.term.ty().unwrap()),
+ pred.rebind(args.as_slice()),
+ ))
+ } else {
+ None
+ }
+ })
+ }
+ ty::Param(param) => {
+ let def_id = self.tcx.generics_of(self.body_id.owner).type_param(&param, self.tcx).def_id;
+ self.tcx.predicates_of(self.body_id.owner).predicates.iter().find_map(|(pred, _)| {
+ if let ty::PredicateKind::Projection(proj) = pred.kind().skip_binder()
+ && Some(proj.projection_ty.item_def_id) == self.tcx.lang_items().fn_once_output()
+ && proj.projection_ty.self_ty() == found
+ // args tuple will always be substs[1]
+ && let ty::Tuple(args) = proj.projection_ty.substs.type_at(1).kind()
+ {
+ Some((
+ DefIdOrName::DefId(def_id),
+ pred.kind().rebind(proj.term.ty().unwrap()),
+ pred.kind().rebind(args.as_slice()),
+ ))
+ } else {
+ None
+ }
+ })
+ }
+ _ => None,
+ }
+ }) else { return None; };
+
+ let output = self.replace_bound_vars_with_fresh_vars(expr.span, infer::FnCall, output);
+ let inputs = inputs
+ .skip_binder()
+ .iter()
+ .map(|ty| {
+ self.replace_bound_vars_with_fresh_vars(
+ expr.span,
+ infer::FnCall,
+ inputs.rebind(*ty),
+ )
+ })
+ .collect();
+
+        // We don't want to register any extra obligations here: they should already
+        // be implied by well-formedness, and registering them could also result in
+        // erroneous errors later on.
+ let infer::InferOk { value: output, obligations: _ } =
+ self.normalize_associated_types_in_as_infer_ok(expr.span, output);
+
+ if output.is_ty_var() { None } else { Some((def_id_or_name, output, inputs)) }
+ }
+
+ pub fn suggest_two_fn_call(
+ &self,
+ err: &mut Diagnostic,
+ lhs_expr: &'tcx hir::Expr<'tcx>,
+ lhs_ty: Ty<'tcx>,
+ rhs_expr: &'tcx hir::Expr<'tcx>,
+ rhs_ty: Ty<'tcx>,
+ can_satisfy: impl FnOnce(Ty<'tcx>, Ty<'tcx>) -> bool,
+ ) -> bool {
+ let Some((_, lhs_output_ty, lhs_inputs)) = self.extract_callable_info(lhs_expr, lhs_ty)
+ else { return false; };
+ let Some((_, rhs_output_ty, rhs_inputs)) = self.extract_callable_info(rhs_expr, rhs_ty)
+ else { return false; };
+
+ if can_satisfy(lhs_output_ty, rhs_output_ty) {
+ let mut sugg = vec![];
+ let mut applicability = Applicability::MachineApplicable;
+
+ for (expr, inputs) in [(lhs_expr, lhs_inputs), (rhs_expr, rhs_inputs)] {
+ let (sugg_call, this_applicability) = match inputs.len() {
+ 0 => ("".to_string(), Applicability::MachineApplicable),
+ 1..=4 => (
+ inputs
+ .iter()
+ .map(|ty| {
+ if ty.is_suggestable(self.tcx, false) {
+ format!("/* {ty} */")
+ } else {
+ "/* value */".to_string()
+ }
+ })
+ .collect::<Vec<_>>()
+ .join(", "),
+ Applicability::HasPlaceholders,
+ ),
+ _ => ("/* ... */".to_string(), Applicability::HasPlaceholders),
+ };
+
+ applicability = applicability.max(this_applicability);
+
+ match expr.kind {
+ hir::ExprKind::Call(..)
+ | hir::ExprKind::Path(..)
+ | hir::ExprKind::Index(..)
+ | hir::ExprKind::Lit(..) => {
+ sugg.extend([(expr.span.shrink_to_hi(), format!("({sugg_call})"))]);
+ }
+ hir::ExprKind::Closure { .. } => {
+ // Might be `{ expr } || { bool }`
+ applicability = Applicability::MaybeIncorrect;
+ sugg.extend([
+ (expr.span.shrink_to_lo(), "(".to_string()),
+ (expr.span.shrink_to_hi(), format!(")({sugg_call})")),
+ ]);
+ }
+ _ => {
+ sugg.extend([
+ (expr.span.shrink_to_lo(), "(".to_string()),
+ (expr.span.shrink_to_hi(), format!(")({sugg_call})")),
+ ]);
+ }
+ }
+ }
+
+ err.multipart_suggestion_verbose(
+ format!("use parentheses to call these"),
+ sugg,
+ applicability,
+ );
+
+ true
+ } else {
+ false
+ }
+ }
+
+ pub fn suggest_deref_ref_or_into(
+ &self,
+ err: &mut Diagnostic,
+ expr: &hir::Expr<'tcx>,
+ expected: Ty<'tcx>,
+ found: Ty<'tcx>,
+ expected_ty_expr: Option<&'tcx hir::Expr<'tcx>>,
+ ) -> bool {
+ let expr = expr.peel_blocks();
+ if let Some((sp, msg, suggestion, applicability, verbose, annotation)) =
+ self.check_ref(expr, found, expected)
+ {
+ if verbose {
+ err.span_suggestion_verbose(sp, &msg, suggestion, applicability);
+ } else {
+ err.span_suggestion(sp, &msg, suggestion, applicability);
+ }
+ if annotation {
+ let suggest_annotation = match expr.peel_drop_temps().kind {
+ hir::ExprKind::AddrOf(hir::BorrowKind::Ref, hir::Mutability::Not, _) => "&",
+ hir::ExprKind::AddrOf(hir::BorrowKind::Ref, hir::Mutability::Mut, _) => "&mut ",
+ _ => return true,
+ };
+ let mut tuple_indexes = Vec::new();
+ let mut expr_id = expr.hir_id;
+ for (parent_id, node) in self.tcx.hir().parent_iter(expr.hir_id) {
+ match node {
+ Node::Expr(&Expr { kind: ExprKind::Tup(subs), .. }) => {
+ tuple_indexes.push(
+ subs.iter()
+ .enumerate()
+ .find(|(_, sub_expr)| sub_expr.hir_id == expr_id)
+ .unwrap()
+ .0,
+ );
+ expr_id = parent_id;
+ }
+ Node::Local(local) => {
+ if let Some(mut ty) = local.ty {
+ while let Some(index) = tuple_indexes.pop() {
+ match ty.kind {
+ TyKind::Tup(tys) => ty = &tys[index],
+ _ => return true,
+ }
+ }
+ let annotation_span = ty.span;
+ err.span_suggestion(
+ annotation_span.with_hi(annotation_span.lo()),
+ format!("alternatively, consider changing the type annotation"),
+ suggest_annotation,
+ Applicability::MaybeIncorrect,
+ );
+ }
+ break;
+ }
+ _ => break,
+ }
+ }
+ }
+ return true;
+ } else if self.suggest_else_fn_with_closure(err, expr, found, expected) {
+ return true;
+ } else if self.suggest_fn_call(err, expr, found, |output| self.can_coerce(output, expected))
+ && let ty::FnDef(def_id, ..) = &found.kind()
+ && let Some(sp) = self.tcx.hir().span_if_local(*def_id)
+ {
+ err.span_label(sp, format!("{found} defined here"));
+ return true;
+ } else if self.check_for_cast(err, expr, found, expected, expected_ty_expr) {
+ return true;
+ } else {
+ let methods = self.get_conversion_methods(expr.span, expected, found, expr.hir_id);
+ if !methods.is_empty() {
+ let mut suggestions = methods.iter()
+ .filter_map(|conversion_method| {
+ let receiver_method_ident = expr.method_ident();
+ if let Some(method_ident) = receiver_method_ident
+ && method_ident.name == conversion_method.name
+ {
+ return None // do not suggest code that is already there (#53348)
+ }
+
+ let method_call_list = [sym::to_vec, sym::to_string];
+ let mut sugg = if let ExprKind::MethodCall(receiver_method, ..) = expr.kind
+ && receiver_method.ident.name == sym::clone
+ && method_call_list.contains(&conversion_method.name)
+ // If receiver is `.clone()` and found type has one of those methods,
+ // we guess that the user wants to convert from a slice type (`&[]` or `&str`)
+ // to an owned type (`Vec` or `String`). These conversions clone internally,
+ // so we remove the user's `clone` call.
+ {
+ vec![(
+ receiver_method.ident.span,
+ conversion_method.name.to_string()
+ )]
+ } else if expr.precedence().order()
+ < ExprPrecedence::MethodCall.order()
+ {
+ vec![
+ (expr.span.shrink_to_lo(), "(".to_string()),
+ (expr.span.shrink_to_hi(), format!(").{}()", conversion_method.name)),
+ ]
+ } else {
+ vec![(expr.span.shrink_to_hi(), format!(".{}()", conversion_method.name))]
+ };
+ let struct_pat_shorthand_field = self.maybe_get_struct_pattern_shorthand_field(expr);
+ if let Some(name) = struct_pat_shorthand_field {
+ sugg.insert(
+ 0,
+ (expr.span.shrink_to_lo(), format!("{}: ", name)),
+ );
+ }
+ Some(sugg)
+ })
+ .peekable();
+ if suggestions.peek().is_some() {
+ err.multipart_suggestions(
+ "try using a conversion method",
+ suggestions,
+ Applicability::MaybeIncorrect,
+ );
+ return true;
+ }
+ } else if let ty::Adt(found_adt, found_substs) = found.kind()
+ && self.tcx.is_diagnostic_item(sym::Option, found_adt.did())
+ && let ty::Adt(expected_adt, expected_substs) = expected.kind()
+ && self.tcx.is_diagnostic_item(sym::Option, expected_adt.did())
+ && let ty::Ref(_, inner_ty, _) = expected_substs.type_at(0).kind()
+ && inner_ty.is_str()
+ {
+ let ty = found_substs.type_at(0);
+ let mut peeled = ty;
+ let mut ref_cnt = 0;
+ while let ty::Ref(_, inner, _) = peeled.kind() {
+ peeled = *inner;
+ ref_cnt += 1;
+ }
+ if let ty::Adt(adt, _) = peeled.kind()
+ && self.tcx.is_diagnostic_item(sym::String, adt.did())
+ {
+ err.span_suggestion_verbose(
+ expr.span.shrink_to_hi(),
+ "try converting the passed type into a `&str`",
+ format!(".map(|x| &*{}x)", "*".repeat(ref_cnt)),
+ Applicability::MaybeIncorrect,
+ );
+ return true;
+ }
+ }
+ }
+
+ false
+ }
+
+    /// When a boxed value is expected but the found value is allocated on the stack, suggest
+    /// allocating it on the heap by calling `Box::new()`.
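+    ///
+    /// A hypothetical example where this suggestion fires:
+    ///
+    /// ```compile_fail,E0308
+    /// fn boxed() -> Box<u32> {
+    ///     42 // expected `Box<u32>`, found integer; suggest wrapping in `Box::new(42)`
+    /// }
+    /// ```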
+ pub(in super::super) fn suggest_boxing_when_appropriate(
+ &self,
+ err: &mut Diagnostic,
+ expr: &hir::Expr<'_>,
+ expected: Ty<'tcx>,
+ found: Ty<'tcx>,
+ ) -> bool {
+ if self.tcx.hir().is_inside_const_context(expr.hir_id) {
+ // Do not suggest `Box::new` in const context.
+ return false;
+ }
+ if !expected.is_box() || found.is_box() {
+ return false;
+ }
+ let boxed_found = self.tcx.mk_box(found);
+ if self.can_coerce(boxed_found, expected) {
+ err.multipart_suggestion(
+ "store this in the heap by calling `Box::new`",
+ vec![
+ (expr.span.shrink_to_lo(), "Box::new(".to_string()),
+ (expr.span.shrink_to_hi(), ")".to_string()),
+ ],
+ Applicability::MachineApplicable,
+ );
+ err.note(
+ "for more on the distinction between the stack and the heap, read \
+ https://doc.rust-lang.org/book/ch15-01-box.html, \
+ https://doc.rust-lang.org/rust-by-example/std/box.html, and \
+ https://doc.rust-lang.org/std/boxed/index.html",
+ );
+ true
+ } else {
+ false
+ }
+ }
+
+    /// When encountering a closure that captures variables where an `fn` pointer is expected,
+    /// suggest using a non-capturing closure.
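+    ///
+    /// An illustrative (hypothetical) case:
+    ///
+    /// ```compile_fail,E0308
+    /// let x = 1;
+    /// let f: fn() -> i32 = || x; // the closure captures `x`, so it cannot coerce to `fn() -> i32`
+    /// ```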
+ pub(in super::super) fn suggest_no_capture_closure(
+ &self,
+ err: &mut Diagnostic,
+ expected: Ty<'tcx>,
+ found: Ty<'tcx>,
+ ) -> bool {
+ if let (ty::FnPtr(_), ty::Closure(def_id, _)) = (expected.kind(), found.kind()) {
+ if let Some(upvars) = self.tcx.upvars_mentioned(*def_id) {
+                // Report up to four captured upvars to reduce the number of error messages
+                // reported back to the user.
+ let spans_and_labels = upvars
+ .iter()
+ .take(4)
+ .map(|(var_hir_id, upvar)| {
+ let var_name = self.tcx.hir().name(*var_hir_id).to_string();
+ let msg = format!("`{}` captured here", var_name);
+ (upvar.span, msg)
+ })
+ .collect::<Vec<_>>();
+
+ let mut multi_span: MultiSpan =
+ spans_and_labels.iter().map(|(sp, _)| *sp).collect::<Vec<_>>().into();
+ for (sp, label) in spans_and_labels {
+ multi_span.push_span_label(sp, label);
+ }
+ err.span_note(
+ multi_span,
+ "closures can only be coerced to `fn` types if they do not capture any variables"
+ );
+ return true;
+ }
+ }
+ false
+ }
+
+ /// When encountering an `impl Future` where `BoxFuture` is expected, suggest `Box::pin`.
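+    ///
+    /// A sketch of the kind of code this targets (hypothetical, spelled with
+    /// `Pin<Box<dyn Future>>` rather than the `BoxFuture` alias):
+    ///
+    /// ```compile_fail,E0308
+    /// use std::future::Future;
+    /// use std::pin::Pin;
+    ///
+    /// fn make_fut() -> Pin<Box<dyn Future<Output = ()>>> {
+    ///     async {} // suggest `Box::pin(async {})`
+    /// }
+    /// ```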
+ #[instrument(skip(self, err))]
+ pub(in super::super) fn suggest_calling_boxed_future_when_appropriate(
+ &self,
+ err: &mut Diagnostic,
+ expr: &hir::Expr<'_>,
+ expected: Ty<'tcx>,
+ found: Ty<'tcx>,
+ ) -> bool {
+ // Handle #68197.
+
+ if self.tcx.hir().is_inside_const_context(expr.hir_id) {
+ // Do not suggest `Box::new` in const context.
+ return false;
+ }
+ let pin_did = self.tcx.lang_items().pin_type();
+ // This guards the `unwrap` and `mk_box` below.
+ if pin_did.is_none() || self.tcx.lang_items().owned_box().is_none() {
+ return false;
+ }
+ let box_found = self.tcx.mk_box(found);
+ let pin_box_found = self.tcx.mk_lang_item(box_found, LangItem::Pin).unwrap();
+ let pin_found = self.tcx.mk_lang_item(found, LangItem::Pin).unwrap();
+ match expected.kind() {
+ ty::Adt(def, _) if Some(def.did()) == pin_did => {
+ if self.can_coerce(pin_box_found, expected) {
+ debug!("can coerce {:?} to {:?}, suggesting Box::pin", pin_box_found, expected);
+ match found.kind() {
+ ty::Adt(def, _) if def.is_box() => {
+ err.help("use `Box::pin`");
+ }
+ _ => {
+ err.multipart_suggestion(
+ "you need to pin and box this expression",
+ vec![
+ (expr.span.shrink_to_lo(), "Box::pin(".to_string()),
+ (expr.span.shrink_to_hi(), ")".to_string()),
+ ],
+ Applicability::MaybeIncorrect,
+ );
+ }
+ }
+ true
+ } else if self.can_coerce(pin_found, expected) {
+ match found.kind() {
+ ty::Adt(def, _) if def.is_box() => {
+ err.help("use `Box::pin`");
+ true
+ }
+ _ => false,
+ }
+ } else {
+ false
+ }
+ }
+ ty::Adt(def, _) if def.is_box() && self.can_coerce(box_found, expected) => {
+ // Check if the parent expression is a call to Pin::new. If it
+ // is and we were expecting a Box, ergo Pin<Box<expected>>, we
+ // can suggest Box::pin.
+ let parent = self.tcx.hir().get_parent_node(expr.hir_id);
+ let Some(Node::Expr(Expr { kind: ExprKind::Call(fn_name, _), .. })) = self.tcx.hir().find(parent) else {
+ return false;
+ };
+ match fn_name.kind {
+ ExprKind::Path(QPath::TypeRelative(
+ hir::Ty {
+ kind: TyKind::Path(QPath::Resolved(_, Path { res: recv_ty, .. })),
+ ..
+ },
+ method,
+ )) if recv_ty.opt_def_id() == pin_did && method.ident.name == sym::new => {
+ err.span_suggestion(
+ fn_name.span,
+ "use `Box::pin` to pin and box this expression",
+ "Box::pin",
+ Applicability::MachineApplicable,
+ );
+ true
+ }
+ _ => false,
+ }
+ }
+ _ => false,
+ }
+ }
+
+ /// A common error is to forget to add a semicolon at the end of a block, e.g.,
+ ///
+ /// ```compile_fail,E0308
+ /// # fn bar_that_returns_u32() -> u32 { 4 }
+ /// fn foo() {
+ /// bar_that_returns_u32()
+ /// }
+ /// ```
+ ///
+    /// This routine checks whether the return expression in a block would make sense on its own as
+    /// a statement and whether the return type has been left as default or specified as `()`. If
+    /// so, it suggests adding a semicolon.
+ ///
+ /// If the expression is the expression of a closure without block (`|| expr`), a
+ /// block is needed to be added too (`|| { expr; }`). This is denoted by `needs_block`.
+ pub fn suggest_missing_semicolon(
+ &self,
+ err: &mut Diagnostic,
+ expression: &'tcx hir::Expr<'tcx>,
+ expected: Ty<'tcx>,
+ needs_block: bool,
+ ) {
+ if expected.is_unit() {
+            // `BlockTailExpression` is only relevant if the tail expr would be
+ // useful on its own.
+ match expression.kind {
+ ExprKind::Call(..)
+ | ExprKind::MethodCall(..)
+ | ExprKind::Loop(..)
+ | ExprKind::If(..)
+ | ExprKind::Match(..)
+ | ExprKind::Block(..)
+ if expression.can_have_side_effects()
+ // If the expression is from an external macro, then do not suggest
+ // adding a semicolon, because there's nowhere to put it.
+ // See issue #81943.
+ && !in_external_macro(self.tcx.sess, expression.span) =>
+ {
+ if needs_block {
+ err.multipart_suggestion(
+ "consider using a semicolon here",
+ vec![
+ (expression.span.shrink_to_lo(), "{ ".to_owned()),
+ (expression.span.shrink_to_hi(), "; }".to_owned()),
+ ],
+ Applicability::MachineApplicable,
+ );
+ } else {
+ err.span_suggestion(
+ expression.span.shrink_to_hi(),
+ "consider using a semicolon here",
+ ";",
+ Applicability::MachineApplicable,
+ );
+ }
+ }
+ _ => (),
+ }
+ }
+ }
+
+ /// A possible error is to forget to add a return type that is needed:
+ ///
+ /// ```compile_fail,E0308
+ /// # fn bar_that_returns_u32() -> u32 { 4 }
+ /// fn foo() {
+ /// bar_that_returns_u32()
+ /// }
+ /// ```
+ ///
+    /// This routine checks that the return type is left as default, that the method is not part of
+    /// an `impl` block, and that it isn't the `main` method. If so, it suggests setting the return
+ /// type.
+ pub(in super::super) fn suggest_missing_return_type(
+ &self,
+ err: &mut Diagnostic,
+ fn_decl: &hir::FnDecl<'_>,
+ expected: Ty<'tcx>,
+ found: Ty<'tcx>,
+ can_suggest: bool,
+ fn_id: hir::HirId,
+ ) -> bool {
+ let found =
+ self.resolve_numeric_literals_with_default(self.resolve_vars_if_possible(found));
+ // Only suggest changing the return type for methods that
+ // haven't set a return type at all (and aren't `fn main()` or an impl).
+ match &fn_decl.output {
+ &hir::FnRetTy::DefaultReturn(span) if expected.is_unit() && !can_suggest => {
+ // `fn main()` must return `()`, do not suggest changing return type
+ err.subdiagnostic(ExpectedReturnTypeLabel::Unit { span });
+ return true;
+ }
+ &hir::FnRetTy::DefaultReturn(span) if expected.is_unit() => {
+ if found.is_suggestable(self.tcx, false) {
+ err.subdiagnostic(AddReturnTypeSuggestion::Add { span, found: found.to_string() });
+ return true;
+ } else if let ty::Closure(_, substs) = found.kind()
+ // FIXME(compiler-errors): Get better at printing binders...
+ && let closure = substs.as_closure()
+ && closure.sig().is_suggestable(self.tcx, false)
+ {
+ err.subdiagnostic(AddReturnTypeSuggestion::Add { span, found: closure.print_as_impl_trait().to_string() });
+ return true;
+ } else {
+ // FIXME: if `found` could be `impl Iterator` we should suggest that.
+ err.subdiagnostic(AddReturnTypeSuggestion::MissingHere { span });
+ return true
+ }
+ }
+ &hir::FnRetTy::Return(ref ty) => {
+ // Only point to return type if the expected type is the return type, as if they
+ // are not, the expectation must have been caused by something else.
+ debug!("suggest_missing_return_type: return type {:?} node {:?}", ty, ty.kind);
+ let span = ty.span;
+ let ty = <dyn AstConv<'_>>::ast_ty_to_ty(self, ty);
+ debug!("suggest_missing_return_type: return type {:?}", ty);
+                debug!("suggest_missing_return_type: expected type {:?}", expected);
+ let bound_vars = self.tcx.late_bound_vars(fn_id);
+ let ty = Binder::bind_with_vars(ty, bound_vars);
+ let ty = self.normalize_associated_types_in(span, ty);
+ let ty = self.tcx.erase_late_bound_regions(ty);
+ if self.can_coerce(expected, ty) {
+ err.subdiagnostic(ExpectedReturnTypeLabel::Other { span, expected });
+ self.try_suggest_return_impl_trait(err, expected, ty, fn_id);
+ return true;
+ }
+ }
+ _ => {}
+ }
+ false
+ }
+
+    /// Check whether the return type is a generic type with a trait bound.
+    /// Only suggest this if the generic param is not present in the arguments;
+    /// if so, hint towards changing the return type to `impl Trait`:
+ /// ```compile_fail,E0308
+ /// fn cant_name_it<T: Fn() -> u32>() -> T {
+ /// || 3
+ /// }
+ /// ```
+ fn try_suggest_return_impl_trait(
+ &self,
+ err: &mut Diagnostic,
+ expected: Ty<'tcx>,
+ found: Ty<'tcx>,
+ fn_id: hir::HirId,
+ ) {
+ // Only apply the suggestion if:
+ // - the return type is a generic parameter
+ // - the generic param is not used as a fn param
+ // - the generic param has at least one bound
+ // - the generic param doesn't appear in any other bounds where it's not the Self type
+ // Suggest:
+ // - Changing the return type to be `impl <all bounds>`
+
+ debug!("try_suggest_return_impl_trait, expected = {:?}, found = {:?}", expected, found);
+
+ let ty::Param(expected_ty_as_param) = expected.kind() else { return };
+
+ let fn_node = self.tcx.hir().find(fn_id);
+
+ let Some(hir::Node::Item(hir::Item {
+ kind:
+ hir::ItemKind::Fn(
+ hir::FnSig { decl: hir::FnDecl { inputs: fn_parameters, output: fn_return, .. }, .. },
+ hir::Generics { params, predicates, .. },
+ _body_id,
+ ),
+ ..
+ })) = fn_node else { return };
+
+ if params.get(expected_ty_as_param.index as usize).is_none() {
+ return;
+ };
+
+        // Get all the where-clause `BoundPredicate`s here, because they are used in two cases below.
+ let where_predicates = predicates
+ .iter()
+ .filter_map(|p| match p {
+ WherePredicate::BoundPredicate(hir::WhereBoundPredicate {
+ bounds,
+ bounded_ty,
+ ..
+ }) => {
+ // FIXME: Maybe these calls to `ast_ty_to_ty` can be removed (and the ones below)
+ let ty = <dyn AstConv<'_>>::ast_ty_to_ty(self, bounded_ty);
+ Some((ty, bounds))
+ }
+ _ => None,
+ })
+ .map(|(ty, bounds)| match ty.kind() {
+ ty::Param(param_ty) if param_ty == expected_ty_as_param => Ok(Some(bounds)),
+ // check whether there is any predicate that contains our `T`, like `Option<T>: Send`
+ _ => match ty.contains(expected) {
+ true => Err(()),
+ false => Ok(None),
+ },
+ })
+ .collect::<Result<Vec<_>, _>>();
+
+ let Ok(where_predicates) = where_predicates else { return };
+
+ // now get all predicates in the same types as the where bounds, so we can chain them
+ let predicates_from_where =
+ where_predicates.iter().flatten().flat_map(|bounds| bounds.iter());
+
+ // extract all bounds from the source code using their spans
+ let all_matching_bounds_strs = predicates_from_where
+ .filter_map(|bound| match bound {
+ GenericBound::Trait(_, _) => {
+ self.tcx.sess.source_map().span_to_snippet(bound.span()).ok()
+ }
+ _ => None,
+ })
+ .collect::<Vec<String>>();
+
+        if all_matching_bounds_strs.is_empty() {
+ return;
+ }
+
+ let all_bounds_str = all_matching_bounds_strs.join(" + ");
+
+ let ty_param_used_in_fn_params = fn_parameters.iter().any(|param| {
+ let ty = <dyn AstConv<'_>>::ast_ty_to_ty(self, param);
+ matches!(ty.kind(), ty::Param(fn_param_ty_param) if expected_ty_as_param == fn_param_ty_param)
+ });
+
+ if ty_param_used_in_fn_params {
+ return;
+ }
+
+ err.span_suggestion(
+ fn_return.span(),
+ "consider using an impl return type",
+ format!("impl {}", all_bounds_str),
+ Applicability::MaybeIncorrect,
+ );
+ }
+
+ pub(in super::super) fn suggest_missing_break_or_return_expr(
+ &self,
+ err: &mut Diagnostic,
+ expr: &'tcx hir::Expr<'tcx>,
+ fn_decl: &hir::FnDecl<'_>,
+ expected: Ty<'tcx>,
+ found: Ty<'tcx>,
+ id: hir::HirId,
+ fn_id: hir::HirId,
+ ) {
+ if !expected.is_unit() {
+ return;
+ }
+ let found = self.resolve_vars_with_obligations(found);
+
+ let in_loop = self.is_loop(id)
+ || self.tcx.hir().parent_iter(id).any(|(parent_id, _)| self.is_loop(parent_id));
+
+ let in_local_statement = self.is_local_statement(id)
+ || self
+ .tcx
+ .hir()
+ .parent_iter(id)
+ .any(|(parent_id, _)| self.is_local_statement(parent_id));
+
+ if in_loop && in_local_statement {
+ err.multipart_suggestion(
+ "you might have meant to break the loop with this value",
+ vec![
+ (expr.span.shrink_to_lo(), "break ".to_string()),
+ (expr.span.shrink_to_hi(), ";".to_string()),
+ ],
+ Applicability::MaybeIncorrect,
+ );
+ return;
+ }
+
+ if let hir::FnRetTy::Return(ty) = fn_decl.output {
+ let ty = <dyn AstConv<'_>>::ast_ty_to_ty(self, ty);
+ let bound_vars = self.tcx.late_bound_vars(fn_id);
+ let ty = self.tcx.erase_late_bound_regions(Binder::bind_with_vars(ty, bound_vars));
+ let ty = self.normalize_associated_types_in(expr.span, ty);
+ let ty = match self.tcx.asyncness(fn_id.owner) {
+ hir::IsAsync::Async => {
+ let infcx = self.tcx.infer_ctxt().build();
+ infcx
+ .get_impl_future_output_ty(ty)
+ .unwrap_or_else(|| {
+ span_bug!(
+ fn_decl.output.span(),
+ "failed to get output type of async function"
+ )
+ })
+ .skip_binder()
+ }
+ hir::IsAsync::NotAsync => ty,
+ };
+ if self.can_coerce(found, ty) {
+ err.multipart_suggestion(
+ "you might have meant to return this value",
+ vec![
+ (expr.span.shrink_to_lo(), "return ".to_string()),
+ (expr.span.shrink_to_hi(), ";".to_string()),
+ ],
+ Applicability::MaybeIncorrect,
+ );
+ }
+ }
+ }
+
+ pub(in super::super) fn suggest_missing_parentheses(
+ &self,
+ err: &mut Diagnostic,
+ expr: &hir::Expr<'_>,
+ ) -> bool {
+ let sp = self.tcx.sess.source_map().start_point(expr.span);
+ if let Some(sp) = self.tcx.sess.parse_sess.ambiguous_block_expr_parse.borrow().get(&sp) {
+ // `{ 42 } &&x` (#61475) or `{ 42 } && if x { 1 } else { 0 }`
+ err.subdiagnostic(ExprParenthesesNeeded::surrounding(*sp));
+ true
+ } else {
+ false
+ }
+ }
+
+ /// Given an expression type mismatch, peel any `&` expressions until we get to
+    /// a block expression, and then suggest replacing the braces with square brackets
+ /// if it was possibly mistaken array syntax.
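+    ///
+    /// For instance (hypothetical code), `&{ 1 }` where `&[i32; 1]` is expected:
+    ///
+    /// ```compile_fail,E0308
+    /// let x: &[i32; 1] = &{ 1 }; // probably meant `&[1]`
+    /// ```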
+ pub(crate) fn suggest_block_to_brackets_peeling_refs(
+ &self,
+ diag: &mut Diagnostic,
+ mut expr: &hir::Expr<'_>,
+ mut expr_ty: Ty<'tcx>,
+ mut expected_ty: Ty<'tcx>,
+ ) -> bool {
+ loop {
+ match (&expr.kind, expr_ty.kind(), expected_ty.kind()) {
+ (
+ hir::ExprKind::AddrOf(_, _, inner_expr),
+ ty::Ref(_, inner_expr_ty, _),
+ ty::Ref(_, inner_expected_ty, _),
+ ) => {
+ expr = *inner_expr;
+ expr_ty = *inner_expr_ty;
+ expected_ty = *inner_expected_ty;
+ }
+ (hir::ExprKind::Block(blk, _), _, _) => {
+ self.suggest_block_to_brackets(diag, *blk, expr_ty, expected_ty);
+ break true;
+ }
+ _ => break false,
+ }
+ }
+ }
+
+ pub(crate) fn suggest_copied_or_cloned(
+ &self,
+ diag: &mut Diagnostic,
+ expr: &hir::Expr<'_>,
+ expr_ty: Ty<'tcx>,
+ expected_ty: Ty<'tcx>,
+ ) -> bool {
+ let ty::Adt(adt_def, substs) = expr_ty.kind() else { return false; };
+ let ty::Adt(expected_adt_def, expected_substs) = expected_ty.kind() else { return false; };
+ if adt_def != expected_adt_def {
+ return false;
+ }
+
+ let mut suggest_copied_or_cloned = || {
+ let expr_inner_ty = substs.type_at(0);
+ let expected_inner_ty = expected_substs.type_at(0);
+ if let ty::Ref(_, ty, hir::Mutability::Not) = expr_inner_ty.kind()
+ && self.can_eq(self.param_env, *ty, expected_inner_ty).is_ok()
+ {
+ let def_path = self.tcx.def_path_str(adt_def.did());
+ if self.type_is_copy_modulo_regions(self.param_env, *ty, expr.span) {
+ diag.span_suggestion_verbose(
+ expr.span.shrink_to_hi(),
+ format!(
+ "use `{def_path}::copied` to copy the value inside the `{def_path}`"
+ ),
+ ".copied()",
+ Applicability::MachineApplicable,
+ );
+ return true;
+ } else if let Some(clone_did) = self.tcx.lang_items().clone_trait()
+ && rustc_trait_selection::traits::type_known_to_meet_bound_modulo_regions(
+ self,
+ self.param_env,
+ *ty,
+ clone_did,
+ expr.span
+ )
+ {
+ diag.span_suggestion_verbose(
+ expr.span.shrink_to_hi(),
+ format!(
+ "use `{def_path}::cloned` to clone the value inside the `{def_path}`"
+ ),
+ ".cloned()",
+ Applicability::MachineApplicable,
+ );
+ return true;
+ }
+ }
+ false
+ };
+
+ if let Some(result_did) = self.tcx.get_diagnostic_item(sym::Result)
+ && adt_def.did() == result_did
+ // Check that the error types are equal
+ && self.can_eq(self.param_env, substs.type_at(1), expected_substs.type_at(1)).is_ok()
+ {
+ return suggest_copied_or_cloned();
+ } else if let Some(option_did) = self.tcx.get_diagnostic_item(sym::Option)
+ && adt_def.did() == option_did
+ {
+ return suggest_copied_or_cloned();
+ }
+
+ false
+ }
+
+ pub(crate) fn suggest_into(
+ &self,
+ diag: &mut Diagnostic,
+ expr: &hir::Expr<'_>,
+ expr_ty: Ty<'tcx>,
+ expected_ty: Ty<'tcx>,
+ ) -> bool {
+ let expr = expr.peel_blocks();
+
+ // We have better suggestions for scalar interconversions...
+ if expr_ty.is_scalar() && expected_ty.is_scalar() {
+ return false;
+ }
+
+ // Don't suggest turning a block into another type (e.g. `{}.into()`)
+ if matches!(expr.kind, hir::ExprKind::Block(..)) {
+ return false;
+ }
+
+ // We'll later suggest `.as_ref` when noting the type error,
+ // so skip if we will suggest that instead.
+ if self.err_ctxt().should_suggest_as_ref(expected_ty, expr_ty).is_some() {
+ return false;
+ }
+
+ if let Some(into_def_id) = self.tcx.get_diagnostic_item(sym::Into)
+ && self.predicate_must_hold_modulo_regions(&traits::Obligation::new(
+ self.misc(expr.span),
+ self.param_env,
+ ty::Binder::dummy(ty::TraitRef {
+ def_id: into_def_id,
+ substs: self.tcx.mk_substs_trait(expr_ty, &[expected_ty.into()]),
+ })
+ .to_poly_trait_predicate()
+ .to_predicate(self.tcx),
+ ))
+ {
+ let sugg = if expr.precedence().order() >= PREC_POSTFIX {
+ vec![(expr.span.shrink_to_hi(), ".into()".to_owned())]
+ } else {
+ vec![(expr.span.shrink_to_lo(), "(".to_owned()), (expr.span.shrink_to_hi(), ").into()".to_owned())]
+ };
+ diag.multipart_suggestion(
+ format!("call `Into::into` on this expression to convert `{expr_ty}` into `{expected_ty}`"),
+ sugg,
+ Applicability::MaybeIncorrect
+ );
+ return true;
+ }
+
+ false
+ }
+
+ /// Suggest wrapping the block in square brackets instead of curly braces
+ /// in case the block was mistaken array syntax, e.g. `{ 1 }` -> `[ 1 ]`.
+ pub(crate) fn suggest_block_to_brackets(
+ &self,
+ diag: &mut Diagnostic,
+ blk: &hir::Block<'_>,
+ blk_ty: Ty<'tcx>,
+ expected_ty: Ty<'tcx>,
+ ) {
+ if let ty::Slice(elem_ty) | ty::Array(elem_ty, _) = expected_ty.kind() {
+ if self.can_coerce(blk_ty, *elem_ty)
+ && blk.stmts.is_empty()
+ && blk.rules == hir::BlockCheckMode::DefaultBlock
+ {
+ let source_map = self.tcx.sess.source_map();
+ if let Ok(snippet) = source_map.span_to_snippet(blk.span) {
+ if snippet.starts_with('{') && snippet.ends_with('}') {
+ diag.multipart_suggestion_verbose(
+ "to create an array, use square brackets instead of curly braces",
+ vec![
+ (
+ blk.span
+ .shrink_to_lo()
+ .with_hi(rustc_span::BytePos(blk.span.lo().0 + 1)),
+ "[".to_string(),
+ ),
+ (
+ blk.span
+ .shrink_to_hi()
+ .with_lo(rustc_span::BytePos(blk.span.hi().0 - 1)),
+ "]".to_string(),
+ ),
+ ],
+ Applicability::MachineApplicable,
+ );
+ }
+ }
+ }
+ }
+ }
+
+ fn is_loop(&self, id: hir::HirId) -> bool {
+ let node = self.tcx.hir().get(id);
+ matches!(node, Node::Expr(Expr { kind: ExprKind::Loop(..), .. }))
+ }
+
+ fn is_local_statement(&self, id: hir::HirId) -> bool {
+ let node = self.tcx.hir().get(id);
+ matches!(node, Node::Stmt(Stmt { kind: StmtKind::Local(..), .. }))
+ }
+
+ /// Suggest that `&T` was cloned instead of `T` because `T` does not implement `Clone`,
+ /// which is a side-effect of autoref.
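+    ///
+    /// A hypothetical case that produces this note:
+    ///
+    /// ```compile_fail,E0308
+    /// struct Foo;
+    /// fn takes_foo(_: Foo) {}
+    ///
+    /// let foo = &Foo;
+    /// takes_foo(foo.clone()); // clones the `&Foo` (autoref), so we get `&Foo` where `Foo` is expected
+    /// ```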
+ pub(crate) fn note_type_is_not_clone(
+ &self,
+ diag: &mut Diagnostic,
+ expected_ty: Ty<'tcx>,
+ found_ty: Ty<'tcx>,
+ expr: &hir::Expr<'_>,
+ ) {
+ let hir::ExprKind::MethodCall(segment, callee_expr, &[], _) = expr.kind else { return; };
+ let Some(clone_trait_did) = self.tcx.lang_items().clone_trait() else { return; };
+ let ty::Ref(_, pointee_ty, _) = found_ty.kind() else { return };
+ let results = self.typeck_results.borrow();
+ // First, look for a `Clone::clone` call
+ if segment.ident.name == sym::clone
+ && results.type_dependent_def_id(expr.hir_id).map_or(
+ false,
+ |did| {
+ let assoc_item = self.tcx.associated_item(did);
+ assoc_item.container == ty::AssocItemContainer::TraitContainer
+ && assoc_item.container_id(self.tcx) == clone_trait_did
+ },
+ )
+ // If that clone call hasn't already dereferenced the self type (i.e. don't give this
+ // diagnostic in cases where we have `(&&T).clone()` and we expect `T`).
+ && !results.expr_adjustments(callee_expr).iter().any(|adj| matches!(adj.kind, ty::adjustment::Adjust::Deref(..)))
+ // Check that we're in fact trying to clone into the expected type
+ && self.can_coerce(*pointee_ty, expected_ty)
+ // And the expected type doesn't implement `Clone`
+ && !self.predicate_must_hold_considering_regions(&traits::Obligation {
+ cause: traits::ObligationCause::dummy(),
+ param_env: self.param_env,
+ recursion_depth: 0,
+ predicate: ty::Binder::dummy(ty::TraitRef {
+ def_id: clone_trait_did,
+ substs: self.tcx.mk_substs([expected_ty.into()].iter()),
+ })
+ .without_const()
+ .to_predicate(self.tcx),
+ })
+ {
+ diag.span_note(
+ callee_expr.span,
+ &format!(
+ "`{expected_ty}` does not implement `Clone`, so `{found_ty}` was cloned instead"
+ ),
+ );
+ }
+ }
+
+ /// A common error is to add an extra semicolon:
+ ///
+ /// ```compile_fail,E0308
+ /// fn foo() -> usize {
+ /// 22;
+ /// }
+ /// ```
+ ///
+ /// This routine checks if the final statement in a block is an
+ /// expression with an explicit semicolon whose type is compatible
+ /// with `expected_ty`. If so, it suggests removing the semicolon.
+ pub(crate) fn consider_removing_semicolon(
+ &self,
+ blk: &'tcx hir::Block<'tcx>,
+ expected_ty: Ty<'tcx>,
+ err: &mut Diagnostic,
+ ) -> bool {
+ if let Some((span_semi, boxed)) = self.err_ctxt().could_remove_semicolon(blk, expected_ty) {
+ if let StatementAsExpression::NeedsBoxing = boxed {
+ err.span_suggestion_verbose(
+ span_semi,
+ "consider removing this semicolon and boxing the expression",
+ "",
+ Applicability::HasPlaceholders,
+ );
+ } else {
+ err.span_suggestion_short(
+ span_semi,
+ "remove this semicolon to return this value",
+ "",
+ Applicability::MachineApplicable,
+ );
+ }
+ true
+ } else {
+ false
+ }
+ }
+}
diff --git a/compiler/rustc_typeck/src/check/gather_locals.rs b/compiler/rustc_hir_typeck/src/gather_locals.rs
index 8f34a970f..9a096f24f 100644
--- a/compiler/rustc_typeck/src/check/gather_locals.rs
+++ b/compiler/rustc_hir_typeck/src/gather_locals.rs
@@ -1,9 +1,10 @@
-use crate::check::{FnCtxt, LocalTy, UserType};
+use crate::{FnCtxt, LocalTy};
use rustc_hir as hir;
use rustc_hir::intravisit::{self, Visitor};
use rustc_hir::PatKind;
use rustc_infer::infer::type_variable::{TypeVariableOrigin, TypeVariableOriginKind};
use rustc_middle::ty::Ty;
+use rustc_middle::ty::UserType;
use rustc_span::Span;
use rustc_trait_selection::traits;
diff --git a/compiler/rustc_typeck/src/check/generator_interior/drop_ranges/cfg_build.rs b/compiler/rustc_hir_typeck/src/generator_interior/drop_ranges/cfg_build.rs
index a2c23db16..122ad7009 100644
--- a/compiler/rustc_typeck/src/check/generator_interior/drop_ranges/cfg_build.rs
+++ b/compiler/rustc_hir_typeck/src/generator_interior/drop_ranges/cfg_build.rs
@@ -210,7 +210,7 @@ impl<'a, 'tcx> DropRangeVisitor<'a, 'tcx> {
}
/// For an expression with an uninhabited return type (e.g. a function that returns !),
- /// this adds a self edge to to the CFG to model the fact that the function does not
+ /// this adds a self edge to the CFG to model the fact that the function does not
/// return.
fn handle_uninhabited_return(&mut self, expr: &Expr<'tcx>) {
let ty = self.typeck_results.expr_ty(expr);
@@ -256,6 +256,8 @@ impl<'a, 'tcx> DropRangeVisitor<'a, 'tcx> {
| hir::Node::TypeBinding(..)
| hir::Node::TraitRef(..)
| hir::Node::Pat(..)
+ | hir::Node::PatField(..)
+ | hir::Node::ExprField(..)
| hir::Node::Arm(..)
| hir::Node::Local(..)
| hir::Node::Ctor(..)
@@ -432,7 +434,8 @@ impl<'a, 'tcx> Visitor<'tcx> for DropRangeVisitor<'a, 'tcx> {
self.handle_uninhabited_return(expr);
}
- ExprKind::MethodCall(_, exprs, _) => {
+ ExprKind::MethodCall(_, receiver, exprs, _) => {
+ self.visit_expr(receiver);
for expr in exprs {
self.visit_expr(expr);
}
diff --git a/compiler/rustc_typeck/src/check/generator_interior/drop_ranges/cfg_propagate.rs b/compiler/rustc_hir_typeck/src/generator_interior/drop_ranges/cfg_propagate.rs
index 139d17d2e..139d17d2e 100644
--- a/compiler/rustc_typeck/src/check/generator_interior/drop_ranges/cfg_propagate.rs
+++ b/compiler/rustc_hir_typeck/src/generator_interior/drop_ranges/cfg_propagate.rs
diff --git a/compiler/rustc_typeck/src/check/generator_interior/drop_ranges/cfg_visualize.rs b/compiler/rustc_hir_typeck/src/generator_interior/drop_ranges/cfg_visualize.rs
index c0a0bfe8e..c0a0bfe8e 100644
--- a/compiler/rustc_typeck/src/check/generator_interior/drop_ranges/cfg_visualize.rs
+++ b/compiler/rustc_hir_typeck/src/generator_interior/drop_ranges/cfg_visualize.rs
diff --git a/compiler/rustc_hir_typeck/src/generator_interior/drop_ranges/mod.rs b/compiler/rustc_hir_typeck/src/generator_interior/drop_ranges/mod.rs
new file mode 100644
index 000000000..4f3bdfbe7
--- /dev/null
+++ b/compiler/rustc_hir_typeck/src/generator_interior/drop_ranges/mod.rs
@@ -0,0 +1,309 @@
+//! Drop range analysis finds the portions of the tree where a value is guaranteed to be dropped
+//! (i.e. moved, uninitialized, etc.). This is used to exclude the types of those values from the
+//! generator type. See `InteriorVisitor::record` for where the results of this analysis are used.
+//!
+//! There are three phases to this analysis:
+//! 1. Use `ExprUseVisitor` to identify the interesting values that are consumed and borrowed.
+//! 2. Use `DropRangeVisitor` to find where the interesting values are dropped or reinitialized,
+//! and also build a control flow graph.
+//! 3. Use `DropRanges::propagate_to_fixpoint` to flow the dropped/reinitialized information through
+//! the CFG and find the exact points where we know a value is definitely dropped.
+//!
+//! The end result is a data structure that maps the post-order index of each node in the HIR tree
+//! to a set of values that are known to be dropped at that location.
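+//!
+//! As a hypothetical illustration of what the analysis enables (with `-Zdrop-tracking`):
+//!
+//! ```
+//! async fn f() {
+//!     let x = String::new();
+//!     drop(x);        // `x` is known to be dropped here...
+//!     async {}.await; // ...so `String` need not be kept in the generator type
+//! }
+//! ```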
+
+use self::cfg_build::build_control_flow_graph;
+use self::record_consumed_borrow::find_consumed_and_borrowed;
+use crate::FnCtxt;
+use hir::def_id::DefId;
+use hir::{Body, HirId, HirIdMap, Node};
+use rustc_data_structures::fx::{FxHashMap, FxHashSet};
+use rustc_hir as hir;
+use rustc_index::bit_set::BitSet;
+use rustc_index::vec::IndexVec;
+use rustc_middle::hir::map::Map;
+use rustc_middle::hir::place::{PlaceBase, PlaceWithHirId};
+use rustc_middle::ty;
+use std::collections::BTreeMap;
+use std::fmt::Debug;
+
+mod cfg_build;
+mod cfg_propagate;
+mod cfg_visualize;
+mod record_consumed_borrow;
+
+pub fn compute_drop_ranges<'a, 'tcx>(
+ fcx: &'a FnCtxt<'a, 'tcx>,
+ def_id: DefId,
+ body: &'tcx Body<'tcx>,
+) -> DropRanges {
+ if fcx.sess().opts.unstable_opts.drop_tracking {
+ let consumed_borrowed_places = find_consumed_and_borrowed(fcx, def_id, body);
+
+ let typeck_results = &fcx.typeck_results.borrow();
+ let num_exprs = fcx.tcx.region_scope_tree(def_id).body_expr_count(body.id()).unwrap_or(0);
+ let (mut drop_ranges, borrowed_temporaries) = build_control_flow_graph(
+ fcx.tcx.hir(),
+ fcx.tcx,
+ typeck_results,
+ consumed_borrowed_places,
+ body,
+ num_exprs,
+ );
+
+ drop_ranges.propagate_to_fixpoint();
+
+ debug!("borrowed_temporaries = {borrowed_temporaries:?}");
+ DropRanges {
+ tracked_value_map: drop_ranges.tracked_value_map,
+ nodes: drop_ranges.nodes,
+ borrowed_temporaries: Some(borrowed_temporaries),
+ }
+ } else {
+ // If drop range tracking is not enabled, skip all the analysis and produce an
+ // empty set of DropRanges.
+ DropRanges {
+ tracked_value_map: FxHashMap::default(),
+ nodes: IndexVec::new(),
+ borrowed_temporaries: None,
+ }
+ }
+}
+
+/// Applies `f` to each consumable node in the HIR subtree pointed to by `place`.
+///
+/// This includes the place itself; if the place is a reference to a local
+/// variable, then `f` is also called on the HIR node for that variable.
+///
+/// For example, if `place` points to `foo()`, then `f` is called once for the
+/// result of `foo`. On the other hand, if `place` points to `x` then `f` will
+/// be called both on the `ExprKind::Path` node that represents the expression
+/// as well as the HirId of the local `x` itself.
+fn for_each_consumable<'tcx>(hir: Map<'tcx>, place: TrackedValue, mut f: impl FnMut(TrackedValue)) {
+ f(place);
+ let node = hir.find(place.hir_id());
+ if let Some(Node::Expr(expr)) = node {
+ match expr.kind {
+ hir::ExprKind::Path(hir::QPath::Resolved(
+ _,
+ hir::Path { res: hir::def::Res::Local(hir_id), .. },
+ )) => {
+ f(TrackedValue::Variable(*hir_id));
+ }
+ _ => (),
+ }
+ }
+}
+
+rustc_index::newtype_index! {
+ pub struct PostOrderId {
+ DEBUG_FORMAT = "id({})",
+ }
+}
+
+rustc_index::newtype_index! {
+ pub struct TrackedValueIndex {
+ DEBUG_FORMAT = "hidx({})",
+ }
+}
+
+/// Identifies a value whose drop state we need to track.
+#[derive(PartialEq, Eq, Hash, Clone, Copy)]
+enum TrackedValue {
+ /// Represents a named variable, such as a let binding, parameter, or upvar.
+ ///
+ /// The HirId points to the variable's definition site.
+ Variable(HirId),
+ /// A value produced as a result of an expression.
+ ///
+ /// The HirId points to the expression that returns this value.
+ Temporary(HirId),
+}
+
+impl Debug for TrackedValue {
+ fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+ ty::tls::with_opt(|opt_tcx| {
+ if let Some(tcx) = opt_tcx {
+ write!(f, "{}", tcx.hir().node_to_string(self.hir_id()))
+ } else {
+ match self {
+ Self::Variable(hir_id) => write!(f, "Variable({:?})", hir_id),
+ Self::Temporary(hir_id) => write!(f, "Temporary({:?})", hir_id),
+ }
+ }
+ })
+ }
+}
+
+impl TrackedValue {
+ fn hir_id(&self) -> HirId {
+ match self {
+ TrackedValue::Variable(hir_id) | TrackedValue::Temporary(hir_id) => *hir_id,
+ }
+ }
+
+ fn from_place_with_projections_allowed(place_with_id: &PlaceWithHirId<'_>) -> Self {
+ match place_with_id.place.base {
+ PlaceBase::Rvalue | PlaceBase::StaticItem => {
+ TrackedValue::Temporary(place_with_id.hir_id)
+ }
+ PlaceBase::Local(hir_id)
+ | PlaceBase::Upvar(ty::UpvarId { var_path: ty::UpvarPath { hir_id }, .. }) => {
+ TrackedValue::Variable(hir_id)
+ }
+ }
+ }
+}
+
+/// Represents a reason why we might not be able to convert a HirId or Place
+/// into a tracked value.
+#[derive(Debug)]
+enum TrackedValueConversionError {
+    /// Place projections are not currently supported.
+ ///
+ /// The reasoning around these is kind of subtle, so we choose to be more
+ /// conservative around these for now. There is no reason in theory we
+ /// cannot support these, we just have not implemented it yet.
+ PlaceProjectionsNotSupported,
+}
+
+impl TryFrom<&PlaceWithHirId<'_>> for TrackedValue {
+ type Error = TrackedValueConversionError;
+
+ fn try_from(place_with_id: &PlaceWithHirId<'_>) -> Result<Self, Self::Error> {
+ if !place_with_id.place.projections.is_empty() {
+ debug!(
+ "TrackedValue from PlaceWithHirId: {:?} has projections, which are not supported.",
+ place_with_id
+ );
+ return Err(TrackedValueConversionError::PlaceProjectionsNotSupported);
+ }
+
+ Ok(TrackedValue::from_place_with_projections_allowed(place_with_id))
+ }
+}
+
+pub struct DropRanges {
+ tracked_value_map: FxHashMap<TrackedValue, TrackedValueIndex>,
+ nodes: IndexVec<PostOrderId, NodeInfo>,
+ borrowed_temporaries: Option<FxHashSet<HirId>>,
+}
+
+impl DropRanges {
+ pub fn is_dropped_at(&self, hir_id: HirId, location: usize) -> bool {
+ self.tracked_value_map
+ .get(&TrackedValue::Temporary(hir_id))
+ .or(self.tracked_value_map.get(&TrackedValue::Variable(hir_id)))
+ .cloned()
+ .map_or(false, |tracked_value_id| {
+ self.expect_node(location.into()).drop_state.contains(tracked_value_id)
+ })
+ }
+
+ pub fn is_borrowed_temporary(&self, expr: &hir::Expr<'_>) -> bool {
+ if let Some(b) = &self.borrowed_temporaries { b.contains(&expr.hir_id) } else { true }
+ }
+
+ /// Returns a reference to the NodeInfo for a node, panicking if it does not exist
+ fn expect_node(&self, id: PostOrderId) -> &NodeInfo {
+ &self.nodes[id]
+ }
+}
+
+/// Tracks information needed to compute drop ranges.
+struct DropRangesBuilder {
+ /// The core of DropRangesBuilder is a set of nodes, which each represent
+ /// one expression. We primarily refer to them by their index in a
+ /// post-order traversal of the HIR tree, since this is what
+ /// generator_interior uses to talk about yield positions.
+ ///
+ /// This IndexVec keeps the relevant details for each node. See the
+ /// NodeInfo struct for more details, but this information includes things
+ /// such as the set of control-flow successors, which variables are dropped
+ /// or reinitialized, and whether each variable has been inferred to be
+ /// known-dropped or potentially reinitialized at each point.
+ nodes: IndexVec<PostOrderId, NodeInfo>,
+    /// We refer to values whose drop state we are tracking by the HirId of
+    /// where they are defined. Within a NodeInfo, however, we store the
+    /// drop state in a bit vector indexed by a TrackedValueIndex
+    /// (see NodeInfo::drop_state). The tracked_value_map field stores the
+    /// mapping from tracked values to the TrackedValueIndex that is used to
+    /// represent that value in the bit vector.
+ tracked_value_map: FxHashMap<TrackedValue, TrackedValueIndex>,
+
+ /// When building the control flow graph, we don't always know the
+ /// post-order index of the target node at the point we encounter it.
+ /// For example, this happens with break and continue. In those cases,
+ /// we store a pair of the PostOrderId of the source and the HirId
+ /// of the target. Once we have gathered all of these edges, we make a
+ /// pass over the set of deferred edges (see process_deferred_edges in
+ /// cfg_build.rs), look up the PostOrderId for the target (since now the
+ /// post-order index for all nodes is known), and add missing control flow
+ /// edges.
+ deferred_edges: Vec<(PostOrderId, HirId)>,
+ /// This maps HirIds of expressions to their post-order index. It is
+ /// used in process_deferred_edges to correctly add back-edges.
+ post_order_map: HirIdMap<PostOrderId>,
+}
+
+impl Debug for DropRangesBuilder {
+ fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+ f.debug_struct("DropRanges")
+ .field("hir_id_map", &self.tracked_value_map)
+ .field("post_order_maps", &self.post_order_map)
+ .field("nodes", &self.nodes.iter_enumerated().collect::<BTreeMap<_, _>>())
+ .finish()
+ }
+}
+
+/// DropRanges keeps track of what values are definitely dropped at each point in the code.
+///
+/// Values of interest are defined by the hir_id of their place. Locations in code are identified
+/// by their index in the post-order traversal. At its core, DropRanges maps
+/// (hir_id, post_order_id) -> bool, where a true value indicates that the value is definitely
+/// dropped at the point of the node identified by post_order_id.
+impl DropRangesBuilder {
+ /// Returns the number of values (hir_ids) that are tracked
+ fn num_values(&self) -> usize {
+ self.tracked_value_map.len()
+ }
+
+ fn node_mut(&mut self, id: PostOrderId) -> &mut NodeInfo {
+ let size = self.num_values();
+ self.nodes.ensure_contains_elem(id, || NodeInfo::new(size));
+ &mut self.nodes[id]
+ }
+
+ fn add_control_edge(&mut self, from: PostOrderId, to: PostOrderId) {
+ trace!("adding control edge from {:?} to {:?}", from, to);
+ self.node_mut(from).successors.push(to);
+ }
+}
+
+#[derive(Debug)]
+struct NodeInfo {
+ /// IDs of nodes that can follow this one in the control flow
+ ///
+ /// If the vec is empty, then control proceeds to the next node.
+ successors: Vec<PostOrderId>,
+
+ /// List of hir_ids that are dropped by this node.
+ drops: Vec<TrackedValueIndex>,
+
+ /// List of hir_ids that are reinitialized by this node.
+ reinits: Vec<TrackedValueIndex>,
+
+ /// Set of values that are definitely dropped at this point.
+ drop_state: BitSet<TrackedValueIndex>,
+}
+
+impl NodeInfo {
+ fn new(num_values: usize) -> Self {
+ Self {
+ successors: vec![],
+ drops: vec![],
+ reinits: vec![],
+ drop_state: BitSet::new_filled(num_values),
+ }
+ }
+}
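
The module comment at the top of this new file describes a three-phase analysis whose final step flows drop/reinit facts through the control-flow graph until nothing changes. As a rough, self-contained sketch of that fixpoint idea, using plain indices and hash sets instead of the crate's `PostOrderId`/`BitSet` types (the meet and transfer rules below are simplifying assumptions for illustration, not the contents of `cfg_propagate.rs`):

```rust
use std::collections::HashSet;

// Simplified stand-in for `NodeInfo`: successors, drops, reinits, and the
// set of values considered definitely dropped at this node.
#[derive(Default)]
struct Node {
    successors: Vec<usize>,
    drops: Vec<usize>,
    reinits: Vec<usize>,
    drop_state: HashSet<usize>,
}

fn propagate_to_fixpoint(nodes: &mut [Node]) {
    // Predecessor map derived from the successor lists.
    let mut preds: Vec<Vec<usize>> = vec![Vec::new(); nodes.len()];
    for (id, node) in nodes.iter().enumerate() {
        for &succ in &node.successors {
            preds[succ].push(id);
        }
    }

    let mut changed = true;
    while changed {
        changed = false;
        for id in 0..nodes.len() {
            // Meet: a value is only *definitely* dropped if it is dropped on
            // every incoming edge; a node with no predecessors starts empty.
            let mut state = match preds[id].split_first() {
                None => HashSet::new(),
                Some((&first, rest)) => {
                    let mut s = nodes[first].drop_state.clone();
                    for &p in rest {
                        let pred_state = &nodes[p].drop_state;
                        s.retain(|v| pred_state.contains(v));
                    }
                    s
                }
            };
            // Transfer: apply this node's own drops and reinitializations.
            state.extend(nodes[id].drops.iter().copied());
            for r in &nodes[id].reinits {
                state.remove(r);
            }
            if state != nodes[id].drop_state {
                nodes[id].drop_state = state;
                changed = true;
            }
        }
    }
}

fn main() {
    // A diamond: node 0 branches to nodes 1 and 2, which both join at node 3.
    // Value 0 is dropped only on the branch through node 1, so it is dropped
    // at node 1 but not *definitely* dropped at the join point, node 3.
    let mut nodes = vec![
        Node { successors: vec![1, 2], ..Default::default() },
        Node { successors: vec![3], drops: vec![0], ..Default::default() },
        Node { successors: vec![3], ..Default::default() },
        Node::default(),
    ];
    propagate_to_fixpoint(&mut nodes);
    assert!(nodes[1].drop_state.contains(&0));
    assert!(!nodes[3].drop_state.contains(&0));
}
```

The intersection at the join point is what keeps the analysis conservative: a value dropped on only some paths is still treated as potentially live.
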
diff --git a/compiler/rustc_typeck/src/check/generator_interior/drop_ranges/record_consumed_borrow.rs b/compiler/rustc_hir_typeck/src/generator_interior/drop_ranges/record_consumed_borrow.rs
index ded0888c3..bfe95852a 100644
--- a/compiler/rustc_typeck/src/check/generator_interior/drop_ranges/record_consumed_borrow.rs
+++ b/compiler/rustc_hir_typeck/src/generator_interior/drop_ranges/record_consumed_borrow.rs
@@ -1,13 +1,16 @@
use super::TrackedValue;
use crate::{
- check::FnCtxt,
expr_use_visitor::{self, ExprUseVisitor},
+ FnCtxt,
};
use hir::{def_id::DefId, Body, HirId, HirIdMap};
use rustc_data_structures::fx::FxHashSet;
use rustc_hir as hir;
-use rustc_middle::hir::place::{PlaceBase, Projection, ProjectionKind};
use rustc_middle::ty::{ParamEnv, TyCtxt};
+use rustc_middle::{
+ hir::place::{PlaceBase, Projection, ProjectionKind},
+ ty::TypeVisitable,
+};
pub(super) fn find_consumed_and_borrowed<'a, 'tcx>(
fcx: &'a FnCtxt<'a, 'tcx>,
@@ -159,8 +162,8 @@ impl<'tcx> expr_use_visitor::Delegate<'tcx> for ExprUseDelegate<'tcx> {
bk: rustc_middle::ty::BorrowKind,
) {
debug!(
- "borrow: place_with_id = {place_with_id:?}, diag_expr_id={diag_expr_id:?}, \
- borrow_kind={bk:?}"
+ "borrow: place_with_id = {place_with_id:#?}, diag_expr_id={diag_expr_id:#?}, \
+ borrow_kind={bk:#?}"
);
self.borrow_place(place_with_id);
@@ -198,7 +201,13 @@ impl<'tcx> expr_use_visitor::Delegate<'tcx> for ExprUseDelegate<'tcx> {
        // If the type being assigned needs to be dropped, then the mutation counts as a borrow
// since it is essentially doing `Drop::drop(&mut x); x = new_value;`.
- if assignee_place.place.base_ty.needs_drop(self.tcx, self.param_env) {
+ let ty = self.tcx.erase_regions(assignee_place.place.base_ty);
+ if ty.needs_infer() {
+ self.tcx.sess.delay_span_bug(
+ self.tcx.hir().span(assignee_place.hir_id),
+ &format!("inference variables in {ty}"),
+ );
+ } else if ty.needs_drop(self.tcx, self.param_env) {
self.places
.borrowed
.insert(TrackedValue::from_place_with_projections_allowed(assignee_place));
diff --git a/compiler/rustc_hir_typeck/src/generator_interior/mod.rs b/compiler/rustc_hir_typeck/src/generator_interior/mod.rs
new file mode 100644
index 000000000..b7dd599cd
--- /dev/null
+++ b/compiler/rustc_hir_typeck/src/generator_interior/mod.rs
@@ -0,0 +1,647 @@
+//! This calculates the types that have storage living across a suspension point in a
+//! generator, from the perspective of typeck. The actual types used at runtime
+//! are calculated in `rustc_mir_transform::generator` and may be a subset of the
+//! types computed here.
+
+use self::drop_ranges::DropRanges;
+use super::FnCtxt;
+use rustc_data_structures::fx::{FxHashSet, FxIndexSet};
+use rustc_errors::{pluralize, DelayDm};
+use rustc_hir as hir;
+use rustc_hir::def::{CtorKind, DefKind, Res};
+use rustc_hir::def_id::DefId;
+use rustc_hir::hir_id::HirIdSet;
+use rustc_hir::intravisit::{self, Visitor};
+use rustc_hir::{Arm, Expr, ExprKind, Guard, HirId, Pat, PatKind};
+use rustc_middle::middle::region::{self, Scope, ScopeData, YieldData};
+use rustc_middle::ty::{self, RvalueScopes, Ty, TyCtxt, TypeVisitable};
+use rustc_span::symbol::sym;
+use rustc_span::Span;
+
+mod drop_ranges;
+
+struct InteriorVisitor<'a, 'tcx> {
+ fcx: &'a FnCtxt<'a, 'tcx>,
+ region_scope_tree: &'a region::ScopeTree,
+ types: FxIndexSet<ty::GeneratorInteriorTypeCause<'tcx>>,
+ rvalue_scopes: &'a RvalueScopes,
+ expr_count: usize,
+ kind: hir::GeneratorKind,
+ prev_unresolved_span: Option<Span>,
+ linted_values: HirIdSet,
+ drop_ranges: DropRanges,
+}
+
+impl<'a, 'tcx> InteriorVisitor<'a, 'tcx> {
+ fn record(
+ &mut self,
+ ty: Ty<'tcx>,
+ hir_id: HirId,
+ scope: Option<region::Scope>,
+ expr: Option<&'tcx Expr<'tcx>>,
+ source_span: Span,
+ ) {
+ use rustc_span::DUMMY_SP;
+
+ let ty = self.fcx.resolve_vars_if_possible(ty);
+
+ debug!(
+ "attempting to record type ty={:?}; hir_id={:?}; scope={:?}; expr={:?}; source_span={:?}; expr_count={:?}",
+ ty, hir_id, scope, expr, source_span, self.expr_count,
+ );
+
+ let live_across_yield = scope
+ .map(|s| {
+ self.region_scope_tree.yield_in_scope(s).and_then(|yield_data| {
+ // If we are recording an expression that is the last yield
+ // in the scope, or that has a postorder CFG index larger
+ // than the one of all of the yields, then its value can't
+ // be storage-live (and therefore live) at any of the yields.
+ //
+ // See the mega-comment at `yield_in_scope` for a proof.
+
+ yield_data
+ .iter()
+ .find(|yield_data| {
+ debug!(
+ "comparing counts yield: {} self: {}, source_span = {:?}",
+ yield_data.expr_and_pat_count, self.expr_count, source_span
+ );
+
+ if self.fcx.sess().opts.unstable_opts.drop_tracking
+ && self
+ .drop_ranges
+ .is_dropped_at(hir_id, yield_data.expr_and_pat_count)
+ {
+ debug!("value is dropped at yield point; not recording");
+ return false;
+ }
+
+                            // If it is a borrow happening in the guard,
+                            // it needs to be recorded regardless, because it
+                            // does live across this yield point.
+ yield_data.expr_and_pat_count >= self.expr_count
+ })
+ .cloned()
+ })
+ })
+ .unwrap_or_else(|| {
+ Some(YieldData { span: DUMMY_SP, expr_and_pat_count: 0, source: self.kind.into() })
+ });
+
+ if let Some(yield_data) = live_across_yield {
+ debug!(
+ "type in expr = {:?}, scope = {:?}, type = {:?}, count = {}, yield_span = {:?}",
+ expr, scope, ty, self.expr_count, yield_data.span
+ );
+
+ if let Some((unresolved_type, unresolved_type_span)) =
+ self.fcx.unresolved_type_vars(&ty)
+ {
+                // If the unresolved type isn't a ty_var, then unresolved_type_span is None
+ let span = self
+ .prev_unresolved_span
+ .unwrap_or_else(|| unresolved_type_span.unwrap_or(source_span));
+
+ // If we encounter an int/float variable, then inference fallback didn't
+ // finish due to some other error. Don't emit spurious additional errors.
+ if let ty::Infer(ty::InferTy::IntVar(_) | ty::InferTy::FloatVar(_)) =
+ unresolved_type.kind()
+ {
+ self.fcx
+ .tcx
+ .sess
+ .delay_span_bug(span, &format!("Encountered var {:?}", unresolved_type));
+ } else {
+ let note = format!(
+ "the type is part of the {} because of this {}",
+ self.kind, yield_data.source
+ );
+
+ self.fcx
+ .need_type_info_err_in_generator(self.kind, span, unresolved_type)
+ .span_note(yield_data.span, &*note)
+ .emit();
+ }
+ } else {
+ // Insert the type into the ordered set.
+ let scope_span = scope.map(|s| s.span(self.fcx.tcx, self.region_scope_tree));
+
+ if !self.linted_values.contains(&hir_id) {
+ check_must_not_suspend_ty(
+ self.fcx,
+ ty,
+ hir_id,
+ SuspendCheckData {
+ expr,
+ source_span,
+ yield_span: yield_data.span,
+ plural_len: 1,
+ ..Default::default()
+ },
+ );
+ self.linted_values.insert(hir_id);
+ }
+
+ self.types.insert(ty::GeneratorInteriorTypeCause {
+ span: source_span,
+ ty,
+ scope_span,
+ yield_span: yield_data.span,
+ expr: expr.map(|e| e.hir_id),
+ });
+ }
+ } else {
+ debug!(
+ "no type in expr = {:?}, count = {:?}, span = {:?}",
+ expr,
+ self.expr_count,
+ expr.map(|e| e.span)
+ );
+ if let Some((unresolved_type, unresolved_type_span)) =
+ self.fcx.unresolved_type_vars(&ty)
+ {
+ debug!(
+ "remained unresolved_type = {:?}, unresolved_type_span: {:?}",
+ unresolved_type, unresolved_type_span
+ );
+ self.prev_unresolved_span = unresolved_type_span;
+ }
+ }
+ }
+}
+
+pub fn resolve_interior<'a, 'tcx>(
+ fcx: &'a FnCtxt<'a, 'tcx>,
+ def_id: DefId,
+ body_id: hir::BodyId,
+ interior: Ty<'tcx>,
+ kind: hir::GeneratorKind,
+) {
+ let body = fcx.tcx.hir().body(body_id);
+ let typeck_results = fcx.inh.typeck_results.borrow();
+ let mut visitor = InteriorVisitor {
+ fcx,
+ types: FxIndexSet::default(),
+ region_scope_tree: fcx.tcx.region_scope_tree(def_id),
+ rvalue_scopes: &typeck_results.rvalue_scopes,
+ expr_count: 0,
+ kind,
+ prev_unresolved_span: None,
+ linted_values: <_>::default(),
+ drop_ranges: drop_ranges::compute_drop_ranges(fcx, def_id, body),
+ };
+ intravisit::walk_body(&mut visitor, body);
+
+    // Check that we visited the same number of expressions as the RegionResolutionVisitor
+ let region_expr_count = fcx.tcx.region_scope_tree(def_id).body_expr_count(body_id).unwrap();
+ assert_eq!(region_expr_count, visitor.expr_count);
+
+ // The types are already kept in insertion order.
+ let types = visitor.types;
+
+ // The types in the generator interior contain lifetimes local to the generator itself,
+ // which should not be exposed outside of the generator. Therefore, we replace these
+ // lifetimes with existentially-bound lifetimes, which reflect the exact value of the
+ // lifetimes not being known by users.
+ //
+ // These lifetimes are used in auto trait impl checking (for example,
+ // if a Sync generator contains an &'α T, we need to check whether &'α T: Sync),
+ // so knowledge of the exact relationships between them isn't particularly important.
+
+ debug!("types in generator {:?}, span = {:?}", types, body.value.span);
+
+ let mut counter = 0;
+ let mut captured_tys = FxHashSet::default();
+ let type_causes: Vec<_> = types
+ .into_iter()
+ .filter_map(|mut cause| {
+ // Erase regions and canonicalize late-bound regions to deduplicate as many types as we
+ // can.
+ let ty = fcx.normalize_associated_types_in(cause.span, cause.ty);
+ let erased = fcx.tcx.erase_regions(ty);
+ if captured_tys.insert(erased) {
+ // Replace all regions inside the generator interior with late bound regions.
+ // Note that each region slot in the types gets a new fresh late bound region,
+ // which means that none of the regions inside relate to any other, even if
+ // typeck had previously found constraints that would cause them to be related.
+ let folded = fcx.tcx.fold_regions(erased, |_, current_depth| {
+ let br = ty::BoundRegion {
+ var: ty::BoundVar::from_u32(counter),
+ kind: ty::BrAnon(counter),
+ };
+ let r = fcx.tcx.mk_region(ty::ReLateBound(current_depth, br));
+ counter += 1;
+ r
+ });
+
+ cause.ty = folded;
+ Some(cause)
+ } else {
+ None
+ }
+ })
+ .collect();
+
+ // Extract type components to build the witness type.
+ let type_list = fcx.tcx.mk_type_list(type_causes.iter().map(|cause| cause.ty));
+ let bound_vars = fcx.tcx.mk_bound_variable_kinds(
+ (0..counter).map(|i| ty::BoundVariableKind::Region(ty::BrAnon(i))),
+ );
+ let witness =
+ fcx.tcx.mk_generator_witness(ty::Binder::bind_with_vars(type_list, bound_vars.clone()));
+
+ drop(typeck_results);
+ // Store the generator types and spans into the typeck results for this generator.
+ fcx.inh.typeck_results.borrow_mut().generator_interior_types =
+ ty::Binder::bind_with_vars(type_causes, bound_vars);
+
+ debug!(
+ "types in generator after region replacement {:?}, span = {:?}",
+ witness, body.value.span
+ );
+
+ // Unify the type variable inside the generator with the new witness
+ match fcx.at(&fcx.misc(body.value.span), fcx.param_env).eq(interior, witness) {
+ Ok(ok) => fcx.register_infer_ok_obligations(ok),
+ _ => bug!("failed to relate {interior} and {witness}"),
+ }
+}
+
+// This visitor has to have the same visit_expr calls as RegionResolutionVisitor in
+// librustc_middle/middle/region.rs since `expr_count` is compared against the results
+// there.
+impl<'a, 'tcx> Visitor<'tcx> for InteriorVisitor<'a, 'tcx> {
+ fn visit_arm(&mut self, arm: &'tcx Arm<'tcx>) {
+ let Arm { guard, pat, body, .. } = arm;
+ self.visit_pat(pat);
+ if let Some(ref g) = guard {
+ {
+ // If there is a guard, we need to count all variables bound in the pattern as
+ // borrowed for the entire guard body, regardless of whether they are accessed.
+ // We do this by walking the pattern bindings and recording `&T` for any `x: T`
+ // that is bound.
+
+ struct ArmPatCollector<'a, 'b, 'tcx> {
+ interior_visitor: &'a mut InteriorVisitor<'b, 'tcx>,
+ scope: Scope,
+ }
+
+ impl<'a, 'b, 'tcx> Visitor<'tcx> for ArmPatCollector<'a, 'b, 'tcx> {
+ fn visit_pat(&mut self, pat: &'tcx Pat<'tcx>) {
+ intravisit::walk_pat(self, pat);
+ if let PatKind::Binding(_, id, ident, ..) = pat.kind {
+ let ty =
+ self.interior_visitor.fcx.typeck_results.borrow().node_type(id);
+ let tcx = self.interior_visitor.fcx.tcx;
+ let ty = tcx.mk_ref(
+ // Use `ReErased` as `resolve_interior` is going to replace all the
+ // regions anyway.
+ tcx.mk_region(ty::ReErased),
+ ty::TypeAndMut { ty, mutbl: hir::Mutability::Not },
+ );
+ self.interior_visitor.record(
+ ty,
+ id,
+ Some(self.scope),
+ None,
+ ident.span,
+ );
+ }
+ }
+ }
+
+ ArmPatCollector {
+ interior_visitor: self,
+ scope: Scope { id: g.body().hir_id.local_id, data: ScopeData::Node },
+ }
+ .visit_pat(pat);
+ }
+
+ match g {
+ Guard::If(ref e) => {
+ self.visit_expr(e);
+ }
+ Guard::IfLet(ref l) => {
+ self.visit_let_expr(l);
+ }
+ }
+ }
+ self.visit_expr(body);
+ }
+
+ fn visit_pat(&mut self, pat: &'tcx Pat<'tcx>) {
+ intravisit::walk_pat(self, pat);
+
+ self.expr_count += 1;
+
+ if let PatKind::Binding(..) = pat.kind {
+ let scope = self.region_scope_tree.var_scope(pat.hir_id.local_id).unwrap();
+ let ty = self.fcx.typeck_results.borrow().pat_ty(pat);
+ self.record(ty, pat.hir_id, Some(scope), None, pat.span);
+ }
+ }
+
+ fn visit_expr(&mut self, expr: &'tcx Expr<'tcx>) {
+ match &expr.kind {
+ ExprKind::Call(callee, args) => match &callee.kind {
+ ExprKind::Path(qpath) => {
+ let res = self.fcx.typeck_results.borrow().qpath_res(qpath, callee.hir_id);
+ match res {
+ // Direct calls never need to keep the callee `ty::FnDef`
+ // ZST in a temporary, so skip its type, just in case it
+ // can significantly complicate the generator type.
+ Res::Def(
+ DefKind::Fn | DefKind::AssocFn | DefKind::Ctor(_, CtorKind::Fn),
+ _,
+ ) => {
+ // NOTE(eddyb) this assumes a path expression has
+ // no nested expressions to keep track of.
+ self.expr_count += 1;
+
+ // Record the rest of the call expression normally.
+ for arg in *args {
+ self.visit_expr(arg);
+ }
+ }
+ _ => intravisit::walk_expr(self, expr),
+ }
+ }
+ _ => intravisit::walk_expr(self, expr),
+ },
+ _ => intravisit::walk_expr(self, expr),
+ }
+
+ self.expr_count += 1;
+
+ debug!("is_borrowed_temporary: {:?}", self.drop_ranges.is_borrowed_temporary(expr));
+
+ let ty = self.fcx.typeck_results.borrow().expr_ty_adjusted_opt(expr);
+
+ // Typically, the value produced by an expression is consumed by its parent in some way,
+ // so we only have to check if the parent contains a yield (note that the parent may, for
+ // example, store the value into a local variable, but then we already consider local
+ // variables to be live across their scope).
+ //
+ // However, in the case of temporary values, we are going to store the value into a
+ // temporary on the stack that is live for the current temporary scope and then return a
+ // reference to it. That value may be live across the entire temporary scope.
+ //
+ // There's another subtlety: if the type has an observable drop, it must be dropped after
+ // the yield, even if it's not borrowed or referenced after the yield. Ideally this would
+ // *only* happen for types with observable drop, not all types which wrap them, but that
+ // doesn't match the behavior of MIR borrowck and causes ICEs. See the FIXME comment in
+ // src/test/ui/generator/drop-tracking-parent-expression.rs.
+ let scope = if self.drop_ranges.is_borrowed_temporary(expr)
+ || ty.map_or(true, |ty| {
+ // Avoid ICEs in needs_drop.
+ let ty = self.fcx.resolve_vars_if_possible(ty);
+ let ty = self.fcx.tcx.erase_regions(ty);
+ if ty.needs_infer() {
+ self.fcx
+ .tcx
+ .sess
+ .delay_span_bug(expr.span, &format!("inference variables in {ty}"));
+ true
+ } else {
+ ty.needs_drop(self.fcx.tcx, self.fcx.param_env)
+ }
+ }) {
+ self.rvalue_scopes.temporary_scope(self.region_scope_tree, expr.hir_id.local_id)
+ } else {
+ let parent_expr = self
+ .fcx
+ .tcx
+ .hir()
+ .parent_iter(expr.hir_id)
+ .find(|(_, node)| matches!(node, hir::Node::Expr(_)))
+ .map(|(id, _)| id);
+ debug!("parent_expr: {:?}", parent_expr);
+ match parent_expr {
+ Some(parent) => Some(Scope { id: parent.local_id, data: ScopeData::Node }),
+ None => {
+ self.rvalue_scopes.temporary_scope(self.region_scope_tree, expr.hir_id.local_id)
+ }
+ }
+ };
+
+ // If there are adjustments, then record the final type --
+ // this is the actual value that is being produced.
+ if let Some(adjusted_ty) = ty {
+ self.record(adjusted_ty, expr.hir_id, scope, Some(expr), expr.span);
+ }
+
+ // Also record the unadjusted type (which is the only type if
+ // there are no adjustments). The reason for this is that the
+ // unadjusted value is sometimes a "temporary" that would wind
+ // up in a MIR temporary.
+ //
+ // As an example, consider an expression like `vec![].push(x)`.
+ // Here, the `vec![]` would wind up MIR stored into a
+ // temporary variable `t` which we can borrow to invoke
+ // `<Vec<_>>::push(&mut t, x)`.
+ //
+ // Note that an expression can have many adjustments, and we
+ // are just ignoring those intermediate types. This is because
+ // those intermediate values are always linearly "consumed" by
+ // the other adjustments, and hence would never be directly
+ // captured in the MIR.
+ //
+ // (Note that this partly relies on the fact that the `Deref`
+ // traits always return references, which means their content
+ // can be reborrowed without needing to spill to a temporary.
+ // If this were not the case, then we could conceivably have
+ // to create intermediate temporaries.)
+ //
+ // The type table might not have information for this expression
+ // if it is in a malformed scope. (#66387)
+ if let Some(ty) = self.fcx.typeck_results.borrow().expr_ty_opt(expr) {
+ self.record(ty, expr.hir_id, scope, Some(expr), expr.span);
+ } else {
+ self.fcx.tcx.sess.delay_span_bug(expr.span, "no type for node");
+ }
+ }
+}
+
+#[derive(Default)]
+struct SuspendCheckData<'a, 'tcx> {
+ expr: Option<&'tcx Expr<'tcx>>,
+ source_span: Span,
+ yield_span: Span,
+ descr_pre: &'a str,
+ descr_post: &'a str,
+ plural_len: usize,
+}
+
+// Returns whether it emitted a diagnostic or not.
+// Note that this fn and the following one are based on the code
+// for creating must_use diagnostics.
+//
+// Note that this technique was chosen over things like a `Suspend` marker trait
+// as it is simpler and has precedent in the compiler
+fn check_must_not_suspend_ty<'tcx>(
+ fcx: &FnCtxt<'_, 'tcx>,
+ ty: Ty<'tcx>,
+ hir_id: HirId,
+ data: SuspendCheckData<'_, 'tcx>,
+) -> bool {
+ if ty.is_unit()
+    // FIXME: should this check `is_ty_uninhabited_from`? This query is not available in this stage
+ // of typeck (before ReVar and RePlaceholder are removed), but may remove noise, like in
+ // `must_use`
+ // || fcx.tcx.is_ty_uninhabited_from(fcx.tcx.parent_module(hir_id).to_def_id(), ty, fcx.param_env)
+ {
+ return false;
+ }
+
+ let plural_suffix = pluralize!(data.plural_len);
+
+ debug!("Checking must_not_suspend for {}", ty);
+
+ match *ty.kind() {
+ ty::Adt(..) if ty.is_box() => {
+ let boxed_ty = ty.boxed_ty();
+ let descr_pre = &format!("{}boxed ", data.descr_pre);
+ check_must_not_suspend_ty(fcx, boxed_ty, hir_id, SuspendCheckData { descr_pre, ..data })
+ }
+ ty::Adt(def, _) => check_must_not_suspend_def(fcx.tcx, def.did(), hir_id, data),
+ // FIXME: support adding the attribute to TAITs
+ ty::Opaque(def, _) => {
+ let mut has_emitted = false;
+ for &(predicate, _) in fcx.tcx.explicit_item_bounds(def) {
+ // We only look at the `DefId`, so it is safe to skip the binder here.
+ if let ty::PredicateKind::Trait(ref poly_trait_predicate) =
+ predicate.kind().skip_binder()
+ {
+ let def_id = poly_trait_predicate.trait_ref.def_id;
+ let descr_pre = &format!("{}implementer{} of ", data.descr_pre, plural_suffix);
+ if check_must_not_suspend_def(
+ fcx.tcx,
+ def_id,
+ hir_id,
+ SuspendCheckData { descr_pre, ..data },
+ ) {
+ has_emitted = true;
+ break;
+ }
+ }
+ }
+ has_emitted
+ }
+ ty::Dynamic(binder, _, _) => {
+ let mut has_emitted = false;
+ for predicate in binder.iter() {
+ if let ty::ExistentialPredicate::Trait(ref trait_ref) = predicate.skip_binder() {
+ let def_id = trait_ref.def_id;
+ let descr_post = &format!(" trait object{}{}", plural_suffix, data.descr_post);
+ if check_must_not_suspend_def(
+ fcx.tcx,
+ def_id,
+ hir_id,
+ SuspendCheckData { descr_post, ..data },
+ ) {
+ has_emitted = true;
+ break;
+ }
+ }
+ }
+ has_emitted
+ }
+ ty::Tuple(fields) => {
+ let mut has_emitted = false;
+ let comps = match data.expr.map(|e| &e.kind) {
+ Some(hir::ExprKind::Tup(comps)) => {
+ debug_assert_eq!(comps.len(), fields.len());
+ Some(comps)
+ }
+ _ => None,
+ };
+ for (i, ty) in fields.iter().enumerate() {
+ let descr_post = &format!(" in tuple element {i}");
+ let span = comps.and_then(|c| c.get(i)).map(|e| e.span).unwrap_or(data.source_span);
+ if check_must_not_suspend_ty(
+ fcx,
+ ty,
+ hir_id,
+ SuspendCheckData {
+ descr_post,
+ expr: comps.and_then(|comps| comps.get(i)),
+ source_span: span,
+ ..data
+ },
+ ) {
+ has_emitted = true;
+ }
+ }
+ has_emitted
+ }
+ ty::Array(ty, len) => {
+ let descr_pre = &format!("{}array{} of ", data.descr_pre, plural_suffix);
+ check_must_not_suspend_ty(
+ fcx,
+ ty,
+ hir_id,
+ SuspendCheckData {
+ descr_pre,
+ plural_len: len.try_eval_usize(fcx.tcx, fcx.param_env).unwrap_or(0) as usize
+ + 1,
+ ..data
+ },
+ )
+ }
+        // If drop tracking is enabled, we want to look through references, since the referent
+ // may not be considered live across the await point.
+ ty::Ref(_region, ty, _mutability) if fcx.sess().opts.unstable_opts.drop_tracking => {
+ let descr_pre = &format!("{}reference{} to ", data.descr_pre, plural_suffix);
+ check_must_not_suspend_ty(fcx, ty, hir_id, SuspendCheckData { descr_pre, ..data })
+ }
+ _ => false,
+ }
+}
+
+fn check_must_not_suspend_def(
+ tcx: TyCtxt<'_>,
+ def_id: DefId,
+ hir_id: HirId,
+ data: SuspendCheckData<'_, '_>,
+) -> bool {
+ if let Some(attr) = tcx.get_attr(def_id, sym::must_not_suspend) {
+ tcx.struct_span_lint_hir(
+ rustc_session::lint::builtin::MUST_NOT_SUSPEND,
+ hir_id,
+ data.source_span,
+ DelayDm(|| {
+ format!(
+ "{}`{}`{} held across a suspend point, but should not be",
+ data.descr_pre,
+ tcx.def_path_str(def_id),
+ data.descr_post,
+ )
+ }),
+ |lint| {
+ // add span pointing to the offending yield/await
+ lint.span_label(data.yield_span, "the value is held across this suspend point");
+
+ // Add optional reason note
+ if let Some(note) = attr.value_str() {
+ // FIXME(guswynn): consider formatting this better
+ lint.span_note(data.source_span, note.as_str());
+ }
+
+ // Add some quick suggestions on what to do
+ // FIXME: can `drop` work as a suggestion here as well?
+ lint.span_help(
+ data.source_span,
+ "consider using a block (`{ ... }`) \
+ to shrink the value's scope, ending before the suspend point",
+ );
+
+ lint
+ },
+ );
+
+ true
+ } else {
+ false
+ }
+}
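
The last two functions above implement the `must_not_suspend` check. A rough user-level sketch of what it flags, assuming a nightly toolchain, edition 2018 or later, and the allow-by-default lint turned on; the diagnostic text in the comment is paraphrased from the format string above, not quoted from a real build:

```rust
#![feature(must_not_suspend)]
#![warn(must_not_suspend)]

#[must_not_suspend = "holding `Guard` across an await point is usually a bug"]
struct Guard;

async fn tick() {}

async fn holds_guard() {
    let g = Guard;
    tick().await; // warning: `Guard` held across a suspend point, but should not be
    drop(g);
}

fn main() {
    // Constructing the future is enough to type-check the body; no executor is needed.
    let _ = holds_guard();
}
```
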
diff --git a/compiler/rustc_hir_typeck/src/inherited.rs b/compiler/rustc_hir_typeck/src/inherited.rs
new file mode 100644
index 000000000..0fb7651b3
--- /dev/null
+++ b/compiler/rustc_hir_typeck/src/inherited.rs
@@ -0,0 +1,213 @@
+use super::callee::DeferredCallResolution;
+
+use rustc_data_structures::fx::FxHashSet;
+use rustc_data_structures::sync::Lrc;
+use rustc_hir as hir;
+use rustc_hir::def_id::LocalDefId;
+use rustc_hir::HirIdMap;
+use rustc_infer::infer;
+use rustc_infer::infer::{DefiningAnchor, InferCtxt, InferOk, TyCtxtInferExt};
+use rustc_middle::ty::fold::TypeFoldable;
+use rustc_middle::ty::visit::TypeVisitable;
+use rustc_middle::ty::{self, Ty, TyCtxt};
+use rustc_span::def_id::LocalDefIdMap;
+use rustc_span::{self, Span};
+use rustc_trait_selection::infer::InferCtxtExt as _;
+use rustc_trait_selection::traits::{
+ self, ObligationCause, ObligationCtxt, TraitEngine, TraitEngineExt as _,
+};
+
+use std::cell::RefCell;
+use std::ops::Deref;
+
+/// Closures defined within the function. For example:
+/// ```ignore (illustrative)
+/// fn foo() {
+/// bar(move|| { ... })
+/// }
+/// ```
+/// Here, the function `foo()` and the closure passed to
+/// `bar()` will each have their own `FnCtxt`, but they will
+/// share the inherited fields.
+pub struct Inherited<'tcx> {
+ pub(super) infcx: InferCtxt<'tcx>,
+
+ pub(super) typeck_results: RefCell<ty::TypeckResults<'tcx>>,
+
+ pub(super) locals: RefCell<HirIdMap<super::LocalTy<'tcx>>>,
+
+ pub(super) fulfillment_cx: RefCell<Box<dyn TraitEngine<'tcx>>>,
+
+ // Some additional `Sized` obligations badly affect type inference.
+ // These obligations are added in a later stage of typeck.
+ // Removing these may also cause additional complications, see #101066.
+ pub(super) deferred_sized_obligations:
+ RefCell<Vec<(Ty<'tcx>, Span, traits::ObligationCauseCode<'tcx>)>>,
+
+ // When we process a call like `c()` where `c` is a closure type,
+ // we may not have decided yet whether `c` is a `Fn`, `FnMut`, or
+ // `FnOnce` closure. In that case, we defer full resolution of the
+ // call until upvar inference can kick in and make the
+ // decision. We keep these deferred resolutions grouped by the
+ // def-id of the closure, so that once we decide, we can easily go
+ // back and process them.
+ pub(super) deferred_call_resolutions: RefCell<LocalDefIdMap<Vec<DeferredCallResolution<'tcx>>>>,
+
+ pub(super) deferred_cast_checks: RefCell<Vec<super::cast::CastCheck<'tcx>>>,
+
+ pub(super) deferred_transmute_checks: RefCell<Vec<(Ty<'tcx>, Ty<'tcx>, hir::HirId)>>,
+
+ pub(super) deferred_asm_checks: RefCell<Vec<(&'tcx hir::InlineAsm<'tcx>, hir::HirId)>>,
+
+ pub(super) deferred_generator_interiors:
+ RefCell<Vec<(hir::BodyId, Ty<'tcx>, hir::GeneratorKind)>>,
+
+ pub(super) body_id: Option<hir::BodyId>,
+
+ /// Whenever we introduce an adjustment from `!` into a type variable,
+ /// we record that type variable here. This is later used to inform
+ /// fallback. See the `fallback` module for details.
+ pub(super) diverging_type_vars: RefCell<FxHashSet<Ty<'tcx>>>,
+}
+
+impl<'tcx> Deref for Inherited<'tcx> {
+ type Target = InferCtxt<'tcx>;
+ fn deref(&self) -> &Self::Target {
+ &self.infcx
+ }
+}
+
+/// A temporary returned by `Inherited::build(...)`. This is necessary
+/// for multiple `InferCtxt` to share the same `typeck_results`
+/// without using `Rc` or something similar.
+pub struct InheritedBuilder<'tcx> {
+ infcx: infer::InferCtxtBuilder<'tcx>,
+ def_id: LocalDefId,
+ typeck_results: RefCell<ty::TypeckResults<'tcx>>,
+}
+
+impl<'tcx> Inherited<'tcx> {
+ pub fn build(tcx: TyCtxt<'tcx>, def_id: LocalDefId) -> InheritedBuilder<'tcx> {
+ let hir_owner = tcx.hir().local_def_id_to_hir_id(def_id).owner;
+
+ InheritedBuilder {
+ infcx: tcx
+ .infer_ctxt()
+ .ignoring_regions()
+ .with_opaque_type_inference(DefiningAnchor::Bind(hir_owner.def_id))
+ .with_normalize_fn_sig_for_diagnostic(Lrc::new(move |infcx, fn_sig| {
+ if fn_sig.has_escaping_bound_vars() {
+ return fn_sig;
+ }
+ infcx.probe(|_| {
+ let ocx = ObligationCtxt::new_in_snapshot(infcx);
+ let normalized_fn_sig = ocx.normalize(
+ ObligationCause::dummy(),
+ // FIXME(compiler-errors): This is probably not the right param-env...
+ infcx.tcx.param_env(def_id),
+ fn_sig,
+ );
+ if ocx.select_all_or_error().is_empty() {
+ let normalized_fn_sig =
+ infcx.resolve_vars_if_possible(normalized_fn_sig);
+ if !normalized_fn_sig.needs_infer() {
+ return normalized_fn_sig;
+ }
+ }
+ fn_sig
+ })
+ })),
+ def_id,
+ typeck_results: RefCell::new(ty::TypeckResults::new(hir_owner)),
+ }
+ }
+}
+
+impl<'tcx> InheritedBuilder<'tcx> {
+ pub fn enter<F, R>(mut self, f: F) -> R
+ where
+ F: FnOnce(&Inherited<'tcx>) -> R,
+ {
+ let def_id = self.def_id;
+ f(&Inherited::new(self.infcx.build(), def_id, self.typeck_results))
+ }
+}
+
+impl<'tcx> Inherited<'tcx> {
+ fn new(
+ infcx: InferCtxt<'tcx>,
+ def_id: LocalDefId,
+ typeck_results: RefCell<ty::TypeckResults<'tcx>>,
+ ) -> Self {
+ let tcx = infcx.tcx;
+ let body_id = tcx.hir().maybe_body_owned_by(def_id);
+
+ Inherited {
+ typeck_results,
+ infcx,
+ fulfillment_cx: RefCell::new(<dyn TraitEngine<'_>>::new(tcx)),
+ locals: RefCell::new(Default::default()),
+ deferred_sized_obligations: RefCell::new(Vec::new()),
+ deferred_call_resolutions: RefCell::new(Default::default()),
+ deferred_cast_checks: RefCell::new(Vec::new()),
+ deferred_transmute_checks: RefCell::new(Vec::new()),
+ deferred_asm_checks: RefCell::new(Vec::new()),
+ deferred_generator_interiors: RefCell::new(Vec::new()),
+ diverging_type_vars: RefCell::new(Default::default()),
+ body_id,
+ }
+ }
+
+ #[instrument(level = "debug", skip(self))]
+ pub(super) fn register_predicate(&self, obligation: traits::PredicateObligation<'tcx>) {
+ if obligation.has_escaping_bound_vars() {
+ span_bug!(obligation.cause.span, "escaping bound vars in predicate {:?}", obligation);
+ }
+ self.fulfillment_cx.borrow_mut().register_predicate_obligation(self, obligation);
+ }
+
+ pub(super) fn register_predicates<I>(&self, obligations: I)
+ where
+ I: IntoIterator<Item = traits::PredicateObligation<'tcx>>,
+ {
+ for obligation in obligations {
+ self.register_predicate(obligation);
+ }
+ }
+
+ pub(super) fn register_infer_ok_obligations<T>(&self, infer_ok: InferOk<'tcx, T>) -> T {
+ self.register_predicates(infer_ok.obligations);
+ infer_ok.value
+ }
+
+ pub(super) fn normalize_associated_types_in<T>(
+ &self,
+ span: Span,
+ body_id: hir::HirId,
+ param_env: ty::ParamEnv<'tcx>,
+ value: T,
+ ) -> T
+ where
+ T: TypeFoldable<'tcx>,
+ {
+ self.normalize_associated_types_in_with_cause(
+ ObligationCause::misc(span, body_id),
+ param_env,
+ value,
+ )
+ }
+
+ pub(super) fn normalize_associated_types_in_with_cause<T>(
+ &self,
+ cause: ObligationCause<'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ value: T,
+ ) -> T
+ where
+ T: TypeFoldable<'tcx>,
+ {
+ let ok = self.partially_normalize_associated_types_in(cause, param_env, value);
+ debug!(?ok);
+ self.register_infer_ok_obligations(ok)
+ }
+}
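
The `Inherited` doc comment above notes that a closure and its enclosing function share one inference environment (a single `InferCtxt` and one set of typeck results). A small user-level example of the behavior this enables, in plain Rust rather than the compiler API:

```rust
fn main() {
    // The parameter type of `add_one` is unknown at the closure's definition
    // site; it is inferred from the call below. This works because the closure
    // body and the enclosing function are checked in one shared environment.
    let add_one = |x| x + 1;
    let y: u64 = add_one(41);
    assert_eq!(y, 42);
}
```
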
diff --git a/compiler/rustc_hir_typeck/src/intrinsicck.rs b/compiler/rustc_hir_typeck/src/intrinsicck.rs
new file mode 100644
index 000000000..9812d96fc
--- /dev/null
+++ b/compiler/rustc_hir_typeck/src/intrinsicck.rs
@@ -0,0 +1,108 @@
+use hir::HirId;
+use rustc_errors::struct_span_err;
+use rustc_hir as hir;
+use rustc_index::vec::Idx;
+use rustc_middle::ty::layout::{LayoutError, SizeSkeleton};
+use rustc_middle::ty::{self, Ty, TyCtxt};
+use rustc_target::abi::{Pointer, VariantIdx};
+
+use super::FnCtxt;
+
+/// If the type is `Option<T>`, it will return `T`, otherwise
+/// the type itself. Works on most `Option`-like types.
+fn unpack_option_like<'tcx>(tcx: TyCtxt<'tcx>, ty: Ty<'tcx>) -> Ty<'tcx> {
+ let ty::Adt(def, substs) = *ty.kind() else { return ty };
+
+ if def.variants().len() == 2 && !def.repr().c() && def.repr().int.is_none() {
+ let data_idx;
+
+ let one = VariantIdx::new(1);
+ let zero = VariantIdx::new(0);
+
+ if def.variant(zero).fields.is_empty() {
+ data_idx = one;
+ } else if def.variant(one).fields.is_empty() {
+ data_idx = zero;
+ } else {
+ return ty;
+ }
+
+ if def.variant(data_idx).fields.len() == 1 {
+ return def.variant(data_idx).fields[0].ty(tcx, substs);
+ }
+ }
+
+ ty
+}
+
+impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
+ pub fn check_transmute(&self, from: Ty<'tcx>, to: Ty<'tcx>, hir_id: HirId) {
+ let tcx = self.tcx;
+ let span = tcx.hir().span(hir_id);
+ let normalize = |ty| {
+ let ty = self.resolve_vars_if_possible(ty);
+ self.tcx.normalize_erasing_regions(self.param_env, ty)
+ };
+ let from = normalize(from);
+ let to = normalize(to);
+ trace!(?from, ?to);
+
+ // Transmutes that are only changing lifetimes are always ok.
+ if from == to {
+ return;
+ }
+
+ let skel = |ty| SizeSkeleton::compute(ty, tcx, self.param_env);
+ let sk_from = skel(from);
+ let sk_to = skel(to);
+ trace!(?sk_from, ?sk_to);
+
+ // Check for same size using the skeletons.
+ if let (Ok(sk_from), Ok(sk_to)) = (sk_from, sk_to) {
+ if sk_from.same_size(sk_to) {
+ return;
+ }
+
+ // Special-case transmuting from `typeof(function)` and
+ // `Option<typeof(function)>` to present a clearer error.
+ let from = unpack_option_like(tcx, from);
+ if let (&ty::FnDef(..), SizeSkeleton::Known(size_to)) = (from.kind(), sk_to) && size_to == Pointer.size(&tcx) {
+ struct_span_err!(tcx.sess, span, E0591, "can't transmute zero-sized type")
+ .note(&format!("source type: {from}"))
+ .note(&format!("target type: {to}"))
+ .help("cast with `as` to a pointer instead")
+ .emit();
+ return;
+ }
+ }
+
+ // Try to display a sensible error with as much information as possible.
+ let skeleton_string = |ty: Ty<'tcx>, sk| match sk {
+ Ok(SizeSkeleton::Known(size)) => format!("{} bits", size.bits()),
+ Ok(SizeSkeleton::Pointer { tail, .. }) => format!("pointer to `{tail}`"),
+ Err(LayoutError::Unknown(bad)) => {
+ if bad == ty {
+ "this type does not have a fixed size".to_owned()
+ } else {
+ format!("size can vary because of {bad}")
+ }
+ }
+ Err(err) => err.to_string(),
+ };
+
+ let mut err = struct_span_err!(
+ tcx.sess,
+ span,
+ E0512,
+ "cannot transmute between types of different sizes, \
+ or dependently-sized types"
+ );
+ if from == to {
+ err.note(&format!("`{from}` does not have a fixed size"));
+ } else {
+ err.note(&format!("source type: `{}` ({})", from, skeleton_string(from, sk_from)))
+ .note(&format!("target type: `{}` ({})", to, skeleton_string(to, sk_to)));
+ }
+ err.emit();
+ }
+}
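
For reference, the two diagnostics emitted by `check_transmute` above correspond to the following user-level errors (E0512 for mismatched sizes, E0591 for transmuting a zero-sized function item); the offending lines are commented out so the snippet itself compiles:

```rust
fn main() {
    // E0512: cannot transmute between types of different sizes.
    // let _wide: u64 = unsafe { std::mem::transmute(1u32) };

    // E0591: a function item such as `f` is a zero-sized type, so transmuting
    // it to a function pointer is rejected, with a suggestion to cast instead.
    fn f() {}
    // let _p: fn() = unsafe { std::mem::transmute(f) };
    let _p: fn() = f as fn(); // the suggested `as` cast
    _p();
}
```
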
diff --git a/compiler/rustc_hir_typeck/src/lib.rs b/compiler/rustc_hir_typeck/src/lib.rs
new file mode 100644
index 000000000..959c54866
--- /dev/null
+++ b/compiler/rustc_hir_typeck/src/lib.rs
@@ -0,0 +1,507 @@
+#![feature(if_let_guard)]
+#![feature(let_chains)]
+#![feature(try_blocks)]
+#![feature(never_type)]
+#![feature(min_specialization)]
+#![feature(control_flow_enum)]
+#![feature(drain_filter)]
+#![allow(rustc::potential_query_instability)]
+#![recursion_limit = "256"]
+
+#[macro_use]
+extern crate tracing;
+
+#[macro_use]
+extern crate rustc_middle;
+
+mod _match;
+mod autoderef;
+mod callee;
+// Used by clippy.
+pub mod cast;
+mod check;
+mod closure;
+mod coercion;
+mod demand;
+mod diverges;
+mod errors;
+mod expectation;
+mod expr;
+// Used by clippy.
+pub mod expr_use_visitor;
+mod fallback;
+mod fn_ctxt;
+mod gather_locals;
+mod generator_interior;
+mod inherited;
+mod intrinsicck;
+mod mem_categorization;
+mod method;
+mod op;
+mod pat;
+mod place_op;
+mod rvalue_scopes;
+mod upvar;
+mod writeback;
+
+pub use diverges::Diverges;
+pub use expectation::Expectation;
+pub use fn_ctxt::*;
+pub use inherited::{Inherited, InheritedBuilder};
+
+use crate::check::check_fn;
+use crate::coercion::DynamicCoerceMany;
+use crate::gather_locals::GatherLocalsVisitor;
+use rustc_data_structures::unord::UnordSet;
+use rustc_errors::{struct_span_err, MultiSpan};
+use rustc_hir as hir;
+use rustc_hir::def::Res;
+use rustc_hir::intravisit::Visitor;
+use rustc_hir::{HirIdMap, Node};
+use rustc_hir_analysis::astconv::AstConv;
+use rustc_hir_analysis::check::check_abi;
+use rustc_infer::infer::type_variable::{TypeVariableOrigin, TypeVariableOriginKind};
+use rustc_middle::traits;
+use rustc_middle::ty::query::Providers;
+use rustc_middle::ty::{self, Ty, TyCtxt};
+use rustc_session::config;
+use rustc_session::Session;
+use rustc_span::def_id::{DefId, LocalDefId};
+use rustc_span::Span;
+
+#[macro_export]
+macro_rules! type_error_struct {
+ ($session:expr, $span:expr, $typ:expr, $code:ident, $($message:tt)*) => ({
+ let mut err = rustc_errors::struct_span_err!($session, $span, $code, $($message)*);
+
+ if $typ.references_error() {
+ err.downgrade_to_delayed_bug();
+ }
+
+ err
+ })
+}
+
+/// The type of a local binding, including the revealed type for anon types.
+#[derive(Copy, Clone, Debug)]
+pub struct LocalTy<'tcx> {
+ decl_ty: Ty<'tcx>,
+ revealed_ty: Ty<'tcx>,
+}
+
+#[derive(Copy, Clone)]
+pub struct UnsafetyState {
+ pub def: hir::HirId,
+ pub unsafety: hir::Unsafety,
+ from_fn: bool,
+}
+
+impl UnsafetyState {
+ pub fn function(unsafety: hir::Unsafety, def: hir::HirId) -> UnsafetyState {
+ UnsafetyState { def, unsafety, from_fn: true }
+ }
+
+ pub fn recurse(self, blk: &hir::Block<'_>) -> UnsafetyState {
+ use hir::BlockCheckMode;
+ match self.unsafety {
+ // If this unsafe, then if the outer function was already marked as
+ // unsafe we shouldn't attribute the unsafe'ness to the block. This
+ // way the block can be warned about instead of ignoring this
+ // extraneous block (functions are never warned about).
+ hir::Unsafety::Unsafe if self.from_fn => self,
+
+ unsafety => {
+ let (unsafety, def) = match blk.rules {
+ BlockCheckMode::UnsafeBlock(..) => (hir::Unsafety::Unsafe, blk.hir_id),
+ BlockCheckMode::DefaultBlock => (unsafety, self.def),
+ };
+ UnsafetyState { def, unsafety, from_fn: false }
+ }
+ }
+ }
+}
+
+/// If this `DefId` is a "primary tables entry", returns
+/// `Some((body_id, body_ty, fn_sig))`. Otherwise, returns `None`.
+///
+/// If this function returns `Some`, then `typeck_results(def_id)` will
+/// succeed; if it returns `None`, then `typeck_results(def_id)` may or
+/// may not succeed. In some cases where this function returns `None`
+/// (notably closures), `typeck_results(def_id)` would wind up
+/// redirecting to the owning function.
+fn primary_body_of(
+ tcx: TyCtxt<'_>,
+ id: hir::HirId,
+) -> Option<(hir::BodyId, Option<&hir::Ty<'_>>, Option<&hir::FnSig<'_>>)> {
+ match tcx.hir().get(id) {
+ Node::Item(item) => match item.kind {
+ hir::ItemKind::Const(ty, body) | hir::ItemKind::Static(ty, _, body) => {
+ Some((body, Some(ty), None))
+ }
+ hir::ItemKind::Fn(ref sig, .., body) => Some((body, None, Some(sig))),
+ _ => None,
+ },
+ Node::TraitItem(item) => match item.kind {
+ hir::TraitItemKind::Const(ty, Some(body)) => Some((body, Some(ty), None)),
+ hir::TraitItemKind::Fn(ref sig, hir::TraitFn::Provided(body)) => {
+ Some((body, None, Some(sig)))
+ }
+ _ => None,
+ },
+ Node::ImplItem(item) => match item.kind {
+ hir::ImplItemKind::Const(ty, body) => Some((body, Some(ty), None)),
+ hir::ImplItemKind::Fn(ref sig, body) => Some((body, None, Some(sig))),
+ _ => None,
+ },
+ Node::AnonConst(constant) => Some((constant.body, None, None)),
+ _ => None,
+ }
+}
+
+fn has_typeck_results(tcx: TyCtxt<'_>, def_id: DefId) -> bool {
+ // Closures' typeck results come from their outermost function,
+ // as they are part of the same "inference environment".
+ let typeck_root_def_id = tcx.typeck_root_def_id(def_id);
+ if typeck_root_def_id != def_id {
+ return tcx.has_typeck_results(typeck_root_def_id);
+ }
+
+ if let Some(def_id) = def_id.as_local() {
+ let id = tcx.hir().local_def_id_to_hir_id(def_id);
+ primary_body_of(tcx, id).is_some()
+ } else {
+ false
+ }
+}
+
+fn used_trait_imports(tcx: TyCtxt<'_>, def_id: LocalDefId) -> &UnordSet<LocalDefId> {
+ &*tcx.typeck(def_id).used_trait_imports
+}
+
+fn typeck_item_bodies(tcx: TyCtxt<'_>, (): ()) {
+ tcx.hir().par_body_owners(|body_owner_def_id| tcx.ensure().typeck(body_owner_def_id));
+}
+
+fn typeck_const_arg<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ (did, param_did): (LocalDefId, DefId),
+) -> &ty::TypeckResults<'tcx> {
+ let fallback = move || tcx.type_of(param_did);
+ typeck_with_fallback(tcx, did, fallback)
+}
+
+fn typeck<'tcx>(tcx: TyCtxt<'tcx>, def_id: LocalDefId) -> &ty::TypeckResults<'tcx> {
+ if let Some(param_did) = tcx.opt_const_param_of(def_id) {
+ tcx.typeck_const_arg((def_id, param_did))
+ } else {
+ let fallback = move || tcx.type_of(def_id.to_def_id());
+ typeck_with_fallback(tcx, def_id, fallback)
+ }
+}
+
+/// Used only to get `TypeckResults` for type inference during error recovery.
+/// Currently only used for type inference of `static`s and `const`s to avoid type cycle errors.
+fn diagnostic_only_typeck<'tcx>(tcx: TyCtxt<'tcx>, def_id: LocalDefId) -> &ty::TypeckResults<'tcx> {
+ let fallback = move || {
+ let span = tcx.hir().span(tcx.hir().local_def_id_to_hir_id(def_id));
+ tcx.ty_error_with_message(span, "diagnostic only typeck table used")
+ };
+ typeck_with_fallback(tcx, def_id, fallback)
+}
+
+fn typeck_with_fallback<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ def_id: LocalDefId,
+ fallback: impl Fn() -> Ty<'tcx> + 'tcx,
+) -> &'tcx ty::TypeckResults<'tcx> {
+ // Closures' typeck results come from their outermost function,
+ // as they are part of the same "inference environment".
+ let typeck_root_def_id = tcx.typeck_root_def_id(def_id.to_def_id()).expect_local();
+ if typeck_root_def_id != def_id {
+ return tcx.typeck(typeck_root_def_id);
+ }
+
+ let id = tcx.hir().local_def_id_to_hir_id(def_id);
+ let span = tcx.hir().span(id);
+
+ // Figure out what primary body this item has.
+ let (body_id, body_ty, fn_sig) = primary_body_of(tcx, id).unwrap_or_else(|| {
+ span_bug!(span, "can't type-check body of {:?}", def_id);
+ });
+ let body = tcx.hir().body(body_id);
+
+ let typeck_results = Inherited::build(tcx, def_id).enter(|inh| {
+ let param_env = tcx.param_env(def_id);
+ let mut fcx = if let Some(hir::FnSig { header, decl, .. }) = fn_sig {
+ let fn_sig = if rustc_hir_analysis::collect::get_infer_ret_ty(&decl.output).is_some() {
+ let fcx = FnCtxt::new(&inh, param_env, body.value.hir_id);
+ <dyn AstConv<'_>>::ty_of_fn(&fcx, id, header.unsafety, header.abi, decl, None, None)
+ } else {
+ tcx.fn_sig(def_id)
+ };
+
+ check_abi(tcx, id, span, fn_sig.abi());
+
+ // Compute the function signature from point of view of inside the fn.
+ let fn_sig = tcx.liberate_late_bound_regions(def_id.to_def_id(), fn_sig);
+ let fn_sig = inh.normalize_associated_types_in(
+ body.value.span,
+ body_id.hir_id,
+ param_env,
+ fn_sig,
+ );
+ check_fn(&inh, param_env, fn_sig, decl, id, body, None, true).0
+ } else {
+ let fcx = FnCtxt::new(&inh, param_env, body.value.hir_id);
+ let expected_type = body_ty
+ .and_then(|ty| match ty.kind {
+ hir::TyKind::Infer => Some(<dyn AstConv<'_>>::ast_ty_to_ty(&fcx, ty)),
+ _ => None,
+ })
+ .unwrap_or_else(|| match tcx.hir().get(id) {
+ Node::AnonConst(_) => match tcx.hir().get(tcx.hir().get_parent_node(id)) {
+ Node::Expr(&hir::Expr {
+ kind: hir::ExprKind::ConstBlock(ref anon_const),
+ ..
+ }) if anon_const.hir_id == id => fcx.next_ty_var(TypeVariableOrigin {
+ kind: TypeVariableOriginKind::TypeInference,
+ span,
+ }),
+ Node::Ty(&hir::Ty {
+ kind: hir::TyKind::Typeof(ref anon_const), ..
+ }) if anon_const.hir_id == id => fcx.next_ty_var(TypeVariableOrigin {
+ kind: TypeVariableOriginKind::TypeInference,
+ span,
+ }),
+ Node::Expr(&hir::Expr { kind: hir::ExprKind::InlineAsm(asm), .. })
+ | Node::Item(&hir::Item { kind: hir::ItemKind::GlobalAsm(asm), .. }) => {
+ let operand_ty = asm
+ .operands
+ .iter()
+ .filter_map(|(op, _op_sp)| match op {
+ hir::InlineAsmOperand::Const { anon_const }
+ if anon_const.hir_id == id =>
+ {
+ // Inline assembly constants must be integers.
+ Some(fcx.next_int_var())
+ }
+ hir::InlineAsmOperand::SymFn { anon_const }
+ if anon_const.hir_id == id =>
+ {
+ Some(fcx.next_ty_var(TypeVariableOrigin {
+ kind: TypeVariableOriginKind::MiscVariable,
+ span,
+ }))
+ }
+ _ => None,
+ })
+ .next();
+ operand_ty.unwrap_or_else(fallback)
+ }
+ _ => fallback(),
+ },
+ _ => fallback(),
+ });
+
+ let expected_type = fcx.normalize_associated_types_in(body.value.span, expected_type);
+ fcx.require_type_is_sized(expected_type, body.value.span, traits::ConstSized);
+
+ // Gather locals in statics (because of block expressions).
+ GatherLocalsVisitor::new(&fcx).visit_body(body);
+
+ fcx.check_expr_coercable_to_type(&body.value, expected_type, None);
+
+ fcx.write_ty(id, expected_type);
+
+ fcx
+ };
+
+ let fallback_has_occurred = fcx.type_inference_fallback();
+
+ // Even though coercion casts provide type hints, we check casts after fallback for
+ // backwards compatibility. This makes fallback a stronger type hint than a cast coercion.
+ fcx.check_casts();
+ fcx.select_obligations_where_possible(fallback_has_occurred, |_| {});
+
+ // Closure and generator analysis may run after fallback
+ // because they don't constrain other type variables.
+ // Closure analysis only runs on closures. Therefore they only need to fulfill non-const predicates (as of now)
+ let prev_constness = fcx.param_env.constness();
+ fcx.param_env = fcx.param_env.without_const();
+ fcx.closure_analyze(body);
+ fcx.param_env = fcx.param_env.with_constness(prev_constness);
+ assert!(fcx.deferred_call_resolutions.borrow().is_empty());
+ // Before the generator analysis, temporary scopes shall be marked to provide more
+ // precise information on types to be captured.
+ fcx.resolve_rvalue_scopes(def_id.to_def_id());
+ fcx.resolve_generator_interiors(def_id.to_def_id());
+
+ for (ty, span, code) in fcx.deferred_sized_obligations.borrow_mut().drain(..) {
+ let ty = fcx.normalize_ty(span, ty);
+ fcx.require_type_is_sized(ty, span, code);
+ }
+
+ fcx.select_all_obligations_or_error();
+
+ if !fcx.infcx.is_tainted_by_errors() {
+ fcx.check_transmutes();
+ }
+
+ fcx.check_asms();
+
+ fcx.infcx.skip_region_resolution();
+
+ fcx.resolve_type_vars_in_body(body)
+ });
+
+ // Consistency check our TypeckResults instance can hold all ItemLocalIds
+ // it will need to hold.
+ assert_eq!(typeck_results.hir_owner, id.owner);
+
+ typeck_results
+}
+
+/// When `check_fn` is invoked on a generator (i.e., a body that
+/// includes yield), it returns back some information about the yield
+/// points.
+struct GeneratorTypes<'tcx> {
+ /// Type of generator argument / values returned by `yield`.
+ resume_ty: Ty<'tcx>,
+
+ /// Type of value that is yielded.
+ yield_ty: Ty<'tcx>,
+
+ /// Types that are captured (see `GeneratorInterior` for more).
+ interior: Ty<'tcx>,
+
+ /// Indicates if the generator is movable or static (immovable).
+ movability: hir::Movability,
+}
+
+#[derive(Copy, Clone, Debug, PartialEq, Eq)]
+pub enum Needs {
+ MutPlace,
+ None,
+}
+
+impl Needs {
+ fn maybe_mut_place(m: hir::Mutability) -> Self {
+ match m {
+ hir::Mutability::Mut => Needs::MutPlace,
+ hir::Mutability::Not => Needs::None,
+ }
+ }
+}
+
+#[derive(Debug, Copy, Clone)]
+pub enum PlaceOp {
+ Deref,
+ Index,
+}
+
+pub struct BreakableCtxt<'tcx> {
+ may_break: bool,
+
+    // this is `None` for loops where break with a value is illegal,
+    // such as `while`, `for`, and `while let`
+ coerce: Option<DynamicCoerceMany<'tcx>>,
+}
+
+pub struct EnclosingBreakables<'tcx> {
+ stack: Vec<BreakableCtxt<'tcx>>,
+ by_id: HirIdMap<usize>,
+}
+
+impl<'tcx> EnclosingBreakables<'tcx> {
+ fn find_breakable(&mut self, target_id: hir::HirId) -> &mut BreakableCtxt<'tcx> {
+ self.opt_find_breakable(target_id).unwrap_or_else(|| {
+ bug!("could not find enclosing breakable with id {}", target_id);
+ })
+ }
+
+ fn opt_find_breakable(&mut self, target_id: hir::HirId) -> Option<&mut BreakableCtxt<'tcx>> {
+ match self.by_id.get(&target_id) {
+ Some(ix) => Some(&mut self.stack[*ix]),
+ None => None,
+ }
+ }
+}
+
+fn report_unexpected_variant_res(tcx: TyCtxt<'_>, res: Res, qpath: &hir::QPath<'_>, span: Span) {
+ struct_span_err!(
+ tcx.sess,
+ span,
+ E0533,
+ "expected unit struct, unit variant or constant, found {} `{}`",
+ res.descr(),
+ rustc_hir_pretty::qpath_to_string(qpath),
+ )
+ .emit();
+}
+
+/// Controls whether the arguments are tupled. This is used for the call
+/// operator.
+///
+/// Tupling means that all call-side arguments are packed into a tuple and
+/// passed as a single parameter. For example, if tupling is enabled, this
+/// function:
+/// ```
+/// fn f(x: (isize, isize)) {}
+/// ```
+/// Can be called as:
+/// ```ignore UNSOLVED (can this be done in user code?)
+/// # fn f(x: (isize, isize)) {}
+/// f(1, 2);
+/// ```
+/// Instead of:
+/// ```
+/// # fn f(x: (isize, isize)) {}
+/// f((1, 2));
+/// ```
+#[derive(Clone, Eq, PartialEq)]
+enum TupleArgumentsFlag {
+ DontTupleArguments,
+ TupleArguments,
+}
+
+fn fatally_break_rust(sess: &Session) {
+ let handler = sess.diagnostic();
+ handler.span_bug_no_panic(
+ MultiSpan::new(),
+ "It looks like you're trying to break rust; would you like some ICE?",
+ );
+ handler.note_without_error("the compiler expectedly panicked. this is a feature.");
+ handler.note_without_error(
+ "we would appreciate a joke overview: \
+ https://github.com/rust-lang/rust/issues/43162#issuecomment-320764675",
+ );
+ handler.note_without_error(&format!(
+ "rustc {} running on {}",
+ option_env!("CFG_VERSION").unwrap_or("unknown_version"),
+ config::host_triple(),
+ ));
+}
+
+fn has_expected_num_generic_args<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ trait_did: Option<DefId>,
+ expected: usize,
+) -> bool {
+ trait_did.map_or(true, |trait_did| {
+ let generics = tcx.generics_of(trait_did);
+ generics.count() == expected + if generics.has_self { 1 } else { 0 }
+ })
+}
+
+pub fn provide(providers: &mut Providers) {
+ method::provide(providers);
+ *providers = Providers {
+ typeck_item_bodies,
+ typeck_const_arg,
+ typeck,
+ diagnostic_only_typeck,
+ has_typeck_results,
+ used_trait_imports,
+ ..*providers
+ };
+}
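
At the surface level, the tupling described in the `TupleArgumentsFlag` doc comment corresponds to how the `Fn*` traits receive their arguments as a single tuple. A minimal sketch in ordinary user code (illustrative only, not part of this patch):

```rust
// Illustrative only: the `Fn` traits take their call-site arguments as one
// tuple, which is what the `TupleArguments` flag above models when the call
// operator is type checked.
fn apply<F: Fn(i32, i32) -> i32>(f: F) -> i32 {
    // Surface syntax `f(1, 2)` is checked against `F: Fn<(i32, i32)>`, i.e.
    // the two arguments are packed into a single tuple parameter.
    f(1, 2)
}

fn main() {
    assert_eq!(apply(|a, b| a + b), 3);
}
```
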
diff --git a/compiler/rustc_typeck/src/mem_categorization.rs b/compiler/rustc_hir_typeck/src/mem_categorization.rs
index ced919f66..362f1c343 100644
--- a/compiler/rustc_typeck/src/mem_categorization.rs
+++ b/compiler/rustc_hir_typeck/src/mem_categorization.rs
@@ -92,7 +92,7 @@ impl HirNode for hir::Pat<'_> {
#[derive(Clone)]
pub(crate) struct MemCategorizationContext<'a, 'tcx> {
pub(crate) typeck_results: &'a ty::TypeckResults<'tcx>,
- infcx: &'a InferCtxt<'a, 'tcx>,
+ infcx: &'a InferCtxt<'tcx>,
param_env: ty::ParamEnv<'tcx>,
body_owner: LocalDefId,
upvars: Option<&'tcx FxIndexMap<hir::HirId, hir::Upvar>>,
@@ -103,7 +103,7 @@ pub(crate) type McResult<T> = Result<T, ()>;
impl<'a, 'tcx> MemCategorizationContext<'a, 'tcx> {
/// Creates a `MemCategorizationContext`.
pub(crate) fn new(
- infcx: &'a InferCtxt<'a, 'tcx>,
+ infcx: &'a InferCtxt<'tcx>,
param_env: ty::ParamEnv<'tcx>,
body_owner: LocalDefId,
typeck_results: &'a ty::TypeckResults<'tcx>,
@@ -184,7 +184,7 @@ impl<'a, 'tcx> MemCategorizationContext<'a, 'tcx> {
/// modes #42640) may look like `Some(x)` but in fact have
/// implicit deref patterns attached (e.g., it is really
/// `&Some(x)`). In that case, we return the "outermost" type
- /// (e.g., `&Option<T>).
+ /// (e.g., `&Option<T>`).
pub(crate) fn pat_ty_adjusted(&self, pat: &hir::Pat<'_>) -> McResult<Ty<'tcx>> {
// Check for implicit `&` types wrapping the pattern; note
// that these are never attached to binding patterns, so
@@ -265,6 +265,7 @@ impl<'a, 'tcx> MemCategorizationContext<'a, 'tcx> {
self.cat_expr_adjusted_with(expr, || Ok(previous), adjustment)
}
+ #[instrument(level = "debug", skip(self, previous))]
fn cat_expr_adjusted_with<F>(
&self,
expr: &hir::Expr<'_>,
@@ -274,7 +275,6 @@ impl<'a, 'tcx> MemCategorizationContext<'a, 'tcx> {
where
F: FnOnce() -> McResult<PlaceWithHirId<'tcx>>,
{
- debug!("cat_expr_adjusted_with({:?}): {:?}", adjustment, expr);
let target = self.resolve_vars_if_possible(adjustment.target);
match adjustment.kind {
adjustment::Adjust::Deref(overloaded) => {
@@ -292,13 +292,15 @@ impl<'a, 'tcx> MemCategorizationContext<'a, 'tcx> {
adjustment::Adjust::NeverToAny
| adjustment::Adjust::Pointer(_)
- | adjustment::Adjust::Borrow(_) => {
+ | adjustment::Adjust::Borrow(_)
+ | adjustment::Adjust::DynStar => {
// Result is an rvalue.
Ok(self.cat_rvalue(expr.hir_id, expr.span, target))
}
}
}
+ #[instrument(level = "debug", skip(self))]
pub(crate) fn cat_expr_unadjusted(
&self,
expr: &hir::Expr<'_>,
@@ -387,6 +389,7 @@ impl<'a, 'tcx> MemCategorizationContext<'a, 'tcx> {
}
}
+ #[instrument(level = "debug", skip(self, span))]
pub(crate) fn cat_res(
&self,
hir_id: hir::HirId,
@@ -394,8 +397,6 @@ impl<'a, 'tcx> MemCategorizationContext<'a, 'tcx> {
expr_ty: Ty<'tcx>,
res: Res,
) -> McResult<PlaceWithHirId<'tcx>> {
- debug!("cat_res: id={:?} expr={:?} def={:?}", hir_id, expr_ty, res);
-
match res {
Res::Def(
DefKind::Ctor(..)
@@ -475,13 +476,12 @@ impl<'a, 'tcx> MemCategorizationContext<'a, 'tcx> {
ret
}
+ #[instrument(level = "debug", skip(self))]
fn cat_overloaded_place(
&self,
expr: &hir::Expr<'_>,
base: &hir::Expr<'_>,
) -> McResult<PlaceWithHirId<'tcx>> {
- debug!("cat_overloaded_place(expr={:?}, base={:?})", expr, base);
-
// Reconstruct the output assuming it's a reference with the
// same region and mutability as the receiver. This holds for
// `Deref(Mut)::Deref(_mut)` and `Index(Mut)::index(_mut)`.
@@ -497,13 +497,12 @@ impl<'a, 'tcx> MemCategorizationContext<'a, 'tcx> {
self.cat_deref(expr, base)
}
+ #[instrument(level = "debug", skip(self, node))]
fn cat_deref(
&self,
node: &impl HirNode,
base_place: PlaceWithHirId<'tcx>,
) -> McResult<PlaceWithHirId<'tcx>> {
- debug!("cat_deref: base_place={:?}", base_place);
-
let base_curr_ty = base_place.place.ty();
let deref_ty = match base_curr_ty.builtin_deref(true) {
Some(mt) => mt.ty,
@@ -562,7 +561,8 @@ impl<'a, 'tcx> MemCategorizationContext<'a, 'tcx> {
Res::Def(DefKind::Ctor(CtorOf::Struct, ..), _)
| Res::Def(DefKind::Struct | DefKind::Union | DefKind::TyAlias | DefKind::AssocTy, _)
| Res::SelfCtor(..)
- | Res::SelfTy { .. } => {
+ | Res::SelfTyParam { .. }
+ | Res::SelfTyAlias { .. } => {
                 // Structs and Unions only have one variant.
Ok(VariantIdx::new(0))
}
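
The implicit deref patterns mentioned in the `pat_ty_adjusted` doc comment above are the match-ergonomics adjustments visible in ordinary user code. A small sketch, assuming nothing beyond stable match ergonomics:

```rust
// Sketch of match ergonomics: the pattern `Some(name)` below is matched
// against a `&Option<String>`, so the binding mode becomes `ref` and `name`
// is a `&String`. The "outermost" adjusted pattern type is `&Option<String>`,
// which is what `pat_ty_adjusted` reports.
fn greet(maybe_name: &Option<String>) {
    match maybe_name {
        Some(name) => println!("hello, {name}"), // name: &String
        None => println!("hello, stranger"),
    }
}

fn main() {
    greet(&Some(String::from("ferris")));
    greet(&None);
}
```
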
diff --git a/compiler/rustc_typeck/src/check/method/confirm.rs b/compiler/rustc_hir_typeck/src/method/confirm.rs
index 2c89b63ae..be4ea9986 100644
--- a/compiler/rustc_typeck/src/check/method/confirm.rs
+++ b/compiler/rustc_hir_typeck/src/method/confirm.rs
@@ -1,16 +1,16 @@
use super::{probe, MethodCallee};
-use crate::astconv::{AstConv, CreateSubstsForGenericArgsCtxt, IsMethodCall};
-use crate::check::{callee, FnCtxt};
+use crate::{callee, FnCtxt};
use rustc_hir as hir;
use rustc_hir::def_id::DefId;
use rustc_hir::GenericArg;
+use rustc_hir_analysis::astconv::{AstConv, CreateSubstsForGenericArgsCtxt, IsMethodCall};
use rustc_infer::infer::{self, InferOk};
use rustc_middle::traits::{ObligationCauseCode, UnifyReceiverContext};
use rustc_middle::ty::adjustment::{Adjust, Adjustment, PointerCast};
use rustc_middle::ty::adjustment::{AllowTwoPhase, AutoBorrow, AutoBorrowMutability};
use rustc_middle::ty::fold::TypeFoldable;
-use rustc_middle::ty::subst::{self, Subst, SubstsRef};
+use rustc_middle::ty::subst::{self, SubstsRef};
use rustc_middle::ty::{self, GenericParamDefKind, Ty};
use rustc_span::Span;
use rustc_trait_selection::traits;
@@ -491,7 +491,19 @@ impl<'a, 'tcx> ConfirmContext<'a, 'tcx> {
// so we just call `predicates_for_generics` directly to avoid redoing work.
// `self.add_required_obligations(self.span, def_id, &all_substs);`
for obligation in traits::predicates_for_generics(
- traits::ObligationCause::new(self.span, self.body_id, traits::ItemObligation(def_id)),
+ |idx, span| {
+ let code = if span.is_dummy() {
+ ObligationCauseCode::ExprItemObligation(def_id, self.call_expr.hir_id, idx)
+ } else {
+ ObligationCauseCode::ExprBindingObligation(
+ def_id,
+ span,
+ self.call_expr.hir_id,
+ idx,
+ )
+ };
+ traits::ObligationCause::new(self.span, self.body_id, code)
+ },
self.param_env,
method_predicates,
) {
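
At the user level, the obligations constructed in this hunk come from the called method's own bounds; the closure only decides which span the resulting error blames. A hedged sketch with made-up types (`Printer` and `NotDebug` are illustrative, not from the compiler):

```rust
use std::fmt::Debug;

struct Printer;

impl Printer {
    // The `T: Debug` bound becomes a predicate that method confirmation
    // registers as an obligation for every call to `print_all`.
    fn print_all<T: Debug>(&self, items: &[T]) {
        for item in items {
            println!("{item:?}");
        }
    }
}

struct NotDebug;

fn main() {
    let p = Printer;
    p.print_all(&[1, 2, 3]); // ok: `i32: Debug` holds

    // Uncommenting the next line fails the `T: Debug` obligation; the code
    // above chooses whether the diagnostic points at the call expression or
    // at the span of the bound itself.
    // p.print_all(&[NotDebug]);
}
```
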
diff --git a/compiler/rustc_hir_typeck/src/method/mod.rs b/compiler/rustc_hir_typeck/src/method/mod.rs
new file mode 100644
index 000000000..a1278edef
--- /dev/null
+++ b/compiler/rustc_hir_typeck/src/method/mod.rs
@@ -0,0 +1,625 @@
+//! Method lookup: the secret sauce of Rust. See the [rustc dev guide] for more information.
+//!
+//! [rustc dev guide]: https://rustc-dev-guide.rust-lang.org/method-lookup.html
+
+mod confirm;
+mod prelude2021;
+pub mod probe;
+mod suggest;
+
+pub use self::suggest::SelfSource;
+pub use self::MethodError::*;
+
+use crate::{Expectation, FnCtxt};
+use rustc_data_structures::sync::Lrc;
+use rustc_errors::{Applicability, Diagnostic};
+use rustc_hir as hir;
+use rustc_hir::def::{CtorOf, DefKind, Namespace};
+use rustc_hir::def_id::DefId;
+use rustc_infer::infer::{self, InferOk};
+use rustc_middle::traits::ObligationCause;
+use rustc_middle::ty::subst::{InternalSubsts, SubstsRef};
+use rustc_middle::ty::{self, DefIdTree, GenericParamDefKind, ToPredicate, Ty, TypeVisitable};
+use rustc_span::symbol::Ident;
+use rustc_span::Span;
+use rustc_trait_selection::traits;
+use rustc_trait_selection::traits::query::evaluate_obligation::InferCtxtExt;
+
+use self::probe::{IsSuggestion, ProbeScope};
+
+pub fn provide(providers: &mut ty::query::Providers) {
+ probe::provide(providers);
+}
+
+#[derive(Clone, Copy, Debug)]
+pub struct MethodCallee<'tcx> {
+ /// Impl method ID, for inherent methods, or trait method ID, otherwise.
+ pub def_id: DefId,
+ pub substs: SubstsRef<'tcx>,
+
+ /// Instantiated method signature, i.e., it has been
+ /// substituted, normalized, and has had late-bound
+ /// lifetimes replaced with inference variables.
+ pub sig: ty::FnSig<'tcx>,
+}
+
+#[derive(Debug)]
+pub enum MethodError<'tcx> {
+ // Did not find an applicable method, but we did find various near-misses that may work.
+ NoMatch(NoMatchData<'tcx>),
+
+ // Multiple methods might apply.
+ Ambiguity(Vec<CandidateSource>),
+
+ // Found an applicable method, but it is not visible. The third argument contains a list of
+ // not-in-scope traits which may work.
+ PrivateMatch(DefKind, DefId, Vec<DefId>),
+
+    // Found a `Self: Sized` bound where `Self` is a trait object; the caller may also have
+ // forgotten to import a trait.
+ IllegalSizedBound(Vec<DefId>, bool, Span),
+
+ // Found a match, but the return type is wrong
+ BadReturnType,
+}
+
+// Contains a list of static methods that may apply, a list of unsatisfied trait predicates which
+// could lead to matches if satisfied, and a list of not-in-scope traits which may work.
+#[derive(Debug)]
+pub struct NoMatchData<'tcx> {
+ pub static_candidates: Vec<CandidateSource>,
+ pub unsatisfied_predicates:
+ Vec<(ty::Predicate<'tcx>, Option<ty::Predicate<'tcx>>, Option<ObligationCause<'tcx>>)>,
+ pub out_of_scope_traits: Vec<DefId>,
+ pub lev_candidate: Option<ty::AssocItem>,
+ pub mode: probe::Mode,
+}
+
+// A pared down enum describing just the places from which a method
+// candidate can arise. Used for error reporting only.
+#[derive(Copy, Clone, Debug, Eq, Ord, PartialEq, PartialOrd)]
+pub enum CandidateSource {
+ Impl(DefId),
+ Trait(DefId /* trait id */),
+}
+
+impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
+ /// Determines whether the type `self_ty` supports a method name `method_name` or not.
+ #[instrument(level = "debug", skip(self))]
+ pub fn method_exists(
+ &self,
+ method_name: Ident,
+ self_ty: Ty<'tcx>,
+ call_expr_id: hir::HirId,
+ allow_private: bool,
+ ) -> bool {
+ let mode = probe::Mode::MethodCall;
+ match self.probe_for_name(
+ method_name.span,
+ mode,
+ method_name,
+ IsSuggestion(false),
+ self_ty,
+ call_expr_id,
+ ProbeScope::TraitsInScope,
+ ) {
+ Ok(..) => true,
+ Err(NoMatch(..)) => false,
+ Err(Ambiguity(..)) => true,
+ Err(PrivateMatch(..)) => allow_private,
+ Err(IllegalSizedBound(..)) => true,
+ Err(BadReturnType) => bug!("no return type expectations but got BadReturnType"),
+ }
+ }
+
+ /// Adds a suggestion to call the given method to the provided diagnostic.
+ #[instrument(level = "debug", skip(self, err, call_expr))]
+ pub(crate) fn suggest_method_call(
+ &self,
+ err: &mut Diagnostic,
+ msg: &str,
+ method_name: Ident,
+ self_ty: Ty<'tcx>,
+ call_expr: &hir::Expr<'_>,
+ span: Option<Span>,
+ ) {
+ let params = self
+ .probe_for_name(
+ method_name.span,
+ probe::Mode::MethodCall,
+ method_name,
+ IsSuggestion(false),
+ self_ty,
+ call_expr.hir_id,
+ ProbeScope::TraitsInScope,
+ )
+ .map(|pick| {
+ let sig = self.tcx.fn_sig(pick.item.def_id);
+ sig.inputs().skip_binder().len().saturating_sub(1)
+ })
+ .unwrap_or(0);
+
+ // Account for `foo.bar<T>`;
+ let sugg_span = span.unwrap_or(call_expr.span).shrink_to_hi();
+ let (suggestion, applicability) = (
+ format!("({})", (0..params).map(|_| "_").collect::<Vec<_>>().join(", ")),
+ if params > 0 { Applicability::HasPlaceholders } else { Applicability::MaybeIncorrect },
+ );
+
+ err.span_suggestion_verbose(sugg_span, msg, suggestion, applicability);
+ }
+
+ /// Performs method lookup. If lookup is successful, it will return the callee
+ /// and store an appropriate adjustment for the self-expr. In some cases it may
+ /// report an error (e.g., invoking the `drop` method).
+ ///
+ /// # Arguments
+ ///
+ /// Given a method call like `foo.bar::<T1,...Tn>(a, b + 1, ...)`:
+ ///
+ /// * `self`: the surrounding `FnCtxt` (!)
+ /// * `self_ty`: the (unadjusted) type of the self expression (`foo`)
+ /// * `segment`: the name and generic arguments of the method (`bar::<T1, ...Tn>`)
+ /// * `span`: the span for the method call
+ /// * `call_expr`: the complete method call: (`foo.bar::<T1,...Tn>(...)`)
+ /// * `self_expr`: the self expression (`foo`)
+ /// * `args`: the expressions of the arguments (`a, b + 1, ...`)
+ #[instrument(level = "debug", skip(self))]
+ pub fn lookup_method(
+ &self,
+ self_ty: Ty<'tcx>,
+ segment: &hir::PathSegment<'_>,
+ span: Span,
+ call_expr: &'tcx hir::Expr<'tcx>,
+ self_expr: &'tcx hir::Expr<'tcx>,
+ args: &'tcx [hir::Expr<'tcx>],
+ ) -> Result<MethodCallee<'tcx>, MethodError<'tcx>> {
+ let pick =
+ self.lookup_probe(span, segment.ident, self_ty, call_expr, ProbeScope::TraitsInScope)?;
+
+ self.lint_dot_call_from_2018(self_ty, segment, span, call_expr, self_expr, &pick, args);
+
+ for import_id in &pick.import_ids {
+ debug!("used_trait_import: {:?}", import_id);
+ Lrc::get_mut(&mut self.typeck_results.borrow_mut().used_trait_imports)
+ .unwrap()
+ .insert(*import_id);
+ }
+
+ self.tcx.check_stability(pick.item.def_id, Some(call_expr.hir_id), span, None);
+
+ let result =
+ self.confirm_method(span, self_expr, call_expr, self_ty, pick.clone(), segment);
+ debug!("result = {:?}", result);
+
+ if let Some(span) = result.illegal_sized_bound {
+ let mut needs_mut = false;
+ if let ty::Ref(region, t_type, mutability) = self_ty.kind() {
+ let trait_type = self
+ .tcx
+ .mk_ref(*region, ty::TypeAndMut { ty: *t_type, mutbl: mutability.invert() });
+ // We probe again to see if there might be a borrow mutability discrepancy.
+ match self.lookup_probe(
+ span,
+ segment.ident,
+ trait_type,
+ call_expr,
+ ProbeScope::TraitsInScope,
+ ) {
+ Ok(ref new_pick) if *new_pick != pick => {
+ needs_mut = true;
+ }
+ _ => {}
+ }
+ }
+
+ // We probe again, taking all traits into account (not only those in scope).
+ let mut candidates = match self.lookup_probe(
+ span,
+ segment.ident,
+ self_ty,
+ call_expr,
+ ProbeScope::AllTraits,
+ ) {
+ // If we find a different result the caller probably forgot to import a trait.
+ Ok(ref new_pick) if *new_pick != pick => vec![new_pick.item.container_id(self.tcx)],
+ Err(Ambiguity(ref sources)) => sources
+ .iter()
+ .filter_map(|source| {
+ match *source {
+ // Note: this cannot come from an inherent impl,
+ // because the first probing succeeded.
+ CandidateSource::Impl(def) => self.tcx.trait_id_of_impl(def),
+ CandidateSource::Trait(_) => None,
+ }
+ })
+ .collect(),
+ _ => Vec::new(),
+ };
+ candidates.retain(|candidate| *candidate != self.tcx.parent(result.callee.def_id));
+
+ return Err(IllegalSizedBound(candidates, needs_mut, span));
+ }
+
+ Ok(result.callee)
+ }
+
+ #[instrument(level = "debug", skip(self, call_expr))]
+ pub fn lookup_probe(
+ &self,
+ span: Span,
+ method_name: Ident,
+ self_ty: Ty<'tcx>,
+ call_expr: &'tcx hir::Expr<'tcx>,
+ scope: ProbeScope,
+ ) -> probe::PickResult<'tcx> {
+ let mode = probe::Mode::MethodCall;
+ let self_ty = self.resolve_vars_if_possible(self_ty);
+ self.probe_for_name(
+ span,
+ mode,
+ method_name,
+ IsSuggestion(false),
+ self_ty,
+ call_expr.hir_id,
+ scope,
+ )
+ }
+
+ pub(super) fn obligation_for_method(
+ &self,
+ span: Span,
+ trait_def_id: DefId,
+ self_ty: Ty<'tcx>,
+ opt_input_types: Option<&[Ty<'tcx>]>,
+ ) -> (traits::Obligation<'tcx, ty::Predicate<'tcx>>, &'tcx ty::List<ty::subst::GenericArg<'tcx>>)
+ {
+ // Construct a trait-reference `self_ty : Trait<input_tys>`
+ let substs = InternalSubsts::for_item(self.tcx, trait_def_id, |param, _| {
+ match param.kind {
+ GenericParamDefKind::Lifetime | GenericParamDefKind::Const { .. } => {}
+ GenericParamDefKind::Type { .. } => {
+ if param.index == 0 {
+ return self_ty.into();
+ } else if let Some(input_types) = opt_input_types {
+ return input_types[param.index as usize - 1].into();
+ }
+ }
+ }
+ self.var_for_def(span, param)
+ });
+
+ let trait_ref = ty::TraitRef::new(trait_def_id, substs);
+
+ // Construct an obligation
+ let poly_trait_ref = ty::Binder::dummy(trait_ref);
+ (
+ traits::Obligation::misc(
+ span,
+ self.body_id,
+ self.param_env,
+ poly_trait_ref.without_const().to_predicate(self.tcx),
+ ),
+ substs,
+ )
+ }
+
+ pub(super) fn obligation_for_op_method(
+ &self,
+ span: Span,
+ trait_def_id: DefId,
+ self_ty: Ty<'tcx>,
+ opt_input_type: Option<Ty<'tcx>>,
+ opt_input_expr: Option<&'tcx hir::Expr<'tcx>>,
+ expected: Expectation<'tcx>,
+ ) -> (traits::Obligation<'tcx, ty::Predicate<'tcx>>, &'tcx ty::List<ty::subst::GenericArg<'tcx>>)
+ {
+ // Construct a trait-reference `self_ty : Trait<input_tys>`
+ let substs = InternalSubsts::for_item(self.tcx, trait_def_id, |param, _| {
+ match param.kind {
+ GenericParamDefKind::Lifetime | GenericParamDefKind::Const { .. } => {}
+ GenericParamDefKind::Type { .. } => {
+ if param.index == 0 {
+ return self_ty.into();
+ } else if let Some(input_type) = opt_input_type {
+ return input_type.into();
+ }
+ }
+ }
+ self.var_for_def(span, param)
+ });
+
+ let trait_ref = ty::TraitRef::new(trait_def_id, substs);
+
+ // Construct an obligation
+ let poly_trait_ref = ty::Binder::dummy(trait_ref);
+ let output_ty = expected.only_has_type(self).and_then(|ty| (!ty.needs_infer()).then(|| ty));
+
+ (
+ traits::Obligation::new(
+ traits::ObligationCause::new(
+ span,
+ self.body_id,
+ traits::BinOp {
+ rhs_span: opt_input_expr.map(|expr| expr.span),
+ is_lit: opt_input_expr
+ .map_or(false, |expr| matches!(expr.kind, hir::ExprKind::Lit(_))),
+ output_ty,
+ },
+ ),
+ self.param_env,
+ poly_trait_ref.without_const().to_predicate(self.tcx),
+ ),
+ substs,
+ )
+ }
+
+ /// `lookup_method_in_trait` is used for overloaded operators.
+ /// It does a very narrow slice of what the normal probe/confirm path does.
+ /// In particular, it doesn't really do any probing: it simply constructs
+ /// an obligation for a particular trait with the given self type and checks
+ /// whether that trait is implemented.
+ #[instrument(level = "debug", skip(self, span))]
+ pub(super) fn lookup_method_in_trait(
+ &self,
+ span: Span,
+ m_name: Ident,
+ trait_def_id: DefId,
+ self_ty: Ty<'tcx>,
+ opt_input_types: Option<&[Ty<'tcx>]>,
+ ) -> Option<InferOk<'tcx, MethodCallee<'tcx>>> {
+ let (obligation, substs) =
+ self.obligation_for_method(span, trait_def_id, self_ty, opt_input_types);
+ self.construct_obligation_for_trait(
+ span,
+ m_name,
+ trait_def_id,
+ obligation,
+ substs,
+ None,
+ false,
+ )
+ }
+
+ pub(super) fn lookup_op_method_in_trait(
+ &self,
+ span: Span,
+ m_name: Ident,
+ trait_def_id: DefId,
+ self_ty: Ty<'tcx>,
+ opt_input_type: Option<Ty<'tcx>>,
+ opt_input_expr: Option<&'tcx hir::Expr<'tcx>>,
+ expected: Expectation<'tcx>,
+ ) -> Option<InferOk<'tcx, MethodCallee<'tcx>>> {
+ let (obligation, substs) = self.obligation_for_op_method(
+ span,
+ trait_def_id,
+ self_ty,
+ opt_input_type,
+ opt_input_expr,
+ expected,
+ );
+ self.construct_obligation_for_trait(
+ span,
+ m_name,
+ trait_def_id,
+ obligation,
+ substs,
+ opt_input_expr,
+ true,
+ )
+ }
+
+ // FIXME(#18741): it seems likely that we can consolidate some of this
+ // code with the other method-lookup code. In particular, the second half
+ // of this method is basically the same as confirmation.
+ fn construct_obligation_for_trait(
+ &self,
+ span: Span,
+ m_name: Ident,
+ trait_def_id: DefId,
+ obligation: traits::PredicateObligation<'tcx>,
+ substs: &'tcx ty::List<ty::subst::GenericArg<'tcx>>,
+ opt_input_expr: Option<&'tcx hir::Expr<'tcx>>,
+ is_op: bool,
+ ) -> Option<InferOk<'tcx, MethodCallee<'tcx>>> {
+ debug!(?obligation);
+
+ // Now we want to know if this can be matched
+ if !self.predicate_may_hold(&obligation) {
+ debug!("--> Cannot match obligation");
+ // Cannot be matched, no such method resolution is possible.
+ return None;
+ }
+
+ // Trait must have a method named `m_name` and it should not have
+ // type parameters or early-bound regions.
+ let tcx = self.tcx;
+ let Some(method_item) = self.associated_value(trait_def_id, m_name) else {
+ tcx.sess.delay_span_bug(
+ span,
+ "operator trait does not have corresponding operator method",
+ );
+ return None;
+ };
+ let def_id = method_item.def_id;
+ let generics = tcx.generics_of(def_id);
+ assert_eq!(generics.params.len(), 0);
+
+ debug!("lookup_in_trait_adjusted: method_item={:?}", method_item);
+ let mut obligations = vec![];
+
+ // Instantiate late-bound regions and substitute the trait
+ // parameters into the method type to get the actual method type.
+ //
+ // N.B., instantiate late-bound regions first so that
+ // `instantiate_type_scheme` can normalize associated types that
+ // may reference those regions.
+ let fn_sig = tcx.bound_fn_sig(def_id);
+ let fn_sig = fn_sig.subst(self.tcx, substs);
+ let fn_sig = self.replace_bound_vars_with_fresh_vars(span, infer::FnCall, fn_sig);
+
+ let InferOk { value, obligations: o } = if is_op {
+ self.normalize_op_associated_types_in_as_infer_ok(span, fn_sig, opt_input_expr)
+ } else {
+ self.normalize_associated_types_in_as_infer_ok(span, fn_sig)
+ };
+ let fn_sig = {
+ obligations.extend(o);
+ value
+ };
+
+ // Register obligations for the parameters. This will include the
+ // `Self` parameter, which in turn has a bound of the main trait,
+ // so this also effectively registers `obligation` as well. (We
+ // used to register `obligation` explicitly, but that resulted in
+ // double error messages being reported.)
+ //
+ // Note that as the method comes from a trait, it should not have
+ // any late-bound regions appearing in its bounds.
+ let bounds = self.tcx.predicates_of(def_id).instantiate(self.tcx, substs);
+
+ let InferOk { value, obligations: o } = if is_op {
+ self.normalize_op_associated_types_in_as_infer_ok(span, bounds, opt_input_expr)
+ } else {
+ self.normalize_associated_types_in_as_infer_ok(span, bounds)
+ };
+ let bounds = {
+ obligations.extend(o);
+ value
+ };
+
+ assert!(!bounds.has_escaping_bound_vars());
+
+ let cause = if is_op {
+ ObligationCause::new(
+ span,
+ self.body_id,
+ traits::BinOp {
+ rhs_span: opt_input_expr.map(|expr| expr.span),
+ is_lit: opt_input_expr
+ .map_or(false, |expr| matches!(expr.kind, hir::ExprKind::Lit(_))),
+ output_ty: None,
+ },
+ )
+ } else {
+ traits::ObligationCause::misc(span, self.body_id)
+ };
+ let predicates_cause = cause.clone();
+ obligations.extend(traits::predicates_for_generics(
+ move |_, _| predicates_cause.clone(),
+ self.param_env,
+ bounds,
+ ));
+
+ // Also add an obligation for the method type being well-formed.
+ let method_ty = tcx.mk_fn_ptr(ty::Binder::dummy(fn_sig));
+ debug!(
+ "lookup_in_trait_adjusted: matched method method_ty={:?} obligation={:?}",
+ method_ty, obligation
+ );
+ obligations.push(traits::Obligation::new(
+ cause,
+ self.param_env,
+ ty::Binder::dummy(ty::PredicateKind::WellFormed(method_ty.into())).to_predicate(tcx),
+ ));
+
+ let callee = MethodCallee { def_id, substs, sig: fn_sig };
+
+ debug!("callee = {:?}", callee);
+
+ Some(InferOk { obligations, value: callee })
+ }
+
+ /// Performs a [full-qualified function call] (formerly "universal function call") lookup. If
+ /// lookup is successful, it will return the type of definition and the [`DefId`] of the found
+ /// function definition.
+ ///
+ /// [full-qualified function call]: https://doc.rust-lang.org/reference/expressions/call-expr.html#disambiguating-function-calls
+ ///
+ /// # Arguments
+ ///
+ /// Given a function call like `Foo::bar::<T1,...Tn>(...)`:
+ ///
+ /// * `self`: the surrounding `FnCtxt` (!)
+ /// * `span`: the span of the call, excluding arguments (`Foo::bar::<T1, ...Tn>`)
+ /// * `method_name`: the identifier of the function within the container type (`bar`)
+ /// * `self_ty`: the type to search within (`Foo`)
+ /// * `self_ty_span` the span for the type being searched within (span of `Foo`)
+ /// * `expr_id`: the [`hir::HirId`] of the expression composing the entire call
+ #[instrument(level = "debug", skip(self), ret)]
+ pub fn resolve_fully_qualified_call(
+ &self,
+ span: Span,
+ method_name: Ident,
+ self_ty: Ty<'tcx>,
+ self_ty_span: Span,
+ expr_id: hir::HirId,
+ ) -> Result<(DefKind, DefId), MethodError<'tcx>> {
+ let tcx = self.tcx;
+
+ // Check if we have an enum variant.
+ if let ty::Adt(adt_def, _) = self_ty.kind() {
+ if adt_def.is_enum() {
+ let variant_def = adt_def
+ .variants()
+ .iter()
+ .find(|vd| tcx.hygienic_eq(method_name, vd.ident(tcx), adt_def.did()));
+ if let Some(variant_def) = variant_def {
+ // Braced variants generate unusable names in value namespace (reserved for
+ // possible future use), so variants resolved as associated items may refer to
+ // them as well. It's ok to use the variant's id as a ctor id since an
+ // error will be reported on any use of such resolution anyway.
+ let ctor_def_id = variant_def.ctor_def_id.unwrap_or(variant_def.def_id);
+ tcx.check_stability(ctor_def_id, Some(expr_id), span, Some(method_name.span));
+ return Ok((
+ DefKind::Ctor(CtorOf::Variant, variant_def.ctor_kind),
+ ctor_def_id,
+ ));
+ }
+ }
+ }
+
+ let pick = self.probe_for_name(
+ span,
+ probe::Mode::Path,
+ method_name,
+ IsSuggestion(false),
+ self_ty,
+ expr_id,
+ ProbeScope::TraitsInScope,
+ )?;
+
+ self.lint_fully_qualified_call_from_2018(
+ span,
+ method_name,
+ self_ty,
+ self_ty_span,
+ expr_id,
+ &pick,
+ );
+
+ debug!(?pick);
+ {
+ let mut typeck_results = self.typeck_results.borrow_mut();
+ let used_trait_imports = Lrc::get_mut(&mut typeck_results.used_trait_imports).unwrap();
+ for import_id in pick.import_ids {
+ debug!(used_trait_import=?import_id);
+ used_trait_imports.insert(import_id);
+ }
+ }
+
+ let def_kind = pick.item.kind.as_def_kind();
+ tcx.check_stability(pick.item.def_id, Some(expr_id), span, Some(method_name.span));
+ Ok((def_kind, pick.item.def_id))
+ }
+
+ /// Finds item with name `item_name` defined in impl/trait `def_id`
+    /// and returns it, or `None` if no such item was defined there.
+ pub fn associated_value(&self, def_id: DefId, item_name: Ident) -> Option<ty::AssocItem> {
+ self.tcx
+ .associated_items(def_id)
+ .find_by_name_and_namespace(self.tcx, item_name, Namespace::ValueNS, def_id)
+ .copied()
+ }
+}
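
The two entry points above roughly correspond to the two ways a user can spell a call: receiver syntax goes through `lookup_method`, while a qualified path goes through `resolve_fully_qualified_call`. A small sketch (the `Greet`/`Ferris` names are invented for illustration):

```rust
// Sketch of the two lookup paths handled above: `lookup_method` resolves the
// receiver form `value.method()`, while `resolve_fully_qualified_call`
// resolves the path form `<Type as Trait>::method(value)`.
trait Greet {
    fn greet(&self) -> String;
}

struct Ferris;

impl Greet for Ferris {
    fn greet(&self) -> String {
        "hello from Ferris".to_string()
    }
}

fn main() {
    let crab = Ferris;
    // Method-call form: probe + confirm via `lookup_method`.
    assert_eq!(crab.greet(), "hello from Ferris");
    // Fully qualified form: resolved via `resolve_fully_qualified_call`.
    assert_eq!(<Ferris as Greet>::greet(&crab), "hello from Ferris");
}
```
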
diff --git a/compiler/rustc_typeck/src/check/method/prelude2021.rs b/compiler/rustc_hir_typeck/src/method/prelude2021.rs
index 7c68d9304..3c98a2aa3 100644
--- a/compiler/rustc_typeck/src/check/method/prelude2021.rs
+++ b/compiler/rustc_hir_typeck/src/method/prelude2021.rs
@@ -1,3 +1,7 @@
+use crate::{
+ method::probe::{self, Pick},
+ FnCtxt,
+};
use hir::def_id::DefId;
use hir::HirId;
use hir::ItemKind;
@@ -12,11 +16,6 @@ use rustc_span::symbol::{sym, Ident};
use rustc_span::Span;
use rustc_trait_selection::infer::InferCtxtExt;
-use crate::check::{
- method::probe::{self, Pick},
- FnCtxt,
-};
-
impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
pub(super) fn lint_dot_call_from_2018(
&self,
@@ -82,14 +81,10 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
prelude_or_array_lint,
self_expr.hir_id,
self_expr.span,
+ format!("trait method `{}` will become ambiguous in Rust 2021", segment.ident.name),
|lint| {
let sp = self_expr.span;
- let mut lint = lint.build(&format!(
- "trait method `{}` will become ambiguous in Rust 2021",
- segment.ident.name
- ));
-
let derefs = "*".repeat(pick.autoderefs);
let autoref = match pick.autoref_or_ptr_adjustment {
@@ -133,7 +128,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
);
}
- lint.emit();
+ lint
},
);
} else {
@@ -143,6 +138,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
prelude_or_array_lint,
call_expr.hir_id,
call_expr.span,
+ format!("trait method `{}` will become ambiguous in Rust 2021", segment.ident.name),
|lint| {
let sp = call_expr.span;
let trait_name = self.trait_path_or_bare_name(
@@ -151,16 +147,10 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
pick.item.container_id(self.tcx),
);
- let mut lint = lint.build(&format!(
- "trait method `{}` will become ambiguous in Rust 2021",
- segment.ident.name
- ));
-
let (self_adjusted, precise) = self.adjust_expr(pick, self_expr, sp);
if precise {
let args = args
.iter()
- .skip(1)
.map(|arg| {
let span = arg.span.find_ancestor_inside(sp).unwrap_or_default();
format!(
@@ -203,7 +193,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
);
}
- lint.emit();
+ lint
},
);
}
@@ -258,15 +248,23 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
return;
}
- self.tcx.struct_span_lint_hir(RUST_2021_PRELUDE_COLLISIONS, expr_id, span, |lint| {
- // "type" refers to either a type or, more likely, a trait from which
- // the associated function or method is from.
- let container_id = pick.item.container_id(self.tcx);
- let trait_path = self.trait_path_or_bare_name(span, expr_id, container_id);
- let trait_generics = self.tcx.generics_of(container_id);
-
- let trait_name =
- if trait_generics.params.len() <= trait_generics.has_self as usize {
+ self.tcx.struct_span_lint_hir(
+ RUST_2021_PRELUDE_COLLISIONS,
+ expr_id,
+ span,
+ format!(
+ "trait-associated function `{}` will become ambiguous in Rust 2021",
+ method_name.name
+ ),
+ |lint| {
+ // "type" refers to either a type or, more likely, a trait from which
+ // the associated function or method is from.
+ let container_id = pick.item.container_id(self.tcx);
+ let trait_path = self.trait_path_or_bare_name(span, expr_id, container_id);
+ let trait_generics = self.tcx.generics_of(container_id);
+
+ let trait_name = if trait_generics.params.len() <= trait_generics.has_self as usize
+ {
trait_path
} else {
let counts = trait_generics.own_counts();
@@ -283,44 +281,42 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
)
};
- let mut lint = lint.build(&format!(
- "trait-associated function `{}` will become ambiguous in Rust 2021",
- method_name.name
- ));
-
- let mut self_ty_name = self_ty_span
- .find_ancestor_inside(span)
- .and_then(|span| self.sess().source_map().span_to_snippet(span).ok())
- .unwrap_or_else(|| self_ty.to_string());
-
- // Get the number of generics the self type has (if an Adt) unless we can determine that
- // the user has written the self type with generics already which we (naively) do by looking
- // for a "<" in `self_ty_name`.
- if !self_ty_name.contains('<') {
- if let Adt(def, _) = self_ty.kind() {
- let generics = self.tcx.generics_of(def.did());
- if !generics.params.is_empty() {
- let counts = generics.own_counts();
- self_ty_name += &format!(
- "<{}>",
- std::iter::repeat("'_")
- .take(counts.lifetimes)
- .chain(std::iter::repeat("_").take(counts.types + counts.consts))
- .collect::<Vec<_>>()
- .join(", ")
- );
+ let mut self_ty_name = self_ty_span
+ .find_ancestor_inside(span)
+ .and_then(|span| self.sess().source_map().span_to_snippet(span).ok())
+ .unwrap_or_else(|| self_ty.to_string());
+
+ // Get the number of generics the self type has (if an Adt) unless we can determine that
+ // the user has written the self type with generics already which we (naively) do by looking
+ // for a "<" in `self_ty_name`.
+ if !self_ty_name.contains('<') {
+ if let Adt(def, _) = self_ty.kind() {
+ let generics = self.tcx.generics_of(def.did());
+ if !generics.params.is_empty() {
+ let counts = generics.own_counts();
+ self_ty_name += &format!(
+ "<{}>",
+ std::iter::repeat("'_")
+ .take(counts.lifetimes)
+ .chain(
+ std::iter::repeat("_").take(counts.types + counts.consts)
+ )
+ .collect::<Vec<_>>()
+ .join(", ")
+ );
+ }
}
}
- }
- lint.span_suggestion(
- span,
- "disambiguate the associated function",
- format!("<{} as {}>::{}", self_ty_name, trait_name, method_name.name,),
- Applicability::MachineApplicable,
- );
-
- lint.emit();
- });
+ lint.span_suggestion(
+ span,
+ "disambiguate the associated function",
+ format!("<{} as {}>::{}", self_ty_name, trait_name, method_name.name,),
+ Applicability::MachineApplicable,
+ );
+
+ lint
+ },
+ );
}
fn trait_path_or_bare_name(
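
For context, the collision this lint migrates looks roughly like the following in user code. The `MyTryInto` trait is invented for illustration; the disambiguated call mirrors the kind of rewrite the lint suggests:

```rust
// Edition-2018 style sketch of the collision migrated above. `MyTryInto` is a
// made-up trait whose method name collides with `std::convert::TryInto`,
// which is in the Rust 2021 prelude.
trait MyTryInto<T> {
    fn try_into(self) -> Result<T, ()>;
}

impl MyTryInto<u8> for i32 {
    fn try_into(self) -> Result<u8, ()> {
        u8::try_from(self).map_err(|_| ())
    }
}

fn main() {
    let x: i32 = 5;
    // In edition 2018, `x.try_into()` resolves to `MyTryInto::try_into`; in
    // 2021 it would become ambiguous with the prelude's `TryInto::try_into`,
    // so the lint suggests a disambiguated form like the one below.
    let byte: Result<u8, ()> = MyTryInto::try_into(x);
    assert_eq!(byte, Ok(5));
}
```
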
diff --git a/compiler/rustc_typeck/src/check/method/probe.rs b/compiler/rustc_hir_typeck/src/method/probe.rs
index efe15fec7..28aa2302f 100644
--- a/compiler/rustc_typeck/src/check/method/probe.rs
+++ b/compiler/rustc_hir_typeck/src/method/probe.rs
@@ -3,14 +3,12 @@ use super::CandidateSource;
use super::MethodError;
use super::NoMatchData;
-use crate::check::FnCtxt;
use crate::errors::MethodCallOnUnknownType;
-use crate::hir::def::DefKind;
-use crate::hir::def_id::DefId;
-
+use crate::FnCtxt;
use rustc_data_structures::fx::FxHashSet;
use rustc_errors::Applicability;
use rustc_hir as hir;
+use rustc_hir::def::DefKind;
use rustc_hir::def::Namespace;
use rustc_infer::infer::canonical::OriginalQueryValues;
use rustc_infer::infer::canonical::{Canonical, QueryResponse};
@@ -19,10 +17,11 @@ use rustc_infer::infer::{self, InferOk, TyCtxtInferExt};
use rustc_middle::infer::unify_key::{ConstVariableOrigin, ConstVariableOriginKind};
use rustc_middle::middle::stability;
use rustc_middle::ty::fast_reject::{simplify_type, TreatParams};
-use rustc_middle::ty::subst::{InternalSubsts, Subst, SubstsRef};
use rustc_middle::ty::GenericParamDefKind;
use rustc_middle::ty::{self, ParamEnvAnd, ToPredicate, Ty, TyCtxt, TypeFoldable, TypeVisitable};
+use rustc_middle::ty::{InternalSubsts, SubstsRef};
use rustc_session::lint;
+use rustc_span::def_id::DefId;
use rustc_span::def_id::LocalDefId;
use rustc_span::lev_distance::{
find_best_match_for_name_with_substrings, lev_distance_with_substrings,
@@ -253,7 +252,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
/// would result in an error (basically, the same criteria we
/// would use to decide if a method is a plausible fit for
/// ambiguity purposes).
- #[instrument(level = "debug", skip(self, scope_expr_id))]
+ #[instrument(level = "debug", skip(self, candidate_filter))]
pub fn probe_for_return_type(
&self,
span: Span,
@@ -261,11 +260,8 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
return_type: Ty<'tcx>,
self_ty: Ty<'tcx>,
scope_expr_id: hir::HirId,
+ candidate_filter: impl Fn(&ty::AssocItem) -> bool,
) -> Vec<ty::AssocItem> {
- debug!(
- "probe(self_ty={:?}, return_type={}, scope_expr_id={})",
- self_ty, return_type, scope_expr_id
- );
let method_names = self
.probe_op(
span,
@@ -276,7 +272,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
self_ty,
scope_expr_id,
ProbeScope::AllTraits,
- |probe_cx| Ok(probe_cx.candidate_method_names()),
+ |probe_cx| Ok(probe_cx.candidate_method_names(candidate_filter)),
)
.unwrap_or_default();
method_names
@@ -299,7 +295,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
.collect()
}
- #[instrument(level = "debug", skip(self, scope_expr_id))]
+ #[instrument(level = "debug", skip(self))]
pub fn probe_for_name(
&self,
span: Span,
@@ -310,10 +306,6 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
scope_expr_id: hir::HirId,
scope: ProbeScope,
) -> PickResult<'tcx> {
- debug!(
- "probe(self_ty={:?}, item_name={}, scope_expr_id={})",
- self_ty, item_name, scope_expr_id
- );
self.probe_op(
span,
mode,
@@ -417,9 +409,8 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
lint::builtin::TYVAR_BEHIND_RAW_POINTER,
scope_expr_id,
span,
- |lint| {
- lint.build("type annotations needed").emit();
- },
+ "type annotations needed",
+ |lint| lint,
);
}
} else {
@@ -481,69 +472,65 @@ fn method_autoderef_steps<'tcx>(
) -> MethodAutoderefStepsResult<'tcx> {
debug!("method_autoderef_steps({:?})", goal);
- tcx.infer_ctxt().enter_with_canonical(DUMMY_SP, &goal, |ref infcx, goal, inference_vars| {
- let ParamEnvAnd { param_env, value: self_ty } = goal;
-
- let mut autoderef =
- Autoderef::new(infcx, param_env, hir::CRATE_HIR_ID, DUMMY_SP, self_ty, DUMMY_SP)
- .include_raw_pointers()
- .silence_errors();
- let mut reached_raw_pointer = false;
- let mut steps: Vec<_> = autoderef
- .by_ref()
- .map(|(ty, d)| {
- let step = CandidateStep {
- self_ty: infcx.make_query_response_ignoring_pending_obligations(
- inference_vars.clone(),
- ty,
- ),
- autoderefs: d,
- from_unsafe_deref: reached_raw_pointer,
- unsize: false,
- };
- if let ty::RawPtr(_) = ty.kind() {
- // all the subsequent steps will be from_unsafe_deref
- reached_raw_pointer = true;
- }
- step
- })
- .collect();
-
- let final_ty = autoderef.final_ty(true);
- let opt_bad_ty = match final_ty.kind() {
- ty::Infer(ty::TyVar(_)) | ty::Error(_) => Some(MethodAutoderefBadTy {
- reached_raw_pointer,
- ty: infcx
- .make_query_response_ignoring_pending_obligations(inference_vars, final_ty),
- }),
- ty::Array(elem_ty, _) => {
- let dereferences = steps.len() - 1;
-
- steps.push(CandidateStep {
- self_ty: infcx.make_query_response_ignoring_pending_obligations(
- inference_vars,
- infcx.tcx.mk_slice(*elem_ty),
- ),
- autoderefs: dereferences,
- // this could be from an unsafe deref if we had
- // a *mut/const [T; N]
- from_unsafe_deref: reached_raw_pointer,
- unsize: true,
- });
-
- None
+ let (ref infcx, goal, inference_vars) = tcx.infer_ctxt().build_with_canonical(DUMMY_SP, &goal);
+ let ParamEnvAnd { param_env, value: self_ty } = goal;
+
+ let mut autoderef =
+ Autoderef::new(infcx, param_env, hir::CRATE_HIR_ID, DUMMY_SP, self_ty, DUMMY_SP)
+ .include_raw_pointers()
+ .silence_errors();
+ let mut reached_raw_pointer = false;
+ let mut steps: Vec<_> = autoderef
+ .by_ref()
+ .map(|(ty, d)| {
+ let step = CandidateStep {
+ self_ty: infcx
+ .make_query_response_ignoring_pending_obligations(inference_vars.clone(), ty),
+ autoderefs: d,
+ from_unsafe_deref: reached_raw_pointer,
+ unsize: false,
+ };
+ if let ty::RawPtr(_) = ty.kind() {
+ // all the subsequent steps will be from_unsafe_deref
+ reached_raw_pointer = true;
}
- _ => None,
- };
-
- debug!("method_autoderef_steps: steps={:?} opt_bad_ty={:?}", steps, opt_bad_ty);
+ step
+ })
+ .collect();
+
+ let final_ty = autoderef.final_ty(true);
+ let opt_bad_ty = match final_ty.kind() {
+ ty::Infer(ty::TyVar(_)) | ty::Error(_) => Some(MethodAutoderefBadTy {
+ reached_raw_pointer,
+ ty: infcx.make_query_response_ignoring_pending_obligations(inference_vars, final_ty),
+ }),
+ ty::Array(elem_ty, _) => {
+ let dereferences = steps.len() - 1;
+
+ steps.push(CandidateStep {
+ self_ty: infcx.make_query_response_ignoring_pending_obligations(
+ inference_vars,
+ infcx.tcx.mk_slice(*elem_ty),
+ ),
+ autoderefs: dereferences,
+ // this could be from an unsafe deref if we had
+ // a *mut/const [T; N]
+ from_unsafe_deref: reached_raw_pointer,
+ unsize: true,
+ });
- MethodAutoderefStepsResult {
- steps: tcx.arena.alloc_from_iter(steps),
- opt_bad_ty: opt_bad_ty.map(|ty| &*tcx.arena.alloc(ty)),
- reached_recursion_limit: autoderef.reached_recursion_limit(),
+ None
}
- })
+ _ => None,
+ };
+
+ debug!("method_autoderef_steps: steps={:?} opt_bad_ty={:?}", steps, opt_bad_ty);
+
+ MethodAutoderefStepsResult {
+ steps: tcx.arena.alloc_from_iter(steps),
+ opt_bad_ty: opt_bad_ty.map(|ty| &*tcx.arena.alloc(ty)),
+ reached_recursion_limit: autoderef.reached_recursion_limit(),
+ }
}
impl<'a, 'tcx> ProbeContext<'a, 'tcx> {
@@ -980,12 +967,16 @@ impl<'a, 'tcx> ProbeContext<'a, 'tcx> {
}
}
- fn candidate_method_names(&self) -> Vec<Ident> {
+ fn candidate_method_names(
+ &self,
+ candidate_filter: impl Fn(&ty::AssocItem) -> bool,
+ ) -> Vec<Ident> {
let mut set = FxHashSet::default();
let mut names: Vec<_> = self
.inherent_candidates
.iter()
.chain(&self.extension_candidates)
+ .filter(|candidate| candidate_filter(&candidate.item))
.filter(|candidate| {
if let Some(return_ty) = self.return_type {
self.matches_return_type(&candidate.item, None, return_ty)
@@ -1366,24 +1357,24 @@ impl<'a, 'tcx> ProbeContext<'a, 'tcx> {
stable_pick: &Pick<'_>,
unstable_candidates: &[(Candidate<'tcx>, Symbol)],
) {
+ let def_kind = stable_pick.item.kind.as_def_kind();
self.tcx.struct_span_lint_hir(
lint::builtin::UNSTABLE_NAME_COLLISIONS,
self.scope_expr_id,
self.span,
+ format!(
+ "{} {} with this name may be added to the standard library in the future",
+ def_kind.article(),
+ def_kind.descr(stable_pick.item.def_id),
+ ),
|lint| {
- let def_kind = stable_pick.item.kind.as_def_kind();
- let mut diag = lint.build(&format!(
- "{} {} with this name may be added to the standard library in the future",
- def_kind.article(),
- def_kind.descr(stable_pick.item.def_id),
- ));
match (stable_pick.item.kind, stable_pick.item.container) {
(ty::AssocKind::Fn, _) => {
// FIXME: This should be a `span_suggestion` instead of `help`
// However `self.span` only
// highlights the method name, so we can't use it. Also consider reusing
// the code from `report_method_error()`.
- diag.help(&format!(
+ lint.help(&format!(
"call with fully qualified syntax `{}(...)` to keep using the current \
method",
self.tcx.def_path_str(stable_pick.item.def_id),
@@ -1391,7 +1382,7 @@ impl<'a, 'tcx> ProbeContext<'a, 'tcx> {
}
(ty::AssocKind::Const, ty::AssocItemContainer::TraitContainer) => {
let def_id = stable_pick.item.container_id(self.tcx);
- diag.span_suggestion(
+ lint.span_suggestion(
self.span,
"use the fully qualified path to the associated const",
format!(
@@ -1407,7 +1398,7 @@ impl<'a, 'tcx> ProbeContext<'a, 'tcx> {
}
if self.tcx.sess.is_nightly_build() {
for (candidate, feature) in unstable_candidates {
- diag.help(&format!(
+ lint.help(&format!(
"add `#![feature({})]` to the crate attributes to enable `{}`",
feature,
self.tcx.def_path_str(candidate.item.def_id),
@@ -1415,7 +1406,7 @@ impl<'a, 'tcx> ProbeContext<'a, 'tcx> {
}
}
- diag.emit();
+ lint
},
);
}
@@ -1514,8 +1505,11 @@ impl<'a, 'tcx> ProbeContext<'a, 'tcx> {
traits::normalize(selcx, self.param_env, cause.clone(), impl_bounds);
// Convert the bounds into obligations.
- let impl_obligations =
- traits::predicates_for_generics(cause, self.param_env, impl_bounds);
+ let impl_obligations = traits::predicates_for_generics(
+ move |_, _| cause.clone(),
+ self.param_env,
+ impl_bounds,
+ );
let candidate_obligations = impl_obligations
.chain(norm_obligations.into_iter())
@@ -1700,7 +1694,7 @@ impl<'a, 'tcx> ProbeContext<'a, 'tcx> {
pcx.allow_similar_names = true;
pcx.assemble_inherent_candidates();
- let method_names = pcx.candidate_method_names();
+ let method_names = pcx.candidate_method_names(|_| true);
pcx.allow_similar_names = false;
let applicable_close_candidates: Vec<ty::AssocItem> = method_names
.iter()
diff --git a/compiler/rustc_typeck/src/check/method/suggest.rs b/compiler/rustc_hir_typeck/src/method/suggest.rs
index c92b93cbc..6c21ed902 100644
--- a/compiler/rustc_typeck/src/check/method/suggest.rs
+++ b/compiler/rustc_hir_typeck/src/method/suggest.rs
@@ -1,7 +1,9 @@
//! Give useful errors and suggestions to users when an item can't be
//! found or is otherwise invalid.
-use crate::check::FnCtxt;
+use crate::errors;
+use crate::FnCtxt;
+use rustc_ast::ast::Mutability;
use rustc_data_structures::fx::{FxHashMap, FxHashSet};
use rustc_errors::{
pluralize, struct_span_err, Applicability, Diagnostic, DiagnosticBuilder, ErrorGuaranteed,
@@ -16,12 +18,12 @@ use rustc_infer::infer::type_variable::{TypeVariableOrigin, TypeVariableOriginKi
use rustc_middle::traits::util::supertraits;
use rustc_middle::ty::fast_reject::{simplify_type, TreatParams};
use rustc_middle::ty::print::with_crate_prefix;
-use rustc_middle::ty::ToPolyTraitRef;
use rustc_middle::ty::{self, DefIdTree, ToPredicate, Ty, TyCtxt, TypeVisitable};
+use rustc_middle::ty::{IsSuggestable, ToPolyTraitRef};
use rustc_span::symbol::{kw, sym, Ident};
use rustc_span::Symbol;
use rustc_span::{lev_distance, source_map, ExpnKind, FileName, MacroKind, Span};
-use rustc_trait_selection::traits::error_reporting::on_unimplemented::InferCtxtExt as _;
+use rustc_trait_selection::traits::error_reporting::on_unimplemented::TypeErrCtxtExt as _;
use rustc_trait_selection::traits::query::evaluate_obligation::InferCtxtExt as _;
use rustc_trait_selection::traits::{
FulfillmentError, Obligation, ObligationCause, ObligationCauseCode, OnUnimplementedNote,
@@ -30,8 +32,8 @@ use rustc_trait_selection::traits::{
use std::cmp::Ordering;
use std::iter;
-use super::probe::{Mode, ProbeScope};
-use super::{super::suggest_call_constructor, CandidateSource, MethodError, NoMatchData};
+use super::probe::{AutorefOrPtrAdjustment, IsSuggestion, Mode, ProbeScope};
+use super::{CandidateSource, MethodError, NoMatchData};
impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
fn is_fn_ty(&self, ty: Ty<'tcx>, span: Span) -> bool {
@@ -95,7 +97,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
item_name: Ident,
source: SelfSource<'tcx>,
error: MethodError<'tcx>,
- args: Option<&'tcx [hir::Expr<'tcx>]>,
+ args: Option<(&'tcx hir::Expr<'tcx>, &'tcx [hir::Expr<'tcx>])>,
) -> Option<DiagnosticBuilder<'_, ErrorGuaranteed>> {
// Avoid suggestions when we don't know what's going on.
if rcvr_ty.references_error() {
@@ -104,7 +106,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
let report_candidates = |span: Span,
err: &mut Diagnostic,
- mut sources: Vec<CandidateSource>,
+ sources: &mut Vec<CandidateSource>,
sugg_span: Span| {
sources.sort();
sources.dedup();
@@ -246,7 +248,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
match error {
MethodError::NoMatch(NoMatchData {
- static_candidates: static_sources,
+ static_candidates: mut static_sources,
unsatisfied_predicates,
out_of_scope_traits,
lev_candidate,
@@ -270,7 +272,9 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
}
};
- if self.suggest_constraining_numerical_ty(
+ if self.suggest_wrapping_range_with_parens(
+ tcx, actual, source, span, item_name, &ty_str,
+ ) || self.suggest_constraining_numerical_ty(
tcx, actual, source, span, item_kind, item_name, &ty_str,
) {
return None;
@@ -363,44 +367,21 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
);
}
- if self.is_fn_ty(rcvr_ty, span) {
- if let SelfSource::MethodCall(expr) = source {
- let suggest = if let ty::FnDef(def_id, _) = rcvr_ty.kind() {
- if let Some(local_id) = def_id.as_local() {
- let hir_id = tcx.hir().local_def_id_to_hir_id(local_id);
- let node = tcx.hir().get(hir_id);
- let fields = node.tuple_fields();
- if let Some(fields) = fields
- && let Some(DefKind::Ctor(of, _)) = self.tcx.opt_def_kind(local_id) {
- Some((fields.len(), of))
- } else {
- None
- }
- } else {
- // The logic here isn't smart but `associated_item_def_ids`
- // doesn't work nicely on local.
- if let DefKind::Ctor(of, _) = tcx.def_kind(def_id) {
- let parent_def_id = tcx.parent(*def_id);
- Some((tcx.associated_item_def_ids(parent_def_id).len(), of))
- } else {
- None
- }
- }
- } else {
- None
- };
-
- // If the function is a tuple constructor, we recommend that they call it
- if let Some((fields, kind)) = suggest {
- suggest_call_constructor(expr.span, kind, fields, &mut err);
- } else {
- // General case
- err.span_label(
- expr.span,
- "this is a function, perhaps you wish to call it",
- );
- }
- }
+ if let SelfSource::MethodCall(rcvr_expr) = source {
+ self.suggest_fn_call(&mut err, rcvr_expr, rcvr_ty, |output_ty| {
+ let call_expr = self
+ .tcx
+ .hir()
+ .expect_expr(self.tcx.hir().get_parent_node(rcvr_expr.hir_id));
+ let probe = self.lookup_probe(
+ span,
+ item_name,
+ output_ty,
+ call_expr,
+ ProbeScope::AllTraits,
+ );
+ probe.is_ok()
+ });
}
let mut custom_span_label = false;
@@ -441,9 +422,9 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
err.help(&format!("try with `{}::{}`", ty_str, item_name,));
}
- report_candidates(span, &mut err, static_sources, sugg_span);
+ report_candidates(span, &mut err, &mut static_sources, sugg_span);
} else if static_sources.len() > 1 {
- report_candidates(span, &mut err, static_sources, sugg_span);
+ report_candidates(span, &mut err, &mut static_sources, sugg_span);
}
let mut bound_spans = vec![];
@@ -560,7 +541,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
bound_spans.push((self.tcx.def_span(def.did()), msg))
}
// Point at the trait object that couldn't satisfy the bound.
- ty::Dynamic(preds, _) => {
+ ty::Dynamic(preds, _, _) => {
for pred in preds.iter() {
match pred.skip_binder() {
ty::ExistentialPredicate::Trait(tr) => bound_spans
@@ -877,8 +858,9 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
// Avoid crashing.
return (None, None);
}
- let OnUnimplementedNote { message, label, .. } =
- self.on_unimplemented_note(trait_ref, &obligation);
+ let OnUnimplementedNote { message, label, .. } = self
+ .err_ctxt()
+ .on_unimplemented_note(trait_ref, &obligation);
(message, label)
})
.unwrap_or((None, None))
@@ -904,7 +886,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
}
}
- let label_span_not_found = |err: &mut DiagnosticBuilder<'_, _>| {
+ let label_span_not_found = |err: &mut Diagnostic| {
if unsatisfied_predicates.is_empty() {
err.span_label(span, format!("{item_kind} not found in `{ty_str}`"));
let is_string_or_ref_str = match actual.kind() {
@@ -1000,9 +982,13 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
label_span_not_found(&mut err);
}
- self.check_for_field_method(&mut err, source, span, actual, item_name);
+ // Don't suggest (for example) `expr.field.method()` if `expr.method()`
+ // doesn't exist due to unsatisfied predicates.
+ if unsatisfied_predicates.is_empty() {
+ self.check_for_field_method(&mut err, source, span, actual, item_name);
+ }
- self.check_for_unwrap_self(&mut err, source, span, actual, item_name);
+ self.check_for_inner_self(&mut err, source, span, actual, item_name);
bound_spans.sort();
bound_spans.dedup();
@@ -1017,10 +1003,11 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
span,
rcvr_ty,
item_name,
- args.map(|args| args.len()),
+ args.map(|(_, args)| args.len() + 1),
source,
out_of_scope_traits,
&unsatisfied_predicates,
+ &static_sources,
unsatisfied_bounds,
);
}
@@ -1062,23 +1049,38 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
// that had unsatisfied trait bounds
if unsatisfied_predicates.is_empty() {
let def_kind = lev_candidate.kind.as_def_kind();
- err.span_suggestion(
- span,
- &format!(
- "there is {} {} with a similar name",
- def_kind.article(),
- def_kind.descr(lev_candidate.def_id),
- ),
- lev_candidate.name,
- Applicability::MaybeIncorrect,
- );
+ // Methods are defined within the context of a struct and their first parameter is always self,
+ // which represents the instance of the struct the method is being called on
+ // Associated functions don’t take self as a parameter and
+ // they are not methods because they don’t have an instance of the struct to work with.
+ if def_kind == DefKind::AssocFn && lev_candidate.fn_has_self_parameter {
+ err.span_suggestion(
+ span,
+ &format!("there is a method with a similar name",),
+ lev_candidate.name,
+ Applicability::MaybeIncorrect,
+ );
+ } else {
+ err.span_suggestion(
+ span,
+ &format!(
+ "there is {} {} with a similar name",
+ def_kind.article(),
+ def_kind.descr(lev_candidate.def_id),
+ ),
+ lev_candidate.name,
+ Applicability::MaybeIncorrect,
+ );
+ }
}
}
+ self.check_for_deref_method(&mut err, source, rcvr_ty, item_name);
+
return Some(err);
}
- MethodError::Ambiguity(sources) => {
+ MethodError::Ambiguity(mut sources) => {
let mut err = struct_span_err!(
self.sess(),
item_name.span,
@@ -1087,7 +1089,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
);
err.span_label(item_name.span, format!("multiple `{}` found", item_name));
- report_candidates(span, &mut err, sources, sugg_span);
+ report_candidates(span, &mut err, &mut sources, sugg_span);
err.emit();
}
@@ -1150,7 +1152,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
rcvr_ty: Ty<'tcx>,
expr: &hir::Expr<'_>,
item_name: Ident,
- err: &mut DiagnosticBuilder<'tcx, ErrorGuaranteed>,
+ err: &mut Diagnostic,
) -> bool {
let tcx = self.tcx;
let field_receiver = self.autoderef(span, rcvr_ty).find_map(|(ty, _)| match ty.kind() {
@@ -1165,7 +1167,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
_ => None,
});
if let Some((field, field_ty)) = field_receiver {
- let scope = tcx.parent_module(self.body_id).to_def_id();
+ let scope = tcx.parent_module(self.body_id);
let is_accessible = field.vis.is_accessible_from(scope, tcx);
if is_accessible {
@@ -1204,6 +1206,89 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
false
}
+    /// Suggest wrapping the range in parentheses, for example:
+ /// when encountering `0..1.map(|i| i + 1)` suggest `(0..1).map(|i| i + 1)`.
+ fn suggest_wrapping_range_with_parens(
+ &self,
+ tcx: TyCtxt<'tcx>,
+ actual: Ty<'tcx>,
+ source: SelfSource<'tcx>,
+ span: Span,
+ item_name: Ident,
+ ty_str: &str,
+ ) -> bool {
+ if let SelfSource::MethodCall(expr) = source {
+ for (_, parent) in tcx.hir().parent_iter(expr.hir_id).take(5) {
+ if let Node::Expr(parent_expr) = parent {
+ let lang_item = match parent_expr.kind {
+ ExprKind::Struct(ref qpath, _, _) => match **qpath {
+ QPath::LangItem(LangItem::Range, ..) => Some(LangItem::Range),
+ QPath::LangItem(LangItem::RangeTo, ..) => Some(LangItem::RangeTo),
+ QPath::LangItem(LangItem::RangeToInclusive, ..) => {
+ Some(LangItem::RangeToInclusive)
+ }
+ _ => None,
+ },
+ ExprKind::Call(ref func, _) => match func.kind {
+ // `..=` desugars into `::std::ops::RangeInclusive::new(...)`.
+ ExprKind::Path(QPath::LangItem(LangItem::RangeInclusiveNew, ..)) => {
+ Some(LangItem::RangeInclusiveStruct)
+ }
+ _ => None,
+ },
+ _ => None,
+ };
+
+ if lang_item.is_none() {
+ continue;
+ }
+
+ let span_included = match parent_expr.kind {
+ hir::ExprKind::Struct(_, eps, _) => {
+ eps.len() > 0 && eps.last().map_or(false, |ep| ep.span.contains(span))
+ }
+ // `..=` desugars into `::std::ops::RangeInclusive::new(...)`.
+ hir::ExprKind::Call(ref func, ..) => func.span.contains(span),
+ _ => false,
+ };
+
+ if !span_included {
+ continue;
+ }
+
+ let range_def_id = self.tcx.require_lang_item(lang_item.unwrap(), None);
+ let range_ty =
+ self.tcx.bound_type_of(range_def_id).subst(self.tcx, &[actual.into()]);
+
+ let pick = self.probe_for_name(
+ span,
+ Mode::MethodCall,
+ item_name,
+ IsSuggestion(true),
+ range_ty,
+ expr.hir_id,
+ ProbeScope::AllTraits,
+ );
+ if pick.is_ok() {
+ let range_span = parent_expr.span.with_hi(expr.span.hi());
+ tcx.sess.emit_err(errors::MissingParentheseInRange {
+ span,
+ ty_str: ty_str.to_string(),
+ method_name: item_name.as_str().to_string(),
+ add_missing_parentheses: Some(errors::AddMissingParenthesesInRange {
+ func_name: item_name.name.as_str().to_string(),
+ left: range_span.shrink_to_lo(),
+ right: range_span.shrink_to_hi(),
+ }),
+ });
+ return true;
+ }
+ }
+ }
+ }
+ false
+ }
+
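
The doc comment's example comes from operator precedence: a method call binds tighter than `..`. A minimal sketch of the fixed form the suggestion produces:

```rust
fn main() {
    // `.` binds tighter than `..`, so `0..10.map(...)` is parsed as
    // `0..(10.map(...))` and fails because integers have no `map` method.
    // The suggestion above rewrites it with parentheses around the range:
    let doubled: Vec<i32> = (0..10).map(|i| i * 2).collect();
    assert_eq!(doubled.len(), 10);
    assert_eq!(doubled[3], 6);
}
```
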
fn suggest_constraining_numerical_ty(
&self,
tcx: TyCtxt<'tcx>,
@@ -1266,7 +1351,6 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
// If this is a floating point literal that ends with '.',
// get rid of it to stop this from becoming a member access.
let snippet = snippet.strip_suffix('.').unwrap_or(&snippet);
-
err.span_suggestion(
lit.span,
&format!(
@@ -1282,7 +1366,6 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
// local binding
if let hir::def::Res::Local(hir_id) = path.res {
let span = tcx.hir().span(hir_id);
- let snippet = tcx.sess.source_map().span_to_snippet(span);
let filename = tcx.sess.source_map().span_to_filename(span);
let parent_node =
@@ -1292,7 +1375,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
concrete_type,
);
- match (filename, parent_node, snippet) {
+ match (filename, parent_node) {
(
FileName::Real(_),
Node::Local(hir::Local {
@@ -1300,14 +1383,14 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
ty,
..
}),
- Ok(ref snippet),
) => {
+ let type_span = ty.map(|ty| ty.span.with_lo(span.hi())).unwrap_or(span.shrink_to_hi());
err.span_suggestion(
// account for `let x: _ = 42;`
- // ^^^^
- span.to(ty.as_ref().map(|ty| ty.span).unwrap_or(span)),
+ // ^^^
+ type_span,
&msg,
- format!("{}: {}", snippet, concrete_type),
+ format!(": {concrete_type}"),
Applicability::MaybeIncorrect,
);
}
@@ -1327,55 +1410,82 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
fn check_for_field_method(
&self,
- err: &mut DiagnosticBuilder<'tcx, ErrorGuaranteed>,
+ err: &mut Diagnostic,
source: SelfSource<'tcx>,
span: Span,
actual: Ty<'tcx>,
item_name: Ident,
) {
if let SelfSource::MethodCall(expr) = source
- && let Some((fields, substs)) = self.get_field_candidates(span, actual)
+ && let mod_id = self.tcx.parent_module(expr.hir_id).to_def_id()
+ && let Some((fields, substs)) =
+ self.get_field_candidates_considering_privacy(span, actual, mod_id)
{
let call_expr = self.tcx.hir().expect_expr(self.tcx.hir().get_parent_node(expr.hir_id));
- for candidate_field in fields.iter() {
- if let Some(field_path) = self.check_for_nested_field_satisfying(
- span,
- &|_, field_ty| {
- self.lookup_probe(
- span,
- item_name,
- field_ty,
- call_expr,
- ProbeScope::AllTraits,
- )
- .is_ok()
- },
- candidate_field,
- substs,
- vec![],
- self.tcx.parent_module(expr.hir_id).to_def_id(),
- ) {
- let field_path_str = field_path
+
+ let lang_items = self.tcx.lang_items();
+ let never_mention_traits = [
+ lang_items.clone_trait(),
+ lang_items.deref_trait(),
+ lang_items.deref_mut_trait(),
+ self.tcx.get_diagnostic_item(sym::AsRef),
+ self.tcx.get_diagnostic_item(sym::AsMut),
+ self.tcx.get_diagnostic_item(sym::Borrow),
+ self.tcx.get_diagnostic_item(sym::BorrowMut),
+ ];
+ let candidate_fields: Vec<_> = fields
+ .filter_map(|candidate_field| {
+ self.check_for_nested_field_satisfying(
+ span,
+ &|_, field_ty| {
+ self.lookup_probe(
+ span,
+ item_name,
+ field_ty,
+ call_expr,
+ ProbeScope::TraitsInScope,
+ )
+ .map_or(false, |pick| {
+ !never_mention_traits
+ .iter()
+ .flatten()
+ .any(|def_id| self.tcx.parent(pick.item.def_id) == *def_id)
+ })
+ },
+ candidate_field,
+ substs,
+ vec![],
+ mod_id,
+ )
+ })
+ .map(|field_path| {
+ field_path
.iter()
.map(|id| id.name.to_ident_string())
.collect::<Vec<String>>()
- .join(".");
- debug!("field_path_str: {:?}", field_path_str);
-
- err.span_suggestion_verbose(
- item_name.span.shrink_to_lo(),
- "one of the expressions' fields has a method of the same name",
- format!("{field_path_str}."),
- Applicability::MaybeIncorrect,
- );
- }
+ .join(".")
+ })
+ .collect();
+
+ let len = candidate_fields.len();
+ if len > 0 {
+ err.span_suggestions(
+ item_name.span.shrink_to_lo(),
+ format!(
+ "{} of the expressions' fields {} a method of the same name",
+ if len > 1 { "some" } else { "one" },
+ if len > 1 { "have" } else { "has" },
+ ),
+ candidate_fields.iter().map(|path| format!("{path}.")),
+ Applicability::MaybeIncorrect,
+ );
}
}
}
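An illustrative (hypothetical) shape of code that triggers the field-method suggestion:

    struct Engine;
    impl Engine {
        fn start(&self) {}
    }
    struct Car {
        engine: Engine,
    }
    fn drive(car: &Car) {
        // `car.start()` would fail; the suggestion lists `engine.` as a prefix,
        // i.e. the working call below.
        car.engine.start();
    }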
- fn check_for_unwrap_self(
+ fn check_for_inner_self(
&self,
- err: &mut DiagnosticBuilder<'tcx, ErrorGuaranteed>,
+ err: &mut Diagnostic,
source: SelfSource<'tcx>,
span: Span,
actual: Ty<'tcx>,
@@ -1386,81 +1496,168 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
let call_expr = tcx.hir().expect_expr(tcx.hir().get_parent_node(expr.hir_id));
let ty::Adt(kind, substs) = actual.kind() else { return; };
- if !kind.is_enum() {
- return;
- }
+ match kind.adt_kind() {
+ ty::AdtKind::Enum => {
+ let matching_variants: Vec<_> = kind
+ .variants()
+ .iter()
+ .flat_map(|variant| {
+ let [field] = &variant.fields[..] else { return None; };
+ let field_ty = field.ty(tcx, substs);
+
+ // Skip `_`, since that'll just lead to ambiguity.
+ if self.resolve_vars_if_possible(field_ty).is_ty_var() {
+ return None;
+ }
- let matching_variants: Vec<_> = kind
- .variants()
- .iter()
- .flat_map(|variant| {
- let [field] = &variant.fields[..] else { return None; };
- let field_ty = field.ty(tcx, substs);
+ self.lookup_probe(
+ span,
+ item_name,
+ field_ty,
+ call_expr,
+ ProbeScope::TraitsInScope,
+ )
+ .ok()
+ .map(|pick| (variant, field, pick))
+ })
+ .collect();
+
+ let ret_ty_matches = |diagnostic_item| {
+ if let Some(ret_ty) = self
+ .ret_coercion
+ .as_ref()
+ .map(|c| self.resolve_vars_if_possible(c.borrow().expected_ty()))
+ && let ty::Adt(kind, _) = ret_ty.kind()
+ && tcx.get_diagnostic_item(diagnostic_item) == Some(kind.did())
+ {
+ true
+ } else {
+ false
+ }
+ };
- // Skip `_`, since that'll just lead to ambiguity.
- if self.resolve_vars_if_possible(field_ty).is_ty_var() {
- return None;
+ match &matching_variants[..] {
+ [(_, field, pick)] => {
+ let self_ty = field.ty(tcx, substs);
+ err.span_note(
+ tcx.def_span(pick.item.def_id),
+ &format!("the method `{item_name}` exists on the type `{self_ty}`"),
+ );
+ let (article, kind, variant, question) =
+ if tcx.is_diagnostic_item(sym::Result, kind.did()) {
+ ("a", "Result", "Err", ret_ty_matches(sym::Result))
+ } else if tcx.is_diagnostic_item(sym::Option, kind.did()) {
+ ("an", "Option", "None", ret_ty_matches(sym::Option))
+ } else {
+ return;
+ };
+ if question {
+ err.span_suggestion_verbose(
+ expr.span.shrink_to_hi(),
+ format!(
+ "use the `?` operator to extract the `{self_ty}` value, propagating \
+ {article} `{kind}::{variant}` value to the caller"
+ ),
+ "?",
+ Applicability::MachineApplicable,
+ );
+ } else {
+ err.span_suggestion_verbose(
+ expr.span.shrink_to_hi(),
+ format!(
+ "consider using `{kind}::expect` to unwrap the `{self_ty}` value, \
+ panicking if the value is {article} `{kind}::{variant}`"
+ ),
+ ".expect(\"REASON\")",
+ Applicability::HasPlaceholders,
+ );
+ }
+ }
+ // FIXME(compiler-errors): Support suggestions for other matching enum variants
+ _ => {}
}
-
- self.lookup_probe(span, item_name, field_ty, call_expr, ProbeScope::AllTraits)
- .ok()
- .map(|pick| (variant, field, pick))
- })
- .collect();
-
- let ret_ty_matches = |diagnostic_item| {
- if let Some(ret_ty) = self
- .ret_coercion
- .as_ref()
- .map(|c| self.resolve_vars_if_possible(c.borrow().expected_ty()))
- && let ty::Adt(kind, _) = ret_ty.kind()
- && tcx.get_diagnostic_item(diagnostic_item) == Some(kind.did())
- {
- true
- } else {
- false
}
- };
+            // Handle wrapper types (types that wrap or pretend to wrap another type):
+            // perhaps the method was meant to be called on the inner type?
+ ty::AdtKind::Struct | ty::AdtKind::Union => {
+ let [first] = ***substs else { return; };
+ let ty::GenericArgKind::Type(ty) = first.unpack() else { return; };
+ let Ok(pick) = self.lookup_probe(
+ span,
+ item_name,
+ ty,
+ call_expr,
+ ProbeScope::TraitsInScope,
+ ) else { return; };
- match &matching_variants[..] {
- [(_, field, pick)] => {
- let self_ty = field.ty(tcx, substs);
- err.span_note(
- tcx.def_span(pick.item.def_id),
- &format!("the method `{item_name}` exists on the type `{self_ty}`"),
- );
- let (article, kind, variant, question) =
- if Some(kind.did()) == tcx.get_diagnostic_item(sym::Result) {
- ("a", "Result", "Err", ret_ty_matches(sym::Result))
- } else if Some(kind.did()) == tcx.get_diagnostic_item(sym::Option) {
- ("an", "Option", "None", ret_ty_matches(sym::Option))
- } else {
- return;
+ let name = self.ty_to_value_string(actual);
+ let inner_id = kind.did();
+ let mutable = if let Some(AutorefOrPtrAdjustment::Autoref { mutbl, .. }) =
+ pick.autoref_or_ptr_adjustment
+ {
+ Some(mutbl)
+ } else {
+ None
+ };
+
+ if tcx.is_diagnostic_item(sym::LocalKey, inner_id) {
+ err.help("use `with` or `try_with` to access thread local storage");
+ } else if Some(kind.did()) == tcx.lang_items().maybe_uninit() {
+ err.help(format!(
+ "if this `{name}` has been initialized, \
+ use one of the `assume_init` methods to access the inner value"
+ ));
+ } else if tcx.is_diagnostic_item(sym::RefCell, inner_id) {
+ let (suggestion, borrow_kind, panic_if) = match mutable {
+ Some(Mutability::Not) => (".borrow()", "borrow", "a mutable borrow exists"),
+ Some(Mutability::Mut) => {
+ (".borrow_mut()", "mutably borrow", "any borrows exist")
+ }
+ None => return,
};
- if question {
err.span_suggestion_verbose(
expr.span.shrink_to_hi(),
format!(
- "use the `?` operator to extract the `{self_ty}` value, propagating \
- {article} `{kind}::{variant}` value to the caller"
+ "use `{suggestion}` to {borrow_kind} the `{ty}`, \
+ panicking if {panic_if}"
),
- "?",
- Applicability::MachineApplicable,
+ suggestion,
+ Applicability::MaybeIncorrect,
);
- } else {
+ } else if tcx.is_diagnostic_item(sym::Mutex, inner_id) {
err.span_suggestion_verbose(
expr.span.shrink_to_hi(),
format!(
- "consider using `{kind}::expect` to unwrap the `{self_ty}` value, \
- panicking if the value is {article} `{kind}::{variant}`"
+ "use `.lock().unwrap()` to borrow the `{ty}`, \
+ blocking the current thread until it can be acquired"
),
- ".expect(\"REASON\")",
- Applicability::HasPlaceholders,
+ ".lock().unwrap()",
+ Applicability::MaybeIncorrect,
);
- }
+ } else if tcx.is_diagnostic_item(sym::RwLock, inner_id) {
+ let (suggestion, borrow_kind) = match mutable {
+ Some(Mutability::Not) => (".read().unwrap()", "borrow"),
+ Some(Mutability::Mut) => (".write().unwrap()", "mutably borrow"),
+ None => return,
+ };
+ err.span_suggestion_verbose(
+ expr.span.shrink_to_hi(),
+ format!(
+ "use `{suggestion}` to {borrow_kind} the `{ty}`, \
+ blocking the current thread until it can be acquired"
+ ),
+ suggestion,
+ Applicability::MaybeIncorrect,
+ );
+ } else {
+ return;
+ };
+
+ err.span_note(
+ tcx.def_span(pick.item.def_id),
+ &format!("the method `{item_name}` exists on the type `{ty}`"),
+ );
}
- // FIXME(compiler-errors): Support suggestions for other matching enum variants
- _ => {}
}
}
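Two small, hypothetical examples of what the inner-self suggestions now cover, one for enums and one for wrapper structs:

    fn first_char(s: Option<String>) -> Option<char> {
        // Calling `s.chars()` directly fails; because the enclosing function also
        // returns `Option`, the `?` suggestion applies and gives this form.
        s?.chars().next()
    }

    use std::cell::RefCell;
    fn log_one(log: &RefCell<Vec<u32>>) {
        // For wrapper types such as `RefCell`, the new struct/union branch suggests
        // the matching accessor instead, here `.borrow_mut()`.
        log.borrow_mut().push(1);
    }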
@@ -1631,6 +1828,62 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
}
}
+ fn check_for_deref_method(
+ &self,
+ err: &mut Diagnostic,
+ self_source: SelfSource<'tcx>,
+ rcvr_ty: Ty<'tcx>,
+ item_name: Ident,
+ ) {
+ let SelfSource::QPath(ty) = self_source else { return; };
+ for (deref_ty, _) in self.autoderef(rustc_span::DUMMY_SP, rcvr_ty).skip(1) {
+ if let Ok(pick) = self.probe_for_name(
+ ty.span,
+ Mode::Path,
+ item_name,
+ IsSuggestion(true),
+ deref_ty,
+ ty.hir_id,
+ ProbeScope::TraitsInScope,
+ ) {
+ if deref_ty.is_suggestable(self.tcx, true)
+ // If this method receives `&self`, then the provided
+ // argument _should_ coerce, so it's valid to suggest
+ // just changing the path.
+ && pick.item.fn_has_self_parameter
+ && let Some(self_ty) =
+ self.tcx.fn_sig(pick.item.def_id).inputs().skip_binder().get(0)
+ && self_ty.is_ref()
+ {
+ let suggested_path = match deref_ty.kind() {
+ ty::Bool
+ | ty::Char
+ | ty::Int(_)
+ | ty::Uint(_)
+ | ty::Float(_)
+ | ty::Adt(_, _)
+ | ty::Str
+ | ty::Projection(_)
+ | ty::Param(_) => format!("{deref_ty}"),
+ _ => format!("<{deref_ty}>"),
+ };
+ err.span_suggestion_verbose(
+ ty.span,
+ format!("the function `{item_name}` is implemented on `{deref_ty}`"),
+ suggested_path,
+ Applicability::MaybeIncorrect,
+ );
+ } else {
+ err.span_note(
+ ty.span,
+ format!("the function `{item_name}` is implemented on `{deref_ty}`"),
+ );
+ }
+ return;
+ }
+ }
+ }
+
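A hedged example of the qualified-path case `check_for_deref_method` handles: associated-item resolution does not look through `Deref`, so a call like `String::to_lowercase(s)` fails, and the note points at the deref target:

    fn lowered(s: &String) -> String {
        // The working form after following the suggestion; `&String` coerces to
        // `&str` at the call site, which is why only the path needs to change.
        str::to_lowercase(s)
    }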
/// Print out the type for use in value namespace.
fn ty_to_value_string(&self, ty: Ty<'tcx>) -> String {
match ty.kind() {
@@ -1763,6 +2016,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
Option<ty::Predicate<'tcx>>,
Option<ObligationCause<'tcx>>,
)],
+ static_candidates: &[CandidateSource],
unsatisfied_bounds: bool,
) {
let mut alt_rcvr_sugg = false;
@@ -1877,6 +2131,16 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
None => true,
})
.filter(|info| {
+ // Static candidates are already implemented, and known not to work
+ // Do not suggest them again
+ static_candidates.iter().all(|sc| match *sc {
+ CandidateSource::Trait(def_id) => def_id != info.def_id,
+ CandidateSource::Impl(def_id) => {
+ self.tcx.trait_id_of_impl(def_id) != Some(info.def_id)
+ }
+ })
+ })
+ .filter(|info| {
// We approximate the coherence rules to only suggest
// traits that are legal to implement by requiring that
// either the type or trait is local. Multi-dispatch means
@@ -1999,7 +2263,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
Colon,
Nothing,
}
- let ast_generics = hir.get_generics(id.owner).unwrap();
+ let ast_generics = hir.get_generics(id.owner.def_id).unwrap();
let (sp, mut introducer) = if let Some(span) =
ast_generics.bounds_span_for_suggestions(def_id)
{
@@ -2158,6 +2422,60 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
}
}
+    /// Issue #102320: for `unwrap_or` with a closure as the argument, suggest `unwrap_or_else`.
+    /// FIXME: this currently does not work for suggesting `map_or_else`, see #102408.
+ pub(crate) fn suggest_else_fn_with_closure(
+ &self,
+ err: &mut Diagnostic,
+ expr: &hir::Expr<'_>,
+ found: Ty<'tcx>,
+ expected: Ty<'tcx>,
+ ) -> bool {
+ let Some((_def_id_or_name, output, _inputs)) = self.extract_callable_info(expr, found)
+ else { return false; };
+
+ if !self.can_coerce(output, expected) {
+ return false;
+ }
+
+ let parent = self.tcx.hir().get_parent_node(expr.hir_id);
+ if let Some(Node::Expr(call_expr)) = self.tcx.hir().find(parent) &&
+ let hir::ExprKind::MethodCall(
+ hir::PathSegment { ident: method_name, .. },
+ self_expr,
+ args,
+ ..,
+ ) = call_expr.kind &&
+ let Some(self_ty) = self.typeck_results.borrow().expr_ty_opt(self_expr) {
+ let new_name = Ident {
+ name: Symbol::intern(&format!("{}_else", method_name.as_str())),
+ span: method_name.span,
+ };
+ let probe = self.lookup_probe(
+ expr.span,
+ new_name,
+ self_ty,
+ self_expr,
+ ProbeScope::TraitsInScope,
+ );
+
+            // check the number of method arguments
+ if let Ok(pick) = probe &&
+ let fn_sig = self.tcx.fn_sig(pick.item.def_id) &&
+ let fn_args = fn_sig.skip_binder().inputs() &&
+ fn_args.len() == args.len() + 1 {
+ err.span_suggestion_verbose(
+ method_name.span.shrink_to_hi(),
+ &format!("try calling `{}` instead", new_name.name.as_str()),
+ "_else",
+ Applicability::MaybeIncorrect,
+ );
+ return true;
+ }
+ }
+ false
+ }
+
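A minimal sketch of the `unwrap_or` case from issue #102320 (names are illustrative):

    fn fallback(opt: Option<u32>) -> u32 {
        // `opt.unwrap_or(|| 5)` passes a closure where a value is expected; the new
        // suggestion appends `_else`, producing the call below.
        opt.unwrap_or_else(|| 5)
    }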
/// Checks whether there is a local type somewhere in the chain of
/// autoderefs of `rcvr_ty`.
fn type_derefs_to_local(
@@ -2232,7 +2550,7 @@ pub fn all_traits(tcx: TyCtxt<'_>) -> Vec<TraitInfo> {
fn print_disambiguation_help<'tcx>(
item_name: Ident,
- args: Option<&'tcx [hir::Expr<'tcx>]>,
+ args: Option<(&'tcx hir::Expr<'tcx>, &'tcx [hir::Expr<'tcx>])>,
err: &mut Diagnostic,
trait_name: String,
rcvr_ty: Ty<'_>,
@@ -2244,7 +2562,7 @@ fn print_disambiguation_help<'tcx>(
fn_has_self_parameter: bool,
) {
let mut applicability = Applicability::MachineApplicable;
- let (span, sugg) = if let (ty::AssocKind::Fn, Some(args)) = (kind, args) {
+ let (span, sugg) = if let (ty::AssocKind::Fn, Some((receiver, args))) = (kind, args) {
let args = format!(
"({}{})",
if rcvr_ty.is_region_ptr() {
@@ -2252,7 +2570,8 @@ fn print_disambiguation_help<'tcx>(
} else {
""
},
- args.iter()
+ std::iter::once(receiver)
+ .chain(args.iter())
.map(|arg| source_map.span_to_snippet(arg.span).unwrap_or_else(|_| {
applicability = Applicability::HasPlaceholders;
"_".to_owned()
diff --git a/compiler/rustc_hir_typeck/src/op.rs b/compiler/rustc_hir_typeck/src/op.rs
new file mode 100644
index 000000000..895739976
--- /dev/null
+++ b/compiler/rustc_hir_typeck/src/op.rs
@@ -0,0 +1,994 @@
+//! Code related to processing overloaded binary and unary operators.
+
+use super::method::MethodCallee;
+use super::{has_expected_num_generic_args, FnCtxt};
+use crate::Expectation;
+use rustc_ast as ast;
+use rustc_errors::{self, struct_span_err, Applicability, Diagnostic};
+use rustc_hir as hir;
+use rustc_infer::infer::type_variable::{TypeVariableOrigin, TypeVariableOriginKind};
+use rustc_infer::traits::ObligationCauseCode;
+use rustc_middle::ty::adjustment::{
+ Adjust, Adjustment, AllowTwoPhase, AutoBorrow, AutoBorrowMutability,
+};
+use rustc_middle::ty::print::with_no_trimmed_paths;
+use rustc_middle::ty::{self, DefIdTree, Ty, TyCtxt, TypeFolder, TypeSuperFoldable, TypeVisitable};
+use rustc_session::errors::ExprParenthesesNeeded;
+use rustc_span::source_map::Spanned;
+use rustc_span::symbol::{sym, Ident};
+use rustc_span::Span;
+use rustc_trait_selection::infer::InferCtxtExt;
+use rustc_trait_selection::traits::error_reporting::suggestions::TypeErrCtxtExt as _;
+use rustc_trait_selection::traits::{FulfillmentError, TraitEngine, TraitEngineExt};
+use rustc_type_ir::sty::TyKind::*;
+
+impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
+    /// Checks an `a <op>= b` expression.
+ pub fn check_binop_assign(
+ &self,
+ expr: &'tcx hir::Expr<'tcx>,
+ op: hir::BinOp,
+ lhs: &'tcx hir::Expr<'tcx>,
+ rhs: &'tcx hir::Expr<'tcx>,
+ expected: Expectation<'tcx>,
+ ) -> Ty<'tcx> {
+ let (lhs_ty, rhs_ty, return_ty) =
+ self.check_overloaded_binop(expr, lhs, rhs, op, IsAssign::Yes, expected);
+
+ let ty =
+ if !lhs_ty.is_ty_var() && !rhs_ty.is_ty_var() && is_builtin_binop(lhs_ty, rhs_ty, op) {
+ self.enforce_builtin_binop_types(lhs.span, lhs_ty, rhs.span, rhs_ty, op);
+ self.tcx.mk_unit()
+ } else {
+ return_ty
+ };
+
+ self.check_lhs_assignable(lhs, "E0067", op.span, |err| {
+ if let Some(lhs_deref_ty) = self.deref_once_mutably_for_diagnostic(lhs_ty) {
+ if self
+ .lookup_op_method(
+ lhs_deref_ty,
+ Some(rhs_ty),
+ Some(rhs),
+ Op::Binary(op, IsAssign::Yes),
+ expected,
+ )
+ .is_ok()
+ {
+ // If LHS += RHS is an error, but *LHS += RHS is successful, then we will have
+ // emitted a better suggestion during error handling in check_overloaded_binop.
+ if self
+ .lookup_op_method(
+ lhs_ty,
+ Some(rhs_ty),
+ Some(rhs),
+ Op::Binary(op, IsAssign::Yes),
+ expected,
+ )
+ .is_err()
+ {
+ err.downgrade_to_delayed_bug();
+ } else {
+ // Otherwise, it's valid to suggest dereferencing the LHS here.
+ err.span_suggestion_verbose(
+ lhs.span.shrink_to_lo(),
+ "consider dereferencing the left-hand side of this operation",
+ "*",
+ Applicability::MaybeIncorrect,
+ );
+ }
+ }
+ }
+ });
+
+ ty
+ }
+
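An example of the compound-assignment case the deref suggestion above addresses:

    fn add_one(x: &mut i32) {
        // `x += 1` is rejected because `&mut i32` does not implement `AddAssign<i32>`;
        // the suggestion inserts `*` on the left-hand side.
        *x += 1;
    }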
+ /// Checks a potentially overloaded binary operator.
+ pub fn check_binop(
+ &self,
+ expr: &'tcx hir::Expr<'tcx>,
+ op: hir::BinOp,
+ lhs_expr: &'tcx hir::Expr<'tcx>,
+ rhs_expr: &'tcx hir::Expr<'tcx>,
+ expected: Expectation<'tcx>,
+ ) -> Ty<'tcx> {
+ let tcx = self.tcx;
+
+ debug!(
+ "check_binop(expr.hir_id={}, expr={:?}, op={:?}, lhs_expr={:?}, rhs_expr={:?})",
+ expr.hir_id, expr, op, lhs_expr, rhs_expr
+ );
+
+ match BinOpCategory::from(op) {
+ BinOpCategory::Shortcircuit => {
+ // && and || are a simple case.
+ self.check_expr_coercable_to_type(lhs_expr, tcx.types.bool, None);
+ let lhs_diverges = self.diverges.get();
+ self.check_expr_coercable_to_type(rhs_expr, tcx.types.bool, None);
+
+                // Depending on the LHS' value, the RHS may never execute.
+ self.diverges.set(lhs_diverges);
+
+ tcx.types.bool
+ }
+ _ => {
+ // Otherwise, we always treat operators as if they are
+ // overloaded. This is the way to be most flexible w/r/t
+ // types that get inferred.
+ let (lhs_ty, rhs_ty, return_ty) = self.check_overloaded_binop(
+ expr,
+ lhs_expr,
+ rhs_expr,
+ op,
+ IsAssign::No,
+ expected,
+ );
+
+ // Supply type inference hints if relevant. Probably these
+ // hints should be enforced during select as part of the
+ // `consider_unification_despite_ambiguity` routine, but this
+                // is more convenient for now.
+ //
+ // The basic idea is to help type inference by taking
+ // advantage of things we know about how the impls for
+ // scalar types are arranged. This is important in a
+ // scenario like `1_u32 << 2`, because it lets us quickly
+ // deduce that the result type should be `u32`, even
+ // though we don't know yet what type 2 has and hence
+ // can't pin this down to a specific impl.
+ if !lhs_ty.is_ty_var()
+ && !rhs_ty.is_ty_var()
+ && is_builtin_binop(lhs_ty, rhs_ty, op)
+ {
+ let builtin_return_ty = self.enforce_builtin_binop_types(
+ lhs_expr.span,
+ lhs_ty,
+ rhs_expr.span,
+ rhs_ty,
+ op,
+ );
+ self.demand_suptype(expr.span, builtin_return_ty, return_ty);
+ }
+
+ return_ty
+ }
+ }
+ }
+
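A small illustration of the inference hint described in the comment above:

    fn shifted() -> u32 {
        // Because `<<` on integers is a builtin binop, the result type is pinned to
        // `u32` even though the literal `2` has not been given a concrete type yet.
        1_u32 << 2
    }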
+ fn enforce_builtin_binop_types(
+ &self,
+ lhs_span: Span,
+ lhs_ty: Ty<'tcx>,
+ rhs_span: Span,
+ rhs_ty: Ty<'tcx>,
+ op: hir::BinOp,
+ ) -> Ty<'tcx> {
+ debug_assert!(is_builtin_binop(lhs_ty, rhs_ty, op));
+
+ // Special-case a single layer of referencing, so that things like `5.0 + &6.0f32` work.
+ // (See https://github.com/rust-lang/rust/issues/57447.)
+ let (lhs_ty, rhs_ty) = (deref_ty_if_possible(lhs_ty), deref_ty_if_possible(rhs_ty));
+
+ let tcx = self.tcx;
+ match BinOpCategory::from(op) {
+ BinOpCategory::Shortcircuit => {
+ self.demand_suptype(lhs_span, tcx.types.bool, lhs_ty);
+ self.demand_suptype(rhs_span, tcx.types.bool, rhs_ty);
+ tcx.types.bool
+ }
+
+ BinOpCategory::Shift => {
+                // the result type is always the same as the LHS type
+ lhs_ty
+ }
+
+ BinOpCategory::Math | BinOpCategory::Bitwise => {
+                // the LHS, the RHS, and the result all have the same type
+ self.demand_suptype(rhs_span, lhs_ty, rhs_ty);
+ lhs_ty
+ }
+
+ BinOpCategory::Comparison => {
+                // the LHS, the RHS, and the result all have the same type
+ self.demand_suptype(rhs_span, lhs_ty, rhs_ty);
+ tcx.types.bool
+ }
+ }
+ }
+
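The single-layer dereference special case keeps mixed reference/value arithmetic working, as in the issue cited above:

    fn mixed() -> f32 {
        // `f32: Add<&f32>` exists in the standard library, and the deref special
        // case lets this expression count as a builtin operation as well.
        5.0 + &6.0f32
    }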
+ fn check_overloaded_binop(
+ &self,
+ expr: &'tcx hir::Expr<'tcx>,
+ lhs_expr: &'tcx hir::Expr<'tcx>,
+ rhs_expr: &'tcx hir::Expr<'tcx>,
+ op: hir::BinOp,
+ is_assign: IsAssign,
+ expected: Expectation<'tcx>,
+ ) -> (Ty<'tcx>, Ty<'tcx>, Ty<'tcx>) {
+ debug!(
+ "check_overloaded_binop(expr.hir_id={}, op={:?}, is_assign={:?})",
+ expr.hir_id, op, is_assign
+ );
+
+ let lhs_ty = match is_assign {
+ IsAssign::No => {
+ // Find a suitable supertype of the LHS expression's type, by coercing to
+ // a type variable, to pass as the `Self` to the trait, avoiding invariant
+ // trait matching creating lifetime constraints that are too strict.
+ // e.g., adding `&'a T` and `&'b T`, given `&'x T: Add<&'x T>`, will result
+ // in `&'a T <: &'x T` and `&'b T <: &'x T`, instead of `'a = 'b = 'x`.
+ let lhs_ty = self.check_expr(lhs_expr);
+ let fresh_var = self.next_ty_var(TypeVariableOrigin {
+ kind: TypeVariableOriginKind::MiscVariable,
+ span: lhs_expr.span,
+ });
+ self.demand_coerce(lhs_expr, lhs_ty, fresh_var, Some(rhs_expr), AllowTwoPhase::No)
+ }
+ IsAssign::Yes => {
+ // rust-lang/rust#52126: We have to use strict
+ // equivalence on the LHS of an assign-op like `+=`;
+ // overwritten or mutably-borrowed places cannot be
+ // coerced to a supertype.
+ self.check_expr(lhs_expr)
+ }
+ };
+ let lhs_ty = self.resolve_vars_with_obligations(lhs_ty);
+
+ // N.B., as we have not yet type-checked the RHS, we don't have the
+ // type at hand. Make a variable to represent it. The whole reason
+ // for this indirection is so that, below, we can check the expr
+ // using this variable as the expected type, which sometimes lets
+ // us do better coercions than we would be able to do otherwise,
+ // particularly for things like `String + &String`.
+ let rhs_ty_var = self.next_ty_var(TypeVariableOrigin {
+ kind: TypeVariableOriginKind::MiscVariable,
+ span: rhs_expr.span,
+ });
+
+ let result = self.lookup_op_method(
+ lhs_ty,
+ Some(rhs_ty_var),
+ Some(rhs_expr),
+ Op::Binary(op, is_assign),
+ expected,
+ );
+
+ // see `NB` above
+ let rhs_ty = self.check_expr_coercable_to_type(rhs_expr, rhs_ty_var, Some(lhs_expr));
+ let rhs_ty = self.resolve_vars_with_obligations(rhs_ty);
+
+ let return_ty = match result {
+ Ok(method) => {
+ let by_ref_binop = !op.node.is_by_value();
+ if is_assign == IsAssign::Yes || by_ref_binop {
+ if let ty::Ref(region, _, mutbl) = method.sig.inputs()[0].kind() {
+ let mutbl = match mutbl {
+ hir::Mutability::Not => AutoBorrowMutability::Not,
+ hir::Mutability::Mut => AutoBorrowMutability::Mut {
+ // Allow two-phase borrows for binops in initial deployment
+ // since they desugar to methods
+ allow_two_phase_borrow: AllowTwoPhase::Yes,
+ },
+ };
+ let autoref = Adjustment {
+ kind: Adjust::Borrow(AutoBorrow::Ref(*region, mutbl)),
+ target: method.sig.inputs()[0],
+ };
+ self.apply_adjustments(lhs_expr, vec![autoref]);
+ }
+ }
+ if by_ref_binop {
+ if let ty::Ref(region, _, mutbl) = method.sig.inputs()[1].kind() {
+ let mutbl = match mutbl {
+ hir::Mutability::Not => AutoBorrowMutability::Not,
+ hir::Mutability::Mut => AutoBorrowMutability::Mut {
+ // Allow two-phase borrows for binops in initial deployment
+ // since they desugar to methods
+ allow_two_phase_borrow: AllowTwoPhase::Yes,
+ },
+ };
+ let autoref = Adjustment {
+ kind: Adjust::Borrow(AutoBorrow::Ref(*region, mutbl)),
+ target: method.sig.inputs()[1],
+ };
+ // HACK(eddyb) Bypass checks due to reborrows being in
+ // some cases applied on the RHS, on top of which we need
+ // to autoref, which is not allowed by apply_adjustments.
+ // self.apply_adjustments(rhs_expr, vec![autoref]);
+ self.typeck_results
+ .borrow_mut()
+ .adjustments_mut()
+ .entry(rhs_expr.hir_id)
+ .or_default()
+ .push(autoref);
+ }
+ }
+ self.write_method_call(expr.hir_id, method);
+
+ method.sig.output()
+ }
+ // error types are considered "builtin"
+ Err(_) if lhs_ty.references_error() || rhs_ty.references_error() => self.tcx.ty_error(),
+ Err(errors) => {
+ let (_, trait_def_id) =
+ lang_item_for_op(self.tcx, Op::Binary(op, is_assign), op.span);
+ let missing_trait = trait_def_id
+ .map(|def_id| with_no_trimmed_paths!(self.tcx.def_path_str(def_id)));
+ let (mut err, output_def_id) = match is_assign {
+ IsAssign::Yes => {
+ let mut err = struct_span_err!(
+ self.tcx.sess,
+ expr.span,
+ E0368,
+ "binary assignment operation `{}=` cannot be applied to type `{}`",
+ op.node.as_str(),
+ lhs_ty,
+ );
+ err.span_label(
+ lhs_expr.span,
+ format!("cannot use `{}=` on type `{}`", op.node.as_str(), lhs_ty),
+ );
+ self.note_unmet_impls_on_type(&mut err, errors);
+ (err, None)
+ }
+ IsAssign::No => {
+ let message = match op.node {
+ hir::BinOpKind::Add => {
+ format!("cannot add `{rhs_ty}` to `{lhs_ty}`")
+ }
+ hir::BinOpKind::Sub => {
+ format!("cannot subtract `{rhs_ty}` from `{lhs_ty}`")
+ }
+ hir::BinOpKind::Mul => {
+ format!("cannot multiply `{lhs_ty}` by `{rhs_ty}`")
+ }
+ hir::BinOpKind::Div => {
+ format!("cannot divide `{lhs_ty}` by `{rhs_ty}`")
+ }
+ hir::BinOpKind::Rem => {
+ format!("cannot mod `{lhs_ty}` by `{rhs_ty}`")
+ }
+ hir::BinOpKind::BitAnd => {
+ format!("no implementation for `{lhs_ty} & {rhs_ty}`")
+ }
+ hir::BinOpKind::BitXor => {
+ format!("no implementation for `{lhs_ty} ^ {rhs_ty}`")
+ }
+ hir::BinOpKind::BitOr => {
+ format!("no implementation for `{lhs_ty} | {rhs_ty}`")
+ }
+ hir::BinOpKind::Shl => {
+ format!("no implementation for `{lhs_ty} << {rhs_ty}`")
+ }
+ hir::BinOpKind::Shr => {
+ format!("no implementation for `{lhs_ty} >> {rhs_ty}`")
+ }
+ _ => format!(
+ "binary operation `{}` cannot be applied to type `{}`",
+ op.node.as_str(),
+ lhs_ty
+ ),
+ };
+ let output_def_id = trait_def_id.and_then(|def_id| {
+ self.tcx
+ .associated_item_def_ids(def_id)
+ .iter()
+ .find(|item_def_id| {
+ self.tcx.associated_item(*item_def_id).name == sym::Output
+ })
+ .cloned()
+ });
+ let mut err = struct_span_err!(self.tcx.sess, op.span, E0369, "{message}");
+ if !lhs_expr.span.eq(&rhs_expr.span) {
+ err.span_label(lhs_expr.span, lhs_ty.to_string());
+ err.span_label(rhs_expr.span, rhs_ty.to_string());
+ }
+ self.note_unmet_impls_on_type(&mut err, errors);
+ (err, output_def_id)
+ }
+ };
+
+ let mut suggest_deref_binop = |lhs_deref_ty: Ty<'tcx>| {
+ if self
+ .lookup_op_method(
+ lhs_deref_ty,
+ Some(rhs_ty),
+ Some(rhs_expr),
+ Op::Binary(op, is_assign),
+ expected,
+ )
+ .is_ok()
+ {
+ let msg = &format!(
+ "`{}{}` can be used on `{}` if you dereference the left-hand side",
+ op.node.as_str(),
+ match is_assign {
+ IsAssign::Yes => "=",
+ IsAssign::No => "",
+ },
+ lhs_deref_ty,
+ );
+ err.span_suggestion_verbose(
+ lhs_expr.span.shrink_to_lo(),
+ msg,
+ "*",
+ rustc_errors::Applicability::MachineApplicable,
+ );
+ }
+ };
+
+ let is_compatible = |lhs_ty, rhs_ty| {
+ self.lookup_op_method(
+ lhs_ty,
+ Some(rhs_ty),
+ Some(rhs_expr),
+ Op::Binary(op, is_assign),
+ expected,
+ )
+ .is_ok()
+ };
+
+                // We should suggest `a + b` => `*a + b` if `a` is `Copy`, and suggest
+                // `a += b` => `*a += b` if `a` is a mutable reference.
+ if !op.span.can_be_used_for_suggestions() {
+ // Suppress suggestions when lhs and rhs are not in the same span as the error
+ } else if is_assign == IsAssign::Yes
+ && let Some(lhs_deref_ty) = self.deref_once_mutably_for_diagnostic(lhs_ty)
+ {
+ suggest_deref_binop(lhs_deref_ty);
+ } else if is_assign == IsAssign::No
+ && let Ref(_, lhs_deref_ty, _) = lhs_ty.kind()
+ {
+ if self.type_is_copy_modulo_regions(
+ self.param_env,
+ *lhs_deref_ty,
+ lhs_expr.span,
+ ) {
+ suggest_deref_binop(*lhs_deref_ty);
+ }
+ } else if self.suggest_fn_call(&mut err, lhs_expr, lhs_ty, |lhs_ty| {
+ is_compatible(lhs_ty, rhs_ty)
+ }) || self.suggest_fn_call(&mut err, rhs_expr, rhs_ty, |rhs_ty| {
+ is_compatible(lhs_ty, rhs_ty)
+ }) || self.suggest_two_fn_call(
+ &mut err,
+ rhs_expr,
+ rhs_ty,
+ lhs_expr,
+ lhs_ty,
+ |lhs_ty, rhs_ty| is_compatible(lhs_ty, rhs_ty),
+ ) {
+ // Cool
+ }
+
+ if let Some(missing_trait) = missing_trait {
+ if op.node == hir::BinOpKind::Add
+ && self.check_str_addition(
+ lhs_expr, rhs_expr, lhs_ty, rhs_ty, &mut err, is_assign, op,
+ )
+ {
+                        // This arm is intentionally empty: reaching it means we handled
+                        // string concatenation (e.g., "Hello " + "World!") above, so we
+                        // don't want the note in the else clause to be emitted.
+ } else if lhs_ty.has_non_region_param() {
+ // Look for a TraitPredicate in the Fulfillment errors,
+ // and use it to generate a suggestion.
+ //
+ // Note that lookup_op_method must be called again but
+ // with a specific rhs_ty instead of a placeholder so
+ // the resulting predicate generates a more specific
+ // suggestion for the user.
+ let errors = self
+ .lookup_op_method(
+ lhs_ty,
+ Some(rhs_ty),
+ Some(rhs_expr),
+ Op::Binary(op, is_assign),
+ expected,
+ )
+ .unwrap_err();
+ if !errors.is_empty() {
+ for error in errors {
+ if let Some(trait_pred) =
+ error.obligation.predicate.to_opt_poly_trait_pred()
+ {
+ let output_associated_item = match error.obligation.cause.code()
+ {
+ ObligationCauseCode::BinOp {
+ output_ty: Some(output_ty),
+ ..
+ } => {
+ // Make sure that we're attaching `Output = ..` to the right trait predicate
+ if let Some(output_def_id) = output_def_id
+ && let Some(trait_def_id) = trait_def_id
+ && self.tcx.parent(output_def_id) == trait_def_id
+ {
+ Some(("Output", *output_ty))
+ } else {
+ None
+ }
+ }
+ _ => None,
+ };
+
+ self.err_ctxt().suggest_restricting_param_bound(
+ &mut err,
+ trait_pred,
+ output_associated_item,
+ self.body_id,
+ );
+ }
+ }
+ } else {
+ // When we know that a missing bound is responsible, we don't show
+ // this note as it is redundant.
+ err.note(&format!(
+ "the trait `{missing_trait}` is not implemented for `{lhs_ty}`"
+ ));
+ }
+ }
+ }
+ err.emit();
+ self.tcx.ty_error()
+ }
+ };
+
+ (lhs_ty, rhs_ty, return_ty)
+ }
+
+ /// Provide actionable suggestions when trying to add two strings with incorrect types,
+ /// like `&str + &str`, `String + String` and `&str + &String`.
+ ///
+ /// If this function returns `true` it means a note was printed, so we don't need
+ /// to print the normal "implementation of `std::ops::Add` might be missing" note
+ fn check_str_addition(
+ &self,
+ lhs_expr: &'tcx hir::Expr<'tcx>,
+ rhs_expr: &'tcx hir::Expr<'tcx>,
+ lhs_ty: Ty<'tcx>,
+ rhs_ty: Ty<'tcx>,
+ err: &mut Diagnostic,
+ is_assign: IsAssign,
+ op: hir::BinOp,
+ ) -> bool {
+ let str_concat_note = "string concatenation requires an owned `String` on the left";
+ let rm_borrow_msg = "remove the borrow to obtain an owned `String`";
+ let to_owned_msg = "create an owned `String` from a string reference";
+
+ let is_std_string = |ty: Ty<'tcx>| {
+ ty.ty_adt_def()
+ .map_or(false, |ty_def| self.tcx.is_diagnostic_item(sym::String, ty_def.did()))
+ };
+
+ match (lhs_ty.kind(), rhs_ty.kind()) {
+ (&Ref(_, l_ty, _), &Ref(_, r_ty, _)) // &str or &String + &str, &String or &&str
+ if (*l_ty.kind() == Str || is_std_string(l_ty))
+ && (*r_ty.kind() == Str
+ || is_std_string(r_ty)
+ || matches!(
+ r_ty.kind(), Ref(_, inner_ty, _) if *inner_ty.kind() == Str
+ )) =>
+ {
+ if let IsAssign::No = is_assign { // Do not supply this message if `&str += &str`
+ err.span_label(op.span, "`+` cannot be used to concatenate two `&str` strings");
+ err.note(str_concat_note);
+ if let hir::ExprKind::AddrOf(_, _, lhs_inner_expr) = lhs_expr.kind {
+ err.span_suggestion_verbose(
+ lhs_expr.span.until(lhs_inner_expr.span),
+ rm_borrow_msg,
+ "",
+ Applicability::MachineApplicable
+ );
+ } else {
+ err.span_suggestion_verbose(
+ lhs_expr.span.shrink_to_hi(),
+ to_owned_msg,
+ ".to_owned()",
+ Applicability::MachineApplicable
+ );
+ }
+ }
+ true
+ }
+ (&Ref(_, l_ty, _), &Adt(..)) // Handle `&str` & `&String` + `String`
+ if (*l_ty.kind() == Str || is_std_string(l_ty)) && is_std_string(rhs_ty) =>
+ {
+ err.span_label(
+ op.span,
+ "`+` cannot be used to concatenate a `&str` with a `String`",
+ );
+ match is_assign {
+ IsAssign::No => {
+ let sugg_msg;
+ let lhs_sugg = if let hir::ExprKind::AddrOf(_, _, lhs_inner_expr) = lhs_expr.kind {
+ sugg_msg = "remove the borrow on the left and add one on the right";
+ (lhs_expr.span.until(lhs_inner_expr.span), "".to_owned())
+ } else {
+ sugg_msg = "create an owned `String` on the left and add a borrow on the right";
+ (lhs_expr.span.shrink_to_hi(), ".to_owned()".to_owned())
+ };
+ let suggestions = vec![
+ lhs_sugg,
+ (rhs_expr.span.shrink_to_lo(), "&".to_owned()),
+ ];
+ err.multipart_suggestion_verbose(
+ sugg_msg,
+ suggestions,
+ Applicability::MachineApplicable,
+ );
+ }
+ IsAssign::Yes => {
+ err.note(str_concat_note);
+ }
+ }
+ true
+ }
+ _ => false,
+ }
+ }
+
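A hypothetical example of the `&str + &str` case and the `.to_owned()` fix this help suggests:

    fn greet(a: &str, b: &str) -> String {
        // `a + b` is rejected with the "owned `String` on the left" note; following
        // the suggestion yields the call below.
        a.to_owned() + b
    }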
+ pub fn check_user_unop(
+ &self,
+ ex: &'tcx hir::Expr<'tcx>,
+ operand_ty: Ty<'tcx>,
+ op: hir::UnOp,
+ expected: Expectation<'tcx>,
+ ) -> Ty<'tcx> {
+ assert!(op.is_by_value());
+ match self.lookup_op_method(operand_ty, None, None, Op::Unary(op, ex.span), expected) {
+ Ok(method) => {
+ self.write_method_call(ex.hir_id, method);
+ method.sig.output()
+ }
+ Err(errors) => {
+ let actual = self.resolve_vars_if_possible(operand_ty);
+ if !actual.references_error() {
+ let mut err = struct_span_err!(
+ self.tcx.sess,
+ ex.span,
+ E0600,
+ "cannot apply unary operator `{}` to type `{}`",
+ op.as_str(),
+ actual
+ );
+ err.span_label(
+ ex.span,
+ format!("cannot apply unary operator `{}`", op.as_str()),
+ );
+
+ if operand_ty.has_non_region_param() {
+ let predicates = errors.iter().filter_map(|error| {
+ error.obligation.predicate.to_opt_poly_trait_pred()
+ });
+ for pred in predicates {
+ self.err_ctxt().suggest_restricting_param_bound(
+ &mut err,
+ pred,
+ None,
+ self.body_id,
+ );
+ }
+ }
+
+ let sp = self.tcx.sess.source_map().start_point(ex.span);
+ if let Some(sp) =
+ self.tcx.sess.parse_sess.ambiguous_block_expr_parse.borrow().get(&sp)
+ {
+                    // If the previous expression was a block expression, suggest parentheses
+                    // (turning this into a binary subtraction operation instead).
+                    // For example, `{2} - 2` -> `({2}) - 2` (see src/test/ui/parser/expr-as-stmt.rs).
+ err.subdiagnostic(ExprParenthesesNeeded::surrounding(*sp));
+ } else {
+ match actual.kind() {
+ Uint(_) if op == hir::UnOp::Neg => {
+ err.note("unsigned values cannot be negated");
+
+ if let hir::ExprKind::Unary(
+ _,
+ hir::Expr {
+ kind:
+ hir::ExprKind::Lit(Spanned {
+ node: ast::LitKind::Int(1, _),
+ ..
+ }),
+ ..
+ },
+ ) = ex.kind
+ {
+ err.span_suggestion(
+ ex.span,
+ &format!(
+ "you may have meant the maximum value of `{actual}`",
+ ),
+ format!("{actual}::MAX"),
+ Applicability::MaybeIncorrect,
+ );
+ }
+ }
+ Str | Never | Char | Tuple(_) | Array(_, _) => {}
+ Ref(_, lty, _) if *lty.kind() == Str => {}
+ _ => {
+ self.note_unmet_impls_on_type(&mut err, errors);
+ }
+ }
+ }
+ err.emit();
+ }
+ self.tcx.ty_error()
+ }
+ }
+ }
+
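An example of the negated-`1` special case handled above (the constant is what the suggestion inserts):

    fn all_bits() -> u8 {
        // `-1` as a `u8` cannot be negated (E0600); the suggestion replaces the
        // expression with the type's `MAX` constant.
        u8::MAX
    }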
+ fn lookup_op_method(
+ &self,
+ lhs_ty: Ty<'tcx>,
+ other_ty: Option<Ty<'tcx>>,
+ other_ty_expr: Option<&'tcx hir::Expr<'tcx>>,
+ op: Op,
+ expected: Expectation<'tcx>,
+ ) -> Result<MethodCallee<'tcx>, Vec<FulfillmentError<'tcx>>> {
+ let span = match op {
+ Op::Binary(op, _) => op.span,
+ Op::Unary(_, span) => span,
+ };
+ let (opname, trait_did) = lang_item_for_op(self.tcx, op, span);
+
+ debug!(
+ "lookup_op_method(lhs_ty={:?}, op={:?}, opname={:?}, trait_did={:?})",
+ lhs_ty, op, opname, trait_did
+ );
+
+ // Catches cases like #83893, where a lang item is declared with the
+ // wrong number of generic arguments. Should have yielded an error
+ // elsewhere by now, but we have to catch it here so that we do not
+ // index `other_tys` out of bounds (if the lang item has too many
+ // generic arguments, `other_tys` is too short).
+ if !has_expected_num_generic_args(
+ self.tcx,
+ trait_did,
+ match op {
+ // Binary ops have a generic right-hand side, unary ops don't
+ Op::Binary(..) => 1,
+ Op::Unary(..) => 0,
+ },
+ ) {
+ return Err(vec![]);
+ }
+
+ let opname = Ident::with_dummy_span(opname);
+ let method = trait_did.and_then(|trait_did| {
+ self.lookup_op_method_in_trait(
+ span,
+ opname,
+ trait_did,
+ lhs_ty,
+ other_ty,
+ other_ty_expr,
+ expected,
+ )
+ });
+
+ match (method, trait_did) {
+ (Some(ok), _) => {
+ let method = self.register_infer_ok_obligations(ok);
+ self.select_obligations_where_possible(false, |_| {});
+ Ok(method)
+ }
+ (None, None) => Err(vec![]),
+ (None, Some(trait_did)) => {
+ let (obligation, _) = self.obligation_for_op_method(
+ span,
+ trait_did,
+ lhs_ty,
+ other_ty,
+ other_ty_expr,
+ expected,
+ );
+ let mut fulfill = <dyn TraitEngine<'_>>::new(self.tcx);
+ fulfill.register_predicate_obligation(self, obligation);
+ Err(fulfill.select_where_possible(&self.infcx))
+ }
+ }
+ }
+}
+
+fn lang_item_for_op(
+ tcx: TyCtxt<'_>,
+ op: Op,
+ span: Span,
+) -> (rustc_span::Symbol, Option<hir::def_id::DefId>) {
+ let lang = tcx.lang_items();
+ if let Op::Binary(op, IsAssign::Yes) = op {
+ match op.node {
+ hir::BinOpKind::Add => (sym::add_assign, lang.add_assign_trait()),
+ hir::BinOpKind::Sub => (sym::sub_assign, lang.sub_assign_trait()),
+ hir::BinOpKind::Mul => (sym::mul_assign, lang.mul_assign_trait()),
+ hir::BinOpKind::Div => (sym::div_assign, lang.div_assign_trait()),
+ hir::BinOpKind::Rem => (sym::rem_assign, lang.rem_assign_trait()),
+ hir::BinOpKind::BitXor => (sym::bitxor_assign, lang.bitxor_assign_trait()),
+ hir::BinOpKind::BitAnd => (sym::bitand_assign, lang.bitand_assign_trait()),
+ hir::BinOpKind::BitOr => (sym::bitor_assign, lang.bitor_assign_trait()),
+ hir::BinOpKind::Shl => (sym::shl_assign, lang.shl_assign_trait()),
+ hir::BinOpKind::Shr => (sym::shr_assign, lang.shr_assign_trait()),
+ hir::BinOpKind::Lt
+ | hir::BinOpKind::Le
+ | hir::BinOpKind::Ge
+ | hir::BinOpKind::Gt
+ | hir::BinOpKind::Eq
+ | hir::BinOpKind::Ne
+ | hir::BinOpKind::And
+ | hir::BinOpKind::Or => {
+ span_bug!(span, "impossible assignment operation: {}=", op.node.as_str())
+ }
+ }
+ } else if let Op::Binary(op, IsAssign::No) = op {
+ match op.node {
+ hir::BinOpKind::Add => (sym::add, lang.add_trait()),
+ hir::BinOpKind::Sub => (sym::sub, lang.sub_trait()),
+ hir::BinOpKind::Mul => (sym::mul, lang.mul_trait()),
+ hir::BinOpKind::Div => (sym::div, lang.div_trait()),
+ hir::BinOpKind::Rem => (sym::rem, lang.rem_trait()),
+ hir::BinOpKind::BitXor => (sym::bitxor, lang.bitxor_trait()),
+ hir::BinOpKind::BitAnd => (sym::bitand, lang.bitand_trait()),
+ hir::BinOpKind::BitOr => (sym::bitor, lang.bitor_trait()),
+ hir::BinOpKind::Shl => (sym::shl, lang.shl_trait()),
+ hir::BinOpKind::Shr => (sym::shr, lang.shr_trait()),
+ hir::BinOpKind::Lt => (sym::lt, lang.partial_ord_trait()),
+ hir::BinOpKind::Le => (sym::le, lang.partial_ord_trait()),
+ hir::BinOpKind::Ge => (sym::ge, lang.partial_ord_trait()),
+ hir::BinOpKind::Gt => (sym::gt, lang.partial_ord_trait()),
+ hir::BinOpKind::Eq => (sym::eq, lang.eq_trait()),
+ hir::BinOpKind::Ne => (sym::ne, lang.eq_trait()),
+ hir::BinOpKind::And | hir::BinOpKind::Or => {
+ span_bug!(span, "&& and || are not overloadable")
+ }
+ }
+ } else if let Op::Unary(hir::UnOp::Not, _) = op {
+ (sym::not, lang.not_trait())
+ } else if let Op::Unary(hir::UnOp::Neg, _) = op {
+ (sym::neg, lang.neg_trait())
+ } else {
+ bug!("lookup_op_method: op not supported: {:?}", op)
+ }
+}
+
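The lang-item table above mirrors the standard desugaring of operators to trait methods; for instance:

    use std::ops::Add;
    fn plus(a: i32, b: i32) -> i32 {
        // `a + b` resolves through the `add` lang item, i.e. `Add::add`; the
        // assignment forms map to the `*Assign` traits in the same way.
        Add::add(a, b)
    }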
+// Binary operator categories. These categories summarize the behavior
+// with respect to the builtin operations supported.
+enum BinOpCategory {
+ /// &&, || -- cannot be overridden
+ Shortcircuit,
+
+ /// <<, >> -- when shifting a single integer, rhs can be any
+ /// integer type. For simd, types must match.
+ Shift,
+
+ /// +, -, etc -- takes equal types, produces same type as input,
+ /// applicable to ints/floats/simd
+ Math,
+
+ /// &, |, ^ -- takes equal types, produces same type as input,
+ /// applicable to ints/floats/simd/bool
+ Bitwise,
+
+ /// ==, !=, etc -- takes equal types, produces bools, except for simd,
+ /// which produce the input type
+ Comparison,
+}
+
+impl BinOpCategory {
+ fn from(op: hir::BinOp) -> BinOpCategory {
+ match op.node {
+ hir::BinOpKind::Shl | hir::BinOpKind::Shr => BinOpCategory::Shift,
+
+ hir::BinOpKind::Add
+ | hir::BinOpKind::Sub
+ | hir::BinOpKind::Mul
+ | hir::BinOpKind::Div
+ | hir::BinOpKind::Rem => BinOpCategory::Math,
+
+ hir::BinOpKind::BitXor | hir::BinOpKind::BitAnd | hir::BinOpKind::BitOr => {
+ BinOpCategory::Bitwise
+ }
+
+ hir::BinOpKind::Eq
+ | hir::BinOpKind::Ne
+ | hir::BinOpKind::Lt
+ | hir::BinOpKind::Le
+ | hir::BinOpKind::Ge
+ | hir::BinOpKind::Gt => BinOpCategory::Comparison,
+
+ hir::BinOpKind::And | hir::BinOpKind::Or => BinOpCategory::Shortcircuit,
+ }
+ }
+}
+
+/// Whether the binary operation is an assignment (`a += b`), or not (`a + b`)
+#[derive(Clone, Copy, Debug, PartialEq)]
+enum IsAssign {
+ No,
+ Yes,
+}
+
+#[derive(Clone, Copy, Debug)]
+enum Op {
+ Binary(hir::BinOp, IsAssign),
+ Unary(hir::UnOp, Span),
+}
+
+/// Dereferences a single level of immutable referencing.
+fn deref_ty_if_possible<'tcx>(ty: Ty<'tcx>) -> Ty<'tcx> {
+ match ty.kind() {
+ ty::Ref(_, ty, hir::Mutability::Not) => *ty,
+ _ => ty,
+ }
+}
+
+/// Returns `true` if this is a built-in arithmetic operation (e.g., u32
+/// + u32, i16x4 == i16x4) and false if these types would have to be
+/// overloaded to be legal. There are two reasons that we distinguish
+/// builtin operations from overloaded ones (vs trying to drive
+/// everything uniformly through the trait system and intrinsics or
+/// something like that):
+///
+/// 1. Builtin operations can trivially be evaluated in constants.
+/// 2. For comparison operators applied to SIMD types the result is
+/// not of type `bool`. For example, `i16x4 == i16x4` yields a
+/// type like `i16x4`. This means that the overloaded trait
+/// `PartialEq` is not applicable.
+///
+/// Reason #2 is the killer. I tried for a while to always use
+/// overloaded logic and just check the types in constants/codegen after
+/// the fact, and it worked fine, except for SIMD types. -nmatsakis
+fn is_builtin_binop<'tcx>(lhs: Ty<'tcx>, rhs: Ty<'tcx>, op: hir::BinOp) -> bool {
+ // Special-case a single layer of referencing, so that things like `5.0 + &6.0f32` work.
+ // (See https://github.com/rust-lang/rust/issues/57447.)
+ let (lhs, rhs) = (deref_ty_if_possible(lhs), deref_ty_if_possible(rhs));
+
+ match BinOpCategory::from(op) {
+ BinOpCategory::Shortcircuit => true,
+
+ BinOpCategory::Shift => {
+ lhs.references_error()
+ || rhs.references_error()
+ || lhs.is_integral() && rhs.is_integral()
+ }
+
+ BinOpCategory::Math => {
+ lhs.references_error()
+ || rhs.references_error()
+ || lhs.is_integral() && rhs.is_integral()
+ || lhs.is_floating_point() && rhs.is_floating_point()
+ }
+
+ BinOpCategory::Bitwise => {
+ lhs.references_error()
+ || rhs.references_error()
+ || lhs.is_integral() && rhs.is_integral()
+ || lhs.is_floating_point() && rhs.is_floating_point()
+ || lhs.is_bool() && rhs.is_bool()
+ }
+
+ BinOpCategory::Comparison => {
+ lhs.references_error() || rhs.references_error() || lhs.is_scalar() && rhs.is_scalar()
+ }
+ }
+}
+
+struct TypeParamEraser<'a, 'tcx>(&'a FnCtxt<'a, 'tcx>, Span);
+
+impl<'tcx> TypeFolder<'tcx> for TypeParamEraser<'_, 'tcx> {
+ fn tcx(&self) -> TyCtxt<'tcx> {
+ self.0.tcx
+ }
+
+ fn fold_ty(&mut self, ty: Ty<'tcx>) -> Ty<'tcx> {
+ match ty.kind() {
+ ty::Param(_) => self.0.next_ty_var(TypeVariableOrigin {
+ kind: TypeVariableOriginKind::MiscVariable,
+ span: self.1,
+ }),
+ _ => ty.super_fold_with(self),
+ }
+ }
+}
diff --git a/compiler/rustc_typeck/src/check/pat.rs b/compiler/rustc_hir_typeck/src/pat.rs
index 837c32355..ea90da4a6 100644
--- a/compiler/rustc_typeck/src/check/pat.rs
+++ b/compiler/rustc_hir_typeck/src/pat.rs
@@ -1,6 +1,5 @@
-use crate::check::FnCtxt;
+use crate::FnCtxt;
use rustc_ast as ast;
-
use rustc_data_structures::fx::FxHashMap;
use rustc_errors::{
pluralize, struct_span_err, Applicability, Diagnostic, DiagnosticBuilder, ErrorGuaranteed,
@@ -569,7 +568,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
) -> Ty<'tcx> {
// Determine the binding mode...
let bm = match ba {
- hir::BindingAnnotation::Unannotated => def_bm,
+ hir::BindingAnnotation::NONE => def_bm,
_ => BindingMode::convert(ba),
};
// ...and store it in a side table:
@@ -600,7 +599,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
// If there are multiple arms, make sure they all agree on
// what the type of the binding `x` ought to be.
if var_id != pat.hir_id {
- self.check_binding_alt_eq_ty(pat.span, var_id, local_ty, ti);
+ self.check_binding_alt_eq_ty(ba, pat.span, var_id, local_ty, ti);
}
if let Some(p) = sub {
@@ -610,7 +609,14 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
local_ty
}
- fn check_binding_alt_eq_ty(&self, span: Span, var_id: HirId, ty: Ty<'tcx>, ti: TopInfo<'tcx>) {
+ fn check_binding_alt_eq_ty(
+ &self,
+ ba: hir::BindingAnnotation,
+ span: Span,
+ var_id: HirId,
+ ty: Ty<'tcx>,
+ ti: TopInfo<'tcx>,
+ ) {
let var_ty = self.local_ty(span, var_id).decl_ty;
if let Some(mut err) = self.demand_eqtype_pat_diag(span, var_ty, ty, ti) {
let hir = self.tcx.hir();
@@ -628,12 +634,50 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
});
let pre = if in_match { "in the same arm, " } else { "" };
err.note(&format!("{}a binding must have the same type in all alternatives", pre));
- // FIXME: check if `var_ty` and `ty` can be made the same type by adding or removing
- // `ref` or `&` to the pattern.
+ self.suggest_adding_missing_ref_or_removing_ref(
+ &mut err,
+ span,
+ var_ty,
+ self.resolve_vars_with_obligations(ty),
+ ba,
+ );
err.emit();
}
}
+ fn suggest_adding_missing_ref_or_removing_ref(
+ &self,
+ err: &mut Diagnostic,
+ span: Span,
+ expected: Ty<'tcx>,
+ actual: Ty<'tcx>,
+ ba: hir::BindingAnnotation,
+ ) {
+ match (expected.kind(), actual.kind(), ba) {
+ (ty::Ref(_, inner_ty, _), _, hir::BindingAnnotation::NONE)
+ if self.can_eq(self.param_env, *inner_ty, actual).is_ok() =>
+ {
+ err.span_suggestion_verbose(
+ span.shrink_to_lo(),
+ "consider adding `ref`",
+ "ref ",
+ Applicability::MaybeIncorrect,
+ );
+ }
+ (_, ty::Ref(_, inner_ty, _), hir::BindingAnnotation::REF)
+ if self.can_eq(self.param_env, expected, *inner_ty).is_ok() =>
+ {
+ err.span_suggestion_verbose(
+ span.with_hi(span.lo() + BytePos(4)),
+ "consider removing `ref`",
+ "",
+ Applicability::MaybeIncorrect,
+ );
+ }
+ _ => (),
+ }
+ }
+
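A sketch of the or-pattern mismatch this new `ref` suggestion addresses:

    fn len_of(v: Result<String, String>) -> usize {
        match v {
            // `Ok(s) | Err(ref s)` binds `s` at two different types; adding (or
            // removing) `ref` as suggested makes the alternatives agree.
            Ok(ref s) | Err(ref s) => s.len(),
        }
    }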
// Precondition: pat is a Ref(_) pattern
fn borrow_pat_suggestion(&self, err: &mut Diagnostic, pat: &Pat<'_>) {
let tcx = self.tcx;
@@ -882,7 +926,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
),
);
match self.tcx.hir().get(self.tcx.hir().get_parent_node(pat.hir_id)) {
- hir::Node::Pat(Pat { kind: hir::PatKind::Struct(..), .. }) => {
+ hir::Node::PatField(..) => {
e.span_suggestion_verbose(
ident.span.shrink_to_hi(),
"bind the struct field to a different name instead",
@@ -936,7 +980,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
pat: &'tcx Pat<'tcx>,
qpath: &'tcx hir::QPath<'tcx>,
subpats: &'tcx [Pat<'tcx>],
- ddpos: Option<usize>,
+ ddpos: hir::DotDotPos,
expected: Ty<'tcx>,
def_bm: BindingMode,
ti: TopInfo<'tcx>,
@@ -1021,7 +1065,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
// Type-check subpatterns.
if subpats.len() == variant.fields.len()
- || subpats.len() < variant.fields.len() && ddpos.is_some()
+ || subpats.len() < variant.fields.len() && ddpos.as_opt_usize().is_some()
{
let ty::Adt(_, substs) = pat_ty.kind() else {
bug!("unexpected pattern type {:?}", pat_ty);
@@ -1209,14 +1253,14 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
&self,
span: Span,
elements: &'tcx [Pat<'tcx>],
- ddpos: Option<usize>,
+ ddpos: hir::DotDotPos,
expected: Ty<'tcx>,
def_bm: BindingMode,
ti: TopInfo<'tcx>,
) -> Ty<'tcx> {
let tcx = self.tcx;
let mut expected_len = elements.len();
- if ddpos.is_some() {
+ if ddpos.as_opt_usize().is_some() {
// Require known type only when `..` is present.
if let ty::Tuple(tys) = self.structurally_resolved_type(span, expected).kind() {
expected_len = tys.len();
@@ -1352,7 +1396,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
.iter()
.copied()
.filter(|(field, _)| {
- field.vis.is_accessible_from(tcx.parent_module(pat.hir_id).to_def_id(), tcx)
+ field.vis.is_accessible_from(tcx.parent_module(pat.hir_id), tcx)
&& !matches!(
tcx.eval_stability(field.did, None, DUMMY_SP, None),
EvalResult::Deny { .. }
@@ -1745,10 +1789,8 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
&unmentioned_fields.iter().map(|(_, i)| i).collect::<Vec<_>>(),
);
- self.tcx.struct_span_lint_hir(NON_EXHAUSTIVE_OMITTED_PATTERNS, pat.hir_id, pat.span, |build| {
- let mut lint = build.build("some fields are not explicitly listed");
+ self.tcx.struct_span_lint_hir(NON_EXHAUSTIVE_OMITTED_PATTERNS, pat.hir_id, pat.span, "some fields are not explicitly listed", |lint| {
lint.span_label(pat.span, format!("field{} {} not listed", rustc_errors::pluralize!(unmentioned_fields.len()), joined_patterns));
-
lint.help(
"ensure that all fields are mentioned explicitly by adding the suggested fields",
);
@@ -1756,7 +1798,8 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
"the pattern is of type `{}` and the `non_exhaustive_omitted_patterns` attribute was found",
ty,
));
- lint.emit();
+
+ lint
});
}
diff --git a/compiler/rustc_typeck/src/check/place_op.rs b/compiler/rustc_hir_typeck/src/place_op.rs
index 2e0f37eba..ba8cf6926 100644
--- a/compiler/rustc_typeck/src/check/place_op.rs
+++ b/compiler/rustc_hir_typeck/src/place_op.rs
@@ -1,5 +1,5 @@
-use crate::check::method::MethodCallee;
-use crate::check::{has_expected_num_generic_args, FnCtxt, PlaceOp};
+use crate::method::MethodCallee;
+use crate::{has_expected_num_generic_args, FnCtxt, PlaceOp};
use rustc_ast as ast;
use rustc_errors::Applicability;
use rustc_hir as hir;
diff --git a/compiler/rustc_typeck/src/check/rvalue_scopes.rs b/compiler/rustc_hir_typeck/src/rvalue_scopes.rs
index 22c9e7961..22c9e7961 100644
--- a/compiler/rustc_typeck/src/check/rvalue_scopes.rs
+++ b/compiler/rustc_hir_typeck/src/rvalue_scopes.rs
diff --git a/compiler/rustc_typeck/src/check/upvar.rs b/compiler/rustc_hir_typeck/src/upvar.rs
index dd8f943b9..4dea40829 100644
--- a/compiler/rustc_typeck/src/check/upvar.rs
+++ b/compiler/rustc_hir_typeck/src/upvar.rs
@@ -352,7 +352,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
/// and that the path can be captured with required capture kind (depending on use in closure,
/// move closure etc.)
///
- /// Returns the set of of adjusted information along with the inferred closure kind and span
+ /// Returns the set of adjusted information along with the inferred closure kind and span
/// associated with the closure kind inference.
///
/// Note that we *always* infer a minimal kind, even if
@@ -749,10 +749,8 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
lint::builtin::RUST_2021_INCOMPATIBLE_CLOSURE_CAPTURES,
closure_hir_id,
closure_head_span,
+ reasons.migration_message(),
|lint| {
- let mut diagnostics_builder = lint.build(
- &reasons.migration_message(),
- );
for NeededMigration { var_hir_id, diagnostics_info } in &need_migrations {
// Labels all the usage of the captured variable and why they are responsible
// for migration being needed
@@ -760,13 +758,13 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
match &lint_note.captures_info {
UpvarMigrationInfo::CapturingPrecise { source_expr: Some(capture_expr_id), var_name: captured_name } => {
let cause_span = self.tcx.hir().span(*capture_expr_id);
- diagnostics_builder.span_label(cause_span, format!("in Rust 2018, this closure captures all of `{}`, but in Rust 2021, it will only capture `{}`",
+ lint.span_label(cause_span, format!("in Rust 2018, this closure captures all of `{}`, but in Rust 2021, it will only capture `{}`",
self.tcx.hir().name(*var_hir_id),
captured_name,
));
}
UpvarMigrationInfo::CapturingNothing { use_span } => {
- diagnostics_builder.span_label(*use_span, format!("in Rust 2018, this causes the closure to capture `{}`, but in Rust 2021, it has no effect",
+ lint.span_label(*use_span, format!("in Rust 2018, this causes the closure to capture `{}`, but in Rust 2021, it has no effect",
self.tcx.hir().name(*var_hir_id),
));
}
@@ -781,13 +779,13 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
match &lint_note.captures_info {
UpvarMigrationInfo::CapturingPrecise { var_name: captured_name, .. } => {
- diagnostics_builder.span_label(drop_location_span, format!("in Rust 2018, `{}` is dropped here, but in Rust 2021, only `{}` will be dropped here as part of the closure",
+ lint.span_label(drop_location_span, format!("in Rust 2018, `{}` is dropped here, but in Rust 2021, only `{}` will be dropped here as part of the closure",
self.tcx.hir().name(*var_hir_id),
captured_name,
));
}
UpvarMigrationInfo::CapturingNothing { use_span: _ } => {
- diagnostics_builder.span_label(drop_location_span, format!("in Rust 2018, `{v}` is dropped here along with the closure, but in Rust 2021 `{v}` is not part of the closure",
+ lint.span_label(drop_location_span, format!("in Rust 2018, `{v}` is dropped here along with the closure, but in Rust 2021 `{v}` is not part of the closure",
v = self.tcx.hir().name(*var_hir_id),
));
}
@@ -800,7 +798,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
match &lint_note.captures_info {
UpvarMigrationInfo::CapturingPrecise { var_name: captured_name, .. } => {
let var_name = self.tcx.hir().name(*var_hir_id);
- diagnostics_builder.span_label(closure_head_span, format!("\
+ lint.span_label(closure_head_span, format!("\
in Rust 2018, this closure implements {missing_trait} \
as `{var_name}` implements {missing_trait}, but in Rust 2021, \
this closure will no longer implement {missing_trait} \
@@ -814,7 +812,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
}
}
}
- diagnostics_builder.note("for more information, see <https://doc.rust-lang.org/nightly/edition-guide/rust-2021/disjoint-capture-in-closures.html>");
+ lint.note("for more information, see <https://doc.rust-lang.org/nightly/edition-guide/rust-2021/disjoint-capture-in-closures.html>");
let diagnostic_msg = format!(
"add a dummy let to cause {} to be fully captured",
@@ -857,7 +855,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
// We take the indentation from the next non-empty line.
let line2 = lines.find(|line| !line.is_empty()).unwrap_or_default();
let indent = line2.split_once(|c: char| !c.is_whitespace()).unwrap_or_default().0;
- diagnostics_builder.span_suggestion(
+ lint.span_suggestion(
closure_body_span.with_lo(closure_body_span.lo() + BytePos::from_usize(line1.len())).shrink_to_lo(),
&diagnostic_msg,
format!("\n{indent}{migration_string};"),
@@ -868,7 +866,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
// braces, but with more than just the opening
// brace on the first line. We put the `let`
// directly after the `{`.
- diagnostics_builder.span_suggestion(
+ lint.span_suggestion(
closure_body_span.with_lo(closure_body_span.lo() + BytePos(1)).shrink_to_lo(),
&diagnostic_msg,
format!(" {migration_string};"),
@@ -877,7 +875,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
} else {
// This is a closure without braces around the body.
// We add braces to add the `let` before the body.
- diagnostics_builder.multipart_suggestion(
+ lint.multipart_suggestion(
&diagnostic_msg,
vec![
(closure_body_span.shrink_to_lo(), format!("{{ {migration_string}; ")),
@@ -887,7 +885,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
);
}
} else {
- diagnostics_builder.span_suggestion(
+ lint.span_suggestion(
closure_span,
&diagnostic_msg,
migration_string,
@@ -895,7 +893,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
);
}
- diagnostics_builder.emit();
+ lint
},
);
}
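For context, the migration suggestion built here inserts a dummy `let` so a Rust 2021 closure keeps capturing the whole variable; a hypothetical migrated closure looks like:

    fn keep_whole_capture(data: (Vec<u8>, String)) {
        let closure = move || {
            let _ = &data; // inserted by the suggested migration
            println!("{}", data.1);
        };
        closure();
    }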
@@ -1217,7 +1215,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
// Combine all the reasons of why the root variable should be captured as a result of
// auto trait implementation issues
- auto_trait_migration_reasons.extend(capture_trait_reasons.clone());
+ auto_trait_migration_reasons.extend(capture_trait_reasons.iter().copied());
diagnostics_info.push(MigrationLintNote {
captures_info,
@@ -2024,6 +2022,10 @@ fn should_do_rust_2021_incompatible_closure_captures_analysis(
tcx: TyCtxt<'_>,
closure_id: hir::HirId,
) -> bool {
+ if tcx.sess.rust_2021() {
+ return false;
+ }
+
let (level, _) =
tcx.lint_level_at_node(lint::builtin::RUST_2021_INCOMPATIBLE_CLOSURE_CAPTURES, closure_id);
diff --git a/compiler/rustc_typeck/src/check/writeback.rs b/compiler/rustc_hir_typeck/src/writeback.rs
index f549807c3..1e26daa9c 100644
--- a/compiler/rustc_typeck/src/check/writeback.rs
+++ b/compiler/rustc_hir_typeck/src/writeback.rs
@@ -2,8 +2,7 @@
// unresolved type variables and replaces "ty_var" types with their
// substitutions.
-use crate::check::FnCtxt;
-
+use crate::FnCtxt;
use hir::def_id::LocalDefId;
use rustc_data_structures::fx::FxHashMap;
use rustc_errors::ErrorGuaranteed;
@@ -16,6 +15,7 @@ use rustc_middle::mir::FakeReadCause;
use rustc_middle::ty::adjustment::{Adjust, Adjustment, PointerCast};
use rustc_middle::ty::fold::{TypeFoldable, TypeFolder, TypeSuperFoldable};
use rustc_middle::ty::visit::{TypeSuperVisitable, TypeVisitable};
+use rustc_middle::ty::TypeckResults;
use rustc_middle::ty::{self, ClosureSizeProfileData, Ty, TyCtxt};
use rustc_span::symbol::sym;
use rustc_span::Span;
@@ -192,6 +192,27 @@ impl<'cx, 'tcx> WritebackCx<'cx, 'tcx> {
}
}
+    // (ouz-a 1005988): Normally `[T] : std::ops::Index<usize>` should be normalized
+    // into `[T]`, but currently a `where` clause stops the normalization process for it.
+    // Here we compare the types of the expression and the base; in code without a
+    // `where` clause they would be equal. If they are not, we don't modify the
+    // expression, and thereby bypass the ICE.
+ fn is_builtin_index(
+ &mut self,
+ typeck_results: &TypeckResults<'tcx>,
+ e: &hir::Expr<'_>,
+ base_ty: Ty<'tcx>,
+ index_ty: Ty<'tcx>,
+ ) -> bool {
+ if let Some(elem_ty) = base_ty.builtin_index() {
+ let Some(exp_ty) = typeck_results.expr_ty_opt(e) else {return false;};
+ let resolved_exp_ty = self.resolve(exp_ty, &e.span);
+
+ elem_ty == resolved_exp_ty && index_ty == self.fcx.tcx.types.usize
+ } else {
+ false
+ }
+ }
+
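A trivial example of the builtin-index fast path that `is_builtin_index` now guards:

    fn first(xs: &[u32]) -> u32 {
        // The base is `[u32]` and the index is `usize`, so no `Index::index` method
        // call needs to be recorded for this expression.
        xs[0]
    }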
// Similar to operators, indexing is always assumed to be overloaded
// Here, correct cases where an indexing expression can be simplified
// to use builtin indexing because the index type is known to be
@@ -222,8 +243,9 @@ impl<'cx, 'tcx> WritebackCx<'cx, 'tcx> {
)
});
let index_ty = self.fcx.resolve_vars_if_possible(index_ty);
+ let resolved_base_ty = self.resolve(*base_ty, &base.span);
- if base_ty.builtin_index().is_some() && index_ty == self.fcx.tcx.types.usize {
+ if self.is_builtin_index(&typeck_results, e, resolved_base_ty, index_ty) {
// Remove the method call record
typeck_results.type_dependent_defs_mut().remove(e.hir_id);
typeck_results.node_substs_mut().remove(e.hir_id);
@@ -292,6 +314,17 @@ impl<'cx, 'tcx> Visitor<'tcx> for WritebackCx<'cx, 'tcx> {
intravisit::walk_expr(self, e);
}
+ fn visit_generic_param(&mut self, p: &'tcx hir::GenericParam<'tcx>) {
+ match &p.kind {
+ hir::GenericParamKind::Lifetime { .. } => {
+ // Nothing to write back here
+ }
+ hir::GenericParamKind::Type { .. } | hir::GenericParamKind::Const { .. } => {
+ self.tcx().sess.delay_span_bug(p.span, format!("unexpected generic param: {p:?}"));
+ }
+ }
+ }
+
fn visit_block(&mut self, b: &'tcx hir::Block<'tcx>) {
self.visit_node_id(b.span, b.hir_id);
intravisit::walk_block(self, b);
@@ -468,7 +501,7 @@ impl<'cx, 'tcx> WritebackCx<'cx, 'tcx> {
if !errors_buffer.is_empty() {
errors_buffer.sort_by_key(|diag| diag.span.primary_span());
- for mut diag in errors_buffer.drain(..) {
+ for mut diag in errors_buffer {
self.tcx().sess.diagnostic().emit_diagnostic(&mut diag);
}
}
@@ -503,33 +536,37 @@ impl<'cx, 'tcx> WritebackCx<'cx, 'tcx> {
let opaque_types =
self.fcx.infcx.inner.borrow_mut().opaque_type_storage.take_opaque_types();
for (opaque_type_key, decl) in opaque_types {
- let hidden_type = match decl.origin {
- hir::OpaqueTyOrigin::FnReturn(_) | hir::OpaqueTyOrigin::AsyncFn(_) => {
- let ty = self.resolve(decl.hidden_type.ty, &decl.hidden_type.span);
- struct RecursionChecker {
- def_id: LocalDefId,
- }
- impl<'tcx> ty::TypeVisitor<'tcx> for RecursionChecker {
- type BreakTy = ();
- fn visit_ty(&mut self, t: Ty<'tcx>) -> ControlFlow<Self::BreakTy> {
- if let ty::Opaque(def_id, _) = *t.kind() {
- if def_id == self.def_id.to_def_id() {
- return ControlFlow::Break(());
- }
- }
- t.super_visit_with(self)
+ let hidden_type = self.resolve(decl.hidden_type, &decl.hidden_type.span);
+ let opaque_type_key = self.resolve(opaque_type_key, &decl.hidden_type.span);
+
+ struct RecursionChecker {
+ def_id: LocalDefId,
+ }
+ impl<'tcx> ty::TypeVisitor<'tcx> for RecursionChecker {
+ type BreakTy = ();
+ fn visit_ty(&mut self, t: Ty<'tcx>) -> ControlFlow<Self::BreakTy> {
+ if let ty::Opaque(def_id, _) = *t.kind() {
+ if def_id == self.def_id.to_def_id() {
+ return ControlFlow::Break(());
}
}
- if ty
- .visit_with(&mut RecursionChecker { def_id: opaque_type_key.def_id })
- .is_break()
- {
- return;
- }
- Some(ty)
+ t.super_visit_with(self)
}
- hir::OpaqueTyOrigin::TyAlias => None,
- };
+ }
+ if hidden_type
+ .visit_with(&mut RecursionChecker { def_id: opaque_type_key.def_id })
+ .is_break()
+ {
+ continue;
+ }
+
+ let hidden_type = hidden_type.remap_generic_params_to_declaration_params(
+ opaque_type_key,
+ self.fcx.infcx.tcx,
+ true,
+ decl.origin,
+ );
+
self.typeck_results.concrete_opaque_types.insert(opaque_type_key.def_id, hidden_type);
}
}
@@ -667,7 +704,7 @@ impl Locatable for hir::HirId {
/// unresolved types and so forth.
struct Resolver<'cx, 'tcx> {
tcx: TyCtxt<'tcx>,
- infcx: &'cx InferCtxt<'cx, 'tcx>,
+ infcx: &'cx InferCtxt<'tcx>,
span: &'cx dyn Locatable,
body: &'tcx hir::Body<'tcx>,
@@ -684,27 +721,14 @@ impl<'cx, 'tcx> Resolver<'cx, 'tcx> {
Resolver { tcx: fcx.tcx, infcx: fcx, span, body, replaced_with_error: false }
}
- fn report_type_error(&self, t: Ty<'tcx>) {
+ fn report_error(&self, p: impl Into<ty::GenericArg<'tcx>>) {
if !self.tcx.sess.has_errors().is_some() {
self.infcx
+ .err_ctxt()
.emit_inference_failure_err(
Some(self.body.id()),
self.span.to_span(self.tcx),
- t.into(),
- E0282,
- false,
- )
- .emit();
- }
- }
-
- fn report_const_error(&self, c: ty::Const<'tcx>) {
- if self.tcx.sess.has_errors().is_none() {
- self.infcx
- .emit_inference_failure_err(
- Some(self.body.id()),
- self.span.to_span(self.tcx),
- c.into(),
+ p.into(),
E0282,
false,
)
@@ -749,7 +773,7 @@ impl<'cx, 'tcx> TypeFolder<'tcx> for Resolver<'cx, 'tcx> {
}
Err(_) => {
debug!("Resolver::fold_ty: input type `{:?}` not fully resolvable", t);
- self.report_type_error(t);
+ self.report_error(t);
self.replaced_with_error = true;
self.tcx().ty_error()
}
@@ -766,7 +790,7 @@ impl<'cx, 'tcx> TypeFolder<'tcx> for Resolver<'cx, 'tcx> {
Ok(ct) => self.tcx.erase_regions(ct),
Err(_) => {
debug!("Resolver::fold_const: input const `{:?}` not fully resolvable", ct);
- self.report_const_error(ct);
+ self.report_error(ct);
self.replaced_with_error = true;
self.tcx().const_error(ct.ty())
}
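The `is_builtin_index` helper added above only downgrades an index expression to builtin indexing when the base's element type matches the resolved type of the whole expression and the index is `usize`. A plain-Rust illustration of the two shapes involved (nothing here is compiler code):

    use std::ops::Index;

    struct Matrix {
        data: Vec<f64>,
        cols: usize,
    }

    impl Index<(usize, usize)> for Matrix {
        type Output = f64;
        fn index(&self, (r, c): (usize, usize)) -> &f64 {
            &self.data[r * self.cols + c]
        }
    }

    fn main() {
        let xs: &[i32] = &[10, 20, 30];
        let i: usize = 1;
        // Builtin indexing: `[T]` indexed by `usize` needs no `Index::index` call.
        let a = xs[i];

        let m = Matrix { data: vec![0.0; 4], cols: 2 };
        // Overloaded indexing: this stays a call to
        // `<Matrix as Index<(usize, usize)>>::index`.
        let b = m[(1, 1)];
        println!("{a} {b}");
    }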
diff --git a/compiler/rustc_incremental/Cargo.toml b/compiler/rustc_incremental/Cargo.toml
index d3c425a07..179e85f32 100644
--- a/compiler/rustc_incremental/Cargo.toml
+++ b/compiler/rustc_incremental/Cargo.toml
@@ -4,7 +4,6 @@ version = "0.0.0"
edition = "2021"
[lib]
-doctest = false
[dependencies]
rustc_graphviz = { path = "../rustc_graphviz" }
diff --git a/compiler/rustc_incremental/src/lib.rs b/compiler/rustc_incremental/src/lib.rs
index 1e88e8091..83dd9a67e 100644
--- a/compiler/rustc_incremental/src/lib.rs
+++ b/compiler/rustc_incremental/src/lib.rs
@@ -2,7 +2,6 @@
#![deny(missing_docs)]
#![doc(html_root_url = "https://doc.rust-lang.org/nightly/nightly-rustc/")]
-#![feature(let_else)]
#![recursion_limit = "256"]
#![allow(rustc::potential_query_instability)]
diff --git a/compiler/rustc_incremental/src/persist/dirty_clean.rs b/compiler/rustc_incremental/src/persist/dirty_clean.rs
index 710c4a01b..79e2d371e 100644
--- a/compiler/rustc_incremental/src/persist/dirty_clean.rs
+++ b/compiler/rustc_incremental/src/persist/dirty_clean.rs
@@ -149,19 +149,19 @@ pub fn check_dirty_clean_annotations(tcx: TyCtxt<'_>) {
let crate_items = tcx.hir_crate_items(());
for id in crate_items.items() {
- dirty_clean_visitor.check_item(id.def_id);
+ dirty_clean_visitor.check_item(id.owner_id.def_id);
}
for id in crate_items.trait_items() {
- dirty_clean_visitor.check_item(id.def_id);
+ dirty_clean_visitor.check_item(id.owner_id.def_id);
}
for id in crate_items.impl_items() {
- dirty_clean_visitor.check_item(id.def_id);
+ dirty_clean_visitor.check_item(id.owner_id.def_id);
}
for id in crate_items.foreign_items() {
- dirty_clean_visitor.check_item(id.def_id);
+ dirty_clean_visitor.check_item(id.owner_id.def_id);
}
let mut all_attrs = FindAllAttrs { tcx, found_attrs: vec![] };
@@ -302,7 +302,7 @@ impl<'tcx> DirtyCleanVisitor<'tcx> {
HirNode::ImplItem(item) => match item.kind {
ImplItemKind::Fn(..) => ("Node::ImplItem", LABELS_FN_IN_IMPL),
ImplItemKind::Const(..) => ("NodeImplConst", LABELS_CONST_IN_IMPL),
- ImplItemKind::TyAlias(..) => ("NodeImplType", LABELS_CONST_IN_IMPL),
+ ImplItemKind::Type(..) => ("NodeImplType", LABELS_CONST_IN_IMPL),
},
_ => self.tcx.sess.span_fatal(
attr.span,
diff --git a/compiler/rustc_index/Cargo.toml b/compiler/rustc_index/Cargo.toml
index 8a81a93a9..d8ea5aa80 100644
--- a/compiler/rustc_index/Cargo.toml
+++ b/compiler/rustc_index/Cargo.toml
@@ -4,7 +4,6 @@ version = "0.0.0"
edition = "2021"
[lib]
-doctest = false
[dependencies]
arrayvec = { version = "0.7", default-features = false }
diff --git a/compiler/rustc_index/src/lib.rs b/compiler/rustc_index/src/lib.rs
index 33c3c536f..23a4c1f06 100644
--- a/compiler/rustc_index/src/lib.rs
+++ b/compiler/rustc_index/src/lib.rs
@@ -1,7 +1,7 @@
+#![deny(rustc::untranslatable_diagnostic)]
+#![deny(rustc::diagnostic_outside_of_impl)]
#![feature(allow_internal_unstable)]
-#![feature(bench_black_box)]
#![feature(extend_one)]
-#![feature(let_else)]
#![feature(min_specialization)]
#![feature(new_uninit)]
#![feature(step_trait)]
diff --git a/compiler/rustc_index/src/vec.rs b/compiler/rustc_index/src/vec.rs
index 30ff36421..4172ce3bb 100644
--- a/compiler/rustc_index/src/vec.rs
+++ b/compiler/rustc_index/src/vec.rs
@@ -172,7 +172,9 @@ impl<I: Idx, T> IndexVec<I, T> {
}
#[inline]
- pub fn indices(&self) -> impl DoubleEndedIterator<Item = I> + ExactSizeIterator + 'static {
+ pub fn indices(
+ &self,
+ ) -> impl DoubleEndedIterator<Item = I> + ExactSizeIterator + Clone + 'static {
(0..self.len()).map(|n| I::new(n))
}
diff --git a/compiler/rustc_infer/Cargo.toml b/compiler/rustc_infer/Cargo.toml
index 02ac83a5e..aced787d6 100644
--- a/compiler/rustc_infer/Cargo.toml
+++ b/compiler/rustc_infer/Cargo.toml
@@ -15,6 +15,7 @@ rustc_hir = { path = "../rustc_hir" }
rustc_index = { path = "../rustc_index" }
rustc_macros = { path = "../rustc_macros" }
rustc_serialize = { path = "../rustc_serialize" }
+rustc_session = { path = "../rustc_session" }
rustc_span = { path = "../rustc_span" }
rustc_target = { path = "../rustc_target" }
smallvec = { version = "1.8.1", features = ["union", "may_dangle"] }
diff --git a/compiler/rustc_infer/src/errors/mod.rs b/compiler/rustc_infer/src/errors/mod.rs
new file mode 100644
index 000000000..2131d1906
--- /dev/null
+++ b/compiler/rustc_infer/src/errors/mod.rs
@@ -0,0 +1,505 @@
+use hir::GenericParamKind;
+use rustc_errors::{
+ fluent, AddToDiagnostic, Applicability, Diagnostic, DiagnosticMessage, DiagnosticStyledString,
+ MultiSpan, SubdiagnosticMessage,
+};
+use rustc_hir as hir;
+use rustc_hir::{FnRetTy, Ty};
+use rustc_macros::{Diagnostic, Subdiagnostic};
+use rustc_middle::ty::{Region, TyCtxt};
+use rustc_span::symbol::kw;
+use rustc_span::{symbol::Ident, BytePos, Span};
+
+use crate::infer::error_reporting::{
+ need_type_info::{GeneratorKindAsDiagArg, UnderspecifiedArgKind},
+ ObligationCauseAsDiagArg,
+};
+
+pub mod note_and_explain;
+
+#[derive(Diagnostic)]
+#[diag(infer_opaque_hidden_type)]
+pub struct OpaqueHiddenTypeDiag {
+ #[primary_span]
+ #[label]
+ pub span: Span,
+ #[note(opaque_type)]
+ pub opaque_type: Span,
+ #[note(hidden_type)]
+ pub hidden_type: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(infer_type_annotations_needed, code = "E0282")]
+pub struct AnnotationRequired<'a> {
+ #[primary_span]
+ pub span: Span,
+ pub source_kind: &'static str,
+ pub source_name: &'a str,
+ #[label]
+ pub failure_span: Option<Span>,
+ #[subdiagnostic]
+ pub bad_label: Option<InferenceBadError<'a>>,
+ #[subdiagnostic]
+ pub infer_subdiags: Vec<SourceKindSubdiag<'a>>,
+ #[subdiagnostic]
+ pub multi_suggestions: Vec<SourceKindMultiSuggestion<'a>>,
+}
+
+// Copy of `AnnotationRequired` for E0283
+#[derive(Diagnostic)]
+#[diag(infer_type_annotations_needed, code = "E0283")]
+pub struct AmbigousImpl<'a> {
+ #[primary_span]
+ pub span: Span,
+ pub source_kind: &'static str,
+ pub source_name: &'a str,
+ #[label]
+ pub failure_span: Option<Span>,
+ #[subdiagnostic]
+ pub bad_label: Option<InferenceBadError<'a>>,
+ #[subdiagnostic]
+ pub infer_subdiags: Vec<SourceKindSubdiag<'a>>,
+ #[subdiagnostic]
+ pub multi_suggestions: Vec<SourceKindMultiSuggestion<'a>>,
+}
+
+// Copy of `AnnotationRequired` for E0284
+#[derive(Diagnostic)]
+#[diag(infer_type_annotations_needed, code = "E0284")]
+pub struct AmbigousReturn<'a> {
+ #[primary_span]
+ pub span: Span,
+ pub source_kind: &'static str,
+ pub source_name: &'a str,
+ #[label]
+ pub failure_span: Option<Span>,
+ #[subdiagnostic]
+ pub bad_label: Option<InferenceBadError<'a>>,
+ #[subdiagnostic]
+ pub infer_subdiags: Vec<SourceKindSubdiag<'a>>,
+ #[subdiagnostic]
+ pub multi_suggestions: Vec<SourceKindMultiSuggestion<'a>>,
+}
+
+#[derive(Diagnostic)]
+#[diag(infer_need_type_info_in_generator, code = "E0698")]
+pub struct NeedTypeInfoInGenerator<'a> {
+ #[primary_span]
+ pub span: Span,
+ pub generator_kind: GeneratorKindAsDiagArg,
+ #[subdiagnostic]
+ pub bad_label: InferenceBadError<'a>,
+}
+
+// Used when a better one isn't available
+#[derive(Subdiagnostic)]
+#[label(infer_label_bad)]
+pub struct InferenceBadError<'a> {
+ #[primary_span]
+ pub span: Span,
+ pub bad_kind: &'static str,
+ pub prefix_kind: UnderspecifiedArgKind,
+ pub has_parent: bool,
+ pub prefix: &'a str,
+ pub parent_prefix: &'a str,
+ pub parent_name: String,
+ pub name: String,
+}
+
+#[derive(Subdiagnostic)]
+pub enum SourceKindSubdiag<'a> {
+ #[suggestion_verbose(
+ infer_source_kind_subdiag_let,
+ code = ": {type_name}",
+ applicability = "has-placeholders"
+ )]
+ LetLike {
+ #[primary_span]
+ span: Span,
+ name: String,
+ type_name: String,
+ kind: &'static str,
+ x_kind: &'static str,
+ prefix_kind: UnderspecifiedArgKind,
+ prefix: &'a str,
+ arg_name: String,
+ },
+ #[label(infer_source_kind_subdiag_generic_label)]
+ GenericLabel {
+ #[primary_span]
+ span: Span,
+ is_type: bool,
+ param_name: String,
+ parent_exists: bool,
+ parent_prefix: String,
+ parent_name: String,
+ },
+ #[suggestion_verbose(
+ infer_source_kind_subdiag_generic_suggestion,
+ code = "::<{args}>",
+ applicability = "has-placeholders"
+ )]
+ GenericSuggestion {
+ #[primary_span]
+ span: Span,
+ arg_count: usize,
+ args: String,
+ },
+}
+
+#[derive(Subdiagnostic)]
+pub enum SourceKindMultiSuggestion<'a> {
+ #[multipart_suggestion_verbose(
+ infer_source_kind_fully_qualified,
+ applicability = "has-placeholders"
+ )]
+ FullyQualified {
+ #[suggestion_part(code = "{def_path}({adjustment}")]
+ span_lo: Span,
+ #[suggestion_part(code = "{successor_pos}")]
+ span_hi: Span,
+ def_path: String,
+ adjustment: &'a str,
+ successor_pos: &'a str,
+ },
+ #[multipart_suggestion_verbose(
+ infer_source_kind_closure_return,
+ applicability = "has-placeholders"
+ )]
+ ClosureReturn {
+ #[suggestion_part(code = "{start_span_code}")]
+ start_span: Span,
+ start_span_code: String,
+ #[suggestion_part(code = " }}")]
+ end_span: Option<Span>,
+ },
+}
+
+impl<'a> SourceKindMultiSuggestion<'a> {
+ pub fn new_fully_qualified(
+ span: Span,
+ def_path: String,
+ adjustment: &'a str,
+ successor: (&'a str, BytePos),
+ ) -> Self {
+ Self::FullyQualified {
+ span_lo: span.shrink_to_lo(),
+ span_hi: span.shrink_to_hi().with_hi(successor.1),
+ def_path,
+ adjustment,
+ successor_pos: successor.0,
+ }
+ }
+
+ pub fn new_closure_return(
+ ty_info: String,
+ data: &'a FnRetTy<'a>,
+ should_wrap_expr: Option<Span>,
+ ) -> Self {
+ let (arrow, post) = match data {
+ FnRetTy::DefaultReturn(_) => ("-> ", " "),
+ _ => ("", ""),
+ };
+ let (start_span, start_span_code, end_span) = match should_wrap_expr {
+ Some(end_span) => {
+ (data.span(), format!("{}{}{}{{ ", arrow, ty_info, post), Some(end_span))
+ }
+ None => (data.span(), format!("{}{}{}", arrow, ty_info, post), None),
+ };
+ Self::ClosureReturn { start_span, start_span_code, end_span }
+ }
+}
+
+pub enum RegionOriginNote<'a> {
+ Plain {
+ span: Span,
+ msg: DiagnosticMessage,
+ },
+ WithName {
+ span: Span,
+ msg: DiagnosticMessage,
+ name: &'a str,
+ continues: bool,
+ },
+ WithRequirement {
+ span: Span,
+ requirement: ObligationCauseAsDiagArg<'a>,
+ expected_found: Option<(DiagnosticStyledString, DiagnosticStyledString)>,
+ },
+}
+
+impl AddToDiagnostic for RegionOriginNote<'_> {
+ fn add_to_diagnostic_with<F>(self, diag: &mut Diagnostic, _: F)
+ where
+ F: Fn(&mut Diagnostic, SubdiagnosticMessage) -> SubdiagnosticMessage,
+ {
+ let mut label_or_note = |span, msg: DiagnosticMessage| {
+ let sub_count = diag.children.iter().filter(|d| d.span.is_dummy()).count();
+ let expanded_sub_count = diag.children.iter().filter(|d| !d.span.is_dummy()).count();
+ let span_is_primary = diag.span.primary_spans().iter().all(|&sp| sp == span);
+ if span_is_primary && sub_count == 0 && expanded_sub_count == 0 {
+ diag.span_label(span, msg);
+ } else if span_is_primary && expanded_sub_count == 0 {
+ diag.note(msg);
+ } else {
+ diag.span_note(span, msg);
+ }
+ };
+ match self {
+ RegionOriginNote::Plain { span, msg } => {
+ label_or_note(span, msg);
+ }
+ RegionOriginNote::WithName { span, msg, name, continues } => {
+ label_or_note(span, msg);
+ diag.set_arg("name", name);
+ diag.set_arg("continues", continues);
+ }
+ RegionOriginNote::WithRequirement {
+ span,
+ requirement,
+ expected_found: Some((expected, found)),
+ } => {
+ label_or_note(span, fluent::infer_subtype);
+ diag.set_arg("requirement", requirement);
+
+ diag.note_expected_found(&"", expected, &"", found);
+ }
+ RegionOriginNote::WithRequirement { span, requirement, expected_found: None } => {
+ // FIXME: this really should be handled at some earlier stage. Our
+ // handling of region checking when type errors are present is
+ // *terrible*.
+ label_or_note(span, fluent::infer_subtype_2);
+ diag.set_arg("requirement", requirement);
+ }
+ };
+ }
+}
+
+pub enum LifetimeMismatchLabels {
+ InRet {
+ param_span: Span,
+ ret_span: Span,
+ span: Span,
+ label_var1: Option<Ident>,
+ },
+ Normal {
+ hir_equal: bool,
+ ty_sup: Span,
+ ty_sub: Span,
+ span: Span,
+ sup: Option<Ident>,
+ sub: Option<Ident>,
+ },
+}
+
+impl AddToDiagnostic for LifetimeMismatchLabels {
+ fn add_to_diagnostic_with<F>(self, diag: &mut Diagnostic, _: F)
+ where
+ F: Fn(&mut Diagnostic, SubdiagnosticMessage) -> SubdiagnosticMessage,
+ {
+ match self {
+ LifetimeMismatchLabels::InRet { param_span, ret_span, span, label_var1 } => {
+ diag.span_label(param_span, fluent::infer_declared_different);
+ diag.span_label(ret_span, fluent::infer_nothing);
+ diag.span_label(span, fluent::infer_data_returned);
+ diag.set_arg("label_var1_exists", label_var1.is_some());
+ diag.set_arg("label_var1", label_var1.map(|x| x.to_string()).unwrap_or_default());
+ }
+ LifetimeMismatchLabels::Normal {
+ hir_equal,
+ ty_sup,
+ ty_sub,
+ span,
+ sup: label_var1,
+ sub: label_var2,
+ } => {
+ if hir_equal {
+ diag.span_label(ty_sup, fluent::infer_declared_multiple);
+ diag.span_label(ty_sub, fluent::infer_nothing);
+ diag.span_label(span, fluent::infer_data_lifetime_flow);
+ } else {
+ diag.span_label(ty_sup, fluent::infer_types_declared_different);
+ diag.span_label(ty_sub, fluent::infer_nothing);
+ diag.span_label(span, fluent::infer_data_flows);
+ diag.set_arg("label_var1_exists", label_var1.is_some());
+ diag.set_arg(
+ "label_var1",
+ label_var1.map(|x| x.to_string()).unwrap_or_default(),
+ );
+ diag.set_arg("label_var2_exists", label_var2.is_some());
+ diag.set_arg(
+ "label_var2",
+ label_var2.map(|x| x.to_string()).unwrap_or_default(),
+ );
+ }
+ }
+ }
+ }
+}
+
+pub struct AddLifetimeParamsSuggestion<'a> {
+ pub tcx: TyCtxt<'a>,
+ pub sub: Region<'a>,
+ pub ty_sup: &'a Ty<'a>,
+ pub ty_sub: &'a Ty<'a>,
+ pub add_note: bool,
+}
+
+impl AddToDiagnostic for AddLifetimeParamsSuggestion<'_> {
+ fn add_to_diagnostic_with<F>(self, diag: &mut Diagnostic, _: F)
+ where
+ F: Fn(&mut Diagnostic, SubdiagnosticMessage) -> SubdiagnosticMessage,
+ {
+ let mut mk_suggestion = || {
+ let (
+ hir::Ty { kind: hir::TyKind::Rptr(lifetime_sub, _), .. },
+ hir::Ty { kind: hir::TyKind::Rptr(lifetime_sup, _), .. },
+ ) = (self.ty_sub, self.ty_sup) else {
+ return false;
+ };
+
+ if !lifetime_sub.name.is_anonymous() || !lifetime_sup.name.is_anonymous() {
+ return false;
+ };
+
+ let Some(anon_reg) = self.tcx.is_suitable_region(self.sub) else {
+ return false;
+ };
+
+ let hir_id = self.tcx.hir().local_def_id_to_hir_id(anon_reg.def_id);
+
+ let node = self.tcx.hir().get(hir_id);
+ let is_impl = matches!(&node, hir::Node::ImplItem(_));
+ let generics = match node {
+ hir::Node::Item(&hir::Item {
+ kind: hir::ItemKind::Fn(_, ref generics, ..),
+ ..
+ })
+ | hir::Node::TraitItem(&hir::TraitItem { ref generics, .. })
+ | hir::Node::ImplItem(&hir::ImplItem { ref generics, .. }) => generics,
+ _ => return false,
+ };
+
+ let suggestion_param_name = generics
+ .params
+ .iter()
+ .filter(|p| matches!(p.kind, GenericParamKind::Lifetime { .. }))
+ .map(|p| p.name.ident().name)
+ .find(|i| *i != kw::UnderscoreLifetime);
+ let introduce_new = suggestion_param_name.is_none();
+ let suggestion_param_name =
+ suggestion_param_name.map(|n| n.to_string()).unwrap_or_else(|| "'a".to_owned());
+
+ debug!(?lifetime_sup.span);
+ debug!(?lifetime_sub.span);
+ let make_suggestion = |span: rustc_span::Span| {
+ if span.is_empty() {
+ (span, format!("{}, ", suggestion_param_name))
+ } else if let Ok("&") = self.tcx.sess.source_map().span_to_snippet(span).as_deref()
+ {
+ (span.shrink_to_hi(), format!("{} ", suggestion_param_name))
+ } else {
+ (span, suggestion_param_name.clone())
+ }
+ };
+ let mut suggestions =
+ vec![make_suggestion(lifetime_sub.span), make_suggestion(lifetime_sup.span)];
+
+ if introduce_new {
+ let new_param_suggestion = if let Some(first) =
+ generics.params.iter().find(|p| !p.name.ident().span.is_empty())
+ {
+ (first.span.shrink_to_lo(), format!("{}, ", suggestion_param_name))
+ } else {
+ (generics.span, format!("<{}>", suggestion_param_name))
+ };
+
+ suggestions.push(new_param_suggestion);
+ }
+
+ diag.multipart_suggestion(
+ fluent::infer_lifetime_param_suggestion,
+ suggestions,
+ Applicability::MaybeIncorrect,
+ );
+ diag.set_arg("is_impl", is_impl);
+ true
+ };
+ if mk_suggestion() && self.add_note {
+ diag.note(fluent::infer_lifetime_param_suggestion_elided);
+ }
+ }
+}
+
+#[derive(Diagnostic)]
+#[diag(infer_lifetime_mismatch, code = "E0623")]
+pub struct LifetimeMismatch<'a> {
+ #[primary_span]
+ pub span: Span,
+ #[subdiagnostic]
+ pub labels: LifetimeMismatchLabels,
+ #[subdiagnostic]
+ pub suggestion: AddLifetimeParamsSuggestion<'a>,
+}
+
+pub struct IntroducesStaticBecauseUnmetLifetimeReq {
+ pub unmet_requirements: MultiSpan,
+ pub binding_span: Span,
+}
+
+impl AddToDiagnostic for IntroducesStaticBecauseUnmetLifetimeReq {
+ fn add_to_diagnostic_with<F>(mut self, diag: &mut Diagnostic, _: F)
+ where
+ F: Fn(&mut Diagnostic, SubdiagnosticMessage) -> SubdiagnosticMessage,
+ {
+ self.unmet_requirements
+ .push_span_label(self.binding_span, fluent::infer_msl_introduces_static);
+ diag.span_note(self.unmet_requirements, fluent::infer_msl_unmet_req);
+ }
+}
+
+// FIXME(#100717): replace with an `Option<Span>` when subdiagnostic supports that
+#[derive(Subdiagnostic)]
+pub enum DoesNotOutliveStaticFromImpl {
+ #[note(infer_does_not_outlive_static_from_impl)]
+ Spanned {
+ #[primary_span]
+ span: Span,
+ },
+ #[note(infer_does_not_outlive_static_from_impl)]
+ Unspanned,
+}
+
+#[derive(Subdiagnostic)]
+pub enum ImplicitStaticLifetimeSubdiag {
+ #[note(infer_implicit_static_lifetime_note)]
+ Note {
+ #[primary_span]
+ span: Span,
+ },
+ #[suggestion_verbose(
+ infer_implicit_static_lifetime_suggestion,
+ code = " + '_",
+ applicability = "maybe-incorrect"
+ )]
+ Sugg {
+ #[primary_span]
+ span: Span,
+ },
+}
+
+#[derive(Diagnostic)]
+#[diag(infer_mismatched_static_lifetime)]
+pub struct MismatchedStaticLifetime<'a> {
+ #[primary_span]
+ pub cause_span: Span,
+ #[subdiagnostic]
+ pub unmet_lifetime_reqs: IntroducesStaticBecauseUnmetLifetimeReq,
+ #[subdiagnostic]
+ pub expl: Option<note_and_explain::RegionExplanation<'a>>,
+ #[subdiagnostic]
+ pub does_not_outlive_static_from_impl: DoesNotOutliveStaticFromImpl,
+ #[subdiagnostic(eager)]
+ pub implicit_static_lifetimes: Vec<ImplicitStaticLifetimeSubdiag>,
+}
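The new module follows the diagnostic-struct pattern: each error is a plain struct, `#[diag(...)]` names a Fluent slug, and field attributes such as `#[primary_span]`, `#[label]` and `#[subdiagnostic]` describe how the fields are rendered. As a rough sketch of an emission site for one of the structs above (the surrounding function is made up, `OpaqueHiddenTypeDiag` is assumed to be in scope from this module, and the snippet only builds inside the compiler tree against the rustc-internal `Session::emit_err`):

    use rustc_middle::ty::TyCtxt;
    use rustc_span::Span;

    // Hypothetical caller somewhere in rustc_infer: filling in the struct and
    // handing it to the session is all that is needed, because the `Diagnostic`
    // derive generates the `IntoDiagnostic` impl.
    fn report_opaque_hidden_type(tcx: TyCtxt<'_>, span: Span, opaque_type: Span, hidden_type: Span) {
        tcx.sess.emit_err(OpaqueHiddenTypeDiag { span, opaque_type, hidden_type });
    }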
diff --git a/compiler/rustc_infer/src/errors/note_and_explain.rs b/compiler/rustc_infer/src/errors/note_and_explain.rs
new file mode 100644
index 000000000..6a29d8562
--- /dev/null
+++ b/compiler/rustc_infer/src/errors/note_and_explain.rs
@@ -0,0 +1,177 @@
+use crate::infer::error_reporting::nice_region_error::find_anon_type;
+use rustc_errors::{
+ self, fluent, AddToDiagnostic, Diagnostic, IntoDiagnosticArg, SubdiagnosticMessage,
+};
+use rustc_middle::ty::{self, TyCtxt};
+use rustc_span::{symbol::kw, Span};
+
+#[derive(Default)]
+struct DescriptionCtx<'a> {
+ span: Option<Span>,
+ kind: &'a str,
+ arg: String,
+ num_arg: u32,
+}
+
+impl<'a> DescriptionCtx<'a> {
+ fn new<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ region: ty::Region<'tcx>,
+ alt_span: Option<Span>,
+ ) -> Option<Self> {
+ let mut me = DescriptionCtx::default();
+ me.span = alt_span;
+ match *region {
+ ty::ReEarlyBound(_) | ty::ReFree(_) => {
+ return Self::from_early_bound_and_free_regions(tcx, region);
+ }
+ ty::ReStatic => {
+ me.kind = "restatic";
+ }
+
+ ty::RePlaceholder(_) => return None,
+
+ // FIXME(#13998) RePlaceholder should probably print like
+ // ReFree rather than dumping Debug output on the user.
+ //
+ // We shouldn't really be having unification failures with ReVar
+ // and ReLateBound though.
+ ty::ReVar(_) | ty::ReLateBound(..) | ty::ReErased => {
+ me.kind = "revar";
+ me.arg = format!("{:?}", region);
+ }
+ };
+ Some(me)
+ }
+
+ fn from_early_bound_and_free_regions<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ region: ty::Region<'tcx>,
+ ) -> Option<Self> {
+ let mut me = DescriptionCtx::default();
+ let scope = region.free_region_binding_scope(tcx).expect_local();
+ match *region {
+ ty::ReEarlyBound(ref br) => {
+ let mut sp = tcx.def_span(scope);
+ if let Some(param) =
+ tcx.hir().get_generics(scope).and_then(|generics| generics.get_named(br.name))
+ {
+ sp = param.span;
+ }
+ if br.has_name() {
+ me.kind = "as_defined";
+ me.arg = br.name.to_string();
+ } else {
+ me.kind = "as_defined_anon";
+ };
+ me.span = Some(sp)
+ }
+ ty::ReFree(ref fr) => {
+ if !fr.bound_region.is_named()
+ && let Some((ty, _)) = find_anon_type(tcx, region, &fr.bound_region)
+ {
+ me.kind = "defined_here";
+ me.span = Some(ty.span);
+ } else {
+ match fr.bound_region {
+ ty::BoundRegionKind::BrNamed(_, name) => {
+ let mut sp = tcx.def_span(scope);
+ if let Some(param) =
+ tcx.hir().get_generics(scope).and_then(|generics| generics.get_named(name))
+ {
+ sp = param.span;
+ }
+ if name == kw::UnderscoreLifetime {
+ me.kind = "as_defined_anon";
+ } else {
+ me.kind = "as_defined";
+ me.arg = name.to_string();
+ };
+ me.span = Some(sp);
+ }
+ ty::BrAnon(idx) => {
+ me.kind = "anon_num_here";
+ me.num_arg = idx+1;
+ me.span = Some(tcx.def_span(scope));
+ },
+ _ => {
+ me.kind = "defined_here_reg";
+ me.arg = region.to_string();
+ me.span = Some(tcx.def_span(scope));
+ },
+ }
+ }
+ }
+ _ => bug!(),
+ }
+ Some(me)
+ }
+
+ fn add_to(self, diag: &mut rustc_errors::Diagnostic) {
+ diag.set_arg("desc_kind", self.kind);
+ diag.set_arg("desc_arg", self.arg);
+ diag.set_arg("desc_num_arg", self.num_arg);
+ }
+}
+
+pub enum PrefixKind {
+ Empty,
+}
+
+pub enum SuffixKind {
+ Continues,
+}
+
+impl IntoDiagnosticArg for PrefixKind {
+ fn into_diagnostic_arg(self) -> rustc_errors::DiagnosticArgValue<'static> {
+ let kind = match self {
+ Self::Empty => "empty",
+ }
+ .into();
+ rustc_errors::DiagnosticArgValue::Str(kind)
+ }
+}
+
+impl IntoDiagnosticArg for SuffixKind {
+ fn into_diagnostic_arg(self) -> rustc_errors::DiagnosticArgValue<'static> {
+ let kind = match self {
+ Self::Continues => "continues",
+ }
+ .into();
+ rustc_errors::DiagnosticArgValue::Str(kind)
+ }
+}
+
+pub struct RegionExplanation<'a> {
+ desc: DescriptionCtx<'a>,
+ prefix: PrefixKind,
+ suffix: SuffixKind,
+}
+
+impl RegionExplanation<'_> {
+ pub fn new<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ region: ty::Region<'tcx>,
+ alt_span: Option<Span>,
+ prefix: PrefixKind,
+ suffix: SuffixKind,
+ ) -> Option<Self> {
+ Some(Self { desc: DescriptionCtx::new(tcx, region, alt_span)?, prefix, suffix })
+ }
+}
+
+impl AddToDiagnostic for RegionExplanation<'_> {
+ fn add_to_diagnostic_with<F>(self, diag: &mut Diagnostic, _: F)
+ where
+ F: Fn(&mut Diagnostic, SubdiagnosticMessage) -> SubdiagnosticMessage,
+ {
+ if let Some(span) = self.desc.span {
+ diag.span_note(span, fluent::infer_region_explanation);
+ } else {
+ diag.note(fluent::infer_region_explanation);
+ }
+ self.desc.add_to(diag);
+ diag.set_arg("pref_kind", self.prefix);
+ diag.set_arg("suff_kind", self.suffix);
+ }
+}
diff --git a/compiler/rustc_infer/src/infer/at.rs b/compiler/rustc_infer/src/infer/at.rs
index 130214a65..5ff3779fa 100644
--- a/compiler/rustc_infer/src/infer/at.rs
+++ b/compiler/rustc_infer/src/infer/at.rs
@@ -31,7 +31,7 @@ use rustc_middle::ty::relate::{Relate, TypeRelation};
use rustc_middle::ty::{Const, ImplSubject};
pub struct At<'a, 'tcx> {
- pub infcx: &'a InferCtxt<'a, 'tcx>,
+ pub infcx: &'a InferCtxt<'tcx>,
pub cause: &'a ObligationCause<'tcx>,
pub param_env: ty::ParamEnv<'tcx>,
/// Whether we should define opaque types
@@ -48,9 +48,9 @@ pub struct Trace<'a, 'tcx> {
trace: TypeTrace<'tcx>,
}
-impl<'a, 'tcx> InferCtxt<'a, 'tcx> {
+impl<'tcx> InferCtxt<'tcx> {
#[inline]
- pub fn at(
+ pub fn at<'a>(
&'a self,
cause: &'a ObligationCause<'tcx>,
param_env: ty::ParamEnv<'tcx>,
@@ -66,7 +66,6 @@ impl<'a, 'tcx> InferCtxt<'a, 'tcx> {
tcx: self.tcx,
defining_use_anchor: self.defining_use_anchor,
considering_regions: self.considering_regions,
- in_progress_typeck_results: self.in_progress_typeck_results,
inner: self.inner.clone(),
skip_leak_check: self.skip_leak_check.clone(),
lexical_region_resolutions: self.lexical_region_resolutions.clone(),
@@ -74,10 +73,14 @@ impl<'a, 'tcx> InferCtxt<'a, 'tcx> {
evaluation_cache: self.evaluation_cache.clone(),
reported_trait_errors: self.reported_trait_errors.clone(),
reported_closure_mismatch: self.reported_closure_mismatch.clone(),
- tainted_by_errors_flag: self.tainted_by_errors_flag.clone(),
+ tainted_by_errors: self.tainted_by_errors.clone(),
err_count_on_creation: self.err_count_on_creation,
in_snapshot: self.in_snapshot.clone(),
universe: self.universe.clone(),
+ normalize_fn_sig_for_diagnostic: self
+ .normalize_fn_sig_for_diagnostic
+ .as_ref()
+ .map(|f| f.clone()),
}
}
}
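The hunks above are part of dropping the extra `'a` parameter from `InferCtxt<'a, 'tcx>`: the inference context no longer borrows in-progress typeck results, so only methods such as `at` need a caller-chosen lifetime. The same refactoring in miniature, with made-up names:

    use std::marker::PhantomData;

    // Previously this would have been `Ctx<'a, 'tcx>`, carrying `'a` even though
    // only the `at` method actually borrowed anything for that long.
    struct Ctx<'tcx> {
        _marker: PhantomData<&'tcx ()>,
    }

    struct At<'a, 'tcx> {
        ctx: &'a Ctx<'tcx>,
        cause: &'a str,
    }

    impl<'tcx> Ctx<'tcx> {
        // The lifetime now lives on the method, introduced only where it is needed.
        fn at<'a>(&'a self, cause: &'a str) -> At<'a, 'tcx> {
            At { ctx: self, cause }
        }
    }

    fn main() {
        let ctx = Ctx { _marker: PhantomData };
        let at = ctx.at("obligation cause");
        let _ = at.ctx;
        println!("{}", at.cause);
    }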
diff --git a/compiler/rustc_infer/src/infer/canonical/canonicalizer.rs b/compiler/rustc_infer/src/infer/canonical/canonicalizer.rs
index ca7862c9d..a3ff70363 100644
--- a/compiler/rustc_infer/src/infer/canonical/canonicalizer.rs
+++ b/compiler/rustc_infer/src/infer/canonical/canonicalizer.rs
@@ -20,7 +20,7 @@ use rustc_data_structures::fx::FxHashMap;
use rustc_index::vec::Idx;
use smallvec::SmallVec;
-impl<'cx, 'tcx> InferCtxt<'cx, 'tcx> {
+impl<'tcx> InferCtxt<'tcx> {
/// Canonicalizes a query value `V`. When we canonicalize a query,
/// we not only canonicalize unbound inference variables, but we
/// *also* replace all free regions whatsoever. So for example a
@@ -180,11 +180,7 @@ impl CanonicalizeMode for CanonicalizeQueryResponse {
r: ty::Region<'tcx>,
) -> ty::Region<'tcx> {
match *r {
- ty::ReFree(_)
- | ty::ReErased
- | ty::ReStatic
- | ty::ReEmpty(ty::UniverseIndex::ROOT)
- | ty::ReEarlyBound(..) => r,
+ ty::ReFree(_) | ty::ReErased | ty::ReStatic | ty::ReEarlyBound(..) => r,
ty::RePlaceholder(placeholder) => canonicalizer.canonical_var_for_region(
CanonicalVarInfo { kind: CanonicalVarKind::PlaceholderRegion(placeholder) },
@@ -199,10 +195,6 @@ impl CanonicalizeMode for CanonicalizeQueryResponse {
)
}
- ty::ReEmpty(ui) => {
- bug!("canonicalizing 'empty in universe {:?}", ui) // FIXME
- }
-
_ => {
// Other than `'static` or `'empty`, the query
// response should be executing in a fully
@@ -324,7 +316,7 @@ impl CanonicalizeMode for CanonicalizeFreeRegionsOtherThanStatic {
}
struct Canonicalizer<'cx, 'tcx> {
- infcx: &'cx InferCtxt<'cx, 'tcx>,
+ infcx: &'cx InferCtxt<'tcx>,
tcx: TyCtxt<'tcx>,
variables: SmallVec<[CanonicalVarInfo<'tcx>; 8]>,
query_state: &'cx mut OriginalQueryValues<'tcx>,
@@ -372,7 +364,7 @@ impl<'cx, 'tcx> TypeFolder<'tcx> for Canonicalizer<'cx, 'tcx> {
debug!(
"canonical: region var found with vid {:?}, \
opportunistically resolved to {:?}",
- vid, r
+ vid, resolved_vid
);
let r = self.tcx.reuse_or_mk_region(r, ty::ReVar(resolved_vid));
self.canonicalize_mode.canonicalize_free_region(self, r)
@@ -381,7 +373,6 @@ impl<'cx, 'tcx> TypeFolder<'tcx> for Canonicalizer<'cx, 'tcx> {
ty::ReStatic
| ty::ReEarlyBound(..)
| ty::ReFree(_)
- | ty::ReEmpty(_)
| ty::RePlaceholder(..)
| ty::ReErased => self.canonicalize_mode.canonicalize_free_region(self, r),
}
@@ -530,7 +521,7 @@ impl<'cx, 'tcx> Canonicalizer<'cx, 'tcx> {
/// `canonicalize_query` and `canonicalize_response`.
fn canonicalize<V>(
value: V,
- infcx: &InferCtxt<'_, 'tcx>,
+ infcx: &InferCtxt<'tcx>,
tcx: TyCtxt<'tcx>,
canonicalize_region_mode: &dyn CanonicalizeMode,
query_state: &mut OriginalQueryValues<'tcx>,
diff --git a/compiler/rustc_infer/src/infer/canonical/mod.rs b/compiler/rustc_infer/src/infer/canonical/mod.rs
index a9294a85e..06ca2534d 100644
--- a/compiler/rustc_infer/src/infer/canonical/mod.rs
+++ b/compiler/rustc_infer/src/infer/canonical/mod.rs
@@ -36,7 +36,7 @@ mod canonicalizer;
pub mod query_response;
mod substitute;
-impl<'cx, 'tcx> InferCtxt<'cx, 'tcx> {
+impl<'tcx> InferCtxt<'tcx> {
/// Creates a substitution S for the canonical value with fresh
/// inference variables and applies it to the canonical value.
/// Returns both the instantiated result *and* the substitution S.
diff --git a/compiler/rustc_infer/src/infer/canonical/query_response.rs b/compiler/rustc_infer/src/infer/canonical/query_response.rs
index 8dc20544f..a299a3e57 100644
--- a/compiler/rustc_infer/src/infer/canonical/query_response.rs
+++ b/compiler/rustc_infer/src/infer/canonical/query_response.rs
@@ -16,12 +16,13 @@ use crate::infer::nll_relate::{NormalizationStrategy, TypeRelating, TypeRelating
use crate::infer::region_constraints::{Constraint, RegionConstraintData};
use crate::infer::{InferCtxt, InferOk, InferResult, NllRegionVariableOrigin};
use crate::traits::query::{Fallible, NoSolution};
-use crate::traits::TraitEngine;
use crate::traits::{Obligation, ObligationCause, PredicateObligation};
+use crate::traits::{PredicateObligations, TraitEngine};
use rustc_data_structures::captures::Captures;
use rustc_index::vec::Idx;
use rustc_index::vec::IndexVec;
use rustc_middle::arena::ArenaAllocatable;
+use rustc_middle::mir::ConstraintCategory;
use rustc_middle::ty::error::TypeError;
use rustc_middle::ty::fold::TypeFoldable;
use rustc_middle::ty::relate::TypeRelation;
@@ -31,7 +32,7 @@ use rustc_span::Span;
use std::fmt::Debug;
use std::iter;
-impl<'cx, 'tcx> InferCtxt<'cx, 'tcx> {
+impl<'tcx> InferCtxt<'tcx> {
/// This method is meant to be invoked as the final step of a canonical query
/// implementation. It is given:
///
@@ -63,8 +64,8 @@ impl<'cx, 'tcx> InferCtxt<'cx, 'tcx> {
Canonical<'tcx, QueryResponse<'tcx, T>>: ArenaAllocatable<'tcx>,
{
let query_response = self.make_query_response(inference_vars, answer, fulfill_cx)?;
+ debug!("query_response = {:#?}", query_response);
let canonical_result = self.canonicalize_response(query_response);
-
debug!("canonical_result = {:#?}", canonical_result);
Ok(self.tcx.arena.alloc(canonical_result))
@@ -125,13 +126,17 @@ impl<'cx, 'tcx> InferCtxt<'cx, 'tcx> {
debug!("ambig_errors = {:#?}", ambig_errors);
let region_obligations = self.take_registered_region_obligations();
+ debug!(?region_obligations);
let region_constraints = self.with_region_constraints(|region_constraints| {
make_query_region_constraints(
tcx,
- region_obligations.iter().map(|r_o| (r_o.sup_type, r_o.sub_region)),
+ region_obligations
+ .iter()
+ .map(|r_o| (r_o.sup_type, r_o.sub_region, r_o.origin.to_constraint_category())),
region_constraints,
)
});
+ debug!(?region_constraints);
let certainty =
if ambig_errors.is_empty() { Certainty::Proven } else { Certainty::Ambiguous };
@@ -246,6 +251,8 @@ impl<'cx, 'tcx> InferCtxt<'cx, 'tcx> {
// the original values `v_o` that was canonicalized into a
// variable...
+ let constraint_category = cause.to_constraint_category();
+
for (index, original_value) in original_values.var_values.iter().enumerate() {
// ...with the value `v_r` of that variable from the query.
let result_value = query_response.substitute_projected(self.tcx, &result_subst, |v| {
@@ -261,12 +268,14 @@ impl<'cx, 'tcx> InferCtxt<'cx, 'tcx> {
(GenericArgKind::Lifetime(v_o), GenericArgKind::Lifetime(v_r)) => {
// To make `v_o = v_r`, we emit `v_o: v_r` and `v_r: v_o`.
if v_o != v_r {
- output_query_region_constraints
- .outlives
- .push(ty::Binder::dummy(ty::OutlivesPredicate(v_o.into(), v_r)));
- output_query_region_constraints
- .outlives
- .push(ty::Binder::dummy(ty::OutlivesPredicate(v_r.into(), v_o)));
+ output_query_region_constraints.outlives.push((
+ ty::Binder::dummy(ty::OutlivesPredicate(v_o.into(), v_r)),
+ constraint_category,
+ ));
+ output_query_region_constraints.outlives.push((
+ ty::Binder::dummy(ty::OutlivesPredicate(v_r.into(), v_o)),
+ constraint_category,
+ ));
}
}
@@ -312,7 +321,7 @@ impl<'cx, 'tcx> InferCtxt<'cx, 'tcx> {
// Screen out `'a: 'a` cases -- we skip the binder here but
// only compare the inner values to one another, so they are still at
// consistent binding levels.
- let ty::OutlivesPredicate(k1, r2) = r_c.skip_binder();
+ let ty::OutlivesPredicate(k1, r2) = r_c.0.skip_binder();
if k1 != r2.into() { Some(r_c) } else { None }
}),
);
@@ -500,7 +509,7 @@ impl<'cx, 'tcx> InferCtxt<'cx, 'tcx> {
for &(a, b) in &query_response.value.opaque_types {
let a = substitute_value(self.tcx, &result_subst, a);
let b = substitute_value(self.tcx, &result_subst, b);
- obligations.extend(self.handle_opaque_type(a, b, true, cause, param_env)?.obligations);
+ obligations.extend(self.at(cause, param_env).eq(a, b)?.obligations);
}
Ok(InferOk { value: result_subst, obligations })
@@ -557,7 +566,7 @@ impl<'cx, 'tcx> InferCtxt<'cx, 'tcx> {
cause: ObligationCause<'tcx>,
param_env: ty::ParamEnv<'tcx>,
) -> Obligation<'tcx, ty::Predicate<'tcx>> {
- let ty::OutlivesPredicate(k1, r2) = predicate.skip_binder();
+ let ty::OutlivesPredicate(k1, r2) = predicate.0.skip_binder();
let atom = match k1.unpack() {
GenericArgKind::Lifetime(r1) => {
@@ -572,7 +581,7 @@ impl<'cx, 'tcx> InferCtxt<'cx, 'tcx> {
span_bug!(cause.span, "unexpected const outlives {:?}", predicate);
}
};
- let predicate = predicate.rebind(atom).to_predicate(self.tcx);
+ let predicate = predicate.0.rebind(atom).to_predicate(self.tcx);
Obligation::new(cause, param_env, predicate)
}
@@ -623,7 +632,7 @@ impl<'cx, 'tcx> InferCtxt<'cx, 'tcx> {
/// creates query region constraints.
pub fn make_query_region_constraints<'tcx>(
tcx: TyCtxt<'tcx>,
- outlives_obligations: impl Iterator<Item = (Ty<'tcx>, ty::Region<'tcx>)>,
+ outlives_obligations: impl Iterator<Item = (Ty<'tcx>, ty::Region<'tcx>, ConstraintCategory<'tcx>)>,
region_constraints: &RegionConstraintData<'tcx>,
) -> QueryRegionConstraints<'tcx> {
let RegionConstraintData { constraints, verifys, givens, member_constraints } =
@@ -632,28 +641,35 @@ pub fn make_query_region_constraints<'tcx>(
assert!(verifys.is_empty());
assert!(givens.is_empty());
+ debug!(?constraints);
+
let outlives: Vec<_> = constraints
.iter()
- .map(|(k, _)| match *k {
- // Swap regions because we are going from sub (<=) to outlives
- // (>=).
- Constraint::VarSubVar(v1, v2) => ty::OutlivesPredicate(
- tcx.mk_region(ty::ReVar(v2)).into(),
- tcx.mk_region(ty::ReVar(v1)),
- ),
- Constraint::VarSubReg(v1, r2) => {
- ty::OutlivesPredicate(r2.into(), tcx.mk_region(ty::ReVar(v1)))
- }
- Constraint::RegSubVar(r1, v2) => {
- ty::OutlivesPredicate(tcx.mk_region(ty::ReVar(v2)).into(), r1)
- }
- Constraint::RegSubReg(r1, r2) => ty::OutlivesPredicate(r2.into(), r1),
+ .map(|(k, origin)| {
+ // no bound vars in the code above
+ let constraint = ty::Binder::dummy(match *k {
+ // Swap regions because we are going from sub (<=) to outlives
+ // (>=).
+ Constraint::VarSubVar(v1, v2) => ty::OutlivesPredicate(
+ tcx.mk_region(ty::ReVar(v2)).into(),
+ tcx.mk_region(ty::ReVar(v1)),
+ ),
+ Constraint::VarSubReg(v1, r2) => {
+ ty::OutlivesPredicate(r2.into(), tcx.mk_region(ty::ReVar(v1)))
+ }
+ Constraint::RegSubVar(r1, v2) => {
+ ty::OutlivesPredicate(tcx.mk_region(ty::ReVar(v2)).into(), r1)
+ }
+ Constraint::RegSubReg(r1, r2) => ty::OutlivesPredicate(r2.into(), r1),
+ });
+ (constraint, origin.to_constraint_category())
})
- .map(ty::Binder::dummy) // no bound vars in the code above
.chain(
outlives_obligations
- .map(|(ty, r)| ty::OutlivesPredicate(ty.into(), r))
- .map(ty::Binder::dummy), // no bound vars in the code above
+ // no bound vars in the code above
+ .map(|(ty, r, constraint_category)| {
+ (ty::Binder::dummy(ty::OutlivesPredicate(ty.into(), r)), constraint_category)
+ }),
)
.collect();
@@ -661,7 +677,7 @@ pub fn make_query_region_constraints<'tcx>(
}
struct QueryTypeRelatingDelegate<'a, 'tcx> {
- infcx: &'a InferCtxt<'a, 'tcx>,
+ infcx: &'a InferCtxt<'tcx>,
obligations: &'a mut Vec<PredicateObligation<'tcx>>,
param_env: ty::ParamEnv<'tcx>,
cause: &'a ObligationCause<'tcx>,
@@ -725,17 +741,11 @@ impl<'tcx> TypeRelatingDelegate<'tcx> for QueryTypeRelatingDelegate<'_, 'tcx> {
true
}
- fn register_opaque_type(
+ fn register_opaque_type_obligations(
&mut self,
- a: Ty<'tcx>,
- b: Ty<'tcx>,
- a_is_expected: bool,
+ obligations: PredicateObligations<'tcx>,
) -> Result<(), TypeError<'tcx>> {
- self.obligations.extend(
- self.infcx
- .handle_opaque_type(a, b, a_is_expected, &self.cause, self.param_env)?
- .obligations,
- );
+ self.obligations.extend(obligations);
Ok(())
}
}
diff --git a/compiler/rustc_infer/src/infer/canonical/substitute.rs b/compiler/rustc_infer/src/infer/canonical/substitute.rs
index 34b611342..389afe22e 100644
--- a/compiler/rustc_infer/src/infer/canonical/substitute.rs
+++ b/compiler/rustc_infer/src/infer/canonical/substitute.rs
@@ -72,15 +72,16 @@ where
value
} else {
let delegate = FnMutDelegate {
- regions: |br: ty::BoundRegion| match var_values.var_values[br.var].unpack() {
+ regions: &mut |br: ty::BoundRegion| match var_values.var_values[br.var].unpack() {
GenericArgKind::Lifetime(l) => l,
r => bug!("{:?} is a region but value is {:?}", br, r),
},
- types: |bound_ty: ty::BoundTy| match var_values.var_values[bound_ty.var].unpack() {
+ types: &mut |bound_ty: ty::BoundTy| match var_values.var_values[bound_ty.var].unpack() {
GenericArgKind::Type(ty) => ty,
r => bug!("{:?} is a type but value is {:?}", bound_ty, r),
},
- consts: |bound_ct: ty::BoundVar, _| match var_values.var_values[bound_ct].unpack() {
+ consts: &mut |bound_ct: ty::BoundVar, _| match var_values.var_values[bound_ct].unpack()
+ {
GenericArgKind::Const(ct) => ct,
c => bug!("{:?} is a const but value is {:?}", bound_ct, c),
},
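The `FnMutDelegate` hunk above switches the delegate's fields from owned closures to `&mut` closure references, which is why each callback is now written as `&mut |...|`. A reduced version of the pattern, using ordinary types rather than the compiler's:

    // The delegate borrows its callbacks mutably instead of owning them, so the
    // same closures (and any state they capture) can be reused across calls.
    struct FnMutDelegate<'a> {
        regions: &'a mut dyn FnMut(u32) -> String,
        types: &'a mut dyn FnMut(char) -> String,
    }

    fn replace_all(delegate: FnMutDelegate<'_>, items: &[u32]) -> Vec<String> {
        let FnMutDelegate { regions, types } = delegate;
        let mut out: Vec<String> = items.iter().map(|&i| regions(i)).collect();
        out.push(types('x'));
        out
    }

    fn main() {
        let mut region_lookups = 0;
        let out = replace_all(
            FnMutDelegate {
                regions: &mut |i| {
                    region_lookups += 1;
                    format!("'r{i}")
                },
                types: &mut |c| format!("T{c}"),
            },
            &[0, 1],
        );
        println!("{out:?} ({region_lookups} region lookups)");
    }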
diff --git a/compiler/rustc_infer/src/infer/combine.rs b/compiler/rustc_infer/src/infer/combine.rs
index 8bf1de34a..b5427f639 100644
--- a/compiler/rustc_infer/src/infer/combine.rs
+++ b/compiler/rustc_infer/src/infer/combine.rs
@@ -43,7 +43,7 @@ use rustc_span::{Span, DUMMY_SP};
#[derive(Clone)]
pub struct CombineFields<'infcx, 'tcx> {
- pub infcx: &'infcx InferCtxt<'infcx, 'tcx>,
+ pub infcx: &'infcx InferCtxt<'tcx>,
pub trace: TypeTrace<'tcx>,
pub cause: Option<ty::relate::Cause>,
pub param_env: ty::ParamEnv<'tcx>,
@@ -63,7 +63,7 @@ pub enum RelationDir {
EqTo,
}
-impl<'infcx, 'tcx> InferCtxt<'infcx, 'tcx> {
+impl<'tcx> InferCtxt<'tcx> {
pub fn super_combine_tys<R>(
&self,
relation: &mut R,
@@ -147,11 +147,7 @@ impl<'infcx, 'tcx> InferCtxt<'infcx, 'tcx> {
ty::ConstKind::Infer(InferConst::Var(a_vid)),
ty::ConstKind::Infer(InferConst::Var(b_vid)),
) => {
- self.inner
- .borrow_mut()
- .const_unification_table()
- .unify_var_var(a_vid, b_vid)
- .map_err(|e| const_unification_error(a_is_expected, e))?;
+ self.inner.borrow_mut().const_unification_table().union(a_vid, b_vid);
return Ok(a);
}
@@ -246,21 +242,17 @@ impl<'infcx, 'tcx> InferCtxt<'infcx, 'tcx> {
let value = ConstInferUnifier { infcx: self, span, param_env, for_universe, target_vid }
.relate(ct, ct)?;
- self.inner
- .borrow_mut()
- .const_unification_table()
- .unify_var_value(
- target_vid,
- ConstVarValue {
- origin: ConstVariableOrigin {
- kind: ConstVariableOriginKind::ConstInference,
- span: DUMMY_SP,
- },
- val: ConstVariableValue::Known { value },
+ self.inner.borrow_mut().const_unification_table().union_value(
+ target_vid,
+ ConstVarValue {
+ origin: ConstVariableOrigin {
+ kind: ConstVariableOriginKind::ConstInference,
+ span: DUMMY_SP,
},
- )
- .map(|()| value)
- .map_err(|e| const_unification_error(vid_is_expected, e))
+ val: ConstVariableValue::Known { value },
+ },
+ );
+ Ok(value)
}
fn unify_integral_variable(
@@ -391,7 +383,7 @@ impl<'infcx, 'tcx> CombineFields<'infcx, 'tcx> {
/// Preconditions:
///
/// - `for_vid` is a "root vid"
- #[instrument(skip(self), level = "trace")]
+ #[instrument(skip(self), level = "trace", ret)]
fn generalize(
&self,
ty: Ty<'tcx>,
@@ -435,15 +427,8 @@ impl<'infcx, 'tcx> CombineFields<'infcx, 'tcx> {
cache: SsoHashMap::new(),
};
- let ty = match generalize.relate(ty, ty) {
- Ok(ty) => ty,
- Err(e) => {
- debug!(?e, "failure");
- return Err(e);
- }
- };
+ let ty = generalize.relate(ty, ty)?;
let needs_wf = generalize.needs_wf;
- trace!(?ty, ?needs_wf, "success");
Ok(Generalization { ty, needs_wf })
}
@@ -467,7 +452,7 @@ impl<'infcx, 'tcx> CombineFields<'infcx, 'tcx> {
}
struct Generalizer<'cx, 'tcx> {
- infcx: &'cx InferCtxt<'cx, 'tcx>,
+ infcx: &'cx InferCtxt<'tcx>,
/// The span, used when creating new type variables and things.
cause: &'cx ObligationCause<'tcx>,
@@ -493,12 +478,13 @@ struct Generalizer<'cx, 'tcx> {
param_env: ty::ParamEnv<'tcx>,
- cache: SsoHashMap<Ty<'tcx>, RelateResult<'tcx, Ty<'tcx>>>,
+ cache: SsoHashMap<Ty<'tcx>, Ty<'tcx>>,
}
/// Result from a generalization operation. This includes
/// not only the generalized type, but also a bool flag
/// indicating whether further WF checks are needed.
+#[derive(Debug)]
struct Generalization<'tcx> {
ty: Ty<'tcx>,
@@ -599,8 +585,8 @@ impl<'tcx> TypeRelation<'tcx> for Generalizer<'_, 'tcx> {
fn tys(&mut self, t: Ty<'tcx>, t2: Ty<'tcx>) -> RelateResult<'tcx, Ty<'tcx>> {
assert_eq!(t, t2); // we are abusing TypeRelation here; both LHS and RHS ought to be ==
- if let Some(result) = self.cache.get(&t) {
- return result.clone();
+ if let Some(&result) = self.cache.get(&t) {
+ return Ok(result);
}
debug!("generalize: t={:?}", t);
@@ -670,10 +656,10 @@ impl<'tcx> TypeRelation<'tcx> for Generalizer<'_, 'tcx> {
Ok(t)
}
_ => relate::super_relate_tys(self, t, t),
- };
+ }?;
- self.cache.insert(t, result.clone());
- return result;
+ self.cache.insert(t, result);
+ Ok(result)
}
fn regions(
@@ -694,7 +680,6 @@ impl<'tcx> TypeRelation<'tcx> for Generalizer<'_, 'tcx> {
ty::RePlaceholder(..)
| ty::ReVar(..)
- | ty::ReEmpty(_)
| ty::ReStatic
| ty::ReEarlyBound(..)
| ty::ReFree(..) => {
@@ -749,10 +734,7 @@ impl<'tcx> TypeRelation<'tcx> for Generalizer<'_, 'tcx> {
}
}
}
- ty::ConstKind::Unevaluated(ty::Unevaluated { def, substs, promoted })
- if self.tcx().lazy_normalization() =>
- {
- assert_eq!(promoted, None);
+ ty::ConstKind::Unevaluated(ty::UnevaluatedConst { def, substs }) => {
let substs = self.relate_with_variance(
ty::Variance::Invariant,
ty::VarianceDiagInfo::default(),
@@ -761,7 +743,7 @@ impl<'tcx> TypeRelation<'tcx> for Generalizer<'_, 'tcx> {
)?;
Ok(self.tcx().mk_const(ty::ConstS {
ty: c.ty(),
- kind: ty::ConstKind::Unevaluated(ty::Unevaluated { def, substs, promoted }),
+ kind: ty::ConstKind::Unevaluated(ty::UnevaluatedConst { def, substs }),
}))
}
_ => relate::super_relate_consts(self, c, c),
@@ -776,13 +758,6 @@ pub trait ConstEquateRelation<'tcx>: TypeRelation<'tcx> {
fn const_equate_obligation(&mut self, a: ty::Const<'tcx>, b: ty::Const<'tcx>);
}
-pub fn const_unification_error<'tcx>(
- a_is_expected: bool,
- (a, b): (ty::Const<'tcx>, ty::Const<'tcx>),
-) -> TypeError<'tcx> {
- TypeError::ConstMismatch(ExpectedFound::new(a_is_expected, a, b))
-}
-
fn int_unification_error<'tcx>(
a_is_expected: bool,
v: (ty::IntVarValue, ty::IntVarValue),
@@ -800,7 +775,7 @@ fn float_unification_error<'tcx>(
}
struct ConstInferUnifier<'cx, 'tcx> {
- infcx: &'cx InferCtxt<'cx, 'tcx>,
+ infcx: &'cx InferCtxt<'tcx>,
span: Span,
@@ -856,10 +831,9 @@ impl<'tcx> TypeRelation<'tcx> for ConstInferUnifier<'_, 'tcx> {
Ok(a.rebind(self.relate(a.skip_binder(), b.skip_binder())?))
}
- #[tracing::instrument(level = "debug", skip(self))]
+ #[instrument(level = "debug", skip(self), ret)]
fn tys(&mut self, t: Ty<'tcx>, _t: Ty<'tcx>) -> RelateResult<'tcx, Ty<'tcx>> {
debug_assert_eq!(t, _t);
- debug!("ConstInferUnifier: t={:?}", t);
match t.kind() {
&ty::Infer(ty::TyVar(vid)) => {
@@ -883,12 +857,7 @@ impl<'tcx> TypeRelation<'tcx> for ConstInferUnifier<'_, 'tcx> {
.borrow_mut()
.type_variables()
.new_var(self.for_universe, origin);
- let u = self.tcx().mk_ty_var(new_var_id);
- debug!(
- "ConstInferUnifier: replacing original vid={:?} with new={:?}",
- vid, u
- );
- Ok(u)
+ Ok(self.tcx().mk_ty_var(new_var_id))
}
}
}
@@ -914,7 +883,6 @@ impl<'tcx> TypeRelation<'tcx> for ConstInferUnifier<'_, 'tcx> {
ty::RePlaceholder(..)
| ty::ReVar(..)
- | ty::ReEmpty(_)
| ty::ReStatic
| ty::ReEarlyBound(..)
| ty::ReFree(..) => {
@@ -932,14 +900,13 @@ impl<'tcx> TypeRelation<'tcx> for ConstInferUnifier<'_, 'tcx> {
}
}
- #[tracing::instrument(level = "debug", skip(self))]
+ #[instrument(level = "debug", skip(self))]
fn consts(
&mut self,
c: ty::Const<'tcx>,
_c: ty::Const<'tcx>,
) -> RelateResult<'tcx, ty::Const<'tcx>> {
debug_assert_eq!(c, _c);
- debug!("ConstInferUnifier: c={:?}", c);
match c.kind() {
ty::ConstKind::Infer(InferConst::Var(vid)) => {
@@ -980,19 +947,17 @@ impl<'tcx> TypeRelation<'tcx> for ConstInferUnifier<'_, 'tcx> {
}
}
}
- ty::ConstKind::Unevaluated(ty::Unevaluated { def, substs, promoted })
- if self.tcx().lazy_normalization() =>
- {
- assert_eq!(promoted, None);
+ ty::ConstKind::Unevaluated(ty::UnevaluatedConst { def, substs }) => {
let substs = self.relate_with_variance(
ty::Variance::Invariant,
ty::VarianceDiagInfo::default(),
substs,
substs,
)?;
+
Ok(self.tcx().mk_const(ty::ConstS {
ty: c.ty(),
- kind: ty::ConstKind::Unevaluated(ty::Unevaluated { def, substs, promoted }),
+ kind: ty::ConstKind::Unevaluated(ty::UnevaluatedConst { def, substs }),
}))
}
_ => relate::super_relate_consts(self, c, c),
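Two smaller changes in the hunks above are worth noting: const inference variables now go through the unification table's plain `union`/`union_value` operations, and the generalizer's cache stores only successfully generalized types, propagating errors with `?` instead of caching them. A plain-Rust analogue of caching only the success case:

    use std::collections::HashMap;
    use std::num::ParseIntError;

    // Cache only the `Ok` results; errors are propagated with `?` and recomputed
    // if the same input ever comes back, which keeps the map's value type simple.
    fn parse_cached<'a>(
        cache: &mut HashMap<&'a str, i64>,
        input: &'a str,
    ) -> Result<i64, ParseIntError> {
        if let Some(&v) = cache.get(input) {
            return Ok(v);
        }
        let v = input.parse::<i64>()?;
        cache.insert(input, v);
        Ok(v)
    }

    fn main() {
        let mut cache = HashMap::new();
        println!("{:?}", parse_cached(&mut cache, "42"));
        println!("{:?}", parse_cached(&mut cache, "42")); // served from the cache
        println!("{:?}", parse_cached(&mut cache, "not a number"));
    }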
diff --git a/compiler/rustc_infer/src/infer/equate.rs b/compiler/rustc_infer/src/infer/equate.rs
index 3b1798ca7..59728148a 100644
--- a/compiler/rustc_infer/src/infer/equate.rs
+++ b/compiler/rustc_infer/src/infer/equate.rs
@@ -110,6 +110,25 @@ impl<'tcx> TypeRelation<'tcx> for Equate<'_, '_, 'tcx> {
.obligations,
);
}
+ // Optimization of GeneratorWitness relation since we know that all
+ // free regions are replaced with bound regions during construction.
+ // This greatly speeds up equating of GeneratorWitness.
+ (&ty::GeneratorWitness(a_types), &ty::GeneratorWitness(b_types)) => {
+ let a_types = infcx.tcx.anonymize_bound_vars(a_types);
+ let b_types = infcx.tcx.anonymize_bound_vars(b_types);
+ if a_types.bound_vars() == b_types.bound_vars() {
+ let (a_types, b_types) = infcx.replace_bound_vars_with_placeholders(
+ a_types.map_bound(|a_types| (a_types, b_types.skip_binder())),
+ );
+ for (a, b) in std::iter::zip(a_types, b_types) {
+ self.relate(a, b)?;
+ }
+ } else {
+ return Err(ty::error::TypeError::Sorts(ty::relate::expected_found(
+ self, a, b,
+ )));
+ }
+ }
_ => {
self.fields.infcx.super_combine_tys(self, a, b)?;
diff --git a/compiler/rustc_infer/src/infer/error_reporting/mod.rs b/compiler/rustc_infer/src/infer/error_reporting/mod.rs
index 20864c657..9ff703e52 100644
--- a/compiler/rustc_infer/src/infer/error_reporting/mod.rs
+++ b/compiler/rustc_infer/src/infer/error_reporting/mod.rs
@@ -51,6 +51,7 @@ use super::{InferCtxt, RegionVariableOrigin, SubregionOrigin, TypeTrace, ValuePa
use crate::infer;
use crate::infer::error_reporting::nice_region_error::find_anon_type::find_anon_type;
+use crate::infer::ExpectedFound;
use crate::traits::error_reporting::report_object_safety_error;
use crate::traits::{
IfExpressionCause, MatchExpressionArmCause, ObligationCause, ObligationCauseCode,
@@ -58,30 +59,57 @@ use crate::traits::{
};
use rustc_data_structures::fx::{FxHashMap, FxHashSet};
-use rustc_errors::{pluralize, struct_span_err, Diagnostic, ErrorGuaranteed};
+use rustc_errors::{pluralize, struct_span_err, Diagnostic, ErrorGuaranteed, IntoDiagnosticArg};
use rustc_errors::{Applicability, DiagnosticBuilder, DiagnosticStyledString, MultiSpan};
use rustc_hir as hir;
+use rustc_hir::def::DefKind;
use rustc_hir::def_id::{DefId, LocalDefId};
use rustc_hir::lang_items::LangItem;
use rustc_hir::Node;
use rustc_middle::dep_graph::DepContext;
use rustc_middle::ty::print::with_no_trimmed_paths;
+use rustc_middle::ty::relate::{self, RelateResult, TypeRelation};
use rustc_middle::ty::{
- self, error::TypeError, Binder, List, Region, Subst, Ty, TyCtxt, TypeFoldable,
- TypeSuperVisitable, TypeVisitable,
+ self, error::TypeError, Binder, List, Region, Ty, TyCtxt, TypeFoldable, TypeSuperVisitable,
+ TypeVisitable,
};
use rustc_span::{sym, symbol::kw, BytePos, DesugaringKind, Pos, Span};
use rustc_target::spec::abi;
-use std::ops::ControlFlow;
+use std::ops::{ControlFlow, Deref};
use std::{cmp, fmt, iter};
mod note;
-mod need_type_info;
+pub(crate) mod need_type_info;
pub use need_type_info::TypeAnnotationNeeded;
pub mod nice_region_error;
+/// A helper for building type-related errors. The `typeck_results`
+/// field is only populated during an in-progress typeck.
+/// Get an instance by calling `InferCtxt::err` or `FnCtxt::infer_err`.
+pub struct TypeErrCtxt<'a, 'tcx> {
+ pub infcx: &'a InferCtxt<'tcx>,
+ pub typeck_results: Option<std::cell::Ref<'a, ty::TypeckResults<'tcx>>>,
+}
+
+impl TypeErrCtxt<'_, '_> {
+ /// This is just to avoid a potential footgun of accidentally
+ /// dropping `typeck_results` by calling `InferCtxt::err_ctxt`
+ #[deprecated(note = "you already have a `TypeErrCtxt`")]
+ #[allow(unused)]
+ pub fn err_ctxt(&self) -> ! {
+ bug!("called `err_ctxt` on `TypeErrCtxt`. Try removing the call");
+ }
+}
+
+impl<'tcx> Deref for TypeErrCtxt<'_, 'tcx> {
+ type Target = InferCtxt<'tcx>;
+ fn deref(&self) -> &InferCtxt<'tcx> {
+ &self.infcx
+ }
+}
+
pub(super) fn note_and_explain_region<'tcx>(
tcx: TyCtxt<'tcx>,
err: &mut Diagnostic,
@@ -95,11 +123,6 @@ pub(super) fn note_and_explain_region<'tcx>(
msg_span_from_free_region(tcx, region, alt_span)
}
- ty::ReEmpty(ty::UniverseIndex::ROOT) => ("the empty lifetime".to_owned(), alt_span),
-
- // uh oh, hope no user ever sees THIS
- ty::ReEmpty(ui) => (format!("the empty lifetime in universe {:?}", ui), alt_span),
-
ty::RePlaceholder(_) => return,
// FIXME(#13998) RePlaceholder should probably print like
@@ -138,8 +161,6 @@ fn msg_span_from_free_region<'tcx>(
(msg, Some(span))
}
ty::ReStatic => ("the static lifetime".to_owned(), alt_span),
- ty::ReEmpty(ty::UniverseIndex::ROOT) => ("an empty lifetime".to_owned(), alt_span),
- ty::ReEmpty(ui) => (format!("an empty lifetime in universe {:?}", ui), alt_span),
_ => bug!("{:?}", region),
}
}
@@ -249,17 +270,7 @@ pub fn unexpected_hidden_region_diagnostic<'tcx>(
// Explain the region we are capturing.
match *hidden_region {
- ty::ReEmpty(ty::UniverseIndex::ROOT) => {
- // All lifetimes shorter than the function body are `empty` in
- // lexical region resolution. The default explanation of "an empty
- // lifetime" isn't really accurate here.
- let message = format!(
- "hidden type `{}` captures lifetime smaller than the function body",
- hidden_ty
- );
- err.span_note(span, &message);
- }
- ty::ReEarlyBound(_) | ty::ReFree(_) | ty::ReStatic | ty::ReEmpty(_) => {
+ ty::ReEarlyBound(_) | ty::ReFree(_) | ty::ReStatic => {
// Assuming regionck succeeded (*), we ought to always be
// capturing *some* region from the fn header, and hence it
// ought to be free. So under normal circumstances, we will go
@@ -318,7 +329,38 @@ pub fn unexpected_hidden_region_diagnostic<'tcx>(
err
}
-impl<'a, 'tcx> InferCtxt<'a, 'tcx> {
+impl<'tcx> InferCtxt<'tcx> {
+ pub fn get_impl_future_output_ty(&self, ty: Ty<'tcx>) -> Option<Binder<'tcx, Ty<'tcx>>> {
+ if let ty::Opaque(def_id, substs) = ty.kind() {
+ let future_trait = self.tcx.require_lang_item(LangItem::Future, None);
+ // Future::Output
+ let item_def_id = self.tcx.associated_item_def_ids(future_trait)[0];
+
+ let bounds = self.tcx.bound_explicit_item_bounds(*def_id);
+
+ for (predicate, _) in bounds.subst_iter_copied(self.tcx, substs) {
+ let output = predicate
+ .kind()
+ .map_bound(|kind| match kind {
+ ty::PredicateKind::Projection(projection_predicate)
+ if projection_predicate.projection_ty.item_def_id == item_def_id =>
+ {
+ projection_predicate.term.ty()
+ }
+ _ => None,
+ })
+ .transpose();
+ if output.is_some() {
+ // We don't account for multiple `Future::Output = Ty` constraints.
+ return output;
+ }
+ }
+ }
+ None
+ }
+}
+
+impl<'tcx> TypeErrCtxt<'_, 'tcx> {
pub fn report_region_errors(
&self,
generic_param_scope: LocalDefId,
@@ -385,7 +427,7 @@ impl<'a, 'tcx> InferCtxt<'a, 'tcx> {
RegionResolutionError::UpperBoundUniverseConflict(
_,
_,
- var_universe,
+ _,
sup_origin,
sup_r,
) => {
@@ -396,7 +438,7 @@ impl<'a, 'tcx> InferCtxt<'a, 'tcx> {
// placeholder. In practice, we expect more
// tailored errors that don't really use this
// value.
- let sub_r = self.tcx.mk_region(ty::ReEmpty(var_universe));
+ let sub_r = self.tcx.lifetimes.re_erased;
self.report_placeholder_failure(sup_origin, sub_r, sup_r).emit();
}
@@ -457,7 +499,7 @@ impl<'a, 'tcx> InferCtxt<'a, 'tcx> {
}
/// Adds a note if the types come from similarly named crates
- fn check_and_note_conflicting_crates(&self, err: &mut Diagnostic, terr: &TypeError<'tcx>) {
+ fn check_and_note_conflicting_crates(&self, err: &mut Diagnostic, terr: TypeError<'tcx>) {
use hir::def_id::CrateNum;
use rustc_hir::definitions::DisambiguatedDefPathData;
use ty::print::Printer;
@@ -561,7 +603,7 @@ impl<'a, 'tcx> InferCtxt<'a, 'tcx> {
}
}
};
- match *terr {
+ match terr {
TypeError::Sorts(ref exp_found) => {
// if they are both "path types", there's a chance of ambiguity
// due to different versions of the same crate
@@ -583,7 +625,7 @@ impl<'a, 'tcx> InferCtxt<'a, 'tcx> {
err: &mut Diagnostic,
cause: &ObligationCause<'tcx>,
exp_found: Option<ty::error::ExpectedFound<Ty<'tcx>>>,
- terr: &TypeError<'tcx>,
+ terr: TypeError<'tcx>,
) {
match *cause.code() {
ObligationCauseCode::Pattern { origin_expr: true, span: Some(span), root_ty } => {
@@ -592,13 +634,13 @@ impl<'a, 'tcx> InferCtxt<'a, 'tcx> {
{
// don't show type `_`
if span.desugaring_kind() == Some(DesugaringKind::ForLoop)
- && let ty::Adt(def, substs) = ty.kind()
- && Some(def.did()) == self.tcx.get_diagnostic_item(sym::Option)
+ && let ty::Adt(def, substs) = ty.kind()
+ && Some(def.did()) == self.tcx.get_diagnostic_item(sym::Option)
{
err.span_label(span, format!("this is an iterator with items of type `{}`", substs.type_at(0)));
} else {
- err.span_label(span, format!("this expression has type `{}`", ty));
- }
+ err.span_label(span, format!("this expression has type `{}`", ty));
+ }
}
if let Some(ty::error::ExpectedFound { found, .. }) = exp_found
&& ty.is_box() && ty.boxed_ty() == found
@@ -634,8 +676,8 @@ impl<'a, 'tcx> InferCtxt<'a, 'tcx> {
let scrut_expr = self.tcx.hir().expect_expr(scrut_hir_id);
let scrut_ty = if let hir::ExprKind::Call(_, args) = &scrut_expr.kind {
let arg_expr = args.first().expect("try desugaring call w/out arg");
- self.in_progress_typeck_results.and_then(|typeck_results| {
- typeck_results.borrow().expr_ty_opt(arg_expr)
+ self.typeck_results.as_ref().and_then(|typeck_results| {
+ typeck_results.expr_ty_opt(arg_expr)
})
} else {
bug!("try desugaring w/out call expr as scrutinee");
@@ -739,12 +781,13 @@ impl<'a, 'tcx> InferCtxt<'a, 'tcx> {
err.help("...or use `match` instead of `let...else`");
}
_ => {
- if let ObligationCauseCode::BindingObligation(_, binding_span) =
- cause.code().peel_derives()
+ if let ObligationCauseCode::BindingObligation(_, span)
+ | ObligationCauseCode::ExprBindingObligation(_, span, ..)
+ = cause.code().peel_derives()
+ && let TypeError::RegionsPlaceholderMismatch = terr
{
- if matches!(terr, TypeError::RegionsPlaceholderMismatch) {
- err.span_note(*binding_span, "the lifetime requirement is introduced here");
- }
+                        err.span_note(*span, "the lifetime requirement is introduced here");
}
}
}
@@ -960,12 +1003,23 @@ impl<'a, 'tcx> InferCtxt<'a, 'tcx> {
}
}
+ fn normalize_fn_sig_for_diagnostic(&self, sig: ty::PolyFnSig<'tcx>) -> ty::PolyFnSig<'tcx> {
+ if let Some(normalize) = &self.normalize_fn_sig_for_diagnostic {
+ normalize(self, sig)
+ } else {
+ sig
+ }
+ }
+
/// Given two `fn` signatures highlight only sub-parts that are different.
fn cmp_fn_sig(
&self,
sig1: &ty::PolyFnSig<'tcx>,
sig2: &ty::PolyFnSig<'tcx>,
) -> (DiagnosticStyledString, DiagnosticStyledString) {
+ let sig1 = &self.normalize_fn_sig_for_diagnostic(*sig1);
+ let sig2 = &self.normalize_fn_sig_for_diagnostic(*sig2);
+
let get_lifetimes = |sig| {
use rustc_hir::def::Namespace;
let (_, sig, reg) = ty::print::FmtPrinter::new(self.tcx, Namespace::TypeNS)
@@ -1422,9 +1476,9 @@ impl<'a, 'tcx> InferCtxt<'a, 'tcx> {
/// the message in `secondary_span` as the primary label, and apply the message that would
/// otherwise be used for the primary label on the `secondary_span` `Span`. This applies on
/// E0271, like `src/test/ui/issues/issue-39970.stderr`.
- #[tracing::instrument(
+ #[instrument(
level = "debug",
- skip(self, diag, secondary_span, swap_secondary_and_primary, force_label)
+ skip(self, diag, secondary_span, swap_secondary_and_primary, prefer_label)
)]
pub fn note_type_err(
&self,
@@ -1432,9 +1486,9 @@ impl<'a, 'tcx> InferCtxt<'a, 'tcx> {
cause: &ObligationCause<'tcx>,
secondary_span: Option<(Span, String)>,
mut values: Option<ValuePairs<'tcx>>,
- terr: &TypeError<'tcx>,
+ terr: TypeError<'tcx>,
swap_secondary_and_primary: bool,
- force_label: bool,
+ prefer_label: bool,
) {
let span = cause.span();
@@ -1574,23 +1628,31 @@ impl<'a, 'tcx> InferCtxt<'a, 'tcx> {
Some(values) => {
let values = self.resolve_vars_if_possible(values);
let (is_simple_error, exp_found) = match values {
- ValuePairs::Terms(infer::ExpectedFound {
- expected: ty::Term::Ty(expected),
- found: ty::Term::Ty(found),
- }) => {
- let is_simple_err = expected.is_simple_text() && found.is_simple_text();
- OpaqueTypesVisitor::visit_expected_found(self.tcx, expected, found, span)
- .report(diag);
-
- (
- is_simple_err,
- Mismatch::Variable(infer::ExpectedFound { expected, found }),
- )
+ ValuePairs::Terms(infer::ExpectedFound { expected, found }) => {
+ match (expected.unpack(), found.unpack()) {
+ (ty::TermKind::Ty(expected), ty::TermKind::Ty(found)) => {
+ let is_simple_err =
+ expected.is_simple_text() && found.is_simple_text();
+ OpaqueTypesVisitor::visit_expected_found(
+ self.tcx, expected, found, span,
+ )
+ .report(diag);
+
+ (
+ is_simple_err,
+ Mismatch::Variable(infer::ExpectedFound { expected, found }),
+ )
+ }
+ (ty::TermKind::Const(_), ty::TermKind::Const(_)) => {
+ (false, Mismatch::Fixed("constant"))
+ }
+ _ => (false, Mismatch::Fixed("type")),
+ }
}
ValuePairs::TraitRefs(_) | ValuePairs::PolyTraitRefs(_) => {
(false, Mismatch::Fixed("trait"))
}
- _ => (false, Mismatch::Fixed("type")),
+ ValuePairs::Regions(_) => (false, Mismatch::Fixed("lifetime")),
};
let vals = match self.values_str(values) {
Some((expected, found)) => Some((expected, found)),
@@ -1612,7 +1674,7 @@ impl<'a, 'tcx> InferCtxt<'a, 'tcx> {
TypeError::ObjectUnsafeCoercion(_) => {}
_ => {
let mut label_or_note = |span: Span, msg: &str| {
- if force_label || &[span] == diag.span.primary_spans() {
+ if (prefer_label && is_simple_error) || &[span] == diag.span.primary_spans() {
diag.span_label(span, msg);
} else {
diag.span_note(span, msg);
@@ -1649,8 +1711,114 @@ impl<'a, 'tcx> InferCtxt<'a, 'tcx> {
),
Mismatch::Fixed(s) => (s.into(), s.into(), None),
};
- match (&terr, expected == found) {
- (TypeError::Sorts(values), extra) => {
+
+ enum Similar<'tcx> {
+ Adts { expected: ty::AdtDef<'tcx>, found: ty::AdtDef<'tcx> },
+ PrimitiveFound { expected: ty::AdtDef<'tcx>, found: Ty<'tcx> },
+ PrimitiveExpected { expected: Ty<'tcx>, found: ty::AdtDef<'tcx> },
+ }
+
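+            // Detect whether the mismatched types merely share a name: an ADT
+            // shadowing the primitive it is named after, or two distinct ADTs whose
+            // paths end in the same identifier.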
+ let similarity = |ExpectedFound { expected, found }: ExpectedFound<Ty<'tcx>>| {
+ if let ty::Adt(expected, _) = expected.kind() && let Some(primitive) = found.primitive_symbol() {
+ let path = self.tcx.def_path(expected.did()).data;
+ let name = path.last().unwrap().data.get_opt_name();
+ if name == Some(primitive) {
+ return Some(Similar::PrimitiveFound { expected: *expected, found });
+ }
+ } else if let Some(primitive) = expected.primitive_symbol() && let ty::Adt(found, _) = found.kind() {
+ let path = self.tcx.def_path(found.did()).data;
+ let name = path.last().unwrap().data.get_opt_name();
+ if name == Some(primitive) {
+ return Some(Similar::PrimitiveExpected { expected, found: *found });
+ }
+ } else if let ty::Adt(expected, _) = expected.kind() && let ty::Adt(found, _) = found.kind() {
+ if !expected.did().is_local() && expected.did().krate == found.did().krate {
+ // Most likely types from different versions of the same crate
+ // are in play, in which case this message isn't so helpful.
+ // A "perhaps two different versions..." error is already emitted for that.
+ return None;
+ }
+ let f_path = self.tcx.def_path(found.did()).data;
+ let e_path = self.tcx.def_path(expected.did()).data;
+
+ if let (Some(e_last), Some(f_last)) = (e_path.last(), f_path.last()) && e_last == f_last {
+ return Some(Similar::Adts{expected: *expected, found: *found});
+ }
+ }
+ None
+ };
+
+ match terr {
+ // If two types mismatch but have similar names, mention that specifically.
+ TypeError::Sorts(values) if let Some(s) = similarity(values) => {
+ let diagnose_primitive =
+ |prim: Ty<'tcx>,
+ shadow: Ty<'tcx>,
+ defid: DefId,
+ diagnostic: &mut Diagnostic| {
+ let name = shadow.sort_string(self.tcx);
+ diagnostic.note(format!(
+ "{prim} and {name} have similar names, but are actually distinct types"
+ ));
+ diagnostic
+ .note(format!("{prim} is a primitive defined by the language"));
+ let def_span = self.tcx.def_span(defid);
+ let msg = if defid.is_local() {
+ format!("{name} is defined in the current crate")
+ } else {
+ let crate_name = self.tcx.crate_name(defid.krate);
+                                format!("{name} is defined in crate `{crate_name}`")
+ };
+ diagnostic.span_note(def_span, msg);
+ };
+
+ let diagnose_adts =
+                        |expected_adt: ty::AdtDef<'tcx>,
+ found_adt: ty::AdtDef<'tcx>,
+ diagnostic: &mut Diagnostic| {
+ let found_name = values.found.sort_string(self.tcx);
+ let expected_name = values.expected.sort_string(self.tcx);
+
+ let found_defid = found_adt.did();
+ let expected_defid = expected_adt.did();
+
+ diagnostic.note(format!("{found_name} and {expected_name} have similar names, but are actually distinct types"));
+ for (defid, name) in
+ [(found_defid, found_name), (expected_defid, expected_name)]
+ {
+ let def_span = self.tcx.def_span(defid);
+
+ let msg = if found_defid.is_local() && expected_defid.is_local() {
+ let module = self
+ .tcx
+ .parent_module_from_def_id(defid.expect_local())
+ .to_def_id();
+ let module_name = self.tcx.def_path(module).to_string_no_crate_verbose();
+ format!("{name} is defined in module `crate{module_name}` of the current crate")
+ } else if defid.is_local() {
+ format!("{name} is defined in the current crate")
+ } else {
+ let crate_name = self.tcx.crate_name(defid.krate);
+ format!("{name} is defined in crate `{crate_name}`")
+ };
+ diagnostic.span_note(def_span, msg);
+ }
+ };
+
+ match s {
+ Similar::Adts{expected, found} => {
+ diagnose_adts(expected, found, diag)
+ }
+ Similar::PrimitiveFound{expected, found: prim} => {
+ diagnose_primitive(prim, values.expected, expected.did(), diag)
+ }
+ Similar::PrimitiveExpected{expected: prim, found} => {
+ diagnose_primitive(prim, values.found, found.did(), diag)
+ }
+ }
+ }
+ TypeError::Sorts(values) => {
+ let extra = expected == found;
let sort_string = |ty: Ty<'tcx>| match (extra, ty.kind()) {
(true, ty::Opaque(def_id, _)) => {
let sm = self.tcx.sess.source_map();
@@ -1662,6 +1830,19 @@ impl<'a, 'tcx> InferCtxt<'a, 'tcx> {
pos.col.to_usize() + 1,
)
}
+ (true, ty::Projection(proj))
+ if self.tcx.def_kind(proj.item_def_id)
+ == DefKind::ImplTraitPlaceholder =>
+ {
+ let sm = self.tcx.sess.source_map();
+ let pos = sm.lookup_char_pos(self.tcx.def_span(proj.item_def_id).lo());
+ format!(
+ " (trait associated opaque type at <{}:{}:{}>)",
+ sm.filename_for_diagnostics(&pos.file.name),
+ pos.line,
+ pos.col.to_usize() + 1,
+ )
+ }
(true, _) => format!(" ({})", ty.sort_string(self.tcx)),
(false, _) => "".to_string(),
};
@@ -1690,10 +1871,10 @@ impl<'a, 'tcx> InferCtxt<'a, 'tcx> {
);
}
}
- (TypeError::ObjectUnsafeCoercion(_), _) => {
+ TypeError::ObjectUnsafeCoercion(_) => {
diag.note_unsuccessful_coercion(found, expected);
}
- (_, _) => {
+ _ => {
debug!(
"note_type_err: exp_found={:?}, expected={:?} found={:?}",
exp_found, expected, found
@@ -1713,7 +1894,7 @@ impl<'a, 'tcx> InferCtxt<'a, 'tcx> {
ty::error::TypeError::Sorts(terr)
if exp_found.map_or(false, |ef| terr.found == ef.found) =>
{
- Some(*terr)
+ Some(terr)
}
_ => exp_found,
};
@@ -1738,7 +1919,7 @@ impl<'a, 'tcx> InferCtxt<'a, 'tcx> {
// In some (most?) cases cause.body_id points to actual body, but in some cases
// it's an actual definition. According to the comments (e.g. in
- // librustc_typeck/check/compare_method.rs:compare_predicate_entailment) the latter
+ // rustc_hir_analysis/check/compare_method.rs:compare_predicate_entailment) the latter
// is relied upon by some other code. This might (or might not) need cleanup.
let body_owner_def_id =
self.tcx.hir().opt_local_def_id(cause.body_id).unwrap_or_else(|| {
@@ -1750,6 +1931,7 @@ impl<'a, 'tcx> InferCtxt<'a, 'tcx> {
if let Some(ValuePairs::PolyTraitRefs(exp_found)) = values
&& let ty::Closure(def_id, _) = exp_found.expected.skip_binder().self_ty().kind()
&& let Some(def_id) = def_id.as_local()
+ && terr.involves_regions()
{
let span = self.tcx.def_span(def_id);
diag.span_note(span, "this closure does not fulfill the lifetime requirements");
@@ -1829,36 +2011,6 @@ impl<'a, 'tcx> InferCtxt<'a, 'tcx> {
}
}
- pub fn get_impl_future_output_ty(&self, ty: Ty<'tcx>) -> Option<Binder<'tcx, Ty<'tcx>>> {
- if let ty::Opaque(def_id, substs) = ty.kind() {
- let future_trait = self.tcx.require_lang_item(LangItem::Future, None);
- // Future::Output
- let item_def_id = self.tcx.associated_item_def_ids(future_trait)[0];
-
- let bounds = self.tcx.bound_explicit_item_bounds(*def_id);
-
- for predicate in bounds.transpose_iter().map(|e| e.map_bound(|(p, _)| *p)) {
- let predicate = predicate.subst(self.tcx, substs);
- let output = predicate
- .kind()
- .map_bound(|kind| match kind {
- ty::PredicateKind::Projection(projection_predicate)
- if projection_predicate.projection_ty.item_def_id == item_def_id =>
- {
- projection_predicate.term.ty()
- }
- _ => None,
- })
- .transpose();
- if output.is_some() {
- // We don't account for multiple `Future::Output = Ty` constraints.
- return output;
- }
- }
- }
- None
- }
-
/// A possible error is to forget to add `.await` when using futures:
///
/// ```compile_fail,E0308
@@ -2033,26 +2185,40 @@ impl<'a, 'tcx> InferCtxt<'a, 'tcx> {
exp_found: &ty::error::ExpectedFound<Ty<'tcx>>,
diag: &mut Diagnostic,
) {
+ if let Ok(snippet) = self.tcx.sess.source_map().span_to_snippet(span)
+ && let Some(msg) = self.should_suggest_as_ref(exp_found.expected, exp_found.found)
+ {
+ diag.span_suggestion(
+ span,
+ msg,
+                // HACK (issue #100605): when suggesting to convert `&Option<T>` to
+                // `Option<&T>`, strip the extra leading `&` from the snippet.
+ format!("{}.as_ref()", snippet.trim_start_matches('&')),
+ Applicability::MachineApplicable,
+ );
+ }
+ }
+
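+    /// Returns a `.as_ref()` help message when `found` is a reference to the same
+    /// `Option`/`Result` type as `expected`, e.g. a `&Option<T>` where an
+    /// `Option<&T>` is expected.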
+ pub fn should_suggest_as_ref(&self, expected: Ty<'tcx>, found: Ty<'tcx>) -> Option<&str> {
if let (ty::Adt(exp_def, exp_substs), ty::Ref(_, found_ty, _)) =
- (exp_found.expected.kind(), exp_found.found.kind())
+ (expected.kind(), found.kind())
{
if let ty::Adt(found_def, found_substs) = *found_ty.kind() {
- let path_str = format!("{:?}", exp_def);
if exp_def == &found_def {
- let opt_msg = "you can convert from `&Option<T>` to `Option<&T>` using \
- `.as_ref()`";
- let result_msg = "you can convert from `&Result<T, E>` to \
- `Result<&T, &E>` using `.as_ref()`";
let have_as_ref = &[
- ("std::option::Option", opt_msg),
- ("core::option::Option", opt_msg),
- ("std::result::Result", result_msg),
- ("core::result::Result", result_msg),
+ (
+ sym::Option,
+ "you can convert from `&Option<T>` to `Option<&T>` using \
+ `.as_ref()`",
+ ),
+ (
+ sym::Result,
+ "you can convert from `&Result<T, E>` to \
+ `Result<&T, &E>` using `.as_ref()`",
+ ),
];
- if let Some(msg) = have_as_ref
- .iter()
- .find_map(|(path, msg)| (&path_str == path).then_some(msg))
- {
+ if let Some(msg) = have_as_ref.iter().find_map(|(name, msg)| {
+ self.tcx.is_diagnostic_item(*name, exp_def.did()).then_some(msg)
+ }) {
let mut show_suggestion = true;
for (exp_ty, found_ty) in
iter::zip(exp_substs.types(), found_substs.types())
@@ -2072,26 +2238,20 @@ impl<'a, 'tcx> InferCtxt<'a, 'tcx> {
_ => show_suggestion = false,
}
}
- if let (Ok(snippet), true) =
- (self.tcx.sess.source_map().span_to_snippet(span), show_suggestion)
- {
- diag.span_suggestion(
- span,
- *msg,
- format!("{}.as_ref()", snippet),
- Applicability::MachineApplicable,
- );
+ if show_suggestion {
+ return Some(*msg);
}
}
}
}
}
+ None
}
pub fn report_and_explain_type_error(
&self,
trace: TypeTrace<'tcx>,
- terr: &TypeError<'tcx>,
+ terr: TypeError<'tcx>,
) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> {
use crate::traits::ObligationCauseCode::MatchExpressionArm;
@@ -2111,6 +2271,25 @@ impl<'a, 'tcx> InferCtxt<'a, 'tcx> {
struct_span_err!(self.tcx.sess, span, E0580, "{}", failure_str)
}
FailureCode::Error0308(failure_str) => {
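+                // Escape any quotes inside the snippet so the re-quoted
+                // suggestions below still form valid literals.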
+ fn escape_literal(s: &str) -> String {
+ let mut escaped = String::with_capacity(s.len());
+ let mut chrs = s.chars().peekable();
+ while let Some(first) = chrs.next() {
+ match (first, chrs.peek()) {
+ ('\\', Some(&delim @ '"') | Some(&delim @ '\'')) => {
+ escaped.push('\\');
+ escaped.push(delim);
+ chrs.next();
+ }
+ ('"' | '\'', _) => {
+ escaped.push('\\');
+ escaped.push(first)
+ }
+ (c, _) => escaped.push(c),
+ };
+ }
+ escaped
+ }
let mut err = struct_span_err!(self.tcx.sess, span, E0308, "{}", failure_str);
if let Some((expected, found)) = trace.values.ty() {
match (expected.kind(), found.kind()) {
@@ -2132,7 +2311,7 @@ impl<'a, 'tcx> InferCtxt<'a, 'tcx> {
err.span_suggestion(
span,
"if you meant to write a `char` literal, use single quotes",
- format!("'{}'", code),
+ format!("'{}'", escape_literal(code)),
Applicability::MachineApplicable,
);
}
@@ -2147,7 +2326,7 @@ impl<'a, 'tcx> InferCtxt<'a, 'tcx> {
err.span_suggestion(
span,
"if you meant to write a `str` literal, use double quotes",
- format!("\"{}\"", code),
+ format!("\"{}\"", escape_literal(code)),
Applicability::MachineApplicable,
);
}
@@ -2254,11 +2433,11 @@ impl<'a, 'tcx> InferCtxt<'a, 'tcx> {
return None;
}
- Some(match (exp_found.expected, exp_found.found) {
- (ty::Term::Ty(expected), ty::Term::Ty(found)) => self.cmp(expected, found),
- (expected, found) => (
- DiagnosticStyledString::highlighted(expected.to_string()),
- DiagnosticStyledString::highlighted(found.to_string()),
+ Some(match (exp_found.expected.unpack(), exp_found.found.unpack()) {
+ (ty::TermKind::Ty(expected), ty::TermKind::Ty(found)) => self.cmp(expected, found),
+ _ => (
+ DiagnosticStyledString::highlighted(exp_found.expected.to_string()),
+ DiagnosticStyledString::highlighted(exp_found.found.to_string()),
),
})
}
@@ -2298,7 +2477,7 @@ impl<'a, 'tcx> InferCtxt<'a, 'tcx> {
origin: Option<SubregionOrigin<'tcx>>,
bound_kind: GenericKind<'tcx>,
sub: Region<'tcx>,
- ) -> DiagnosticBuilder<'a, ErrorGuaranteed> {
+ ) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> {
// Attempt to obtain the span of the parameter so we can
// suggest adding an explicit lifetime bound to it.
let generics = self.tcx.generics_of(generic_param_scope);
@@ -2314,7 +2493,7 @@ impl<'a, 'tcx> InferCtxt<'a, 'tcx> {
// We do this to avoid suggesting code that ends up as `T: 'a'b`,
// instead we suggest `T: 'a + 'b` in that case.
let hir_id = self.tcx.hir().local_def_id_to_hir_id(def_id);
- let ast_generics = self.tcx.hir().get_generics(hir_id.owner);
+ let ast_generics = self.tcx.hir().get_generics(hir_id.owner.def_id);
let bounds =
ast_generics.and_then(|g| g.bounds_span_for_suggestions(def_id));
// `sp` only covers `T`, change it so that it covers
@@ -2355,6 +2534,9 @@ impl<'a, 'tcx> InferCtxt<'a, 'tcx> {
let labeled_user_string = match bound_kind {
GenericKind::Param(ref p) => format!("the parameter type `{}`", p),
GenericKind::Projection(ref p) => format!("the associated type `{}`", p),
+ GenericKind::Opaque(def_id, substs) => {
+ format!("the opaque type `{}`", self.tcx.def_path_str_with_substs(def_id, substs))
+ }
};
if let Some(SubregionOrigin::CompareImplItemObligation {
@@ -2376,19 +2558,23 @@ impl<'a, 'tcx> InferCtxt<'a, 'tcx> {
type_param_span: Option<(Span, bool)>,
bound_kind: GenericKind<'tcx>,
sub: S,
+ add_lt_sugg: Option<(Span, String)>,
) {
let msg = "consider adding an explicit lifetime bound";
if let Some((sp, has_lifetimes)) = type_param_span {
let suggestion =
if has_lifetimes { format!(" + {}", sub) } else { format!(": {}", sub) };
- err.span_suggestion_verbose(
- sp,
- &format!("{}...", msg),
- suggestion,
+ let mut suggestions = vec![(sp, suggestion)];
+ if let Some(add_lt_sugg) = add_lt_sugg {
+ suggestions.push(add_lt_sugg);
+ }
+ err.multipart_suggestion_verbose(
+ format!("{msg}..."),
+ suggestions,
Applicability::MaybeIncorrect, // Issue #41966
);
} else {
- let consider = format!("{} `{}: {}`...", msg, bound_kind, sub,);
+ let consider = format!("{} `{}: {}`...", msg, bound_kind, sub);
err.help(&consider);
}
}
@@ -2404,7 +2590,7 @@ impl<'a, 'tcx> InferCtxt<'a, 'tcx> {
};
let mut sugg =
vec![(sp, suggestion), (span.shrink_to_hi(), format!(" + {}", new_lt))];
- if let Some(lt) = add_lt_sugg {
+ if let Some(lt) = add_lt_sugg.clone() {
sugg.push(lt);
sugg.rotate_right(1);
}
@@ -2430,7 +2616,7 @@ impl<'a, 'tcx> InferCtxt<'a, 'tcx> {
for h in self.tcx.hir().parent_iter(param.hir_id) {
break 'origin match h.1 {
Node::ImplItem(hir::ImplItem {
- kind: hir::ImplItemKind::TyAlias(..),
+ kind: hir::ImplItemKind::Type(..),
generics,
..
})
@@ -2510,7 +2696,7 @@ impl<'a, 'tcx> InferCtxt<'a, 'tcx> {
// for the bound is not suitable for suggestions when `-Zverbose` is set because it
// uses `Debug` output, so we handle it specially here so that suggestions are
// always correct.
- binding_suggestion(&mut err, type_param_span, bound_kind, name);
+ binding_suggestion(&mut err, type_param_span, bound_kind, name, None);
err
}
@@ -2523,7 +2709,7 @@ impl<'a, 'tcx> InferCtxt<'a, 'tcx> {
"{} may not live long enough",
labeled_user_string
);
- binding_suggestion(&mut err, type_param_span, bound_kind, "'static");
+ binding_suggestion(&mut err, type_param_span, bound_kind, "'static", None);
err
}
@@ -2557,7 +2743,13 @@ impl<'a, 'tcx> InferCtxt<'a, 'tcx> {
new_binding_suggestion(&mut err, type_param_span);
}
_ => {
- binding_suggestion(&mut err, type_param_span, bound_kind, new_lt);
+ binding_suggestion(
+ &mut err,
+ type_param_span,
+ bound_kind,
+ new_lt,
+ add_lt_sugg,
+ );
}
}
}
@@ -2659,70 +2851,98 @@ impl<'a, 'tcx> InferCtxt<'a, 'tcx> {
/// Float types, respectively). When comparing two ADTs, these rules apply recursively.
pub fn same_type_modulo_infer(&self, a: Ty<'tcx>, b: Ty<'tcx>) -> bool {
let (a, b) = self.resolve_vars_if_possible((a, b));
- match (a.kind(), b.kind()) {
- (&ty::Adt(def_a, substs_a), &ty::Adt(def_b, substs_b)) => {
- if def_a != def_b {
- return false;
- }
+ SameTypeModuloInfer(self).relate(a, b).is_ok()
+ }
+}
- substs_a
- .types()
- .zip(substs_b.types())
- .all(|(a, b)| self.same_type_modulo_infer(a, b))
- }
- (&ty::FnDef(did_a, substs_a), &ty::FnDef(did_b, substs_b)) => {
- if did_a != did_b {
- return false;
- }
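+/// A `TypeRelation` used by `same_type_modulo_infer`: type inference variables are
+/// treated as compatible with any type of the matching kind, regions are compared
+/// leniently, and consts are always considered equal.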
+struct SameTypeModuloInfer<'a, 'tcx>(&'a InferCtxt<'tcx>);
- substs_a
- .types()
- .zip(substs_b.types())
- .all(|(a, b)| self.same_type_modulo_infer(a, b))
- }
- (&ty::Int(_) | &ty::Uint(_), &ty::Infer(ty::InferTy::IntVar(_)))
+impl<'tcx> TypeRelation<'tcx> for SameTypeModuloInfer<'_, 'tcx> {
+ fn tcx(&self) -> TyCtxt<'tcx> {
+ self.0.tcx
+ }
+
+ fn param_env(&self) -> ty::ParamEnv<'tcx> {
+ // Unused, only for consts which we treat as always equal
+ ty::ParamEnv::empty()
+ }
+
+ fn tag(&self) -> &'static str {
+ "SameTypeModuloInfer"
+ }
+
+ fn a_is_expected(&self) -> bool {
+ true
+ }
+
+ fn relate_with_variance<T: relate::Relate<'tcx>>(
+ &mut self,
+ _variance: ty::Variance,
+ _info: ty::VarianceDiagInfo<'tcx>,
+ a: T,
+ b: T,
+ ) -> relate::RelateResult<'tcx, T> {
+ self.relate(a, b)
+ }
+
+ fn tys(&mut self, a: Ty<'tcx>, b: Ty<'tcx>) -> RelateResult<'tcx, Ty<'tcx>> {
+ match (a.kind(), b.kind()) {
+ (ty::Int(_) | ty::Uint(_), ty::Infer(ty::InferTy::IntVar(_)))
| (
- &ty::Infer(ty::InferTy::IntVar(_)),
- &ty::Int(_) | &ty::Uint(_) | &ty::Infer(ty::InferTy::IntVar(_)),
+ ty::Infer(ty::InferTy::IntVar(_)),
+ ty::Int(_) | ty::Uint(_) | ty::Infer(ty::InferTy::IntVar(_)),
)
- | (&ty::Float(_), &ty::Infer(ty::InferTy::FloatVar(_)))
+ | (ty::Float(_), ty::Infer(ty::InferTy::FloatVar(_)))
| (
- &ty::Infer(ty::InferTy::FloatVar(_)),
- &ty::Float(_) | &ty::Infer(ty::InferTy::FloatVar(_)),
+ ty::Infer(ty::InferTy::FloatVar(_)),
+ ty::Float(_) | ty::Infer(ty::InferTy::FloatVar(_)),
)
- | (&ty::Infer(ty::InferTy::TyVar(_)), _)
- | (_, &ty::Infer(ty::InferTy::TyVar(_))) => true,
- (&ty::Ref(_, ty_a, mut_a), &ty::Ref(_, ty_b, mut_b)) => {
- mut_a == mut_b && self.same_type_modulo_infer(ty_a, ty_b)
- }
- (&ty::RawPtr(a), &ty::RawPtr(b)) => {
- a.mutbl == b.mutbl && self.same_type_modulo_infer(a.ty, b.ty)
- }
- (&ty::Slice(a), &ty::Slice(b)) => self.same_type_modulo_infer(a, b),
- (&ty::Array(a_ty, a_ct), &ty::Array(b_ty, b_ct)) => {
- self.same_type_modulo_infer(a_ty, b_ty) && a_ct == b_ct
- }
- (&ty::Tuple(a), &ty::Tuple(b)) => {
- if a.len() != b.len() {
- return false;
- }
- std::iter::zip(a.iter(), b.iter()).all(|(a, b)| self.same_type_modulo_infer(a, b))
- }
- (&ty::FnPtr(a), &ty::FnPtr(b)) => {
- let a = a.skip_binder().inputs_and_output;
- let b = b.skip_binder().inputs_and_output;
- if a.len() != b.len() {
- return false;
- }
- std::iter::zip(a.iter(), b.iter()).all(|(a, b)| self.same_type_modulo_infer(a, b))
- }
- // FIXME(compiler-errors): This needs to be generalized more
- _ => a == b,
+ | (ty::Infer(ty::InferTy::TyVar(_)), _)
+ | (_, ty::Infer(ty::InferTy::TyVar(_))) => Ok(a),
+ (ty::Infer(_), _) | (_, ty::Infer(_)) => Err(TypeError::Mismatch),
+ _ => relate::super_relate_tys(self, a, b),
}
}
+
+ fn regions(
+ &mut self,
+ a: ty::Region<'tcx>,
+ b: ty::Region<'tcx>,
+ ) -> RelateResult<'tcx, ty::Region<'tcx>> {
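+        // A region variable is compatible with a free or `'static` region and with
+        // another variable; otherwise the two regions must be equal.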
+ if (a.is_var() && b.is_free_or_static())
+ || (b.is_var() && a.is_free_or_static())
+ || (a.is_var() && b.is_var())
+ || a == b
+ {
+ Ok(a)
+ } else {
+ Err(TypeError::Mismatch)
+ }
+ }
+
+ fn binders<T>(
+ &mut self,
+ a: ty::Binder<'tcx, T>,
+ b: ty::Binder<'tcx, T>,
+ ) -> relate::RelateResult<'tcx, ty::Binder<'tcx, T>>
+ where
+ T: relate::Relate<'tcx>,
+ {
+ Ok(a.rebind(self.relate(a.skip_binder(), b.skip_binder())?))
+ }
+
+ fn consts(
+ &mut self,
+ a: ty::Const<'tcx>,
+ _b: ty::Const<'tcx>,
+ ) -> relate::RelateResult<'tcx, ty::Const<'tcx>> {
+ // FIXME(compiler-errors): This could at least do some first-order
+ // relation
+ Ok(a)
+ }
}
-impl<'a, 'tcx> InferCtxt<'a, 'tcx> {
+impl<'tcx> InferCtxt<'tcx> {
fn report_inference_failure(
&self,
var_origin: RegionVariableOrigin,
@@ -2781,12 +3001,12 @@ pub enum FailureCode {
}
pub trait ObligationCauseExt<'tcx> {
- fn as_failure_code(&self, terr: &TypeError<'tcx>) -> FailureCode;
+ fn as_failure_code(&self, terr: TypeError<'tcx>) -> FailureCode;
fn as_requirement_str(&self) -> &'static str;
}
impl<'tcx> ObligationCauseExt<'tcx> for ObligationCause<'tcx> {
- fn as_failure_code(&self, terr: &TypeError<'tcx>) -> FailureCode {
+ fn as_failure_code(&self, terr: TypeError<'tcx>) -> FailureCode {
use self::FailureCode::*;
use crate::traits::ObligationCauseCode::*;
match self.code() {
@@ -2823,7 +3043,7 @@ impl<'tcx> ObligationCauseExt<'tcx> for ObligationCause<'tcx> {
TypeError::IntrinsicCast => {
Error0308("cannot coerce intrinsics to function pointers")
}
- TypeError::ObjectUnsafeCoercion(did) => Error0038(*did),
+ TypeError::ObjectUnsafeCoercion(did) => Error0038(did),
_ => Error0308("mismatched types"),
},
}
@@ -2853,6 +3073,30 @@ impl<'tcx> ObligationCauseExt<'tcx> for ObligationCause<'tcx> {
}
}
+/// Newtype to allow implementing IntoDiagnosticArg
+pub struct ObligationCauseAsDiagArg<'tcx>(pub ObligationCause<'tcx>);
+
+impl IntoDiagnosticArg for ObligationCauseAsDiagArg<'_> {
+ fn into_diagnostic_arg(self) -> rustc_errors::DiagnosticArgValue<'static> {
+ use crate::traits::ObligationCauseCode::*;
+ let kind = match self.0.code() {
+ CompareImplItemObligation { kind: ty::AssocKind::Fn, .. } => "method_compat",
+ CompareImplItemObligation { kind: ty::AssocKind::Type, .. } => "type_compat",
+ CompareImplItemObligation { kind: ty::AssocKind::Const, .. } => "const_compat",
+ ExprAssignable => "expr_assignable",
+ IfExpression { .. } => "if_else_different",
+ IfExpressionWithNoElse => "no_else",
+ MainFunctionType => "fn_main_correct_type",
+ StartFunctionType => "fn_start_correct_type",
+ IntrinsicType => "intristic_correct_type",
+ MethodReceiver => "method_correct_type",
+ _ => "other",
+ }
+ .into();
+ rustc_errors::DiagnosticArgValue::Str(kind)
+ }
+}
+
/// This is a bare signal of what kind of type we're dealing with. `ty::TyKind` tracks
/// extra information about each type, but we only care about the category.
#[derive(Clone, Copy, PartialEq, Eq, Hash)]
@@ -2886,7 +3130,7 @@ impl TyCategory {
}
}
-impl<'tcx> InferCtxt<'_, 'tcx> {
+impl<'tcx> InferCtxt<'tcx> {
/// Given a [`hir::Block`], get the span of its last expression or
/// statement, peeling off any inner blocks.
pub fn find_block_span(&self, block: &'tcx hir::Block<'tcx>) -> Span {
@@ -2913,7 +3157,9 @@ impl<'tcx> InferCtxt<'_, 'tcx> {
_ => rustc_span::DUMMY_SP,
}
}
+}
+impl<'tcx> TypeErrCtxt<'_, 'tcx> {
/// Be helpful when the user wrote `{... expr; }` and taking the `;` off
/// is enough to fix the error.
pub fn could_remove_semicolon(
@@ -2930,7 +3176,7 @@ impl<'tcx> InferCtxt<'_, 'tcx> {
let hir::StmtKind::Semi(ref last_expr) = last_stmt.kind else {
return None;
};
- let last_expr_ty = self.in_progress_typeck_results?.borrow().expr_ty_opt(*last_expr)?;
+ let last_expr_ty = self.typeck_results.as_ref()?.expr_ty_opt(*last_expr)?;
let needs_box = match (last_expr_ty.kind(), expected_ty.kind()) {
_ if last_expr_ty.references_error() => return None,
_ if self.same_type_modulo_infer(last_expr_ty, expected_ty) => {
@@ -3013,8 +3259,9 @@ impl<'tcx> InferCtxt<'_, 'tcx> {
let mut find_compatible_candidates = |pat: &hir::Pat<'_>| {
if let hir::PatKind::Binding(_, hir_id, ident, _) = &pat.kind
&& let Some(pat_ty) = self
- .in_progress_typeck_results
- .and_then(|typeck_results| typeck_results.borrow().node_type_opt(*hir_id))
+ .typeck_results
+ .as_ref()
+ .and_then(|typeck_results| typeck_results.node_type_opt(*hir_id))
{
let pat_ty = self.resolve_vars_if_possible(pat_ty);
if self.same_type_modulo_infer(pat_ty, expected_ty)
diff --git a/compiler/rustc_infer/src/infer/error_reporting/need_type_info.rs b/compiler/rustc_infer/src/infer/error_reporting/need_type_info.rs
index 561d1354e..7b3178e61 100644
--- a/compiler/rustc_infer/src/infer/error_reporting/need_type_info.rs
+++ b/compiler/rustc_infer/src/infer/error_reporting/need_type_info.rs
@@ -1,6 +1,12 @@
+use crate::errors::{
+ AmbigousImpl, AmbigousReturn, AnnotationRequired, InferenceBadError, NeedTypeInfoInGenerator,
+ SourceKindMultiSuggestion, SourceKindSubdiag,
+};
+use crate::infer::error_reporting::TypeErrCtxt;
use crate::infer::type_variable::{TypeVariableOrigin, TypeVariableOriginKind};
use crate::infer::InferCtxt;
-use rustc_errors::{pluralize, struct_span_err, Applicability, DiagnosticBuilder, ErrorGuaranteed};
+use rustc_errors::IntoDiagnostic;
+use rustc_errors::{DiagnosticBuilder, ErrorGuaranteed, IntoDiagnosticArg};
use rustc_hir as hir;
use rustc_hir::def::Res;
use rustc_hir::def::{CtorOf, DefKind, Namespace};
@@ -11,8 +17,8 @@ use rustc_middle::hir::nested_filter;
use rustc_middle::infer::unify_key::{ConstVariableOrigin, ConstVariableOriginKind};
use rustc_middle::ty::adjustment::{Adjust, Adjustment, AutoBorrow, AutoBorrowMutability};
use rustc_middle::ty::print::{FmtPrinter, PrettyPrinter, Print, Printer};
-use rustc_middle::ty::subst::{GenericArg, GenericArgKind, Subst, SubstsRef};
use rustc_middle::ty::{self, DefIdTree, InferConst};
+use rustc_middle::ty::{GenericArg, GenericArgKind, SubstsRef};
use rustc_middle::ty::{IsSuggestable, Ty, TyCtxt, TypeckResults};
use rustc_span::symbol::{kw, Ident};
use rustc_span::{BytePos, Span};
@@ -60,38 +66,49 @@ pub struct InferenceDiagnosticsParentData {
name: String,
}
+#[derive(Clone)]
pub enum UnderspecifiedArgKind {
Type { prefix: Cow<'static, str> },
Const { is_parameter: bool },
}
impl InferenceDiagnosticsData {
- /// Generate a label for a generic argument which can't be inferred. When not
- /// much is known about the argument, `use_diag` may be used to describe the
- /// labeled value.
- fn cannot_infer_msg(&self) -> String {
- if self.name == "_" && matches!(self.kind, UnderspecifiedArgKind::Type { .. }) {
- return "cannot infer type".to_string();
- }
-
- let suffix = match &self.parent {
- Some(parent) => parent.suffix_string(),
- None => String::new(),
- };
-
- // For example: "cannot infer type for type parameter `T`"
- format!("cannot infer {} `{}`{}", self.kind.prefix_string(), self.name, suffix)
+ fn can_add_more_info(&self) -> bool {
+ !(self.name == "_" && matches!(self.kind, UnderspecifiedArgKind::Type { .. }))
}
- fn where_x_is_specified(&self, in_type: Ty<'_>) -> String {
+ fn where_x_is_kind(&self, in_type: Ty<'_>) -> &'static str {
if in_type.is_ty_infer() {
- String::new()
+ "empty"
} else if self.name == "_" {
// FIXME: Consider specializing this message if there is a single `_`
// in the type.
- ", where the placeholders `_` are specified".to_string()
+ "underscore"
} else {
- format!(", where the {} `{}` is specified", self.kind.prefix_string(), self.name)
+ "has_name"
+ }
+ }
+
+    /// Generate the `InferenceBadError` data for a generic argument which can't be
+    /// inferred, used as the fallback label when not much else is known about the
+    /// argument.
+ fn make_bad_error(&self, span: Span) -> InferenceBadError<'_> {
+ let has_parent = self.parent.is_some();
+ let bad_kind = if self.can_add_more_info() { "more_info" } else { "other" };
+ let (parent_prefix, parent_name) = self
+ .parent
+ .as_ref()
+ .map(|parent| (parent.prefix, parent.name.clone()))
+ .unwrap_or_default();
+ InferenceBadError {
+ span,
+ bad_kind,
+ prefix_kind: self.kind.clone(),
+ prefix: self.kind.try_get_prefix().unwrap_or_default(),
+ name: self.name.clone(),
+ has_parent,
+ parent_prefix,
+ parent_name,
}
}
}
@@ -113,23 +130,29 @@ impl InferenceDiagnosticsParentData {
fn for_def_id(tcx: TyCtxt<'_>, def_id: DefId) -> Option<InferenceDiagnosticsParentData> {
Self::for_parent_def_id(tcx, tcx.parent(def_id))
}
+}
- fn suffix_string(&self) -> String {
- format!(" declared on the {} `{}`", self.prefix, self.name)
+impl IntoDiagnosticArg for UnderspecifiedArgKind {
+ fn into_diagnostic_arg(self) -> rustc_errors::DiagnosticArgValue<'static> {
+ let kind = match self {
+ Self::Type { .. } => "type",
+ Self::Const { is_parameter: true } => "const_with_param",
+ Self::Const { is_parameter: false } => "const",
+ };
+ rustc_errors::DiagnosticArgValue::Str(kind.into())
}
}
impl UnderspecifiedArgKind {
- fn prefix_string(&self) -> Cow<'static, str> {
+ fn try_get_prefix(&self) -> Option<&str> {
match self {
- Self::Type { prefix } => format!("type for {}", prefix).into(),
- Self::Const { is_parameter: true } => "the value of const parameter".into(),
- Self::Const { is_parameter: false } => "the value of the constant".into(),
+ Self::Type { prefix } => Some(prefix.as_ref()),
+ Self::Const { .. } => None,
}
}
}
-fn fmt_printer<'a, 'tcx>(infcx: &'a InferCtxt<'_, 'tcx>, ns: Namespace) -> FmtPrinter<'a, 'tcx> {
+fn fmt_printer<'a, 'tcx>(infcx: &'a InferCtxt<'tcx>, ns: Namespace) -> FmtPrinter<'a, 'tcx> {
let mut printer = FmtPrinter::new(infcx.tcx, ns);
let ty_getter = move |ty_vid| {
if infcx.probe_ty_var(ty_vid).is_ok() {
@@ -160,7 +183,7 @@ fn fmt_printer<'a, 'tcx>(infcx: &'a InferCtxt<'_, 'tcx>, ns: Namespace) -> FmtPr
printer
}
-fn ty_to_string<'tcx>(infcx: &InferCtxt<'_, 'tcx>, ty: Ty<'tcx>) -> String {
+fn ty_to_string<'tcx>(infcx: &InferCtxt<'tcx>, ty: Ty<'tcx>) -> String {
let printer = fmt_printer(infcx, Namespace::TypeNS);
let ty = infcx.resolve_vars_if_possible(ty);
match ty.kind() {
@@ -177,9 +200,9 @@ fn ty_to_string<'tcx>(infcx: &InferCtxt<'_, 'tcx>, ty: Ty<'tcx>) -> String {
}
/// We don't want to directly use `ty_to_string` for closures as their type isn't really
-/// something users are familar with. Directly printing the `fn_sig` of closures also
+/// something users are familiar with. Directly printing the `fn_sig` of closures also
/// doesn't work as they actually use the "rust-call" API.
-fn closure_as_fn_str<'tcx>(infcx: &InferCtxt<'_, 'tcx>, ty: Ty<'tcx>) -> String {
+fn closure_as_fn_str<'tcx>(infcx: &InferCtxt<'tcx>, ty: Ty<'tcx>) -> String {
let ty::Closure(_, substs) = ty.kind() else { unreachable!() };
let fn_sig = substs.as_closure().sig();
let args = fn_sig
@@ -203,7 +226,7 @@ fn closure_as_fn_str<'tcx>(infcx: &InferCtxt<'_, 'tcx>, ty: Ty<'tcx>) -> String
format!("fn({}){}", args, ret)
}
-impl<'a, 'tcx> InferCtxt<'a, 'tcx> {
+impl<'tcx> InferCtxt<'tcx> {
/// Extracts data used by diagnostic for either types or constants
/// which were stuck during inference.
pub fn extract_inference_diagnostics_data(
@@ -295,7 +318,7 @@ impl<'a, 'tcx> InferCtxt<'a, 'tcx> {
}
}
- /// Used as a fallback in [InferCtxt::emit_inference_failure_err]
+ /// Used as a fallback in [TypeErrCtxt::emit_inference_failure_err]
/// in case we weren't able to get a better error.
fn bad_inference_failure_err(
&self,
@@ -303,13 +326,48 @@ impl<'a, 'tcx> InferCtxt<'a, 'tcx> {
arg_data: InferenceDiagnosticsData,
error_code: TypeAnnotationNeeded,
) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> {
- let error_code = error_code.into();
- let mut err =
- self.tcx.sess.struct_span_err_with_code(span, "type annotations needed", error_code);
- err.span_label(span, arg_data.cannot_infer_msg());
- err
+ let source_kind = "other";
+ let source_name = "";
+ let failure_span = None;
+ let infer_subdiags = Vec::new();
+ let multi_suggestions = Vec::new();
+ let bad_label = Some(arg_data.make_bad_error(span));
+ match error_code {
+ TypeAnnotationNeeded::E0282 => AnnotationRequired {
+ span,
+ source_kind,
+ source_name,
+ failure_span,
+ infer_subdiags,
+ multi_suggestions,
+ bad_label,
+ }
+ .into_diagnostic(&self.tcx.sess.parse_sess.span_diagnostic),
+ TypeAnnotationNeeded::E0283 => AmbigousImpl {
+ span,
+ source_kind,
+ source_name,
+ failure_span,
+ infer_subdiags,
+ multi_suggestions,
+ bad_label,
+ }
+ .into_diagnostic(&self.tcx.sess.parse_sess.span_diagnostic),
+ TypeAnnotationNeeded::E0284 => AmbigousReturn {
+ span,
+ source_kind,
+ source_name,
+ failure_span,
+ infer_subdiags,
+ multi_suggestions,
+ bad_label,
+ }
+ .into_diagnostic(&self.tcx.sess.parse_sess.span_diagnostic),
+ }
}
+}
+impl<'tcx> TypeErrCtxt<'_, 'tcx> {
pub fn emit_inference_failure_err(
&self,
body_id: Option<hir::BodyId>,
@@ -321,14 +379,12 @@ impl<'a, 'tcx> InferCtxt<'a, 'tcx> {
let arg = self.resolve_vars_if_possible(arg);
let arg_data = self.extract_inference_diagnostics_data(arg, None);
- let Some(typeck_results) = self.in_progress_typeck_results else {
+ let Some(typeck_results) = &self.typeck_results else {
// If we don't have any typeck results we're outside
// of a body, so we won't be able to get better info
// here.
return self.bad_inference_failure_err(failure_span, arg_data, error_code);
};
- let typeck_results = typeck_results.borrow();
- let typeck_results = &typeck_results;
let mut local_visitor = FindInferSourceVisitor::new(&self, typeck_results, arg);
if let Some(body_id) = body_id {
@@ -340,48 +396,39 @@ impl<'a, 'tcx> InferCtxt<'a, 'tcx> {
return self.bad_inference_failure_err(failure_span, arg_data, error_code)
};
- let error_code = error_code.into();
- let mut err = self.tcx.sess.struct_span_err_with_code(
- span,
- &format!("type annotations needed{}", kind.ty_msg(self)),
- error_code,
- );
-
- if should_label_span && !failure_span.overlaps(span) {
- err.span_label(failure_span, "type must be known at this point");
- }
+ let (source_kind, name) = kind.ty_localized_msg(self);
+ let failure_span = if should_label_span && !failure_span.overlaps(span) {
+ Some(failure_span)
+ } else {
+ None
+ };
+ let mut infer_subdiags = Vec::new();
+ let mut multi_suggestions = Vec::new();
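+        // Turn the inference source found above into structured subdiagnostics and
+        // suggestions for the translated "type annotations needed" errors built below.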
match kind {
InferSourceKind::LetBinding { insert_span, pattern_name, ty } => {
- let suggestion_msg = if let Some(name) = pattern_name {
- format!(
- "consider giving `{}` an explicit type{}",
- name,
- arg_data.where_x_is_specified(ty)
- )
- } else {
- format!(
- "consider giving this pattern a type{}",
- arg_data.where_x_is_specified(ty)
- )
- };
- err.span_suggestion_verbose(
- insert_span,
- &suggestion_msg,
- format!(": {}", ty_to_string(self, ty)),
- Applicability::HasPlaceholders,
- );
+ infer_subdiags.push(SourceKindSubdiag::LetLike {
+ span: insert_span,
+ name: pattern_name.map(|name| name.to_string()).unwrap_or_else(String::new),
+ x_kind: arg_data.where_x_is_kind(ty),
+ prefix_kind: arg_data.kind.clone(),
+ prefix: arg_data.kind.try_get_prefix().unwrap_or_default(),
+ arg_name: arg_data.name,
+ kind: if pattern_name.is_some() { "with_pattern" } else { "other" },
+ type_name: ty_to_string(self, ty),
+ });
}
InferSourceKind::ClosureArg { insert_span, ty } => {
- err.span_suggestion_verbose(
- insert_span,
- &format!(
- "consider giving this closure parameter an explicit type{}",
- arg_data.where_x_is_specified(ty)
- ),
- format!(": {}", ty_to_string(self, ty)),
- Applicability::HasPlaceholders,
- );
+ infer_subdiags.push(SourceKindSubdiag::LetLike {
+ span: insert_span,
+ name: String::new(),
+ x_kind: arg_data.where_x_is_kind(ty),
+ prefix_kind: arg_data.kind.clone(),
+ prefix: arg_data.kind.try_get_prefix().unwrap_or_default(),
+ arg_name: arg_data.name,
+ kind: "closure",
+ type_name: ty_to_string(self, ty),
+ });
}
InferSourceKind::GenericArg {
insert_span,
@@ -389,23 +436,25 @@ impl<'a, 'tcx> InferCtxt<'a, 'tcx> {
generics_def_id,
def_id: _,
generic_args,
+ have_turbofish,
} => {
let generics = self.tcx.generics_of(generics_def_id);
let is_type = matches!(arg.unpack(), GenericArgKind::Type(_));
- let cannot_infer_msg = format!(
- "cannot infer {} of the {} parameter `{}`{}",
- if is_type { "type" } else { "the value" },
- if is_type { "type" } else { "const" },
- generics.params[argument_index].name,
- // We use the `generics_def_id` here, as even when suggesting `None::<T>`,
- // the type parameter `T` was still declared on the enum, not on the
- // variant.
+ let (parent_exists, parent_prefix, parent_name) =
InferenceDiagnosticsParentData::for_parent_def_id(self.tcx, generics_def_id)
- .map_or(String::new(), |parent| parent.suffix_string()),
- );
+ .map_or((false, String::new(), String::new()), |parent| {
+ (true, parent.prefix.to_string(), parent.name)
+ });
- err.span_label(span, cannot_infer_msg);
+ infer_subdiags.push(SourceKindSubdiag::GenericLabel {
+ span,
+ is_type,
+ param_name: generics.params[argument_index].name.to_string(),
+ parent_exists,
+ parent_prefix,
+ parent_name,
+ });
let args = fmt_printer(self, Namespace::TypeNS)
.comma_sep(generic_args.iter().copied().map(|arg| {
@@ -435,15 +484,13 @@ impl<'a, 'tcx> InferCtxt<'a, 'tcx> {
.unwrap()
.into_buffer();
- err.span_suggestion_verbose(
- insert_span,
- &format!(
- "consider specifying the generic argument{}",
- pluralize!(generic_args.len()),
- ),
- format!("::<{}>", args),
- Applicability::HasPlaceholders,
- );
+ if !have_turbofish {
+ infer_subdiags.push(SourceKindSubdiag::GenericSuggestion {
+ span: insert_span,
+ arg_count: generic_args.len(),
+ args,
+ });
+ }
}
InferSourceKind::FullyQualifiedMethodCall { receiver, successor, substs, def_id } => {
let printer = fmt_printer(self, Namespace::ValueNS);
@@ -468,39 +515,58 @@ impl<'a, 'tcx> InferCtxt<'a, 'tcx> {
_ => "",
};
- let suggestion = vec![
- (receiver.span.shrink_to_lo(), format!("{def_path}({adjustment}")),
- (receiver.span.shrink_to_hi().with_hi(successor.1), successor.0.to_string()),
- ];
- err.multipart_suggestion_verbose(
- "try using a fully qualified path to specify the expected types",
- suggestion,
- Applicability::HasPlaceholders,
- );
+ multi_suggestions.push(SourceKindMultiSuggestion::new_fully_qualified(
+ receiver.span,
+ def_path,
+ adjustment,
+ successor,
+ ));
}
InferSourceKind::ClosureReturn { ty, data, should_wrap_expr } => {
- let ret = ty_to_string(self, ty);
- let (arrow, post) = match data {
- FnRetTy::DefaultReturn(_) => ("-> ", " "),
- _ => ("", ""),
- };
- let suggestion = match should_wrap_expr {
- Some(end_span) => vec![
- (data.span(), format!("{}{}{}{{ ", arrow, ret, post)),
- (end_span, " }".to_string()),
- ],
- None => vec![(data.span(), format!("{}{}{}", arrow, ret, post))],
- };
- err.multipart_suggestion_verbose(
- "try giving this closure an explicit return type",
- suggestion,
- Applicability::HasPlaceholders,
- );
+ let ty_info = ty_to_string(self, ty);
+ multi_suggestions.push(SourceKindMultiSuggestion::new_closure_return(
+ ty_info,
+ data,
+ should_wrap_expr,
+ ));
}
}
- err
+ match error_code {
+ TypeAnnotationNeeded::E0282 => AnnotationRequired {
+ span,
+ source_kind,
+ source_name: &name,
+ failure_span,
+ infer_subdiags,
+ multi_suggestions,
+ bad_label: None,
+ }
+ .into_diagnostic(&self.tcx.sess.parse_sess.span_diagnostic),
+ TypeAnnotationNeeded::E0283 => AmbigousImpl {
+ span,
+ source_kind,
+ source_name: &name,
+ failure_span,
+ infer_subdiags,
+ multi_suggestions,
+ bad_label: None,
+ }
+ .into_diagnostic(&self.tcx.sess.parse_sess.span_diagnostic),
+ TypeAnnotationNeeded::E0284 => AmbigousReturn {
+ span,
+ source_kind,
+ source_name: &name,
+ failure_span,
+ infer_subdiags,
+ multi_suggestions,
+ bad_label: None,
+ }
+ .into_diagnostic(&self.tcx.sess.parse_sess.span_diagnostic),
+ }
}
+}
+impl<'tcx> InferCtxt<'tcx> {
pub fn need_type_info_err_in_generator(
&self,
kind: hir::GeneratorKind,
@@ -510,15 +576,26 @@ impl<'a, 'tcx> InferCtxt<'a, 'tcx> {
let ty = self.resolve_vars_if_possible(ty);
let data = self.extract_inference_diagnostics_data(ty.into(), None);
- let mut err = struct_span_err!(
- self.tcx.sess,
+ NeedTypeInfoInGenerator {
+ bad_label: data.make_bad_error(span),
span,
- E0698,
- "type inside {} must be known in this context",
- kind,
- );
- err.span_label(span, data.cannot_infer_msg());
- err
+ generator_kind: GeneratorKindAsDiagArg(kind),
+ }
+ .into_diagnostic(&self.tcx.sess.parse_sess.span_diagnostic)
+ }
+}
+
+pub struct GeneratorKindAsDiagArg(pub hir::GeneratorKind);
+
+impl IntoDiagnosticArg for GeneratorKindAsDiagArg {
+ fn into_diagnostic_arg(self) -> rustc_errors::DiagnosticArgValue<'static> {
+ let kind = match self.0 {
+ hir::GeneratorKind::Async(hir::AsyncGeneratorKind::Block) => "async_block",
+ hir::GeneratorKind::Async(hir::AsyncGeneratorKind::Closure) => "async_closure",
+ hir::GeneratorKind::Async(hir::AsyncGeneratorKind::Fn) => "async_fn",
+ hir::GeneratorKind::Gen => "generator",
+ };
+ rustc_errors::DiagnosticArgValue::Str(kind.into())
}
}
@@ -545,6 +622,7 @@ enum InferSourceKind<'tcx> {
generics_def_id: DefId,
def_id: DefId,
generic_args: &'tcx [GenericArg<'tcx>],
+ have_turbofish: bool,
},
FullyQualifiedMethodCall {
receiver: &'tcx Expr<'tcx>,
@@ -579,22 +657,22 @@ impl<'tcx> InferSource<'tcx> {
}
impl<'tcx> InferSourceKind<'tcx> {
- fn ty_msg(&self, infcx: &InferCtxt<'_, 'tcx>) -> String {
+ fn ty_localized_msg(&self, infcx: &InferCtxt<'tcx>) -> (&'static str, String) {
match *self {
InferSourceKind::LetBinding { ty, .. }
| InferSourceKind::ClosureArg { ty, .. }
| InferSourceKind::ClosureReturn { ty, .. } => {
if ty.is_closure() {
- format!(" for the closure `{}`", closure_as_fn_str(infcx, ty))
+ ("closure", closure_as_fn_str(infcx, ty))
} else if !ty.is_ty_infer() {
- format!(" for `{}`", ty_to_string(infcx, ty))
+ ("normal", ty_to_string(infcx, ty))
} else {
- String::new()
+ ("other", String::new())
}
}
// FIXME: We should be able to add some additional info here.
InferSourceKind::GenericArg { .. }
- | InferSourceKind::FullyQualifiedMethodCall { .. } => String::new(),
+ | InferSourceKind::FullyQualifiedMethodCall { .. } => ("other", String::new()),
}
}
}
@@ -605,6 +683,7 @@ struct InsertableGenericArgs<'tcx> {
substs: SubstsRef<'tcx>,
generics_def_id: DefId,
def_id: DefId,
+ have_turbofish: bool,
}
/// A visitor which searches for the "best" spot to use in the inference error.
@@ -615,7 +694,7 @@ struct InsertableGenericArgs<'tcx> {
/// While doing so, the currently best spot is stored in `infer_source`.
/// For details on how we rank spots, see [Self::source_cost]
struct FindInferSourceVisitor<'a, 'tcx> {
- infcx: &'a InferCtxt<'a, 'tcx>,
+ infcx: &'a InferCtxt<'tcx>,
typeck_results: &'a TypeckResults<'tcx>,
target: GenericArg<'tcx>,
@@ -627,7 +706,7 @@ struct FindInferSourceVisitor<'a, 'tcx> {
impl<'a, 'tcx> FindInferSourceVisitor<'a, 'tcx> {
fn new(
- infcx: &'a InferCtxt<'a, 'tcx>,
+ infcx: &'a InferCtxt<'tcx>,
typeck_results: &'a TypeckResults<'tcx>,
target: GenericArg<'tcx>,
) -> Self {
@@ -823,14 +902,21 @@ impl<'a, 'tcx> FindInferSourceVisitor<'a, 'tcx> {
// impl is currently the `DefId` of `Output` in the trait definition
// which makes this somewhat difficult and prevents us from just
// using `self.path_inferred_subst_iter` here.
- hir::ExprKind::Struct(&hir::QPath::Resolved(_self_ty, path), _, _) => {
- if let Some(ty) = self.opt_node_type(expr.hir_id) {
- if let ty::Adt(_, substs) = ty.kind() {
- return Box::new(self.resolved_path_inferred_subst_iter(path, substs));
- }
+ hir::ExprKind::Struct(&hir::QPath::Resolved(_self_ty, path), _, _)
+ // FIXME(TaKO8Ki): Ideally we should support this. For that
+ // we have to map back from the self type to the
+ // type alias though. That's difficult.
+ //
+ // See the `need_type_info/issue-103053.rs` test for
+            // an example.
+ if !matches!(path.res, Res::Def(DefKind::TyAlias, _)) => {
+ if let Some(ty) = self.opt_node_type(expr.hir_id)
+ && let ty::Adt(_, substs) = ty.kind()
+ {
+ return Box::new(self.resolved_path_inferred_subst_iter(path, substs));
}
}
- hir::ExprKind::MethodCall(segment, _, _) => {
+ hir::ExprKind::MethodCall(segment, ..) => {
if let Some(def_id) = self.typeck_results.type_dependent_def_id(expr.hir_id) {
let generics = tcx.generics_of(def_id);
let insertable: Option<_> = try {
@@ -838,13 +924,14 @@ impl<'a, 'tcx> FindInferSourceVisitor<'a, 'tcx> {
None?
}
let substs = self.node_substs_opt(expr.hir_id)?;
- let span = tcx.hir().span(segment.hir_id?);
+ let span = tcx.hir().span(segment.hir_id);
let insert_span = segment.ident.span.shrink_to_hi().with_hi(span.hi());
InsertableGenericArgs {
insert_span,
substs,
generics_def_id: def_id,
def_id,
+ have_turbofish: false,
}
};
return Box::new(insertable.into_iter());
@@ -862,6 +949,9 @@ impl<'a, 'tcx> FindInferSourceVisitor<'a, 'tcx> {
substs: SubstsRef<'tcx>,
) -> impl Iterator<Item = InsertableGenericArgs<'tcx>> + 'a {
let tcx = self.infcx.tcx;
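+        // Note whether the user already wrote explicit generic arguments anywhere in
+        // this path; if so, the generic-argument suggestion is suppressed later.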
+ let have_turbofish = path.segments.iter().any(|segment| {
+ segment.args.map_or(false, |args| args.args.iter().any(|arg| arg.is_ty_or_const()))
+ });
// The last segment of a path often has `Res::Err` and the
// correct `Res` is the one of the whole path.
//
@@ -871,7 +961,7 @@ impl<'a, 'tcx> FindInferSourceVisitor<'a, 'tcx> {
let generics_def_id = tcx.res_generics_def_id(path.res)?;
let generics = tcx.generics_of(generics_def_id);
if generics.has_impl_trait() {
- None?
+ None?;
}
let insert_span =
path.segments.last().unwrap().ident.span.shrink_to_hi().with_hi(path.span.hi());
@@ -880,25 +970,27 @@ impl<'a, 'tcx> FindInferSourceVisitor<'a, 'tcx> {
substs,
generics_def_id,
def_id: path.res.def_id(),
+ have_turbofish,
}
};
path.segments
.iter()
.filter_map(move |segment| {
- let res = segment.res?;
+ let res = segment.res;
let generics_def_id = tcx.res_generics_def_id(res)?;
let generics = tcx.generics_of(generics_def_id);
if generics.has_impl_trait() {
return None;
}
- let span = tcx.hir().span(segment.hir_id?);
+ let span = tcx.hir().span(segment.hir_id);
let insert_span = segment.ident.span.shrink_to_hi().with_hi(span.hi());
Some(InsertableGenericArgs {
insert_span,
substs,
generics_def_id,
def_id: res.def_id(),
+ have_turbofish,
})
})
.chain(last_segment_using_path_data)
@@ -925,9 +1017,15 @@ impl<'a, 'tcx> FindInferSourceVisitor<'a, 'tcx> {
if !segment.infer_args || generics.has_impl_trait() {
None?;
}
- let span = tcx.hir().span(segment.hir_id?);
+ let span = tcx.hir().span(segment.hir_id);
let insert_span = segment.ident.span.shrink_to_hi().with_hi(span.hi());
- InsertableGenericArgs { insert_span, substs, generics_def_id: def_id, def_id }
+ InsertableGenericArgs {
+ insert_span,
+ substs,
+ generics_def_id: def_id,
+ def_id,
+ have_turbofish: false,
+ }
};
let parent_def_id = generics.parent.unwrap();
@@ -950,7 +1048,7 @@ impl<'a, 'tcx> FindInferSourceVisitor<'a, 'tcx> {
}
// There cannot be inference variables in the self type,
// so there's nothing for us to do here.
- Res::SelfTy { .. } => {}
+ Res::SelfTyParam { .. } | Res::SelfTyAlias { .. } => {}
_ => warn!(
"unexpected path: def={:?} substs={:?} path={:?}",
def, substs, path,
@@ -1050,7 +1148,13 @@ impl<'a, 'tcx> Visitor<'tcx> for FindInferSourceVisitor<'a, 'tcx> {
for args in self.expr_inferred_subst_iter(expr) {
debug!(?args);
- let InsertableGenericArgs { insert_span, substs, generics_def_id, def_id } = args;
+ let InsertableGenericArgs {
+ insert_span,
+ substs,
+ generics_def_id,
+ def_id,
+ have_turbofish,
+ } = args;
let generics = tcx.generics_of(generics_def_id);
if let Some(argument_index) = generics
.own_substs(substs)
@@ -1061,7 +1165,7 @@ impl<'a, 'tcx> Visitor<'tcx> for FindInferSourceVisitor<'a, 'tcx> {
let generic_args = &generics.own_substs_no_defaults(tcx, substs)
[generics.own_counts().lifetimes..];
let span = match expr.kind {
- ExprKind::MethodCall(path, _, _) => path.ident.span,
+ ExprKind::MethodCall(path, ..) => path.ident.span,
_ => expr.span,
};
@@ -1073,6 +1177,7 @@ impl<'a, 'tcx> Visitor<'tcx> for FindInferSourceVisitor<'a, 'tcx> {
generics_def_id,
def_id,
generic_args,
+ have_turbofish,
},
});
}
@@ -1110,7 +1215,7 @@ impl<'a, 'tcx> Visitor<'tcx> for FindInferSourceVisitor<'a, 'tcx> {
})
.any(|generics| generics.has_impl_trait())
};
- if let ExprKind::MethodCall(path, args, span) = expr.kind
+ if let ExprKind::MethodCall(path, receiver, args, span) = expr.kind
&& let Some(substs) = self.node_substs_opt(expr.hir_id)
&& substs.iter().any(|arg| self.generic_arg_contains_target(arg))
&& let Some(def_id) = self.typeck_results.type_dependent_def_id(expr.hir_id)
@@ -1118,12 +1223,12 @@ impl<'a, 'tcx> Visitor<'tcx> for FindInferSourceVisitor<'a, 'tcx> {
&& !has_impl_trait(def_id)
{
let successor =
- args.get(1).map_or_else(|| (")", span.hi()), |arg| (", ", arg.span.lo()));
+ args.get(0).map_or_else(|| (")", span.hi()), |arg| (", ", arg.span.lo()));
let substs = self.infcx.resolve_vars_if_possible(substs);
self.update_infer_source(InferSource {
span: path.ident.span,
kind: InferSourceKind::FullyQualifiedMethodCall {
- receiver: args.first().unwrap(),
+ receiver,
successor,
substs,
def_id,
diff --git a/compiler/rustc_infer/src/infer/error_reporting/nice_region_error/different_lifetimes.rs b/compiler/rustc_infer/src/infer/error_reporting/nice_region_error/different_lifetimes.rs
index 9a2ab3e32..da0271a34 100644
--- a/compiler/rustc_infer/src/infer/error_reporting/nice_region_error/different_lifetimes.rs
+++ b/compiler/rustc_infer/src/infer/error_reporting/nice_region_error/different_lifetimes.rs
@@ -1,6 +1,9 @@
//! Error Reporting for Anonymous Region Lifetime Errors
//! where both the regions are anonymous.
+use crate::errors::AddLifetimeParamsSuggestion;
+use crate::errors::LifetimeMismatch;
+use crate::errors::LifetimeMismatchLabels;
use crate::infer::error_reporting::nice_region_error::find_anon_type::find_anon_type;
use crate::infer::error_reporting::nice_region_error::util::AnonymousParamInfo;
use crate::infer::error_reporting::nice_region_error::NiceRegionError;
@@ -8,11 +11,10 @@ use crate::infer::lexical_region_resolve::RegionResolutionError;
use crate::infer::SubregionOrigin;
use crate::infer::TyCtxt;
-use rustc_errors::{struct_span_err, Applicability, Diagnostic, ErrorGuaranteed};
-use rustc_hir as hir;
-use rustc_hir::{GenericParamKind, Ty};
+use rustc_errors::AddToDiagnostic;
+use rustc_errors::{Diagnostic, ErrorGuaranteed};
+use rustc_hir::Ty;
use rustc_middle::ty::Region;
-use rustc_span::symbol::kw;
impl<'a, 'tcx> NiceRegionError<'a, 'tcx> {
/// Print the error message for lifetime errors when both the concerned regions are anonymous.
@@ -98,137 +100,50 @@ impl<'a, 'tcx> NiceRegionError<'a, 'tcx> {
let sub_is_ret_type =
self.is_return_type_anon(scope_def_id_sub, bregion_sub, ty_fndecl_sub);
- let span_label_var1 = match anon_param_sup.pat.simple_ident() {
- Some(simple_ident) => format!(" from `{}`", simple_ident),
- None => String::new(),
- };
-
- let span_label_var2 = match anon_param_sub.pat.simple_ident() {
- Some(simple_ident) => format!(" into `{}`", simple_ident),
- None => String::new(),
- };
-
debug!(
"try_report_anon_anon_conflict: sub_is_ret_type={:?} sup_is_ret_type={:?}",
sub_is_ret_type, sup_is_ret_type
);
- let mut err = struct_span_err!(self.tcx().sess, span, E0623, "lifetime mismatch");
-
- match (sup_is_ret_type, sub_is_ret_type) {
+ let labels = match (sup_is_ret_type, sub_is_ret_type) {
(ret_capture @ Some(ret_span), _) | (_, ret_capture @ Some(ret_span)) => {
let param_span =
if sup_is_ret_type == ret_capture { ty_sub.span } else { ty_sup.span };
-
- err.span_label(
+ LifetimeMismatchLabels::InRet {
param_span,
- "this parameter and the return type are declared with different lifetimes...",
- );
- err.span_label(ret_span, "");
- err.span_label(span, format!("...but data{} is returned here", span_label_var1));
- }
-
- (None, None) => {
- if ty_sup.hir_id == ty_sub.hir_id {
- err.span_label(ty_sup.span, "this type is declared with multiple lifetimes...");
- err.span_label(ty_sub.span, "");
- err.span_label(span, "...but data with one lifetime flows into the other here");
- } else {
- err.span_label(
- ty_sup.span,
- "these two types are declared with different lifetimes...",
- );
- err.span_label(ty_sub.span, "");
- err.span_label(
- span,
- format!("...but data{} flows{} here", span_label_var1, span_label_var2),
- );
+ ret_span,
+ span,
+ label_var1: anon_param_sup.pat.simple_ident(),
}
}
- }
- if suggest_adding_lifetime_params(self.tcx(), sub, ty_sup, ty_sub, &mut err) {
- err.note("each elided lifetime in input position becomes a distinct lifetime");
- }
+ (None, None) => LifetimeMismatchLabels::Normal {
+ hir_equal: ty_sup.hir_id == ty_sub.hir_id,
+ ty_sup: ty_sup.span,
+ ty_sub: ty_sub.span,
+ span,
+ sup: anon_param_sup.pat.simple_ident(),
+ sub: anon_param_sub.pat.simple_ident(),
+ },
+ };
- let reported = err.emit();
+ let suggestion =
+ AddLifetimeParamsSuggestion { tcx: self.tcx(), sub, ty_sup, ty_sub, add_note: true };
+ let err = LifetimeMismatch { span, labels, suggestion };
+ let reported = self.tcx().sess.emit_err(err);
Some(reported)
}
}
+/// Currently only used in rustc_borrowck; probably should be
+/// removed in favour of public_errors::AddLifetimeParamsSuggestion
pub fn suggest_adding_lifetime_params<'tcx>(
tcx: TyCtxt<'tcx>,
sub: Region<'tcx>,
- ty_sup: &Ty<'_>,
- ty_sub: &Ty<'_>,
+ ty_sup: &'tcx Ty<'_>,
+ ty_sub: &'tcx Ty<'_>,
err: &mut Diagnostic,
-) -> bool {
- let (
- hir::Ty { kind: hir::TyKind::Rptr(lifetime_sub, _), .. },
- hir::Ty { kind: hir::TyKind::Rptr(lifetime_sup, _), .. },
- ) = (ty_sub, ty_sup) else {
- return false;
- };
-
- if !lifetime_sub.name.is_anonymous() || !lifetime_sup.name.is_anonymous() {
- return false;
- };
-
- let Some(anon_reg) = tcx.is_suitable_region(sub) else {
- return false;
- };
-
- let hir_id = tcx.hir().local_def_id_to_hir_id(anon_reg.def_id);
-
- let node = tcx.hir().get(hir_id);
- let is_impl = matches!(&node, hir::Node::ImplItem(_));
- let generics = match node {
- hir::Node::Item(&hir::Item { kind: hir::ItemKind::Fn(_, ref generics, ..), .. })
- | hir::Node::TraitItem(&hir::TraitItem { ref generics, .. })
- | hir::Node::ImplItem(&hir::ImplItem { ref generics, .. }) => generics,
- _ => return false,
- };
-
- let suggestion_param_name = generics
- .params
- .iter()
- .filter(|p| matches!(p.kind, GenericParamKind::Lifetime { .. }))
- .map(|p| p.name.ident().name)
- .find(|i| *i != kw::UnderscoreLifetime);
- let introduce_new = suggestion_param_name.is_none();
- let suggestion_param_name =
- suggestion_param_name.map(|n| n.to_string()).unwrap_or_else(|| "'a".to_owned());
-
- debug!(?lifetime_sup.span);
- debug!(?lifetime_sub.span);
- let make_suggestion = |span: rustc_span::Span| {
- if span.is_empty() {
- (span, format!("{}, ", suggestion_param_name))
- } else if let Ok("&") = tcx.sess.source_map().span_to_snippet(span).as_deref() {
- (span.shrink_to_hi(), format!("{} ", suggestion_param_name))
- } else {
- (span, suggestion_param_name.clone())
- }
- };
- let mut suggestions =
- vec![make_suggestion(lifetime_sub.span), make_suggestion(lifetime_sup.span)];
-
- if introduce_new {
- let new_param_suggestion =
- if let Some(first) = generics.params.iter().find(|p| !p.name.ident().span.is_empty()) {
- (first.span.shrink_to_lo(), format!("{}, ", suggestion_param_name))
- } else {
- (generics.span, format!("<{}>", suggestion_param_name))
- };
-
- suggestions.push(new_param_suggestion);
- }
-
- let mut sugg = String::from("consider introducing a named lifetime parameter");
- if is_impl {
- sugg.push_str(" and update trait if needed");
- }
- err.multipart_suggestion(sugg, suggestions, Applicability::MaybeIncorrect);
-
- true
+) {
+ let suggestion = AddLifetimeParamsSuggestion { tcx, sub, ty_sup, ty_sub, add_note: false };
+ suggestion.add_to_diagnostic(err);
}
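The hunks above replace hand-built `struct_span_err!`/`span_label` calls with plain data types (`LifetimeMismatchLabels`, `LifetimeMismatch`) that are handed to `sess.emit_err`. As a rough standalone sketch of that general pattern, using a toy `IntoDiagnostic` trait, `Session` type, and `LifetimeMismatchSketch` struct invented here for illustration (not the compiler's real diagnostics API):

// Toy model of the "diagnostic struct" style: the error is a plain value that
// carries all message data, and a generic emit_err renders it.
trait IntoDiagnostic {
    fn render(&self) -> String;
}

struct LifetimeMismatchSketch {
    param: String,
    note: &'static str,
}

impl IntoDiagnostic for LifetimeMismatchSketch {
    fn render(&self) -> String {
        format!("lifetime mismatch involving `{}`: {}", self.param, self.note)
    }
}

struct Session;

impl Session {
    // Stand-in for `sess.emit_err(err)`: anything implementing the trait can be emitted.
    fn emit_err<D: IntoDiagnostic>(&self, err: D) {
        eprintln!("error: {}", err.render());
    }
}

fn main() {
    let sess = Session;
    sess.emit_err(LifetimeMismatchSketch {
        param: "x".to_string(),
        note: "each elided lifetime in input position becomes a distinct lifetime",
    });
}

Keeping the error as a value separates what went wrong from how it is rendered, which is the design direction these hunks move in.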
diff --git a/compiler/rustc_infer/src/infer/error_reporting/nice_region_error/find_anon_type.rs b/compiler/rustc_infer/src/infer/error_reporting/nice_region_error/find_anon_type.rs
index c1b201da6..d8f540b74 100644
--- a/compiler/rustc_infer/src/infer/error_reporting/nice_region_error/find_anon_type.rs
+++ b/compiler/rustc_infer/src/infer/error_reporting/nice_region_error/find_anon_type.rs
@@ -91,7 +91,7 @@ impl<'tcx> Visitor<'tcx> for FindNestedTypeVisitor<'tcx> {
hir::TyKind::TraitObject(bounds, ..) => {
for bound in bounds {
self.current_index.shift_in(1);
- self.visit_poly_trait_ref(bound, hir::TraitBoundModifier::None);
+ self.visit_poly_trait_ref(bound);
self.current_index.shift_out(1);
}
}
@@ -103,7 +103,7 @@ impl<'tcx> Visitor<'tcx> for FindNestedTypeVisitor<'tcx> {
// Find the index of the named region that was part of the
// error. We will then search the function parameters for a bound
// region at the right depth with the same index
- (Some(rl::Region::EarlyBound(_, id)), ty::BrNamed(def_id, _)) => {
+ (Some(rl::Region::EarlyBound(id)), ty::BrNamed(def_id, _)) => {
debug!("EarlyBound id={:?} def_id={:?}", id, def_id);
if id == def_id {
self.found_type = Some(arg);
@@ -133,7 +133,7 @@ impl<'tcx> Visitor<'tcx> for FindNestedTypeVisitor<'tcx> {
Some(
rl::Region::Static
| rl::Region::Free(_, _)
- | rl::Region::EarlyBound(_, _)
+ | rl::Region::EarlyBound(_)
| rl::Region::LateBound(_, _, _),
)
| None,
@@ -188,7 +188,7 @@ impl<'tcx> Visitor<'tcx> for TyPathVisitor<'tcx> {
fn visit_lifetime(&mut self, lifetime: &hir::Lifetime) {
match (self.tcx.named_region(lifetime.hir_id), self.bound_region) {
// the lifetime of the TyPath!
- (Some(rl::Region::EarlyBound(_, id)), ty::BrNamed(def_id, _)) => {
+ (Some(rl::Region::EarlyBound(id)), ty::BrNamed(def_id, _)) => {
debug!("EarlyBound id={:?} def_id={:?}", id, def_id);
if id == def_id {
self.found_it = true;
@@ -209,7 +209,7 @@ impl<'tcx> Visitor<'tcx> for TyPathVisitor<'tcx> {
(
Some(
rl::Region::Static
- | rl::Region::EarlyBound(_, _)
+ | rl::Region::EarlyBound(_)
| rl::Region::LateBound(_, _, _)
| rl::Region::Free(_, _),
)
diff --git a/compiler/rustc_infer/src/infer/error_reporting/nice_region_error/mismatched_static_lifetime.rs b/compiler/rustc_infer/src/infer/error_reporting/nice_region_error/mismatched_static_lifetime.rs
index 893ca3cf7..c5f2a1a3f 100644
--- a/compiler/rustc_infer/src/infer/error_reporting/nice_region_error/mismatched_static_lifetime.rs
+++ b/compiler/rustc_infer/src/infer/error_reporting/nice_region_error/mismatched_static_lifetime.rs
@@ -1,13 +1,16 @@
//! Error Reporting for when the lifetime for a type doesn't match the `impl` selected for a predicate
//! to hold.
+use crate::errors::{note_and_explain, IntroducesStaticBecauseUnmetLifetimeReq};
+use crate::errors::{
+ DoesNotOutliveStaticFromImpl, ImplicitStaticLifetimeSubdiag, MismatchedStaticLifetime,
+};
use crate::infer::error_reporting::nice_region_error::NiceRegionError;
-use crate::infer::error_reporting::note_and_explain_region;
use crate::infer::lexical_region_resolve::RegionResolutionError;
use crate::infer::{SubregionOrigin, TypeTrace};
use crate::traits::ObligationCauseCode;
use rustc_data_structures::fx::FxHashSet;
-use rustc_errors::{Applicability, ErrorGuaranteed, MultiSpan};
+use rustc_errors::{ErrorGuaranteed, MultiSpan};
use rustc_hir as hir;
use rustc_hir::intravisit::Visitor;
use rustc_middle::ty::TypeVisitor;
@@ -35,15 +38,27 @@ impl<'a, 'tcx> NiceRegionError<'a, 'tcx> {
let ObligationCauseCode::MatchImpl(parent, impl_def_id) = code else {
return None;
};
- let ObligationCauseCode::BindingObligation(_def_id, binding_span) = *parent.code() else {
+ let (ObligationCauseCode::BindingObligation(_, binding_span) | ObligationCauseCode::ExprBindingObligation(_, binding_span, ..))
+ = *parent.code() else {
return None;
};
- let mut err = self.tcx().sess.struct_span_err(cause.span, "incompatible lifetime on type");
+
// FIXME: we should point at the lifetime
- let mut multi_span: MultiSpan = vec![binding_span].into();
- multi_span.push_span_label(binding_span, "introduces a `'static` lifetime requirement");
- err.span_note(multi_span, "because this has an unmet lifetime requirement");
- note_and_explain_region(self.tcx(), &mut err, "", sup, "...", Some(binding_span));
+ let multi_span: MultiSpan = vec![binding_span].into();
+ let multispan_subdiag = IntroducesStaticBecauseUnmetLifetimeReq {
+ unmet_requirements: multi_span,
+ binding_span,
+ };
+
+ let expl = note_and_explain::RegionExplanation::new(
+ self.tcx(),
+ sup,
+ Some(binding_span),
+ note_and_explain::PrefixKind::Empty,
+ note_and_explain::SuffixKind::Continues,
+ );
+ let mut impl_span = None;
+ let mut implicit_static_lifetimes = Vec::new();
if let Some(impl_node) = self.tcx().hir().get_if_local(*impl_def_id) {
// If an impl is local, then maybe this isn't what they want. Try to
// be as helpful as possible with implicit lifetimes.
@@ -72,31 +87,34 @@ impl<'a, 'tcx> NiceRegionError<'a, 'tcx> {
// there aren't trait objects or because none are implicit, then just
// write a single note on the impl itself.
- let impl_span = self.tcx().def_span(*impl_def_id);
- err.span_note(impl_span, "...does not necessarily outlive the static lifetime introduced by the compatible `impl`");
+ impl_span = Some(self.tcx().def_span(*impl_def_id));
} else {
// Otherwise, point at all implicit static lifetimes
- err.note("...does not necessarily outlive the static lifetime introduced by the compatible `impl`");
for span in &traits {
- err.span_note(*span, "this has an implicit `'static` lifetime requirement");
+ implicit_static_lifetimes
+ .push(ImplicitStaticLifetimeSubdiag::Note { span: *span });
// It would be nice to put this immediately under the above note, but they get
// pushed to the end.
- err.span_suggestion_verbose(
- span.shrink_to_hi(),
- "consider relaxing the implicit `'static` requirement",
- " + '_",
- Applicability::MaybeIncorrect,
- );
+ implicit_static_lifetimes
+ .push(ImplicitStaticLifetimeSubdiag::Sugg { span: span.shrink_to_hi() });
}
}
} else {
// Otherwise just point out the impl.
- let impl_span = self.tcx().def_span(*impl_def_id);
- err.span_note(impl_span, "...does not necessarily outlive the static lifetime introduced by the compatible `impl`");
+ impl_span = Some(self.tcx().def_span(*impl_def_id));
}
- let reported = err.emit();
+ let err = MismatchedStaticLifetime {
+ cause_span: cause.span,
+ unmet_lifetime_reqs: multispan_subdiag,
+ expl,
+ does_not_outlive_static_from_impl: impl_span
+ .map(|span| DoesNotOutliveStaticFromImpl::Spanned { span })
+ .unwrap_or(DoesNotOutliveStaticFromImpl::Unspanned),
+ implicit_static_lifetimes,
+ };
+ let reported = self.tcx().sess.emit_err(err);
Some(reported)
}
}
diff --git a/compiler/rustc_infer/src/infer/error_reporting/nice_region_error/mod.rs b/compiler/rustc_infer/src/infer/error_reporting/nice_region_error/mod.rs
index 53d9acf7d..aaf5a7af0 100644
--- a/compiler/rustc_infer/src/infer/error_reporting/nice_region_error/mod.rs
+++ b/compiler/rustc_infer/src/infer/error_reporting/nice_region_error/mod.rs
@@ -1,6 +1,6 @@
+use crate::infer::error_reporting::TypeErrCtxt;
use crate::infer::lexical_region_resolve::RegionResolutionError;
use crate::infer::lexical_region_resolve::RegionResolutionError::*;
-use crate::infer::InferCtxt;
use rustc_errors::{DiagnosticBuilder, ErrorGuaranteed};
use rustc_middle::ty::{self, TyCtxt};
use rustc_span::source_map::Span;
@@ -19,34 +19,34 @@ pub use find_anon_type::find_anon_type;
pub use static_impl_trait::{suggest_new_region_bound, HirTraitObjectVisitor, TraitObjectVisitor};
pub use util::find_param_with_region;
-impl<'cx, 'tcx> InferCtxt<'cx, 'tcx> {
- pub fn try_report_nice_region_error(&self, error: &RegionResolutionError<'tcx>) -> bool {
+impl<'cx, 'tcx> TypeErrCtxt<'cx, 'tcx> {
+ pub fn try_report_nice_region_error(&'cx self, error: &RegionResolutionError<'tcx>) -> bool {
NiceRegionError::new(self, error.clone()).try_report().is_some()
}
}
pub struct NiceRegionError<'cx, 'tcx> {
- infcx: &'cx InferCtxt<'cx, 'tcx>,
+ cx: &'cx TypeErrCtxt<'cx, 'tcx>,
error: Option<RegionResolutionError<'tcx>>,
regions: Option<(Span, ty::Region<'tcx>, ty::Region<'tcx>)>,
}
impl<'cx, 'tcx> NiceRegionError<'cx, 'tcx> {
- pub fn new(infcx: &'cx InferCtxt<'cx, 'tcx>, error: RegionResolutionError<'tcx>) -> Self {
- Self { infcx, error: Some(error), regions: None }
+ pub fn new(cx: &'cx TypeErrCtxt<'cx, 'tcx>, error: RegionResolutionError<'tcx>) -> Self {
+ Self { cx, error: Some(error), regions: None }
}
pub fn new_from_span(
- infcx: &'cx InferCtxt<'cx, 'tcx>,
+ cx: &'cx TypeErrCtxt<'cx, 'tcx>,
span: Span,
sub: ty::Region<'tcx>,
sup: ty::Region<'tcx>,
) -> Self {
- Self { infcx, error: None, regions: Some((span, sub, sup)) }
+ Self { cx, error: None, regions: Some((span, sub, sup)) }
}
fn tcx(&self) -> TyCtxt<'tcx> {
- self.infcx.tcx
+ self.cx.tcx
}
pub fn try_report_from_nll(&self) -> Option<DiagnosticBuilder<'tcx, ErrorGuaranteed>> {
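The hunks above swap the stored `&InferCtxt` for a `&TypeErrCtxt` wrapper. A minimal standalone model of such a wrapper, which carries extra error-reporting state while still exposing the inner context (shown here via `Deref`, one common way to do it; the types and fields below are simplified stand-ins, not the real ones):

use std::ops::Deref;

struct InferCtxt {
    // The inference state proper (elided); one field kept for the demo.
    err_count_on_creation: usize,
}

struct TypeErrCtxt<'a> {
    infcx: &'a InferCtxt,
    // Extra data only needed when reporting errors, e.g. typeck results.
    typeck_results: Option<String>,
}

impl<'a> Deref for TypeErrCtxt<'a> {
    type Target = InferCtxt;
    fn deref(&self) -> &InferCtxt {
        self.infcx
    }
}

impl InferCtxt {
    // Mirrors the `err_ctxt()` constructor introduced later in this patch.
    fn err_ctxt(&self) -> TypeErrCtxt<'_> {
        TypeErrCtxt { infcx: self, typeck_results: None }
    }
}

fn main() {
    let infcx = InferCtxt { err_count_on_creation: 0 };
    let cx = infcx.err_ctxt();
    // Members of the inner context stay reachable through the wrapper.
    assert_eq!(cx.err_count_on_creation, 0);
    assert!(cx.typeck_results.is_none());
}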
diff --git a/compiler/rustc_infer/src/infer/error_reporting/nice_region_error/placeholder_error.rs b/compiler/rustc_infer/src/infer/error_reporting/nice_region_error/placeholder_error.rs
index 998699158..a58516829 100644
--- a/compiler/rustc_infer/src/infer/error_reporting/nice_region_error/placeholder_error.rs
+++ b/compiler/rustc_infer/src/infer/error_reporting/nice_region_error/placeholder_error.rs
@@ -211,7 +211,10 @@ impl<'tcx> NiceRegionError<'_, 'tcx> {
);
let mut err = self.tcx().sess.struct_span_err(span, &msg);
- let leading_ellipsis = if let ObligationCauseCode::ItemObligation(def_id) = *cause.code() {
+ let leading_ellipsis = if let ObligationCauseCode::ItemObligation(def_id)
+ | ObligationCauseCode::ExprItemObligation(def_id, ..) =
+ *cause.code()
+ {
err.span_label(span, "doesn't satisfy where-clause");
err.span_label(
self.tcx().def_span(def_id),
@@ -223,12 +226,12 @@ impl<'tcx> NiceRegionError<'_, 'tcx> {
false
};
- let expected_trait_ref = self.infcx.resolve_vars_if_possible(ty::TraitRef {
+ let expected_trait_ref = self.cx.resolve_vars_if_possible(ty::TraitRef {
def_id: trait_def_id,
substs: expected_substs,
});
let actual_trait_ref = self
- .infcx
+ .cx
.resolve_vars_if_possible(ty::TraitRef { def_id: trait_def_id, substs: actual_substs });
// Search the expected and actual trait references to see (a)
diff --git a/compiler/rustc_infer/src/infer/error_reporting/nice_region_error/static_impl_trait.rs b/compiler/rustc_infer/src/infer/error_reporting/nice_region_error/static_impl_trait.rs
index 9886c572a..9bf755d7f 100644
--- a/compiler/rustc_infer/src/infer/error_reporting/nice_region_error/static_impl_trait.rs
+++ b/compiler/rustc_infer/src/infer/error_reporting/nice_region_error/static_impl_trait.rs
@@ -185,8 +185,7 @@ impl<'a, 'tcx> NiceRegionError<'a, 'tcx> {
| ObligationCauseCode::BlockTailExpression(hir_id) = cause.code()
{
let parent_id = tcx.hir().get_parent_item(*hir_id);
- let parent_id = tcx.hir().local_def_id_to_hir_id(parent_id);
- if let Some(fn_decl) = tcx.hir().fn_decl_by_hir_id(parent_id) {
+ if let Some(fn_decl) = tcx.hir().fn_decl_by_hir_id(parent_id.into()) {
let mut span: MultiSpan = fn_decl.output.span().into();
let mut add_label = true;
if let hir::FnRetTy::Return(ty) = fn_decl.output {
@@ -232,7 +231,7 @@ impl<'a, 'tcx> NiceRegionError<'a, 'tcx> {
ObligationCauseCode::MatchImpl(parent, ..) => parent.code(),
_ => cause.code(),
}
- && let (&ObligationCauseCode::ItemObligation(item_def_id), None) = (code, override_error_code)
+ && let (&ObligationCauseCode::ItemObligation(item_def_id) | &ObligationCauseCode::ExprItemObligation(item_def_id, ..), None) = (code, override_error_code)
{
// Same case of `impl Foo for dyn Bar { fn qux(&self) {} }` introducing a `'static`
// lifetime as above, but called using a fully-qualified path to the method:
@@ -287,8 +286,8 @@ pub fn suggest_new_region_bound(
) {
debug!("try_report_static_impl_trait: fn_return={:?}", fn_returns);
// FIXME: account for the need of parens in `&(dyn Trait + '_)`
- let consider = "consider changing the";
- let declare = "to declare that the";
+ let consider = "consider changing";
+ let declare = "to declare that";
let explicit = format!("you can add an explicit `{}` lifetime bound", lifetime_name);
let explicit_static =
arg.map(|arg| format!("explicit `'static` bound to the lifetime of {}", arg));
@@ -300,12 +299,16 @@ pub fn suggest_new_region_bound(
continue;
}
match fn_return.kind {
- TyKind::OpaqueDef(item_id, _) => {
+ TyKind::OpaqueDef(item_id, _, _) => {
let item = tcx.hir().item(item_id);
let ItemKind::OpaqueTy(opaque) = &item.kind else {
return;
};
+ // Get the identity type for this RPIT
+ let did = item_id.owner_id.to_def_id();
+ let ty = tcx.mk_opaque(did, ty::InternalSubsts::identity_for_item(tcx, did));
+
if let Some(span) = opaque
.bounds
.iter()
@@ -322,7 +325,7 @@ pub fn suggest_new_region_bound(
if let Some(explicit_static) = &explicit_static {
err.span_suggestion_verbose(
span,
- &format!("{} `impl Trait`'s {}", consider, explicit_static),
+ &format!("{consider} `{ty}`'s {explicit_static}"),
&lifetime_name,
Applicability::MaybeIncorrect,
);
@@ -352,12 +355,7 @@ pub fn suggest_new_region_bound(
} else {
err.span_suggestion_verbose(
fn_return.span.shrink_to_hi(),
- &format!(
- "{declare} `impl Trait` {captures}, {explicit}",
- declare = declare,
- captures = captures,
- explicit = explicit,
- ),
+ &format!("{declare} `{ty}` {captures}, {explicit}",),
&plus_lt,
Applicability::MaybeIncorrect,
);
@@ -368,7 +366,7 @@ pub fn suggest_new_region_bound(
err.span_suggestion_verbose(
fn_return.span.shrink_to_hi(),
&format!(
- "{declare} trait object {captures}, {explicit}",
+ "{declare} the trait object {captures}, {explicit}",
declare = declare,
captures = captures,
explicit = explicit,
@@ -385,7 +383,7 @@ pub fn suggest_new_region_bound(
if let Some(explicit_static) = &explicit_static {
err.span_suggestion_verbose(
lt.span,
- &format!("{} trait object's {}", consider, explicit_static),
+ &format!("{} the trait object's {}", consider, explicit_static),
&lifetime_name,
Applicability::MaybeIncorrect,
);
@@ -415,7 +413,8 @@ impl<'a, 'tcx> NiceRegionError<'a, 'tcx> {
let tcx = self.tcx();
match tcx.hir().get_if_local(def_id) {
Some(Node::ImplItem(impl_item)) => {
- match tcx.hir().find_by_def_id(tcx.hir().get_parent_item(impl_item.hir_id())) {
+ match tcx.hir().find_by_def_id(tcx.hir().get_parent_item(impl_item.hir_id()).def_id)
+ {
Some(Node::Item(Item {
kind: ItemKind::Impl(hir::Impl { self_ty, .. }),
..
@@ -425,7 +424,7 @@ impl<'a, 'tcx> NiceRegionError<'a, 'tcx> {
}
Some(Node::TraitItem(trait_item)) => {
let trait_did = tcx.hir().get_parent_item(trait_item.hir_id());
- match tcx.hir().find_by_def_id(trait_did) {
+ match tcx.hir().find_by_def_id(trait_did.def_id) {
Some(Node::Item(Item { kind: ItemKind::Trait(..), .. })) => {
// The method being called is defined in the `trait`, but the `'static`
// obligation comes from the `impl`. Find that `impl` so that we can point
@@ -486,7 +485,7 @@ impl<'a, 'tcx> NiceRegionError<'a, 'tcx> {
tcx,
ctxt.param_env,
ctxt.assoc_item.def_id,
- self.infcx.resolve_vars_if_possible(ctxt.substs),
+ self.cx.resolve_vars_if_possible(ctxt.substs),
) else {
return false;
};
@@ -544,7 +543,7 @@ pub struct TraitObjectVisitor(pub FxHashSet<DefId>);
impl<'tcx> TypeVisitor<'tcx> for TraitObjectVisitor {
fn visit_ty(&mut self, t: Ty<'tcx>) -> ControlFlow<Self::BreakTy> {
match t.kind() {
- ty::Dynamic(preds, re) if re.is_static() => {
+ ty::Dynamic(preds, re, _) if re.is_static() => {
if let Some(def_id) = preds.principal_def_id() {
self.0.insert(def_id);
}
diff --git a/compiler/rustc_infer/src/infer/error_reporting/nice_region_error/trait_impl_difference.rs b/compiler/rustc_infer/src/infer/error_reporting/nice_region_error/trait_impl_difference.rs
index da465a764..5d536e982 100644
--- a/compiler/rustc_infer/src/infer/error_reporting/nice_region_error/trait_impl_difference.rs
+++ b/compiler/rustc_infer/src/infer/error_reporting/nice_region_error/trait_impl_difference.rs
@@ -84,12 +84,12 @@ impl<'a, 'tcx> NiceRegionError<'a, 'tcx> {
let expected_highlight = HighlightBuilder::build(self.tcx(), expected);
let expected = self
- .infcx
+ .cx
.extract_inference_diagnostics_data(expected.into(), Some(expected_highlight))
.name;
let found_highlight = HighlightBuilder::build(self.tcx(), found);
let found =
- self.infcx.extract_inference_diagnostics_data(found.into(), Some(found_highlight)).name;
+ self.cx.extract_inference_diagnostics_data(found.into(), Some(found_highlight)).name;
err.span_label(sp, &format!("found `{}`", found));
err.span_label(trait_sp, &format!("expected `{}`", expected));
@@ -154,16 +154,12 @@ impl<'tcx> Visitor<'tcx> for TypeParamSpanVisitor<'tcx> {
}
hir::TyKind::Path(hir::QPath::Resolved(None, path)) => match &path.segments {
[segment]
- if segment
- .res
- .map(|res| {
- matches!(
- res,
- Res::SelfTy { trait_: _, alias_to: _ }
- | Res::Def(hir::def::DefKind::TyParam, _)
- )
- })
- .unwrap_or(false) =>
+ if matches!(
+ segment.res,
+ Res::SelfTyParam { .. }
+ | Res::SelfTyAlias { .. }
+ | Res::Def(hir::def::DefKind::TyParam, _)
+ ) =>
{
self.types.push(path.span);
}
diff --git a/compiler/rustc_infer/src/infer/error_reporting/nice_region_error/util.rs b/compiler/rustc_infer/src/infer/error_reporting/nice_region_error/util.rs
index 3e9d491af..f1461d701 100644
--- a/compiler/rustc_infer/src/infer/error_reporting/nice_region_error/util.rs
+++ b/compiler/rustc_infer/src/infer/error_reporting/nice_region_error/util.rs
@@ -130,7 +130,7 @@ impl<'a, 'tcx> NiceRegionError<'a, 'tcx> {
let ret_ty = fn_ty.fn_sig(self.tcx()).output();
let span = hir_sig.decl.output.span();
let future_output = if hir_sig.header.is_async() {
- ret_ty.map_bound(|ty| self.infcx.get_impl_future_output_ty(ty)).transpose()
+ ret_ty.map_bound(|ty| self.cx.get_impl_future_output_ty(ty)).transpose()
} else {
None
};
diff --git a/compiler/rustc_infer/src/infer/error_reporting/note.rs b/compiler/rustc_infer/src/infer/error_reporting/note.rs
index c1940c5c0..41b115f33 100644
--- a/compiler/rustc_infer/src/infer/error_reporting/note.rs
+++ b/compiler/rustc_infer/src/infer/error_reporting/note.rs
@@ -1,100 +1,90 @@
-use crate::infer::error_reporting::{note_and_explain_region, ObligationCauseExt};
-use crate::infer::{self, InferCtxt, SubregionOrigin};
-use rustc_errors::{struct_span_err, Diagnostic, DiagnosticBuilder, ErrorGuaranteed};
+use crate::errors::RegionOriginNote;
+use crate::infer::error_reporting::{note_and_explain_region, TypeErrCtxt};
+use crate::infer::{self, SubregionOrigin};
+use rustc_errors::{
+ fluent, struct_span_err, AddToDiagnostic, Diagnostic, DiagnosticBuilder, ErrorGuaranteed,
+};
use rustc_middle::traits::ObligationCauseCode;
use rustc_middle::ty::error::TypeError;
use rustc_middle::ty::{self, Region};
-impl<'a, 'tcx> InferCtxt<'a, 'tcx> {
+use super::ObligationCauseAsDiagArg;
+
+impl<'tcx> TypeErrCtxt<'_, 'tcx> {
pub(super) fn note_region_origin(&self, err: &mut Diagnostic, origin: &SubregionOrigin<'tcx>) {
- let mut label_or_note = |span, msg: &str| {
- let sub_count = err.children.iter().filter(|d| d.span.is_dummy()).count();
- let expanded_sub_count = err.children.iter().filter(|d| !d.span.is_dummy()).count();
- let span_is_primary = err.span.primary_spans().iter().all(|&sp| sp == span);
- if span_is_primary && sub_count == 0 && expanded_sub_count == 0 {
- err.span_label(span, msg);
- } else if span_is_primary && expanded_sub_count == 0 {
- err.note(msg);
- } else {
- err.span_note(span, msg);
- }
- };
match *origin {
- infer::Subtype(ref trace) => {
- if let Some((expected, found)) = self.values_str(trace.values) {
- label_or_note(
- trace.cause.span,
- &format!("...so that the {}", trace.cause.as_requirement_str()),
- );
-
- err.note_expected_found(&"", expected, &"", found);
- } else {
- // FIXME: this really should be handled at some earlier stage. Our
- // handling of region checking when type errors are present is
- // *terrible*.
-
- label_or_note(
- trace.cause.span,
- &format!("...so that {}", trace.cause.as_requirement_str()),
- );
- }
+ infer::Subtype(ref trace) => RegionOriginNote::WithRequirement {
+ span: trace.cause.span,
+ requirement: ObligationCauseAsDiagArg(trace.cause.clone()),
+ expected_found: self.values_str(trace.values),
}
+ .add_to_diagnostic(err),
infer::Reborrow(span) => {
- label_or_note(span, "...so that reference does not outlive borrowed content");
+ RegionOriginNote::Plain { span, msg: fluent::infer_reborrow }.add_to_diagnostic(err)
}
infer::ReborrowUpvar(span, ref upvar_id) => {
let var_name = self.tcx.hir().name(upvar_id.var_path.hir_id);
- label_or_note(span, &format!("...so that closure can access `{}`", var_name));
+ RegionOriginNote::WithName {
+ span,
+ msg: fluent::infer_reborrow,
+ name: &var_name.to_string(),
+ continues: false,
+ }
+ .add_to_diagnostic(err);
}
infer::RelateObjectBound(span) => {
- label_or_note(span, "...so that it can be closed over into an object");
+ RegionOriginNote::Plain { span, msg: fluent::infer_relate_object_bound }
+ .add_to_diagnostic(err);
}
infer::DataBorrowed(ty, span) => {
- label_or_note(
+ RegionOriginNote::WithName {
span,
- &format!(
- "...so that the type `{}` is not borrowed for too long",
- self.ty_to_string(ty)
- ),
- );
+ msg: fluent::infer_data_borrowed,
+ name: &self.ty_to_string(ty),
+ continues: false,
+ }
+ .add_to_diagnostic(err);
}
infer::ReferenceOutlivesReferent(ty, span) => {
- label_or_note(
+ RegionOriginNote::WithName {
span,
- &format!(
- "...so that the reference type `{}` does not outlive the data it points at",
- self.ty_to_string(ty)
- ),
- );
+ msg: fluent::infer_reference_outlives_referent,
+ name: &self.ty_to_string(ty),
+ continues: false,
+ }
+ .add_to_diagnostic(err);
}
- infer::RelateParamBound(span, t, opt_span) => {
- label_or_note(
+ infer::RelateParamBound(span, ty, opt_span) => {
+ RegionOriginNote::WithName {
span,
- &format!(
- "...so that the type `{}` will meet its required lifetime bounds{}",
- self.ty_to_string(t),
- if opt_span.is_some() { "..." } else { "" },
- ),
- );
+ msg: fluent::infer_relate_param_bound,
+ name: &self.ty_to_string(ty),
+ continues: opt_span.is_some(),
+ }
+ .add_to_diagnostic(err);
if let Some(span) = opt_span {
- err.span_note(span, "...that is required by this bound");
+ RegionOriginNote::Plain { span, msg: fluent::infer_relate_param_bound_2 }
+ .add_to_diagnostic(err);
}
}
infer::RelateRegionParamBound(span) => {
- label_or_note(
- span,
- "...so that the declared lifetime parameter bounds are satisfied",
- );
+ RegionOriginNote::Plain { span, msg: fluent::infer_relate_region_param_bound }
+ .add_to_diagnostic(err);
}
infer::CompareImplItemObligation { span, .. } => {
- label_or_note(
- span,
- "...so that the definition in impl matches the definition from the trait",
- );
+ RegionOriginNote::Plain { span, msg: fluent::infer_compare_impl_item_obligation }
+ .add_to_diagnostic(err);
}
infer::CheckAssociatedTypeBounds { ref parent, .. } => {
self.note_region_origin(err, &parent);
}
+ infer::AscribeUserTypeProvePredicate(span) => {
+ RegionOriginNote::Plain {
+ span,
+ msg: fluent::infer_ascribe_user_type_prove_predicate,
+ }
+ .add_to_diagnostic(err);
+ }
}
}
@@ -107,7 +97,7 @@ impl<'a, 'tcx> InferCtxt<'a, 'tcx> {
match origin {
infer::Subtype(box trace) => {
let terr = TypeError::RegionsDoesNotOutlive(sup, sub);
- let mut err = self.report_and_explain_type_error(trace, &terr);
+ let mut err = self.report_and_explain_type_error(trace, terr);
match (*sub, *sup) {
(ty::RePlaceholder(_), ty::RePlaceholder(_)) => {}
(ty::RePlaceholder(_), _) => {
@@ -374,6 +364,27 @@ impl<'a, 'tcx> InferCtxt<'a, 'tcx> {
err
}
+ infer::AscribeUserTypeProvePredicate(span) => {
+ let mut err =
+ struct_span_err!(self.tcx.sess, span, E0478, "lifetime bound not satisfied");
+ note_and_explain_region(
+ self.tcx,
+ &mut err,
+ "lifetime instantiated with ",
+ sup,
+ "",
+ None,
+ );
+ note_and_explain_region(
+ self.tcx,
+ &mut err,
+ "but lifetime must outlive ",
+ sub,
+ "",
+ None,
+ );
+ err
+ }
}
}
@@ -390,10 +401,12 @@ impl<'a, 'tcx> InferCtxt<'a, 'tcx> {
if matches!(
&trace.cause.code().peel_derives(),
ObligationCauseCode::BindingObligation(..)
+ | ObligationCauseCode::ExprBindingObligation(..)
) =>
{
// Hack to get around the borrow checker because trace.cause has an `Rc`.
- if let ObligationCauseCode::BindingObligation(_, span) =
+ if let ObligationCauseCode::BindingObligation(_, span)
+ | ObligationCauseCode::ExprBindingObligation(_, span, ..) =
&trace.cause.code().peel_derives()
{
let span = *span;
@@ -406,7 +419,7 @@ impl<'a, 'tcx> InferCtxt<'a, 'tcx> {
}
infer::Subtype(box trace) => {
let terr = TypeError::RegionsPlaceholderMismatch;
- return self.report_and_explain_type_error(trace, &terr);
+ return self.report_and_explain_type_error(trace, terr);
}
_ => return self.report_concrete_failure(placeholder_origin, sub, sup),
}
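The `label_or_note` closure above gives way to `RegionOriginNote` variants that attach themselves to an existing diagnostic. A rough standalone model of that subdiagnostic-enum shape, with a toy `Diagnostic` type and message strings invented for illustration:

struct Diagnostic {
    notes: Vec<String>,
}

enum RegionOriginNote<'a> {
    Plain { msg: &'static str },
    WithName { msg: &'static str, name: &'a str, continues: bool },
}

impl RegionOriginNote<'_> {
    // Mirrors `add_to_diagnostic`: each variant knows how to render itself.
    fn add_to_diagnostic(self, diag: &mut Diagnostic) {
        match self {
            RegionOriginNote::Plain { msg } => diag.notes.push(msg.to_string()),
            RegionOriginNote::WithName { msg, name, continues } => {
                let dots = if continues { "..." } else { "" };
                diag.notes.push(format!("{msg} `{name}`{dots}"));
            }
        }
    }
}

fn main() {
    let mut diag = Diagnostic { notes: Vec::new() };
    RegionOriginNote::Plain { msg: "...so that reference does not outlive borrowed content" }
        .add_to_diagnostic(&mut diag);
    RegionOriginNote::WithName {
        msg: "...so that closure can access",
        name: "counter",
        continues: false,
    }
    .add_to_diagnostic(&mut diag);
    assert_eq!(diag.notes.len(), 2);
}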
diff --git a/compiler/rustc_infer/src/infer/free_regions.rs b/compiler/rustc_infer/src/infer/free_regions.rs
index d566634a4..728d691a2 100644
--- a/compiler/rustc_infer/src/infer/free_regions.rs
+++ b/compiler/rustc_infer/src/infer/free_regions.rs
@@ -27,13 +27,13 @@ impl<'a, 'tcx> RegionRelations<'a, 'tcx> {
}
}
-#[derive(Clone, Debug, Default)]
+#[derive(Clone, Debug)]
pub struct FreeRegionMap<'tcx> {
// Stores the relation `a < b`, where `a` and `b` are regions.
//
// Invariant: only free regions like `'x` or `'static` are stored
// in this relation, not scopes.
- relation: TransitiveRelation<Region<'tcx>>,
+ pub(crate) relation: TransitiveRelation<Region<'tcx>>,
}
impl<'tcx> FreeRegionMap<'tcx> {
@@ -45,15 +45,6 @@ impl<'tcx> FreeRegionMap<'tcx> {
self.relation.is_empty()
}
- // Record that `'sup:'sub`. Or, put another way, `'sub <= 'sup`.
- // (with the exception that `'static: 'x` is not notable)
- pub fn relate_regions(&mut self, sub: Region<'tcx>, sup: Region<'tcx>) {
- debug!("relate_regions(sub={:?}, sup={:?})", sub, sup);
- if sub.is_free_or_static() && sup.is_free() {
- self.relation.add(sub, sup)
- }
- }
-
/// Tests whether `r_a <= r_b`.
///
/// Both regions must meet `is_free_or_static`.
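`FreeRegionMap` keeps its `'a: 'b` facts in a `TransitiveRelation`. As a loose standalone illustration of what such a structure provides, here is a toy relation where facts are added pairwise and `a <= b` is answered by reachability (the real structure is interned, indexed, and far more efficient):

use std::collections::{HashMap, HashSet, VecDeque};

#[derive(Default)]
struct TransitiveRelation {
    edges: HashMap<&'static str, Vec<&'static str>>,
}

impl TransitiveRelation {
    fn add(&mut self, a: &'static str, b: &'static str) {
        self.edges.entry(a).or_default().push(b);
    }

    /// True if `a <= b` follows from the recorded facts
    /// (reflexive-transitive closure, computed here by BFS).
    fn contains(&self, a: &'static str, b: &'static str) -> bool {
        let mut seen = HashSet::new();
        let mut queue = VecDeque::new();
        queue.push_back(a);
        while let Some(x) = queue.pop_front() {
            if x == b {
                return true;
            }
            if seen.insert(x) {
                if let Some(succs) = self.edges.get(x) {
                    queue.extend(succs.iter().copied());
                }
            }
        }
        false
    }
}

fn main() {
    let mut rel = TransitiveRelation::default();
    rel.add("'a", "'b"); // 'a <= 'b, i.e. 'b: 'a
    rel.add("'b", "'static");
    assert!(rel.contains("'a", "'static"));
    assert!(!rel.contains("'static", "'a"));
}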
diff --git a/compiler/rustc_infer/src/infer/freshen.rs b/compiler/rustc_infer/src/infer/freshen.rs
index 84004d2b2..ff5d1a05a 100644
--- a/compiler/rustc_infer/src/infer/freshen.rs
+++ b/compiler/rustc_infer/src/infer/freshen.rs
@@ -38,7 +38,7 @@ use rustc_middle::ty::{self, Ty, TyCtxt, TypeFoldable, TypeSuperFoldable, TypeVi
use std::collections::hash_map::Entry;
pub struct TypeFreshener<'a, 'tcx> {
- infcx: &'a InferCtxt<'a, 'tcx>,
+ infcx: &'a InferCtxt<'tcx>,
ty_freshen_count: u32,
const_freshen_count: u32,
ty_freshen_map: FxHashMap<ty::InferTy, Ty<'tcx>>,
@@ -47,7 +47,7 @@ pub struct TypeFreshener<'a, 'tcx> {
}
impl<'a, 'tcx> TypeFreshener<'a, 'tcx> {
- pub fn new(infcx: &'a InferCtxt<'a, 'tcx>, keep_static: bool) -> TypeFreshener<'a, 'tcx> {
+ pub fn new(infcx: &'a InferCtxt<'tcx>, keep_static: bool) -> TypeFreshener<'a, 'tcx> {
TypeFreshener {
infcx,
ty_freshen_count: 0,
@@ -126,7 +126,6 @@ impl<'a, 'tcx> TypeFolder<'tcx> for TypeFreshener<'a, 'tcx> {
| ty::ReFree(_)
| ty::ReVar(_)
| ty::RePlaceholder(..)
- | ty::ReEmpty(_)
| ty::ReErased => {
// replace all free regions with 'erased
self.tcx().lifetimes.re_erased
diff --git a/compiler/rustc_infer/src/infer/fudge.rs b/compiler/rustc_infer/src/infer/fudge.rs
index 2f0eadce6..6dd6c4e1f 100644
--- a/compiler/rustc_infer/src/infer/fudge.rs
+++ b/compiler/rustc_infer/src/infer/fudge.rs
@@ -43,7 +43,7 @@ struct VariableLengths {
region_constraints_len: usize,
}
-impl<'a, 'tcx> InferCtxt<'a, 'tcx> {
+impl<'tcx> InferCtxt<'tcx> {
fn variable_lengths(&self) -> VariableLengths {
let mut inner = self.inner.borrow_mut();
VariableLengths {
@@ -167,7 +167,7 @@ impl<'a, 'tcx> InferCtxt<'a, 'tcx> {
}
pub struct InferenceFudger<'a, 'tcx> {
- infcx: &'a InferCtxt<'a, 'tcx>,
+ infcx: &'a InferCtxt<'tcx>,
type_vars: (Range<TyVid>, Vec<TypeVariableOrigin>),
int_vars: Range<IntVid>,
float_vars: Range<FloatVid>,
diff --git a/compiler/rustc_infer/src/infer/glb.rs b/compiler/rustc_infer/src/infer/glb.rs
index 1570a08f3..6ffefcb7a 100644
--- a/compiler/rustc_infer/src/infer/glb.rs
+++ b/compiler/rustc_infer/src/infer/glb.rs
@@ -113,7 +113,7 @@ impl<'tcx> TypeRelation<'tcx> for Glb<'_, '_, 'tcx> {
}
impl<'combine, 'infcx, 'tcx> LatticeDir<'infcx, 'tcx> for Glb<'combine, 'infcx, 'tcx> {
- fn infcx(&self) -> &'infcx InferCtxt<'infcx, 'tcx> {
+ fn infcx(&self) -> &'infcx InferCtxt<'tcx> {
self.fields.infcx
}
diff --git a/compiler/rustc_infer/src/infer/higher_ranked/mod.rs b/compiler/rustc_infer/src/infer/higher_ranked/mod.rs
index d0d9efe15..28c87a115 100644
--- a/compiler/rustc_infer/src/infer/higher_ranked/mod.rs
+++ b/compiler/rustc_infer/src/infer/higher_ranked/mod.rs
@@ -59,7 +59,7 @@ impl<'a, 'tcx> CombineFields<'a, 'tcx> {
}
}
-impl<'a, 'tcx> InferCtxt<'a, 'tcx> {
+impl<'tcx> InferCtxt<'tcx> {
/// Replaces all bound variables (lifetimes, types, and constants) bound by
/// `binder` with placeholder variables in a new universe. This means that the
/// new placeholders can only be named by inference variables created after
@@ -69,7 +69,7 @@ impl<'a, 'tcx> InferCtxt<'a, 'tcx> {
/// For more details visit the relevant sections of the [rustc dev guide].
///
/// [rustc dev guide]: https://rustc-dev-guide.rust-lang.org/traits/hrtb.html
- #[instrument(level = "debug", skip(self))]
+ #[instrument(level = "debug", skip(self), ret)]
pub fn replace_bound_vars_with_placeholders<T>(&self, binder: ty::Binder<'tcx, T>) -> T
where
T: TypeFoldable<'tcx> + Copy,
@@ -81,19 +81,19 @@ impl<'a, 'tcx> InferCtxt<'a, 'tcx> {
let next_universe = self.create_next_universe();
let delegate = FnMutDelegate {
- regions: |br: ty::BoundRegion| {
+ regions: &mut |br: ty::BoundRegion| {
self.tcx.mk_region(ty::RePlaceholder(ty::PlaceholderRegion {
universe: next_universe,
name: br.kind,
}))
},
- types: |bound_ty: ty::BoundTy| {
+ types: &mut |bound_ty: ty::BoundTy| {
self.tcx.mk_ty(ty::Placeholder(ty::PlaceholderType {
universe: next_universe,
name: bound_ty.var,
}))
},
- consts: |bound_var: ty::BoundVar, ty| {
+ consts: &mut |bound_var: ty::BoundVar, ty| {
self.tcx.mk_const(ty::ConstS {
kind: ty::ConstKind::Placeholder(ty::PlaceholderConst {
universe: next_universe,
@@ -104,9 +104,8 @@ impl<'a, 'tcx> InferCtxt<'a, 'tcx> {
},
};
- let result = self.tcx.replace_bound_vars_uncached(binder, delegate);
- debug!(?next_universe, ?result);
- result
+ debug!(?next_universe);
+ self.tcx.replace_bound_vars_uncached(binder, delegate)
}
/// See [RegionConstraintCollector::leak_check][1].
@@ -115,7 +114,7 @@ impl<'a, 'tcx> InferCtxt<'a, 'tcx> {
pub fn leak_check(
&self,
overly_polymorphic: bool,
- snapshot: &CombinedSnapshot<'_, 'tcx>,
+ snapshot: &CombinedSnapshot<'tcx>,
) -> RelateResult<'tcx, ()> {
// If the user gave `-Zno-leak-check`, or we have been
// configured to skip the leak check, then skip the leak check
diff --git a/compiler/rustc_infer/src/infer/lattice.rs b/compiler/rustc_infer/src/infer/lattice.rs
index 1e3293efa..eba65361a 100644
--- a/compiler/rustc_infer/src/infer/lattice.rs
+++ b/compiler/rustc_infer/src/infer/lattice.rs
@@ -31,7 +31,7 @@ use rustc_middle::ty::{self, Ty};
/// GLB moves "down" the lattice (to smaller values); LUB moves
/// "up" the lattice (to bigger values).
pub trait LatticeDir<'f, 'tcx>: TypeRelation<'tcx> {
- fn infcx(&self) -> &'f InferCtxt<'f, 'tcx>;
+ fn infcx(&self) -> &'f InferCtxt<'tcx>;
fn cause(&self) -> &ObligationCause<'tcx>;
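The doc comment above describes GLB as moving down the lattice and LUB as moving up. A tiny worked lattice, subsets of integers ordered by inclusion, makes the two directions concrete (this is only an analogy for the ordering, not the region lattice itself):

use std::collections::BTreeSet;

// Least upper bound under the subset ordering: the union.
fn lub(a: &BTreeSet<u32>, b: &BTreeSet<u32>) -> BTreeSet<u32> {
    a.union(b).copied().collect()
}

// Greatest lower bound under the subset ordering: the intersection.
fn glb(a: &BTreeSet<u32>, b: &BTreeSet<u32>) -> BTreeSet<u32> {
    a.intersection(b).copied().collect()
}

fn main() {
    let a: BTreeSet<u32> = [1, 2].into_iter().collect();
    let b: BTreeSet<u32> = [2, 3].into_iter().collect();
    // LUB is above both inputs; GLB is below both.
    assert!(lub(&a, &b).is_superset(&a) && lub(&a, &b).is_superset(&b));
    assert!(glb(&a, &b).is_subset(&a) && glb(&a, &b).is_subset(&b));
}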
diff --git a/compiler/rustc_infer/src/infer/lexical_region_resolve/mod.rs b/compiler/rustc_infer/src/infer/lexical_region_resolve/mod.rs
index 3783cfb4c..5f13b2b3d 100644
--- a/compiler/rustc_infer/src/infer/lexical_region_resolve/mod.rs
+++ b/compiler/rustc_infer/src/infer/lexical_region_resolve/mod.rs
@@ -15,8 +15,9 @@ use rustc_data_structures::graph::implementation::{
use rustc_data_structures::intern::Interned;
use rustc_index::vec::{Idx, IndexVec};
use rustc_middle::ty::fold::TypeFoldable;
+use rustc_middle::ty::PlaceholderRegion;
use rustc_middle::ty::{self, Ty, TyCtxt};
-use rustc_middle::ty::{ReEarlyBound, ReEmpty, ReErased, ReFree, ReStatic};
+use rustc_middle::ty::{ReEarlyBound, ReErased, ReFree, ReStatic};
use rustc_middle::ty::{ReLateBound, RePlaceholder, ReVar};
use rustc_middle::ty::{Region, RegionVid};
use rustc_span::Span;
@@ -51,6 +52,13 @@ pub struct LexicalRegionResolutions<'tcx> {
#[derive(Copy, Clone, Debug)]
pub(crate) enum VarValue<'tcx> {
+ /// Empty lifetime is for data that is never accessed. We tag the
+ /// empty lifetime with a universe -- the idea is that we don't
+ /// want `exists<'a> { forall<'b> { 'b: 'a } }` to be satisfiable.
+ /// Therefore, the `'empty` in a universe `U` is less than all
+ /// regions visible from `U`, but not less than regions not visible
+ /// from `U`.
+ Empty(ty::UniverseIndex),
Value(Region<'tcx>),
ErrorValue,
}
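A rough standalone model of the ordering this comment describes, using plain enums rather than the compiler's interned regions (universe indices are just integers here, with a larger index meaning a deeper universe):

enum RegionValue {
    /// `'empty` in the given universe.
    Empty(u32),
    /// A placeholder (`forall`) region created in the given universe.
    Placeholder(u32),
    /// `'static`, the top of the ordering.
    Static,
}

/// True if `a <= b` ("b outlives a") under the rules sketched in the comment above.
fn le(a: RegionValue, b: RegionValue) -> bool {
    use RegionValue::*;
    match (a, b) {
        // Nothing lives longer than `'static`.
        (_, Static) => true,
        // An empty region in a deeper universe is smaller: `a <= b` iff `b`'s
        // universe is no deeper than `a`'s (mirrors `a_ui.min(b_ui) == b_ui`).
        (Empty(a_ui), Empty(b_ui)) => b_ui <= a_ui,
        // `'empty(U)` sits below a placeholder only if U can name the
        // placeholder's universe, i.e. U is at least as deep.
        (Empty(a_ui), Placeholder(p_ui)) => a_ui >= p_ui,
        (Static, _) => false,
        (Placeholder(a_ui), Placeholder(b_ui)) => a_ui == b_ui,
        (Placeholder(_), Empty(_)) => false,
    }
}

fn main() {
    // `exists<'a> { forall<'b> { 'b: 'a } }` must not hold: an empty region in
    // the outer universe 0 cannot sit below a placeholder from universe 1...
    assert!(!le(RegionValue::Empty(0), RegionValue::Placeholder(1)));
    // ...but an empty region created inside universe 1 can.
    assert!(le(RegionValue::Empty(1), RegionValue::Placeholder(1)));
    // Deeper empties are smaller than shallower ones.
    assert!(le(RegionValue::Empty(1), RegionValue::Empty(0)));
}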
@@ -117,7 +125,7 @@ impl<'cx, 'tcx> LexicalResolver<'cx, 'tcx> {
&mut self,
errors: &mut Vec<RegionResolutionError<'tcx>>,
) -> LexicalRegionResolutions<'tcx> {
- let mut var_data = self.construct_var_data(self.tcx());
+ let mut var_data = self.construct_var_data();
if cfg!(debug_assertions) {
self.dump_constraints();
@@ -137,13 +145,12 @@ impl<'cx, 'tcx> LexicalResolver<'cx, 'tcx> {
/// Initially, the value for all variables is set to `'empty`, the
/// empty region. The `expansion` phase will grow this larger.
- fn construct_var_data(&self, tcx: TyCtxt<'tcx>) -> LexicalRegionResolutions<'tcx> {
+ fn construct_var_data(&self) -> LexicalRegionResolutions<'tcx> {
LexicalRegionResolutions {
values: IndexVec::from_fn_n(
|vid| {
let vid_universe = self.var_infos[vid].universe;
- let re_empty = tcx.mk_region(ty::ReEmpty(vid_universe));
- VarValue::Value(re_empty)
+ VarValue::Empty(vid_universe)
},
self.num_vars(),
),
@@ -189,20 +196,131 @@ impl<'cx, 'tcx> LexicalResolver<'cx, 'tcx> {
}
}
+ /// Gets the LUB of a given region and the empty region
+ fn lub_empty(&self, a_region: Region<'tcx>) -> Result<Region<'tcx>, PlaceholderRegion> {
+ match *a_region {
+ ReLateBound(..) | ReErased => {
+ bug!("cannot relate region: {:?}", a_region);
+ }
+
+ ReVar(v_id) => {
+ span_bug!(
+ self.var_infos[v_id].origin.span(),
+ "lub invoked with non-concrete regions: {:?}",
+ a_region,
+ );
+ }
+
+ ReStatic => {
+ // nothing lives longer than `'static`
+ Ok(self.tcx().lifetimes.re_static)
+ }
+
+ ReEarlyBound(_) | ReFree(_) => {
+ // All empty regions are less than early-bound, free,
+ // and scope regions.
+ Ok(a_region)
+ }
+
+ RePlaceholder(placeholder) => Err(placeholder),
+ }
+ }
+
fn expansion(&self, var_values: &mut LexicalRegionResolutions<'tcx>) {
+ // In the first pass, we expand region vids according to constraints we
+ // have previously found. In the second pass, we loop through the region
+ // vids we expanded and expand *across* region vids (effectively
+ // "expanding" new `RegSubVar` constraints).
+
+ // Tracks the `VarSubVar` constraints generated for each region vid. We
+ // later use this to expand across vids.
let mut constraints = IndexVec::from_elem_n(Vec::new(), var_values.values.len());
+ // Tracks the changed region vids.
let mut changes = Vec::new();
for constraint in self.data.constraints.keys() {
- let (a_vid, a_region, b_vid, b_data) = match *constraint {
+ match *constraint {
Constraint::RegSubVar(a_region, b_vid) => {
let b_data = var_values.value_mut(b_vid);
- (None, a_region, b_vid, b_data)
+
+ if self.expand_node(a_region, b_vid, b_data) {
+ changes.push(b_vid);
+ }
}
Constraint::VarSubVar(a_vid, b_vid) => match *var_values.value(a_vid) {
VarValue::ErrorValue => continue,
+ VarValue::Empty(a_universe) => {
+ let b_data = var_values.value_mut(b_vid);
+
+ let changed = (|| match *b_data {
+ VarValue::Empty(b_universe) => {
+ // Empty regions are ordered according to the universe
+ // they are associated with.
+ let ui = a_universe.min(b_universe);
+
+ debug!(
+ "Expanding value of {:?} \
+ from empty lifetime with universe {:?} \
+ to empty lifetime with universe {:?}",
+ b_vid, b_universe, ui
+ );
+
+ *b_data = VarValue::Empty(ui);
+ true
+ }
+ VarValue::Value(cur_region) => {
+ let lub = match self.lub_empty(cur_region) {
+ Ok(r) => r,
+ // If the empty and placeholder regions are in the same universe,
+ // then the LUB is the Placeholder region (which is the cur_region).
+ // If they are not in the same universe, the LUB is the Static lifetime.
+ Err(placeholder) if a_universe == placeholder.universe => {
+ cur_region
+ }
+ Err(_) => self.tcx().lifetimes.re_static,
+ };
+
+ if lub == cur_region {
+ return false;
+ }
+
+ debug!(
+ "Expanding value of {:?} from {:?} to {:?}",
+ b_vid, cur_region, lub
+ );
+
+ *b_data = VarValue::Value(lub);
+ true
+ }
+
+ VarValue::ErrorValue => false,
+ })();
+
+ if changed {
+ changes.push(b_vid);
+ }
+ match b_data {
+ VarValue::Value(Region(Interned(ReStatic, _)))
+ | VarValue::ErrorValue => (),
+ _ => {
+ constraints[a_vid].push((a_vid, b_vid));
+ constraints[b_vid].push((a_vid, b_vid));
+ }
+ }
+ }
VarValue::Value(a_region) => {
let b_data = var_values.value_mut(b_vid);
- (Some(a_vid), a_region, b_vid, b_data)
+
+ if self.expand_node(a_region, b_vid, b_data) {
+ changes.push(b_vid);
+ }
+ match b_data {
+ VarValue::Value(Region(Interned(ReStatic, _)))
+ | VarValue::ErrorValue => (),
+ _ => {
+ constraints[a_vid].push((a_vid, b_vid));
+ constraints[b_vid].push((a_vid, b_vid));
+ }
+ }
}
},
Constraint::RegSubReg(..) | Constraint::VarSubReg(..) => {
@@ -210,18 +328,6 @@ impl<'cx, 'tcx> LexicalResolver<'cx, 'tcx> {
// is done, in `collect_errors`.
continue;
}
- };
- if self.expand_node(a_region, b_vid, b_data) {
- changes.push(b_vid);
- }
- if let Some(a_vid) = a_vid {
- match b_data {
- VarValue::Value(Region(Interned(ReStatic, _))) | VarValue::ErrorValue => (),
- _ => {
- constraints[a_vid].push((a_vid, b_vid));
- constraints[b_vid].push((a_vid, b_vid));
- }
- }
}
}
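The rewritten expansion loop above grows each variable until a fixed point is reached: every variable ends up at the LUB of everything constrained to be below it. As a loose standalone illustration over a toy totally ordered "lattice" of integers (a larger value outlives more; this ignores universes, error values, and the two-pass structure of the real code):

// Toy fixed-point expansion: variables start at 0 ("empty") and grow to
// satisfy constraints of the form `bound <= var(b)` and `var(a) <= var(b)`.
#[derive(Clone, Copy)]
enum Constraint {
    RegSubVar(u32, usize),   // known region (here just a number) <= variable b
    VarSubVar(usize, usize), // variable a <= variable b
}

fn expand(num_vars: usize, constraints: &[Constraint]) -> Vec<u32> {
    let mut values = vec![0u32; num_vars];
    let mut changed = true;
    while changed {
        changed = false;
        for &c in constraints {
            let (lower, b) = match c {
                Constraint::RegSubVar(r, b) => (r, b),
                Constraint::VarSubVar(a, b) => (values[a], b),
            };
            if values[b] < lower {
                // Grow b to the LUB of its current value and the bound.
                values[b] = lower;
                changed = true;
            }
        }
    }
    values
}

fn main() {
    // v0 must outlive "region 3", v1 must outlive v0, v2 is unconstrained.
    let constraints = [Constraint::RegSubVar(3, 0), Constraint::VarSubVar(0, 1)];
    assert_eq!(expand(3, &constraints), vec![3, 3, 0]);
}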
@@ -242,6 +348,10 @@ impl<'cx, 'tcx> LexicalResolver<'cx, 'tcx> {
}
}
+ /// Expands the value of the region represented by `b_vid` with current
+ /// value `b_data` to the lub of `b_data` and `a_region`. This corresponds
+ /// to the constraint `'?b: 'a` (`'a <: '?b`), where `'a` is some known
+ /// region and `'?b` is some region variable.
fn expand_node(
&self,
a_region: Region<'tcx>,
@@ -263,14 +373,28 @@ impl<'cx, 'tcx> LexicalResolver<'cx, 'tcx> {
}
match *b_data {
+ VarValue::Empty(empty_ui) => {
+ let lub = match self.lub_empty(a_region) {
+ Ok(r) => r,
+ // If this empty region is from a universe that can
+ // name the placeholder, then the placeholder is
+ // larger; otherwise, the only ancestor is `'static`.
+ Err(placeholder) if empty_ui.can_name(placeholder.universe) => {
+ self.tcx().mk_region(RePlaceholder(placeholder))
+ }
+ Err(_) => self.tcx().lifetimes.re_static,
+ };
+
+ debug!("Expanding value of {:?} from empty lifetime to {:?}", b_vid, lub);
+
+ *b_data = VarValue::Value(lub);
+ true
+ }
VarValue::Value(cur_region) => {
// This is a specialized version of the `lub_concrete_regions`
// check below for a common case, here purely as an
// optimization.
let b_universe = self.var_infos[b_vid].universe;
- if let ReEmpty(a_universe) = *a_region && a_universe == b_universe {
- return false;
- }
let mut lub = self.lub_concrete_regions(a_region, cur_region);
if lub == cur_region {
@@ -300,6 +424,78 @@ impl<'cx, 'tcx> LexicalResolver<'cx, 'tcx> {
}
}
+ /// True if `a <= b`.
+ fn sub_region_values(&self, a: VarValue<'tcx>, b: VarValue<'tcx>) -> bool {
+ match (a, b) {
+ // Error region is `'static`
+ (VarValue::ErrorValue, _) | (_, VarValue::ErrorValue) => return true,
+ (VarValue::Empty(a_ui), VarValue::Empty(b_ui)) => {
+ // Empty regions are ordered according to the universe
+ // they are associated with.
+ a_ui.min(b_ui) == b_ui
+ }
+ (VarValue::Value(a), VarValue::Empty(_)) => {
+ match *a {
+ ReLateBound(..) | ReErased => {
+ bug!("cannot relate region: {:?}", a);
+ }
+
+ ReVar(v_id) => {
+ span_bug!(
+ self.var_infos[v_id].origin.span(),
+ "lub_concrete_regions invoked with non-concrete region: {:?}",
+ a
+ );
+ }
+
+ ReStatic | ReEarlyBound(_) | ReFree(_) => {
+ // nothing lives longer than `'static`
+
+ // All empty regions are less than early-bound, free,
+ // and scope regions.
+
+ false
+ }
+
+ RePlaceholder(_) => {
+ // The LUB is either `a` or `'static`
+ false
+ }
+ }
+ }
+ (VarValue::Empty(a_ui), VarValue::Value(b)) => {
+ match *b {
+ ReLateBound(..) | ReErased => {
+ bug!("cannot relate region: {:?}", b);
+ }
+
+ ReVar(v_id) => {
+ span_bug!(
+ self.var_infos[v_id].origin.span(),
+ "lub_concrete_regions invoked with non-concrete regions: {:?}",
+ b
+ );
+ }
+
+ ReStatic | ReEarlyBound(_) | ReFree(_) => {
+ // nothing lives longer than `'static`
+ // All empty regions are less than early-bound, free,
+ // and scope regions.
+ true
+ }
+
+ RePlaceholder(placeholder) => {
+ // If this empty region is from a universe that can
+ // name the placeholder, then the placeholder is
+ // larger; otherwise, the only ancestor is `'static`.
+ a_ui.can_name(placeholder.universe)
+ }
+ }
+ }
+ (VarValue::Value(a), VarValue::Value(b)) => self.sub_concrete_regions(a, b),
+ }
+ }
+
/// True if `a <= b`, but not defined over inference variables.
#[instrument(level = "trace", skip(self))]
fn sub_concrete_regions(&self, a: Region<'tcx>, b: Region<'tcx>) -> bool {
@@ -333,9 +529,9 @@ impl<'cx, 'tcx> LexicalResolver<'cx, 'tcx> {
///
/// Neither `a` nor `b` may be an inference variable (hence the
/// term "concrete regions").
- #[instrument(level = "trace", skip(self))]
+ #[instrument(level = "trace", skip(self), ret)]
fn lub_concrete_regions(&self, a: Region<'tcx>, b: Region<'tcx>) -> Region<'tcx> {
- let r = match (*a, *b) {
+ match (*a, *b) {
(ReLateBound(..), _) | (_, ReLateBound(..)) | (ReErased, _) | (_, ReErased) => {
bug!("cannot relate region: LUB({:?}, {:?})", a, b);
}
@@ -355,37 +551,6 @@ impl<'cx, 'tcx> LexicalResolver<'cx, 'tcx> {
self.tcx().lifetimes.re_static
}
- (ReEmpty(_), ReEarlyBound(_) | ReFree(_)) => {
- // All empty regions are less than early-bound, free,
- // and scope regions.
- b
- }
-
- (ReEarlyBound(_) | ReFree(_), ReEmpty(_)) => {
- // All empty regions are less than early-bound, free,
- // and scope regions.
- a
- }
-
- (ReEmpty(a_ui), ReEmpty(b_ui)) => {
- // Empty regions are ordered according to the universe
- // they are associated with.
- let ui = a_ui.min(b_ui);
- self.tcx().mk_region(ReEmpty(ui))
- }
-
- (ReEmpty(empty_ui), RePlaceholder(placeholder))
- | (RePlaceholder(placeholder), ReEmpty(empty_ui)) => {
- // If this empty region is from a universe that can
- // name the placeholder, then the placeholder is
- // larger; otherwise, the only ancestor is `'static`.
- if empty_ui.can_name(placeholder.universe) {
- self.tcx().mk_region(RePlaceholder(placeholder))
- } else {
- self.tcx().lifetimes.re_static
- }
- }
-
(ReEarlyBound(_) | ReFree(_), ReEarlyBound(_) | ReFree(_)) => {
self.region_rels.lub_free_regions(a, b)
}
@@ -399,11 +564,7 @@ impl<'cx, 'tcx> LexicalResolver<'cx, 'tcx> {
self.tcx().lifetimes.re_static
}
}
- };
-
- debug!("lub_concrete_regions({:?}, {:?}) = {:?}", a, b, r);
-
- r
+ }
}
/// After expansion is complete, go and check upper bounds (i.e.,
@@ -512,7 +673,7 @@ impl<'cx, 'tcx> LexicalResolver<'cx, 'tcx> {
for (node_vid, value) in var_data.values.iter_enumerated() {
match *value {
- VarValue::Value(_) => { /* Inference successful */ }
+ VarValue::Empty(_) | VarValue::Value(_) => { /* Inference successful */ }
VarValue::ErrorValue => {
// Inference impossible: this value contains
// inconsistent constraints.
@@ -833,12 +994,25 @@ impl<'cx, 'tcx> LexicalResolver<'cx, 'tcx> {
}
VerifyBound::OutlivedBy(r) => {
- self.sub_concrete_regions(min, var_values.normalize(self.tcx(), *r))
+ let a = match *min {
+ ty::ReVar(rid) => var_values.values[rid],
+ _ => VarValue::Value(min),
+ };
+ let b = match **r {
+ ty::ReVar(rid) => var_values.values[rid],
+ _ => VarValue::Value(*r),
+ };
+ self.sub_region_values(a, b)
}
- VerifyBound::IsEmpty => {
- matches!(*min, ty::ReEmpty(_))
- }
+ VerifyBound::IsEmpty => match *min {
+ ty::ReVar(rid) => match var_values.values[rid] {
+ VarValue::ErrorValue => false,
+ VarValue::Empty(_) => true,
+ VarValue::Value(_) => false,
+ },
+ _ => false,
+ },
VerifyBound::AnyBound(bs) => {
bs.iter().any(|b| self.bound_is_met(b, var_values, generic_ty, min))
@@ -880,6 +1054,7 @@ impl<'tcx> LexicalRegionResolutions<'tcx> {
) -> ty::Region<'tcx> {
let result = match *r {
ty::ReVar(rid) => match self.values[rid] {
+ VarValue::Empty(_) => r,
VarValue::Value(r) => r,
VarValue::ErrorValue => tcx.lifetimes.re_static,
},
diff --git a/compiler/rustc_infer/src/infer/lub.rs b/compiler/rustc_infer/src/infer/lub.rs
index 9f96d52c8..d6e56fcb7 100644
--- a/compiler/rustc_infer/src/infer/lub.rs
+++ b/compiler/rustc_infer/src/infer/lub.rs
@@ -119,7 +119,7 @@ impl<'tcx> ConstEquateRelation<'tcx> for Lub<'_, '_, 'tcx> {
}
impl<'combine, 'infcx, 'tcx> LatticeDir<'infcx, 'tcx> for Lub<'combine, 'infcx, 'tcx> {
- fn infcx(&self) -> &'infcx InferCtxt<'infcx, 'tcx> {
+ fn infcx(&self) -> &'infcx InferCtxt<'tcx> {
self.fields.infcx
}
diff --git a/compiler/rustc_infer/src/infer/mod.rs b/compiler/rustc_infer/src/infer/mod.rs
index d7d1b5fa2..ffb020398 100644
--- a/compiler/rustc_infer/src/infer/mod.rs
+++ b/compiler/rustc_infer/src/infer/mod.rs
@@ -20,6 +20,7 @@ use rustc_middle::infer::canonical::{Canonical, CanonicalVarValues};
use rustc_middle::infer::unify_key::{ConstVarValue, ConstVariableValue};
use rustc_middle::infer::unify_key::{ConstVariableOrigin, ConstVariableOriginKind, ToType};
use rustc_middle::mir::interpret::{ErrorHandled, EvalToValTreeResult};
+use rustc_middle::mir::ConstraintCategory;
use rustc_middle::traits::select;
use rustc_middle::ty::abstract_const::{AbstractConst, FailureKind};
use rustc_middle::ty::error::{ExpectedFound, TypeError};
@@ -32,12 +33,13 @@ pub use rustc_middle::ty::IntVarValue;
use rustc_middle::ty::{self, GenericParamDefKind, InferConst, Ty, TyCtxt};
use rustc_middle::ty::{ConstVid, FloatVid, IntVid, TyVid};
use rustc_span::symbol::Symbol;
-use rustc_span::Span;
+use rustc_span::{Span, DUMMY_SP};
-use std::cell::{Cell, Ref, RefCell};
+use std::cell::{Cell, RefCell};
use std::fmt;
use self::combine::CombineFields;
+use self::error_reporting::TypeErrCtxt;
use self::free_regions::RegionRelations;
use self::lexical_region_resolve::LexicalRegionResolutions;
use self::outlives::env::OutlivesEnvironment;
@@ -251,7 +253,7 @@ pub enum DefiningAnchor {
Error,
}
-pub struct InferCtxt<'a, 'tcx> {
+pub struct InferCtxt<'tcx> {
pub tcx: TyCtxt<'tcx>,
/// The `DefId` of the item in whose context we are performing inference or typeck.
@@ -271,12 +273,6 @@ pub struct InferCtxt<'a, 'tcx> {
/// solving is left to borrowck instead.
pub considering_regions: bool,
- /// During type-checking/inference of a body, `in_progress_typeck_results`
- /// contains a reference to the typeck results being built up, which are
- /// used for reading closure kinds/signatures as they are inferred,
- /// and for error reporting logic to read arbitrary node types.
- pub in_progress_typeck_results: Option<&'a RefCell<ty::TypeckResults<'tcx>>>,
-
pub inner: RefCell<InferCtxtInner<'tcx>>,
/// If set, this flag causes us to skip the 'leak check' during
@@ -316,12 +312,12 @@ pub struct InferCtxt<'a, 'tcx> {
///
/// Don't read this flag directly, call `is_tainted_by_errors()`
/// and `set_tainted_by_errors()`.
- tainted_by_errors_flag: Cell<bool>,
+ tainted_by_errors: Cell<Option<ErrorGuaranteed>>,
/// Track how many errors were reported when this infcx is created.
/// If the number of errors increases, that's also a sign (line
/// `tainted_by_errors`) to avoid reporting certain kinds of errors.
- // FIXME(matthewjasper) Merge into `tainted_by_errors_flag`
+ // FIXME(matthewjasper) Merge into `tainted_by_errors`
err_count_on_creation: usize,
/// This flag is true while there is an active snapshot.
@@ -337,6 +333,9 @@ pub struct InferCtxt<'a, 'tcx> {
/// when we enter into a higher-ranked (`for<..>`) type or trait
/// bound.
universe: Cell<ty::UniverseIndex>,
+
+ normalize_fn_sig_for_diagnostic:
+ Option<Lrc<dyn Fn(&InferCtxt<'tcx>, ty::PolyFnSig<'tcx>) -> ty::PolyFnSig<'tcx>>>,
}
/// See the `error_reporting` module for more details.
@@ -350,12 +349,11 @@ pub enum ValuePairs<'tcx> {
impl<'tcx> ValuePairs<'tcx> {
pub fn ty(&self) -> Option<(Ty<'tcx>, Ty<'tcx>)> {
- if let ValuePairs::Terms(ExpectedFound {
- expected: ty::Term::Ty(expected),
- found: ty::Term::Ty(found),
- }) = self
+ if let ValuePairs::Terms(ExpectedFound { expected, found }) = self
+ && let Some(expected) = expected.ty()
+ && let Some(found) = found.ty()
{
- Some((*expected, *found))
+ Some((expected, found))
} else {
None
}
@@ -406,7 +404,11 @@ pub enum SubregionOrigin<'tcx> {
/// Comparing the signature and requirements of an impl method against
/// the containing trait.
- CompareImplItemObligation { span: Span, impl_item_def_id: LocalDefId, trait_item_def_id: DefId },
+ CompareImplItemObligation {
+ span: Span,
+ impl_item_def_id: LocalDefId,
+ trait_item_def_id: DefId,
+ },
/// Checking that the bounds of a trait's associated type hold for a given impl
CheckAssociatedTypeBounds {
@@ -414,12 +416,24 @@ pub enum SubregionOrigin<'tcx> {
impl_item_def_id: LocalDefId,
trait_item_def_id: DefId,
},
+
+ AscribeUserTypeProvePredicate(Span),
}
// `SubregionOrigin` is used a lot. Make sure it doesn't unintentionally get bigger.
#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
static_assert_size!(SubregionOrigin<'_>, 32);
+impl<'tcx> SubregionOrigin<'tcx> {
+ pub fn to_constraint_category(&self) -> ConstraintCategory<'tcx> {
+ match self {
+ Self::Subtype(type_trace) => type_trace.cause.to_constraint_category(),
+ Self::AscribeUserTypeProvePredicate(span) => ConstraintCategory::Predicate(*span),
+ _ => ConstraintCategory::BoringNoLocation,
+ }
+ }
+}
+
/// Times when we replace late-bound regions with variables:
#[derive(Clone, Copy, Debug)]
pub enum LateBoundRegionConversionTime {
@@ -504,7 +518,7 @@ pub enum FixupError<'tcx> {
}
/// See the `region_obligations` field for more information.
-#[derive(Clone)]
+#[derive(Clone, Debug)]
pub struct RegionObligation<'tcx> {
pub sub_region: ty::Region<'tcx>,
pub sup_type: Ty<'tcx>,
@@ -532,14 +546,13 @@ impl<'tcx> fmt::Display for FixupError<'tcx> {
}
}
-/// A temporary returned by `tcx.infer_ctxt()`. This is necessary
-/// for multiple `InferCtxt` to share the same `in_progress_typeck_results`
-/// without using `Rc` or something similar.
+/// Used to configure inference contexts before their creation
pub struct InferCtxtBuilder<'tcx> {
tcx: TyCtxt<'tcx>,
defining_use_anchor: DefiningAnchor,
considering_regions: bool,
- fresh_typeck_results: Option<RefCell<ty::TypeckResults<'tcx>>>,
+ normalize_fn_sig_for_diagnostic:
+ Option<Lrc<dyn Fn(&InferCtxt<'tcx>, ty::PolyFnSig<'tcx>) -> ty::PolyFnSig<'tcx>>>,
}
pub trait TyCtxtInferExt<'tcx> {
@@ -552,25 +565,17 @@ impl<'tcx> TyCtxtInferExt<'tcx> for TyCtxt<'tcx> {
tcx: self,
defining_use_anchor: DefiningAnchor::Error,
considering_regions: true,
- fresh_typeck_results: None,
+ normalize_fn_sig_for_diagnostic: None,
}
}
}
impl<'tcx> InferCtxtBuilder<'tcx> {
- /// Used only by `rustc_typeck` during body type-checking/inference,
- /// will initialize `in_progress_typeck_results` with fresh `TypeckResults`.
- /// Will also change the scope for opaque type defining use checks to the given owner.
- pub fn with_fresh_in_progress_typeck_results(mut self, table_owner: LocalDefId) -> Self {
- self.fresh_typeck_results = Some(RefCell::new(ty::TypeckResults::new(table_owner)));
- self.with_opaque_type_inference(DefiningAnchor::Bind(table_owner))
- }
-
/// Whenever the `InferCtxt` should be able to handle defining uses of opaque types,
/// you need to call this function. Otherwise the opaque type will be treated opaquely.
///
/// It is only meant to be called in two places, for typeck
- /// (via `with_fresh_in_progress_typeck_results`) and for the inference context used
+ /// (via `Inherited::build`) and for the inference context used
/// in mir borrowck.
pub fn with_opaque_type_inference(mut self, defining_use_anchor: DefiningAnchor) -> Self {
self.defining_use_anchor = defining_use_anchor;
@@ -582,6 +587,14 @@ impl<'tcx> InferCtxtBuilder<'tcx> {
self
}
+ pub fn with_normalize_fn_sig_for_diagnostic(
+ mut self,
+ fun: Lrc<dyn Fn(&InferCtxt<'tcx>, ty::PolyFnSig<'tcx>) -> ty::PolyFnSig<'tcx>>,
+ ) -> Self {
+ self.normalize_fn_sig_for_diagnostic = Some(fun);
+ self
+ }
+
/// Given a canonical value `C` as a starting point, create an
/// inference context that contains each of the bound values
/// within instantiated as a fresh variable. The `f` closure is
@@ -589,47 +602,45 @@ impl<'tcx> InferCtxtBuilder<'tcx> {
/// `V` and a substitution `S`. This substitution `S` maps from
/// the bound values in `C` to their instantiated values in `V`
/// (in other words, `S(C) = V`).
- pub fn enter_with_canonical<T, R>(
+ pub fn build_with_canonical<T>(
&mut self,
span: Span,
canonical: &Canonical<'tcx, T>,
- f: impl for<'a> FnOnce(InferCtxt<'a, 'tcx>, T, CanonicalVarValues<'tcx>) -> R,
- ) -> R
+ ) -> (InferCtxt<'tcx>, T, CanonicalVarValues<'tcx>)
where
T: TypeFoldable<'tcx>,
{
- self.enter(|infcx| {
- let (value, subst) =
- infcx.instantiate_canonical_with_fresh_inference_vars(span, canonical);
- f(infcx, value, subst)
- })
+ let infcx = self.build();
+ let (value, subst) = infcx.instantiate_canonical_with_fresh_inference_vars(span, canonical);
+ (infcx, value, subst)
}
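A hedged sketch of the call-site shape implied by the new signature (the surrounding `tcx`, `span`, and `canonical` values are placeholders, and rustc-internal imports are omitted):

// Before this change the pieces were only available inside the closure passed
// to `enter_with_canonical`; now the builder hands them back directly.
let (infcx, value, subst) = tcx.infer_ctxt().build_with_canonical(span, &canonical);
// `subst` maps the bound values of `canonical` to the fresh inference
// variables created in `infcx`, i.e. S(C) = V as described above.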
- pub fn enter<R>(&mut self, f: impl for<'a> FnOnce(InferCtxt<'a, 'tcx>) -> R) -> R {
+ pub fn build(&mut self) -> InferCtxt<'tcx> {
let InferCtxtBuilder {
tcx,
defining_use_anchor,
considering_regions,
- ref fresh_typeck_results,
+ ref normalize_fn_sig_for_diagnostic,
} = *self;
- let in_progress_typeck_results = fresh_typeck_results.as_ref();
- f(InferCtxt {
+ InferCtxt {
tcx,
defining_use_anchor,
considering_regions,
- in_progress_typeck_results,
inner: RefCell::new(InferCtxtInner::new()),
lexical_region_resolutions: RefCell::new(None),
selection_cache: Default::default(),
evaluation_cache: Default::default(),
reported_trait_errors: Default::default(),
reported_closure_mismatch: Default::default(),
- tainted_by_errors_flag: Cell::new(false),
+ tainted_by_errors: Cell::new(None),
err_count_on_creation: tcx.sess.err_count(),
in_snapshot: Cell::new(false),
skip_leak_check: Cell::new(false),
universe: Cell::new(ty::UniverseIndex::ROOT),
- })
+ normalize_fn_sig_for_diagnostic: normalize_fn_sig_for_diagnostic
+ .as_ref()
+ .map(|f| f.clone()),
+ }
}
}
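The same pattern applies to the plain builder: the scoped `enter` closure becomes an owned `InferCtxt<'tcx>` with a single lifetime parameter. A minimal migration sketch, assuming a caller that previously used `enter` (`def_id` is a placeholder):

// Before: tcx.infer_ctxt().enter(|infcx| { /* use infcx */ });
// After:
let infcx = tcx
    .infer_ctxt()
    .with_opaque_type_inference(DefiningAnchor::Bind(def_id))
    .build();
// `infcx` is now an ordinary value that lives until dropped; no closure scope needed.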
@@ -641,7 +652,7 @@ impl<'tcx, T> InferOk<'tcx, T> {
/// Extracts `value`, registering any obligations into `fulfill_cx`.
pub fn into_value_registering_obligations(
self,
- infcx: &InferCtxt<'_, 'tcx>,
+ infcx: &InferCtxt<'tcx>,
fulfill_cx: &mut dyn TraitEngine<'tcx>,
) -> T {
let InferOk { value, obligations } = self;
@@ -657,29 +668,34 @@ impl<'tcx> InferOk<'tcx, ()> {
}
#[must_use = "once you start a snapshot, you should always consume it"]
-pub struct CombinedSnapshot<'a, 'tcx> {
+pub struct CombinedSnapshot<'tcx> {
undo_snapshot: Snapshot<'tcx>,
region_constraints_snapshot: RegionSnapshot,
universe: ty::UniverseIndex,
was_in_snapshot: bool,
- _in_progress_typeck_results: Option<Ref<'a, ty::TypeckResults<'tcx>>>,
}
-impl<'a, 'tcx> InferCtxt<'a, 'tcx> {
+impl<'tcx> InferCtxt<'tcx> {
+ /// Creates a `TypeErrCtxt` for emitting various inference errors.
+ /// During typeck, use `FnCtxt::infer_err` instead.
+ pub fn err_ctxt(&self) -> TypeErrCtxt<'_, 'tcx> {
+ TypeErrCtxt { infcx: self, typeck_results: None }
+ }
+
/// calls `tcx.try_unify_abstract_consts` after
/// canonicalizing the consts.
#[instrument(skip(self), level = "debug")]
pub fn try_unify_abstract_consts(
&self,
- a: ty::Unevaluated<'tcx, ()>,
- b: ty::Unevaluated<'tcx, ()>,
+ a: ty::UnevaluatedConst<'tcx>,
+ b: ty::UnevaluatedConst<'tcx>,
param_env: ty::ParamEnv<'tcx>,
) -> bool {
// Reject any attempt to unify two unevaluated constants that contain inference
// variables, since inference variables in queries lead to ICEs.
- if a.substs.has_infer_types_or_consts()
- || b.substs.has_infer_types_or_consts()
- || param_env.has_infer_types_or_consts()
+ if a.substs.has_non_region_infer()
+ || b.substs.has_non_region_infer()
+ || param_env.has_non_region_infer()
{
debug!("a or b or param_env contain infer vars in its substs -> cannot unify");
return false;
@@ -704,7 +720,7 @@ impl<'a, 'tcx> InferCtxt<'a, 'tcx> {
/// if this is not a type variable.
///
/// No attempt is made to resolve `ty`.
- pub fn type_var_origin(&'a self, ty: Ty<'tcx>) -> Option<TypeVariableOrigin> {
+ pub fn type_var_origin(&self, ty: Ty<'tcx>) -> Option<TypeVariableOrigin> {
match *ty.kind() {
ty::Infer(ty::TyVar(vid)) => {
Some(*self.inner.borrow_mut().type_variables().var_origin(vid))
@@ -745,7 +761,7 @@ impl<'a, 'tcx> InferCtxt<'a, 'tcx> {
vars
}
- fn combine_fields(
+ fn combine_fields<'a>(
&'a self,
trace: TypeTrace<'tcx>,
param_env: ty::ParamEnv<'tcx>,
@@ -787,7 +803,7 @@ impl<'a, 'tcx> InferCtxt<'a, 'tcx> {
result
}
- fn start_snapshot(&self) -> CombinedSnapshot<'a, 'tcx> {
+ fn start_snapshot(&self) -> CombinedSnapshot<'tcx> {
debug!("start_snapshot()");
let in_snapshot = self.in_snapshot.replace(true);
@@ -799,22 +815,16 @@ impl<'a, 'tcx> InferCtxt<'a, 'tcx> {
region_constraints_snapshot: inner.unwrap_region_constraints().start_snapshot(),
universe: self.universe(),
was_in_snapshot: in_snapshot,
- // Borrow typeck results "in progress" (i.e., during typeck)
- // to ban writes from within a snapshot to them.
- _in_progress_typeck_results: self
- .in_progress_typeck_results
- .map(|typeck_results| typeck_results.borrow()),
}
}
#[instrument(skip(self, snapshot), level = "debug")]
- fn rollback_to(&self, cause: &str, snapshot: CombinedSnapshot<'a, 'tcx>) {
+ fn rollback_to(&self, cause: &str, snapshot: CombinedSnapshot<'tcx>) {
let CombinedSnapshot {
undo_snapshot,
region_constraints_snapshot,
universe,
was_in_snapshot,
- _in_progress_typeck_results,
} = snapshot;
self.in_snapshot.set(was_in_snapshot);
@@ -826,13 +836,12 @@ impl<'a, 'tcx> InferCtxt<'a, 'tcx> {
}
#[instrument(skip(self, snapshot), level = "debug")]
- fn commit_from(&self, snapshot: CombinedSnapshot<'a, 'tcx>) {
+ fn commit_from(&self, snapshot: CombinedSnapshot<'tcx>) {
let CombinedSnapshot {
undo_snapshot,
region_constraints_snapshot: _,
universe: _,
was_in_snapshot,
- _in_progress_typeck_results,
} = snapshot;
self.in_snapshot.set(was_in_snapshot);
@@ -844,7 +853,7 @@ impl<'a, 'tcx> InferCtxt<'a, 'tcx> {
#[instrument(skip(self, f), level = "debug")]
pub fn commit_if_ok<T, E, F>(&self, f: F) -> Result<T, E>
where
- F: FnOnce(&CombinedSnapshot<'a, 'tcx>) -> Result<T, E>,
+ F: FnOnce(&CombinedSnapshot<'tcx>) -> Result<T, E>,
{
let snapshot = self.start_snapshot();
let r = f(&snapshot);
@@ -864,7 +873,7 @@ impl<'a, 'tcx> InferCtxt<'a, 'tcx> {
#[instrument(skip(self, f), level = "debug")]
pub fn probe<R, F>(&self, f: F) -> R
where
- F: FnOnce(&CombinedSnapshot<'a, 'tcx>) -> R,
+ F: FnOnce(&CombinedSnapshot<'tcx>) -> R,
{
let snapshot = self.start_snapshot();
let r = f(&snapshot);
@@ -876,7 +885,7 @@ impl<'a, 'tcx> InferCtxt<'a, 'tcx> {
#[instrument(skip(self, f), level = "debug")]
pub fn probe_maybe_skip_leak_check<R, F>(&self, should_skip: bool, f: F) -> R
where
- F: FnOnce(&CombinedSnapshot<'a, 'tcx>) -> R,
+ F: FnOnce(&CombinedSnapshot<'tcx>) -> R,
{
let snapshot = self.start_snapshot();
let was_skip_leak_check = self.skip_leak_check.get();
@@ -896,7 +905,7 @@ impl<'a, 'tcx> InferCtxt<'a, 'tcx> {
/// - `Some(false)` -- if there are `'a: 'b` constraints but none involve placeholders
pub fn region_constraints_added_in_snapshot(
&self,
- snapshot: &CombinedSnapshot<'a, 'tcx>,
+ snapshot: &CombinedSnapshot<'tcx>,
) -> Option<bool> {
self.inner
.borrow_mut()
@@ -904,7 +913,7 @@ impl<'a, 'tcx> InferCtxt<'a, 'tcx> {
.region_constraints_added_in_snapshot(&snapshot.undo_snapshot)
}
- pub fn opaque_types_added_in_snapshot(&self, snapshot: &CombinedSnapshot<'a, 'tcx>) -> bool {
+ pub fn opaque_types_added_in_snapshot(&self, snapshot: &CombinedSnapshot<'tcx>) -> bool {
self.inner.borrow().undo_log.opaque_types_in_snapshot(&snapshot.undo_snapshot)
}
@@ -988,7 +997,7 @@ impl<'a, 'tcx> InferCtxt<'a, 'tcx> {
cause: &ObligationCause<'tcx>,
param_env: ty::ParamEnv<'tcx>,
predicate: ty::PolyCoercePredicate<'tcx>,
- ) -> Option<InferResult<'tcx, ()>> {
+ ) -> Result<InferResult<'tcx, ()>, (TyVid, TyVid)> {
let subtype_predicate = predicate.map_bound(|p| ty::SubtypePredicate {
a_is_expected: false, // when coercing from `a` to `b`, `b` is expected
a: p.a,
@@ -1002,7 +1011,7 @@ impl<'a, 'tcx> InferCtxt<'a, 'tcx> {
cause: &ObligationCause<'tcx>,
param_env: ty::ParamEnv<'tcx>,
predicate: ty::PolySubtypePredicate<'tcx>,
- ) -> Option<InferResult<'tcx, ()>> {
+ ) -> Result<InferResult<'tcx, ()>, (TyVid, TyVid)> {
// Check for two unresolved inference variables, in which case we can
// make no progress. This is partly a micro-optimization, but it's
// also an opportunity to "sub-unify" the variables. This isn't
@@ -1021,12 +1030,12 @@ impl<'a, 'tcx> InferCtxt<'a, 'tcx> {
match (r_a.kind(), r_b.kind()) {
(&ty::Infer(ty::TyVar(a_vid)), &ty::Infer(ty::TyVar(b_vid))) => {
self.inner.borrow_mut().type_variables().sub(a_vid, b_vid);
- return None;
+ return Err((a_vid, b_vid));
}
_ => {}
}
- Some(self.commit_if_ok(|_snapshot| {
+ Ok(self.commit_if_ok(|_snapshot| {
let ty::SubtypePredicate { a_is_expected, a, b } =
self.replace_bound_vars_with_placeholders(predicate);
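Because the return type changed from `Option` to `Result`, a caller can now see which two type variables blocked progress. A hypothetical caller sketch; the method name `subtype_predicate` is inferred from the `PolySubtypePredicate` parameter and is not shown in this hunk:

match infcx.subtype_predicate(&cause, param_env, predicate) {
    Ok(Ok(infer_ok)) => { /* register `infer_ok.obligations` */ }
    Ok(Err(type_error)) => { /* report the mismatch */ }
    Err((a_vid, b_vid)) => {
        // Both sides are still unresolved type variables; record `a_vid` and
        // `b_vid` and retry once either of them is instantiated.
    }
}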
@@ -1141,8 +1150,8 @@ impl<'a, 'tcx> InferCtxt<'a, 'tcx> {
/// Return the universe that the region `r` was created in. For
/// most regions (e.g., `'static`, named regions from the user,
/// etc) this is the root universe U0. For inference variables or
- /// placeholders, however, it will return the universe which which
- /// they are associated.
+ /// placeholders, however, it will return the universe with which
+ /// they are associated.
pub fn universe_of_region(&self, r: ty::Region<'tcx>) -> ty::UniverseIndex {
self.inner.borrow_mut().unwrap_region_constraints().universe(r)
}
@@ -1227,23 +1236,25 @@ impl<'a, 'tcx> InferCtxt<'a, 'tcx> {
pub fn is_tainted_by_errors(&self) -> bool {
debug!(
"is_tainted_by_errors(err_count={}, err_count_on_creation={}, \
- tainted_by_errors_flag={})",
+ tainted_by_errors={})",
self.tcx.sess.err_count(),
self.err_count_on_creation,
- self.tainted_by_errors_flag.get()
+ self.tainted_by_errors.get().is_some()
);
if self.tcx.sess.err_count() > self.err_count_on_creation {
return true; // errors reported since this infcx was made
}
- self.tainted_by_errors_flag.get()
+ self.tainted_by_errors.get().is_some()
}
/// Set the "tainted by errors" flag to true. We call this when we
/// observe an error from a prior pass.
pub fn set_tainted_by_errors(&self) {
debug!("set_tainted_by_errors()");
- self.tainted_by_errors_flag.set(true)
+ self.tainted_by_errors.set(Some(
+ self.tcx.sess.delay_span_bug(DUMMY_SP, "`InferCtxt` incorrectly tainted by errors"),
+ ));
}
pub fn skip_region_resolution(&self) {
@@ -1272,7 +1283,7 @@ impl<'a, 'tcx> InferCtxt<'a, 'tcx> {
assert!(old_value.is_none());
}
- /// Process the region constraints and return any any errors that
+ /// Process the region constraints and return any errors that
/// result. After this, no more unification operations should be
/// done -- or the compiler will panic -- but it is legal to use
/// `resolve_vars_if_possible` as well as `fully_resolve`.
@@ -1306,32 +1317,6 @@ impl<'a, 'tcx> InferCtxt<'a, 'tcx> {
errors
}
-
- /// Process the region constraints and report any errors that
- /// result. After this, no more unification operations should be
- /// done -- or the compiler will panic -- but it is legal to use
- /// `resolve_vars_if_possible` as well as `fully_resolve`.
- ///
- /// Make sure to call [`InferCtxt::process_registered_region_obligations`]
- /// first, or preferrably use [`InferCtxt::check_region_obligations_and_report_errors`]
- /// to do both of these operations together.
- pub fn resolve_regions_and_report_errors(
- &self,
- generic_param_scope: LocalDefId,
- outlives_env: &OutlivesEnvironment<'tcx>,
- ) {
- let errors = self.resolve_regions(outlives_env);
-
- if !self.is_tainted_by_errors() {
- // As a heuristic, just skip reporting region errors
- // altogether if other errors have been reported while
- // this infcx was in use. This is totally hokey but
- // otherwise we have a hard time separating legit region
- // errors from silly ones.
- self.report_region_errors(generic_param_scope, &errors);
- }
- }
-
/// Obtains (and clears) the current set of region
/// constraints. The inference context is still usable: further
/// unifications will simply add new constraints.
@@ -1484,62 +1469,12 @@ impl<'a, 'tcx> InferCtxt<'a, 'tcx> {
* except during the writeback phase.
*/
- resolve::fully_resolve(self, value)
- }
-
- // [Note-Type-error-reporting]
- // An invariant is that anytime the expected or actual type is Error (the special
- // error type, meaning that an error occurred when typechecking this expression),
- // this is a derived error. The error cascaded from another error (that was already
- // reported), so it's not useful to display it to the user.
- // The following methods implement this logic.
- // They check if either the actual or expected type is Error, and don't print the error
- // in this case. The typechecker should only ever report type errors involving mismatched
- // types using one of these methods, and should not call span_err directly for such
- // errors.
-
- pub fn type_error_struct_with_diag<M>(
- &self,
- sp: Span,
- mk_diag: M,
- actual_ty: Ty<'tcx>,
- ) -> DiagnosticBuilder<'tcx, ErrorGuaranteed>
- where
- M: FnOnce(String) -> DiagnosticBuilder<'tcx, ErrorGuaranteed>,
- {
- let actual_ty = self.resolve_vars_if_possible(actual_ty);
- debug!("type_error_struct_with_diag({:?}, {:?})", sp, actual_ty);
-
- let mut err = mk_diag(self.ty_to_string(actual_ty));
-
- // Don't report an error if actual type is `Error`.
- if actual_ty.references_error() {
- err.downgrade_to_delayed_bug();
- }
-
- err
- }
-
- pub fn report_mismatched_types(
- &self,
- cause: &ObligationCause<'tcx>,
- expected: Ty<'tcx>,
- actual: Ty<'tcx>,
- err: TypeError<'tcx>,
- ) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> {
- let trace = TypeTrace::types(cause, true, expected, actual);
- self.report_and_explain_type_error(trace, &err)
- }
-
- pub fn report_mismatched_consts(
- &self,
- cause: &ObligationCause<'tcx>,
- expected: ty::Const<'tcx>,
- actual: ty::Const<'tcx>,
- err: TypeError<'tcx>,
- ) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> {
- let trace = TypeTrace::consts(cause, true, expected, actual);
- self.report_and_explain_type_error(trace, &err)
+ let value = resolve::fully_resolve(self, value);
+ assert!(
+ value.as_ref().map_or(true, |value| !value.needs_infer()),
+ "`{value:?}` is not fully resolved"
+ );
+ value
}
pub fn replace_bound_vars_with_fresh_vars<T>(
@@ -1556,7 +1491,7 @@ impl<'a, 'tcx> InferCtxt<'a, 'tcx> {
}
struct ToFreshVars<'a, 'tcx> {
- infcx: &'a InferCtxt<'a, 'tcx>,
+ infcx: &'a InferCtxt<'tcx>,
span: Span,
lbrct: LateBoundRegionConversionTime,
map: FxHashMap<ty::BoundVar, ty::GenericArg<'tcx>>,
@@ -1656,7 +1591,7 @@ impl<'a, 'tcx> InferCtxt<'a, 'tcx> {
pub fn try_const_eval_resolve(
&self,
param_env: ty::ParamEnv<'tcx>,
- unevaluated: ty::Unevaluated<'tcx>,
+ unevaluated: ty::UnevaluatedConst<'tcx>,
ty: Ty<'tcx>,
span: Option<Span>,
) -> Result<ty::Const<'tcx>, ErrorHandled> {
@@ -1691,7 +1626,7 @@ impl<'a, 'tcx> InferCtxt<'a, 'tcx> {
pub fn const_eval_resolve(
&self,
mut param_env: ty::ParamEnv<'tcx>,
- unevaluated: ty::Unevaluated<'tcx>,
+ unevaluated: ty::UnevaluatedConst<'tcx>,
span: Option<Span>,
) -> EvalToValTreeResult<'tcx> {
let mut substs = self.resolve_vars_if_possible(unevaluated.substs);
@@ -1699,8 +1634,8 @@ impl<'a, 'tcx> InferCtxt<'a, 'tcx> {
// Postpone the evaluation of constants whose substs depend on inference
// variables
- if substs.has_infer_types_or_consts() {
- let ac = AbstractConst::new(self.tcx, unevaluated.shrink());
+ if substs.has_non_region_infer() {
+ let ac = AbstractConst::new(self.tcx, unevaluated);
match ac {
Ok(None) => {
substs = InternalSubsts::identity_for_item(self.tcx, unevaluated.def.did);
@@ -1722,11 +1657,7 @@ impl<'a, 'tcx> InferCtxt<'a, 'tcx> {
debug!(?param_env_erased);
debug!(?substs_erased);
- let unevaluated = ty::Unevaluated {
- def: unevaluated.def,
- substs: substs_erased,
- promoted: unevaluated.promoted,
- };
+ let unevaluated = ty::UnevaluatedConst { def: unevaluated.def, substs: substs_erased };
// The return value is the evaluated value which doesn't contain any reference to inference
// variables, thus we don't need to substitute back the original values.
@@ -1785,6 +1716,86 @@ impl<'a, 'tcx> InferCtxt<'a, 'tcx> {
}
}
+impl<'tcx> TypeErrCtxt<'_, 'tcx> {
+ /// Process the region constraints and report any errors that
+ /// result. After this, no more unification operations should be
+ /// done -- or the compiler will panic -- but it is legal to use
+ /// `resolve_vars_if_possible` as well as `fully_resolve`.
+ ///
+ /// Make sure to call [`InferCtxt::process_registered_region_obligations`]
+ /// first, or preferably use [`InferCtxt::check_region_obligations_and_report_errors`]
+ /// to do both of these operations together.
+ pub fn resolve_regions_and_report_errors(
+ &self,
+ generic_param_scope: LocalDefId,
+ outlives_env: &OutlivesEnvironment<'tcx>,
+ ) {
+ let errors = self.resolve_regions(outlives_env);
+
+ if !self.is_tainted_by_errors() {
+ // As a heuristic, just skip reporting region errors
+ // altogether if other errors have been reported while
+ // this infcx was in use. This is totally hokey but
+ // otherwise we have a hard time separating legit region
+ // errors from silly ones.
+ self.report_region_errors(generic_param_scope, &errors);
+ }
+ }
+
+ // [Note-Type-error-reporting]
+ // An invariant is that anytime the expected or actual type is Error (the special
+ // error type, meaning that an error occurred when typechecking this expression),
+ // this is a derived error. The error cascaded from another error (that was already
+ // reported), so it's not useful to display it to the user.
+ // The following methods implement this logic.
+ // They check if either the actual or expected type is Error, and don't print the error
+ // in this case. The typechecker should only ever report type errors involving mismatched
+ // types using one of these methods, and should not call span_err directly for such
+ // errors.
+
+ pub fn type_error_struct_with_diag<M>(
+ &self,
+ sp: Span,
+ mk_diag: M,
+ actual_ty: Ty<'tcx>,
+ ) -> DiagnosticBuilder<'tcx, ErrorGuaranteed>
+ where
+ M: FnOnce(String) -> DiagnosticBuilder<'tcx, ErrorGuaranteed>,
+ {
+ let actual_ty = self.resolve_vars_if_possible(actual_ty);
+ debug!("type_error_struct_with_diag({:?}, {:?})", sp, actual_ty);
+
+ let mut err = mk_diag(self.ty_to_string(actual_ty));
+
+ // Don't report an error if actual type is `Error`.
+ if actual_ty.references_error() {
+ err.downgrade_to_delayed_bug();
+ }
+
+ err
+ }
+
+ pub fn report_mismatched_types(
+ &self,
+ cause: &ObligationCause<'tcx>,
+ expected: Ty<'tcx>,
+ actual: Ty<'tcx>,
+ err: TypeError<'tcx>,
+ ) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> {
+ self.report_and_explain_type_error(TypeTrace::types(cause, true, expected, actual), err)
+ }
+
+ pub fn report_mismatched_consts(
+ &self,
+ cause: &ObligationCause<'tcx>,
+ expected: ty::Const<'tcx>,
+ actual: ty::Const<'tcx>,
+ err: TypeError<'tcx>,
+ ) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> {
+ self.report_and_explain_type_error(TypeTrace::consts(cause, true, expected, actual), err)
+ }
+}
+
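Together with `err_ctxt()` above, the moved methods suggest call sites roughly like the following (a sketch; `generic_param_scope`, `outlives_env`, and the mismatch values are placeholders):

// Region errors: resolve them, then report through the error context.
infcx.err_ctxt().resolve_regions_and_report_errors(generic_param_scope, &outlives_env);

// Type mismatches: build the diagnostic on the error context and emit it.
let mut diag = infcx
    .err_ctxt()
    .report_mismatched_types(&cause, expected_ty, actual_ty, type_error);
diag.emit();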
/// Helper for `ty_or_const_infer_var_changed` (see comment on that), currently
/// used only for `traits::fulfill`'s list of `stalled_on` inference variables.
#[derive(Copy, Clone, Debug)]
@@ -1814,7 +1825,7 @@ impl<'tcx> TyOrConstInferVar<'tcx> {
/// Tries to extract an inference variable from a type, returns `None`
/// for types other than `ty::Infer(_)` (or `InferTy::Fresh*`).
- pub fn maybe_from_ty(ty: Ty<'tcx>) -> Option<Self> {
+ fn maybe_from_ty(ty: Ty<'tcx>) -> Option<Self> {
match *ty.kind() {
ty::Infer(ty::TyVar(v)) => Some(TyOrConstInferVar::Ty(v)),
ty::Infer(ty::IntVar(v)) => Some(TyOrConstInferVar::TyInt(v)),
@@ -1825,7 +1836,7 @@ impl<'tcx> TyOrConstInferVar<'tcx> {
/// Tries to extract an inference variable from a constant, returns `None`
/// for constants other than `ty::ConstKind::Infer(_)` (or `InferConst::Fresh`).
- pub fn maybe_from_const(ct: ty::Const<'tcx>) -> Option<Self> {
+ fn maybe_from_const(ct: ty::Const<'tcx>) -> Option<Self> {
match ct.kind() {
ty::ConstKind::Infer(InferConst::Var(v)) => Some(TyOrConstInferVar::Const(v)),
_ => None,
@@ -1854,7 +1865,7 @@ impl<'tcx> TypeFolder<'tcx> for InferenceLiteralEraser<'tcx> {
}
struct ShallowResolver<'a, 'tcx> {
- infcx: &'a InferCtxt<'a, 'tcx>,
+ infcx: &'a InferCtxt<'tcx>,
}
impl<'a, 'tcx> TypeFolder<'tcx> for ShallowResolver<'a, 'tcx> {
@@ -1937,6 +1948,18 @@ impl<'tcx> TypeTrace<'tcx> {
}
}
+ pub fn poly_trait_refs(
+ cause: &ObligationCause<'tcx>,
+ a_is_expected: bool,
+ a: ty::PolyTraitRef<'tcx>,
+ b: ty::PolyTraitRef<'tcx>,
+ ) -> TypeTrace<'tcx> {
+ TypeTrace {
+ cause: cause.clone(),
+ values: PolyTraitRefs(ExpectedFound::new(a_is_expected, a.into(), b.into())),
+ }
+ }
+
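A one-line sketch of the new constructor in use (values are placeholders):

// Records a trait-ref mismatch rather than a plain type mismatch.
let trace = TypeTrace::poly_trait_refs(&cause, true, expected_trait_ref, found_trait_ref);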
pub fn consts(
cause: &ObligationCause<'tcx>,
a_is_expected: bool,
@@ -1962,6 +1985,7 @@ impl<'tcx> SubregionOrigin<'tcx> {
DataBorrowed(_, a) => a,
ReferenceOutlivesReferent(_, a) => a,
CompareImplItemObligation { span, .. } => span,
+ AscribeUserTypeProvePredicate(span) => span,
CheckAssociatedTypeBounds { ref parent, .. } => parent.span(),
}
}
@@ -1994,6 +2018,10 @@ impl<'tcx> SubregionOrigin<'tcx> {
parent: Box::new(default()),
},
+ traits::ObligationCauseCode::AscribeUserTypeProvePredicate(span) => {
+ SubregionOrigin::AscribeUserTypeProvePredicate(span)
+ }
+
_ => default(),
}
}
@@ -2015,16 +2043,6 @@ impl RegionVariableOrigin {
}
}
-impl<'tcx> fmt::Debug for RegionObligation<'tcx> {
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- write!(
- f,
- "RegionObligation(sub_region={:?}, sup_type={:?})",
- self.sub_region, self.sup_type
- )
- }
-}
-
/// Replaces substs that reference param or infer variables with suitable
/// placeholders. This function is meant to remove these param and infer
/// substs when they're not actually needed to evaluate a constant.
@@ -2034,21 +2052,17 @@ fn replace_param_and_infer_substs_with_placeholder<'tcx>(
) -> SubstsRef<'tcx> {
tcx.mk_substs(substs.iter().enumerate().map(|(idx, arg)| {
match arg.unpack() {
- GenericArgKind::Type(_)
- if arg.has_param_types_or_consts() || arg.has_infer_types_or_consts() =>
- {
+ GenericArgKind::Type(_) if arg.has_non_region_param() || arg.has_non_region_infer() => {
tcx.mk_ty(ty::Placeholder(ty::PlaceholderType {
universe: ty::UniverseIndex::ROOT,
name: ty::BoundVar::from_usize(idx),
}))
.into()
}
- GenericArgKind::Const(ct)
- if ct.has_infer_types_or_consts() || ct.has_param_types_or_consts() =>
- {
+ GenericArgKind::Const(ct) if ct.has_non_region_infer() || ct.has_non_region_param() => {
let ty = ct.ty();
// If the type references param or infer, replace that too...
- if ty.has_param_types_or_consts() || ty.has_infer_types_or_consts() {
+ if ty.has_non_region_param() || ty.has_non_region_infer() {
bug!("const `{ct}`'s type should not reference params or types");
}
tcx.mk_const(ty::ConstS {
diff --git a/compiler/rustc_infer/src/infer/nll_relate/mod.rs b/compiler/rustc_infer/src/infer/nll_relate/mod.rs
index bab4f3e9e..600f94f09 100644
--- a/compiler/rustc_infer/src/infer/nll_relate/mod.rs
+++ b/compiler/rustc_infer/src/infer/nll_relate/mod.rs
@@ -25,7 +25,9 @@ use crate::infer::combine::ConstEquateRelation;
use crate::infer::InferCtxt;
use crate::infer::{ConstVarValue, ConstVariableValue};
use crate::infer::{TypeVariableOrigin, TypeVariableOriginKind};
+use crate::traits::PredicateObligation;
use rustc_data_structures::fx::FxHashMap;
+use rustc_middle::traits::ObligationCause;
use rustc_middle::ty::error::TypeError;
use rustc_middle::ty::relate::{self, Relate, RelateResult, TypeRelation};
use rustc_middle::ty::visit::{TypeSuperVisitable, TypeVisitable, TypeVisitor};
@@ -44,7 +46,7 @@ pub struct TypeRelating<'me, 'tcx, D>
where
D: TypeRelatingDelegate<'tcx>,
{
- infcx: &'me InferCtxt<'me, 'tcx>,
+ infcx: &'me InferCtxt<'tcx>,
/// Callback to use when we deduce an outlives relationship.
delegate: D,
@@ -91,11 +93,9 @@ pub trait TypeRelatingDelegate<'tcx> {
);
fn const_equate(&mut self, a: ty::Const<'tcx>, b: ty::Const<'tcx>);
- fn register_opaque_type(
+ fn register_opaque_type_obligations(
&mut self,
- a: Ty<'tcx>,
- b: Ty<'tcx>,
- a_is_expected: bool,
+ obligations: Vec<PredicateObligation<'tcx>>,
) -> Result<(), TypeError<'tcx>>;
/// Creates a new universe index. Used when instantiating placeholders.
@@ -149,11 +149,7 @@ impl<'me, 'tcx, D> TypeRelating<'me, 'tcx, D>
where
D: TypeRelatingDelegate<'tcx>,
{
- pub fn new(
- infcx: &'me InferCtxt<'me, 'tcx>,
- delegate: D,
- ambient_variance: ty::Variance,
- ) -> Self {
+ pub fn new(infcx: &'me InferCtxt<'tcx>, delegate: D, ambient_variance: ty::Variance) -> Self {
Self {
infcx,
delegate,
@@ -357,7 +353,7 @@ where
// In NLL, we don't have type inference variables
// floating around, so we can do this rather imprecise
// variant of the occurs-check.
- assert!(!generalized_ty.has_infer_types_or_consts());
+ assert!(!generalized_ty.has_non_region_infer());
}
self.infcx.inner.borrow_mut().type_variables().instantiate(vid, generalized_ty);
@@ -396,6 +392,37 @@ where
generalizer.relate(value, value)
}
+
+ fn relate_opaques(&mut self, a: Ty<'tcx>, b: Ty<'tcx>) -> RelateResult<'tcx, Ty<'tcx>> {
+ let (a, b) = if self.a_is_expected() { (a, b) } else { (b, a) };
+ let mut generalize = |ty, ty_is_expected| {
+ let var = self.infcx.next_ty_var_id_in_universe(
+ TypeVariableOrigin {
+ kind: TypeVariableOriginKind::MiscVariable,
+ span: self.delegate.span(),
+ },
+ ty::UniverseIndex::ROOT,
+ );
+ if ty_is_expected {
+ self.relate_ty_var((ty, var))
+ } else {
+ self.relate_ty_var((var, ty))
+ }
+ };
+ let (a, b) = match (a.kind(), b.kind()) {
+ (&ty::Opaque(..), _) => (a, generalize(b, false)?),
+ (_, &ty::Opaque(..)) => (generalize(a, true)?, b),
+ _ => unreachable!(),
+ };
+ let cause = ObligationCause::dummy_with_span(self.delegate.span());
+ let obligations = self
+ .infcx
+ .handle_opaque_type(a, b, true, &cause, self.delegate.param_env())?
+ .obligations;
+ self.delegate.register_opaque_type_obligations(obligations)?;
+ trace!(a = ?a.kind(), b = ?b.kind(), "opaque type instantiated");
+ Ok(a)
+ }
}
/// When we instantiate an inference variable with a value in
@@ -516,7 +543,7 @@ where
true
}
- #[instrument(skip(self, info), level = "trace")]
+ #[instrument(skip(self, info), level = "trace", ret)]
fn relate_with_variance<T: Relate<'tcx>>(
&mut self,
variance: ty::Variance,
@@ -534,8 +561,6 @@ where
self.ambient_variance = old_ambient_variance;
- debug!(?r);
-
Ok(r)
}
@@ -572,32 +597,16 @@ where
(&ty::Infer(ty::TyVar(vid)), _) => self.relate_ty_var((vid, b)),
(&ty::Opaque(a_def_id, _), &ty::Opaque(b_def_id, _)) if a_def_id == b_def_id => {
- self.infcx.super_combine_tys(self, a, b)
+ infcx.super_combine_tys(self, a, b).or_else(|err| {
+ self.tcx().sess.delay_span_bug(
+ self.delegate.span(),
+ "failure to relate an opaque to itself should result in an error later on",
+ );
+ if a_def_id.is_local() { self.relate_opaques(a, b) } else { Err(err) }
+ })
}
(&ty::Opaque(did, ..), _) | (_, &ty::Opaque(did, ..)) if did.is_local() => {
- let (a, b) = if self.a_is_expected() { (a, b) } else { (b, a) };
- let mut generalize = |ty, ty_is_expected| {
- let var = infcx.next_ty_var_id_in_universe(
- TypeVariableOrigin {
- kind: TypeVariableOriginKind::MiscVariable,
- span: self.delegate.span(),
- },
- ty::UniverseIndex::ROOT,
- );
- if ty_is_expected {
- self.relate_ty_var((ty, var))
- } else {
- self.relate_ty_var((var, ty))
- }
- };
- let (a, b) = match (a.kind(), b.kind()) {
- (&ty::Opaque(..), _) => (a, generalize(b, false)?),
- (_, &ty::Opaque(..)) => (generalize(a, true)?, b),
- _ => unreachable!(),
- };
- self.delegate.register_opaque_type(a, b, true)?;
- trace!(a = ?a.kind(), b = ?b.kind(), "opaque type instantiated");
- Ok(a)
+ self.relate_opaques(a, b)
}
(&ty::Projection(projection_ty), _)
@@ -859,7 +868,7 @@ struct TypeGeneralizer<'me, 'tcx, D>
where
D: TypeRelatingDelegate<'tcx>,
{
- infcx: &'me InferCtxt<'me, 'tcx>,
+ infcx: &'me InferCtxt<'tcx>,
delegate: &'me mut D,
diff --git a/compiler/rustc_infer/src/infer/opaque_types.rs b/compiler/rustc_infer/src/infer/opaque_types.rs
index e579afbf3..a982f11f7 100644
--- a/compiler/rustc_infer/src/infer/opaque_types.rs
+++ b/compiler/rustc_infer/src/infer/opaque_types.rs
@@ -1,13 +1,16 @@
+use crate::errors::OpaqueHiddenTypeDiag;
use crate::infer::{DefiningAnchor, InferCtxt, InferOk};
use crate::traits;
+use hir::def::DefKind;
use hir::def_id::{DefId, LocalDefId};
use hir::{HirId, OpaqueTyOrigin};
use rustc_data_structures::sync::Lrc;
use rustc_data_structures::vec_map::VecMap;
use rustc_hir as hir;
use rustc_middle::traits::ObligationCause;
+use rustc_middle::ty::error::{ExpectedFound, TypeError};
use rustc_middle::ty::fold::BottomUpFolder;
-use rustc_middle::ty::subst::{GenericArgKind, Subst};
+use rustc_middle::ty::GenericArgKind;
use rustc_middle::ty::{
self, OpaqueHiddenType, OpaqueTypeKey, Ty, TyCtxt, TypeFoldable, TypeSuperVisitable,
TypeVisitable, TypeVisitor,
@@ -39,7 +42,7 @@ pub struct OpaqueTypeDecl<'tcx> {
pub origin: hir::OpaqueTyOrigin,
}
-impl<'a, 'tcx> InferCtxt<'a, 'tcx> {
+impl<'tcx> InferCtxt<'tcx> {
/// This is a backwards compatibility hack to prevent breaking changes from
/// lazy TAIT around RPIT handling.
pub fn replace_opaque_types_with_inference_vars<T: TypeFoldable<'tcx>>(
@@ -72,7 +75,7 @@ impl<'a, 'tcx> InferCtxt<'a, 'tcx> {
// for opaque types, and then use that kind to fix the spans for type errors
// that we see later on.
let ty_var = self.next_ty_var(TypeVariableOrigin {
- kind: TypeVariableOriginKind::TypeInference,
+ kind: TypeVariableOriginKind::OpaqueTypeInference(def_id),
span,
});
obligations.extend(
@@ -100,7 +103,7 @@ impl<'a, 'tcx> InferCtxt<'a, 'tcx> {
return Ok(InferOk { value: (), obligations: vec![] });
}
let (a, b) = if a_is_expected { (a, b) } else { (b, a) };
- let process = |a: Ty<'tcx>, b: Ty<'tcx>| match *a.kind() {
+ let process = |a: Ty<'tcx>, b: Ty<'tcx>, a_is_expected| match *a.kind() {
ty::Opaque(def_id, substs) if def_id.is_local() => {
let def_id = def_id.expect_local();
let origin = match self.defining_use_anchor {
@@ -153,22 +156,11 @@ impl<'a, 'tcx> InferCtxt<'a, 'tcx> {
if let Some(OpaqueTyOrigin::TyAlias) =
did2.as_local().and_then(|did2| self.opaque_type_origin(did2, cause.span))
{
- self.tcx
- .sess
- .struct_span_err(
- cause.span,
- "opaque type's hidden type cannot be another opaque type from the same scope",
- )
- .span_label(cause.span, "one of the two opaque types used here has to be outside its defining scope")
- .span_note(
- self.tcx.def_span(def_id),
- "opaque type whose hidden type is being assigned",
- )
- .span_note(
- self.tcx.def_span(did2),
- "opaque type being used as hidden type",
- )
- .emit();
+ self.tcx.sess.emit_err(OpaqueHiddenTypeDiag {
+ span: cause.span,
+ hidden_type: self.tcx.def_span(did2),
+ opaque_type: self.tcx.def_span(def_id),
+ });
}
}
Some(self.register_hidden_type(
@@ -177,25 +169,18 @@ impl<'a, 'tcx> InferCtxt<'a, 'tcx> {
param_env,
b,
origin,
+ a_is_expected,
))
}
_ => None,
};
- if let Some(res) = process(a, b) {
+ if let Some(res) = process(a, b, true) {
res
- } else if let Some(res) = process(b, a) {
+ } else if let Some(res) = process(b, a, false) {
res
} else {
- // Rerun equality check, but this time error out due to
- // different types.
- match self.at(cause, param_env).define_opaque_types(false).eq(a, b) {
- Ok(_) => span_bug!(
- cause.span,
- "opaque types are never equal to anything but themselves: {:#?}",
- (a.kind(), b.kind())
- ),
- Err(e) => Err(e),
- }
+ let (a, b) = self.resolve_vars_if_possible((a, b));
+ Err(TypeError::Sorts(ExpectedFound::new(true, a, b)))
}
}
@@ -400,7 +385,7 @@ impl<'a, 'tcx> InferCtxt<'a, 'tcx> {
});
}
- #[instrument(skip(self), level = "trace")]
+ #[instrument(skip(self), level = "trace", ret)]
pub fn opaque_type_origin(&self, def_id: LocalDefId, span: Span) -> Option<OpaqueTyOrigin> {
let opaque_hir_id = self.tcx.hir().local_def_id_to_hir_id(def_id);
let parent_def_id = match self.defining_use_anchor {
@@ -431,16 +416,14 @@ impl<'a, 'tcx> InferCtxt<'a, 'tcx> {
in_definition_scope.then_some(*origin)
}
- #[instrument(skip(self), level = "trace")]
+ #[instrument(skip(self), level = "trace", ret)]
fn opaque_ty_origin_unchecked(&self, def_id: LocalDefId, span: Span) -> OpaqueTyOrigin {
- let origin = match self.tcx.hir().expect_item(def_id).kind {
+ match self.tcx.hir().expect_item(def_id).kind {
hir::ItemKind::OpaqueTy(hir::OpaqueTy { origin, .. }) => origin,
ref itemkind => {
span_bug!(span, "weird opaque type: {:?}, {:#?}", def_id, itemkind)
}
- };
- trace!(?origin);
- origin
+ }
}
}
@@ -530,15 +513,16 @@ impl UseKind {
}
}
-impl<'a, 'tcx> InferCtxt<'a, 'tcx> {
+impl<'tcx> InferCtxt<'tcx> {
#[instrument(skip(self), level = "debug")]
- pub fn register_hidden_type(
+ fn register_hidden_type(
&self,
opaque_type_key: OpaqueTypeKey<'tcx>,
cause: ObligationCause<'tcx>,
param_env: ty::ParamEnv<'tcx>,
hidden_ty: Ty<'tcx>,
origin: hir::OpaqueTyOrigin,
+ a_is_expected: bool,
) -> InferResult<'tcx, ()> {
let tcx = self.tcx;
let OpaqueTypeKey { def_id, substs } = opaque_type_key;
@@ -557,21 +541,24 @@ impl<'a, 'tcx> InferCtxt<'a, 'tcx> {
origin,
);
if let Some(prev) = prev {
- obligations = self.at(&cause, param_env).eq(prev, hidden_ty)?.obligations;
+ obligations =
+ self.at(&cause, param_env).eq_exp(a_is_expected, prev, hidden_ty)?.obligations;
}
let item_bounds = tcx.bound_explicit_item_bounds(def_id.to_def_id());
- for predicate in item_bounds.transpose_iter().map(|e| e.map_bound(|(p, _)| *p)) {
- debug!(?predicate);
- let predicate = predicate.subst(tcx, substs);
-
+ for (predicate, _) in item_bounds.subst_iter_copied(tcx, substs) {
let predicate = predicate.fold_with(&mut BottomUpFolder {
tcx,
ty_op: |ty| match *ty.kind() {
// We can't normalize associated types from `rustc_infer`,
// but we can eagerly register inference variables for them.
- ty::Projection(projection_ty) if !projection_ty.has_escaping_bound_vars() => {
+ // FIXME(RPITIT): Don't replace RPITITs with inference vars.
+ ty::Projection(projection_ty)
+ if !projection_ty.has_escaping_bound_vars()
+ && tcx.def_kind(projection_ty.item_def_id)
+ != DefKind::ImplTraitPlaceholder =>
+ {
self.infer_projection(
param_env,
projection_ty,
@@ -587,6 +574,12 @@ impl<'a, 'tcx> InferCtxt<'a, 'tcx> {
{
hidden_ty
}
+ // FIXME(RPITIT): This can go away when we move to associated types
+ ty::Projection(proj)
+ if def_id.to_def_id() == proj.item_def_id && substs == proj.substs =>
+ {
+ hidden_ty
+ }
_ => ty,
},
lt_op: |lt| lt,
@@ -635,7 +628,7 @@ fn may_define_opaque_type(tcx: TyCtxt<'_>, def_id: LocalDefId, opaque_hir_id: hi
let scope = tcx.hir().get_defining_scope(opaque_hir_id);
// We walk up the node tree until we hit the root or the scope of the opaque type.
while hir_id != scope && hir_id != hir::CRATE_HIR_ID {
- hir_id = tcx.hir().local_def_id_to_hir_id(tcx.hir().get_parent_item(hir_id));
+ hir_id = tcx.hir().get_parent_item(hir_id).into();
}
// Syntactically, we are allowed to define the concrete type if:
let res = hir_id == scope;
diff --git a/compiler/rustc_infer/src/infer/opaque_types/table.rs b/compiler/rustc_infer/src/infer/opaque_types/table.rs
index fb12da0cc..4d124554a 100644
--- a/compiler/rustc_infer/src/infer/opaque_types/table.rs
+++ b/compiler/rustc_infer/src/infer/opaque_types/table.rs
@@ -29,7 +29,7 @@ impl<'tcx> OpaqueTypeStorage<'tcx> {
}
}
- #[instrument(level = "debug")]
+ #[instrument(level = "debug", ret)]
pub fn take_opaque_types(&mut self) -> OpaqueTypeMap<'tcx> {
std::mem::take(&mut self.opaque_types)
}
diff --git a/compiler/rustc_infer/src/infer/outlives/components.rs b/compiler/rustc_infer/src/infer/outlives/components.rs
index b2d7f4a66..14ee9f051 100644
--- a/compiler/rustc_infer/src/infer/outlives/components.rs
+++ b/compiler/rustc_infer/src/infer/outlives/components.rs
@@ -3,8 +3,9 @@
// RFC for reference.
use rustc_data_structures::sso::SsoHashSet;
+use rustc_hir::def_id::DefId;
use rustc_middle::ty::subst::{GenericArg, GenericArgKind};
-use rustc_middle::ty::{self, Ty, TyCtxt, TypeVisitable};
+use rustc_middle::ty::{self, SubstsRef, Ty, TyCtxt, TypeVisitable};
use smallvec::{smallvec, SmallVec};
#[derive(Debug)]
@@ -45,6 +46,8 @@ pub enum Component<'tcx> {
// them. This gives us room to improve the regionck reasoning in
// the future without breaking backwards compat.
EscapingProjection(Vec<Component<'tcx>>),
+
+ Opaque(DefId, SubstsRef<'tcx>),
}
/// Push onto `out` all the things that must outlive `'a` for the condition
@@ -120,6 +123,17 @@ fn compute_components<'tcx>(
out.push(Component::Param(p));
}
+ // Ignore lifetimes found in opaque types. Opaque types can
+ // have lifetimes in their substs which their hidden type doesn't
+ // actually use. If we inferred that an opaque type is outlived by
+ // its parameter lifetimes, then we could prove that any lifetime
+ // outlives any other lifetime, which is unsound.
+ // See https://github.com/rust-lang/rust/issues/84305 for
+ // more details.
+ ty::Opaque(def_id, substs) => {
+ out.push(Component::Opaque(def_id, substs));
+ },
+
// For projections, we prefer to generate an obligation like
// `<P0 as Trait<P1...Pn>>::Foo: 'a`, because this gives the
// regionck more ways to prove that it holds. However,
@@ -168,7 +182,6 @@ fn compute_components<'tcx>(
ty::Float(..) | // OutlivesScalar
ty::Never | // ...
ty::Adt(..) | // OutlivesNominalType
- ty::Opaque(..) | // OutlivesNominalType (ish)
ty::Foreign(..) | // OutlivesNominalType
ty::Str | // OutlivesScalar (ish)
ty::Slice(..) | // ...
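A hedged illustration of the kind of type the new `Component::Opaque` arm is about, per the comment and rust-lang/rust#84305 cited above (a nightly-only fragment behind the TAIT feature gate; the names are illustrative):

#![feature(type_alias_impl_trait)]

// `Opaque<'a>` carries `'a` in its substs even though the hidden type
// (`u32`) never uses it. As the note above explains, inferring outlives
// facts from such substs could wrongly prove one lifetime outlives
// another, so the substs are now pushed as `Component::Opaque` and
// handled separately instead of via the nominal-type rule.
type Opaque<'a> = impl Sized;

fn define<'a>() -> Opaque<'a> {
    0u32
}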
diff --git a/compiler/rustc_infer/src/infer/outlives/env.rs b/compiler/rustc_infer/src/infer/outlives/env.rs
index b2decd64f..33543135d 100644
--- a/compiler/rustc_infer/src/infer/outlives/env.rs
+++ b/compiler/rustc_infer/src/infer/outlives/env.rs
@@ -2,6 +2,7 @@ use crate::infer::free_regions::FreeRegionMap;
use crate::infer::{GenericKind, InferCtxt};
use crate::traits::query::OutlivesBound;
use rustc_data_structures::fx::FxIndexSet;
+use rustc_data_structures::transitive_relation::TransitiveRelationBuilder;
use rustc_middle::ty::{self, ReEarlyBound, ReFree, ReVar, Region};
use super::explicit_outlives_bounds;
@@ -51,23 +52,49 @@ pub struct OutlivesEnvironment<'tcx> {
region_bound_pairs: RegionBoundPairs<'tcx>,
}
+/// Builder of OutlivesEnvironment.
+#[derive(Debug)]
+struct OutlivesEnvironmentBuilder<'tcx> {
+ param_env: ty::ParamEnv<'tcx>,
+ region_relation: TransitiveRelationBuilder<Region<'tcx>>,
+ region_bound_pairs: RegionBoundPairs<'tcx>,
+}
+
/// "Region-bound pairs" tracks outlives relations that are known to
/// be true, either because of explicit where-clauses like `T: 'a` or
/// because of implied bounds.
pub type RegionBoundPairs<'tcx> =
FxIndexSet<ty::OutlivesPredicate<GenericKind<'tcx>, Region<'tcx>>>;
-impl<'a, 'tcx> OutlivesEnvironment<'tcx> {
- pub fn new(param_env: ty::ParamEnv<'tcx>) -> Self {
- let mut env = OutlivesEnvironment {
+impl<'tcx> OutlivesEnvironment<'tcx> {
+ /// Create a builder using `ParamEnv` and add explicit outlives bounds into it.
+ fn builder(param_env: ty::ParamEnv<'tcx>) -> OutlivesEnvironmentBuilder<'tcx> {
+ let mut builder = OutlivesEnvironmentBuilder {
param_env,
- free_region_map: Default::default(),
+ region_relation: Default::default(),
region_bound_pairs: Default::default(),
};
- env.add_outlives_bounds(None, explicit_outlives_bounds(param_env));
+ builder.add_outlives_bounds(None, explicit_outlives_bounds(param_env));
- env
+ builder
+ }
+
+ #[inline]
+ /// Create a new `OutlivesEnvironment` without extra outlives bounds.
+ pub fn new(param_env: ty::ParamEnv<'tcx>) -> Self {
+ Self::builder(param_env).build()
+ }
+
+ /// Create a new `OutlivesEnvironment` with extra outlives bounds.
+ pub fn with_bounds(
+ param_env: ty::ParamEnv<'tcx>,
+ infcx: Option<&InferCtxt<'tcx>>,
+ extra_bounds: impl IntoIterator<Item = OutlivesBound<'tcx>>,
+ ) -> Self {
+ let mut builder = Self::builder(param_env);
+ builder.add_outlives_bounds(infcx, extra_bounds);
+ builder.build()
}
/// Borrows current value of the `free_region_map`.
@@ -79,6 +106,18 @@ impl<'a, 'tcx> OutlivesEnvironment<'tcx> {
pub fn region_bound_pairs(&self) -> &RegionBoundPairs<'tcx> {
&self.region_bound_pairs
}
+}
+
+impl<'tcx> OutlivesEnvironmentBuilder<'tcx> {
+ #[inline]
+ #[instrument(level = "debug")]
+ fn build(self) -> OutlivesEnvironment<'tcx> {
+ OutlivesEnvironment {
+ param_env: self.param_env,
+ free_region_map: FreeRegionMap { relation: self.region_relation.freeze() },
+ region_bound_pairs: self.region_bound_pairs,
+ }
+ }
/// Processes outlives bounds that are known to hold, whether from implied or other sources.
///
@@ -86,11 +125,8 @@ impl<'a, 'tcx> OutlivesEnvironment<'tcx> {
/// contain inference variables, it must be supplied, in which
/// case we will register "givens" on the inference context. (See
/// `RegionConstraintData`.)
- pub fn add_outlives_bounds<I>(
- &mut self,
- infcx: Option<&InferCtxt<'a, 'tcx>>,
- outlives_bounds: I,
- ) where
+ fn add_outlives_bounds<I>(&mut self, infcx: Option<&InferCtxt<'tcx>>, outlives_bounds: I)
+ where
I: IntoIterator<Item = OutlivesBound<'tcx>>,
{
// Record relationships such as `T:'x` that don't go into the
@@ -106,6 +142,10 @@ impl<'a, 'tcx> OutlivesEnvironment<'tcx> {
self.region_bound_pairs
.insert(ty::OutlivesPredicate(GenericKind::Projection(projection_b), r_a));
}
+ OutlivesBound::RegionSubOpaque(r_a, def_id, substs) => {
+ self.region_bound_pairs
+ .insert(ty::OutlivesPredicate(GenericKind::Opaque(def_id, substs), r_a));
+ }
OutlivesBound::RegionSubRegion(r_a, r_b) => {
if let (ReEarlyBound(_) | ReFree(_), ReVar(vid_b)) = (r_a.kind(), r_b.kind()) {
infcx
@@ -122,7 +162,9 @@ impl<'a, 'tcx> OutlivesEnvironment<'tcx> {
// system to be more general and to make use
// of *every* relationship that arises here,
// but presently we do not.)
- self.free_region_map.relate_regions(r_a, r_b);
+ if r_a.is_free_or_static() && r_b.is_free() {
+ self.region_relation.add(r_a, r_b)
+ }
}
}
}
diff --git a/compiler/rustc_infer/src/infer/outlives/mod.rs b/compiler/rustc_infer/src/infer/outlives/mod.rs
index 2a085288f..2d19d1823 100644
--- a/compiler/rustc_infer/src/infer/outlives/mod.rs
+++ b/compiler/rustc_infer/src/infer/outlives/mod.rs
@@ -9,7 +9,7 @@ pub mod verify;
use rustc_middle::traits::query::OutlivesBound;
use rustc_middle::ty;
-#[instrument(level = "debug", skip(param_env))]
+#[instrument(level = "debug", skip(param_env), ret)]
pub fn explicit_outlives_bounds<'tcx>(
param_env: ty::ParamEnv<'tcx>,
) -> impl Iterator<Item = OutlivesBound<'tcx>> + 'tcx {
diff --git a/compiler/rustc_infer/src/infer/outlives/obligations.rs b/compiler/rustc_infer/src/infer/outlives/obligations.rs
index ad052f58c..6ca884799 100644
--- a/compiler/rustc_infer/src/infer/outlives/obligations.rs
+++ b/compiler/rustc_infer/src/infer/outlives/obligations.rs
@@ -68,12 +68,14 @@ use crate::infer::{
};
use crate::traits::{ObligationCause, ObligationCauseCode};
use rustc_data_structures::undo_log::UndoLogs;
+use rustc_hir::def_id::DefId;
use rustc_hir::def_id::LocalDefId;
+use rustc_middle::mir::ConstraintCategory;
use rustc_middle::ty::subst::GenericArgKind;
-use rustc_middle::ty::{self, Region, Ty, TyCtxt, TypeVisitable};
+use rustc_middle::ty::{self, Region, SubstsRef, Ty, TyCtxt, TypeVisitable};
use smallvec::smallvec;
-impl<'cx, 'tcx> InferCtxt<'cx, 'tcx> {
+impl<'tcx> InferCtxt<'tcx> {
/// Registers that the given region obligation must be resolved
/// from within the scope of `body_id`. These regions are enqueued
/// and later processed by regionck, when full type information is
@@ -92,12 +94,14 @@ impl<'cx, 'tcx> InferCtxt<'cx, 'tcx> {
sub_region: Region<'tcx>,
cause: &ObligationCause<'tcx>,
) {
+ debug!(?sup_type, ?sub_region, ?cause);
let origin = SubregionOrigin::from_obligation_cause(cause, || {
infer::RelateParamBound(
cause.span,
sup_type,
match cause.code().peel_derives() {
- ObligationCauseCode::BindingObligation(_, span) => Some(*span),
+ ObligationCauseCode::BindingObligation(_, span)
+ | ObligationCauseCode::ExprBindingObligation(_, span, ..) => Some(*span),
_ => None,
},
)
@@ -161,7 +165,8 @@ impl<'cx, 'tcx> InferCtxt<'cx, 'tcx> {
let outlives =
&mut TypeOutlives::new(self, self.tcx, &region_bound_pairs, None, param_env);
- outlives.type_must_outlive(origin, sup_type, sub_region);
+ let category = origin.to_constraint_category();
+ outlives.type_must_outlive(origin, sup_type, sub_region, category);
}
}
@@ -178,7 +183,7 @@ impl<'cx, 'tcx> InferCtxt<'cx, 'tcx> {
outlives_env.param_env,
);
- self.resolve_regions_and_report_errors(generic_param_scope, outlives_env)
+ self.err_ctxt().resolve_regions_and_report_errors(generic_param_scope, outlives_env)
}
}
@@ -205,6 +210,7 @@ pub trait TypeOutlivesDelegate<'tcx> {
origin: SubregionOrigin<'tcx>,
a: ty::Region<'tcx>,
b: ty::Region<'tcx>,
+ constraint_category: ConstraintCategory<'tcx>,
);
fn push_verify(
@@ -247,19 +253,19 @@ where
/// - `origin`, the reason we need this constraint
/// - `ty`, the type `T`
/// - `region`, the region `'a`
+ #[instrument(level = "debug", skip(self))]
pub fn type_must_outlive(
&mut self,
origin: infer::SubregionOrigin<'tcx>,
ty: Ty<'tcx>,
region: ty::Region<'tcx>,
+ category: ConstraintCategory<'tcx>,
) {
- debug!("type_must_outlive(ty={:?}, region={:?}, origin={:?})", ty, region, origin);
-
assert!(!ty.has_escaping_bound_vars());
let mut components = smallvec![];
push_outlives_components(self.tcx, ty, &mut components);
- self.components_must_outlive(origin, &components, region);
+ self.components_must_outlive(origin, &components, region, category);
}
fn components_must_outlive(
@@ -267,21 +273,25 @@ where
origin: infer::SubregionOrigin<'tcx>,
components: &[Component<'tcx>],
region: ty::Region<'tcx>,
+ category: ConstraintCategory<'tcx>,
) {
for component in components.iter() {
let origin = origin.clone();
match component {
Component::Region(region1) => {
- self.delegate.push_sub_region_constraint(origin, region, *region1);
+ self.delegate.push_sub_region_constraint(origin, region, *region1, category);
}
Component::Param(param_ty) => {
self.param_ty_must_outlive(origin, region, *param_ty);
}
+ Component::Opaque(def_id, substs) => {
+ self.opaque_must_outlive(*def_id, substs, origin, region)
+ }
Component::Projection(projection_ty) => {
self.projection_must_outlive(origin, region, *projection_ty);
}
Component::EscapingProjection(subcomponents) => {
- self.components_must_outlive(origin, &subcomponents, region);
+ self.components_must_outlive(origin, &subcomponents, region, category);
}
Component::UnresolvedInferenceVariable(v) => {
// ignore this, we presume it will yield an error
@@ -308,17 +318,69 @@ where
);
let generic = GenericKind::Param(param_ty);
- let verify_bound = self.verify_bound.generic_bound(generic);
+ let verify_bound = self.verify_bound.param_bound(param_ty);
self.delegate.push_verify(origin, generic, region, verify_bound);
}
- #[tracing::instrument(level = "debug", skip(self))]
+ #[instrument(level = "debug", skip(self))]
+ fn opaque_must_outlive(
+ &mut self,
+ def_id: DefId,
+ substs: SubstsRef<'tcx>,
+ origin: infer::SubregionOrigin<'tcx>,
+ region: ty::Region<'tcx>,
+ ) {
+ self.generic_must_outlive(
+ origin,
+ region,
+ GenericKind::Opaque(def_id, substs),
+ def_id,
+ substs,
+ true,
+ |ty| match *ty.kind() {
+ ty::Opaque(def_id, substs) => (def_id, substs),
+ _ => bug!("expected only projection types from env, not {:?}", ty),
+ },
+ );
+ }
+
+ #[instrument(level = "debug", skip(self))]
fn projection_must_outlive(
&mut self,
origin: infer::SubregionOrigin<'tcx>,
region: ty::Region<'tcx>,
projection_ty: ty::ProjectionTy<'tcx>,
) {
+ self.generic_must_outlive(
+ origin,
+ region,
+ GenericKind::Projection(projection_ty),
+ projection_ty.item_def_id,
+ projection_ty.substs,
+ false,
+ |ty| match ty.kind() {
+ ty::Projection(projection_ty) => (projection_ty.item_def_id, projection_ty.substs),
+ _ => bug!("expected only projection types from env, not {:?}", ty),
+ },
+ );
+ }
+
+ #[instrument(level = "debug", skip(self, filter))]
+ fn generic_must_outlive(
+ &mut self,
+ origin: infer::SubregionOrigin<'tcx>,
+ region: ty::Region<'tcx>,
+ generic: GenericKind<'tcx>,
+ def_id: DefId,
+ substs: SubstsRef<'tcx>,
+ is_opaque: bool,
+ filter: impl Fn(Ty<'tcx>) -> (DefId, SubstsRef<'tcx>),
+ ) {
+ // An optimization for a common case with opaque types.
+ if substs.is_empty() {
+ return;
+ }
+
// This case is thorny for inference. The fundamental problem is
// that there are many cases where we have choice, and inference
// doesn't like choice (the current region inference in
@@ -337,16 +399,15 @@ where
// These are guaranteed to apply, no matter the inference
// results.
let trait_bounds: Vec<_> =
- self.verify_bound.projection_declared_bounds_from_trait(projection_ty).collect();
+ self.verify_bound.declared_region_bounds(def_id, substs).collect();
debug!(?trait_bounds);
// Compute the bounds we can derive from the environment. This
// is an "approximate" match -- in some cases, these bounds
// may not apply.
- let mut approx_env_bounds =
- self.verify_bound.projection_approx_declared_bounds_from_env(projection_ty);
- debug!("projection_must_outlive: approx_env_bounds={:?}", approx_env_bounds);
+ let mut approx_env_bounds = self.verify_bound.approx_declared_bounds_from_env(generic);
+ debug!(?approx_env_bounds);
// Remove outlives bounds that we get from the environment but
// which are also deducible from the trait. This arises (cc
@@ -360,14 +421,8 @@ where
// If the declaration is `trait Trait<'b> { type Item: 'b; }`, then `projection_declared_bounds_from_trait`
// will be invoked with `['b => ^1]` and so we will get `^1` returned.
let bound = bound_outlives.skip_binder();
- match *bound.0.kind() {
- ty::Projection(projection_ty) => self
- .verify_bound
- .projection_declared_bounds_from_trait(projection_ty)
- .all(|r| r != bound.1),
-
- _ => panic!("expected only projection types from env, not {:?}", bound.0),
- }
+ let (def_id, substs) = filter(bound.0);
+ self.verify_bound.declared_region_bounds(def_id, substs).all(|r| r != bound.1)
});
// If declared bounds list is empty, the only applicable rule is
@@ -384,23 +439,11 @@ where
// the problem is to add `T: 'r`, which isn't true. So, if there are no
// inference variables, we use a verify constraint instead of adding
// edges, which winds up enforcing the same condition.
- let needs_infer = projection_ty.needs_infer();
- if approx_env_bounds.is_empty() && trait_bounds.is_empty() && needs_infer {
- debug!("projection_must_outlive: no declared bounds");
-
- for k in projection_ty.substs {
- match k.unpack() {
- GenericArgKind::Lifetime(lt) => {
- self.delegate.push_sub_region_constraint(origin.clone(), region, lt);
- }
- GenericArgKind::Type(ty) => {
- self.type_must_outlive(origin.clone(), ty, region);
- }
- GenericArgKind::Const(_) => {
- // Const parameters don't impose constraints.
- }
- }
- }
+ let needs_infer = substs.needs_infer();
+ if approx_env_bounds.is_empty() && trait_bounds.is_empty() && (needs_infer || is_opaque) {
+ debug!("no declared bounds");
+
+ self.substs_must_outlive(substs, origin, region);
return;
}
@@ -430,9 +473,10 @@ where
.all(|b| b == Some(trait_bounds[0]))
{
let unique_bound = trait_bounds[0];
- debug!("projection_must_outlive: unique trait bound = {:?}", unique_bound);
- debug!("projection_must_outlive: unique declared bound appears in trait ref");
- self.delegate.push_sub_region_constraint(origin, region, unique_bound);
+ debug!(?unique_bound);
+ debug!("unique declared bound appears in trait ref");
+ let category = origin.to_constraint_category();
+ self.delegate.push_sub_region_constraint(origin, region, unique_bound, category);
return;
}
@@ -441,19 +485,51 @@ where
// projection outlive; in some cases, this may add insufficient
// edges into the inference graph, leading to inference failures
// even though a satisfactory solution exists.
- let generic = GenericKind::Projection(projection_ty);
- let verify_bound = self.verify_bound.generic_bound(generic);
+ let verify_bound = self.verify_bound.projection_opaque_bounds(
+ generic,
+ def_id,
+ substs,
+ &mut Default::default(),
+ );
debug!("projection_must_outlive: pushing {:?}", verify_bound);
self.delegate.push_verify(origin, generic, region, verify_bound);
}
+
+ fn substs_must_outlive(
+ &mut self,
+ substs: SubstsRef<'tcx>,
+ origin: infer::SubregionOrigin<'tcx>,
+ region: ty::Region<'tcx>,
+ ) {
+ let constraint = origin.to_constraint_category();
+ for k in substs {
+ match k.unpack() {
+ GenericArgKind::Lifetime(lt) => {
+ self.delegate.push_sub_region_constraint(
+ origin.clone(),
+ region,
+ lt,
+ constraint,
+ );
+ }
+ GenericArgKind::Type(ty) => {
+ self.type_must_outlive(origin.clone(), ty, region, constraint);
+ }
+ GenericArgKind::Const(_) => {
+ // Const parameters don't impose constraints.
+ }
+ }
+ }
+ }
}
-impl<'cx, 'tcx> TypeOutlivesDelegate<'tcx> for &'cx InferCtxt<'cx, 'tcx> {
+impl<'cx, 'tcx> TypeOutlivesDelegate<'tcx> for &'cx InferCtxt<'tcx> {
fn push_sub_region_constraint(
&mut self,
origin: SubregionOrigin<'tcx>,
a: ty::Region<'tcx>,
b: ty::Region<'tcx>,
+ _constraint_category: ConstraintCategory<'tcx>,
) {
self.sub_regions(origin, a, b)
}
diff --git a/compiler/rustc_infer/src/infer/outlives/test_type_match.rs b/compiler/rustc_infer/src/infer/outlives/test_type_match.rs
index 772e297b7..a5c21f0fb 100644
--- a/compiler/rustc_infer/src/infer/outlives/test_type_match.rs
+++ b/compiler/rustc_infer/src/infer/outlives/test_type_match.rs
@@ -34,7 +34,7 @@ use crate::infer::region_constraints::VerifyIfEq;
/// like are used. This is a particular challenge since this function is invoked
/// very late in inference and hence cannot make use of the normal inference
/// machinery.
-#[tracing::instrument(level = "debug", skip(tcx, param_env))]
+#[instrument(level = "debug", skip(tcx, param_env))]
pub fn extract_verify_if_eq<'tcx>(
tcx: TyCtxt<'tcx>,
param_env: ty::ParamEnv<'tcx>,
@@ -71,7 +71,7 @@ pub fn extract_verify_if_eq<'tcx>(
}
/// True if a (potentially higher-ranked) outlives
-#[tracing::instrument(level = "debug", skip(tcx, param_env))]
+#[instrument(level = "debug", skip(tcx, param_env))]
pub(super) fn can_match_erased_ty<'tcx>(
tcx: TyCtxt<'tcx>,
param_env: ty::ParamEnv<'tcx>,
@@ -110,7 +110,7 @@ impl<'tcx> Match<'tcx> {
/// Binds the pattern variable `br` to `value`; returns an `Err` if the pattern
/// is already bound to a different value.
- #[tracing::instrument(level = "debug", skip(self))]
+ #[instrument(level = "debug", skip(self))]
fn bind(
&mut self,
br: ty::BoundRegion,
@@ -174,7 +174,14 @@ impl<'tcx> TypeRelation<'tcx> for Match<'tcx> {
#[instrument(skip(self), level = "debug")]
fn tys(&mut self, pattern: Ty<'tcx>, value: Ty<'tcx>) -> RelateResult<'tcx, Ty<'tcx>> {
- if pattern == value { Ok(pattern) } else { relate::super_relate_tys(self, pattern, value) }
+ if let ty::Error(_) = pattern.kind() {
+ // Unlike normal `TypeRelation` rules, `ty::Error` does not equal any type.
+ self.no_match()
+ } else if pattern == value {
+ Ok(pattern)
+ } else {
+ relate::super_relate_tys(self, pattern, value)
+ }
}
#[instrument(skip(self), level = "debug")]
diff --git a/compiler/rustc_infer/src/infer/outlives/verify.rs b/compiler/rustc_infer/src/infer/outlives/verify.rs
index c7d7ef40d..f470b2eb8 100644
--- a/compiler/rustc_infer/src/infer/outlives/verify.rs
+++ b/compiler/rustc_infer/src/infer/outlives/verify.rs
@@ -2,11 +2,10 @@ use crate::infer::outlives::components::{compute_components_recursive, Component
use crate::infer::outlives::env::RegionBoundPairs;
use crate::infer::region_constraints::VerifyIfEq;
use crate::infer::{GenericKind, VerifyBound};
-use rustc_data_structures::captures::Captures;
use rustc_data_structures::sso::SsoHashSet;
use rustc_hir::def_id::DefId;
-use rustc_middle::ty::subst::{GenericArg, Subst};
-use rustc_middle::ty::{self, EarlyBinder, OutlivesPredicate, Ty, TyCtxt};
+use rustc_middle::ty::GenericArg;
+use rustc_middle::ty::{self, EarlyBinder, OutlivesPredicate, SubstsRef, Ty, TyCtxt};
use smallvec::smallvec;
@@ -38,25 +37,13 @@ impl<'cx, 'tcx> VerifyBoundCx<'cx, 'tcx> {
Self { tcx, region_bound_pairs, implicit_region_bound, param_env }
}
- /// Returns a "verify bound" that encodes what we know about
- /// `generic` and the regions it outlives.
- pub fn generic_bound(&self, generic: GenericKind<'tcx>) -> VerifyBound<'tcx> {
- let mut visited = SsoHashSet::new();
- match generic {
- GenericKind::Param(param_ty) => self.param_bound(param_ty),
- GenericKind::Projection(projection_ty) => {
- self.projection_bound(projection_ty, &mut visited)
- }
- }
- }
-
- fn param_bound(&self, param_ty: ty::ParamTy) -> VerifyBound<'tcx> {
- debug!("param_bound(param_ty={:?})", param_ty);
-
+ #[instrument(level = "debug", skip(self))]
+ pub fn param_bound(&self, param_ty: ty::ParamTy) -> VerifyBound<'tcx> {
// Start with anything like `T: 'a` we can scrape from the
// environment. If the environment contains something like
// `for<'a> T: 'a`, then we know that `T` outlives everything.
let declared_bounds_from_env = self.declared_generic_bounds_from_env(param_ty);
+ debug!(?declared_bounds_from_env);
let mut param_bounds = vec![];
for declared_bound in declared_bounds_from_env {
let bound_region = declared_bound.map_bound(|outlives| outlives.1);
@@ -65,6 +52,7 @@ impl<'cx, 'tcx> VerifyBoundCx<'cx, 'tcx> {
param_bounds.push(VerifyBound::OutlivedBy(region));
} else {
// This is `for<'a> T: 'a`. This means that `T` outlives everything! All done here.
+ debug!("found that {param_ty:?} outlives any lifetime, returning empty vector");
return VerifyBound::AllBounds(vec![]);
}
}
@@ -72,6 +60,7 @@ impl<'cx, 'tcx> VerifyBoundCx<'cx, 'tcx> {
// Add in the default bound of fn body that applies to all in
// scope type parameters:
if let Some(r) = self.implicit_region_bound {
+ debug!("adding implicit region bound of {r:?}");
param_bounds.push(VerifyBound::OutlivedBy(r));
}
@@ -103,41 +92,31 @@ impl<'cx, 'tcx> VerifyBoundCx<'cx, 'tcx> {
/// the clause from the environment only applies if `'0 = 'a`,
/// which we don't know yet. But we would still include `'b` in
/// this list.
- pub fn projection_approx_declared_bounds_from_env(
+ pub fn approx_declared_bounds_from_env(
&self,
- projection_ty: ty::ProjectionTy<'tcx>,
+ generic: GenericKind<'tcx>,
) -> Vec<ty::Binder<'tcx, ty::OutlivesPredicate<Ty<'tcx>, ty::Region<'tcx>>>> {
- let projection_ty = GenericKind::Projection(projection_ty).to_ty(self.tcx);
+ let projection_ty = generic.to_ty(self.tcx);
let erased_projection_ty = self.tcx.erase_regions(projection_ty);
self.declared_generic_bounds_from_env_for_erased_ty(erased_projection_ty)
}
- /// Searches the where-clauses in scope for regions that
- /// `projection_ty` is known to outlive. Currently requires an
- /// exact match.
- pub fn projection_declared_bounds_from_trait(
+ #[instrument(level = "debug", skip(self, visited))]
+ pub fn projection_opaque_bounds(
&self,
- projection_ty: ty::ProjectionTy<'tcx>,
- ) -> impl Iterator<Item = ty::Region<'tcx>> + 'cx + Captures<'tcx> {
- self.declared_projection_bounds_from_trait(projection_ty)
- }
-
- pub fn projection_bound(
- &self,
- projection_ty: ty::ProjectionTy<'tcx>,
+ generic: GenericKind<'tcx>,
+ def_id: DefId,
+ substs: SubstsRef<'tcx>,
visited: &mut SsoHashSet<GenericArg<'tcx>>,
) -> VerifyBound<'tcx> {
- debug!("projection_bound(projection_ty={:?})", projection_ty);
-
- let projection_ty_as_ty =
- self.tcx.mk_projection(projection_ty.item_def_id, projection_ty.substs);
+ let generic_ty = generic.to_ty(self.tcx);
// Search the env for where clauses like `P: 'a`.
- let env_bounds = self
- .projection_approx_declared_bounds_from_env(projection_ty)
+ let projection_opaque_bounds = self
+ .approx_declared_bounds_from_env(generic)
.into_iter()
.map(|binder| {
- if let Some(ty::OutlivesPredicate(ty, r)) = binder.no_bound_vars() && ty == projection_ty_as_ty {
+ if let Some(ty::OutlivesPredicate(ty, r)) = binder.no_bound_vars() && ty == generic_ty {
// Micro-optimize if this is an exact match (this
// occurs often when there are no region variables
// involved).
@@ -147,21 +126,19 @@ impl<'cx, 'tcx> VerifyBoundCx<'cx, 'tcx> {
VerifyBound::IfEq(verify_if_eq_b)
}
});
-
// Extend with bounds that we can find from the trait.
- let trait_bounds = self
- .projection_declared_bounds_from_trait(projection_ty)
- .map(|r| VerifyBound::OutlivedBy(r));
+ let trait_bounds =
+ self.declared_region_bounds(def_id, substs).map(|r| VerifyBound::OutlivedBy(r));
// see the extensive comment in projection_must_outlive
let recursive_bound = {
let mut components = smallvec![];
- let ty = self.tcx.mk_projection(projection_ty.item_def_id, projection_ty.substs);
- compute_components_recursive(self.tcx, ty.into(), &mut components, visited);
+ compute_components_recursive(self.tcx, generic_ty.into(), &mut components, visited);
self.bound_from_components(&components, visited)
};
- VerifyBound::AnyBound(env_bounds.chain(trait_bounds).collect()).or(recursive_bound)
+ VerifyBound::AnyBound(projection_opaque_bounds.chain(trait_bounds).collect())
+ .or(recursive_bound)
}
fn bound_from_components(
@@ -193,7 +170,18 @@ impl<'cx, 'tcx> VerifyBoundCx<'cx, 'tcx> {
match *component {
Component::Region(lt) => VerifyBound::OutlivedBy(lt),
Component::Param(param_ty) => self.param_bound(param_ty),
- Component::Projection(projection_ty) => self.projection_bound(projection_ty, visited),
+ Component::Opaque(did, substs) => self.projection_opaque_bounds(
+ GenericKind::Opaque(did, substs),
+ did,
+ substs,
+ visited,
+ ),
+ Component::Projection(projection_ty) => self.projection_opaque_bounds(
+ GenericKind::Projection(projection_ty),
+ projection_ty.item_def_id,
+ projection_ty.substs,
+ visited,
+ ),
Component::EscapingProjection(ref components) => {
self.bound_from_components(components, visited)
}
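// Illustration (hedged sketch, not part of the patch): the kind of obligation the new
// `Component::Opaque` arm handles. For a return-position `impl Trait`, the declared
// bound `+ 'a` is an item bound on the opaque type, which is the kind of bound
// `declared_region_bounds` reads back via `item_bounds` above.
fn borrow_wrapper<'a, T: 'a>(x: &'a T) -> impl Sized + 'a {
    // The hidden type is `&'a T`; the opaque type is known to outlive `'a`
    // because `'a` appears in its declared bounds.
    x
}

fn main() {
    let n = 7;
    let _wrapped = borrow_wrapper(&n);
}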
@@ -291,30 +279,6 @@ impl<'cx, 'tcx> VerifyBoundCx<'cx, 'tcx> {
/// }
/// ```
///
- /// then this function would return `'x`. This is subject to the
- /// limitations around higher-ranked bounds described in
- /// `region_bounds_declared_on_associated_item`.
- fn declared_projection_bounds_from_trait(
- &self,
- projection_ty: ty::ProjectionTy<'tcx>,
- ) -> impl Iterator<Item = ty::Region<'tcx>> + 'cx + Captures<'tcx> {
- debug!("projection_bounds(projection_ty={:?})", projection_ty);
- let tcx = self.tcx;
- self.region_bounds_declared_on_associated_item(projection_ty.item_def_id)
- .map(move |r| EarlyBinder(r).subst(tcx, projection_ty.substs))
- }
-
- /// Given the `DefId` of an associated item, returns any region
- /// bounds attached to that associated item from the trait definition.
- ///
- /// For example:
- ///
- /// ```rust
- /// trait Foo<'a> {
- /// type Bar: 'a;
- /// }
- /// ```
- ///
/// If we were given the `DefId` of `Foo::Bar`, we would return
/// `'a`. You could then apply the substitutions from the
/// projection to convert this into your namespace. This also
@@ -334,17 +298,20 @@ impl<'cx, 'tcx> VerifyBoundCx<'cx, 'tcx> {
///
/// This is for simplicity, and because we are not really smart
/// enough to cope with such bounds anywhere.
- fn region_bounds_declared_on_associated_item(
+ pub fn declared_region_bounds(
&self,
- assoc_item_def_id: DefId,
+ def_id: DefId,
+ substs: SubstsRef<'tcx>,
) -> impl Iterator<Item = ty::Region<'tcx>> {
let tcx = self.tcx;
- let bounds = tcx.item_bounds(assoc_item_def_id);
+ let bounds = tcx.item_bounds(def_id);
+ trace!("{:#?}", bounds);
bounds
.into_iter()
.filter_map(|p| p.to_opt_type_outlives())
.filter_map(|p| p.no_bound_vars())
.map(|b| b.1)
+ .map(move |r| EarlyBinder(r).subst(tcx, substs))
}
/// Searches through a predicate list for a predicate `T: 'a`.
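// Illustration (sketch, not part of the patch): the `trait Foo<'a> { type Bar: 'a; }`
// example from the doc comment above, written out in user code. The `Bar: 'a` bound
// declared on the associated type is the kind of bound `declared_region_bounds`
// returns, and it is what lets `T::Bar` be treated as outliving `'a` here without
// any extra where-clause.
trait Foo<'a> {
    type Bar: 'a;
}

impl<'a> Foo<'a> for () {
    type Bar = &'a str;
}

fn assert_outlives<'a, T: Foo<'a>>(bar: &'a T::Bar) -> &'a T::Bar {
    // `&'a T::Bar` is well-formed only if `T::Bar: 'a`, which follows from the
    // bound declared on the associated type in the trait definition.
    bar
}

fn main() {
    let owned = String::from("hi");
    let s: &str = owned.as_str();
    let r = assert_outlives::<()>(&s);
    println!("{r}");
}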
diff --git a/compiler/rustc_infer/src/infer/projection.rs b/compiler/rustc_infer/src/infer/projection.rs
index b45a6514d..9f12bc972 100644
--- a/compiler/rustc_infer/src/infer/projection.rs
+++ b/compiler/rustc_infer/src/infer/projection.rs
@@ -6,7 +6,7 @@ use crate::traits::{Obligation, PredicateObligation};
use super::type_variable::{TypeVariableOrigin, TypeVariableOriginKind};
use super::InferCtxt;
-impl<'a, 'tcx> InferCtxt<'a, 'tcx> {
+impl<'tcx> InferCtxt<'tcx> {
/// Instead of normalizing an associated type projection,
/// this function generates an inference variable and registers
/// an obligation that this inference variable must be the result
diff --git a/compiler/rustc_infer/src/infer/region_constraints/leak_check.rs b/compiler/rustc_infer/src/infer/region_constraints/leak_check.rs
index 397efe6ee..90858e307 100644
--- a/compiler/rustc_infer/src/infer/region_constraints/leak_check.rs
+++ b/compiler/rustc_infer/src/infer/region_constraints/leak_check.rs
@@ -66,7 +66,7 @@ impl<'tcx> RegionConstraintCollector<'_, 'tcx> {
tcx: TyCtxt<'tcx>,
overly_polymorphic: bool,
max_universe: ty::UniverseIndex,
- snapshot: &CombinedSnapshot<'_, 'tcx>,
+ snapshot: &CombinedSnapshot<'tcx>,
) -> RelateResult<'tcx, ()> {
debug!(
"leak_check(max_universe={:?}, snapshot.universe={:?}, overly_polymorphic={:?})",
diff --git a/compiler/rustc_infer/src/infer/region_constraints/mod.rs b/compiler/rustc_infer/src/infer/region_constraints/mod.rs
index 0d4472a1c..67b3da687 100644
--- a/compiler/rustc_infer/src/infer/region_constraints/mod.rs
+++ b/compiler/rustc_infer/src/infer/region_constraints/mod.rs
@@ -12,8 +12,10 @@ use rustc_data_structures::intern::Interned;
use rustc_data_structures::sync::Lrc;
use rustc_data_structures::undo_log::UndoLogs;
use rustc_data_structures::unify as ut;
+use rustc_hir::def_id::DefId;
use rustc_index::vec::IndexVec;
use rustc_middle::infer::unify_key::{RegionVidKey, UnifiedRegion};
+use rustc_middle::ty::subst::SubstsRef;
use rustc_middle::ty::ReStatic;
use rustc_middle::ty::{self, Ty, TyCtxt};
use rustc_middle::ty::{ReLateBound, ReVar};
@@ -168,6 +170,7 @@ pub struct Verify<'tcx> {
pub enum GenericKind<'tcx> {
Param(ty::ParamTy),
Projection(ty::ProjectionTy<'tcx>),
+ Opaque(DefId, SubstsRef<'tcx>),
}
/// Describes the things that some `GenericKind` value `G` is known to
@@ -187,7 +190,7 @@ pub enum GenericKind<'tcx> {
/// }
/// ```
/// This is described with an `AnyRegion('a, 'b)` node.
-#[derive(Debug, Clone)]
+#[derive(Debug, Clone, TypeFoldable, TypeVisitable)]
pub enum VerifyBound<'tcx> {
/// See [`VerifyIfEq`] docs
IfEq(ty::Binder<'tcx, VerifyIfEq<'tcx>>),
@@ -426,21 +429,21 @@ impl<'tcx> RegionConstraintCollector<'_, 'tcx> {
data
}
- pub fn data(&self) -> &RegionConstraintData<'tcx> {
+ pub(super) fn data(&self) -> &RegionConstraintData<'tcx> {
&self.data
}
- pub fn start_snapshot(&mut self) -> RegionSnapshot {
+ pub(super) fn start_snapshot(&mut self) -> RegionSnapshot {
debug!("RegionConstraintCollector: start_snapshot");
RegionSnapshot { any_unifications: self.any_unifications }
}
- pub fn rollback_to(&mut self, snapshot: RegionSnapshot) {
+ pub(super) fn rollback_to(&mut self, snapshot: RegionSnapshot) {
debug!("RegionConstraintCollector: rollback_to({:?})", snapshot);
self.any_unifications = snapshot.any_unifications;
}
- pub fn new_region_var(
+ pub(super) fn new_region_var(
&mut self,
universe: ty::UniverseIndex,
origin: RegionVariableOrigin,
@@ -455,12 +458,12 @@ impl<'tcx> RegionConstraintCollector<'_, 'tcx> {
}
/// Returns the universe for the given variable.
- pub fn var_universe(&self, vid: RegionVid) -> ty::UniverseIndex {
+ pub(super) fn var_universe(&self, vid: RegionVid) -> ty::UniverseIndex {
self.var_infos[vid].universe
}
/// Returns the origin for the given variable.
- pub fn var_origin(&self, vid: RegionVid) -> RegionVariableOrigin {
+ pub(super) fn var_origin(&self, vid: RegionVid) -> RegionVariableOrigin {
self.var_infos[vid].origin
}
@@ -492,7 +495,7 @@ impl<'tcx> RegionConstraintCollector<'_, 'tcx> {
self.undo_log.push(AddVerify(index));
}
- pub fn add_given(&mut self, sub: Region<'tcx>, sup: ty::RegionVid) {
+ pub(super) fn add_given(&mut self, sub: Region<'tcx>, sup: ty::RegionVid) {
// cannot add givens once regions are resolved
if self.data.givens.insert((sub, sup)) {
debug!("add_given({:?} <= {:?})", sub, sup);
@@ -501,7 +504,7 @@ impl<'tcx> RegionConstraintCollector<'_, 'tcx> {
}
}
- pub fn make_eqregion(
+ pub(super) fn make_eqregion(
&mut self,
origin: SubregionOrigin<'tcx>,
sub: Region<'tcx>,
@@ -530,7 +533,7 @@ impl<'tcx> RegionConstraintCollector<'_, 'tcx> {
}
}
- pub fn member_constraint(
+ pub(super) fn member_constraint(
&mut self,
key: ty::OpaqueTypeKey<'tcx>,
definition_span: Span,
@@ -554,7 +557,7 @@ impl<'tcx> RegionConstraintCollector<'_, 'tcx> {
}
#[instrument(skip(self, origin), level = "debug")]
- pub fn make_subregion(
+ pub(super) fn make_subregion(
&mut self,
origin: SubregionOrigin<'tcx>,
sub: Region<'tcx>,
@@ -585,7 +588,7 @@ impl<'tcx> RegionConstraintCollector<'_, 'tcx> {
}
}
- pub fn verify_generic_bound(
+ pub(super) fn verify_generic_bound(
&mut self,
origin: SubregionOrigin<'tcx>,
kind: GenericKind<'tcx>,
@@ -595,7 +598,7 @@ impl<'tcx> RegionConstraintCollector<'_, 'tcx> {
self.add_verify(Verify { kind, origin, region: sub, bound });
}
- pub fn lub_regions(
+ pub(super) fn lub_regions(
&mut self,
tcx: TyCtxt<'tcx>,
origin: SubregionOrigin<'tcx>,
@@ -613,7 +616,7 @@ impl<'tcx> RegionConstraintCollector<'_, 'tcx> {
}
}
- pub fn glb_regions(
+ pub(super) fn glb_regions(
&mut self,
tcx: TyCtxt<'tcx>,
origin: SubregionOrigin<'tcx>,
@@ -634,7 +637,7 @@ impl<'tcx> RegionConstraintCollector<'_, 'tcx> {
}
/// Resolves the passed RegionVid to the root RegionVid in the unification table
- pub fn opportunistic_resolve_var(&mut self, rid: ty::RegionVid) -> ty::RegionVid {
+ pub(super) fn opportunistic_resolve_var(&mut self, rid: ty::RegionVid) -> ty::RegionVid {
self.unification_table().find(rid).vid
}
@@ -699,7 +702,6 @@ impl<'tcx> RegionConstraintCollector<'_, 'tcx> {
ty::ReStatic | ty::ReErased | ty::ReFree(..) | ty::ReEarlyBound(..) => {
ty::UniverseIndex::ROOT
}
- ty::ReEmpty(ui) => ui,
ty::RePlaceholder(placeholder) => placeholder.universe,
ty::ReVar(vid) => self.var_universe(vid),
ty::ReLateBound(..) => bug!("universe(): encountered bound region {:?}", region),
@@ -748,6 +750,9 @@ impl<'tcx> fmt::Debug for GenericKind<'tcx> {
match *self {
GenericKind::Param(ref p) => write!(f, "{:?}", p),
GenericKind::Projection(ref p) => write!(f, "{:?}", p),
+ GenericKind::Opaque(def_id, substs) => ty::tls::with(|tcx| {
+ write!(f, "{}", tcx.def_path_str_with_substs(def_id, tcx.lift(substs).unwrap()))
+ }),
}
}
}
@@ -757,6 +762,9 @@ impl<'tcx> fmt::Display for GenericKind<'tcx> {
match *self {
GenericKind::Param(ref p) => write!(f, "{}", p),
GenericKind::Projection(ref p) => write!(f, "{}", p),
+ GenericKind::Opaque(def_id, substs) => ty::tls::with(|tcx| {
+ write!(f, "{}", tcx.def_path_str_with_substs(def_id, tcx.lift(substs).unwrap()))
+ }),
}
}
}
@@ -766,6 +774,7 @@ impl<'tcx> GenericKind<'tcx> {
match *self {
GenericKind::Param(ref p) => p.to_ty(tcx),
GenericKind::Projection(ref p) => tcx.mk_projection(p.item_def_id, p.substs),
+ GenericKind::Opaque(def_id, substs) => tcx.mk_opaque(def_id, substs),
}
}
}
diff --git a/compiler/rustc_infer/src/infer/resolve.rs b/compiler/rustc_infer/src/infer/resolve.rs
index 3d99f0958..4db4ff238 100644
--- a/compiler/rustc_infer/src/infer/resolve.rs
+++ b/compiler/rustc_infer/src/infer/resolve.rs
@@ -1,6 +1,5 @@
use super::type_variable::{TypeVariableOrigin, TypeVariableOriginKind};
use super::{FixupError, FixupResult, InferCtxt, Span};
-use rustc_middle::mir;
use rustc_middle::ty::fold::{FallibleTypeFolder, TypeFolder, TypeSuperFoldable};
use rustc_middle::ty::visit::{TypeSuperVisitable, TypeVisitor};
use rustc_middle::ty::{self, Const, InferConst, Ty, TyCtxt, TypeFoldable, TypeVisitable};
@@ -16,12 +15,12 @@ use std::ops::ControlFlow;
/// useful for printing messages etc but also required at various
/// points for correctness.
pub struct OpportunisticVarResolver<'a, 'tcx> {
- infcx: &'a InferCtxt<'a, 'tcx>,
+ infcx: &'a InferCtxt<'tcx>,
}
impl<'a, 'tcx> OpportunisticVarResolver<'a, 'tcx> {
#[inline]
- pub fn new(infcx: &'a InferCtxt<'a, 'tcx>) -> Self {
+ pub fn new(infcx: &'a InferCtxt<'tcx>) -> Self {
OpportunisticVarResolver { infcx }
}
}
@@ -32,7 +31,7 @@ impl<'a, 'tcx> TypeFolder<'tcx> for OpportunisticVarResolver<'a, 'tcx> {
}
fn fold_ty(&mut self, t: Ty<'tcx>) -> Ty<'tcx> {
- if !t.has_infer_types_or_consts() {
+ if !t.has_non_region_infer() {
t // micro-optimize -- if there is nothing in this type that this fold affects...
} else {
let t = self.infcx.shallow_resolve(t);
@@ -41,17 +40,13 @@ impl<'a, 'tcx> TypeFolder<'tcx> for OpportunisticVarResolver<'a, 'tcx> {
}
fn fold_const(&mut self, ct: Const<'tcx>) -> Const<'tcx> {
- if !ct.has_infer_types_or_consts() {
+ if !ct.has_non_region_infer() {
ct // micro-optimize -- if there is nothing in this const that this fold affects...
} else {
let ct = self.infcx.shallow_resolve(ct);
ct.super_fold_with(self)
}
}
-
- fn fold_mir_const(&mut self, constant: mir::ConstantKind<'tcx>) -> mir::ConstantKind<'tcx> {
- constant.super_fold_with(self)
- }
}
/// The opportunistic region resolver opportunistically resolves regions
@@ -62,11 +57,11 @@ impl<'a, 'tcx> TypeFolder<'tcx> for OpportunisticVarResolver<'a, 'tcx> {
/// If you want to resolve type and const variables as well, call
/// [InferCtxt::resolve_vars_if_possible] first.
pub struct OpportunisticRegionResolver<'a, 'tcx> {
- infcx: &'a InferCtxt<'a, 'tcx>,
+ infcx: &'a InferCtxt<'tcx>,
}
impl<'a, 'tcx> OpportunisticRegionResolver<'a, 'tcx> {
- pub fn new(infcx: &'a InferCtxt<'a, 'tcx>) -> Self {
+ pub fn new(infcx: &'a InferCtxt<'tcx>) -> Self {
OpportunisticRegionResolver { infcx }
}
}
@@ -116,11 +111,11 @@ impl<'a, 'tcx> TypeFolder<'tcx> for OpportunisticRegionResolver<'a, 'tcx> {
/// It does not construct the fully resolved type (which might
/// involve some hashing and so forth).
pub struct UnresolvedTypeFinder<'a, 'tcx> {
- infcx: &'a InferCtxt<'a, 'tcx>,
+ infcx: &'a InferCtxt<'tcx>,
}
impl<'a, 'tcx> UnresolvedTypeFinder<'a, 'tcx> {
- pub fn new(infcx: &'a InferCtxt<'a, 'tcx>) -> Self {
+ pub fn new(infcx: &'a InferCtxt<'tcx>) -> Self {
UnresolvedTypeFinder { infcx }
}
}
@@ -167,7 +162,7 @@ impl<'a, 'tcx> TypeVisitor<'tcx> for UnresolvedTypeFinder<'a, 'tcx> {
/// Full type resolution replaces all type and region variables with
/// their concrete results. If any variable cannot be replaced (never unified, etc)
/// then an `Err` result is returned.
-pub fn fully_resolve<'a, 'tcx, T>(infcx: &InferCtxt<'a, 'tcx>, value: T) -> FixupResult<'tcx, T>
+pub fn fully_resolve<'tcx, T>(infcx: &InferCtxt<'tcx>, value: T) -> FixupResult<'tcx, T>
where
T: TypeFoldable<'tcx>,
{
@@ -177,7 +172,7 @@ where
// N.B. This type is not public because the protocol around checking the
// `err` field is not enforceable otherwise.
struct FullTypeResolver<'a, 'tcx> {
- infcx: &'a InferCtxt<'a, 'tcx>,
+ infcx: &'a InferCtxt<'tcx>,
}
impl<'a, 'tcx> FallibleTypeFolder<'tcx> for FullTypeResolver<'a, 'tcx> {
diff --git a/compiler/rustc_infer/src/infer/sub.rs b/compiler/rustc_infer/src/infer/sub.rs
index b27571275..97354ba5d 100644
--- a/compiler/rustc_infer/src/infer/sub.rs
+++ b/compiler/rustc_infer/src/infer/sub.rs
@@ -2,7 +2,6 @@ use super::combine::{CombineFields, RelationDir};
use super::SubregionOrigin;
use crate::infer::combine::ConstEquateRelation;
-use crate::infer::{TypeVariableOrigin, TypeVariableOriginKind};
use crate::traits::Obligation;
use rustc_middle::ty::relate::{Cause, Relate, RelateResult, TypeRelation};
use rustc_middle::ty::visit::TypeVisitable;
@@ -11,8 +10,8 @@ use rustc_middle::ty::{self, ToPredicate, Ty, TyCtxt};
use std::mem;
/// Ensures `a` is made a subtype of `b`. Returns `a` on success.
-pub struct Sub<'combine, 'infcx, 'tcx> {
- fields: &'combine mut CombineFields<'infcx, 'tcx>,
+pub struct Sub<'combine, 'a, 'tcx> {
+ fields: &'combine mut CombineFields<'a, 'tcx>,
a_is_expected: bool,
}
@@ -129,30 +128,37 @@ impl<'tcx> TypeRelation<'tcx> for Sub<'_, '_, 'tcx> {
(&ty::Opaque(did, ..), _) | (_, &ty::Opaque(did, ..))
if self.fields.define_opaque_types && did.is_local() =>
{
- let mut generalize = |ty, ty_is_expected| {
- let var = infcx.next_ty_var_id_in_universe(
- TypeVariableOrigin {
- kind: TypeVariableOriginKind::MiscVariable,
- span: self.fields.trace.cause.span,
- },
- ty::UniverseIndex::ROOT,
- );
- self.fields.instantiate(ty, RelationDir::SubtypeOf, var, ty_is_expected)?;
- Ok(infcx.tcx.mk_ty_var(var))
- };
- let (a, b) = if self.a_is_expected { (a, b) } else { (b, a) };
- let (a, b) = match (a.kind(), b.kind()) {
- (&ty::Opaque(..), _) => (a, generalize(b, true)?),
- (_, &ty::Opaque(..)) => (generalize(a, false)?, b),
- _ => unreachable!(),
- };
self.fields.obligations.extend(
infcx
- .handle_opaque_type(a, b, true, &self.fields.trace.cause, self.param_env())?
+ .handle_opaque_type(
+ a,
+ b,
+ self.a_is_expected,
+ &self.fields.trace.cause,
+ self.param_env(),
+ )?
.obligations,
);
Ok(a)
}
+ // Optimization of GeneratorWitness relation since we know that all
+ // free regions are replaced with bound regions during construction.
+ // This greatly speeds up subtyping of GeneratorWitness.
+ (&ty::GeneratorWitness(a_types), &ty::GeneratorWitness(b_types)) => {
+ let a_types = infcx.tcx.anonymize_bound_vars(a_types);
+ let b_types = infcx.tcx.anonymize_bound_vars(b_types);
+ if a_types.bound_vars() == b_types.bound_vars() {
+ let (a_types, b_types) = infcx.replace_bound_vars_with_placeholders(
+ a_types.map_bound(|a_types| (a_types, b_types.skip_binder())),
+ );
+ for (a, b) in std::iter::zip(a_types, b_types) {
+ self.relate(a, b)?;
+ }
+ Ok(a)
+ } else {
+ Err(ty::error::TypeError::Sorts(ty::relate::expected_found(self, a, b)))
+ }
+ }
_ => {
self.fields.infcx.super_combine_tys(self, a, b)?;
diff --git a/compiler/rustc_infer/src/infer/type_variable.rs b/compiler/rustc_infer/src/infer/type_variable.rs
index a0e2965b6..7ff086452 100644
--- a/compiler/rustc_infer/src/infer/type_variable.rs
+++ b/compiler/rustc_infer/src/infer/type_variable.rs
@@ -122,6 +122,7 @@ pub enum TypeVariableOriginKind {
MiscVariable,
NormalizeProjectionType,
TypeInference,
+ OpaqueTypeInference(DefId),
TypeParameterDefinition(Symbol, Option<DefId>),
/// One of the upvars or closure kind parameters in a `ClosureSubsts`
diff --git a/compiler/rustc_infer/src/infer/undo_log.rs b/compiler/rustc_infer/src/infer/undo_log.rs
index 74a26ebc3..611961ab1 100644
--- a/compiler/rustc_infer/src/infer/undo_log.rs
+++ b/compiler/rustc_infer/src/infer/undo_log.rs
@@ -100,7 +100,7 @@ impl Default for InferCtxtUndoLogs<'_> {
}
/// The UndoLogs trait defines how we undo a particular kind of action (of type T). We can undo any
-/// action that is convertable into an UndoLog (per the From impls above).
+/// action that is convertible into an UndoLog (per the From impls above).
impl<'tcx, T> UndoLogs<T> for InferCtxtUndoLogs<'tcx>
where
UndoLog<'tcx>: From<T>,
diff --git a/compiler/rustc_infer/src/lib.rs b/compiler/rustc_infer/src/lib.rs
index 7769a68ba..e040634ed 100644
--- a/compiler/rustc_infer/src/lib.rs
+++ b/compiler/rustc_infer/src/lib.rs
@@ -2,7 +2,7 @@
//!
//! - **Type inference.** The type inference code can be found in the `infer` module;
//! this code handles low-level equality and subtyping operations. The
-//! type check pass in the compiler is found in the `rustc_typeck` crate.
+//! type check pass in the compiler is found in the `rustc_hir_analysis` crate.
//!
//! For more information about how rustc works, see the [rustc dev guide].
//!
@@ -17,9 +17,8 @@
#![feature(box_patterns)]
#![feature(control_flow_enum)]
#![feature(extend_one)]
-#![feature(label_break_value)]
#![feature(let_chains)]
-#![feature(let_else)]
+#![feature(if_let_guard)]
#![feature(min_specialization)]
#![feature(never_type)]
#![feature(try_blocks)]
@@ -35,5 +34,6 @@ extern crate tracing;
#[macro_use]
extern crate rustc_middle;
+mod errors;
pub mod infer;
pub mod traits;
diff --git a/compiler/rustc_infer/src/traits/engine.rs b/compiler/rustc_infer/src/traits/engine.rs
index 736278ba0..b2b985a22 100644
--- a/compiler/rustc_infer/src/traits/engine.rs
+++ b/compiler/rustc_infer/src/traits/engine.rs
@@ -10,7 +10,7 @@ use super::{ObligationCause, PredicateObligation};
pub trait TraitEngine<'tcx>: 'tcx {
fn normalize_projection_type(
&mut self,
- infcx: &InferCtxt<'_, 'tcx>,
+ infcx: &InferCtxt<'tcx>,
param_env: ty::ParamEnv<'tcx>,
projection_ty: ty::ProjectionTy<'tcx>,
cause: ObligationCause<'tcx>,
@@ -21,7 +21,7 @@ pub trait TraitEngine<'tcx>: 'tcx {
/// parameters (except for `Self`).
fn register_bound(
&mut self,
- infcx: &InferCtxt<'_, 'tcx>,
+ infcx: &InferCtxt<'tcx>,
param_env: ty::ParamEnv<'tcx>,
ty: Ty<'tcx>,
def_id: DefId,
@@ -41,14 +41,13 @@ pub trait TraitEngine<'tcx>: 'tcx {
fn register_predicate_obligation(
&mut self,
- infcx: &InferCtxt<'_, 'tcx>,
+ infcx: &InferCtxt<'tcx>,
obligation: PredicateObligation<'tcx>,
);
- fn select_all_or_error(&mut self, infcx: &InferCtxt<'_, 'tcx>) -> Vec<FulfillmentError<'tcx>>;
+ fn select_all_or_error(&mut self, infcx: &InferCtxt<'tcx>) -> Vec<FulfillmentError<'tcx>>;
- fn select_where_possible(&mut self, infcx: &InferCtxt<'_, 'tcx>)
- -> Vec<FulfillmentError<'tcx>>;
+ fn select_where_possible(&mut self, infcx: &InferCtxt<'tcx>) -> Vec<FulfillmentError<'tcx>>;
fn pending_obligations(&self) -> Vec<PredicateObligation<'tcx>>;
@@ -58,7 +57,7 @@ pub trait TraitEngine<'tcx>: 'tcx {
pub trait TraitEngineExt<'tcx> {
fn register_predicate_obligations(
&mut self,
- infcx: &InferCtxt<'_, 'tcx>,
+ infcx: &InferCtxt<'tcx>,
obligations: impl IntoIterator<Item = PredicateObligation<'tcx>>,
);
}
@@ -66,7 +65,7 @@ pub trait TraitEngineExt<'tcx> {
impl<'tcx, T: ?Sized + TraitEngine<'tcx>> TraitEngineExt<'tcx> for T {
fn register_predicate_obligations(
&mut self,
- infcx: &InferCtxt<'_, 'tcx>,
+ infcx: &InferCtxt<'tcx>,
obligations: impl IntoIterator<Item = PredicateObligation<'tcx>>,
) {
for obligation in obligations {
diff --git a/compiler/rustc_infer/src/traits/error_reporting/mod.rs b/compiler/rustc_infer/src/traits/error_reporting/mod.rs
index 95b6c4ce1..f8b5009a5 100644
--- a/compiler/rustc_infer/src/traits/error_reporting/mod.rs
+++ b/compiler/rustc_infer/src/traits/error_reporting/mod.rs
@@ -10,7 +10,7 @@ use rustc_span::Span;
use std::fmt;
use std::iter;
-impl<'a, 'tcx> InferCtxt<'a, 'tcx> {
+impl<'tcx> InferCtxt<'tcx> {
pub fn report_extra_impl_obligation(
&self,
error_span: Span,
diff --git a/compiler/rustc_infer/src/traits/mod.rs b/compiler/rustc_infer/src/traits/mod.rs
index 4df4de21a..c8600ded9 100644
--- a/compiler/rustc_infer/src/traits/mod.rs
+++ b/compiler/rustc_infer/src/traits/mod.rs
@@ -67,6 +67,14 @@ impl<'tcx> PredicateObligation<'tcx> {
recursion_depth: self.recursion_depth,
})
}
+
+ pub fn without_const(mut self, tcx: TyCtxt<'tcx>) -> PredicateObligation<'tcx> {
+ self.param_env = self.param_env.without_const();
+ if let ty::PredicateKind::Trait(trait_pred) = self.predicate.kind().skip_binder() && trait_pred.is_const_if_const() {
+ self.predicate = tcx.mk_predicate(self.predicate.kind().map_bound(|_| ty::PredicateKind::Trait(trait_pred.without_const())));
+ }
+ self
+ }
}
impl<'tcx> TraitObligation<'tcx> {
@@ -105,6 +113,8 @@ pub struct FulfillmentError<'tcx> {
#[derive(Clone)]
pub enum FulfillmentErrorCode<'tcx> {
+ /// Inherently impossible to fulfill; this trait is implemented if and only if it is already implemented.
+ CodeCycle(Vec<Obligation<'tcx, ty::Predicate<'tcx>>>),
CodeSelectionError(SelectionError<'tcx>),
CodeProjectionError(MismatchedProjectionTypes<'tcx>),
CodeSubtypeError(ExpectedFound<Ty<'tcx>>, TypeError<'tcx>), // always comes from a SubtypePredicate
diff --git a/compiler/rustc_infer/src/traits/structural_impls.rs b/compiler/rustc_infer/src/traits/structural_impls.rs
index 573d2d1e3..1c6ab6a08 100644
--- a/compiler/rustc_infer/src/traits/structural_impls.rs
+++ b/compiler/rustc_infer/src/traits/structural_impls.rs
@@ -47,6 +47,7 @@ impl<'tcx> fmt::Debug for traits::FulfillmentErrorCode<'tcx> {
write!(f, "CodeConstEquateError({:?}, {:?})", a, b)
}
super::CodeAmbiguity => write!(f, "Ambiguity"),
+ super::CodeCycle(ref cycle) => write!(f, "Cycle({:?})", cycle),
}
}
}
diff --git a/compiler/rustc_infer/src/traits/util.rs b/compiler/rustc_infer/src/traits/util.rs
index f5a1edf6d..e12c069dc 100644
--- a/compiler/rustc_infer/src/traits/util.rs
+++ b/compiler/rustc_infer/src/traits/util.rs
@@ -246,6 +246,13 @@ impl<'tcx> Elaborator<'tcx> {
Component::UnresolvedInferenceVariable(_) => None,
+ Component::Opaque(def_id, substs) => {
+ let ty = tcx.mk_opaque(def_id, substs);
+ Some(ty::PredicateKind::TypeOutlives(ty::OutlivesPredicate(
+ ty, r_min,
+ )))
+ }
+
Component::Projection(projection) => {
// We might end up here if we have `Foo<<Bar as Baz>::Assoc>: 'a`.
// With this, we can deduce that `<Bar as Baz>::Assoc: 'a`.
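// Illustration (sketch, not part of the patch) of the deduction described in the
// comment above: from an outlives requirement on a type that merely contains a
// projection, such as `Wrapper<T::Item>: 'a`, one can conclude `T::Item: 'a`. Here
// that requirement is implied by the well-formedness of the borrowed argument, and
// the deduced `T::Item: 'a` is what makes the return type legal.
struct Wrapper<X>(X);

fn project<'a, T: Iterator>(w: &'a Wrapper<T::Item>) -> &'a T::Item {
    &w.0
}

fn main() {
    let w = Wrapper(1_u32);
    let r = project::<std::vec::IntoIter<u32>>(&w);
    println!("{r}");
}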
@@ -262,8 +269,9 @@ impl<'tcx> Elaborator<'tcx> {
None
}
})
- .map(ty::Binder::dummy)
- .map(|predicate_kind| predicate_kind.to_predicate(tcx))
+ .map(|predicate_kind| {
+ bound_predicate.rebind(predicate_kind).to_predicate(tcx)
+ })
.filter(|&predicate| visited.insert(predicate))
.map(|predicate| {
predicate_obligation(
diff --git a/compiler/rustc_interface/Cargo.toml b/compiler/rustc_interface/Cargo.toml
index 1ecbc876c..6a4c5b4d3 100644
--- a/compiler/rustc_interface/Cargo.toml
+++ b/compiler/rustc_interface/Cargo.toml
@@ -4,7 +4,6 @@ version = "0.0.0"
edition = "2021"
[lib]
-doctest = false
[dependencies]
libloading = "0.7.1"
@@ -17,6 +16,7 @@ rustc_attr = { path = "../rustc_attr" }
rustc_borrowck = { path = "../rustc_borrowck" }
rustc_builtin_macros = { path = "../rustc_builtin_macros" }
rustc_expand = { path = "../rustc_expand" }
+rustc_macros = { path = "../rustc_macros" }
rustc_parse = { path = "../rustc_parse" }
rustc_session = { path = "../rustc_session" }
rustc_span = { path = "../rustc_span" }
@@ -37,7 +37,8 @@ rustc_mir_build = { path = "../rustc_mir_build" }
rustc_mir_transform = { path = "../rustc_mir_transform" }
rustc_monomorphize = { path = "../rustc_monomorphize" }
rustc_passes = { path = "../rustc_passes" }
-rustc_typeck = { path = "../rustc_typeck" }
+rustc_hir_analysis = { path = "../rustc_hir_analysis" }
+rustc_hir_typeck = { path = "../rustc_hir_typeck" }
rustc_lint = { path = "../rustc_lint" }
rustc_errors = { path = "../rustc_errors" }
rustc_plugin_impl = { path = "../rustc_plugin_impl" }
diff --git a/compiler/rustc_interface/src/errors.rs b/compiler/rustc_interface/src/errors.rs
new file mode 100644
index 000000000..f5135c78d
--- /dev/null
+++ b/compiler/rustc_interface/src/errors.rs
@@ -0,0 +1,89 @@
+use rustc_macros::Diagnostic;
+use rustc_span::{Span, Symbol};
+
+use std::io;
+use std::path::Path;
+
+#[derive(Diagnostic)]
+#[diag(interface_ferris_identifier)]
+pub struct FerrisIdentifier {
+ #[primary_span]
+ pub spans: Vec<Span>,
+ #[suggestion(code = "ferris", applicability = "maybe-incorrect")]
+ pub first_span: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(interface_emoji_identifier)]
+pub struct EmojiIdentifier {
+ #[primary_span]
+ pub spans: Vec<Span>,
+ pub ident: Symbol,
+}
+
+#[derive(Diagnostic)]
+#[diag(interface_mixed_bin_crate)]
+pub struct MixedBinCrate;
+
+#[derive(Diagnostic)]
+#[diag(interface_mixed_proc_macro_crate)]
+pub struct MixedProcMacroCrate;
+
+#[derive(Diagnostic)]
+#[diag(interface_proc_macro_doc_without_arg)]
+pub struct ProcMacroDocWithoutArg;
+
+#[derive(Diagnostic)]
+#[diag(interface_error_writing_dependencies)]
+pub struct ErrorWritingDependencies<'a> {
+ pub path: &'a Path,
+ pub error: io::Error,
+}
+
+#[derive(Diagnostic)]
+#[diag(interface_input_file_would_be_overwritten)]
+pub struct InputFileWouldBeOverWritten<'a> {
+ pub path: &'a Path,
+}
+
+#[derive(Diagnostic)]
+#[diag(interface_generated_file_conflicts_with_directory)]
+pub struct GeneratedFileConflictsWithDirectory<'a> {
+ pub input_path: &'a Path,
+ pub dir_path: &'a Path,
+}
+
+#[derive(Diagnostic)]
+#[diag(interface_temps_dir_error)]
+pub struct TempsDirError;
+
+#[derive(Diagnostic)]
+#[diag(interface_out_dir_error)]
+pub struct OutDirError;
+
+#[derive(Diagnostic)]
+#[diag(interface_cant_emit_mir)]
+pub struct CantEmitMIR {
+ pub error: io::Error,
+}
+
+#[derive(Diagnostic)]
+#[diag(interface_rustc_error_fatal)]
+pub struct RustcErrorFatal {
+ #[primary_span]
+ pub span: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(interface_rustc_error_unexpected_annotation)]
+pub struct RustcErrorUnexpectedAnnotation {
+ #[primary_span]
+ pub span: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(interface_failed_writing_file)]
+pub struct FailedWritingFile<'a> {
+ pub path: &'a Path,
+ pub error: io::Error,
+}
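// Illustration (hedged sketch, not part of the new file; assumes the rustc_interface
// crate context): how these derived diagnostics are emitted through the `Session`,
// mirroring call sites that appear later in this same patch (e.g.
// `sess.emit_err(MixedBinCrate)` in passes.rs). The `#[diag(...)]` slug names the
// Fluent message; struct fields are interpolated into that message or attached as
// spans/suggestions per their attributes. Values below are hypothetical.
fn emit_examples(sess: &rustc_session::Session) {
    // Unit struct: the slug alone identifies the message.
    sess.emit_err(crate::errors::MixedBinCrate);

    // Struct with fields: `path` and `error` are rendered into the message.
    let error = std::io::Error::new(std::io::ErrorKind::Other, "disk full");
    sess.emit_fatal(crate::errors::ErrorWritingDependencies {
        path: std::path::Path::new("target/debug/deps/app.d"),
        error,
    });
}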
diff --git a/compiler/rustc_interface/src/interface.rs b/compiler/rustc_interface/src/interface.rs
index 94f81b660..89aaa0b95 100644
--- a/compiler/rustc_interface/src/interface.rs
+++ b/compiler/rustc_interface/src/interface.rs
@@ -17,7 +17,7 @@ use rustc_session::config::{self, CheckCfg, ErrorOutputType, Input, OutputFilena
use rustc_session::early_error;
use rustc_session::lint;
use rustc_session::parse::{CrateConfig, ParseSess};
-use rustc_session::{DiagnosticOutput, Session};
+use rustc_session::Session;
use rustc_span::source_map::{FileLoader, FileName};
use rustc_span::symbol::sym;
use std::path::PathBuf;
@@ -25,7 +25,10 @@ use std::result;
pub type Result<T> = result::Result<T, ErrorGuaranteed>;
-/// Represents a compiler session.
+/// Represents a compiler session. Note that every `Compiler` contains a
+/// `Session`, but `Compiler` also contains some things that cannot be in
+/// `Session`, due to `Session` being in a crate that has many fewer
+/// dependencies than this crate.
///
/// Can be used to run `rustc_interface` queries.
/// Created by passing [`Config`] to [`run_compiler`].
@@ -176,7 +179,7 @@ pub fn parse_check_cfg(specs: Vec<String>) -> CheckCfg {
let ident = arg.ident().expect("multi-segment cfg key");
names_valid.insert(ident.name.to_string());
} else {
- error!("`names()` arguments must be simple identifers");
+ error!("`names()` arguments must be simple identifiers");
}
}
continue 'specs;
@@ -204,7 +207,7 @@ pub fn parse_check_cfg(specs: Vec<String>) -> CheckCfg {
continue 'specs;
} else {
error!(
- "`values()` first argument must be a simple identifer"
+ "`values()` first argument must be a simple identifier"
);
}
} else if args.is_empty() {
@@ -247,7 +250,6 @@ pub struct Config {
pub output_dir: Option<PathBuf>,
pub output_file: Option<PathBuf>,
pub file_loader: Option<Box<dyn FileLoader + Send + Sync>>,
- pub diagnostic_output: DiagnosticOutput,
pub lint_caps: FxHashMap<lint::LintId, lint::Level>,
@@ -276,67 +278,60 @@ pub struct Config {
pub registry: Registry,
}
-pub fn create_compiler_and_run<R>(config: Config, f: impl FnOnce(&Compiler) -> R) -> R {
- crate::callbacks::setup_callbacks();
-
- let registry = &config.registry;
- let (mut sess, codegen_backend) = util::create_session(
- config.opts,
- config.crate_cfg,
- config.crate_check_cfg,
- config.diagnostic_output,
- config.file_loader,
- config.input_path.clone(),
- config.lint_caps,
- config.make_codegen_backend,
- registry.clone(),
- );
-
- if let Some(parse_sess_created) = config.parse_sess_created {
- parse_sess_created(
- &mut Lrc::get_mut(&mut sess)
- .expect("create_session() should never share the returned session")
- .parse_sess,
- );
- }
-
- let temps_dir = sess.opts.unstable_opts.temps_dir.as_ref().map(|o| PathBuf::from(&o));
-
- let compiler = Compiler {
- sess,
- codegen_backend,
- input: config.input,
- input_path: config.input_path,
- output_dir: config.output_dir,
- output_file: config.output_file,
- temps_dir,
- register_lints: config.register_lints,
- override_queries: config.override_queries,
- };
-
- rustc_span::with_source_map(compiler.sess.parse_sess.clone_source_map(), move || {
- let r = {
- let _sess_abort_error = OnDrop(|| {
- compiler.sess.finish_diagnostics(registry);
- });
-
- f(&compiler)
- };
-
- let prof = compiler.sess.prof.clone();
- prof.generic_activity("drop_compiler").run(move || drop(compiler));
- r
- })
-}
-
// JUSTIFICATION: before session exists, only config
-#[cfg_attr(not(bootstrap), allow(rustc::bad_opt_access))]
+#[allow(rustc::bad_opt_access)]
pub fn run_compiler<R: Send>(config: Config, f: impl FnOnce(&Compiler) -> R + Send) -> R {
- tracing::trace!("run_compiler");
+ trace!("run_compiler");
util::run_in_thread_pool_with_globals(
config.opts.edition,
config.opts.unstable_opts.threads,
- || create_compiler_and_run(config, f),
+ || {
+ crate::callbacks::setup_callbacks();
+
+ let registry = &config.registry;
+ let (mut sess, codegen_backend) = util::create_session(
+ config.opts,
+ config.crate_cfg,
+ config.crate_check_cfg,
+ config.file_loader,
+ config.input_path.clone(),
+ config.lint_caps,
+ config.make_codegen_backend,
+ registry.clone(),
+ );
+
+ if let Some(parse_sess_created) = config.parse_sess_created {
+ parse_sess_created(&mut sess.parse_sess);
+ }
+
+ let temps_dir = sess.opts.unstable_opts.temps_dir.as_ref().map(|o| PathBuf::from(&o));
+
+ let compiler = Compiler {
+ sess: Lrc::new(sess),
+ codegen_backend: Lrc::new(codegen_backend),
+ input: config.input,
+ input_path: config.input_path,
+ output_dir: config.output_dir,
+ output_file: config.output_file,
+ temps_dir,
+ register_lints: config.register_lints,
+ override_queries: config.override_queries,
+ };
+
+ rustc_span::with_source_map(compiler.sess.parse_sess.clone_source_map(), move || {
+ let r = {
+ let _sess_abort_error = OnDrop(|| {
+ compiler.sess.finish_diagnostics(registry);
+ });
+
+ f(&compiler)
+ };
+
+ let prof = compiler.sess.prof.clone();
+ prof.generic_activity("drop_compiler").run(move || drop(compiler));
+ r
+ })
+ },
)
}
diff --git a/compiler/rustc_interface/src/lib.rs b/compiler/rustc_interface/src/lib.rs
index d443057eb..a41a749ee 100644
--- a/compiler/rustc_interface/src/lib.rs
+++ b/compiler/rustc_interface/src/lib.rs
@@ -1,12 +1,17 @@
#![feature(box_patterns)]
-#![feature(let_else)]
#![feature(internal_output_capture)]
#![feature(thread_spawn_unchecked)]
#![feature(once_cell)]
#![recursion_limit = "256"]
#![allow(rustc::potential_query_instability)]
+#![deny(rustc::untranslatable_diagnostic)]
+#![deny(rustc::diagnostic_outside_of_impl)]
+
+#[macro_use]
+extern crate tracing;
mod callbacks;
+mod errors;
pub mod interface;
mod passes;
mod proc_macro_decls;
diff --git a/compiler/rustc_interface/src/passes.rs b/compiler/rustc_interface/src/passes.rs
index 8f0835917..7f1d21bf1 100644
--- a/compiler/rustc_interface/src/passes.rs
+++ b/compiler/rustc_interface/src/passes.rs
@@ -1,3 +1,8 @@
+use crate::errors::{
+ CantEmitMIR, EmojiIdentifier, ErrorWritingDependencies, FerrisIdentifier,
+ GeneratedFileConflictsWithDirectory, InputFileWouldBeOverWritten, MixedBinCrate,
+ MixedProcMacroCrate, OutDirError, ProcMacroDocWithoutArg, TempsDirError,
+};
use crate::interface::{Compiler, Result};
use crate::proc_macro_decls;
use crate::util;
@@ -8,10 +13,9 @@ use rustc_borrowck as mir_borrowck;
use rustc_codegen_ssa::traits::CodegenBackend;
use rustc_data_structures::parallel;
use rustc_data_structures::sync::{Lrc, OnceCell, WorkerLocal};
-use rustc_errors::{Applicability, ErrorGuaranteed, MultiSpan, PResult};
+use rustc_errors::{ErrorGuaranteed, PResult};
use rustc_expand::base::{ExtCtxt, LintStoreExpand, ResolverExpand};
use rustc_hir::def_id::StableCrateId;
-use rustc_hir::definitions::Definitions;
use rustc_lint::{BufferedEarlyLint, EarlyCheckNode, LintStore};
use rustc_metadata::creader::CStore;
use rustc_middle::arena::Arena;
@@ -25,15 +29,13 @@ use rustc_plugin_impl as plugin;
use rustc_query_impl::{OnDiskCache, Queries as TcxQueries};
use rustc_resolve::{Resolver, ResolverArenas};
use rustc_session::config::{CrateType, Input, OutputFilenames, OutputType};
-use rustc_session::cstore::{CrateStoreDyn, MetadataLoader, MetadataLoaderDyn};
+use rustc_session::cstore::{MetadataLoader, MetadataLoaderDyn};
use rustc_session::output::filename_for_input;
use rustc_session::search_paths::PathKind;
use rustc_session::{Limit, Session};
use rustc_span::symbol::{sym, Symbol};
use rustc_span::FileName;
use rustc_trait_selection::traits;
-use rustc_typeck as typeck;
-use tracing::{info, warn};
use std::any::Any;
use std::cell::RefCell;
@@ -64,7 +66,7 @@ pub fn parse<'a>(sess: &'a Session, input: &Input) -> PResult<'a, ast::Crate> {
}
if sess.opts.unstable_opts.hir_stats {
- hir_stats::print_ast_stats(&krate, "PRE EXPANSION AST STATS");
+ hir_stats::print_ast_stats(&krate, "PRE EXPANSION AST STATS", "ast-stats-1");
}
Ok(krate)
@@ -132,10 +134,7 @@ mod boxed_resolver {
f((&mut *resolver).as_mut().unwrap())
}
- pub fn to_resolver_outputs(
- resolver: Rc<RefCell<BoxedResolver>>,
- ) -> (Definitions, Box<CrateStoreDyn>, ty::ResolverOutputs, ty::ResolverAstLowering)
- {
+ pub fn to_resolver_outputs(resolver: Rc<RefCell<BoxedResolver>>) -> ty::ResolverOutputs {
match Rc::try_unwrap(resolver) {
Ok(resolver) => {
let mut resolver = resolver.into_inner();
@@ -160,7 +159,7 @@ pub fn create_resolver(
krate: &ast::Crate,
crate_name: &str,
) -> BoxedResolver {
- tracing::trace!("create_resolver");
+ trace!("create_resolver");
BoxedResolver::new(sess, move |sess, resolver_arenas| {
Resolver::new(sess, krate, crate_name, metadata_loader, resolver_arenas)
})
@@ -274,7 +273,7 @@ pub fn configure_and_expand(
crate_name: &str,
resolver: &mut Resolver<'_>,
) -> Result<ast::Crate> {
- tracing::trace!("configure_and_expand");
+ trace!("configure_and_expand");
pre_expansion_lint(sess, lint_store, resolver.registered_tools(), &krate, crate_name);
rustc_builtin_macros::register_builtin_macros(resolver);
@@ -374,10 +373,10 @@ pub fn configure_and_expand(
if crate_types.len() > 1 {
if is_executable_crate {
- sess.err("cannot mix `bin` crate type with others");
+ sess.emit_err(MixedBinCrate);
}
if is_proc_macro_crate {
- sess.err("cannot mix `proc-macro` crate type with others");
+ sess.emit_err(MixedProcMacroCrate);
}
}
@@ -388,13 +387,7 @@ pub fn configure_and_expand(
// However, we do emit a warning, to let such users know that they should
// start passing '--crate-type proc-macro'
if has_proc_macro_decls && sess.opts.actually_rustdoc && !is_proc_macro_crate {
- let mut msg = sess.diagnostic().struct_warn(
- "Trying to document proc macro crate \
- without passing '--crate-type proc-macro to rustdoc",
- );
-
- msg.warn("The generated documentation may be incorrect");
- msg.emit();
+ sess.emit_warning(ProcMacroDocWithoutArg);
} else {
krate = sess.time("maybe_create_a_macro_crate", || {
let is_test_crate = sess.opts.test;
@@ -417,7 +410,7 @@ pub fn configure_and_expand(
}
if sess.opts.unstable_opts.hir_stats {
- hir_stats::print_ast_stats(&krate, "POST EXPANSION AST STATS");
+ hir_stats::print_ast_stats(&krate, "POST EXPANSION AST STATS", "ast-stats-2");
}
resolver.resolve_crate(&krate);
@@ -443,23 +436,9 @@ pub fn configure_and_expand(
spans.sort();
if ident == sym::ferris {
let first_span = spans[0];
- sess.diagnostic()
- .struct_span_err(
- MultiSpan::from(spans),
- "Ferris cannot be used as an identifier",
- )
- .span_suggestion(
- first_span,
- "try using their name instead",
- "ferris",
- Applicability::MaybeIncorrect,
- )
- .emit();
+ sess.emit_err(FerrisIdentifier { spans, first_span });
} else {
- sess.diagnostic().span_err(
- MultiSpan::from(spans),
- &format!("identifiers cannot contain emoji: `{}`", ident),
- );
+ sess.emit_err(EmojiIdentifier { spans, ident });
}
}
});
@@ -589,13 +568,24 @@ fn write_out_deps(
// Account for explicitly marked-to-track files
// (e.g. accessed in proc macros).
let file_depinfo = sess.parse_sess.file_depinfo.borrow();
- let extra_tracked_files = file_depinfo.iter().map(|path_sym| {
- let path = PathBuf::from(path_sym.as_str());
+
+ let normalize_path = |path: PathBuf| {
let file = FileName::from(path);
escape_dep_filename(&file.prefer_local().to_string())
- });
+ };
+
+ let extra_tracked_files =
+ file_depinfo.iter().map(|path_sym| normalize_path(PathBuf::from(path_sym.as_str())));
files.extend(extra_tracked_files);
+ // We also need to track used PGO profile files
+ if let Some(ref profile_instr) = sess.opts.cg.profile_use {
+ files.push(normalize_path(profile_instr.as_path().to_path_buf()));
+ }
+ if let Some(ref profile_sample) = sess.opts.unstable_opts.profile_sample_use {
+ files.push(normalize_path(profile_sample.as_path().to_path_buf()));
+ }
+
if sess.binary_dep_depinfo() {
if let Some(ref backend) = sess.opts.unstable_opts.codegen_backend {
if backend.contains('.') {
@@ -662,11 +652,9 @@ fn write_out_deps(
.emit_artifact_notification(&deps_filename, "dep-info");
}
}
- Err(e) => sess.fatal(&format!(
- "error writing dependencies to `{}`: {}",
- deps_filename.display(),
- e
- )),
+ Err(error) => {
+ sess.emit_fatal(ErrorWritingDependencies { path: &deps_filename, error });
+ }
}
}
@@ -696,20 +684,12 @@ pub fn prepare_outputs(
if let Some(ref input_path) = compiler.input_path {
if sess.opts.will_create_output_file() {
if output_contains_path(&output_paths, input_path) {
- let reported = sess.err(&format!(
- "the input file \"{}\" would be overwritten by the generated \
- executable",
- input_path.display()
- ));
+ let reported = sess.emit_err(InputFileWouldBeOverWritten { path: input_path });
return Err(reported);
}
- if let Some(dir_path) = output_conflicts_with_dir(&output_paths) {
- let reported = sess.err(&format!(
- "the generated executable for the input file \"{}\" conflicts with the \
- existing directory \"{}\"",
- input_path.display(),
- dir_path.display()
- ));
+ if let Some(ref dir_path) = output_conflicts_with_dir(&output_paths) {
+ let reported =
+ sess.emit_err(GeneratedFileConflictsWithDirectory { input_path, dir_path });
return Err(reported);
}
}
@@ -717,8 +697,7 @@ pub fn prepare_outputs(
if let Some(ref dir) = compiler.temps_dir {
if fs::create_dir_all(dir).is_err() {
- let reported =
- sess.err("failed to find or create the directory specified by `--temps-dir`");
+ let reported = sess.emit_err(TempsDirError);
return Err(reported);
}
}
@@ -731,8 +710,7 @@ pub fn prepare_outputs(
if !only_dep_info {
if let Some(ref dir) = compiler.output_dir {
if fs::create_dir_all(dir).is_err() {
- let reported =
- sess.err("failed to find or create the directory specified by `--out-dir`");
+ let reported = sess.emit_err(OutDirError);
return Err(reported);
}
}
@@ -753,11 +731,11 @@ pub static DEFAULT_QUERY_PROVIDERS: LazyLock<Providers> = LazyLock::new(|| {
rustc_mir_transform::provide(providers);
rustc_monomorphize::provide(providers);
rustc_privacy::provide(providers);
- typeck::provide(providers);
+ rustc_hir_analysis::provide(providers);
+ rustc_hir_typeck::provide(providers);
ty::provide(providers);
traits::provide(providers);
rustc_passes::provide(providers);
- rustc_resolve::provide(providers);
rustc_traits::provide(providers);
rustc_ty_utils::provide(providers);
rustc_metadata::provide(providers);
@@ -806,8 +784,7 @@ pub fn create_global_ctxt<'tcx>(
// incr. comp. yet.
dep_graph.assert_ignored();
- let (definitions, cstore, resolver_outputs, resolver_for_lowering) =
- BoxedResolver::to_resolver_outputs(resolver);
+ let resolver_outputs = BoxedResolver::to_resolver_outputs(resolver);
let sess = &compiler.session();
let query_result_on_disk_cache = rustc_incremental::load_query_result_cache(sess);
@@ -834,10 +811,7 @@ pub fn create_global_ctxt<'tcx>(
lint_store,
arena,
hir_arena,
- definitions,
- cstore,
resolver_outputs,
- resolver_for_lowering,
krate,
dep_graph,
queries.on_disk_cache.as_ref().map(OnDiskCache::as_dyn),
@@ -897,7 +871,7 @@ fn analysis(tcx: TyCtxt<'_>, (): ()) -> Result<()> {
});
// passes are timed inside typeck
- typeck::check_crate(tcx)?;
+ rustc_hir_analysis::check_crate(tcx)?;
sess.time("misc_checking_2", || {
parallel!(
@@ -907,13 +881,13 @@ fn analysis(tcx: TyCtxt<'_>, (): ()) -> Result<()> {
});
},
{
- sess.time("liveness_and_intrinsic_checking", || {
- tcx.hir().par_for_each_module(|module| {
+ sess.time("liveness_checking", || {
+ tcx.hir().par_body_owners(|def_id| {
// this must run before MIR dump, because
// "not all control paths return a value" is reported here.
//
// maybe move the check to a MIR pass?
- tcx.ensure().check_mod_liveness(module);
+ tcx.ensure().check_liveness(def_id.to_def_id());
});
});
}
@@ -953,7 +927,7 @@ fn analysis(tcx: TyCtxt<'_>, (): ()) -> Result<()> {
sess.time("misc_checking_3", || {
parallel!(
{
- tcx.ensure().privacy_access_levels(());
+ tcx.ensure().effective_visibilities(());
parallel!(
{
@@ -1015,8 +989,8 @@ pub fn start_codegen<'tcx>(
info!("Post-codegen\n{:?}", tcx.debug_stats());
if tcx.sess.opts.output_types.contains_key(&OutputType::Mir) {
- if let Err(e) = rustc_mir_transform::dump_mir::emit_mir(tcx, outputs) {
- tcx.sess.err(&format!("could not emit MIR: {}", e));
+ if let Err(error) = rustc_mir_transform::dump_mir::emit_mir(tcx, outputs) {
+ tcx.sess.emit_err(CantEmitMIR { error });
tcx.sess.abort_if_errors();
}
}
diff --git a/compiler/rustc_interface/src/proc_macro_decls.rs b/compiler/rustc_interface/src/proc_macro_decls.rs
index 5371c513d..4c236c693 100644
--- a/compiler/rustc_interface/src/proc_macro_decls.rs
+++ b/compiler/rustc_interface/src/proc_macro_decls.rs
@@ -1,4 +1,3 @@
-use rustc_hir as hir;
use rustc_hir::def_id::LocalDefId;
use rustc_middle::ty::query::Providers;
use rustc_middle::ty::TyCtxt;
@@ -10,7 +9,7 @@ fn proc_macro_decls_static(tcx: TyCtxt<'_>, (): ()) -> Option<LocalDefId> {
for id in tcx.hir().items() {
let attrs = finder.tcx.hir().attrs(id.hir_id());
if finder.tcx.sess.contains_name(attrs, sym::rustc_proc_macro_decls) {
- finder.decls = Some(id.def_id);
+ finder.decls = Some(id.owner_id.def_id);
}
}
@@ -19,7 +18,7 @@ fn proc_macro_decls_static(tcx: TyCtxt<'_>, (): ()) -> Option<LocalDefId> {
struct Finder<'tcx> {
tcx: TyCtxt<'tcx>,
- decls: Option<hir::def_id::LocalDefId>,
+ decls: Option<LocalDefId>,
}
pub(crate) fn provide(providers: &mut Providers) {
diff --git a/compiler/rustc_interface/src/queries.rs b/compiler/rustc_interface/src/queries.rs
index 73402ae08..91d180e1e 100644
--- a/compiler/rustc_interface/src/queries.rs
+++ b/compiler/rustc_interface/src/queries.rs
@@ -1,3 +1,4 @@
+use crate::errors::{FailedWritingFile, RustcErrorFatal, RustcErrorUnexpectedAnnotation};
use crate::interface::{Compiler, Result};
use crate::passes::{self, BoxedResolver, QueryContext};
@@ -165,7 +166,7 @@ impl<'tcx> Queries<'tcx> {
pub fn expansion(
&self,
) -> Result<&Query<(Lrc<ast::Crate>, Rc<RefCell<BoxedResolver>>, Lrc<LintStore>)>> {
- tracing::trace!("expansion");
+ trace!("expansion");
self.expansion.compute(|| {
let crate_name = self.crate_name()?.peek().clone();
let (krate, lint_store) = self.register_plugins()?.take();
@@ -245,6 +246,10 @@ impl<'tcx> Queries<'tcx> {
// Don't do code generation if there were any errors
self.session().compile_status()?;
+ // If we have any delayed bugs, for example because we created TyKind::Error earlier,
+ // it's likely that codegen will only cause more ICEs, obscuring the original problem
+ self.session().diagnostic().flush_delayed();
+
// Hook for UI tests.
Self::check_for_rustc_errors_attr(tcx);
@@ -274,18 +279,14 @@ impl<'tcx> Queries<'tcx> {
// Bare `#[rustc_error]`.
None => {
- tcx.sess.span_fatal(
- tcx.def_span(def_id),
- "fatal error triggered by #[rustc_error]",
- );
+ tcx.sess.emit_fatal(RustcErrorFatal { span: tcx.def_span(def_id) });
}
// Some other attribute.
Some(_) => {
- tcx.sess.span_warn(
- tcx.def_span(def_id),
- "unexpected annotation used with `#[rustc_error(...)]!",
- );
+ tcx.sess.emit_warning(RustcErrorUnexpectedAnnotation {
+ span: tcx.def_span(def_id),
+ });
}
}
}
@@ -360,9 +361,8 @@ impl Linker {
if sess.opts.unstable_opts.no_link {
let encoded = CodegenResults::serialize_rlink(&codegen_results);
let rlink_file = self.prepare_outputs.with_extension(config::RLINK_EXT);
- std::fs::write(&rlink_file, encoded).map_err(|err| {
- sess.fatal(&format!("failed to write file {}: {}", rlink_file.display(), err));
- })?;
+ std::fs::write(&rlink_file, encoded)
+ .map_err(|error| sess.emit_fatal(FailedWritingFile { path: &rlink_file, error }))?;
return Ok(());
}
diff --git a/compiler/rustc_interface/src/tests.rs b/compiler/rustc_interface/src/tests.rs
index a9fdfa241..eb8e65a6d 100644
--- a/compiler/rustc_interface/src/tests.rs
+++ b/compiler/rustc_interface/src/tests.rs
@@ -1,4 +1,4 @@
-#![cfg_attr(not(bootstrap), allow(rustc::bad_opt_access))]
+#![allow(rustc::bad_opt_access)]
use crate::interface::parse_cfgspecs;
use rustc_data_structures::fx::FxHashSet;
@@ -17,14 +17,12 @@ use rustc_session::config::{CFGuard, ExternEntry, LinkerPluginLto, LtoCli, Switc
use rustc_session::lint::Level;
use rustc_session::search_paths::SearchPath;
use rustc_session::utils::{CanonicalizedPath, NativeLib, NativeLibKind};
-use rustc_session::{build_session, getopts, DiagnosticOutput, Session};
+use rustc_session::{build_session, getopts, Session};
use rustc_span::edition::{Edition, DEFAULT_EDITION};
use rustc_span::symbol::sym;
use rustc_span::SourceFileHashAlgorithm;
-use rustc_target::spec::{CodeModel, LinkerFlavor, MergeFunctions, PanicStrategy};
-use rustc_target::spec::{
- RelocModel, RelroLevel, SanitizerSet, SplitDebuginfo, StackProtector, TlsModel,
-};
+use rustc_target::spec::{CodeModel, LinkerFlavorCli, MergeFunctions, PanicStrategy, RelocModel};
+use rustc_target::spec::{RelroLevel, SanitizerSet, SplitDebuginfo, StackProtector, TlsModel};
use std::collections::{BTreeMap, BTreeSet};
use std::iter::FromIterator;
@@ -42,16 +40,7 @@ fn build_session_options_and_crate_config(matches: getopts::Matches) -> (Options
fn mk_session(matches: getopts::Matches) -> (Session, CfgSpecs) {
let registry = registry::Registry::new(&[]);
let (sessopts, cfg) = build_session_options_and_crate_config(matches);
- let sess = build_session(
- sessopts,
- None,
- None,
- registry,
- DiagnosticOutput::Default,
- Default::default(),
- None,
- None,
- );
+ let sess = build_session(sessopts, None, None, registry, Default::default(), None, None);
(sess, cfg)
}
@@ -542,7 +531,7 @@ fn test_codegen_options_tracking_hash() {
}
// Make sure that changing an [UNTRACKED] option leaves the hash unchanged.
- // This list is in alphabetical order.
+ // tidy-alphabetical-start
untracked!(ar, String::from("abc"));
untracked!(codegen_units, Some(42));
untracked!(default_linker_libraries, true);
@@ -552,12 +541,13 @@ fn test_codegen_options_tracking_hash() {
untracked!(link_args, vec![String::from("abc"), String::from("def")]);
untracked!(link_self_contained, Some(true));
untracked!(linker, Some(PathBuf::from("linker")));
- untracked!(linker_flavor, Some(LinkerFlavor::Gcc));
+ untracked!(linker_flavor, Some(LinkerFlavorCli::Gcc));
untracked!(no_stack_check, true);
untracked!(remark, Passes::Some(vec![String::from("pass1"), String::from("pass2")]));
untracked!(rpath, true);
untracked!(save_temps, true);
untracked!(strip, Strip::Debuginfo);
+ // tidy-alphabetical-end
macro_rules! tracked {
($name: ident, $non_default_value: expr) => {
@@ -569,7 +559,7 @@ fn test_codegen_options_tracking_hash() {
}
// Make sure that changing a [TRACKED] option changes the hash.
- // This list is in alphabetical order.
+ // tidy-alphabetical-start
tracked!(code_model, Some(CodeModel::Large));
tracked!(control_flow_guard, CFGuard::Checks);
tracked!(debug_assertions, Some(true));
@@ -579,8 +569,8 @@ fn test_codegen_options_tracking_hash() {
tracked!(force_unwind_tables, Some(true));
tracked!(inline_threshold, Some(0xf007ba11));
tracked!(instrument_coverage, Some(InstrumentCoverage::All));
- tracked!(linker_plugin_lto, LinkerPluginLto::LinkerPluginAuto);
tracked!(link_dead_code, Some(true));
+ tracked!(linker_plugin_lto, LinkerPluginLto::LinkerPluginAuto);
tracked!(llvm_args, vec![String::from("1"), String::from("2")]);
tracked!(lto, LtoCli::Fat);
tracked!(metadata, vec![String::from("A"), String::from("B")]);
@@ -601,6 +591,7 @@ fn test_codegen_options_tracking_hash() {
tracked!(symbol_mangling_version, Some(SymbolManglingVersion::V0));
tracked!(target_cpu, Some(String::from("abc")));
tracked!(target_feature, String::from("all the features, all of them"));
+ // tidy-alphabetical-end
}
#[test]
@@ -621,12 +612,13 @@ fn test_top_level_options_tracked_no_crate() {
}
// Make sure that changing a [TRACKED_NO_CRATE_HASH] option leaves the crate hash unchanged but changes the incremental hash.
- // This list is in alphabetical order.
- tracked!(remap_path_prefix, vec![("/home/bors/rust".into(), "src".into())]);
+ // tidy-alphabetical-start
tracked!(
real_rust_source_base_dir,
Some("/home/bors/rust/.rustup/toolchains/nightly/lib/rustlib/src/rust".into())
);
+ tracked!(remap_path_prefix, vec![("/home/bors/rust".into(), "src".into())]);
+ // tidy-alphabetical-end
}
#[test]
@@ -643,7 +635,7 @@ fn test_unstable_options_tracking_hash() {
}
// Make sure that changing an [UNTRACKED] option leaves the hash unchanged.
- // This list is in alphabetical order.
+ // tidy-alphabetical-start
untracked!(assert_incr_state, Some(String::from("loaded")));
untracked!(deduplicate_diagnostics, false);
untracked!(dep_tasks, true);
@@ -656,6 +648,7 @@ fn test_unstable_options_tracking_hash() {
untracked!(dump_mir_dir, String::from("abc"));
untracked!(dump_mir_exclude_pass_number, true);
untracked!(dump_mir_graphviz, true);
+ untracked!(dylib_lto, true);
untracked!(emit_stack_sizes, true);
untracked!(future_incompat_test, true);
untracked!(hir_stats, true);
@@ -680,12 +673,12 @@ fn test_unstable_options_tracking_hash() {
untracked!(perf_stats, true);
// `pre_link_arg` is omitted because it just forwards to `pre_link_args`.
untracked!(pre_link_args, vec![String::from("abc"), String::from("def")]);
- untracked!(profile_closures, true);
untracked!(print_llvm_passes, true);
untracked!(print_mono_items, Some(String::from("abc")));
untracked!(print_type_sizes, true);
untracked!(proc_macro_backtrace, true);
untracked!(proc_macro_execution_strategy, ProcMacroExecutionStrategy::CrossThread);
+ untracked!(profile_closures, true);
untracked!(query_dep_graph, true);
untracked!(save_analysis, true);
untracked!(self_profile, SwitchWithOptPath::Enabled(None));
@@ -694,7 +687,6 @@ fn test_unstable_options_tracking_hash() {
untracked!(span_free_formats, true);
untracked!(temps_dir, Some(String::from("abc")));
untracked!(threads, 99);
- untracked!(time, true);
untracked!(time_llvm_passes, true);
untracked!(time_passes, true);
untracked!(trace_macros, true);
@@ -704,6 +696,7 @@ fn test_unstable_options_tracking_hash() {
untracked!(unstable_options, true);
untracked!(validate_mir, true);
untracked!(verbose, true);
+ // tidy-alphabetical-end
macro_rules! tracked {
($name: ident, $non_default_value: expr) => {
@@ -715,7 +708,7 @@ fn test_unstable_options_tracking_hash() {
}
// Make sure that changing a [TRACKED] option changes the hash.
- // This list is in alphabetical order.
+ // tidy-alphabetical-start
tracked!(allow_features, Some(vec![String::from("lang_items")]));
tracked!(always_encode_mir, true);
tracked!(asm_comments, true);
@@ -736,10 +729,10 @@ fn test_unstable_options_tracking_hash() {
tracked!(debug_macros, true);
tracked!(dep_info_omit_d_target, true);
tracked!(drop_tracking, true);
- tracked!(export_executable_symbols, true);
tracked!(dual_proc_macros, true);
tracked!(dwarf_version, Some(5));
tracked!(emit_thin_lto, false);
+ tracked!(export_executable_symbols, true);
tracked!(fewer_names, Some(true));
tracked!(force_unstable_if_unmarked, true);
tracked!(fuel, Some(("abc".to_string(), 99)));
@@ -760,13 +753,13 @@ fn test_unstable_options_tracking_hash() {
tracked!(mir_opt_level, Some(4));
tracked!(move_size_limit, Some(4096));
tracked!(mutable_noalias, Some(true));
- tracked!(new_llvm_pass_manager, Some(true));
tracked!(no_generate_arange_section, true);
tracked!(no_link, true);
- tracked!(no_unique_section_names, true);
tracked!(no_profiler_runtime, true);
+ tracked!(no_unique_section_names, true);
tracked!(oom, OomStrategy::Panic);
tracked!(osx_rpath_install_name, true);
+ tracked!(packed_bundled_libs, true);
tracked!(panic_abort_tests, true);
tracked!(panic_in_drop, PanicStrategy::Abort);
tracked!(pick_stable_methods_before_any_unstable, false);
@@ -776,8 +769,8 @@ fn test_unstable_options_tracking_hash() {
tracked!(print_fuel, Some("abc".to_string()));
tracked!(profile, true);
tracked!(profile_emit, Some(PathBuf::from("abc")));
- tracked!(profiler_runtime, "abc".to_string());
tracked!(profile_sample_use, Some(PathBuf::from("abc")));
+ tracked!(profiler_runtime, "abc".to_string());
tracked!(relax_elf_relocations, Some(true));
tracked!(relro_level, Some(RelroLevel::Full));
tracked!(remap_cwd_prefix, Some(PathBuf::from("abc")));
@@ -806,6 +799,7 @@ fn test_unstable_options_tracking_hash() {
tracked!(verify_llvm_ir, true);
tracked!(virtual_function_elimination, true);
tracked!(wasi_exec_model, Some(WasiExecModel::Reactor));
+ // tidy-alphabetical-end
macro_rules! tracked_no_crate_hash {
($name: ident, $non_default_value: expr) => {
diff --git a/compiler/rustc_interface/src/util.rs b/compiler/rustc_interface/src/util.rs
index 5e5596f13..519b8a7fc 100644
--- a/compiler/rustc_interface/src/util.rs
+++ b/compiler/rustc_interface/src/util.rs
@@ -1,23 +1,17 @@
+use info;
use libloading::Library;
use rustc_ast as ast;
use rustc_codegen_ssa::traits::CodegenBackend;
use rustc_data_structures::fx::{FxHashMap, FxHashSet};
-#[cfg(parallel_compiler)]
-use rustc_data_structures::jobserver;
-use rustc_data_structures::sync::Lrc;
use rustc_errors::registry::Registry;
-#[cfg(parallel_compiler)]
-use rustc_middle::ty::tls;
use rustc_parse::validate_attr;
-#[cfg(parallel_compiler)]
-use rustc_query_impl::{QueryContext, QueryCtxt};
use rustc_session as session;
use rustc_session::config::CheckCfg;
use rustc_session::config::{self, CrateType};
use rustc_session::config::{ErrorOutputType, Input, OutputFilenames};
use rustc_session::lint::{self, BuiltinLintDiagnostics, LintBuffer};
use rustc_session::parse::CrateConfig;
-use rustc_session::{early_error, filesearch, output, DiagnosticOutput, Session};
+use rustc_session::{early_error, filesearch, output, Session};
use rustc_span::edition::Edition;
use rustc_span::lev_distance::find_best_match_for_name;
use rustc_span::source_map::FileLoader;
@@ -25,13 +19,10 @@ use rustc_span::symbol::{sym, Symbol};
use std::env;
use std::env::consts::{DLL_PREFIX, DLL_SUFFIX};
use std::mem;
-#[cfg(not(parallel_compiler))]
-use std::panic;
use std::path::{Path, PathBuf};
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::OnceLock;
use std::thread;
-use tracing::info;
/// Function pointer type that constructs a new CodegenBackend.
pub type MakeBackendFn = fn() -> Box<dyn CodegenBackend>;
@@ -65,7 +56,6 @@ pub fn create_session(
sopts: config::Options,
cfg: FxHashSet<(String, Option<String>)>,
check_cfg: CheckCfg,
- diagnostic_output: DiagnosticOutput,
file_loader: Option<Box<dyn FileLoader + Send + Sync + 'static>>,
input_path: Option<PathBuf>,
lint_caps: FxHashMap<lint::LintId, lint::Level>,
@@ -73,7 +63,7 @@ pub fn create_session(
Box<dyn FnOnce(&config::Options) -> Box<dyn CodegenBackend> + Send>,
>,
descriptions: Registry,
-) -> (Lrc<Session>, Lrc<Box<dyn CodegenBackend>>) {
+) -> (Session, Box<dyn CodegenBackend>) {
let codegen_backend = if let Some(make_codegen_backend) = make_codegen_backend {
make_codegen_backend(&sopts)
} else {
@@ -104,7 +94,6 @@ pub fn create_session(
input_path,
bundle,
descriptions,
- diagnostic_output,
lint_caps,
file_loader,
target_override,
@@ -121,7 +110,7 @@ pub fn create_session(
sess.parse_sess.config = cfg;
sess.parse_sess.check_config = check_cfg;
- (Lrc::new(sess), Lrc::new(codegen_backend))
+ (sess, codegen_backend)
}
const STACK_SIZE: usize = 8 * 1024 * 1024;
@@ -132,79 +121,86 @@ fn get_stack_size() -> Option<usize> {
env::var_os("RUST_MIN_STACK").is_none().then_some(STACK_SIZE)
}
-/// Like a `thread::Builder::spawn` followed by a `join()`, but avoids the need
-/// for `'static` bounds.
-#[cfg(not(parallel_compiler))]
-fn scoped_thread<F: FnOnce() -> R + Send, R: Send>(cfg: thread::Builder, f: F) -> R {
- // SAFETY: join() is called immediately, so any closure captures are still
- // alive.
- match unsafe { cfg.spawn_unchecked(f) }.unwrap().join() {
- Ok(v) => v,
- Err(e) => panic::resume_unwind(e),
- }
-}
-
#[cfg(not(parallel_compiler))]
-pub fn run_in_thread_pool_with_globals<F: FnOnce() -> R + Send, R: Send>(
+pub(crate) fn run_in_thread_pool_with_globals<F: FnOnce() -> R + Send, R: Send>(
edition: Edition,
_threads: usize,
f: F,
) -> R {
- let mut cfg = thread::Builder::new().name("rustc".to_string());
-
+ // The "thread pool" is a single spawned thread in the non-parallel
+ // compiler. We run on a spawned thread instead of the main thread (a) to
+ // provide control over the stack size, and (b) to increase similarity with
+ // the parallel compiler, in particular to ensure there is no accidental
+ // sharing of data between the main thread and the compilation thread
+ // (which might cause problems for the parallel compiler).
+ let mut builder = thread::Builder::new().name("rustc".to_string());
if let Some(size) = get_stack_size() {
- cfg = cfg.stack_size(size);
+ builder = builder.stack_size(size);
}
- let main_handler = move || rustc_span::create_session_globals_then(edition, f);
-
- scoped_thread(cfg, main_handler)
-}
-
-/// Creates a new thread and forwards information in thread locals to it.
-/// The new thread runs the deadlock handler.
-/// Must only be called when a deadlock is about to happen.
-#[cfg(parallel_compiler)]
-unsafe fn handle_deadlock() {
- let registry = rustc_rayon_core::Registry::current();
-
- let query_map = tls::with(|tcx| {
- QueryCtxt::from_tcx(tcx)
- .try_collect_active_jobs()
- .expect("active jobs shouldn't be locked in deadlock handler")
- });
- thread::spawn(move || rustc_query_impl::deadlock(query_map, &registry));
+ // We build the session globals and run `f` on the spawned thread, because
+ // `SessionGlobals` does not impl `Send` in the non-parallel compiler.
+ thread::scope(|s| {
+ // `unwrap` is ok here because `spawn_scoped` only panics if the thread
+ // name contains null bytes.
+ let r = builder
+ .spawn_scoped(s, move || rustc_span::create_session_globals_then(edition, f))
+ .unwrap()
+ .join();
+
+ match r {
+ Ok(v) => v,
+ Err(e) => std::panic::resume_unwind(e),
+ }
+ })
}
#[cfg(parallel_compiler)]
-pub fn run_in_thread_pool_with_globals<F: FnOnce() -> R + Send, R: Send>(
+pub(crate) fn run_in_thread_pool_with_globals<F: FnOnce() -> R + Send, R: Send>(
edition: Edition,
threads: usize,
f: F,
) -> R {
- let mut config = rayon::ThreadPoolBuilder::new()
+ use rustc_data_structures::jobserver;
+ use rustc_middle::ty::tls;
+ use rustc_query_impl::{deadlock, QueryContext, QueryCtxt};
+
+ let mut builder = rayon::ThreadPoolBuilder::new()
.thread_name(|_| "rustc".to_string())
.acquire_thread_handler(jobserver::acquire_thread)
.release_thread_handler(jobserver::release_thread)
.num_threads(threads)
- .deadlock_handler(|| unsafe { handle_deadlock() });
-
+ .deadlock_handler(|| {
+ // On deadlock, creates a new thread and forwards information in thread
+ // locals to it. The new thread runs the deadlock handler.
+ let query_map = tls::with(|tcx| {
+ QueryCtxt::from_tcx(tcx)
+ .try_collect_active_jobs()
+ .expect("active jobs shouldn't be locked in deadlock handler")
+ });
+ let registry = rustc_rayon_core::Registry::current();
+ thread::spawn(move || deadlock(query_map, &registry));
+ });
if let Some(size) = get_stack_size() {
- config = config.stack_size(size);
+ builder = builder.stack_size(size);
}
- let with_pool = move |pool: &rayon::ThreadPool| pool.install(f);
-
+ // We create the session globals on the main thread, then create the thread
+ // pool. Upon creation, each worker thread created gets a copy of the
+ // session globals in TLS. This is possible because `SessionGlobals` impls
+ // `Send` in the parallel compiler.
rustc_span::create_session_globals_then(edition, || {
rustc_span::with_session_globals(|session_globals| {
- // The main handler runs for each Rayon worker thread and sets up
- // the thread local rustc uses. `session_globals` is captured and set
- // on the new threads.
- let main_handler = move |thread: rayon::ThreadBuilder| {
- rustc_span::set_session_globals_then(session_globals, || thread.run())
- };
-
- config.build_scoped(main_handler, with_pool).unwrap()
+ builder
+ .build_scoped(
+ // Initialize each new worker thread when created.
+ move |thread: rayon::ThreadBuilder| {
+ rustc_span::set_session_globals_then(session_globals, || thread.run())
+ },
+ // Run `f` on the first thread in the thread pool.
+ move |pool: &rayon::ThreadPool| pool.install(f),
+ )
+ .unwrap()
})
})
}
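For reference, a minimal sketch of the scoped pool-building pattern in the parallel branch above, written against plain `rayon` rather than rustc's `rustc-rayon` fork (the jobserver and deadlock handlers are therefore omitted, and the sketch assumes upstream `ThreadPoolBuilder::build_scoped` matches the fork's API). The first closure wraps every worker thread and must end by calling `thread.run()`; the second runs on the caller with a handle to the built pool:

    use rayon::prelude::*;

    fn main() {
        let sum = rayon::ThreadPoolBuilder::new()
            .thread_name(|i| format!("worker-{i}"))
            .num_threads(4)
            .build_scoped(
                // Runs once per worker thread; per-thread setup would go here
                // (rustc installs its session globals at this point), after
                // which `run` enters the worker's main loop.
                |thread| thread.run(),
                // Runs on the calling thread with a handle to the pool.
                |pool| pool.install(|| (0..1_000i32).into_par_iter().sum::<i32>()),
            )
            .unwrap();
        println!("{sum}");
    }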
@@ -559,7 +555,7 @@ pub fn collect_crate_types(session: &Session, attrs: &[ast::Attribute]) -> Vec<C
// command line, then reuse the empty `base` Vec to hold the types that
// will be found in crate attributes.
// JUSTIFICATION: before wrapper fn is available
- #[cfg_attr(not(bootstrap), allow(rustc::bad_opt_access))]
+ #[allow(rustc::bad_opt_access)]
let mut base = session.opts.crate_types.clone();
if base.is_empty() {
base.extend(attr_types);
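The non-parallel `run_in_thread_pool_with_globals` earlier in this file now relies on `std::thread::scope` plus `Builder::spawn_scoped` instead of the removed `scoped_thread`/`spawn_unchecked` helper. A self-contained sketch of that pattern, with illustrative names (`run_on_big_stack`, `worker`) that are not taken from the diff:

    use std::thread;

    // Run a non-'static closure on a named thread with a custom stack size
    // and propagate any panic to the caller -- the same shape as the
    // non-parallel path above, minus the session globals.
    fn run_on_big_stack<R: Send>(f: impl FnOnce() -> R + Send) -> R {
        let builder = thread::Builder::new()
            .name("worker".to_string())
            .stack_size(8 * 1024 * 1024);
        thread::scope(|s| {
            // `unwrap` assumes thread creation succeeds; spawning returns an
            // `io::Result` rather than requiring a 'static closure.
            let handle = builder.spawn_scoped(s, f).unwrap();
            match handle.join() {
                Ok(v) => v,
                Err(payload) => std::panic::resume_unwind(payload),
            }
        })
    }

    fn main() {
        let data = vec![1, 2, 3]; // borrowed by the closure; no 'static bound needed
        let sum: i32 = run_on_big_stack(|| data.iter().sum());
        println!("{sum}");
    }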
diff --git a/compiler/rustc_lexer/Cargo.toml b/compiler/rustc_lexer/Cargo.toml
index 35af11053..ad685c2ad 100644
--- a/compiler/rustc_lexer/Cargo.toml
+++ b/compiler/rustc_lexer/Cargo.toml
@@ -12,7 +12,6 @@ Rust lexer used by rustc. No stability guarantees are provided.
# Note: do not remove this blank `[lib]` section.
# This will be used when publishing this crate as `rustc-ap-rustc_lexer`.
[lib]
-doctest = false
# Note that this crate purposefully does not depend on other rustc crates
[dependencies]
diff --git a/compiler/rustc_lexer/src/cursor.rs b/compiler/rustc_lexer/src/cursor.rs
index 21557a9c8..eceef5980 100644
--- a/compiler/rustc_lexer/src/cursor.rs
+++ b/compiler/rustc_lexer/src/cursor.rs
@@ -4,8 +4,8 @@ use std::str::Chars;
///
/// Next characters can be peeked via `first` method,
/// and position can be shifted forward via `bump` method.
-pub(crate) struct Cursor<'a> {
- initial_len: usize,
+pub struct Cursor<'a> {
+ len_remaining: usize,
/// Iterator over chars. Slightly faster than a &str.
chars: Chars<'a>,
#[cfg(debug_assertions)]
@@ -15,9 +15,9 @@ pub(crate) struct Cursor<'a> {
pub(crate) const EOF_CHAR: char = '\0';
impl<'a> Cursor<'a> {
- pub(crate) fn new(input: &'a str) -> Cursor<'a> {
+ pub fn new(input: &'a str) -> Cursor<'a> {
Cursor {
- initial_len: input.len(),
+ len_remaining: input.len(),
chars: input.chars(),
#[cfg(debug_assertions)]
prev: EOF_CHAR,
@@ -61,13 +61,13 @@ impl<'a> Cursor<'a> {
}
/// Returns amount of already consumed symbols.
- pub(crate) fn len_consumed(&self) -> u32 {
- (self.initial_len - self.chars.as_str().len()) as u32
+ pub(crate) fn pos_within_token(&self) -> u32 {
+ (self.len_remaining - self.chars.as_str().len()) as u32
}
/// Resets the number of bytes consumed to 0.
- pub(crate) fn reset_len_consumed(&mut self) {
- self.initial_len = self.chars.as_str().len();
+ pub(crate) fn reset_pos_within_token(&mut self) {
+ self.len_remaining = self.chars.as_str().len();
}
/// Moves to the next character.
diff --git a/compiler/rustc_lexer/src/lib.rs b/compiler/rustc_lexer/src/lib.rs
index 6d311af90..51515976e 100644
--- a/compiler/rustc_lexer/src/lib.rs
+++ b/compiler/rustc_lexer/src/lib.rs
@@ -18,6 +18,8 @@
//! lexeme types.
//!
//! [`rustc_parse::lexer`]: ../rustc_parse/lexer/index.html
+#![deny(rustc::untranslatable_diagnostic)]
+#![deny(rustc::diagnostic_outside_of_impl)]
// We want to be able to build this crate with a stable compiler, so no
// `#![feature]` attributes should be added.
@@ -27,9 +29,11 @@ pub mod unescape;
#[cfg(test)]
mod tests;
+pub use crate::cursor::Cursor;
+
use self::LiteralKind::*;
use self::TokenKind::*;
-use crate::cursor::{Cursor, EOF_CHAR};
+use crate::cursor::EOF_CHAR;
use std::convert::TryFrom;
/// Parsed token.
@@ -53,29 +57,42 @@ pub enum TokenKind {
// Multi-char tokens:
/// "// comment"
LineComment { doc_style: Option<DocStyle> },
+
/// `/* block comment */`
///
- /// Block comments can be recursive, so the sequence like `/* /* */`
+ /// Block comments can be recursive, so a sequence like `/* /* */`
/// will not be considered terminated and will result in a parsing error.
BlockComment { doc_style: Option<DocStyle>, terminated: bool },
- /// Any whitespace characters sequence.
+
+ /// Any whitespace character sequence.
Whitespace,
+
/// "ident" or "continue"
- /// At this step keywords are also considered identifiers.
+ ///
+ /// At this step, keywords are also considered identifiers.
Ident,
+
/// Like the above, but containing invalid unicode codepoints.
InvalidIdent,
+
/// "r#ident"
RawIdent,
- /// An unknown prefix like `foo#`, `foo'`, `foo"`. Note that only the
+
+ /// An unknown prefix, like `foo#`, `foo'`, `foo"`.
+ ///
+ /// Note that only the
/// prefix (`foo`) is included in the token, not the separator (which is
/// lexed as its own distinct token). In Rust 2021 and later, reserved
/// prefixes are reported as errors; in earlier editions, they result in a
/// (allowed by default) lint, and are treated as regular identifier
/// tokens.
UnknownPrefix,
- /// "12_u8", "1.0e-40", "b"123"". See `LiteralKind` for more details.
+
+ /// Examples: `"12_u8"`, `"1.0e-40"`, `b"123`.
+ ///
+ /// See [LiteralKind] for more details.
Literal { kind: LiteralKind, suffix_start: u32 },
+
/// "'a"
Lifetime { starts_with_number: bool },
@@ -137,9 +154,12 @@ pub enum TokenKind {
/// Unknown token, not expected by the lexer, e.g. "№"
Unknown,
+
+ /// End of input.
+ Eof,
}
-#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord)]
+#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum DocStyle {
Outer,
Inner,
@@ -217,13 +237,6 @@ pub fn strip_shebang(input: &str) -> Option<usize> {
None
}
-/// Parses the first token from the provided input string.
-#[inline]
-pub fn first_token(input: &str) -> Token {
- debug_assert!(!input.is_empty());
- Cursor::new(input).advance_token()
-}
-
/// Validates a raw string literal. Used for getting more information about a
/// problem with a `RawStr`/`RawByteStr` with a `None` field.
#[inline]
@@ -241,12 +254,8 @@ pub fn validate_raw_str(input: &str, prefix_len: u32) -> Result<(), RawStrError>
pub fn tokenize(input: &str) -> impl Iterator<Item = Token> + '_ {
let mut cursor = Cursor::new(input);
std::iter::from_fn(move || {
- if cursor.is_eof() {
- None
- } else {
- cursor.reset_len_consumed();
- Some(cursor.advance_token())
- }
+ let token = cursor.advance_token();
+ if token.kind != TokenKind::Eof { Some(token) } else { None }
})
}
@@ -309,8 +318,11 @@ pub fn is_ident(string: &str) -> bool {
impl Cursor<'_> {
/// Parses a token from the input string.
- fn advance_token(&mut self) -> Token {
- let first_char = self.bump().unwrap();
+ pub fn advance_token(&mut self) -> Token {
+ let first_char = match self.bump() {
+ Some(c) => c,
+ None => return Token::new(TokenKind::Eof, 0),
+ };
let token_kind = match first_char {
// Slash, comment or block comment.
'/' => match self.first() {
@@ -327,7 +339,7 @@ impl Cursor<'_> {
('#', c1) if is_id_start(c1) => self.raw_ident(),
('#', _) | ('"', _) => {
let res = self.raw_double_quoted_string(1);
- let suffix_start = self.len_consumed();
+ let suffix_start = self.pos_within_token();
if res.is_ok() {
self.eat_literal_suffix();
}
@@ -342,7 +354,7 @@ impl Cursor<'_> {
('\'', _) => {
self.bump();
let terminated = self.single_quoted_string();
- let suffix_start = self.len_consumed();
+ let suffix_start = self.pos_within_token();
if terminated {
self.eat_literal_suffix();
}
@@ -352,7 +364,7 @@ impl Cursor<'_> {
('"', _) => {
self.bump();
let terminated = self.double_quoted_string();
- let suffix_start = self.len_consumed();
+ let suffix_start = self.pos_within_token();
if terminated {
self.eat_literal_suffix();
}
@@ -362,7 +374,7 @@ impl Cursor<'_> {
('r', '"') | ('r', '#') => {
self.bump();
let res = self.raw_double_quoted_string(2);
- let suffix_start = self.len_consumed();
+ let suffix_start = self.pos_within_token();
if res.is_ok() {
self.eat_literal_suffix();
}
@@ -379,7 +391,7 @@ impl Cursor<'_> {
// Numeric literal.
c @ '0'..='9' => {
let literal_kind = self.number(c);
- let suffix_start = self.len_consumed();
+ let suffix_start = self.pos_within_token();
self.eat_literal_suffix();
TokenKind::Literal { kind: literal_kind, suffix_start }
}
@@ -418,7 +430,7 @@ impl Cursor<'_> {
// String literal.
'"' => {
let terminated = self.double_quoted_string();
- let suffix_start = self.len_consumed();
+ let suffix_start = self.pos_within_token();
if terminated {
self.eat_literal_suffix();
}
@@ -431,7 +443,9 @@ impl Cursor<'_> {
}
_ => Unknown,
};
- Token::new(token_kind, self.len_consumed())
+ let res = Token::new(token_kind, self.pos_within_token());
+ self.reset_pos_within_token();
+ res
}
fn line_comment(&mut self) -> TokenKind {
@@ -616,7 +630,7 @@ impl Cursor<'_> {
if !can_be_a_lifetime {
let terminated = self.single_quoted_string();
- let suffix_start = self.len_consumed();
+ let suffix_start = self.pos_within_token();
if terminated {
self.eat_literal_suffix();
}
@@ -641,7 +655,7 @@ impl Cursor<'_> {
if self.first() == '\'' {
self.bump();
let kind = Char { terminated: true };
- Literal { kind, suffix_start: self.len_consumed() }
+ Literal { kind, suffix_start: self.pos_within_token() }
} else {
Lifetime { starts_with_number }
}
@@ -722,7 +736,7 @@ impl Cursor<'_> {
fn raw_string_unvalidated(&mut self, prefix_len: u32) -> Result<u32, RawStrError> {
debug_assert!(self.prev() == 'r');
- let start_pos = self.len_consumed();
+ let start_pos = self.pos_within_token();
let mut possible_terminator_offset = None;
let mut max_hashes = 0;
@@ -776,7 +790,7 @@ impl Cursor<'_> {
// Keep track of possible terminators to give a hint about
// where there might be a missing terminator
possible_terminator_offset =
- Some(self.len_consumed() - start_pos - n_end_hashes + prefix_len);
+ Some(self.pos_within_token() - start_pos - n_end_hashes + prefix_len);
max_hashes = n_end_hashes;
}
}
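With `first_token` removed, `Cursor` made public, and the new `TokenKind::Eof` variant, the lexer above can be driven directly; the reworked `tokenize` is essentially this loop. A sketch assuming the crate is usable under the name `rustc_lexer` (it is otherwise dependency-free):

    use rustc_lexer::{Cursor, TokenKind};

    fn main() {
        let src = "let x = 42; // trailing comment";
        let mut cursor = Cursor::new(src);
        loop {
            // `advance_token` now returns an `Eof` token at end of input
            // instead of requiring a separate `is_eof` check.
            let token = cursor.advance_token();
            if token.kind == TokenKind::Eof {
                break;
            }
            println!("{:?}, {} bytes", token.kind, token.len);
        }
    }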
diff --git a/compiler/rustc_lexer/src/unescape.rs b/compiler/rustc_lexer/src/unescape.rs
index 3da6bc146..8f64b5f51 100644
--- a/compiler/rustc_lexer/src/unescape.rs
+++ b/compiler/rustc_lexer/src/unescape.rs
@@ -93,7 +93,7 @@ where
// NOTE: Raw strings do not perform any explicit character escaping, here we
// only translate CRLF to LF and produce errors on bare CR.
Mode::RawStr | Mode::RawByteStr => {
- unescape_raw_str_or_byte_str(literal_text, mode, callback)
+ unescape_raw_str_or_raw_byte_str(literal_text, mode, callback)
}
}
}
@@ -105,7 +105,7 @@ pub fn unescape_byte_literal<F>(literal_text: &str, mode: Mode, callback: &mut F
where
F: FnMut(Range<usize>, Result<u8, EscapeError>),
{
- assert!(mode.is_bytes());
+ debug_assert!(mode.is_bytes());
unescape_literal(literal_text, mode, &mut |range, result| {
callback(range, result.map(byte_from_char));
})
@@ -129,7 +129,7 @@ pub fn unescape_byte(literal_text: &str) -> Result<u8, (usize, EscapeError)> {
}
/// What kind of literal do we parse.
-#[derive(Debug, Clone, Copy)]
+#[derive(Debug, Clone, Copy, PartialEq)]
pub enum Mode {
Char,
Str,
@@ -140,17 +140,13 @@ pub enum Mode {
}
impl Mode {
- pub fn in_single_quotes(self) -> bool {
+ pub fn in_double_quotes(self) -> bool {
match self {
- Mode::Char | Mode::Byte => true,
- Mode::Str | Mode::ByteStr | Mode::RawStr | Mode::RawByteStr => false,
+ Mode::Str | Mode::ByteStr | Mode::RawStr | Mode::RawByteStr => true,
+ Mode::Char | Mode::Byte => false,
}
}
- pub fn in_double_quotes(self) -> bool {
- !self.in_single_quotes()
- }
-
pub fn is_bytes(self) -> bool {
match self {
Mode::Byte | Mode::ByteStr | Mode::RawByteStr => true,
@@ -184,7 +180,7 @@ fn scan_escape(chars: &mut Chars<'_>, mode: Mode) -> Result<char, EscapeError> {
let value = hi * 16 + lo;
- // For a byte literal verify that it is within ASCII range.
+ // For a non-byte literal verify that it is within ASCII range.
if !mode.is_bytes() && !is_ascii(value) {
return Err(EscapeError::OutOfRangeHexEscape);
}
@@ -263,6 +259,7 @@ fn ascii_check(first_char: char, mode: Mode) -> Result<char, EscapeError> {
}
fn unescape_char_or_byte(chars: &mut Chars<'_>, mode: Mode) -> Result<char, EscapeError> {
+ debug_assert!(mode == Mode::Char || mode == Mode::Byte);
let first_char = chars.next().ok_or(EscapeError::ZeroChars)?;
let res = match first_char {
'\\' => scan_escape(chars, mode),
@@ -282,7 +279,7 @@ fn unescape_str_or_byte_str<F>(src: &str, mode: Mode, callback: &mut F)
where
F: FnMut(Range<usize>, Result<char, EscapeError>),
{
- assert!(mode.in_double_quotes());
+ debug_assert!(mode == Mode::Str || mode == Mode::ByteStr);
let initial_len = src.len();
let mut chars = src.chars();
while let Some(first_char) = chars.next() {
@@ -344,11 +341,11 @@ where
/// sequence of characters or errors.
/// NOTE: Raw strings do not perform any explicit character escaping, here we
/// only translate CRLF to LF and produce errors on bare CR.
-fn unescape_raw_str_or_byte_str<F>(literal_text: &str, mode: Mode, callback: &mut F)
+fn unescape_raw_str_or_raw_byte_str<F>(literal_text: &str, mode: Mode, callback: &mut F)
where
F: FnMut(Range<usize>, Result<char, EscapeError>),
{
- assert!(mode.in_double_quotes());
+ debug_assert!(mode == Mode::RawStr || mode == Mode::RawByteStr);
let initial_len = literal_text.len();
let mut chars = literal_text.chars();
@@ -368,7 +365,7 @@ where
fn byte_from_char(c: char) -> u8 {
let res = c as u32;
- assert!(res <= u8::MAX as u32, "guaranteed because of Mode::ByteStr");
+ debug_assert!(res <= u8::MAX as u32, "guaranteed because of Mode::ByteStr");
res as u8
}
diff --git a/compiler/rustc_lint/Cargo.toml b/compiler/rustc_lint/Cargo.toml
index 7c0f2c440..abe61406c 100644
--- a/compiler/rustc_lint/Cargo.toml
+++ b/compiler/rustc_lint/Cargo.toml
@@ -5,7 +5,7 @@ edition = "2021"
[dependencies]
tracing = "0.1"
-unicode-security = "0.0.5"
+unicode-security = "0.1.0"
rustc_middle = { path = "../rustc_middle" }
rustc_ast_pretty = { path = "../rustc_ast_pretty" }
rustc_attr = { path = "../rustc_attr" }
diff --git a/compiler/rustc_lint/src/array_into_iter.rs b/compiler/rustc_lint/src/array_into_iter.rs
index 121fefdc6..abebc533c 100644
--- a/compiler/rustc_lint/src/array_into_iter.rs
+++ b/compiler/rustc_lint/src/array_into_iter.rs
@@ -61,7 +61,7 @@ impl<'tcx> LateLintPass<'tcx> for ArrayIntoIter {
}
// We only care about method call expressions.
- if let hir::ExprKind::MethodCall(call, args, _) = &expr.kind {
+ if let hir::ExprKind::MethodCall(call, receiver_arg, ..) = &expr.kind {
if call.ident.name != sym::into_iter {
return;
}
@@ -75,7 +75,6 @@ impl<'tcx> LateLintPass<'tcx> for ArrayIntoIter {
};
// As this is a method call expression, we have at least one argument.
- let receiver_arg = &args[0];
let receiver_ty = cx.typeck_results().expr_ty(receiver_arg);
let adjustments = cx.typeck_results().expr_adjustments(receiver_arg);
@@ -119,37 +118,41 @@ impl<'tcx> LateLintPass<'tcx> for ArrayIntoIter {
// to an array or to a slice.
_ => bug!("array type coerced to something other than array or slice"),
};
- cx.struct_span_lint(ARRAY_INTO_ITER, call.ident.span, |lint| {
- let mut diag = lint.build(fluent::lint::array_into_iter);
- diag.set_arg("target", target);
- diag.span_suggestion(
- call.ident.span,
- fluent::lint::use_iter_suggestion,
- "iter",
- Applicability::MachineApplicable,
- );
- if self.for_expr_span == expr.span {
+ cx.struct_span_lint(
+ ARRAY_INTO_ITER,
+ call.ident.span,
+ fluent::lint_array_into_iter,
+ |diag| {
+ diag.set_arg("target", target);
diag.span_suggestion(
- receiver_arg.span.shrink_to_hi().to(expr.span.shrink_to_hi()),
- fluent::lint::remove_into_iter_suggestion,
- "",
- Applicability::MaybeIncorrect,
+ call.ident.span,
+ fluent::use_iter_suggestion,
+ "iter",
+ Applicability::MachineApplicable,
);
- } else if receiver_ty.is_array() {
- diag.multipart_suggestion(
- fluent::lint::use_explicit_into_iter_suggestion,
- vec![
- (expr.span.shrink_to_lo(), "IntoIterator::into_iter(".into()),
- (
- receiver_arg.span.shrink_to_hi().to(expr.span.shrink_to_hi()),
- ")".into(),
- ),
- ],
- Applicability::MaybeIncorrect,
- );
- }
- diag.emit();
- })
+ if self.for_expr_span == expr.span {
+ diag.span_suggestion(
+ receiver_arg.span.shrink_to_hi().to(expr.span.shrink_to_hi()),
+ fluent::remove_into_iter_suggestion,
+ "",
+ Applicability::MaybeIncorrect,
+ );
+ } else if receiver_ty.is_array() {
+ diag.multipart_suggestion(
+ fluent::use_explicit_into_iter_suggestion,
+ vec![
+ (expr.span.shrink_to_lo(), "IntoIterator::into_iter(".into()),
+ (
+ receiver_arg.span.shrink_to_hi().to(expr.span.shrink_to_hi()),
+ ")".into(),
+ ),
+ ],
+ Applicability::MaybeIncorrect,
+ );
+ }
+ diag
+ },
+ )
}
}
}
diff --git a/compiler/rustc_lint/src/builtin.rs b/compiler/rustc_lint/src/builtin.rs
index bd58021f7..d425adf47 100644
--- a/compiler/rustc_lint/src/builtin.rs
+++ b/compiler/rustc_lint/src/builtin.rs
@@ -21,6 +21,7 @@
//! `late_lint_methods!` invocation in `lib.rs`.
use crate::{
+ errors::BuiltinEllpisisInclusiveRangePatterns,
types::{transparent_newtype_field, CItemKind},
EarlyContext, EarlyLintPass, LateContext, LateLintPass, LintContext,
};
@@ -32,8 +33,8 @@ use rustc_ast_pretty::pprust::{self, expr_to_string};
use rustc_data_structures::fx::{FxHashMap, FxHashSet};
use rustc_data_structures::stack::ensure_sufficient_stack;
use rustc_errors::{
- fluent, Applicability, Diagnostic, DiagnosticMessage, DiagnosticStyledString,
- LintDiagnosticBuilder, MultiSpan,
+ fluent, Applicability, DelayDm, Diagnostic, DiagnosticBuilder, DiagnosticMessage,
+ DiagnosticStyledString, MultiSpan,
};
use rustc_feature::{deprecated_attributes, AttributeGate, BuiltinAttribute, GateIssue, Stability};
use rustc_hir as hir;
@@ -45,8 +46,7 @@ use rustc_middle::lint::in_external_macro;
use rustc_middle::ty::layout::{LayoutError, LayoutOf};
use rustc_middle::ty::print::with_no_trimmed_paths;
use rustc_middle::ty::subst::GenericArgKind;
-use rustc_middle::ty::Instance;
-use rustc_middle::ty::{self, Ty, TyCtxt};
+use rustc_middle::ty::{self, Instance, Ty, TyCtxt, VariantDef};
use rustc_session::lint::{BuiltinLintDiagnostics, FutureIncompatibilityReason};
use rustc_span::edition::Edition;
use rustc_span::source_map::Spanned;
@@ -58,7 +58,6 @@ use rustc_trait_selection::traits::{self, misc::can_type_implement_copy};
use crate::nonstandard_style::{method_context, MethodLateContext};
use std::fmt::Write;
-use tracing::{debug, trace};
// hardwired lints from librustc_middle
pub use rustc_session::lint::builtin::*;
@@ -98,30 +97,31 @@ fn pierce_parens(mut expr: &ast::Expr) -> &ast::Expr {
impl EarlyLintPass for WhileTrue {
fn check_expr(&mut self, cx: &EarlyContext<'_>, e: &ast::Expr) {
- if let ast::ExprKind::While(cond, _, label) = &e.kind {
- if let ast::ExprKind::Lit(ref lit) = pierce_parens(cond).kind {
- if let ast::LitKind::Bool(true) = lit.kind {
- if !lit.span.from_expansion() {
- let condition_span = e.span.with_hi(cond.span.hi());
- cx.struct_span_lint(WHILE_TRUE, condition_span, |lint| {
- lint.build(fluent::lint::builtin_while_true)
- .span_suggestion_short(
- condition_span,
- fluent::lint::suggestion,
- format!(
- "{}loop",
- label.map_or_else(String::new, |label| format!(
- "{}: ",
- label.ident,
- ))
- ),
- Applicability::MachineApplicable,
- )
- .emit();
- })
- }
- }
- }
+ if let ast::ExprKind::While(cond, _, label) = &e.kind
+ && let ast::ExprKind::Lit(ref lit) = pierce_parens(cond).kind
+ && let ast::LitKind::Bool(true) = lit.kind
+ && !lit.span.from_expansion()
+ {
+ let condition_span = e.span.with_hi(cond.span.hi());
+ cx.struct_span_lint(
+ WHILE_TRUE,
+ condition_span,
+ fluent::lint_builtin_while_true,
+ |lint| {
+ lint.span_suggestion_short(
+ condition_span,
+ fluent::suggestion,
+ format!(
+ "{}loop",
+ label.map_or_else(String::new, |label| format!(
+ "{}: ",
+ label.ident,
+ ))
+ ),
+ Applicability::MachineApplicable,
+ )
+ },
+ )
}
}
}
@@ -157,9 +157,12 @@ impl BoxPointers {
for leaf in ty.walk() {
if let GenericArgKind::Type(leaf_ty) = leaf.unpack() {
if leaf_ty.is_box() {
- cx.struct_span_lint(BOX_POINTERS, span, |lint| {
- lint.build(fluent::lint::builtin_box_pointers).set_arg("ty", ty).emit();
- });
+ cx.struct_span_lint(
+ BOX_POINTERS,
+ span,
+ fluent::lint_builtin_box_pointers,
+ |lint| lint.set_arg("ty", ty),
+ );
}
}
}
@@ -174,7 +177,7 @@ impl<'tcx> LateLintPass<'tcx> for BoxPointers {
| hir::ItemKind::Enum(..)
| hir::ItemKind::Struct(..)
| hir::ItemKind::Union(..) => {
- self.check_heap_type(cx, it.span, cx.tcx.type_of(it.def_id))
+ self.check_heap_type(cx, it.span, cx.tcx.type_of(it.owner_id))
}
_ => (),
}
@@ -258,28 +261,21 @@ impl<'tcx> LateLintPass<'tcx> for NonShorthandFieldPatterns {
if cx.tcx.find_field_index(ident, &variant)
== Some(cx.tcx.field_index(fieldpat.hir_id, cx.typeck_results()))
{
- cx.struct_span_lint(NON_SHORTHAND_FIELD_PATTERNS, fieldpat.span, |lint| {
- let binding = match binding_annot {
- hir::BindingAnnotation::Unannotated => None,
- hir::BindingAnnotation::Mutable => Some("mut"),
- hir::BindingAnnotation::Ref => Some("ref"),
- hir::BindingAnnotation::RefMut => Some("ref mut"),
- };
- let suggested_ident = if let Some(binding) = binding {
- format!("{} {}", binding, ident)
- } else {
- ident.to_string()
- };
- lint.build(fluent::lint::builtin_non_shorthand_field_patterns)
- .set_arg("ident", ident.clone())
- .span_suggestion(
+ cx.struct_span_lint(
+ NON_SHORTHAND_FIELD_PATTERNS,
+ fieldpat.span,
+ fluent::lint_builtin_non_shorthand_field_patterns,
+ |lint| {
+ let suggested_ident =
+ format!("{}{}", binding_annot.prefix_str(), ident);
+ lint.set_arg("ident", ident.clone()).span_suggestion(
fieldpat.span,
- fluent::lint::suggestion,
+ fluent::suggestion,
suggested_ident,
Applicability::MachineApplicable,
)
- .emit();
- });
+ },
+ );
}
}
}
@@ -319,14 +315,17 @@ impl UnsafeCode {
&self,
cx: &EarlyContext<'_>,
span: Span,
- decorate: impl for<'a> FnOnce(LintDiagnosticBuilder<'a, ()>),
+ msg: impl Into<DiagnosticMessage>,
+ decorate: impl for<'a, 'b> FnOnce(
+ &'b mut DiagnosticBuilder<'a, ()>,
+ ) -> &'b mut DiagnosticBuilder<'a, ()>,
) {
// This comes from a macro that has `#[allow_internal_unsafe]`.
if span.allows_unsafe() {
return;
}
- cx.struct_span_lint(UNSAFE_CODE, span, decorate);
+ cx.struct_span_lint(UNSAFE_CODE, span, msg, decorate);
}
fn report_overridden_symbol_name(
@@ -335,8 +334,8 @@ impl UnsafeCode {
span: Span,
msg: DiagnosticMessage,
) {
- self.report_unsafe(cx, span, |lint| {
- lint.build(msg).note(fluent::lint::builtin_overridden_symbol_name).emit();
+ self.report_unsafe(cx, span, msg, |lint| {
+ lint.note(fluent::lint_builtin_overridden_symbol_name)
})
}
@@ -346,8 +345,8 @@ impl UnsafeCode {
span: Span,
msg: DiagnosticMessage,
) {
- self.report_unsafe(cx, span, |lint| {
- lint.build(msg).note(fluent::lint::builtin_overridden_symbol_section).emit();
+ self.report_unsafe(cx, span, msg, |lint| {
+ lint.note(fluent::lint_builtin_overridden_symbol_section)
})
}
}
@@ -355,8 +354,8 @@ impl UnsafeCode {
impl EarlyLintPass for UnsafeCode {
fn check_attribute(&mut self, cx: &EarlyContext<'_>, attr: &ast::Attribute) {
if attr.has_name(sym::allow_internal_unsafe) {
- self.report_unsafe(cx, attr.span, |lint| {
- lint.build(fluent::lint::builtin_allow_internal_unsafe).emit();
+ self.report_unsafe(cx, attr.span, fluent::lint_builtin_allow_internal_unsafe, |lint| {
+ lint
});
}
}
@@ -365,31 +364,27 @@ impl EarlyLintPass for UnsafeCode {
if let ast::ExprKind::Block(ref blk, _) = e.kind {
// Don't warn about generated blocks; that'll just pollute the output.
if blk.rules == ast::BlockCheckMode::Unsafe(ast::UserProvided) {
- self.report_unsafe(cx, blk.span, |lint| {
- lint.build(fluent::lint::builtin_unsafe_block).emit();
- });
+ self.report_unsafe(cx, blk.span, fluent::lint_builtin_unsafe_block, |lint| lint);
}
}
}
fn check_item(&mut self, cx: &EarlyContext<'_>, it: &ast::Item) {
match it.kind {
- ast::ItemKind::Trait(box ast::Trait { unsafety: ast::Unsafe::Yes(_), .. }) => self
- .report_unsafe(cx, it.span, |lint| {
- lint.build(fluent::lint::builtin_unsafe_trait).emit();
- }),
+ ast::ItemKind::Trait(box ast::Trait { unsafety: ast::Unsafe::Yes(_), .. }) => {
+ self.report_unsafe(cx, it.span, fluent::lint_builtin_unsafe_trait, |lint| lint)
+ }
- ast::ItemKind::Impl(box ast::Impl { unsafety: ast::Unsafe::Yes(_), .. }) => self
- .report_unsafe(cx, it.span, |lint| {
- lint.build(fluent::lint::builtin_unsafe_impl).emit();
- }),
+ ast::ItemKind::Impl(box ast::Impl { unsafety: ast::Unsafe::Yes(_), .. }) => {
+ self.report_unsafe(cx, it.span, fluent::lint_builtin_unsafe_impl, |lint| lint)
+ }
ast::ItemKind::Fn(..) => {
if let Some(attr) = cx.sess().find_by_name(&it.attrs, sym::no_mangle) {
self.report_overridden_symbol_name(
cx,
attr.span,
- fluent::lint::builtin_no_mangle_fn,
+ fluent::lint_builtin_no_mangle_fn,
);
}
@@ -397,7 +392,7 @@ impl EarlyLintPass for UnsafeCode {
self.report_overridden_symbol_name(
cx,
attr.span,
- fluent::lint::builtin_export_name_fn,
+ fluent::lint_builtin_export_name_fn,
);
}
@@ -405,7 +400,7 @@ impl EarlyLintPass for UnsafeCode {
self.report_overridden_symbol_section(
cx,
attr.span,
- fluent::lint::builtin_link_section_fn,
+ fluent::lint_builtin_link_section_fn,
);
}
}
@@ -415,7 +410,7 @@ impl EarlyLintPass for UnsafeCode {
self.report_overridden_symbol_name(
cx,
attr.span,
- fluent::lint::builtin_no_mangle_static,
+ fluent::lint_builtin_no_mangle_static,
);
}
@@ -423,7 +418,7 @@ impl EarlyLintPass for UnsafeCode {
self.report_overridden_symbol_name(
cx,
attr.span,
- fluent::lint::builtin_export_name_static,
+ fluent::lint_builtin_export_name_static,
);
}
@@ -431,7 +426,7 @@ impl EarlyLintPass for UnsafeCode {
self.report_overridden_symbol_section(
cx,
attr.span,
- fluent::lint::builtin_link_section_static,
+ fluent::lint_builtin_link_section_static,
);
}
}
@@ -446,14 +441,14 @@ impl EarlyLintPass for UnsafeCode {
self.report_overridden_symbol_name(
cx,
attr.span,
- fluent::lint::builtin_no_mangle_method,
+ fluent::lint_builtin_no_mangle_method,
);
}
if let Some(attr) = cx.sess().find_by_name(&it.attrs, sym::export_name) {
self.report_overridden_symbol_name(
cx,
attr.span,
- fluent::lint::builtin_export_name_method,
+ fluent::lint_builtin_export_name_method,
);
}
}
@@ -471,13 +466,11 @@ impl EarlyLintPass for UnsafeCode {
{
let msg = match ctxt {
FnCtxt::Foreign => return,
- FnCtxt::Free => fluent::lint::builtin_decl_unsafe_fn,
- FnCtxt::Assoc(_) if body.is_none() => fluent::lint::builtin_decl_unsafe_method,
- FnCtxt::Assoc(_) => fluent::lint::builtin_impl_unsafe_method,
+ FnCtxt::Free => fluent::lint_builtin_decl_unsafe_fn,
+ FnCtxt::Assoc(_) if body.is_none() => fluent::lint_builtin_decl_unsafe_method,
+ FnCtxt::Assoc(_) => fluent::lint_builtin_impl_unsafe_method,
};
- self.report_unsafe(cx, span, |lint| {
- lint.build(msg).emit();
- });
+ self.report_unsafe(cx, span, msg, |lint| lint);
}
}
}
@@ -570,7 +563,7 @@ impl MissingDoc {
// It's an option so the crate root can also use this function (it doesn't
// have a `NodeId`).
if def_id != CRATE_DEF_ID {
- if !cx.access_levels.is_exported(def_id) {
+ if !cx.effective_visibilities.is_exported(def_id) {
return;
}
}
@@ -578,12 +571,12 @@ impl MissingDoc {
let attrs = cx.tcx.hir().attrs(cx.tcx.hir().local_def_id_to_hir_id(def_id));
let has_doc = attrs.iter().any(has_doc);
if !has_doc {
- cx.struct_span_lint(MISSING_DOCS, cx.tcx.def_span(def_id), |lint| {
- lint.build(fluent::lint::builtin_missing_doc)
- .set_arg("article", article)
- .set_arg("desc", desc)
- .emit();
- });
+ cx.struct_span_lint(
+ MISSING_DOCS,
+ cx.tcx.def_span(def_id),
+ fluent::lint_builtin_missing_doc,
+ |lint| lint.set_arg("article", article).set_arg("desc", desc),
+ );
}
}
}
@@ -613,9 +606,9 @@ impl<'tcx> LateLintPass<'tcx> for MissingDoc {
match it.kind {
hir::ItemKind::Trait(..) => {
// Issue #11592: traits are always considered exported, even when private.
- if cx.tcx.visibility(it.def_id)
+ if cx.tcx.visibility(it.owner_id)
== ty::Visibility::Restricted(
- cx.tcx.parent_module_from_def_id(it.def_id).to_def_id(),
+ cx.tcx.parent_module_from_def_id(it.owner_id.def_id).to_def_id(),
)
{
return;
@@ -634,15 +627,15 @@ impl<'tcx> LateLintPass<'tcx> for MissingDoc {
_ => return,
};
- let (article, desc) = cx.tcx.article_and_description(it.def_id.to_def_id());
+ let (article, desc) = cx.tcx.article_and_description(it.owner_id.to_def_id());
- self.check_missing_docs_attrs(cx, it.def_id, article, desc);
+ self.check_missing_docs_attrs(cx, it.owner_id.def_id, article, desc);
}
fn check_trait_item(&mut self, cx: &LateContext<'_>, trait_item: &hir::TraitItem<'_>) {
- let (article, desc) = cx.tcx.article_and_description(trait_item.def_id.to_def_id());
+ let (article, desc) = cx.tcx.article_and_description(trait_item.owner_id.to_def_id());
- self.check_missing_docs_attrs(cx, trait_item.def_id, article, desc);
+ self.check_missing_docs_attrs(cx, trait_item.owner_id.def_id, article, desc);
}
fn check_impl_item(&mut self, cx: &LateContext<'_>, impl_item: &hir::ImplItem<'_>) {
@@ -669,13 +662,13 @@ impl<'tcx> LateLintPass<'tcx> for MissingDoc {
}
}
- let (article, desc) = cx.tcx.article_and_description(impl_item.def_id.to_def_id());
- self.check_missing_docs_attrs(cx, impl_item.def_id, article, desc);
+ let (article, desc) = cx.tcx.article_and_description(impl_item.owner_id.to_def_id());
+ self.check_missing_docs_attrs(cx, impl_item.owner_id.def_id, article, desc);
}
fn check_foreign_item(&mut self, cx: &LateContext<'_>, foreign_item: &hir::ForeignItem<'_>) {
- let (article, desc) = cx.tcx.article_and_description(foreign_item.def_id.to_def_id());
- self.check_missing_docs_attrs(cx, foreign_item.def_id, article, desc);
+ let (article, desc) = cx.tcx.article_and_description(foreign_item.owner_id.to_def_id());
+ self.check_missing_docs_attrs(cx, foreign_item.owner_id.def_id, article, desc);
}
fn check_field_def(&mut self, cx: &LateContext<'_>, sf: &hir::FieldDef<'_>) {
@@ -728,7 +721,7 @@ declare_lint_pass!(MissingCopyImplementations => [MISSING_COPY_IMPLEMENTATIONS])
impl<'tcx> LateLintPass<'tcx> for MissingCopyImplementations {
fn check_item(&mut self, cx: &LateContext<'_>, item: &hir::Item<'_>) {
- if !cx.access_levels.is_reachable(item.def_id) {
+ if !cx.effective_visibilities.is_reachable(item.owner_id.def_id) {
return;
}
let (def, ty) = match item.kind {
@@ -736,21 +729,21 @@ impl<'tcx> LateLintPass<'tcx> for MissingCopyImplementations {
if !ast_generics.params.is_empty() {
return;
}
- let def = cx.tcx.adt_def(item.def_id);
+ let def = cx.tcx.adt_def(item.owner_id);
(def, cx.tcx.mk_adt(def, cx.tcx.intern_substs(&[])))
}
hir::ItemKind::Union(_, ref ast_generics) => {
if !ast_generics.params.is_empty() {
return;
}
- let def = cx.tcx.adt_def(item.def_id);
+ let def = cx.tcx.adt_def(item.owner_id);
(def, cx.tcx.mk_adt(def, cx.tcx.intern_substs(&[])))
}
hir::ItemKind::Enum(_, ref ast_generics) => {
if !ast_generics.params.is_empty() {
return;
}
- let def = cx.tcx.adt_def(item.def_id);
+ let def = cx.tcx.adt_def(item.owner_id);
(def, cx.tcx.mk_adt(def, cx.tcx.intern_substs(&[])))
}
_ => return,
@@ -759,7 +752,7 @@ impl<'tcx> LateLintPass<'tcx> for MissingCopyImplementations {
return;
}
let param_env = ty::ParamEnv::empty();
- if ty.is_copy_modulo_regions(cx.tcx.at(item.span), param_env) {
+ if ty.is_copy_modulo_regions(cx.tcx, param_env) {
return;
}
if can_type_implement_copy(
@@ -770,9 +763,12 @@ impl<'tcx> LateLintPass<'tcx> for MissingCopyImplementations {
)
.is_ok()
{
- cx.struct_span_lint(MISSING_COPY_IMPLEMENTATIONS, item.span, |lint| {
- lint.build(fluent::lint::builtin_missing_copy_impl).emit();
- })
+ cx.struct_span_lint(
+ MISSING_COPY_IMPLEMENTATIONS,
+ item.span,
+ fluent::lint_builtin_missing_copy_impl,
+ |lint| lint,
+ )
}
}
}
@@ -818,7 +814,7 @@ impl_lint_pass!(MissingDebugImplementations => [MISSING_DEBUG_IMPLEMENTATIONS]);
impl<'tcx> LateLintPass<'tcx> for MissingDebugImplementations {
fn check_item(&mut self, cx: &LateContext<'_>, item: &hir::Item<'_>) {
- if !cx.access_levels.is_reachable(item.def_id) {
+ if !cx.effective_visibilities.is_reachable(item.owner_id.def_id) {
return;
}
@@ -845,12 +841,13 @@ impl<'tcx> LateLintPass<'tcx> for MissingDebugImplementations {
debug!("{:?}", self.impling_types);
}
- if !self.impling_types.as_ref().unwrap().contains(&item.def_id) {
- cx.struct_span_lint(MISSING_DEBUG_IMPLEMENTATIONS, item.span, |lint| {
- lint.build(fluent::lint::builtin_missing_debug_impl)
- .set_arg("debug", cx.tcx.def_path_str(debug))
- .emit();
- });
+ if !self.impling_types.as_ref().unwrap().contains(&item.owner_id.def_id) {
+ cx.struct_span_lint(
+ MISSING_DEBUG_IMPLEMENTATIONS,
+ item.span,
+ fluent::lint_builtin_missing_debug_impl,
+ |lint| lint.set_arg("debug", cx.tcx.def_path_str(debug)),
+ );
}
}
}
@@ -918,24 +915,26 @@ impl EarlyLintPass for AnonymousParameters {
for arg in sig.decl.inputs.iter() {
if let ast::PatKind::Ident(_, ident, None) = arg.pat.kind {
if ident.name == kw::Empty {
- cx.struct_span_lint(ANONYMOUS_PARAMETERS, arg.pat.span, |lint| {
- let ty_snip = cx.sess().source_map().span_to_snippet(arg.ty.span);
+ let ty_snip = cx.sess().source_map().span_to_snippet(arg.ty.span);
- let (ty_snip, appl) = if let Ok(ref snip) = ty_snip {
- (snip.as_str(), Applicability::MachineApplicable)
- } else {
- ("<type>", Applicability::HasPlaceholders)
- };
-
- lint.build(fluent::lint::builtin_anonymous_params)
- .span_suggestion(
+ let (ty_snip, appl) = if let Ok(ref snip) = ty_snip {
+ (snip.as_str(), Applicability::MachineApplicable)
+ } else {
+ ("<type>", Applicability::HasPlaceholders)
+ };
+ cx.struct_span_lint(
+ ANONYMOUS_PARAMETERS,
+ arg.pat.span,
+ fluent::lint_builtin_anonymous_params,
+ |lint| {
+ lint.span_suggestion(
arg.pat.span,
- fluent::lint::suggestion,
+ fluent::suggestion,
format!("_: {}", ty_snip),
appl,
)
- .emit();
- })
+ },
+ )
}
}
}
@@ -970,38 +969,44 @@ impl EarlyLintPass for DeprecatedAttr {
_,
) = gate
{
- cx.struct_span_lint(DEPRECATED, attr.span, |lint| {
- // FIXME(davidtwco) translatable deprecated attr
- lint.build(fluent::lint::builtin_deprecated_attr_link)
- .set_arg("name", name)
- .set_arg("reason", reason)
- .set_arg("link", link)
- .span_suggestion_short(
- attr.span,
- suggestion.map(|s| s.into()).unwrap_or(
- fluent::lint::builtin_deprecated_attr_default_suggestion,
- ),
- "",
- Applicability::MachineApplicable,
- )
- .emit();
- });
+ // FIXME(davidtwco) translatable deprecated attr
+ cx.struct_span_lint(
+ DEPRECATED,
+ attr.span,
+ fluent::lint_builtin_deprecated_attr_link,
+ |lint| {
+ lint.set_arg("name", name)
+ .set_arg("reason", reason)
+ .set_arg("link", link)
+ .span_suggestion_short(
+ attr.span,
+ suggestion.map(|s| s.into()).unwrap_or(
+ fluent::lint_builtin_deprecated_attr_default_suggestion,
+ ),
+ "",
+ Applicability::MachineApplicable,
+ )
+ },
+ );
}
return;
}
}
if attr.has_name(sym::no_start) || attr.has_name(sym::crate_id) {
- cx.struct_span_lint(DEPRECATED, attr.span, |lint| {
- lint.build(fluent::lint::builtin_deprecated_attr_used)
- .set_arg("name", pprust::path_to_string(&attr.get_normal_item().path))
- .span_suggestion_short(
- attr.span,
- fluent::lint::builtin_deprecated_attr_default_suggestion,
- "",
- Applicability::MachineApplicable,
- )
- .emit();
- });
+ cx.struct_span_lint(
+ DEPRECATED,
+ attr.span,
+ fluent::lint_builtin_deprecated_attr_used,
+ |lint| {
+ lint.set_arg("name", pprust::path_to_string(&attr.get_normal_item().path))
+ .span_suggestion_short(
+ attr.span,
+ fluent::lint_builtin_deprecated_attr_default_suggestion,
+ "",
+ Applicability::MachineApplicable,
+ )
+ },
+ );
}
}
}
@@ -1028,20 +1033,21 @@ fn warn_if_doc(cx: &EarlyContext<'_>, node_span: Span, node_kind: &str, attrs: &
let span = sugared_span.take().unwrap_or(attr.span);
if is_doc_comment || attr.has_name(sym::doc) {
- cx.struct_span_lint(UNUSED_DOC_COMMENTS, span, |lint| {
- let mut err = lint.build(fluent::lint::builtin_unused_doc_comment);
- err.set_arg("kind", node_kind);
- err.span_label(node_span, fluent::lint::label);
- match attr.kind {
- AttrKind::DocComment(CommentKind::Line, _) | AttrKind::Normal(..) => {
- err.help(fluent::lint::plain_help);
- }
- AttrKind::DocComment(CommentKind::Block, _) => {
- err.help(fluent::lint::block_help);
- }
- }
- err.emit();
- });
+ cx.struct_span_lint(
+ UNUSED_DOC_COMMENTS,
+ span,
+ fluent::lint_builtin_unused_doc_comment,
+ |lint| {
+ lint.set_arg("kind", node_kind).span_label(node_span, fluent::label).help(
+ match attr.kind {
+ AttrKind::DocComment(CommentKind::Line, _) | AttrKind::Normal(..) => {
+ fluent::plain_help
+ }
+ AttrKind::DocComment(CommentKind::Block, _) => fluent::block_help,
+ },
+ )
+ },
+ );
}
}
}
@@ -1155,18 +1161,21 @@ impl<'tcx> LateLintPass<'tcx> for InvalidNoMangleItems {
match param.kind {
GenericParamKind::Lifetime { .. } => {}
GenericParamKind::Type { .. } | GenericParamKind::Const { .. } => {
- cx.struct_span_lint(NO_MANGLE_GENERIC_ITEMS, span, |lint| {
- lint.build(fluent::lint::builtin_no_mangle_generic)
- .span_suggestion_short(
+ cx.struct_span_lint(
+ NO_MANGLE_GENERIC_ITEMS,
+ span,
+ fluent::lint_builtin_no_mangle_generic,
+ |lint| {
+ lint.span_suggestion_short(
no_mangle_attr.span,
- fluent::lint::suggestion,
+ fluent::suggestion,
"",
// Use of `#[no_mangle]` suggests FFI intent; correct
// fix may be to monomorphize source by hand
Applicability::MaybeIncorrect,
)
- .emit();
- });
+ },
+ );
break;
}
}
@@ -1182,27 +1191,29 @@ impl<'tcx> LateLintPass<'tcx> for InvalidNoMangleItems {
if cx.sess().contains_name(attrs, sym::no_mangle) {
// Const items do not refer to a particular location in memory, and therefore
// don't have anything to attach a symbol to
- cx.struct_span_lint(NO_MANGLE_CONST_ITEMS, it.span, |lint| {
- let mut err = lint.build(fluent::lint::builtin_const_no_mangle);
-
- // account for "pub const" (#45562)
- let start = cx
- .tcx
- .sess
- .source_map()
- .span_to_snippet(it.span)
- .map(|snippet| snippet.find("const").unwrap_or(0))
- .unwrap_or(0) as u32;
- // `const` is 5 chars
- let const_span = it.span.with_hi(BytePos(it.span.lo().0 + start + 5));
- err.span_suggestion(
- const_span,
- fluent::lint::suggestion,
- "pub static",
- Applicability::MachineApplicable,
- );
- err.emit();
- });
+ cx.struct_span_lint(
+ NO_MANGLE_CONST_ITEMS,
+ it.span,
+ fluent::lint_builtin_const_no_mangle,
+ |lint| {
+ // account for "pub const" (#45562)
+ let start = cx
+ .tcx
+ .sess
+ .source_map()
+ .span_to_snippet(it.span)
+ .map(|snippet| snippet.find("const").unwrap_or(0))
+ .unwrap_or(0) as u32;
+ // `const` is 5 chars
+ let const_span = it.span.with_hi(BytePos(it.span.lo().0 + start + 5));
+ lint.span_suggestion(
+ const_span,
+ fluent::suggestion,
+ "pub static",
+ Applicability::MachineApplicable,
+ )
+ },
+ );
}
}
hir::ItemKind::Impl(hir::Impl { generics, items, .. }) => {
@@ -1215,7 +1226,7 @@ impl<'tcx> LateLintPass<'tcx> for InvalidNoMangleItems {
check_no_mangle_on_generic_fn(
no_mangle_attr,
Some(generics),
- cx.tcx.hir().get_generics(it.id.def_id).unwrap(),
+ cx.tcx.hir().get_generics(it.id.owner_id.def_id).unwrap(),
it.span,
);
}
@@ -1262,9 +1273,12 @@ impl<'tcx> LateLintPass<'tcx> for MutableTransmutes {
get_transmute_from_to(cx, expr).map(|(ty1, ty2)| (ty1.kind(), ty2.kind()))
{
if to_mt == hir::Mutability::Mut && from_mt == hir::Mutability::Not {
- cx.struct_span_lint(MUTABLE_TRANSMUTES, expr.span, |lint| {
- lint.build(fluent::lint::builtin_mutable_transmutes).emit();
- });
+ cx.struct_span_lint(
+ MUTABLE_TRANSMUTES,
+ expr.span,
+ fluent::lint_builtin_mutable_transmutes,
+ |lint| lint,
+ );
}
}
@@ -1312,9 +1326,12 @@ impl<'tcx> LateLintPass<'tcx> for UnstableFeatures {
if attr.has_name(sym::feature) {
if let Some(items) = attr.meta_item_list() {
for item in items {
- cx.struct_span_lint(UNSTABLE_FEATURES, item.span(), |lint| {
- lint.build(fluent::lint::builtin_unstable_features).emit();
- });
+ cx.struct_span_lint(
+ UNSTABLE_FEATURES,
+ item.span(),
+ fluent::lint_builtin_unstable_features,
+ |lint| lint,
+ );
}
}
}
@@ -1368,26 +1385,26 @@ impl UnreachablePub {
exportable: bool,
) {
let mut applicability = Applicability::MachineApplicable;
- if cx.tcx.visibility(def_id).is_public() && !cx.access_levels.is_reachable(def_id) {
+ if cx.tcx.visibility(def_id).is_public() && !cx.effective_visibilities.is_reachable(def_id)
+ {
if vis_span.from_expansion() {
applicability = Applicability::MaybeIncorrect;
}
let def_span = cx.tcx.def_span(def_id);
- cx.struct_span_lint(UNREACHABLE_PUB, def_span, |lint| {
- let mut err = lint.build(fluent::lint::builtin_unreachable_pub);
- err.set_arg("what", what);
-
- err.span_suggestion(
- vis_span,
- fluent::lint::suggestion,
- "pub(crate)",
- applicability,
- );
- if exportable {
- err.help(fluent::lint::help);
- }
- err.emit();
- });
+ cx.struct_span_lint(
+ UNREACHABLE_PUB,
+ def_span,
+ fluent::lint_builtin_unreachable_pub,
+ |lint| {
+ lint.set_arg("what", what);
+
+ lint.span_suggestion(vis_span, fluent::suggestion, "pub(crate)", applicability);
+ if exportable {
+ lint.help(fluent::help);
+ }
+ lint
+ },
+ );
}
}
}
@@ -1398,11 +1415,11 @@ impl<'tcx> LateLintPass<'tcx> for UnreachablePub {
if let hir::ItemKind::Use(_, hir::UseKind::ListStem) = &item.kind {
return;
}
- self.perform_lint(cx, "item", item.def_id, item.vis_span, true);
+ self.perform_lint(cx, "item", item.owner_id.def_id, item.vis_span, true);
}
fn check_foreign_item(&mut self, cx: &LateContext<'_>, foreign_item: &hir::ForeignItem<'tcx>) {
- self.perform_lint(cx, "item", foreign_item.def_id, foreign_item.vis_span, true);
+ self.perform_lint(cx, "item", foreign_item.owner_id.def_id, foreign_item.vis_span, true);
}
fn check_field_def(&mut self, cx: &LateContext<'_>, field: &hir::FieldDef<'_>) {
@@ -1412,8 +1429,8 @@ impl<'tcx> LateLintPass<'tcx> for UnreachablePub {
fn check_impl_item(&mut self, cx: &LateContext<'_>, impl_item: &hir::ImplItem<'_>) {
// Only lint inherent impl items.
- if cx.tcx.associated_item(impl_item.def_id).trait_item_def_id.is_none() {
- self.perform_lint(cx, "item", impl_item.def_id, impl_item.vis_span, false);
+ if cx.tcx.associated_item(impl_item.owner_id).trait_item_def_id.is_none() {
+ self.perform_lint(cx, "item", impl_item.owner_id.def_id, impl_item.vis_span, false);
}
}
}
@@ -1474,9 +1491,9 @@ impl TypeAliasBounds {
impl Visitor<'_> for WalkAssocTypes<'_> {
fn visit_qpath(&mut self, qpath: &hir::QPath<'_>, id: hir::HirId, span: Span) {
if TypeAliasBounds::is_type_variable_assoc(qpath) {
- self.err.span_help(span, fluent::lint::builtin_type_alias_bounds_help);
+ self.err.span_help(span, fluent::lint_builtin_type_alias_bounds_help);
}
- intravisit::walk_qpath(self, qpath, id, span)
+ intravisit::walk_qpath(self, qpath, id)
}
}
@@ -1517,36 +1534,34 @@ impl<'tcx> LateLintPass<'tcx> for TypeAliasBounds {
let mut suggested_changing_assoc_types = false;
if !where_spans.is_empty() {
- cx.lint(TYPE_ALIAS_BOUNDS, |lint| {
- let mut err = lint.build(fluent::lint::builtin_type_alias_where_clause);
- err.set_span(where_spans);
- err.span_suggestion(
+ cx.lint(TYPE_ALIAS_BOUNDS, fluent::lint_builtin_type_alias_where_clause, |lint| {
+ lint.set_span(where_spans);
+ lint.span_suggestion(
type_alias_generics.where_clause_span,
- fluent::lint::suggestion,
+ fluent::suggestion,
"",
Applicability::MachineApplicable,
);
if !suggested_changing_assoc_types {
- TypeAliasBounds::suggest_changing_assoc_types(ty, &mut err);
+ TypeAliasBounds::suggest_changing_assoc_types(ty, lint);
suggested_changing_assoc_types = true;
}
- err.emit();
+ lint
});
}
if !inline_spans.is_empty() {
- cx.lint(TYPE_ALIAS_BOUNDS, |lint| {
- let mut err = lint.build(fluent::lint::builtin_type_alias_generic_bounds);
- err.set_span(inline_spans);
- err.multipart_suggestion(
- fluent::lint::suggestion,
+ cx.lint(TYPE_ALIAS_BOUNDS, fluent::lint_builtin_type_alias_generic_bounds, |lint| {
+ lint.set_span(inline_spans);
+ lint.multipart_suggestion(
+ fluent::suggestion,
inline_sugg,
Applicability::MachineApplicable,
);
if !suggested_changing_assoc_types {
- TypeAliasBounds::suggest_changing_assoc_types(ty, &mut err);
+ TypeAliasBounds::suggest_changing_assoc_types(ty, lint);
}
- err.emit();
+ lint
});
}
}
@@ -1624,7 +1639,7 @@ impl<'tcx> LateLintPass<'tcx> for TrivialConstraints {
use rustc_middle::ty::PredicateKind::*;
if cx.tcx.features().trivial_bounds {
- let predicates = cx.tcx.predicates_of(item.def_id);
+ let predicates = cx.tcx.predicates_of(item.owner_id);
for &(predicate, span) in predicates.predicates {
let predicate_kind_name = match predicate.kind().skip_binder() {
Trait(..) => "trait",
@@ -1645,12 +1660,15 @@ impl<'tcx> LateLintPass<'tcx> for TrivialConstraints {
TypeWellFormedFromEnv(..) => continue,
};
if predicate.is_global() {
- cx.struct_span_lint(TRIVIAL_BOUNDS, span, |lint| {
- lint.build(fluent::lint::builtin_trivial_bounds)
- .set_arg("predicate_kind_name", predicate_kind_name)
- .set_arg("predicate", predicate)
- .emit();
- });
+ cx.struct_span_lint(
+ TRIVIAL_BOUNDS,
+ span,
+ fluent::lint_builtin_trivial_bounds,
+ |lint| {
+ lint.set_arg("predicate_kind_name", predicate_kind_name)
+ .set_arg("predicate", predicate)
+ },
+ );
}
}
}
@@ -1750,8 +1768,8 @@ impl EarlyLintPass for EllipsisInclusiveRangePatterns {
};
if let Some((start, end, join)) = endpoints {
- let msg = fluent::lint::builtin_ellipsis_inclusive_range_patterns;
- let suggestion = fluent::lint::suggestion;
+ let msg = fluent::lint_builtin_ellipsis_inclusive_range_patterns;
+ let suggestion = fluent::suggestion;
if parenthesise {
self.node_id = Some(pat.id);
let end = expr_to_string(&end);
@@ -1760,55 +1778,37 @@ impl EarlyLintPass for EllipsisInclusiveRangePatterns {
None => format!("&(..={})", end),
};
if join.edition() >= Edition::Edition2021 {
- let mut err = cx.sess().struct_span_err_with_code(
- pat.span,
- msg,
- rustc_errors::error_code!(E0783),
- );
- err.span_suggestion(
- pat.span,
- suggestion,
+ cx.sess().emit_err(BuiltinEllpisisInclusiveRangePatterns {
+ span: pat.span,
+ suggestion: pat.span,
replace,
- Applicability::MachineApplicable,
- )
- .emit();
+ });
} else {
- cx.struct_span_lint(ELLIPSIS_INCLUSIVE_RANGE_PATTERNS, pat.span, |lint| {
- lint.build(msg)
- .span_suggestion(
- pat.span,
- suggestion,
- replace,
- Applicability::MachineApplicable,
- )
- .emit();
+ cx.struct_span_lint(ELLIPSIS_INCLUSIVE_RANGE_PATTERNS, pat.span, msg, |lint| {
+ lint.span_suggestion(
+ pat.span,
+ suggestion,
+ replace,
+ Applicability::MachineApplicable,
+ )
});
}
} else {
let replace = "..=";
if join.edition() >= Edition::Edition2021 {
- let mut err = cx.sess().struct_span_err_with_code(
- pat.span,
- msg,
- rustc_errors::error_code!(E0783),
- );
- err.span_suggestion_short(
- join,
- suggestion,
- replace,
- Applicability::MachineApplicable,
- )
- .emit();
+ cx.sess().emit_err(BuiltinEllpisisInclusiveRangePatterns {
+ span: pat.span,
+ suggestion: join,
+ replace: replace.to_string(),
+ });
} else {
- cx.struct_span_lint(ELLIPSIS_INCLUSIVE_RANGE_PATTERNS, join, |lint| {
- lint.build(msg)
- .span_suggestion_short(
- join,
- suggestion,
- replace,
- Applicability::MachineApplicable,
- )
- .emit();
+ cx.struct_span_lint(ELLIPSIS_INCLUSIVE_RANGE_PATTERNS, join, msg, |lint| {
+ lint.span_suggestion_short(
+ join,
+ suggestion,
+ replace,
+ Applicability::MachineApplicable,
+ )
});
}
};
@@ -1864,7 +1864,7 @@ declare_lint! {
}
pub struct UnnameableTestItems {
- boundary: Option<LocalDefId>, // Id of the item under which things are not nameable
+ boundary: Option<hir::OwnerId>, // Id of the item under which things are not nameable
items_nameable: bool,
}
@@ -1882,21 +1882,24 @@ impl<'tcx> LateLintPass<'tcx> for UnnameableTestItems {
if let hir::ItemKind::Mod(..) = it.kind {
} else {
self.items_nameable = false;
- self.boundary = Some(it.def_id);
+ self.boundary = Some(it.owner_id);
}
return;
}
let attrs = cx.tcx.hir().attrs(it.hir_id());
if let Some(attr) = cx.sess().find_by_name(attrs, sym::rustc_test_marker) {
- cx.struct_span_lint(UNNAMEABLE_TEST_ITEMS, attr.span, |lint| {
- lint.build(fluent::lint::builtin_unnameable_test_items).emit();
- });
+ cx.struct_span_lint(
+ UNNAMEABLE_TEST_ITEMS,
+ attr.span,
+ fluent::lint_builtin_unnameable_test_items,
+ |lint| lint,
+ );
}
}
fn check_item_post(&mut self, _cx: &LateContext<'_>, it: &hir::Item<'_>) {
- if !self.items_nameable && self.boundary == Some(it.def_id) {
+ if !self.items_nameable && self.boundary == Some(it.owner_id) {
self.items_nameable = true;
}
}
@@ -2007,23 +2010,24 @@ impl KeywordIdents {
return;
}
- cx.struct_span_lint(KEYWORD_IDENTS, ident.span, |lint| {
- lint.build(fluent::lint::builtin_keyword_idents)
- .set_arg("kw", ident.clone())
- .set_arg("next", next_edition)
- .span_suggestion(
+ cx.struct_span_lint(
+ KEYWORD_IDENTS,
+ ident.span,
+ fluent::lint_builtin_keyword_idents,
+ |lint| {
+ lint.set_arg("kw", ident.clone()).set_arg("next", next_edition).span_suggestion(
ident.span,
- fluent::lint::suggestion,
+ fluent::suggestion,
format!("r#{}", ident),
Applicability::MachineApplicable,
)
- .emit();
- });
+ },
+ );
}
}
impl EarlyLintPass for KeywordIdents {
- fn check_mac_def(&mut self, cx: &EarlyContext<'_>, mac_def: &ast::MacroDef, _id: ast::NodeId) {
+ fn check_mac_def(&mut self, cx: &EarlyContext<'_>, mac_def: &ast::MacroDef) {
self.check_tokens(cx, mac_def.body.inner_tokens());
}
fn check_mac(&mut self, cx: &EarlyContext<'_>, mac: &ast::MacCall) {
@@ -2039,13 +2043,13 @@ declare_lint_pass!(ExplicitOutlivesRequirements => [EXPLICIT_OUTLIVES_REQUIREMEN
impl ExplicitOutlivesRequirements {
fn lifetimes_outliving_lifetime<'tcx>(
inferred_outlives: &'tcx [(ty::Predicate<'tcx>, Span)],
- index: u32,
+ def_id: DefId,
) -> Vec<ty::Region<'tcx>> {
inferred_outlives
.iter()
.filter_map(|(pred, _)| match pred.kind().skip_binder() {
ty::PredicateKind::RegionOutlives(ty::OutlivesPredicate(a, b)) => match *a {
- ty::ReEarlyBound(ebr) if ebr.index == index => Some(b),
+ ty::ReEarlyBound(ebr) if ebr.def_id == def_id => Some(b),
_ => None,
},
_ => None,
@@ -2082,8 +2086,12 @@ impl ExplicitOutlivesRequirements {
.filter_map(|(i, bound)| {
if let hir::GenericBound::Outlives(lifetime) = bound {
let is_inferred = match tcx.named_region(lifetime.hir_id) {
- Some(Region::EarlyBound(index, ..)) => inferred_outlives.iter().any(|r| {
- if let ty::ReEarlyBound(ebr) = **r { ebr.index == index } else { false }
+ Some(Region::EarlyBound(def_id)) => inferred_outlives.iter().any(|r| {
+ if let ty::ReEarlyBound(ebr) = **r {
+ ebr.def_id == def_id
+ } else {
+ false
+ }
}),
_ => false,
};
@@ -2157,7 +2165,7 @@ impl<'tcx> LateLintPass<'tcx> for ExplicitOutlivesRequirements {
fn check_item(&mut self, cx: &LateContext<'tcx>, item: &'tcx hir::Item<'_>) {
use rustc_middle::middle::resolve_lifetime::Region;
- let def_id = item.def_id;
+ let def_id = item.owner_id.def_id;
if let hir::ItemKind::Struct(_, ref hir_generics)
| hir::ItemKind::Enum(_, ref hir_generics)
| hir::ItemKind::Union(_, ref hir_generics) = item.kind
@@ -2177,11 +2185,14 @@ impl<'tcx> LateLintPass<'tcx> for ExplicitOutlivesRequirements {
for (i, where_predicate) in hir_generics.predicates.iter().enumerate() {
let (relevant_lifetimes, bounds, span, in_where_clause) = match where_predicate {
hir::WherePredicate::RegionPredicate(predicate) => {
- if let Some(Region::EarlyBound(index, ..)) =
+ if let Some(Region::EarlyBound(region_def_id)) =
cx.tcx.named_region(predicate.lifetime.hir_id)
{
(
- Self::lifetimes_outliving_lifetime(inferred_outlives, index),
+ Self::lifetimes_outliving_lifetime(
+ inferred_outlives,
+ region_def_id,
+ ),
&predicate.bounds,
predicate.span,
predicate.in_where_clause,
@@ -2262,19 +2273,21 @@ impl<'tcx> LateLintPass<'tcx> for ExplicitOutlivesRequirements {
}
if !lint_spans.is_empty() {
- cx.struct_span_lint(EXPLICIT_OUTLIVES_REQUIREMENTS, lint_spans.clone(), |lint| {
- lint.build(fluent::lint::builtin_explicit_outlives)
- .set_arg("count", bound_count)
- .multipart_suggestion(
- fluent::lint::suggestion,
+ cx.struct_span_lint(
+ EXPLICIT_OUTLIVES_REQUIREMENTS,
+ lint_spans.clone(),
+ fluent::lint_builtin_explicit_outlives,
+ |lint| {
+ lint.set_arg("count", bound_count).multipart_suggestion(
+ fluent::suggestion,
lint_spans
.into_iter()
.map(|span| (span, String::new()))
.collect::<Vec<_>>(),
Applicability::MachineApplicable,
)
- .emit();
- });
+ },
+ );
}
}
}
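
For orientation, a hedged sketch (not from the patch) of what `lifetimes_outliving_lifetime` ultimately feeds: the allow-by-default `explicit_outlives_requirements` lint, which suggests dropping outlives bounds that are already inferred from field types. The change above only swaps the lookup key for early-bound regions from a `u32` index to the region's `DefId`.

```rust
// Illustrative only: a bound the `explicit_outlives_requirements` lint reports as
// removable, because the compiler infers `T: 'a` from the `&'a T` field anyway.
#![warn(explicit_outlives_requirements)]

struct Wrapper<'a, T: 'a> {
    value: &'a T,
}

fn main() {
    let x = 5;
    let w = Wrapper { value: &x };
    let _ = *w.value;
}
```
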
@@ -2321,18 +2334,24 @@ impl EarlyLintPass for IncompleteFeatures {
.chain(features.declared_lib_features.iter().map(|(name, span)| (name, span)))
.filter(|(&name, _)| features.incomplete(name))
.for_each(|(&name, &span)| {
- cx.struct_span_lint(INCOMPLETE_FEATURES, span, |lint| {
- let mut builder = lint.build(fluent::lint::builtin_incomplete_features);
- builder.set_arg("name", name);
- if let Some(n) = rustc_feature::find_feature_issue(name, GateIssue::Language) {
- builder.set_arg("n", n);
- builder.note(fluent::lint::note);
- }
- if HAS_MIN_FEATURES.contains(&name) {
- builder.help(fluent::lint::help);
- }
- builder.emit();
- })
+ cx.struct_span_lint(
+ INCOMPLETE_FEATURES,
+ span,
+ fluent::lint_builtin_incomplete_features,
+ |lint| {
+ lint.set_arg("name", name);
+ if let Some(n) =
+ rustc_feature::find_feature_issue(name, GateIssue::Language)
+ {
+ lint.set_arg("n", n);
+ lint.note(fluent::note);
+ }
+ if HAS_MIN_FEATURES.contains(&name) {
+ lint.help(fluent::help);
+ }
+ lint
+ },
+ )
});
}
}
@@ -2419,13 +2438,13 @@ impl<'tcx> LateLintPass<'tcx> for InvalidValue {
_ => {}
}
}
- } else if let hir::ExprKind::MethodCall(_, ref args, _) = expr.kind {
+ } else if let hir::ExprKind::MethodCall(_, receiver, ..) = expr.kind {
// Find problematic calls to `MaybeUninit::assume_init`.
let def_id = cx.typeck_results().type_dependent_def_id(expr.hir_id)?;
if cx.tcx.is_diagnostic_item(sym::assume_init, def_id) {
// This is a call to *some* method named `assume_init`.
// See if the `self` parameter is one of the dangerous constructors.
- if let hir::ExprKind::Call(ref path_expr, _) = args[0].kind {
+ if let hir::ExprKind::Call(ref path_expr, _) = receiver.kind {
if let hir::ExprKind::Path(ref qpath) = path_expr.kind {
let def_id = cx.qpath_res(qpath, path_expr.hir_id).opt_def_id()?;
match cx.tcx.get_diagnostic_name(def_id) {
@@ -2441,12 +2460,27 @@ impl<'tcx> LateLintPass<'tcx> for InvalidValue {
None
}
- /// Test if this enum has several actually "existing" variants.
- /// Zero-sized uninhabited variants do not always have a tag assigned and thus do not "exist".
- fn is_multi_variant<'tcx>(adt: ty::AdtDef<'tcx>) -> bool {
- // As an approximation, we only count dataless variants. Those are definitely inhabited.
- let existing_variants = adt.variants().iter().filter(|v| v.fields.is_empty()).count();
- existing_variants > 1
+ fn variant_find_init_error<'tcx>(
+ cx: &LateContext<'tcx>,
+ variant: &VariantDef,
+ substs: ty::SubstsRef<'tcx>,
+ descr: &str,
+ init: InitKind,
+ ) -> Option<InitError> {
+ variant.fields.iter().find_map(|field| {
+ ty_find_init_error(cx, field.ty(cx.tcx, substs), init).map(|(mut msg, span)| {
+ if span.is_none() {
+ // Point to this field, should be helpful for figuring
+ // out where the source of the error is.
+ let span = cx.tcx.def_span(field.did);
+ write!(&mut msg, " (in this {descr})").unwrap();
+ (msg, Some(span))
+ } else {
+ // Just forward.
+ (msg, span)
+ }
+ })
+ })
}
/// Return `Some` only if we are sure this type does *not*
@@ -2475,7 +2509,16 @@ impl<'tcx> LateLintPass<'tcx> for InvalidValue {
Char if init == InitKind::Uninit => {
Some(("characters must be a valid Unicode codepoint".to_string(), None))
}
- // Recurse and checks for some compound types.
+ Int(_) | Uint(_) if init == InitKind::Uninit => {
+ Some(("integers must not be uninitialized".to_string(), None))
+ }
+ Float(_) if init == InitKind::Uninit => {
+ Some(("floats must not be uninitialized".to_string(), None))
+ }
+ RawPtr(_) if init == InitKind::Uninit => {
+ Some(("raw pointers must not be uninitialized".to_string(), None))
+ }
+ // Recurse and check for some compound types (but not unions).
Adt(adt_def, substs) if !adt_def.is_union() => {
// First check if this ADT has a layout attribute (like `NonNull` and friends).
use std::ops::Bound;
@@ -2483,7 +2526,11 @@ impl<'tcx> LateLintPass<'tcx> for InvalidValue {
// We exploit here that `layout_scalar_valid_range` will never
// return `Bound::Excluded`. (And we have tests checking that we
// handle the attribute correctly.)
- (Bound::Included(lo), _) if lo > 0 => {
+ // We don't add a span since users cannot declare such types anyway.
+ (Bound::Included(lo), Bound::Included(hi)) if 0 < lo && lo < hi => {
+ return Some((format!("`{}` must be non-null", ty), None));
+ }
+ (Bound::Included(lo), Bound::Unbounded) if 0 < lo => {
return Some((format!("`{}` must be non-null", ty), None));
}
(Bound::Included(_), _) | (_, Bound::Included(_))
@@ -2499,50 +2546,65 @@ impl<'tcx> LateLintPass<'tcx> for InvalidValue {
}
_ => {}
}
- // Now, recurse.
- match adt_def.variants().len() {
- 0 => Some(("enums with no variants have no valid value".to_string(), None)),
- 1 => {
- // Struct, or enum with exactly one variant.
- // Proceed recursively, check all fields.
- let variant = &adt_def.variant(VariantIdx::from_u32(0));
- variant.fields.iter().find_map(|field| {
- ty_find_init_error(cx, field.ty(cx.tcx, substs), init).map(
- |(mut msg, span)| {
- if span.is_none() {
- // Point to this field, should be helpful for figuring
- // out where the source of the error is.
- let span = cx.tcx.def_span(field.did);
- write!(
- &mut msg,
- " (in this {} field)",
- adt_def.descr()
- )
- .unwrap();
- (msg, Some(span))
- } else {
- // Just forward.
- (msg, span)
- }
- },
- )
- })
- }
- // Multi-variant enum.
- _ => {
- if init == InitKind::Uninit && is_multi_variant(*adt_def) {
- let span = cx.tcx.def_span(adt_def.did());
- Some((
- "enums have to be initialized to a variant".to_string(),
- Some(span),
- ))
- } else {
- // In principle, for zero-initialization we could figure out which variant corresponds
- // to tag 0, and check that... but for now we just accept all zero-initializations.
- None
- }
+ // Handle structs.
+ if adt_def.is_struct() {
+ return variant_find_init_error(
+ cx,
+ adt_def.non_enum_variant(),
+ substs,
+ "struct field",
+ init,
+ );
+ }
+ // And now, enums.
+ let span = cx.tcx.def_span(adt_def.did());
+ let mut potential_variants = adt_def.variants().iter().filter_map(|variant| {
+ let definitely_inhabited = match variant
+ .inhabited_predicate(cx.tcx, *adt_def)
+ .subst(cx.tcx, substs)
+ .apply_any_module(cx.tcx, cx.param_env)
+ {
+ // Entirely skip uninhabited variants.
+ Some(false) => return None,
+ // Forward the others, but remember which ones are definitely inhabited.
+ Some(true) => true,
+ None => false,
+ };
+ Some((variant, definitely_inhabited))
+ });
+ let Some(first_variant) = potential_variants.next() else {
+ return Some(("enums with no inhabited variants have no valid value".to_string(), Some(span)));
+ };
+ // So we have at least one potentially inhabited variant. Might we have two?
+ let Some(second_variant) = potential_variants.next() else {
+ // There is only one potentially inhabited variant. So we can recursively check that variant!
+ return variant_find_init_error(
+ cx,
+ &first_variant.0,
+ substs,
+ "field of the only potentially inhabited enum variant",
+ init,
+ );
+ };
+ // So we have at least two potentially inhabited variants.
+ // If we can prove that we have at least two *definitely* inhabited variants,
+ // then we have a tag and hence leaving this uninit is definitely disallowed.
+ // (Leaving it zeroed could be okay, depending on which variant is encoded as zero tag.)
+ if init == InitKind::Uninit {
+ let definitely_inhabited = (first_variant.1 as usize)
+ + (second_variant.1 as usize)
+ + potential_variants
+ .filter(|(_variant, definitely_inhabited)| *definitely_inhabited)
+ .count();
+ if definitely_inhabited > 1 {
+ return Some((
+ "enums with multiple inhabited variants have to be initialized to a variant".to_string(),
+ Some(span),
+ ));
}
}
+ // We couldn't find anything wrong here.
+ None
}
Tuple(..) => {
// Proceed recursively, check all fields.
@@ -2571,28 +2633,37 @@ impl<'tcx> LateLintPass<'tcx> for InvalidValue {
with_no_trimmed_paths!(ty_find_init_error(cx, conjured_ty, init))
{
// FIXME(davidtwco): make translatable
- cx.struct_span_lint(INVALID_VALUE, expr.span, |lint| {
- let mut err = lint.build(&format!(
- "the type `{}` does not permit {}",
- conjured_ty,
- match init {
- InitKind::Zeroed => "zero-initialization",
- InitKind::Uninit => "being left uninitialized",
- },
- ));
- err.span_label(expr.span, "this code causes undefined behavior when executed");
- err.span_label(
- expr.span,
- "help: use `MaybeUninit<T>` instead, \
+ cx.struct_span_lint(
+ INVALID_VALUE,
+ expr.span,
+ DelayDm(|| {
+ format!(
+ "the type `{}` does not permit {}",
+ conjured_ty,
+ match init {
+ InitKind::Zeroed => "zero-initialization",
+ InitKind::Uninit => "being left uninitialized",
+ },
+ )
+ }),
+ |lint| {
+ lint.span_label(
+ expr.span,
+ "this code causes undefined behavior when executed",
+ );
+ lint.span_label(
+ expr.span,
+ "help: use `MaybeUninit<T>` instead, \
and only call `assume_init` after initialization is done",
- );
- if let Some(span) = span {
- err.span_note(span, &msg);
- } else {
- err.note(&msg);
- }
- err.emit();
- });
+ );
+ if let Some(span) = span {
+ lint.span_note(span, &msg);
+ } else {
+ lint.note(&msg);
+ }
+ lint
+ },
+ );
}
}
}
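
As a hedged sketch (not part of the diff) of what the reworked inhabitedness logic is meant to catch: types with a restricted scalar range such as `NonNull<T>` must not be zeroed, and an enum with more than one definitely inhabited variant needs a valid tag, so it must not be left uninitialized.

```rust
// Illustrative only; do not call `flagged`: constructing these values is undefined
// behavior, which is exactly what the `invalid_value` lint warns about.
use std::mem::{self, MaybeUninit};
use std::ptr::NonNull;

#[allow(dead_code)]
enum TwoVariants {
    A(u8),
    B,
}

#[allow(dead_code)]
unsafe fn flagged() {
    // More than one definitely inhabited variant: the enum needs a valid tag,
    // so leaving it uninitialized is reported.
    let _e: TwoVariants = MaybeUninit::uninit().assume_init();
    // `NonNull<T>` excludes zero from its scalar validity range, so zeroing is reported.
    let _p: NonNull<u8> = mem::zeroed();
}

fn main() {}
```
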
@@ -2673,7 +2744,7 @@ impl ClashingExternDeclarations {
/// Insert a new foreign item into the seen set. If a symbol with the same name already exists
/// for the item, return its HirId without updating the set.
fn insert(&mut self, tcx: TyCtxt<'_>, fi: &hir::ForeignItem<'_>) -> Option<HirId> {
- let did = fi.def_id.to_def_id();
+ let did = fi.owner_id.to_def_id();
let instance = Instance::new(did, ty::List::identity_for_item(tcx, did));
let name = Symbol::intern(tcx.symbol_name(instance).name);
if let Some(&hir_id) = self.seen_decls.get(&name) {
@@ -2691,14 +2762,14 @@ impl ClashingExternDeclarations {
/// symbol's name.
fn name_of_extern_decl(tcx: TyCtxt<'_>, fi: &hir::ForeignItem<'_>) -> SymbolName {
if let Some((overridden_link_name, overridden_link_name_span)) =
- tcx.codegen_fn_attrs(fi.def_id).link_name.map(|overridden_link_name| {
+ tcx.codegen_fn_attrs(fi.owner_id).link_name.map(|overridden_link_name| {
// FIXME: Instead of searching through the attributes again to get span
// information, we could have codegen_fn_attrs also give span information back for
// where the attribute was defined. However, until this is found to be a
// bottleneck, this does just fine.
(
overridden_link_name,
- tcx.get_attr(fi.def_id.to_def_id(), sym::link_name).unwrap().span,
+ tcx.get_attr(fi.owner_id.to_def_id(), sym::link_name).unwrap().span,
)
})
{
@@ -2915,10 +2986,10 @@ impl<'tcx> LateLintPass<'tcx> for ClashingExternDeclarations {
let tcx = cx.tcx;
if let Some(existing_hid) = self.insert(tcx, this_fi) {
let existing_decl_ty = tcx.type_of(tcx.hir().local_def_id(existing_hid));
- let this_decl_ty = tcx.type_of(this_fi.def_id);
+ let this_decl_ty = tcx.type_of(this_fi.owner_id);
debug!(
"ClashingExternDeclarations: Comparing existing {:?}: {:?} to this {:?}: {:?}",
- existing_hid, existing_decl_ty, this_fi.def_id, this_decl_ty
+ existing_hid, existing_decl_ty, this_fi.owner_id, this_decl_ty
);
// Check that the declarations match.
if !Self::structurally_same_type(
@@ -2938,31 +3009,29 @@ impl<'tcx> LateLintPass<'tcx> for ClashingExternDeclarations {
SymbolName::Link(_, annot_span) => fi.span.to(annot_span),
};
// Finally, emit the diagnostic.
+
+ let msg = if orig.get_name() == this_fi.ident.name {
+ fluent::lint_builtin_clashing_extern_same_name
+ } else {
+ fluent::lint_builtin_clashing_extern_diff_name
+ };
tcx.struct_span_lint_hir(
CLASHING_EXTERN_DECLARATIONS,
this_fi.hir_id(),
get_relevant_span(this_fi),
+ msg,
|lint| {
let mut expected_str = DiagnosticStyledString::new();
expected_str.push(existing_decl_ty.fn_sig(tcx).to_string(), false);
let mut found_str = DiagnosticStyledString::new();
found_str.push(this_decl_ty.fn_sig(tcx).to_string(), true);
- lint.build(if orig.get_name() == this_fi.ident.name {
- fluent::lint::builtin_clashing_extern_same_name
- } else {
- fluent::lint::builtin_clashing_extern_diff_name
- })
- .set_arg("this_fi", this_fi.ident.name)
- .set_arg("orig", orig.get_name())
- .span_label(
- get_relevant_span(orig_fi),
- fluent::lint::previous_decl_label,
- )
- .span_label(get_relevant_span(this_fi), fluent::lint::mismatch_label)
- // FIXME(davidtwco): translatable expected/found
- .note_expected_found(&"", expected_str, &"", found_str)
- .emit();
+ lint.set_arg("this_fi", this_fi.ident.name)
+ .set_arg("orig", orig.get_name())
+ .span_label(get_relevant_span(orig_fi), fluent::previous_decl_label)
+ .span_label(get_relevant_span(this_fi), fluent::mismatch_label)
+ // FIXME(davidtwco): translatable expected/found
+ .note_expected_found(&"", expected_str, &"", found_str)
},
);
}
@@ -3043,11 +3112,12 @@ impl<'tcx> LateLintPass<'tcx> for DerefNullPtr {
if let rustc_hir::ExprKind::Unary(rustc_hir::UnOp::Deref, expr_deref) = expr.kind {
if is_null_ptr(cx, expr_deref) {
- cx.struct_span_lint(DEREF_NULLPTR, expr.span, |lint| {
- let mut err = lint.build(fluent::lint::builtin_deref_nullptr);
- err.span_label(expr.span, fluent::lint::label);
- err.emit();
- });
+ cx.struct_span_lint(
+ DEREF_NULLPTR,
+ expr.span,
+ fluent::lint_builtin_deref_nullptr,
+ |lint| lint.span_label(expr.span, fluent::label),
+ );
}
}
}
@@ -3060,6 +3130,7 @@ declare_lint! {
/// ### Example
///
/// ```rust,compile_fail
+ /// # #![feature(asm_experimental_arch)]
/// use std::arch::asm;
///
/// fn main() {
@@ -3157,9 +3228,8 @@ impl<'tcx> LateLintPass<'tcx> for NamedAsmLabels {
cx.lookup_with_diagnostics(
NAMED_ASM_LABELS,
Some(target_spans),
- |diag| {
- diag.build(fluent::lint::builtin_asm_labels).emit();
- },
+ fluent::lint_builtin_asm_labels,
+ |lint| lint,
BuiltinLintDiagnostics::NamedAsmLabel(
"only local labels of the form `<number>:` should be used in inline asm"
.to_string(),
@@ -3170,3 +3240,118 @@ impl<'tcx> LateLintPass<'tcx> for NamedAsmLabels {
}
}
}
+
+declare_lint! {
+ /// The `special_module_name` lint detects module
+ /// declarations for files that have a special meaning.
+ ///
+ /// ### Example
+ ///
+ /// ```rust,compile_fail
+ /// mod lib;
+ ///
+ /// fn main() {
+ /// lib::run();
+ /// }
+ /// ```
+ ///
+ /// {{produces}}
+ ///
+ /// ### Explanation
+ ///
+ /// Cargo recognizes `lib.rs` and `main.rs` as the root of a
+ /// library or binary crate, so declaring them as modules
+ /// will lead to miscompilation of the crate unless configured
+ /// explicitly.
+ ///
+ /// To access a library from a binary target within the same crate,
+ /// use `your_crate_name::` as the path instead of `lib::`:
+ ///
+ /// ```rust,compile_fail
+ /// // bar/src/lib.rs
+ /// fn run() {
+ /// // ...
+ /// }
+ ///
+ /// // bar/src/main.rs
+ /// fn main() {
+ /// bar::run();
+ /// }
+ /// ```
+ ///
+ /// Binary targets cannot be used as libraries and so declaring
+ /// one as a module is not allowed.
+ pub SPECIAL_MODULE_NAME,
+ Warn,
+ "module declarations for files with a special meaning",
+}
+
+declare_lint_pass!(SpecialModuleName => [SPECIAL_MODULE_NAME]);
+
+impl EarlyLintPass for SpecialModuleName {
+ fn check_crate(&mut self, cx: &EarlyContext<'_>, krate: &ast::Crate) {
+ for item in &krate.items {
+ if let ast::ItemKind::Mod(
+ _,
+ ast::ModKind::Unloaded | ast::ModKind::Loaded(_, ast::Inline::No, _),
+ ) = item.kind
+ {
+ if item.attrs.iter().any(|a| a.has_name(sym::path)) {
+ continue;
+ }
+
+ match item.ident.name.as_str() {
+ "lib" => cx.struct_span_lint(SPECIAL_MODULE_NAME, item.span, "found module declaration for lib.rs", |lint| {
+ lint
+ .note("lib.rs is the root of this crate's library target")
+ .help("to refer to it from other targets, use the library's name as the path")
+ }),
+ "main" => cx.struct_span_lint(SPECIAL_MODULE_NAME, item.span, "found module declaration for main.rs", |lint| {
+ lint
+ .note("a binary crate cannot be used as library")
+ }),
+ _ => continue
+ }
+ }
+ }
+ }
+}
+
+pub use rustc_session::lint::builtin::UNEXPECTED_CFGS;
+
+declare_lint_pass!(UnexpectedCfgs => [UNEXPECTED_CFGS]);
+
+impl EarlyLintPass for UnexpectedCfgs {
+ fn check_crate(&mut self, cx: &EarlyContext<'_>, _: &ast::Crate) {
+ let cfg = &cx.sess().parse_sess.config;
+ let check_cfg = &cx.sess().parse_sess.check_config;
+ for &(name, value) in cfg {
+ if let Some(names_valid) = &check_cfg.names_valid {
+ if !names_valid.contains(&name) {
+ cx.lookup(
+ UNEXPECTED_CFGS,
+ None::<MultiSpan>,
+ fluent::lint_builtin_unexpected_cli_config_name,
+ |diag| diag.help(fluent::help).set_arg("name", name),
+ );
+ }
+ }
+ if let Some(value) = value {
+ if let Some(values) = &check_cfg.values_valid.get(&name) {
+ if !values.contains(&value) {
+ cx.lookup(
+ UNEXPECTED_CFGS,
+ None::<MultiSpan>,
+ fluent::lint_builtin_unexpected_cli_config_value,
+ |diag| {
+ diag.help(fluent::help)
+ .set_arg("name", name)
+ .set_arg("value", value)
+ },
+ );
+ }
+ }
+ }
+ }
+ }
+}
diff --git a/compiler/rustc_lint/src/context.rs b/compiler/rustc_lint/src/context.rs
index b95fc341d..cec0003ff 100644
--- a/compiler/rustc_lint/src/context.rs
+++ b/compiler/rustc_lint/src/context.rs
@@ -16,20 +16,22 @@
use self::TargetLint::*;
+use crate::errors::{
+ CheckNameDeprecated, CheckNameUnknown, CheckNameUnknownTool, CheckNameWarning, RequestedLevel,
+ UnsupportedGroup,
+};
use crate::levels::LintLevelsBuilder;
use crate::passes::{EarlyLintPassObject, LateLintPassObject};
use rustc_ast::util::unicode::TEXT_FLOW_CONTROL_CHARS;
use rustc_data_structures::fx::FxHashMap;
use rustc_data_structures::sync;
-use rustc_errors::{add_elided_lifetime_in_path_suggestion, struct_span_err};
-use rustc_errors::{
- Applicability, DecorateLint, LintDiagnosticBuilder, MultiSpan, SuggestionStyle,
-};
+use rustc_errors::{add_elided_lifetime_in_path_suggestion, DiagnosticBuilder, DiagnosticMessage};
+use rustc_errors::{Applicability, DecorateLint, MultiSpan, SuggestionStyle};
use rustc_hir as hir;
use rustc_hir::def::Res;
use rustc_hir::def_id::{CrateNum, DefId};
use rustc_hir::definitions::{DefPathData, DisambiguatedDefPathData};
-use rustc_middle::middle::privacy::AccessLevels;
+use rustc_middle::middle::privacy::EffectiveVisibilities;
use rustc_middle::middle::stability;
use rustc_middle::ty::layout::{LayoutError, LayoutOfHelpers, TyAndLayout};
use rustc_middle::ty::print::with_no_trimmed_paths;
@@ -39,14 +41,17 @@ use rustc_session::lint::{FutureIncompatibleInfo, Level, Lint, LintBuffer, LintI
use rustc_session::Session;
use rustc_span::lev_distance::find_best_match_for_name;
use rustc_span::symbol::{sym, Ident, Symbol};
-use rustc_span::{BytePos, Span, DUMMY_SP};
+use rustc_span::{BytePos, Span};
use rustc_target::abi;
-use tracing::debug;
use std::cell::Cell;
use std::iter;
use std::slice;
+type EarlyLintPassFactory = dyn Fn() -> EarlyLintPassObject + sync::Send + sync::Sync;
+type LateLintPassFactory =
+ dyn for<'tcx> Fn(TyCtxt<'tcx>) -> LateLintPassObject<'tcx> + sync::Send + sync::Sync;
+
/// Information about the registered lints.
///
/// This is basically the subset of `Context` that we can
@@ -61,11 +66,11 @@ pub struct LintStore {
/// interior mutability, we don't enforce this (and lints should, in theory,
/// be compatible with being constructed more than once, though not
/// necessarily in a sane manner. This is safe though.)
- pub pre_expansion_passes: Vec<Box<dyn Fn() -> EarlyLintPassObject + sync::Send + sync::Sync>>,
- pub early_passes: Vec<Box<dyn Fn() -> EarlyLintPassObject + sync::Send + sync::Sync>>,
- pub late_passes: Vec<Box<dyn Fn() -> LateLintPassObject + sync::Send + sync::Sync>>,
+ pub pre_expansion_passes: Vec<Box<EarlyLintPassFactory>>,
+ pub early_passes: Vec<Box<EarlyLintPassFactory>>,
+ pub late_passes: Vec<Box<LateLintPassFactory>>,
/// This is unique in that we construct them per-module, so not once.
- pub late_module_passes: Vec<Box<dyn Fn() -> LateLintPassObject + sync::Send + sync::Sync>>,
+ pub late_module_passes: Vec<Box<LateLintPassFactory>>,
/// Lints indexed by name.
by_name: FxHashMap<String, TargetLint>,
@@ -183,14 +188,20 @@ impl LintStore {
pub fn register_late_pass(
&mut self,
- pass: impl Fn() -> LateLintPassObject + 'static + sync::Send + sync::Sync,
+ pass: impl for<'tcx> Fn(TyCtxt<'tcx>) -> LateLintPassObject<'tcx>
+ + 'static
+ + sync::Send
+ + sync::Sync,
) {
self.late_passes.push(Box::new(pass));
}
pub fn register_late_mod_pass(
&mut self,
- pass: impl Fn() -> LateLintPassObject + 'static + sync::Send + sync::Sync,
+ pass: impl for<'tcx> Fn(TyCtxt<'tcx>) -> LateLintPassObject<'tcx>
+ + 'static
+ + sync::Send
+ + sync::Sync,
) {
self.late_module_passes.push(Box::new(pass));
}
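
To make the signature change concrete, a hypothetical registration call under the new factory types; `MyEarlyPass` and `MyLatePass` are placeholders and not part of the patch. Late-pass constructors now receive the `TyCtxt`, even when they do not need it.

```rust
// Hypothetical sketch of registering passes with the updated `LintStore` API.
fn register_passes(store: &mut rustc_lint::LintStore) {
    // Early passes keep the zero-argument factory.
    store.register_early_pass(|| Box::new(MyEarlyPass));
    // Late passes are now built from a `TyCtxt<'tcx>`, so the closure takes it.
    store.register_late_pass(|_tcx| Box::new(MyLatePass));
}
```
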
@@ -326,68 +337,41 @@ impl LintStore {
) {
let (tool_name, lint_name_only) = parse_lint_and_tool_name(lint_name);
if lint_name_only == crate::WARNINGS.name_lower() && matches!(level, Level::ForceWarn(_)) {
- struct_span_err!(
- sess,
- DUMMY_SP,
- E0602,
- "`{}` lint group is not supported with ´--force-warn´",
- crate::WARNINGS.name_lower()
- )
- .emit();
+ sess.emit_err(UnsupportedGroup { lint_group: crate::WARNINGS.name_lower() });
return;
}
- let db = match self.check_lint_name(lint_name_only, tool_name, registered_tools) {
- CheckLintNameResult::Ok(_) => None,
- CheckLintNameResult::Warning(ref msg, _) => Some(sess.struct_warn(msg)),
+ let lint_name = lint_name.to_string();
+ match self.check_lint_name(lint_name_only, tool_name, registered_tools) {
+ CheckLintNameResult::Warning(msg, _) => {
+ sess.emit_warning(CheckNameWarning {
+ msg,
+ sub: RequestedLevel { level, lint_name },
+ });
+ }
CheckLintNameResult::NoLint(suggestion) => {
- let mut err =
- struct_span_err!(sess, DUMMY_SP, E0602, "unknown lint: `{}`", lint_name);
-
- if let Some(suggestion) = suggestion {
- err.help(&format!("did you mean: `{}`", suggestion));
+ sess.emit_err(CheckNameUnknown {
+ lint_name: lint_name.clone(),
+ suggestion,
+ sub: RequestedLevel { level, lint_name },
+ });
+ }
+ CheckLintNameResult::Tool(result) => {
+ if let Err((Some(_), new_name)) = result {
+ sess.emit_warning(CheckNameDeprecated {
+ lint_name: lint_name.clone(),
+ new_name,
+ sub: RequestedLevel { level, lint_name },
+ });
}
-
- Some(err.forget_guarantee())
}
- CheckLintNameResult::Tool(result) => match result {
- Err((Some(_), new_name)) => Some(sess.struct_warn(&format!(
- "lint name `{}` is deprecated \
- and does not have an effect anymore. \
- Use: {}",
- lint_name, new_name
- ))),
- _ => None,
- },
- CheckLintNameResult::NoTool => Some(
- struct_span_err!(
- sess,
- DUMMY_SP,
- E0602,
- "unknown lint tool: `{}`",
- tool_name.unwrap()
- )
- .forget_guarantee(),
- ),
+ CheckLintNameResult::NoTool => {
+ sess.emit_err(CheckNameUnknownTool {
+ tool_name: tool_name.unwrap(),
+ sub: RequestedLevel { level, lint_name },
+ });
+ }
+ _ => {}
};
-
- if let Some(mut db) = db {
- let msg = format!(
- "requested on the command line with `{} {}`",
- match level {
- Level::Allow => "-A",
- Level::Warn => "-W",
- Level::ForceWarn(_) => "--force-warn",
- Level::Deny => "-D",
- Level::Forbid => "-F",
- Level::Expect(_) => {
- unreachable!("lints with the level of `expect` should not run this code");
- }
- },
- lint_name
- );
- db.note(&msg);
- db.emit();
- }
}
/// True if this symbol represents a lint group name.
@@ -440,7 +424,7 @@ impl LintStore {
None => {
// 1. The tool is currently running, so this lint really doesn't exist.
// FIXME: should this handle tools that never register a lint, like rustfmt?
- tracing::debug!("lints={:?}", self.by_name.keys().collect::<Vec<_>>());
+ debug!("lints={:?}", self.by_name.keys().collect::<Vec<_>>());
let tool_prefix = format!("{}::", tool_name);
return if self.by_name.keys().any(|lint| lint.starts_with(&tool_prefix)) {
self.no_lint_suggestion(&complete_name)
@@ -533,7 +517,7 @@ impl LintStore {
CheckLintNameResult::Tool(Err((Some(slice::from_ref(id)), complete_name)))
}
Some(other) => {
- tracing::debug!("got renamed lint {:?}", other);
+ debug!("got renamed lint {:?}", other);
CheckLintNameResult::NoLint(None)
}
}
@@ -558,7 +542,7 @@ pub struct LateContext<'tcx> {
pub param_env: ty::ParamEnv<'tcx>,
/// Items accessible from the crate being checked.
- pub access_levels: &'tcx AccessLevels,
+ pub effective_visibilities: &'tcx EffectiveVisibilities,
/// The store of registered lints and the lint levels.
pub lint_store: &'tcx LintStore,
@@ -574,7 +558,7 @@ pub struct LateContext<'tcx> {
/// Context for lint checking of the AST, after expansion, before lowering to HIR.
pub struct EarlyContext<'a> {
- pub builder: LintLevelsBuilder<'a>,
+ pub builder: LintLevelsBuilder<'a, crate::levels::TopDown>,
pub buffered: LintBuffer,
}
@@ -582,7 +566,7 @@ pub trait LintPassObject: Sized {}
impl LintPassObject for EarlyLintPassObject {}
-impl LintPassObject for LateLintPassObject {}
+impl LintPassObject for LateLintPassObject<'_> {}
pub trait LintContext: Sized {
type PassObject: LintPassObject;
@@ -590,17 +574,23 @@ pub trait LintContext: Sized {
fn sess(&self) -> &Session;
fn lints(&self) -> &LintStore;
+ /// Emit a lint at the appropriate level, with an optional associated span and an existing diagnostic.
+ ///
+ /// The return value of the `decorate` closure is ignored; see [`struct_lint_level`] for a detailed explanation.
+ ///
+ /// [`struct_lint_level`]: rustc_middle::lint::struct_lint_level#decorate-signature
fn lookup_with_diagnostics(
&self,
lint: &'static Lint,
span: Option<impl Into<MultiSpan>>,
- decorate: impl for<'a> FnOnce(LintDiagnosticBuilder<'a, ()>),
+ msg: impl Into<DiagnosticMessage>,
+ decorate: impl for<'a, 'b> FnOnce(
+ &'b mut DiagnosticBuilder<'a, ()>,
+ ) -> &'b mut DiagnosticBuilder<'a, ()>,
diagnostic: BuiltinLintDiagnostics,
) {
- self.lookup(lint, span, |lint| {
- // We first generate a blank diagnostic.
- let mut db = lint.build("");
-
+ // `lookup` creates the diagnostic from `msg`; the closure below only decorates it.
+ self.lookup(lint, span, msg, |db| {
// Now, set up surrounding context.
let sess = self.sess();
match diagnostic {
@@ -674,7 +664,7 @@ pub trait LintContext: Sized {
) => {
add_elided_lifetime_in_path_suggestion(
sess.source_map(),
- &mut db,
+ db,
n,
path_span,
incl_angl_brckt,
@@ -710,7 +700,7 @@ pub trait LintContext: Sized {
}
}
BuiltinLintDiagnostics::DeprecatedMacro(suggestion, span) => {
- stability::deprecation_suggestion(&mut db, "macro", suggestion, span)
+ stability::deprecation_suggestion(db, "macro", suggestion, span)
}
BuiltinLintDiagnostics::UnusedDocComment(span) => {
db.span_label(span, "rustdoc does not generate documentation for macro invocations");
@@ -865,9 +855,14 @@ pub trait LintContext: Sized {
if let Some(positional_arg_to_replace) = position_sp_to_replace {
let name = if is_formatting_arg { named_arg_name + "$" } else { named_arg_name };
-
+ let span_to_replace = if let Ok(positional_arg_content) =
+ self.sess().source_map().span_to_snippet(positional_arg_to_replace) && positional_arg_content.starts_with(':') {
+ positional_arg_to_replace.shrink_to_lo()
+ } else {
+ positional_arg_to_replace
+ };
db.span_suggestion_verbose(
- positional_arg_to_replace,
+ span_to_replace,
"use the named argument by name to avoid ambiguity",
name,
Applicability::MaybeIncorrect,
@@ -876,17 +871,25 @@ pub trait LintContext: Sized {
}
}
// Rewrap `db`, and pass control to the user.
- decorate(LintDiagnosticBuilder::new(db));
+ decorate(db)
});
}
// FIXME: These methods should not take an Into<MultiSpan> -- instead, callers should need to
// set the span in their `decorate` function (preferably using set_span).
+ /// Emit a lint at the appropriate level, with an optional associated span.
+ ///
+ /// The return value of the `decorate` closure is ignored; see [`struct_lint_level`] for a detailed explanation.
+ ///
+ /// [`struct_lint_level`]: rustc_middle::lint::struct_lint_level#decorate-signature
fn lookup<S: Into<MultiSpan>>(
&self,
lint: &'static Lint,
span: Option<S>,
- decorate: impl for<'a> FnOnce(LintDiagnosticBuilder<'a, ()>),
+ msg: impl Into<DiagnosticMessage>,
+ decorate: impl for<'a, 'b> FnOnce(
+ &'b mut DiagnosticBuilder<'a, ()>,
+ ) -> &'b mut DiagnosticBuilder<'a, ()>,
);
/// Emit a lint at `span` from a lint struct (some type that implements `DecorateLint`,
@@ -897,31 +900,48 @@ pub trait LintContext: Sized {
span: S,
decorator: impl for<'a> DecorateLint<'a, ()>,
) {
- self.lookup(lint, Some(span), |diag| decorator.decorate_lint(diag));
+ self.lookup(lint, Some(span), decorator.msg(), |diag| decorator.decorate_lint(diag));
}
+ /// Emit a lint at the appropriate level, with an associated span.
+ ///
+ /// The return value of the `decorate` closure is ignored; see [`struct_lint_level`] for a detailed explanation.
+ ///
+ /// [`struct_lint_level`]: rustc_middle::lint::struct_lint_level#decorate-signature
fn struct_span_lint<S: Into<MultiSpan>>(
&self,
lint: &'static Lint,
span: S,
- decorate: impl for<'a> FnOnce(LintDiagnosticBuilder<'a, ()>),
+ msg: impl Into<DiagnosticMessage>,
+ decorate: impl for<'a, 'b> FnOnce(
+ &'b mut DiagnosticBuilder<'a, ()>,
+ ) -> &'b mut DiagnosticBuilder<'a, ()>,
) {
- self.lookup(lint, Some(span), decorate);
+ self.lookup(lint, Some(span), msg, decorate);
}
/// Emit a lint from a lint struct (some type that implements `DecorateLint`, typically
/// generated by `#[derive(LintDiagnostic)]`).
fn emit_lint(&self, lint: &'static Lint, decorator: impl for<'a> DecorateLint<'a, ()>) {
- self.lookup(lint, None as Option<Span>, |diag| decorator.decorate_lint(diag));
+ self.lookup(lint, None as Option<Span>, decorator.msg(), |diag| {
+ decorator.decorate_lint(diag)
+ });
}
/// Emit a lint at the appropriate level, with no associated span.
+ ///
+ /// The return value of the `decorate` closure is ignored; see [`struct_lint_level`] for a detailed explanation.
+ ///
+ /// [`struct_lint_level`]: rustc_middle::lint::struct_lint_level#decorate-signature
fn lint(
&self,
lint: &'static Lint,
- decorate: impl for<'a> FnOnce(LintDiagnosticBuilder<'a, ()>),
+ msg: impl Into<DiagnosticMessage>,
+ decorate: impl for<'a, 'b> FnOnce(
+ &'b mut DiagnosticBuilder<'a, ()>,
+ ) -> &'b mut DiagnosticBuilder<'a, ()>,
) {
- self.lookup(lint, None as Option<Span>, decorate);
+ self.lookup(lint, None as Option<Span>, msg, decorate);
}
/// This returns the lint level for the given lint at the current location.
@@ -968,8 +988,8 @@ impl<'a> EarlyContext<'a> {
}
}
-impl LintContext for LateContext<'_> {
- type PassObject = LateLintPassObject;
+impl<'tcx> LintContext for LateContext<'tcx> {
+ type PassObject = LateLintPassObject<'tcx>;
/// Gets the overall compiler `Session` object.
fn sess(&self) -> &Session {
@@ -984,13 +1004,16 @@ impl LintContext for LateContext<'_> {
&self,
lint: &'static Lint,
span: Option<S>,
- decorate: impl for<'a> FnOnce(LintDiagnosticBuilder<'a, ()>),
+ msg: impl Into<DiagnosticMessage>,
+ decorate: impl for<'a, 'b> FnOnce(
+ &'b mut DiagnosticBuilder<'a, ()>,
+ ) -> &'b mut DiagnosticBuilder<'a, ()>,
) {
let hir_id = self.last_node_with_lint_attrs;
match span {
- Some(s) => self.tcx.struct_span_lint_hir(lint, hir_id, s, decorate),
- None => self.tcx.struct_lint_node(lint, hir_id, decorate),
+ Some(s) => self.tcx.struct_span_lint_hir(lint, hir_id, s, msg, decorate),
+ None => self.tcx.struct_lint_node(lint, hir_id, msg, decorate),
}
}
@@ -1015,9 +1038,12 @@ impl LintContext for EarlyContext<'_> {
&self,
lint: &'static Lint,
span: Option<S>,
- decorate: impl for<'a> FnOnce(LintDiagnosticBuilder<'a, ()>),
+ msg: impl Into<DiagnosticMessage>,
+ decorate: impl for<'a, 'b> FnOnce(
+ &'b mut DiagnosticBuilder<'a, ()>,
+ ) -> &'b mut DiagnosticBuilder<'a, ()>,
) {
- self.builder.struct_lint(lint, span.map(|s| s.into()), decorate)
+ self.builder.struct_lint(lint, span.map(|s| s.into()), msg, decorate)
}
fn get_lint_level(&self, lint: &'static Lint) -> Level {
@@ -1057,7 +1083,7 @@ impl<'tcx> LateContext<'tcx> {
.filter(|typeck_results| typeck_results.hir_owner == id.owner)
.or_else(|| {
if self.tcx.has_typeck_results(id.owner.to_def_id()) {
- Some(self.tcx.typeck(id.owner))
+ Some(self.tcx.typeck(id.owner.def_id))
} else {
None
}
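
Taken together, the `context.rs` changes shift lint emission to a message-up-front style: the `DiagnosticMessage` (optionally wrapped in `DelayDm` when formatting is expensive) is passed alongside the lint, and the `decorate` closure only adds labels, notes, and suggestions to the builder it receives, returning it instead of calling `emit`. A hypothetical call site, with `MY_LINT` and the message text as placeholders:

```rust
// Hypothetical use of the new `struct_span_lint` signature from a late lint pass.
fn report(cx: &LateContext<'_>, span: Span) {
    cx.struct_span_lint(MY_LINT, span, "this construct looks suspicious", |lint| {
        // The closure decorates the builder and returns it; emission happens upstream.
        lint.span_label(span, "suspicious part")
            .note("consider a different formulation")
    });
}
```
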
diff --git a/compiler/rustc_lint/src/early.rs b/compiler/rustc_lint/src/early.rs
index d13711c3a..aee870dd2 100644
--- a/compiler/rustc_lint/src/early.rs
+++ b/compiler/rustc_lint/src/early.rs
@@ -26,7 +26,6 @@ use rustc_span::symbol::Ident;
use rustc_span::Span;
use std::slice;
-use tracing::debug;
macro_rules! run_early_pass { ($cx:expr, $f:ident, $($args:expr),*) => ({
$cx.pass.$f(&$cx.context, $($args),*);
@@ -44,9 +43,8 @@ impl<'a, T: EarlyLintPass> EarlyContextAndPass<'a, T> {
self.context.lookup_with_diagnostics(
lint_id.lint,
Some(span),
- |lint| {
- lint.build(&msg).emit();
- },
+ msg,
+ |lint| lint,
diagnostic,
);
}
@@ -60,6 +58,7 @@ impl<'a, T: EarlyLintPass> EarlyContextAndPass<'a, T> {
F: FnOnce(&mut Self),
{
let is_crate_node = id == ast::CRATE_NODE_ID;
+ debug!(?id);
let push = self.context.builder.push(attrs, is_crate_node, None);
self.check_id(id);
@@ -101,6 +100,12 @@ impl<'a, T: EarlyLintPass> ast_visit::Visitor<'a> for EarlyContextAndPass<'a, T>
run_early_pass!(self, check_pat_post, p);
}
+ fn visit_pat_field(&mut self, field: &'a ast::PatField) {
+ self.with_lint_attrs(field.id, &field.attrs, |cx| {
+ ast_visit::walk_pat_field(cx, field);
+ });
+ }
+
fn visit_anon_const(&mut self, c: &'a ast::AnonConst) {
self.check_id(c.id);
ast_visit::walk_anon_const(self, c);
@@ -142,7 +147,7 @@ impl<'a, T: EarlyLintPass> ast_visit::Visitor<'a> for EarlyContextAndPass<'a, T>
fn visit_fn(&mut self, fk: ast_visit::FnKind<'a>, span: Span, id: ast::NodeId) {
run_early_pass!(self, check_fn, fk, span, id);
self.check_id(id);
- ast_visit::walk_fn(self, fk, span);
+ ast_visit::walk_fn(self, fk);
// Explicitly check for lints associated with 'closure_id', since
// it does not have a corresponding AST node
@@ -219,9 +224,10 @@ impl<'a, T: EarlyLintPass> ast_visit::Visitor<'a> for EarlyContextAndPass<'a, T>
}
fn visit_generic_param(&mut self, param: &'a ast::GenericParam) {
- run_early_pass!(self, check_generic_param, param);
- self.check_id(param.id);
- ast_visit::walk_generic_param(self, param);
+ self.with_lint_attrs(param.id, &param.attrs, |cx| {
+ run_early_pass!(cx, check_generic_param, param);
+ ast_visit::walk_generic_param(cx, param);
+ });
}
fn visit_generics(&mut self, g: &'a ast::Generics) {
@@ -233,9 +239,9 @@ impl<'a, T: EarlyLintPass> ast_visit::Visitor<'a> for EarlyContextAndPass<'a, T>
ast_visit::walk_where_predicate(self, p);
}
- fn visit_poly_trait_ref(&mut self, t: &'a ast::PolyTraitRef, m: &'a ast::TraitBoundModifier) {
- run_early_pass!(self, check_poly_trait_ref, t, m);
- ast_visit::walk_poly_trait_ref(self, t, m);
+ fn visit_poly_trait_ref(&mut self, t: &'a ast::PolyTraitRef) {
+ run_early_pass!(self, check_poly_trait_ref, t);
+ ast_visit::walk_poly_trait_ref(self, t);
}
fn visit_assoc_item(&mut self, item: &'a ast::AssocItem, ctxt: ast_visit::AssocCtxt) {
@@ -260,9 +266,9 @@ impl<'a, T: EarlyLintPass> ast_visit::Visitor<'a> for EarlyContextAndPass<'a, T>
ast_visit::walk_path(self, p);
}
- fn visit_path_segment(&mut self, path_span: Span, s: &'a ast::PathSegment) {
+ fn visit_path_segment(&mut self, s: &'a ast::PathSegment) {
self.check_id(s.id);
- ast_visit::walk_path_segment(self, path_span, s);
+ ast_visit::walk_path_segment(self, s);
}
fn visit_attribute(&mut self, attr: &'a ast::Attribute) {
@@ -270,7 +276,7 @@ impl<'a, T: EarlyLintPass> ast_visit::Visitor<'a> for EarlyContextAndPass<'a, T>
}
fn visit_mac_def(&mut self, mac: &'a ast::MacroDef, id: ast::NodeId) {
- run_early_pass!(self, check_mac_def, mac, id);
+ run_early_pass!(self, check_mac_def, mac);
self.check_id(id);
}
@@ -403,7 +409,7 @@ pub fn check_ast_node<'a>(
if sess.opts.unstable_opts.no_interleave_lints {
for (i, pass) in passes.iter_mut().enumerate() {
buffered =
- sess.prof.extra_verbose_generic_activity("run_lint", pass.name()).run(|| {
+ sess.prof.verbose_generic_activity_with_arg("run_lint", pass.name()).run(|| {
early_lint_node(
sess,
!pre_expansion && i == 0,
diff --git a/compiler/rustc_lint/src/enum_intrinsics_non_enums.rs b/compiler/rustc_lint/src/enum_intrinsics_non_enums.rs
index f41ee6404..f9d746622 100644
--- a/compiler/rustc_lint/src/enum_intrinsics_non_enums.rs
+++ b/compiler/rustc_lint/src/enum_intrinsics_non_enums.rs
@@ -50,26 +50,24 @@ fn enforce_mem_discriminant(
) {
let ty_param = cx.typeck_results().node_substs(func_expr.hir_id).type_at(0);
if is_non_enum(ty_param) {
- cx.struct_span_lint(ENUM_INTRINSICS_NON_ENUMS, expr_span, |builder| {
- builder
- .build(fluent::lint::enum_intrinsics_mem_discriminant)
- .set_arg("ty_param", ty_param)
- .span_note(args_span, fluent::lint::note)
- .emit();
- });
+ cx.struct_span_lint(
+ ENUM_INTRINSICS_NON_ENUMS,
+ expr_span,
+ fluent::lint_enum_intrinsics_mem_discriminant,
+ |lint| lint.set_arg("ty_param", ty_param).span_note(args_span, fluent::note),
+ );
}
}
fn enforce_mem_variant_count(cx: &LateContext<'_>, func_expr: &hir::Expr<'_>, span: Span) {
let ty_param = cx.typeck_results().node_substs(func_expr.hir_id).type_at(0);
if is_non_enum(ty_param) {
- cx.struct_span_lint(ENUM_INTRINSICS_NON_ENUMS, span, |builder| {
- builder
- .build(fluent::lint::enum_intrinsics_mem_variant)
- .set_arg("ty_param", ty_param)
- .note(fluent::lint::note)
- .emit();
- });
+ cx.struct_span_lint(
+ ENUM_INTRINSICS_NON_ENUMS,
+ span,
+ fluent::lint_enum_intrinsics_mem_variant,
+ |lint| lint.set_arg("ty_param", ty_param).note(fluent::note),
+ );
}
}
diff --git a/compiler/rustc_lint/src/errors.rs b/compiler/rustc_lint/src/errors.rs
new file mode 100644
index 000000000..a49d1bdac
--- /dev/null
+++ b/compiler/rustc_lint/src/errors.rs
@@ -0,0 +1,150 @@
+use rustc_errors::{
+ fluent, AddToDiagnostic, Diagnostic, ErrorGuaranteed, Handler, IntoDiagnostic,
+ SubdiagnosticMessage,
+};
+use rustc_macros::{Diagnostic, Subdiagnostic};
+use rustc_session::lint::Level;
+use rustc_span::{Span, Symbol};
+
+#[derive(Diagnostic)]
+#[diag(lint_overruled_attribute, code = "E0453")]
+pub struct OverruledAttribute {
+ #[primary_span]
+ pub span: Span,
+ #[label]
+ pub overruled: Span,
+ pub lint_level: String,
+ pub lint_source: Symbol,
+ #[subdiagnostic]
+ pub sub: OverruledAttributeSub,
+}
+//
+pub enum OverruledAttributeSub {
+ DefaultSource { id: String },
+ NodeSource { span: Span, reason: Option<Symbol> },
+ CommandLineSource,
+}
+
+impl AddToDiagnostic for OverruledAttributeSub {
+ fn add_to_diagnostic_with<F>(self, diag: &mut Diagnostic, _: F)
+ where
+ F: Fn(&mut Diagnostic, SubdiagnosticMessage) -> SubdiagnosticMessage,
+ {
+ match self {
+ OverruledAttributeSub::DefaultSource { id } => {
+ diag.note(fluent::lint_default_source);
+ diag.set_arg("id", id);
+ }
+ OverruledAttributeSub::NodeSource { span, reason } => {
+ diag.span_label(span, fluent::lint_node_source);
+ if let Some(rationale) = reason {
+ diag.note(rationale.as_str());
+ }
+ }
+ OverruledAttributeSub::CommandLineSource => {
+ diag.note(fluent::lint_command_line_source);
+ }
+ }
+ }
+}
+
+#[derive(Diagnostic)]
+#[diag(lint_malformed_attribute, code = "E0452")]
+pub struct MalformedAttribute {
+ #[primary_span]
+ pub span: Span,
+ #[subdiagnostic]
+ pub sub: MalformedAttributeSub,
+}
+
+#[derive(Subdiagnostic)]
+pub enum MalformedAttributeSub {
+ #[label(lint_bad_attribute_argument)]
+ BadAttributeArgument(#[primary_span] Span),
+ #[label(lint_reason_must_be_string_literal)]
+ ReasonMustBeStringLiteral(#[primary_span] Span),
+ #[label(lint_reason_must_come_last)]
+ ReasonMustComeLast(#[primary_span] Span),
+}
+
+#[derive(Diagnostic)]
+#[diag(lint_unknown_tool_in_scoped_lint, code = "E0710")]
+pub struct UnknownToolInScopedLint {
+ #[primary_span]
+ pub span: Option<Span>,
+ pub tool_name: Symbol,
+ pub lint_name: String,
+ #[help]
+ pub is_nightly_build: Option<()>,
+}
+
+#[derive(Diagnostic)]
+#[diag(lint_builtin_ellipsis_inclusive_range_patterns, code = "E0783")]
+pub struct BuiltinEllpisisInclusiveRangePatterns {
+ #[primary_span]
+ pub span: Span,
+ #[suggestion_short(code = "{replace}", applicability = "machine-applicable")]
+ pub suggestion: Span,
+ pub replace: String,
+}
+
+#[derive(Subdiagnostic)]
+#[note(lint_requested_level)]
+pub struct RequestedLevel {
+ pub level: Level,
+ pub lint_name: String,
+}
+
+#[derive(Diagnostic)]
+#[diag(lint_unsupported_group, code = "E0602")]
+pub struct UnsupportedGroup {
+ pub lint_group: String,
+}
+
+pub struct CheckNameUnknown {
+ pub lint_name: String,
+ pub suggestion: Option<Symbol>,
+ pub sub: RequestedLevel,
+}
+
+impl IntoDiagnostic<'_> for CheckNameUnknown {
+ fn into_diagnostic(
+ self,
+ handler: &Handler,
+ ) -> rustc_errors::DiagnosticBuilder<'_, ErrorGuaranteed> {
+ let mut diag = handler.struct_err(fluent::lint_check_name_unknown);
+ diag.code(rustc_errors::error_code!(E0602));
+ if let Some(suggestion) = self.suggestion {
+ diag.help(fluent::help);
+ diag.set_arg("suggestion", suggestion);
+ }
+ diag.set_arg("lint_name", self.lint_name);
+ diag.subdiagnostic(self.sub);
+ diag
+ }
+}
+
+#[derive(Diagnostic)]
+#[diag(lint_check_name_unknown_tool, code = "E0602")]
+pub struct CheckNameUnknownTool {
+ pub tool_name: Symbol,
+ #[subdiagnostic]
+ pub sub: RequestedLevel,
+}
+
+#[derive(Diagnostic)]
+#[diag(lint_check_name_warning)]
+pub struct CheckNameWarning {
+ pub msg: String,
+ #[subdiagnostic]
+ pub sub: RequestedLevel,
+}
+
+#[derive(Diagnostic)]
+#[diag(lint_check_name_deprecated)]
+pub struct CheckNameDeprecated {
+ pub lint_name: String,
+ pub new_name: String,
+ #[subdiagnostic]
+ pub sub: RequestedLevel,
+}
diff --git a/compiler/rustc_lint/src/expect.rs b/compiler/rustc_lint/src/expect.rs
index 699e81543..cf8f31bcb 100644
--- a/compiler/rustc_lint/src/expect.rs
+++ b/compiler/rustc_lint/src/expect.rs
@@ -16,8 +16,10 @@ fn check_expectations(tcx: TyCtxt<'_>, tool_filter: Option<Symbol>) {
return;
}
+ let lint_expectations = tcx.lint_expectations(());
let fulfilled_expectations = tcx.sess.diagnostic().steal_fulfilled_expectation_ids();
- let lint_expectations = &tcx.lint_levels(()).lint_expectations;
+
+ tracing::debug!(?lint_expectations, ?fulfilled_expectations);
for (id, expectation) in lint_expectations {
// This check will always be true, since `lint_expectations` only
@@ -43,17 +45,17 @@ fn emit_unfulfilled_expectation_lint(
builtin::UNFULFILLED_LINT_EXPECTATIONS,
hir_id,
expectation.emission_span,
- |diag| {
- let mut diag = diag.build(fluent::lint::expectation);
+ fluent::lint_expectation,
+ |lint| {
if let Some(rationale) = expectation.reason {
- diag.note(rationale.as_str());
+ lint.note(rationale.as_str());
}
if expectation.is_unfulfilled_lint_expectations {
- diag.note(fluent::lint::note);
+ lint.note(fluent::note);
}
- diag.emit();
+ lint
},
);
}
diff --git a/compiler/rustc_lint/src/for_loops_over_fallibles.rs b/compiler/rustc_lint/src/for_loops_over_fallibles.rs
new file mode 100644
index 000000000..ed8d424e0
--- /dev/null
+++ b/compiler/rustc_lint/src/for_loops_over_fallibles.rs
@@ -0,0 +1,183 @@
+use crate::{LateContext, LateLintPass, LintContext};
+
+use hir::{Expr, Pat};
+use rustc_errors::{Applicability, DelayDm};
+use rustc_hir as hir;
+use rustc_infer::traits::TraitEngine;
+use rustc_infer::{infer::TyCtxtInferExt, traits::ObligationCause};
+use rustc_middle::ty::{self, List};
+use rustc_span::{sym, Span};
+use rustc_trait_selection::traits::TraitEngineExt;
+
+declare_lint! {
+ /// The `for_loops_over_fallibles` lint checks for `for` loops over `Option` or `Result` values.
+ ///
+ /// ### Example
+ ///
+ /// ```rust
+ /// let opt = Some(1);
+ /// for x in opt { /* ... */}
+ /// ```
+ ///
+ /// {{produces}}
+ ///
+ /// ### Explanation
+ ///
+ /// Both `Option` and `Result` implement the `IntoIterator` trait, which allows using them in a `for` loop.
+ /// A `for` loop over an `Option` or a `Result` will iterate either zero times (if the value is `None`/`Err(_)`)
+ /// or one time (if the value is `Some(_)`/`Ok(_)`). This is not very useful and is more clearly expressed
+ /// via `if let`.
+ ///
+ /// A `for` loop can also be written by accident when the intention is to call a function repeatedly
+ /// while it keeps returning `Some(_)`; in these cases a `while let` loop should be used instead.
+ ///
+ /// The "intended" use of `IntoIterator` implementations for `Option` and `Result` is passing them to
+ /// generic code that expects something implementing `IntoIterator`. For example using `.chain(option)`
+ /// to optionally add a value to an iterator.
+ pub FOR_LOOPS_OVER_FALLIBLES,
+ Warn,
+ "for-looping over an `Option` or a `Result`, which is more clearly expressed as an `if let`"
+}
+
+declare_lint_pass!(ForLoopsOverFallibles => [FOR_LOOPS_OVER_FALLIBLES]);
+
+impl<'tcx> LateLintPass<'tcx> for ForLoopsOverFallibles {
+ fn check_expr(&mut self, cx: &LateContext<'tcx>, expr: &'tcx Expr<'_>) {
+ let Some((pat, arg)) = extract_for_loop(expr) else { return };
+
+ let ty = cx.typeck_results().expr_ty(arg);
+
+ let &ty::Adt(adt, substs) = ty.kind() else { return };
+
+ let (article, ty, var) = match adt.did() {
+ did if cx.tcx.is_diagnostic_item(sym::Option, did) => ("an", "Option", "Some"),
+ did if cx.tcx.is_diagnostic_item(sym::Result, did) => ("a", "Result", "Ok"),
+ _ => return,
+ };
+
+ let msg = DelayDm(|| {
+ format!(
+ "for loop over {article} `{ty}`. This is more readably written as an `if let` statement",
+ )
+ });
+
+ cx.struct_span_lint(FOR_LOOPS_OVER_FALLIBLES, arg.span, msg, |lint| {
+ if let Some(recv) = extract_iterator_next_call(cx, arg)
+ && let Ok(recv_snip) = cx.sess().source_map().span_to_snippet(recv.span)
+ {
+ lint.span_suggestion(
+ recv.span.between(arg.span.shrink_to_hi()),
+ format!("to iterate over `{recv_snip}` remove the call to `next`"),
+ ".by_ref()",
+ Applicability::MaybeIncorrect
+ );
+ } else {
+ lint.multipart_suggestion_verbose(
+ format!("to check pattern in a loop use `while let`"),
+ vec![
+ // NB can't use `until` here because `expr.span` and `pat.span` have different syntax contexts
+ (expr.span.with_hi(pat.span.lo()), format!("while let {var}(")),
+ (pat.span.between(arg.span), format!(") = ")),
+ ],
+ Applicability::MaybeIncorrect
+ );
+ }
+
+ if suggest_question_mark(cx, adt, substs, expr.span) {
+ lint.span_suggestion(
+ arg.span.shrink_to_hi(),
+ "consider unwrapping the `Result` with `?` to iterate over its contents",
+ "?",
+ Applicability::MaybeIncorrect,
+ );
+ }
+
+ lint.multipart_suggestion_verbose(
+ "consider using `if let` to clear intent",
+ vec![
+ // NB can't use `until` here because `expr.span` and `pat.span` have different syntax contexts
+ (expr.span.with_hi(pat.span.lo()), format!("if let {var}(")),
+ (pat.span.between(arg.span), format!(") = ")),
+ ],
+ Applicability::MaybeIncorrect,
+ )
+ })
+ }
+}
+
+fn extract_for_loop<'tcx>(expr: &Expr<'tcx>) -> Option<(&'tcx Pat<'tcx>, &'tcx Expr<'tcx>)> {
+ if let hir::ExprKind::DropTemps(e) = expr.kind
+ && let hir::ExprKind::Match(iterexpr, [arm], hir::MatchSource::ForLoopDesugar) = e.kind
+ && let hir::ExprKind::Call(_, [arg]) = iterexpr.kind
+ && let hir::ExprKind::Loop(block, ..) = arm.body.kind
+ && let [stmt] = block.stmts
+ && let hir::StmtKind::Expr(e) = stmt.kind
+ && let hir::ExprKind::Match(_, [_, some_arm], _) = e.kind
+ && let hir::PatKind::Struct(_, [field], _) = some_arm.pat.kind
+ {
+ Some((field.pat, arg))
+ } else {
+ None
+ }
+}
+
+fn extract_iterator_next_call<'tcx>(
+ cx: &LateContext<'_>,
+ expr: &Expr<'tcx>,
+) -> Option<&'tcx Expr<'tcx>> {
+ // This won't work for `Iterator::next(iter)`; is this an issue?
+ if let hir::ExprKind::MethodCall(_, recv, _, _) = expr.kind
+ && cx.typeck_results().type_dependent_def_id(expr.hir_id) == cx.tcx.lang_items().next_fn()
+ {
+ Some(recv)
+ } else {
+ return None
+ }
+}
+
+fn suggest_question_mark<'tcx>(
+ cx: &LateContext<'tcx>,
+ adt: ty::AdtDef<'tcx>,
+ substs: &List<ty::GenericArg<'tcx>>,
+ span: Span,
+) -> bool {
+ let Some(body_id) = cx.enclosing_body else { return false };
+ let Some(into_iterator_did) = cx.tcx.get_diagnostic_item(sym::IntoIterator) else { return false };
+
+ if !cx.tcx.is_diagnostic_item(sym::Result, adt.did()) {
+ return false;
+ }
+
+ // Check that the function/closure/constant we are in has a `Result` type.
+ // Otherwise suggesting using `?` may not be a good idea.
+ {
+ let ty = cx.typeck_results().expr_ty(&cx.tcx.hir().body(body_id).value);
+ let ty::Adt(ret_adt, ..) = ty.kind() else { return false };
+ if !cx.tcx.is_diagnostic_item(sym::Result, ret_adt.did()) {
+ return false;
+ }
+ }
+
+ let ty = substs.type_at(0);
+ let infcx = cx.tcx.infer_ctxt().build();
+ let mut fulfill_cx = <dyn TraitEngine<'_>>::new(infcx.tcx);
+
+ let cause = ObligationCause::new(
+ span,
+ body_id.hir_id,
+ rustc_infer::traits::ObligationCauseCode::MiscObligation,
+ );
+ fulfill_cx.register_bound(
+ &infcx,
+ ty::ParamEnv::empty(),
+ // Erase any region vids from the type, which may not be resolved
+ infcx.tcx.erase_regions(ty),
+ into_iterator_did,
+ cause,
+ );
+
+ // Select all, including ambiguous predicates
+ let errors = fulfill_cx.select_all_or_error(&infcx);
+
+ errors.is_empty()
+}
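
A hedged sketch (not part of the new file) of the inputs this pass is written to catch: looping directly over an `Option`, and looping over `iterator.next()`, each of which runs the loop body at most once.

```rust
// Illustrative only: code the `for_loops_over_fallibles` lint reacts to.
fn main() {
    let opt = Some(42);

    // Reported: more clearly written as `if let Some(x) = opt { ... }`.
    for x in opt {
        println!("{x}");
    }

    // Reported: `for _ in iter.next()` loops at most once; the suggestion is to
    // iterate over the iterator itself (e.g. via `.by_ref()`) or use `while let`.
    let mut iter = [1, 2, 3].into_iter();
    for x in iter.next() {
        println!("{x}");
    }
}
```
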
diff --git a/compiler/rustc_lint/src/hidden_unicode_codepoints.rs b/compiler/rustc_lint/src/hidden_unicode_codepoints.rs
index fe2712525..7e884e990 100644
--- a/compiler/rustc_lint/src/hidden_unicode_codepoints.rs
+++ b/compiler/rustc_lint/src/hidden_unicode_codepoints.rs
@@ -60,52 +60,56 @@ impl HiddenUnicodeCodepoints {
})
.collect();
- cx.struct_span_lint(TEXT_DIRECTION_CODEPOINT_IN_LITERAL, span, |lint| {
- let mut err = lint.build(fluent::lint::hidden_unicode_codepoints);
- err.set_arg("label", label);
- err.set_arg("count", spans.len());
- err.span_label(span, fluent::lint::label);
- err.note(fluent::lint::note);
- if point_at_inner_spans {
- for (c, span) in &spans {
- err.span_label(*span, format!("{:?}", c));
+ cx.struct_span_lint(
+ TEXT_DIRECTION_CODEPOINT_IN_LITERAL,
+ span,
+ fluent::lint_hidden_unicode_codepoints,
+ |lint| {
+ lint.set_arg("label", label);
+ lint.set_arg("count", spans.len());
+ lint.span_label(span, fluent::label);
+ lint.note(fluent::note);
+ if point_at_inner_spans {
+ for (c, span) in &spans {
+ lint.span_label(*span, format!("{:?}", c));
+ }
}
- }
- if point_at_inner_spans && !spans.is_empty() {
- err.multipart_suggestion_with_style(
- fluent::lint::suggestion_remove,
- spans.iter().map(|(_, span)| (*span, "".to_string())).collect(),
- Applicability::MachineApplicable,
- SuggestionStyle::HideCodeAlways,
- );
- err.multipart_suggestion(
- fluent::lint::suggestion_escape,
- spans
- .into_iter()
- .map(|(c, span)| {
- let c = format!("{:?}", c);
- (span, c[1..c.len() - 1].to_string())
- })
- .collect(),
- Applicability::MachineApplicable,
- );
- } else {
- // FIXME: in other suggestions we've reversed the inner spans of doc comments. We
- // should do the same here to provide the same good suggestions as we do for
- // literals above.
- err.set_arg(
- "escaped",
- spans
- .into_iter()
- .map(|(c, _)| format!("{:?}", c))
- .collect::<Vec<String>>()
- .join(", "),
- );
- err.note(fluent::lint::suggestion_remove);
- err.note(fluent::lint::no_suggestion_note_escape);
- }
- err.emit();
- });
+ if point_at_inner_spans && !spans.is_empty() {
+ lint.multipart_suggestion_with_style(
+ fluent::suggestion_remove,
+ spans.iter().map(|(_, span)| (*span, "".to_string())).collect(),
+ Applicability::MachineApplicable,
+ SuggestionStyle::HideCodeAlways,
+ );
+ lint.multipart_suggestion(
+ fluent::suggestion_escape,
+ spans
+ .into_iter()
+ .map(|(c, span)| {
+ let c = format!("{:?}", c);
+ (span, c[1..c.len() - 1].to_string())
+ })
+ .collect(),
+ Applicability::MachineApplicable,
+ );
+ } else {
+ // FIXME: in other suggestions we've reversed the inner spans of doc comments. We
+ // should do the same here to provide the same good suggestions as we do for
+ // literals above.
+ lint.set_arg(
+ "escaped",
+ spans
+ .into_iter()
+ .map(|(c, _)| format!("{:?}", c))
+ .collect::<Vec<String>>()
+ .join(", "),
+ );
+ lint.note(fluent::suggestion_remove);
+ lint.note(fluent::no_suggestion_note_escape);
+ }
+ lint
+ },
+ );
}
}
impl EarlyLintPass for HiddenUnicodeCodepoints {
@@ -120,8 +124,8 @@ impl EarlyLintPass for HiddenUnicodeCodepoints {
fn check_expr(&mut self, cx: &EarlyContext<'_>, expr: &ast::Expr) {
// byte strings are already handled well enough by `EscapeError::NonAsciiCharInByteString`
let (text, span, padding) = match &expr.kind {
- ast::ExprKind::Lit(ast::Lit { token, kind, span }) => {
- let text = token.symbol;
+ ast::ExprKind::Lit(ast::Lit { token_lit, kind, span }) => {
+ let text = token_lit.symbol;
if !contains_text_flow_control_chars(text.as_str()) {
return;
}
diff --git a/compiler/rustc_lint/src/internal.rs b/compiler/rustc_lint/src/internal.rs
index c26d78247..11e4650cb 100644
--- a/compiler/rustc_lint/src/internal.rs
+++ b/compiler/rustc_lint/src/internal.rs
@@ -12,7 +12,6 @@ use rustc_session::{declare_lint_pass, declare_tool_lint};
use rustc_span::hygiene::{ExpnKind, MacroKind};
use rustc_span::symbol::{kw, sym, Symbol};
use rustc_span::Span;
-use tracing::debug;
declare_tool_lint! {
pub rustc::DEFAULT_HASH_TYPES,
@@ -35,13 +34,16 @@ impl LateLintPass<'_> for DefaultHashTypes {
Some(sym::HashSet) => "FxHashSet",
_ => return,
};
- cx.struct_span_lint(DEFAULT_HASH_TYPES, path.span, |lint| {
- lint.build(fluent::lint::default_hash_types)
- .set_arg("preferred", replace)
- .set_arg("used", cx.tcx.item_name(def_id))
- .note(fluent::lint::note)
- .emit();
- });
+ cx.struct_span_lint(
+ DEFAULT_HASH_TYPES,
+ path.span,
+ fluent::lint_default_hash_types,
+ |lint| {
+ lint.set_arg("preferred", replace)
+ .set_arg("used", cx.tcx.item_name(def_id))
+ .note(fluent::note)
+ },
+ );
}
}
@@ -52,7 +54,7 @@ fn typeck_results_of_method_fn<'tcx>(
expr: &Expr<'_>,
) -> Option<(Span, DefId, ty::subst::SubstsRef<'tcx>)> {
match expr.kind {
- ExprKind::MethodCall(segment, _, _)
+ ExprKind::MethodCall(segment, ..)
if let Some(def_id) = cx.typeck_results().type_dependent_def_id(expr.hir_id) =>
{
Some((segment.ident.span, def_id, cx.typeck_results().node_substs(expr.hir_id)))
@@ -81,12 +83,12 @@ impl LateLintPass<'_> for QueryStability {
if let Ok(Some(instance)) = ty::Instance::resolve(cx.tcx, cx.param_env, def_id, substs) {
let def_id = instance.def_id();
if cx.tcx.has_attr(def_id, sym::rustc_lint_query_instability) {
- cx.struct_span_lint(POTENTIAL_QUERY_INSTABILITY, span, |lint| {
- lint.build(fluent::lint::query_instability)
- .set_arg("query", cx.tcx.item_name(def_id))
- .note(fluent::lint::note)
- .emit();
- })
+ cx.struct_span_lint(
+ POTENTIAL_QUERY_INSTABILITY,
+ span,
+ fluent::lint_query_instability,
+ |lint| lint.set_arg("query", cx.tcx.item_name(def_id)).note(fluent::note),
+ )
}
}
}
@@ -119,21 +121,19 @@ impl<'tcx> LateLintPass<'tcx> for TyTyKind {
_: rustc_hir::HirId,
) {
if let Some(segment) = path.segments.iter().nth_back(1)
- && let Some(res) = &segment.res
- && lint_ty_kind_usage(cx, res)
+ && lint_ty_kind_usage(cx, &segment.res)
{
let span = path.span.with_hi(
segment.args.map_or(segment.ident.span, |a| a.span_ext).hi()
);
- cx.struct_span_lint(USAGE_OF_TY_TYKIND, path.span, |lint| {
- lint.build(fluent::lint::tykind_kind)
+ cx.struct_span_lint(USAGE_OF_TY_TYKIND, path.span, fluent::lint_tykind_kind, |lint| {
+ lint
.span_suggestion(
span,
- fluent::lint::suggestion,
+ fluent::suggestion,
"ty",
Applicability::MaybeIncorrect, // ty maybe needs an import
)
- .emit();
});
}
}
@@ -142,85 +142,85 @@ impl<'tcx> LateLintPass<'tcx> for TyTyKind {
match &ty.kind {
TyKind::Path(QPath::Resolved(_, path)) => {
if lint_ty_kind_usage(cx, &path.res) {
- cx.struct_span_lint(USAGE_OF_TY_TYKIND, path.span, |lint| {
- let hir = cx.tcx.hir();
- match hir.find(hir.get_parent_node(ty.hir_id)) {
- Some(Node::Pat(Pat {
- kind:
- PatKind::Path(qpath)
- | PatKind::TupleStruct(qpath, ..)
- | PatKind::Struct(qpath, ..),
- ..
- })) => {
- if let QPath::TypeRelative(qpath_ty, ..) = qpath
- && qpath_ty.hir_id == ty.hir_id
- {
- lint.build(fluent::lint::tykind_kind)
- .span_suggestion(
- path.span,
- fluent::lint::suggestion,
- "ty",
- Applicability::MaybeIncorrect, // ty maybe needs an import
- )
- .emit();
- return;
- }
+ let hir = cx.tcx.hir();
+ let span = match hir.find(hir.get_parent_node(ty.hir_id)) {
+ Some(Node::Pat(Pat {
+ kind:
+ PatKind::Path(qpath)
+ | PatKind::TupleStruct(qpath, ..)
+ | PatKind::Struct(qpath, ..),
+ ..
+ })) => {
+ if let QPath::TypeRelative(qpath_ty, ..) = qpath
+ && qpath_ty.hir_id == ty.hir_id
+ {
+ Some(path.span)
+ } else {
+ None
}
- Some(Node::Expr(Expr {
- kind: ExprKind::Path(qpath),
- ..
- })) => {
- if let QPath::TypeRelative(qpath_ty, ..) = qpath
- && qpath_ty.hir_id == ty.hir_id
- {
- lint.build(fluent::lint::tykind_kind)
- .span_suggestion(
- path.span,
- fluent::lint::suggestion,
- "ty",
- Applicability::MaybeIncorrect, // ty maybe needs an import
- )
- .emit();
- return;
- }
+ }
+ Some(Node::Expr(Expr {
+ kind: ExprKind::Path(qpath),
+ ..
+ })) => {
+ if let QPath::TypeRelative(qpath_ty, ..) = qpath
+ && qpath_ty.hir_id == ty.hir_id
+ {
+ Some(path.span)
+ } else {
+ None
}
- // Can't unify these two branches because qpath below is `&&` and above is `&`
- // and `A | B` paths don't play well together with adjustments, apparently.
- Some(Node::Expr(Expr {
- kind: ExprKind::Struct(qpath, ..),
- ..
- })) => {
- if let QPath::TypeRelative(qpath_ty, ..) = qpath
- && qpath_ty.hir_id == ty.hir_id
- {
- lint.build(fluent::lint::tykind_kind)
- .span_suggestion(
- path.span,
- fluent::lint::suggestion,
- "ty",
- Applicability::MaybeIncorrect, // ty maybe needs an import
- )
- .emit();
- return;
- }
+ }
+ // Can't unify these two branches because qpath below is `&&` and above is `&`
+ // and `A | B` paths don't play well together with adjustments, apparently.
+ Some(Node::Expr(Expr {
+ kind: ExprKind::Struct(qpath, ..),
+ ..
+ })) => {
+ if let QPath::TypeRelative(qpath_ty, ..) = qpath
+ && qpath_ty.hir_id == ty.hir_id
+ {
+ Some(path.span)
+ } else {
+ None
}
- _ => {}
}
- lint.build(fluent::lint::tykind).help(fluent::lint::help).emit();
- })
+ _ => None
+ };
+
+ match span {
+ Some(span) => {
+ cx.struct_span_lint(
+ USAGE_OF_TY_TYKIND,
+ path.span,
+ fluent::lint_tykind_kind,
+ |lint| lint.span_suggestion(
+ span,
+ fluent::suggestion,
+ "ty",
+ Applicability::MaybeIncorrect, // ty maybe needs an import
+ )
+ )
+ },
+ None => cx.struct_span_lint(
+ USAGE_OF_TY_TYKIND,
+ path.span,
+ fluent::lint_tykind,
+ |lint| lint.help(fluent::help)
+ )
+ }
} else if !ty.span.from_expansion() && let Some(t) = is_ty_or_ty_ctxt(cx, &path) {
if path.segments.len() > 1 {
- cx.struct_span_lint(USAGE_OF_QUALIFIED_TY, path.span, |lint| {
- lint.build(fluent::lint::ty_qualified)
+ cx.struct_span_lint(USAGE_OF_QUALIFIED_TY, path.span, fluent::lint_ty_qualified, |lint| {
+ lint
.set_arg("ty", t.clone())
.span_suggestion(
path.span,
- fluent::lint::suggestion,
+ fluent::suggestion,
t,
// The import probably needs to be changed
Applicability::MaybeIncorrect,
)
- .emit();
})
}
}
@@ -246,7 +246,7 @@ fn is_ty_or_ty_ctxt(cx: &LateContext<'_>, path: &Path<'_>) -> Option<String> {
}
}
// Only lint on `&Ty` and `&TyCtxt` if it is used outside of a trait.
- Res::SelfTy { trait_: None, alias_to: Some((did, _)) } => {
+ Res::SelfTyAlias { alias_to: did, is_trait_impl: false, .. } => {
if let ty::Adt(adt, substs) = cx.tcx.type_of(did).kind() {
if let Some(name @ (sym::Ty | sym::TyCtxt)) = cx.tcx.get_diagnostic_name(adt.did())
{
@@ -310,11 +310,8 @@ impl EarlyLintPass for LintPassImpl {
cx.struct_span_lint(
LINT_PASS_IMPL_WITHOUT_MACRO,
lint_pass.path.span,
- |lint| {
- lint.build(fluent::lint::lintpass_by_hand)
- .help(fluent::lint::help)
- .emit();
- },
+ fluent::lint_lintpass_by_hand,
+ |lint| lint.help(fluent::help),
)
}
}
@@ -351,12 +348,12 @@ impl<'tcx> LateLintPass<'tcx> for ExistingDocKeyword {
if is_doc_keyword(v) {
return;
}
- cx.struct_span_lint(EXISTING_DOC_KEYWORD, attr.span, |lint| {
- lint.build(fluent::lint::non_existant_doc_keyword)
- .set_arg("keyword", v)
- .help(fluent::lint::help)
- .emit();
- });
+ cx.struct_span_lint(
+ EXISTING_DOC_KEYWORD,
+ attr.span,
+ fluent::lint_non_existant_doc_keyword,
+ |lint| lint.set_arg("keyword", v).help(fluent::help),
+ );
}
}
}
@@ -374,7 +371,7 @@ declare_tool_lint! {
declare_tool_lint! {
pub rustc::DIAGNOSTIC_OUTSIDE_OF_IMPL,
Allow,
- "prevent creation of diagnostics outside of `SessionDiagnostic`/`AddSubdiagnostic` impls",
+ "prevent creation of diagnostics outside of `IntoDiagnostic`/`AddToDiagnostic` impls",
report_in_external_macro: true
}
@@ -393,24 +390,33 @@ impl LateLintPass<'_> for Diagnostics {
return;
}
+ let mut found_parent_with_attr = false;
let mut found_impl = false;
- for (_, parent) in cx.tcx.hir().parent_iter(expr.hir_id) {
+ for (hir_id, parent) in cx.tcx.hir().parent_iter(expr.hir_id) {
+ if let Some(owner_did) = hir_id.as_owner() {
+ found_parent_with_attr = found_parent_with_attr
+ || cx.tcx.has_attr(owner_did.to_def_id(), sym::rustc_lint_diagnostics);
+ }
+
debug!(?parent);
if let Node::Item(Item { kind: ItemKind::Impl(impl_), .. }) = parent &&
let Impl { of_trait: Some(of_trait), .. } = impl_ &&
let Some(def_id) = of_trait.trait_def_id() &&
let Some(name) = cx.tcx.get_diagnostic_name(def_id) &&
- matches!(name, sym::SessionDiagnostic | sym::AddSubdiagnostic | sym::DecorateLint)
+ matches!(name, sym::IntoDiagnostic | sym::AddToDiagnostic | sym::DecorateLint)
{
found_impl = true;
break;
}
}
debug!(?found_impl);
- if !found_impl {
- cx.struct_span_lint(DIAGNOSTIC_OUTSIDE_OF_IMPL, span, |lint| {
- lint.build(fluent::lint::diag_out_of_impl).emit();
- })
+ if !found_parent_with_attr && !found_impl {
+ cx.struct_span_lint(
+ DIAGNOSTIC_OUTSIDE_OF_IMPL,
+ span,
+ fluent::lint_diag_out_of_impl,
+ |lint| lint,
+ )
}
let mut found_diagnostic_message = false;
@@ -425,10 +431,13 @@ impl LateLintPass<'_> for Diagnostics {
}
}
debug!(?found_diagnostic_message);
- if !found_diagnostic_message {
- cx.struct_span_lint(UNTRANSLATABLE_DIAGNOSTIC, span, |lint| {
- lint.build(fluent::lint::untranslatable_diag).emit();
- })
+ if !found_parent_with_attr && !found_diagnostic_message {
+ cx.struct_span_lint(
+ UNTRANSLATABLE_DIAGNOSTIC,
+ span,
+ fluent::lint_untranslatable_diag,
+ |lint| lint,
+ )
}
}
}
@@ -460,8 +469,8 @@ impl LateLintPass<'_> for BadOptAccess {
let Some(literal) = item.literal() &&
let ast::LitKind::Str(val, _) = literal.kind
{
- cx.struct_span_lint(BAD_OPT_ACCESS, expr.span, |lint| {
- lint.build(val.as_str()).emit(); }
+ cx.struct_span_lint(BAD_OPT_ACCESS, expr.span, val.as_str(), |lint|
+ lint
);
}
}
diff --git a/compiler/rustc_lint/src/late.rs b/compiler/rustc_lint/src/late.rs
index a329b3751..303fcb1a1 100644
--- a/compiler/rustc_lint/src/late.rs
+++ b/compiler/rustc_lint/src/late.rs
@@ -24,13 +24,11 @@ use rustc_hir::intravisit::Visitor;
use rustc_middle::hir::nested_filter;
use rustc_middle::ty::{self, TyCtxt};
use rustc_session::lint::LintPass;
-use rustc_span::symbol::Symbol;
use rustc_span::Span;
use std::any::Any;
use std::cell::Cell;
use std::slice;
-use tracing::debug;
/// Extract the `LintStore` from the query context.
/// This function exists because we've erased `LintStore` as `dyn Any` in the context.
@@ -78,8 +76,8 @@ impl<'tcx, T: LateLintPass<'tcx>> LateContextAndPass<'tcx, T> {
self.context.param_env = old_param_env;
}
- fn process_mod(&mut self, m: &'tcx hir::Mod<'tcx>, s: Span, n: hir::HirId) {
- lint_callback!(self, check_mod, m, s, n);
+ fn process_mod(&mut self, m: &'tcx hir::Mod<'tcx>, n: hir::HirId) {
+ lint_callback!(self, check_mod, m, n);
hir_visit::walk_mod(self, m, n);
}
}
@@ -189,19 +187,12 @@ impl<'tcx, T: LateLintPass<'tcx>> hir_visit::Visitor<'tcx> for LateContextAndPas
let old_cached_typeck_results = self.context.cached_typeck_results.take();
let body = self.context.tcx.hir().body(body_id);
lint_callback!(self, check_fn, fk, decl, body, span, id);
- hir_visit::walk_fn(self, fk, decl, body_id, span, id);
+ hir_visit::walk_fn(self, fk, decl, body_id, id);
self.context.enclosing_body = old_enclosing_body;
self.context.cached_typeck_results.set(old_cached_typeck_results);
}
- fn visit_variant_data(
- &mut self,
- s: &'tcx hir::VariantData<'tcx>,
- _: Symbol,
- _: &'tcx hir::Generics<'tcx>,
- _: hir::HirId,
- _: Span,
- ) {
+ fn visit_variant_data(&mut self, s: &'tcx hir::VariantData<'tcx>) {
lint_callback!(self, check_struct_def, s);
hir_visit::walk_struct_def(self, s);
}
@@ -213,15 +204,10 @@ impl<'tcx, T: LateLintPass<'tcx>> hir_visit::Visitor<'tcx> for LateContextAndPas
})
}
- fn visit_variant(
- &mut self,
- v: &'tcx hir::Variant<'tcx>,
- g: &'tcx hir::Generics<'tcx>,
- item_id: hir::HirId,
- ) {
+ fn visit_variant(&mut self, v: &'tcx hir::Variant<'tcx>) {
self.with_lint_attrs(v.id, |cx| {
lint_callback!(cx, check_variant, v);
- hir_visit::walk_variant(cx, v, g, item_id);
+ hir_visit::walk_variant(cx, v);
})
}
@@ -234,9 +220,9 @@ impl<'tcx, T: LateLintPass<'tcx>> hir_visit::Visitor<'tcx> for LateContextAndPas
hir_visit::walk_inf(self, inf);
}
- fn visit_mod(&mut self, m: &'tcx hir::Mod<'tcx>, s: Span, n: hir::HirId) {
+ fn visit_mod(&mut self, m: &'tcx hir::Mod<'tcx>, _: Span, n: hir::HirId) {
if !self.context.only_module {
- self.process_mod(m, s, n);
+ self.process_mod(m, n);
}
}
@@ -272,13 +258,9 @@ impl<'tcx, T: LateLintPass<'tcx>> hir_visit::Visitor<'tcx> for LateContextAndPas
hir_visit::walk_where_predicate(self, p);
}
- fn visit_poly_trait_ref(
- &mut self,
- t: &'tcx hir::PolyTraitRef<'tcx>,
- m: hir::TraitBoundModifier,
- ) {
- lint_callback!(self, check_poly_trait_ref, t, m);
- hir_visit::walk_poly_trait_ref(self, t, m);
+ fn visit_poly_trait_ref(&mut self, t: &'tcx hir::PolyTraitRef<'tcx>) {
+ lint_callback!(self, check_poly_trait_ref, t);
+ hir_visit::walk_poly_trait_ref(self, t);
}
fn visit_trait_item(&mut self, trait_item: &'tcx hir::TraitItem<'tcx>) {
@@ -320,12 +302,12 @@ impl<'tcx, T: LateLintPass<'tcx>> hir_visit::Visitor<'tcx> for LateContextAndPas
}
}
-struct LateLintPassObjects<'a> {
- lints: &'a mut [LateLintPassObject],
+struct LateLintPassObjects<'a, 'tcx> {
+ lints: &'a mut [LateLintPassObject<'tcx>],
}
#[allow(rustc::lint_pass_impl_without_macro)]
-impl LintPass for LateLintPassObjects<'_> {
+impl LintPass for LateLintPassObjects<'_, '_> {
fn name(&self) -> &'static str {
panic!()
}
@@ -343,7 +325,7 @@ macro_rules! expand_late_lint_pass_impl_methods {
macro_rules! late_lint_pass_impl {
([], [$hir:tt], $methods:tt) => {
- impl<$hir> LateLintPass<$hir> for LateLintPassObjects<'_> {
+ impl<$hir> LateLintPass<$hir> for LateLintPassObjects<'_, $hir> {
expand_late_lint_pass_impl_methods!([$hir], $methods);
}
};
@@ -356,14 +338,14 @@ fn late_lint_mod_pass<'tcx, T: LateLintPass<'tcx>>(
module_def_id: LocalDefId,
pass: T,
) {
- let access_levels = &tcx.privacy_access_levels(());
+ let effective_visibilities = &tcx.effective_visibilities(());
let context = LateContext {
tcx,
enclosing_body: None,
cached_typeck_results: Cell::new(None),
param_env: ty::ParamEnv::empty(),
- access_levels,
+ effective_visibilities,
lint_store: unerased_lint_store(tcx),
last_node_with_lint_attrs: tcx.hir().local_def_id_to_hir_id(module_def_id),
generics: None,
@@ -372,8 +354,8 @@ fn late_lint_mod_pass<'tcx, T: LateLintPass<'tcx>>(
let mut cx = LateContextAndPass { context, pass };
- let (module, span, hir_id) = tcx.hir().get_module(module_def_id);
- cx.process_mod(module, span, hir_id);
+ let (module, _span, hir_id) = tcx.hir().get_module(module_def_id);
+ cx.process_mod(module, hir_id);
// Visit the crate attributes
if hir_id == hir::CRATE_HIR_ID {
@@ -396,7 +378,7 @@ pub fn late_lint_mod<'tcx, T: LateLintPass<'tcx>>(
late_lint_mod_pass(tcx, module_def_id, builtin_lints);
let mut passes: Vec<_> =
- unerased_lint_store(tcx).late_module_passes.iter().map(|pass| (pass)()).collect();
+ unerased_lint_store(tcx).late_module_passes.iter().map(|pass| (pass)(tcx)).collect();
if !passes.is_empty() {
late_lint_mod_pass(tcx, module_def_id, LateLintPassObjects { lints: &mut passes[..] });
@@ -404,14 +386,14 @@ pub fn late_lint_mod<'tcx, T: LateLintPass<'tcx>>(
}
fn late_lint_pass_crate<'tcx, T: LateLintPass<'tcx>>(tcx: TyCtxt<'tcx>, pass: T) {
- let access_levels = &tcx.privacy_access_levels(());
+ let effective_visibilities = &tcx.effective_visibilities(());
let context = LateContext {
tcx,
enclosing_body: None,
cached_typeck_results: Cell::new(None),
param_env: ty::ParamEnv::empty(),
- access_levels,
+ effective_visibilities,
lint_store: unerased_lint_store(tcx),
last_node_with_lint_attrs: hir::CRATE_HIR_ID,
generics: None,
@@ -432,7 +414,8 @@ fn late_lint_pass_crate<'tcx, T: LateLintPass<'tcx>>(tcx: TyCtxt<'tcx>, pass: T)
}
fn late_lint_crate<'tcx, T: LateLintPass<'tcx>>(tcx: TyCtxt<'tcx>, builtin_lints: T) {
- let mut passes = unerased_lint_store(tcx).late_passes.iter().map(|p| (p)()).collect::<Vec<_>>();
+ let mut passes =
+ unerased_lint_store(tcx).late_passes.iter().map(|p| (p)(tcx)).collect::<Vec<_>>();
if !tcx.sess.opts.unstable_opts.no_interleave_lints {
if !passes.is_empty() {
@@ -442,20 +425,23 @@ fn late_lint_crate<'tcx, T: LateLintPass<'tcx>>(tcx: TyCtxt<'tcx>, builtin_lints
late_lint_pass_crate(tcx, builtin_lints);
} else {
for pass in &mut passes {
- tcx.sess.prof.extra_verbose_generic_activity("run_late_lint", pass.name()).run(|| {
- late_lint_pass_crate(tcx, LateLintPassObjects { lints: slice::from_mut(pass) });
- });
+ tcx.sess.prof.verbose_generic_activity_with_arg("run_late_lint", pass.name()).run(
+ || {
+ late_lint_pass_crate(tcx, LateLintPassObjects { lints: slice::from_mut(pass) });
+ },
+ );
}
let mut passes: Vec<_> =
- unerased_lint_store(tcx).late_module_passes.iter().map(|pass| (pass)()).collect();
+ unerased_lint_store(tcx).late_module_passes.iter().map(|pass| (pass)(tcx)).collect();
for pass in &mut passes {
- tcx.sess.prof.extra_verbose_generic_activity("run_late_module_lint", pass.name()).run(
- || {
+ tcx.sess
+ .prof
+ .verbose_generic_activity_with_arg("run_late_module_lint", pass.name())
+ .run(|| {
late_lint_pass_crate(tcx, LateLintPassObjects { lints: slice::from_mut(pass) });
- },
- );
+ });
}
}
}
diff --git a/compiler/rustc_lint/src/let_underscore.rs b/compiler/rustc_lint/src/let_underscore.rs
new file mode 100644
index 000000000..78f355ec3
--- /dev/null
+++ b/compiler/rustc_lint/src/let_underscore.rs
@@ -0,0 +1,168 @@
+use crate::{LateContext, LateLintPass, LintContext};
+use rustc_errors::{Applicability, DiagnosticBuilder, MultiSpan};
+use rustc_hir as hir;
+use rustc_middle::ty;
+use rustc_span::Symbol;
+
+declare_lint! {
+ /// The `let_underscore_drop` lint checks for statements that don't bind
+ /// an expression with a non-trivial `Drop` implementation to anything,
+ /// causing the expression to be dropped immediately instead of at the end
+ /// of the scope.
+ ///
+ /// ### Example
+ /// ```
+ /// struct SomeStruct;
+ /// impl Drop for SomeStruct {
+ /// fn drop(&mut self) {
+ /// println!("Dropping SomeStruct");
+ /// }
+ /// }
+ ///
+ /// fn main() {
+ /// #[warn(let_underscore_drop)]
+ /// // SomeStruct is dropped immediately instead of at end of scope,
+ /// // so "Dropping SomeStruct" is printed before "end of main".
+ /// // The order of prints would be reversed if SomeStruct was bound to
+ /// // a name (such as "_foo").
+ /// let _ = SomeStruct;
+ /// println!("end of main");
+ /// }
+ /// ```
+ ///
+ /// {{produces}}
+ ///
+ /// ### Explanation
+ ///
+ /// Statements that assign an expression to an underscore cause the
+ /// expression to immediately drop instead of extending the expression's
+ /// lifetime to the end of the scope. This is usually unintended,
+ /// especially for types like `MutexGuard`, which are typically used to
+ /// lock a mutex for the duration of an entire scope.
+ ///
+ /// If you want to extend the expression's lifetime to the end of the scope,
+ /// assign an underscore-prefixed name (such as `_foo`) to the expression.
+ /// If you do actually want to drop the expression immediately, then
+ /// calling `std::mem::drop` on the expression is clearer and helps convey
+ /// intent.
+ pub LET_UNDERSCORE_DROP,
+ Allow,
+ "non-binding let on a type that implements `Drop`"
+}
+
+declare_lint! {
+ /// The `let_underscore_lock` lint checks for statements that don't bind
+ /// a mutex guard to anything, causing the lock to be released immediately
+ /// instead of at the end of the scope, which is typically incorrect.
+ ///
+ /// ### Example
+ /// ```compile_fail
+ /// use std::sync::{Arc, Mutex};
+ /// use std::thread;
+ /// let data = Arc::new(Mutex::new(0));
+ ///
+ /// thread::spawn(move || {
+ /// // The lock is immediately released instead of at the end of the
+ /// // scope, which is probably not intended.
+ /// let _ = data.lock().unwrap();
+ /// println!("doing some work");
+ /// let mut lock = data.lock().unwrap();
+ /// *lock += 1;
+ /// });
+ /// ```
+ ///
+ /// {{produces}}
+ ///
+ /// ### Explanation
+ ///
+ /// Statements that assign an expression to an underscore cause the
+ /// expression to immediately drop instead of extending the expression's
+ /// lifetime to the end of the scope. This is usually unintended,
+ /// especially for types like `MutexGuard`, which are typically used to
+ /// lock a mutex for the duration of an entire scope.
+ ///
+ /// If you want to extend the expression's lifetime to the end of the scope,
+ /// assign an underscore-prefixed name (such as `_foo`) to the expression.
+ /// If you do actually want to drop the expression immediately, then
+ /// calling `std::mem::drop` on the expression is clearer and helps convey
+ /// intent.
+ pub LET_UNDERSCORE_LOCK,
+ Deny,
+ "non-binding let on a synchronization lock"
+}
+
+declare_lint_pass!(LetUnderscore => [LET_UNDERSCORE_DROP, LET_UNDERSCORE_LOCK]);
+
+const SYNC_GUARD_SYMBOLS: [Symbol; 3] = [
+ rustc_span::sym::MutexGuard,
+ rustc_span::sym::RwLockReadGuard,
+ rustc_span::sym::RwLockWriteGuard,
+];
+
+impl<'tcx> LateLintPass<'tcx> for LetUnderscore {
+ fn check_local(&mut self, cx: &LateContext<'_>, local: &hir::Local<'_>) {
+ if !matches!(local.pat.kind, hir::PatKind::Wild) {
+ return;
+ }
+ if let Some(init) = local.init {
+ let init_ty = cx.typeck_results().expr_ty(init);
+ // If the type has a trivial Drop implementation, then it doesn't
+ // matter that we drop the value immediately.
+ if !init_ty.needs_drop(cx.tcx, cx.param_env) {
+ return;
+ }
+ let is_sync_lock = match init_ty.kind() {
+ ty::Adt(adt, _) => SYNC_GUARD_SYMBOLS
+ .iter()
+ .any(|guard_symbol| cx.tcx.is_diagnostic_item(*guard_symbol, adt.did())),
+ _ => false,
+ };
+
+ if is_sync_lock {
+ let mut span = MultiSpan::from_spans(vec![local.pat.span, init.span]);
+ span.push_span_label(
+ local.pat.span,
+ "this lock is not assigned to a binding and is immediately dropped".to_string(),
+ );
+ span.push_span_label(
+ init.span,
+ "this binding will immediately drop the value assigned to it".to_string(),
+ );
+ cx.struct_span_lint(
+ LET_UNDERSCORE_LOCK,
+ span,
+ "non-binding let on a synchronization lock",
+ |lint| build_lint(lint, local, init.span),
+ )
+ } else {
+ cx.struct_span_lint(
+ LET_UNDERSCORE_DROP,
+ local.span,
+ "non-binding let on a type that implements `Drop`",
+ |lint| build_lint(lint, local, init.span),
+ )
+ }
+ }
+ }
+}
+
+fn build_lint<'a, 'b>(
+ lint: &'a mut DiagnosticBuilder<'b, ()>,
+ local: &hir::Local<'_>,
+ init_span: rustc_span::Span,
+) -> &'a mut DiagnosticBuilder<'b, ()> {
+ lint.span_suggestion_verbose(
+ local.pat.span,
+ "consider binding to an unused variable to avoid immediately dropping the value",
+ "_unused",
+ Applicability::MachineApplicable,
+ )
+ .multipart_suggestion(
+ "consider immediately dropping the value",
+ vec![
+ (local.span.until(init_span), "drop(".to_string()),
+ (init_span.shrink_to_hi(), ")".to_string()),
+ ],
+ Applicability::MachineApplicable,
+ )
+}
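For reference, a standalone sketch (separate from the patch itself) of the two machine-applicable rewrites that `build_lint` attaches, using the `Mutex` example from the lint documentation above. The lint name and suggestion texts come from this file; the surrounding `main` is only illustrative.

use std::sync::Mutex;

fn main() {
    let data = Mutex::new(0);

    // Original code that triggers `let_underscore_lock` (deny-by-default):
    //     let _ = data.lock().unwrap();

    {
        // First suggestion (`span_suggestion_verbose` above): bind the guard to an
        // unused variable so the lock is held until the end of the scope.
        let _unused = data.lock().unwrap();
    }

    {
        // Second suggestion (`multipart_suggestion` above): drop explicitly if
        // releasing the lock immediately really is the intent.
        drop(data.lock().unwrap());
    }
}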
diff --git a/compiler/rustc_lint/src/levels.rs b/compiler/rustc_lint/src/levels.rs
index 00e96f20d..db0a3419e 100644
--- a/compiler/rustc_lint/src/levels.rs
+++ b/compiler/rustc_lint/src/levels.rs
@@ -3,13 +3,15 @@ use crate::late::unerased_lint_store;
use rustc_ast as ast;
use rustc_ast_pretty::pprust;
use rustc_data_structures::fx::FxHashMap;
-use rustc_errors::{struct_span_err, Applicability, Diagnostic, LintDiagnosticBuilder, MultiSpan};
+use rustc_errors::{Applicability, Diagnostic, DiagnosticBuilder, DiagnosticMessage, MultiSpan};
use rustc_hir as hir;
-use rustc_hir::{intravisit, HirId};
+use rustc_hir::intravisit::{self, Visitor};
+use rustc_hir::HirId;
+use rustc_index::vec::IndexVec;
use rustc_middle::hir::nested_filter;
use rustc_middle::lint::{
- struct_lint_level, LevelAndSource, LintExpectation, LintLevelMap, LintLevelSets,
- LintLevelSource, LintSet, LintStackIndex, COMMAND_LINE,
+ reveal_actual_level, struct_lint_level, LevelAndSource, LintExpectation, LintLevelSource,
+ ShallowLintLevelMap,
};
use rustc_middle::ty::query::Providers;
use rustc_middle::ty::{RegisteredTools, TyCtxt};
@@ -21,49 +23,414 @@ use rustc_session::parse::{add_feature_diagnostics, feature_err};
use rustc_session::Session;
use rustc_span::symbol::{sym, Symbol};
use rustc_span::{Span, DUMMY_SP};
-use tracing::debug;
-fn lint_levels(tcx: TyCtxt<'_>, (): ()) -> LintLevelMap {
- let store = unerased_lint_store(tcx);
- let levels =
- LintLevelsBuilder::new(tcx.sess, false, &store, &tcx.resolutions(()).registered_tools);
- let mut builder = LintLevelMapBuilder { levels, tcx };
- let krate = tcx.hir().krate();
+use crate::errors::{
+ MalformedAttribute, MalformedAttributeSub, OverruledAttribute, OverruledAttributeSub,
+ UnknownToolInScopedLint,
+};
+
+/// Collection of lint levels for the whole crate.
+/// This is used by AST-based lints, which do not
+/// wait until we have built HIR to be emitted.
+#[derive(Debug)]
+struct LintLevelSets {
+ /// Linked list of specifications.
+ list: IndexVec<LintStackIndex, LintSet>,
+}
+
+rustc_index::newtype_index! {
+ struct LintStackIndex {
+ ENCODABLE = custom, // we don't need encoding
+ const COMMAND_LINE = 0,
+ }
+}
+
+/// Specifications found at this position in the stack. This map only represents the lints
+/// found for one set of attributes (like `shallow_lint_levels_on` does).
+///
+/// We store the level specifications as a linked list.
+/// Each `LintSet` represents a set of attributes on the same AST node.
+/// The `parent` forms a linked list that matches the AST tree.
+/// This way, walking the linked list is equivalent to walking the AST bottom-up
+/// to find the specifications for a given lint.
+#[derive(Debug)]
+struct LintSet {
+ // The level each lint is set to here and where it came from, e.g. a
+ // -A/-W/-D flag records the flag's `Symbol` and the `Level` it sets.
+ specs: FxHashMap<LintId, LevelAndSource>,
+ parent: LintStackIndex,
+}
- builder.levels.id_to_set.reserve(krate.owners.len() + 1);
+impl LintLevelSets {
+ fn new() -> Self {
+ LintLevelSets { list: IndexVec::new() }
+ }
- let push =
- builder.levels.push(tcx.hir().attrs(hir::CRATE_HIR_ID), true, Some(hir::CRATE_HIR_ID));
+ fn get_lint_level(
+ &self,
+ lint: &'static Lint,
+ idx: LintStackIndex,
+ aux: Option<&FxHashMap<LintId, LevelAndSource>>,
+ sess: &Session,
+ ) -> LevelAndSource {
+ let lint = LintId::of(lint);
+ let (level, mut src) = self.raw_lint_id_level(lint, idx, aux);
+ let level = reveal_actual_level(level, &mut src, sess, lint, |id| {
+ self.raw_lint_id_level(id, idx, aux)
+ });
+ (level, src)
+ }
+
+ fn raw_lint_id_level(
+ &self,
+ id: LintId,
+ mut idx: LintStackIndex,
+ aux: Option<&FxHashMap<LintId, LevelAndSource>>,
+ ) -> (Option<Level>, LintLevelSource) {
+ if let Some(specs) = aux {
+ if let Some(&(level, src)) = specs.get(&id) {
+ return (Some(level), src);
+ }
+ }
+ loop {
+ let LintSet { ref specs, parent } = self.list[idx];
+ if let Some(&(level, src)) = specs.get(&id) {
+ return (Some(level), src);
+ }
+ if idx == COMMAND_LINE {
+ return (None, LintLevelSource::Default);
+ }
+ idx = parent;
+ }
+ }
+}
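To make the linked-list lookup above concrete, here is a minimal, self-contained sketch of the same idea, separate from the patch and using plain std types instead of rustc's `LintSet`/`LintStackIndex`; all names in it are illustrative only.

use std::collections::HashMap;

#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
struct LintId(&'static str);

#[allow(dead_code)]
#[derive(Clone, Copy, Debug, PartialEq)]
enum Level { Allow, Warn, Deny }

struct Scope {
    specs: HashMap<LintId, Level>,
    // Index of the parent scope in `scopes`; `None` plays the role of COMMAND_LINE.
    parent: Option<usize>,
}

// Walk the parent chain bottom-up until a specification for `id` is found.
fn lookup(scopes: &[Scope], mut idx: usize, id: LintId) -> Option<Level> {
    loop {
        if let Some(&level) = scopes[idx].specs.get(&id) {
            return Some(level);
        }
        match scopes[idx].parent {
            Some(parent) => idx = parent,
            None => return None, // reached the command-line root: fall back to defaults
        }
    }
}

fn main() {
    let unused = LintId("unused_variables");
    let root = Scope { specs: HashMap::from([(unused, Level::Warn)]), parent: None };
    // An inner scope (e.g. a function body) with no lint attributes of its own.
    let inner = Scope { specs: HashMap::new(), parent: Some(0) };
    let scopes = vec![root, inner];
    // Lookup from the inner scope walks up to the root and finds `warn`.
    assert_eq!(lookup(&scopes, 1, unused), Some(Level::Warn));
}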
- builder.levels.register_id(hir::CRATE_HIR_ID);
+fn lint_expectations(tcx: TyCtxt<'_>, (): ()) -> Vec<(LintExpectationId, LintExpectation)> {
+ let store = unerased_lint_store(tcx);
+
+ let mut builder = LintLevelsBuilder {
+ sess: tcx.sess,
+ provider: QueryMapExpectationsWrapper {
+ tcx,
+ cur: hir::CRATE_HIR_ID,
+ specs: ShallowLintLevelMap::default(),
+ expectations: Vec::new(),
+ unstable_to_stable_ids: FxHashMap::default(),
+ empty: FxHashMap::default(),
+ },
+ warn_about_weird_lints: false,
+ store,
+ registered_tools: &tcx.resolutions(()).registered_tools,
+ };
+
+ builder.add_command_line();
+ builder.add_id(hir::CRATE_HIR_ID);
tcx.hir().walk_toplevel_module(&mut builder);
- builder.levels.pop(push);
- builder.levels.update_unstable_expectation_ids();
- builder.levels.build_map()
+ tcx.sess.diagnostic().update_unstable_expectation_id(&builder.provider.unstable_to_stable_ids);
+
+ builder.provider.expectations
}
-pub struct LintLevelsBuilder<'s> {
- sess: &'s Session,
- lint_expectations: Vec<(LintExpectationId, LintExpectation)>,
- /// Each expectation has a stable and an unstable identifier. This map
- /// is used to map from unstable to stable [`LintExpectationId`]s.
- expectation_id_map: FxHashMap<LintExpectationId, LintExpectationId>,
+#[instrument(level = "trace", skip(tcx), ret)]
+fn shallow_lint_levels_on(tcx: TyCtxt<'_>, owner: hir::OwnerId) -> ShallowLintLevelMap {
+ let store = unerased_lint_store(tcx);
+ let attrs = tcx.hir_attrs(owner);
+
+ let mut levels = LintLevelsBuilder {
+ sess: tcx.sess,
+ provider: LintLevelQueryMap {
+ tcx,
+ cur: owner.into(),
+ specs: ShallowLintLevelMap::default(),
+ empty: FxHashMap::default(),
+ attrs,
+ },
+ warn_about_weird_lints: false,
+ store,
+ registered_tools: &tcx.resolutions(()).registered_tools,
+ };
+
+ if owner == hir::CRATE_OWNER_ID {
+ levels.add_command_line();
+ }
+
+ match attrs.map.range(..) {
+ // There is only something to do if there are attributes at all.
+ [] => {}
+ // Most of the time, there is only one attribute. Avoid fetching HIR in that case.
+ [(local_id, _)] => levels.add_id(HirId { owner, local_id: *local_id }),
+ // Otherwise, we need to visit the attributes in source code order, so we fetch HIR and do
+ // a standard visit.
+ // FIXME(#102522) Just iterate on attrs once that iteration order matches HIR's.
+ _ => match tcx.hir().expect_owner(owner) {
+ hir::OwnerNode::Item(item) => levels.visit_item(item),
+ hir::OwnerNode::ForeignItem(item) => levels.visit_foreign_item(item),
+ hir::OwnerNode::TraitItem(item) => levels.visit_trait_item(item),
+ hir::OwnerNode::ImplItem(item) => levels.visit_impl_item(item),
+ hir::OwnerNode::Crate(mod_) => {
+ levels.add_id(hir::CRATE_HIR_ID);
+ levels.visit_mod(mod_, mod_.spans.inner_span, hir::CRATE_HIR_ID)
+ }
+ },
+ }
+
+ let specs = levels.provider.specs;
+
+ #[cfg(debug_assertions)]
+ for (_, v) in specs.specs.iter() {
+ debug_assert!(!v.is_empty());
+ }
+
+ specs
+}
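The three-way match on `attrs.map.range(..)` above is a slice-pattern fast path: nothing to do when there are no attributes, a cheap single-attribute case, and a full HIR visit otherwise. A toy sketch of the same dispatch shape, separate from the patch, with made-up data and names:

fn describe(attrs: &[(u32, &str)]) -> String {
    match attrs {
        // Nothing to do if there are no attributes at all.
        [] => "no attributes".to_string(),
        // Most of the time there is exactly one attribute; skip the full walk.
        [(id, name)] => format!("only attribute #{id} ({name}) processed directly"),
        // Otherwise fall back to visiting everything in source order.
        _ => format!("{} attributes, visiting all of them in order", attrs.len()),
    }
}

fn main() {
    assert_eq!(describe(&[]), "no attributes");
    assert_eq!(
        describe(&[(0, "allow(unused)")]),
        "only attribute #0 (allow(unused)) processed directly"
    );
    println!("{}", describe(&[(0, "allow(unused)"), (1, "deny(warnings)")]));
}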
+
+pub struct TopDown {
sets: LintLevelSets,
- id_to_set: FxHashMap<HirId, LintStackIndex>,
cur: LintStackIndex,
+}
+
+pub trait LintLevelsProvider {
+ fn current_specs(&self) -> &FxHashMap<LintId, LevelAndSource>;
+ fn insert(&mut self, id: LintId, lvl: LevelAndSource);
+ fn get_lint_level(&self, lint: &'static Lint, sess: &Session) -> LevelAndSource;
+ fn push_expectation(&mut self, _id: LintExpectationId, _expectation: LintExpectation) {}
+}
+
+impl LintLevelsProvider for TopDown {
+ fn current_specs(&self) -> &FxHashMap<LintId, LevelAndSource> {
+ &self.sets.list[self.cur].specs
+ }
+
+ fn insert(&mut self, id: LintId, lvl: LevelAndSource) {
+ self.sets.list[self.cur].specs.insert(id, lvl);
+ }
+
+ fn get_lint_level(&self, lint: &'static Lint, sess: &Session) -> LevelAndSource {
+ self.sets.get_lint_level(lint, self.cur, Some(self.current_specs()), sess)
+ }
+}
+
+struct LintLevelQueryMap<'tcx> {
+ tcx: TyCtxt<'tcx>,
+ cur: HirId,
+ specs: ShallowLintLevelMap,
+ /// Empty hash map to simplify code.
+ empty: FxHashMap<LintId, LevelAndSource>,
+ attrs: &'tcx hir::AttributeMap<'tcx>,
+}
+
+impl LintLevelsProvider for LintLevelQueryMap<'_> {
+ fn current_specs(&self) -> &FxHashMap<LintId, LevelAndSource> {
+ self.specs.specs.get(&self.cur.local_id).unwrap_or(&self.empty)
+ }
+ fn insert(&mut self, id: LintId, lvl: LevelAndSource) {
+ self.specs.specs.get_mut_or_insert_default(self.cur.local_id).insert(id, lvl);
+ }
+ fn get_lint_level(&self, lint: &'static Lint, _: &Session) -> LevelAndSource {
+ self.specs.lint_level_id_at_node(self.tcx, LintId::of(lint), self.cur)
+ }
+}
+
+struct QueryMapExpectationsWrapper<'tcx> {
+ tcx: TyCtxt<'tcx>,
+ cur: HirId,
+ specs: ShallowLintLevelMap,
+ expectations: Vec<(LintExpectationId, LintExpectation)>,
+ unstable_to_stable_ids: FxHashMap<LintExpectationId, LintExpectationId>,
+ /// Empty hash map to simplify code.
+ empty: FxHashMap<LintId, LevelAndSource>,
+}
+
+impl LintLevelsProvider for QueryMapExpectationsWrapper<'_> {
+ fn current_specs(&self) -> &FxHashMap<LintId, LevelAndSource> {
+ self.specs.specs.get(&self.cur.local_id).unwrap_or(&self.empty)
+ }
+ fn insert(&mut self, id: LintId, lvl: LevelAndSource) {
+ let specs = self.specs.specs.get_mut_or_insert_default(self.cur.local_id);
+ specs.clear();
+ specs.insert(id, lvl);
+ }
+ fn get_lint_level(&self, lint: &'static Lint, _: &Session) -> LevelAndSource {
+ self.specs.lint_level_id_at_node(self.tcx, LintId::of(lint), self.cur)
+ }
+ fn push_expectation(&mut self, id: LintExpectationId, expectation: LintExpectation) {
+ let LintExpectationId::Stable { attr_id: Some(attr_id), hir_id, attr_index, .. } = id else { bug!("unstable expectation id should already be mapped") };
+ let key = LintExpectationId::Unstable { attr_id, lint_index: None };
+
+ if !self.unstable_to_stable_ids.contains_key(&key) {
+ self.unstable_to_stable_ids.insert(
+ key,
+ LintExpectationId::Stable { hir_id, attr_index, lint_index: None, attr_id: None },
+ );
+ }
+
+ self.expectations.push((id.normalize(), expectation));
+ }
+}
+
+impl<'tcx> LintLevelsBuilder<'_, LintLevelQueryMap<'tcx>> {
+ fn add_id(&mut self, hir_id: HirId) {
+ self.provider.cur = hir_id;
+ self.add(
+ self.provider.attrs.get(hir_id.local_id),
+ hir_id == hir::CRATE_HIR_ID,
+ Some(hir_id),
+ );
+ }
+}
+
+impl<'tcx> Visitor<'tcx> for LintLevelsBuilder<'_, LintLevelQueryMap<'tcx>> {
+ type NestedFilter = nested_filter::OnlyBodies;
+
+ fn nested_visit_map(&mut self) -> Self::Map {
+ self.provider.tcx.hir()
+ }
+
+ fn visit_param(&mut self, param: &'tcx hir::Param<'tcx>) {
+ self.add_id(param.hir_id);
+ intravisit::walk_param(self, param);
+ }
+
+ fn visit_item(&mut self, it: &'tcx hir::Item<'tcx>) {
+ self.add_id(it.hir_id());
+ intravisit::walk_item(self, it);
+ }
+
+ fn visit_foreign_item(&mut self, it: &'tcx hir::ForeignItem<'tcx>) {
+ self.add_id(it.hir_id());
+ intravisit::walk_foreign_item(self, it);
+ }
+
+ fn visit_stmt(&mut self, e: &'tcx hir::Stmt<'tcx>) {
+ // We will call `add_id` when we walk
+ // the `StmtKind`. The outer statement itself doesn't
+ // define the lint levels.
+ intravisit::walk_stmt(self, e);
+ }
+
+ fn visit_expr(&mut self, e: &'tcx hir::Expr<'tcx>) {
+ self.add_id(e.hir_id);
+ intravisit::walk_expr(self, e);
+ }
+
+ fn visit_field_def(&mut self, s: &'tcx hir::FieldDef<'tcx>) {
+ self.add_id(s.hir_id);
+ intravisit::walk_field_def(self, s);
+ }
+
+ fn visit_variant(&mut self, v: &'tcx hir::Variant<'tcx>) {
+ self.add_id(v.id);
+ intravisit::walk_variant(self, v);
+ }
+
+ fn visit_local(&mut self, l: &'tcx hir::Local<'tcx>) {
+ self.add_id(l.hir_id);
+ intravisit::walk_local(self, l);
+ }
+
+ fn visit_arm(&mut self, a: &'tcx hir::Arm<'tcx>) {
+ self.add_id(a.hir_id);
+ intravisit::walk_arm(self, a);
+ }
+
+ fn visit_trait_item(&mut self, trait_item: &'tcx hir::TraitItem<'tcx>) {
+ self.add_id(trait_item.hir_id());
+ intravisit::walk_trait_item(self, trait_item);
+ }
+
+ fn visit_impl_item(&mut self, impl_item: &'tcx hir::ImplItem<'tcx>) {
+ self.add_id(impl_item.hir_id());
+ intravisit::walk_impl_item(self, impl_item);
+ }
+}
+
+impl<'tcx> LintLevelsBuilder<'_, QueryMapExpectationsWrapper<'tcx>> {
+ fn add_id(&mut self, hir_id: HirId) {
+ self.provider.cur = hir_id;
+ self.add(self.provider.tcx.hir().attrs(hir_id), hir_id == hir::CRATE_HIR_ID, Some(hir_id));
+ }
+}
+
+impl<'tcx> Visitor<'tcx> for LintLevelsBuilder<'_, QueryMapExpectationsWrapper<'tcx>> {
+ type NestedFilter = nested_filter::All;
+
+ fn nested_visit_map(&mut self) -> Self::Map {
+ self.provider.tcx.hir()
+ }
+
+ fn visit_param(&mut self, param: &'tcx hir::Param<'tcx>) {
+ self.add_id(param.hir_id);
+ intravisit::walk_param(self, param);
+ }
+
+ fn visit_item(&mut self, it: &'tcx hir::Item<'tcx>) {
+ self.add_id(it.hir_id());
+ intravisit::walk_item(self, it);
+ }
+
+ fn visit_foreign_item(&mut self, it: &'tcx hir::ForeignItem<'tcx>) {
+ self.add_id(it.hir_id());
+ intravisit::walk_foreign_item(self, it);
+ }
+
+ fn visit_stmt(&mut self, e: &'tcx hir::Stmt<'tcx>) {
+ // We will call `add_id` when we walk
+ // the `StmtKind`. The outer statement itself doesn't
+ // define the lint levels.
+ intravisit::walk_stmt(self, e);
+ }
+
+ fn visit_expr(&mut self, e: &'tcx hir::Expr<'tcx>) {
+ self.add_id(e.hir_id);
+ intravisit::walk_expr(self, e);
+ }
+
+ fn visit_field_def(&mut self, s: &'tcx hir::FieldDef<'tcx>) {
+ self.add_id(s.hir_id);
+ intravisit::walk_field_def(self, s);
+ }
+
+ fn visit_variant(&mut self, v: &'tcx hir::Variant<'tcx>) {
+ self.add_id(v.id);
+ intravisit::walk_variant(self, v);
+ }
+
+ fn visit_local(&mut self, l: &'tcx hir::Local<'tcx>) {
+ self.add_id(l.hir_id);
+ intravisit::walk_local(self, l);
+ }
+
+ fn visit_arm(&mut self, a: &'tcx hir::Arm<'tcx>) {
+ self.add_id(a.hir_id);
+ intravisit::walk_arm(self, a);
+ }
+
+ fn visit_trait_item(&mut self, trait_item: &'tcx hir::TraitItem<'tcx>) {
+ self.add_id(trait_item.hir_id());
+ intravisit::walk_trait_item(self, trait_item);
+ }
+
+ fn visit_impl_item(&mut self, impl_item: &'tcx hir::ImplItem<'tcx>) {
+ self.add_id(impl_item.hir_id());
+ intravisit::walk_impl_item(self, impl_item);
+ }
+}
+
+pub struct LintLevelsBuilder<'s, P> {
+ sess: &'s Session,
+ provider: P,
warn_about_weird_lints: bool,
store: &'s LintStore,
registered_tools: &'s RegisteredTools,
}
-pub struct BuilderPush {
+pub(crate) struct BuilderPush {
prev: LintStackIndex,
- pub changed: bool,
}
-impl<'s> LintLevelsBuilder<'s> {
- pub fn new(
+impl<'s> LintLevelsBuilder<'s, TopDown> {
+ pub(crate) fn new(
sess: &'s Session,
warn_about_weird_lints: bool,
store: &'s LintStore,
@@ -71,20 +438,74 @@ impl<'s> LintLevelsBuilder<'s> {
) -> Self {
let mut builder = LintLevelsBuilder {
sess,
- lint_expectations: Default::default(),
- expectation_id_map: Default::default(),
- sets: LintLevelSets::new(),
- cur: COMMAND_LINE,
- id_to_set: Default::default(),
+ provider: TopDown { sets: LintLevelSets::new(), cur: COMMAND_LINE },
warn_about_weird_lints,
store,
registered_tools,
};
- builder.process_command_line(sess, store);
- assert_eq!(builder.sets.list.len(), 1);
+ builder.process_command_line();
+ assert_eq!(builder.provider.sets.list.len(), 1);
builder
}
+ fn process_command_line(&mut self) {
+ self.provider.cur = self
+ .provider
+ .sets
+ .list
+ .push(LintSet { specs: FxHashMap::default(), parent: COMMAND_LINE });
+ self.add_command_line();
+ }
+
+ /// Pushes a list of AST lint attributes onto this context.
+ ///
+ /// This function will return a `BuilderPush` object which should be passed
+ /// to `pop` when this scope for the attributes provided is exited.
+ ///
+ /// This function will perform a number of tasks:
+ ///
+ /// * It'll validate all lint-related attributes in `attrs`
+ /// * It'll mark all lint-related attributes as used
+ /// * Lint levels will be updated based on the attributes provided
+ /// * Lint attributes are validated, e.g., a `#[forbid]` can't be switched to
+ /// `#[allow]`
+ ///
+ /// Don't forget to call `pop`!
+ pub(crate) fn push(
+ &mut self,
+ attrs: &[ast::Attribute],
+ is_crate_node: bool,
+ source_hir_id: Option<HirId>,
+ ) -> BuilderPush {
+ let prev = self.provider.cur;
+ self.provider.cur =
+ self.provider.sets.list.push(LintSet { specs: FxHashMap::default(), parent: prev });
+
+ self.add(attrs, is_crate_node, source_hir_id);
+
+ if self.provider.current_specs().is_empty() {
+ self.provider.sets.list.pop();
+ self.provider.cur = prev;
+ }
+
+ BuilderPush { prev }
+ }
+
+ /// Called after `push` when the scope of a set of attributes is exited.
+ pub(crate) fn pop(&mut self, push: BuilderPush) {
+ self.provider.cur = push.prev;
+ std::mem::forget(push);
+ }
+}
+
+#[cfg(debug_assertions)]
+impl Drop for BuilderPush {
+ fn drop(&mut self) {
+ panic!("Found a `push` without a `pop`.");
+ }
+}
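The `push`/`pop` pair plus the debug-only `Drop` panic above form a "pop or panic" guard. A minimal standalone sketch of that pattern, separate from the patch and with toy types in place of the lint machinery:

struct Stack {
    levels: Vec<&'static str>,
}

struct PushToken {
    prev_len: usize,
}

#[cfg(debug_assertions)]
impl Drop for PushToken {
    fn drop(&mut self) {
        panic!("found a `push` without a matching `pop`");
    }
}

impl Stack {
    fn push(&mut self, level: &'static str) -> PushToken {
        let prev_len = self.levels.len();
        self.levels.push(level);
        PushToken { prev_len }
    }

    fn pop(&mut self, token: PushToken) {
        self.levels.truncate(token.prev_len);
        // Disarm the debug-only Drop panic, mirroring `std::mem::forget(push)` above.
        std::mem::forget(token);
    }
}

fn main() {
    let mut stack = Stack { levels: vec!["command-line"] };
    let token = stack.push("allow(unused)");
    // ... the pushed scope's lint levels are in effect here ...
    stack.pop(token);
    assert_eq!(stack.levels, ["command-line"]);
}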
+
+impl<'s, P: LintLevelsProvider> LintLevelsBuilder<'s, P> {
pub(crate) fn sess(&self) -> &Session {
self.sess
}
@@ -94,24 +515,20 @@ impl<'s> LintLevelsBuilder<'s> {
}
fn current_specs(&self) -> &FxHashMap<LintId, LevelAndSource> {
- &self.sets.list[self.cur].specs
+ self.provider.current_specs()
}
- fn current_specs_mut(&mut self) -> &mut FxHashMap<LintId, LevelAndSource> {
- &mut self.sets.list[self.cur].specs
+ fn insert(&mut self, id: LintId, lvl: LevelAndSource) {
+ self.provider.insert(id, lvl)
}
- fn process_command_line(&mut self, sess: &Session, store: &LintStore) {
- self.sets.lint_cap = sess.opts.lint_cap.unwrap_or(Level::Forbid);
-
- self.cur =
- self.sets.list.push(LintSet { specs: FxHashMap::default(), parent: COMMAND_LINE });
- for &(ref lint_name, level) in &sess.opts.lint_opts {
- store.check_lint_name_cmdline(sess, &lint_name, level, self.registered_tools);
+ fn add_command_line(&mut self) {
+ for &(ref lint_name, level) in &self.sess.opts.lint_opts {
+ self.store.check_lint_name_cmdline(self.sess, &lint_name, level, self.registered_tools);
let orig_level = level;
let lint_flag_val = Symbol::intern(lint_name);
- let Ok(ids) = store.find_lints(&lint_name) else {
+ let Ok(ids) = self.store.find_lints(&lint_name) else {
// errors handled in check_lint_name_cmdline above
continue
};
@@ -125,7 +542,7 @@ impl<'s> LintLevelsBuilder<'s> {
if self.check_gated_lint(id, DUMMY_SP) {
let src = LintLevelSource::CommandLine(lint_flag_val, orig_level);
- self.current_specs_mut().insert(id, (level, src));
+ self.insert(id, (level, src));
}
}
}
@@ -134,9 +551,11 @@ impl<'s> LintLevelsBuilder<'s> {
/// Attempts to insert the `id` to `level_src` map entry. If unsuccessful
/// (e.g. if a forbid was already inserted on the same scope), then emits a
/// diagnostic with no change to `specs`.
- fn insert_spec(&mut self, id: LintId, (level, src): LevelAndSource) {
- let (old_level, old_src) =
- self.sets.get_lint_level(id.lint, self.cur, Some(self.current_specs()), &self.sess);
+ fn insert_spec(&mut self, id: LintId, (mut level, src): LevelAndSource) {
+ let (old_level, old_src) = self.provider.get_lint_level(id.lint, &self.sess);
+ if let Level::Expect(id) = &mut level && let LintExpectationId::Stable { .. } = id {
+ *id = id.normalize();
+ }
// Setting to a non-forbid level is an error if the lint previously had
// a forbid level. Note that this is not necessarily true even with a
// `#[forbid(..)]` attribute present, as that is overridden by `--cap-lints`.
@@ -154,7 +573,7 @@ impl<'s> LintLevelsBuilder<'s> {
let id_name = id.lint.name_lower();
let fcw_warning = match old_src {
LintLevelSource::Default => false,
- LintLevelSource::Node(symbol, _, _) => self.store.is_lint_group(symbol),
+ LintLevelSource::Node { name, .. } => self.store.is_lint_group(name),
LintLevelSource::CommandLine(symbol, _) => self.store.is_lint_group(symbol),
};
debug!(
@@ -174,8 +593,8 @@ impl<'s> LintLevelsBuilder<'s> {
id.to_string()
));
}
- LintLevelSource::Node(_, forbid_source_span, reason) => {
- diag.span_label(forbid_source_span, "`forbid` level set here");
+ LintLevelSource::Node { span, reason, .. } => {
+ diag.span_label(span, "`forbid` level set here");
if let Some(rationale) = reason {
diag.note(rationale.as_str());
}
@@ -186,28 +605,35 @@ impl<'s> LintLevelsBuilder<'s> {
}
};
if !fcw_warning {
- let mut diag_builder = struct_span_err!(
- self.sess,
- src.span(),
- E0453,
- "{}({}) incompatible with previous forbid",
- level.as_str(),
- src.name(),
- );
- decorate_diag(&mut diag_builder);
- diag_builder.emit();
+ self.sess.emit_err(OverruledAttribute {
+ span: src.span(),
+ overruled: src.span(),
+ lint_level: level.as_str().to_string(),
+ lint_source: src.name(),
+ sub: match old_src {
+ LintLevelSource::Default => {
+ OverruledAttributeSub::DefaultSource { id: id.to_string() }
+ }
+ LintLevelSource::Node { span, reason, .. } => {
+ OverruledAttributeSub::NodeSource { span, reason }
+ }
+ LintLevelSource::CommandLine(_, _) => {
+ OverruledAttributeSub::CommandLineSource
+ }
+ },
+ });
} else {
self.struct_lint(
FORBIDDEN_LINT_GROUPS,
Some(src.span().into()),
- |diag_builder| {
- let mut diag_builder = diag_builder.build(&format!(
- "{}({}) incompatible with previous forbid",
- level.as_str(),
- src.name(),
- ));
- decorate_diag(&mut diag_builder);
- diag_builder.emit();
+ format!(
+ "{}({}) incompatible with previous forbid",
+ level.as_str(),
+ src.name(),
+ ),
+ |lint| {
+ decorate_diag(lint);
+ lint
},
);
}
@@ -230,46 +656,21 @@ impl<'s> LintLevelsBuilder<'s> {
match (old_level, level) {
// If the new level is an expectation store it in `ForceWarn`
- (Level::ForceWarn(_), Level::Expect(expectation_id)) => self
- .current_specs_mut()
- .insert(id, (Level::ForceWarn(Some(expectation_id)), old_src)),
- // Keep `ForceWarn` level but drop the expectation
- (Level::ForceWarn(_), _) => {
- self.current_specs_mut().insert(id, (Level::ForceWarn(None), old_src))
+ (Level::ForceWarn(_), Level::Expect(expectation_id)) => {
+ self.insert(id, (Level::ForceWarn(Some(expectation_id)), old_src))
}
+ // Keep `ForceWarn` level but drop the expectation
+ (Level::ForceWarn(_), _) => self.insert(id, (Level::ForceWarn(None), old_src)),
// Set the lint level as normal
- _ => self.current_specs_mut().insert(id, (level, src)),
+ _ => self.insert(id, (level, src)),
};
}
- /// Pushes a list of AST lint attributes onto this context.
- ///
- /// This function will return a `BuilderPush` object which should be passed
- /// to `pop` when this scope for the attributes provided is exited.
- ///
- /// This function will perform a number of tasks:
- ///
- /// * It'll validate all lint-related attributes in `attrs`
- /// * It'll mark all lint-related attributes as used
- /// * Lint levels will be updated based on the attributes provided
- /// * Lint attributes are validated, e.g., a `#[forbid]` can't be switched to
- /// `#[allow]`
- ///
- /// Don't forget to call `pop`!
- pub(crate) fn push(
- &mut self,
- attrs: &[ast::Attribute],
- is_crate_node: bool,
- source_hir_id: Option<HirId>,
- ) -> BuilderPush {
- let prev = self.cur;
- self.cur = self.sets.list.push(LintSet { specs: FxHashMap::default(), parent: prev });
-
+ fn add(&mut self, attrs: &[ast::Attribute], is_crate_node: bool, source_hir_id: Option<HirId>) {
let sess = self.sess;
- let bad_attr = |span| struct_span_err!(sess, span, E0452, "malformed lint attribute input");
for (attr_index, attr) in attrs.iter().enumerate() {
if attr.has_name(sym::automatically_derived) {
- self.current_specs_mut().insert(
+ self.insert(
LintId::of(SINGLE_USE_LIFETIMES),
(Level::Allow, LintLevelSource::Default),
);
@@ -280,7 +681,17 @@ impl<'s> LintLevelsBuilder<'s> {
None => continue,
// This is the only lint level with a `LintExpectationId` that can be created from an attribute
Some(Level::Expect(unstable_id)) if let Some(hir_id) = source_hir_id => {
- let stable_id = self.create_stable_id(unstable_id, hir_id, attr_index);
+ let LintExpectationId::Unstable { attr_id, lint_index } = unstable_id
+ else { bug!("stable id Level::from_attr") };
+
+ let stable_id = LintExpectationId::Stable {
+ hir_id,
+ attr_index: attr_index.try_into().unwrap(),
+ lint_index,
+ // we pass the previous unstable attr_id such that we can trace the ast id when building a map
+ // to go from unstable to stable id.
+ attr_id: Some(attr_id),
+ };
Level::Expect(stable_id)
}
@@ -317,20 +728,27 @@ impl<'s> LintLevelsBuilder<'s> {
}
reason = Some(rationale);
} else {
- bad_attr(name_value.span)
- .span_label(name_value.span, "reason must be a string literal")
- .emit();
+ sess.emit_err(MalformedAttribute {
+ span: name_value.span,
+ sub: MalformedAttributeSub::ReasonMustBeStringLiteral(
+ name_value.span,
+ ),
+ });
}
// found reason, reslice meta list to exclude it
metas.pop().unwrap();
} else {
- bad_attr(item.span)
- .span_label(item.span, "bad attribute argument")
- .emit();
+ sess.emit_err(MalformedAttribute {
+ span: item.span,
+ sub: MalformedAttributeSub::BadAttributeArgument(item.span),
+ });
}
}
ast::MetaItemKind::List(_) => {
- bad_attr(item.span).span_label(item.span, "bad attribute argument").emit();
+ sess.emit_err(MalformedAttribute {
+ span: item.span,
+ sub: MalformedAttributeSub::BadAttributeArgument(item.span),
+ });
}
}
}
@@ -348,20 +766,21 @@ impl<'s> LintLevelsBuilder<'s> {
let meta_item = match li {
ast::NestedMetaItem::MetaItem(meta_item) if meta_item.is_word() => meta_item,
_ => {
- let mut err = bad_attr(sp);
- let mut add_label = true;
if let Some(item) = li.meta_item() {
if let ast::MetaItemKind::NameValue(_) = item.kind {
if item.path == sym::reason {
- err.span_label(sp, "reason in lint attribute must come last");
- add_label = false;
+ sess.emit_err(MalformedAttribute {
+ span: sp,
+ sub: MalformedAttributeSub::ReasonMustComeLast(sp),
+ });
+ continue;
}
}
}
- if add_label {
- err.span_label(sp, "bad attribute argument");
- }
- err.emit();
+ sess.emit_err(MalformedAttribute {
+ span: sp,
+ sub: MalformedAttributeSub::BadAttributeArgument(sp),
+ });
continue;
}
};
@@ -387,7 +806,7 @@ impl<'s> LintLevelsBuilder<'s> {
[lint] => *lint == LintId::of(UNFULFILLED_LINT_EXPECTATIONS),
_ => false,
};
- self.lint_expectations.push((
+ self.provider.push_expectation(
expect_id,
LintExpectation::new(
reason,
@@ -395,13 +814,19 @@ impl<'s> LintLevelsBuilder<'s> {
is_unfulfilled_lint_expectations,
tool_name,
),
- ));
+ );
}
- let src = LintLevelSource::Node(
- meta_item.path.segments.last().expect("empty lint name").ident.name,
- sp,
+ let src = LintLevelSource::Node {
+ name: meta_item
+ .path
+ .segments
+ .last()
+ .expect("empty lint name")
+ .ident
+ .name,
+ span: sp,
reason,
- );
+ };
for &id in *ids {
if self.check_gated_lint(id, attr.span) {
self.insert_spec(id, (level, src));
@@ -414,65 +839,60 @@ impl<'s> LintLevelsBuilder<'s> {
Ok(ids) => {
let complete_name =
&format!("{}::{}", tool_ident.unwrap().name, name);
- let src = LintLevelSource::Node(
- Symbol::intern(complete_name),
- sp,
+ let src = LintLevelSource::Node {
+ name: Symbol::intern(complete_name),
+ span: sp,
reason,
- );
- for id in ids {
- self.insert_spec(*id, (level, src));
+ };
+ for &id in ids {
+ if self.check_gated_lint(id, attr.span) {
+ self.insert_spec(id, (level, src));
+ }
}
if let Level::Expect(expect_id) = level {
- self.lint_expectations.push((
+ self.provider.push_expectation(
expect_id,
LintExpectation::new(reason, sp, false, tool_name),
- ));
+ );
}
}
Err((Some(ids), ref new_lint_name)) => {
let lint = builtin::RENAMED_AND_REMOVED_LINTS;
- let (lvl, src) = self.sets.get_lint_level(
- lint,
- self.cur,
- Some(self.current_specs()),
- &sess,
- );
+ let (lvl, src) = self.provider.get_lint_level(lint, &sess);
struct_lint_level(
self.sess,
lint,
lvl,
src,
Some(sp.into()),
+ format!(
+ "lint name `{}` is deprecated \
+ and may not have an effect in the future.",
+ name
+ ),
|lint| {
- let msg = format!(
- "lint name `{}` is deprecated \
- and may not have an effect in the future.",
- name
- );
- lint.build(&msg)
- .span_suggestion(
- sp,
- "change it to",
- new_lint_name,
- Applicability::MachineApplicable,
- )
- .emit();
+ lint.span_suggestion(
+ sp,
+ "change it to",
+ new_lint_name,
+ Applicability::MachineApplicable,
+ )
},
);
- let src = LintLevelSource::Node(
- Symbol::intern(&new_lint_name),
- sp,
+ let src = LintLevelSource::Node {
+ name: Symbol::intern(&new_lint_name),
+ span: sp,
reason,
- );
+ };
for id in ids {
self.insert_spec(*id, (level, src));
}
if let Level::Expect(expect_id) = level {
- self.lint_expectations.push((
+ self.provider.push_expectation(
expect_id,
LintExpectation::new(reason, sp, false, tool_name),
- ));
+ );
}
}
Err((None, _)) => {
@@ -485,22 +905,12 @@ impl<'s> LintLevelsBuilder<'s> {
}
&CheckLintNameResult::NoTool => {
- let mut err = struct_span_err!(
- sess,
- tool_ident.map_or(DUMMY_SP, |ident| ident.span),
- E0710,
- "unknown tool name `{}` found in scoped lint: `{}::{}`",
- tool_name.unwrap(),
- tool_name.unwrap(),
- pprust::path_to_string(&meta_item.path),
- );
- if sess.is_nightly_build() {
- err.help(&format!(
- "add `#![register_tool({})]` to the crate root",
- tool_name.unwrap()
- ));
- }
- err.emit();
+ sess.emit_err(UnknownToolInScopedLint {
+ span: tool_ident.map(|ident| ident.span),
+ tool_name: tool_name.unwrap(),
+ lint_name: pprust::path_to_string(&meta_item.path),
+ is_nightly_build: sess.is_nightly_build().then_some(()),
+ });
continue;
}
@@ -508,57 +918,54 @@ impl<'s> LintLevelsBuilder<'s> {
CheckLintNameResult::Warning(msg, renamed) => {
let lint = builtin::RENAMED_AND_REMOVED_LINTS;
- let (renamed_lint_level, src) = self.sets.get_lint_level(
- lint,
- self.cur,
- Some(self.current_specs()),
- &sess,
- );
+ let (renamed_lint_level, src) = self.provider.get_lint_level(lint, &sess);
struct_lint_level(
self.sess,
lint,
renamed_lint_level,
src,
Some(sp.into()),
+ msg,
|lint| {
- let mut err = lint.build(msg);
if let Some(new_name) = &renamed {
- err.span_suggestion(
+ lint.span_suggestion(
sp,
"use the new name",
new_name,
Applicability::MachineApplicable,
);
}
- err.emit();
+ lint
},
);
}
CheckLintNameResult::NoLint(suggestion) => {
let lint = builtin::UNKNOWN_LINTS;
- let (level, src) = self.sets.get_lint_level(
- lint,
- self.cur,
- Some(self.current_specs()),
+ let (level, src) = self.provider.get_lint_level(lint, self.sess);
+ let name = if let Some(tool_ident) = tool_ident {
+ format!("{}::{}", tool_ident.name, name)
+ } else {
+ name.to_string()
+ };
+ struct_lint_level(
self.sess,
+ lint,
+ level,
+ src,
+ Some(sp.into()),
+ format!("unknown lint: `{}`", name),
+ |lint| {
+ if let Some(suggestion) = suggestion {
+ lint.span_suggestion(
+ sp,
+ "did you mean",
+ suggestion,
+ Applicability::MaybeIncorrect,
+ );
+ }
+ lint
+ },
);
- struct_lint_level(self.sess, lint, level, src, Some(sp.into()), |lint| {
- let name = if let Some(tool_ident) = tool_ident {
- format!("{}::{}", tool_ident.name, name)
- } else {
- name.to_string()
- };
- let mut db = lint.build(format!("unknown lint: `{}`", name));
- if let Some(suggestion) = suggestion {
- db.span_suggestion(
- sp,
- "did you mean",
- suggestion,
- Applicability::MachineApplicable,
- );
- }
- db.emit();
- });
}
}
// If this lint was renamed, apply the new lint instead of ignoring the attribute.
@@ -570,17 +977,21 @@ impl<'s> LintLevelsBuilder<'s> {
if let CheckLintNameResult::Ok(ids) =
self.store.check_lint_name(&new_name, None, self.registered_tools)
{
- let src = LintLevelSource::Node(Symbol::intern(&new_name), sp, reason);
+ let src = LintLevelSource::Node {
+ name: Symbol::intern(&new_name),
+ span: sp,
+ reason,
+ };
for &id in ids {
if self.check_gated_lint(id, attr.span) {
self.insert_spec(id, (level, src));
}
}
if let Level::Expect(expect_id) = level {
- self.lint_expectations.push((
+ self.provider.push_expectation(
expect_id,
LintExpectation::new(reason, sp, false, tool_name),
- ));
+ );
}
} else {
panic!("renamed lint does not exist: {}", new_name);
@@ -595,219 +1006,87 @@ impl<'s> LintLevelsBuilder<'s> {
continue;
}
- let LintLevelSource::Node(lint_attr_name, lint_attr_span, _) = *src else {
+ let LintLevelSource::Node { name: lint_attr_name, span: lint_attr_span, .. } = *src else {
continue
};
let lint = builtin::UNUSED_ATTRIBUTES;
- let (lint_level, lint_src) =
- self.sets.get_lint_level(lint, self.cur, Some(self.current_specs()), self.sess);
+ let (lint_level, lint_src) = self.provider.get_lint_level(lint, &self.sess);
struct_lint_level(
self.sess,
lint,
lint_level,
lint_src,
Some(lint_attr_span.into()),
- |lint| {
- let mut db = lint.build(&format!(
- "{}({}) is ignored unless specified at crate level",
- level.as_str(),
- lint_attr_name
- ));
- db.emit();
- },
+ format!(
+ "{}({}) is ignored unless specified at crate level",
+ level.as_str(),
+ lint_attr_name
+ ),
+ |lint| lint,
);
// don't set a separate error for every lint in the group
break;
}
}
-
- if self.current_specs().is_empty() {
- self.sets.list.pop();
- self.cur = prev;
- }
-
- BuilderPush { prev, changed: prev != self.cur }
- }
-
- fn create_stable_id(
- &mut self,
- unstable_id: LintExpectationId,
- hir_id: HirId,
- attr_index: usize,
- ) -> LintExpectationId {
- let stable_id =
- LintExpectationId::Stable { hir_id, attr_index: attr_index as u16, lint_index: None };
-
- self.expectation_id_map.insert(unstable_id, stable_id);
-
- stable_id
}
/// Checks if the lint is gated on a feature that is not enabled.
///
/// Returns `true` if the lint's feature is enabled.
+ // FIXME only emit this once for each attribute, instead of repeating it 4 times for
+ // pre-expansion lints, post-expansion lints, `shallow_lint_levels_on` and `lint_expectations`.
fn check_gated_lint(&self, lint_id: LintId, span: Span) -> bool {
if let Some(feature) = lint_id.lint.feature_gate {
if !self.sess.features_untracked().enabled(feature) {
let lint = builtin::UNKNOWN_LINTS;
let (level, src) = self.lint_level(builtin::UNKNOWN_LINTS);
- struct_lint_level(self.sess, lint, level, src, Some(span.into()), |lint_db| {
- let mut db =
- lint_db.build(&format!("unknown lint: `{}`", lint_id.lint.name_lower()));
- db.note(&format!("the `{}` lint is unstable", lint_id.lint.name_lower(),));
- add_feature_diagnostics(&mut db, &self.sess.parse_sess, feature);
- db.emit();
- });
+ struct_lint_level(
+ self.sess,
+ lint,
+ level,
+ src,
+ Some(span.into()),
+ format!("unknown lint: `{}`", lint_id.lint.name_lower()),
+ |lint| {
+ lint.note(
+ &format!("the `{}` lint is unstable", lint_id.lint.name_lower(),),
+ );
+ add_feature_diagnostics(lint, &self.sess.parse_sess, feature);
+ lint
+ },
+ );
return false;
}
}
true
}
- /// Called after `push` when the scope of a set of attributes are exited.
- pub fn pop(&mut self, push: BuilderPush) {
- self.cur = push.prev;
- }
-
/// Find the lint level for a lint.
- pub fn lint_level(&self, lint: &'static Lint) -> (Level, LintLevelSource) {
- self.sets.get_lint_level(lint, self.cur, None, self.sess)
+ pub fn lint_level(&self, lint: &'static Lint) -> LevelAndSource {
+ self.provider.get_lint_level(lint, self.sess)
}
/// Used to emit a lint-related diagnostic based on the current state of
/// this lint context.
- pub fn struct_lint(
+ ///
+ /// Return value of the `decorate` closure is ignored, see [`struct_lint_level`] for a detailed explanation.
+ ///
+ /// [`struct_lint_level`]: rustc_middle::lint::struct_lint_level#decorate-signature
+ pub(crate) fn struct_lint(
&self,
lint: &'static Lint,
span: Option<MultiSpan>,
- decorate: impl for<'a> FnOnce(LintDiagnosticBuilder<'a, ()>),
+ msg: impl Into<DiagnosticMessage>,
+ decorate: impl for<'a, 'b> FnOnce(
+ &'b mut DiagnosticBuilder<'a, ()>,
+ ) -> &'b mut DiagnosticBuilder<'a, ()>,
) {
let (level, src) = self.lint_level(lint);
- struct_lint_level(self.sess, lint, level, src, span, decorate)
- }
-
- /// Registers the ID provided with the current set of lints stored in
- /// this context.
- pub fn register_id(&mut self, id: HirId) {
- self.id_to_set.insert(id, self.cur);
- }
-
- fn update_unstable_expectation_ids(&self) {
- self.sess.diagnostic().update_unstable_expectation_id(&self.expectation_id_map);
- }
-
- pub fn build_map(self) -> LintLevelMap {
- LintLevelMap {
- sets: self.sets,
- id_to_set: self.id_to_set,
- lint_expectations: self.lint_expectations,
- }
- }
-}
-
-struct LintLevelMapBuilder<'tcx> {
- levels: LintLevelsBuilder<'tcx>,
- tcx: TyCtxt<'tcx>,
-}
-
-impl LintLevelMapBuilder<'_> {
- fn with_lint_attrs<F>(&mut self, id: hir::HirId, f: F)
- where
- F: FnOnce(&mut Self),
- {
- let is_crate_hir = id == hir::CRATE_HIR_ID;
- let attrs = self.tcx.hir().attrs(id);
- let push = self.levels.push(attrs, is_crate_hir, Some(id));
-
- if push.changed {
- self.levels.register_id(id);
- }
- f(self);
- self.levels.pop(push);
- }
-}
-
-impl<'tcx> intravisit::Visitor<'tcx> for LintLevelMapBuilder<'tcx> {
- type NestedFilter = nested_filter::All;
-
- fn nested_visit_map(&mut self) -> Self::Map {
- self.tcx.hir()
- }
-
- fn visit_param(&mut self, param: &'tcx hir::Param<'tcx>) {
- self.with_lint_attrs(param.hir_id, |builder| {
- intravisit::walk_param(builder, param);
- });
- }
-
- fn visit_item(&mut self, it: &'tcx hir::Item<'tcx>) {
- self.with_lint_attrs(it.hir_id(), |builder| {
- intravisit::walk_item(builder, it);
- });
- }
-
- fn visit_foreign_item(&mut self, it: &'tcx hir::ForeignItem<'tcx>) {
- self.with_lint_attrs(it.hir_id(), |builder| {
- intravisit::walk_foreign_item(builder, it);
- })
- }
-
- fn visit_stmt(&mut self, e: &'tcx hir::Stmt<'tcx>) {
- // We will call `with_lint_attrs` when we walk
- // the `StmtKind`. The outer statement itself doesn't
- // define the lint levels.
- intravisit::walk_stmt(self, e);
- }
-
- fn visit_expr(&mut self, e: &'tcx hir::Expr<'tcx>) {
- self.with_lint_attrs(e.hir_id, |builder| {
- intravisit::walk_expr(builder, e);
- })
- }
-
- fn visit_field_def(&mut self, s: &'tcx hir::FieldDef<'tcx>) {
- self.with_lint_attrs(s.hir_id, |builder| {
- intravisit::walk_field_def(builder, s);
- })
- }
-
- fn visit_variant(
- &mut self,
- v: &'tcx hir::Variant<'tcx>,
- g: &'tcx hir::Generics<'tcx>,
- item_id: hir::HirId,
- ) {
- self.with_lint_attrs(v.id, |builder| {
- intravisit::walk_variant(builder, v, g, item_id);
- })
- }
-
- fn visit_local(&mut self, l: &'tcx hir::Local<'tcx>) {
- self.with_lint_attrs(l.hir_id, |builder| {
- intravisit::walk_local(builder, l);
- })
- }
-
- fn visit_arm(&mut self, a: &'tcx hir::Arm<'tcx>) {
- self.with_lint_attrs(a.hir_id, |builder| {
- intravisit::walk_arm(builder, a);
- })
- }
-
- fn visit_trait_item(&mut self, trait_item: &'tcx hir::TraitItem<'tcx>) {
- self.with_lint_attrs(trait_item.hir_id(), |builder| {
- intravisit::walk_trait_item(builder, trait_item);
- });
- }
-
- fn visit_impl_item(&mut self, impl_item: &'tcx hir::ImplItem<'tcx>) {
- self.with_lint_attrs(impl_item.hir_id(), |builder| {
- intravisit::walk_impl_item(builder, impl_item);
- });
+ struct_lint_level(self.sess, lint, level, src, span, msg, decorate)
}
}
-pub fn provide(providers: &mut Providers) {
- providers.lint_levels = lint_levels;
+pub(crate) fn provide(providers: &mut Providers) {
+ *providers = Providers { shallow_lint_levels_on, lint_expectations, ..*providers };
}
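
The dominant mechanical change in this patch is the new diagnostic API visible throughout the hunks above: `struct_span_lint` and `struct_lint_level` now take the message up front, and the closure only decorates an already-created builder and returns it instead of calling `.build(..)` and `.emit()` itself. A schematic before/after of the call shape (not a standalone program; `SOME_LINT` and the fluent slugs are placeholders):

```rust
// Old shape: the closure constructs the diagnostic and must emit it.
cx.struct_span_lint(SOME_LINT, span, |lint| {
    lint.build(fluent::lint::some_message)
        .note(fluent::lint::note)
        .emit();
});

// New shape: the message is passed directly; the closure returns the decorated
// builder and emission happens inside `struct_lint_level`.
cx.struct_span_lint(SOME_LINT, span, fluent::lint_some_message, |lint| {
    lint.note(fluent::note)
});
```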
diff --git a/compiler/rustc_lint/src/lib.rs b/compiler/rustc_lint/src/lib.rs
index 389a0b5d1..5288fc542 100644
--- a/compiler/rustc_lint/src/lib.rs
+++ b/compiler/rustc_lint/src/lib.rs
@@ -34,7 +34,7 @@
#![feature(iter_intersperse)]
#![feature(iter_order_by)]
#![feature(let_chains)]
-#![feature(let_else)]
+#![feature(min_specialization)]
#![feature(never_type)]
#![recursion_limit = "256"]
@@ -42,22 +42,28 @@
extern crate rustc_middle;
#[macro_use]
extern crate rustc_session;
+#[macro_use]
+extern crate tracing;
mod array_into_iter;
pub mod builtin;
mod context;
mod early;
mod enum_intrinsics_non_enums;
+mod errors;
mod expect;
+mod for_loops_over_fallibles;
pub mod hidden_unicode_codepoints;
mod internal;
mod late;
+mod let_underscore;
mod levels;
mod methods;
mod non_ascii_idents;
mod non_fmt_panic;
mod nonstandard_style;
mod noop_method_call;
+mod opaque_hidden_inferred_bound;
mod pass_by_value;
mod passes;
mod redundant_semicolon;
@@ -81,13 +87,16 @@ use rustc_span::Span;
use array_into_iter::ArrayIntoIter;
use builtin::*;
use enum_intrinsics_non_enums::EnumIntrinsicsNonEnums;
+use for_loops_over_fallibles::*;
use hidden_unicode_codepoints::*;
use internal::*;
+use let_underscore::*;
use methods::*;
use non_ascii_idents::*;
use non_fmt_panic::NonPanicFmt;
use nonstandard_style::*;
use noop_method_call::*;
+use opaque_hidden_inferred_bound::*;
use pass_by_value::*;
use redundant_semicolon::*;
use traits::*;
@@ -130,6 +139,7 @@ macro_rules! early_lint_passes {
UnusedBraces: UnusedBraces,
UnusedImportBraces: UnusedImportBraces,
UnsafeCode: UnsafeCode,
+ SpecialModuleName: SpecialModuleName,
AnonymousParameters: AnonymousParameters,
EllipsisInclusiveRangePatterns: EllipsisInclusiveRangePatterns::default(),
NonCamelCaseTypes: NonCamelCaseTypes,
@@ -140,6 +150,7 @@ macro_rules! early_lint_passes {
IncompleteFeatures: IncompleteFeatures,
RedundantSemicolons: RedundantSemicolons,
UnusedDocComment: UnusedDocComment,
+ UnexpectedCfgs: UnexpectedCfgs,
]
);
};
@@ -179,12 +190,14 @@ macro_rules! late_lint_mod_passes {
$macro!(
$args,
[
+ ForLoopsOverFallibles: ForLoopsOverFallibles,
HardwiredLints: HardwiredLints,
ImproperCTypesDeclarations: ImproperCTypesDeclarations,
ImproperCTypesDefinitions: ImproperCTypesDefinitions,
VariantSizeDifferences: VariantSizeDifferences,
BoxPointers: BoxPointers,
PathStatements: PathStatements,
+ LetUnderscore: LetUnderscore,
// Depends on referenced function signatures in expressions
UnusedResults: UnusedResults,
NonUpperCaseGlobals: NonUpperCaseGlobals,
@@ -199,7 +212,7 @@ macro_rules! late_lint_mod_passes {
TypeLimits: TypeLimits::new(),
NonSnakeCase: NonSnakeCase,
InvalidNoMangleItems: InvalidNoMangleItems,
- // Depends on access levels
+ // Depends on effective visibilities
UnreachablePub: UnreachablePub,
ExplicitOutlivesRequirements: ExplicitOutlivesRequirements,
InvalidValue: InvalidValue,
@@ -215,6 +228,7 @@ macro_rules! late_lint_mod_passes {
EnumIntrinsicsNonEnums: EnumIntrinsicsNonEnums,
InvalidAtomicOrdering: InvalidAtomicOrdering,
NamedAsmLabels: NamedAsmLabels,
+ OpaqueHiddenInferredBound: OpaqueHiddenInferredBound,
]
);
};
@@ -252,26 +266,41 @@ fn register_builtins(store: &mut LintStore, no_interleave_lints: bool) {
)
}
- macro_rules! register_pass {
+ macro_rules! register_early_pass {
($method:ident, $ty:ident, $constructor:expr) => {
store.register_lints(&$ty::get_lints());
store.$method(|| Box::new($constructor));
};
}
- macro_rules! register_passes {
+ macro_rules! register_late_pass {
+ ($method:ident, $ty:ident, $constructor:expr) => {
+ store.register_lints(&$ty::get_lints());
+ store.$method(|_| Box::new($constructor));
+ };
+ }
+
+ macro_rules! register_early_passes {
($method:ident, [$($passes:ident: $constructor:expr,)*]) => (
$(
- register_pass!($method, $passes, $constructor);
+ register_early_pass!($method, $passes, $constructor);
+ )*
+ )
+ }
+
+ macro_rules! register_late_passes {
+ ($method:ident, [$($passes:ident: $constructor:expr,)*]) => (
+ $(
+ register_late_pass!($method, $passes, $constructor);
)*
)
}
if no_interleave_lints {
- pre_expansion_lint_passes!(register_passes, register_pre_expansion_pass);
- early_lint_passes!(register_passes, register_early_pass);
- late_lint_passes!(register_passes, register_late_pass);
- late_lint_mod_passes!(register_passes, register_late_mod_pass);
+ pre_expansion_lint_passes!(register_early_passes, register_pre_expansion_pass);
+ early_lint_passes!(register_early_passes, register_early_pass);
+ late_lint_passes!(register_late_passes, register_late_pass);
+ late_lint_mod_passes!(register_late_passes, register_late_mod_pass);
} else {
store.register_lints(&BuiltinCombinedPreExpansionLintPass::get_lints());
store.register_lints(&BuiltinCombinedEarlyLintPass::get_lints());
@@ -311,6 +340,8 @@ fn register_builtins(store: &mut LintStore, no_interleave_lints: bool) {
REDUNDANT_SEMICOLONS
);
+ add_lint_group!("let_underscore", LET_UNDERSCORE_DROP, LET_UNDERSCORE_LOCK);
+
add_lint_group!(
"rust_2018_idioms",
BARE_TRAIT_OBJECTS,
@@ -494,25 +525,30 @@ fn register_builtins(store: &mut LintStore, no_interleave_lints: bool) {
"now allowed, see issue #59159 \
<https://github.com/rust-lang/rust/issues/59159> for more information",
);
+ store.register_removed(
+ "const_err",
+ "converted into hard error, see issue #71800 \
+ <https://github.com/rust-lang/rust/issues/71800> for more information",
+ );
}
fn register_internals(store: &mut LintStore) {
store.register_lints(&LintPassImpl::get_lints());
store.register_early_pass(|| Box::new(LintPassImpl));
store.register_lints(&DefaultHashTypes::get_lints());
- store.register_late_pass(|| Box::new(DefaultHashTypes));
+ store.register_late_pass(|_| Box::new(DefaultHashTypes));
store.register_lints(&QueryStability::get_lints());
- store.register_late_pass(|| Box::new(QueryStability));
+ store.register_late_pass(|_| Box::new(QueryStability));
store.register_lints(&ExistingDocKeyword::get_lints());
- store.register_late_pass(|| Box::new(ExistingDocKeyword));
+ store.register_late_pass(|_| Box::new(ExistingDocKeyword));
store.register_lints(&TyTyKind::get_lints());
- store.register_late_pass(|| Box::new(TyTyKind));
+ store.register_late_pass(|_| Box::new(TyTyKind));
store.register_lints(&Diagnostics::get_lints());
- store.register_late_pass(|| Box::new(Diagnostics));
+ store.register_late_pass(|_| Box::new(Diagnostics));
store.register_lints(&BadOptAccess::get_lints());
- store.register_late_pass(|| Box::new(BadOptAccess));
+ store.register_late_pass(|_| Box::new(BadOptAccess));
store.register_lints(&PassByValue::get_lints());
- store.register_late_pass(|| Box::new(PassByValue));
+ store.register_late_pass(|_| Box::new(PassByValue));
// FIXME(davidtwco): deliberately do not include `UNTRANSLATABLE_DIAGNOSTIC` and
// `DIAGNOSTIC_OUTSIDE_OF_IMPL` here because `-Wrustc::internal` is provided to every crate and
// these lints will trigger all of the time - change this once migration to diagnostic structs
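
Among the passes registered above, `LetUnderscore` backs the new `let_underscore` lint group (`LET_UNDERSCORE_DROP`, `LET_UNDERSCORE_LOCK`). A minimal runnable sketch of what `let_underscore_drop` reports; the `#![warn(..)]` attribute makes it visible regardless of the lint's default level, and the type name is illustrative:

```rust
#![warn(let_underscore_drop)]

struct Guard;

impl Drop for Guard {
    fn drop(&mut self) {
        println!("guard released");
    }
}

fn main() {
    // `let _ = ...` never creates a binding, so the Guard is dropped right here;
    // the lint suggests a named binding or an explicit `drop(..)` to make that clear.
    let _ = Guard;

    // Binding to `_guard` keeps the value alive until the end of `main`.
    let _guard = Guard;
    println!("doing work while the guard is held");
}
```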
diff --git a/compiler/rustc_lint/src/methods.rs b/compiler/rustc_lint/src/methods.rs
index ff5a01749..e2d7d5b49 100644
--- a/compiler/rustc_lint/src/methods.rs
+++ b/compiler/rustc_lint/src/methods.rs
@@ -44,9 +44,13 @@ fn in_macro(span: Span) -> bool {
fn first_method_call<'tcx>(
expr: &'tcx Expr<'tcx>,
-) -> Option<(&'tcx PathSegment<'tcx>, &'tcx [Expr<'tcx>])> {
- if let ExprKind::MethodCall(path, args, _) = &expr.kind {
- if args.iter().any(|e| e.span.from_expansion()) { None } else { Some((path, *args)) }
+) -> Option<(&'tcx PathSegment<'tcx>, &'tcx Expr<'tcx>)> {
+ if let ExprKind::MethodCall(path, receiver, args, ..) = &expr.kind {
+ if args.iter().any(|e| e.span.from_expansion()) || receiver.span.from_expansion() {
+ None
+ } else {
+ Some((path, *receiver))
+ }
} else {
None
}
@@ -59,15 +63,13 @@ impl<'tcx> LateLintPass<'tcx> for TemporaryCStringAsPtr {
}
match first_method_call(expr) {
- Some((path, args)) if path.ident.name == sym::as_ptr => {
- let unwrap_arg = &args[0];
+ Some((path, unwrap_arg)) if path.ident.name == sym::as_ptr => {
let as_ptr_span = path.ident.span;
match first_method_call(unwrap_arg) {
- Some((path, args))
+ Some((path, receiver))
if path.ident.name == sym::unwrap || path.ident.name == sym::expect =>
{
- let source_arg = &args[0];
- lint_cstring_as_ptr(cx, as_ptr_span, source_arg, unwrap_arg);
+ lint_cstring_as_ptr(cx, as_ptr_span, receiver, unwrap_arg);
}
_ => return,
}
@@ -88,14 +90,17 @@ fn lint_cstring_as_ptr(
if cx.tcx.is_diagnostic_item(sym::Result, def.did()) {
if let ty::Adt(adt, _) = substs.type_at(0).kind() {
if cx.tcx.is_diagnostic_item(sym::cstring_type, adt.did()) {
- cx.struct_span_lint(TEMPORARY_CSTRING_AS_PTR, as_ptr_span, |diag| {
- diag.build(fluent::lint::cstring_ptr)
- .span_label(as_ptr_span, fluent::lint::as_ptr_label)
- .span_label(unwrap.span, fluent::lint::unwrap_label)
- .note(fluent::lint::note)
- .help(fluent::lint::help)
- .emit();
- });
+ cx.struct_span_lint(
+ TEMPORARY_CSTRING_AS_PTR,
+ as_ptr_span,
+ fluent::lint_cstring_ptr,
+ |diag| {
+ diag.span_label(as_ptr_span, fluent::as_ptr_label)
+ .span_label(unwrap.span, fluent::unwrap_label)
+ .note(fluent::note)
+ .help(fluent::help)
+ },
+ );
}
}
}
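
For context on what `TemporaryCStringAsPtr` reports now that it destructures the separate receiver, a small runnable example; the offending line stays commented out because the lint is deny-by-default:

```rust
use std::ffi::CString;

fn main() {
    // `temporary_cstring_as_ptr` fires here: the CString is a temporary that is
    // dropped at the end of the statement, so `ptr` would dangle immediately.
    // let ptr = CString::new("hello").unwrap().as_ptr();

    // Keeping the CString in a binding keeps the buffer alive while the pointer is used.
    let c_string = CString::new("hello").unwrap();
    let ptr = c_string.as_ptr();
    println!("{:p}", ptr);
}
```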
diff --git a/compiler/rustc_lint/src/non_ascii_idents.rs b/compiler/rustc_lint/src/non_ascii_idents.rs
index 764003e61..dea9506ac 100644
--- a/compiler/rustc_lint/src/non_ascii_idents.rs
+++ b/compiler/rustc_lint/src/non_ascii_idents.rs
@@ -180,15 +180,21 @@ impl EarlyLintPass for NonAsciiIdents {
continue;
}
has_non_ascii_idents = true;
- cx.struct_span_lint(NON_ASCII_IDENTS, sp, |lint| {
- lint.build(fluent::lint::identifier_non_ascii_char).emit();
- });
+ cx.struct_span_lint(
+ NON_ASCII_IDENTS,
+ sp,
+ fluent::lint_identifier_non_ascii_char,
+ |lint| lint,
+ );
if check_uncommon_codepoints
&& !symbol_str.chars().all(GeneralSecurityProfile::identifier_allowed)
{
- cx.struct_span_lint(UNCOMMON_CODEPOINTS, sp, |lint| {
- lint.build(fluent::lint::identifier_uncommon_codepoints).emit();
- })
+ cx.struct_span_lint(
+ UNCOMMON_CODEPOINTS,
+ sp,
+ fluent::lint_identifier_uncommon_codepoints,
+ |lint| lint,
+ )
}
}
@@ -216,13 +222,16 @@ impl EarlyLintPass for NonAsciiIdents {
.entry(skeleton_sym)
.and_modify(|(existing_symbol, existing_span, existing_is_ascii)| {
if !*existing_is_ascii || !is_ascii {
- cx.struct_span_lint(CONFUSABLE_IDENTS, sp, |lint| {
- lint.build(fluent::lint::confusable_identifier_pair)
- .set_arg("existing_sym", *existing_symbol)
- .set_arg("sym", symbol)
- .span_label(*existing_span, fluent::lint::label)
- .emit();
- });
+ cx.struct_span_lint(
+ CONFUSABLE_IDENTS,
+ sp,
+ fluent::lint_confusable_identifier_pair,
+ |lint| {
+ lint.set_arg("existing_sym", *existing_symbol)
+ .set_arg("sym", symbol)
+ .span_label(*existing_span, fluent::label)
+ },
+ );
}
if *existing_is_ascii && !is_ascii {
*existing_symbol = symbol;
@@ -322,22 +331,25 @@ impl EarlyLintPass for NonAsciiIdents {
}
for ((sp, ch_list), script_set) in lint_reports {
- cx.struct_span_lint(MIXED_SCRIPT_CONFUSABLES, sp, |lint| {
- let mut includes = String::new();
- for (idx, ch) in ch_list.into_iter().enumerate() {
- if idx != 0 {
- includes += ", ";
+ cx.struct_span_lint(
+ MIXED_SCRIPT_CONFUSABLES,
+ sp,
+ fluent::lint_mixed_script_confusables,
+ |lint| {
+ let mut includes = String::new();
+ for (idx, ch) in ch_list.into_iter().enumerate() {
+ if idx != 0 {
+ includes += ", ";
+ }
+ let char_info = format!("'{}' (U+{:04X})", ch, ch as u32);
+ includes += &char_info;
}
- let char_info = format!("'{}' (U+{:04X})", ch, ch as u32);
- includes += &char_info;
- }
- lint.build(fluent::lint::mixed_script_confusables)
- .set_arg("set", script_set.to_string())
- .set_arg("includes", includes)
- .note(fluent::lint::includes_note)
- .note(fluent::lint::note)
- .emit();
- });
+ lint.set_arg("set", script_set.to_string())
+ .set_arg("includes", includes)
+ .note(fluent::includes_note)
+ .note(fluent::note)
+ },
+ );
}
}
}
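
The refactored `NonAsciiIdents` pass covers several related lints; the simplest, allow-by-default `non_ascii_idents`, can be exercised with a short program (the others handled here, `uncommon_codepoints`, `confusable_idents` and `mixed_script_confusables`, need correspondingly unusual identifiers):

```rust
#![warn(non_ascii_idents)]

fn main() {
    // Any identifier containing a non-ASCII character is reported.
    let café = 3;
    println!("{}", café);
}
```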
diff --git a/compiler/rustc_lint/src/non_fmt_panic.rs b/compiler/rustc_lint/src/non_fmt_panic.rs
index cdad2d2e8..6ad2e0294 100644
--- a/compiler/rustc_lint/src/non_fmt_panic.rs
+++ b/compiler/rustc_lint/src/non_fmt_panic.rs
@@ -119,22 +119,20 @@ fn check_panic<'tcx>(cx: &LateContext<'tcx>, f: &'tcx hir::Expr<'tcx>, arg: &'tc
arg_span = expn.call_site;
}
- cx.struct_span_lint(NON_FMT_PANICS, arg_span, |lint| {
- let mut l = lint.build(fluent::lint::non_fmt_panic);
- l.set_arg("name", symbol);
- l.note(fluent::lint::note);
- l.note(fluent::lint::more_info_note);
+ cx.struct_span_lint(NON_FMT_PANICS, arg_span, fluent::lint_non_fmt_panic, |lint| {
+ lint.set_arg("name", symbol);
+ lint.note(fluent::note);
+ lint.note(fluent::more_info_note);
if !is_arg_inside_call(arg_span, span) {
// No clue where this argument is coming from.
- l.emit();
- return;
+ return lint;
}
if arg_macro.map_or(false, |id| cx.tcx.is_diagnostic_item(sym::format_macro, id)) {
// A case of `panic!(format!(..))`.
- l.note(fluent::lint::supports_fmt_note);
+ lint.note(fluent::supports_fmt_note);
if let Some((open, close, _)) = find_delimiters(cx, arg_span) {
- l.multipart_suggestion(
- fluent::lint::supports_fmt_suggestion,
+ lint.multipart_suggestion(
+ fluent::supports_fmt_suggestion,
vec![
(arg_span.until(open.shrink_to_hi()), "".into()),
(close.until(arg_span.shrink_to_hi()), "".into()),
@@ -153,21 +151,19 @@ fn check_panic<'tcx>(cx: &LateContext<'tcx>, f: &'tcx hir::Expr<'tcx>, arg: &'tc
Some(ty_def) if cx.tcx.is_diagnostic_item(sym::String, ty_def.did()),
);
- let (suggest_display, suggest_debug) = cx.tcx.infer_ctxt().enter(|infcx| {
- let display = is_str
- || cx.tcx.get_diagnostic_item(sym::Display).map(|t| {
- infcx
- .type_implements_trait(t, ty, InternalSubsts::empty(), cx.param_env)
- .may_apply()
- }) == Some(true);
- let debug = !display
- && cx.tcx.get_diagnostic_item(sym::Debug).map(|t| {
- infcx
- .type_implements_trait(t, ty, InternalSubsts::empty(), cx.param_env)
- .may_apply()
- }) == Some(true);
- (display, debug)
- });
+ let infcx = cx.tcx.infer_ctxt().build();
+ let suggest_display = is_str
+ || cx.tcx.get_diagnostic_item(sym::Display).map(|t| {
+ infcx
+ .type_implements_trait(t, ty, InternalSubsts::empty(), cx.param_env)
+ .may_apply()
+ }) == Some(true);
+ let suggest_debug = !suggest_display
+ && cx.tcx.get_diagnostic_item(sym::Debug).map(|t| {
+ infcx
+ .type_implements_trait(t, ty, InternalSubsts::empty(), cx.param_env)
+ .may_apply()
+ }) == Some(true);
let suggest_panic_any = !is_str && panic == sym::std_panic_macro;
@@ -180,17 +176,17 @@ fn check_panic<'tcx>(cx: &LateContext<'tcx>, f: &'tcx hir::Expr<'tcx>, arg: &'tc
};
if suggest_display {
- l.span_suggestion_verbose(
+ lint.span_suggestion_verbose(
arg_span.shrink_to_lo(),
- fluent::lint::display_suggestion,
+ fluent::display_suggestion,
"\"{}\", ",
fmt_applicability,
);
} else if suggest_debug {
- l.set_arg("ty", ty);
- l.span_suggestion_verbose(
+ lint.set_arg("ty", ty);
+ lint.span_suggestion_verbose(
arg_span.shrink_to_lo(),
- fluent::lint::debug_suggestion,
+ fluent::debug_suggestion,
"\"{:?}\", ",
fmt_applicability,
);
@@ -198,9 +194,9 @@ fn check_panic<'tcx>(cx: &LateContext<'tcx>, f: &'tcx hir::Expr<'tcx>, arg: &'tc
if suggest_panic_any {
if let Some((open, close, del)) = find_delimiters(cx, span) {
- l.set_arg("already_suggested", suggest_display || suggest_debug);
- l.multipart_suggestion(
- fluent::lint::panic_suggestion,
+ lint.set_arg("already_suggested", suggest_display || suggest_debug);
+ lint.multipart_suggestion(
+ fluent::panic_suggestion,
if del == '(' {
vec![(span.until(open), "std::panic::panic_any".into())]
} else {
@@ -214,7 +210,7 @@ fn check_panic<'tcx>(cx: &LateContext<'tcx>, f: &'tcx hir::Expr<'tcx>, arg: &'tc
}
}
}
- l.emit();
+ lint
});
}
@@ -258,25 +254,24 @@ fn check_panic_str<'tcx>(
.map(|span| fmt_span.from_inner(InnerSpan::new(span.start, span.end)))
.collect(),
};
- cx.struct_span_lint(NON_FMT_PANICS, arg_spans, |lint| {
- let mut l = lint.build(fluent::lint::non_fmt_panic_unused);
- l.set_arg("count", n_arguments);
- l.note(fluent::lint::note);
+ cx.struct_span_lint(NON_FMT_PANICS, arg_spans, fluent::lint_non_fmt_panic_unused, |lint| {
+ lint.set_arg("count", n_arguments);
+ lint.note(fluent::note);
if is_arg_inside_call(arg.span, span) {
- l.span_suggestion(
+ lint.span_suggestion(
arg.span.shrink_to_hi(),
- fluent::lint::add_args_suggestion,
+ fluent::add_args_suggestion,
", ...",
Applicability::HasPlaceholders,
);
- l.span_suggestion(
+ lint.span_suggestion(
arg.span.shrink_to_lo(),
- fluent::lint::add_fmt_suggestion,
+ fluent::add_fmt_suggestion,
"\"{}\", ",
Applicability::MachineApplicable,
);
}
- l.emit();
+ lint
});
} else {
let brace_spans: Option<Vec<_>> =
@@ -287,20 +282,24 @@ fn check_panic_str<'tcx>(
.collect()
});
let count = brace_spans.as_ref().map(|v| v.len()).unwrap_or(/* any number >1 */ 2);
- cx.struct_span_lint(NON_FMT_PANICS, brace_spans.unwrap_or_else(|| vec![span]), |lint| {
- let mut l = lint.build(fluent::lint::non_fmt_panic_braces);
- l.set_arg("count", count);
- l.note(fluent::lint::note);
- if is_arg_inside_call(arg.span, span) {
- l.span_suggestion(
- arg.span.shrink_to_lo(),
- fluent::lint::suggestion,
- "\"{}\", ",
- Applicability::MachineApplicable,
- );
- }
- l.emit();
- });
+ cx.struct_span_lint(
+ NON_FMT_PANICS,
+ brace_spans.unwrap_or_else(|| vec![span]),
+ fluent::lint_non_fmt_panic_braces,
+ |lint| {
+ lint.set_arg("count", count);
+ lint.note(fluent::note);
+ if is_arg_inside_call(arg.span, span) {
+ lint.span_suggestion(
+ arg.span.shrink_to_lo(),
+ fluent::suggestion,
+ "\"{}\", ",
+ Applicability::MachineApplicable,
+ );
+ }
+ lint
+ },
+ );
}
}
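
Beyond the diagnostic-API migration and the new `infer_ctxt().build()` style, the behaviour of `non_fmt_panics` is unchanged; it targets pre-2021-edition code like the commented-out call below (on edition 2021 that call is already a hard error):

```rust
fn main() {
    let message = String::from("disk full");

    // On editions before 2021 this triggers `non_fmt_panics`: the argument is
    // only treated as a format string when it is a string literal.
    // panic!(message);

    // Suggested fixes: format explicitly, or pass the value as a panic payload.
    // panic!("{}", message);
    std::panic::panic_any(message);
}
```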
diff --git a/compiler/rustc_lint/src/nonstandard_style.rs b/compiler/rustc_lint/src/nonstandard_style.rs
index 8d04d68bf..7e50801f8 100644
--- a/compiler/rustc_lint/src/nonstandard_style.rs
+++ b/compiler/rustc_lint/src/nonstandard_style.rs
@@ -136,26 +136,30 @@ impl NonCamelCaseTypes {
let name = ident.name.as_str();
if !is_camel_case(name) {
- cx.struct_span_lint(NON_CAMEL_CASE_TYPES, ident.span, |lint| {
- let mut err = lint.build(fluent::lint::non_camel_case_type);
- let cc = to_camel_case(name);
- // We cannot provide meaningful suggestions
- // if the characters are in the category of "Lowercase Letter".
- if *name != cc {
- err.span_suggestion(
- ident.span,
- fluent::lint::suggestion,
- to_camel_case(name),
- Applicability::MaybeIncorrect,
- );
- } else {
- err.span_label(ident.span, fluent::lint::label);
- }
+ cx.struct_span_lint(
+ NON_CAMEL_CASE_TYPES,
+ ident.span,
+ fluent::lint_non_camel_case_type,
+ |lint| {
+ let cc = to_camel_case(name);
+ // We cannot provide meaningful suggestions
+ // if the characters are in the category of "Lowercase Letter".
+ if *name != cc {
+ lint.span_suggestion(
+ ident.span,
+ fluent::suggestion,
+ to_camel_case(name),
+ Applicability::MaybeIncorrect,
+ );
+ } else {
+ lint.span_label(ident.span, fluent::label);
+ }
- err.set_arg("sort", sort);
- err.set_arg("name", name);
- err.emit();
- })
+ lint.set_arg("sort", sort);
+ lint.set_arg("name", name);
+ lint
+ },
+ )
}
}
}
@@ -183,7 +187,7 @@ impl EarlyLintPass for NonCamelCaseTypes {
}
fn check_trait_item(&mut self, cx: &EarlyContext<'_>, it: &ast::AssocItem) {
- if let ast::AssocItemKind::TyAlias(..) = it.kind {
+ if let ast::AssocItemKind::Type(..) = it.kind {
self.check_case(cx, "associated type", &it.ident);
}
}
@@ -280,9 +284,8 @@ impl NonSnakeCase {
let name = ident.name.as_str();
if !is_snake_case(name) {
- cx.struct_span_lint(NON_SNAKE_CASE, ident.span, |lint| {
+ cx.struct_span_lint(NON_SNAKE_CASE, ident.span, fluent::lint_non_snake_case, |lint| {
let sc = NonSnakeCase::to_snake_case(name);
- let mut err = lint.build(fluent::lint::non_snake_case);
// We cannot provide meaningful suggestions
// if the characters are in the category of "Uppercase Letter".
if name != sc {
@@ -295,45 +298,39 @@ impl NonSnakeCase {
// Instead, recommend renaming the identifier entirely or, if permitted,
// escaping it to create a raw identifier.
if sc_ident.name.can_be_raw() {
- (fluent::lint::rename_or_convert_suggestion, sc_ident.to_string())
+ (fluent::rename_or_convert_suggestion, sc_ident.to_string())
} else {
- err.note(fluent::lint::cannot_convert_note);
- (fluent::lint::rename_suggestion, String::new())
+ lint.note(fluent::cannot_convert_note);
+ (fluent::rename_suggestion, String::new())
}
} else {
- (fluent::lint::convert_suggestion, sc.clone())
+ (fluent::convert_suggestion, sc.clone())
};
- err.span_suggestion(
+ lint.span_suggestion(
ident.span,
message,
suggestion,
Applicability::MaybeIncorrect,
);
} else {
- err.help(fluent::lint::help);
+ lint.help(fluent::help);
}
} else {
- err.span_label(ident.span, fluent::lint::label);
+ lint.span_label(ident.span, fluent::label);
}
- err.set_arg("sort", sort);
- err.set_arg("name", name);
- err.set_arg("sc", sc);
- err.emit();
+ lint.set_arg("sort", sort);
+ lint.set_arg("name", name);
+ lint.set_arg("sc", sc);
+ lint
});
}
}
}
impl<'tcx> LateLintPass<'tcx> for NonSnakeCase {
- fn check_mod(
- &mut self,
- cx: &LateContext<'_>,
- _: &'tcx hir::Mod<'tcx>,
- _: Span,
- id: hir::HirId,
- ) {
+ fn check_mod(&mut self, cx: &LateContext<'_>, _: &'tcx hir::Mod<'tcx>, id: hir::HirId) {
if id != hir::CRATE_HIR_ID {
return;
}
@@ -437,19 +434,14 @@ impl<'tcx> LateLintPass<'tcx> for NonSnakeCase {
fn check_pat(&mut self, cx: &LateContext<'_>, p: &hir::Pat<'_>) {
if let PatKind::Binding(_, hid, ident, _) = p.kind {
- if let hir::Node::Pat(parent_pat) = cx.tcx.hir().get(cx.tcx.hir().get_parent_node(hid))
+ if let hir::Node::PatField(field) = cx.tcx.hir().get(cx.tcx.hir().get_parent_node(hid))
{
- if let PatKind::Struct(_, field_pats, _) = &parent_pat.kind {
- if field_pats
- .iter()
- .any(|field| !field.is_shorthand && field.pat.hir_id == p.hir_id)
- {
- // Only check if a new name has been introduced, to avoid warning
- // on both the struct definition and this pattern.
- self.check_snake_case(cx, "variable", &ident);
- }
- return;
+ if !field.is_shorthand {
+ // Only check if a new name has been introduced, to avoid warning
+ // on both the struct definition and this pattern.
+ self.check_snake_case(cx, "variable", &ident);
}
+ return;
}
self.check_snake_case(cx, "variable", &ident);
}
@@ -489,26 +481,30 @@ impl NonUpperCaseGlobals {
fn check_upper_case(cx: &LateContext<'_>, sort: &str, ident: &Ident) {
let name = ident.name.as_str();
if name.chars().any(|c| c.is_lowercase()) {
- cx.struct_span_lint(NON_UPPER_CASE_GLOBALS, ident.span, |lint| {
- let uc = NonSnakeCase::to_snake_case(&name).to_uppercase();
- let mut err = lint.build(fluent::lint::non_upper_case_global);
- // We cannot provide meaningful suggestions
- // if the characters are in the category of "Lowercase Letter".
- if *name != uc {
- err.span_suggestion(
- ident.span,
- fluent::lint::suggestion,
- uc,
- Applicability::MaybeIncorrect,
- );
- } else {
- err.span_label(ident.span, fluent::lint::label);
- }
+ cx.struct_span_lint(
+ NON_UPPER_CASE_GLOBALS,
+ ident.span,
+ fluent::lint_non_upper_case_global,
+ |lint| {
+ let uc = NonSnakeCase::to_snake_case(&name).to_uppercase();
+ // We cannot provide meaningful suggestions
+ // if the characters are in the category of "Lowercase Letter".
+ if *name != uc {
+ lint.span_suggestion(
+ ident.span,
+ fluent::suggestion,
+ uc,
+ Applicability::MaybeIncorrect,
+ );
+ } else {
+ lint.span_label(ident.span, fluent::label);
+ }
- err.set_arg("sort", sort);
- err.set_arg("name", name);
- err.emit();
- })
+ lint.set_arg("sort", sort);
+ lint.set_arg("name", name);
+ lint
+ },
+ )
}
}
}
diff --git a/compiler/rustc_lint/src/noop_method_call.rs b/compiler/rustc_lint/src/noop_method_call.rs
index 11a752ff0..2ef425a10 100644
--- a/compiler/rustc_lint/src/noop_method_call.rs
+++ b/compiler/rustc_lint/src/noop_method_call.rs
@@ -1,5 +1,4 @@
use crate::context::LintContext;
-use crate::rustc_middle::ty::TypeVisitable;
use crate::LateContext;
use crate::LateLintPass;
use rustc_errors::fluent;
@@ -41,12 +40,12 @@ declare_lint_pass!(NoopMethodCall => [NOOP_METHOD_CALL]);
impl<'tcx> LateLintPass<'tcx> for NoopMethodCall {
fn check_expr(&mut self, cx: &LateContext<'tcx>, expr: &'tcx Expr<'_>) {
// We only care about method calls.
- let ExprKind::MethodCall(call, elements, _) = &expr.kind else {
+ let ExprKind::MethodCall(call, receiver, ..) = &expr.kind else {
return
};
// We only care about method calls corresponding to the `Clone`, `Deref` and `Borrow`
// traits and ignore any other method call.
- let (trait_id, did) = match cx.typeck_results().type_dependent_def(expr.hir_id) {
+ let did = match cx.typeck_results().type_dependent_def(expr.hir_id) {
// Verify we are dealing with a method/associated function.
Some((DefKind::AssocFn, did)) => match cx.tcx.trait_of_item(did) {
// Check that we're dealing with a trait method for one of the traits we care about.
@@ -56,21 +55,17 @@ impl<'tcx> LateLintPass<'tcx> for NoopMethodCall {
Some(sym::Borrow | sym::Clone | sym::Deref)
) =>
{
- (trait_id, did)
+ did
}
_ => return,
},
_ => return,
};
- let substs = cx.typeck_results().node_substs(expr.hir_id);
- if substs.needs_subst() {
- // We can't resolve on types that require monomorphization, so we don't handle them if
- // we need to perform substitution.
- return;
- }
- let param_env = cx.tcx.param_env(trait_id);
+ let substs = cx
+ .tcx
+ .normalize_erasing_regions(cx.param_env, cx.typeck_results().node_substs(expr.hir_id));
// Resolve the trait method instance.
- let Ok(Some(i)) = ty::Instance::resolve(cx.tcx, param_env, did, substs) else {
+ let Ok(Some(i)) = ty::Instance::resolve(cx.tcx, cx.param_env, did, substs) else {
return
};
// (Re)check that it implements the noop diagnostic.
@@ -81,7 +76,6 @@ impl<'tcx> LateLintPass<'tcx> for NoopMethodCall {
) {
return;
}
- let receiver = &elements[0];
let receiver_ty = cx.typeck_results().expr_ty(receiver);
let expr_ty = cx.typeck_results().expr_ty_adjusted(expr);
if receiver_ty != expr_ty {
@@ -91,13 +85,11 @@ impl<'tcx> LateLintPass<'tcx> for NoopMethodCall {
}
let expr_span = expr.span;
let span = expr_span.with_lo(receiver.span.hi());
- cx.struct_span_lint(NOOP_METHOD_CALL, span, |lint| {
- lint.build(fluent::lint::noop_method_call)
- .set_arg("method", call.ident.name)
+ cx.struct_span_lint(NOOP_METHOD_CALL, span, fluent::lint_noop_method_call, |lint| {
+ lint.set_arg("method", call.ident.name)
.set_arg("receiver_ty", receiver_ty)
- .span_label(span, fluent::lint::label)
- .note(fluent::lint::note)
- .emit();
+ .span_label(span, fluent::label)
+ .note(fluent::note)
});
}
}
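
`NoopMethodCall` now normalizes the substs instead of bailing out when they still need substitution; what it reports is unchanged and, since the lint is allow-by-default, has to be opted into:

```rust
#![warn(noop_method_call)]

#[derive(Debug)]
struct PlainData(u32); // deliberately not Clone

fn main() {
    let value = PlainData(42);
    let reference = &value;

    // `PlainData` has no `Clone` impl, so this resolves to `<&PlainData as Clone>::clone`
    // and merely copies the reference -- the no-op call the lint points out.
    let still_a_reference = reference.clone();
    println!("{:?}", still_a_reference);
}
```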
diff --git a/compiler/rustc_lint/src/opaque_hidden_inferred_bound.rs b/compiler/rustc_lint/src/opaque_hidden_inferred_bound.rs
new file mode 100644
index 000000000..00bf287ba
--- /dev/null
+++ b/compiler/rustc_lint/src/opaque_hidden_inferred_bound.rs
@@ -0,0 +1,163 @@
+use rustc_hir as hir;
+use rustc_infer::infer::TyCtxtInferExt;
+use rustc_macros::{LintDiagnostic, Subdiagnostic};
+use rustc_middle::ty::{
+ self, fold::BottomUpFolder, print::TraitPredPrintModifiersAndPath, Ty, TypeFoldable,
+};
+use rustc_span::Span;
+use rustc_trait_selection::traits;
+use rustc_trait_selection::traits::query::evaluate_obligation::InferCtxtExt;
+
+use crate::{LateContext, LateLintPass, LintContext};
+
+declare_lint! {
+ /// The `opaque_hidden_inferred_bound` lint detects cases in which nested
+ /// `impl Trait` in associated type bounds are not written generally enough
+ /// to satisfy the bounds of the associated type.
+ ///
+ /// ### Explanation
+ ///
+ /// This functionality was removed in #97346, but then rolled back in #99860
+ /// because it caused regressions.
+ ///
+    /// We plan on reintroducing this as a hard error, but in the meantime,
+ /// this lint serves to warn and suggest fixes for any use-cases which rely
+ /// on this behavior.
+ ///
+ /// ### Example
+ ///
+ /// ```
+ /// trait Trait {
+ /// type Assoc: Send;
+ /// }
+ ///
+ /// struct Struct;
+ ///
+ /// impl Trait for Struct {
+ /// type Assoc = i32;
+ /// }
+ ///
+ /// fn test() -> impl Trait<Assoc = impl Sized> {
+ /// Struct
+ /// }
+ /// ```
+ ///
+ /// {{produces}}
+ ///
+ /// In this example, `test` declares that the associated type `Assoc` for
+ /// `impl Trait` is `impl Sized`, which does not satisfy the `Send` bound
+ /// on the associated type.
+ ///
+    /// Although the hidden type, `i32`, does satisfy this bound, we do not
+ /// consider the return type to be well-formed with this lint. It can be
+ /// fixed by changing `impl Sized` into `impl Sized + Send`.
+ pub OPAQUE_HIDDEN_INFERRED_BOUND,
+ Warn,
+ "detects the use of nested `impl Trait` types in associated type bounds that are not general enough"
+}
+
+declare_lint_pass!(OpaqueHiddenInferredBound => [OPAQUE_HIDDEN_INFERRED_BOUND]);
+
+impl<'tcx> LateLintPass<'tcx> for OpaqueHiddenInferredBound {
+ fn check_item(&mut self, cx: &LateContext<'tcx>, item: &'tcx hir::Item<'tcx>) {
+ let hir::ItemKind::OpaqueTy(_) = &item.kind else { return; };
+ let def_id = item.owner_id.def_id.to_def_id();
+ let infcx = &cx.tcx.infer_ctxt().build();
+ // For every projection predicate in the opaque type's explicit bounds,
+ // check that the type that we're assigning actually satisfies the bounds
+ // of the associated type.
+ for &(pred, pred_span) in cx.tcx.explicit_item_bounds(def_id) {
+ // Liberate bound regions in the predicate since we
+ // don't actually care about lifetimes in this check.
+ let predicate = cx.tcx.liberate_late_bound_regions(def_id, pred.kind());
+ let ty::PredicateKind::Projection(proj) = predicate else {
+ continue;
+ };
+ // Only check types, since those are the only things that may
+ // have opaques in them anyways.
+ let Some(proj_term) = proj.term.ty() else { continue };
+
+ let proj_ty =
+ cx.tcx.mk_projection(proj.projection_ty.item_def_id, proj.projection_ty.substs);
+ // For every instance of the projection type in the bounds,
+ // replace them with the term we're assigning to the associated
+ // type in our opaque type.
+ let proj_replacer = &mut BottomUpFolder {
+ tcx: cx.tcx,
+ ty_op: |ty| if ty == proj_ty { proj_term } else { ty },
+ lt_op: |lt| lt,
+ ct_op: |ct| ct,
+ };
+ // For example, in `impl Trait<Assoc = impl Send>`, for all of the bounds on `Assoc`,
+ // e.g. `type Assoc: OtherTrait`, replace `<impl Trait as Trait>::Assoc: OtherTrait`
+ // with `impl Send: OtherTrait`.
+ for (assoc_pred, assoc_pred_span) in cx
+ .tcx
+ .bound_explicit_item_bounds(proj.projection_ty.item_def_id)
+ .subst_iter_copied(cx.tcx, &proj.projection_ty.substs)
+ {
+ let assoc_pred = assoc_pred.fold_with(proj_replacer);
+ let Ok(assoc_pred) = traits::fully_normalize(infcx, traits::ObligationCause::dummy(), cx.param_env, assoc_pred) else {
+ continue;
+ };
+ // If that predicate doesn't hold modulo regions (but passed during type-check),
+ // then we must've taken advantage of the hack in `project_and_unify_types` where
+ // we replace opaques with inference vars. Emit a warning!
+ if !infcx.predicate_must_hold_modulo_regions(&traits::Obligation::new(
+ traits::ObligationCause::dummy(),
+ cx.param_env,
+ assoc_pred,
+ )) {
+ // If it's a trait bound and an opaque that doesn't satisfy it,
+ // then we can emit a suggestion to add the bound.
+ let add_bound = match (proj_term.kind(), assoc_pred.kind().skip_binder()) {
+ (ty::Opaque(def_id, _), ty::PredicateKind::Trait(trait_pred)) => {
+ Some(AddBound {
+ suggest_span: cx.tcx.def_span(*def_id).shrink_to_hi(),
+ trait_ref: trait_pred.print_modifiers_and_trait_path(),
+ })
+ }
+ _ => None,
+ };
+ cx.emit_spanned_lint(
+ OPAQUE_HIDDEN_INFERRED_BOUND,
+ pred_span,
+ OpaqueHiddenInferredBoundLint {
+ ty: cx.tcx.mk_opaque(
+ def_id,
+ ty::InternalSubsts::identity_for_item(cx.tcx, def_id),
+ ),
+ proj_ty: proj_term,
+ assoc_pred_span,
+ add_bound,
+ },
+ );
+ }
+ }
+ }
+ }
+}
+
+#[derive(LintDiagnostic)]
+#[diag(lint_opaque_hidden_inferred_bound)]
+struct OpaqueHiddenInferredBoundLint<'tcx> {
+ ty: Ty<'tcx>,
+ proj_ty: Ty<'tcx>,
+ #[label(specifically)]
+ assoc_pred_span: Span,
+ #[subdiagnostic]
+ add_bound: Option<AddBound<'tcx>>,
+}
+
+#[derive(Subdiagnostic)]
+#[suggestion_verbose(
+ lint_opaque_hidden_inferred_bound_sugg,
+ applicability = "machine-applicable",
+ code = " + {trait_ref}"
+)]
+struct AddBound<'tcx> {
+ #[primary_span]
+ suggest_span: Span,
+ #[skip_arg]
+ trait_ref: TraitPredPrintModifiersAndPath<'tcx>,
+}
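
The lint's doc comment above already shows the triggering shape; restating it together with the result of the machine-applicable `AddBound` suggestion (` + {trait_ref}`) gives a complete, compilable picture:

```rust
trait Trait {
    type Assoc: Send;
}

struct Struct;

impl Trait for Struct {
    type Assoc = i32;
}

// Warns via `opaque_hidden_inferred_bound`: `impl Sized` is not declared `Send`,
// even though the hidden type `i32` happens to be.
fn test() -> impl Trait<Assoc = impl Sized> {
    Struct
}

// With the suggestion applied, the bound on the nested opaque is spelled out.
fn test_fixed() -> impl Trait<Assoc = impl Sized + Send> {
    Struct
}

fn main() {
    test();
    test_fixed();
}
```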
diff --git a/compiler/rustc_lint/src/pass_by_value.rs b/compiler/rustc_lint/src/pass_by_value.rs
index af5e5faf1..01bface71 100644
--- a/compiler/rustc_lint/src/pass_by_value.rs
+++ b/compiler/rustc_lint/src/pass_by_value.rs
@@ -29,18 +29,20 @@ impl<'tcx> LateLintPass<'tcx> for PassByValue {
}
}
if let Some(t) = path_for_pass_by_value(cx, &inner_ty) {
- cx.struct_span_lint(PASS_BY_VALUE, ty.span, |lint| {
- lint.build(fluent::lint::pass_by_value)
- .set_arg("ty", t.clone())
- .span_suggestion(
+ cx.struct_span_lint(
+ PASS_BY_VALUE,
+ ty.span,
+ fluent::lint_pass_by_value,
+ |lint| {
+ lint.set_arg("ty", t.clone()).span_suggestion(
ty.span,
- fluent::lint::suggestion,
+ fluent::suggestion,
t,
// Changing type of function argument
Applicability::MaybeIncorrect,
)
- .emit();
- })
+ },
+ )
}
}
_ => {}
@@ -56,7 +58,7 @@ fn path_for_pass_by_value(cx: &LateContext<'_>, ty: &hir::Ty<'_>) -> Option<Stri
let path_segment = path.segments.last().unwrap();
return Some(format!("{}{}", name, gen_args(cx, path_segment)));
}
- Res::SelfTy { trait_: None, alias_to: Some((did, _)) } => {
+ Res::SelfTyAlias { alias_to: did, is_trait_impl: false, .. } => {
if let ty::Adt(adt, substs) = cx.tcx.type_of(did).kind() {
if cx.tcx.has_attr(adt.did(), sym::rustc_pass_by_value) {
return Some(cx.tcx.def_path_str_with_substs(adt.did(), substs));
diff --git a/compiler/rustc_lint/src/passes.rs b/compiler/rustc_lint/src/passes.rs
index cb7bd407e..1c6a057d1 100644
--- a/compiler/rustc_lint/src/passes.rs
+++ b/compiler/rustc_lint/src/passes.rs
@@ -16,7 +16,7 @@ macro_rules! late_lint_methods {
fn check_body_post(a: &$hir hir::Body<$hir>);
fn check_crate();
fn check_crate_post();
- fn check_mod(a: &$hir hir::Mod<$hir>, b: Span, c: hir::HirId);
+ fn check_mod(a: &$hir hir::Mod<$hir>, b: hir::HirId);
fn check_foreign_item(a: &$hir hir::ForeignItem<$hir>);
fn check_item(a: &$hir hir::Item<$hir>);
fn check_item_post(a: &$hir hir::Item<$hir>);
@@ -31,7 +31,7 @@ macro_rules! late_lint_methods {
fn check_ty(a: &$hir hir::Ty<$hir>);
fn check_generic_param(a: &$hir hir::GenericParam<$hir>);
fn check_generics(a: &$hir hir::Generics<$hir>);
- fn check_poly_trait_ref(a: &$hir hir::PolyTraitRef<$hir>, b: hir::TraitBoundModifier);
+ fn check_poly_trait_ref(a: &$hir hir::PolyTraitRef<$hir>);
fn check_fn(
a: rustc_hir::intravisit::FnKind<$hir>,
b: &$hir hir::FnDecl<$hir>,
@@ -156,14 +156,13 @@ macro_rules! early_lint_methods {
fn check_generic_arg(a: &ast::GenericArg);
fn check_generic_param(a: &ast::GenericParam);
fn check_generics(a: &ast::Generics);
- fn check_poly_trait_ref(a: &ast::PolyTraitRef,
- b: &ast::TraitBoundModifier);
+ fn check_poly_trait_ref(a: &ast::PolyTraitRef);
fn check_fn(a: rustc_ast::visit::FnKind<'_>, c: Span, d_: ast::NodeId);
fn check_trait_item(a: &ast::AssocItem);
fn check_impl_item(a: &ast::AssocItem);
fn check_variant(a: &ast::Variant);
fn check_attribute(a: &ast::Attribute);
- fn check_mac_def(a: &ast::MacroDef, b: ast::NodeId);
+ fn check_mac_def(a: &ast::MacroDef);
fn check_mac(a: &ast::MacCall);
/// Called when entering a syntax node that can have lint attributes such
@@ -244,6 +243,5 @@ macro_rules! declare_combined_early_lint_pass {
}
/// A lint pass boxed up as a trait object.
-pub type EarlyLintPassObject = Box<dyn EarlyLintPass + sync::Send + sync::Sync + 'static>;
-pub type LateLintPassObject =
- Box<dyn for<'tcx> LateLintPass<'tcx> + sync::Send + sync::Sync + 'static>;
+pub type EarlyLintPassObject = Box<dyn EarlyLintPass + sync::Send + 'static>;
+pub type LateLintPassObject<'tcx> = Box<dyn LateLintPass<'tcx> + sync::Send + 'tcx>;
diff --git a/compiler/rustc_lint/src/redundant_semicolon.rs b/compiler/rustc_lint/src/redundant_semicolon.rs
index 26f413453..3521de7fc 100644
--- a/compiler/rustc_lint/src/redundant_semicolon.rs
+++ b/compiler/rustc_lint/src/redundant_semicolon.rs
@@ -48,11 +48,18 @@ fn maybe_lint_redundant_semis(cx: &EarlyContext<'_>, seq: &mut Option<(Span, boo
return;
}
- cx.struct_span_lint(REDUNDANT_SEMICOLONS, span, |lint| {
- lint.build(fluent::lint::redundant_semicolons)
- .set_arg("multiple", multiple)
- .span_suggestion(span, fluent::lint::suggestion, "", Applicability::MaybeIncorrect)
- .emit();
- });
+ cx.struct_span_lint(
+ REDUNDANT_SEMICOLONS,
+ span,
+ fluent::lint_redundant_semicolons,
+ |lint| {
+ lint.set_arg("multiple", multiple).span_suggestion(
+ span,
+ fluent::suggestion,
+ "",
+ Applicability::MaybeIncorrect,
+ )
+ },
+ );
}
}
diff --git a/compiler/rustc_lint/src/traits.rs b/compiler/rustc_lint/src/traits.rs
index df1587c59..f22f38aa2 100644
--- a/compiler/rustc_lint/src/traits.rs
+++ b/compiler/rustc_lint/src/traits.rs
@@ -89,7 +89,7 @@ impl<'tcx> LateLintPass<'tcx> for DropTraitConstraints {
fn check_item(&mut self, cx: &LateContext<'tcx>, item: &'tcx hir::Item<'tcx>) {
use rustc_middle::ty::PredicateKind::*;
- let predicates = cx.tcx.explicit_predicates_of(item.def_id);
+ let predicates = cx.tcx.explicit_predicates_of(item.owner_id);
for &(predicate, span) in predicates.predicates {
let Trait(trait_predicate) = predicate.kind().skip_binder() else {
continue
@@ -100,15 +100,18 @@ impl<'tcx> LateLintPass<'tcx> for DropTraitConstraints {
if trait_predicate.trait_ref.self_ty().is_impl_trait() {
continue;
}
- cx.struct_span_lint(DROP_BOUNDS, span, |lint| {
- let Some(needs_drop) = cx.tcx.get_diagnostic_item(sym::needs_drop) else {
- return
- };
- lint.build(fluent::lint::drop_trait_constraints)
- .set_arg("predicate", predicate)
- .set_arg("needs_drop", cx.tcx.def_path_str(needs_drop))
- .emit();
- });
+ let Some(needs_drop) = cx.tcx.get_diagnostic_item(sym::needs_drop) else {
+ continue;
+ };
+ cx.struct_span_lint(
+ DROP_BOUNDS,
+ span,
+ fluent::lint_drop_trait_constraints,
+ |lint| {
+ lint.set_arg("predicate", predicate)
+ .set_arg("needs_drop", cx.tcx.def_path_str(needs_drop))
+ },
+ );
}
}
}
@@ -119,14 +122,11 @@ impl<'tcx> LateLintPass<'tcx> for DropTraitConstraints {
};
for bound in &bounds[..] {
let def_id = bound.trait_ref.trait_def_id();
- if cx.tcx.lang_items().drop_trait() == def_id {
- cx.struct_span_lint(DYN_DROP, bound.span, |lint| {
- let Some(needs_drop) = cx.tcx.get_diagnostic_item(sym::needs_drop) else {
- return
- };
- lint.build(fluent::lint::drop_glue)
- .set_arg("needs_drop", cx.tcx.def_path_str(needs_drop))
- .emit();
+ if cx.tcx.lang_items().drop_trait() == def_id
+ && let Some(needs_drop) = cx.tcx.get_diagnostic_item(sym::needs_drop)
+ {
+ cx.struct_span_lint(DYN_DROP, bound.span, fluent::lint_drop_glue, |lint| {
+ lint.set_arg("needs_drop", cx.tcx.def_path_str(needs_drop))
});
}
}
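
`DropTraitConstraints` drives both lints touched in this file; `drop_bounds` reports bounds like the one below and, as the reworked message still does, points towards `std::mem::needs_drop`:

```rust
// Warns via `drop_bounds`: `T: Drop` only means "T literally implements Drop",
// not "values of T run destructor code", which is almost never the intended constraint.
#[allow(dead_code)]
fn takes_droppable<T: Drop>(_value: T) {}

fn main() {
    // The usual replacement: ask whether a type actually has drop glue.
    println!("String needs drop: {}", std::mem::needs_drop::<String>());
}
```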
diff --git a/compiler/rustc_lint/src/types.rs b/compiler/rustc_lint/src/types.rs
index 5c07afeb7..37caab2da 100644
--- a/compiler/rustc_lint/src/types.rs
+++ b/compiler/rustc_lint/src/types.rs
@@ -11,7 +11,7 @@ use rustc_middle::ty::subst::SubstsRef;
use rustc_middle::ty::{self, AdtKind, DefIdTree, Ty, TyCtxt, TypeSuperVisitable, TypeVisitable};
use rustc_span::source_map;
use rustc_span::symbol::sym;
-use rustc_span::{Span, Symbol, DUMMY_SP};
+use rustc_span::{Span, Symbol};
use rustc_target::abi::{Abi, WrappingRange};
use rustc_target::abi::{Integer, TagEncoding, Variants};
use rustc_target::spec::abi::Abi as SpecAbi;
@@ -19,7 +19,6 @@ use rustc_target::spec::abi::Abi as SpecAbi;
use std::cmp;
use std::iter;
use std::ops::ControlFlow;
-use tracing::debug;
declare_lint! {
/// The `unused_comparisons` lint detects comparisons made useless by
@@ -117,55 +116,70 @@ impl TypeLimits {
}
}
-/// Attempts to special-case the overflowing literal lint when it occurs as a range endpoint.
-/// Returns `true` iff the lint was overridden.
+/// Attempts to special-case the overflowing literal lint when it occurs as a range endpoint (`expr..MAX+1`).
+/// Returns `true` iff the lint was emitted.
fn lint_overflowing_range_endpoint<'tcx>(
cx: &LateContext<'tcx>,
lit: &hir::Lit,
lit_val: u128,
max: u128,
expr: &'tcx hir::Expr<'tcx>,
- parent_expr: &'tcx hir::Expr<'tcx>,
ty: &str,
) -> bool {
// We only want to handle exclusive (`..`) ranges,
// which are represented as `ExprKind::Struct`.
- let mut overwritten = false;
- if let ExprKind::Struct(_, eps, _) = &parent_expr.kind {
- if eps.len() != 2 {
- return false;
- }
- // We can suggest using an inclusive range
- // (`..=`) instead only if it is the `end` that is
- // overflowing and only by 1.
- if eps[1].expr.hir_id == expr.hir_id && lit_val - 1 == max {
- cx.struct_span_lint(OVERFLOWING_LITERALS, parent_expr.span, |lint| {
- let mut err = lint.build(fluent::lint::range_endpoint_out_of_range);
- err.set_arg("ty", ty);
- if let Ok(start) = cx.sess().source_map().span_to_snippet(eps[0].span) {
- use ast::{LitIntType, LitKind};
- // We need to preserve the literal's suffix,
- // as it may determine typing information.
- let suffix = match lit.node {
- LitKind::Int(_, LitIntType::Signed(s)) => s.name_str(),
- LitKind::Int(_, LitIntType::Unsigned(s)) => s.name_str(),
- LitKind::Int(_, LitIntType::Unsuffixed) => "",
- _ => bug!(),
- };
- let suggestion = format!("{}..={}{}", start, lit_val - 1, suffix);
- err.span_suggestion(
- parent_expr.span,
- fluent::lint::suggestion,
- suggestion,
- Applicability::MachineApplicable,
- );
- err.emit();
- overwritten = true;
- }
- });
- }
+ let par_id = cx.tcx.hir().get_parent_node(expr.hir_id);
+ let Node::ExprField(field) = cx.tcx.hir().get(par_id) else { return false };
+ let field_par_id = cx.tcx.hir().get_parent_node(field.hir_id);
+ let Node::Expr(struct_expr) = cx.tcx.hir().get(field_par_id) else { return false };
+ if !is_range_literal(struct_expr) {
+ return false;
+ };
+ let ExprKind::Struct(_, eps, _) = &struct_expr.kind else { return false };
+ if eps.len() != 2 {
+ return false;
}
- overwritten
+
+ // We can suggest using an inclusive range
+ // (`..=`) instead only if it is the `end` that is
+ // overflowing and only by 1.
+ if !(eps[1].expr.hir_id == expr.hir_id && lit_val - 1 == max) {
+ return false;
+ };
+ let Ok(start) = cx.sess().source_map().span_to_snippet(eps[0].span) else { return false };
+
+ cx.struct_span_lint(
+ OVERFLOWING_LITERALS,
+ struct_expr.span,
+ fluent::lint_range_endpoint_out_of_range,
+ |lint| {
+ use ast::{LitIntType, LitKind};
+
+ lint.set_arg("ty", ty);
+
+ // We need to preserve the literal's suffix,
+ // as it may determine typing information.
+ let suffix = match lit.node {
+ LitKind::Int(_, LitIntType::Signed(s)) => s.name_str(),
+ LitKind::Int(_, LitIntType::Unsigned(s)) => s.name_str(),
+ LitKind::Int(_, LitIntType::Unsuffixed) => "",
+ _ => bug!(),
+ };
+ let suggestion = format!("{}..={}{}", start, lit_val - 1, suffix);
+ lint.span_suggestion(
+ struct_expr.span,
+ fluent::suggestion,
+ suggestion,
+ Applicability::MachineApplicable,
+ );
+
+ lint
+ },
+ );
+
+    // We've just emitted a lint special-cased for `(...)..MAX+1` ranges;
+    // return `true` so the callers don't also emit a lint.
+ true
}
// For `isize` & `usize`, be conservative with the warnings, so that the
@@ -216,52 +230,58 @@ fn report_bin_hex_error(
negative: bool,
) {
let size = Integer::from_attr(&cx.tcx, ty).size();
- cx.struct_span_lint(OVERFLOWING_LITERALS, expr.span, |lint| {
- let (t, actually) = match ty {
- attr::IntType::SignedInt(t) => {
- let actually = if negative {
- -(size.sign_extend(val) as i128)
- } else {
- size.sign_extend(val) as i128
- };
- (t.name_str(), actually.to_string())
- }
- attr::IntType::UnsignedInt(t) => {
- let actually = size.truncate(val);
- (t.name_str(), actually.to_string())
- }
- };
- let mut err = lint.build(fluent::lint::overflowing_bin_hex);
- if negative {
- // If the value is negative,
- // emits a note about the value itself, apart from the literal.
- err.note(fluent::lint::negative_note);
- err.note(fluent::lint::negative_becomes_note);
- } else {
- err.note(fluent::lint::positive_note);
- }
- if let Some(sugg_ty) =
- get_type_suggestion(cx.typeck_results().node_type(expr.hir_id), val, negative)
- {
- err.set_arg("suggestion_ty", sugg_ty);
- if let Some(pos) = repr_str.chars().position(|c| c == 'i' || c == 'u') {
- let (sans_suffix, _) = repr_str.split_at(pos);
- err.span_suggestion(
- expr.span,
- fluent::lint::suggestion,
- format!("{}{}", sans_suffix, sugg_ty),
- Applicability::MachineApplicable,
- );
+ cx.struct_span_lint(
+ OVERFLOWING_LITERALS,
+ expr.span,
+ fluent::lint_overflowing_bin_hex,
+ |lint| {
+ let (t, actually) = match ty {
+ attr::IntType::SignedInt(t) => {
+ let actually = if negative {
+ -(size.sign_extend(val) as i128)
+ } else {
+ size.sign_extend(val) as i128
+ };
+ (t.name_str(), actually.to_string())
+ }
+ attr::IntType::UnsignedInt(t) => {
+ let actually = size.truncate(val);
+ (t.name_str(), actually.to_string())
+ }
+ };
+
+ if negative {
+ // If the value is negative,
+ // emits a note about the value itself, apart from the literal.
+ lint.note(fluent::negative_note);
+ lint.note(fluent::negative_becomes_note);
} else {
- err.help(fluent::lint::help);
+ lint.note(fluent::positive_note);
}
- }
- err.set_arg("ty", t);
- err.set_arg("lit", repr_str);
- err.set_arg("dec", val);
- err.set_arg("actually", actually);
- err.emit();
- });
+ if let Some(sugg_ty) =
+ get_type_suggestion(cx.typeck_results().node_type(expr.hir_id), val, negative)
+ {
+ lint.set_arg("suggestion_ty", sugg_ty);
+ if let Some(pos) = repr_str.chars().position(|c| c == 'i' || c == 'u') {
+ let (sans_suffix, _) = repr_str.split_at(pos);
+ lint.span_suggestion(
+ expr.span,
+ fluent::suggestion,
+ format!("{}{}", sans_suffix, sugg_ty),
+ Applicability::MachineApplicable,
+ );
+ } else {
+ lint.help(fluent::help);
+ }
+ }
+ lint.set_arg("ty", t)
+ .set_arg("lit", repr_str)
+ .set_arg("dec", val)
+ .set_arg("actually", actually);
+
+ lint
+ },
+ );
}
// This function finds the next fitting type and generates a suggestion string.
@@ -339,38 +359,32 @@ fn lint_int_literal<'tcx>(
return;
}
- let par_id = cx.tcx.hir().get_parent_node(e.hir_id);
- if let Node::Expr(par_e) = cx.tcx.hir().get(par_id) {
- if let hir::ExprKind::Struct(..) = par_e.kind {
- if is_range_literal(par_e)
- && lint_overflowing_range_endpoint(cx, lit, v, max, e, par_e, t.name_str())
- {
- // The overflowing literal lint was overridden.
- return;
- }
- }
+ if lint_overflowing_range_endpoint(cx, lit, v, max, e, t.name_str()) {
+        // The overflowing literal lint was emitted by `lint_overflowing_range_endpoint`.
+ return;
}
- cx.struct_span_lint(OVERFLOWING_LITERALS, e.span, |lint| {
- let mut err = lint.build(fluent::lint::overflowing_int);
- err.set_arg("ty", t.name_str());
- err.set_arg(
- "lit",
- cx.sess()
- .source_map()
- .span_to_snippet(lit.span)
- .expect("must get snippet from literal"),
- );
- err.set_arg("min", min);
- err.set_arg("max", max);
- err.note(fluent::lint::note);
+ cx.struct_span_lint(OVERFLOWING_LITERALS, e.span, fluent::lint_overflowing_int, |lint| {
+ lint.set_arg("ty", t.name_str())
+ .set_arg(
+ "lit",
+ cx.sess()
+ .source_map()
+ .span_to_snippet(lit.span)
+ .expect("must get snippet from literal"),
+ )
+ .set_arg("min", min)
+ .set_arg("max", max)
+ .note(fluent::note);
+
if let Some(sugg_ty) =
get_type_suggestion(cx.typeck_results().node_type(e.hir_id), v, negative)
{
- err.set_arg("suggestion_ty", sugg_ty);
- err.help(fluent::lint::help);
+ lint.set_arg("suggestion_ty", sugg_ty);
+ lint.help(fluent::help);
}
- err.emit();
+
+ lint
});
}
}
@@ -395,29 +409,29 @@ fn lint_uint_literal<'tcx>(
match par_e.kind {
hir::ExprKind::Cast(..) => {
if let ty::Char = cx.typeck_results().expr_ty(par_e).kind() {
- cx.struct_span_lint(OVERFLOWING_LITERALS, par_e.span, |lint| {
- lint.build(fluent::lint::only_cast_u8_to_char)
- .span_suggestion(
+ cx.struct_span_lint(
+ OVERFLOWING_LITERALS,
+ par_e.span,
+ fluent::lint_only_cast_u8_to_char,
+ |lint| {
+ lint.span_suggestion(
par_e.span,
- fluent::lint::suggestion,
+ fluent::suggestion,
format!("'\\u{{{:X}}}'", lit_val),
Applicability::MachineApplicable,
)
- .emit();
- });
- return;
- }
- }
- hir::ExprKind::Struct(..) if is_range_literal(par_e) => {
- let t = t.name_str();
- if lint_overflowing_range_endpoint(cx, lit, lit_val, max, e, par_e, t) {
- // The overflowing literal lint was overridden.
+ },
+ );
return;
}
}
_ => {}
}
}
+ if lint_overflowing_range_endpoint(cx, lit, lit_val, max, e, t.name_str()) {
+        // The overflowing literal lint was emitted by `lint_overflowing_range_endpoint`.
+ return;
+ }
if let Some(repr_str) = get_bin_hex_repr(cx, lit) {
report_bin_hex_error(
cx,
@@ -429,9 +443,8 @@ fn lint_uint_literal<'tcx>(
);
return;
}
- cx.struct_span_lint(OVERFLOWING_LITERALS, e.span, |lint| {
- lint.build(fluent::lint::overflowing_uint)
- .set_arg("ty", t.name_str())
+ cx.struct_span_lint(OVERFLOWING_LITERALS, e.span, fluent::lint_overflowing_uint, |lint| {
+ lint.set_arg("ty", t.name_str())
.set_arg(
"lit",
cx.sess()
@@ -441,8 +454,7 @@ fn lint_uint_literal<'tcx>(
)
.set_arg("min", min)
.set_arg("max", max)
- .note(fluent::lint::note)
- .emit();
+ .note(fluent::note)
});
}
}
@@ -472,19 +484,22 @@ fn lint_literal<'tcx>(
_ => bug!(),
};
if is_infinite == Ok(true) {
- cx.struct_span_lint(OVERFLOWING_LITERALS, e.span, |lint| {
- lint.build(fluent::lint::overflowing_literal)
- .set_arg("ty", t.name_str())
- .set_arg(
- "lit",
- cx.sess()
- .source_map()
- .span_to_snippet(lit.span)
- .expect("must get snippet from literal"),
- )
- .note(fluent::lint::note)
- .emit();
- });
+ cx.struct_span_lint(
+ OVERFLOWING_LITERALS,
+ e.span,
+ fluent::lint_overflowing_literal,
+ |lint| {
+ lint.set_arg("ty", t.name_str())
+ .set_arg(
+ "lit",
+ cx.sess()
+ .source_map()
+ .span_to_snippet(lit.span)
+ .expect("must get snippet from literal"),
+ )
+ .note(fluent::note)
+ },
+ );
}
}
_ => {}
@@ -502,9 +517,12 @@ impl<'tcx> LateLintPass<'tcx> for TypeLimits {
}
hir::ExprKind::Binary(binop, ref l, ref r) => {
if is_comparison(binop) && !check_limits(cx, binop, &l, &r) {
- cx.struct_span_lint(UNUSED_COMPARISONS, e.span, |lint| {
- lint.build(fluent::lint::unused_comparisons).emit();
- });
+ cx.struct_span_lint(
+ UNUSED_COMPARISONS,
+ e.span,
+ fluent::lint_unused_comparisons,
+ |lint| lint,
+ );
}
}
hir::ExprKind::Lit(ref lit) => lint_literal(cx, self, e, lit),
@@ -724,7 +742,7 @@ fn get_nullable_type<'tcx>(cx: &LateContext<'tcx>, ty: Ty<'tcx>) -> Option<Ty<'t
Some(match *ty.kind() {
ty::Adt(field_def, field_substs) => {
let inner_field_ty = {
- let first_non_zst_ty = field_def
+ let mut first_non_zst_ty = field_def
.variants()
.iter()
.filter_map(|v| transparent_newtype_field(cx.tcx, v));
@@ -734,7 +752,7 @@ fn get_nullable_type<'tcx>(cx: &LateContext<'tcx>, ty: Ty<'tcx>) -> Option<Ty<'t
"Wrong number of fields for transparent type"
);
first_non_zst_ty
- .last()
+ .next_back()
.expect("No non-zst fields in transparent type.")
.ty(tcx, field_substs)
};
@@ -824,8 +842,8 @@ impl<'a, 'tcx> ImproperCTypesVisitor<'a, 'tcx> {
self.emit_ffi_unsafe_type_lint(
ty,
sp,
- fluent::lint::improper_ctypes_array_reason,
- Some(fluent::lint::improper_ctypes_array_help),
+ fluent::lint_improper_ctypes_array_reason,
+ Some(fluent::lint_improper_ctypes_array_help),
);
true
} else {
@@ -868,7 +886,7 @@ impl<'a, 'tcx> ImproperCTypesVisitor<'a, 'tcx> {
} else {
// All fields are ZSTs; this means that the type should behave
// like (), which is FFI-unsafe
- FfiUnsafe { ty, reason: fluent::lint::improper_ctypes_struct_zst, help: None }
+ FfiUnsafe { ty, reason: fluent::lint_improper_ctypes_struct_zst, help: None }
}
} else {
// We can't completely trust repr(C) markings; make sure the fields are
@@ -882,7 +900,7 @@ impl<'a, 'tcx> ImproperCTypesVisitor<'a, 'tcx> {
FfiPhantom(..) if def.is_enum() => {
return FfiUnsafe {
ty,
- reason: fluent::lint::improper_ctypes_enum_phantomdata,
+ reason: fluent::lint_improper_ctypes_enum_phantomdata,
help: None,
};
}
@@ -913,12 +931,12 @@ impl<'a, 'tcx> ImproperCTypesVisitor<'a, 'tcx> {
match *ty.kind() {
ty::Adt(def, substs) => {
if def.is_box() && matches!(self.mode, CItemKind::Definition) {
- if ty.boxed_ty().is_sized(tcx.at(DUMMY_SP), self.cx.param_env) {
+ if ty.boxed_ty().is_sized(tcx, self.cx.param_env) {
return FfiSafe;
} else {
return FfiUnsafe {
ty,
- reason: fluent::lint::improper_ctypes_box,
+ reason: fluent::lint_improper_ctypes_box,
help: None,
};
}
@@ -932,14 +950,14 @@ impl<'a, 'tcx> ImproperCTypesVisitor<'a, 'tcx> {
return FfiUnsafe {
ty,
reason: if def.is_struct() {
- fluent::lint::improper_ctypes_struct_layout_reason
+ fluent::lint_improper_ctypes_struct_layout_reason
} else {
- fluent::lint::improper_ctypes_union_layout_reason
+ fluent::lint_improper_ctypes_union_layout_reason
},
help: if def.is_struct() {
- Some(fluent::lint::improper_ctypes_struct_layout_help)
+ Some(fluent::lint_improper_ctypes_struct_layout_help)
} else {
- Some(fluent::lint::improper_ctypes_union_layout_help)
+ Some(fluent::lint_improper_ctypes_union_layout_help)
},
};
}
@@ -950,9 +968,9 @@ impl<'a, 'tcx> ImproperCTypesVisitor<'a, 'tcx> {
return FfiUnsafe {
ty,
reason: if def.is_struct() {
- fluent::lint::improper_ctypes_struct_non_exhaustive
+ fluent::lint_improper_ctypes_struct_non_exhaustive
} else {
- fluent::lint::improper_ctypes_union_non_exhaustive
+ fluent::lint_improper_ctypes_union_non_exhaustive
},
help: None,
};
@@ -962,14 +980,14 @@ impl<'a, 'tcx> ImproperCTypesVisitor<'a, 'tcx> {
return FfiUnsafe {
ty,
reason: if def.is_struct() {
- fluent::lint::improper_ctypes_struct_fieldless_reason
+ fluent::lint_improper_ctypes_struct_fieldless_reason
} else {
- fluent::lint::improper_ctypes_union_fieldless_reason
+ fluent::lint_improper_ctypes_union_fieldless_reason
},
help: if def.is_struct() {
- Some(fluent::lint::improper_ctypes_struct_fieldless_help)
+ Some(fluent::lint_improper_ctypes_struct_fieldless_help)
} else {
- Some(fluent::lint::improper_ctypes_union_fieldless_help)
+ Some(fluent::lint_improper_ctypes_union_fieldless_help)
},
};
}
@@ -990,8 +1008,8 @@ impl<'a, 'tcx> ImproperCTypesVisitor<'a, 'tcx> {
if repr_nullable_ptr(self.cx, ty, self.mode).is_none() {
return FfiUnsafe {
ty,
- reason: fluent::lint::improper_ctypes_enum_repr_reason,
- help: Some(fluent::lint::improper_ctypes_enum_repr_help),
+ reason: fluent::lint_improper_ctypes_enum_repr_reason,
+ help: Some(fluent::lint_improper_ctypes_enum_repr_help),
};
}
}
@@ -999,7 +1017,7 @@ impl<'a, 'tcx> ImproperCTypesVisitor<'a, 'tcx> {
if def.is_variant_list_non_exhaustive() && !def.did().is_local() {
return FfiUnsafe {
ty,
- reason: fluent::lint::improper_ctypes_non_exhaustive,
+ reason: fluent::lint_improper_ctypes_non_exhaustive,
help: None,
};
}
@@ -1010,7 +1028,7 @@ impl<'a, 'tcx> ImproperCTypesVisitor<'a, 'tcx> {
if is_non_exhaustive && !variant.def_id.is_local() {
return FfiUnsafe {
ty,
- reason: fluent::lint::improper_ctypes_non_exhaustive_variant,
+ reason: fluent::lint_improper_ctypes_non_exhaustive_variant,
help: None,
};
}
@@ -1028,12 +1046,12 @@ impl<'a, 'tcx> ImproperCTypesVisitor<'a, 'tcx> {
ty::Char => FfiUnsafe {
ty,
- reason: fluent::lint::improper_ctypes_char_reason,
- help: Some(fluent::lint::improper_ctypes_char_help),
+ reason: fluent::lint_improper_ctypes_char_reason,
+ help: Some(fluent::lint_improper_ctypes_char_help),
},
ty::Int(ty::IntTy::I128) | ty::Uint(ty::UintTy::U128) => {
- FfiUnsafe { ty, reason: fluent::lint::improper_ctypes_128bit, help: None }
+ FfiUnsafe { ty, reason: fluent::lint_improper_ctypes_128bit, help: None }
}
// Primitive types with a stable representation.
@@ -1041,30 +1059,30 @@ impl<'a, 'tcx> ImproperCTypesVisitor<'a, 'tcx> {
ty::Slice(_) => FfiUnsafe {
ty,
- reason: fluent::lint::improper_ctypes_slice_reason,
- help: Some(fluent::lint::improper_ctypes_slice_help),
+ reason: fluent::lint_improper_ctypes_slice_reason,
+ help: Some(fluent::lint_improper_ctypes_slice_help),
},
ty::Dynamic(..) => {
- FfiUnsafe { ty, reason: fluent::lint::improper_ctypes_dyn, help: None }
+ FfiUnsafe { ty, reason: fluent::lint_improper_ctypes_dyn, help: None }
}
ty::Str => FfiUnsafe {
ty,
- reason: fluent::lint::improper_ctypes_str_reason,
- help: Some(fluent::lint::improper_ctypes_str_help),
+ reason: fluent::lint_improper_ctypes_str_reason,
+ help: Some(fluent::lint_improper_ctypes_str_help),
},
ty::Tuple(..) => FfiUnsafe {
ty,
- reason: fluent::lint::improper_ctypes_tuple_reason,
- help: Some(fluent::lint::improper_ctypes_tuple_help),
+ reason: fluent::lint_improper_ctypes_tuple_reason,
+ help: Some(fluent::lint_improper_ctypes_tuple_help),
},
ty::RawPtr(ty::TypeAndMut { ty, .. }) | ty::Ref(_, ty, _)
if {
matches!(self.mode, CItemKind::Definition)
- && ty.is_sized(self.cx.tcx.at(DUMMY_SP), self.cx.param_env)
+ && ty.is_sized(self.cx.tcx, self.cx.param_env)
} =>
{
FfiSafe
@@ -1089,8 +1107,8 @@ impl<'a, 'tcx> ImproperCTypesVisitor<'a, 'tcx> {
if self.is_internal_abi(sig.abi()) {
return FfiUnsafe {
ty,
- reason: fluent::lint::improper_ctypes_fnptr_reason,
- help: Some(fluent::lint::improper_ctypes_fnptr_help),
+ reason: fluent::lint_improper_ctypes_fnptr_reason,
+ help: Some(fluent::lint_improper_ctypes_fnptr_help),
};
}
@@ -1121,7 +1139,7 @@ impl<'a, 'tcx> ImproperCTypesVisitor<'a, 'tcx> {
// While opaque types are checked for earlier, if a projection in a struct field
// normalizes to an opaque type, then it will reach this branch.
ty::Opaque(..) => {
- FfiUnsafe { ty, reason: fluent::lint::improper_ctypes_opaque, help: None }
+ FfiUnsafe { ty, reason: fluent::lint_improper_ctypes_opaque, help: None }
}
// `extern "C" fn` functions can have type parameters, which may or may not be FFI-safe,
@@ -1155,25 +1173,24 @@ impl<'a, 'tcx> ImproperCTypesVisitor<'a, 'tcx> {
CItemKind::Definition => IMPROPER_CTYPES_DEFINITIONS,
};
- self.cx.struct_span_lint(lint, sp, |lint| {
+ self.cx.struct_span_lint(lint, sp, fluent::lint_improper_ctypes, |lint| {
let item_description = match self.mode {
CItemKind::Declaration => "block",
CItemKind::Definition => "fn",
};
- let mut diag = lint.build(fluent::lint::improper_ctypes);
- diag.set_arg("ty", ty);
- diag.set_arg("desc", item_description);
- diag.span_label(sp, fluent::lint::label);
+ lint.set_arg("ty", ty);
+ lint.set_arg("desc", item_description);
+ lint.span_label(sp, fluent::label);
if let Some(help) = help {
- diag.help(help);
+ lint.help(help);
}
- diag.note(note);
+ lint.note(note);
if let ty::Adt(def, _) = ty.kind() {
if let Some(sp) = self.cx.tcx.hir().span_if_local(def.did()) {
- diag.span_note(sp, fluent::lint::note);
+ lint.span_note(sp, fluent::note);
}
}
- diag.emit();
+ lint
});
}
@@ -1207,7 +1224,7 @@ impl<'a, 'tcx> ImproperCTypesVisitor<'a, 'tcx> {
}
if let Some(ty) = ty.visit_with(&mut ProhibitOpaqueTypes { cx: self.cx }).break_value() {
- self.emit_ffi_unsafe_type_lint(ty, sp, fluent::lint::improper_ctypes_opaque, None);
+ self.emit_ffi_unsafe_type_lint(ty, sp, fluent::lint_improper_ctypes_opaque, None);
true
} else {
false
@@ -1252,7 +1269,7 @@ impl<'a, 'tcx> ImproperCTypesVisitor<'a, 'tcx> {
self.emit_ffi_unsafe_type_lint(
ty,
sp,
- fluent::lint::improper_ctypes_only_phantomdata,
+ fluent::lint_improper_ctypes_only_phantomdata,
None,
);
}
@@ -1343,7 +1360,7 @@ declare_lint_pass!(VariantSizeDifferences => [VARIANT_SIZE_DIFFERENCES]);
impl<'tcx> LateLintPass<'tcx> for VariantSizeDifferences {
fn check_item(&mut self, cx: &LateContext<'_>, it: &hir::Item<'_>) {
if let hir::ItemKind::Enum(ref enum_definition, _) = it.kind {
- let t = cx.tcx.type_of(it.def_id);
+ let t = cx.tcx.type_of(it.owner_id);
let ty = cx.tcx.erase_regions(t);
let Ok(layout) = cx.layout_of(ty) else { return };
let Variants::Multiple {
@@ -1386,11 +1403,8 @@ impl<'tcx> LateLintPass<'tcx> for VariantSizeDifferences {
cx.struct_span_lint(
VARIANT_SIZE_DIFFERENCES,
enum_definition.variants[largest_index].span,
- |lint| {
- lint.build(fluent::lint::variant_size_differences)
- .set_arg("largest", largest)
- .emit();
- },
+ fluent::lint_variant_size_differences,
+ |lint| lint.set_arg("largest", largest),
);
}
}
@@ -1463,7 +1477,7 @@ impl InvalidAtomicOrdering {
sym::AtomicI64,
sym::AtomicI128,
];
- if let ExprKind::MethodCall(ref method_path, args, _) = &expr.kind
+ if let ExprKind::MethodCall(ref method_path, _, args, _) = &expr.kind
&& recognized_names.contains(&method_path.ident.name)
&& let Some(m_def_id) = cx.typeck_results().type_dependent_def_id(expr.hir_id)
&& let Some(impl_did) = cx.tcx.impl_of_method(m_def_id)
@@ -1498,25 +1512,16 @@ impl InvalidAtomicOrdering {
fn check_atomic_load_store(cx: &LateContext<'_>, expr: &Expr<'_>) {
if let Some((method, args)) = Self::inherent_atomic_method_call(cx, expr, &[sym::load, sym::store])
- && let Some((ordering_arg, invalid_ordering)) = match method {
- sym::load => Some((&args[1], sym::Release)),
- sym::store => Some((&args[2], sym::Acquire)),
+ && let Some((ordering_arg, invalid_ordering, msg)) = match method {
+ sym::load => Some((&args[0], sym::Release, fluent::lint_atomic_ordering_load)),
+ sym::store => Some((&args[1], sym::Acquire, fluent::lint_atomic_ordering_store)),
_ => None,
}
&& let Some(ordering) = Self::match_ordering(cx, ordering_arg)
&& (ordering == invalid_ordering || ordering == sym::AcqRel)
{
- cx.struct_span_lint(INVALID_ATOMIC_ORDERING, ordering_arg.span, |diag| {
- if method == sym::load {
- diag.build(fluent::lint::atomic_ordering_load)
- .help(fluent::lint::help)
- .emit()
- } else {
- debug_assert_eq!(method, sym::store);
- diag.build(fluent::lint::atomic_ordering_store)
- .help(fluent::lint::help)
- .emit();
- }
+ cx.struct_span_lint(INVALID_ATOMIC_ORDERING, ordering_arg.span, msg, |lint| {
+ lint.help(fluent::help)
});
}
}
@@ -1528,10 +1533,9 @@ impl InvalidAtomicOrdering {
&& matches!(cx.tcx.get_diagnostic_name(def_id), Some(sym::fence | sym::compiler_fence))
&& Self::match_ordering(cx, &args[0]) == Some(sym::Relaxed)
{
- cx.struct_span_lint(INVALID_ATOMIC_ORDERING, args[0].span, |diag| {
- diag.build(fluent::lint::atomic_ordering_fence)
- .help(fluent::lint::help)
- .emit();
+ cx.struct_span_lint(INVALID_ATOMIC_ORDERING, args[0].span, fluent::lint_atomic_ordering_fence, |lint| {
+ lint
+ .help(fluent::help)
});
}
}
@@ -1541,8 +1545,8 @@ impl InvalidAtomicOrdering {
else {return };
let fail_order_arg = match method {
- sym::fetch_update => &args[2],
- sym::compare_exchange | sym::compare_exchange_weak => &args[4],
+ sym::fetch_update => &args[1],
+ sym::compare_exchange | sym::compare_exchange_weak => &args[3],
_ => return,
};
@@ -1550,7 +1554,7 @@ impl InvalidAtomicOrdering {
if matches!(fail_ordering, sym::Release | sym::AcqRel) {
#[derive(LintDiagnostic)]
- #[lint(lint::atomic_ordering_invalid)]
+ #[diag(lint_atomic_ordering_invalid)]
#[help]
struct InvalidAtomicOrderingDiag {
method: Symbol,
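
The `rustc_lint` hunks above all follow the same migration: `struct_span_lint` now takes the diagnostic message as an extra argument, and the decorator closure only adds arguments, notes, and suggestions before returning the builder; the old `lint.build(..)` / `.emit()` pair is gone. A minimal sketch of the resulting callsite shape, using placeholder lint and message names rather than anything from this patch:

```rust
// Hypothetical post-migration callsite: the message is supplied up front and
// the closure just decorates the diagnostic, returning it instead of emitting.
cx.struct_span_lint(SOME_LINT, expr.span, fluent::lint_some_message, |lint| {
    lint.set_arg("ty", ty_name).note(fluent::note)
});
```
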
diff --git a/compiler/rustc_lint/src/unused.rs b/compiler/rustc_lint/src/unused.rs
index b6cf18291..46706e498 100644
--- a/compiler/rustc_lint/src/unused.rs
+++ b/compiler/rustc_lint/src/unused.rs
@@ -7,11 +7,12 @@ use rustc_errors::{fluent, pluralize, Applicability, MultiSpan};
use rustc_hir as hir;
use rustc_hir::def::{DefKind, Res};
use rustc_hir::def_id::DefId;
+use rustc_infer::traits::util::elaborate_predicates_with_span;
use rustc_middle::ty::adjustment;
use rustc_middle::ty::{self, Ty};
use rustc_span::symbol::Symbol;
use rustc_span::symbol::{kw, sym};
-use rustc_span::{BytePos, Span, DUMMY_SP};
+use rustc_span::{BytePos, Span};
declare_lint! {
/// The `unused_must_use` lint detects unused result of a type flagged as
@@ -154,24 +155,22 @@ impl<'tcx> LateLintPass<'tcx> for UnusedResults {
};
if let Some(must_use_op) = must_use_op {
- cx.struct_span_lint(UNUSED_MUST_USE, expr.span, |lint| {
- lint.build(fluent::lint::unused_op)
- .set_arg("op", must_use_op)
- .span_label(expr.span, fluent::lint::label)
+ cx.struct_span_lint(UNUSED_MUST_USE, expr.span, fluent::lint_unused_op, |lint| {
+ lint.set_arg("op", must_use_op)
+ .span_label(expr.span, fluent::label)
.span_suggestion_verbose(
expr.span.shrink_to_lo(),
- fluent::lint::suggestion,
+ fluent::suggestion,
"let _ = ",
Applicability::MachineApplicable,
)
- .emit();
});
op_warned = true;
}
if !(type_permits_lack_of_use || fn_warned || op_warned) {
- cx.struct_span_lint(UNUSED_RESULTS, s.span, |lint| {
- lint.build(fluent::lint::unused_result).set_arg("ty", ty).emit();
+ cx.struct_span_lint(UNUSED_RESULTS, s.span, fluent::lint_unused_result, |lint| {
+ lint.set_arg("ty", ty)
});
}
@@ -206,10 +205,13 @@ impl<'tcx> LateLintPass<'tcx> for UnusedResults {
ty::Adt(def, _) => check_must_use_def(cx, def.did(), span, descr_pre, descr_post),
ty::Opaque(def, _) => {
let mut has_emitted = false;
- for &(predicate, _) in cx.tcx.explicit_item_bounds(def) {
+ for obligation in elaborate_predicates_with_span(
+ cx.tcx,
+ cx.tcx.explicit_item_bounds(def).iter().cloned(),
+ ) {
// We only look at the `DefId`, so it is safe to skip the binder here.
if let ty::PredicateKind::Trait(ref poly_trait_predicate) =
- predicate.kind().skip_binder()
+ obligation.predicate.kind().skip_binder()
{
let def_id = poly_trait_predicate.trait_ref.def_id;
let descr_pre =
@@ -222,7 +224,7 @@ impl<'tcx> LateLintPass<'tcx> for UnusedResults {
}
has_emitted
}
- ty::Dynamic(binder, _) => {
+ ty::Dynamic(binder, _, _) => {
let mut has_emitted = false;
for predicate in binder.iter() {
if let ty::ExistentialPredicate::Trait(ref trait_ref) =
@@ -267,29 +269,35 @@ impl<'tcx> LateLintPass<'tcx> for UnusedResults {
}
},
ty::Closure(..) => {
- cx.struct_span_lint(UNUSED_MUST_USE, span, |lint| {
- // FIXME(davidtwco): this isn't properly translatable becauses of the
- // pre/post strings
- lint.build(fluent::lint::unused_closure)
- .set_arg("count", plural_len)
- .set_arg("pre", descr_pre)
- .set_arg("post", descr_post)
- .note(fluent::lint::note)
- .emit();
- });
+ cx.struct_span_lint(
+ UNUSED_MUST_USE,
+ span,
+ fluent::lint_unused_closure,
+ |lint| {
+ // FIXME(davidtwco): this isn't properly translatable because of the
+ // pre/post strings
+ lint.set_arg("count", plural_len)
+ .set_arg("pre", descr_pre)
+ .set_arg("post", descr_post)
+ .note(fluent::note)
+ },
+ );
true
}
ty::Generator(..) => {
- cx.struct_span_lint(UNUSED_MUST_USE, span, |lint| {
- // FIXME(davidtwco): this isn't properly translatable becauses of the
- // pre/post strings
- lint.build(fluent::lint::unused_generator)
- .set_arg("count", plural_len)
- .set_arg("pre", descr_pre)
- .set_arg("post", descr_post)
- .note(fluent::lint::note)
- .emit();
- });
+ cx.struct_span_lint(
+ UNUSED_MUST_USE,
+ span,
+ fluent::lint_unused_generator,
+ |lint| {
+ // FIXME(davidtwco): this isn't properly translatable because of the
+ // pre/post strings
+ lint.set_arg("count", plural_len)
+ .set_arg("pre", descr_pre)
+ .set_arg("post", descr_post)
+ .note(fluent::note)
+ },
+ );
true
}
_ => false,
@@ -309,18 +317,17 @@ impl<'tcx> LateLintPass<'tcx> for UnusedResults {
descr_post_path: &str,
) -> bool {
if let Some(attr) = cx.tcx.get_attr(def_id, sym::must_use) {
- cx.struct_span_lint(UNUSED_MUST_USE, span, |lint| {
- // FIXME(davidtwco): this isn't properly translatable becauses of the pre/post
+ cx.struct_span_lint(UNUSED_MUST_USE, span, fluent::lint_unused_def, |lint| {
+ // FIXME(davidtwco): this isn't properly translatable because of the pre/post
// strings
- let mut err = lint.build(fluent::lint::unused_def);
- err.set_arg("pre", descr_pre_path);
- err.set_arg("post", descr_post_path);
- err.set_arg("def", cx.tcx.def_path_str(def_id));
+ lint.set_arg("pre", descr_pre_path);
+ lint.set_arg("post", descr_post_path);
+ lint.set_arg("def", cx.tcx.def_path_str(def_id));
// check for #[must_use = "..."]
if let Some(note) = attr.value_str() {
- err.note(note.as_str());
+ lint.note(note.as_str());
}
- err.emit();
+ lint
});
true
} else {
@@ -357,25 +364,34 @@ impl<'tcx> LateLintPass<'tcx> for PathStatements {
fn check_stmt(&mut self, cx: &LateContext<'_>, s: &hir::Stmt<'_>) {
if let hir::StmtKind::Semi(expr) = s.kind {
if let hir::ExprKind::Path(_) = expr.kind {
- cx.struct_span_lint(PATH_STATEMENTS, s.span, |lint| {
- let ty = cx.typeck_results().expr_ty(expr);
- if ty.needs_drop(cx.tcx, cx.param_env) {
- let mut lint = lint.build(fluent::lint::path_statement_drop);
- if let Ok(snippet) = cx.sess().source_map().span_to_snippet(expr.span) {
- lint.span_suggestion(
- s.span,
- fluent::lint::suggestion,
- format!("drop({});", snippet),
- Applicability::MachineApplicable,
- );
- } else {
- lint.span_help(s.span, fluent::lint::suggestion);
- }
- lint.emit();
- } else {
- lint.build(fluent::lint::path_statement_no_effect).emit();
- }
- });
+ let ty = cx.typeck_results().expr_ty(expr);
+ if ty.needs_drop(cx.tcx, cx.param_env) {
+ cx.struct_span_lint(
+ PATH_STATEMENTS,
+ s.span,
+ fluent::lint_path_statement_drop,
+ |lint| {
+ if let Ok(snippet) = cx.sess().source_map().span_to_snippet(expr.span) {
+ lint.span_suggestion(
+ s.span,
+ fluent::suggestion,
+ format!("drop({});", snippet),
+ Applicability::MachineApplicable,
+ );
+ } else {
+ lint.span_help(s.span, fluent::suggestion);
+ }
+ lint
+ },
+ );
+ } else {
+ cx.struct_span_lint(
+ PATH_STATEMENTS,
+ s.span,
+ fluent::lint_path_statement_no_effect,
+ |lint| lint,
+ );
+ }
}
}
}
@@ -504,23 +520,23 @@ trait UnusedDelimLint {
ast::ExprKind::Block(ref block, None) if block.stmts.len() > 0 => {
let start = block.stmts[0].span;
let end = block.stmts[block.stmts.len() - 1].span;
- if value.span.from_expansion() || start.from_expansion() || end.from_expansion() {
- (
- value.span.with_hi(value.span.lo() + BytePos(1)),
- value.span.with_lo(value.span.hi() - BytePos(1)),
- )
+ if let Some(start) = start.find_ancestor_inside(value.span)
+ && let Some(end) = end.find_ancestor_inside(value.span)
+ {
+ Some((
+ value.span.with_hi(start.lo()),
+ value.span.with_lo(end.hi()),
+ ))
} else {
- (value.span.with_hi(start.lo()), value.span.with_lo(end.hi()))
+ None
}
}
ast::ExprKind::Paren(ref expr) => {
- if value.span.from_expansion() || expr.span.from_expansion() {
- (
- value.span.with_hi(value.span.lo() + BytePos(1)),
- value.span.with_lo(value.span.hi() - BytePos(1)),
- )
+ let expr_span = expr.span.find_ancestor_inside(value.span);
+ if let Some(expr_span) = expr_span {
+ Some((value.span.with_hi(expr_span.lo()), value.span.with_lo(expr_span.hi())))
} else {
- (value.span.with_hi(expr.span.lo()), value.span.with_lo(expr.span.hi()))
+ None
}
}
_ => return,
@@ -529,36 +545,37 @@ trait UnusedDelimLint {
left_pos.map_or(false, |s| s >= value.span.lo()),
right_pos.map_or(false, |s| s <= value.span.hi()),
);
- self.emit_unused_delims(cx, spans, ctx.into(), keep_space);
+ self.emit_unused_delims(cx, value.span, spans, ctx.into(), keep_space);
}
fn emit_unused_delims(
&self,
cx: &EarlyContext<'_>,
- spans: (Span, Span),
+ value_span: Span,
+ spans: Option<(Span, Span)>,
msg: &str,
keep_space: (bool, bool),
) {
- // FIXME(flip1995): Quick and dirty fix for #70814. This should be fixed in rustdoc
- // properly.
- if spans.0 == DUMMY_SP || spans.1 == DUMMY_SP {
- return;
- }
-
- cx.struct_span_lint(self.lint(), MultiSpan::from(vec![spans.0, spans.1]), |lint| {
- let replacement = vec![
- (spans.0, if keep_space.0 { " ".into() } else { "".into() }),
- (spans.1, if keep_space.1 { " ".into() } else { "".into() }),
- ];
- lint.build(fluent::lint::unused_delim)
- .set_arg("delim", Self::DELIM_STR)
- .set_arg("item", msg)
- .multipart_suggestion(
- fluent::lint::suggestion,
+ let primary_span = if let Some((lo, hi)) = spans {
+ MultiSpan::from(vec![lo, hi])
+ } else {
+ MultiSpan::from(value_span)
+ };
+ cx.struct_span_lint(self.lint(), primary_span, fluent::lint_unused_delim, |lint| {
+ lint.set_arg("delim", Self::DELIM_STR);
+ lint.set_arg("item", msg);
+ if let Some((lo, hi)) = spans {
+ let replacement = vec![
+ (lo, if keep_space.0 { " ".into() } else { "".into() }),
+ (hi, if keep_space.1 { " ".into() } else { "".into() }),
+ ];
+ lint.multipart_suggestion(
+ fluent::suggestion,
replacement,
Applicability::MachineApplicable,
- )
- .emit();
+ );
+ }
+ lint
});
}
@@ -606,8 +623,7 @@ trait UnusedDelimLint {
ref call_or_other => {
let (args_to_check, ctx) = match *call_or_other {
Call(_, ref args) => (&args[..], UnusedDelimsCtx::FunctionArg),
- // first "argument" is self (which sometimes needs delims)
- MethodCall(_, ref args, _) => (&args[1..], UnusedDelimsCtx::MethodArg),
+ MethodCall(_, _, ref args, _) => (&args[..], UnusedDelimsCtx::MethodArg),
// actual catch-all arm
_ => {
return;
@@ -750,7 +766,7 @@ impl UnusedParens {
avoid_or: bool,
avoid_mut: bool,
) {
- use ast::{BindingMode, Mutability, PatKind};
+ use ast::{BindingAnnotation, PatKind};
if let PatKind::Paren(inner) = &value.kind {
match inner.kind {
@@ -762,19 +778,18 @@ impl UnusedParens {
// Avoid `p0 | .. | pn` if we should.
PatKind::Or(..) if avoid_or => return,
// Avoid `mut x` and `mut x @ p` if we should:
- PatKind::Ident(BindingMode::ByValue(Mutability::Mut), ..) if avoid_mut => return,
+ PatKind::Ident(BindingAnnotation::MUT, ..) if avoid_mut => {
+ return;
+ }
// Otherwise proceed with linting.
_ => {}
}
- let spans = if value.span.from_expansion() || inner.span.from_expansion() {
- (
- value.span.with_hi(value.span.lo() + BytePos(1)),
- value.span.with_lo(value.span.hi() - BytePos(1)),
- )
+ let spans = if let Some(inner) = inner.span.find_ancestor_inside(value.span) {
+ Some((value.span.with_hi(inner.lo()), value.span.with_lo(inner.hi())))
} else {
- (value.span.with_hi(inner.span.lo()), value.span.with_lo(inner.span.hi()))
+ None
};
- self.emit_unused_delims(cx, spans, "pattern", (false, false));
+ self.emit_unused_delims(cx, value.span, spans, "pattern", (false, false));
}
}
}
@@ -879,15 +894,12 @@ impl EarlyLintPass for UnusedParens {
);
}
_ => {
- let spans = if ty.span.from_expansion() || r.span.from_expansion() {
- (
- ty.span.with_hi(ty.span.lo() + BytePos(1)),
- ty.span.with_lo(ty.span.hi() - BytePos(1)),
- )
+ let spans = if let Some(r) = r.span.find_ancestor_inside(ty.span) {
+ Some((ty.span.with_hi(r.lo()), ty.span.with_lo(r.hi())))
} else {
- (ty.span.with_hi(r.span.lo()), ty.span.with_lo(r.span.hi()))
+ None
};
- self.emit_unused_delims(cx, spans, "type", (false, false));
+ self.emit_unused_delims(cx, ty.span, spans, "type", (false, false));
}
}
}
@@ -1131,9 +1143,12 @@ impl UnusedImportBraces {
ast::UseTreeKind::Nested(_) => return,
};
- cx.struct_span_lint(UNUSED_IMPORT_BRACES, item.span, |lint| {
- lint.build(fluent::lint::unused_import_braces).set_arg("node", node_name).emit();
- });
+ cx.struct_span_lint(
+ UNUSED_IMPORT_BRACES,
+ item.span,
+ fluent::lint_unused_import_braces,
+ |lint| lint.set_arg("node", node_name),
+ );
}
}
}
@@ -1182,15 +1197,17 @@ impl<'tcx> LateLintPass<'tcx> for UnusedAllocation {
for adj in cx.typeck_results().expr_adjustments(e) {
if let adjustment::Adjust::Borrow(adjustment::AutoBorrow::Ref(_, m)) = adj.kind {
- cx.struct_span_lint(UNUSED_ALLOCATION, e.span, |lint| {
- lint.build(match m {
- adjustment::AutoBorrowMutability::Not => fluent::lint::unused_allocation,
+ cx.struct_span_lint(
+ UNUSED_ALLOCATION,
+ e.span,
+ match m {
+ adjustment::AutoBorrowMutability::Not => fluent::lint_unused_allocation,
adjustment::AutoBorrowMutability::Mut { .. } => {
- fluent::lint::unused_allocation_mut
+ fluent::lint_unused_allocation_mut
}
- })
- .emit();
- });
+ },
+ |lint| lint,
+ );
}
}
}
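
The `UnusedDelimLint`/`UnusedParens` hunks above replace the old `from_expansion`/`BytePos(1)` workaround with `Span::find_ancestor_inside`, which maps the inner span back into the outer expression's span; when that fails, `spans` is `None` and the lint is emitted on `value.span` without a machine-applicable suggestion. The same computation, written with `map` purely for illustration (names are local placeholders):

```rust
// Produce the "before" and "after" delimiter spans only when the inner span
// can be mapped back inside the outer one; otherwise skip the suggestion.
let spans = inner_span
    .find_ancestor_inside(value_span)
    .map(|inner| (value_span.with_hi(inner.lo()), value_span.with_lo(inner.hi())));
```
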
diff --git a/compiler/rustc_lint_defs/src/builtin.rs b/compiler/rustc_lint_defs/src/builtin.rs
index f00165cd3..61ee467f5 100644
--- a/compiler/rustc_lint_defs/src/builtin.rs
+++ b/compiler/rustc_lint_defs/src/builtin.rs
@@ -264,37 +264,6 @@ declare_lint! {
}
declare_lint! {
- /// The `const_err` lint detects an erroneous expression while doing
- /// constant evaluation.
- ///
- /// ### Example
- ///
- /// ```rust,compile_fail
- /// #![allow(unconditional_panic)]
- /// const C: i32 = 1/0;
- /// ```
- ///
- /// {{produces}}
- ///
- /// ### Explanation
- ///
- /// This lint detects constants that fail to evaluate. Allowing the lint will accept the
- /// constant declaration, but any use of this constant will still lead to a hard error. This is
- /// a future incompatibility lint; the plan is to eventually entirely forbid even declaring
- /// constants that cannot be evaluated. See [issue #71800] for more details.
- ///
- /// [issue #71800]: https://github.com/rust-lang/rust/issues/71800
- pub CONST_ERR,
- Deny,
- "constant evaluation encountered erroneous expression",
- @future_incompatible = FutureIncompatibleInfo {
- reference: "issue #71800 <https://github.com/rust-lang/rust/issues/71800>",
- reason: FutureIncompatibilityReason::FutureReleaseErrorReportNow,
- };
- report_in_external_macro
-}
-
-declare_lint! {
/// The `unused_imports` lint detects imports that are never used.
///
/// ### Example
@@ -1458,6 +1427,7 @@ declare_lint! {
"trait-object types were treated as different depending on marker-trait order",
@future_incompatible = FutureIncompatibleInfo {
reference: "issue #56484 <https://github.com/rust-lang/rust/issues/56484>",
+ reason: FutureIncompatibilityReason::FutureReleaseErrorReportNow,
};
}
@@ -2909,7 +2879,7 @@ declare_lint! {
/// ### Example
///
/// ```rust
- /// #![feature(naked_functions)]
+ /// #![feature(asm_experimental_arch, naked_functions)]
///
/// use std::arch::asm;
///
@@ -3094,7 +3064,7 @@ declare_lint! {
///
/// ### Example
///
- /// ```rust
+ /// ```rust,compile_fail
/// #![cfg_attr(debug_assertions, crate_type = "lib")]
/// ```
///
@@ -3114,7 +3084,7 @@ declare_lint! {
/// rustc instead of `#![cfg_attr(..., crate_type = "...")]` and
/// `--crate-name` instead of `#![cfg_attr(..., crate_name = "...")]`.
pub DEPRECATED_CFG_ATTR_CRATE_TYPE_NAME,
- Warn,
+ Deny,
"detects usage of `#![cfg_attr(..., crate_type/crate_name = \"...\")]`",
@future_incompatible = FutureIncompatibleInfo {
reference: "issue #91632 <https://github.com/rust-lang/rust/issues/91632>",
@@ -3206,12 +3176,62 @@ declare_lint! {
/// [future-incompatible]: ../index.md#future-incompatible-lints
pub REPR_TRANSPARENT_EXTERNAL_PRIVATE_FIELDS,
Warn,
- "tranparent type contains an external ZST that is marked #[non_exhaustive] or contains private fields",
+ "transparent type contains an external ZST that is marked #[non_exhaustive] or contains private fields",
@future_incompatible = FutureIncompatibleInfo {
reference: "issue #78586 <https://github.com/rust-lang/rust/issues/78586>",
};
}
+declare_lint! {
+ /// The `unstable_syntax_pre_expansion` lint detects the use of unstable
+ /// syntax that is discarded during attribute expansion.
+ ///
+ /// ### Example
+ ///
+ /// ```rust
+ /// #[cfg(FALSE)]
+ /// macro foo() {}
+ /// ```
+ ///
+ /// {{produces}}
+ ///
+ /// ### Explanation
+ ///
+ /// The input to active attributes such as `#[cfg]` or procedural macro
+ /// attributes is required to be valid syntax. Previously, the compiler only
+ /// gated the use of unstable syntax features after resolving `#[cfg]` gates
+ /// and expanding procedural macros.
+ ///
+ /// To avoid relying on unstable syntax, move the use of unstable syntax
+ /// into a position where the compiler does not parse the syntax, such as a
+ /// functionlike macro.
+ ///
+ /// ```rust
+ /// # #![deny(unstable_syntax_pre_expansion)]
+ ///
+ /// macro_rules! identity {
+ /// ( $($tokens:tt)* ) => { $($tokens)* }
+ /// }
+ ///
+ /// #[cfg(FALSE)]
+ /// identity! {
+ /// macro foo() {}
+ /// }
+ /// ```
+ ///
+ /// This is a [future-incompatible] lint to transition this
+ /// to a hard error in the future. See [issue #65860] for more details.
+ ///
+ /// [issue #65860]: https://github.com/rust-lang/rust/issues/65860
+ /// [future-incompatible]: ../index.md#future-incompatible-lints
+ pub UNSTABLE_SYNTAX_PRE_EXPANSION,
+ Warn,
+ "unstable syntax can change at any point in the future, causing a hard error!",
+ @future_incompatible = FutureIncompatibleInfo {
+ reference: "issue #65860 <https://github.com/rust-lang/rust/issues/65860>",
+ };
+}
+
declare_lint_pass! {
/// Does nothing as a lint pass, but registers some `Lint`s
/// that are used by other parts of the compiler.
@@ -3245,7 +3265,6 @@ declare_lint_pass! {
EXPORTED_PRIVATE_DEPENDENCIES,
PUB_USE_OF_PRIVATE_EXTERN_CRATE,
INVALID_TYPE_PARAM_DEFAULT,
- CONST_ERR,
RENAMED_AND_REMOVED_LINTS,
UNALIGNED_REFERENCES,
CONST_ITEM_MUTATION,
@@ -3280,6 +3299,7 @@ declare_lint_pass! {
POINTER_STRUCTURAL_MATCH,
NONTRIVIAL_STRUCTURAL_MATCH,
SOFT_UNSTABLE,
+ UNSTABLE_SYNTAX_PRE_EXPANSION,
INLINE_NO_SANITIZE,
BAD_ASM_STYLE,
ASM_SUB_REGISTER,
@@ -3314,7 +3334,6 @@ declare_lint_pass! {
DEPRECATED_CFG_ATTR_CRATE_TYPE_NAME,
DUPLICATE_MACRO_ATTRIBUTES,
SUSPICIOUS_AUTO_TRAIT_IMPLS,
- UNEXPECTED_CFGS,
DEPRECATED_WHERE_CLAUSE_LOCATION,
TEST_UNSTABLE_LINT,
FFI_UNWIND_CALLS,
@@ -3357,7 +3376,7 @@ declare_lint! {
///
/// ### Example of drop reorder
///
- /// ```rust,compile_fail
+ /// ```rust,edition2018,compile_fail
/// #![deny(rust_2021_incompatible_closure_captures)]
/// # #![allow(unused)]
///
@@ -3393,7 +3412,7 @@ declare_lint! {
///
/// ### Example of auto-trait
///
- /// ```rust,compile_fail
+ /// ```rust,edition2018,compile_fail
/// #![deny(rust_2021_incompatible_closure_captures)]
/// use std::thread;
///
@@ -3919,7 +3938,7 @@ declare_lint! {
///
/// The compiler disables the automatic implementation if an explicit one
/// exists for given type constructor. The exact rules governing this
- /// are currently unsound and quite subtle and and will be modified in the future.
+ /// are currently unsound, quite subtle, and will be modified in the future.
/// This change will cause the automatic implementation to be disabled in more
/// cases, potentially breaking some code.
pub SUSPICIOUS_AUTO_TRAIT_IMPLS,
@@ -3938,8 +3957,6 @@ declare_lint! {
/// ### Example
///
/// ```rust
- /// #![feature(generic_associated_types)]
- ///
/// trait Trait {
/// type Assoc<'a> where Self: 'a;
/// }
diff --git a/compiler/rustc_lint_defs/src/lib.rs b/compiler/rustc_lint_defs/src/lib.rs
index 6acbe97a7..aa54b3d8a 100644
--- a/compiler/rustc_lint_defs/src/lib.rs
+++ b/compiler/rustc_lint_defs/src/lib.rs
@@ -1,4 +1,6 @@
#![feature(min_specialization)]
+#![deny(rustc::untranslatable_diagnostic)]
+#![deny(rustc::diagnostic_outside_of_impl)]
#[macro_use]
extern crate rustc_macros;
@@ -7,7 +9,7 @@ pub use self::Level::*;
use rustc_ast::node_id::{NodeId, NodeMap};
use rustc_ast::{AttrId, Attribute};
use rustc_data_structures::stable_hasher::{HashStable, StableHasher, ToStableHashKey};
-use rustc_error_messages::MultiSpan;
+use rustc_error_messages::{DiagnosticMessage, MultiSpan};
use rustc_hir::HashStableContext;
use rustc_hir::HirId;
use rustc_span::edition::Edition;
@@ -23,6 +25,9 @@ macro_rules! pluralize {
($x:expr) => {
if $x != 1 { "s" } else { "" }
};
+ ("has", $x:expr) => {
+ if $x == 1 { "has" } else { "have" }
+ };
("is", $x:expr) => {
if $x == 1 { "is" } else { "are" }
};
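
The new `"has"` arm mirrors the existing `"is"` arm. A standalone copy of the macro to show the intended usage (not the compiler's own code):

```rust
macro_rules! pluralize {
    ($x:expr) => { if $x != 1 { "s" } else { "" } };
    ("has", $x:expr) => { if $x == 1 { "has" } else { "have" } };
    ("is", $x:expr) => { if $x == 1 { "is" } else { "are" } };
}

fn main() {
    let n = 2;
    // Prints: 2 fields have been moved
    println!("{n} field{} {} been moved", pluralize!(n), pluralize!("has", n));
}
```
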
@@ -39,7 +44,8 @@ macro_rules! pluralize {
/// All suggestions are marked with an `Applicability`. Tools use the applicability of a suggestion
/// to determine whether it should be automatically applied or if the user should be consulted
/// before applying the suggestion.
-#[derive(Copy, Clone, Debug, PartialEq, Hash, Encodable, Decodable, Serialize, Deserialize)]
+#[derive(Copy, Clone, Debug, Hash, Encodable, Decodable, Serialize, Deserialize)]
+#[derive(PartialEq, Eq, PartialOrd, Ord)]
pub enum Applicability {
/// The suggestion is definitely what the user intended, or maintains the exact meaning of the code.
/// This suggestion should be automatically applied.
@@ -89,7 +95,7 @@ pub enum LintExpectationId {
/// stable and can be cached. The additional index ensures that nodes with
/// several expectations can correctly match diagnostics to the individual
/// expectation.
- Stable { hir_id: HirId, attr_index: u16, lint_index: Option<u16> },
+ Stable { hir_id: HirId, attr_index: u16, lint_index: Option<u16>, attr_id: Option<AttrId> },
}
impl LintExpectationId {
@@ -113,13 +119,31 @@ impl LintExpectationId {
*lint_index = new_lint_index
}
+
+ /// Prepares the id for hashing. Removes references to the ast.
+ /// Should only be called when the id is stable.
+ pub fn normalize(self) -> Self {
+ match self {
+ Self::Stable { hir_id, attr_index, lint_index, .. } => {
+ Self::Stable { hir_id, attr_index, lint_index, attr_id: None }
+ }
+ Self::Unstable { .. } => {
+ unreachable!("`normalize` called when `ExpectationId` is unstable")
+ }
+ }
+ }
}
impl<HCX: rustc_hir::HashStableContext> HashStable<HCX> for LintExpectationId {
#[inline]
fn hash_stable(&self, hcx: &mut HCX, hasher: &mut StableHasher) {
match self {
- LintExpectationId::Stable { hir_id, attr_index, lint_index: Some(lint_index) } => {
+ LintExpectationId::Stable {
+ hir_id,
+ attr_index,
+ lint_index: Some(lint_index),
+ attr_id: _,
+ } => {
hir_id.hash_stable(hcx, hasher);
attr_index.hash_stable(hcx, hasher);
lint_index.hash_stable(hcx, hasher);
@@ -139,9 +163,12 @@ impl<HCX: rustc_hir::HashStableContext> ToStableHashKey<HCX> for LintExpectation
#[inline]
fn to_stable_hash_key(&self, _: &HCX) -> Self::KeyType {
match self {
- LintExpectationId::Stable { hir_id, attr_index, lint_index: Some(lint_index) } => {
- (*hir_id, *attr_index, *lint_index)
- }
+ LintExpectationId::Stable {
+ hir_id,
+ attr_index,
+ lint_index: Some(lint_index),
+ attr_id: _,
+ } => (*hir_id, *attr_index, *lint_index),
_ => {
unreachable!("HashStable should only be called for a filled `LintExpectationId`")
}
@@ -489,7 +516,7 @@ pub struct BufferedEarlyLint {
pub span: MultiSpan,
/// The lint message.
- pub msg: String,
+ pub msg: DiagnosticMessage,
/// The `NodeId` of the AST node that generated the lint.
pub node_id: NodeId,
@@ -518,11 +545,11 @@ impl LintBuffer {
lint: &'static Lint,
node_id: NodeId,
span: MultiSpan,
- msg: &str,
+ msg: impl Into<DiagnosticMessage>,
diagnostic: BuiltinLintDiagnostics,
) {
let lint_id = LintId::of(lint);
- let msg = msg.to_string();
+ let msg = msg.into();
self.add_early_lint(BufferedEarlyLint { lint_id, node_id, span, msg, diagnostic });
}
@@ -535,7 +562,7 @@ impl LintBuffer {
lint: &'static Lint,
id: NodeId,
sp: impl Into<MultiSpan>,
- msg: &str,
+ msg: impl Into<DiagnosticMessage>,
) {
self.add_lint(lint, id, sp.into(), msg, BuiltinLintDiagnostics::Normal)
}
@@ -545,7 +572,7 @@ impl LintBuffer {
lint: &'static Lint,
id: NodeId,
sp: impl Into<MultiSpan>,
- msg: &str,
+ msg: impl Into<DiagnosticMessage>,
diagnostic: BuiltinLintDiagnostics,
) {
self.add_lint(lint, id, sp.into(), msg, diagnostic)
@@ -655,18 +682,21 @@ macro_rules! declare_lint {
macro_rules! declare_tool_lint {
(
$(#[$attr:meta])* $vis:vis $tool:ident ::$NAME:ident, $Level: ident, $desc: expr
+ $(, @feature_gate = $gate:expr;)?
) => (
- $crate::declare_tool_lint!{$(#[$attr])* $vis $tool::$NAME, $Level, $desc, false}
+ $crate::declare_tool_lint!{$(#[$attr])* $vis $tool::$NAME, $Level, $desc, false $(, @feature_gate = $gate;)?}
);
(
$(#[$attr:meta])* $vis:vis $tool:ident ::$NAME:ident, $Level:ident, $desc:expr,
report_in_external_macro: $rep:expr
+ $(, @feature_gate = $gate:expr;)?
) => (
- $crate::declare_tool_lint!{$(#[$attr])* $vis $tool::$NAME, $Level, $desc, $rep}
+ $crate::declare_tool_lint!{$(#[$attr])* $vis $tool::$NAME, $Level, $desc, $rep $(, @feature_gate = $gate;)?}
);
(
$(#[$attr:meta])* $vis:vis $tool:ident ::$NAME:ident, $Level:ident, $desc:expr,
$external:expr
+ $(, @feature_gate = $gate:expr;)?
) => (
$(#[$attr])*
$vis static $NAME: &$crate::Lint = &$crate::Lint {
@@ -677,8 +707,9 @@ macro_rules! declare_tool_lint {
report_in_external_macro: $external,
future_incompatible: None,
is_plugin: true,
- feature_gate: None,
+ $(feature_gate: Some($gate),)?
crate_level_only: false,
+ ..$crate::Lint::default_fields_for_macro()
};
);
}
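
With the optional `@feature_gate` parameter added above, a tool lint can now be tied to a feature gate, while the remaining `Lint` fields come from `default_fields_for_macro()`. A hedged sketch of an invocation; the lint name, description, and gate symbol are placeholders, not taken from this patch:

```rust
declare_tool_lint! {
    /// Hypothetical feature-gated tool lint.
    pub rustc::SOME_INTERNAL_LINT,
    Allow,
    "description of the hypothetical lint",
    report_in_external_macro: false,
    @feature_gate = sym::rustc_attrs;
}
```
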
diff --git a/compiler/rustc_llvm/build.rs b/compiler/rustc_llvm/build.rs
index 62ef5804d..28e092c1e 100644
--- a/compiler/rustc_llvm/build.rs
+++ b/compiler/rustc_llvm/build.rs
@@ -242,6 +242,13 @@ fn main() {
println!("cargo:rustc-link-lib=uuid");
} else if target.contains("netbsd") || target.contains("haiku") || target.contains("darwin") {
println!("cargo:rustc-link-lib=z");
+ } else if target.starts_with("arm")
+ || target.starts_with("mips-")
+ || target.starts_with("mipsel-")
+ || target.starts_with("powerpc-")
+ {
+ // 32-bit targets need to link libatomic.
+ println!("cargo:rustc-link-lib=atomic");
}
cmd.args(&components);
@@ -335,10 +342,10 @@ fn main() {
};
// RISC-V GCC erroneously requires libatomic for sub-word
- // atomic operations. FreeBSD uses Clang as its system
+ // atomic operations. Some BSD uses Clang as its system
// compiler and provides no libatomic in its base system so
// does not want this.
- if !target.contains("freebsd") && target.starts_with("riscv") {
+ if target.starts_with("riscv") && !target.contains("freebsd") && !target.contains("openbsd") {
println!("cargo:rustc-link-lib=atomic");
}
diff --git a/compiler/rustc_llvm/llvm-wrapper/ArchiveWrapper.cpp b/compiler/rustc_llvm/llvm-wrapper/ArchiveWrapper.cpp
index 97541e615..448a1f62f 100644
--- a/compiler/rustc_llvm/llvm-wrapper/ArchiveWrapper.cpp
+++ b/compiler/rustc_llvm/llvm-wrapper/ArchiveWrapper.cpp
@@ -154,19 +154,6 @@ LLVMRustArchiveChildName(LLVMRustArchiveChildConstRef Child, size_t *Size) {
return Name.data();
}
-extern "C" const char *LLVMRustArchiveChildData(LLVMRustArchiveChildRef Child,
- size_t *Size) {
- StringRef Buf;
- Expected<StringRef> BufOrErr = Child->getBuffer();
- if (!BufOrErr) {
- LLVMRustSetLastError(toString(BufOrErr.takeError()).c_str());
- return nullptr;
- }
- Buf = BufOrErr.get();
- *Size = Buf.size();
- return Buf.data();
-}
-
extern "C" LLVMRustArchiveMemberRef
LLVMRustArchiveMemberNew(char *Filename, char *Name,
LLVMRustArchiveChildRef Child) {
diff --git a/compiler/rustc_llvm/llvm-wrapper/CoverageMappingWrapper.cpp b/compiler/rustc_llvm/llvm-wrapper/CoverageMappingWrapper.cpp
index 154f554d6..7da6ab713 100644
--- a/compiler/rustc_llvm/llvm-wrapper/CoverageMappingWrapper.cpp
+++ b/compiler/rustc_llvm/llvm-wrapper/CoverageMappingWrapper.cpp
@@ -24,17 +24,10 @@ extern "C" void LLVMRustCoverageWriteFilenamesSectionToBuffer(
const char* const Filenames[],
size_t FilenamesLen,
RustStringRef BufferOut) {
-#if LLVM_VERSION_GE(13,0)
SmallVector<std::string,32> FilenameRefs;
for (size_t i = 0; i < FilenamesLen; i++) {
FilenameRefs.push_back(std::string(Filenames[i]));
}
-#else
- SmallVector<StringRef,32> FilenameRefs;
- for (size_t i = 0; i < FilenamesLen; i++) {
- FilenameRefs.push_back(StringRef(Filenames[i]));
- }
-#endif
auto FilenamesWriter = coverage::CoverageFilenamesSectionWriter(
makeArrayRef(FilenameRefs));
RawRustStringOstream OS(BufferOut);
@@ -109,9 +102,5 @@ extern "C" void LLVMRustCoverageWriteMappingVarNameToString(RustStringRef Str) {
}
extern "C" uint32_t LLVMRustCoverageMappingVersion() {
-#if LLVM_VERSION_GE(13, 0)
return coverage::CovMapVersion::Version6;
-#else
- return coverage::CovMapVersion::Version5;
-#endif
}
diff --git a/compiler/rustc_llvm/llvm-wrapper/PassWrapper.cpp b/compiler/rustc_llvm/llvm-wrapper/PassWrapper.cpp
index 0a6bd4999..18d37d95a 100644
--- a/compiler/rustc_llvm/llvm-wrapper/PassWrapper.cpp
+++ b/compiler/rustc_llvm/llvm-wrapper/PassWrapper.cpp
@@ -31,10 +31,12 @@
#include "llvm/Transforms/IPO/PassManagerBuilder.h"
#include "llvm/Transforms/IPO/AlwaysInliner.h"
#include "llvm/Transforms/IPO/FunctionImport.h"
+#include "llvm/Transforms/IPO/Internalize.h"
+#include "llvm/Transforms/IPO/ThinLTOBitcodeWriter.h"
#include "llvm/Transforms/Utils/AddDiscriminators.h"
#include "llvm/Transforms/Utils/FunctionImportUtils.h"
#include "llvm/LTO/LTO.h"
-#include "llvm/Bitcode/BitcodeWriterPass.h"
+#include "llvm/Bitcode/BitcodeWriter.h"
#include "llvm-c/Transforms/PassManagerBuilder.h"
#include "llvm/Transforms/Instrumentation.h"
@@ -67,7 +69,9 @@ extern "C" void LLVMInitializePasses() {
initializeAnalysis(Registry);
initializeTransformUtils(Registry);
initializeInstCombine(Registry);
+#if LLVM_VERSION_LT(16, 0)
initializeInstrumentation(Registry);
+#endif
initializeTarget(Registry);
}
@@ -90,167 +94,6 @@ extern "C" void LLVMTimeTraceProfilerFinish(const char* FileName) {
timeTraceProfilerCleanup();
}
-extern "C" LLVMPassRef LLVMRustFindAndCreatePass(const char *PassName) {
-#if LLVM_VERSION_LT(15, 0)
- StringRef SR(PassName);
- PassRegistry *PR = PassRegistry::getPassRegistry();
-
- const PassInfo *PI = PR->getPassInfo(SR);
- if (PI) {
- return wrap(PI->createPass());
- }
- return nullptr;
-#else
- report_fatal_error("Legacy PM not supported with LLVM 15");
-#endif
-}
-
-extern "C" LLVMPassRef LLVMRustCreateAddressSanitizerFunctionPass(bool Recover) {
-#if LLVM_VERSION_LT(15, 0)
- const bool CompileKernel = false;
- const bool UseAfterScope = true;
-
- return wrap(createAddressSanitizerFunctionPass(CompileKernel, Recover, UseAfterScope));
-#else
- report_fatal_error("Legacy PM not supported with LLVM 15");
-#endif
-}
-
-extern "C" LLVMPassRef LLVMRustCreateModuleAddressSanitizerPass(bool Recover) {
-#if LLVM_VERSION_LT(15, 0)
- const bool CompileKernel = false;
-
- return wrap(createModuleAddressSanitizerLegacyPassPass(CompileKernel, Recover));
-#else
- report_fatal_error("Legacy PM not supported with LLVM 15");
-#endif
-}
-
-extern "C" LLVMPassRef LLVMRustCreateMemorySanitizerPass(int TrackOrigins, bool Recover) {
-#if LLVM_VERSION_LT(15, 0)
- const bool CompileKernel = false;
-
- return wrap(createMemorySanitizerLegacyPassPass(
- MemorySanitizerOptions{TrackOrigins, Recover, CompileKernel}));
-#else
- report_fatal_error("Legacy PM not supported with LLVM 15");
-#endif
-}
-
-extern "C" LLVMPassRef LLVMRustCreateThreadSanitizerPass() {
-#if LLVM_VERSION_LT(15, 0)
- return wrap(createThreadSanitizerLegacyPassPass());
-#else
- report_fatal_error("Legacy PM not supported with LLVM 15");
-#endif
-}
-
-extern "C" LLVMPassRef LLVMRustCreateHWAddressSanitizerPass(bool Recover) {
-#if LLVM_VERSION_LT(15, 0)
- const bool CompileKernel = false;
-
- return wrap(createHWAddressSanitizerLegacyPassPass(CompileKernel, Recover));
-#else
- report_fatal_error("Legacy PM not supported with LLVM 15");
-#endif
-}
-
-extern "C" void LLVMRustAddPass(LLVMPassManagerRef PMR, LLVMPassRef RustPass) {
-#if LLVM_VERSION_LT(15, 0)
- assert(RustPass);
- Pass *Pass = unwrap(RustPass);
- PassManagerBase *PMB = unwrap(PMR);
- PMB->add(Pass);
-#else
- report_fatal_error("Legacy PM not supported with LLVM 15");
-#endif
-}
-
-extern "C" LLVMPassManagerBuilderRef LLVMRustPassManagerBuilderCreate() {
-#if LLVM_VERSION_LT(15, 0)
- return LLVMPassManagerBuilderCreate();
-#else
- report_fatal_error("Legacy PM not supported with LLVM 15");
-#endif
-}
-
-extern "C" void LLVMRustPassManagerBuilderDispose(LLVMPassManagerBuilderRef PMB) {
-#if LLVM_VERSION_LT(15, 0)
- LLVMPassManagerBuilderDispose(PMB);
-#else
- report_fatal_error("Legacy PM not supported with LLVM 15");
-#endif
-}
-
-extern "C" void LLVMRustPassManagerBuilderPopulateFunctionPassManager(
- LLVMPassManagerBuilderRef PMB, LLVMPassManagerRef PM) {
-#if LLVM_VERSION_LT(15, 0)
- LLVMPassManagerBuilderPopulateFunctionPassManager(PMB, PM);
-#else
- report_fatal_error("Legacy PM not supported with LLVM 15");
-#endif
-}
-
-extern "C" void LLVMRustPassManagerBuilderPopulateModulePassManager(
- LLVMPassManagerBuilderRef PMB, LLVMPassManagerRef PM) {
-#if LLVM_VERSION_LT(15, 0)
- LLVMPassManagerBuilderPopulateModulePassManager(PMB, PM);
-#else
- report_fatal_error("Legacy PM not supported with LLVM 15");
-#endif
-}
-
-extern "C" void LLVMRustPassManagerBuilderPopulateLTOPassManager(
- LLVMPassManagerBuilderRef PMB, LLVMPassManagerRef PM, bool Internalize, bool RunInliner) {
-#if LLVM_VERSION_LT(15, 0)
- LLVMPassManagerBuilderPopulateLTOPassManager(PMB, PM, Internalize, RunInliner);
-#else
- report_fatal_error("Legacy PM not supported with LLVM 15");
-#endif
-}
-
-extern "C"
-void LLVMRustPassManagerBuilderPopulateThinLTOPassManager(
- LLVMPassManagerBuilderRef PMBR,
- LLVMPassManagerRef PMR
-) {
-#if LLVM_VERSION_LT(15, 0)
- unwrap(PMBR)->populateThinLTOPassManager(*unwrap(PMR));
-#else
- report_fatal_error("Legacy PM not supported with LLVM 15");
-#endif
-}
-
-extern "C" void LLVMRustPassManagerBuilderUseInlinerWithThreshold(
- LLVMPassManagerBuilderRef PMB, unsigned Threshold) {
-#if LLVM_VERSION_LT(15, 0)
- LLVMPassManagerBuilderUseInlinerWithThreshold(PMB, Threshold);
-#else
- report_fatal_error("Legacy PM not supported with LLVM 15");
-#endif
-}
-
-extern "C"
-void LLVMRustAddLastExtensionPasses(
- LLVMPassManagerBuilderRef PMBR, LLVMPassRef *Passes, size_t NumPasses) {
-#if LLVM_VERSION_LT(15, 0)
- auto AddExtensionPasses = [Passes, NumPasses](
- const PassManagerBuilder &Builder, PassManagerBase &PM) {
- for (size_t I = 0; I < NumPasses; I++) {
- PM.add(unwrap(Passes[I]));
- }
- };
- // Add the passes to both of the pre-finalization extension points,
- // so they are run for optimized and non-optimized builds.
- unwrap(PMBR)->addExtension(PassManagerBuilder::EP_OptimizerLast,
- AddExtensionPasses);
- unwrap(PMBR)->addExtension(PassManagerBuilder::EP_EnabledOnOptLevel0,
- AddExtensionPasses);
-#else
- report_fatal_error("Legacy PM not supported with LLVM 15");
-#endif
-}
-
#ifdef LLVM_COMPONENT_X86
#define SUBTARGET_X86 SUBTARGET(X86)
#else
@@ -596,47 +439,6 @@ extern "C" void LLVMRustDisposeTargetMachine(LLVMTargetMachineRef TM) {
delete unwrap(TM);
}
-extern "C" void LLVMRustConfigurePassManagerBuilder(
- LLVMPassManagerBuilderRef PMBR, LLVMRustCodeGenOptLevel OptLevel,
- bool MergeFunctions, bool SLPVectorize, bool LoopVectorize, bool PrepareForThinLTO,
- const char* PGOGenPath, const char* PGOUsePath, const char* PGOSampleUsePath,
- int SizeLevel) {
-#if LLVM_VERSION_LT(15, 0)
- unwrap(PMBR)->MergeFunctions = MergeFunctions;
- unwrap(PMBR)->SLPVectorize = SLPVectorize;
- unwrap(PMBR)->OptLevel = fromRust(OptLevel);
- unwrap(PMBR)->LoopVectorize = LoopVectorize;
- unwrap(PMBR)->PrepareForThinLTO = PrepareForThinLTO;
- unwrap(PMBR)->SizeLevel = SizeLevel;
- unwrap(PMBR)->DisableUnrollLoops = SizeLevel != 0;
-
- if (PGOGenPath) {
- assert(!PGOUsePath && !PGOSampleUsePath);
- unwrap(PMBR)->EnablePGOInstrGen = true;
- unwrap(PMBR)->PGOInstrGen = PGOGenPath;
- } else if (PGOUsePath) {
- assert(!PGOSampleUsePath);
- unwrap(PMBR)->PGOInstrUse = PGOUsePath;
- } else if (PGOSampleUsePath) {
- unwrap(PMBR)->PGOSampleUse = PGOSampleUsePath;
- }
-#else
- report_fatal_error("Legacy PM not supported with LLVM 15");
-#endif
-}
-
-// Unfortunately, the LLVM C API doesn't provide a way to set the `LibraryInfo`
-// field of a PassManagerBuilder, we expose our own method of doing so.
-extern "C" void LLVMRustAddBuilderLibraryInfo(LLVMPassManagerBuilderRef PMBR,
- LLVMModuleRef M,
- bool DisableSimplifyLibCalls) {
- Triple TargetTriple(unwrap(M)->getTargetTriple());
- TargetLibraryInfoImpl *TLI = new TargetLibraryInfoImpl(TargetTriple);
- if (DisableSimplifyLibCalls)
- TLI->disableAllFunctions();
- unwrap(PMBR)->LibraryInfo = TLI;
-}
-
// Unfortunately, the LLVM C API doesn't provide a way to create the
// TargetLibraryInfo pass, so we use this method to do so.
extern "C" void LLVMRustAddLibraryInfo(LLVMPassManagerRef PMR, LLVMModuleRef M,
@@ -648,27 +450,6 @@ extern "C" void LLVMRustAddLibraryInfo(LLVMPassManagerRef PMR, LLVMModuleRef M,
unwrap(PMR)->add(new TargetLibraryInfoWrapperPass(TLII));
}
-// Unfortunately, the LLVM C API doesn't provide an easy way of iterating over
-// all the functions in a module, so we do that manually here. You'll find
-// similar code in clang's BackendUtil.cpp file.
-extern "C" void LLVMRustRunFunctionPassManager(LLVMPassManagerRef PMR,
- LLVMModuleRef M) {
- llvm::legacy::FunctionPassManager *P =
- unwrap<llvm::legacy::FunctionPassManager>(PMR);
- P->doInitialization();
-
- // Upgrade all calls to old intrinsics first.
- for (Module::iterator I = unwrap(M)->begin(), E = unwrap(M)->end(); I != E;)
- UpgradeCallsToIntrinsic(&*I++); // must be post-increment, as we remove
-
- for (Module::iterator I = unwrap(M)->begin(), E = unwrap(M)->end(); I != E;
- ++I)
- if (!I->isDeclaration())
- P->run(*I);
-
- P->doFinalization();
-}
-
extern "C" void LLVMRustSetLLVMOptions(int Argc, char **Argv) {
// Initializing the command-line options more than once is not allowed. So,
// check if they've already been initialized. (This could happen if we're
@@ -812,7 +593,7 @@ struct LLVMRustSanitizerOptions {
};
extern "C" LLVMRustResult
-LLVMRustOptimizeWithNewPassManager(
+LLVMRustOptimize(
LLVMModuleRef ModuleRef,
LLVMTargetMachineRef TMRef,
LLVMRustPassBuilderOptLevel OptLevelRust,
@@ -822,7 +603,8 @@ LLVMRustOptimizeWithNewPassManager(
bool DisableSimplifyLibCalls, bool EmitLifetimeMarkers,
LLVMRustSanitizerOptions *SanitizerOptions,
const char *PGOGenPath, const char *PGOUsePath,
- bool InstrumentCoverage, bool InstrumentGCOV,
+ bool InstrumentCoverage, const char *InstrProfileOutput,
+ bool InstrumentGCOV,
const char *PGOSampleUsePath, bool DebugInfoForProfiling,
void* LlvmSelfProfiler,
LLVMRustSelfProfileBeforePassCallback BeforePassCallback,
@@ -869,19 +651,11 @@ LLVMRustOptimizeWithNewPassManager(
PGOOptions::NoCSAction, DebugInfoForProfiling);
}
-#if LLVM_VERSION_GE(13, 0)
PassBuilder PB(TM, PTO, PGOOpt, &PIC);
LoopAnalysisManager LAM;
FunctionAnalysisManager FAM;
CGSCCAnalysisManager CGAM;
ModuleAnalysisManager MAM;
-#else
- PassBuilder PB(DebugPassManager, TM, PTO, PGOOpt, &PIC);
- LoopAnalysisManager LAM(DebugPassManager);
- FunctionAnalysisManager FAM(DebugPassManager);
- CGSCCAnalysisManager CGAM(DebugPassManager);
- ModuleAnalysisManager MAM(DebugPassManager);
-#endif
FAM.registerPass([&] { return PB.buildDefaultAAPipeline(); });
@@ -922,8 +696,11 @@ LLVMRustOptimizeWithNewPassManager(
if (InstrumentCoverage) {
PipelineStartEPCallbacks.push_back(
- [](ModulePassManager &MPM, OptimizationLevel Level) {
+ [InstrProfileOutput](ModulePassManager &MPM, OptimizationLevel Level) {
InstrProfOptions Options;
+ if (InstrProfileOutput) {
+ Options.InstrProfileOutput = InstrProfileOutput;
+ }
MPM.addPass(InstrProfiling(Options, false));
}
);
@@ -931,18 +708,28 @@ LLVMRustOptimizeWithNewPassManager(
if (SanitizerOptions) {
if (SanitizerOptions->SanitizeMemory) {
+#if LLVM_VERSION_GE(14, 0)
+ MemorySanitizerOptions Options(
+ SanitizerOptions->SanitizeMemoryTrackOrigins,
+ SanitizerOptions->SanitizeMemoryRecover,
+ /*CompileKernel=*/false,
+ /*EagerChecks=*/true);
+#else
MemorySanitizerOptions Options(
SanitizerOptions->SanitizeMemoryTrackOrigins,
SanitizerOptions->SanitizeMemoryRecover,
/*CompileKernel=*/false);
+#endif
OptimizerLastEPCallbacks.push_back(
[Options](ModulePassManager &MPM, OptimizationLevel Level) {
-#if LLVM_VERSION_GE(14, 0)
+#if LLVM_VERSION_GE(14, 0) && LLVM_VERSION_LT(16, 0)
MPM.addPass(ModuleMemorySanitizerPass(Options));
#else
MPM.addPass(MemorySanitizerPass(Options));
#endif
+#if LLVM_VERSION_LT(16, 0)
MPM.addPass(createModuleToFunctionPassAdaptor(MemorySanitizerPass(Options)));
+#endif
}
);
}
@@ -973,8 +760,12 @@ LLVMRustOptimizeWithNewPassManager(
/*UseAfterScope=*/true,
AsanDetectStackUseAfterReturnMode::Runtime,
};
+#if LLVM_VERSION_LT(16, 0)
MPM.addPass(ModuleAddressSanitizerPass(opts));
#else
+ MPM.addPass(AddressSanitizerPass(opts));
+#endif
+#else
MPM.addPass(ModuleAddressSanitizerPass(
/*CompileKernel=*/false, SanitizerOptions->SanitizeAddressRecover));
MPM.addPass(createModuleToFunctionPassAdaptor(AddressSanitizerPass(
@@ -1015,11 +806,7 @@ LLVMRustOptimizeWithNewPassManager(
}
}
-#if LLVM_VERSION_GE(13, 0)
ModulePassManager MPM;
-#else
- ModulePassManager MPM(DebugPassManager);
-#endif
bool NeedThinLTOBufferPasses = UseThinLTOBuffers;
if (!NoPrepopulatePasses) {
     // The pre-link pipelines don't support O0 and require using buildO0DefaultPipeline() instead.
@@ -1227,15 +1014,8 @@ extern "C" void LLVMRustPrintPasses() {
PR->enumerateWith(&Listener);
}
-extern "C" void LLVMRustAddAlwaysInlinePass(LLVMPassManagerBuilderRef PMBR,
- bool AddLifetimes) {
- unwrap(PMBR)->Inliner = llvm::createAlwaysInlinerLegacyPass(AddLifetimes);
-}
-
extern "C" void LLVMRustRunRestrictionPass(LLVMModuleRef M, char **Symbols,
size_t Len) {
- llvm::legacy::PassManager passes;
-
auto PreserveFunctions = [=](const GlobalValue &GV) {
for (size_t I = 0; I < Len; I++) {
if (GV.getName() == Symbols[I]) {
@@ -1245,9 +1025,7 @@ extern "C" void LLVMRustRunRestrictionPass(LLVMModuleRef M, char **Symbols,
return false;
};
- passes.add(llvm::createInternalizePass(PreserveFunctions));
-
- passes.run(*unwrap(M));
+ internalizeModule(*unwrap(M), PreserveFunctions);
}
extern "C" void
@@ -1268,7 +1046,7 @@ extern "C" void LLVMRustSetModulePIELevel(LLVMModuleRef M) {
extern "C" void LLVMRustSetModuleCodeModel(LLVMModuleRef M,
LLVMRustCodeModel Model) {
auto CM = fromRust(Model);
- if (!CM.hasValue())
+ if (!CM)
return;
unwrap(M)->setCodeModel(*CM);
}
@@ -1434,17 +1212,13 @@ LLVMRustCreateThinLTOData(LLVMRustThinLTOModule *modules,
Ret->ResolvedODR[ModuleIdentifier][GUID] = NewLinkage;
};
-#if LLVM_VERSION_GE(13,0)
// Uses FromPrevailing visibility scheme which works for many binary
// formats. We probably could and should use ELF visibility scheme for many of
// our targets, however.
lto::Config conf;
thinLTOResolvePrevailingInIndex(conf, Ret->Index, isPrevailing, recordNewLinkage,
Ret->GUIDPreservedSymbols);
-#else
- thinLTOResolvePrevailingInIndex(Ret->Index, isPrevailing, recordNewLinkage,
- Ret->GUIDPreservedSymbols);
-#endif
+
// Here we calculate an `ExportedGUIDs` set for use in the `isExported`
// callback below. This callback below will dictate the linkage for all
// summaries in the index, and we basically just only want to ensure that dead
@@ -1599,13 +1373,23 @@ LLVMRustThinLTOBufferCreate(LLVMModuleRef M, bool is_thin) {
{
raw_string_ostream OS(Ret->data);
{
- legacy::PassManager PM;
if (is_thin) {
- PM.add(createWriteThinLTOBitcodePass(OS));
+ PassBuilder PB;
+ LoopAnalysisManager LAM;
+ FunctionAnalysisManager FAM;
+ CGSCCAnalysisManager CGAM;
+ ModuleAnalysisManager MAM;
+ PB.registerModuleAnalyses(MAM);
+ PB.registerCGSCCAnalyses(CGAM);
+ PB.registerFunctionAnalyses(FAM);
+ PB.registerLoopAnalyses(LAM);
+ PB.crossRegisterProxies(LAM, FAM, CGAM, MAM);
+ ModulePassManager MPM;
+ MPM.addPass(ThinLTOBitcodeWriterPass(OS, nullptr));
+ MPM.run(*unwrap(M), MAM);
} else {
- PM.add(createBitcodeWriterPass(OS));
+ WriteBitcodeToFile(*unwrap(M), OS);
}
- PM.run(*unwrap(M));
}
}
return Ret.release();
diff --git a/compiler/rustc_llvm/llvm-wrapper/RustWrapper.cpp b/compiler/rustc_llvm/llvm-wrapper/RustWrapper.cpp
index 5f5b5de79..6f36281af 100644
--- a/compiler/rustc_llvm/llvm-wrapper/RustWrapper.cpp
+++ b/compiler/rustc_llvm/llvm-wrapper/RustWrapper.cpp
@@ -12,7 +12,7 @@
#include "llvm/Object/COFFImportFile.h"
#include "llvm/Object/ObjectFile.h"
#include "llvm/Pass.h"
-#include "llvm/Bitcode/BitcodeWriterPass.h"
+#include "llvm/Bitcode/BitcodeWriter.h"
#include "llvm/Support/Signals.h"
#include "llvm/ADT/Optional.h"
@@ -406,51 +406,6 @@ extern "C" LLVMValueRef LLVMRustBuildAtomicStore(LLVMBuilderRef B,
return wrap(SI);
}
-// FIXME: Use the C-API LLVMBuildAtomicCmpXchg and LLVMSetWeak
-// once we raise our minimum support to LLVM 10.
-extern "C" LLVMValueRef
-LLVMRustBuildAtomicCmpXchg(LLVMBuilderRef B, LLVMValueRef Target,
- LLVMValueRef Old, LLVMValueRef Source,
- LLVMAtomicOrdering Order,
- LLVMAtomicOrdering FailureOrder, LLVMBool Weak) {
-#if LLVM_VERSION_GE(13,0)
- // Rust probably knows the alignment of the target value and should be able to
- // specify something more precise than MaybeAlign here. See also
- // https://reviews.llvm.org/D97224 which may be a useful reference.
- AtomicCmpXchgInst *ACXI = unwrap(B)->CreateAtomicCmpXchg(
- unwrap(Target), unwrap(Old), unwrap(Source), llvm::MaybeAlign(), fromRust(Order),
- fromRust(FailureOrder));
-#else
- AtomicCmpXchgInst *ACXI = unwrap(B)->CreateAtomicCmpXchg(
- unwrap(Target), unwrap(Old), unwrap(Source), fromRust(Order),
- fromRust(FailureOrder));
-#endif
- ACXI->setWeak(Weak);
- return wrap(ACXI);
-}
-
-enum class LLVMRustSynchronizationScope {
- SingleThread,
- CrossThread,
-};
-
-static SyncScope::ID fromRust(LLVMRustSynchronizationScope Scope) {
- switch (Scope) {
- case LLVMRustSynchronizationScope::SingleThread:
- return SyncScope::SingleThread;
- case LLVMRustSynchronizationScope::CrossThread:
- return SyncScope::System;
- default:
- report_fatal_error("bad SynchronizationScope.");
- }
-}
-
-extern "C" LLVMValueRef
-LLVMRustBuildAtomicFence(LLVMBuilderRef B, LLVMAtomicOrdering Order,
- LLVMRustSynchronizationScope Scope) {
- return wrap(unwrap(B)->CreateFence(fromRust(Order), fromRust(Scope)));
-}
-
enum class LLVMRustAsmDialect {
Att,
Intel,
@@ -472,19 +427,11 @@ LLVMRustInlineAsm(LLVMTypeRef Ty, char *AsmString, size_t AsmStringLen,
char *Constraints, size_t ConstraintsLen,
LLVMBool HasSideEffects, LLVMBool IsAlignStack,
LLVMRustAsmDialect Dialect, LLVMBool CanThrow) {
-#if LLVM_VERSION_GE(13, 0)
return wrap(InlineAsm::get(unwrap<FunctionType>(Ty),
StringRef(AsmString, AsmStringLen),
StringRef(Constraints, ConstraintsLen),
HasSideEffects, IsAlignStack,
fromRust(Dialect), CanThrow));
-#else
- return wrap(InlineAsm::get(unwrap<FunctionType>(Ty),
- StringRef(AsmString, AsmStringLen),
- StringRef(Constraints, ConstraintsLen),
- HasSideEffects, IsAlignStack,
- fromRust(Dialect)));
-#endif
}
extern "C" bool LLVMRustInlineAsmVerify(LLVMTypeRef Ty, char *Constraints,
@@ -924,6 +871,30 @@ extern "C" LLVMMetadataRef LLVMRustDIBuilderCreateVariantMemberType(
fromRust(Flags), unwrapDI<DIType>(Ty)));
}
+extern "C" LLVMMetadataRef LLVMRustDIBuilderCreateStaticMemberType(
+ LLVMRustDIBuilderRef Builder,
+ LLVMMetadataRef Scope,
+ const char *Name,
+ size_t NameLen,
+ LLVMMetadataRef File,
+ unsigned LineNo,
+ LLVMMetadataRef Ty,
+ LLVMRustDIFlags Flags,
+ LLVMValueRef val,
+ uint32_t AlignInBits
+) {
+ return wrap(Builder->createStaticMemberType(
+ unwrapDI<DIDescriptor>(Scope),
+ StringRef(Name, NameLen),
+ unwrapDI<DIFile>(File),
+ LineNo,
+ unwrapDI<DIType>(Ty),
+ fromRust(Flags),
+ unwrap<llvm::ConstantInt>(val),
+ AlignInBits
+ ));
+}
+
extern "C" LLVMMetadataRef LLVMRustDIBuilderCreateLexicalBlock(
LLVMRustDIBuilderRef Builder, LLVMMetadataRef Scope,
LLVMMetadataRef File, unsigned Line, unsigned Col) {
@@ -1250,10 +1221,8 @@ static LLVMRustDiagnosticKind toRust(DiagnosticKind Kind) {
return LLVMRustDiagnosticKind::Linker;
case DK_Unsupported:
return LLVMRustDiagnosticKind::Unsupported;
-#if LLVM_VERSION_GE(13, 0)
case DK_SrcMgr:
return LLVMRustDiagnosticKind::SrcMgr;
-#endif
default:
return (Kind >= DK_FirstRemark && Kind <= DK_LastRemark)
? LLVMRustDiagnosticKind::OptimizationRemarkOther
@@ -1327,30 +1296,11 @@ extern "C" LLVMTypeKind LLVMRustGetTypeKind(LLVMTypeRef Ty) {
DEFINE_SIMPLE_CONVERSION_FUNCTIONS(SMDiagnostic, LLVMSMDiagnosticRef)
-#if LLVM_VERSION_LT(13, 0)
-using LLVMInlineAsmDiagHandlerTy = LLVMContext::InlineAsmDiagHandlerTy;
-#else
-using LLVMInlineAsmDiagHandlerTy = void*;
-#endif
-
-extern "C" void LLVMRustSetInlineAsmDiagnosticHandler(
- LLVMContextRef C, LLVMInlineAsmDiagHandlerTy H, void *CX) {
- // Diagnostic handlers were unified in LLVM change 5de2d189e6ad, so starting
- // with LLVM 13 this function is gone.
-#if LLVM_VERSION_LT(13, 0)
- unwrap(C)->setInlineAsmDiagnosticHandler(H, CX);
-#endif
-}
-
extern "C" LLVMSMDiagnosticRef LLVMRustGetSMDiagnostic(
LLVMDiagnosticInfoRef DI, unsigned *Cookie) {
-#if LLVM_VERSION_GE(13, 0)
llvm::DiagnosticInfoSrcMgr *SM = static_cast<llvm::DiagnosticInfoSrcMgr *>(unwrap(DI));
*Cookie = SM->getLocCookie();
return wrap(&SM->getSMDiag());
-#else
- report_fatal_error("Shouldn't get called on older versions");
-#endif
}
extern "C" bool LLVMRustUnpackSMDiagnostic(LLVMSMDiagnosticRef DRef,
@@ -1629,6 +1579,14 @@ extern "C" LLVMValueRef LLVMRustConstInBoundsGEP2(LLVMTypeRef Ty,
return wrap(ConstantExpr::getInBoundsGetElementPtr(unwrap(Ty), Val, IdxList));
}
+extern "C" bool LLVMRustConstIntGetZExtValue(LLVMValueRef CV, uint64_t *value) {
+ auto C = unwrap<llvm::ConstantInt>(CV);
+ if (C->getBitWidth() > 64)
+ return false;
+ *value = C->getZExtValue();
+ return true;
+}
+
// Returns true if both high and low were successfully set. Fails in case the constant wasn't
// any of the common sizes (1, 8, 16, 32, 64, 128 bits).
extern "C" bool LLVMRustConstInt128Get(LLVMValueRef CV, bool sext, uint64_t *high, uint64_t *low)
@@ -1712,11 +1670,7 @@ LLVMRustModuleBufferCreate(LLVMModuleRef M) {
auto Ret = std::make_unique<LLVMRustModuleBuffer>();
{
raw_string_ostream OS(Ret->data);
- {
- legacy::PassManager PM;
- PM.add(createBitcodeWriterPass(OS));
- PM.run(*unwrap(M));
- }
+ WriteBitcodeToFile(*unwrap(M), OS);
}
return Ret.release();
}
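
For reference, the `LLVMRustConstIntGetZExtValue` helper added to RustWrapper.cpp above only reports constants whose bit width fits in 64 bits. A minimal sketch of how the Rust side might bind and wrap it follows; the type and function names are illustrative, and the real FFI declaration in rustc_codegen_llvm may differ.

    // Illustrative Rust-side binding for the helper above; names are hypothetical
    // and the real declaration in rustc_codegen_llvm may differ.
    #[repr(C)]
    pub struct Value([u8; 0]); // opaque stand-in for LLVMValueRef

    extern "C" {
        fn LLVMRustConstIntGetZExtValue(cv: *const Value, value: *mut u64) -> bool;
    }

    /// Returns `Some(v)` only when the constant's bit width is at most 64 bits,
    /// mirroring the early `return false` in the C++ helper.
    pub fn const_int_zext_value(cv: *const Value) -> Option<u64> {
        let mut value = 0u64;
        if unsafe { LLVMRustConstIntGetZExtValue(cv, &mut value) } { Some(value) } else { None }
    }
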
diff --git a/compiler/rustc_llvm/src/lib.rs b/compiler/rustc_llvm/src/lib.rs
index 8eade02a4..8542dcf5b 100644
--- a/compiler/rustc_llvm/src/lib.rs
+++ b/compiler/rustc_llvm/src/lib.rs
@@ -1,3 +1,5 @@
+#![deny(rustc::untranslatable_diagnostic)]
+#![deny(rustc::diagnostic_outside_of_impl)]
#![doc(html_root_url = "https://doc.rust-lang.org/nightly/nightly-rustc/")]
// NOTE: This crate only exists to allow linking on mingw targets.
diff --git a/compiler/rustc_log/src/lib.rs b/compiler/rustc_log/src/lib.rs
index f2ec80b0c..458f5e87b 100644
--- a/compiler/rustc_log/src/lib.rs
+++ b/compiler/rustc_log/src/lib.rs
@@ -38,6 +38,9 @@
//! debugging, you can make changes inside those crates and quickly run main.rs
//! to read the debug logs.
+#![deny(rustc::untranslatable_diagnostic)]
+#![deny(rustc::diagnostic_outside_of_impl)]
+
use std::env::{self, VarError};
use std::fmt::{self, Display};
use std::io;
diff --git a/compiler/rustc_macros/Cargo.toml b/compiler/rustc_macros/Cargo.toml
index 25b3aadc1..547c8debb 100644
--- a/compiler/rustc_macros/Cargo.toml
+++ b/compiler/rustc_macros/Cargo.toml
@@ -7,7 +7,7 @@ edition = "2021"
proc-macro = true
[dependencies]
-annotate-snippets = "0.8.0"
+annotate-snippets = "0.9"
fluent-bundle = "0.15.2"
fluent-syntax = "0.11"
synstructure = "0.12.1"
diff --git a/compiler/rustc_macros/src/diagnostics/diagnostic.rs b/compiler/rustc_macros/src/diagnostics/diagnostic.rs
index 6b5b8b593..ef1985b96 100644
--- a/compiler/rustc_macros/src/diagnostics/diagnostic.rs
+++ b/compiler/rustc_macros/src/diagnostics/diagnostic.rs
@@ -2,123 +2,77 @@
use crate::diagnostics::diagnostic_builder::{DiagnosticDeriveBuilder, DiagnosticDeriveKind};
use crate::diagnostics::error::{span_err, DiagnosticDeriveError};
-use crate::diagnostics::utils::{build_field_mapping, SetOnce};
+use crate::diagnostics::utils::SetOnce;
use proc_macro2::TokenStream;
use quote::quote;
-use syn::spanned::Spanned;
use synstructure::Structure;
/// The central struct for constructing the `into_diagnostic` method from an annotated struct.
-pub(crate) struct SessionDiagnosticDerive<'a> {
+pub(crate) struct DiagnosticDerive<'a> {
structure: Structure<'a>,
- sess: syn::Ident,
builder: DiagnosticDeriveBuilder,
}
-impl<'a> SessionDiagnosticDerive<'a> {
- pub(crate) fn new(diag: syn::Ident, sess: syn::Ident, structure: Structure<'a>) -> Self {
+impl<'a> DiagnosticDerive<'a> {
+ pub(crate) fn new(diag: syn::Ident, handler: syn::Ident, structure: Structure<'a>) -> Self {
Self {
builder: DiagnosticDeriveBuilder {
diag,
- fields: build_field_mapping(&structure),
- kind: None,
- code: None,
- slug: None,
+ kind: DiagnosticDeriveKind::Diagnostic { handler },
},
- sess,
structure,
}
}
pub(crate) fn into_tokens(self) -> TokenStream {
- let SessionDiagnosticDerive { mut structure, sess, mut builder } = self;
-
- let ast = structure.ast();
- let (implementation, param_ty) = {
- if let syn::Data::Struct(..) = ast.data {
- let preamble = builder.preamble(&structure);
- let (attrs, args) = builder.body(&mut structure);
-
- let span = ast.span().unwrap();
- let diag = &builder.diag;
- let init = match (builder.kind.value(), builder.slug.value()) {
- (None, _) => {
- span_err(span, "diagnostic kind not specified")
- .help("use the `#[error(...)]` attribute to create an error")
- .emit();
- return DiagnosticDeriveError::ErrorHandled.to_compile_error();
- }
- (Some(kind), None) => {
- span_err(span, "diagnostic slug not specified")
- .help(&format!(
- "specify the slug as the first argument to the attribute, such as \
- `#[{}(typeck::example_error)]`",
- kind.descr()
- ))
- .emit();
- return DiagnosticDeriveError::ErrorHandled.to_compile_error();
- }
- (Some(DiagnosticDeriveKind::Lint), _) => {
- span_err(span, "only `#[error(..)]` and `#[warning(..)]` are supported")
- .help("use the `#[error(...)]` attribute to create a error")
- .emit();
- return DiagnosticDeriveError::ErrorHandled.to_compile_error();
- }
- (Some(DiagnosticDeriveKind::Error), Some(slug)) => {
- quote! {
- let mut #diag = #sess.struct_err(rustc_errors::fluent::#slug);
- }
- }
- (Some(DiagnosticDeriveKind::Warn), Some(slug)) => {
- quote! {
- let mut #diag = #sess.struct_warn(rustc_errors::fluent::#slug);
- }
- }
- };
-
- let implementation = quote! {
- #init
- #preamble
- match self {
- #attrs
- }
- match self {
- #args
- }
- #diag
- };
- let param_ty = match builder.kind {
- Some((DiagnosticDeriveKind::Error, _)) => {
- quote! { rustc_errors::ErrorGuaranteed }
- }
- Some((DiagnosticDeriveKind::Lint | DiagnosticDeriveKind::Warn, _)) => {
- quote! { () }
+ let DiagnosticDerive { mut structure, mut builder } = self;
+
+ let implementation = builder.each_variant(&mut structure, |mut builder, variant| {
+ let preamble = builder.preamble(&variant);
+ let body = builder.body(&variant);
+
+ let diag = &builder.parent.diag;
+ let DiagnosticDeriveKind::Diagnostic { handler } = &builder.parent.kind else {
+ unreachable!()
+ };
+ let init = match builder.slug.value_ref() {
+ None => {
+ span_err(builder.span, "diagnostic slug not specified")
+ .help(&format!(
+ "specify the slug as the first argument to the `#[diag(...)]` \
+ attribute, such as `#[diag(hir_analysis_example_error)]`",
+ ))
+ .emit();
+ return DiagnosticDeriveError::ErrorHandled.to_compile_error();
+ }
+ Some(slug) => {
+ quote! {
+ let mut #diag = #handler.struct_diagnostic(rustc_errors::fluent::#slug);
}
- _ => unreachable!(),
- };
-
- (implementation, param_ty)
- } else {
- span_err(
- ast.span().unwrap(),
- "`#[derive(SessionDiagnostic)]` can only be used on structs",
- )
- .emit();
-
- let implementation = DiagnosticDeriveError::ErrorHandled.to_compile_error();
- let param_ty = quote! { rustc_errors::ErrorGuaranteed };
- (implementation, param_ty)
+ }
+ };
+
+ let formatting_init = &builder.formatting_init;
+ quote! {
+ #init
+ #formatting_init
+ #preamble
+ #body
+ #diag
}
- };
+ });
+ let DiagnosticDeriveKind::Diagnostic { handler } = &builder.kind else { unreachable!() };
structure.gen_impl(quote! {
- gen impl<'__session_diagnostic_sess> rustc_session::SessionDiagnostic<'__session_diagnostic_sess, #param_ty>
+ gen impl<'__diagnostic_handler_sess, G>
+ rustc_errors::IntoDiagnostic<'__diagnostic_handler_sess, G>
for @Self
+ where G: rustc_errors::EmissionGuarantee
{
fn into_diagnostic(
self,
- #sess: &'__session_diagnostic_sess rustc_session::parse::ParseSess
- ) -> rustc_errors::DiagnosticBuilder<'__session_diagnostic_sess, #param_ty> {
+ #handler: &'__diagnostic_handler_sess rustc_errors::Handler
+ ) -> rustc_errors::DiagnosticBuilder<'__diagnostic_handler_sess, G> {
use rustc_errors::IntoDiagnosticArg;
#implementation
}
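
To make the new shape concrete: for a hypothetical annotated struct, the tokens assembled above expand to roughly the impl sketched below. The slug, error code and field names are invented, and the lifetime names and exact token order are simplified relative to the real expansion.

    // Hypothetical input:
    //
    //     #[derive(Diagnostic)]
    //     #[diag(example_not_found, code = "E0123")]
    //     pub struct NotFound {
    //         #[primary_span]
    //         pub span: Span,
    //         pub name: String,
    //     }
    //
    // Approximate expansion:
    impl<'a, G> rustc_errors::IntoDiagnostic<'a, G> for NotFound
    where
        G: rustc_errors::EmissionGuarantee,
    {
        fn into_diagnostic(
            self,
            handler: &'a rustc_errors::Handler,
        ) -> rustc_errors::DiagnosticBuilder<'a, G> {
            use rustc_errors::IntoDiagnosticArg;
            match self {
                NotFound { span, name } => {
                    let mut diag =
                        handler.struct_diagnostic(rustc_errors::fluent::example_not_found);
                    diag.code(rustc_errors::DiagnosticId::Error("E0123".to_string()));
                    diag.set_arg("name", name);
                    diag.set_span(span);
                    diag
                }
            }
        }
    }
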
@@ -136,13 +90,7 @@ pub(crate) struct LintDiagnosticDerive<'a> {
impl<'a> LintDiagnosticDerive<'a> {
pub(crate) fn new(diag: syn::Ident, structure: Structure<'a>) -> Self {
Self {
- builder: DiagnosticDeriveBuilder {
- diag,
- fields: build_field_mapping(&structure),
- kind: None,
- code: None,
- slug: None,
- },
+ builder: DiagnosticDeriveBuilder { diag, kind: DiagnosticDeriveKind::LintDiagnostic },
structure,
}
}
@@ -150,75 +98,52 @@ impl<'a> LintDiagnosticDerive<'a> {
pub(crate) fn into_tokens(self) -> TokenStream {
let LintDiagnosticDerive { mut structure, mut builder } = self;
- let ast = structure.ast();
- let implementation = {
- if let syn::Data::Struct(..) = ast.data {
- let preamble = builder.preamble(&structure);
- let (attrs, args) = builder.body(&mut structure);
-
- let diag = &builder.diag;
- let span = ast.span().unwrap();
- let init = match (builder.kind.value(), builder.slug.value()) {
- (None, _) => {
- span_err(span, "diagnostic kind not specified")
- .help("use the `#[error(...)]` attribute to create an error")
- .emit();
- return DiagnosticDeriveError::ErrorHandled.to_compile_error();
- }
- (Some(kind), None) => {
- span_err(span, "diagnostic slug not specified")
- .help(&format!(
- "specify the slug as the first argument to the attribute, such as \
- `#[{}(typeck::example_error)]`",
- kind.descr()
- ))
- .emit();
- return DiagnosticDeriveError::ErrorHandled.to_compile_error();
- }
- (Some(DiagnosticDeriveKind::Error | DiagnosticDeriveKind::Warn), _) => {
- span_err(span, "only `#[lint(..)]` is supported")
- .help("use the `#[lint(...)]` attribute to create a lint")
- .emit();
- return DiagnosticDeriveError::ErrorHandled.to_compile_error();
- }
- (Some(DiagnosticDeriveKind::Lint), Some(slug)) => {
- quote! {
- let mut #diag = #diag.build(rustc_errors::fluent::#slug);
- }
- }
- };
-
- let implementation = quote! {
- #init
- #preamble
- match self {
- #attrs
- }
- match self {
- #args
- }
- #diag.emit();
- };
-
- implementation
- } else {
- span_err(
- ast.span().unwrap(),
- "`#[derive(LintDiagnostic)]` can only be used on structs",
- )
- .emit();
-
- DiagnosticDeriveError::ErrorHandled.to_compile_error()
+ let implementation = builder.each_variant(&mut structure, |mut builder, variant| {
+ let preamble = builder.preamble(&variant);
+ let body = builder.body(&variant);
+
+ let diag = &builder.parent.diag;
+ let formatting_init = &builder.formatting_init;
+ quote! {
+ #preamble
+ #formatting_init
+ #body
+ #diag
+ }
+ });
+
+ let msg = builder.each_variant(&mut structure, |mut builder, variant| {
+ // Collect the slug by generating the preamble.
+ let _ = builder.preamble(&variant);
+
+ match builder.slug.value_ref() {
+ None => {
+ span_err(builder.span, "diagnostic slug not specified")
+ .help(&format!(
+ "specify the slug as the first argument to the attribute, such as \
+ `#[diag(compiletest_example)]`",
+ ))
+ .emit();
+ return DiagnosticDeriveError::ErrorHandled.to_compile_error();
+ }
+ Some(slug) => quote! { rustc_errors::fluent::#slug.into() },
}
- };
+ });
let diag = &builder.diag;
structure.gen_impl(quote! {
gen impl<'__a> rustc_errors::DecorateLint<'__a, ()> for @Self {
- fn decorate_lint(self, #diag: rustc_errors::LintDiagnosticBuilder<'__a, ()>) {
+ fn decorate_lint<'__b>(
+ self,
+ #diag: &'__b mut rustc_errors::DiagnosticBuilder<'__a, ()>
+ ) -> &'__b mut rustc_errors::DiagnosticBuilder<'__a, ()> {
use rustc_errors::IntoDiagnosticArg;
#implementation
}
+
+ fn msg(&self) -> rustc_errors::DiagnosticMessage {
+ #msg
+ }
}
})
}
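
The lint side follows the same per-variant pattern: for a hypothetical lint struct the derive now produces roughly the impl below, with `decorate_lint` borrowing the builder mutably and a separate `msg` accessor returning the primary Fluent message. Names are invented and the expansion is simplified.

    // Hypothetical input:
    //
    //     #[derive(LintDiagnostic)]
    //     #[diag(lint_example_unused_thing)]
    //     pub struct UnusedThing { pub name: String }
    //
    // Approximate expansion:
    impl<'a> rustc_errors::DecorateLint<'a, ()> for UnusedThing {
        fn decorate_lint<'b>(
            self,
            diag: &'b mut rustc_errors::DiagnosticBuilder<'a, ()>,
        ) -> &'b mut rustc_errors::DiagnosticBuilder<'a, ()> {
            use rustc_errors::IntoDiagnosticArg;
            match self {
                UnusedThing { name } => {
                    diag.set_arg("name", name);
                    diag
                }
            }
        }

        fn msg(&self) -> rustc_errors::DiagnosticMessage {
            match self {
                UnusedThing { .. } => rustc_errors::fluent::lint_example_unused_thing.into(),
            }
        }
    }
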
diff --git a/compiler/rustc_macros/src/diagnostics/diagnostic_builder.rs b/compiler/rustc_macros/src/diagnostics/diagnostic_builder.rs
index 6c9561925..3ea83fd09 100644
--- a/compiler/rustc_macros/src/diagnostics/diagnostic_builder.rs
+++ b/compiler/rustc_macros/src/diagnostics/diagnostic_builder.rs
@@ -5,71 +5,124 @@ use crate::diagnostics::error::{
DiagnosticDeriveError,
};
use crate::diagnostics::utils::{
- report_error_if_not_applied_to_span, report_type_error, type_is_unit, type_matches_path,
- Applicability, FieldInfo, FieldInnerTy, HasFieldMap, SetOnce,
+ build_field_mapping, is_doc_comment, report_error_if_not_applied_to_span, report_type_error,
+ should_generate_set_arg, type_is_unit, type_matches_path, FieldInfo, FieldInnerTy, FieldMap,
+ HasFieldMap, SetOnce, SpannedOption, SubdiagnosticKind,
};
use proc_macro2::{Ident, Span, TokenStream};
use quote::{format_ident, quote};
-use std::collections::HashMap;
-use std::str::FromStr;
use syn::{
- parse_quote, spanned::Spanned, Attribute, Field, Meta, MetaList, MetaNameValue, NestedMeta,
- Path, Type,
+ parse_quote, spanned::Spanned, Attribute, Meta, MetaList, MetaNameValue, NestedMeta, Path, Type,
};
-use synstructure::{BindingInfo, Structure};
+use synstructure::{BindingInfo, Structure, VariantInfo};
-/// What kind of diagnostic is being derived - an error, a warning or a lint?
-#[derive(Copy, Clone)]
+/// What kind of diagnostic is being derived - a fatal/error/warning or a lint?
+#[derive(Clone, PartialEq, Eq)]
pub(crate) enum DiagnosticDeriveKind {
- /// `#[error(..)]`
- Error,
- /// `#[warn(..)]`
- Warn,
- /// `#[lint(..)]`
- Lint,
+ Diagnostic { handler: syn::Ident },
+ LintDiagnostic,
}
-impl DiagnosticDeriveKind {
- /// Returns human-readable string corresponding to the kind.
- pub fn descr(&self) -> &'static str {
- match self {
- DiagnosticDeriveKind::Error => "error",
- DiagnosticDeriveKind::Warn => "warning",
- DiagnosticDeriveKind::Lint => "lint",
- }
- }
-}
-
-/// Tracks persistent information required for building up individual calls to diagnostic methods
-/// for generated diagnostic derives - both `SessionDiagnostic` for errors/warnings and
-/// `LintDiagnostic` for lints.
+/// Tracks persistent information required for the entire type when building up individual calls to
+/// diagnostic methods for generated diagnostic derives - both `Diagnostic` for
+/// fatal/errors/warnings and `LintDiagnostic` for lints.
pub(crate) struct DiagnosticDeriveBuilder {
/// The identifier to use for the generated `DiagnosticBuilder` instance.
pub diag: syn::Ident,
+ /// Kind of diagnostic that should be derived.
+ pub kind: DiagnosticDeriveKind,
+}
+
+/// Tracks persistent information required for a specific variant when building up individual calls
+/// to diagnostic methods for generated diagnostic derives - both `Diagnostic` for
+/// fatal/errors/warnings and `LintDiagnostic` for lints.
+pub(crate) struct DiagnosticDeriveVariantBuilder<'parent> {
+ /// The parent builder for the entire type.
+ pub parent: &'parent DiagnosticDeriveBuilder,
+
+ /// Initialization of format strings for code suggestions.
+ pub formatting_init: TokenStream,
+
+ /// Span of the struct or the enum variant.
+ pub span: proc_macro::Span,
/// Store a map of field name to its corresponding field. This is built on construction of the
/// derive builder.
- pub fields: HashMap<String, TokenStream>,
+ pub field_map: FieldMap,
- /// Kind of diagnostic requested via the struct attribute.
- pub kind: Option<(DiagnosticDeriveKind, proc_macro::Span)>,
/// Slug is a mandatory part of the struct attribute as corresponds to the Fluent message that
/// has the actual diagnostic message.
- pub slug: Option<(Path, proc_macro::Span)>,
+ pub slug: SpannedOption<Path>,
    /// Error codes are an optional part of the struct attribute - this is only set to detect
/// multiple specifications.
- pub code: Option<(String, proc_macro::Span)>,
+ pub code: SpannedOption<()>,
}
-impl HasFieldMap for DiagnosticDeriveBuilder {
+impl<'a> HasFieldMap for DiagnosticDeriveVariantBuilder<'a> {
fn get_field_binding(&self, field: &String) -> Option<&TokenStream> {
- self.fields.get(field)
+ self.field_map.get(field)
}
}
impl DiagnosticDeriveBuilder {
- pub fn preamble<'s>(&mut self, structure: &Structure<'s>) -> TokenStream {
+ /// Call `f` for the struct or for each variant of the enum, returning a `TokenStream` with the
+    /// tokens from `f` wrapped in a `match` expression. Emits errors for use of derive on unions
+ /// or attributes on the type itself when input is an enum.
+ pub fn each_variant<'s, F>(&mut self, structure: &mut Structure<'s>, f: F) -> TokenStream
+ where
+ F: for<'a, 'v> Fn(DiagnosticDeriveVariantBuilder<'a>, &VariantInfo<'v>) -> TokenStream,
+ {
let ast = structure.ast();
+ let span = ast.span().unwrap();
+ match ast.data {
+ syn::Data::Struct(..) | syn::Data::Enum(..) => (),
+ syn::Data::Union(..) => {
+ span_err(span, "diagnostic derives can only be used on structs and enums");
+ }
+ }
+
+ if matches!(ast.data, syn::Data::Enum(..)) {
+ for attr in &ast.attrs {
+ span_err(
+ attr.span().unwrap(),
+ "unsupported type attribute for diagnostic derive enum",
+ )
+ .emit();
+ }
+ }
+
+ structure.bind_with(|_| synstructure::BindStyle::Move);
+ let variants = structure.each_variant(|variant| {
+ let span = match structure.ast().data {
+ syn::Data::Struct(..) => span,
+ // There isn't a good way to get the span of the variant, so the variant's
+ // name will need to do.
+ _ => variant.ast().ident.span().unwrap(),
+ };
+ let builder = DiagnosticDeriveVariantBuilder {
+ parent: &self,
+ span,
+ field_map: build_field_mapping(variant),
+ formatting_init: TokenStream::new(),
+ slug: None,
+ code: None,
+ };
+ f(builder, variant)
+ });
+
+ quote! {
+ match self {
+ #variants
+ }
+ }
+ }
+}
+
+impl<'a> DiagnosticDeriveVariantBuilder<'a> {
+ /// Generates calls to `code` and similar functions based on the attributes on the type or
+ /// variant.
+ pub fn preamble<'s>(&mut self, variant: &VariantInfo<'s>) -> TokenStream {
+ let ast = variant.ast();
let attrs = &ast.attrs;
let preamble = attrs.iter().map(|attr| {
self.generate_structure_code_for_attr(attr).unwrap_or_else(|v| v.to_compile_error())
@@ -80,167 +133,121 @@ impl DiagnosticDeriveBuilder {
}
}
- pub fn body<'s>(&mut self, structure: &mut Structure<'s>) -> (TokenStream, TokenStream) {
- // Keep track of which fields need to be handled with a by-move binding.
- let mut needs_moved = std::collections::HashSet::new();
-
- // Generates calls to `span_label` and similar functions based on the attributes
- // on fields. Code for suggestions uses formatting machinery and the value of
- // other fields - because any given field can be referenced multiple times, it
- // should be accessed through a borrow. When passing fields to `add_subdiagnostic`
- // or `set_arg` (which happens below) for Fluent, we want to move the data, so that
- // has to happen in a separate pass over the fields.
- let attrs = structure
- .clone()
- .filter(|field_binding| {
- let ast = &field_binding.ast();
- !self.needs_move(ast) || {
- needs_moved.insert(field_binding.binding.clone());
- false
- }
- })
- .each(|field_binding| self.generate_field_attrs_code(field_binding));
-
- structure.bind_with(|_| synstructure::BindStyle::Move);
- // When a field has attributes like `#[label]` or `#[note]` then it doesn't
- // need to be passed as an argument to the diagnostic. But when a field has no
- // attributes or a `#[subdiagnostic]` attribute then it must be passed as an
- // argument to the diagnostic so that it can be referred to by Fluent messages.
- let args = structure
- .filter(|field_binding| needs_moved.contains(&field_binding.binding))
- .each(|field_binding| self.generate_field_attrs_code(field_binding));
-
- (attrs, args)
+ /// Generates calls to `span_label` and similar functions based on the attributes on fields or
+ /// calls to `set_arg` when no attributes are present.
+ pub fn body<'s>(&mut self, variant: &VariantInfo<'s>) -> TokenStream {
+ let mut body = quote! {};
+ // Generate `set_arg` calls first..
+ for binding in variant.bindings().iter().filter(|bi| should_generate_set_arg(bi.ast())) {
+ body.extend(self.generate_field_code(binding));
+ }
+ // ..and then subdiagnostic additions.
+ for binding in variant.bindings().iter().filter(|bi| !should_generate_set_arg(bi.ast())) {
+ body.extend(self.generate_field_attrs_code(binding));
+ }
+ body
}
- /// Returns `true` if `field` should generate a `set_arg` call rather than any other diagnostic
- /// call (like `span_label`).
- fn should_generate_set_arg(&self, field: &Field) -> bool {
- field.attrs.is_empty()
- }
+ /// Parse a `SubdiagnosticKind` from an `Attribute`.
+ fn parse_subdiag_attribute(
+ &self,
+ attr: &Attribute,
+ ) -> Result<Option<(SubdiagnosticKind, Path)>, DiagnosticDeriveError> {
+ let Some((subdiag, slug)) = SubdiagnosticKind::from_attr(attr, self)? else {
+ // Some attributes aren't errors - like documentation comments - but also aren't
+ // subdiagnostics.
+ return Ok(None);
+ };
- /// Returns `true` if `field` needs to have code generated in the by-move branch of the
- /// generated derive rather than the by-ref branch.
- fn needs_move(&self, field: &Field) -> bool {
- let generates_set_arg = self.should_generate_set_arg(field);
- let is_multispan = type_matches_path(&field.ty, &["rustc_errors", "MultiSpan"]);
- // FIXME(davidtwco): better support for one field needing to be in the by-move and
- // by-ref branches.
- let is_subdiagnostic = field
- .attrs
- .iter()
- .map(|attr| attr.path.segments.last().unwrap().ident.to_string())
- .any(|attr| attr == "subdiagnostic");
-
- // `set_arg` calls take their argument by-move..
- generates_set_arg
- // If this is a `MultiSpan` field then it needs to be moved to be used by any
- // attribute..
- || is_multispan
- // If this a `#[subdiagnostic]` then it needs to be moved as the other diagnostic is
- // unlikely to be `Copy`..
- || is_subdiagnostic
+ if let SubdiagnosticKind::MultipartSuggestion { .. } = subdiag {
+ let meta = attr.parse_meta()?;
+ throw_invalid_attr!(attr, &meta, |diag| diag
+ .help("consider creating a `Subdiagnostic` instead"));
+ }
+
+ let slug = slug.unwrap_or_else(|| match subdiag {
+ SubdiagnosticKind::Label => parse_quote! { _subdiag::label },
+ SubdiagnosticKind::Note => parse_quote! { _subdiag::note },
+ SubdiagnosticKind::Help => parse_quote! { _subdiag::help },
+ SubdiagnosticKind::Warn => parse_quote! { _subdiag::warn },
+ SubdiagnosticKind::Suggestion { .. } => parse_quote! { _subdiag::suggestion },
+ SubdiagnosticKind::MultipartSuggestion { .. } => unreachable!(),
+ });
+
+ Ok(Some((subdiag, slug)))
}
/// Establishes state in the `DiagnosticDeriveBuilder` resulting from the struct
- /// attributes like `#[error(..)`, such as the diagnostic kind and slug. Generates
+ /// attributes like `#[diag(..)]`, such as the slug and error code. Generates
/// diagnostic builder calls for setting error code and creating note/help messages.
fn generate_structure_code_for_attr(
&mut self,
attr: &Attribute,
) -> Result<TokenStream, DiagnosticDeriveError> {
- let diag = &self.diag;
- let span = attr.span().unwrap();
+ let diag = &self.parent.diag;
+
+ // Always allow documentation comments.
+ if is_doc_comment(attr) {
+ return Ok(quote! {});
+ }
let name = attr.path.segments.last().unwrap().ident.to_string();
let name = name.as_str();
let meta = attr.parse_meta()?;
- let is_help_note_or_warn = matches!(name, "help" | "note" | "warn_");
-
- let nested = match meta {
- // Most attributes are lists, like `#[error(..)]`/`#[warning(..)]` for most cases or
- // `#[help(..)]`/`#[note(..)]` when the user is specifying a alternative slug.
- Meta::List(MetaList { ref nested, .. }) => nested,
- // Subdiagnostics without spans can be applied to the type too, and these are just
- // paths: `#[help]` and `#[note]`
- Meta::Path(_) if is_help_note_or_warn => {
- let fn_name = if name == "warn_" {
- Ident::new("warn", attr.span())
- } else {
- Ident::new(name, attr.span())
- };
- return Ok(quote! { #diag.#fn_name(rustc_errors::fluent::_subdiag::#fn_name); });
- }
- _ => throw_invalid_attr!(attr, &meta),
- };
-
- // Check the kind before doing any further processing so that there aren't misleading
- // "no kind specified" errors if there are failures later.
- match name {
- "error" => self.kind.set_once((DiagnosticDeriveKind::Error, span)),
- "warning" => self.kind.set_once((DiagnosticDeriveKind::Warn, span)),
- "lint" => self.kind.set_once((DiagnosticDeriveKind::Lint, span)),
- "help" | "note" | "warn_" => (),
- _ => throw_invalid_attr!(attr, &meta, |diag| {
- diag.help(
- "only `error`, `warning`, `help`, `note` and `warn_` are valid attributes",
- )
- }),
- }
+ if name == "diag" {
+ let Meta::List(MetaList { ref nested, .. }) = meta else {
+ throw_invalid_attr!(
+ attr,
+ &meta
+ );
+ };
- // First nested element should always be the path, e.g. `#[error(typeck::invalid)]` or
- // `#[help(typeck::another_help)]`.
- let mut nested_iter = nested.into_iter();
- if let Some(nested_attr) = nested_iter.next() {
- // Report an error if there are any other list items after the path.
- if is_help_note_or_warn && nested_iter.next().is_some() {
- throw_invalid_nested_attr!(attr, &nested_attr, |diag| {
- diag.help(
- "`help`, `note` and `warn_` struct attributes can only have one argument",
- )
- });
- }
+ let mut nested_iter = nested.into_iter().peekable();
- match nested_attr {
- NestedMeta::Meta(Meta::Path(path)) if is_help_note_or_warn => {
- let fn_name = proc_macro2::Ident::new(name, attr.span());
- return Ok(quote! { #diag.#fn_name(rustc_errors::fluent::#path); });
- }
- NestedMeta::Meta(Meta::Path(path)) => {
- self.slug.set_once((path.clone(), span));
+ match nested_iter.peek() {
+ Some(NestedMeta::Meta(Meta::Path(slug))) => {
+ self.slug.set_once(slug.clone(), slug.span().unwrap());
+ nested_iter.next();
}
- NestedMeta::Meta(meta @ Meta::NameValue(_))
- if !is_help_note_or_warn
- && meta.path().segments.last().unwrap().ident == "code" =>
- {
- // don't error for valid follow-up attributes
- }
- nested_attr => throw_invalid_nested_attr!(attr, &nested_attr, |diag| {
- diag.help("first argument of the attribute should be the diagnostic slug")
- }),
+ Some(NestedMeta::Meta(Meta::NameValue { .. })) => {}
+ Some(nested_attr) => throw_invalid_nested_attr!(attr, &nested_attr, |diag| diag
+ .help("a diagnostic slug is required as the first argument")),
+ None => throw_invalid_attr!(attr, &meta, |diag| diag
+ .help("a diagnostic slug is required as the first argument")),
};
- }
- // Remaining attributes are optional, only `code = ".."` at the moment.
- let mut tokens = Vec::new();
- for nested_attr in nested_iter {
- let meta = match nested_attr {
- syn::NestedMeta::Meta(meta) => meta,
- _ => throw_invalid_nested_attr!(attr, &nested_attr),
- };
+ // Remaining attributes are optional, only `code = ".."` at the moment.
+ let mut tokens = TokenStream::new();
+ for nested_attr in nested_iter {
+ let (value, path) = match nested_attr {
+ NestedMeta::Meta(Meta::NameValue(MetaNameValue {
+ lit: syn::Lit::Str(value),
+ path,
+ ..
+ })) => (value, path),
+ NestedMeta::Meta(Meta::Path(_)) => {
+ invalid_nested_attr(attr, &nested_attr)
+ .help("diagnostic slug must be the first argument")
+ .emit();
+ continue;
+ }
+ _ => {
+ invalid_nested_attr(attr, &nested_attr).emit();
+ continue;
+ }
+ };
- let path = meta.path();
- let nested_name = path.segments.last().unwrap().ident.to_string();
- // Struct attributes are only allowed to be applied once, and the diagnostic
- // changes will be set in the initialisation code.
- if let Meta::NameValue(MetaNameValue { lit: syn::Lit::Str(s), .. }) = &meta {
- let span = s.span().unwrap();
+ let nested_name = path.segments.last().unwrap().ident.to_string();
+ // Struct attributes are only allowed to be applied once, and the diagnostic
+ // changes will be set in the initialisation code.
+ let span = value.span().unwrap();
match nested_name.as_str() {
"code" => {
- self.code.set_once((s.value(), span));
- let code = &self.code.as_ref().map(|(v, _)| v);
- tokens.push(quote! {
+ self.code.set_once((), span);
+
+ let code = value.value();
+ tokens.extend(quote! {
#diag.code(rustc_errors::DiagnosticId::Error(#code.to_string()));
});
}
@@ -248,46 +255,68 @@ impl DiagnosticDeriveBuilder {
.help("only `code` is a valid nested attributes following the slug")
.emit(),
}
- } else {
- invalid_nested_attr(attr, &nested_attr).emit()
}
+ return Ok(tokens);
}
- Ok(tokens.drain(..).collect())
+ let Some((subdiag, slug)) = self.parse_subdiag_attribute(attr)? else {
+ // Some attributes aren't errors - like documentation comments - but also aren't
+ // subdiagnostics.
+ return Ok(quote! {});
+ };
+ let fn_ident = format_ident!("{}", subdiag);
+ match subdiag {
+ SubdiagnosticKind::Note | SubdiagnosticKind::Help | SubdiagnosticKind::Warn => {
+ Ok(self.add_subdiagnostic(&fn_ident, slug))
+ }
+ SubdiagnosticKind::Label | SubdiagnosticKind::Suggestion { .. } => {
+ throw_invalid_attr!(attr, &meta, |diag| diag
+ .help("`#[label]` and `#[suggestion]` can only be applied to fields"));
+ }
+ SubdiagnosticKind::MultipartSuggestion { .. } => unreachable!(),
+ }
}
- fn generate_field_attrs_code(&mut self, binding_info: &BindingInfo<'_>) -> TokenStream {
+ fn generate_field_code(&mut self, binding_info: &BindingInfo<'_>) -> TokenStream {
+ let diag = &self.parent.diag;
+
let field = binding_info.ast();
let field_binding = &binding_info.binding;
- if self.should_generate_set_arg(&field) {
- let diag = &self.diag;
- let ident = field.ident.as_ref().unwrap();
- return quote! {
- #diag.set_arg(
- stringify!(#ident),
- #field_binding
- );
- };
+ let ident = field.ident.as_ref().unwrap();
+ let ident = format_ident!("{}", ident); // strip `r#` prefix, if present
+
+ quote! {
+ #diag.set_arg(
+ stringify!(#ident),
+ #field_binding
+ );
}
+ }
+
+ fn generate_field_attrs_code(&mut self, binding_info: &BindingInfo<'_>) -> TokenStream {
+ let field = binding_info.ast();
+ let field_binding = &binding_info.binding;
- let needs_move = self.needs_move(&field);
let inner_ty = FieldInnerTy::from_type(&field.ty);
field
.attrs
.iter()
.map(move |attr| {
+ // Always allow documentation comments.
+ if is_doc_comment(attr) {
+ return quote! {};
+ }
+
let name = attr.path.segments.last().unwrap().ident.to_string();
let needs_clone =
name == "primary_span" && matches!(inner_ty, FieldInnerTy::Vec(_));
let (binding, needs_destructure) = if needs_clone {
// `primary_span` can accept a `Vec<Span>` so don't destructure that.
(quote! { #field_binding.clone() }, false)
- } else if needs_move {
- (quote! { #field_binding }, true)
} else {
- (quote! { *#field_binding }, true)
+ (quote! { #field_binding }, true)
};
let generated_code = self
@@ -317,222 +346,125 @@ impl DiagnosticDeriveBuilder {
info: FieldInfo<'_>,
binding: TokenStream,
) -> Result<TokenStream, DiagnosticDeriveError> {
- let meta = attr.parse_meta()?;
- match meta {
- Meta::Path(_) => self.generate_inner_field_code_path(attr, info, binding),
- Meta::List(MetaList { .. }) => self.generate_inner_field_code_list(attr, info, binding),
- _ => throw_invalid_attr!(attr, &meta),
- }
- }
-
- fn generate_inner_field_code_path(
- &mut self,
- attr: &Attribute,
- info: FieldInfo<'_>,
- binding: TokenStream,
- ) -> Result<TokenStream, DiagnosticDeriveError> {
- assert!(matches!(attr.parse_meta()?, Meta::Path(_)));
- let diag = &self.diag;
-
+ let diag = &self.parent.diag;
let meta = attr.parse_meta()?;
let ident = &attr.path.segments.last().unwrap().ident;
let name = ident.to_string();
- let name = name.as_str();
- match name {
- "skip_arg" => {
- // Don't need to do anything - by virtue of the attribute existing, the
- // `set_arg` call will not be generated.
- Ok(quote! {})
+ match (&meta, name.as_str()) {
+ // Don't need to do anything - by virtue of the attribute existing, the
+ // `set_arg` call will not be generated.
+ (Meta::Path(_), "skip_arg") => return Ok(quote! {}),
+ (Meta::Path(_), "primary_span") => {
+ match self.parent.kind {
+ DiagnosticDeriveKind::Diagnostic { .. } => {
+ report_error_if_not_applied_to_span(attr, &info)?;
+
+ return Ok(quote! {
+ #diag.set_span(#binding);
+ });
+ }
+ DiagnosticDeriveKind::LintDiagnostic => {
+ throw_invalid_attr!(attr, &meta, |diag| {
+ diag.help("the `primary_span` field attribute is not valid for lint diagnostics")
+ })
+ }
+ }
}
- "primary_span" => {
- report_error_if_not_applied_to_span(attr, &info)?;
- Ok(quote! {
- #diag.set_span(#binding);
- })
+ (Meta::Path(_), "subdiagnostic") => {
+ return Ok(quote! { #diag.subdiagnostic(#binding); });
}
- "label" => {
- report_error_if_not_applied_to_span(attr, &info)?;
- Ok(self.add_spanned_subdiagnostic(binding, ident, parse_quote! { _subdiag::label }))
+ (Meta::NameValue(_), "subdiagnostic") => {
+ throw_invalid_attr!(attr, &meta, |diag| {
+ diag.help("`eager` is the only supported nested attribute for `subdiagnostic`")
+ })
}
- "note" | "help" | "warn_" => {
- let warn_ident = Ident::new("warn", Span::call_site());
- let (ident, path) = match name {
- "note" => (ident, parse_quote! { _subdiag::note }),
- "help" => (ident, parse_quote! { _subdiag::help }),
- "warn_" => (&warn_ident, parse_quote! { _subdiag::warn }),
- _ => unreachable!(),
- };
- if type_matches_path(&info.ty, &["rustc_span", "Span"]) {
- Ok(self.add_spanned_subdiagnostic(binding, ident, path))
- } else if type_is_unit(&info.ty) {
- Ok(self.add_subdiagnostic(ident, path))
- } else {
- report_type_error(attr, "`Span` or `()`")?
+ (Meta::List(MetaList { ref nested, .. }), "subdiagnostic") => {
+ if nested.len() != 1 {
+ throw_invalid_attr!(attr, &meta, |diag| {
+ diag.help(
+ "`eager` is the only supported nested attribute for `subdiagnostic`",
+ )
+ })
}
- }
- "subdiagnostic" => Ok(quote! { #diag.subdiagnostic(#binding); }),
- _ => throw_invalid_attr!(attr, &meta, |diag| {
- diag.help(
- "only `skip_arg`, `primary_span`, `label`, `note`, `help` and `subdiagnostic` \
- are valid field attributes",
- )
- }),
- }
- }
- fn generate_inner_field_code_list(
- &mut self,
- attr: &Attribute,
- info: FieldInfo<'_>,
- binding: TokenStream,
- ) -> Result<TokenStream, DiagnosticDeriveError> {
- let meta = attr.parse_meta()?;
- let Meta::List(MetaList { ref path, ref nested, .. }) = meta else { unreachable!() };
+ let handler = match &self.parent.kind {
+ DiagnosticDeriveKind::Diagnostic { handler } => handler,
+ DiagnosticDeriveKind::LintDiagnostic => {
+ throw_invalid_attr!(attr, &meta, |diag| {
+ diag.help("eager subdiagnostics are not supported on lints")
+ })
+ }
+ };
- let ident = &attr.path.segments.last().unwrap().ident;
- let name = path.segments.last().unwrap().ident.to_string();
- let name = name.as_ref();
- match name {
- "suggestion" | "suggestion_short" | "suggestion_hidden" | "suggestion_verbose" => {
- return self.generate_inner_field_code_suggestion(attr, info);
+ let nested_attr = nested.first().expect("pop failed for single element list");
+ match nested_attr {
+ NestedMeta::Meta(meta @ Meta::Path(_))
+ if meta.path().segments.last().unwrap().ident.to_string().as_str()
+ == "eager" =>
+ {
+ return Ok(quote! { #diag.eager_subdiagnostic(#handler, #binding); });
+ }
+ _ => {
+ throw_invalid_nested_attr!(attr, nested_attr, |diag| {
+ diag.help("`eager` is the only supported nested attribute for `subdiagnostic`")
+ })
+ }
+ }
}
- "label" | "help" | "note" | "warn_" => (),
- _ => throw_invalid_attr!(attr, &meta, |diag| {
- diag.help(
- "only `label`, `help`, `note`, `warn` or `suggestion{,_short,_hidden,_verbose}` are \
- valid field attributes",
- )
- }),
+ _ => (),
}
- // For `#[label(..)]`, `#[note(..)]` and `#[help(..)]`, the first nested element must be a
- // path, e.g. `#[label(typeck::label)]`.
- let mut nested_iter = nested.into_iter();
- let msg = match nested_iter.next() {
- Some(NestedMeta::Meta(Meta::Path(path))) => path.clone(),
- Some(nested_attr) => throw_invalid_nested_attr!(attr, &nested_attr),
- None => throw_invalid_attr!(attr, &meta),
+ let Some((subdiag, slug)) = self.parse_subdiag_attribute(attr)? else {
+ // Some attributes aren't errors - like documentation comments - but also aren't
+ // subdiagnostics.
+ return Ok(quote! {});
};
-
- // None of these attributes should have anything following the slug.
- if nested_iter.next().is_some() {
- throw_invalid_attr!(attr, &meta);
- }
-
- match name {
- "label" => {
+ let fn_ident = format_ident!("{}", subdiag);
+ match subdiag {
+ SubdiagnosticKind::Label => {
report_error_if_not_applied_to_span(attr, &info)?;
- Ok(self.add_spanned_subdiagnostic(binding, ident, msg))
- }
- "note" | "help" if type_matches_path(&info.ty, &["rustc_span", "Span"]) => {
- Ok(self.add_spanned_subdiagnostic(binding, ident, msg))
+ Ok(self.add_spanned_subdiagnostic(binding, &fn_ident, slug))
}
- "note" | "help" if type_is_unit(&info.ty) => Ok(self.add_subdiagnostic(ident, msg)),
- // `warn_` must be special-cased because the attribute `warn` already has meaning and
- // so isn't used, despite the diagnostic API being named `warn`.
- "warn_" if type_matches_path(&info.ty, &["rustc_span", "Span"]) => Ok(self
- .add_spanned_subdiagnostic(binding, &Ident::new("warn", Span::call_site()), msg)),
- "warn_" if type_is_unit(&info.ty) => {
- Ok(self.add_subdiagnostic(&Ident::new("warn", Span::call_site()), msg))
- }
- "note" | "help" | "warn_" => report_type_error(attr, "`Span` or `()`")?,
- _ => unreachable!(),
- }
- }
-
- fn generate_inner_field_code_suggestion(
- &mut self,
- attr: &Attribute,
- info: FieldInfo<'_>,
- ) -> Result<TokenStream, DiagnosticDeriveError> {
- let diag = &self.diag;
-
- let mut meta = attr.parse_meta()?;
- let Meta::List(MetaList { ref path, ref mut nested, .. }) = meta else { unreachable!() };
-
- let (span_field, mut applicability) = self.span_and_applicability_of_ty(info)?;
-
- let mut msg = None;
- let mut code = None;
-
- let mut nested_iter = nested.into_iter().peekable();
- if let Some(nested_attr) = nested_iter.peek() {
- if let NestedMeta::Meta(Meta::Path(path)) = nested_attr {
- msg = Some(path.clone());
+ SubdiagnosticKind::Note | SubdiagnosticKind::Help | SubdiagnosticKind::Warn => {
+ if type_matches_path(&info.ty, &["rustc_span", "Span"]) {
+ Ok(self.add_spanned_subdiagnostic(binding, &fn_ident, slug))
+ } else if type_is_unit(&info.ty) {
+ Ok(self.add_subdiagnostic(&fn_ident, slug))
+ } else {
+ report_type_error(attr, "`Span` or `()`")?
+ }
}
- };
- // Move the iterator forward if a path was found (don't otherwise so that
- // code/applicability can be found or an error emitted).
- if msg.is_some() {
- let _ = nested_iter.next();
- }
+ SubdiagnosticKind::Suggestion {
+ suggestion_kind,
+ applicability: static_applicability,
+ code_field,
+ code_init,
+ } => {
+ let (span_field, mut applicability) = self.span_and_applicability_of_ty(info)?;
+
+ if let Some((static_applicability, span)) = static_applicability {
+ applicability.set_once(quote! { #static_applicability }, span);
+ }
- for nested_attr in nested_iter {
- let meta = match nested_attr {
- syn::NestedMeta::Meta(ref meta) => meta,
- syn::NestedMeta::Lit(_) => throw_invalid_nested_attr!(attr, &nested_attr),
- };
+ let applicability = applicability
+ .value()
+ .unwrap_or_else(|| quote! { rustc_errors::Applicability::Unspecified });
+ let style = suggestion_kind.to_suggestion_style();
- let nested_name = meta.path().segments.last().unwrap().ident.to_string();
- let nested_name = nested_name.as_str();
- match meta {
- Meta::NameValue(MetaNameValue { lit: syn::Lit::Str(s), .. }) => {
- let span = meta.span().unwrap();
- match nested_name {
- "code" => {
- let formatted_str = self.build_format(&s.value(), s.span());
- code = Some(formatted_str);
- }
- "applicability" => {
- applicability = match applicability {
- Some(v) => {
- span_err(
- span,
- "applicability cannot be set in both the field and \
- attribute",
- )
- .emit();
- Some(v)
- }
- None => match Applicability::from_str(&s.value()) {
- Ok(v) => Some(quote! { #v }),
- Err(()) => {
- span_err(span, "invalid applicability").emit();
- None
- }
- },
- }
- }
- _ => throw_invalid_nested_attr!(attr, &nested_attr, |diag| {
- diag.help(
- "only `message`, `code` and `applicability` are valid field \
- attributes",
- )
- }),
- }
- }
- _ => throw_invalid_nested_attr!(attr, &nested_attr, |diag| {
- if matches!(meta, Meta::Path(_)) {
- diag.help("a diagnostic slug must be the first argument to the attribute")
- } else {
- diag
- }
- }),
+ self.formatting_init.extend(code_init);
+ Ok(quote! {
+ #diag.span_suggestions_with_style(
+ #span_field,
+ rustc_errors::fluent::#slug,
+ #code_field,
+ #applicability,
+ #style
+ );
+ })
}
+ SubdiagnosticKind::MultipartSuggestion { .. } => unreachable!(),
}
-
- let applicability =
- applicability.unwrap_or_else(|| quote!(rustc_errors::Applicability::Unspecified));
-
- let name = path.segments.last().unwrap().ident.to_string();
- let method = format_ident!("span_{}", name);
-
- let msg = msg.unwrap_or_else(|| parse_quote! { _subdiag::suggestion });
- let msg = quote! { rustc_errors::fluent::#msg };
- let code = code.unwrap_or_else(|| quote! { String::new() });
-
- Ok(quote! { #diag.#method(#span_field, #msg, #code, #applicability); })
}
/// Adds a spanned subdiagnostic by generating a `diag.span_$kind` call with the current slug
@@ -543,7 +475,7 @@ impl DiagnosticDeriveBuilder {
kind: &Ident,
fluent_attr_identifier: Path,
) -> TokenStream {
- let diag = &self.diag;
+ let diag = &self.parent.diag;
let fn_name = format_ident!("span_{}", kind);
quote! {
#diag.#fn_name(
@@ -556,7 +488,7 @@ impl DiagnosticDeriveBuilder {
/// Adds a subdiagnostic by generating a `diag.span_$kind` call with the current slug
/// and `fluent_attr_identifier`.
fn add_subdiagnostic(&self, kind: &Ident, fluent_attr_identifier: Path) -> TokenStream {
- let diag = &self.diag;
+ let diag = &self.parent.diag;
quote! {
#diag.#kind(rustc_errors::fluent::#fluent_attr_identifier);
}
@@ -565,58 +497,49 @@ impl DiagnosticDeriveBuilder {
fn span_and_applicability_of_ty(
&self,
info: FieldInfo<'_>,
- ) -> Result<(TokenStream, Option<TokenStream>), DiagnosticDeriveError> {
+ ) -> Result<(TokenStream, SpannedOption<TokenStream>), DiagnosticDeriveError> {
match &info.ty {
// If `ty` is `Span` w/out applicability, then use `Applicability::Unspecified`.
ty @ Type::Path(..) if type_matches_path(ty, &["rustc_span", "Span"]) => {
let binding = &info.binding.binding;
- Ok((quote!(*#binding), None))
+ Ok((quote!(#binding), None))
}
// If `ty` is `(Span, Applicability)` then return tokens accessing those.
Type::Tuple(tup) => {
let mut span_idx = None;
let mut applicability_idx = None;
+ fn type_err(span: &Span) -> Result<!, DiagnosticDeriveError> {
+ span_err(span.unwrap(), "wrong types for suggestion")
+ .help(
+ "`#[suggestion(...)]` on a tuple field must be applied to fields \
+ of type `(Span, Applicability)`",
+ )
+ .emit();
+ Err(DiagnosticDeriveError::ErrorHandled)
+ }
+
for (idx, elem) in tup.elems.iter().enumerate() {
if type_matches_path(elem, &["rustc_span", "Span"]) {
- if span_idx.is_none() {
- span_idx = Some(syn::Index::from(idx));
- } else {
- throw_span_err!(
- info.span.unwrap(),
- "type of field annotated with `#[suggestion(...)]` contains more \
- than one `Span`"
- );
- }
+ span_idx.set_once(syn::Index::from(idx), elem.span().unwrap());
} else if type_matches_path(elem, &["rustc_errors", "Applicability"]) {
- if applicability_idx.is_none() {
- applicability_idx = Some(syn::Index::from(idx));
- } else {
- throw_span_err!(
- info.span.unwrap(),
- "type of field annotated with `#[suggestion(...)]` contains more \
- than one Applicability"
- );
- }
+ applicability_idx.set_once(syn::Index::from(idx), elem.span().unwrap());
+ } else {
+ type_err(&elem.span())?;
}
}
- if let Some(span_idx) = span_idx {
- let binding = &info.binding.binding;
- let span = quote!(#binding.#span_idx);
- let applicability = applicability_idx
- .map(|applicability_idx| quote!(#binding.#applicability_idx))
- .unwrap_or_else(|| quote!(rustc_errors::Applicability::Unspecified));
-
- return Ok((span, Some(applicability)));
- }
+ let Some((span_idx, _)) = span_idx else {
+ type_err(&tup.span())?;
+ };
+ let Some((applicability_idx, applicability_span)) = applicability_idx else {
+ type_err(&tup.span())?;
+ };
+ let binding = &info.binding.binding;
+ let span = quote!(#binding.#span_idx);
+ let applicability = quote!(#binding.#applicability_idx);
- throw_span_err!(info.span.unwrap(), "wrong types for suggestion", |diag| {
- diag.help(
- "`#[suggestion(...)]` on a tuple field must be applied to fields of type \
- `(Span, Applicability)`",
- )
- });
+ Ok((span, Some((applicability, applicability_span))))
}
// If `ty` isn't a `Span` or `(Span, Applicability)` then emit an error.
_ => throw_span_err!(info.span.unwrap(), "wrong field type for suggestion", |diag| {
diff --git a/compiler/rustc_macros/src/diagnostics/fluent.rs b/compiler/rustc_macros/src/diagnostics/fluent.rs
index 562d5e9f4..3e447c94e 100644
--- a/compiler/rustc_macros/src/diagnostics/fluent.rs
+++ b/compiler/rustc_macros/src/diagnostics/fluent.rs
@@ -25,18 +25,18 @@ use syn::{
use unic_langid::langid;
struct Resource {
- ident: Ident,
+ krate: Ident,
#[allow(dead_code)]
fat_arrow_token: token::FatArrow,
- resource: LitStr,
+ resource_path: LitStr,
}
impl Parse for Resource {
fn parse(input: ParseStream<'_>) -> Result<Self> {
Ok(Resource {
- ident: input.parse()?,
+ krate: input.parse()?,
fat_arrow_token: input.parse()?,
- resource: input.parse()?,
+ resource_path: input.parse()?,
})
}
}
@@ -94,19 +94,20 @@ pub(crate) fn fluent_messages(input: proc_macro::TokenStream) -> proc_macro::Tok
// diagnostics.
let mut previous_defns = HashMap::new();
+ // Set of Fluent attribute names already output, to avoid duplicate type errors - any given
+ // constant created for a given attribute is the same.
+ let mut previous_attrs = HashSet::new();
+
let mut includes = TokenStream::new();
let mut generated = TokenStream::new();
- for res in resources.0 {
- let ident_span = res.ident.span().unwrap();
- let path_span = res.resource.span().unwrap();
- // Set of Fluent attribute names already output, to avoid duplicate type errors - any given
- // constant created for a given attribute is the same.
- let mut previous_attrs = HashSet::new();
+ for res in resources.0 {
+ let krate_span = res.krate.span().unwrap();
+ let path_span = res.resource_path.span().unwrap();
- let relative_ftl_path = res.resource.value();
+ let relative_ftl_path = res.resource_path.value();
let absolute_ftl_path =
- invocation_relative_path_to_absolute(ident_span, &relative_ftl_path);
+ invocation_relative_path_to_absolute(krate_span, &relative_ftl_path);
// As this macro also outputs an `include_str!` for this file, the macro will always be
// re-executed when the file changes.
let mut resource_file = match File::open(absolute_ftl_path) {
@@ -185,19 +186,44 @@ pub(crate) fn fluent_messages(input: proc_macro::TokenStream) -> proc_macro::Tok
let mut constants = TokenStream::new();
for entry in resource.entries() {
- let span = res.ident.span();
+ let span = res.krate.span();
if let Entry::Message(Message { id: Identifier { name }, attributes, .. }) = entry {
- let _ = previous_defns.entry(name.to_string()).or_insert(ident_span);
+ let _ = previous_defns.entry(name.to_string()).or_insert(path_span);
+
+ if name.contains('-') {
+ Diagnostic::spanned(
+ path_span,
+ Level::Error,
+ format!("name `{name}` contains a '-' character"),
+ )
+ .help("replace any '-'s with '_'s")
+ .emit();
+ }
+
+ // Require that the message name starts with the crate name
+ // `hir_typeck_foo_bar` (in `hir_typeck.ftl`)
+ // `const_eval_baz` (in `const_eval.ftl`)
+ // `const-eval-hyphen-having` => `hyphen_having` (in `const_eval.ftl`)
+            // We error about the last case above, but fall back gracefully so that only
+            // that error is emitted and not an additional one about the macro itself
+            // failing.
+ let crate_prefix = format!("{}_", res.krate);
+
+ let snake_name = name.replace('-', "_");
+ if !snake_name.starts_with(&crate_prefix) {
+ Diagnostic::spanned(
+ path_span,
+ Level::Error,
+ format!("name `{name}` does not start with the crate name"),
+ )
+ .help(format!(
+ "prepend `{crate_prefix}` to the slug name: `{crate_prefix}{snake_name}`"
+ ))
+ .emit();
+ };
+
+ let snake_name = Ident::new(&snake_name, span);
- // `typeck-foo-bar` => `foo_bar` (in `typeck.ftl`)
- // `const-eval-baz` => `baz` (in `const_eval.ftl`)
- let snake_name = Ident::new(
- // FIXME: should probably trim prefix, not replace all occurrences
- &name
- .replace(&format!("{}-", res.ident).replace('_', "-"), "")
- .replace('-', "_"),
- span,
- );
constants.extend(quote! {
pub const #snake_name: crate::DiagnosticMessage =
crate::DiagnosticMessage::FluentIdentifier(
@@ -212,6 +238,16 @@ pub(crate) fn fluent_messages(input: proc_macro::TokenStream) -> proc_macro::Tok
continue;
}
+ if attr_name.contains('-') {
+ Diagnostic::spanned(
+ path_span,
+ Level::Error,
+ format!("attribute `{attr_name}` contains a '-' character"),
+ )
+ .help("replace any '-'s with '_'s")
+ .emit();
+ }
+
constants.extend(quote! {
pub const #snake_name: crate::SubdiagnosticMessage =
crate::SubdiagnosticMessage::FluentAttr(
@@ -227,7 +263,7 @@ pub(crate) fn fluent_messages(input: proc_macro::TokenStream) -> proc_macro::Tok
match e {
FluentError::Overriding { kind, id } => {
Diagnostic::spanned(
- ident_span,
+ path_span,
Level::Error,
format!("overrides existing {}: `{}`", kind, id),
)
@@ -241,12 +277,7 @@ pub(crate) fn fluent_messages(input: proc_macro::TokenStream) -> proc_macro::Tok
includes.extend(quote! { include_str!(#relative_ftl_path), });
- let ident = res.ident;
- generated.extend(quote! {
- pub mod #ident {
- #constants
- }
- });
+ generated.extend(constants);
}
quote! {
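
Taken together, the fluent.rs changes mean that slugs become flat, snake_case, crate-prefixed constants instead of per-crate modules. A rough illustration is sketched below; the path and message are invented, and the exact invocation syntax is only inferred from the `Resource` parser above.

    // Hypothetical invocation (path is illustrative):
    fluent_messages! {
        borrowck => "../locales/en-US/borrowck.ftl",
    }

    // With borrowck.ftl containing, e.g.:
    //     borrowck_move_out_of_borrow = cannot move out of {$name} because it is borrowed
    //
    // the generated constant is now referenced without a per-crate module:
    //     rustc_errors::fluent::borrowck_move_out_of_borrow
    // rather than the old `rustc_errors::fluent::borrowck::move_out_of_borrow`.
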
diff --git a/compiler/rustc_macros/src/diagnostics/mod.rs b/compiler/rustc_macros/src/diagnostics/mod.rs
index 399790026..860340b43 100644
--- a/compiler/rustc_macros/src/diagnostics/mod.rs
+++ b/compiler/rustc_macros/src/diagnostics/mod.rs
@@ -5,14 +5,14 @@ mod fluent;
mod subdiagnostic;
mod utils;
-use diagnostic::{LintDiagnosticDerive, SessionDiagnosticDerive};
+use diagnostic::{DiagnosticDerive, LintDiagnosticDerive};
pub(crate) use fluent::fluent_messages;
use proc_macro2::TokenStream;
use quote::format_ident;
-use subdiagnostic::SessionSubdiagnosticDerive;
+use subdiagnostic::SubdiagnosticDeriveBuilder;
use synstructure::Structure;
-/// Implements `#[derive(SessionDiagnostic)]`, which allows for errors to be specified as a struct,
+/// Implements `#[derive(Diagnostic)]`, which allows for errors to be specified as a struct,
/// independent from the actual diagnostics emitting code.
///
/// ```ignore (rust)
@@ -22,15 +22,15 @@ use synstructure::Structure;
/// # use rustc_span::{symbol::Ident, Span};
/// # extern crate rust_middle;
/// # use rustc_middle::ty::Ty;
-/// #[derive(SessionDiagnostic)]
-/// #[error(borrowck::move_out_of_borrow, code = "E0505")]
+/// #[derive(Diagnostic)]
+/// #[diag(borrowck_move_out_of_borrow, code = "E0505")]
/// pub struct MoveOutOfBorrowError<'tcx> {
/// pub name: Ident,
/// pub ty: Ty<'tcx>,
/// #[primary_span]
/// #[label]
/// pub span: Span,
-/// #[label(borrowck::first_borrow_label)]
+/// #[label(first_borrow_label)]
/// pub first_borrow_span: Span,
/// #[suggestion(code = "{name}.clone()")]
/// pub clone_sugg: Option<(Span, Applicability)>
@@ -38,9 +38,9 @@ use synstructure::Structure;
/// ```
///
/// ```fluent
-/// move-out-of-borrow = cannot move out of {$name} because it is borrowed
+/// move_out_of_borrow = cannot move out of {$name} because it is borrowed
/// .label = cannot move out of borrow
-/// .first-borrow-label = `{$ty}` first borrowed here
+/// .first_borrow_label = `{$ty}` first borrowed here
/// .suggestion = consider cloning here
/// ```
///
@@ -56,10 +56,10 @@ use synstructure::Structure;
/// });
/// ```
///
-/// See rustc dev guide for more examples on using the `#[derive(SessionDiagnostic)]`:
+/// See rustc dev guide for more examples on using the `#[derive(Diagnostic)]`:
/// <https://rustc-dev-guide.rust-lang.org/diagnostics/diagnostic-structs.html>
pub fn session_diagnostic_derive(s: Structure<'_>) -> TokenStream {
- SessionDiagnosticDerive::new(format_ident!("diag"), format_ident!("sess"), s).into_tokens()
+ DiagnosticDerive::new(format_ident!("diag"), format_ident!("handler"), s).into_tokens()
}
/// Implements `#[derive(LintDiagnostic)]`, which allows for lints to be specified as a struct,
@@ -67,14 +67,14 @@ pub fn session_diagnostic_derive(s: Structure<'_>) -> TokenStream {
///
/// ```ignore (rust)
/// #[derive(LintDiagnostic)]
-/// #[lint(lint::atomic_ordering_invalid_fail_success)]
+/// #[diag(lint_atomic_ordering_invalid_fail_success)]
/// pub struct AtomicOrderingInvalidLint {
/// method: Symbol,
/// success_ordering: Symbol,
/// fail_ordering: Symbol,
-/// #[label(lint::fail_label)]
+/// #[label(fail_label)]
/// fail_order_arg_span: Span,
-/// #[label(lint::success_label)]
+/// #[label(success_label)]
/// #[suggestion(
/// code = "std::sync::atomic::Ordering::{success_suggestion}",
/// applicability = "maybe-incorrect"
@@ -84,9 +84,9 @@ pub fn session_diagnostic_derive(s: Structure<'_>) -> TokenStream {
/// ```
///
/// ```fluent
-/// lint-atomic-ordering-invalid-fail-success = `{$method}`'s success ordering must be at least as strong as its failure ordering
-/// .fail-label = `{$fail_ordering}` failure ordering
-/// .success-label = `{$success_ordering}` success ordering
+/// lint_atomic_ordering_invalid_fail_success = `{$method}`'s success ordering must be at least as strong as its failure ordering
+/// .fail_label = `{$fail_ordering}` failure ordering
+/// .success_label = `{$success_ordering}` success ordering
/// .suggestion = consider using `{$success_suggestion}` success ordering instead
/// ```
///
@@ -103,24 +103,24 @@ pub fn session_diagnostic_derive(s: Structure<'_>) -> TokenStream {
/// ```
///
/// See rustc dev guide for more examples on using the `#[derive(LintDiagnostic)]`:
-/// <https://rustc-dev-guide.rust-lang.org/diagnostics/sessiondiagnostic.html>
+/// <https://rustc-dev-guide.rust-lang.org/diagnostics/diagnostic-structs.html#reference>
pub fn lint_diagnostic_derive(s: Structure<'_>) -> TokenStream {
LintDiagnosticDerive::new(format_ident!("diag"), s).into_tokens()
}
-/// Implements `#[derive(SessionSubdiagnostic)]`, which allows for labels, notes, helps and
+/// Implements `#[derive(Subdiagnostic)]`, which allows for labels, notes, helps and
/// suggestions to be specified as a structs or enums, independent from the actual diagnostics
/// emitting code or diagnostic derives.
///
/// ```ignore (rust)
-/// #[derive(SessionSubdiagnostic)]
+/// #[derive(Subdiagnostic)]
/// pub enum ExpectedIdentifierLabel<'tcx> {
-/// #[label(parser::expected_identifier)]
+/// #[label(expected_identifier)]
/// WithoutFound {
/// #[primary_span]
/// span: Span,
/// }
-/// #[label(parser::expected_identifier_found)]
+/// #[label(expected_identifier_found)]
/// WithFound {
/// #[primary_span]
/// span: Span,
@@ -128,7 +128,7 @@ pub fn lint_diagnostic_derive(s: Structure<'_>) -> TokenStream {
/// }
/// }
///
-/// #[derive(SessionSubdiagnostic)]
+/// #[derive(Subdiagnostic)]
/// #[suggestion_verbose(parser::raw_identifier)]
/// pub struct RawIdentifierSuggestion<'tcx> {
/// #[primary_span]
@@ -140,11 +140,11 @@ pub fn lint_diagnostic_derive(s: Structure<'_>) -> TokenStream {
/// ```
///
/// ```fluent
-/// parser-expected-identifier = expected identifier
+/// parser_expected_identifier = expected identifier
///
-/// parser-expected-identifier-found = expected identifier, found {$found}
+/// parser_expected_identifier_found = expected identifier, found {$found}
///
-/// parser-raw-identifier = escape `{$ident}` to use it as an identifier
+/// parser_raw_identifier = escape `{$ident}` to use it as an identifier
/// ```
///
/// Then, later, to add the subdiagnostic:
@@ -155,5 +155,5 @@ pub fn lint_diagnostic_derive(s: Structure<'_>) -> TokenStream {
/// diag.subdiagnostic(RawIdentifierSuggestion { span, applicability, ident });
/// ```
pub fn session_subdiagnostic_derive(s: Structure<'_>) -> TokenStream {
- SessionSubdiagnosticDerive::new(s).into_tokens()
+ SubdiagnosticDeriveBuilder::new().into_tokens(s)
}
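
With the renames above, error structs derive `Diagnostic` and point at a Fluent slug through `#[diag(...)]`; a hedged usage sketch (the slug, fields, and the `sess.emit_err` call site are illustrative, not part of this patch):

```rust
#[derive(Diagnostic)]
#[diag(parser_expected_identifier)]
pub struct ExpectedIdentifier {
    #[primary_span]
    #[label]
    pub span: Span,
    pub found: String,
}

// At an emission site with access to the session's diagnostic handler:
// sess.emit_err(ExpectedIdentifier { span, found });
```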
diff --git a/compiler/rustc_macros/src/diagnostics/subdiagnostic.rs b/compiler/rustc_macros/src/diagnostics/subdiagnostic.rs
index edf4dbed9..fa0ca5a52 100644
--- a/compiler/rustc_macros/src/diagnostics/subdiagnostic.rs
+++ b/compiler/rustc_macros/src/diagnostics/subdiagnostic.rs
@@ -1,113 +1,35 @@
#![deny(unused_must_use)]
use crate::diagnostics::error::{
- span_err, throw_invalid_attr, throw_invalid_nested_attr, throw_span_err, DiagnosticDeriveError,
+ invalid_attr, span_err, throw_invalid_attr, throw_invalid_nested_attr, throw_span_err,
+ DiagnosticDeriveError,
};
use crate::diagnostics::utils::{
- report_error_if_not_applied_to_applicability, report_error_if_not_applied_to_span,
- Applicability, FieldInfo, FieldInnerTy, HasFieldMap, SetOnce,
+ build_field_mapping, is_doc_comment, new_code_ident,
+ report_error_if_not_applied_to_applicability, report_error_if_not_applied_to_span, FieldInfo,
+ FieldInnerTy, FieldMap, HasFieldMap, SetOnce, SpannedOption, SubdiagnosticKind,
};
use proc_macro2::TokenStream;
use quote::{format_ident, quote};
-use std::collections::HashMap;
-use std::fmt;
-use std::str::FromStr;
-use syn::{parse_quote, spanned::Spanned, Meta, MetaList, MetaNameValue, NestedMeta, Path};
+use syn::{spanned::Spanned, Attribute, Meta, MetaList, NestedMeta, Path};
use synstructure::{BindingInfo, Structure, VariantInfo};
-/// Which kind of suggestion is being created?
-#[derive(Clone, Copy)]
-enum SubdiagnosticSuggestionKind {
- /// `#[suggestion]`
- Normal,
- /// `#[suggestion_short]`
- Short,
- /// `#[suggestion_hidden]`
- Hidden,
- /// `#[suggestion_verbose]`
- Verbose,
-}
-
-/// Which kind of subdiagnostic is being created from a variant?
-#[derive(Clone, Copy)]
-enum SubdiagnosticKind {
- /// `#[label(...)]`
- Label,
- /// `#[note(...)]`
- Note,
- /// `#[help(...)]`
- Help,
- /// `#[warn_(...)]`
- Warn,
- /// `#[suggestion{,_short,_hidden,_verbose}]`
- Suggestion(SubdiagnosticSuggestionKind),
-}
-
-impl FromStr for SubdiagnosticKind {
- type Err = ();
-
- fn from_str(s: &str) -> Result<Self, Self::Err> {
- match s {
- "label" => Ok(SubdiagnosticKind::Label),
- "note" => Ok(SubdiagnosticKind::Note),
- "help" => Ok(SubdiagnosticKind::Help),
- "warn_" => Ok(SubdiagnosticKind::Warn),
- "suggestion" => Ok(SubdiagnosticKind::Suggestion(SubdiagnosticSuggestionKind::Normal)),
- "suggestion_short" => {
- Ok(SubdiagnosticKind::Suggestion(SubdiagnosticSuggestionKind::Short))
- }
- "suggestion_hidden" => {
- Ok(SubdiagnosticKind::Suggestion(SubdiagnosticSuggestionKind::Hidden))
- }
- "suggestion_verbose" => {
- Ok(SubdiagnosticKind::Suggestion(SubdiagnosticSuggestionKind::Verbose))
- }
- _ => Err(()),
- }
- }
-}
-
-impl quote::IdentFragment for SubdiagnosticKind {
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- match self {
- SubdiagnosticKind::Label => write!(f, "label"),
- SubdiagnosticKind::Note => write!(f, "note"),
- SubdiagnosticKind::Help => write!(f, "help"),
- SubdiagnosticKind::Warn => write!(f, "warn"),
- SubdiagnosticKind::Suggestion(SubdiagnosticSuggestionKind::Normal) => {
- write!(f, "suggestion")
- }
- SubdiagnosticKind::Suggestion(SubdiagnosticSuggestionKind::Short) => {
- write!(f, "suggestion_short")
- }
- SubdiagnosticKind::Suggestion(SubdiagnosticSuggestionKind::Hidden) => {
- write!(f, "suggestion_hidden")
- }
- SubdiagnosticKind::Suggestion(SubdiagnosticSuggestionKind::Verbose) => {
- write!(f, "suggestion_verbose")
- }
- }
- }
-
- fn span(&self) -> Option<proc_macro2::Span> {
- None
- }
-}
+use super::utils::{build_suggestion_code, AllowMultipleAlternatives};
/// The central struct for constructing the `add_to_diagnostic` method from an annotated struct.
-pub(crate) struct SessionSubdiagnosticDerive<'a> {
- structure: Structure<'a>,
+pub(crate) struct SubdiagnosticDeriveBuilder {
diag: syn::Ident,
+ f: syn::Ident,
}
-impl<'a> SessionSubdiagnosticDerive<'a> {
- pub(crate) fn new(structure: Structure<'a>) -> Self {
+impl SubdiagnosticDeriveBuilder {
+ pub(crate) fn new() -> Self {
let diag = format_ident!("diag");
- Self { structure, diag }
+ let f = format_ident!("f");
+ Self { diag, f }
}
- pub(crate) fn into_tokens(self) -> TokenStream {
- let SessionSubdiagnosticDerive { mut structure, diag } = self;
+ pub(crate) fn into_tokens<'a>(self, mut structure: Structure<'a>) -> TokenStream {
let implementation = {
let ast = structure.ast();
let span = ast.span().unwrap();
@@ -116,13 +38,19 @@ impl<'a> SessionSubdiagnosticDerive<'a> {
syn::Data::Union(..) => {
span_err(
span,
- "`#[derive(SessionSubdiagnostic)]` can only be used on structs and enums",
+ "`#[derive(Subdiagnostic)]` can only be used on structs and enums",
);
}
}
- if matches!(ast.data, syn::Data::Enum(..)) {
+ let is_enum = matches!(ast.data, syn::Data::Enum(..));
+ if is_enum {
for attr in &ast.attrs {
+ // Always allow documentation comments.
+ if is_doc_comment(attr) {
+ continue;
+ }
+
span_err(
attr.span().unwrap(),
"unsupported type attribute for subdiagnostic enum",
@@ -133,26 +61,16 @@ impl<'a> SessionSubdiagnosticDerive<'a> {
structure.bind_with(|_| synstructure::BindStyle::Move);
let variants_ = structure.each_variant(|variant| {
- // Build the mapping of field names to fields. This allows attributes to peek
- // values from other fields.
- let mut fields_map = HashMap::new();
- for binding in variant.bindings() {
- let field = binding.ast();
- if let Some(ident) = &field.ident {
- fields_map.insert(ident.to_string(), quote! { #binding });
- }
- }
-
- let mut builder = SessionSubdiagnosticDeriveBuilder {
- diag: &diag,
+ let mut builder = SubdiagnosticDeriveVariantBuilder {
+ parent: &self,
variant,
span,
- fields: fields_map,
- kind: None,
- slug: None,
- code: None,
+ formatting_init: TokenStream::new(),
+ fields: build_field_mapping(variant),
span_field: None,
applicability: None,
+ has_suggestion_parts: false,
+ is_enum,
};
builder.into_tokens().unwrap_or_else(|v| v.to_compile_error())
});
@@ -164,9 +82,17 @@ impl<'a> SessionSubdiagnosticDerive<'a> {
}
};
+ let diag = &self.diag;
+ let f = &self.f;
let ret = structure.gen_impl(quote! {
- gen impl rustc_errors::AddSubdiagnostic for @Self {
- fn add_to_diagnostic(self, #diag: &mut rustc_errors::Diagnostic) {
+ gen impl rustc_errors::AddToDiagnostic for @Self {
+ fn add_to_diagnostic_with<__F>(self, #diag: &mut rustc_errors::Diagnostic, #f: __F)
+ where
+ __F: core::ops::Fn(
+ &mut rustc_errors::Diagnostic,
+ rustc_errors::SubdiagnosticMessage
+ ) -> rustc_errors::SubdiagnosticMessage,
+ {
use rustc_errors::{Applicability, IntoDiagnosticArg};
#implementation
}
@@ -177,315 +103,485 @@ impl<'a> SessionSubdiagnosticDerive<'a> {
}
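
For a concrete annotated type, the `gen_impl` above expands to roughly the following shape; this is a sketch only, with the per-variant body elided:

```rust
impl rustc_errors::AddToDiagnostic for ExpectedIdentifierLabel<'_> {
    fn add_to_diagnostic_with<__F>(self, diag: &mut rustc_errors::Diagnostic, f: __F)
    where
        __F: core::ops::Fn(
            &mut rustc_errors::Diagnostic,
            rustc_errors::SubdiagnosticMessage,
        ) -> rustc_errors::SubdiagnosticMessage,
    {
        use rustc_errors::{Applicability, IntoDiagnosticArg};
        // per-variant code produced by `SubdiagnosticDeriveVariantBuilder::into_tokens`
    }
}
```

The extra `f` parameter lets the caller rewrite each subdiagnostic message (for example, to resolve it against the parent diagnostic) before it is attached; a plain `add_to_diagnostic` presumably just passes the message through unchanged.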
/// Tracks persistent information required for building up the call to add to the diagnostic
-/// for the final generated method. This is a separate struct to `SessionSubdiagnosticDerive`
+/// for the final generated method. This is a separate struct from `SubdiagnosticDerive`
/// only to be able to destructure and split `self.builder` and the `self.structure` up to avoid a
/// double mut borrow later on.
-struct SessionSubdiagnosticDeriveBuilder<'a> {
+struct SubdiagnosticDeriveVariantBuilder<'parent, 'a> {
/// The identifier to use for the generated `DiagnosticBuilder` instance.
- diag: &'a syn::Ident,
+ parent: &'parent SubdiagnosticDeriveBuilder,
/// Info for the current variant (or the type if not an enum).
variant: &'a VariantInfo<'a>,
/// Span for the entire type.
span: proc_macro::Span,
+ /// Initialization of format strings for code suggestions.
+ formatting_init: TokenStream,
+
/// Store a map of field name to its corresponding field. This is built on construction of the
/// derive builder.
- fields: HashMap<String, TokenStream>,
+ fields: FieldMap,
- /// Subdiagnostic kind of the type/variant.
- kind: Option<(SubdiagnosticKind, proc_macro::Span)>,
+ /// Identifier for the binding to the `#[primary_span]` field.
+ span_field: SpannedOption<proc_macro2::Ident>,
- /// Slug of the subdiagnostic - corresponds to the Fluent identifier for the message - from the
- /// `#[kind(slug)]` attribute on the type or variant.
- slug: Option<(Path, proc_macro::Span)>,
- /// If a suggestion, the code to suggest as a replacement - from the `#[kind(code = "...")]`
- /// attribute on the type or variant.
- code: Option<(TokenStream, proc_macro::Span)>,
+ /// The binding to the `#[applicability]` field, if present.
+ applicability: SpannedOption<TokenStream>,
- /// Identifier for the binding to the `#[primary_span]` field.
- span_field: Option<(proc_macro2::Ident, proc_macro::Span)>,
- /// If a suggestion, the identifier for the binding to the `#[applicability]` field or a
- /// `rustc_errors::Applicability::*` variant directly.
- applicability: Option<(TokenStream, proc_macro::Span)>,
+ /// Set to true when a `#[suggestion_part]` field is encountered, used to generate an error
+ /// during finalization if still `false`.
+ has_suggestion_parts: bool,
+
+ /// Set to true when this variant is an enum variant rather than just the body of a struct.
+ is_enum: bool,
}
-impl<'a> HasFieldMap for SessionSubdiagnosticDeriveBuilder<'a> {
+impl<'parent, 'a> HasFieldMap for SubdiagnosticDeriveVariantBuilder<'parent, 'a> {
fn get_field_binding(&self, field: &String) -> Option<&TokenStream> {
self.fields.get(field)
}
}
-impl<'a> SessionSubdiagnosticDeriveBuilder<'a> {
- fn identify_kind(&mut self) -> Result<(), DiagnosticDeriveError> {
- for attr in self.variant.ast().attrs {
- let span = attr.span().unwrap();
-
- let name = attr.path.segments.last().unwrap().ident.to_string();
- let name = name.as_str();
-
- let meta = attr.parse_meta()?;
- let kind = match meta {
- Meta::List(MetaList { ref nested, .. }) => {
- let mut nested_iter = nested.into_iter();
- if let Some(nested_attr) = nested_iter.next() {
- match nested_attr {
- NestedMeta::Meta(Meta::Path(path)) => {
- self.slug.set_once((path.clone(), span));
- }
- NestedMeta::Meta(meta @ Meta::NameValue(_))
- if matches!(
- meta.path().segments.last().unwrap().ident.to_string().as_str(),
- "code" | "applicability"
- ) =>
- {
- // don't error for valid follow-up attributes
- }
- nested_attr => {
- throw_invalid_nested_attr!(attr, &nested_attr, |diag| {
- diag.help(
- "first argument of the attribute should be the diagnostic \
- slug",
- )
- })
- }
- };
- }
-
- for nested_attr in nested_iter {
- let meta = match nested_attr {
- NestedMeta::Meta(ref meta) => meta,
- _ => throw_invalid_nested_attr!(attr, &nested_attr),
- };
-
- let span = meta.span().unwrap();
- let nested_name = meta.path().segments.last().unwrap().ident.to_string();
- let nested_name = nested_name.as_str();
-
- match meta {
- Meta::NameValue(MetaNameValue { lit: syn::Lit::Str(s), .. }) => {
- match nested_name {
- "code" => {
- let formatted_str = self.build_format(&s.value(), s.span());
- self.code.set_once((formatted_str, span));
- }
- "applicability" => {
- let value = match Applicability::from_str(&s.value()) {
- Ok(v) => v,
- Err(()) => {
- span_err(span, "invalid applicability").emit();
- Applicability::Unspecified
- }
- };
- self.applicability.set_once((quote! { #value }, span));
- }
- _ => throw_invalid_nested_attr!(attr, &nested_attr, |diag| {
- diag.help(
- "only `code` and `applicability` are valid nested \
- attributes",
- )
- }),
- }
- }
- _ => throw_invalid_nested_attr!(attr, &nested_attr, |diag| {
- if matches!(meta, Meta::Path(_)) {
- diag.help(
- "a diagnostic slug must be the first argument to the \
- attribute",
- )
- } else {
- diag
- }
- }),
- }
- }
-
- let Ok(kind) = SubdiagnosticKind::from_str(name) else {
- throw_invalid_attr!(attr, &meta)
- };
+/// Provides frequently-needed information about the diagnostic kinds being derived for this type.
+#[derive(Clone, Copy, Debug)]
+struct KindsStatistics {
+ has_multipart_suggestion: bool,
+ all_multipart_suggestions: bool,
+ has_normal_suggestion: bool,
+ all_applicabilities_static: bool,
+}
- kind
- }
- _ => throw_invalid_attr!(attr, &meta),
- };
+impl<'a> FromIterator<&'a SubdiagnosticKind> for KindsStatistics {
+ fn from_iter<T: IntoIterator<Item = &'a SubdiagnosticKind>>(kinds: T) -> Self {
+ let mut ret = Self {
+ has_multipart_suggestion: false,
+ all_multipart_suggestions: true,
+ has_normal_suggestion: false,
+ all_applicabilities_static: true,
+ };
- if matches!(
- kind,
- SubdiagnosticKind::Label | SubdiagnosticKind::Help | SubdiagnosticKind::Note
- ) && self.code.is_some()
+ for kind in kinds {
+ if let SubdiagnosticKind::MultipartSuggestion { applicability: None, .. }
+ | SubdiagnosticKind::Suggestion { applicability: None, .. } = kind
{
- throw_span_err!(
- span,
- &format!("`code` is not a valid nested attribute of a `{}` attribute", name)
- );
+ ret.all_applicabilities_static = false;
+ }
+ if let SubdiagnosticKind::MultipartSuggestion { .. } = kind {
+ ret.has_multipart_suggestion = true;
+ } else {
+ ret.all_multipart_suggestions = false;
}
- if matches!(
- kind,
- SubdiagnosticKind::Label | SubdiagnosticKind::Help | SubdiagnosticKind::Note
- ) && self.applicability.is_some()
- {
- throw_span_err!(
- span,
- &format!(
- "`applicability` is not a valid nested attribute of a `{}` attribute",
- name
- )
- );
+ if let SubdiagnosticKind::Suggestion { .. } = kind {
+ ret.has_normal_suggestion = true;
}
+ }
+ ret
+ }
+}
+
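
The `FromIterator` impl lets `into_tokens` compute these flags with a single `.collect()` over all kinds on a variant. A simplified, self-contained sketch of the same pattern (the `Kind` enum is a stand-in, not the real `SubdiagnosticKind`):

```rust
enum Kind {
    Label,
    Suggestion { static_applicability: bool },
    MultipartSuggestion { static_applicability: bool },
}

#[derive(Debug)]
struct KindsStatistics {
    has_multipart_suggestion: bool,
    all_multipart_suggestions: bool,
    has_normal_suggestion: bool,
    all_applicabilities_static: bool,
}

impl<'a> FromIterator<&'a Kind> for KindsStatistics {
    fn from_iter<T: IntoIterator<Item = &'a Kind>>(kinds: T) -> Self {
        let mut ret = KindsStatistics {
            has_multipart_suggestion: false,
            all_multipart_suggestions: true,
            has_normal_suggestion: false,
            all_applicabilities_static: true,
        };
        for kind in kinds {
            match kind {
                Kind::Suggestion { static_applicability } => {
                    ret.has_normal_suggestion = true;
                    ret.all_multipart_suggestions = false;
                    ret.all_applicabilities_static &= *static_applicability;
                }
                Kind::MultipartSuggestion { static_applicability } => {
                    ret.has_multipart_suggestion = true;
                    ret.all_applicabilities_static &= *static_applicability;
                }
                Kind::Label => ret.all_multipart_suggestions = false,
            }
        }
        ret
    }
}

fn main() {
    let kinds = [Kind::Label, Kind::Suggestion { static_applicability: true }];
    let stats: KindsStatistics = kinds.iter().collect();
    assert!(stats.has_normal_suggestion && !stats.has_multipart_suggestion);
    println!("{stats:?}");
}
```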
+impl<'parent, 'a> SubdiagnosticDeriveVariantBuilder<'parent, 'a> {
+ fn identify_kind(&mut self) -> Result<Vec<(SubdiagnosticKind, Path)>, DiagnosticDeriveError> {
+ let mut kind_slugs = vec![];
+
+ for attr in self.variant.ast().attrs {
+ let Some((kind, slug)) = SubdiagnosticKind::from_attr(attr, self)? else {
+ // Some attributes aren't errors - like documentation comments - but also aren't
+ // subdiagnostics.
+ continue;
+ };
+
+ let Some(slug) = slug else {
+ let name = attr.path.segments.last().unwrap().ident.to_string();
+ let name = name.as_str();
- if self.slug.is_none() {
throw_span_err!(
- span,
+ attr.span().unwrap(),
&format!(
"diagnostic slug must be first argument of a `#[{}(...)]` attribute",
name
)
);
- }
+ };
- self.kind.set_once((kind, span));
+ kind_slugs.push((kind, slug));
}
- Ok(())
+ Ok(kind_slugs)
}
- fn generate_field_code(
- &mut self,
- binding: &BindingInfo<'_>,
- is_suggestion: bool,
- ) -> Result<TokenStream, DiagnosticDeriveError> {
+ /// Generates the code for a field with no attributes.
+ fn generate_field_set_arg(&mut self, binding: &BindingInfo<'_>) -> TokenStream {
let ast = binding.ast();
+ assert_eq!(ast.attrs.len(), 0, "field with attribute used as diagnostic arg");
- let inner_ty = FieldInnerTy::from_type(&ast.ty);
- let info = FieldInfo {
- binding: binding,
- ty: inner_ty.inner_type().unwrap_or(&ast.ty),
- span: &ast.span(),
- };
-
- for attr in &ast.attrs {
- let name = attr.path.segments.last().unwrap().ident.to_string();
- let name = name.as_str();
- let span = attr.span().unwrap();
-
- let meta = attr.parse_meta()?;
- match meta {
- Meta::Path(_) => match name {
- "primary_span" => {
- report_error_if_not_applied_to_span(attr, &info)?;
- self.span_field.set_once((binding.binding.clone(), span));
- return Ok(quote! {});
- }
- "applicability" if is_suggestion => {
- report_error_if_not_applied_to_applicability(attr, &info)?;
- let binding = binding.binding.clone();
- self.applicability.set_once((quote! { #binding }, span));
- return Ok(quote! {});
- }
- "applicability" => {
- span_err(span, "`#[applicability]` is only valid on suggestions").emit();
- return Ok(quote! {});
- }
- "skip_arg" => {
- return Ok(quote! {});
- }
- _ => throw_invalid_attr!(attr, &meta, |diag| {
- diag.help(
- "only `primary_span`, `applicability` and `skip_arg` are valid field \
- attributes",
- )
- }),
- },
- _ => throw_invalid_attr!(attr, &meta),
- }
- }
-
+ let diag = &self.parent.diag;
let ident = ast.ident.as_ref().unwrap();
+ // strip `r#` prefix, if present
+ let ident = format_ident!("{}", ident);
- let diag = &self.diag;
- let generated = quote! {
+ quote! {
#diag.set_arg(
stringify!(#ident),
#binding
);
- };
-
- Ok(inner_ty.with(binding, generated))
+ }
}
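
So a plain field simply becomes a diagnostic argument; the generated call looks roughly like this (for a hypothetical `found` field):

```rust
diag.set_arg(stringify!(found), found);
```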
- fn into_tokens(&mut self) -> Result<TokenStream, DiagnosticDeriveError> {
- self.identify_kind()?;
- let Some(kind) = self.kind.map(|(kind, _)| kind) else {
- throw_span_err!(
- self.variant.ast().ident.span().unwrap(),
- "subdiagnostic kind not specified"
- );
- };
+ /// Generates the necessary code for all attributes on a field.
+ fn generate_field_attr_code(
+ &mut self,
+ binding: &BindingInfo<'_>,
+ kind_stats: KindsStatistics,
+ ) -> TokenStream {
+ let ast = binding.ast();
+ assert!(ast.attrs.len() > 0, "field without attributes generating attr code");
- let is_suggestion = matches!(kind, SubdiagnosticKind::Suggestion(_));
+ // Abstract over `Vec<T>` and `Option<T>` fields using `FieldInnerTy`, which will
+ // apply the generated code on each element in the `Vec` or `Option`.
+ let inner_ty = FieldInnerTy::from_type(&ast.ty);
+ ast.attrs
+ .iter()
+ .map(|attr| {
+ // Always allow documentation comments.
+ if is_doc_comment(attr) {
+ return quote! {};
+ }
+
+ let info = FieldInfo {
+ binding,
+ ty: inner_ty.inner_type().unwrap_or(&ast.ty),
+ span: &ast.span(),
+ };
- let mut args = TokenStream::new();
- for binding in self.variant.bindings() {
- let arg = self
- .generate_field_code(binding, is_suggestion)
- .unwrap_or_else(|v| v.to_compile_error());
- args.extend(arg);
+ let generated = self
+ .generate_field_code_inner(kind_stats, attr, info, inner_ty.will_iterate())
+ .unwrap_or_else(|v| v.to_compile_error());
+
+ inner_ty.with(binding, generated)
+ })
+ .collect()
+ }
+
+ fn generate_field_code_inner(
+ &mut self,
+ kind_stats: KindsStatistics,
+ attr: &Attribute,
+ info: FieldInfo<'_>,
+ clone_suggestion_code: bool,
+ ) -> Result<TokenStream, DiagnosticDeriveError> {
+ let meta = attr.parse_meta()?;
+ match meta {
+ Meta::Path(path) => self.generate_field_code_inner_path(kind_stats, attr, info, path),
+ Meta::List(list @ MetaList { .. }) => self.generate_field_code_inner_list(
+ kind_stats,
+ attr,
+ info,
+ list,
+ clone_suggestion_code,
+ ),
+ _ => throw_invalid_attr!(attr, &meta),
}
+ }
+
+ /// Generates the code for a `[Meta::Path]`-like attribute on a field (e.g. `#[primary_span]`).
+ fn generate_field_code_inner_path(
+ &mut self,
+ kind_stats: KindsStatistics,
+ attr: &Attribute,
+ info: FieldInfo<'_>,
+ path: Path,
+ ) -> Result<TokenStream, DiagnosticDeriveError> {
+ let span = attr.span().unwrap();
+ let ident = &path.segments.last().unwrap().ident;
+ let name = ident.to_string();
+ let name = name.as_str();
+
+ match name {
+ "skip_arg" => Ok(quote! {}),
+ "primary_span" => {
+ if kind_stats.has_multipart_suggestion {
+ invalid_attr(attr, &Meta::Path(path))
+ .help(
+ "multipart suggestions use one or more `#[suggestion_part]`s rather \
+ than one `#[primary_span]`",
+ )
+ .emit();
+ } else {
+ report_error_if_not_applied_to_span(attr, &info)?;
+
+ let binding = info.binding.binding.clone();
+ // FIXME(#100717): support `Option<Span>` on `primary_span` like in the
+ // diagnostic derive
+ self.span_field.set_once(binding, span);
+ }
- // Missing slug errors will already have been reported.
- let slug = self
- .slug
- .as_ref()
- .map(|(slug, _)| slug.clone())
- .unwrap_or_else(|| parse_quote! { you::need::to::specify::a::slug });
- let code = match self.code.as_ref() {
- Some((code, _)) => Some(quote! { #code }),
- None if is_suggestion => {
- span_err(self.span, "suggestion without `code = \"...\"`").emit();
- Some(quote! { /* macro error */ "..." })
+ Ok(quote! {})
}
- None => None,
- };
+ "suggestion_part" => {
+ self.has_suggestion_parts = true;
+
+ if kind_stats.has_multipart_suggestion {
+ span_err(span, "`#[suggestion_part(...)]` attribute without `code = \"...\"`")
+ .emit();
+ } else {
+ invalid_attr(attr, &Meta::Path(path))
+ .help(
+ "`#[suggestion_part(...)]` is only valid in multipart suggestions, \
+ use `#[primary_span]` instead",
+ )
+ .emit();
+ }
- let span_field = self.span_field.as_ref().map(|(span, _)| span);
- let applicability = match self.applicability.clone() {
- Some((applicability, _)) => Some(applicability),
- None if is_suggestion => {
- span_err(self.span, "suggestion without `applicability`").emit();
- Some(quote! { rustc_errors::Applicability::Unspecified })
+ Ok(quote! {})
}
- None => None,
- };
+ "applicability" => {
+ if kind_stats.has_multipart_suggestion || kind_stats.has_normal_suggestion {
+ report_error_if_not_applied_to_applicability(attr, &info)?;
+
+ if kind_stats.all_applicabilities_static {
+ span_err(
+ span,
+ "`#[applicability]` has no effect if all `#[suggestion]`/\
+ `#[multipart_suggestion]` attributes have a static \
+ `applicability = \"...\"`",
+ )
+ .emit();
+ }
+ let binding = info.binding.binding.clone();
+ self.applicability.set_once(quote! { #binding }, span);
+ } else {
+ span_err(span, "`#[applicability]` is only valid on suggestions").emit();
+ }
- let diag = &self.diag;
- let name = format_ident!("{}{}", if span_field.is_some() { "span_" } else { "" }, kind);
- let message = quote! { rustc_errors::fluent::#slug };
- let call = if matches!(kind, SubdiagnosticKind::Suggestion(..)) {
- if let Some(span) = span_field {
- quote! { #diag.#name(#span, #message, #code, #applicability); }
- } else {
- span_err(self.span, "suggestion without `#[primary_span]` field").emit();
- quote! { unreachable!(); }
+ Ok(quote! {})
}
- } else if matches!(kind, SubdiagnosticKind::Label) {
- if let Some(span) = span_field {
- quote! { #diag.#name(#span, #message); }
- } else {
- span_err(self.span, "label without `#[primary_span]` field").emit();
- quote! { unreachable!(); }
+ _ => {
+ let mut span_attrs = vec![];
+ if kind_stats.has_multipart_suggestion {
+ span_attrs.push("suggestion_part");
+ }
+ if !kind_stats.all_multipart_suggestions {
+ span_attrs.push("primary_span")
+ }
+
+ invalid_attr(attr, &Meta::Path(path))
+ .help(format!(
+ "only `{}`, `applicability` and `skip_arg` are valid field attributes",
+ span_attrs.join(", ")
+ ))
+ .emit();
+
+ Ok(quote! {})
}
- } else {
- if let Some(span) = span_field {
- quote! { #diag.#name(#span, #message); }
+ }
+ }
+
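
These path-form field attributes are the ones used on ordinary (non-multipart) subdiagnostics; a hedged sketch of a struct exercising them (slug, code template, and field names are illustrative):

```rust
#[derive(Subdiagnostic)]
#[suggestion(parser_raw_identifier, code = "r#{ident}")]
pub struct RawIdentifierSuggestion {
    #[primary_span]
    span: Span,
    #[applicability]
    applicability: Applicability,
    #[skip_arg]
    recursion_depth: usize,
    ident: Ident,
}
```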
+ /// Generates the code for a `[Meta::List]`-like attribute on a field (e.g.
+ /// `#[suggestion_part(code = "...")]`).
+ fn generate_field_code_inner_list(
+ &mut self,
+ kind_stats: KindsStatistics,
+ attr: &Attribute,
+ info: FieldInfo<'_>,
+ list: MetaList,
+ clone_suggestion_code: bool,
+ ) -> Result<TokenStream, DiagnosticDeriveError> {
+ let span = attr.span().unwrap();
+ let ident = &list.path.segments.last().unwrap().ident;
+ let name = ident.to_string();
+ let name = name.as_str();
+
+ match name {
+ "suggestion_part" => {
+ if !kind_stats.has_multipart_suggestion {
+ throw_invalid_attr!(attr, &Meta::List(list), |diag| {
+ diag.help(
+ "`#[suggestion_part(...)]` is only valid in multipart suggestions",
+ )
+ })
+ }
+
+ self.has_suggestion_parts = true;
+
+ report_error_if_not_applied_to_span(attr, &info)?;
+
+ let mut code = None;
+ for nested_attr in list.nested.iter() {
+ let NestedMeta::Meta(ref meta) = nested_attr else {
+ throw_invalid_nested_attr!(attr, &nested_attr);
+ };
+
+ let span = meta.span().unwrap();
+ let nested_name = meta.path().segments.last().unwrap().ident.to_string();
+ let nested_name = nested_name.as_str();
+
+ match nested_name {
+ "code" => {
+ let code_field = new_code_ident();
+ let formatting_init = build_suggestion_code(
+ &code_field,
+ meta,
+ self,
+ AllowMultipleAlternatives::No,
+ );
+ code.set_once((code_field, formatting_init), span);
+ }
+ _ => throw_invalid_nested_attr!(attr, &nested_attr, |diag| {
+ diag.help("`code` is the only valid nested attribute")
+ }),
+ }
+ }
+
+ let Some((code_field, formatting_init)) = code.value() else {
+ span_err(span, "`#[suggestion_part(...)]` attribute without `code = \"...\"`")
+ .emit();
+ return Ok(quote! {});
+ };
+ let binding = info.binding;
+
+ self.formatting_init.extend(formatting_init);
+ let code_field = if clone_suggestion_code {
+ quote! { #code_field.clone() }
+ } else {
+ quote! { #code_field }
+ };
+ Ok(quote! { suggestions.push((#binding, #code_field)); })
+ }
+ _ => throw_invalid_attr!(attr, &Meta::List(list), |diag| {
+ let mut span_attrs = vec![];
+ if kind_stats.has_multipart_suggestion {
+ span_attrs.push("suggestion_part");
+ }
+ if !kind_stats.all_multipart_suggestions {
+ span_attrs.push("primary_span")
+ }
+ diag.help(format!(
+ "only `{}`, `applicability` and `skip_arg` are valid field attributes",
+ span_attrs.join(", ")
+ ))
+ }),
+ }
+ }
+
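
The list form is how `#[suggestion_part(code = "...")]` carries a per-span replacement for multipart suggestions; a hedged sketch of the kind of struct this supports (slug and spans are illustrative):

```rust
#[derive(Subdiagnostic)]
#[multipart_suggestion(parser_add_parentheses, applicability = "machine-applicable")]
pub struct AddParenthesesSuggestion {
    #[suggestion_part(code = "(")]
    pub left: Span,
    #[suggestion_part(code = ")")]
    pub right: Span,
}
```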
+ pub fn into_tokens(&mut self) -> Result<TokenStream, DiagnosticDeriveError> {
+ let kind_slugs = self.identify_kind()?;
+ if kind_slugs.is_empty() {
+ if self.is_enum {
+ // It's okay for a variant to not be a subdiagnostic at all..
+ return Ok(quote! {});
} else {
- quote! { #diag.#name(#message); }
+ // ..but structs should always be _something_.
+ throw_span_err!(
+ self.variant.ast().ident.span().unwrap(),
+ "subdiagnostic kind not specified"
+ );
}
};
+ let kind_stats: KindsStatistics = kind_slugs.iter().map(|(kind, _slug)| kind).collect();
+
+ let init = if kind_stats.has_multipart_suggestion {
+ quote! { let mut suggestions = Vec::new(); }
+ } else {
+ quote! {}
+ };
+
+ let attr_args: TokenStream = self
+ .variant
+ .bindings()
+ .iter()
+ .filter(|binding| !binding.ast().attrs.is_empty())
+ .map(|binding| self.generate_field_attr_code(binding, kind_stats))
+ .collect();
+
+ let span_field = self.span_field.value_ref();
+
+ let diag = &self.parent.diag;
+ let f = &self.parent.f;
+ let mut calls = TokenStream::new();
+ for (kind, slug) in kind_slugs {
+ let message = format_ident!("__message");
+ calls.extend(quote! { let #message = #f(#diag, rustc_errors::fluent::#slug.into()); });
+
+ let name = format_ident!("{}{}", if span_field.is_some() { "span_" } else { "" }, kind);
+ let call = match kind {
+ SubdiagnosticKind::Suggestion {
+ suggestion_kind,
+ applicability,
+ code_init,
+ code_field,
+ } => {
+ self.formatting_init.extend(code_init);
+
+ let applicability = applicability
+ .value()
+ .map(|a| quote! { #a })
+ .or_else(|| self.applicability.take().value())
+ .unwrap_or_else(|| quote! { rustc_errors::Applicability::Unspecified });
+
+ if let Some(span) = span_field {
+ let style = suggestion_kind.to_suggestion_style();
+ quote! { #diag.#name(#span, #message, #code_field, #applicability, #style); }
+ } else {
+ span_err(self.span, "suggestion without `#[primary_span]` field").emit();
+ quote! { unreachable!(); }
+ }
+ }
+ SubdiagnosticKind::MultipartSuggestion { suggestion_kind, applicability } => {
+ let applicability = applicability
+ .value()
+ .map(|a| quote! { #a })
+ .or_else(|| self.applicability.take().value())
+ .unwrap_or_else(|| quote! { rustc_errors::Applicability::Unspecified });
+
+ if !self.has_suggestion_parts {
+ span_err(
+ self.span,
+ "multipart suggestion without any `#[suggestion_part(...)]` fields",
+ )
+ .emit();
+ }
+
+ let style = suggestion_kind.to_suggestion_style();
+
+ quote! { #diag.#name(#message, suggestions, #applicability, #style); }
+ }
+ SubdiagnosticKind::Label => {
+ if let Some(span) = span_field {
+ quote! { #diag.#name(#span, #message); }
+ } else {
+ span_err(self.span, "label without `#[primary_span]` field").emit();
+ quote! { unreachable!(); }
+ }
+ }
+ _ => {
+ if let Some(span) = span_field {
+ quote! { #diag.#name(#span, #message); }
+ } else {
+ quote! { #diag.#name(#message); }
+ }
+ }
+ };
+
+ calls.extend(call);
+ }
+
+ let plain_args: TokenStream = self
+ .variant
+ .bindings()
+ .iter()
+ .filter(|binding| binding.ast().attrs.is_empty())
+ .map(|binding| self.generate_field_set_arg(binding))
+ .collect();
+
+ let formatting_init = &self.formatting_init;
Ok(quote! {
- #call
- #args
+ #init
+ #formatting_init
+ #attr_args
+ #plain_args
+ #calls
})
}
}
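
Putting the pieces together, the body emitted for a suggestion variant ends up roughly like the following sketch; `__code_0`, the slug, `applicability`, and `style` are placeholders for the generated bindings:

```rust
// init (only present when a multipart suggestion is involved)
let mut suggestions = Vec::new();
// formatting_init: bindings for formatted `code = "..."` templates
let __code_0 = [format!(/* code template */)].into_iter();
// attr_args / plain_args: suggestion parts, spans, and `set_arg` calls
diag.set_arg(stringify!(ident), ident);
// calls: one per `(kind, slug)` pair, with the message passed through `f`
let __message = f(diag, rustc_errors::fluent::parser_raw_identifier.into());
diag.span_suggestions_with_style(span, __message, __code_0, applicability, style);
```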
diff --git a/compiler/rustc_macros/src/diagnostics/utils.rs b/compiler/rustc_macros/src/diagnostics/utils.rs
index 002abb152..374c795d0 100644
--- a/compiler/rustc_macros/src/diagnostics/utils.rs
+++ b/compiler/rustc_macros/src/diagnostics/utils.rs
@@ -1,11 +1,31 @@
-use crate::diagnostics::error::{span_err, throw_span_err, DiagnosticDeriveError};
+use crate::diagnostics::error::{
+ span_err, throw_invalid_attr, throw_invalid_nested_attr, throw_span_err, DiagnosticDeriveError,
+};
use proc_macro::Span;
-use proc_macro2::TokenStream;
+use proc_macro2::{Ident, TokenStream};
use quote::{format_ident, quote, ToTokens};
+use std::cell::RefCell;
use std::collections::{BTreeSet, HashMap};
+use std::fmt;
use std::str::FromStr;
-use syn::{spanned::Spanned, Attribute, Meta, Type, TypeTuple};
-use synstructure::{BindingInfo, Structure};
+use syn::{spanned::Spanned, Attribute, Field, Meta, Type, TypeTuple};
+use syn::{MetaList, MetaNameValue, NestedMeta, Path};
+use synstructure::{BindingInfo, VariantInfo};
+
+use super::error::invalid_nested_attr;
+
+thread_local! {
+ pub static CODE_IDENT_COUNT: RefCell<u32> = RefCell::new(0);
+}
+
+/// Returns an ident of the form `__code_N` where `N` is incremented once with every call.
+pub(crate) fn new_code_ident() -> syn::Ident {
+ CODE_IDENT_COUNT.with(|count| {
+ let ident = format_ident!("__code_{}", *count.borrow());
+ *count.borrow_mut() += 1;
+ ident
+ })
+}
/// Checks whether the type name of `ty` matches `name`.
///
@@ -135,6 +155,15 @@ impl<'ty> FieldInnerTy<'ty> {
unreachable!();
}
+ /// Returns `true` if `FieldInnerTy::with` will result in iteration for this inner type (i.e.
+ /// that cloning might be required for values moved in the loop body).
+ pub(crate) fn will_iterate(&self) -> bool {
+ match self {
+ FieldInnerTy::Vec(..) => true,
+ FieldInnerTy::Option(..) | FieldInnerTy::None => false,
+ }
+ }
+
/// Returns `Option` containing inner type if there is one.
pub(crate) fn inner_type(&self) -> Option<&'ty Type> {
match self {
@@ -172,13 +201,17 @@ pub(crate) struct FieldInfo<'a> {
/// Small helper trait for abstracting over `Option` fields that contain a value and a `Span`
/// for error reporting if they are set more than once.
pub(crate) trait SetOnce<T> {
- fn set_once(&mut self, _: (T, Span));
+ fn set_once(&mut self, value: T, span: Span);
fn value(self) -> Option<T>;
+ fn value_ref(&self) -> Option<&T>;
}
-impl<T> SetOnce<T> for Option<(T, Span)> {
- fn set_once(&mut self, (value, span): (T, Span)) {
+/// An [`Option<T>`] that keeps track of the span that caused it to be set; used with [`SetOnce`].
+pub(super) type SpannedOption<T> = Option<(T, Span)>;
+
+impl<T> SetOnce<T> for SpannedOption<T> {
+ fn set_once(&mut self, value: T, span: Span) {
match self {
None => {
*self = Some((value, span));
@@ -194,8 +227,14 @@ impl<T> SetOnce<T> for Option<(T, Span)> {
fn value(self) -> Option<T> {
self.map(|(v, _)| v)
}
+
+ fn value_ref(&self) -> Option<&T> {
+ self.as_ref().map(|(v, _)| v)
+ }
}
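
`SpannedOption` plus `SetOnce` is how the builders reject duplicate attributes while remembering where the first value came from. A self-contained sketch of the semantics, with `Span` reduced to an integer (since `proc_macro::Span` only exists inside a proc macro) and the duplicate error replaced by a print:

```rust
type Span = usize;
type SpannedOption<T> = Option<(T, Span)>;

trait SetOnce<T> {
    fn set_once(&mut self, value: T, span: Span);
    fn value(self) -> Option<T>;
    fn value_ref(&self) -> Option<&T>;
}

impl<T> SetOnce<T> for SpannedOption<T> {
    fn set_once(&mut self, value: T, span: Span) {
        match self {
            None => *self = Some((value, span)),
            // The real impl emits "specified multiple times" errors pointing at
            // both spans; here the duplicate is just reported and dropped.
            Some((_, first)) => eprintln!("already set at span {first}, duplicate at {span}"),
        }
    }
    fn value(self) -> Option<T> {
        self.map(|(v, _)| v)
    }
    fn value_ref(&self) -> Option<&T> {
        self.as_ref().map(|(v, _)| v)
    }
}

fn main() {
    let mut slug: SpannedOption<&str> = None;
    slug.set_once("parser_expected_identifier", 1);
    slug.set_once("parser_raw_identifier", 2); // rejected as a duplicate
    assert_eq!(slug.value(), Some("parser_expected_identifier"));
}
```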
+pub(super) type FieldMap = HashMap<String, TokenStream>;
+
pub(crate) trait HasFieldMap {
/// Returns the binding for the field with the given name, if it exists on the type.
fn get_field_binding(&self, field: &String) -> Option<&TokenStream>;
@@ -235,35 +274,40 @@ pub(crate) trait HasFieldMap {
// the referenced fields. Leaves `it` sitting on the closing brace of the format string, so
// the next call to `it.next()` retrieves the next character.
while let Some(c) = it.next() {
- if c == '{' && *it.peek().unwrap_or(&'\0') != '{' {
- let mut eat_argument = || -> Option<String> {
- let mut result = String::new();
- // Format specifiers look like:
- //
- // format := '{' [ argument ] [ ':' format_spec ] '}' .
- //
- // Therefore, we only need to eat until ':' or '}' to find the argument.
- while let Some(c) = it.next() {
- result.push(c);
- let next = *it.peek().unwrap_or(&'\0');
- if next == '}' {
- break;
- } else if next == ':' {
- // Eat the ':' character.
- assert_eq!(it.next().unwrap(), ':');
- break;
- }
- }
- // Eat until (and including) the matching '}'
- while it.next()? != '}' {
- continue;
+ if c != '{' {
+ continue;
+ }
+ if *it.peek().unwrap_or(&'\0') == '{' {
+ assert_eq!(it.next().unwrap(), '{');
+ continue;
+ }
+ let mut eat_argument = || -> Option<String> {
+ let mut result = String::new();
+ // Format specifiers look like:
+ //
+ // format := '{' [ argument ] [ ':' format_spec ] '}' .
+ //
+ // Therefore, we only need to eat until ':' or '}' to find the argument.
+ while let Some(c) = it.next() {
+ result.push(c);
+ let next = *it.peek().unwrap_or(&'\0');
+ if next == '}' {
+ break;
+ } else if next == ':' {
+ // Eat the ':' character.
+ assert_eq!(it.next().unwrap(), ':');
+ break;
}
- Some(result)
- };
-
- if let Some(referenced_field) = eat_argument() {
- referenced_fields.insert(referenced_field);
}
+ // Eat until (and including) the matching '}'
+ while it.next()? != '}' {
+ continue;
+ }
+ Some(result)
+ };
+
+ if let Some(referenced_field) = eat_argument() {
+ referenced_fields.insert(referenced_field);
}
}
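
The refactor above flattens the scanner by handling non-`{` characters and `{{` escapes with early `continue`s before extracting the argument name. A self-contained sketch of equivalent scanning logic, simplified and outside the derive machinery:

```rust
use std::collections::BTreeSet;

/// Collects the argument names referenced by a format-style string,
/// skipping `{{` escapes and ignoring any `:format_spec` suffix.
fn referenced_fields(s: &str) -> BTreeSet<String> {
    let mut out = BTreeSet::new();
    let mut it = s.chars().peekable();
    while let Some(c) = it.next() {
        if c != '{' {
            continue;
        }
        // `{{` is an escaped brace, not an argument.
        if it.peek() == Some(&'{') {
            it.next();
            continue;
        }
        // Eat the argument name, stopping at the format spec or closing brace.
        let mut name = String::new();
        while let Some(&next) = it.peek() {
            if next == '}' || next == ':' {
                break;
            }
            name.push(it.next().unwrap());
        }
        // Eat up to and including the closing `}` (this also skips the format spec).
        while matches!(it.next(), Some(ch) if ch != '}') {}
        out.insert(name);
    }
    out
}

fn main() {
    let fields = referenced_fields("expected `{expected}`, found `{found:?}` {{literal}}");
    assert_eq!(fields, BTreeSet::from(["expected".into(), "found".into()]));
}
```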
@@ -298,6 +342,7 @@ pub(crate) trait HasFieldMap {
/// `Applicability` of a suggestion - mirrors `rustc_errors::Applicability` - and used to represent
/// the user's selection of applicability if specified in an attribute.
+#[derive(Clone, Copy)]
pub(crate) enum Applicability {
MachineApplicable,
MaybeIncorrect,
@@ -340,17 +385,366 @@ impl quote::ToTokens for Applicability {
/// Build the mapping of field names to fields. This allows attributes to peek values from
/// other fields.
-pub(crate) fn build_field_mapping<'a>(structure: &Structure<'a>) -> HashMap<String, TokenStream> {
- let mut fields_map = HashMap::new();
-
- let ast = structure.ast();
- if let syn::Data::Struct(syn::DataStruct { fields, .. }) = &ast.data {
- for field in fields.iter() {
- if let Some(ident) = &field.ident {
- fields_map.insert(ident.to_string(), quote! { &self.#ident });
+pub(super) fn build_field_mapping<'v>(variant: &VariantInfo<'v>) -> HashMap<String, TokenStream> {
+ let mut fields_map = FieldMap::new();
+ for binding in variant.bindings() {
+ if let Some(ident) = &binding.ast().ident {
+ fields_map.insert(ident.to_string(), quote! { #binding });
+ }
+ }
+ fields_map
+}
+
+#[derive(Copy, Clone, Debug)]
+pub(super) enum AllowMultipleAlternatives {
+ No,
+ Yes,
+}
+
+/// Constructs the `format!()` invocation(s) necessary for a `#[suggestion*(code = "foo")]` or
+/// `#[suggestion*(code("foo", "bar"))]` attribute field
+pub(super) fn build_suggestion_code(
+ code_field: &Ident,
+ meta: &Meta,
+ fields: &impl HasFieldMap,
+ allow_multiple: AllowMultipleAlternatives,
+) -> TokenStream {
+ let values = match meta {
+ // `code = "foo"`
+ Meta::NameValue(MetaNameValue { lit: syn::Lit::Str(s), .. }) => vec![s],
+ // `code("foo", "bar")`
+ Meta::List(MetaList { nested, .. }) => {
+ if let AllowMultipleAlternatives::No = allow_multiple {
+ span_err(
+ meta.span().unwrap(),
+ "expected exactly one string literal for `code = ...`",
+ )
+ .emit();
+ vec![]
+ } else if nested.is_empty() {
+ span_err(
+ meta.span().unwrap(),
+ "expected at least one string literal for `code(...)`",
+ )
+ .emit();
+ vec![]
+ } else {
+ nested
+ .into_iter()
+ .filter_map(|item| {
+ if let NestedMeta::Lit(syn::Lit::Str(s)) = item {
+ Some(s)
+ } else {
+ span_err(
+ item.span().unwrap(),
+ "`code(...)` must contain only string literals",
+ )
+ .emit();
+ None
+ }
+ })
+ .collect()
}
}
+ _ => {
+ span_err(
+ meta.span().unwrap(),
+ r#"`code = "..."`/`code(...)` must contain only string literals"#,
+ )
+ .emit();
+ vec![]
+ }
+ };
+
+ if let AllowMultipleAlternatives::Yes = allow_multiple {
+ let formatted_strings: Vec<_> = values
+ .into_iter()
+ .map(|value| fields.build_format(&value.value(), value.span()))
+ .collect();
+ quote! { let #code_field = [#(#formatted_strings),*].into_iter(); }
+ } else if let [value] = values.as_slice() {
+ let formatted_str = fields.build_format(&value.value(), value.span());
+ quote! { let #code_field = #formatted_str; }
+ } else {
+ // error handled previously
+ quote! { let #code_field = String::new(); }
}
+}
- fields_map
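
What this produces, roughly: for `code = "..."` (or whenever alternatives are disallowed) a single formatted string is bound to the generated ident, while `code("...", "...")` with `AllowMultipleAlternatives::Yes` yields an iterator over the formatted alternatives. A hedged sketch of the emitted statements, with `__code_0`/`__code_1` as generated idents and the `format!` bodies elided:

```rust
// from `code = "..."` or `AllowMultipleAlternatives::No`
let __code_0 = format!(/* formatted code template */);

// from `code("...", "...")` with `AllowMultipleAlternatives::Yes`
let __code_1 = [
    format!(/* first alternative */),
    format!(/* second alternative */),
].into_iter();
```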
+/// Possible styles for suggestion subdiagnostics.
+#[derive(Clone, Copy)]
+pub(super) enum SuggestionKind {
+ /// `#[suggestion]`
+ Normal,
+ /// `#[suggestion_short]`
+ Short,
+ /// `#[suggestion_hidden]`
+ Hidden,
+ /// `#[suggestion_verbose]`
+ Verbose,
+}
+
+impl FromStr for SuggestionKind {
+ type Err = ();
+
+ fn from_str(s: &str) -> Result<Self, Self::Err> {
+ match s {
+ "" => Ok(SuggestionKind::Normal),
+ "_short" => Ok(SuggestionKind::Short),
+ "_hidden" => Ok(SuggestionKind::Hidden),
+ "_verbose" => Ok(SuggestionKind::Verbose),
+ _ => Err(()),
+ }
+ }
+}
+
+impl SuggestionKind {
+ pub fn to_suggestion_style(&self) -> TokenStream {
+ match self {
+ SuggestionKind::Normal => {
+ quote! { rustc_errors::SuggestionStyle::ShowCode }
+ }
+ SuggestionKind::Short => {
+ quote! { rustc_errors::SuggestionStyle::HideCodeInline }
+ }
+ SuggestionKind::Hidden => {
+ quote! { rustc_errors::SuggestionStyle::HideCodeAlways }
+ }
+ SuggestionKind::Verbose => {
+ quote! { rustc_errors::SuggestionStyle::ShowAlways }
+ }
+ }
+ }
+}
+
+/// Types of subdiagnostics that can be created using attributes
+#[derive(Clone)]
+pub(super) enum SubdiagnosticKind {
+ /// `#[label(...)]`
+ Label,
+ /// `#[note(...)]`
+ Note,
+ /// `#[help(...)]`
+ Help,
+ /// `#[warning(...)]`
+ Warn,
+ /// `#[suggestion{,_short,_hidden,_verbose}]`
+ Suggestion {
+ suggestion_kind: SuggestionKind,
+ applicability: SpannedOption<Applicability>,
+ /// Identifier for the variable used for formatted code, e.g. `__code_0`. Enables separation
+ /// of formatting and diagnostic emission so that `set_arg` calls can happen in-between.
+ code_field: syn::Ident,
+ /// Initialization logic for `code_field`'s variable, e.g.
+ /// `let __formatted_code = /* whatever */;`
+ code_init: TokenStream,
+ },
+ /// `#[multipart_suggestion{,_short,_hidden,_verbose}]`
+ MultipartSuggestion {
+ suggestion_kind: SuggestionKind,
+ applicability: SpannedOption<Applicability>,
+ },
+}
+
+impl SubdiagnosticKind {
+ /// Constructs a `SubdiagnosticKind` from a field or type attribute such as `#[note]`,
+ /// `#[error(parser::add_paren)]` or `#[suggestion(code = "...")]`. Returns the
+ /// `SubdiagnosticKind` and the diagnostic slug, if specified.
+ pub(super) fn from_attr(
+ attr: &Attribute,
+ fields: &impl HasFieldMap,
+ ) -> Result<Option<(SubdiagnosticKind, Option<Path>)>, DiagnosticDeriveError> {
+ // Always allow documentation comments.
+ if is_doc_comment(attr) {
+ return Ok(None);
+ }
+
+ let span = attr.span().unwrap();
+
+ let name = attr.path.segments.last().unwrap().ident.to_string();
+ let name = name.as_str();
+
+ let meta = attr.parse_meta()?;
+ let mut kind = match name {
+ "label" => SubdiagnosticKind::Label,
+ "note" => SubdiagnosticKind::Note,
+ "help" => SubdiagnosticKind::Help,
+ "warning" => SubdiagnosticKind::Warn,
+ _ => {
+ if let Some(suggestion_kind) =
+ name.strip_prefix("suggestion").and_then(|s| s.parse().ok())
+ {
+ SubdiagnosticKind::Suggestion {
+ suggestion_kind,
+ applicability: None,
+ code_field: new_code_ident(),
+ code_init: TokenStream::new(),
+ }
+ } else if let Some(suggestion_kind) =
+ name.strip_prefix("multipart_suggestion").and_then(|s| s.parse().ok())
+ {
+ SubdiagnosticKind::MultipartSuggestion { suggestion_kind, applicability: None }
+ } else {
+ throw_invalid_attr!(attr, &meta);
+ }
+ }
+ };
+
+ let nested = match meta {
+ Meta::List(MetaList { ref nested, .. }) => {
+ // An attribute with properties, such as `#[suggestion(code = "...")]` or
+ // `#[error(some::slug)]`
+ nested
+ }
+ Meta::Path(_) => {
+ // An attribute without a slug or other properties, such as `#[note]` - return
+ // without further processing.
+ //
+ // Only allow this if there are no mandatory properties, such as `code = "..."` in
+ // `#[suggestion(...)]`
+ match kind {
+ SubdiagnosticKind::Label
+ | SubdiagnosticKind::Note
+ | SubdiagnosticKind::Help
+ | SubdiagnosticKind::Warn
+ | SubdiagnosticKind::MultipartSuggestion { .. } => {
+ return Ok(Some((kind, None)));
+ }
+ SubdiagnosticKind::Suggestion { .. } => {
+ throw_span_err!(span, "suggestion without `code = \"...\"`")
+ }
+ }
+ }
+ _ => {
+ throw_invalid_attr!(attr, &meta)
+ }
+ };
+
+ let mut code = None;
+
+ let mut nested_iter = nested.into_iter().peekable();
+
+ // Peek at the first nested attribute: if it's a slug path, consume it.
+ let slug = if let Some(NestedMeta::Meta(Meta::Path(path))) = nested_iter.peek() {
+ let path = path.clone();
+ // Advance the iterator.
+ nested_iter.next();
+ Some(path)
+ } else {
+ None
+ };
+
+ for nested_attr in nested_iter {
+ let meta = match nested_attr {
+ NestedMeta::Meta(ref meta) => meta,
+ NestedMeta::Lit(_) => {
+ invalid_nested_attr(attr, &nested_attr).emit();
+ continue;
+ }
+ };
+
+ let span = meta.span().unwrap();
+ let nested_name = meta.path().segments.last().unwrap().ident.to_string();
+ let nested_name = nested_name.as_str();
+
+ let string_value = match meta {
+ Meta::NameValue(MetaNameValue { lit: syn::Lit::Str(value), .. }) => Some(value),
+
+ Meta::Path(_) => throw_invalid_nested_attr!(attr, &nested_attr, |diag| {
+ diag.help("a diagnostic slug must be the first argument to the attribute")
+ }),
+ _ => None,
+ };
+
+ match (nested_name, &mut kind) {
+ ("code", SubdiagnosticKind::Suggestion { code_field, .. }) => {
+ let code_init = build_suggestion_code(
+ code_field,
+ meta,
+ fields,
+ AllowMultipleAlternatives::Yes,
+ );
+ code.set_once(code_init, span);
+ }
+ (
+ "applicability",
+ SubdiagnosticKind::Suggestion { ref mut applicability, .. }
+ | SubdiagnosticKind::MultipartSuggestion { ref mut applicability, .. },
+ ) => {
+ let Some(value) = string_value else {
+ invalid_nested_attr(attr, &nested_attr).emit();
+ continue;
+ };
+
+ let value = Applicability::from_str(&value.value()).unwrap_or_else(|()| {
+ span_err(span, "invalid applicability").emit();
+ Applicability::Unspecified
+ });
+ applicability.set_once(value, span);
+ }
+
+ // Invalid nested attribute
+ (_, SubdiagnosticKind::Suggestion { .. }) => {
+ invalid_nested_attr(attr, &nested_attr)
+ .help("only `code` and `applicability` are valid nested attributes")
+ .emit();
+ }
+ (_, SubdiagnosticKind::MultipartSuggestion { .. }) => {
+ invalid_nested_attr(attr, &nested_attr)
+ .help("only `applicability` is a valid nested attributes")
+ .emit()
+ }
+ _ => {
+ invalid_nested_attr(attr, &nested_attr).emit();
+ }
+ }
+ }
+
+ match kind {
+ SubdiagnosticKind::Suggestion { ref code_field, ref mut code_init, .. } => {
+ *code_init = if let Some(init) = code.value() {
+ init
+ } else {
+ span_err(span, "suggestion without `code = \"...\"`").emit();
+ quote! { let #code_field = std::iter::empty(); }
+ };
+ }
+ SubdiagnosticKind::Label
+ | SubdiagnosticKind::Note
+ | SubdiagnosticKind::Help
+ | SubdiagnosticKind::Warn
+ | SubdiagnosticKind::MultipartSuggestion { .. } => {}
+ }
+
+ Ok(Some((kind, slug)))
+ }
+}
+
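
In other words, `from_attr` recognizes attribute shapes along these lines; the slugs, code templates, and applicabilities below are illustrative only:

```rust
#[label(parser_expected_identifier)]   // kind + slug
#[note]                                // bare kind, no slug or properties
#[help(parser_use_latest_edition)]
#[warning(parser_unexpected_token)]
#[suggestion(parser_raw_identifier, code = "r#{ident}", applicability = "machine-applicable")]
#[multipart_suggestion(parser_add_parentheses, applicability = "maybe-incorrect")]
```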
+impl quote::IdentFragment for SubdiagnosticKind {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match self {
+ SubdiagnosticKind::Label => write!(f, "label"),
+ SubdiagnosticKind::Note => write!(f, "note"),
+ SubdiagnosticKind::Help => write!(f, "help"),
+ SubdiagnosticKind::Warn => write!(f, "warn"),
+ SubdiagnosticKind::Suggestion { .. } => write!(f, "suggestions_with_style"),
+ SubdiagnosticKind::MultipartSuggestion { .. } => {
+ write!(f, "multipart_suggestion_with_style")
+ }
+ }
+ }
+
+ fn span(&self) -> Option<proc_macro2::Span> {
+ None
+ }
+}
+
+/// Returns `true` if `field` should generate a `set_arg` call rather than any other diagnostic
+/// call (like `span_label`).
+pub(super) fn should_generate_set_arg(field: &Field) -> bool {
+ field.attrs.is_empty()
+}
+
+pub(super) fn is_doc_comment(attr: &Attribute) -> bool {
+ attr.path.segments.last().unwrap().ident.to_string() == "doc"
}
diff --git a/compiler/rustc_macros/src/lib.rs b/compiler/rustc_macros/src/lib.rs
index ab509b26f..36bda3e0f 100644
--- a/compiler/rustc_macros/src/lib.rs
+++ b/compiler/rustc_macros/src/lib.rs
@@ -1,9 +1,10 @@
#![feature(allow_internal_unstable)]
-#![feature(let_else)]
#![feature(never_type)]
#![feature(proc_macro_diagnostic)]
#![feature(proc_macro_span)]
#![allow(rustc::default_hash_types)]
+#![deny(rustc::untranslatable_diagnostic)]
+#![deny(rustc::diagnostic_outside_of_impl)]
#![recursion_limit = "128"]
use synstructure::decl_derive;
@@ -65,10 +66,10 @@ pub fn newtype_index(input: TokenStream) -> TokenStream {
/// ..where `typeck.ftl` has the following contents..
///
/// ```fluent
-/// typeck-field-multiply-specified-in-initializer =
+/// typeck_field_multiply_specified_in_initializer =
/// field `{$ident}` specified more than once
/// .label = used more than once
-/// .label-previous-use = first use of `{$ident}`
+/// .label_previous_use = first use of `{$ident}`
/// ```
/// ...then the macro will parse the Fluent resource, emitting a diagnostic if it fails to do so, and
/// will generate the following code:
@@ -81,11 +82,11 @@ pub fn newtype_index(input: TokenStream) -> TokenStream {
/// mod fluent_generated {
/// mod typeck {
/// pub const field_multiply_specified_in_initializer: DiagnosticMessage =
-/// DiagnosticMessage::fluent("typeck-field-multiply-specified-in-initializer");
+/// DiagnosticMessage::fluent("typeck_field_multiply_specified_in_initializer");
/// pub const field_multiply_specified_in_initializer_label_previous_use: DiagnosticMessage =
/// DiagnosticMessage::fluent_attr(
-/// "typeck-field-multiply-specified-in-initializer",
-/// "previous-use-label"
+/// "typeck_field_multiply_specified_in_initializer",
+/// "previous_use_label"
/// );
/// }
/// }
@@ -125,14 +126,12 @@ decl_derive!([TypeFoldable, attributes(type_foldable)] => type_foldable::type_fo
decl_derive!([TypeVisitable, attributes(type_visitable)] => type_visitable::type_visitable_derive);
decl_derive!([Lift, attributes(lift)] => lift::lift_derive);
decl_derive!(
- [SessionDiagnostic, attributes(
+ [Diagnostic, attributes(
// struct attributes
- warning,
- error,
- lint,
+ diag,
help,
note,
- warn_,
+ warning,
// field attributes
skip_arg,
primary_span,
@@ -146,12 +145,10 @@ decl_derive!(
decl_derive!(
[LintDiagnostic, attributes(
// struct attributes
- warning,
- error,
- lint,
+ diag,
help,
note,
- warn_,
+ warning,
// field attributes
skip_arg,
primary_span,
@@ -163,18 +160,23 @@ decl_derive!(
suggestion_verbose)] => diagnostics::lint_diagnostic_derive
);
decl_derive!(
- [SessionSubdiagnostic, attributes(
+ [Subdiagnostic, attributes(
// struct/variant attributes
label,
help,
note,
- warn_,
+ warning,
suggestion,
suggestion_short,
suggestion_hidden,
suggestion_verbose,
+ multipart_suggestion,
+ multipart_suggestion_short,
+ multipart_suggestion_hidden,
+ multipart_suggestion_verbose,
// field attributes
skip_arg,
primary_span,
+ suggestion_part,
applicability)] => diagnostics::session_subdiagnostic_derive
);
diff --git a/compiler/rustc_macros/src/query.rs b/compiler/rustc_macros/src/query.rs
index a69126533..7cefafef9 100644
--- a/compiler/rustc_macros/src/query.rs
+++ b/compiler/rustc_macros/src/query.rs
@@ -1,139 +1,17 @@
use proc_macro::TokenStream;
-use proc_macro2::{Delimiter, TokenTree};
use quote::{quote, quote_spanned};
use syn::parse::{Parse, ParseStream, Result};
use syn::punctuated::Punctuated;
use syn::spanned::Spanned;
use syn::{
- braced, parenthesized, parse_macro_input, parse_quote, AttrStyle, Attribute, Block, Error,
- Expr, Ident, ReturnType, Token, Type,
+ braced, parenthesized, parse_macro_input, parse_quote, token, AttrStyle, Attribute, Block,
+ Error, Expr, Ident, Pat, ReturnType, Token, Type,
};
mod kw {
syn::custom_keyword!(query);
}
-/// Ident or a wildcard `_`.
-struct IdentOrWild(Ident);
-
-impl Parse for IdentOrWild {
- fn parse(input: ParseStream<'_>) -> Result<Self> {
- Ok(if input.peek(Token![_]) {
- let underscore = input.parse::<Token![_]>()?;
- IdentOrWild(Ident::new("_", underscore.span()))
- } else {
- IdentOrWild(input.parse()?)
- })
- }
-}
-
-/// A modifier for a query
-enum QueryModifier {
- /// The description of the query.
- Desc(Option<Ident>, Punctuated<Expr, Token![,]>),
-
- /// Use this type for the in-memory cache.
- Storage(Type),
-
- /// Cache the query to disk if the `Expr` returns true.
- Cache(Option<IdentOrWild>, Block),
-
- /// Custom code to load the query from disk.
- LoadCached(Ident, Ident, Block),
-
- /// A cycle error for this query aborting the compilation with a fatal error.
- FatalCycle(Ident),
-
- /// A cycle error results in a delay_bug call
- CycleDelayBug(Ident),
-
- /// Don't hash the result, instead just mark a query red if it runs
- NoHash(Ident),
-
- /// Generate a dep node based on the dependencies of the query
- Anon(Ident),
-
- /// Always evaluate the query, ignoring its dependencies
- EvalAlways(Ident),
-
- /// Use a separate query provider for local and extern crates
- SeparateProvideExtern(Ident),
-
- /// Always remap the ParamEnv's constness before hashing and passing to the query provider
- RemapEnvConstness(Ident),
-}
-
-impl Parse for QueryModifier {
- fn parse(input: ParseStream<'_>) -> Result<Self> {
- let modifier: Ident = input.parse()?;
- if modifier == "desc" {
- // Parse a description modifier like:
- // `desc { |tcx| "foo {}", tcx.item_path(key) }`
- let attr_content;
- braced!(attr_content in input);
- let tcx = if attr_content.peek(Token![|]) {
- attr_content.parse::<Token![|]>()?;
- let tcx = attr_content.parse()?;
- attr_content.parse::<Token![|]>()?;
- Some(tcx)
- } else {
- None
- };
- let desc = attr_content.parse_terminated(Expr::parse)?;
- Ok(QueryModifier::Desc(tcx, desc))
- } else if modifier == "cache_on_disk_if" {
- // Parse a cache modifier like:
- // `cache(tcx, value) { |tcx| key.is_local() }`
- let has_args = if let TokenTree::Group(group) = input.fork().parse()? {
- group.delimiter() == Delimiter::Parenthesis
- } else {
- false
- };
- let args = if has_args {
- let args;
- parenthesized!(args in input);
- let tcx = args.parse()?;
- Some(tcx)
- } else {
- None
- };
- let block = input.parse()?;
- Ok(QueryModifier::Cache(args, block))
- } else if modifier == "load_cached" {
- // Parse a load_cached modifier like:
- // `load_cached(tcx, id) { tcx.on_disk_cache.try_load_query_result(tcx, id) }`
- let args;
- parenthesized!(args in input);
- let tcx = args.parse()?;
- args.parse::<Token![,]>()?;
- let id = args.parse()?;
- let block = input.parse()?;
- Ok(QueryModifier::LoadCached(tcx, id, block))
- } else if modifier == "storage" {
- let args;
- parenthesized!(args in input);
- let ty = args.parse()?;
- Ok(QueryModifier::Storage(ty))
- } else if modifier == "fatal_cycle" {
- Ok(QueryModifier::FatalCycle(modifier))
- } else if modifier == "cycle_delay_bug" {
- Ok(QueryModifier::CycleDelayBug(modifier))
- } else if modifier == "no_hash" {
- Ok(QueryModifier::NoHash(modifier))
- } else if modifier == "anon" {
- Ok(QueryModifier::Anon(modifier))
- } else if modifier == "eval_always" {
- Ok(QueryModifier::EvalAlways(modifier))
- } else if modifier == "separate_provide_extern" {
- Ok(QueryModifier::SeparateProvideExtern(modifier))
- } else if modifier == "remap_env_constness" {
- Ok(QueryModifier::RemapEnvConstness(modifier))
- } else {
- Err(Error::new(modifier.span(), "unknown query modifier"))
- }
- }
-}
-
/// Ensures only doc comment attributes are used
fn check_attributes(attrs: Vec<Attribute>) -> Result<Vec<Attribute>> {
let inner = |attr: Attribute| {
@@ -154,16 +32,16 @@ fn check_attributes(attrs: Vec<Attribute>) -> Result<Vec<Attribute>> {
/// A compiler query. `query ... { ... }`
struct Query {
doc_comments: Vec<Attribute>,
- modifiers: List<QueryModifier>,
+ modifiers: QueryModifiers,
name: Ident,
- key: IdentOrWild,
+ key: Pat,
arg: Type,
result: ReturnType,
}
impl Parse for Query {
fn parse(input: ParseStream<'_>) -> Result<Self> {
- let doc_comments = check_attributes(input.call(Attribute::parse_outer)?)?;
+ let mut doc_comments = check_attributes(input.call(Attribute::parse_outer)?)?;
// Parse the query declaration. Like `query type_of(key: DefId) -> Ty<'tcx>`
input.parse::<kw::query>()?;
@@ -178,7 +56,13 @@ impl Parse for Query {
// Parse the query modifiers
let content;
braced!(content in input);
- let modifiers = content.parse()?;
+ let modifiers = parse_query_modifiers(&content)?;
+
+ // If there are no doc-comments, give at least some idea of what
+ // it does by showing the query description.
+ if doc_comments.is_empty() {
+ doc_comments.push(doc_comment_from_desc(&modifiers.desc.1)?);
+ }
Ok(Query { doc_comments, modifiers, name, key, arg, result })
}
@@ -202,13 +86,10 @@ struct QueryModifiers {
desc: (Option<Ident>, Punctuated<Expr, Token![,]>),
/// Use this type for the in-memory cache.
- storage: Option<Type>,
+ arena_cache: Option<Ident>,
/// Cache the query to disk if the `Block` returns true.
- cache: Option<(Option<IdentOrWild>, Block)>,
-
- /// Custom code to load the query from disk.
- load_cached: Option<(Ident, Ident, Block)>,
+ cache: Option<(Option<Pat>, Block)>,
/// A cycle error for this query aborting the compilation with a fatal error.
fatal_cycle: Option<Ident>,
@@ -222,9 +103,12 @@ struct QueryModifiers {
/// Generate a dep node based on the dependencies of the query
anon: Option<Ident>,
- // Always evaluate the query, ignoring its dependencies
+ /// Always evaluate the query, ignoring its dependencies
eval_always: Option<Ident>,
+ /// Whether the query has a call depth limit
+ depth_limit: Option<Ident>,
+
/// Use a separate query provider for local and extern crates
separate_provide_extern: Option<Ident>,
@@ -232,10 +116,8 @@ struct QueryModifiers {
remap_env_constness: Option<Ident>,
}
-/// Process query modifiers into a struct, erroring on duplicates
-fn process_modifiers(query: &mut Query) -> QueryModifiers {
- let mut load_cached = None;
- let mut storage = None;
+fn parse_query_modifiers(input: ParseStream<'_>) -> Result<QueryModifiers> {
+ let mut arena_cache = None;
let mut cache = None;
let mut desc = None;
let mut fatal_cycle = None;
@@ -243,121 +125,77 @@ fn process_modifiers(query: &mut Query) -> QueryModifiers {
let mut no_hash = None;
let mut anon = None;
let mut eval_always = None;
+ let mut depth_limit = None;
let mut separate_provide_extern = None;
let mut remap_env_constness = None;
- for modifier in query.modifiers.0.drain(..) {
- match modifier {
- QueryModifier::LoadCached(tcx, id, block) => {
- if load_cached.is_some() {
- panic!("duplicate modifier `load_cached` for query `{}`", query.name);
- }
- load_cached = Some((tcx, id, block));
- }
- QueryModifier::Storage(ty) => {
- if storage.is_some() {
- panic!("duplicate modifier `storage` for query `{}`", query.name);
- }
- storage = Some(ty);
- }
- QueryModifier::Cache(args, expr) => {
- if cache.is_some() {
- panic!("duplicate modifier `cache` for query `{}`", query.name);
- }
- cache = Some((args, expr));
- }
- QueryModifier::Desc(tcx, list) => {
- if desc.is_some() {
- panic!("duplicate modifier `desc` for query `{}`", query.name);
- }
- // If there are no doc-comments, give at least some idea of what
- // it does by showing the query description.
- if query.doc_comments.is_empty() {
- use ::syn::*;
- let mut list = list.iter();
- let format_str: String = match list.next() {
- Some(&Expr::Lit(ExprLit { lit: Lit::Str(ref lit_str), .. })) => {
- lit_str.value().replace("`{}`", "{}") // We add them later anyways for consistency
- }
- _ => panic!("Expected a string literal"),
- };
- let mut fmt_fragments = format_str.split("{}");
- let mut doc_string = fmt_fragments.next().unwrap().to_string();
- list.map(::quote::ToTokens::to_token_stream).zip(fmt_fragments).for_each(
- |(tts, next_fmt_fragment)| {
- use ::core::fmt::Write;
- write!(
- &mut doc_string,
- " `{}` {}",
- tts.to_string().replace(" . ", "."),
- next_fmt_fragment,
- )
- .unwrap();
- },
- );
- let doc_string = format!(
- "[query description - consider adding a doc-comment!] {}",
- doc_string
- );
- let comment = parse_quote! {
- #[doc = #doc_string]
- };
- query.doc_comments.push(comment);
- }
- desc = Some((tcx, list));
- }
- QueryModifier::FatalCycle(ident) => {
- if fatal_cycle.is_some() {
- panic!("duplicate modifier `fatal_cycle` for query `{}`", query.name);
- }
- fatal_cycle = Some(ident);
- }
- QueryModifier::CycleDelayBug(ident) => {
- if cycle_delay_bug.is_some() {
- panic!("duplicate modifier `cycle_delay_bug` for query `{}`", query.name);
- }
- cycle_delay_bug = Some(ident);
- }
- QueryModifier::NoHash(ident) => {
- if no_hash.is_some() {
- panic!("duplicate modifier `no_hash` for query `{}`", query.name);
- }
- no_hash = Some(ident);
- }
- QueryModifier::Anon(ident) => {
- if anon.is_some() {
- panic!("duplicate modifier `anon` for query `{}`", query.name);
- }
- anon = Some(ident);
- }
- QueryModifier::EvalAlways(ident) => {
- if eval_always.is_some() {
- panic!("duplicate modifier `eval_always` for query `{}`", query.name);
- }
- eval_always = Some(ident);
- }
- QueryModifier::SeparateProvideExtern(ident) => {
- if separate_provide_extern.is_some() {
- panic!(
- "duplicate modifier `separate_provide_extern` for query `{}`",
- query.name
- );
- }
- separate_provide_extern = Some(ident);
- }
- QueryModifier::RemapEnvConstness(ident) => {
- if remap_env_constness.is_some() {
- panic!("duplicate modifier `remap_env_constness` for query `{}`", query.name);
+
+ while !input.is_empty() {
+ let modifier: Ident = input.parse()?;
+
+ macro_rules! try_insert {
+ ($name:ident = $expr:expr) => {
+ if $name.is_some() {
+ return Err(Error::new(modifier.span(), "duplicate modifier"));
}
- remap_env_constness = Some(ident)
- }
+ $name = Some($expr);
+ };
+ }
+
+ if modifier == "desc" {
+ // Parse a description modifier like:
+ // `desc { |tcx| "foo {}", tcx.item_path(key) }`
+ let attr_content;
+ braced!(attr_content in input);
+ let tcx = if attr_content.peek(Token![|]) {
+ attr_content.parse::<Token![|]>()?;
+ let tcx = attr_content.parse()?;
+ attr_content.parse::<Token![|]>()?;
+ Some(tcx)
+ } else {
+ None
+ };
+ let list = attr_content.parse_terminated(Expr::parse)?;
+ try_insert!(desc = (tcx, list));
+ } else if modifier == "cache_on_disk_if" {
+ // Parse a cache modifier like:
+ // `cache_on_disk_if(tcx) { key.is_local() }`
+ let args = if input.peek(token::Paren) {
+ let args;
+ parenthesized!(args in input);
+ let tcx = args.parse()?;
+ Some(tcx)
+ } else {
+ None
+ };
+ let block = input.parse()?;
+ try_insert!(cache = (args, block));
+ } else if modifier == "arena_cache" {
+ try_insert!(arena_cache = modifier);
+ } else if modifier == "fatal_cycle" {
+ try_insert!(fatal_cycle = modifier);
+ } else if modifier == "cycle_delay_bug" {
+ try_insert!(cycle_delay_bug = modifier);
+ } else if modifier == "no_hash" {
+ try_insert!(no_hash = modifier);
+ } else if modifier == "anon" {
+ try_insert!(anon = modifier);
+ } else if modifier == "eval_always" {
+ try_insert!(eval_always = modifier);
+ } else if modifier == "depth_limit" {
+ try_insert!(depth_limit = modifier);
+ } else if modifier == "separate_provide_extern" {
+ try_insert!(separate_provide_extern = modifier);
+ } else if modifier == "remap_env_constness" {
+ try_insert!(remap_env_constness = modifier);
+ } else {
+ return Err(Error::new(modifier.span(), "unknown query modifier"));
}
}
- let desc = desc.unwrap_or_else(|| {
- panic!("no description provided for query `{}`", query.name);
- });
- QueryModifiers {
- load_cached,
- storage,
+ let Some(desc) = desc else {
+ return Err(input.error("no description provided"));
+ };
+ Ok(QueryModifiers {
+ arena_cache,
cache,
desc,
fatal_cycle,
@@ -365,86 +203,90 @@ fn process_modifiers(query: &mut Query) -> QueryModifiers {
no_hash,
anon,
eval_always,
+ depth_limit,
separate_provide_extern,
remap_env_constness,
- }
+ })
+}
+
+fn doc_comment_from_desc(list: &Punctuated<Expr, token::Comma>) -> Result<Attribute> {
+ use ::syn::*;
+ let mut iter = list.iter();
+ let format_str: String = match iter.next() {
+ Some(&Expr::Lit(ExprLit { lit: Lit::Str(ref lit_str), .. })) => {
+ lit_str.value().replace("`{}`", "{}") // We add them later anyways for consistency
+ }
+ _ => return Err(Error::new(list.span(), "Expected a string literal")),
+ };
+ let mut fmt_fragments = format_str.split("{}");
+ let mut doc_string = fmt_fragments.next().unwrap().to_string();
+ iter.map(::quote::ToTokens::to_token_stream).zip(fmt_fragments).for_each(
+ |(tts, next_fmt_fragment)| {
+ use ::core::fmt::Write;
+ write!(
+ &mut doc_string,
+ " `{}` {}",
+ tts.to_string().replace(" . ", "."),
+ next_fmt_fragment,
+ )
+ .unwrap();
+ },
+ );
+ let doc_string = format!("[query description - consider adding a doc-comment!] {}", doc_string);
+ Ok(parse_quote! { #[doc = #doc_string] })
}
/// Add the impl of QueryDescription for the query to `impls` if one is requested
-fn add_query_description_impl(
+fn add_query_desc_cached_impl(
query: &Query,
- modifiers: QueryModifiers,
- impls: &mut proc_macro2::TokenStream,
+ descs: &mut proc_macro2::TokenStream,
+ cached: &mut proc_macro2::TokenStream,
) {
- let name = &query.name;
- let key = &query.key.0;
+ let Query { name, key, modifiers, .. } = &query;
// Find out if we should cache the query on disk
let cache = if let Some((args, expr)) = modifiers.cache.as_ref() {
- let try_load_from_disk = if let Some((tcx, id, block)) = modifiers.load_cached.as_ref() {
- // Use custom code to load the query from disk
- quote! {
- const TRY_LOAD_FROM_DISK: Option<fn(QueryCtxt<$tcx>, SerializedDepNodeIndex) -> Option<Self::Value>>
- = Some(|#tcx, #id| { #block });
- }
- } else {
- // Use the default code to load the query from disk
- quote! {
- const TRY_LOAD_FROM_DISK: Option<fn(QueryCtxt<$tcx>, SerializedDepNodeIndex) -> Option<Self::Value>>
- = Some(|tcx, id| tcx.on_disk_cache().as_ref()?.try_load_query_result(*tcx, id));
- }
- };
-
- let tcx = args
- .as_ref()
- .map(|t| {
- let t = &t.0;
- quote! { #t }
- })
- .unwrap_or_else(|| quote! { _ });
+ let tcx = args.as_ref().map(|t| quote! { #t }).unwrap_or_else(|| quote! { _ });
// expr is a `Block`, meaning that `{ #expr }` gets expanded
// to `{ { stmts... } }`, which triggers the `unused_braces` lint.
+ // we're taking `key` by reference, but some rustc types usually prefer being passed by value
quote! {
- #[allow(unused_variables, unused_braces)]
+ #[allow(unused_variables, unused_braces, rustc::pass_by_value)]
#[inline]
- fn cache_on_disk(#tcx: TyCtxt<'tcx>, #key: &Self::Key) -> bool {
+ pub fn #name<'tcx>(#tcx: TyCtxt<'tcx>, #key: &crate::ty::query::query_keys::#name<'tcx>) -> bool {
#expr
}
-
- #try_load_from_disk
}
} else {
- if modifiers.load_cached.is_some() {
- panic!("load_cached modifier on query `{}` without a cache modifier", name);
- }
quote! {
+ // we're taking `key` by reference, but some rustc types usually prefer being passed by value
+ #[allow(rustc::pass_by_value)]
#[inline]
- fn cache_on_disk(_: TyCtxt<'tcx>, _: &Self::Key) -> bool {
+ pub fn #name<'tcx>(_: TyCtxt<'tcx>, _: &crate::ty::query::query_keys::#name<'tcx>) -> bool {
false
}
-
- const TRY_LOAD_FROM_DISK: Option<fn(QueryCtxt<$tcx>, SerializedDepNodeIndex) -> Option<Self::Value>> = None;
}
};
- let (tcx, desc) = modifiers.desc;
+ let (tcx, desc) = &modifiers.desc;
let tcx = tcx.as_ref().map_or_else(|| quote! { _ }, |t| quote! { #t });
let desc = quote! {
#[allow(unused_variables)]
- fn describe(tcx: QueryCtxt<$tcx>, key: Self::Key) -> String {
- let (#tcx, #key) = (*tcx, key);
+ pub fn #name<'tcx>(tcx: TyCtxt<'tcx>, key: crate::ty::query::query_keys::#name<'tcx>) -> String {
+ let (#tcx, #key) = (tcx, key);
::rustc_middle::ty::print::with_no_trimmed_paths!(
format!(#desc)
)
}
};
- impls.extend(quote! {
- (#name<$tcx:tt>) => {
- #desc
- #cache
- };
+ descs.extend(quote! {
+ #desc
+ });
+
+ cached.extend(quote! {
+ #cache
});
}
@@ -453,59 +295,44 @@ pub fn rustc_queries(input: TokenStream) -> TokenStream {
let mut query_stream = quote! {};
let mut query_description_stream = quote! {};
- let mut dep_node_def_stream = quote! {};
- let mut cached_queries = quote! {};
+ let mut query_cached_stream = quote! {};
- for mut query in queries.0 {
- let modifiers = process_modifiers(&mut query);
- let name = &query.name;
- let arg = &query.arg;
+ for query in queries.0 {
+ let Query { name, arg, modifiers, .. } = &query;
let result_full = &query.result;
let result = match query.result {
ReturnType::Default => quote! { -> () },
_ => quote! { #result_full },
};
- if modifiers.cache.is_some() {
- cached_queries.extend(quote! {
- #name,
- });
+ let mut attributes = Vec::new();
+
+ macro_rules! passthrough {
+ ( $( $modifier:ident ),+ $(,)? ) => {
+ $( if let Some($modifier) = &modifiers.$modifier {
+ attributes.push(quote! { (#$modifier) });
+ }; )+
+ }
}
- let mut attributes = Vec::new();
+ passthrough!(
+ fatal_cycle,
+ arena_cache,
+ cycle_delay_bug,
+ no_hash,
+ anon,
+ eval_always,
+ depth_limit,
+ separate_provide_extern,
+ remap_env_constness,
+ );
- // Pass on the fatal_cycle modifier
- if let Some(fatal_cycle) = &modifiers.fatal_cycle {
- attributes.push(quote! { (#fatal_cycle) });
- };
- // Pass on the storage modifier
- if let Some(ref ty) = modifiers.storage {
- let span = ty.span();
- attributes.push(quote_spanned! {span=> (storage #ty) });
- };
- // Pass on the cycle_delay_bug modifier
- if let Some(cycle_delay_bug) = &modifiers.cycle_delay_bug {
- attributes.push(quote! { (#cycle_delay_bug) });
- };
- // Pass on the no_hash modifier
- if let Some(no_hash) = &modifiers.no_hash {
- attributes.push(quote! { (#no_hash) });
- };
- // Pass on the anon modifier
- if let Some(anon) = &modifiers.anon {
- attributes.push(quote! { (#anon) });
- };
- // Pass on the eval_always modifier
- if let Some(eval_always) = &modifiers.eval_always {
- attributes.push(quote! { (#eval_always) });
- };
- // Pass on the separate_provide_extern modifier
- if let Some(separate_provide_extern) = &modifiers.separate_provide_extern {
- attributes.push(quote! { (#separate_provide_extern) });
+ // Pass on the cache modifier
+ if modifiers.cache.is_some() {
+ attributes.push(quote! { (cache) });
}
- // Pass on the remap_env_constness modifier
- if let Some(remap_env_constness) = &modifiers.remap_env_constness {
- attributes.push(quote! { (#remap_env_constness) });
- }
// This uses the span of the query definition for the commas,
@@ -516,51 +343,34 @@ pub fn rustc_queries(input: TokenStream) -> TokenStream {
// be very useful.
let span = name.span();
let attribute_stream = quote_spanned! {span=> #(#attributes),*};
- let doc_comments = query.doc_comments.iter();
+ let doc_comments = &query.doc_comments;
// Add the query to the group
query_stream.extend(quote! {
#(#doc_comments)*
[#attribute_stream] fn #name(#arg) #result,
});
- // Create a dep node for the query
- dep_node_def_stream.extend(quote! {
- [#attribute_stream] #name(#arg),
- });
-
- add_query_description_impl(&query, modifiers, &mut query_description_stream);
+ add_query_desc_cached_impl(&query, &mut query_description_stream, &mut query_cached_stream);
}
TokenStream::from(quote! {
#[macro_export]
macro_rules! rustc_query_append {
- ([$($macro:tt)*][$($other:tt)*]) => {
- $($macro)* {
- $($other)*
-
+ ($macro:ident! $( [$($other:tt)*] )?) => {
+ $macro! {
+ $( $($other)* )?
#query_stream
-
}
}
}
- macro_rules! rustc_dep_node_append {
- ([$($macro:tt)*][$($other:tt)*]) => {
- $($macro)*(
- $($other)*
- #dep_node_def_stream
- );
- }
- }
- #[macro_export]
- macro_rules! rustc_cached_queries {
- ($($macro:tt)*) => {
- $($macro)*(#cached_queries);
- }
- }
- #[macro_export]
- macro_rules! rustc_query_description {
+ pub mod descs {
+ use super::*;
#query_description_stream
}
+ pub mod cached {
+ use super::*;
+ #query_cached_stream
+ }
})
}
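
The rewritten `parse_query_modifiers` above reads modifier idents directly off syn's `ParseStream` and turns a repeated modifier into a spanned parse error through the local `try_insert!` macro, where the old `process_modifiers` panicked. A minimal standalone sketch of that pattern, assuming only the `syn` crate as a dependency and using two made-up modifier names rather than the real query modifier set:

    use syn::parse::{Parse, ParseStream};
    use syn::{Error, Ident, Result};

    struct Modifiers {
        anon: Option<Ident>,
        eval_always: Option<Ident>,
    }

    impl Parse for Modifiers {
        fn parse(input: ParseStream<'_>) -> Result<Self> {
            let mut anon = None;
            let mut eval_always = None;
            while !input.is_empty() {
                let modifier: Ident = input.parse()?;
                // Mirrors `try_insert!`: a second occurrence becomes a parse
                // error pointing at the duplicate, not a panic.
                macro_rules! try_insert {
                    ($slot:ident) => {{
                        if $slot.is_some() {
                            return Err(Error::new(modifier.span(), "duplicate modifier"));
                        }
                        $slot = Some(modifier.clone());
                    }};
                }
                if modifier == "anon" {
                    try_insert!(anon)
                } else if modifier == "eval_always" {
                    try_insert!(eval_always)
                } else {
                    return Err(Error::new(modifier.span(), "unknown query modifier"));
                }
            }
            Ok(Modifiers { anon, eval_always })
        }
    }

    fn main() {
        let ok: Modifiers = syn::parse_str("anon eval_always").unwrap();
        assert!(ok.anon.is_some() && ok.eval_always.is_some());
        // The duplicate is rejected during parsing, with the span of the repeat.
        assert!(syn::parse_str::<Modifiers>("anon anon").is_err());
    }
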
diff --git a/compiler/rustc_macros/src/symbols.rs b/compiler/rustc_macros/src/symbols.rs
index 1b245f2a7..92590c33b 100644
--- a/compiler/rustc_macros/src/symbols.rs
+++ b/compiler/rustc_macros/src/symbols.rs
@@ -195,10 +195,10 @@ fn symbols_with_errors(input: TokenStream) -> (TokenStream, Vec<syn::Error>) {
#n,
});
}
- let _ = counter; // for future use
let output = quote! {
const SYMBOL_DIGITS_BASE: u32 = #digits_base;
+ const PREINTERNED_SYMBOLS_COUNT: u32 = #counter;
#[doc(hidden)]
#[allow(non_upper_case_globals)]
diff --git a/compiler/rustc_metadata/Cargo.toml b/compiler/rustc_metadata/Cargo.toml
index 2c5db9d8b..6d85103c9 100644
--- a/compiler/rustc_metadata/Cargo.toml
+++ b/compiler/rustc_metadata/Cargo.toml
@@ -4,7 +4,6 @@ version = "0.0.0"
edition = "2021"
[lib]
-doctest = false
[dependencies]
libloading = "0.7.1"
diff --git a/compiler/rustc_metadata/src/creader.rs b/compiler/rustc_metadata/src/creader.rs
index 708d0b1fd..cfcceecbe 100644
--- a/compiler/rustc_metadata/src/creader.rs
+++ b/compiler/rustc_metadata/src/creader.rs
@@ -1,5 +1,9 @@
//! Validates all used crates and extern libraries and loads their metadata
+use crate::errors::{
+ ConflictingGlobalAlloc, CrateNotPanicRuntime, GlobalAllocRequired, NoMultipleGlobalAlloc,
+ NoPanicStrategy, NoTransitiveNeedsDep, NotProfilerRuntime, ProfilerBuiltinsNeedsCore,
+};
use crate::locator::{CrateError, CrateLocator, CratePaths};
use crate::rmeta::{CrateDep, CrateMetadata, CrateNumMap, CrateRoot, MetadataBlob};
@@ -29,7 +33,6 @@ use proc_macro::bridge::client::ProcMacro;
use std::ops::Fn;
use std::path::Path;
use std::{cmp, env};
-use tracing::{debug, info};
#[derive(Clone)]
pub struct CStore {
@@ -263,7 +266,7 @@ impl<'a> CrateLoader<'a> {
fn existing_match(&self, name: Symbol, hash: Option<Svh>, kind: PathKind) -> Option<CrateNum> {
for (cnum, data) in self.cstore.iter_crate_data() {
if data.name() != name {
- tracing::trace!("{} did not match {}", data.name(), name);
+ trace!("{} did not match {}", data.name(), name);
continue;
}
@@ -746,15 +749,10 @@ impl<'a> CrateLoader<'a> {
// Sanity check the loaded crate to ensure it is indeed a panic runtime
// and the panic strategy is indeed what we thought it was.
if !data.is_panic_runtime() {
- self.sess.err(&format!("the crate `{}` is not a panic runtime", name));
+ self.sess.emit_err(CrateNotPanicRuntime { crate_name: name });
}
if data.required_panic_strategy() != Some(desired_strategy) {
- self.sess.err(&format!(
- "the crate `{}` does not have the panic \
- strategy `{}`",
- name,
- desired_strategy.desc()
- ));
+ self.sess.emit_err(NoPanicStrategy { crate_name: name, strategy: desired_strategy });
}
self.cstore.injected_panic_runtime = Some(cnum);
@@ -774,10 +772,7 @@ impl<'a> CrateLoader<'a> {
let name = Symbol::intern(&self.sess.opts.unstable_opts.profiler_runtime);
if name == sym::profiler_builtins && self.sess.contains_name(&krate.attrs, sym::no_core) {
- self.sess.err(
- "`profiler_builtins` crate (required by compiler options) \
- is not compatible with crate attribute `#![no_core]`",
- );
+ self.sess.emit_err(ProfilerBuiltinsNeedsCore);
}
let Some(cnum) = self.resolve_crate(name, DUMMY_SP, CrateDepKind::Implicit) else { return; };
@@ -785,18 +780,14 @@ impl<'a> CrateLoader<'a> {
// Sanity check the loaded crate to ensure it is indeed a profiler runtime
if !data.is_profiler_runtime() {
- self.sess.err(&format!("the crate `{}` is not a profiler runtime", name));
+ self.sess.emit_err(NotProfilerRuntime { crate_name: name });
}
}
fn inject_allocator_crate(&mut self, krate: &ast::Crate) {
self.cstore.has_global_allocator = match &*global_allocator_spans(&self.sess, krate) {
[span1, span2, ..] => {
- self.sess
- .struct_span_err(*span2, "cannot define multiple global allocators")
- .span_label(*span2, "cannot define a new global allocator")
- .span_label(*span1, "previous global allocator defined here")
- .emit();
+ self.sess.emit_err(NoMultipleGlobalAlloc { span2: *span2, span1: *span1 });
true
}
spans => !spans.is_empty(),
@@ -832,11 +823,10 @@ impl<'a> CrateLoader<'a> {
if data.has_global_allocator() {
match global_allocator {
Some(other_crate) => {
- self.sess.err(&format!(
- "the `#[global_allocator]` in {} conflicts with global allocator in: {}",
- other_crate,
- data.name()
- ));
+ self.sess.emit_err(ConflictingGlobalAlloc {
+ crate_name: data.name(),
+ other_crate_name: other_crate,
+ });
}
None => global_allocator = Some(data.name()),
}
@@ -855,10 +845,7 @@ impl<'a> CrateLoader<'a> {
if !self.sess.contains_name(&krate.attrs, sym::default_lib_allocator)
&& !self.cstore.iter_crate_data().any(|(_, data)| data.has_default_lib_allocator())
{
- self.sess.err(
- "no global memory allocator found but one is required; link to std or add \
- `#[global_allocator]` to a static item that implements the GlobalAlloc trait",
- );
+ self.sess.emit_err(GlobalAllocRequired);
}
self.cstore.allocator_kind = Some(AllocatorKind::Default);
}
@@ -882,14 +869,11 @@ impl<'a> CrateLoader<'a> {
for dep in self.cstore.crate_dependencies_in_reverse_postorder(krate) {
let data = self.cstore.get_crate_data(dep);
if needs_dep(&data) {
- self.sess.err(&format!(
- "the crate `{}` cannot depend \
- on a crate that needs {}, but \
- it depends on `{}`",
- self.cstore.get_crate_data(krate).name(),
- what,
- data.name()
- ));
+ self.sess.emit_err(NoTransitiveNeedsDep {
+ crate_name: self.cstore.get_crate_data(krate).name(),
+ needs_crate_name: what,
+ deps_crate_name: data.name(),
+ });
}
}
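
The creader.rs hunks above replace string-formatted `sess.err(&format!(..))` calls with typed structs such as `CrateNotPanicRuntime`, emitted through `emit_err` and rendered from a Fluent message named by the slug in `#[diag(..)]`. The snippet below is only a toy model of that shape: the trait, `Session`, and template store are invented stand-ins for the real `rustc_errors` machinery, while the slug and message text are the ones this patch introduces:

    use std::collections::HashMap;

    // Stand-in for the Diagnostic derive: each error names its Fluent slug and
    // supplies the arguments the message template interpolates.
    trait ToyDiagnostic {
        fn slug(&self) -> &'static str;
        fn args(&self) -> Vec<(&'static str, String)>;
    }

    struct CrateNotPanicRuntime {
        crate_name: String,
    }

    impl ToyDiagnostic for CrateNotPanicRuntime {
        fn slug(&self) -> &'static str {
            "metadata_crate_not_panic_runtime"
        }
        fn args(&self) -> Vec<(&'static str, String)> {
            vec![("crate_name", self.crate_name.clone())]
        }
    }

    struct Session {
        fluent: HashMap<&'static str, &'static str>,
    }

    impl Session {
        fn emit_err(&self, diag: impl ToyDiagnostic) {
            // Look up the slug's template and substitute the arguments.
            let mut msg = self.fluent[diag.slug()].to_string();
            for (key, value) in diag.args() {
                msg = msg.replace(&format!("{{${key}}}"), &value);
            }
            eprintln!("error: {msg}");
        }
    }

    fn main() {
        let mut fluent = HashMap::new();
        fluent.insert(
            "metadata_crate_not_panic_runtime",
            "the crate `{$crate_name}` is not a panic runtime",
        );
        let sess = Session { fluent };
        // prints: error: the crate `my_runtime` is not a panic runtime
        sess.emit_err(CrateNotPanicRuntime { crate_name: "my_runtime".into() });
    }
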
diff --git a/compiler/rustc_metadata/src/dependency_format.rs b/compiler/rustc_metadata/src/dependency_format.rs
index b765c34f8..6112ec9e4 100644
--- a/compiler/rustc_metadata/src/dependency_format.rs
+++ b/compiler/rustc_metadata/src/dependency_format.rs
@@ -52,6 +52,10 @@
//! than finding a number of solutions (there are normally quite a few).
use crate::creader::CStore;
+use crate::errors::{
+ BadPanicStrategy, CrateDepMultiple, IncompatiblePanicInDropStrategy, LibRequired,
+ RequiredPanicStrategy, RlibRequired, TwoPanicRuntimes,
+};
use rustc_data_structures::fx::FxHashMap;
use rustc_hir::def_id::CrateNum;
@@ -136,11 +140,7 @@ fn calculate_type(tcx: TyCtxt<'_>, ty: CrateType) -> DependencyList {
if src.rlib.is_some() {
continue;
}
- sess.err(&format!(
- "crate `{}` required to be available in rlib format, \
- but was not found in this form",
- tcx.crate_name(cnum)
- ));
+ sess.emit_err(RlibRequired { crate_name: tcx.crate_name(cnum) });
}
return Vec::new();
}
@@ -158,11 +158,11 @@ fn calculate_type(tcx: TyCtxt<'_>, ty: CrateType) -> DependencyList {
let name = tcx.crate_name(cnum);
let src = tcx.used_crate_source(cnum);
if src.dylib.is_some() {
- tracing::info!("adding dylib: {}", name);
+ info!("adding dylib: {}", name);
add_library(tcx, cnum, RequireDynamic, &mut formats);
let deps = tcx.dylib_dependency_formats(cnum);
for &(depnum, style) in deps.iter() {
- tracing::info!("adding {:?}: {}", style, tcx.crate_name(depnum));
+ info!("adding {:?}: {}", style, tcx.crate_name(depnum));
add_library(tcx, depnum, style, &mut formats);
}
}
@@ -190,7 +190,7 @@ fn calculate_type(tcx: TyCtxt<'_>, ty: CrateType) -> DependencyList {
&& tcx.dep_kind(cnum) == CrateDepKind::Explicit
{
assert!(src.rlib.is_some() || src.rmeta.is_some());
- tracing::info!("adding staticlib: {}", tcx.crate_name(cnum));
+ info!("adding staticlib: {}", tcx.crate_name(cnum));
add_library(tcx, cnum, RequireStatic, &mut formats);
ret[cnum.as_usize() - 1] = Linkage::Static;
}
@@ -224,12 +224,7 @@ fn calculate_type(tcx: TyCtxt<'_>, ty: CrateType) -> DependencyList {
Linkage::Static => "rlib",
_ => "dylib",
};
- sess.err(&format!(
- "crate `{}` required to be available in {} format, \
- but was not found in this form",
- tcx.crate_name(cnum),
- kind
- ));
+ sess.emit_err(LibRequired { crate_name: tcx.crate_name(cnum), kind: kind });
}
}
}
@@ -253,17 +248,7 @@ fn add_library(
// This error is probably a little obscure, but I imagine that it
// can be refined over time.
if link2 != link || link == RequireStatic {
- tcx.sess
- .struct_err(&format!(
- "cannot satisfy dependencies so `{}` only \
- shows up once",
- tcx.crate_name(cnum)
- ))
- .help(
- "having upstream crates all available in one format \
- will likely make this go away",
- )
- .emit();
+ tcx.sess.emit_err(CrateDepMultiple { crate_name: tcx.crate_name(cnum) });
}
}
None => {
@@ -360,11 +345,7 @@ fn verify_ok(tcx: TyCtxt<'_>, list: &[Linkage]) {
if let Some((prev, _)) = panic_runtime {
let prev_name = tcx.crate_name(prev);
let cur_name = tcx.crate_name(cnum);
- sess.err(&format!(
- "cannot link together two \
- panic runtimes: {} and {}",
- prev_name, cur_name
- ));
+ sess.emit_err(TwoPanicRuntimes { prev_name, cur_name });
}
panic_runtime = Some((
cnum,
@@ -384,13 +365,10 @@ fn verify_ok(tcx: TyCtxt<'_>, list: &[Linkage]) {
// First up, validate that our selected panic runtime is indeed exactly
// our same strategy.
if found_strategy != desired_strategy {
- sess.err(&format!(
- "the linked panic runtime `{}` is \
- not compiled with this crate's \
- panic strategy `{}`",
- tcx.crate_name(runtime_cnum),
- desired_strategy.desc()
- ));
+ sess.emit_err(BadPanicStrategy {
+ runtime: tcx.crate_name(runtime_cnum),
+ strategy: desired_strategy,
+ });
}
// Next up, verify that all other crates are compatible with this panic
@@ -407,28 +385,19 @@ fn verify_ok(tcx: TyCtxt<'_>, list: &[Linkage]) {
}
if let Some(found_strategy) = tcx.required_panic_strategy(cnum) && desired_strategy != found_strategy {
- sess.err(&format!(
- "the crate `{}` requires \
- panic strategy `{}` which is \
- incompatible with this crate's \
- strategy of `{}`",
- tcx.crate_name(cnum),
- found_strategy.desc(),
- desired_strategy.desc()
- ));
+ sess.emit_err(RequiredPanicStrategy {
+ crate_name: tcx.crate_name(cnum),
+ found_strategy,
+ desired_strategy});
}
let found_drop_strategy = tcx.panic_in_drop_strategy(cnum);
if tcx.sess.opts.unstable_opts.panic_in_drop != found_drop_strategy {
- sess.err(&format!(
- "the crate `{}` is compiled with the \
- panic-in-drop strategy `{}` which is \
- incompatible with this crate's \
- strategy of `{}`",
- tcx.crate_name(cnum),
- found_drop_strategy.desc(),
- tcx.sess.opts.unstable_opts.panic_in_drop.desc()
- ));
+ sess.emit_err(IncompatiblePanicInDropStrategy {
+ crate_name: tcx.crate_name(cnum),
+ found_strategy: found_drop_strategy,
+ desired_strategy: tcx.sess.opts.unstable_opts.panic_in_drop,
+ });
}
}
}
diff --git a/compiler/rustc_metadata/src/errors.rs b/compiler/rustc_metadata/src/errors.rs
new file mode 100644
index 000000000..7c387b9a9
--- /dev/null
+++ b/compiler/rustc_metadata/src/errors.rs
@@ -0,0 +1,713 @@
+use std::{
+ io::Error,
+ path::{Path, PathBuf},
+};
+
+use rustc_errors::{error_code, ErrorGuaranteed, IntoDiagnostic};
+use rustc_macros::Diagnostic;
+use rustc_session::config;
+use rustc_span::{sym, Span, Symbol};
+use rustc_target::spec::{PanicStrategy, TargetTriple};
+
+use crate::locator::CrateFlavor;
+
+#[derive(Diagnostic)]
+#[diag(metadata_rlib_required)]
+pub struct RlibRequired {
+ pub crate_name: Symbol,
+}
+
+#[derive(Diagnostic)]
+#[diag(metadata_lib_required)]
+pub struct LibRequired<'a> {
+ pub crate_name: Symbol,
+ pub kind: &'a str,
+}
+
+#[derive(Diagnostic)]
+#[diag(metadata_crate_dep_multiple)]
+#[help]
+pub struct CrateDepMultiple {
+ pub crate_name: Symbol,
+}
+
+#[derive(Diagnostic)]
+#[diag(metadata_two_panic_runtimes)]
+pub struct TwoPanicRuntimes {
+ pub prev_name: Symbol,
+ pub cur_name: Symbol,
+}
+
+#[derive(Diagnostic)]
+#[diag(metadata_bad_panic_strategy)]
+pub struct BadPanicStrategy {
+ pub runtime: Symbol,
+ pub strategy: PanicStrategy,
+}
+
+#[derive(Diagnostic)]
+#[diag(metadata_required_panic_strategy)]
+pub struct RequiredPanicStrategy {
+ pub crate_name: Symbol,
+ pub found_strategy: PanicStrategy,
+ pub desired_strategy: PanicStrategy,
+}
+
+#[derive(Diagnostic)]
+#[diag(metadata_incompatible_panic_in_drop_strategy)]
+pub struct IncompatiblePanicInDropStrategy {
+ pub crate_name: Symbol,
+ pub found_strategy: PanicStrategy,
+ pub desired_strategy: PanicStrategy,
+}
+
+#[derive(Diagnostic)]
+#[diag(metadata_multiple_names_in_link)]
+pub struct MultipleNamesInLink {
+ #[primary_span]
+ pub span: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(metadata_multiple_kinds_in_link)]
+pub struct MultipleKindsInLink {
+ #[primary_span]
+ pub span: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(metadata_link_name_form)]
+pub struct LinkNameForm {
+ #[primary_span]
+ pub span: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(metadata_link_kind_form)]
+pub struct LinkKindForm {
+ #[primary_span]
+ pub span: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(metadata_link_modifiers_form)]
+pub struct LinkModifiersForm {
+ #[primary_span]
+ pub span: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(metadata_link_cfg_form)]
+pub struct LinkCfgForm {
+ #[primary_span]
+ pub span: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(metadata_wasm_import_form)]
+pub struct WasmImportForm {
+ #[primary_span]
+ pub span: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(metadata_empty_link_name, code = "E0454")]
+pub struct EmptyLinkName {
+ #[primary_span]
+ #[label]
+ pub span: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(metadata_link_framework_apple, code = "E0455")]
+pub struct LinkFrameworkApple {
+ #[primary_span]
+ pub span: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(metadata_framework_only_windows, code = "E0455")]
+pub struct FrameworkOnlyWindows {
+ #[primary_span]
+ pub span: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(metadata_unknown_link_kind, code = "E0458")]
+pub struct UnknownLinkKind<'a> {
+ #[primary_span]
+ #[label]
+ pub span: Span,
+ pub kind: &'a str,
+}
+
+#[derive(Diagnostic)]
+#[diag(metadata_multiple_link_modifiers)]
+pub struct MultipleLinkModifiers {
+ #[primary_span]
+ pub span: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(metadata_multiple_cfgs)]
+pub struct MultipleCfgs {
+ #[primary_span]
+ pub span: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(metadata_link_cfg_single_predicate)]
+pub struct LinkCfgSinglePredicate {
+ #[primary_span]
+ pub span: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(metadata_multiple_wasm_import)]
+pub struct MultipleWasmImport {
+ #[primary_span]
+ pub span: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(metadata_unexpected_link_arg)]
+pub struct UnexpectedLinkArg {
+ #[primary_span]
+ pub span: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(metadata_invalid_link_modifier)]
+pub struct InvalidLinkModifier {
+ #[primary_span]
+ pub span: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(metadata_multiple_modifiers)]
+pub struct MultipleModifiers<'a> {
+ #[primary_span]
+ pub span: Span,
+ pub modifier: &'a str,
+}
+
+#[derive(Diagnostic)]
+#[diag(metadata_bundle_needs_static)]
+pub struct BundleNeedsStatic {
+ #[primary_span]
+ pub span: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(metadata_whole_archive_needs_static)]
+pub struct WholeArchiveNeedsStatic {
+ #[primary_span]
+ pub span: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(metadata_as_needed_compatibility)]
+pub struct AsNeededCompatibility {
+ #[primary_span]
+ pub span: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(metadata_unknown_link_modifier)]
+pub struct UnknownLinkModifier<'a> {
+ #[primary_span]
+ pub span: Span,
+ pub modifier: &'a str,
+}
+
+#[derive(Diagnostic)]
+#[diag(metadata_incompatible_wasm_link)]
+pub struct IncompatibleWasmLink {
+ #[primary_span]
+ pub span: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(metadata_link_requires_name, code = "E0459")]
+pub struct LinkRequiresName {
+ #[primary_span]
+ #[label]
+ pub span: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(metadata_raw_dylib_no_nul)]
+pub struct RawDylibNoNul {
+ #[primary_span]
+ pub span: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(metadata_link_ordinal_raw_dylib)]
+pub struct LinkOrdinalRawDylib {
+ #[primary_span]
+ pub span: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(metadata_lib_framework_apple)]
+pub struct LibFrameworkApple;
+
+#[derive(Diagnostic)]
+#[diag(metadata_empty_renaming_target)]
+pub struct EmptyRenamingTarget<'a> {
+ pub lib_name: &'a str,
+}
+
+#[derive(Diagnostic)]
+#[diag(metadata_renaming_no_link)]
+pub struct RenamingNoLink<'a> {
+ pub lib_name: &'a str,
+}
+
+#[derive(Diagnostic)]
+#[diag(metadata_multiple_renamings)]
+pub struct MultipleRenamings<'a> {
+ pub lib_name: &'a str,
+}
+
+#[derive(Diagnostic)]
+#[diag(metadata_no_link_mod_override)]
+pub struct NoLinkModOverride {
+ #[primary_span]
+ pub span: Option<Span>,
+}
+
+#[derive(Diagnostic)]
+#[diag(metadata_unsupported_abi_i686)]
+pub struct UnsupportedAbiI686 {
+ #[primary_span]
+ pub span: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(metadata_unsupported_abi)]
+pub struct UnsupportedAbi {
+ #[primary_span]
+ pub span: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(metadata_fail_create_file_encoder)]
+pub struct FailCreateFileEncoder {
+ pub err: Error,
+}
+
+#[derive(Diagnostic)]
+#[diag(metadata_fail_seek_file)]
+pub struct FailSeekFile {
+ pub err: Error,
+}
+
+#[derive(Diagnostic)]
+#[diag(metadata_fail_write_file)]
+pub struct FailWriteFile {
+ pub err: Error,
+}
+
+#[derive(Diagnostic)]
+#[diag(metadata_crate_not_panic_runtime)]
+pub struct CrateNotPanicRuntime {
+ pub crate_name: Symbol,
+}
+
+#[derive(Diagnostic)]
+#[diag(metadata_no_panic_strategy)]
+pub struct NoPanicStrategy {
+ pub crate_name: Symbol,
+ pub strategy: PanicStrategy,
+}
+
+#[derive(Diagnostic)]
+#[diag(metadata_profiler_builtins_needs_core)]
+pub struct ProfilerBuiltinsNeedsCore;
+
+#[derive(Diagnostic)]
+#[diag(metadata_not_profiler_runtime)]
+pub struct NotProfilerRuntime {
+ pub crate_name: Symbol,
+}
+
+#[derive(Diagnostic)]
+#[diag(metadata_no_multiple_global_alloc)]
+pub struct NoMultipleGlobalAlloc {
+ #[primary_span]
+ #[label]
+ pub span2: Span,
+ #[label(metadata_prev_global_alloc)]
+ pub span1: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(metadata_conflicting_global_alloc)]
+pub struct ConflictingGlobalAlloc {
+ pub crate_name: Symbol,
+ pub other_crate_name: Symbol,
+}
+
+#[derive(Diagnostic)]
+#[diag(metadata_global_alloc_required)]
+pub struct GlobalAllocRequired;
+
+#[derive(Diagnostic)]
+#[diag(metadata_no_transitive_needs_dep)]
+pub struct NoTransitiveNeedsDep<'a> {
+ pub crate_name: Symbol,
+ pub needs_crate_name: &'a str,
+ pub deps_crate_name: Symbol,
+}
+
+#[derive(Diagnostic)]
+#[diag(metadata_failed_write_error)]
+pub struct FailedWriteError {
+ pub filename: PathBuf,
+ pub err: Error,
+}
+
+#[derive(Diagnostic)]
+#[diag(metadata_missing_native_library)]
+pub struct MissingNativeLibrary<'a> {
+ libname: &'a str,
+ #[subdiagnostic]
+ suggest_name: Option<SuggestLibraryName<'a>>,
+}
+
+impl<'a> MissingNativeLibrary<'a> {
+ pub fn new(libname: &'a str, verbatim: bool) -> Self {
+ // if it looks like the user has provided a complete filename rather than just the bare lib name,
+ // then provide a note that they might want to try trimming the name
+ let suggested_name = if !verbatim {
+ if let Some(libname) = libname.strip_prefix("lib") && let Some(libname) = libname.strip_suffix(".a") {
+ // this is a unix style filename so trim prefix & suffix
+ Some(libname)
+ } else if let Some(libname) = libname.strip_suffix(".lib") {
+ // this is a Windows style filename so just trim the suffix
+ Some(libname)
+ } else {
+ None
+ }
+ } else {
+ None
+ };
+
+ Self {
+ libname,
+ suggest_name: suggested_name
+ .map(|suggested_name| SuggestLibraryName { suggested_name }),
+ }
+ }
+}
+
+#[derive(Subdiagnostic)]
+#[help(metadata_only_provide_library_name)]
+pub struct SuggestLibraryName<'a> {
+ suggested_name: &'a str,
+}
+
+#[derive(Diagnostic)]
+#[diag(metadata_failed_create_tempdir)]
+pub struct FailedCreateTempdir {
+ pub err: Error,
+}
+
+#[derive(Diagnostic)]
+#[diag(metadata_failed_create_file)]
+pub struct FailedCreateFile<'a> {
+ pub filename: &'a Path,
+ pub err: Error,
+}
+
+#[derive(Diagnostic)]
+#[diag(metadata_failed_create_encoded_metadata)]
+pub struct FailedCreateEncodedMetadata {
+ pub err: Error,
+}
+
+#[derive(Diagnostic)]
+#[diag(metadata_non_ascii_name)]
+pub struct NonAsciiName {
+ #[primary_span]
+ pub span: Span,
+ pub crate_name: Symbol,
+}
+
+#[derive(Diagnostic)]
+#[diag(metadata_extern_location_not_exist)]
+pub struct ExternLocationNotExist<'a> {
+ #[primary_span]
+ pub span: Span,
+ pub crate_name: Symbol,
+ pub location: &'a Path,
+}
+
+#[derive(Diagnostic)]
+#[diag(metadata_extern_location_not_file)]
+pub struct ExternLocationNotFile<'a> {
+ #[primary_span]
+ pub span: Span,
+ pub crate_name: Symbol,
+ pub location: &'a Path,
+}
+
+pub(crate) struct MultipleCandidates {
+ pub span: Span,
+ pub flavor: CrateFlavor,
+ pub crate_name: Symbol,
+ pub candidates: Vec<PathBuf>,
+}
+
+impl IntoDiagnostic<'_> for MultipleCandidates {
+ fn into_diagnostic(
+ self,
+ handler: &'_ rustc_errors::Handler,
+ ) -> rustc_errors::DiagnosticBuilder<'_, ErrorGuaranteed> {
+ let mut diag = handler.struct_err(rustc_errors::fluent::metadata_multiple_candidates);
+ diag.set_arg("crate_name", self.crate_name);
+ diag.set_arg("flavor", self.flavor);
+ diag.code(error_code!(E0465));
+ diag.set_span(self.span);
+ for (i, candidate) in self.candidates.iter().enumerate() {
+ diag.span_note(self.span, &format!("candidate #{}: {}", i + 1, candidate.display()));
+ }
+ diag
+ }
+}
+
+#[derive(Diagnostic)]
+#[diag(metadata_multiple_matching_crates, code = "E0464")]
+#[note]
+pub struct MultipleMatchingCrates {
+ #[primary_span]
+ pub span: Span,
+ pub crate_name: Symbol,
+ pub candidates: String,
+}
+
+#[derive(Diagnostic)]
+#[diag(metadata_symbol_conflicts_current, code = "E0519")]
+pub struct SymbolConflictsCurrent {
+ #[primary_span]
+ pub span: Span,
+ pub crate_name: Symbol,
+}
+
+#[derive(Diagnostic)]
+#[diag(metadata_symbol_conflicts_others, code = "E0523")]
+pub struct SymbolConflictsOthers {
+ #[primary_span]
+ pub span: Span,
+ pub crate_name: Symbol,
+}
+
+#[derive(Diagnostic)]
+#[diag(metadata_stable_crate_id_collision)]
+pub struct StableCrateIdCollision {
+ #[primary_span]
+ pub span: Span,
+ pub crate_name0: Symbol,
+ pub crate_name1: Symbol,
+}
+
+#[derive(Diagnostic)]
+#[diag(metadata_dl_error)]
+pub struct DlError {
+ #[primary_span]
+ pub span: Span,
+ pub err: String,
+}
+
+#[derive(Diagnostic)]
+#[diag(metadata_newer_crate_version, code = "E0460")]
+#[note]
+#[note(metadata_found_crate_versions)]
+pub struct NewerCrateVersion {
+ #[primary_span]
+ pub span: Span,
+ pub crate_name: Symbol,
+ pub add_info: String,
+ pub found_crates: String,
+}
+
+#[derive(Diagnostic)]
+#[diag(metadata_no_crate_with_triple, code = "E0461")]
+#[note(metadata_found_crate_versions)]
+pub struct NoCrateWithTriple<'a> {
+ #[primary_span]
+ pub span: Span,
+ pub crate_name: Symbol,
+ pub locator_triple: &'a str,
+ pub add_info: String,
+ pub found_crates: String,
+}
+
+#[derive(Diagnostic)]
+#[diag(metadata_found_staticlib, code = "E0462")]
+#[note(metadata_found_crate_versions)]
+#[help]
+pub struct FoundStaticlib {
+ #[primary_span]
+ pub span: Span,
+ pub crate_name: Symbol,
+ pub add_info: String,
+ pub found_crates: String,
+}
+
+#[derive(Diagnostic)]
+#[diag(metadata_incompatible_rustc, code = "E0514")]
+#[note(metadata_found_crate_versions)]
+#[help]
+pub struct IncompatibleRustc {
+ #[primary_span]
+ pub span: Span,
+ pub crate_name: Symbol,
+ pub add_info: String,
+ pub found_crates: String,
+ pub rustc_version: String,
+}
+
+pub struct InvalidMetadataFiles {
+ pub span: Span,
+ pub crate_name: Symbol,
+ pub add_info: String,
+ pub crate_rejections: Vec<String>,
+}
+
+impl IntoDiagnostic<'_> for InvalidMetadataFiles {
+ fn into_diagnostic(
+ self,
+ handler: &'_ rustc_errors::Handler,
+ ) -> rustc_errors::DiagnosticBuilder<'_, ErrorGuaranteed> {
+ let mut diag = handler.struct_err(rustc_errors::fluent::metadata_invalid_meta_files);
+ diag.set_arg("crate_name", self.crate_name);
+ diag.set_arg("add_info", self.add_info);
+ diag.code(error_code!(E0786));
+ diag.set_span(self.span);
+ for crate_rejection in self.crate_rejections {
+ diag.note(crate_rejection);
+ }
+ diag
+ }
+}
+
+pub struct CannotFindCrate {
+ pub span: Span,
+ pub crate_name: Symbol,
+ pub add_info: String,
+ pub missing_core: bool,
+ pub current_crate: String,
+ pub is_nightly_build: bool,
+ pub profiler_runtime: Symbol,
+ pub locator_triple: TargetTriple,
+}
+
+impl IntoDiagnostic<'_> for CannotFindCrate {
+ fn into_diagnostic(
+ self,
+ handler: &'_ rustc_errors::Handler,
+ ) -> rustc_errors::DiagnosticBuilder<'_, ErrorGuaranteed> {
+ let mut diag = handler.struct_err(rustc_errors::fluent::metadata_cannot_find_crate);
+ diag.set_arg("crate_name", self.crate_name);
+ diag.set_arg("current_crate", self.current_crate);
+ diag.set_arg("add_info", self.add_info);
+ diag.set_arg("locator_triple", self.locator_triple.triple());
+ diag.code(error_code!(E0463));
+ diag.set_span(self.span);
+ if (self.crate_name == sym::std || self.crate_name == sym::core)
+ && self.locator_triple != TargetTriple::from_triple(config::host_triple())
+ {
+ if self.missing_core {
+ diag.note(rustc_errors::fluent::metadata_target_not_installed);
+ } else {
+ diag.note(rustc_errors::fluent::metadata_target_no_std_support);
+ }
+ // NOTE: this suggests using rustup, even though the user may not have it installed.
+ // That's because they could choose to install it; or this may give them a hint which
+ // target they need to install from their distro.
+ if self.missing_core {
+ diag.help(rustc_errors::fluent::metadata_consider_downloading_target);
+ }
+ // Suggest using #![no_std]. #[no_core] is unstable and not really supported anyway.
+ // NOTE: this is a dummy span if `extern crate std` was injected by the compiler.
+ // If it's not a dummy, that means someone added `extern crate std` explicitly and
+ // `#![no_std]` won't help.
+ if !self.missing_core && self.span.is_dummy() {
+ diag.note(rustc_errors::fluent::metadata_std_required);
+ }
+ if self.is_nightly_build {
+ diag.help(rustc_errors::fluent::metadata_consider_building_std);
+ }
+ } else if self.crate_name == self.profiler_runtime {
+ diag.note(rustc_errors::fluent::metadata_compiler_missing_profiler);
+ } else if self.crate_name.as_str().starts_with("rustc_") {
+ diag.help(rustc_errors::fluent::metadata_install_missing_components);
+ }
+ diag.span_label(self.span, rustc_errors::fluent::metadata_cant_find_crate);
+ diag
+ }
+}
+
+#[derive(Diagnostic)]
+#[diag(metadata_no_dylib_plugin, code = "E0457")]
+pub struct NoDylibPlugin {
+ #[primary_span]
+ pub span: Span,
+ pub crate_name: Symbol,
+}
+
+#[derive(Diagnostic)]
+#[diag(metadata_crate_location_unknown_type)]
+pub struct CrateLocationUnknownType<'a> {
+ #[primary_span]
+ pub span: Span,
+ pub path: &'a Path,
+}
+
+#[derive(Diagnostic)]
+#[diag(metadata_lib_filename_form)]
+pub struct LibFilenameForm<'a> {
+ #[primary_span]
+ pub span: Span,
+ pub dll_prefix: &'a str,
+ pub dll_suffix: &'a str,
+}
+
+#[derive(Diagnostic)]
+#[diag(metadata_multiple_import_name_type)]
+pub struct MultipleImportNameType {
+ #[primary_span]
+ pub span: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(metadata_import_name_type_form)]
+pub struct ImportNameTypeForm {
+ #[primary_span]
+ pub span: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(metadata_import_name_type_x86)]
+pub struct ImportNameTypeX86 {
+ #[primary_span]
+ pub span: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(metadata_unknown_import_name_type)]
+pub struct UnknownImportNameType<'a> {
+ #[primary_span]
+ pub span: Span,
+ pub import_name_type: &'a str,
+}
+
+#[derive(Diagnostic)]
+#[diag(metadata_import_name_type_raw)]
+pub struct ImportNameTypeRaw {
+ #[primary_span]
+ pub span: Span,
+}
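
`MissingNativeLibrary::new` above only attaches the `SuggestLibraryName` help when the argument looks like a full filename rather than a bare library name. A standalone sketch of just that trimming rule, written as a hypothetical free function with the same behaviour:

    // Strip a unix-style `lib<name>.a` or a Windows-style `<name>.lib` wrapper;
    // verbatim names and anything else produce no suggestion.
    fn suggested_name(libname: &str, verbatim: bool) -> Option<&str> {
        if verbatim {
            return None;
        }
        if let Some(stem) = libname.strip_prefix("lib").and_then(|s| s.strip_suffix(".a")) {
            Some(stem) // unix-style filename: drop both prefix and suffix
        } else if let Some(stem) = libname.strip_suffix(".lib") {
            Some(stem) // Windows-style filename: drop the suffix only
        } else {
            None
        }
    }

    fn main() {
        assert_eq!(suggested_name("libfoo.a", false), Some("foo"));
        assert_eq!(suggested_name("bar.lib", false), Some("bar"));
        assert_eq!(suggested_name("baz", false), None);
        assert_eq!(suggested_name("libfoo.a", true), None);
    }
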
diff --git a/compiler/rustc_metadata/src/foreign_modules.rs b/compiler/rustc_metadata/src/foreign_modules.rs
index 2ca4cd17f..d1c2f3104 100644
--- a/compiler/rustc_metadata/src/foreign_modules.rs
+++ b/compiler/rustc_metadata/src/foreign_modules.rs
@@ -6,13 +6,13 @@ use rustc_session::cstore::ForeignModule;
pub(crate) fn collect(tcx: TyCtxt<'_>) -> Vec<ForeignModule> {
let mut modules = Vec::new();
for id in tcx.hir().items() {
- if !matches!(tcx.def_kind(id.def_id), DefKind::ForeignMod) {
+ if !matches!(tcx.def_kind(id.owner_id), DefKind::ForeignMod) {
continue;
}
let item = tcx.hir().item(id);
if let hir::ItemKind::ForeignMod { items, .. } = item.kind {
- let foreign_items = items.iter().map(|it| it.id.def_id.to_def_id()).collect();
- modules.push(ForeignModule { foreign_items, def_id: id.def_id.to_def_id() });
+ let foreign_items = items.iter().map(|it| it.id.owner_id.to_def_id()).collect();
+ modules.push(ForeignModule { foreign_items, def_id: id.owner_id.to_def_id() });
}
}
modules
diff --git a/compiler/rustc_metadata/src/fs.rs b/compiler/rustc_metadata/src/fs.rs
index e6072901a..f360a5864 100644
--- a/compiler/rustc_metadata/src/fs.rs
+++ b/compiler/rustc_metadata/src/fs.rs
@@ -1,3 +1,6 @@
+use crate::errors::{
+ FailedCreateEncodedMetadata, FailedCreateFile, FailedCreateTempdir, FailedWriteError,
+};
use crate::{encode_metadata, EncodedMetadata};
use rustc_data_structures::temp_dir::MaybeTempDir;
@@ -23,8 +26,8 @@ pub fn emit_metadata(sess: &Session, metadata: &[u8], tmpdir: &MaybeTempDir) ->
let out_filename = tmpdir.as_ref().join(METADATA_FILENAME);
let result = fs::write(&out_filename, metadata);
- if let Err(e) = result {
- sess.fatal(&format!("failed to write {}: {}", out_filename.display(), e));
+ if let Err(err) = result {
+ sess.emit_fatal(FailedWriteError { filename: out_filename, err });
}
out_filename
@@ -65,7 +68,7 @@ pub fn encode_and_write_metadata(
let metadata_tmpdir = TempFileBuilder::new()
.prefix("rmeta")
.tempdir_in(out_filename.parent().unwrap_or_else(|| Path::new("")))
- .unwrap_or_else(|err| tcx.sess.fatal(&format!("couldn't create a temp dir: {}", err)));
+ .unwrap_or_else(|err| tcx.sess.emit_fatal(FailedCreateTempdir { err }));
let metadata_tmpdir = MaybeTempDir::new(metadata_tmpdir, tcx.sess.opts.cg.save_temps);
let metadata_filename = metadata_tmpdir.as_ref().join(METADATA_FILENAME);
@@ -73,12 +76,8 @@ pub fn encode_and_write_metadata(
// This simplifies the creation of the output `out_filename` when requested.
match metadata_kind {
MetadataKind::None => {
- std::fs::File::create(&metadata_filename).unwrap_or_else(|e| {
- tcx.sess.fatal(&format!(
- "failed to create the file {}: {}",
- metadata_filename.display(),
- e
- ))
+ std::fs::File::create(&metadata_filename).unwrap_or_else(|err| {
+ tcx.sess.emit_fatal(FailedCreateFile { filename: &metadata_filename, err });
});
}
MetadataKind::Uncompressed | MetadataKind::Compressed => {
@@ -93,8 +92,8 @@ pub fn encode_and_write_metadata(
// this file always exists.
let need_metadata_file = tcx.sess.opts.output_types.contains_key(&OutputType::Metadata);
let (metadata_filename, metadata_tmpdir) = if need_metadata_file {
- if let Err(e) = non_durable_rename(&metadata_filename, &out_filename) {
- tcx.sess.fatal(&format!("failed to write {}: {}", out_filename.display(), e));
+ if let Err(err) = non_durable_rename(&metadata_filename, &out_filename) {
+ tcx.sess.emit_fatal(FailedWriteError { filename: out_filename, err });
}
if tcx.sess.opts.json_artifact_notifications {
tcx.sess
@@ -109,8 +108,8 @@ pub fn encode_and_write_metadata(
// Load metadata back to memory: codegen may need to include it in object files.
let metadata =
- EncodedMetadata::from_path(metadata_filename, metadata_tmpdir).unwrap_or_else(|e| {
- tcx.sess.fatal(&format!("failed to create encoded metadata from file: {}", e))
+ EncodedMetadata::from_path(metadata_filename, metadata_tmpdir).unwrap_or_else(|err| {
+ tcx.sess.emit_fatal(FailedCreateEncodedMetadata { err });
});
let need_metadata_module = metadata_kind == MetadataKind::Compressed;
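
The `unwrap_or_else(|err| tcx.sess.emit_fatal(..))` calls above type-check because the fatal emitters diverge, so the closure never has to produce the value the surrounding expression expects. A tiny self-contained illustration of the same pattern, with a made-up `fatal` helper standing in for the session API:

    // `fatal` returns `!`, and a diverging expression coerces to any expected
    // type, so the closure is usable wherever `unwrap_or_else` needs a value.
    fn fatal(msg: &str) -> ! {
        eprintln!("fatal error: {msg}");
        std::process::exit(101);
    }

    fn main() {
        let port: u32 = "8080".parse().unwrap_or_else(|_| fatal("failed to parse port"));
        println!("listening on {port}");
    }
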
diff --git a/compiler/rustc_metadata/src/lib.rs b/compiler/rustc_metadata/src/lib.rs
index 6440f3e39..98cf6fef5 100644
--- a/compiler/rustc_metadata/src/lib.rs
+++ b/compiler/rustc_metadata/src/lib.rs
@@ -2,10 +2,8 @@
#![feature(decl_macro)]
#![feature(drain_filter)]
#![feature(generators)]
-#![feature(generic_associated_types)]
#![feature(iter_from_generator)]
#![feature(let_chains)]
-#![feature(let_else)]
#![feature(once_cell)]
#![feature(proc_macro_internals)]
#![feature(macro_metavar_expr)]
@@ -16,6 +14,8 @@
#![feature(never_type)]
#![recursion_limit = "256"]
#![allow(rustc::potential_query_instability)]
+#![deny(rustc::untranslatable_diagnostic)]
+#![deny(rustc::diagnostic_outside_of_impl)]
extern crate proc_macro;
@@ -26,6 +26,9 @@ extern crate rustc_middle;
#[macro_use]
extern crate rustc_data_structures;
+#[macro_use]
+extern crate tracing;
+
pub use rmeta::{provide, provide_extern};
mod dependency_format;
@@ -34,8 +37,10 @@ mod native_libs;
mod rmeta;
pub mod creader;
+pub mod errors;
pub mod fs;
pub mod locator;
pub use fs::{emit_metadata, METADATA_FILENAME};
+pub use native_libs::find_native_static_library;
pub use rmeta::{encode_metadata, EncodedMetadata, METADATA_HEADER};
diff --git a/compiler/rustc_metadata/src/locator.rs b/compiler/rustc_metadata/src/locator.rs
index 2c1c84b0b..35f9ef92a 100644
--- a/compiler/rustc_metadata/src/locator.rs
+++ b/compiler/rustc_metadata/src/locator.rs
@@ -213,6 +213,13 @@
//! metadata::locator or metadata::creader for all the juicy details!
use crate::creader::Library;
+use crate::errors::{
+ CannotFindCrate, CrateLocationUnknownType, DlError, ExternLocationNotExist,
+ ExternLocationNotFile, FoundStaticlib, IncompatibleRustc, InvalidMetadataFiles,
+ LibFilenameForm, MultipleCandidates, MultipleMatchingCrates, NewerCrateVersion,
+ NoCrateWithTriple, NoDylibPlugin, NonAsciiName, StableCrateIdCollision, SymbolConflictsCurrent,
+ SymbolConflictsOthers,
+};
use crate::rmeta::{rustc_version, MetadataBlob, METADATA_HEADER};
use rustc_data_structures::fx::{FxHashMap, FxHashSet};
@@ -220,23 +227,23 @@ use rustc_data_structures::memmap::Mmap;
use rustc_data_structures::owning_ref::OwningRef;
use rustc_data_structures::svh::Svh;
use rustc_data_structures::sync::MetadataRef;
-use rustc_errors::{struct_span_err, FatalError};
+use rustc_errors::{DiagnosticArgValue, FatalError, IntoDiagnosticArg};
use rustc_session::config::{self, CrateType};
use rustc_session::cstore::{CrateSource, MetadataLoader};
use rustc_session::filesearch::FileSearch;
use rustc_session::search_paths::PathKind;
use rustc_session::utils::CanonicalizedPath;
use rustc_session::Session;
-use rustc_span::symbol::{sym, Symbol};
+use rustc_span::symbol::Symbol;
use rustc_span::Span;
use rustc_target::spec::{Target, TargetTriple};
use snap::read::FrameDecoder;
+use std::borrow::Cow;
use std::fmt::Write as _;
use std::io::{Read, Result as IoResult, Write};
use std::path::{Path, PathBuf};
use std::{cmp, fmt, fs};
-use tracing::{debug, info};
#[derive(Clone)]
pub(crate) struct CrateLocator<'a> {
@@ -288,6 +295,16 @@ impl fmt::Display for CrateFlavor {
}
}
+impl IntoDiagnosticArg for CrateFlavor {
+ fn into_diagnostic_arg(self) -> rustc_errors::DiagnosticArgValue<'static> {
+ match self {
+ CrateFlavor::Rlib => DiagnosticArgValue::Str(Cow::Borrowed("rlib")),
+ CrateFlavor::Rmeta => DiagnosticArgValue::Str(Cow::Borrowed("rmeta")),
+ CrateFlavor::Dylib => DiagnosticArgValue::Str(Cow::Borrowed("dylib")),
+ }
+ }
+}
+
impl<'a> CrateLocator<'a> {
pub(crate) fn new(
sess: &'a Session,
@@ -938,41 +955,20 @@ impl fmt::Display for MetadataError<'_> {
impl CrateError {
pub(crate) fn report(self, sess: &Session, span: Span, missing_core: bool) {
- let mut diag = match self {
- CrateError::NonAsciiName(crate_name) => sess.struct_span_err(
- span,
- &format!("cannot load a crate with a non-ascii name `{}`", crate_name),
- ),
- CrateError::ExternLocationNotExist(crate_name, loc) => sess.struct_span_err(
- span,
- &format!("extern location for {} does not exist: {}", crate_name, loc.display()),
- ),
- CrateError::ExternLocationNotFile(crate_name, loc) => sess.struct_span_err(
- span,
- &format!("extern location for {} is not a file: {}", crate_name, loc.display()),
- ),
+ match self {
+ CrateError::NonAsciiName(crate_name) => {
+ sess.emit_err(NonAsciiName { span, crate_name });
+ }
+ CrateError::ExternLocationNotExist(crate_name, loc) => {
+ sess.emit_err(ExternLocationNotExist { span, crate_name, location: &loc });
+ }
+ CrateError::ExternLocationNotFile(crate_name, loc) => {
+ sess.emit_err(ExternLocationNotFile { span, crate_name, location: &loc });
+ }
CrateError::MultipleCandidates(crate_name, flavor, candidates) => {
- let mut err = struct_span_err!(
- sess,
- span,
- E0465,
- "multiple {} candidates for `{}` found",
- flavor,
- crate_name,
- );
- for (i, candidate) in candidates.iter().enumerate() {
- err.span_note(span, &format!("candidate #{}: {}", i + 1, candidate.display()));
- }
- err
+ sess.emit_err(MultipleCandidates { span, flavor: flavor, crate_name, candidates });
}
CrateError::MultipleMatchingCrates(crate_name, libraries) => {
- let mut err = struct_span_err!(
- sess,
- span,
- E0464,
- "multiple matching crates for `{}`",
- crate_name
- );
let mut libraries: Vec<_> = libraries.into_values().collect();
// Make ordering of candidates deterministic.
// This has to `clone()` to work around lifetime restrictions with `sort_by_key()`.
@@ -1000,223 +996,142 @@ impl CrateError {
s
})
.collect::<String>();
- err.note(&format!("candidates:{}", candidates));
- err
+ sess.emit_err(MultipleMatchingCrates { span, crate_name, candidates });
+ }
+ CrateError::SymbolConflictsCurrent(root_name) => {
+ sess.emit_err(SymbolConflictsCurrent { span, crate_name: root_name });
+ }
+ CrateError::SymbolConflictsOthers(root_name) => {
+ sess.emit_err(SymbolConflictsOthers { span, crate_name: root_name });
}
- CrateError::SymbolConflictsCurrent(root_name) => struct_span_err!(
- sess,
- span,
- E0519,
- "the current crate is indistinguishable from one of its dependencies: it has the \
- same crate-name `{}` and was compiled with the same `-C metadata` arguments. \
- This will result in symbol conflicts between the two.",
- root_name,
- ),
- CrateError::SymbolConflictsOthers(root_name) => struct_span_err!(
- sess,
- span,
- E0523,
- "found two different crates with name `{}` that are not distinguished by differing \
- `-C metadata`. This will result in symbol conflicts between the two.",
- root_name,
- ),
CrateError::StableCrateIdCollision(crate_name0, crate_name1) => {
- let msg = format!(
- "found crates (`{}` and `{}`) with colliding StableCrateId values.",
- crate_name0, crate_name1
- );
- sess.struct_span_err(span, &msg)
+ sess.emit_err(StableCrateIdCollision {
+ span,
+ crate_name0: crate_name0,
+ crate_name1: crate_name1,
+ });
+ }
+ CrateError::DlOpen(s) | CrateError::DlSym(s) => {
+ sess.emit_err(DlError { span, err: s });
}
- CrateError::DlOpen(s) | CrateError::DlSym(s) => sess.struct_span_err(span, &s),
CrateError::LocatorCombined(locator) => {
let crate_name = locator.crate_name;
- let add = match &locator.root {
+ let add_info = match &locator.root {
None => String::new(),
Some(r) => format!(" which `{}` depends on", r.name),
};
- let mut msg = "the following crate versions were found:".to_string();
- let mut err = if !locator.crate_rejections.via_hash.is_empty() {
- let mut err = struct_span_err!(
- sess,
- span,
- E0460,
- "found possibly newer version of crate `{}`{}",
- crate_name,
- add,
- );
- err.note("perhaps that crate needs to be recompiled?");
+ // FIXME: There are no tests for CrateLocationUnknownType or LibFilenameForm
+ if !locator.crate_rejections.via_filename.is_empty() {
+ let mismatches = locator.crate_rejections.via_filename.iter();
+ for CrateMismatch { path, .. } in mismatches {
+ sess.emit_err(CrateLocationUnknownType { span, path: &path });
+ sess.emit_err(LibFilenameForm {
+ span,
+ dll_prefix: &locator.dll_prefix,
+ dll_suffix: &locator.dll_suffix,
+ });
+ }
+ }
+ let mut found_crates = String::new();
+ if !locator.crate_rejections.via_hash.is_empty() {
let mismatches = locator.crate_rejections.via_hash.iter();
for CrateMismatch { path, .. } in mismatches {
- msg.push_str(&format!("\ncrate `{}`: {}", crate_name, path.display()));
+ found_crates.push_str(&format!(
+ "\ncrate `{}`: {}",
+ crate_name,
+ path.display()
+ ));
}
if let Some(r) = locator.root {
for path in r.source.paths() {
- msg.push_str(&format!("\ncrate `{}`: {}", r.name, path.display()));
+ found_crates.push_str(&format!(
+ "\ncrate `{}`: {}",
+ r.name,
+ path.display()
+ ));
}
}
- err.note(&msg);
- err
- } else if !locator.crate_rejections.via_triple.is_empty() {
- let mut err = struct_span_err!(
- sess,
+ sess.emit_err(NewerCrateVersion {
span,
- E0461,
- "couldn't find crate `{}` with expected target triple {}{}",
- crate_name,
- locator.triple,
- add,
- );
+ crate_name: crate_name,
+ add_info,
+ found_crates,
+ });
+ } else if !locator.crate_rejections.via_triple.is_empty() {
let mismatches = locator.crate_rejections.via_triple.iter();
for CrateMismatch { path, got } in mismatches {
- msg.push_str(&format!(
+ found_crates.push_str(&format!(
"\ncrate `{}`, target triple {}: {}",
crate_name,
got,
path.display(),
));
}
- err.note(&msg);
- err
- } else if !locator.crate_rejections.via_kind.is_empty() {
- let mut err = struct_span_err!(
- sess,
+ sess.emit_err(NoCrateWithTriple {
span,
- E0462,
- "found staticlib `{}` instead of rlib or dylib{}",
- crate_name,
- add,
- );
- err.help("please recompile that crate using --crate-type lib");
+ crate_name: crate_name,
+ locator_triple: locator.triple.triple(),
+ add_info,
+ found_crates,
+ });
+ } else if !locator.crate_rejections.via_kind.is_empty() {
let mismatches = locator.crate_rejections.via_kind.iter();
for CrateMismatch { path, .. } in mismatches {
- msg.push_str(&format!("\ncrate `{}`: {}", crate_name, path.display()));
+ found_crates.push_str(&format!(
+ "\ncrate `{}`: {}",
+ crate_name,
+ path.display()
+ ));
}
- err.note(&msg);
- err
+ sess.emit_err(FoundStaticlib { span, crate_name, add_info, found_crates });
} else if !locator.crate_rejections.via_version.is_empty() {
- let mut err = struct_span_err!(
- sess,
- span,
- E0514,
- "found crate `{}` compiled by an incompatible version of rustc{}",
- crate_name,
- add,
- );
- err.help(&format!(
- "please recompile that crate using this compiler ({}) \
- (consider running `cargo clean` first)",
- rustc_version(),
- ));
let mismatches = locator.crate_rejections.via_version.iter();
for CrateMismatch { path, got } in mismatches {
- msg.push_str(&format!(
+ found_crates.push_str(&format!(
"\ncrate `{}` compiled by {}: {}",
crate_name,
got,
path.display(),
));
}
- err.note(&msg);
- err
- } else if !locator.crate_rejections.via_invalid.is_empty() {
- let mut err = struct_span_err!(
- sess,
+ sess.emit_err(IncompatibleRustc {
span,
- E0786,
- "found invalid metadata files for crate `{}`{}",
crate_name,
- add,
- );
+ add_info,
+ found_crates,
+ rustc_version: rustc_version(),
+ });
+ } else if !locator.crate_rejections.via_invalid.is_empty() {
+ let mut crate_rejections = Vec::new();
for CrateMismatch { path: _, got } in locator.crate_rejections.via_invalid {
- err.note(&got);
+ crate_rejections.push(got);
}
- err
+ sess.emit_err(InvalidMetadataFiles {
+ span,
+ crate_name,
+ add_info,
+ crate_rejections,
+ });
} else {
- let mut err = struct_span_err!(
- sess,
+ sess.emit_err(CannotFindCrate {
span,
- E0463,
- "can't find crate for `{}`{}",
crate_name,
- add,
- );
-
- if (crate_name == sym::std || crate_name == sym::core)
- && locator.triple != TargetTriple::from_triple(config::host_triple())
- {
- if missing_core {
- err.note(&format!(
- "the `{}` target may not be installed",
- locator.triple
- ));
- } else {
- err.note(&format!(
- "the `{}` target may not support the standard library",
- locator.triple
- ));
- }
- // NOTE: this suggests using rustup, even though the user may not have it installed.
- // That's because they could choose to install it; or this may give them a hint which
- // target they need to install from their distro.
- if missing_core {
- err.help(&format!(
- "consider downloading the target with `rustup target add {}`",
- locator.triple
- ));
- }
- // Suggest using #![no_std]. #[no_core] is unstable and not really supported anyway.
- // NOTE: this is a dummy span if `extern crate std` was injected by the compiler.
- // If it's not a dummy, that means someone added `extern crate std` explicitly and `#![no_std]` won't help.
- if !missing_core && span.is_dummy() {
- let current_crate =
- sess.opts.crate_name.as_deref().unwrap_or("<unknown>");
- err.note(&format!(
- "`std` is required by `{}` because it does not declare `#![no_std]`",
- current_crate
- ));
- }
- if sess.is_nightly_build() {
- err.help("consider building the standard library from source with `cargo build -Zbuild-std`");
- }
- } else if crate_name
- == Symbol::intern(&sess.opts.unstable_opts.profiler_runtime)
- {
- err.note("the compiler may have been built without the profiler runtime");
- } else if crate_name.as_str().starts_with("rustc_") {
- err.help(
- "maybe you need to install the missing components with: \
- `rustup component add rust-src rustc-dev llvm-tools-preview`",
- );
- }
- err.span_label(span, "can't find crate");
- err
- };
-
- if !locator.crate_rejections.via_filename.is_empty() {
- let mismatches = locator.crate_rejections.via_filename.iter();
- for CrateMismatch { path, .. } in mismatches {
- err.note(&format!(
- "extern location for {} is of an unknown type: {}",
- crate_name,
- path.display(),
- ))
- .help(&format!(
- "file name should be lib*.rlib or {}*.{}",
- locator.dll_prefix, locator.dll_suffix
- ));
- }
+ add_info,
+ missing_core,
+ current_crate: sess
+ .opts
+ .crate_name
+ .clone()
+ .unwrap_or("<unknown>".to_string()),
+ is_nightly_build: sess.is_nightly_build(),
+ profiler_runtime: Symbol::intern(&sess.opts.unstable_opts.profiler_runtime),
+ locator_triple: locator.triple,
+ });
}
- err
}
- CrateError::NonDylibPlugin(crate_name) => struct_span_err!(
- sess,
- span,
- E0457,
- "plugin `{}` only found in rlib format, but must be available in dylib format",
- crate_name,
- ),
- };
-
- diag.emit();
+ CrateError::NonDylibPlugin(crate_name) => {
+ sess.emit_err(NoDylibPlugin { span, crate_name });
+ }
+ }
}
}
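The hunk above swaps hand-assembled `struct_span_err!` calls for typed diagnostic structs (`IncompatibleRustc`, `InvalidMetadataFiles`, `CannotFindCrate`, `NoDylibPlugin`) handed to `sess.emit_err`. For orientation only, a rough sketch of what such a struct declaration tends to look like follows; the derive name, `#[diag]` slug and field attributes are assumptions on my part, and the real definitions live in rustc_metadata's errors module, which is outside this hunk. The field names simply mirror the `emit_err` call above.

    // Hypothetical sketch only -- see rustc_metadata's errors module for the real thing.
    use rustc_macros::Diagnostic;
    use rustc_span::symbol::Symbol;
    use rustc_span::Span;
    use rustc_target::spec::TargetTriple;

    #[derive(Diagnostic)]
    #[diag(metadata_cannot_find_crate, code = "E0463")]
    pub struct CannotFindCrate {
        #[primary_span]
        pub span: Span,
        pub crate_name: Symbol,
        pub add_info: String,
        pub missing_core: bool,
        pub current_crate: String,
        pub is_nightly_build: bool,
        pub profiler_runtime: Symbol,
        pub locator_triple: TargetTriple,
    }

The notes and suggestions that used to be built up imperatively (rustup hints, `#![no_std]` advice, and so on) are then selected declaratively from fields such as `missing_core` and `is_nightly_build` on the Fluent side rather than in the locator code.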
diff --git a/compiler/rustc_metadata/src/native_libs.rs b/compiler/rustc_metadata/src/native_libs.rs
index 9f6079ecb..20a2e7829 100644
--- a/compiler/rustc_metadata/src/native_libs.rs
+++ b/compiler/rustc_metadata/src/native_libs.rs
@@ -1,17 +1,80 @@
use rustc_ast::{NestedMetaItem, CRATE_NODE_ID};
use rustc_attr as attr;
use rustc_data_structures::fx::FxHashSet;
-use rustc_errors::struct_span_err;
use rustc_hir as hir;
use rustc_hir::def::DefKind;
use rustc_middle::ty::{List, ParamEnv, ParamEnvAnd, Ty, TyCtxt};
-use rustc_session::cstore::{DllCallingConvention, DllImport, NativeLib};
+use rustc_session::config::CrateType;
+use rustc_session::cstore::{DllCallingConvention, DllImport, NativeLib, PeImportNameType};
use rustc_session::parse::feature_err;
+use rustc_session::search_paths::PathKind;
use rustc_session::utils::NativeLibKind;
use rustc_session::Session;
use rustc_span::symbol::{sym, Symbol};
use rustc_target::spec::abi::Abi;
+use crate::errors::{
+ AsNeededCompatibility, BundleNeedsStatic, EmptyLinkName, EmptyRenamingTarget,
+ FrameworkOnlyWindows, ImportNameTypeForm, ImportNameTypeRaw, ImportNameTypeX86,
+ IncompatibleWasmLink, InvalidLinkModifier, LibFrameworkApple, LinkCfgForm,
+ LinkCfgSinglePredicate, LinkFrameworkApple, LinkKindForm, LinkModifiersForm, LinkNameForm,
+ LinkOrdinalRawDylib, LinkRequiresName, MissingNativeLibrary, MultipleCfgs,
+ MultipleImportNameType, MultipleKindsInLink, MultipleLinkModifiers, MultipleModifiers,
+ MultipleNamesInLink, MultipleRenamings, MultipleWasmImport, NoLinkModOverride, RawDylibNoNul,
+ RenamingNoLink, UnexpectedLinkArg, UnknownImportNameType, UnknownLinkKind, UnknownLinkModifier,
+ UnsupportedAbi, UnsupportedAbiI686, WasmImportForm, WholeArchiveNeedsStatic,
+};
+
+use std::path::PathBuf;
+
+pub fn find_native_static_library(
+ name: &str,
+ verbatim: Option<bool>,
+ search_paths: &[PathBuf],
+ sess: &Session,
+) -> PathBuf {
+ let formats = if verbatim.unwrap_or(false) {
+ vec![("".into(), "".into())]
+ } else {
+ let os = (sess.target.staticlib_prefix.clone(), sess.target.staticlib_suffix.clone());
+ // On Windows, static libraries sometimes show up as libfoo.a and other
+ // times show up as foo.lib
+ let unix = ("lib".into(), ".a".into());
+ if os == unix { vec![os] } else { vec![os, unix] }
+ };
+
+ for path in search_paths {
+ for (prefix, suffix) in &formats {
+ let test = path.join(format!("{}{}{}", prefix, name, suffix));
+ if test.exists() {
+ return test;
+ }
+ }
+ }
+
+ sess.emit_fatal(MissingNativeLibrary::new(name, verbatim.unwrap_or(false)));
+}
+
+fn find_bundled_library(
+ name: Option<Symbol>,
+ verbatim: Option<bool>,
+ kind: NativeLibKind,
+ sess: &Session,
+) -> Option<Symbol> {
+ if sess.opts.unstable_opts.packed_bundled_libs &&
+ sess.crate_types().iter().any(|ct| ct == &CrateType::Rlib || ct == &CrateType::Staticlib) &&
+ let NativeLibKind::Static { bundle: Some(true) | None, .. } = kind {
+ find_native_static_library(
+ name.unwrap().as_str(),
+ verbatim,
+ &sess.target_filesearch(PathKind::Native).search_path_dirs(),
+ sess,
+ ).file_name().and_then(|s| s.to_str()).map(Symbol::intern)
+ } else {
+ None
+ }
+}
+
pub(crate) fn collect(tcx: TyCtxt<'_>) -> Vec<NativeLib> {
let mut collector = Collector { tcx, libs: Vec::new() };
for id in tcx.hir().items() {
@@ -35,7 +98,7 @@ struct Collector<'tcx> {
impl<'tcx> Collector<'tcx> {
fn process_item(&mut self, id: rustc_hir::ItemId) {
- if !matches!(self.tcx.def_kind(id.def_id), DefKind::ForeignMod) {
+ if !matches!(self.tcx.def_kind(id.owner_id), DefKind::ForeignMod) {
return;
}
@@ -61,36 +124,31 @@ impl<'tcx> Collector<'tcx> {
let mut modifiers = None;
let mut cfg = None;
let mut wasm_import_module = None;
+ let mut import_name_type = None;
for item in items.iter() {
match item.name_or_empty() {
sym::name => {
if name.is_some() {
- let msg = "multiple `name` arguments in a single `#[link]` attribute";
- sess.span_err(item.span(), msg);
+ sess.emit_err(MultipleNamesInLink { span: item.span() });
continue;
}
let Some(link_name) = item.value_str() else {
- let msg = "link name must be of the form `name = \"string\"`";
- sess.span_err(item.span(), msg);
+ sess.emit_err(LinkNameForm { span: item.span() });
continue;
};
let span = item.name_value_literal_span().unwrap();
if link_name.is_empty() {
- struct_span_err!(sess, span, E0454, "link name must not be empty")
- .span_label(span, "empty link name")
- .emit();
+ sess.emit_err(EmptyLinkName { span });
}
name = Some((link_name, span));
}
sym::kind => {
if kind.is_some() {
- let msg = "multiple `kind` arguments in a single `#[link]` attribute";
- sess.span_err(item.span(), msg);
+ sess.emit_err(MultipleKindsInLink { span: item.span() });
continue;
}
let Some(link_kind) = item.value_str() else {
- let msg = "link kind must be of the form `kind = \"string\"`";
- sess.span_err(item.span(), msg);
+ sess.emit_err(LinkKindForm { span: item.span() });
continue;
};
@@ -100,44 +158,26 @@ impl<'tcx> Collector<'tcx> {
"dylib" => NativeLibKind::Dylib { as_needed: None },
"framework" => {
if !sess.target.is_like_osx {
- struct_span_err!(
- sess,
- span,
- E0455,
- "link kind `framework` is only supported on Apple targets"
- )
- .emit();
+ sess.emit_err(LinkFrameworkApple { span });
}
NativeLibKind::Framework { as_needed: None }
}
"raw-dylib" => {
if !sess.target.is_like_windows {
- struct_span_err!(
- sess,
- span,
- E0455,
- "link kind `raw-dylib` is only supported on Windows targets"
- )
- .emit();
- } else if !features.raw_dylib {
+ sess.emit_err(FrameworkOnlyWindows { span });
+ } else if !features.raw_dylib && sess.target.arch == "x86" {
feature_err(
&sess.parse_sess,
sym::raw_dylib,
span,
- "link kind `raw-dylib` is unstable",
+ "link kind `raw-dylib` is unstable on x86",
)
.emit();
}
NativeLibKind::RawDylib
}
kind => {
- let msg = format!(
- "unknown link kind `{kind}`, expected one of: \
- static, dylib, framework, raw-dylib"
- );
- struct_span_err!(sess, span, E0458, "{}", msg)
- .span_label(span, "unknown link kind")
- .emit();
+ sess.emit_err(UnknownLinkKind { span, kind });
continue;
}
};
@@ -145,32 +185,26 @@ impl<'tcx> Collector<'tcx> {
}
sym::modifiers => {
if modifiers.is_some() {
- let msg =
- "multiple `modifiers` arguments in a single `#[link]` attribute";
- sess.span_err(item.span(), msg);
+ sess.emit_err(MultipleLinkModifiers { span: item.span() });
continue;
}
let Some(link_modifiers) = item.value_str() else {
- let msg = "link modifiers must be of the form `modifiers = \"string\"`";
- sess.span_err(item.span(), msg);
+ sess.emit_err(LinkModifiersForm { span: item.span() });
continue;
};
modifiers = Some((link_modifiers, item.name_value_literal_span().unwrap()));
}
sym::cfg => {
if cfg.is_some() {
- let msg = "multiple `cfg` arguments in a single `#[link]` attribute";
- sess.span_err(item.span(), msg);
+ sess.emit_err(MultipleCfgs { span: item.span() });
continue;
}
let Some(link_cfg) = item.meta_item_list() else {
- let msg = "link cfg must be of the form `cfg(/* predicate */)`";
- sess.span_err(item.span(), msg);
+ sess.emit_err(LinkCfgForm { span: item.span() });
continue;
};
let [NestedMetaItem::MetaItem(link_cfg)] = link_cfg else {
- let msg = "link cfg must have a single predicate argument";
- sess.span_err(item.span(), msg);
+ sess.emit_err(LinkCfgSinglePredicate { span: item.span() });
continue;
};
if !features.link_cfg {
@@ -186,23 +220,55 @@ impl<'tcx> Collector<'tcx> {
}
sym::wasm_import_module => {
if wasm_import_module.is_some() {
- let msg = "multiple `wasm_import_module` arguments \
- in a single `#[link]` attribute";
- sess.span_err(item.span(), msg);
+ sess.emit_err(MultipleWasmImport { span: item.span() });
continue;
}
let Some(link_wasm_import_module) = item.value_str() else {
- let msg = "wasm import module must be of the form \
- `wasm_import_module = \"string\"`";
- sess.span_err(item.span(), msg);
+ sess.emit_err(WasmImportForm { span: item.span() });
continue;
};
wasm_import_module = Some((link_wasm_import_module, item.span()));
}
+ sym::import_name_type => {
+ if import_name_type.is_some() {
+ sess.emit_err(MultipleImportNameType { span: item.span() });
+ continue;
+ }
+ let Some(link_import_name_type) = item.value_str() else {
+ sess.emit_err(ImportNameTypeForm { span: item.span() });
+ continue;
+ };
+ if self.tcx.sess.target.arch != "x86" {
+ sess.emit_err(ImportNameTypeX86 { span: item.span() });
+ continue;
+ }
+
+ let link_import_name_type = match link_import_name_type.as_str() {
+ "decorated" => PeImportNameType::Decorated,
+ "noprefix" => PeImportNameType::NoPrefix,
+ "undecorated" => PeImportNameType::Undecorated,
+ import_name_type => {
+ sess.emit_err(UnknownImportNameType {
+ span: item.span(),
+ import_name_type,
+ });
+ continue;
+ }
+ };
+ if !features.raw_dylib {
+ let span = item.name_value_literal_span().unwrap();
+ feature_err(
+ &sess.parse_sess,
+ sym::raw_dylib,
+ span,
+ "import name type is unstable",
+ )
+ .emit();
+ }
+ import_name_type = Some((link_import_name_type, item.span()));
+ }
_ => {
- let msg = "unexpected `#[link]` argument, expected one of: \
- name, kind, modifiers, cfg, wasm_import_module";
- sess.span_err(item.span(), msg);
+ sess.emit_err(UnexpectedLinkArg { span: item.span() });
}
}
}
@@ -214,11 +280,7 @@ impl<'tcx> Collector<'tcx> {
let (modifier, value) = match modifier.strip_prefix(&['+', '-']) {
Some(m) => (m, modifier.starts_with('+')),
None => {
- sess.span_err(
- span,
- "invalid linking modifier syntax, expected '+' or '-' prefix \
- before one of: bundle, verbatim, whole-archive, as-needed",
- );
+ sess.emit_err(InvalidLinkModifier { span });
continue;
}
};
@@ -236,10 +298,7 @@ impl<'tcx> Collector<'tcx> {
}
let assign_modifier = |dst: &mut Option<bool>| {
if dst.is_some() {
- let msg = format!(
- "multiple `{modifier}` modifiers in a single `modifiers` argument"
- );
- sess.span_err(span, &msg);
+ sess.emit_err(MultipleModifiers { span, modifier });
} else {
*dst = Some(value);
}
@@ -249,11 +308,7 @@ impl<'tcx> Collector<'tcx> {
assign_modifier(bundle)
}
("bundle", _) => {
- sess.span_err(
- span,
- "linking modifier `bundle` is only compatible with \
- `static` linking kind",
- );
+ sess.emit_err(BundleNeedsStatic { span });
}
("verbatim", _) => {
@@ -265,11 +320,7 @@ impl<'tcx> Collector<'tcx> {
assign_modifier(whole_archive)
}
("whole-archive", _) => {
- sess.span_err(
- span,
- "linking modifier `whole-archive` is only compatible with \
- `static` linking kind",
- );
+ sess.emit_err(WholeArchiveNeedsStatic { span });
}
("as-needed", Some(NativeLibKind::Dylib { as_needed }))
@@ -278,21 +329,11 @@ impl<'tcx> Collector<'tcx> {
assign_modifier(as_needed)
}
("as-needed", _) => {
- sess.span_err(
- span,
- "linking modifier `as-needed` is only compatible with \
- `dylib` and `framework` linking kinds",
- );
+ sess.emit_err(AsNeededCompatibility { span });
}
_ => {
- sess.span_err(
- span,
- format!(
- "unknown linking modifier `{modifier}`, expected one of: \
- bundle, verbatim, whole-archive, as-needed"
- ),
- );
+ sess.emit_err(UnknownLinkModifier { span, modifier });
}
}
}
@@ -300,41 +341,68 @@ impl<'tcx> Collector<'tcx> {
if let Some((_, span)) = wasm_import_module {
if name.is_some() || kind.is_some() || modifiers.is_some() || cfg.is_some() {
- let msg = "`wasm_import_module` is incompatible with \
- other arguments in `#[link]` attributes";
- sess.span_err(span, msg);
+ sess.emit_err(IncompatibleWasmLink { span });
}
} else if name.is_none() {
- struct_span_err!(
- sess,
- m.span,
- E0459,
- "`#[link]` attribute requires a `name = \"string\"` argument"
- )
- .span_label(m.span, "missing `name` argument")
- .emit();
+ sess.emit_err(LinkRequiresName { span: m.span });
+ }
+
+ // Do this outside of the loop so that `import_name_type` can be specified before `kind`.
+ if let Some((_, span)) = import_name_type {
+ if kind != Some(NativeLibKind::RawDylib) {
+ sess.emit_err(ImportNameTypeRaw { span });
+ }
}
let dll_imports = match kind {
Some(NativeLibKind::RawDylib) => {
if let Some((name, span)) = name && name.as_str().contains('\0') {
- sess.span_err(
- span,
- "link name must not contain NUL characters if link kind is `raw-dylib`",
- );
+ sess.emit_err(RawDylibNoNul { span });
}
foreign_mod_items
.iter()
- .map(|child_item| self.build_dll_import(abi, child_item))
+ .map(|child_item| {
+ self.build_dll_import(
+ abi,
+ import_name_type.map(|(import_name_type, _)| import_name_type),
+ child_item,
+ )
+ })
.collect()
}
- _ => Vec::new(),
+ _ => {
+ for child_item in foreign_mod_items {
+ if self.tcx.def_kind(child_item.id.owner_id).has_codegen_attrs()
+ && self
+ .tcx
+ .codegen_fn_attrs(child_item.id.owner_id)
+ .link_ordinal
+ .is_some()
+ {
+ let link_ordinal_attr = self
+ .tcx
+ .hir()
+ .attrs(child_item.id.owner_id.into())
+ .iter()
+ .find(|a| a.has_name(sym::link_ordinal))
+ .unwrap();
+ sess.emit_err(LinkOrdinalRawDylib { span: link_ordinal_attr.span });
+ }
+ }
+
+ Vec::new()
+ }
};
+
+ let name = name.map(|(name, _)| name);
+ let kind = kind.unwrap_or(NativeLibKind::Unspecified);
+ let filename = find_bundled_library(name, verbatim, kind, sess);
self.libs.push(NativeLib {
- name: name.map(|(name, _)| name),
- kind: kind.unwrap_or(NativeLibKind::Unspecified),
+ name,
+ filename,
+ kind,
cfg,
- foreign_module: Some(it.def_id.to_def_id()),
+ foreign_module: Some(it.owner_id.to_def_id()),
wasm_import_module: wasm_import_module.map(|(name, _)| name),
verbatim,
dll_imports,
@@ -349,7 +417,7 @@ impl<'tcx> Collector<'tcx> {
for lib in &self.tcx.sess.opts.libs {
if let NativeLibKind::Framework { .. } = lib.kind && !self.tcx.sess.target.is_like_osx {
// Cannot check this when parsing options because the target is not yet available.
- self.tcx.sess.err("library kind `framework` is only supported on Apple targets");
+ self.tcx.sess.emit_err(LibFrameworkApple);
}
if let Some(ref new_name) = lib.new_name {
let any_duplicate = self
@@ -358,23 +426,11 @@ impl<'tcx> Collector<'tcx> {
.filter_map(|lib| lib.name.as_ref())
.any(|n| n.as_str() == lib.name);
if new_name.is_empty() {
- self.tcx.sess.err(format!(
- "an empty renaming target was specified for library `{}`",
- lib.name
- ));
+ self.tcx.sess.emit_err(EmptyRenamingTarget { lib_name: &lib.name });
} else if !any_duplicate {
- self.tcx.sess.err(format!(
- "renaming of the library `{}` was specified, \
- however this crate contains no `#[link(...)]` \
- attributes referencing this library",
- lib.name
- ));
+ self.tcx.sess.emit_err(RenamingNoLink { lib_name: &lib.name });
} else if !renames.insert(&lib.name) {
- self.tcx.sess.err(format!(
- "multiple renamings were \
- specified for library `{}`",
- lib.name
- ));
+ self.tcx.sess.emit_err(MultipleRenamings { lib_name: &lib.name });
}
}
}
@@ -399,10 +455,13 @@ impl<'tcx> Collector<'tcx> {
// involved or not, library reordering and kind overriding without
// explicit `:rename` in particular.
if lib.has_modifiers() || passed_lib.has_modifiers() {
- let msg = "overriding linking modifiers from command line is not supported";
match lib.foreign_module {
- Some(def_id) => self.tcx.sess.span_err(self.tcx.def_span(def_id), msg),
- None => self.tcx.sess.err(msg),
+ Some(def_id) => self.tcx.sess.emit_err(NoLinkModOverride {
+ span: Some(self.tcx.def_span(def_id)),
+ }),
+ None => {
+ self.tcx.sess.emit_err(NoLinkModOverride { span: None })
+ }
};
}
if passed_lib.kind != NativeLibKind::Unspecified {
@@ -421,8 +480,13 @@ impl<'tcx> Collector<'tcx> {
if existing.is_empty() {
// Add if not found
let new_name: Option<&str> = passed_lib.new_name.as_deref();
+ let name = Some(Symbol::intern(new_name.unwrap_or(&passed_lib.name)));
+ let sess = self.tcx.sess;
+ let filename =
+ find_bundled_library(name, passed_lib.verbatim, passed_lib.kind, sess);
self.libs.push(NativeLib {
- name: Some(Symbol::intern(new_name.unwrap_or(&passed_lib.name))),
+ name,
+ filename,
kind: passed_lib.kind,
cfg: None,
foreign_module: None,
@@ -441,7 +505,7 @@ impl<'tcx> Collector<'tcx> {
fn i686_arg_list_size(&self, item: &hir::ForeignItemRef) -> usize {
let argument_types: &List<Ty<'_>> = self.tcx.erase_late_bound_regions(
self.tcx
- .type_of(item.id.def_id)
+ .type_of(item.id.owner_id)
.fn_sig(self.tcx)
.inputs()
.map_bound(|slice| self.tcx.mk_type_list(slice.iter())),
@@ -462,7 +526,12 @@ impl<'tcx> Collector<'tcx> {
.sum()
}
- fn build_dll_import(&self, abi: Abi, item: &hir::ForeignItemRef) -> DllImport {
+ fn build_dll_import(
+ &self,
+ abi: Abi,
+ import_name_type: Option<PeImportNameType>,
+ item: &hir::ForeignItemRef,
+ ) -> DllImport {
let calling_convention = if self.tcx.sess.target.arch == "x86" {
match abi {
Abi::C { .. } | Abi::Cdecl { .. } => DllCallingConvention::C,
@@ -476,29 +545,29 @@ impl<'tcx> Collector<'tcx> {
DllCallingConvention::Vectorcall(self.i686_arg_list_size(item))
}
_ => {
- self.tcx.sess.span_fatal(
- item.span,
- r#"ABI not supported by `#[link(kind = "raw-dylib")]` on i686"#,
- );
+ self.tcx.sess.emit_fatal(UnsupportedAbiI686 { span: item.span });
}
}
} else {
match abi {
Abi::C { .. } | Abi::Win64 { .. } | Abi::System { .. } => DllCallingConvention::C,
_ => {
- self.tcx.sess.span_fatal(
- item.span,
- r#"ABI not supported by `#[link(kind = "raw-dylib")]` on this architecture"#,
- );
+ self.tcx.sess.emit_fatal(UnsupportedAbi { span: item.span });
}
}
};
+ let codegen_fn_attrs = self.tcx.codegen_fn_attrs(item.id.owner_id);
+ let import_name_type = codegen_fn_attrs
+ .link_ordinal
+ .map_or(import_name_type, |ord| Some(PeImportNameType::Ordinal(ord)));
+
DllImport {
- name: item.ident.name,
- ordinal: self.tcx.codegen_fn_attrs(item.id.def_id).link_ordinal,
+ name: codegen_fn_attrs.link_name.unwrap_or(item.ident.name),
+ import_name_type,
calling_convention,
span: item.span,
+ is_fn: self.tcx.def_kind(item.id.owner_id).is_fn_like(),
}
}
}
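The native_libs.rs changes above thread a new `import_name_type` key through `#[link]` parsing: it is only accepted together with `kind = "raw-dylib"`, only on x86 targets, takes one of `decorated`, `noprefix`, or `undecorated`, and at this point is still gated behind the `raw_dylib` feature. A minimal, illustrative use on an x86 Windows target follows; the DLL and function names are arbitrary examples, not taken from the diff.

    #![feature(raw_dylib)]

    #[link(name = "kernel32", kind = "raw-dylib", import_name_type = "undecorated")]
    extern "system" {
        fn GetCurrentProcessId() -> u32;
    }

    fn main() {
        // Safety: GetCurrentProcessId takes no arguments and has no preconditions.
        println!("pid = {}", unsafe { GetCurrentProcessId() });
    }

Per the checks above, specifying `import_name_type` without `kind = "raw-dylib"`, on a non-x86 target, or with an unknown value is rejected, and a `#[link_ordinal]` on an item takes precedence by turning the import name type into `PeImportNameType::Ordinal`.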
diff --git a/compiler/rustc_metadata/src/rmeta/decoder.rs b/compiler/rustc_metadata/src/rmeta/decoder.rs
index 40dc4fb05..691e3d0f8 100644
--- a/compiler/rustc_metadata/src/rmeta/decoder.rs
+++ b/compiler/rustc_metadata/src/rmeta/decoder.rs
@@ -4,7 +4,6 @@ use crate::creader::{CStore, CrateMetadataRef};
use crate::rmeta::*;
use rustc_ast as ast;
-use rustc_ast::ptr::P;
use rustc_data_structures::captures::Captures;
use rustc_data_structures::fx::FxHashMap;
use rustc_data_structures::svh::Svh;
@@ -33,7 +32,7 @@ use rustc_session::cstore::{
use rustc_session::Session;
use rustc_span::hygiene::{ExpnIndex, MacroKind};
use rustc_span::source_map::{respan, Spanned};
-use rustc_span::symbol::{sym, Ident, Symbol};
+use rustc_span::symbol::{kw, sym, Ident, Symbol};
use rustc_span::{self, BytePos, ExpnId, Pos, Span, SyntaxContext, DUMMY_SP};
use proc_macro::bridge::client::ProcMacro;
@@ -42,7 +41,6 @@ use std::iter::TrustedLen;
use std::mem;
use std::num::NonZeroUsize;
use std::path::Path;
-use tracing::debug;
pub(super) use cstore_impl::provide;
pub use cstore_impl::provide_extern;
@@ -99,7 +97,7 @@ pub(crate) struct CrateMetadata {
/// Proc macro descriptions for this crate, if it's a proc macro crate.
raw_proc_macros: Option<&'static [ProcMacro]>,
/// Source maps for code from the crate.
- source_map_import_info: OnceCell<Vec<ImportedSourceFile>>,
+ source_map_import_info: Lock<Vec<Option<ImportedSourceFile>>>,
/// For every definition in this crate, maps its `DefPathHash` to its `DefIndex`.
def_path_hash_map: DefPathHashMapRef<'static>,
/// Likewise for ExpnHash.
@@ -143,7 +141,8 @@ pub(crate) struct CrateMetadata {
}
/// Holds information about a rustc_span::SourceFile imported from another crate.
-/// See `imported_source_files()` for more information.
+/// See `imported_source_file()` for more information.
+#[derive(Clone)]
struct ImportedSourceFile {
/// This SourceFile's byte-offset within the source_map of its original crate
original_start_pos: rustc_span::BytePos,
@@ -160,9 +159,6 @@ pub(super) struct DecodeContext<'a, 'tcx> {
sess: Option<&'tcx Session>,
tcx: Option<TyCtxt<'tcx>>,
- // Cache the last used source_file for translating spans as an optimization.
- last_source_file_index: usize,
-
lazy_state: LazyState,
// Used for decoding interpret::AllocIds in a cached & thread-safe manner.
@@ -191,7 +187,6 @@ pub(super) trait Metadata<'a, 'tcx>: Copy {
blob: self.blob(),
sess: self.sess().or(tcx.map(|tcx| tcx.sess)),
tcx,
- last_source_file_index: 0,
lazy_state: LazyState::NoNode,
alloc_decoding_session: self
.cdata()
@@ -455,6 +450,13 @@ impl<'a, 'tcx> Decodable<DecodeContext<'a, 'tcx>> for ExpnIndex {
}
}
+impl<'a, 'tcx> Decodable<DecodeContext<'a, 'tcx>> for ast::AttrId {
+ fn decode(d: &mut DecodeContext<'a, 'tcx>) -> ast::AttrId {
+ let sess = d.sess.expect("can't decode AttrId without Session");
+ sess.parse_sess.attr_id_generator.mk_attr_id()
+ }
+}
+
impl<'a, 'tcx> Decodable<DecodeContext<'a, 'tcx>> for SyntaxContext {
fn decode(decoder: &mut DecodeContext<'a, 'tcx>) -> SyntaxContext {
let cdata = decoder.cdata();
@@ -527,6 +529,9 @@ impl<'a, 'tcx> Decodable<DecodeContext<'a, 'tcx>> for Span {
bug!("Cannot decode Span without Session.")
};
+ // Index of the file in the corresponding crate's list of encoded files.
+ let metadata_index = u32::decode(decoder);
+
// There are two possibilities here:
// 1. This is a 'local span', which is located inside a `SourceFile`
// that came from this crate. In this case, we use the source map data
@@ -553,10 +558,10 @@ impl<'a, 'tcx> Decodable<DecodeContext<'a, 'tcx>> for Span {
// to be based on the *foreign* crate (e.g. crate C), not the crate
// we are writing metadata for (e.g. crate B). This allows us to
// treat the 'local' and 'foreign' cases almost identically during deserialization:
- // we can call `imported_source_files` for the proper crate, and binary search
+ // we can call `imported_source_file` for the proper crate, and binary search
// through the returned slice using our span.
- let imported_source_files = if tag == TAG_VALID_SPAN_LOCAL {
- decoder.cdata().imported_source_files(sess)
+ let source_file = if tag == TAG_VALID_SPAN_LOCAL {
+ decoder.cdata().imported_source_file(metadata_index, sess)
} else {
// When we encode a proc-macro crate, all `Span`s should be encoded
// with `TAG_VALID_SPAN_LOCAL`
@@ -577,66 +582,69 @@ impl<'a, 'tcx> Decodable<DecodeContext<'a, 'tcx>> for Span {
cnum
);
- // Decoding 'foreign' spans should be rare enough that it's
- // not worth it to maintain a per-CrateNum cache for `last_source_file_index`.
- // We just set it to 0, to ensure that we don't try to access something out
- // of bounds for our initial 'guess'
- decoder.last_source_file_index = 0;
-
let foreign_data = decoder.cdata().cstore.get_crate_data(cnum);
- foreign_data.imported_source_files(sess)
- };
-
- let source_file = {
- // Optimize for the case that most spans within a translated item
- // originate from the same source_file.
- let last_source_file = &imported_source_files[decoder.last_source_file_index];
-
- if lo >= last_source_file.original_start_pos && lo <= last_source_file.original_end_pos
- {
- last_source_file
- } else {
- let index = imported_source_files
- .binary_search_by_key(&lo, |source_file| source_file.original_start_pos)
- .unwrap_or_else(|index| index - 1);
-
- // Don't try to cache the index for foreign spans,
- // as this would require a map from CrateNums to indices
- if tag == TAG_VALID_SPAN_LOCAL {
- decoder.last_source_file_index = index;
- }
- &imported_source_files[index]
- }
+ foreign_data.imported_source_file(metadata_index, sess)
};
- // Make sure our binary search above is correct.
+ // Make sure our span is well-formed.
debug_assert!(
- lo >= source_file.original_start_pos && lo <= source_file.original_end_pos,
- "Bad binary search: lo={:?} source_file.original_start_pos={:?} source_file.original_end_pos={:?}",
+ lo + source_file.original_start_pos <= source_file.original_end_pos,
+ "Malformed encoded span: lo={:?} source_file.original_start_pos={:?} source_file.original_end_pos={:?}",
lo,
source_file.original_start_pos,
source_file.original_end_pos
);
- // Make sure we correctly filtered out invalid spans during encoding
+ // Make sure we correctly filtered out invalid spans during encoding.
debug_assert!(
- hi >= source_file.original_start_pos && hi <= source_file.original_end_pos,
- "Bad binary search: hi={:?} source_file.original_start_pos={:?} source_file.original_end_pos={:?}",
+ hi + source_file.original_start_pos <= source_file.original_end_pos,
+ "Malformed encoded span: hi={:?} source_file.original_start_pos={:?} source_file.original_end_pos={:?}",
hi,
source_file.original_start_pos,
source_file.original_end_pos
);
- let lo =
- (lo + source_file.translated_source_file.start_pos) - source_file.original_start_pos;
- let hi =
- (hi + source_file.translated_source_file.start_pos) - source_file.original_start_pos;
+ let lo = lo + source_file.translated_source_file.start_pos;
+ let hi = hi + source_file.translated_source_file.start_pos;
// Do not try to decode parent for foreign spans.
Span::new(lo, hi, ctxt, None)
}
}
+impl<'a, 'tcx> Decodable<DecodeContext<'a, 'tcx>> for Symbol {
+ fn decode(d: &mut DecodeContext<'a, 'tcx>) -> Self {
+ let tag = d.read_u8();
+
+ match tag {
+ SYMBOL_STR => {
+ let s = d.read_str();
+ Symbol::intern(s)
+ }
+ SYMBOL_OFFSET => {
+ // read str offset
+ let pos = d.read_usize();
+ let old_pos = d.opaque.position();
+
+ // move to str offset and read
+ d.opaque.set_position(pos);
+ let s = d.read_str();
+ let sym = Symbol::intern(s);
+
+ // restore position
+ d.opaque.set_position(old_pos);
+
+ sym
+ }
+ SYMBOL_PREINTERNED => {
+ let symbol_index = d.read_u32();
+ Symbol::new_from_decoded(symbol_index)
+ }
+ _ => unreachable!(),
+ }
+ }
+}
+
impl<'a, 'tcx> Decodable<DecodeContext<'a, 'tcx>> for &'tcx [ty::abstract_const::Node<'tcx>] {
fn decode(d: &mut DecodeContext<'a, 'tcx>) -> Self {
ty::codec::RefDecodable::decode(d)
@@ -765,7 +773,15 @@ impl<'a, 'tcx> CrateMetadataRef<'a> {
}
fn opt_item_name(self, item_index: DefIndex) -> Option<Symbol> {
- self.def_key(item_index).disambiguated_data.data.get_opt_name()
+ let def_key = self.def_key(item_index);
+ def_key.disambiguated_data.data.get_opt_name().or_else(|| {
+ if def_key.disambiguated_data.data == DefPathData::Ctor {
+ let parent_index = def_key.parent.expect("no parent for a constructor");
+ self.def_key(parent_index).disambiguated_data.data.get_opt_name()
+ } else {
+ None
+ }
+ })
}
fn item_name(self, item_index: DefIndex) -> Symbol {
@@ -783,26 +799,11 @@ impl<'a, 'tcx> CrateMetadataRef<'a> {
self.opt_item_ident(item_index, sess).expect("no encoded ident for item")
}
- fn maybe_kind(self, item_id: DefIndex) -> Option<EntryKind> {
- self.root.tables.kind.get(self, item_id).map(|k| k.decode(self))
- }
-
#[inline]
pub(super) fn map_encoded_cnum_to_current(self, cnum: CrateNum) -> CrateNum {
if cnum == LOCAL_CRATE { self.cnum } else { self.cnum_map[cnum] }
}
- fn kind(self, item_id: DefIndex) -> EntryKind {
- self.maybe_kind(item_id).unwrap_or_else(|| {
- bug!(
- "CrateMetadata::kind({:?}): id not found, in crate {:?} with number {}",
- item_id,
- self.root.name,
- self.cnum,
- )
- })
- }
-
fn def_kind(self, item_id: DefIndex) -> DefKind {
self.root.tables.opt_def_kind.get(self, item_id).unwrap_or_else(|| {
bug!(
@@ -854,21 +855,16 @@ impl<'a, 'tcx> CrateMetadataRef<'a> {
)
}
- fn get_variant(self, kind: &EntryKind, index: DefIndex, parent_did: DefId) -> ty::VariantDef {
- let data = match kind {
- EntryKind::Variant(data) | EntryKind::Struct(data) | EntryKind::Union(data) => {
- data.decode(self)
- }
- _ => bug!(),
- };
-
+ fn get_variant(self, kind: &DefKind, index: DefIndex, parent_did: DefId) -> ty::VariantDef {
let adt_kind = match kind {
- EntryKind::Variant(_) => ty::AdtKind::Enum,
- EntryKind::Struct(..) => ty::AdtKind::Struct,
- EntryKind::Union(..) => ty::AdtKind::Union,
+ DefKind::Variant => ty::AdtKind::Enum,
+ DefKind::Struct => ty::AdtKind::Struct,
+ DefKind::Union => ty::AdtKind::Union,
_ => bug!(),
};
+ let data = self.root.tables.variant_data.get(self, index).unwrap().decode(self);
+
let variant_did =
if adt_kind == ty::AdtKind::Enum { Some(self.local_def_id(index)) } else { None };
let ctor_did = data.ctor.map(|index| self.local_def_id(index));
@@ -899,13 +895,13 @@ impl<'a, 'tcx> CrateMetadataRef<'a> {
}
fn get_adt_def(self, item_id: DefIndex, tcx: TyCtxt<'tcx>) -> ty::AdtDef<'tcx> {
- let kind = self.kind(item_id);
+ let kind = self.def_kind(item_id);
let did = self.local_def_id(item_id);
let adt_kind = match kind {
- EntryKind::Enum => ty::AdtKind::Enum,
- EntryKind::Struct(_) => ty::AdtKind::Struct,
- EntryKind::Union(_) => ty::AdtKind::Union,
+ DefKind::Enum => ty::AdtKind::Enum,
+ DefKind::Struct => ty::AdtKind::Struct,
+ DefKind::Union => ty::AdtKind::Union,
_ => bug!("get_adt_def called on a non-ADT {:?}", did),
};
let repr = self.root.tables.repr_options.get(self, item_id).unwrap().decode(self);
@@ -917,7 +913,13 @@ impl<'a, 'tcx> CrateMetadataRef<'a> {
.get(self, item_id)
.unwrap_or_else(LazyArray::empty)
.decode(self)
- .map(|index| self.get_variant(&self.kind(index), index, did))
+ .filter_map(|index| {
+ let kind = self.def_kind(index);
+ match kind {
+ DefKind::Ctor(..) => None,
+ _ => Some(self.get_variant(&kind, index, did)),
+ }
+ })
.collect()
} else {
std::iter::once(self.get_variant(&kind, item_id, did)).collect()
@@ -930,8 +932,14 @@ impl<'a, 'tcx> CrateMetadataRef<'a> {
self.root.tables.generics_of.get(self, item_id).unwrap().decode((self, sess))
}
- fn get_visibility(self, id: DefIndex) -> ty::Visibility {
- self.root.tables.visibility.get(self, id).unwrap().decode(self)
+ fn get_visibility(self, id: DefIndex) -> ty::Visibility<DefId> {
+ self.root
+ .tables
+ .visibility
+ .get(self, id)
+ .unwrap()
+ .decode(self)
+ .map_id(|index| self.local_def_id(index))
}
fn get_trait_item_def_id(self, id: DefIndex) -> Option<DefId> {
@@ -1027,71 +1035,43 @@ impl<'a, 'tcx> CrateMetadataRef<'a> {
let vis = self.get_visibility(child_index);
let span = self.get_span(child_index, sess);
let macro_rules = match kind {
- DefKind::Macro(..) => match self.kind(child_index) {
- EntryKind::MacroDef(_, macro_rules) => macro_rules,
- _ => unreachable!(),
- },
+ DefKind::Macro(..) => {
+ self.root.tables.macro_rules.get(self, child_index).is_some()
+ }
_ => false,
};
callback(ModChild { ident, res, vis, span, macro_rules });
- // For non-re-export structs and variants add their constructors to children.
- // Re-export lists automatically contain constructors when necessary.
- match kind {
- DefKind::Struct => {
- if let Some((ctor_def_id, ctor_kind)) =
- self.get_ctor_def_id_and_kind(child_index)
- {
- let ctor_res =
- Res::Def(DefKind::Ctor(CtorOf::Struct, ctor_kind), ctor_def_id);
- let vis = self.get_visibility(ctor_def_id.index);
- callback(ModChild {
- ident,
- res: ctor_res,
- vis,
- span,
- macro_rules: false,
- });
- }
- }
- DefKind::Variant => {
- // Braced variants, unlike structs, generate unusable names in
- // value namespace, they are reserved for possible future use.
- // It's ok to use the variant's id as a ctor id since an
- // error will be reported on any use of such resolution anyway.
- let (ctor_def_id, ctor_kind) = self
- .get_ctor_def_id_and_kind(child_index)
- .unwrap_or((def_id, CtorKind::Fictive));
- let ctor_res =
- Res::Def(DefKind::Ctor(CtorOf::Variant, ctor_kind), ctor_def_id);
- let mut vis = self.get_visibility(ctor_def_id.index);
- if ctor_def_id == def_id && vis.is_public() {
- // For non-exhaustive variants lower the constructor visibility to
- // within the crate. We only need this for fictive constructors,
- // for other constructors correct visibilities
- // were already encoded in metadata.
- let mut attrs = self.get_item_attrs(def_id.index, sess);
- if attrs.any(|item| item.has_name(sym::non_exhaustive)) {
- let crate_def_id = self.local_def_id(CRATE_DEF_INDEX);
- vis = ty::Visibility::Restricted(crate_def_id);
- }
+ // For non-reexport variants add their fictive constructors to children.
+ // Braced variants, unlike structs, generate unusable names in value namespace,
+ // they are reserved for possible future use. It's ok to use the variant's id as
+ // a ctor id since an error will be reported on any use of such resolution anyway.
+ // Reexport lists automatically contain such constructors when necessary.
+ if kind == DefKind::Variant && self.get_ctor_def_id_and_kind(child_index).is_none()
+ {
+ let ctor_res =
+ Res::Def(DefKind::Ctor(CtorOf::Variant, CtorKind::Fictive), def_id);
+ let mut vis = vis;
+ if vis.is_public() {
+ // For non-exhaustive variants lower the constructor visibility to
+ // within the crate. We only need this for fictive constructors,
+ // for other constructors correct visibilities
+ // were already encoded in metadata.
+ let mut attrs = self.get_item_attrs(def_id.index, sess);
+ if attrs.any(|item| item.has_name(sym::non_exhaustive)) {
+ vis = ty::Visibility::Restricted(self.local_def_id(CRATE_DEF_INDEX));
}
- callback(ModChild { ident, res: ctor_res, vis, span, macro_rules: false });
}
- _ => {}
+ callback(ModChild { ident, res: ctor_res, vis, span, macro_rules: false });
}
}
}
- match self.kind(id) {
- EntryKind::Mod(exports) => {
- for exp in exports.decode((self, sess)) {
- callback(exp);
- }
+ if let Some(exports) = self.root.tables.module_reexports.get(self, id) {
+ for exp in exports.decode((self, sess)) {
+ callback(exp);
}
- EntryKind::Enum | EntryKind::Trait => {}
- _ => bug!("`for_each_module_child` is called on a non-module: {:?}", self.def_kind(id)),
}
}
@@ -1104,19 +1084,21 @@ impl<'a, 'tcx> CrateMetadataRef<'a> {
}
fn module_expansion(self, id: DefIndex, sess: &Session) -> ExpnId {
- match self.kind(id) {
- EntryKind::Mod(_) | EntryKind::Enum | EntryKind::Trait => {
- self.get_expn_that_defined(id, sess)
- }
+ match self.def_kind(id) {
+ DefKind::Mod | DefKind::Enum | DefKind::Trait => self.get_expn_that_defined(id, sess),
_ => panic!("Expected module, found {:?}", self.local_def_id(id)),
}
}
- fn get_fn_has_self_parameter(self, id: DefIndex) -> bool {
- match self.kind(id) {
- EntryKind::AssocFn { has_self, .. } => has_self,
- _ => false,
- }
+ fn get_fn_has_self_parameter(self, id: DefIndex, sess: &'a Session) -> bool {
+ self.root
+ .tables
+ .fn_arg_names
+ .get(self, id)
+ .unwrap_or_else(LazyArray::empty)
+ .decode((self, sess))
+ .nth(0)
+ .map_or(false, |ident| ident.name == kw::SelfLower)
}
fn get_associated_item_def_ids(
@@ -1133,15 +1115,17 @@ impl<'a, 'tcx> CrateMetadataRef<'a> {
.map(move |child_index| self.local_def_id(child_index))
}
- fn get_associated_item(self, id: DefIndex) -> ty::AssocItem {
+ fn get_associated_item(self, id: DefIndex, sess: &'a Session) -> ty::AssocItem {
let name = self.item_name(id);
- let (kind, container, has_self) = match self.kind(id) {
- EntryKind::AssocConst(container) => (ty::AssocKind::Const, container, false),
- EntryKind::AssocFn { container, has_self } => (ty::AssocKind::Fn, container, has_self),
- EntryKind::AssocType(container) => (ty::AssocKind::Type, container, false),
- _ => bug!("cannot get associated-item of `{:?}`", id),
+ let kind = match self.def_kind(id) {
+ DefKind::AssocConst => ty::AssocKind::Const,
+ DefKind::AssocFn => ty::AssocKind::Fn,
+ DefKind::AssocTy => ty::AssocKind::Type,
+ _ => bug!("cannot get associated-item of `{:?}`", self.def_key(id)),
};
+ let has_self = self.get_fn_has_self_parameter(id, sess);
+ let container = self.root.tables.assoc_container.get(self, id).unwrap();
ty::AssocItem {
name,
@@ -1154,9 +1138,9 @@ impl<'a, 'tcx> CrateMetadataRef<'a> {
}
fn get_ctor_def_id_and_kind(self, node_id: DefIndex) -> Option<(DefId, CtorKind)> {
- match self.kind(node_id) {
- EntryKind::Struct(data) | EntryKind::Variant(data) => {
- let vdata = data.decode(self);
+ match self.def_kind(node_id) {
+ DefKind::Struct | DefKind::Variant => {
+ let vdata = self.root.tables.variant_data.get(self, node_id).unwrap().decode(self);
vdata.ctor.map(|index| (self.local_def_id(index), vdata.ctor_kind))
}
_ => None,
@@ -1202,7 +1186,10 @@ impl<'a, 'tcx> CrateMetadataRef<'a> {
.map(move |index| respan(self.get_span(index, sess), self.item_name(index)))
}
- fn get_struct_field_visibilities(self, id: DefIndex) -> impl Iterator<Item = Visibility> + 'a {
+ fn get_struct_field_visibilities(
+ self,
+ id: DefIndex,
+ ) -> impl Iterator<Item = Visibility<DefId>> + 'a {
self.root
.tables
.children
@@ -1344,18 +1331,22 @@ impl<'a, 'tcx> CrateMetadataRef<'a> {
}
fn get_macro(self, id: DefIndex, sess: &Session) -> ast::MacroDef {
- match self.kind(id) {
- EntryKind::MacroDef(mac_args, macro_rules) => {
- ast::MacroDef { body: P(mac_args.decode((self, sess))), macro_rules }
+ match self.def_kind(id) {
+ DefKind::Macro(_) => {
+ let macro_rules = self.root.tables.macro_rules.get(self, id).is_some();
+ let body =
+ self.root.tables.macro_definition.get(self, id).unwrap().decode((self, sess));
+ ast::MacroDef { macro_rules, body: ast::ptr::P(body) }
}
_ => bug!(),
}
}
fn is_foreign_item(self, id: DefIndex) -> bool {
- match self.kind(id) {
- EntryKind::ForeignStatic | EntryKind::ForeignFn => true,
- _ => false,
+ if let Some(parent) = self.def_key(id).parent {
+ matches!(self.def_kind(parent), DefKind::ForeignMod)
+ } else {
+ false
}
}
@@ -1453,7 +1444,7 @@ impl<'a, 'tcx> CrateMetadataRef<'a> {
///
/// Proc macro crates don't currently export spans, so this function does not have
/// to work for them.
- fn imported_source_files(self, sess: &Session) -> &'a [ImportedSourceFile] {
+ fn imported_source_file(self, source_file_index: u32, sess: &Session) -> ImportedSourceFile {
fn filter<'a>(sess: &Session, path: Option<&'a Path>) -> Option<&'a Path> {
path.filter(|_| {
// Only spend time on further checks if we have what to translate *to*.
@@ -1541,90 +1532,96 @@ impl<'a, 'tcx> CrateMetadataRef<'a> {
}
};
- self.cdata.source_map_import_info.get_or_init(|| {
- let external_source_map = self.root.source_map.decode(self);
-
- external_source_map
- .map(|source_file_to_import| {
- // We can't reuse an existing SourceFile, so allocate a new one
- // containing the information we need.
- let rustc_span::SourceFile {
- mut name,
- src_hash,
- start_pos,
- end_pos,
- lines,
- multibyte_chars,
- non_narrow_chars,
- normalized_pos,
- name_hash,
- ..
- } = source_file_to_import;
-
- // If this file is under $sysroot/lib/rustlib/src/ but has not been remapped
- // during rust bootstrapping by `remap-debuginfo = true`, and the user
- // wish to simulate that behaviour by -Z simulate-remapped-rust-src-base,
- // then we change `name` to a similar state as if the rust was bootstrapped
- // with `remap-debuginfo = true`.
- // This is useful for testing so that tests about the effects of
- // `try_to_translate_virtual_to_real` don't have to worry about how the
- // compiler is bootstrapped.
- if let Some(virtual_dir) =
- &sess.opts.unstable_opts.simulate_remapped_rust_src_base
- {
- if let Some(real_dir) = &sess.opts.real_rust_source_base_dir {
- if let rustc_span::FileName::Real(ref mut old_name) = name {
- if let rustc_span::RealFileName::LocalPath(local) = old_name {
- if let Ok(rest) = local.strip_prefix(real_dir) {
- *old_name = rustc_span::RealFileName::Remapped {
- local_path: None,
- virtual_name: virtual_dir.join(rest),
- };
- }
+ let mut import_info = self.cdata.source_map_import_info.lock();
+ for _ in import_info.len()..=(source_file_index as usize) {
+ import_info.push(None);
+ }
+ import_info[source_file_index as usize]
+ .get_or_insert_with(|| {
+ let source_file_to_import = self
+ .root
+ .source_map
+ .get(self, source_file_index)
+ .expect("missing source file")
+ .decode(self);
+
+ // We can't reuse an existing SourceFile, so allocate a new one
+ // containing the information we need.
+ let rustc_span::SourceFile {
+ mut name,
+ src_hash,
+ start_pos,
+ end_pos,
+ lines,
+ multibyte_chars,
+ non_narrow_chars,
+ normalized_pos,
+ name_hash,
+ ..
+ } = source_file_to_import;
+
+ // If this file is under $sysroot/lib/rustlib/src/ but has not been remapped
+ // during rust bootstrapping by `remap-debuginfo = true`, and the user
+ // wishes to simulate that behaviour with -Z simulate-remapped-rust-src-base,
+ // then we change `name` to a similar state as if rust had been bootstrapped
+ // with `remap-debuginfo = true`.
+ // This is useful for testing so that tests about the effects of
+ // `try_to_translate_virtual_to_real` don't have to worry about how the
+ // compiler is bootstrapped.
+ if let Some(virtual_dir) = &sess.opts.unstable_opts.simulate_remapped_rust_src_base
+ {
+ if let Some(real_dir) = &sess.opts.real_rust_source_base_dir {
+ if let rustc_span::FileName::Real(ref mut old_name) = name {
+ if let rustc_span::RealFileName::LocalPath(local) = old_name {
+ if let Ok(rest) = local.strip_prefix(real_dir) {
+ *old_name = rustc_span::RealFileName::Remapped {
+ local_path: None,
+ virtual_name: virtual_dir.join(rest),
+ };
}
}
}
}
+ }
- // If this file's path has been remapped to `/rustc/$hash`,
- // we might be able to reverse that (also see comments above,
- // on `try_to_translate_virtual_to_real`).
- try_to_translate_virtual_to_real(&mut name);
-
- let source_length = (end_pos - start_pos).to_usize();
-
- let local_version = sess.source_map().new_imported_source_file(
- name,
- src_hash,
- name_hash,
- source_length,
- self.cnum,
- lines,
- multibyte_chars,
- non_narrow_chars,
- normalized_pos,
- start_pos,
- end_pos,
- );
- debug!(
- "CrateMetaData::imported_source_files alloc \
+ // If this file's path has been remapped to `/rustc/$hash`,
+ // we might be able to reverse that (also see comments above,
+ // on `try_to_translate_virtual_to_real`).
+ try_to_translate_virtual_to_real(&mut name);
+
+ let source_length = (end_pos - start_pos).to_usize();
+
+ let local_version = sess.source_map().new_imported_source_file(
+ name,
+ src_hash,
+ name_hash,
+ source_length,
+ self.cnum,
+ lines,
+ multibyte_chars,
+ non_narrow_chars,
+ normalized_pos,
+ start_pos,
+ source_file_index,
+ );
+ debug!(
+ "CrateMetaData::imported_source_files alloc \
source_file {:?} original (start_pos {:?} end_pos {:?}) \
translated (start_pos {:?} end_pos {:?})",
- local_version.name,
- start_pos,
- end_pos,
- local_version.start_pos,
- local_version.end_pos
- );
+ local_version.name,
+ start_pos,
+ end_pos,
+ local_version.start_pos,
+ local_version.end_pos
+ );
- ImportedSourceFile {
- original_start_pos: start_pos,
- original_end_pos: end_pos,
- translated_source_file: local_version,
- }
- })
- .collect()
- })
+ ImportedSourceFile {
+ original_start_pos: start_pos,
+ original_end_pos: end_pos,
+ translated_source_file: local_version,
+ }
+ })
+ .clone()
}
fn get_generator_diagnostic_data(
@@ -1687,7 +1684,7 @@ impl CrateMetadata {
trait_impls,
incoherent_impls: Default::default(),
raw_proc_macros,
- source_map_import_info: OnceCell::new(),
+ source_map_import_info: Lock::new(Vec::new()),
def_path_hash_map,
expn_hash_map: Default::default(),
alloc_decoding_state,
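The decoder changes above drop the binary search over imported source files: a Span is now stored as a tag, a position relative to the start of its SourceFile, a length, and the index of that file in the owning crate's encoded file list, and decoding just adds the start of the locally re-imported file obtained via `imported_source_file(metadata_index, sess)`. A minimal sketch of that arithmetic with plain u32 offsets; the helper names are made up for illustration.

    // Encode side: offsets become file-relative so the variable-length integer encoding stays small.
    fn encode_span(lo: u32, hi: u32, file_start: u32, metadata_index: u32) -> (u32, u32, u32) {
        (lo - file_start, hi - lo, metadata_index)
    }

    // Decode side: `translated_start` is the start_pos of the SourceFile that
    // `imported_source_file(metadata_index, sess)` re-imported into the local source map.
    fn decode_span(rel_lo: u32, len: u32, translated_start: u32) -> (u32, u32) {
        let lo = rel_lo + translated_start;
        (lo, lo + len)
    }

    fn main() {
        let (rel_lo, len, idx) = encode_span(120, 135, 100, 3);
        assert_eq!((rel_lo, len, idx), (20, 15, 3));
        assert_eq!(decode_span(rel_lo, len, 1000), (1020, 1035));
    }

This is also why `source_map_import_info` becomes a `Lock<Vec<Option<ImportedSourceFile>>>` grown on demand: each file is imported lazily the first time one of its spans is decoded, instead of importing the whole source map up front.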
diff --git a/compiler/rustc_metadata/src/rmeta/decoder/cstore_impl.rs b/compiler/rustc_metadata/src/rmeta/decoder/cstore_impl.rs
index 38ce50e83..a0a085525 100644
--- a/compiler/rustc_metadata/src/rmeta/decoder/cstore_impl.rs
+++ b/compiler/rustc_metadata/src/rmeta/decoder/cstore_impl.rs
@@ -15,7 +15,6 @@ use rustc_middle::ty::fast_reject::SimplifiedType;
use rustc_middle::ty::query::{ExternProviders, Providers};
use rustc_middle::ty::{self, TyCtxt, Visibility};
use rustc_session::cstore::{CrateSource, CrateStore};
-use rustc_session::utils::NativeLibKind;
use rustc_session::{Session, StableCrateId};
use rustc_span::hygiene::{ExpnHash, ExpnId};
use rustc_span::source_map::{Span, Spanned};
@@ -76,9 +75,9 @@ impl ProcessQueryValue<'_, Option<DeprecationEntry>> for Option<Deprecation> {
}
macro_rules! provide_one {
- (<$lt:tt> $tcx:ident, $def_id:ident, $other:ident, $cdata:ident, $name:ident => { table }) => {
+ ($tcx:ident, $def_id:ident, $other:ident, $cdata:ident, $name:ident => { table }) => {
provide_one! {
- <$lt> $tcx, $def_id, $other, $cdata, $name => {
+ $tcx, $def_id, $other, $cdata, $name => {
$cdata
.root
.tables
@@ -89,9 +88,9 @@ macro_rules! provide_one {
}
}
};
- (<$lt:tt> $tcx:ident, $def_id:ident, $other:ident, $cdata:ident, $name:ident => { table_direct }) => {
+ ($tcx:ident, $def_id:ident, $other:ident, $cdata:ident, $name:ident => { table_direct }) => {
provide_one! {
- <$lt> $tcx, $def_id, $other, $cdata, $name => {
+ $tcx, $def_id, $other, $cdata, $name => {
// We don't decode `table_direct`, since it's not a Lazy, but an actual value
$cdata
.root
@@ -102,11 +101,11 @@ macro_rules! provide_one {
}
}
};
- (<$lt:tt> $tcx:ident, $def_id:ident, $other:ident, $cdata:ident, $name:ident => $compute:block) => {
- fn $name<$lt>(
- $tcx: TyCtxt<$lt>,
- def_id_arg: ty::query::query_keys::$name<$lt>,
- ) -> ty::query::query_values::$name<$lt> {
+ ($tcx:ident, $def_id:ident, $other:ident, $cdata:ident, $name:ident => $compute:block) => {
+ fn $name<'tcx>(
+ $tcx: TyCtxt<'tcx>,
+ def_id_arg: ty::query::query_keys::$name<'tcx>,
+ ) -> ty::query::query_values::$name<'tcx> {
let _prof_timer =
$tcx.prof.generic_activity(concat!("metadata_decode_entry_", stringify!($name)));
@@ -130,11 +129,11 @@ macro_rules! provide_one {
}
macro_rules! provide {
- (<$lt:tt> $tcx:ident, $def_id:ident, $other:ident, $cdata:ident,
+ ($tcx:ident, $def_id:ident, $other:ident, $cdata:ident,
$($name:ident => { $($compute:tt)* })*) => {
pub fn provide_extern(providers: &mut ExternProviders) {
$(provide_one! {
- <$lt> $tcx, $def_id, $other, $cdata, $name => { $($compute)* }
+ $tcx, $def_id, $other, $cdata, $name => { $($compute)* }
})*
*providers = ExternProviders {
@@ -187,7 +186,7 @@ impl IntoArgs for (CrateNum, SimplifiedType) {
}
}
-provide! { <'tcx> tcx, def_id, other, cdata,
+provide! { tcx, def_id, other, cdata,
explicit_item_bounds => { table }
explicit_predicates_of => { table }
generics_of => { table }
@@ -199,6 +198,7 @@ provide! { <'tcx> tcx, def_id, other, cdata,
codegen_fn_attrs => { table }
impl_trait_ref => { table }
const_param_default => { table }
+ object_lifetime_default => { table }
thir_abstract_const => { table }
optimized_mir => { table }
mir_for_ctfe => { table }
@@ -207,8 +207,9 @@ provide! { <'tcx> tcx, def_id, other, cdata,
def_ident_span => { table }
lookup_stability => { table }
lookup_const_stability => { table }
+ lookup_default_body_stability => { table }
lookup_deprecation_entry => { table }
- visibility => { table }
+ params_in_repr => { table }
unused_generic_params => { table }
opt_def_kind => { table_direct }
impl_parent => { table }
@@ -222,7 +223,18 @@ provide! { <'tcx> tcx, def_id, other, cdata,
fn_arg_names => { table }
generator_kind => { table }
trait_def => { table }
-
+ deduced_param_attrs => { table }
+ collect_trait_impl_trait_tys => {
+ Ok(cdata
+ .root
+ .tables
+ .trait_impl_trait_tys
+ .get(cdata, def_id.index)
+ .map(|lazy| lazy.decode((cdata, tcx)))
+ .process_decoded(tcx, || panic!("{:?} does not have trait_impl_trait_tys", def_id)))
+ }
+
+ visibility => { cdata.get_visibility(def_id.index) }
adt_def => { cdata.get_adt_def(def_id.index, tcx) }
adt_destructor => {
let _ = cdata;
@@ -231,7 +243,7 @@ provide! { <'tcx> tcx, def_id, other, cdata,
associated_item_def_ids => {
tcx.arena.alloc_from_iter(cdata.get_associated_item_def_ids(def_id.index, tcx.sess))
}
- associated_item => { cdata.get_associated_item(def_id.index) }
+ associated_item => { cdata.get_associated_item(def_id.index, tcx.sess) }
inherent_impls => { cdata.get_inherent_implementations_for_type(tcx, def_id.index) }
is_foreign_item => { cdata.is_foreign_item(def_id.index) }
item_attrs => { tcx.arena.alloc_from_iter(cdata.get_item_attrs(def_id.index, tcx.sess)) }
@@ -327,20 +339,11 @@ pub(in crate::rmeta) fn provide(providers: &mut Providers) {
// resolve! Does this work? Unsure! That's what the issue is about
*providers = Providers {
allocator_kind: |tcx, ()| CStore::from_tcx(tcx).allocator_kind(),
- is_dllimport_foreign_item: |tcx, id| match tcx.native_library_kind(id) {
- Some(
- NativeLibKind::Dylib { .. } | NativeLibKind::RawDylib | NativeLibKind::Unspecified,
- ) => true,
- _ => false,
- },
- is_statically_included_foreign_item: |tcx, id| {
- matches!(tcx.native_library_kind(id), Some(NativeLibKind::Static { .. }))
- },
is_private_dep: |_tcx, cnum| {
assert_eq!(cnum, LOCAL_CRATE);
false
},
- native_library_kind: |tcx, id| {
+ native_library: |tcx, id| {
tcx.native_libraries(id.krate)
.iter()
.filter(|lib| native_libs::relevant_lib(&tcx.sess, lib))
@@ -354,7 +357,6 @@ pub(in crate::rmeta) fn provide(providers: &mut Providers) {
.foreign_items
.contains(&id)
})
- .map(|l| l.kind)
},
native_libraries: |tcx, cnum| {
assert_eq!(cnum, LOCAL_CRATE);
@@ -483,7 +485,7 @@ impl CStore {
pub fn struct_field_visibilities_untracked(
&self,
def: DefId,
- ) -> impl Iterator<Item = Visibility> + '_ {
+ ) -> impl Iterator<Item = Visibility<DefId>> + '_ {
self.get_crate_data(def.krate).get_struct_field_visibilities(def.index)
}
@@ -491,7 +493,7 @@ impl CStore {
self.get_crate_data(def.krate).get_ctor_def_id_and_kind(def.index)
}
- pub fn visibility_untracked(&self, def: DefId) -> Visibility {
+ pub fn visibility_untracked(&self, def: DefId) -> Visibility<DefId> {
self.get_crate_data(def.krate).get_visibility(def.index)
}
@@ -533,8 +535,8 @@ impl CStore {
)
}
- pub fn fn_has_self_parameter_untracked(&self, def: DefId) -> bool {
- self.get_crate_data(def.krate).get_fn_has_self_parameter(def.index)
+ pub fn fn_has_self_parameter_untracked(&self, def: DefId, sess: &Session) -> bool {
+ self.get_crate_data(def.krate).get_fn_has_self_parameter(def.index, sess)
}
pub fn crate_source_untracked(&self, cnum: CrateNum) -> Lrc<CrateSource> {
@@ -585,11 +587,6 @@ impl CStore {
self.get_crate_data(cnum).get_proc_macro_quoted_span(id, sess)
}
- /// Decodes all traits in the crate (for rustdoc).
- pub fn traits_in_crate_untracked(&self, cnum: CrateNum) -> impl Iterator<Item = DefId> + '_ {
- self.get_crate_data(cnum).get_traits()
- }
-
/// Decodes all trait impls in the crate (for rustdoc).
pub fn trait_impls_in_crate_untracked(
&self,
@@ -675,6 +672,9 @@ impl CrateStore for CStore {
}
fn import_source_files(&self, sess: &Session, cnum: CrateNum) {
- self.get_crate_data(cnum).imported_source_files(sess);
+ let cdata = self.get_crate_data(cnum);
+ for file_index in 0..cdata.root.source_map.size() {
+ cdata.imported_source_file(file_index as u32, sess);
+ }
}
}
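The Symbol decoder earlier in this commit and the Symbol encoder in the encoder.rs hunk below share a three-tag scheme: preinterned symbols are written as their u32 index, the first occurrence of any other symbol is written inline after a SYMBOL_STR tag, and every later occurrence stores only the byte offset of that first copy after a SYMBOL_OFFSET tag. A toy, self-contained sketch of the write side of that dedup idea; the buffer layout, tag values, and length prefix here are simplified and are not the real rmeta format.

    use std::collections::HashMap;

    const SYMBOL_STR: u8 = 0;
    const SYMBOL_OFFSET: u8 = 1;

    fn write_symbol(buf: &mut Vec<u8>, seen: &mut HashMap<String, u32>, s: &str) {
        if let Some(&pos) = seen.get(s) {
            // Repeat occurrence: store only the offset of the earlier inline copy.
            buf.push(SYMBOL_OFFSET);
            buf.extend_from_slice(&pos.to_le_bytes());
        } else {
            // First occurrence: write the string inline and remember where it starts.
            buf.push(SYMBOL_STR);
            let pos = buf.len() as u32;
            seen.insert(s.to_owned(), pos);
            buf.push(s.len() as u8); // toy length prefix; rustc uses its own str encoding
            buf.extend_from_slice(s.as_bytes());
        }
    }

    fn main() {
        let (mut buf, mut seen) = (Vec::new(), HashMap::new());
        write_symbol(&mut buf, &mut seen, "my_crate_symbol");
        write_symbol(&mut buf, &mut seen, "my_crate_symbol"); // second write stores only an offset
        assert!(buf.len() < 2 * "my_crate_symbol".len());
    }

The decoder mirrors this by seeking to the recorded offset, re-reading the string, and interning it again, while the per-crate `symbol_table: FxHashMap<Symbol, usize>` on the encoder guarantees each string is serialized at most once.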
diff --git a/compiler/rustc_metadata/src/rmeta/encoder.rs b/compiler/rustc_metadata/src/rmeta/encoder.rs
index 33278367c..049514ec7 100644
--- a/compiler/rustc_metadata/src/rmeta/encoder.rs
+++ b/compiler/rustc_metadata/src/rmeta/encoder.rs
@@ -1,7 +1,9 @@
+use crate::errors::{FailCreateFileEncoder, FailSeekFile, FailWriteFile};
use crate::rmeta::def_path_hash_map::DefPathHashMapRef;
use crate::rmeta::table::TableBuilder;
use crate::rmeta::*;
+use rustc_ast::Attribute;
use rustc_data_structures::fingerprint::Fingerprint;
use rustc_data_structures::fx::{FxHashMap, FxIndexSet};
use rustc_data_structures::memmap::{Mmap, MmapMut};
@@ -16,8 +18,6 @@ use rustc_hir::def_id::{
use rustc_hir::definitions::DefPathData;
use rustc_hir::intravisit::{self, Visitor};
use rustc_hir::lang_items;
-use rustc_hir::{AnonConst, GenericParamKind};
-use rustc_index::bit_set::GrowableBitSet;
use rustc_middle::hir::nested_filter;
use rustc_middle::middle::dependency_format::Linkage;
use rustc_middle::middle::exported_symbols::{
@@ -29,8 +29,9 @@ use rustc_middle::ty::codec::TyEncoder;
use rustc_middle::ty::fast_reject::{self, SimplifiedType, TreatParams};
use rustc_middle::ty::query::Providers;
use rustc_middle::ty::{self, SymbolName, Ty, TyCtxt};
+use rustc_middle::util::common::to_readable_str;
use rustc_serialize::{opaque, Decodable, Decoder, Encodable, Encoder};
-use rustc_session::config::CrateType;
+use rustc_session::config::{CrateType, OptLevel};
use rustc_session::cstore::{ForeignModule, LinkagePreference, NativeLib};
use rustc_span::hygiene::{ExpnIndex, HygieneEncodeContext, MacroKind};
use rustc_span::symbol::{sym, Symbol};
@@ -39,12 +40,12 @@ use rustc_span::{
};
use rustc_target::abi::VariantIdx;
use std::borrow::Borrow;
+use std::collections::hash_map::Entry;
use std::hash::Hash;
use std::io::{Read, Seek, Write};
use std::iter;
use std::num::NonZeroUsize;
use std::path::{Path, PathBuf};
-use tracing::{debug, trace};
pub(super) struct EncodeContext<'a, 'tcx> {
opaque: opaque::FileEncoder,
@@ -66,15 +67,13 @@ pub(super) struct EncodeContext<'a, 'tcx> {
// The indices (into the `SourceMap`'s `MonotonicVec`)
// of all of the `SourceFiles` that we need to serialize.
// When we serialize a `Span`, we insert the index of its
- // `SourceFile` into the `GrowableBitSet`.
- //
- // This needs to be a `GrowableBitSet` and not a
- // regular `BitSet` because we may actually import new `SourceFiles`
- // during metadata encoding, due to executing a query
- // with a result containing a foreign `Span`.
- required_source_files: Option<GrowableBitSet<usize>>,
+ // `SourceFile` into the `FxIndexSet`.
+ // The order inside the `FxIndexSet` is used as on-disk
+ // order of `SourceFiles`, and encoded inside `Span`s.
+ required_source_files: Option<FxIndexSet<usize>>,
is_proc_macro: bool,
hygiene_ctxt: &'a HygieneEncodeContext,
+ symbol_table: FxHashMap<Symbol, usize>,
}
/// If the current crate is a proc-macro, returns early with `Lazy:empty()`.
@@ -238,17 +237,15 @@ impl<'a, 'tcx> Encodable<EncodeContext<'a, 'tcx>> for Span {
s.source_file_cache =
(source_map.files()[source_file_index].clone(), source_file_index);
}
+ let (ref source_file, source_file_index) = s.source_file_cache;
+ debug_assert!(source_file.contains(span.lo));
- if !s.source_file_cache.0.contains(span.hi) {
+ if !source_file.contains(span.hi) {
// Unfortunately, macro expansion still sometimes generates Spans
// that malformed in this way.
return TAG_PARTIAL_SPAN.encode(s);
}
- let source_files = s.required_source_files.as_mut().expect("Already encoded SourceMap!");
- // Record the fact that we need to encode the data for this `SourceFile`
- source_files.insert(s.source_file_cache.1);
-
// There are two possible cases here:
// 1. This span comes from a 'foreign' crate - e.g. some crate upstream of the
// crate we are writing metadata for. When the metadata for *this* crate gets
@@ -265,39 +262,51 @@ impl<'a, 'tcx> Encodable<EncodeContext<'a, 'tcx>> for Span {
// if we're a proc-macro crate.
// This allows us to avoid loading the dependencies of proc-macro crates: all of
// the information we need to decode `Span`s is stored in the proc-macro crate.
- let (tag, lo, hi) = if s.source_file_cache.0.is_imported() && !s.is_proc_macro {
- // To simplify deserialization, we 'rebase' this span onto the crate it originally came from
- // (the crate that 'owns' the file it references. These rebased 'lo' and 'hi' values
- // are relative to the source map information for the 'foreign' crate whose CrateNum
- // we write into the metadata. This allows `imported_source_files` to binary
+ let (tag, metadata_index) = if source_file.is_imported() && !s.is_proc_macro {
+ // To simplify deserialization, we 'rebase' this span onto the crate it originally came
+ // from (the crate that 'owns' the file it references. These rebased 'lo' and 'hi'
+ // values are relative to the source map information for the 'foreign' crate whose
+ // CrateNum we write into the metadata. This allows `imported_source_files` to binary
// search through the 'foreign' crate's source map information, using the
// deserialized 'lo' and 'hi' values directly.
//
// All of this logic ensures that the final result of deserialization is a 'normal'
// Span that can be used without any additional trouble.
- let external_start_pos = {
+ let metadata_index = {
// Introduce a new scope so that we drop the 'lock()' temporary
- match &*s.source_file_cache.0.external_src.lock() {
- ExternalSource::Foreign { original_start_pos, .. } => *original_start_pos,
+ match &*source_file.external_src.lock() {
+ ExternalSource::Foreign { metadata_index, .. } => *metadata_index,
src => panic!("Unexpected external source {:?}", src),
}
};
- let lo = (span.lo - s.source_file_cache.0.start_pos) + external_start_pos;
- let hi = (span.hi - s.source_file_cache.0.start_pos) + external_start_pos;
- (TAG_VALID_SPAN_FOREIGN, lo, hi)
+ (TAG_VALID_SPAN_FOREIGN, metadata_index)
} else {
- (TAG_VALID_SPAN_LOCAL, span.lo, span.hi)
+ // Record the fact that we need to encode the data for this `SourceFile`
+ let source_files =
+ s.required_source_files.as_mut().expect("Already encoded SourceMap!");
+ let (metadata_index, _) = source_files.insert_full(source_file_index);
+ let metadata_index: u32 =
+ metadata_index.try_into().expect("cannot export more than U32_MAX files");
+
+ (TAG_VALID_SPAN_LOCAL, metadata_index)
};
- tag.encode(s);
- lo.encode(s);
+ // Encode the start position relative to the file start, so we profit more from the
+ // variable-length integer encoding.
+ let lo = span.lo - source_file.start_pos;
        // Encode the length, which is usually less than `span.hi` and profits more
        // from the variable-length integer encoding that we use.
- let len = hi - lo;
+ let len = span.hi - span.lo;
+
+ tag.encode(s);
+ lo.encode(s);
len.encode(s);
+ // Encode the index of the `SourceFile` for the span, in order to make decoding faster.
+ metadata_index.encode(s);
+
if tag == TAG_VALID_SPAN_FOREIGN {
// This needs to be two lines to avoid holding the `s.source_file_cache`
// while calling `cnum.encode(s)`
@@ -307,6 +316,31 @@ impl<'a, 'tcx> Encodable<EncodeContext<'a, 'tcx>> for Span {
}
}
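The change above stops storing two absolute positions and instead records a file-relative start plus a length, which pays off because the opaque encoder writes integers in a variable-length format. A standalone sketch of the size effect, using a plain LEB128 byte counter and hypothetical offsets rather than rustc's actual encoder:

fn leb128_len(mut value: u32) -> usize {
    // Number of bytes an unsigned LEB128 encoding of `value` would take.
    let mut bytes = 1;
    while value >= 0x80 {
        value >>= 7;
        bytes += 1;
    }
    bytes
}

fn main() {
    // Hypothetical span: the file starts 9 MB into the global source map and the
    // span covers 35 bytes starting 120 bytes into that file.
    let (file_start, lo, hi) = (9_000_000u32, 9_000_120u32, 9_000_155u32);

    let absolute = leb128_len(lo) + leb128_len(hi); // two large values
    let relative = leb128_len(lo - file_start) + leb128_len(hi - lo); // small offset + length

    println!("absolute: {absolute} bytes, relative: {relative} bytes"); // 8 vs. 2
}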
+impl<'a, 'tcx> Encodable<EncodeContext<'a, 'tcx>> for Symbol {
+ fn encode(&self, s: &mut EncodeContext<'a, 'tcx>) {
+        // If the symbol is preinterned, emit only the tag and the symbol index.
+ if self.is_preinterned() {
+ s.opaque.emit_u8(SYMBOL_PREINTERNED);
+ s.opaque.emit_u32(self.as_u32());
+ } else {
+            // Otherwise, write it as a string, or as an offset to an earlier copy of the same string.
+ match s.symbol_table.entry(*self) {
+ Entry::Vacant(o) => {
+ s.opaque.emit_u8(SYMBOL_STR);
+ let pos = s.opaque.position();
+ o.insert(pos);
+ s.emit_str(self.as_str());
+ }
+ Entry::Occupied(o) => {
+                    let x = *o.get();
+ s.emit_u8(SYMBOL_OFFSET);
+ s.emit_usize(x);
+ }
+ }
+ }
+ }
+}
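The `Encodable` impl above writes each non-preinterned symbol string once and refers back to it by stream offset on later occurrences. A minimal standalone sketch of that deduplication idea; `TAG_STR`/`TAG_OFFSET` are illustrative placeholders (not the rmeta constants) and the preinterned fast path is omitted:

use std::collections::HashMap;

const TAG_STR: u8 = 0;
const TAG_OFFSET: u8 = 1;

struct Writer {
    buf: Vec<u8>,
    seen: HashMap<String, usize>,
}

impl Writer {
    fn write_symbol(&mut self, s: &str) {
        if let Some(&pos) = self.seen.get(s) {
            // Seen before: emit only a reference to the earlier payload.
            self.buf.push(TAG_OFFSET);
            self.buf.extend_from_slice(&(pos as u64).to_le_bytes());
        } else {
            // First occurrence: emit the string and remember where it starts.
            self.buf.push(TAG_STR);
            let pos = self.buf.len();
            self.seen.insert(s.to_owned(), pos);
            self.buf.extend_from_slice(&(s.len() as u64).to_le_bytes());
            self.buf.extend_from_slice(s.as_bytes());
        }
    }
}

fn main() {
    let mut w = Writer { buf: Vec::new(), seen: HashMap::new() };
    for sym in ["foo", "bar", "foo", "foo"] {
        w.write_symbol(sym);
    }
    // "foo" is stored once; the two later uses cost only a tag plus an offset.
    println!("{} bytes total", w.buf.len());
}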
+
impl<'a, 'tcx> TyEncoder for EncodeContext<'a, 'tcx> {
const CLEAR_CROSS_CRATE: bool = true;
@@ -449,7 +483,7 @@ impl<'a, 'tcx> EncodeContext<'a, 'tcx> {
self.lazy(DefPathHashMapRef::BorrowedFromTcx(self.tcx.def_path_hash_to_def_index_map()))
}
- fn encode_source_map(&mut self) -> LazyArray<rustc_span::SourceFile> {
+ fn encode_source_map(&mut self) -> LazyTable<u32, LazyValue<rustc_span::SourceFile>> {
let source_map = self.tcx.sess.source_map();
let all_source_files = source_map.files();
@@ -460,142 +494,118 @@ impl<'a, 'tcx> EncodeContext<'a, 'tcx> {
let working_directory = &self.tcx.sess.opts.working_dir;
- let adapted = all_source_files
- .iter()
- .enumerate()
- .filter(|(idx, source_file)| {
- // Only serialize `SourceFile`s that were used
- // during the encoding of a `Span`
- required_source_files.contains(*idx) &&
- // Don't serialize imported `SourceFile`s, unless
- // we're in a proc-macro crate.
- (!source_file.is_imported() || self.is_proc_macro)
- })
- .map(|(_, source_file)| {
- // At export time we expand all source file paths to absolute paths because
- // downstream compilation sessions can have a different compiler working
- // directory, so relative paths from this or any other upstream crate
- // won't be valid anymore.
- //
- // At this point we also erase the actual on-disk path and only keep
- // the remapped version -- as is necessary for reproducible builds.
- match source_file.name {
- FileName::Real(ref original_file_name) => {
- let adapted_file_name =
- source_map.path_mapping().to_embeddable_absolute_path(
- original_file_name.clone(),
- working_directory,
- );
-
- if adapted_file_name != *original_file_name {
- let mut adapted: SourceFile = (**source_file).clone();
- adapted.name = FileName::Real(adapted_file_name);
- adapted.name_hash = {
- let mut hasher: StableHasher = StableHasher::new();
- adapted.name.hash(&mut hasher);
- hasher.finish::<u128>()
- };
- Lrc::new(adapted)
- } else {
- // Nothing to adapt
- source_file.clone()
- }
+ let mut adapted = TableBuilder::default();
+
+ // Only serialize `SourceFile`s that were used during the encoding of a `Span`.
+ //
+ // The order in which we encode source files is important here: the on-disk format for
+ // `Span` contains the index of the corresponding `SourceFile`.
+ for (on_disk_index, &source_file_index) in required_source_files.iter().enumerate() {
+ let source_file = &all_source_files[source_file_index];
+ // Don't serialize imported `SourceFile`s, unless we're in a proc-macro crate.
+ assert!(!source_file.is_imported() || self.is_proc_macro);
+
+ // At export time we expand all source file paths to absolute paths because
+ // downstream compilation sessions can have a different compiler working
+ // directory, so relative paths from this or any other upstream crate
+ // won't be valid anymore.
+ //
+ // At this point we also erase the actual on-disk path and only keep
+ // the remapped version -- as is necessary for reproducible builds.
+ let mut source_file = match source_file.name {
+ FileName::Real(ref original_file_name) => {
+ let adapted_file_name = source_map
+ .path_mapping()
+ .to_embeddable_absolute_path(original_file_name.clone(), working_directory);
+
+ if adapted_file_name != *original_file_name {
+ let mut adapted: SourceFile = (**source_file).clone();
+ adapted.name = FileName::Real(adapted_file_name);
+ adapted.name_hash = {
+ let mut hasher: StableHasher = StableHasher::new();
+ adapted.name.hash(&mut hasher);
+ hasher.finish::<u128>()
+ };
+ Lrc::new(adapted)
+ } else {
+ // Nothing to adapt
+ source_file.clone()
}
- // expanded code, not from a file
- _ => source_file.clone(),
- }
- })
- .map(|mut source_file| {
- // We're serializing this `SourceFile` into our crate metadata,
- // so mark it as coming from this crate.
- // This also ensures that we don't try to deserialize the
- // `CrateNum` for a proc-macro dependency - since proc macro
- // dependencies aren't loaded when we deserialize a proc-macro,
- // trying to remap the `CrateNum` would fail.
- if self.is_proc_macro {
- Lrc::make_mut(&mut source_file).cnum = LOCAL_CRATE;
}
- source_file
- })
- .collect::<Vec<_>>();
+ // expanded code, not from a file
+ _ => source_file.clone(),
+ };
+
+ // We're serializing this `SourceFile` into our crate metadata,
+ // so mark it as coming from this crate.
+ // This also ensures that we don't try to deserialize the
+ // `CrateNum` for a proc-macro dependency - since proc macro
+ // dependencies aren't loaded when we deserialize a proc-macro,
+ // trying to remap the `CrateNum` would fail.
+ if self.is_proc_macro {
+ Lrc::make_mut(&mut source_file).cnum = LOCAL_CRATE;
+ }
- self.lazy_array(adapted.iter().map(|rc| &**rc))
+ let on_disk_index: u32 =
+ on_disk_index.try_into().expect("cannot export more than U32_MAX files");
+ adapted.set(on_disk_index, self.lazy(source_file));
+ }
+
+ adapted.encode(&mut self.opaque)
}
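This encoding order works because `FxIndexSet` is insertion-ordered: the index returned by `insert_full` while encoding a `Span` is the same index that `enumerate` yields here. A small sketch of that invariant, written against the `indexmap` crate directly (assumed here as a stand-in for `FxIndexSet`):

use indexmap::IndexSet;

fn main() {
    let mut required: IndexSet<usize> = IndexSet::new();

    // "Encoding spans": remember the index handed out for each source file.
    let recorded: Vec<usize> =
        [7, 3, 7, 11].iter().map(|&file| required.insert_full(file).0).collect();
    assert_eq!(recorded, vec![0, 1, 0, 2]);

    // "Encoding the source map": iteration order reproduces those indices.
    for (on_disk_index, &file) in required.iter().enumerate() {
        println!("table slot {on_disk_index} -> source file {file}");
    }
}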
fn encode_crate_root(&mut self) -> LazyValue<CrateRoot> {
let tcx = self.tcx;
- let mut i = 0;
- let preamble_bytes = self.position() - i;
-
- // Encode the crate deps
- i = self.position();
- let crate_deps = self.encode_crate_deps();
- let dylib_dependency_formats = self.encode_dylib_dependency_formats();
- let dep_bytes = self.position() - i;
-
- // Encode the lib features.
- i = self.position();
- let lib_features = self.encode_lib_features();
- let lib_feature_bytes = self.position() - i;
-
- // Encode the stability implications.
- i = self.position();
- let stability_implications = self.encode_stability_implications();
- let stability_implications_bytes = self.position() - i;
-
- // Encode the language items.
- i = self.position();
- let lang_items = self.encode_lang_items();
- let lang_items_missing = self.encode_lang_items_missing();
- let lang_item_bytes = self.position() - i;
-
- // Encode the diagnostic items.
- i = self.position();
- let diagnostic_items = self.encode_diagnostic_items();
- let diagnostic_item_bytes = self.position() - i;
-
- // Encode the native libraries used
- i = self.position();
- let native_libraries = self.encode_native_libraries();
- let native_lib_bytes = self.position() - i;
-
- i = self.position();
- let foreign_modules = self.encode_foreign_modules();
- let foreign_modules_bytes = self.position() - i;
-
- // Encode DefPathTable
- i = self.position();
- self.encode_def_path_table();
- let def_path_table_bytes = self.position() - i;
+ let mut stats: Vec<(&'static str, usize)> = Vec::with_capacity(32);
+
+ macro_rules! stat {
+ ($label:literal, $f:expr) => {{
+ let orig_pos = self.position();
+ let res = $f();
+ stats.push(($label, self.position() - orig_pos));
+ res
+ }};
+ }
+
+ // We have already encoded some things. Get their combined size from the current position.
+ stats.push(("preamble", self.position()));
+
+ let (crate_deps, dylib_dependency_formats) =
+ stat!("dep", || (self.encode_crate_deps(), self.encode_dylib_dependency_formats()));
+
+ let lib_features = stat!("lib-features", || self.encode_lib_features());
+
+ let stability_implications =
+ stat!("stability-implications", || self.encode_stability_implications());
+
+ let (lang_items, lang_items_missing) = stat!("lang-items", || {
+ (self.encode_lang_items(), self.encode_lang_items_missing())
+ });
+
+ let diagnostic_items = stat!("diagnostic-items", || self.encode_diagnostic_items());
+
+ let native_libraries = stat!("native-libs", || self.encode_native_libraries());
+
+ let foreign_modules = stat!("foreign-modules", || self.encode_foreign_modules());
+
+ _ = stat!("def-path-table", || self.encode_def_path_table());
// Encode the def IDs of traits, for rustdoc and diagnostics.
- i = self.position();
- let traits = self.encode_traits();
- let traits_bytes = self.position() - i;
+ let traits = stat!("traits", || self.encode_traits());
// Encode the def IDs of impls, for coherence checking.
- i = self.position();
- let impls = self.encode_impls();
- let impls_bytes = self.position() - i;
-
- i = self.position();
- let incoherent_impls = self.encode_incoherent_impls();
- let incoherent_impls_bytes = self.position() - i;
-
- // Encode MIR.
- i = self.position();
- self.encode_mir();
- let mir_bytes = self.position() - i;
-
- // Encode the items.
- i = self.position();
- self.encode_def_ids();
- self.encode_info_for_items();
- let item_bytes = self.position() - i;
-
- // Encode the allocation index
- i = self.position();
- let interpret_alloc_index = {
+ let impls = stat!("impls", || self.encode_impls());
+
+ let incoherent_impls = stat!("incoherent-impls", || self.encode_incoherent_impls());
+
+ _ = stat!("mir", || self.encode_mir());
+
+ _ = stat!("items", || {
+ self.encode_def_ids();
+ self.encode_info_for_items();
+ });
+
+ let interpret_alloc_index = stat!("interpret-alloc-index", || {
let mut interpret_alloc_index = Vec::new();
let mut n = 0;
trace!("beginning to encode alloc ids");
@@ -616,126 +626,90 @@ impl<'a, 'tcx> EncodeContext<'a, 'tcx> {
n = new_n;
}
self.lazy_array(interpret_alloc_index)
- };
- let interpret_alloc_index_bytes = self.position() - i;
+ });
- // Encode the proc macro data. This affects 'tables',
- // so we need to do this before we encode the tables.
- // This overwrites def_keys, so it must happen after encode_def_path_table.
- i = self.position();
- let proc_macro_data = self.encode_proc_macros();
- let proc_macro_data_bytes = self.position() - i;
+ // Encode the proc macro data. This affects `tables`, so we need to do this before we
+ // encode the tables. This overwrites def_keys, so it must happen after
+ // encode_def_path_table.
+ let proc_macro_data = stat!("proc-macro-data", || self.encode_proc_macros());
- i = self.position();
- let tables = self.tables.encode(&mut self.opaque);
- let tables_bytes = self.position() - i;
+ let tables = stat!("tables", || self.tables.encode(&mut self.opaque));
- i = self.position();
- let debugger_visualizers = self.encode_debugger_visualizers();
- let debugger_visualizers_bytes = self.position() - i;
+ let debugger_visualizers =
+ stat!("debugger-visualizers", || self.encode_debugger_visualizers());
// Encode exported symbols info. This is prefetched in `encode_metadata` so we encode
// this as late as possible to give the prefetching as much time as possible to complete.
- i = self.position();
- let exported_symbols = tcx.exported_symbols(LOCAL_CRATE);
- let exported_symbols = self.encode_exported_symbols(&exported_symbols);
- let exported_symbols_bytes = self.position() - i;
-
- // Encode the hygiene data,
- // IMPORTANT: this *must* be the last thing that we encode (other than `SourceMap`). The process
- // of encoding other items (e.g. `optimized_mir`) may cause us to load
- // data from the incremental cache. If this causes us to deserialize a `Span`,
- // then we may load additional `SyntaxContext`s into the global `HygieneData`.
- // Therefore, we need to encode the hygiene data last to ensure that we encode
- // any `SyntaxContext`s that might be used.
- i = self.position();
- let (syntax_contexts, expn_data, expn_hashes) = self.encode_hygiene();
- let hygiene_bytes = self.position() - i;
-
- i = self.position();
- let def_path_hash_map = self.encode_def_path_hash_map();
- let def_path_hash_map_bytes = self.position() - i;
-
- // Encode source_map. This needs to be done last,
- // since encoding `Span`s tells us which `SourceFiles` we actually
- // need to encode.
- i = self.position();
- let source_map = self.encode_source_map();
- let source_map_bytes = self.position() - i;
-
- i = self.position();
- let attrs = tcx.hir().krate_attrs();
- let has_default_lib_allocator = tcx.sess.contains_name(&attrs, sym::default_lib_allocator);
- let root = self.lazy(CrateRoot {
- name: tcx.crate_name(LOCAL_CRATE),
- extra_filename: tcx.sess.opts.cg.extra_filename.clone(),
- triple: tcx.sess.opts.target_triple.clone(),
- hash: tcx.crate_hash(LOCAL_CRATE),
- stable_crate_id: tcx.def_path_hash(LOCAL_CRATE.as_def_id()).stable_crate_id(),
- required_panic_strategy: tcx.required_panic_strategy(LOCAL_CRATE),
- panic_in_drop_strategy: tcx.sess.opts.unstable_opts.panic_in_drop,
- edition: tcx.sess.edition(),
- has_global_allocator: tcx.has_global_allocator(LOCAL_CRATE),
- has_panic_handler: tcx.has_panic_handler(LOCAL_CRATE),
- has_default_lib_allocator,
- proc_macro_data,
- debugger_visualizers,
- compiler_builtins: tcx.sess.contains_name(&attrs, sym::compiler_builtins),
- needs_allocator: tcx.sess.contains_name(&attrs, sym::needs_allocator),
- needs_panic_runtime: tcx.sess.contains_name(&attrs, sym::needs_panic_runtime),
- no_builtins: tcx.sess.contains_name(&attrs, sym::no_builtins),
- panic_runtime: tcx.sess.contains_name(&attrs, sym::panic_runtime),
- profiler_runtime: tcx.sess.contains_name(&attrs, sym::profiler_runtime),
- symbol_mangling_version: tcx.sess.opts.get_symbol_mangling_version(),
-
- crate_deps,
- dylib_dependency_formats,
- lib_features,
- stability_implications,
- lang_items,
- diagnostic_items,
- lang_items_missing,
- native_libraries,
- foreign_modules,
- source_map,
- traits,
- impls,
- incoherent_impls,
- exported_symbols,
- interpret_alloc_index,
- tables,
- syntax_contexts,
- expn_data,
- expn_hashes,
- def_path_hash_map,
+ let exported_symbols = stat!("exported-symbols", || {
+ self.encode_exported_symbols(&tcx.exported_symbols(LOCAL_CRATE))
+ });
+
+ // Encode the hygiene data.
+ // IMPORTANT: this *must* be the last thing that we encode (other than `SourceMap`). The
+ // process of encoding other items (e.g. `optimized_mir`) may cause us to load data from
+ // the incremental cache. If this causes us to deserialize a `Span`, then we may load
+ // additional `SyntaxContext`s into the global `HygieneData`. Therefore, we need to encode
+ // the hygiene data last to ensure that we encode any `SyntaxContext`s that might be used.
+ let (syntax_contexts, expn_data, expn_hashes) = stat!("hygiene", || self.encode_hygiene());
+
+ let def_path_hash_map = stat!("def-path-hash-map", || self.encode_def_path_hash_map());
+
+ // Encode source_map. This needs to be done last, because encoding `Span`s tells us which
+ // `SourceFiles` we actually need to encode.
+ let source_map = stat!("source-map", || self.encode_source_map());
+
+ let root = stat!("final", || {
+ let attrs = tcx.hir().krate_attrs();
+ self.lazy(CrateRoot {
+ name: tcx.crate_name(LOCAL_CRATE),
+ extra_filename: tcx.sess.opts.cg.extra_filename.clone(),
+ triple: tcx.sess.opts.target_triple.clone(),
+ hash: tcx.crate_hash(LOCAL_CRATE),
+ stable_crate_id: tcx.def_path_hash(LOCAL_CRATE.as_def_id()).stable_crate_id(),
+ required_panic_strategy: tcx.required_panic_strategy(LOCAL_CRATE),
+ panic_in_drop_strategy: tcx.sess.opts.unstable_opts.panic_in_drop,
+ edition: tcx.sess.edition(),
+ has_global_allocator: tcx.has_global_allocator(LOCAL_CRATE),
+ has_panic_handler: tcx.has_panic_handler(LOCAL_CRATE),
+ has_default_lib_allocator: tcx
+ .sess
+ .contains_name(&attrs, sym::default_lib_allocator),
+ proc_macro_data,
+ debugger_visualizers,
+ compiler_builtins: tcx.sess.contains_name(&attrs, sym::compiler_builtins),
+ needs_allocator: tcx.sess.contains_name(&attrs, sym::needs_allocator),
+ needs_panic_runtime: tcx.sess.contains_name(&attrs, sym::needs_panic_runtime),
+ no_builtins: tcx.sess.contains_name(&attrs, sym::no_builtins),
+ panic_runtime: tcx.sess.contains_name(&attrs, sym::panic_runtime),
+ profiler_runtime: tcx.sess.contains_name(&attrs, sym::profiler_runtime),
+ symbol_mangling_version: tcx.sess.opts.get_symbol_mangling_version(),
+
+ crate_deps,
+ dylib_dependency_formats,
+ lib_features,
+ stability_implications,
+ lang_items,
+ diagnostic_items,
+ lang_items_missing,
+ native_libraries,
+ foreign_modules,
+ source_map,
+ traits,
+ impls,
+ incoherent_impls,
+ exported_symbols,
+ interpret_alloc_index,
+ tables,
+ syntax_contexts,
+ expn_data,
+ expn_hashes,
+ def_path_hash_map,
+ })
});
- let final_bytes = self.position() - i;
let total_bytes = self.position();
- let computed_total_bytes = preamble_bytes
- + dep_bytes
- + lib_feature_bytes
- + stability_implications_bytes
- + lang_item_bytes
- + diagnostic_item_bytes
- + native_lib_bytes
- + foreign_modules_bytes
- + def_path_table_bytes
- + traits_bytes
- + impls_bytes
- + incoherent_impls_bytes
- + mir_bytes
- + item_bytes
- + interpret_alloc_index_bytes
- + proc_macro_data_bytes
- + tables_bytes
- + debugger_visualizers_bytes
- + exported_symbols_bytes
- + hygiene_bytes
- + def_path_hash_map_bytes
- + source_map_bytes
- + final_bytes;
+ let computed_total_bytes: usize = stats.iter().map(|(_, size)| size).sum();
assert_eq!(total_bytes, computed_total_bytes);
if tcx.sess.meta_stats() {
@@ -753,48 +727,77 @@ impl<'a, 'tcx> EncodeContext<'a, 'tcx> {
}
assert_eq!(self.opaque.file().stream_position().unwrap(), pos_before_rewind);
+        stats.sort_by_key(|&(_, size)| size);
+
+ let prefix = "meta-stats";
let perc = |bytes| (bytes * 100) as f64 / total_bytes as f64;
- let p = |label, bytes| {
- eprintln!("{:>21}: {:>8} bytes ({:4.1}%)", label, bytes, perc(bytes));
- };
- eprintln!("");
+ eprintln!("{} METADATA STATS", prefix);
+ eprintln!("{} {:<23}{:>10}", prefix, "Section", "Size");
+ eprintln!(
+ "{} ----------------------------------------------------------------",
+ prefix
+ );
+ for (label, size) in stats {
+ eprintln!(
+ "{} {:<23}{:>10} ({:4.1}%)",
+ prefix,
+ label,
+ to_readable_str(size),
+ perc(size)
+ );
+ }
eprintln!(
- "{} metadata bytes, of which {} bytes ({:.1}%) are zero",
- total_bytes,
- zero_bytes,
+ "{} ----------------------------------------------------------------",
+ prefix
+ );
+ eprintln!(
+ "{} {:<23}{:>10} (of which {:.1}% are zero bytes)",
+ prefix,
+ "Total",
+ to_readable_str(total_bytes),
perc(zero_bytes)
);
- p("preamble", preamble_bytes);
- p("dep", dep_bytes);
- p("lib feature", lib_feature_bytes);
- p("stability_implications", stability_implications_bytes);
- p("lang item", lang_item_bytes);
- p("diagnostic item", diagnostic_item_bytes);
- p("native lib", native_lib_bytes);
- p("foreign modules", foreign_modules_bytes);
- p("def-path table", def_path_table_bytes);
- p("traits", traits_bytes);
- p("impls", impls_bytes);
- p("incoherent_impls", incoherent_impls_bytes);
- p("mir", mir_bytes);
- p("item", item_bytes);
- p("interpret_alloc_index", interpret_alloc_index_bytes);
- p("proc-macro-data", proc_macro_data_bytes);
- p("tables", tables_bytes);
- p("debugger visualizers", debugger_visualizers_bytes);
- p("exported symbols", exported_symbols_bytes);
- p("hygiene", hygiene_bytes);
- p("def-path hashes", def_path_hash_map_bytes);
- p("source_map", source_map_bytes);
- p("final", final_bytes);
- eprintln!("");
+ eprintln!("{}", prefix);
}
root
}
}
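The `stat!` macro is the usual position-before/position-after measurement pattern; the `assert_eq!` on `computed_total_bytes` holds because every byte written lands inside exactly one labelled section. A standalone sketch of the same pattern, with a hypothetical `ByteSink` standing in for the opaque encoder and a closure argument instead of a macro:

struct ByteSink {
    out: Vec<u8>,
    stats: Vec<(&'static str, usize)>,
}

impl ByteSink {
    fn stat<R>(&mut self, label: &'static str, f: impl FnOnce(&mut Self) -> R) -> R {
        // Record the output position before and after the section.
        let start = self.out.len();
        let res = f(self);
        self.stats.push((label, self.out.len() - start));
        res
    }
}

fn main() {
    let mut sink = ByteSink { out: Vec::new(), stats: Vec::new() };
    sink.stat("deps", |s| s.out.extend_from_slice(b"dep-table"));
    sink.stat("items", |s| s.out.extend_from_slice(b"lots of item data"));

    // The per-section sizes add up to the whole stream.
    let total: usize = sink.stats.iter().map(|&(_, size)| size).sum();
    assert_eq!(total, sink.out.len());

    for (label, size) in &sink.stats {
        println!("{label:<10}{size:>6} bytes");
    }
}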
+/// Returns whether an attribute needs to be recorded in metadata, that is, whether it's usable
+/// and useful in downstream crates. Local-only attributes are an obvious example of ones that
+/// don't, but some rustdoc-specific attributes are likewise only of use when documenting the
+/// current crate itself.
+///
+/// Removing these superfluous attributes speeds up compilation by making the metadata smaller.
+///
+/// Note: the `is_def_id_public` parameter caches whether the given `DefId` has public visibility:
+/// this is a piece of data that can be computed once per `DefId`, rather than once per attribute.
+/// Some attributes are only usable downstream if the item they annotate is public.
+#[inline]
+fn should_encode_attr(
+ tcx: TyCtxt<'_>,
+ attr: &Attribute,
+ def_id: LocalDefId,
+ is_def_id_public: &mut Option<bool>,
+) -> bool {
+ if rustc_feature::is_builtin_only_local(attr.name_or_empty()) {
+ // Attributes marked local-only don't need to be encoded for downstream crates.
+ false
+ } else if attr.doc_str().is_some() {
+ // We keep all public doc comments because they might be "imported" into downstream crates
+ // if they use `#[doc(inline)]` to copy an item's documentation into their own.
+ *is_def_id_public
+ .get_or_insert_with(|| tcx.effective_visibilities(()).effective_vis(def_id).is_some())
+ } else if attr.has_name(sym::doc) {
+ // If this is a `doc` attribute, and it's marked `inline` (as in `#[doc(inline)]`), we can
+ // remove it. It won't be inlinable in downstream crates.
+ attr.meta_item_list().map(|l| l.iter().any(|l| !l.has_name(sym::inline))).unwrap_or(false)
+ } else {
+ true
+ }
+}
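The `is_def_id_public` parameter is a lazily filled cache: the visibility lookup runs at most once per `DefId`, and only if some attribute actually needs it. A minimal sketch of that `Option::get_or_insert_with` pattern, with a stand-in function playing the role of the effective-visibility query:

fn is_public_expensive(item: u32) -> bool {
    // Stand-in for the real visibility query.
    println!("computing visibility for item {item}");
    item % 2 == 0
}

fn keep_attr(needs_visibility: bool, item: u32, cache: &mut Option<bool>) -> bool {
    if needs_visibility {
        // Computed on first use, reused afterwards.
        *cache.get_or_insert_with(|| is_public_expensive(item))
    } else {
        true
    }
}

fn main() {
    let mut cache = None;
    // Three attributes on the same item: the "query" runs only once.
    for needs in [true, false, true] {
        let _ = keep_attr(needs, 4, &mut cache);
    }
}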
+
fn should_encode_visibility(def_kind: DefKind) -> bool {
match def_kind {
DefKind::Mod
@@ -817,6 +820,7 @@ fn should_encode_visibility(def_kind: DefKind) -> bool {
| DefKind::Use
| DefKind::ForeignMod
| DefKind::OpaqueTy
+ | DefKind::ImplTraitPlaceholder
| DefKind::Impl
| DefKind::Field => true,
DefKind::TyParam
@@ -849,6 +853,7 @@ fn should_encode_stability(def_kind: DefKind) -> bool {
| DefKind::ForeignMod
| DefKind::TyAlias
| DefKind::OpaqueTy
+ | DefKind::ImplTraitPlaceholder
| DefKind::Enum
| DefKind::Union
| DefKind::Impl
@@ -937,6 +942,7 @@ fn should_encode_variances(def_kind: DefKind) -> bool {
| DefKind::ForeignMod
| DefKind::TyAlias
| DefKind::OpaqueTy
+ | DefKind::ImplTraitPlaceholder
| DefKind::Impl
| DefKind::Trait
| DefKind::TraitAlias
@@ -973,6 +979,7 @@ fn should_encode_generics(def_kind: DefKind) -> bool {
| DefKind::AnonConst
| DefKind::InlineConst
| DefKind::OpaqueTy
+ | DefKind::ImplTraitPlaceholder
| DefKind::Impl
| DefKind::Field
| DefKind::TyParam
@@ -989,14 +996,141 @@ fn should_encode_generics(def_kind: DefKind) -> bool {
}
}
+fn should_encode_type(tcx: TyCtxt<'_>, def_id: LocalDefId, def_kind: DefKind) -> bool {
+ match def_kind {
+ DefKind::Struct
+ | DefKind::Union
+ | DefKind::Enum
+ | DefKind::Variant
+ | DefKind::Ctor(..)
+ | DefKind::Field
+ | DefKind::Fn
+ | DefKind::Const
+ | DefKind::Static(..)
+ | DefKind::TyAlias
+ | DefKind::OpaqueTy
+ | DefKind::ForeignTy
+ | DefKind::Impl
+ | DefKind::AssocFn
+ | DefKind::AssocConst
+ | DefKind::Closure
+ | DefKind::Generator
+ | DefKind::ConstParam
+ | DefKind::AnonConst
+ | DefKind::InlineConst => true,
+
+ DefKind::ImplTraitPlaceholder => {
+ let parent_def_id = tcx.impl_trait_in_trait_parent(def_id.to_def_id());
+ let assoc_item = tcx.associated_item(parent_def_id);
+ match assoc_item.container {
+ // Always encode an RPIT in an impl fn, since it always has a body
+ ty::AssocItemContainer::ImplContainer => true,
+ ty::AssocItemContainer::TraitContainer => {
+ // Encode an RPIT for a trait only if the trait has a default body
+ assoc_item.defaultness(tcx).has_value()
+ }
+ }
+ }
+
+ DefKind::AssocTy => {
+ let assoc_item = tcx.associated_item(def_id);
+ match assoc_item.container {
+ ty::AssocItemContainer::ImplContainer => true,
+ ty::AssocItemContainer::TraitContainer => assoc_item.defaultness(tcx).has_value(),
+ }
+ }
+ DefKind::TyParam => {
+ let hir::Node::GenericParam(param) = tcx.hir().get_by_def_id(def_id) else { bug!() };
+ let hir::GenericParamKind::Type { default, .. } = param.kind else { bug!() };
+ default.is_some()
+ }
+
+ DefKind::Trait
+ | DefKind::TraitAlias
+ | DefKind::Mod
+ | DefKind::ForeignMod
+ | DefKind::Macro(..)
+ | DefKind::Use
+ | DefKind::LifetimeParam
+ | DefKind::GlobalAsm
+ | DefKind::ExternCrate => false,
+ }
+}
+
+fn should_encode_const(def_kind: DefKind) -> bool {
+ match def_kind {
+ DefKind::Const | DefKind::AssocConst | DefKind::AnonConst => true,
+
+ DefKind::Struct
+ | DefKind::Union
+ | DefKind::Enum
+ | DefKind::Variant
+ | DefKind::Ctor(..)
+ | DefKind::Field
+ | DefKind::Fn
+ | DefKind::Static(..)
+ | DefKind::TyAlias
+ | DefKind::OpaqueTy
+ | DefKind::ImplTraitPlaceholder
+ | DefKind::ForeignTy
+ | DefKind::Impl
+ | DefKind::AssocFn
+ | DefKind::Closure
+ | DefKind::Generator
+ | DefKind::ConstParam
+ | DefKind::InlineConst
+ | DefKind::AssocTy
+ | DefKind::TyParam
+ | DefKind::Trait
+ | DefKind::TraitAlias
+ | DefKind::Mod
+ | DefKind::ForeignMod
+ | DefKind::Macro(..)
+ | DefKind::Use
+ | DefKind::LifetimeParam
+ | DefKind::GlobalAsm
+ | DefKind::ExternCrate => false,
+ }
+}
+
+fn should_encode_trait_impl_trait_tys<'tcx>(tcx: TyCtxt<'tcx>, def_id: DefId) -> bool {
+ if tcx.def_kind(def_id) != DefKind::AssocFn {
+ return false;
+ }
+
+ let Some(item) = tcx.opt_associated_item(def_id) else { return false; };
+ if item.container != ty::AssocItemContainer::ImplContainer {
+ return false;
+ }
+
+ let Some(trait_item_def_id) = item.trait_item_def_id else { return false; };
+
+ // FIXME(RPITIT): This does a somewhat manual walk through the signature
+ // of the trait fn to look for any RPITITs, but that's kinda doing a lot
+ // of work. We can probably remove this when we refactor RPITITs to be
+ // associated types.
+ tcx.fn_sig(trait_item_def_id).skip_binder().output().walk().any(|arg| {
+ if let ty::GenericArgKind::Type(ty) = arg.unpack()
+ && let ty::Projection(data) = ty.kind()
+ && tcx.def_kind(data.item_def_id) == DefKind::ImplTraitPlaceholder
+ {
+ true
+ } else {
+ false
+ }
+ })
+}
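The check above manually walks the trait signature's output type looking for an RPITIT projection. A toy model of that kind of search, with a hand-rolled type tree standing in for rustc's interned types:

enum Ty {
    Unit,
    Ref(Box<Ty>),
    Tuple(Vec<Ty>),
    OpaqueInTrait, // plays the role of an RPITIT projection
}

fn mentions_opaque_in_trait(ty: &Ty) -> bool {
    match ty {
        Ty::Unit => false,
        Ty::OpaqueInTrait => true,
        Ty::Ref(inner) => mentions_opaque_in_trait(inner),
        Ty::Tuple(parts) => parts.iter().any(mentions_opaque_in_trait),
    }
}

fn main() {
    let output = Ty::Tuple(vec![Ty::Unit, Ty::Ref(Box::new(Ty::OpaqueInTrait))]);
    assert!(mentions_opaque_in_trait(&output));
}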
+
impl<'a, 'tcx> EncodeContext<'a, 'tcx> {
fn encode_attrs(&mut self, def_id: LocalDefId) {
- let mut attrs = self
- .tcx
+ let tcx = self.tcx;
+ let mut is_public: Option<bool> = None;
+
+ let mut attrs = tcx
.hir()
- .attrs(self.tcx.hir().local_def_id_to_hir_id(def_id))
+ .attrs(tcx.hir().local_def_id_to_hir_id(def_id))
.iter()
- .filter(|attr| !rustc_feature::is_builtin_only_local(attr.name_or_empty()));
+ .filter(move |attr| should_encode_attr(tcx, attr, def_id, &mut is_public));
record_array!(self.tables.attributes[def_id.to_def_id()] <- attrs.clone());
if attrs.any(|attr| attr.may_have_doc_links()) {
@@ -1014,7 +1148,8 @@ impl<'a, 'tcx> EncodeContext<'a, 'tcx> {
let def_kind = tcx.opt_def_kind(local_id);
let Some(def_kind) = def_kind else { continue };
self.tables.opt_def_kind.set(def_id.index, def_kind);
- record!(self.tables.def_span[def_id] <- tcx.def_span(def_id));
+ let def_span = tcx.def_span(local_id);
+ record!(self.tables.def_span[def_id] <- def_span);
self.encode_attrs(local_id);
record!(self.tables.expn_that_defined[def_id] <- self.tcx.expn_that_defined(def_id));
if let Some(ident_span) = tcx.def_ident_span(def_id) {
@@ -1024,11 +1159,14 @@ impl<'a, 'tcx> EncodeContext<'a, 'tcx> {
record!(self.tables.codegen_fn_attrs[def_id] <- self.tcx.codegen_fn_attrs(def_id));
}
if should_encode_visibility(def_kind) {
- record!(self.tables.visibility[def_id] <- self.tcx.visibility(def_id));
+ let vis =
+ self.tcx.local_visibility(local_id).map_id(|def_id| def_id.local_def_index);
+ record!(self.tables.visibility[def_id] <- vis);
}
if should_encode_stability(def_kind) {
self.encode_stability(def_id);
self.encode_const_stability(def_id);
+ self.encode_default_body_stability(def_id);
self.encode_deprecation(def_id);
}
if should_encode_variances(def_kind) {
@@ -1044,9 +1182,25 @@ impl<'a, 'tcx> EncodeContext<'a, 'tcx> {
record_array!(self.tables.inferred_outlives_of[def_id] <- inferred_outlives);
}
}
+ if should_encode_type(tcx, local_id, def_kind) {
+ record!(self.tables.type_of[def_id] <- self.tcx.type_of(def_id));
+ }
+ if let DefKind::TyParam = def_kind {
+ let default = self.tcx.object_lifetime_default(def_id);
+ record!(self.tables.object_lifetime_default[def_id] <- default);
+ }
if let DefKind::Trait | DefKind::TraitAlias = def_kind {
record!(self.tables.super_predicates_of[def_id] <- self.tcx.super_predicates_of(def_id));
}
+ if let DefKind::Enum | DefKind::Struct | DefKind::Union = def_kind {
+ let params_in_repr = self.tcx.params_in_repr(def_id);
+ record!(self.tables.params_in_repr[def_id] <- params_in_repr);
+ }
+ if should_encode_trait_impl_trait_tys(tcx, def_id)
+ && let Ok(table) = self.tcx.collect_trait_impl_trait_tys(def_id)
+ {
+ record!(self.tables.trait_impl_trait_tys[def_id] <- table);
+ }
}
let inherent_impls = tcx.crate_inherent_impls(());
for (def_id, implementations) in inherent_impls.inherent_impls.iter() {
@@ -1060,11 +1214,6 @@ impl<'a, 'tcx> EncodeContext<'a, 'tcx> {
}
}
- fn encode_item_type(&mut self, def_id: DefId) {
- debug!("EncodeContext::encode_item_type({:?})", def_id);
- record!(self.tables.type_of[def_id] <- self.tcx.type_of(def_id));
- }
-
fn encode_enum_variant_info(&mut self, def: ty::AdtDef<'tcx>, index: VariantIdx) {
let tcx = self.tcx;
let variant = &def.variant(index);
@@ -1078,13 +1227,12 @@ impl<'a, 'tcx> EncodeContext<'a, 'tcx> {
is_non_exhaustive: variant.is_field_list_non_exhaustive(),
};
- record!(self.tables.kind[def_id] <- EntryKind::Variant(self.lazy(data)));
+ record!(self.tables.variant_data[def_id] <- data);
self.tables.constness.set(def_id.index, hir::Constness::Const);
record_array!(self.tables.children[def_id] <- variant.fields.iter().map(|f| {
assert!(f.did.is_local());
f.did.index
}));
- self.encode_item_type(def_id);
if variant.ctor_kind == CtorKind::Fn {
// FIXME(eddyb) encode signature only in `encode_enum_variant_ctor`.
if let Some(ctor_def_id) = variant.ctor_def_id {
@@ -1107,9 +1255,8 @@ impl<'a, 'tcx> EncodeContext<'a, 'tcx> {
is_non_exhaustive: variant.is_field_list_non_exhaustive(),
};
- record!(self.tables.kind[def_id] <- EntryKind::Variant(self.lazy(data)));
+ record!(self.tables.variant_data[def_id] <- data);
self.tables.constness.set(def_id.index, hir::Constness::Const);
- self.encode_item_type(def_id);
if variant.ctor_kind == CtorKind::Fn {
record!(self.tables.fn_sig[def_id] <- tcx.fn_sig(def_id));
}
@@ -1126,15 +1273,12 @@ impl<'a, 'tcx> EncodeContext<'a, 'tcx> {
// code uses it). However, we skip encoding anything relating to child
// items - we encode information about proc-macros later on.
let reexports = if !self.is_proc_macro {
- match tcx.module_reexports(local_def_id) {
- Some(exports) => self.lazy_array(exports),
- _ => LazyArray::empty(),
- }
+ tcx.module_reexports(local_def_id).unwrap_or(&[])
} else {
- LazyArray::empty()
+ &[]
};
- record!(self.tables.kind[def_id] <- EntryKind::Mod(reexports));
+ record_array!(self.tables.module_reexports[def_id] <- reexports);
if self.is_proc_macro {
// Encode this here because we don't do it in encode_def_ids.
record!(self.tables.expn_that_defined[def_id] <- tcx.expn_that_defined(local_def_id));
@@ -1146,14 +1290,21 @@ impl<'a, 'tcx> EncodeContext<'a, 'tcx> {
// from name resolution point of view.
hir::ItemKind::ForeignMod { items, .. } => {
for foreign_item in items {
- yield foreign_item.id.def_id.local_def_index;
+ yield foreign_item.id.owner_id.def_id.local_def_index;
}
}
// Only encode named non-reexport children, reexports are encoded
// separately and unnamed items are not used by name resolution.
hir::ItemKind::ExternCrate(..) => continue,
- _ if tcx.def_key(item_id.def_id.to_def_id()).get_opt_name().is_some() => {
- yield item_id.def_id.local_def_index;
+ hir::ItemKind::Struct(ref vdata, _) => {
+ yield item_id.owner_id.def_id.local_def_index;
+ // Encode constructors which take a separate slot in value namespace.
+ if let Some(ctor_hir_id) = vdata.ctor_hir_id() {
+ yield tcx.hir().local_def_id(ctor_hir_id).local_def_index;
+ }
+ }
+ _ if tcx.def_key(item_id.owner_id.to_def_id()).get_opt_name().is_some() => {
+ yield item_id.owner_id.def_id.local_def_index;
}
_ => continue,
}
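The generator above yields an extra index for a struct's constructor, which occupies its own slot in the value namespace. A stable-Rust approximation of the same enumeration using `flat_map`, with a simplified `Item` type standing in for the HIR items:

struct Item {
    index: u32,
    ctor_index: Option<u32>,
}

fn child_indices(items: &[Item]) -> impl Iterator<Item = u32> + '_ {
    // Each item contributes its own index, plus its constructor's index if it has one.
    items.iter().flat_map(|item| std::iter::once(item.index).chain(item.ctor_index))
}

fn main() {
    let items = [Item { index: 10, ctor_index: Some(11) }, Item { index: 12, ctor_index: None }];
    let children: Vec<u32> = child_indices(&items).collect();
    assert_eq!(children, vec![10, 11, 12]);
}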
@@ -1162,22 +1313,6 @@ impl<'a, 'tcx> EncodeContext<'a, 'tcx> {
}
}
- fn encode_field(
- &mut self,
- adt_def: ty::AdtDef<'tcx>,
- variant_index: VariantIdx,
- field_index: usize,
- ) {
- let variant = &adt_def.variant(variant_index);
- let field = &variant.fields[field_index];
-
- let def_id = field.did;
- debug!("EncodeContext::encode_field({:?})", def_id);
-
- record!(self.tables.kind[def_id] <- EntryKind::Field);
- self.encode_item_type(def_id);
- }
-
fn encode_struct_ctor(&mut self, adt_def: ty::AdtDef<'tcx>, def_id: DefId) {
debug!("EncodeContext::encode_struct_ctor({:?})", def_id);
let tcx = self.tcx;
@@ -1191,9 +1326,8 @@ impl<'a, 'tcx> EncodeContext<'a, 'tcx> {
};
record!(self.tables.repr_options[def_id] <- adt_def.repr());
+ record!(self.tables.variant_data[def_id] <- data);
self.tables.constness.set(def_id.index, hir::Constness::Const);
- record!(self.tables.kind[def_id] <- EntryKind::Struct(self.lazy(data)));
- self.encode_item_type(def_id);
if variant.ctor_kind == CtorKind::Fn {
record!(self.tables.fn_sig[def_id] <- tcx.fn_sig(def_id));
}
@@ -1214,18 +1348,10 @@ impl<'a, 'tcx> EncodeContext<'a, 'tcx> {
let ast_item = tcx.hir().expect_trait_item(def_id.expect_local());
self.tables.impl_defaultness.set(def_id.index, ast_item.defaultness);
let trait_item = tcx.associated_item(def_id);
+ self.tables.assoc_container.set(def_id.index, trait_item.container);
match trait_item.kind {
- ty::AssocKind::Const => {
- let rendered = rustc_hir_pretty::to_string(
- &(&self.tcx.hir() as &dyn intravisit::Map<'_>),
- |s| s.print_trait_item(ast_item),
- );
-
- record!(self.tables.kind[def_id] <- EntryKind::AssocConst(ty::AssocItemContainer::TraitContainer));
- record!(self.tables.mir_const_qualif[def_id] <- mir::ConstQualifs::default());
- record!(self.tables.rendered_const[def_id] <- rendered);
- }
+ ty::AssocKind::Const => {}
ty::AssocKind::Fn => {
let hir::TraitItemKind::Fn(m_sig, m) = &ast_item.kind else { bug!() };
match *m {
@@ -1238,24 +1364,9 @@ impl<'a, 'tcx> EncodeContext<'a, 'tcx> {
};
self.tables.asyncness.set(def_id.index, m_sig.header.asyncness);
self.tables.constness.set(def_id.index, hir::Constness::NotConst);
- record!(self.tables.kind[def_id] <- EntryKind::AssocFn {
- container: ty::AssocItemContainer::TraitContainer,
- has_self: trait_item.fn_has_self_parameter,
- });
}
ty::AssocKind::Type => {
self.encode_explicit_item_bounds(def_id);
- record!(self.tables.kind[def_id] <- EntryKind::AssocType(ty::AssocItemContainer::TraitContainer));
- }
- }
- match trait_item.kind {
- ty::AssocKind::Const | ty::AssocKind::Fn => {
- self.encode_item_type(def_id);
- }
- ty::AssocKind::Type => {
- if ast_item.defaultness.has_value() {
- self.encode_item_type(def_id);
- }
}
}
if trait_item.kind == ty::AssocKind::Fn {
@@ -1270,20 +1381,9 @@ impl<'a, 'tcx> EncodeContext<'a, 'tcx> {
let ast_item = self.tcx.hir().expect_impl_item(def_id.expect_local());
self.tables.impl_defaultness.set(def_id.index, ast_item.defaultness);
let impl_item = self.tcx.associated_item(def_id);
+ self.tables.assoc_container.set(def_id.index, impl_item.container);
match impl_item.kind {
- ty::AssocKind::Const => {
- if let hir::ImplItemKind::Const(_, body_id) = ast_item.kind {
- let qualifs = self.tcx.at(ast_item.span).mir_const_qualif(def_id);
- let const_data = self.encode_rendered_const_for_body(body_id);
-
- record!(self.tables.kind[def_id] <- EntryKind::AssocConst(ty::AssocItemContainer::ImplContainer));
- record!(self.tables.mir_const_qualif[def_id] <- qualifs);
- record!(self.tables.rendered_const[def_id] <- const_data);
- } else {
- bug!()
- }
- }
ty::AssocKind::Fn => {
let hir::ImplItemKind::Fn(ref sig, body) = ast_item.kind else { bug!() };
self.tables.asyncness.set(def_id.index, sig.header.asyncness);
@@ -1295,16 +1395,9 @@ impl<'a, 'tcx> EncodeContext<'a, 'tcx> {
hir::Constness::NotConst
};
self.tables.constness.set(def_id.index, constness);
- record!(self.tables.kind[def_id] <- EntryKind::AssocFn {
- container: ty::AssocItemContainer::ImplContainer,
- has_self: impl_item.fn_has_self_parameter,
- });
- }
- ty::AssocKind::Type => {
- record!(self.tables.kind[def_id] <- EntryKind::AssocType(ty::AssocItemContainer::ImplContainer));
}
+ ty::AssocKind::Const | ty::AssocKind::Type => {}
}
- self.encode_item_type(def_id);
if let Some(trait_item_def_id) = impl_item.trait_item_def_id {
self.tables.trait_item_def_id.set(def_id.index, trait_item_def_id.into());
}
@@ -1321,44 +1414,62 @@ impl<'a, 'tcx> EncodeContext<'a, 'tcx> {
return;
}
- let keys_and_jobs = self
- .tcx
- .mir_keys(())
- .iter()
- .filter_map(|&def_id| {
- let (encode_const, encode_opt) = should_encode_mir(self.tcx, def_id);
- if encode_const || encode_opt {
- Some((def_id, encode_const, encode_opt))
- } else {
- None
- }
- })
- .collect::<Vec<_>>();
- for (def_id, encode_const, encode_opt) in keys_and_jobs.into_iter() {
+ let tcx = self.tcx;
+
+ let keys_and_jobs = tcx.mir_keys(()).iter().filter_map(|&def_id| {
+ let (encode_const, encode_opt) = should_encode_mir(tcx, def_id);
+ if encode_const || encode_opt { Some((def_id, encode_const, encode_opt)) } else { None }
+ });
+ for (def_id, encode_const, encode_opt) in keys_and_jobs {
debug_assert!(encode_const || encode_opt);
debug!("EntryBuilder::encode_mir({:?})", def_id);
if encode_opt {
- record!(self.tables.optimized_mir[def_id.to_def_id()] <- self.tcx.optimized_mir(def_id));
+ record!(self.tables.optimized_mir[def_id.to_def_id()] <- tcx.optimized_mir(def_id));
}
if encode_const {
- record!(self.tables.mir_for_ctfe[def_id.to_def_id()] <- self.tcx.mir_for_ctfe(def_id));
+ record!(self.tables.mir_for_ctfe[def_id.to_def_id()] <- tcx.mir_for_ctfe(def_id));
// FIXME(generic_const_exprs): this feels wrong to have in `encode_mir`
- let abstract_const = self.tcx.thir_abstract_const(def_id);
+ let abstract_const = tcx.thir_abstract_const(def_id);
if let Ok(Some(abstract_const)) = abstract_const {
record!(self.tables.thir_abstract_const[def_id.to_def_id()] <- abstract_const);
}
+
+ if should_encode_const(tcx.def_kind(def_id)) {
+ let qualifs = tcx.mir_const_qualif(def_id);
+ record!(self.tables.mir_const_qualif[def_id.to_def_id()] <- qualifs);
+ let body_id = tcx.hir().maybe_body_owned_by(def_id);
+ if let Some(body_id) = body_id {
+ let const_data = self.encode_rendered_const_for_body(body_id);
+ record!(self.tables.rendered_const[def_id.to_def_id()] <- const_data);
+ }
+ }
}
- record!(self.tables.promoted_mir[def_id.to_def_id()] <- self.tcx.promoted_mir(def_id));
+ record!(self.tables.promoted_mir[def_id.to_def_id()] <- tcx.promoted_mir(def_id));
let instance =
ty::InstanceDef::Item(ty::WithOptConstParam::unknown(def_id.to_def_id()));
- let unused = self.tcx.unused_generic_params(instance);
+ let unused = tcx.unused_generic_params(instance);
if !unused.is_empty() {
record!(self.tables.unused_generic_params[def_id.to_def_id()] <- unused);
}
}
+
+ // Encode all the deduced parameter attributes for everything that has MIR, even for items
+        // that can't be inlined. But skip this unless we are optimizing in non-incremental
+        // mode, to save the query traffic.
+ if tcx.sess.opts.output_types.should_codegen()
+ && tcx.sess.opts.optimize != OptLevel::No
+ && tcx.sess.opts.incremental.is_none()
+ {
+ for &local_def_id in tcx.mir_keys(()) {
+ if let DefKind::AssocFn | DefKind::Fn = tcx.def_kind(local_def_id) {
+ record_array!(self.tables.deduced_param_attrs[local_def_id.to_def_id()] <-
+ self.tcx.deduced_param_attrs(local_def_id.to_def_id()));
+ }
+ }
+ }
}
fn encode_stability(&mut self, def_id: DefId) {
@@ -1385,6 +1496,18 @@ impl<'a, 'tcx> EncodeContext<'a, 'tcx> {
}
}
+ fn encode_default_body_stability(&mut self, def_id: DefId) {
+ debug!("EncodeContext::encode_default_body_stability({:?})", def_id);
+
+ // The query lookup can take a measurable amount of time in crates with many items. Check if
+ // the stability attributes are even enabled before using their queries.
+ if self.feat.staged_api || self.tcx.sess.opts.unstable_opts.force_unstable_if_unmarked {
+ if let Some(stab) = self.tcx.lookup_default_body_stability(def_id) {
+ record!(self.tables.lookup_default_body_stability[def_id] <- stab)
+ }
+ }
+ }
+
fn encode_deprecation(&mut self, def_id: DefId) {
debug!("EncodeContext::encode_deprecation({:?})", def_id);
if let Some(depr) = self.tcx.lookup_deprecation(def_id) {
@@ -1405,38 +1528,27 @@ impl<'a, 'tcx> EncodeContext<'a, 'tcx> {
debug!("EncodeContext::encode_info_for_item({:?})", def_id);
- let entry_kind = match item.kind {
- hir::ItemKind::Static(..) => EntryKind::Static,
- hir::ItemKind::Const(_, body_id) => {
- let qualifs = self.tcx.at(item.span).mir_const_qualif(def_id);
- let const_data = self.encode_rendered_const_for_body(body_id);
- record!(self.tables.mir_const_qualif[def_id] <- qualifs);
- record!(self.tables.rendered_const[def_id] <- const_data);
- EntryKind::Const
- }
+ match item.kind {
hir::ItemKind::Fn(ref sig, .., body) => {
self.tables.asyncness.set(def_id.index, sig.header.asyncness);
record_array!(self.tables.fn_arg_names[def_id] <- self.tcx.hir().body_param_names(body));
self.tables.constness.set(def_id.index, sig.header.constness);
- EntryKind::Fn
}
hir::ItemKind::Macro(ref macro_def, _) => {
- EntryKind::MacroDef(self.lazy(&*macro_def.body), macro_def.macro_rules)
+ if macro_def.macro_rules {
+ self.tables.macro_rules.set(def_id.index, ());
+ }
+ record!(self.tables.macro_definition[def_id] <- &*macro_def.body);
}
hir::ItemKind::Mod(ref m) => {
- return self.encode_info_for_mod(item.def_id, m);
+ return self.encode_info_for_mod(item.owner_id.def_id, m);
}
- hir::ItemKind::ForeignMod { .. } => EntryKind::ForeignMod,
- hir::ItemKind::GlobalAsm(..) => EntryKind::GlobalAsm,
- hir::ItemKind::TyAlias(..) => EntryKind::Type,
hir::ItemKind::OpaqueTy(..) => {
self.encode_explicit_item_bounds(def_id);
- EntryKind::OpaqueTy
}
hir::ItemKind::Enum(..) => {
let adt_def = self.tcx.adt_def(def_id);
record!(self.tables.repr_options[def_id] <- adt_def.repr());
- EntryKind::Enum
}
hir::ItemKind::Struct(ref struct_def, _) => {
let adt_def = self.tcx.adt_def(def_id);
@@ -1451,24 +1563,24 @@ impl<'a, 'tcx> EncodeContext<'a, 'tcx> {
.map(|ctor_hir_id| self.tcx.hir().local_def_id(ctor_hir_id).local_def_index);
let variant = adt_def.non_enum_variant();
- EntryKind::Struct(self.lazy(VariantData {
+ record!(self.tables.variant_data[def_id] <- VariantData {
ctor_kind: variant.ctor_kind,
discr: variant.discr,
ctor,
is_non_exhaustive: variant.is_field_list_non_exhaustive(),
- }))
+ });
}
hir::ItemKind::Union(..) => {
let adt_def = self.tcx.adt_def(def_id);
record!(self.tables.repr_options[def_id] <- adt_def.repr());
let variant = adt_def.non_enum_variant();
- EntryKind::Union(self.lazy(VariantData {
+ record!(self.tables.variant_data[def_id] <- VariantData {
ctor_kind: variant.ctor_kind,
discr: variant.discr,
ctor: None,
is_non_exhaustive: variant.is_field_list_non_exhaustive(),
- }))
+ });
}
hir::ItemKind::Impl(hir::Impl { defaultness, constness, .. }) => {
self.tables.impl_defaultness.set(def_id.index, *defaultness);
@@ -1494,34 +1606,37 @@ impl<'a, 'tcx> EncodeContext<'a, 'tcx> {
let polarity = self.tcx.impl_polarity(def_id);
self.tables.impl_polarity.set(def_id.index, polarity);
-
- EntryKind::Impl
}
hir::ItemKind::Trait(..) => {
let trait_def = self.tcx.trait_def(def_id);
record!(self.tables.trait_def[def_id] <- trait_def);
-
- EntryKind::Trait
}
hir::ItemKind::TraitAlias(..) => {
let trait_def = self.tcx.trait_def(def_id);
record!(self.tables.trait_def[def_id] <- trait_def);
-
- EntryKind::TraitAlias
}
hir::ItemKind::ExternCrate(_) | hir::ItemKind::Use(..) => {
bug!("cannot encode info for item {:?}", item)
}
+ hir::ItemKind::Static(..)
+ | hir::ItemKind::Const(..)
+ | hir::ItemKind::ForeignMod { .. }
+ | hir::ItemKind::GlobalAsm(..)
+ | hir::ItemKind::TyAlias(..) => {}
};
- record!(self.tables.kind[def_id] <- entry_kind);
// FIXME(eddyb) there should be a nicer way to do this.
match item.kind {
- hir::ItemKind::Enum(..) => record_array!(self.tables.children[def_id] <-
- self.tcx.adt_def(def_id).variants().iter().map(|v| {
- assert!(v.def_id.is_local());
- v.def_id.index
- })
- ),
+ hir::ItemKind::Enum(..) => {
+ record_array!(self.tables.children[def_id] <- iter::from_generator(||
+ for variant in tcx.adt_def(def_id).variants() {
+ yield variant.def_id.index;
+ // Encode constructors which take a separate slot in value namespace.
+ if let Some(ctor_def_id) = variant.ctor_def_id {
+ yield ctor_def_id.index;
+ }
+ }
+ ))
+ }
hir::ItemKind::Struct(..) | hir::ItemKind::Union(..) => {
record_array!(self.tables.children[def_id] <-
self.tcx.adt_def(def_id).non_enum_variant().fields.iter().map(|f| {
@@ -1541,18 +1656,6 @@ impl<'a, 'tcx> EncodeContext<'a, 'tcx> {
}
_ => {}
}
- match item.kind {
- hir::ItemKind::Static(..)
- | hir::ItemKind::Const(..)
- | hir::ItemKind::Fn(..)
- | hir::ItemKind::TyAlias(..)
- | hir::ItemKind::OpaqueTy(..)
- | hir::ItemKind::Enum(..)
- | hir::ItemKind::Struct(..)
- | hir::ItemKind::Union(..)
- | hir::ItemKind::Impl { .. } => self.encode_item_type(def_id),
- _ => {}
- }
if let hir::ItemKind::Fn(..) = item.kind {
record!(self.tables.fn_sig[def_id] <- tcx.fn_sig(def_id));
if tcx.is_intrinsic(def_id) {
@@ -1564,12 +1667,44 @@ impl<'a, 'tcx> EncodeContext<'a, 'tcx> {
record!(self.tables.impl_trait_ref[def_id] <- trait_ref);
}
}
- }
+ // In some cases, along with the item itself, we also
+ // encode some sub-items. Usually we want some info from the item
+        // so it's easier to do that here than to wait until we would encounter
+        // them normally in the visitor walk.
+ match item.kind {
+ hir::ItemKind::Enum(..) => {
+ let def = self.tcx.adt_def(item.owner_id.to_def_id());
+ for (i, variant) in def.variants().iter_enumerated() {
+ self.encode_enum_variant_info(def, i);
- fn encode_info_for_generic_param(&mut self, def_id: DefId, kind: EntryKind, encode_type: bool) {
- record!(self.tables.kind[def_id] <- kind);
- if encode_type {
- self.encode_item_type(def_id);
+ if let Some(_ctor_def_id) = variant.ctor_def_id {
+ self.encode_enum_variant_ctor(def, i);
+ }
+ }
+ }
+ hir::ItemKind::Struct(ref struct_def, _) => {
+ let def = self.tcx.adt_def(item.owner_id.to_def_id());
+ // If the struct has a constructor, encode it.
+ if let Some(ctor_hir_id) = struct_def.ctor_hir_id() {
+ let ctor_def_id = self.tcx.hir().local_def_id(ctor_hir_id);
+ self.encode_struct_ctor(def, ctor_def_id.to_def_id());
+ }
+ }
+ hir::ItemKind::Impl { .. } => {
+ for &trait_item_def_id in
+ self.tcx.associated_item_def_ids(item.owner_id.to_def_id()).iter()
+ {
+ self.encode_info_for_impl_item(trait_item_def_id);
+ }
+ }
+ hir::ItemKind::Trait(..) => {
+ for &item_def_id in
+ self.tcx.associated_item_def_ids(item.owner_id.to_def_id()).iter()
+ {
+ self.encode_info_for_trait_item(item_def_id);
+ }
+ }
+ _ => {}
}
}
@@ -1584,34 +1719,16 @@ impl<'a, 'tcx> EncodeContext<'a, 'tcx> {
ty::Generator(..) => {
let data = self.tcx.generator_kind(def_id).unwrap();
let generator_diagnostic_data = typeck_result.get_generator_diagnostic_data();
- record!(self.tables.kind[def_id.to_def_id()] <- EntryKind::Generator);
record!(self.tables.generator_kind[def_id.to_def_id()] <- data);
record!(self.tables.generator_diagnostic_data[def_id.to_def_id()] <- generator_diagnostic_data);
}
- ty::Closure(..) => {
- record!(self.tables.kind[def_id.to_def_id()] <- EntryKind::Closure);
+ ty::Closure(_, substs) => {
+ record!(self.tables.fn_sig[def_id.to_def_id()] <- substs.as_closure().sig());
}
_ => bug!("closure that is neither generator nor closure"),
}
- self.encode_item_type(def_id.to_def_id());
- if let ty::Closure(def_id, substs) = *ty.kind() {
- record!(self.tables.fn_sig[def_id] <- substs.as_closure().sig());
- }
- }
-
- fn encode_info_for_anon_const(&mut self, id: hir::HirId) {
- let def_id = self.tcx.hir().local_def_id(id);
- debug!("EncodeContext::encode_info_for_anon_const({:?})", def_id);
- let body_id = self.tcx.hir().body_owned_by(def_id);
- let const_data = self.encode_rendered_const_for_body(body_id);
- let qualifs = self.tcx.mir_const_qualif(def_id);
-
- record!(self.tables.kind[def_id.to_def_id()] <- EntryKind::AnonConst);
- record!(self.tables.mir_const_qualif[def_id.to_def_id()] <- qualifs);
- record!(self.tables.rendered_const[def_id.to_def_id()] <- const_data);
- self.encode_item_type(def_id.to_def_id());
}
fn encode_native_libraries(&mut self) -> LazyArray<NativeLib> {
@@ -1670,7 +1787,8 @@ impl<'a, 'tcx> EncodeContext<'a, 'tcx> {
self.tables.opt_def_kind.set(LOCAL_CRATE.as_def_id().index, DefKind::Mod);
record!(self.tables.def_span[LOCAL_CRATE.as_def_id()] <- tcx.def_span(LOCAL_CRATE.as_def_id()));
self.encode_attrs(LOCAL_CRATE.as_def_id().expect_local());
- record!(self.tables.visibility[LOCAL_CRATE.as_def_id()] <- tcx.visibility(LOCAL_CRATE.as_def_id()));
+ let vis = tcx.local_visibility(CRATE_DEF_ID).map_id(|def_id| def_id.local_def_index);
+ record!(self.tables.visibility[LOCAL_CRATE.as_def_id()] <- vis);
if let Some(stability) = stability {
record!(self.tables.lookup_stability[LOCAL_CRATE.as_def_id()] <- stability);
}
@@ -1709,7 +1827,7 @@ impl<'a, 'tcx> EncodeContext<'a, 'tcx> {
let def_id = id.to_def_id();
self.tables.opt_def_kind.set(def_id.index, DefKind::Macro(macro_kind));
- record!(self.tables.kind[def_id] <- EntryKind::ProcMacro(macro_kind));
+ self.tables.proc_macro.set(def_id.index, macro_kind);
self.encode_attrs(id);
record!(self.tables.def_keys[def_id] <- def_key);
record!(self.tables.def_ident_span[def_id] <- span);
@@ -1822,8 +1940,8 @@ impl<'a, 'tcx> EncodeContext<'a, 'tcx> {
FxHashMap::default();
for id in tcx.hir().items() {
- if matches!(tcx.def_kind(id.def_id), DefKind::Impl) {
- if let Some(trait_ref) = tcx.impl_trait_ref(id.def_id.to_def_id()) {
+ if matches!(tcx.def_kind(id.owner_id), DefKind::Impl) {
+ if let Some(trait_ref) = tcx.impl_trait_ref(id.owner_id) {
let simplified_self_ty = fast_reject::simplify_type(
self.tcx,
trait_ref.self_ty(),
@@ -1833,7 +1951,7 @@ impl<'a, 'tcx> EncodeContext<'a, 'tcx> {
fx_hash_map
.entry(trait_ref.def_id)
.or_default()
- .push((id.def_id.local_def_index, simplified_self_ty));
+ .push((id.owner_id.def_id.local_def_index, simplified_self_ty));
}
}
}
@@ -1947,18 +2065,11 @@ impl<'a, 'tcx> EncodeContext<'a, 'tcx> {
hir::Constness::NotConst
};
self.tables.constness.set(def_id.index, constness);
- record!(self.tables.kind[def_id] <- EntryKind::ForeignFn);
- }
- hir::ForeignItemKind::Static(..) => {
- record!(self.tables.kind[def_id] <- EntryKind::ForeignStatic);
- }
- hir::ForeignItemKind::Type => {
- record!(self.tables.kind[def_id] <- EntryKind::ForeignType);
+ record!(self.tables.fn_sig[def_id] <- tcx.fn_sig(def_id));
}
+ hir::ForeignItemKind::Static(..) | hir::ForeignItemKind::Type => {}
}
- self.encode_item_type(def_id);
if let hir::ForeignItemKind::Fn(..) = nitem.kind {
- record!(self.tables.fn_sig[def_id] <- tcx.fn_sig(def_id));
if tcx.is_intrinsic(def_id) {
self.tables.is_intrinsic.set(def_id.index, ());
}
@@ -1977,21 +2088,16 @@ impl<'a, 'tcx> Visitor<'tcx> for EncodeContext<'a, 'tcx> {
intravisit::walk_expr(self, ex);
self.encode_info_for_expr(ex);
}
- fn visit_anon_const(&mut self, c: &'tcx AnonConst) {
- intravisit::walk_anon_const(self, c);
- self.encode_info_for_anon_const(c.hir_id);
- }
fn visit_item(&mut self, item: &'tcx hir::Item<'tcx>) {
intravisit::walk_item(self, item);
match item.kind {
hir::ItemKind::ExternCrate(_) | hir::ItemKind::Use(..) => {} // ignore these
- _ => self.encode_info_for_item(item.def_id.to_def_id(), item),
+ _ => self.encode_info_for_item(item.owner_id.to_def_id(), item),
}
- self.encode_addl_info_for_item(item);
}
fn visit_foreign_item(&mut self, ni: &'tcx hir::ForeignItem<'tcx>) {
intravisit::walk_foreign_item(self, ni);
- self.encode_info_for_foreign_item(ni.def_id.to_def_id(), ni);
+ self.encode_info_for_foreign_item(ni.owner_id.to_def_id(), ni);
}
fn visit_generics(&mut self, generics: &'tcx hir::Generics<'tcx>) {
intravisit::walk_generics(self, generics);
@@ -2000,29 +2106,13 @@ impl<'a, 'tcx> Visitor<'tcx> for EncodeContext<'a, 'tcx> {
}
impl<'a, 'tcx> EncodeContext<'a, 'tcx> {
- fn encode_fields(&mut self, adt_def: ty::AdtDef<'tcx>) {
- for (variant_index, variant) in adt_def.variants().iter_enumerated() {
- for (field_index, _field) in variant.fields.iter().enumerate() {
- self.encode_field(adt_def, variant_index, field_index);
- }
- }
- }
-
fn encode_info_for_generics(&mut self, generics: &hir::Generics<'tcx>) {
for param in generics.params {
let def_id = self.tcx.hir().local_def_id(param.hir_id);
match param.kind {
- GenericParamKind::Lifetime { .. } => continue,
- GenericParamKind::Type { default, .. } => {
- self.encode_info_for_generic_param(
- def_id.to_def_id(),
- EntryKind::TypeParam,
- default.is_some(),
- );
- }
- GenericParamKind::Const { ref default, .. } => {
+ hir::GenericParamKind::Lifetime { .. } | hir::GenericParamKind::Type { .. } => {}
+ hir::GenericParamKind::Const { ref default, .. } => {
let def_id = def_id.to_def_id();
- self.encode_info_for_generic_param(def_id, EntryKind::ConstParam, true);
if default.is_some() {
record!(self.tables.const_param_default[def_id] <- self.tcx.const_param_default(def_id))
}
@@ -2036,68 +2126,6 @@ impl<'a, 'tcx> EncodeContext<'a, 'tcx> {
self.encode_info_for_closure(expr.hir_id);
}
}
-
- /// In some cases, along with the item itself, we also
- /// encode some sub-items. Usually we want some info from the item
- /// so it's easier to do that here then to wait until we would encounter
- /// normally in the visitor walk.
- fn encode_addl_info_for_item(&mut self, item: &hir::Item<'_>) {
- match item.kind {
- hir::ItemKind::Static(..)
- | hir::ItemKind::Const(..)
- | hir::ItemKind::Fn(..)
- | hir::ItemKind::Macro(..)
- | hir::ItemKind::Mod(..)
- | hir::ItemKind::ForeignMod { .. }
- | hir::ItemKind::GlobalAsm(..)
- | hir::ItemKind::ExternCrate(..)
- | hir::ItemKind::Use(..)
- | hir::ItemKind::TyAlias(..)
- | hir::ItemKind::OpaqueTy(..)
- | hir::ItemKind::TraitAlias(..) => {
- // no sub-item recording needed in these cases
- }
- hir::ItemKind::Enum(..) => {
- let def = self.tcx.adt_def(item.def_id.to_def_id());
- self.encode_fields(def);
-
- for (i, variant) in def.variants().iter_enumerated() {
- self.encode_enum_variant_info(def, i);
-
- if let Some(_ctor_def_id) = variant.ctor_def_id {
- self.encode_enum_variant_ctor(def, i);
- }
- }
- }
- hir::ItemKind::Struct(ref struct_def, _) => {
- let def = self.tcx.adt_def(item.def_id.to_def_id());
- self.encode_fields(def);
-
- // If the struct has a constructor, encode it.
- if let Some(ctor_hir_id) = struct_def.ctor_hir_id() {
- let ctor_def_id = self.tcx.hir().local_def_id(ctor_hir_id);
- self.encode_struct_ctor(def, ctor_def_id.to_def_id());
- }
- }
- hir::ItemKind::Union(..) => {
- let def = self.tcx.adt_def(item.def_id.to_def_id());
- self.encode_fields(def);
- }
- hir::ItemKind::Impl { .. } => {
- for &trait_item_def_id in
- self.tcx.associated_item_def_ids(item.def_id.to_def_id()).iter()
- {
- self.encode_info_for_impl_item(trait_item_def_id);
- }
- }
- hir::ItemKind::Trait(..) => {
- for &item_def_id in self.tcx.associated_item_def_ids(item.def_id.to_def_id()).iter()
- {
- self.encode_info_for_trait_item(item_def_id);
- }
- }
- }
- }
}
/// Used to prefetch queries which will be needed later by metadata encoding.
@@ -2220,7 +2248,7 @@ pub fn encode_metadata(tcx: TyCtxt<'_>, path: &Path) {
fn encode_metadata_impl(tcx: TyCtxt<'_>, path: &Path) {
let mut encoder = opaque::FileEncoder::new(path)
- .unwrap_or_else(|err| tcx.sess.fatal(&format!("failed to create file encoder: {}", err)));
+ .unwrap_or_else(|err| tcx.sess.emit_fatal(FailCreateFileEncoder { err }));
encoder.emit_raw_bytes(METADATA_HEADER);
// Will be filled with the root position after encoding everything.
@@ -2228,7 +2256,7 @@ fn encode_metadata_impl(tcx: TyCtxt<'_>, path: &Path) {
let source_map_files = tcx.sess.source_map().files();
let source_file_cache = (source_map_files[0].clone(), 0);
- let required_source_files = Some(GrowableBitSet::with_capacity(source_map_files.len()));
+ let required_source_files = Some(FxIndexSet::default());
drop(source_map_files);
let hygiene_ctxt = HygieneEncodeContext::default();
@@ -2246,6 +2274,7 @@ fn encode_metadata_impl(tcx: TyCtxt<'_>, path: &Path) {
required_source_files,
is_proc_macro: tcx.sess.crate_types().contains(&CrateType::ProcMacro),
hygiene_ctxt: &hygiene_ctxt,
+ symbol_table: Default::default(),
};
// Encode the rustc version string in a predictable location.
@@ -2264,10 +2293,10 @@ fn encode_metadata_impl(tcx: TyCtxt<'_>, path: &Path) {
// Encode the root position.
let header = METADATA_HEADER.len();
file.seek(std::io::SeekFrom::Start(header as u64))
- .unwrap_or_else(|err| tcx.sess.fatal(&format!("failed to seek the file: {}", err)));
+ .unwrap_or_else(|err| tcx.sess.emit_fatal(FailSeekFile { err }));
let pos = root.position.get();
file.write_all(&[(pos >> 24) as u8, (pos >> 16) as u8, (pos >> 8) as u8, (pos >> 0) as u8])
- .unwrap_or_else(|err| tcx.sess.fatal(&format!("failed to write to the file: {}", err)));
+ .unwrap_or_else(|err| tcx.sess.emit_fatal(FailWriteFile { err }));
// Return to the position where we are before writing the root position.
file.seek(std::io::SeekFrom::Start(pos_before_seek)).unwrap();
@@ -2287,8 +2316,8 @@ pub fn provide(providers: &mut Providers) {
let mut traits = Vec::new();
for id in tcx.hir().items() {
- if matches!(tcx.def_kind(id.def_id), DefKind::Trait | DefKind::TraitAlias) {
- traits.push(id.def_id.to_def_id())
+ if matches!(tcx.def_kind(id.owner_id), DefKind::Trait | DefKind::TraitAlias) {
+ traits.push(id.owner_id.to_def_id())
}
}
diff --git a/compiler/rustc_metadata/src/rmeta/mod.rs b/compiler/rustc_metadata/src/rmeta/mod.rs
index 66bdecc30..27dc8ff16 100644
--- a/compiler/rustc_metadata/src/rmeta/mod.rs
+++ b/compiler/rustc_metadata/src/rmeta/mod.rs
@@ -1,6 +1,7 @@
use crate::creader::CrateMetadataRef;
use decoder::Metadata;
use def_path_hash_map::DefPathHashMapRef;
+use rustc_data_structures::fx::FxHashMap;
use table::TableBuilder;
use rustc_ast as ast;
@@ -12,15 +13,17 @@ use rustc_hir::def::{CtorKind, DefKind};
use rustc_hir::def_id::{CrateNum, DefId, DefIndex, DefPathHash, StableCrateId};
use rustc_hir::definitions::DefKey;
use rustc_hir::lang_items;
-use rustc_index::{bit_set::FiniteBitSet, vec::IndexVec};
+use rustc_index::bit_set::{BitSet, FiniteBitSet};
+use rustc_index::vec::IndexVec;
use rustc_middle::metadata::ModChild;
use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrs;
use rustc_middle::middle::exported_symbols::{ExportedSymbol, SymbolExportInfo};
+use rustc_middle::middle::resolve_lifetime::ObjectLifetimeDefault;
use rustc_middle::mir;
use rustc_middle::ty::fast_reject::SimplifiedType;
use rustc_middle::ty::query::Providers;
use rustc_middle::ty::{self, ReprOptions, Ty};
-use rustc_middle::ty::{GeneratorDiagnosticData, ParameterizedOverTcx, TyCtxt};
+use rustc_middle::ty::{DeducedParamAttrs, GeneratorDiagnosticData, ParameterizedOverTcx, TyCtxt};
use rustc_serialize::opaque::FileEncoder;
use rustc_session::config::SymbolManglingVersion;
use rustc_session::cstore::{CrateDepKind, ForeignModule, LinkagePreference, NativeLib};
@@ -249,7 +252,7 @@ pub(crate) struct CrateRoot {
def_path_hash_map: LazyValue<DefPathHashMapRef<'static>>,
- source_map: LazyArray<rustc_span::SourceFile>,
+ source_map: LazyTable<u32, LazyValue<rustc_span::SourceFile>>,
compiler_builtins: bool,
needs_allocator: bool,
@@ -333,16 +336,16 @@ macro_rules! define_tables {
}
define_tables! {
- kind: Table<DefIndex, LazyValue<EntryKind>>,
attributes: Table<DefIndex, LazyArray<ast::Attribute>>,
children: Table<DefIndex, LazyArray<DefIndex>>,
opt_def_kind: Table<DefIndex, DefKind>,
- visibility: Table<DefIndex, LazyValue<ty::Visibility>>,
+ visibility: Table<DefIndex, LazyValue<ty::Visibility<DefIndex>>>,
def_span: Table<DefIndex, LazyValue<Span>>,
def_ident_span: Table<DefIndex, LazyValue<Span>>,
lookup_stability: Table<DefIndex, LazyValue<attr::Stability>>,
lookup_const_stability: Table<DefIndex, LazyValue<attr::ConstStability>>,
+ lookup_default_body_stability: Table<DefIndex, LazyValue<attr::DefaultBodyStability>>,
lookup_deprecation_entry: Table<DefIndex, LazyValue<attr::Deprecation>>,
// As an optimization, a missing entry indicates an empty `&[]`.
explicit_item_bounds: Table<DefIndex, LazyArray<(ty::Predicate<'static>, Span)>>,
@@ -357,6 +360,7 @@ define_tables! {
codegen_fn_attrs: Table<DefIndex, LazyValue<CodegenFnAttrs>>,
impl_trait_ref: Table<DefIndex, LazyValue<ty::TraitRef<'static>>>,
const_param_default: Table<DefIndex, LazyValue<rustc_middle::ty::Const<'static>>>,
+ object_lifetime_default: Table<DefIndex, LazyValue<ObjectLifetimeDefault>>,
optimized_mir: Table<DefIndex, LazyValue<mir::Body<'static>>>,
mir_for_ctfe: Table<DefIndex, LazyValue<mir::Body<'static>>>,
promoted_mir: Table<DefIndex, LazyValue<IndexVec<mir::Promoted, mir::Body<'static>>>>,
@@ -380,6 +384,7 @@ define_tables! {
inherent_impls: Table<DefIndex, LazyArray<DefIndex>>,
expn_that_defined: Table<DefIndex, LazyValue<ExpnId>>,
unused_generic_params: Table<DefIndex, LazyValue<FiniteBitSet<u32>>>,
+ params_in_repr: Table<DefIndex, LazyValue<BitSet<u32>>>,
repr_options: Table<DefIndex, LazyValue<ReprOptions>>,
// `def_keys` and `def_path_hashes` represent a lazy version of a
// `DefPathTable`. This allows us to avoid deserializing an entire
@@ -390,39 +395,16 @@ define_tables! {
proc_macro_quoted_spans: Table<usize, LazyValue<Span>>,
generator_diagnostic_data: Table<DefIndex, LazyValue<GeneratorDiagnosticData<'static>>>,
may_have_doc_links: Table<DefIndex, ()>,
-}
-
-#[derive(Copy, Clone, MetadataEncodable, MetadataDecodable)]
-enum EntryKind {
- AnonConst,
- Const,
- Static,
- ForeignStatic,
- ForeignMod,
- ForeignType,
- GlobalAsm,
- Type,
- TypeParam,
- ConstParam,
- OpaqueTy,
- Enum,
- Field,
- Variant(LazyValue<VariantData>),
- Struct(LazyValue<VariantData>),
- Union(LazyValue<VariantData>),
- Fn,
- ForeignFn,
- Mod(LazyArray<ModChild>),
- MacroDef(LazyValue<ast::MacArgs>, /*macro_rules*/ bool),
- ProcMacro(MacroKind),
- Closure,
- Generator,
- Trait,
- Impl,
- AssocFn { container: ty::AssocItemContainer, has_self: bool },
- AssocType(ty::AssocItemContainer),
- AssocConst(ty::AssocItemContainer),
- TraitAlias,
+ variant_data: Table<DefIndex, LazyValue<VariantData>>,
+ assoc_container: Table<DefIndex, ty::AssocItemContainer>,
+ // The slot is full when the macro is a `macro_rules!` macro.
+ macro_rules: Table<DefIndex, ()>,
+ macro_definition: Table<DefIndex, LazyValue<ast::MacArgs>>,
+ proc_macro: Table<DefIndex, MacroKind>,
+ module_reexports: Table<DefIndex, LazyArray<ModChild>>,
+ deduced_param_attrs: Table<DefIndex, LazyArray<DeducedParamAttrs>>,
+
+ trait_impl_trait_tys: Table<DefIndex, LazyValue<FxHashMap<DefId, Ty<'static>>>>,
}
#[derive(TyEncodable, TyDecodable)]
@@ -444,6 +426,11 @@ const TAG_VALID_SPAN_LOCAL: u8 = 0;
const TAG_VALID_SPAN_FOREIGN: u8 = 1;
const TAG_PARTIAL_SPAN: u8 = 2;
+// Tags for encoding `Symbol`s.
+const SYMBOL_STR: u8 = 0;
+const SYMBOL_OFFSET: u8 = 1;
+const SYMBOL_PREINTERNED: u8 = 2;
+
pub fn provide(providers: &mut Providers) {
encoder::provide(providers);
decoder::provide(providers);
@@ -451,7 +438,6 @@ pub fn provide(providers: &mut Providers) {
trivially_parameterized_over_tcx! {
VariantData,
- EntryKind,
RawDefId,
TraitImpls,
IncoherentImpls,
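
The table changes above drop the catch-all `EntryKind` enum in favor of one sparse table per property (`variant_data`, `macro_rules`, `proc_macro`, and so on). As a rough standalone sketch of that pattern only -- toy `BTreeMap` tables and invented field names, not the real rmeta `Table` type -- the refactor looks like this:

use std::collections::BTreeMap;

// Toy stand-ins for DefIndex and the data the old EntryKind variants carried.
type DefIndex = u32;

#[derive(Debug, Clone)]
struct VariantData {
    ctor: Option<DefIndex>,
}

// Before: a single enum bundled every per-definition fact into one table slot.
#[allow(dead_code)]
enum EntryKind {
    Struct(VariantData),
    Fn,
    MacroDef { macro_rules: bool },
}

// After: each fact gets its own sparse table; a missing entry simply means
// "this definition has no such property".
#[derive(Default)]
struct Tables {
    variant_data: BTreeMap<DefIndex, VariantData>,
    macro_rules: BTreeMap<DefIndex, ()>, // presence-only table
}

fn main() {
    let mut tables = Tables::default();
    tables.variant_data.insert(0, VariantData { ctor: Some(1) });
    tables.macro_rules.insert(2, ());

    // Decoders now ask targeted questions instead of matching on a big enum.
    assert!(tables.macro_rules.contains_key(&2));
    assert_eq!(tables.variant_data[&0].ctor, Some(1));
}

The presence-only `macro_rules: Table<DefIndex, ()>` entry in the diff works the same way: the key being present is the boolean.
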
diff --git a/compiler/rustc_metadata/src/rmeta/table.rs b/compiler/rustc_metadata/src/rmeta/table.rs
index 21841ae25..e7c1abd12 100644
--- a/compiler/rustc_metadata/src/rmeta/table.rs
+++ b/compiler/rustc_metadata/src/rmeta/table.rs
@@ -10,7 +10,6 @@ use rustc_span::hygiene::MacroKind;
use std::convert::TryInto;
use std::marker::PhantomData;
use std::num::NonZeroUsize;
-use tracing::debug;
/// Helper trait, for encoding to, and decoding from, a fixed number of bytes.
/// Used mainly for Lazy positions and lengths.
@@ -51,7 +50,7 @@ macro_rules! fixed_size_enum {
}
match b[0] - 1 {
$(${index()} => Some($($pat)*),)*
- _ => panic!("Unexpected ImplPolarity code: {:?}", b[0]),
+ _ => panic!("Unexpected {} code: {:?}", stringify!($ty), b[0]),
}
}
@@ -91,6 +90,7 @@ fixed_size_enum! {
( AnonConst )
( InlineConst )
( OpaqueTy )
+ ( ImplTraitPlaceholder )
( Field )
( LifetimeParam )
( GlobalAsm )
@@ -141,6 +141,21 @@ fixed_size_enum! {
}
}
+fixed_size_enum! {
+ ty::AssocItemContainer {
+ ( TraitContainer )
+ ( ImplContainer )
+ }
+}
+
+fixed_size_enum! {
+ MacroKind {
+ ( Attr )
+ ( Bang )
+ ( Derive )
+ }
+}
+
// We directly encode `DefPathHash` because a `LazyValue` would incur a 25% cost.
impl FixedSizeEncoding for Option<DefPathHash> {
type ByteArray = [u8; 16];
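
For context on the `fixed_size_enum!` additions above: each optional enum value is packed into a single byte, with 0 reserved for `None` and `b[0] - 1` selecting the variant, which is why the panic message now reports the concrete type via `stringify!($ty)`. A simplified standalone encoder/decoder for one of the newly listed enums (hand-written here; the real code is macro-generated):

#[derive(Debug, Clone, Copy, PartialEq)]
enum MacroKind { Attr, Bang, Derive }

// Encode Option<MacroKind> into one byte: 0 means None, n + 1 means variant n.
fn encode(value: Option<MacroKind>) -> u8 {
    match value {
        None => 0,
        Some(MacroKind::Attr) => 1,
        Some(MacroKind::Bang) => 2,
        Some(MacroKind::Derive) => 3,
    }
}

fn decode(b: u8) -> Option<MacroKind> {
    if b == 0 {
        return None;
    }
    match b - 1 {
        0 => Some(MacroKind::Attr),
        1 => Some(MacroKind::Bang),
        2 => Some(MacroKind::Derive),
        // Mirrors the improved panic: report which type had the bad code.
        _ => panic!("Unexpected MacroKind code: {:?}", b),
    }
}

fn main() {
    assert_eq!(decode(encode(Some(MacroKind::Bang))), Some(MacroKind::Bang));
    assert_eq!(decode(0), None);
}
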
diff --git a/compiler/rustc_middle/Cargo.toml b/compiler/rustc_middle/Cargo.toml
index 008d2c709..de916ea8c 100644
--- a/compiler/rustc_middle/Cargo.toml
+++ b/compiler/rustc_middle/Cargo.toml
@@ -7,34 +7,33 @@ edition = "2021"
doctest = false
[dependencies]
-rustc_arena = { path = "../rustc_arena" }
bitflags = "1.2.1"
+chalk-ir = "0.80.0"
either = "1.5.0"
gsgdt = "0.1.2"
-tracing = "0.1"
-rustc-rayon = { version = "0.4.0", optional = true }
-rustc-rayon-core = { version = "0.4.0", optional = true }
polonius-engine = "0.13.0"
rustc_apfloat = { path = "../rustc_apfloat" }
+rustc_arena = { path = "../rustc_arena" }
+rustc_ast = { path = "../rustc_ast" }
rustc_attr = { path = "../rustc_attr" }
-rustc_feature = { path = "../rustc_feature" }
-rustc_hir = { path = "../rustc_hir" }
-rustc_target = { path = "../rustc_target" }
-rustc_macros = { path = "../rustc_macros" }
rustc_data_structures = { path = "../rustc_data_structures" }
-rustc_query_system = { path = "../rustc_query_system" }
rustc_errors = { path = "../rustc_errors" }
+rustc_feature = { path = "../rustc_feature" }
rustc_graphviz = { path = "../rustc_graphviz" }
+rustc_hir = { path = "../rustc_hir" }
rustc_index = { path = "../rustc_index" }
+rustc_macros = { path = "../rustc_macros" }
+rustc_query_system = { path = "../rustc_query_system" }
+rustc-rayon-core = { version = "0.4.0", optional = true }
+rustc-rayon = { version = "0.4.0", optional = true }
rustc_serialize = { path = "../rustc_serialize" }
-rustc_ast = { path = "../rustc_ast" }
-rustc_span = { path = "../rustc_span" }
-chalk-ir = "0.80.0"
-smallvec = { version = "1.8.1", features = ["union", "may_dangle"] }
rustc_session = { path = "../rustc_session" }
+rustc_span = { path = "../rustc_span" }
+rustc_target = { path = "../rustc_target" }
rustc_type_ir = { path = "../rustc_type_ir" }
-rand = "0.8.4"
-rand_xoshiro = "0.6.0"
+smallvec = { version = "1.8.1", features = ["union", "may_dangle"] }
+thin-vec = "0.2.8"
+tracing = "0.1"
[features]
rustc_use_parallel_compiler = ["rustc-rayon", "rustc-rayon-core"]
diff --git a/compiler/rustc_middle/benches/lib.rs b/compiler/rustc_middle/benches/lib.rs
deleted file mode 100644
index 237751bcb..000000000
--- a/compiler/rustc_middle/benches/lib.rs
+++ /dev/null
@@ -1,54 +0,0 @@
-#![feature(test)]
-
-extern crate test;
-
-use test::Bencher;
-
-// Static/dynamic method dispatch
-
-struct Struct {
- field: isize,
-}
-
-trait Trait {
- fn method(&self) -> isize;
-}
-
-impl Trait for Struct {
- fn method(&self) -> isize {
- self.field
- }
-}
-
-#[bench]
-fn trait_vtable_method_call(b: &mut Bencher) {
- let s = Struct { field: 10 };
- let t = &s as &dyn Trait;
- b.iter(|| t.method());
-}
-
-#[bench]
-fn trait_static_method_call(b: &mut Bencher) {
- let s = Struct { field: 10 };
- b.iter(|| s.method());
-}
-
-// Overhead of various match forms
-
-#[bench]
-fn option_some(b: &mut Bencher) {
- let x = Some(10);
- b.iter(|| match x {
- Some(y) => y,
- None => 11,
- });
-}
-
-#[bench]
-fn vec_pattern(b: &mut Bencher) {
- let x = [1, 2, 3, 4, 5, 6];
- b.iter(|| match x {
- [1, 2, 3, ..] => 10,
- _ => 11,
- });
-}
diff --git a/compiler/rustc_middle/src/arena.rs b/compiler/rustc_middle/src/arena.rs
index b94de537d..f8aae86fe 100644
--- a/compiler/rustc_middle/src/arena.rs
+++ b/compiler/rustc_middle/src/arena.rs
@@ -77,7 +77,7 @@ macro_rules! arena_types {
rustc_middle::infer::canonical::QueryResponse<'tcx, rustc_middle::ty::Ty<'tcx>>
>,
[] all_traits: Vec<rustc_hir::def_id::DefId>,
- [] privacy_access_levels: rustc_middle::middle::privacy::AccessLevels,
+ [] effective_visibilities: rustc_middle::middle::privacy::EffectiveVisibilities,
[] foreign_module: rustc_session::cstore::ForeignModule,
[] foreign_modules: Vec<rustc_session::cstore::ForeignModule>,
[] upvars_mentioned: rustc_data_structures::fx::FxIndexMap<rustc_hir::HirId, rustc_hir::Upvar>,
@@ -96,11 +96,14 @@ macro_rules! arena_types {
// since we need to allocate this type on both the `rustc_hir` arena
// (during lowering) and the `librustc_middle` arena (for decoding MIR)
[decode] asm_template: rustc_ast::InlineAsmTemplatePiece,
- [decode] used_trait_imports: rustc_data_structures::fx::FxHashSet<rustc_hir::def_id::LocalDefId>,
+ [decode] used_trait_imports: rustc_data_structures::unord::UnordSet<rustc_hir::def_id::LocalDefId>,
[decode] is_late_bound_map: rustc_data_structures::fx::FxIndexSet<rustc_hir::def_id::LocalDefId>,
[decode] impl_source: rustc_middle::traits::ImplSource<'tcx, ()>,
- [] dep_kind: rustc_middle::dep_graph::DepKindStruct,
+ [] dep_kind: rustc_middle::dep_graph::DepKindStruct<'tcx>,
+
+ [decode] trait_impl_trait_tys: rustc_data_structures::fx::FxHashMap<rustc_hir::def_id::DefId, rustc_middle::ty::Ty<'tcx>>,
+ [] bit_set_u32: rustc_index::bit_set::BitSet<u32>,
]);
)
}
diff --git a/compiler/rustc_middle/src/dep_graph/dep_node.rs b/compiler/rustc_middle/src/dep_graph/dep_node.rs
index 2d095438f..6b5568269 100644
--- a/compiler/rustc_middle/src/dep_graph/dep_node.rs
+++ b/compiler/rustc_middle/src/dep_graph/dep_node.rs
@@ -62,93 +62,18 @@ use crate::ty::TyCtxt;
use rustc_data_structures::fingerprint::Fingerprint;
use rustc_hir::def_id::{CrateNum, DefId, LocalDefId};
use rustc_hir::definitions::DefPathHash;
-use rustc_hir::HirId;
+use rustc_hir::{HirId, ItemLocalId, OwnerId};
use rustc_query_system::dep_graph::FingerprintStyle;
use rustc_span::symbol::Symbol;
use std::hash::Hash;
pub use rustc_query_system::dep_graph::{DepContext, DepNodeParams};
-/// This struct stores metadata about each DepKind.
-///
-/// Information is retrieved by indexing the `DEP_KINDS` array using the integer value
-/// of the `DepKind`. Overall, this allows us to implement `DepContext` using this manual
-/// jump table instead of large matches.
-pub struct DepKindStruct {
- /// Anonymous queries cannot be replayed from one compiler invocation to the next.
- /// When their result is needed, it is recomputed. They are useful for fine-grained
- /// dependency tracking, and caching within one compiler invocation.
- pub is_anon: bool,
-
- /// Eval-always queries do not track their dependencies, and are always recomputed, even if
- /// their inputs have not changed since the last compiler invocation. The result is still
- /// cached within one compiler invocation.
- pub is_eval_always: bool,
-
- /// Whether the query key can be recovered from the hashed fingerprint.
- /// See [DepNodeParams] trait for the behaviour of each key type.
- pub fingerprint_style: FingerprintStyle,
-
- /// The red/green evaluation system will try to mark a specific DepNode in the
- /// dependency graph as green by recursively trying to mark the dependencies of
- /// that `DepNode` as green. While doing so, it will sometimes encounter a `DepNode`
- /// where we don't know if it is red or green and we therefore actually have
- /// to recompute its value in order to find out. Since the only piece of
- /// information that we have at that point is the `DepNode` we are trying to
- /// re-evaluate, we need some way to re-run a query from just that. This is what
- /// `force_from_dep_node()` implements.
- ///
- /// In the general case, a `DepNode` consists of a `DepKind` and an opaque
- /// GUID/fingerprint that will uniquely identify the node. This GUID/fingerprint
- /// is usually constructed by computing a stable hash of the query-key that the
- /// `DepNode` corresponds to. Consequently, it is not in general possible to go
- /// back from hash to query-key (since hash functions are not reversible). For
- /// this reason `force_from_dep_node()` is expected to fail from time to time
- /// because we just cannot find out, from the `DepNode` alone, what the
- /// corresponding query-key is and therefore cannot re-run the query.
- ///
- /// The system deals with this case by letting `try_mark_green` fail, which forces
- /// the root query to be re-evaluated.
- ///
- /// Now, if `force_from_dep_node()` would always fail, it would be pretty useless.
- /// Fortunately, we can use some contextual information that will allow us to
- /// reconstruct query-keys for certain kinds of `DepNode`s. In particular, we
- /// enforce by construction that the GUID/fingerprint of certain `DepNode`s is a
- /// valid `DefPathHash`. Since we also always build a huge table that maps every
- /// `DefPathHash` in the current codebase to the corresponding `DefId`, we have
- /// everything we need to re-run the query.
- ///
- /// Take the `mir_promoted` query as an example. Like many other queries, it
- /// just has a single parameter: the `DefId` of the item it will compute the
- /// validated MIR for. Now, when we call `force_from_dep_node()` on a `DepNode`
- /// with kind `MirValidated`, we know that the GUID/fingerprint of the `DepNode`
- /// is actually a `DefPathHash`, and can therefore just look up the corresponding
- /// `DefId` in `tcx.def_path_hash_to_def_id`.
- pub force_from_dep_node: Option<fn(tcx: TyCtxt<'_>, dep_node: DepNode) -> bool>,
-
- /// Invoke a query to put the on-disk cached value in memory.
- pub try_load_from_on_disk_cache: Option<fn(TyCtxt<'_>, DepNode)>,
-}
-
-impl DepKind {
- #[inline(always)]
- pub fn fingerprint_style(self, tcx: TyCtxt<'_>) -> FingerprintStyle {
- // Only fetch the DepKindStruct once.
- let data = tcx.query_kind(self);
- if data.is_anon {
- return FingerprintStyle::Opaque;
- }
- data.fingerprint_style
- }
-}
-
macro_rules! define_dep_nodes {
- (<$tcx:tt>
- $(
- [$($attrs:tt)*]
- $variant:ident $(( $tuple_arg_ty:ty $(,)? ))*
- ,)*
- ) => (
+ (
+ $($(#[$attr:meta])*
+ [$($modifiers:tt)*] fn $variant:ident($($K:tt)*) -> $V:ty,)*) => {
+
#[macro_export]
macro_rules! make_dep_kind_array {
($mod:ident) => {[ $($mod::$variant()),* ]};
@@ -158,10 +83,10 @@ macro_rules! define_dep_nodes {
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, Encodable, Decodable)]
#[allow(non_camel_case_types)]
pub enum DepKind {
- $($variant),*
+ $( $( #[$attr] )* $variant),*
}
- fn dep_kind_from_label_string(label: &str) -> Result<DepKind, ()> {
+ pub(super) fn dep_kind_from_label_string(label: &str) -> Result<DepKind, ()> {
match label {
$(stringify!($variant) => Ok(DepKind::$variant),)*
_ => Err(()),
@@ -176,24 +101,17 @@ macro_rules! define_dep_nodes {
pub const $variant: &str = stringify!($variant);
)*
}
- );
+ };
}
-rustc_dep_node_append!([define_dep_nodes!][ <'tcx>
- // We use this for most things when incr. comp. is turned off.
- [] Null,
-
- // We use this to create a forever-red node.
- [] Red,
-
- [anon] TraitSelect,
-
- // WARNING: if `Symbol` is changed, make sure you update `make_compile_codegen_unit` below.
- [] CompileCodegenUnit(Symbol),
-
- // WARNING: if `MonoItem` is changed, make sure you update `make_compile_mono_item` below.
- // Only used by rustc_codegen_cranelift
- [] CompileMonoItem(MonoItem),
+rustc_query_append!(define_dep_nodes![
+ /// We use this for most things when incr. comp. is turned off.
+ [] fn Null() -> (),
+ /// We use this to create a forever-red node.
+ [] fn Red() -> (),
+ [] fn TraitSelect() -> (),
+ [] fn CompileCodegenUnit() -> (),
+ [] fn CompileMonoItem() -> (),
]);
// WARNING: `construct` is generic and does not know that `CompileCodegenUnit` takes `Symbol`s as keys.
@@ -223,11 +141,6 @@ static_assert_size!(DepNode, 18);
static_assert_size!(DepNode, 24);
pub trait DepNodeExt: Sized {
- /// Construct a DepNode from the given DepKind and DefPathHash. This
- /// method will assert that the given DepKind actually requires a
- /// single DefId/DefPathHash parameter.
- fn from_def_path_hash(tcx: TyCtxt<'_>, def_path_hash: DefPathHash, kind: DepKind) -> Self;
-
/// Extracts the DefId corresponding to this DepNode. This will work
/// if two conditions are met:
///
@@ -252,14 +165,6 @@ pub trait DepNodeExt: Sized {
}
impl DepNodeExt for DepNode {
- /// Construct a DepNode from the given DepKind and DefPathHash. This
- /// method will assert that the given DepKind actually requires a
- /// single DefId/DefPathHash parameter.
- fn from_def_path_hash(tcx: TyCtxt<'_>, def_path_hash: DefPathHash, kind: DepKind) -> DepNode {
- debug_assert!(kind.fingerprint_style(tcx) == FingerprintStyle::DefPathHash);
- DepNode { kind, hash: def_path_hash.0.into() }
- }
-
/// Extracts the DefId corresponding to this DepNode. This will work
/// if two conditions are met:
///
@@ -271,7 +176,7 @@ impl DepNodeExt for DepNode {
/// refers to something from the previous compilation session that
/// has been removed.
fn extract_def_id<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Option<DefId> {
- if self.kind.fingerprint_style(tcx) == FingerprintStyle::DefPathHash {
+ if tcx.fingerprint_style(self.kind) == FingerprintStyle::DefPathHash {
Some(tcx.def_path_hash_to_def_id(DefPathHash(self.hash.into()), &mut || {
panic!("Failed to extract DefId: {:?} {}", self.kind, self.hash)
}))
@@ -288,8 +193,8 @@ impl DepNodeExt for DepNode {
) -> Result<DepNode, ()> {
let kind = dep_kind_from_label_string(label)?;
- match kind.fingerprint_style(tcx) {
- FingerprintStyle::Opaque => Err(()),
+ match tcx.fingerprint_style(kind) {
+ FingerprintStyle::Opaque | FingerprintStyle::HirId => Err(()),
FingerprintStyle::Unit => Ok(DepNode::new_no_params(tcx, kind)),
FingerprintStyle::DefPathHash => {
Ok(DepNode::from_def_path_hash(tcx, def_path_hash, kind))
@@ -364,6 +269,28 @@ impl<'tcx> DepNodeParams<TyCtxt<'tcx>> for LocalDefId {
}
}
+impl<'tcx> DepNodeParams<TyCtxt<'tcx>> for OwnerId {
+ #[inline(always)]
+ fn fingerprint_style() -> FingerprintStyle {
+ FingerprintStyle::DefPathHash
+ }
+
+ #[inline(always)]
+ fn to_fingerprint(&self, tcx: TyCtxt<'tcx>) -> Fingerprint {
+ self.to_def_id().to_fingerprint(tcx)
+ }
+
+ #[inline(always)]
+ fn to_debug_str(&self, tcx: TyCtxt<'tcx>) -> String {
+ self.to_def_id().to_debug_str(tcx)
+ }
+
+ #[inline(always)]
+ fn recover(tcx: TyCtxt<'tcx>, dep_node: &DepNode) -> Option<Self> {
+ dep_node.extract_def_id(tcx).map(|id| OwnerId { def_id: id.expect_local() })
+ }
+}
+
impl<'tcx> DepNodeParams<TyCtxt<'tcx>> for CrateNum {
#[inline(always)]
fn fingerprint_style() -> FingerprintStyle {
@@ -417,7 +344,7 @@ impl<'tcx> DepNodeParams<TyCtxt<'tcx>> for (DefId, DefId) {
impl<'tcx> DepNodeParams<TyCtxt<'tcx>> for HirId {
#[inline(always)]
fn fingerprint_style() -> FingerprintStyle {
- FingerprintStyle::Opaque
+ FingerprintStyle::HirId
}
// We actually would not need to specialize the implementation of this
@@ -426,10 +353,36 @@ impl<'tcx> DepNodeParams<TyCtxt<'tcx>> for HirId {
#[inline(always)]
fn to_fingerprint(&self, tcx: TyCtxt<'tcx>) -> Fingerprint {
let HirId { owner, local_id } = *self;
-
let def_path_hash = tcx.def_path_hash(owner.to_def_id());
- let local_id = Fingerprint::from_smaller_hash(local_id.as_u32().into());
+ Fingerprint::new(
+ // `owner` is local, so is completely defined by the local hash
+ def_path_hash.local_hash(),
+ local_id.as_u32().into(),
+ )
+ }
- def_path_hash.0.combine(local_id)
+ #[inline(always)]
+ fn to_debug_str(&self, tcx: TyCtxt<'tcx>) -> String {
+ let HirId { owner, local_id } = *self;
+ format!("{}.{}", tcx.def_path_str(owner.to_def_id()), local_id.as_u32())
+ }
+
+ #[inline(always)]
+ fn recover(tcx: TyCtxt<'tcx>, dep_node: &DepNode) -> Option<Self> {
+ if tcx.fingerprint_style(dep_node.kind) == FingerprintStyle::HirId {
+ let (local_hash, local_id) = Fingerprint::from(dep_node.hash).as_value();
+ let def_path_hash = DefPathHash::new(tcx.sess.local_stable_crate_id(), local_hash);
+ let def_id = tcx
+ .def_path_hash_to_def_id(def_path_hash, &mut || {
+ panic!("Failed to extract HirId: {:?} {}", dep_node.kind, dep_node.hash)
+ })
+ .expect_local();
+ let local_id = local_id
+ .try_into()
+ .unwrap_or_else(|_| panic!("local id should be u32, found {:?}", local_id));
+ Some(HirId { owner: OwnerId { def_id }, local_id: ItemLocalId::from_u32(local_id) })
+ } else {
+ None
+ }
}
}
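
The reworked `HirId` fingerprinting above stores the owner's local `DefPathHash` in one 64-bit half of the fingerprint and the item-local id in the other, so `recover` can split them apart again. A standalone model of that round trip, using a plain `u128` in place of rustc's `Fingerprint` (the half ordering here is illustrative, not necessarily the real layout):

// Pack (owner local hash, item-local id) into one 128-bit value, mirroring how
// the HirId impl above builds a Fingerprint from two 64-bit halves.
fn to_fingerprint(local_hash: u64, local_id: u32) -> u128 {
    ((local_hash as u128) << 64) | local_id as u128
}

// Split it back apart, as `recover` does before rebuilding the HirId.
fn from_fingerprint(fp: u128) -> (u64, u32) {
    let local_hash = (fp >> 64) as u64;
    let local_id = fp as u64; // low half
    (local_hash, u32::try_from(local_id).expect("local id should fit in u32"))
}

fn main() {
    let fp = to_fingerprint(0xDEAD_BEEF_CAFE_F00D, 42);
    assert_eq!(from_fingerprint(fp), (0xDEAD_BEEF_CAFE_F00D, 42));
}
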
diff --git a/compiler/rustc_middle/src/dep_graph/mod.rs b/compiler/rustc_middle/src/dep_graph/mod.rs
index c8b3b52b0..2e62bebc8 100644
--- a/compiler/rustc_middle/src/dep_graph/mod.rs
+++ b/compiler/rustc_middle/src/dep_graph/mod.rs
@@ -11,15 +11,17 @@ pub use rustc_query_system::dep_graph::{
SerializedDepNodeIndex, WorkProduct, WorkProductId,
};
-pub use dep_node::{label_strs, DepKind, DepKindStruct, DepNode, DepNodeExt};
+pub use dep_node::{label_strs, DepKind, DepNode, DepNodeExt};
pub(crate) use dep_node::{make_compile_codegen_unit, make_compile_mono_item};
pub type DepGraph = rustc_query_system::dep_graph::DepGraph<DepKind>;
+
pub type TaskDeps = rustc_query_system::dep_graph::TaskDeps<DepKind>;
pub type TaskDepsRef<'a> = rustc_query_system::dep_graph::TaskDepsRef<'a, DepKind>;
pub type DepGraphQuery = rustc_query_system::dep_graph::DepGraphQuery<DepKind>;
pub type SerializedDepGraph = rustc_query_system::dep_graph::SerializedDepGraph<DepKind>;
pub type EdgeFilter = rustc_query_system::dep_graph::debug::EdgeFilter<DepKind>;
+pub type DepKindStruct<'tcx> = rustc_query_system::dep_graph::DepKindStruct<TyCtxt<'tcx>>;
impl rustc_query_system::dep_graph::DepKind for DepKind {
const NULL: Self = DepKind::Null;
@@ -91,50 +93,8 @@ impl<'tcx> DepContext for TyCtxt<'tcx> {
self.sess
}
- #[inline(always)]
- fn fingerprint_style(&self, kind: DepKind) -> rustc_query_system::dep_graph::FingerprintStyle {
- kind.fingerprint_style(*self)
- }
-
- #[inline(always)]
- fn is_eval_always(&self, kind: DepKind) -> bool {
- self.query_kind(kind).is_eval_always
- }
-
- fn try_force_from_dep_node(&self, dep_node: DepNode) -> bool {
- debug!("try_force_from_dep_node({:?}) --- trying to force", dep_node);
-
- // We must avoid ever having to call `force_from_dep_node()` for a
- // `DepNode::codegen_unit`:
- // Since we cannot reconstruct the query key of a `DepNode::codegen_unit`, we
- // would always end up having to evaluate the first caller of the
- // `codegen_unit` query that *is* reconstructible. This might very well be
- // the `compile_codegen_unit` query, thus re-codegenning the whole CGU just
- // to re-trigger calling the `codegen_unit` query with the right key. At
- // that point we would already have re-done all the work we are trying to
- // avoid doing in the first place.
- // The solution is simple: Just explicitly call the `codegen_unit` query for
- // each CGU, right after partitioning. This way `try_mark_green` will always
- // hit the cache instead of having to go through `force_from_dep_node`.
- // This assertion makes sure, we actually keep applying the solution above.
- debug_assert!(
- dep_node.kind != DepKind::codegen_unit,
- "calling force_from_dep_node() on DepKind::codegen_unit"
- );
-
- let cb = self.query_kind(dep_node.kind);
- if let Some(f) = cb.force_from_dep_node {
- f(*self, dep_node);
- true
- } else {
- false
- }
- }
-
- fn try_load_from_on_disk_cache(&self, dep_node: DepNode) {
- let cb = self.query_kind(dep_node.kind);
- if let Some(f) = cb.try_load_from_on_disk_cache {
- f(*self, dep_node)
- }
+ #[inline]
+ fn dep_kind_info(&self, dep_kind: DepKind) -> &DepKindStruct<'tcx> {
+ &self.query_kinds[dep_kind as usize]
}
}
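
The `DepContext` change above replaces several per-kind callbacks (`fingerprint_style`, `is_eval_always`, `try_force_from_dep_node`, ...) with a single `dep_kind_info` lookup into a table indexed by the `DepKind` discriminant. A tiny standalone version of that jump-table dispatch (toy kinds and fields, not the real `DepKindStruct`):

// Per-kind metadata, in the spirit of DepKindStruct.
struct KindInfo {
    is_anon: bool,
    force: Option<fn() -> bool>,
}

#[derive(Clone, Copy)]
#[allow(dead_code)]
enum DepKind {
    Null = 0,
    Red = 1,
    TraitSelect = 2,
}

fn force_trait_select() -> bool {
    true
}

// One table indexed by the enum discriminant replaces a match in every accessor.
static KINDS: [KindInfo; 3] = [
    KindInfo { is_anon: false, force: None }, // Null
    KindInfo { is_anon: false, force: None }, // Red
    KindInfo { is_anon: true, force: Some(force_trait_select) }, // TraitSelect
];

fn dep_kind_info(kind: DepKind) -> &'static KindInfo {
    &KINDS[kind as usize]
}

fn main() {
    let info = dep_kind_info(DepKind::TraitSelect);
    assert!(info.is_anon);
    assert_eq!(info.force.map(|f| f()), Some(true));
}
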
diff --git a/compiler/rustc_middle/src/error.rs b/compiler/rustc_middle/src/error.rs
new file mode 100644
index 000000000..a7a7ac059
--- /dev/null
+++ b/compiler/rustc_middle/src/error.rs
@@ -0,0 +1,57 @@
+use rustc_macros::Diagnostic;
+use rustc_span::Span;
+
+use crate::ty::Ty;
+
+#[derive(Diagnostic)]
+#[diag(middle_drop_check_overflow, code = "E0320")]
+#[note]
+pub struct DropCheckOverflow<'tcx> {
+ #[primary_span]
+ pub span: Span,
+ pub ty: Ty<'tcx>,
+ pub overflow_ty: Ty<'tcx>,
+}
+
+#[derive(Diagnostic)]
+#[diag(middle_opaque_hidden_type_mismatch)]
+pub struct OpaqueHiddenTypeMismatch<'tcx> {
+ pub self_ty: Ty<'tcx>,
+ pub other_ty: Ty<'tcx>,
+ #[primary_span]
+ #[label]
+ pub other_span: Span,
+ #[subdiagnostic]
+ pub sub: TypeMismatchReason,
+}
+
+#[derive(Subdiagnostic)]
+pub enum TypeMismatchReason {
+ #[label(middle_conflict_types)]
+ ConflictType {
+ #[primary_span]
+ span: Span,
+ },
+ #[note(middle_previous_use_here)]
+ PreviousUse {
+ #[primary_span]
+ span: Span,
+ },
+}
+
+#[derive(Diagnostic)]
+#[diag(middle_limit_invalid)]
+pub struct LimitInvalid<'a> {
+ #[primary_span]
+ pub span: Span,
+ #[label]
+ pub value_span: Span,
+ pub error_str: &'a str,
+}
+
+#[derive(Diagnostic)]
+#[diag(middle_const_eval_non_int)]
+pub struct ConstEvalNonIntError {
+ #[primary_span]
+ pub span: Span,
+}
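
The new `rustc_middle::error` module above continues the migration from `sess.fatal(&format!(...))` calls to typed diagnostic structs, as seen earlier with `FailCreateFileEncoder` and friends; the derive wires each struct to a Fluent slug such as `middle_limit_invalid`. A standalone approximation of the pattern without `rustc_macros` (toy session, invented message text):

use std::fmt;

// A typed error struct stands in for a #[derive(Diagnostic)] item: its fields are
// the message arguments, and the slug (e.g. middle_limit_invalid) would name the
// Fluent message. The text below is invented for this sketch.
struct LimitInvalid<'a> {
    error_str: &'a str,
}

impl fmt::Display for LimitInvalid<'_> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "`limit` must be a non-negative integer: {}", self.error_str)
    }
}

// Toy session: the real emit_err / emit_fatal take the struct, not a formatted
// string, so the message text and its translation live next to the error type.
struct Session;

impl Session {
    fn emit_err(&self, diag: impl fmt::Display) {
        eprintln!("error: {diag}");
    }
}

fn main() {
    let sess = Session;
    sess.emit_err(LimitInvalid { error_str: "not a number" });
}
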
diff --git a/compiler/rustc_middle/src/hir/map/mod.rs b/compiler/rustc_middle/src/hir/map/mod.rs
index 47b04c33e..83a4d16d7 100644
--- a/compiler/rustc_middle/src/hir/map/mod.rs
+++ b/compiler/rustc_middle/src/hir/map/mod.rs
@@ -14,31 +14,9 @@ use rustc_index::vec::Idx;
use rustc_middle::hir::nested_filter;
use rustc_span::def_id::StableCrateId;
use rustc_span::symbol::{kw, sym, Ident, Symbol};
-use rustc_span::Span;
+use rustc_span::{Span, DUMMY_SP};
use rustc_target::spec::abi::Abi;
-fn fn_decl<'hir>(node: Node<'hir>) -> Option<&'hir FnDecl<'hir>> {
- match node {
- Node::Item(Item { kind: ItemKind::Fn(sig, _, _), .. })
- | Node::TraitItem(TraitItem { kind: TraitItemKind::Fn(sig, _), .. })
- | Node::ImplItem(ImplItem { kind: ImplItemKind::Fn(sig, _), .. }) => Some(&sig.decl),
- Node::Expr(Expr { kind: ExprKind::Closure(Closure { fn_decl, .. }), .. })
- | Node::ForeignItem(ForeignItem { kind: ForeignItemKind::Fn(fn_decl, ..), .. }) => {
- Some(fn_decl)
- }
- _ => None,
- }
-}
-
-pub fn fn_sig<'hir>(node: Node<'hir>) -> Option<&'hir FnSig<'hir>> {
- match &node {
- Node::Item(Item { kind: ItemKind::Fn(sig, _, _), .. })
- | Node::TraitItem(TraitItem { kind: TraitItemKind::Fn(sig, _), .. })
- | Node::ImplItem(ImplItem { kind: ImplItemKind::Fn(sig, _), .. }) => Some(sig),
- _ => None,
- }
-}
-
#[inline]
pub fn associated_body<'hir>(node: Node<'hir>) -> Option<BodyId> {
match node {
@@ -83,7 +61,7 @@ pub struct ParentHirIterator<'hir> {
}
impl<'hir> Iterator for ParentHirIterator<'hir> {
- type Item = (HirId, Node<'hir>);
+ type Item = HirId;
fn next(&mut self) -> Option<Self::Item> {
if self.current_id == CRATE_HIR_ID {
@@ -99,10 +77,7 @@ impl<'hir> Iterator for ParentHirIterator<'hir> {
}
self.current_id = parent_id;
- if let Some(node) = self.map.find(parent_id) {
- return Some((parent_id, node));
- }
- // If this `HirId` doesn't have an entry, skip it and look for its `parent_id`.
+ return Some(parent_id);
}
}
}
@@ -115,7 +90,7 @@ pub struct ParentOwnerIterator<'hir> {
}
impl<'hir> Iterator for ParentOwnerIterator<'hir> {
- type Item = (LocalDefId, OwnerNode<'hir>);
+ type Item = (OwnerId, OwnerNode<'hir>);
fn next(&mut self) -> Option<Self::Item> {
if self.current_id.local_id.index() != 0 {
@@ -129,13 +104,13 @@ impl<'hir> Iterator for ParentOwnerIterator<'hir> {
}
loop {
// There are nodes that do not have entries, so we need to skip them.
- let parent_id = self.map.def_key(self.current_id.owner).parent;
+ let parent_id = self.map.def_key(self.current_id.owner.def_id).parent;
- let parent_id = parent_id.map_or(CRATE_HIR_ID.owner, |local_def_index| {
+ let parent_id = parent_id.map_or(CRATE_OWNER_ID, |local_def_index| {
let def_id = LocalDefId { local_def_index };
self.map.local_def_id_to_hir_id(def_id).owner
});
- self.current_id = HirId::make_owner(parent_id);
+ self.current_id = HirId::make_owner(parent_id.def_id);
// If this `HirId` doesn't have an entry, skip it and look for its `parent_id`.
if let Some(node) = self.map.tcx.hir_owner(self.current_id.owner) {
@@ -146,25 +121,30 @@ impl<'hir> Iterator for ParentOwnerIterator<'hir> {
}
impl<'hir> Map<'hir> {
+ #[inline]
pub fn krate(self) -> &'hir Crate<'hir> {
self.tcx.hir_crate(())
}
+ #[inline]
pub fn root_module(self) -> &'hir Mod<'hir> {
- match self.tcx.hir_owner(CRATE_DEF_ID).map(|o| o.node) {
+ match self.tcx.hir_owner(CRATE_OWNER_ID).map(|o| o.node) {
Some(OwnerNode::Crate(item)) => item,
_ => bug!(),
}
}
+ #[inline]
pub fn items(self) -> impl Iterator<Item = ItemId> + 'hir {
self.tcx.hir_crate_items(()).items.iter().copied()
}
+ #[inline]
pub fn module_items(self, module: LocalDefId) -> impl Iterator<Item = ItemId> + 'hir {
self.tcx.hir_module_items(module).items()
}
+ #[inline]
pub fn par_for_each_item(self, f: impl Fn(ItemId) + Sync + Send) {
par_for_each_in(&self.tcx.hir_crate_items(()).items[..], |id| f(*id));
}
@@ -203,7 +183,7 @@ impl<'hir> Map<'hir> {
#[inline]
pub fn opt_local_def_id(self, hir_id: HirId) -> Option<LocalDefId> {
if hir_id.local_id == ItemLocalId::new(0) {
- Some(hir_id.owner)
+ Some(hir_id.owner.def_id)
} else {
self.tcx
.hir_owner_nodes(hir_id.owner)
@@ -229,7 +209,13 @@ impl<'hir> Map<'hir> {
ItemKind::Fn(..) => DefKind::Fn,
ItemKind::Macro(_, macro_kind) => DefKind::Macro(macro_kind),
ItemKind::Mod(..) => DefKind::Mod,
- ItemKind::OpaqueTy(..) => DefKind::OpaqueTy,
+ ItemKind::OpaqueTy(ref opaque) => {
+ if opaque.in_trait {
+ DefKind::ImplTraitPlaceholder
+ } else {
+ DefKind::OpaqueTy
+ }
+ }
ItemKind::TyAlias(..) => DefKind::TyAlias,
ItemKind::Enum(..) => DefKind::Enum,
ItemKind::Struct(..) => DefKind::Struct,
@@ -255,7 +241,7 @@ impl<'hir> Map<'hir> {
Node::ImplItem(item) => match item.kind {
ImplItemKind::Const(..) => DefKind::AssocConst,
ImplItemKind::Fn(..) => DefKind::AssocFn,
- ImplItemKind::TyAlias(..) => DefKind::AssocTy,
+ ImplItemKind::Type(..) => DefKind::AssocTy,
},
Node::Variant(_) => DefKind::Variant,
Node::Ctor(variant_data) => {
@@ -297,6 +283,8 @@ impl<'hir> Map<'hir> {
| Node::Infer(_)
| Node::TraitRef(_)
| Node::Pat(_)
+ | Node::PatField(_)
+ | Node::ExprField(_)
| Node::Local(_)
| Node::Param(_)
| Node::Arm(_)
@@ -306,6 +294,9 @@ impl<'hir> Map<'hir> {
Some(def_kind)
}
+ /// Finds the id of the parent node to this one.
+ ///
+ /// If calling repeatedly and iterating over parents, prefer [`Map::parent_iter`].
pub fn find_parent_node(self, id: HirId) -> Option<HirId> {
if id.local_id == ItemLocalId::from_u32(0) {
Some(self.tcx.hir_owner_parent(id.owner))
@@ -313,6 +304,8 @@ impl<'hir> Map<'hir> {
let owner = self.tcx.hir_owner_nodes(id.owner).as_owner()?;
let node = owner.nodes[id.local_id].as_ref()?;
let hir_id = HirId { owner: id.owner, local_id: node.parent };
+ // HIR indexing should have checked that.
+ debug_assert_ne!(id.local_id, node.parent);
Some(hir_id)
}
}
@@ -356,24 +349,24 @@ impl<'hir> Map<'hir> {
}
pub fn get_generics(self, id: LocalDefId) -> Option<&'hir Generics<'hir>> {
- let node = self.tcx.hir_owner(id)?;
+ let node = self.tcx.hir_owner(OwnerId { def_id: id })?;
node.node.generics()
}
pub fn item(self, id: ItemId) -> &'hir Item<'hir> {
- self.tcx.hir_owner(id.def_id).unwrap().node.expect_item()
+ self.tcx.hir_owner(id.owner_id).unwrap().node.expect_item()
}
pub fn trait_item(self, id: TraitItemId) -> &'hir TraitItem<'hir> {
- self.tcx.hir_owner(id.def_id).unwrap().node.expect_trait_item()
+ self.tcx.hir_owner(id.owner_id).unwrap().node.expect_trait_item()
}
pub fn impl_item(self, id: ImplItemId) -> &'hir ImplItem<'hir> {
- self.tcx.hir_owner(id.def_id).unwrap().node.expect_impl_item()
+ self.tcx.hir_owner(id.owner_id).unwrap().node.expect_impl_item()
}
pub fn foreign_item(self, id: ForeignItemId) -> &'hir ForeignItem<'hir> {
- self.tcx.hir_owner(id.def_id).unwrap().node.expect_foreign_item()
+ self.tcx.hir_owner(id.owner_id).unwrap().node.expect_foreign_item()
}
pub fn body(self, id: BodyId) -> &'hir Body<'hir> {
@@ -382,7 +375,7 @@ impl<'hir> Map<'hir> {
pub fn fn_decl_by_hir_id(self, hir_id: HirId) -> Option<&'hir FnDecl<'hir>> {
if let Some(node) = self.find(hir_id) {
- fn_decl(node)
+ node.fn_decl()
} else {
bug!("no node for hir_id `{}`", hir_id)
}
@@ -390,15 +383,15 @@ impl<'hir> Map<'hir> {
pub fn fn_sig_by_hir_id(self, hir_id: HirId) -> Option<&'hir FnSig<'hir>> {
if let Some(node) = self.find(hir_id) {
- fn_sig(node)
+ node.fn_sig()
} else {
bug!("no node for hir_id `{}`", hir_id)
}
}
pub fn enclosing_body_owner(self, hir_id: HirId) -> LocalDefId {
- for (parent, _) in self.parent_iter(hir_id) {
- if let Some(body) = self.find(parent).map(associated_body).flatten() {
+ for (_, node) in self.parent_iter(hir_id) {
+ if let Some(body) = associated_body(node) {
return self.body_owner_def_id(body);
}
}
@@ -487,11 +480,13 @@ impl<'hir> Map<'hir> {
/// Returns an iterator of the `DefId`s for all body-owners in this
/// crate. If you would prefer to iterate over the bodies
/// themselves, you can do `self.hir().krate().body_ids.iter()`.
+ #[inline]
pub fn body_owners(self) -> impl Iterator<Item = LocalDefId> + 'hir {
self.tcx.hir_crate_items(()).body_owners.iter().copied()
}
- pub fn par_body_owners<F: Fn(LocalDefId) + Sync + Send>(self, f: F) {
+ #[inline]
+ pub fn par_body_owners(self, f: impl Fn(LocalDefId) + Sync + Send) {
par_for_each_in(&self.tcx.hir_crate_items(()).body_owners[..], |&def_id| f(def_id));
}
@@ -499,7 +494,9 @@ impl<'hir> Map<'hir> {
let def_kind = self.tcx.def_kind(def_id);
match def_kind {
DefKind::Trait | DefKind::TraitAlias => def_id,
- DefKind::TyParam | DefKind::ConstParam => self.tcx.local_parent(def_id),
+ DefKind::LifetimeParam | DefKind::TyParam | DefKind::ConstParam => {
+ self.tcx.local_parent(def_id)
+ }
_ => bug!("ty_param_owner: {:?} is a {:?} not a type parameter", def_id, def_kind),
}
}
@@ -508,7 +505,9 @@ impl<'hir> Map<'hir> {
let def_kind = self.tcx.def_kind(def_id);
match def_kind {
DefKind::Trait | DefKind::TraitAlias => kw::SelfUpper,
- DefKind::TyParam | DefKind::ConstParam => self.tcx.item_name(def_id.to_def_id()),
+ DefKind::LifetimeParam | DefKind::TyParam | DefKind::ConstParam => {
+ self.tcx.item_name(def_id.to_def_id())
+ }
_ => bug!("ty_param_name: {:?} is a {:?} not a type parameter", def_id, def_kind),
}
}
@@ -530,7 +529,7 @@ impl<'hir> Map<'hir> {
pub fn get_module(self, module: LocalDefId) -> (&'hir Mod<'hir>, Span, HirId) {
let hir_id = HirId::make_owner(module);
- match self.tcx.hir_owner(module).map(|o| o.node) {
+ match self.tcx.hir_owner(hir_id.owner).map(|o| o.node) {
Some(OwnerNode::Item(&Item { span, kind: ItemKind::Mod(ref m), .. })) => {
(m, span, hir_id)
}
@@ -620,39 +619,33 @@ impl<'hir> Map<'hir> {
pub fn for_each_module(self, mut f: impl FnMut(LocalDefId)) {
let crate_items = self.tcx.hir_crate_items(());
for module in crate_items.submodules.iter() {
- f(*module)
+ f(module.def_id)
}
}
- #[cfg(not(parallel_compiler))]
#[inline]
- pub fn par_for_each_module(self, f: impl Fn(LocalDefId)) {
- self.for_each_module(f)
- }
-
- #[cfg(parallel_compiler)]
- pub fn par_for_each_module(self, f: impl Fn(LocalDefId) + Sync) {
- use rustc_data_structures::sync::{par_iter, ParallelIterator};
- par_iter_submodules(self.tcx, CRATE_DEF_ID, &f);
-
- fn par_iter_submodules<F>(tcx: TyCtxt<'_>, module: LocalDefId, f: &F)
- where
- F: Fn(LocalDefId) + Sync,
- {
- (*f)(module);
- let items = tcx.hir_module_items(module);
- par_iter(&items.submodules[..]).for_each(|&sm| par_iter_submodules(tcx, sm, f));
- }
+ pub fn par_for_each_module(self, f: impl Fn(LocalDefId) + Sync + Send) {
+ let crate_items = self.tcx.hir_crate_items(());
+ par_for_each_in(&crate_items.submodules[..], |module| f(module.def_id))
}
/// Returns an iterator for the nodes in the ancestor tree of the `current_id`
/// until the crate root is reached. Prefer this over your own loop using `get_parent_node`.
- pub fn parent_iter(self, current_id: HirId) -> ParentHirIterator<'hir> {
+ #[inline]
+ pub fn parent_id_iter(self, current_id: HirId) -> impl Iterator<Item = HirId> + 'hir {
ParentHirIterator { current_id, map: self }
}
/// Returns an iterator for the nodes in the ancestor tree of the `current_id`
/// until the crate root is reached. Prefer this over your own loop using `get_parent_node`.
+ #[inline]
+ pub fn parent_iter(self, current_id: HirId) -> impl Iterator<Item = (HirId, Node<'hir>)> {
+ self.parent_id_iter(current_id).filter_map(move |id| Some((id, self.find(id)?)))
+ }
+
+ /// Returns an iterator for the nodes in the ancestor tree of the `current_id`
+ /// until the crate root is reached. Prefer this over your own loop using `get_parent_node`.
+ #[inline]
pub fn parent_owner_iter(self, current_id: HirId) -> ParentOwnerIterator<'hir> {
ParentOwnerIterator { current_id, map: self }
}
@@ -732,27 +725,27 @@ impl<'hir> Map<'hir> {
None
}
- /// Retrieves the `HirId` for `id`'s parent item, or `id` itself if no
+ /// Retrieves the `OwnerId` for `id`'s parent item, or `id` itself if no
/// parent item is in this map. The "parent item" is the closest parent node
/// in the HIR which is recorded by the map and is an item, either an item
/// in a module, trait, or impl.
- pub fn get_parent_item(self, hir_id: HirId) -> LocalDefId {
+ pub fn get_parent_item(self, hir_id: HirId) -> OwnerId {
if let Some((def_id, _node)) = self.parent_owner_iter(hir_id).next() {
def_id
} else {
- CRATE_DEF_ID
+ CRATE_OWNER_ID
}
}
- /// Returns the `HirId` of `id`'s nearest module parent, or `id` itself if no
+ /// Returns the `OwnerId` of `id`'s nearest module parent, or `id` itself if no
/// module parent is in this map.
- pub(super) fn get_module_parent_node(self, hir_id: HirId) -> LocalDefId {
+ pub(super) fn get_module_parent_node(self, hir_id: HirId) -> OwnerId {
for (def_id, node) in self.parent_owner_iter(hir_id) {
if let OwnerNode::Item(&Item { kind: ItemKind::Mod(_), .. }) = node {
return def_id;
}
}
- CRATE_DEF_ID
+ CRATE_OWNER_ID
}
/// When on an if expression, a match arm tail expression or a match arm, give back
@@ -825,30 +818,30 @@ impl<'hir> Map<'hir> {
}
bug!(
"expected foreign mod or inlined parent, found {}",
- self.node_to_string(HirId::make_owner(parent))
+ self.node_to_string(HirId::make_owner(parent.def_id))
)
}
- pub fn expect_owner(self, id: LocalDefId) -> OwnerNode<'hir> {
+ pub fn expect_owner(self, id: OwnerId) -> OwnerNode<'hir> {
self.tcx.hir_owner(id).unwrap_or_else(|| bug!("expected owner for {:?}", id)).node
}
pub fn expect_item(self, id: LocalDefId) -> &'hir Item<'hir> {
- match self.tcx.hir_owner(id) {
+ match self.tcx.hir_owner(OwnerId { def_id: id }) {
Some(Owner { node: OwnerNode::Item(item), .. }) => item,
_ => bug!("expected item, found {}", self.node_to_string(HirId::make_owner(id))),
}
}
pub fn expect_impl_item(self, id: LocalDefId) -> &'hir ImplItem<'hir> {
- match self.tcx.hir_owner(id) {
+ match self.tcx.hir_owner(OwnerId { def_id: id }) {
Some(Owner { node: OwnerNode::ImplItem(item), .. }) => item,
_ => bug!("expected impl item, found {}", self.node_to_string(HirId::make_owner(id))),
}
}
pub fn expect_trait_item(self, id: LocalDefId) -> &'hir TraitItem<'hir> {
- match self.tcx.hir_owner(id) {
+ match self.tcx.hir_owner(OwnerId { def_id: id }) {
Some(Owner { node: OwnerNode::TraitItem(item), .. }) => item,
_ => bug!("expected trait item, found {}", self.node_to_string(HirId::make_owner(id))),
}
@@ -861,11 +854,14 @@ impl<'hir> Map<'hir> {
}
}
- pub fn expect_foreign_item(self, id: LocalDefId) -> &'hir ForeignItem<'hir> {
+ pub fn expect_foreign_item(self, id: OwnerId) -> &'hir ForeignItem<'hir> {
match self.tcx.hir_owner(id) {
Some(Owner { node: OwnerNode::ForeignItem(item), .. }) => item,
_ => {
- bug!("expected foreign item, found {}", self.node_to_string(HirId::make_owner(id)))
+ bug!(
+ "expected foreign item, found {}",
+ self.node_to_string(HirId::make_owner(id.def_id))
+ )
}
}
}
@@ -945,9 +941,19 @@ impl<'hir> Map<'hir> {
let span = match self.find(hir_id)? {
// Function-like.
- Node::Item(Item { kind: ItemKind::Fn(sig, ..), .. })
- | Node::TraitItem(TraitItem { kind: TraitItemKind::Fn(sig, ..), .. })
- | Node::ImplItem(ImplItem { kind: ImplItemKind::Fn(sig, ..), .. }) => sig.span,
+ Node::Item(Item { kind: ItemKind::Fn(sig, ..), span: outer_span, .. })
+ | Node::TraitItem(TraitItem {
+ kind: TraitItemKind::Fn(sig, ..),
+ span: outer_span,
+ ..
+ })
+ | Node::ImplItem(ImplItem {
+ kind: ImplItemKind::Fn(sig, ..), span: outer_span, ..
+ }) => {
+ // Ensure that the returned span has the item's SyntaxContext, and not the
+ // SyntaxContext of the visibility.
+ sig.span.find_ancestor_in_same_ctxt(*outer_span).unwrap_or(*outer_span)
+ }
// Constants and Statics.
Node::Item(Item {
kind:
@@ -989,7 +995,11 @@ impl<'hir> Map<'hir> {
}
// Other cases.
Node::Item(item) => match &item.kind {
- ItemKind::Use(path, _) => path.span,
+ ItemKind::Use(path, _) => {
+ // Ensure that the returned span has the item's SyntaxContext, and not the
+ // SyntaxContext of the path.
+ path.span.find_ancestor_in_same_ctxt(item.span).unwrap_or(item.span)
+ }
_ => named_span(item.span, item.ident, item.kind.generics()),
},
Node::Variant(variant) => named_span(variant.span, variant.ident, None),
@@ -999,11 +1009,17 @@ impl<'hir> Map<'hir> {
_ => named_span(item.span, item.ident, None),
},
Node::Ctor(_) => return self.opt_span(self.get_parent_node(hir_id)),
- Node::Expr(Expr { kind: ExprKind::Closure(Closure { fn_decl_span, .. }), .. }) => {
- *fn_decl_span
+ Node::Expr(Expr {
+ kind: ExprKind::Closure(Closure { fn_decl_span, .. }),
+ span,
+ ..
+ }) => {
+ // Ensure that the returned span has the item's SyntaxContext.
+ fn_decl_span.find_ancestor_in_same_ctxt(*span).unwrap_or(*span)
}
_ => self.span_with_body(hir_id),
};
+ debug_assert_eq!(span.ctxt(), self.span_with_body(hir_id).ctxt());
Some(span)
}
@@ -1020,6 +1036,7 @@ impl<'hir> Map<'hir> {
Node::Field(field) => field.span,
Node::AnonConst(constant) => self.body(constant.body).value.span,
Node::Expr(expr) => expr.span,
+ Node::ExprField(field) => field.span,
Node::Stmt(stmt) => stmt.span,
Node::PathSegment(seg) => {
let ident_span = seg.ident.span;
@@ -1030,6 +1047,7 @@ impl<'hir> Map<'hir> {
Node::TypeBinding(tb) => tb.span,
Node::TraitRef(tr) => tr.path.span,
Node::Pat(pat) => pat.span,
+ Node::PatField(field) => field.span,
Node::Arm(arm) => arm.span,
Node::Block(block) => block.span,
Node::Ctor(..) => self.span_with_body(self.get_parent_node(hir_id)),
@@ -1137,7 +1155,7 @@ pub(super) fn crate_hash(tcx: TyCtxt<'_>, crate_num: CrateNum) -> Svh {
.filter_map(|(def_id, info)| {
let _ = info.as_owner()?;
let def_path_hash = definitions.def_path_hash(def_id);
- let span = resolutions.source_span[def_id];
+ let span = resolutions.source_span.get(def_id).unwrap_or(&DUMMY_SP);
debug_assert_eq!(span.parent(), None);
Some((def_path_hash, span))
})
@@ -1204,7 +1222,13 @@ fn hir_id_to_string(map: Map<'_>, id: HirId) -> String {
ItemKind::ForeignMod { .. } => "foreign mod",
ItemKind::GlobalAsm(..) => "global asm",
ItemKind::TyAlias(..) => "ty",
- ItemKind::OpaqueTy(..) => "opaque type",
+ ItemKind::OpaqueTy(ref opaque) => {
+ if opaque.in_trait {
+ "opaque type in trait"
+ } else {
+ "opaque type"
+ }
+ }
ItemKind::Enum(..) => "enum",
ItemKind::Struct(..) => "struct",
ItemKind::Union(..) => "union",
@@ -1220,7 +1244,7 @@ fn hir_id_to_string(map: Map<'_>, id: HirId) -> String {
format!("assoc const {} in {}{}", ii.ident, path_str(), id_str)
}
ImplItemKind::Fn(..) => format!("method {} in {}{}", ii.ident, path_str(), id_str),
- ImplItemKind::TyAlias(_) => {
+ ImplItemKind::Type(_) => {
format!("assoc type {} in {}{}", ii.ident, path_str(), id_str)
}
},
@@ -1241,12 +1265,14 @@ fn hir_id_to_string(map: Map<'_>, id: HirId) -> String {
}
Some(Node::AnonConst(_)) => node_str("const"),
Some(Node::Expr(_)) => node_str("expr"),
+ Some(Node::ExprField(_)) => node_str("expr field"),
Some(Node::Stmt(_)) => node_str("stmt"),
Some(Node::PathSegment(_)) => node_str("path segment"),
Some(Node::Ty(_)) => node_str("type"),
Some(Node::TypeBinding(_)) => node_str("type binding"),
Some(Node::TraitRef(_)) => node_str("trait ref"),
Some(Node::Pat(_)) => node_str("pat"),
+ Some(Node::PatField(_)) => node_str("pattern field"),
Some(Node::Param(_)) => node_str("param"),
Some(Node::Arm(_)) => node_str("arm"),
Some(Node::Block(_)) => node_str("block"),
@@ -1291,7 +1317,7 @@ pub(crate) fn hir_crate_items(tcx: TyCtxt<'_>, _: ()) -> ModuleItems {
// A "crate collector" and "module collector" start at a
// module item (the former starts at the crate root) but only
// the former needs to collect it. ItemCollector does not do this for us.
- collector.submodules.push(CRATE_DEF_ID);
+ collector.submodules.push(CRATE_OWNER_ID);
tcx.hir().walk_toplevel_module(&mut collector);
let ItemCollector {
@@ -1319,7 +1345,7 @@ struct ItemCollector<'tcx> {
// otherwise it collects items in some module.
crate_collector: bool,
tcx: TyCtxt<'tcx>,
- submodules: Vec<LocalDefId>,
+ submodules: Vec<OwnerId>,
items: Vec<ItemId>,
trait_items: Vec<TraitItemId>,
impl_items: Vec<ImplItemId>,
@@ -1351,14 +1377,14 @@ impl<'hir> Visitor<'hir> for ItemCollector<'hir> {
fn visit_item(&mut self, item: &'hir Item<'hir>) {
if associated_body(Node::Item(item)).is_some() {
- self.body_owners.push(item.def_id);
+ self.body_owners.push(item.owner_id.def_id);
}
self.items.push(item.item_id());
// Items that are modules are handled here instead of in visit_mod.
if let ItemKind::Mod(module) = &item.kind {
- self.submodules.push(item.def_id);
+ self.submodules.push(item.owner_id);
// A module collector does not recurse inside nested modules.
if self.crate_collector {
intravisit::walk_mod(self, module, item.hir_id());
@@ -1387,7 +1413,7 @@ impl<'hir> Visitor<'hir> for ItemCollector<'hir> {
fn visit_trait_item(&mut self, item: &'hir TraitItem<'hir>) {
if associated_body(Node::TraitItem(item)).is_some() {
- self.body_owners.push(item.def_id);
+ self.body_owners.push(item.owner_id.def_id);
}
self.trait_items.push(item.trait_item_id());
@@ -1396,7 +1422,7 @@ impl<'hir> Visitor<'hir> for ItemCollector<'hir> {
fn visit_impl_item(&mut self, item: &'hir ImplItem<'hir>) {
if associated_body(Node::ImplItem(item)).is_some() {
- self.body_owners.push(item.def_id);
+ self.body_owners.push(item.owner_id.def_id);
}
self.impl_items.push(item.impl_item_id());
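
The iterator split above layers `parent_iter` on top of the cheaper `parent_id_iter` via `filter_map`, so callers that only need ancestor ids skip the node lookups entirely. A standalone sketch of that layering over a toy parent map (invented types, same shape):

use std::collections::HashMap;

type Id = u32;

struct Map {
    parents: HashMap<Id, Id>,         // child -> parent
    nodes: HashMap<Id, &'static str>, // some ids may have no node entry
}

impl Map {
    // Cheap iterator over ancestor ids only (like parent_id_iter).
    fn parent_id_iter(&self, start: Id) -> impl Iterator<Item = Id> + '_ {
        let mut current = start;
        std::iter::from_fn(move || {
            let parent = *self.parents.get(&current)?;
            current = parent;
            Some(parent)
        })
    }

    // Id-plus-node iterator layered on top (like parent_iter).
    fn parent_iter(&self, start: Id) -> impl Iterator<Item = (Id, &'static str)> + '_ {
        self.parent_id_iter(start)
            .filter_map(move |id| Some((id, *self.nodes.get(&id)?)))
    }
}

fn main() {
    let map = Map {
        parents: HashMap::from([(3, 2), (2, 1), (1, 0)]),
        nodes: HashMap::from([(2, "block"), (0, "crate root")]),
    };
    assert_eq!(map.parent_id_iter(3).collect::<Vec<_>>(), vec![2, 1, 0]);
    // Ids without a node entry (here: 1) are skipped, just like the filter_map above.
    assert_eq!(map.parent_iter(3).collect::<Vec<_>>(), vec![(2, "block"), (0, "crate root")]);
}
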
diff --git a/compiler/rustc_middle/src/hir/mod.rs b/compiler/rustc_middle/src/hir/mod.rs
index 211a61471..1c6264ad0 100644
--- a/compiler/rustc_middle/src/hir/mod.rs
+++ b/compiler/rustc_middle/src/hir/mod.rs
@@ -39,7 +39,7 @@ impl<'a, 'tcx> HashStable<StableHashingContext<'a>> for Owner<'tcx> {
/// bodies. The Ids are in visitor order. This is used to partition a pass between modules.
#[derive(Debug, HashStable, Encodable, Decodable)]
pub struct ModuleItems {
- submodules: Box<[LocalDefId]>,
+ submodules: Box<[OwnerId]>,
items: Box<[ItemId]>,
trait_items: Box<[TraitItemId]>,
impl_items: Box<[ImplItemId]>,
@@ -67,10 +67,10 @@ impl ModuleItems {
pub fn definitions(&self) -> impl Iterator<Item = LocalDefId> + '_ {
self.items
.iter()
- .map(|id| id.def_id)
- .chain(self.trait_items.iter().map(|id| id.def_id))
- .chain(self.impl_items.iter().map(|id| id.def_id))
- .chain(self.foreign_items.iter().map(|id| id.def_id))
+ .map(|id| id.owner_id.def_id)
+ .chain(self.trait_items.iter().map(|id| id.owner_id.def_id))
+ .chain(self.impl_items.iter().map(|id| id.owner_id.def_id))
+ .chain(self.foreign_items.iter().map(|id| id.owner_id.def_id))
}
pub fn par_items(&self, f: impl Fn(ItemId) + Send + Sync) {
@@ -97,7 +97,7 @@ impl<'tcx> TyCtxt<'tcx> {
}
pub fn parent_module(self, id: HirId) -> LocalDefId {
- self.parent_module_from_def_id(id.owner)
+ self.parent_module_from_def_id(id.owner.def_id)
}
pub fn impl_subject(self, def_id: DefId) -> ImplSubject<'tcx> {
@@ -110,13 +110,13 @@ impl<'tcx> TyCtxt<'tcx> {
pub fn provide(providers: &mut Providers) {
providers.parent_module_from_def_id = |tcx, id| {
let hir = tcx.hir();
- hir.get_module_parent_node(hir.local_def_id_to_hir_id(id))
+ hir.get_module_parent_node(hir.local_def_id_to_hir_id(id)).def_id
};
providers.hir_crate_items = map::hir_crate_items;
providers.crate_hash = map::crate_hash;
providers.hir_module_items = map::hir_module_items;
providers.hir_owner = |tcx, id| {
- let owner = tcx.hir_crate(()).owners.get(id)?.as_owner()?;
+ let owner = tcx.hir_crate(()).owners.get(id.def_id)?.as_owner()?;
let node = owner.node();
Some(Owner { node, hash_without_bodies: owner.nodes.hash_without_bodies })
};
@@ -128,21 +128,24 @@ pub fn provide(providers: &mut Providers) {
MaybeOwner::NonOwner(hir_id) => hir_id,
}
};
- providers.hir_owner_nodes = |tcx, id| tcx.hir_crate(()).owners[id].map(|i| &i.nodes);
+ providers.hir_owner_nodes = |tcx, id| tcx.hir_crate(()).owners[id.def_id].map(|i| &i.nodes);
providers.hir_owner_parent = |tcx, id| {
// Accessing the local_parent is ok since its value is hashed as part of `id`'s DefPathHash.
- tcx.opt_local_parent(id).map_or(CRATE_HIR_ID, |parent| {
+ tcx.opt_local_parent(id.def_id).map_or(CRATE_HIR_ID, |parent| {
let mut parent_hir_id = tcx.hir().local_def_id_to_hir_id(parent);
- if let Some(local_id) =
- tcx.hir_crate(()).owners[parent_hir_id.owner].unwrap().parenting.get(&id)
+ if let Some(local_id) = tcx.hir_crate(()).owners[parent_hir_id.owner.def_id]
+ .unwrap()
+ .parenting
+ .get(&id.def_id)
{
parent_hir_id.local_id = *local_id;
}
parent_hir_id
})
};
- providers.hir_attrs =
- |tcx, id| tcx.hir_crate(()).owners[id].as_owner().map_or(AttributeMap::EMPTY, |o| &o.attrs);
+ providers.hir_attrs = |tcx, id| {
+ tcx.hir_crate(()).owners[id.def_id].as_owner().map_or(AttributeMap::EMPTY, |o| &o.attrs)
+ };
providers.source_span =
|tcx, def_id| tcx.resolutions(()).source_span.get(def_id).copied().unwrap_or(DUMMY_SP);
providers.def_span = |tcx, def_id| {
@@ -177,6 +180,7 @@ pub fn provide(providers: &mut Providers) {
let id = id.expect_local();
tcx.resolutions(()).expn_that_defined.get(&id).copied().unwrap_or(ExpnId::root())
};
- providers.in_scope_traits_map =
- |tcx, id| tcx.hir_crate(()).owners[id].as_owner().map(|owner_info| &owner_info.trait_map);
+ providers.in_scope_traits_map = |tcx, id| {
+ tcx.hir_crate(()).owners[id.def_id].as_owner().map(|owner_info| &owner_info.trait_map)
+ };
}
diff --git a/compiler/rustc_middle/src/infer/canonical.rs b/compiler/rustc_middle/src/infer/canonical.rs
index 200de9079..d3cf519b6 100644
--- a/compiler/rustc_middle/src/infer/canonical.rs
+++ b/compiler/rustc_middle/src/infer/canonical.rs
@@ -22,6 +22,7 @@
//! [c]: https://rust-lang.github.io/chalk/book/canonical_queries/canonicalization.html
use crate::infer::MemberConstraint;
+use crate::mir::ConstraintCategory;
use crate::ty::subst::GenericArg;
use crate::ty::{self, BoundVar, List, Region, Ty, TyCtxt};
use rustc_index::vec::IndexVec;
@@ -43,6 +44,15 @@ pub struct Canonical<'tcx, V> {
pub type CanonicalVarInfos<'tcx> = &'tcx List<CanonicalVarInfo<'tcx>>;
+impl<'tcx> ty::TypeFoldable<'tcx> for CanonicalVarInfos<'tcx> {
+ fn try_fold_with<F: ty::FallibleTypeFolder<'tcx>>(
+ self,
+ folder: &mut F,
+ ) -> Result<Self, F::Error> {
+ ty::util::fold_list(self, folder, |tcx, v| tcx.intern_canonical_var_infos(v))
+ }
+}
+
/// A set of values corresponding to the canonical variables from some
/// `Canonical`. You can give these values to
/// `canonical_value.substitute` to substitute them into the canonical
@@ -89,6 +99,7 @@ impl<'tcx> Default for OriginalQueryValues<'tcx> {
/// a copy of the canonical value in some other inference context,
/// with fresh inference variables replacing the canonical values.
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, TyDecodable, TyEncodable, HashStable)]
+#[derive(TypeFoldable, TypeVisitable)]
pub struct CanonicalVarInfo<'tcx> {
pub kind: CanonicalVarKind<'tcx>,
}
@@ -114,6 +125,7 @@ impl<'tcx> CanonicalVarInfo<'tcx> {
/// in the type-theory sense of the term -- i.e., a "meta" type system
/// that analyzes type-like values.
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, TyDecodable, TyEncodable, HashStable)]
+#[derive(TypeFoldable, TypeVisitable)]
pub enum CanonicalVarKind<'tcx> {
/// Some kind of type inference variable.
Ty(CanonicalTyVarKind),
@@ -290,20 +302,15 @@ impl<'tcx, V> Canonical<'tcx, V> {
}
}
-pub type QueryOutlivesConstraint<'tcx> =
- ty::Binder<'tcx, ty::OutlivesPredicate<GenericArg<'tcx>, Region<'tcx>>>;
+pub type QueryOutlivesConstraint<'tcx> = (
+ ty::Binder<'tcx, ty::OutlivesPredicate<GenericArg<'tcx>, Region<'tcx>>>,
+ ConstraintCategory<'tcx>,
+);
TrivialTypeTraversalAndLiftImpls! {
for <'tcx> {
crate::infer::canonical::Certainty,
- crate::infer::canonical::CanonicalVarInfo<'tcx>,
- crate::infer::canonical::CanonicalVarKind<'tcx>,
- }
-}
-
-TrivialTypeTraversalImpls! {
- for <'tcx> {
- crate::infer::canonical::CanonicalVarInfos<'tcx>,
+ crate::infer::canonical::CanonicalTyVarKind,
}
}
diff --git a/compiler/rustc_middle/src/infer/unify_key.rs b/compiler/rustc_middle/src/infer/unify_key.rs
index f2627885d..41d8c7ffd 100644
--- a/compiler/rustc_middle/src/infer/unify_key.rs
+++ b/compiler/rustc_middle/src/infer/unify_key.rs
@@ -129,7 +129,7 @@ impl<'tcx> UnifyKey for ty::ConstVid<'tcx> {
}
impl<'tcx> UnifyValue for ConstVarValue<'tcx> {
- type Error = (ty::Const<'tcx>, ty::Const<'tcx>);
+ type Error = NoError;
fn unify_values(&value1: &Self, &value2: &Self) -> Result<Self, Self::Error> {
Ok(match (value1.val, value2.val) {
diff --git a/compiler/rustc_middle/src/lib.rs b/compiler/rustc_middle/src/lib.rs
index ef06c457b..a58cbc376 100644
--- a/compiler/rustc_middle/src/lib.rs
+++ b/compiler/rustc_middle/src/lib.rs
@@ -26,28 +26,24 @@
#![feature(allocator_api)]
#![feature(array_windows)]
#![feature(assert_matches)]
-#![feature(backtrace)]
#![feature(box_patterns)]
#![feature(core_intrinsics)]
#![feature(discriminant_kind)]
#![feature(exhaustive_patterns)]
#![feature(get_mut_unchecked)]
-#![feature(generic_associated_types)]
#![feature(if_let_guard)]
-#![feature(map_first_last)]
#![feature(negative_impls)]
#![feature(never_type)]
#![feature(extern_types)]
#![feature(new_uninit)]
#![feature(once_cell)]
#![feature(let_chains)]
-#![feature(let_else)]
#![feature(min_specialization)]
#![feature(trusted_len)]
#![feature(type_alias_impl_trait)]
#![feature(associated_type_bounds)]
#![feature(rustc_attrs)]
-#![feature(half_open_range_patterns)]
+#![cfg_attr(bootstrap, feature(half_open_range_patterns))]
#![feature(control_flow_enum)]
#![feature(associated_type_defaults)]
#![feature(trusted_step)]
@@ -59,6 +55,7 @@
#![feature(drain_filter)]
#![feature(intra_doc_pointers)]
#![feature(yeet_expr)]
+#![feature(result_option_inspect)]
#![feature(const_option)]
#![recursion_limit = "512"]
#![allow(rustc::potential_query_instability)]
@@ -87,6 +84,7 @@ pub mod query;
pub mod arena;
#[macro_use]
pub mod dep_graph;
+pub(crate) mod error;
pub mod hir;
pub mod infer;
pub mod lint;
@@ -96,6 +94,7 @@ pub mod mir;
pub mod thir;
pub mod traits;
pub mod ty;
+mod values;
pub mod util {
pub mod bug;
diff --git a/compiler/rustc_middle/src/lint.rs b/compiler/rustc_middle/src/lint.rs
index 2f45222de..79522bd0b 100644
--- a/compiler/rustc_middle/src/lint.rs
+++ b/compiler/rustc_middle/src/lint.rs
@@ -1,20 +1,20 @@
use std::cmp;
use rustc_data_structures::fx::FxHashMap;
-use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
-use rustc_errors::{Diagnostic, DiagnosticId, LintDiagnosticBuilder, MultiSpan};
-use rustc_hir::HirId;
-use rustc_index::vec::IndexVec;
-use rustc_query_system::ich::StableHashingContext;
+use rustc_data_structures::sorted_map::SortedMap;
+use rustc_errors::{Diagnostic, DiagnosticBuilder, DiagnosticId, DiagnosticMessage, MultiSpan};
+use rustc_hir::{HirId, ItemLocalId};
use rustc_session::lint::{
builtin::{self, FORBIDDEN_LINT_GROUPS},
- FutureIncompatibilityReason, Level, Lint, LintExpectationId, LintId,
+ FutureIncompatibilityReason, Level, Lint, LintId,
};
use rustc_session::Session;
use rustc_span::hygiene::MacroKind;
use rustc_span::source_map::{DesugaringKind, ExpnKind};
use rustc_span::{symbol, Span, Symbol, DUMMY_SP};
+use crate::ty::TyCtxt;
+
/// How a lint level was set.
#[derive(Clone, Copy, PartialEq, Eq, HashStable, Debug)]
pub enum LintLevelSource {
@@ -23,7 +23,12 @@ pub enum LintLevelSource {
Default,
/// Lint level was set by an attribute.
- Node(Symbol, Span, Option<Symbol> /* RFC 2383 reason */),
+ Node {
+ name: Symbol,
+ span: Span,
+ /// RFC 2383 reason
+ reason: Option<Symbol>,
+ },
/// Lint level was set by a command-line flag.
/// The provided `Level` is the level specified on the command line.
@@ -35,7 +40,7 @@ impl LintLevelSource {
pub fn name(&self) -> Symbol {
match *self {
LintLevelSource::Default => symbol::kw::Default,
- LintLevelSource::Node(name, _, _) => name,
+ LintLevelSource::Node { name, .. } => name,
LintLevelSource::CommandLine(name, _) => name,
}
}
@@ -43,7 +48,7 @@ impl LintLevelSource {
pub fn span(&self) -> Span {
match *self {
LintLevelSource::Default => DUMMY_SP,
- LintLevelSource::Node(_, span, _) => span,
+ LintLevelSource::Node { span, .. } => span,
LintLevelSource::CommandLine(_, _) => DUMMY_SP,
}
}
@@ -52,145 +57,137 @@ impl LintLevelSource {
/// A tuple of a lint level and its source.
pub type LevelAndSource = (Level, LintLevelSource);
-#[derive(Debug, HashStable)]
-pub struct LintLevelSets {
- pub list: IndexVec<LintStackIndex, LintSet>,
- pub lint_cap: Level,
-}
-
-rustc_index::newtype_index! {
- #[derive(HashStable)]
- pub struct LintStackIndex {
- const COMMAND_LINE = 0,
- }
-}
-
-#[derive(Debug, HashStable)]
-pub struct LintSet {
- // -A,-W,-D flags, a `Symbol` for the flag itself and `Level` for which
- // flag.
- pub specs: FxHashMap<LintId, LevelAndSource>,
-
- pub parent: LintStackIndex,
+/// Return type for the `shallow_lint_levels_on` query.
+///
+/// This map represents the set of allowed lints and allowance levels given
+/// by the attributes for *a single HirId*.
+#[derive(Default, Debug, HashStable)]
+pub struct ShallowLintLevelMap {
+ pub specs: SortedMap<ItemLocalId, FxHashMap<LintId, LevelAndSource>>,
}
-impl LintLevelSets {
- pub fn new() -> Self {
- LintLevelSets { list: IndexVec::new(), lint_cap: Level::Forbid }
- }
-
- pub fn get_lint_level(
- &self,
- lint: &'static Lint,
- idx: LintStackIndex,
- aux: Option<&FxHashMap<LintId, LevelAndSource>>,
- sess: &Session,
- ) -> LevelAndSource {
- let (level, mut src) = self.get_lint_id_level(LintId::of(lint), idx, aux);
-
- // If `level` is none then we actually assume the default level for this
- // lint.
- let mut level = level.unwrap_or_else(|| lint.default_level(sess.edition()));
-
- // If we're about to issue a warning, check at the last minute for any
- // directives against the warnings "lint". If, for example, there's an
- // `allow(warnings)` in scope then we want to respect that instead.
- //
- // We exempt `FORBIDDEN_LINT_GROUPS` from this because it specifically
- // triggers in cases (like #80988) where you have `forbid(warnings)`,
- // and so if we turned that into an error, it'd defeat the purpose of the
- // future compatibility warning.
- if level == Level::Warn && LintId::of(lint) != LintId::of(FORBIDDEN_LINT_GROUPS) {
- let (warnings_level, warnings_src) =
- self.get_lint_id_level(LintId::of(builtin::WARNINGS), idx, aux);
- if let Some(configured_warning_level) = warnings_level {
- if configured_warning_level != Level::Warn {
- level = configured_warning_level;
- src = warnings_src;
- }
+/// From an initial level and source, verify the effect of special annotations:
+/// `warnings` lint level and lint caps.
+///
+/// The return value of this function is suitable for diagnostics.
+pub fn reveal_actual_level(
+ level: Option<Level>,
+ src: &mut LintLevelSource,
+ sess: &Session,
+ lint: LintId,
+ probe_for_lint_level: impl FnOnce(LintId) -> (Option<Level>, LintLevelSource),
+) -> Level {
+ // If `level` is none then we actually assume the default level for this lint.
+ let mut level = level.unwrap_or_else(|| lint.lint.default_level(sess.edition()));
+
+ // If we're about to issue a warning, check at the last minute for any
+ // directives against the warnings "lint". If, for example, there's an
+ // `allow(warnings)` in scope then we want to respect that instead.
+ //
+ // We exempt `FORBIDDEN_LINT_GROUPS` from this because it specifically
+ // triggers in cases (like #80988) where you have `forbid(warnings)`,
+ // and so if we turned that into an error, it'd defeat the purpose of the
+ // future compatibility warning.
+ if level == Level::Warn && lint != LintId::of(FORBIDDEN_LINT_GROUPS) {
+ let (warnings_level, warnings_src) = probe_for_lint_level(LintId::of(builtin::WARNINGS));
+ if let Some(configured_warning_level) = warnings_level {
+ if configured_warning_level != Level::Warn {
+ level = configured_warning_level;
+ *src = warnings_src;
}
}
+ }
- // Ensure that we never exceed the `--cap-lints` argument
- // unless the source is a --force-warn
- level = if let LintLevelSource::CommandLine(_, Level::ForceWarn(_)) = src {
- level
- } else {
- cmp::min(level, self.lint_cap)
- };
-
- if let Some(driver_level) = sess.driver_lint_caps.get(&LintId::of(lint)) {
- // Ensure that we never exceed driver level.
- level = cmp::min(*driver_level, level);
- }
+ // Ensure that we never exceed the `--cap-lints` argument unless the source is a --force-warn
+ level = if let LintLevelSource::CommandLine(_, Level::ForceWarn(_)) = src {
+ level
+ } else {
+ cmp::min(level, sess.opts.lint_cap.unwrap_or(Level::Forbid))
+ };
- (level, src)
+ if let Some(driver_level) = sess.driver_lint_caps.get(&lint) {
+ // Ensure that we never exceed driver level.
+ level = cmp::min(*driver_level, level);
}
- pub fn get_lint_id_level(
+ level
+}
+
+impl ShallowLintLevelMap {
+ /// Perform a deep probe in the HIR tree looking for the actual level for the lint.
+ /// This lint level is not usable for diagnostics; it needs to be corrected by
+ /// `reveal_actual_level` beforehand.
+ #[instrument(level = "trace", skip(self, tcx), ret)]
+ fn probe_for_lint_level(
&self,
+ tcx: TyCtxt<'_>,
id: LintId,
- mut idx: LintStackIndex,
- aux: Option<&FxHashMap<LintId, LevelAndSource>>,
+ start: HirId,
) -> (Option<Level>, LintLevelSource) {
- if let Some(specs) = aux {
- if let Some(&(level, src)) = specs.get(&id) {
- return (Some(level), src);
- }
+ if let Some(map) = self.specs.get(&start.local_id)
+ && let Some(&(level, src)) = map.get(&id)
+ {
+ return (Some(level), src);
}
- loop {
- let LintSet { ref specs, parent } = self.list[idx];
- if let Some(&(level, src)) = specs.get(&id) {
- return (Some(level), src);
+
+ let mut owner = start.owner;
+ let mut specs = &self.specs;
+
+ for parent in tcx.hir().parent_id_iter(start) {
+ if parent.owner != owner {
+ owner = parent.owner;
+ specs = &tcx.shallow_lint_levels_on(owner).specs;
}
- if idx == COMMAND_LINE {
- return (None, LintLevelSource::Default);
+ if let Some(map) = specs.get(&parent.local_id)
+ && let Some(&(level, src)) = map.get(&id)
+ {
+ return (Some(level), src);
}
- idx = parent;
}
- }
-}
-#[derive(Debug)]
-pub struct LintLevelMap {
- /// This is a collection of lint expectations as described in RFC 2383, that
- /// can be fulfilled during this compilation session. This means that at least
- /// one expected lint is currently registered in the lint store.
- ///
- /// The [`LintExpectationId`] is stored as a part of the [`Expect`](Level::Expect)
- /// lint level.
- pub lint_expectations: Vec<(LintExpectationId, LintExpectation)>,
- pub sets: LintLevelSets,
- pub id_to_set: FxHashMap<HirId, LintStackIndex>,
-}
+ (None, LintLevelSource::Default)
+ }
-impl LintLevelMap {
- /// If the `id` was previously registered with `register_id` when building
- /// this `LintLevelMap` this returns the corresponding lint level and source
- /// of the lint level for the lint provided.
- ///
- /// If the `id` was not previously registered, returns `None`. If `None` is
- /// returned then the parent of `id` should be acquired and this function
- /// should be called again.
- pub fn level_and_source(
+ /// Fetch and return the user-visible lint level for the given lint at the given HirId.
+ #[instrument(level = "trace", skip(self, tcx), ret)]
+ pub fn lint_level_id_at_node(
&self,
- lint: &'static Lint,
- id: HirId,
- session: &Session,
- ) -> Option<LevelAndSource> {
- self.id_to_set.get(&id).map(|idx| self.sets.get_lint_level(lint, *idx, None, session))
+ tcx: TyCtxt<'_>,
+ lint: LintId,
+ cur: HirId,
+ ) -> (Level, LintLevelSource) {
+ let (level, mut src) = self.probe_for_lint_level(tcx, lint, cur);
+ let level = reveal_actual_level(level, &mut src, tcx.sess, lint, |lint| {
+ self.probe_for_lint_level(tcx, lint, cur)
+ });
+ (level, src)
}
}
-impl<'a> HashStable<StableHashingContext<'a>> for LintLevelMap {
- #[inline]
- fn hash_stable(&self, hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) {
- let LintLevelMap { ref sets, ref id_to_set, ref lint_expectations } = *self;
+impl TyCtxt<'_> {
+ /// Fetch and return the user-visible lint level for the given lint at the given HirId.
+ pub fn lint_level_at_node(self, lint: &'static Lint, id: HirId) -> (Level, LintLevelSource) {
+ self.shallow_lint_levels_on(id.owner).lint_level_id_at_node(self, LintId::of(lint), id)
+ }
- id_to_set.hash_stable(hcx, hasher);
- lint_expectations.hash_stable(hcx, hasher);
+ /// Walks upwards from `id` to find a node which might change lint levels with attributes.
+ /// It stops at `bound` and just returns it if reached.
+ pub fn maybe_lint_level_root_bounded(self, mut id: HirId, bound: HirId) -> HirId {
+ let hir = self.hir();
+ loop {
+ if id == bound {
+ return bound;
+ }
- hcx.while_hashing_spans(true, |hcx| sets.hash_stable(hcx, hasher))
+ if hir.attrs(id).iter().any(|attr| Level::from_attr(attr).is_some()) {
+ return id;
+ }
+ let next = hir.get_parent_node(id);
+ if next == id {
+ bug!("lint traversal reached the root of the crate");
+ }
+ id = next;
+ }
}
}
@@ -261,11 +258,11 @@ pub fn explain_lint_level_source(
));
}
}
- LintLevelSource::Node(lint_attr_name, src, reason) => {
+ LintLevelSource::Node { name: lint_attr_name, span, reason, .. } => {
if let Some(rationale) = reason {
err.note(rationale.as_str());
}
- err.span_note_once(src, "the lint level is defined here");
+ err.span_note_once(span, "the lint level is defined here");
if lint_attr_name.as_str() != name {
let level_str = level.as_str();
err.note_once(&format!(
@@ -277,23 +274,65 @@ pub fn explain_lint_level_source(
}
}
-pub fn struct_lint_level<'s, 'd>(
- sess: &'s Session,
+/// The innermost function for emitting lints.
+///
+/// If you are looking to implement a lint, look for higher-level functions,
+/// for example:
+/// - [`TyCtxt::emit_spanned_lint`]
+/// - [`TyCtxt::struct_span_lint_hir`]
+/// - [`TyCtxt::emit_lint`]
+/// - [`TyCtxt::struct_lint_node`]
+/// - `LintContext::lookup`
+///
+/// ## `decorate` signature
+///
+/// The return value of `decorate` is ignored by this function. So what is the
+/// point of returning `&'b mut DiagnosticBuilder<'a, ()>`?
+///
+/// There are 2 reasons for this signature.
+///
+/// First of all, it prevents accidental use of `.emit()` -- it's clear that the
+/// builder will be later used and shouldn't be emitted right away (this is
+/// especially important because the old API expected you to call `.emit()` in
+/// the closure).
+///
+/// Second of all, it makes the most common case of adding just a single
+/// label/suggestion much nicer: since [`DiagnosticBuilder`] methods return
+/// `&mut DiagnosticBuilder`, you can just chain methods without needing
+/// awkward `{ ...; }`:
+/// ```ignore pseudo-code
+/// struct_lint_level(
+/// ...,
+/// |lint| lint.span_label(sp, "lbl")
+/// // ^^^^^^^^^^^^^^^^^^^^^ returns `&mut DiagnosticBuilder` by default
+/// )
+/// ```
+pub fn struct_lint_level(
+ sess: &Session,
lint: &'static Lint,
level: Level,
src: LintLevelSource,
span: Option<MultiSpan>,
- decorate: impl for<'a> FnOnce(LintDiagnosticBuilder<'a, ()>) + 'd,
+ msg: impl Into<DiagnosticMessage>,
+ decorate: impl for<'a, 'b> FnOnce(
+ &'b mut DiagnosticBuilder<'a, ()>,
+ ) -> &'b mut DiagnosticBuilder<'a, ()>,
) {
// Avoid codegen bloat from monomorphization by immediately doing dyn dispatch of `decorate` to
// the "real" work.
- fn struct_lint_level_impl<'s, 'd>(
- sess: &'s Session,
+ fn struct_lint_level_impl(
+ sess: &Session,
lint: &'static Lint,
level: Level,
src: LintLevelSource,
span: Option<MultiSpan>,
- decorate: Box<dyn for<'b> FnOnce(LintDiagnosticBuilder<'b, ()>) + 'd>,
+ msg: impl Into<DiagnosticMessage>,
+ decorate: Box<
+ dyn '_
+ + for<'a, 'b> FnOnce(
+ &'b mut DiagnosticBuilder<'a, ()>,
+ ) -> &'b mut DiagnosticBuilder<'a, ()>,
+ >,
) {
// Check for future incompatibility lints and issue a stronger warning.
let future_incompatible = lint.future_incompatible;
@@ -344,6 +383,8 @@ pub fn struct_lint_level<'s, 'd>(
(Level::Deny | Level::Forbid, None) => sess.diagnostic().struct_err_lint(""),
};
+ err.set_is_lint();
+
// If this code originates in a foreign macro, aka something that this crate
// did not itself author, then it's likely that there's nothing this crate
// can do about it. We probably want to skip the lint entirely.
@@ -366,6 +407,10 @@ pub fn struct_lint_level<'s, 'd>(
}
}
+ // Delay evaluating and setting the primary message until after we've
+ // suppressed the lint due to macros.
+ err.set_primary_message(msg);
+
// Lint diagnostics that are covered by the expect level will not be emitted outside
// the compiler. It is therefore not necessary to add any information for the user.
// This will therefore directly call the decorate function which will in turn emit
@@ -373,12 +418,12 @@ pub fn struct_lint_level<'s, 'd>(
if let Level::Expect(_) = level {
let name = lint.name_lower();
err.code(DiagnosticId::Lint { name, has_future_breakage, is_force_warn: false });
- decorate(LintDiagnosticBuilder::new(err));
+
+ decorate(&mut err);
+ err.emit();
return;
}
- explain_lint_level_source(lint, level, src, &mut err);
-
let name = lint.name_lower();
let is_force_warn = matches!(level, Level::ForceWarn(_));
err.code(DiagnosticId::Lint { name, has_future_breakage, is_force_warn });
@@ -417,10 +462,12 @@ pub fn struct_lint_level<'s, 'd>(
}
}
- // Finally, run `decorate`. This function is also responsible for emitting the diagnostic.
- decorate(LintDiagnosticBuilder::new(err));
+ // Finally, run `decorate`.
+ decorate(&mut err);
+ explain_lint_level_source(lint, level, src, &mut *err);
+ err.emit()
}
- struct_lint_level_impl(sess, lint, level, src, span, Box::new(decorate))
+ struct_lint_level_impl(sess, lint, level, src, span, msg, Box::new(decorate))
}
/// Returns whether `span` originates in a foreign crate's external macro.
@@ -432,7 +479,9 @@ pub fn in_external_macro(sess: &Session, span: Span) -> bool {
match expn_data.kind {
ExpnKind::Inlined
| ExpnKind::Root
- | ExpnKind::Desugaring(DesugaringKind::ForLoop | DesugaringKind::WhileLoop) => false,
+ | ExpnKind::Desugaring(
+ DesugaringKind::ForLoop | DesugaringKind::WhileLoop | DesugaringKind::OpaqueTy,
+ ) => false,
ExpnKind::AstPass(_) | ExpnKind::Desugaring(_) => true, // well, it's "external"
ExpnKind::Macro(MacroKind::Bang, _) => {
// Dummy span for the `def_site` means it's an external macro.
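The `struct_lint_level` change above ripples into every lint call site: the primary message is now passed as its own argument, and the `decorate` closure only attaches labels or notes and returns the builder instead of calling `.emit()` itself (emission happens inside `struct_lint_level`). A self-contained sketch of that calling convention, using a toy builder type rather than the real `DiagnosticBuilder`:

```rust
/// Minimal stand-in for `DiagnosticBuilder` (illustrative only, not the rustc type).
struct DiagnosticBuilder {
    message: String,
    notes: Vec<String>,
}

impl DiagnosticBuilder {
    fn note(&mut self, note: &str) -> &mut Self {
        self.notes.push(note.to_string());
        self // returning `&mut Self` is what makes chaining inside `decorate` ergonomic
    }

    fn emit(self) {
        println!("warning: {}", self.message);
        for n in &self.notes {
            println!("  note: {n}");
        }
    }
}

/// Sketch of the new calling convention: the message is passed up front, and the
/// `decorate` closure only adds to the builder; emission happens here, not in the closure.
fn struct_lint_level_sketch(
    msg: impl Into<String>,
    decorate: impl for<'a> FnOnce(&'a mut DiagnosticBuilder) -> &'a mut DiagnosticBuilder,
) {
    let mut err = DiagnosticBuilder { message: msg.into(), notes: Vec::new() };
    decorate(&mut err);
    err.emit();
}

fn main() {
    // The closure chains builder methods and returns the borrow; it never calls `.emit()`.
    struct_lint_level_sketch("unused variable: `x`", |diag| {
        diag.note("consider prefixing it with an underscore")
    });
}
```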
diff --git a/compiler/rustc_middle/src/macros.rs b/compiler/rustc_middle/src/macros.rs
index 0e85c60a3..01fe72de6 100644
--- a/compiler/rustc_middle/src/macros.rs
+++ b/compiler/rustc_middle/src/macros.rs
@@ -54,13 +54,22 @@ macro_rules! TrivialTypeTraversalImpls {
impl<$tcx> $crate::ty::fold::TypeFoldable<$tcx> for $ty {
fn try_fold_with<F: $crate::ty::fold::FallibleTypeFolder<$tcx>>(
self,
- _: &mut F
- ) -> ::std::result::Result<$ty, F::Error> {
+ _: &mut F,
+ ) -> ::std::result::Result<Self, F::Error> {
Ok(self)
}
+
+ #[inline]
+ fn fold_with<F: $crate::ty::fold::TypeFolder<$tcx>>(
+ self,
+ _: &mut F,
+ ) -> Self {
+ self
+ }
}
impl<$tcx> $crate::ty::visit::TypeVisitable<$tcx> for $ty {
+ #[inline]
fn visit_with<F: $crate::ty::visit::TypeVisitor<$tcx>>(
&self,
_: &mut F)
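`TrivialTypeTraversalImpls!` generates identity implementations for types that contain nothing a folder or visitor cares about; the hunk above just adds an infallible `fold_with` fast path next to `try_fold_with`. A toy model of the same idea, with simplified traits that stand in for the rustc `TypeFoldable`/`TypeFolder`:

```rust
/// Toy versions of the folding traits, just to show the shape of a trivial impl.
trait TypeFolder {
    fn fold_u32(&mut self, n: u32) -> u32;
}

trait TypeFoldable: Sized {
    /// Fallible entry point; the real trait is parameterized over a folder error type.
    fn try_fold_with<F: TypeFolder>(self, folder: &mut F) -> Result<Self, ()>;

    /// Infallible fast path; by default it just defers to `try_fold_with`.
    fn fold_with<F: TypeFolder>(self, folder: &mut F) -> Self {
        self.try_fold_with(folder).unwrap()
    }
}

/// A macro in the spirit of `TrivialTypeTraversalImpls!`: for types that contain
/// no types, regions, or consts, both methods are the identity and never touch the folder.
macro_rules! trivial_type_traversal_impls {
    ($($ty:ty,)*) => {
        $(
            impl TypeFoldable for $ty {
                fn try_fold_with<F: TypeFolder>(self, _: &mut F) -> Result<Self, ()> {
                    Ok(self)
                }
                #[inline]
                fn fold_with<F: TypeFolder>(self, _: &mut F) -> Self {
                    self
                }
            }
        )*
    };
}

trivial_type_traversal_impls! {
    bool,
    char,
}

struct NoopFolder;
impl TypeFolder for NoopFolder {
    fn fold_u32(&mut self, n: u32) -> u32 {
        n
    }
}

fn main() {
    // Trivial types come back unchanged without ever consulting the folder.
    assert!(true.fold_with(&mut NoopFolder));
    assert_eq!('x'.fold_with(&mut NoopFolder), 'x');
}
```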
diff --git a/compiler/rustc_middle/src/metadata.rs b/compiler/rustc_middle/src/metadata.rs
index c8e78747d..5ff014c78 100644
--- a/compiler/rustc_middle/src/metadata.rs
+++ b/compiler/rustc_middle/src/metadata.rs
@@ -2,6 +2,7 @@ use crate::ty;
use rustc_hir::def::Res;
use rustc_macros::HashStable;
+use rustc_span::def_id::DefId;
use rustc_span::symbol::Ident;
use rustc_span::Span;
@@ -18,7 +19,7 @@ pub struct ModChild {
/// Local variables cannot be exported, so this `Res` doesn't need the ID parameter.
pub res: Res<!>,
/// Visibility of the item.
- pub vis: ty::Visibility,
+ pub vis: ty::Visibility<DefId>,
/// Span of the item.
pub span: Span,
/// A proper `macro_rules` item (not a reexport).
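`ty::Visibility` becoming generic over its id parameter lets metadata store a cross-crate `DefId` while most in-crate users keep a `LocalDefId`. A simplified standalone sketch of such a parameterized visibility type; the enum below is illustrative and not the exact rustc definition:

```rust
/// Illustrative stand-in for a visibility type that is generic over its id type,
/// so the same enum can hold a crate-local id or a cross-crate id.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
enum Visibility<Id> {
    /// Visible everywhere.
    Public,
    /// Visible only within the module identified by `Id`.
    Restricted(Id),
}

impl<Id> Visibility<Id> {
    fn is_public(&self) -> bool {
        matches!(self, Visibility::Public)
    }

    /// Convert the id, e.g. local id to global id when writing crate metadata.
    fn map_id<OutId>(self, f: impl FnOnce(Id) -> OutId) -> Visibility<OutId> {
        match self {
            Visibility::Public => Visibility::Public,
            Visibility::Restricted(id) => Visibility::Restricted(f(id)),
        }
    }
}

fn main() {
    // Pretend `u32` plays the role of a LocalDefId and `u64` that of a DefId.
    let local: Visibility<u32> = Visibility::Restricted(7);
    let cross_crate: Visibility<u64> = local.map_id(u64::from);
    assert!(!cross_crate.is_public());
    assert_eq!(cross_crate, Visibility::Restricted(7));
}
```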
diff --git a/compiler/rustc_middle/src/middle/lang_items.rs b/compiler/rustc_middle/src/middle/lang_items.rs
index cc9706f2d..31c20fa14 100644
--- a/compiler/rustc_middle/src/middle/lang_items.rs
+++ b/compiler/rustc_middle/src/middle/lang_items.rs
@@ -18,11 +18,11 @@ impl<'tcx> TyCtxt<'tcx> {
/// Returns the `DefId` for a given `LangItem`.
/// If not found, fatally aborts compilation.
pub fn require_lang_item(self, lang_item: LangItem, span: Option<Span>) -> DefId {
- self.lang_items().require(lang_item).unwrap_or_else(|msg| {
+ self.lang_items().require(lang_item).unwrap_or_else(|err| {
if let Some(span) = span {
- self.sess.span_fatal(span, &msg)
+ self.sess.span_fatal(span, err.to_string())
} else {
- self.sess.fatal(&msg)
+ self.sess.fatal(err.to_string())
}
})
}
diff --git a/compiler/rustc_middle/src/middle/limits.rs b/compiler/rustc_middle/src/middle/limits.rs
index acced0492..12aef66bc 100644
--- a/compiler/rustc_middle/src/middle/limits.rs
+++ b/compiler/rustc_middle/src/middle/limits.rs
@@ -10,6 +10,7 @@
//! just peeks and looks for that attribute.
use crate::bug;
+use crate::error::LimitInvalid;
use crate::ty;
use rustc_ast::Attribute;
use rustc_session::Session;
@@ -37,7 +38,7 @@ pub fn provide(providers: &mut ty::query::Providers) {
tcx.hir().krate_attrs(),
tcx.sess,
sym::const_eval_limit,
- 1_000_000,
+ 2_000_000,
),
}
}
@@ -56,9 +57,6 @@ fn get_limit(krate_attrs: &[Attribute], sess: &Session, name: Symbol, default: u
match s.as_str().parse() {
Ok(n) => return Limit::new(n),
Err(e) => {
- let mut err =
- sess.struct_span_err(attr.span, "`limit` must be a non-negative integer");
-
let value_span = attr
.meta()
.and_then(|meta| meta.name_value_literal_span())
@@ -74,9 +72,7 @@ fn get_limit(krate_attrs: &[Attribute], sess: &Session, name: Symbol, default: u
IntErrorKind::Zero => bug!("zero is a valid `limit`"),
kind => bug!("unimplemented IntErrorKind variant: {:?}", kind),
};
-
- err.span_label(value_span, error_str);
- err.emit();
+ sess.emit_err(LimitInvalid { span: attr.span, value_span, error_str });
}
}
}
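The hunk above only swaps the hand-rolled `struct_span_err` for a typed `LimitInvalid` diagnostic; the parsing and `IntErrorKind` classification stay as they were. A standalone sketch of that classification logic (the function name and exact messages here are illustrative):

```rust
use std::num::IntErrorKind;

/// Classify a failed `usize` parse of a limit attribute into the message the
/// compiler attaches to the value span (mirrors the `match` kept in `get_limit`).
fn parse_limit(value: &str) -> Result<usize, &'static str> {
    match value.parse::<usize>() {
        Ok(n) => Ok(n),
        Err(e) => Err(match e.kind() {
            IntErrorKind::PosOverflow => "`limit` is too large",
            IntErrorKind::Empty => "`limit` must be a non-negative integer",
            IntErrorKind::InvalidDigit => "not a valid integer",
            // `usize` can't underflow, and zero parses successfully, so the
            // remaining kinds can't occur for this input type.
            _ => unreachable!("unexpected IntErrorKind"),
        }),
    }
}

fn main() {
    assert_eq!(parse_limit("128"), Ok(128));
    assert_eq!(parse_limit(""), Err("`limit` must be a non-negative integer"));
    assert_eq!(parse_limit("not-a-number"), Err("not a valid integer"));
    assert_eq!(parse_limit("99999999999999999999999"), Err("`limit` is too large"));
}
```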
diff --git a/compiler/rustc_middle/src/middle/privacy.rs b/compiler/rustc_middle/src/middle/privacy.rs
index 751c7f464..9c68c7504 100644
--- a/compiler/rustc_middle/src/middle/privacy.rs
+++ b/compiler/rustc_middle/src/middle/privacy.rs
@@ -1,64 +1,218 @@
//! A pass that checks to make sure private fields and methods aren't used
//! outside their scopes. This pass will also generate a set of exported items
//! which are available for use externally when compiled as a library.
-
+use crate::ty::{DefIdTree, Visibility};
use rustc_data_structures::fx::FxHashMap;
use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
use rustc_macros::HashStable;
use rustc_query_system::ich::StableHashingContext;
-use rustc_span::def_id::LocalDefId;
+use rustc_span::def_id::{DefId, LocalDefId};
use std::hash::Hash;
-/// Represents the levels of accessibility an item can have.
+/// Represents the levels of effective visibility an item can have.
///
-/// The variants are sorted in ascending order of accessibility.
+/// The variants are sorted in ascending order of directness.
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, HashStable)]
-pub enum AccessLevel {
- /// Superset of `AccessLevel::Reachable` used to mark impl Trait items.
- ReachableFromImplTrait,
- /// Exported items + items participating in various kinds of public interfaces,
- /// but not directly nameable. For example, if function `fn f() -> T {...}` is
- /// public, then type `T` is reachable. Its values can be obtained by other crates
- /// even if the type itself is not nameable.
+pub enum Level {
+ /// Superset of `Reachable` including items leaked through return position `impl Trait`.
+ ReachableThroughImplTrait,
+ /// Item is either reexported, or leaked through any kind of interface.
+ /// For example, if function `fn f() -> T {...}` is directly public, then type `T` is publicly
+ /// reachable and its values can be obtained by other crates even if the type itself is not
+ /// nameable.
Reachable,
- /// Public items + items accessible to other crates with the help of `pub use` re-exports.
- Exported,
- /// Items accessible to other crates directly, without the help of re-exports.
- Public,
+ /// Item is accessible either directly, or with help of `use` reexports.
+ Reexported,
+ /// Item is directly accessible, without help of reexports.
+ Direct,
+}
+
+impl Level {
+ pub fn all_levels() -> [Level; 4] {
+ [Level::Direct, Level::Reexported, Level::Reachable, Level::ReachableThroughImplTrait]
+ }
+}
+
+#[derive(Clone, Copy, PartialEq, Eq, Debug, HashStable)]
+pub struct EffectiveVisibility {
+ direct: Visibility,
+ reexported: Visibility,
+ reachable: Visibility,
+ reachable_through_impl_trait: Visibility,
+}
+
+impl EffectiveVisibility {
+ pub fn at_level(&self, level: Level) -> &Visibility {
+ match level {
+ Level::Direct => &self.direct,
+ Level::Reexported => &self.reexported,
+ Level::Reachable => &self.reachable,
+ Level::ReachableThroughImplTrait => &self.reachable_through_impl_trait,
+ }
+ }
+
+ fn at_level_mut(&mut self, level: Level) -> &mut Visibility {
+ match level {
+ Level::Direct => &mut self.direct,
+ Level::Reexported => &mut self.reexported,
+ Level::Reachable => &mut self.reachable,
+ Level::ReachableThroughImplTrait => &mut self.reachable_through_impl_trait,
+ }
+ }
+
+ pub fn is_public_at_level(&self, level: Level) -> bool {
+ self.at_level(level).is_public()
+ }
+
+ pub fn from_vis(vis: Visibility) -> EffectiveVisibility {
+ EffectiveVisibility {
+ direct: vis,
+ reexported: vis,
+ reachable: vis,
+ reachable_through_impl_trait: vis,
+ }
+ }
}
-/// Holds a map of accessibility levels for reachable HIR nodes.
+/// Holds a map of effective visibilities for reachable HIR nodes.
#[derive(Debug, Clone)]
-pub struct AccessLevels<Id = LocalDefId> {
- pub map: FxHashMap<Id, AccessLevel>,
+pub struct EffectiveVisibilities<Id = LocalDefId> {
+ map: FxHashMap<Id, EffectiveVisibility>,
}
-impl<Id: Hash + Eq> AccessLevels<Id> {
- /// See `AccessLevel::Reachable`.
+impl<Id: Hash + Eq + Copy> EffectiveVisibilities<Id> {
+ pub fn is_public_at_level(&self, id: Id, level: Level) -> bool {
+ self.effective_vis(id)
+ .map_or(false, |effective_vis| effective_vis.is_public_at_level(level))
+ }
+
+ /// See `Level::Reachable`.
pub fn is_reachable(&self, id: Id) -> bool {
- self.map.get(&id) >= Some(&AccessLevel::Reachable)
+ self.is_public_at_level(id, Level::Reachable)
}
- /// See `AccessLevel::Exported`.
+ /// See `Level::Reexported`.
pub fn is_exported(&self, id: Id) -> bool {
- self.map.get(&id) >= Some(&AccessLevel::Exported)
+ self.is_public_at_level(id, Level::Reexported)
+ }
+
+ /// See `Level::Direct`.
+ pub fn is_directly_public(&self, id: Id) -> bool {
+ self.is_public_at_level(id, Level::Direct)
+ }
+
+ pub fn public_at_level(&self, id: Id) -> Option<Level> {
+ self.effective_vis(id).and_then(|effective_vis| {
+ for level in Level::all_levels() {
+ if effective_vis.is_public_at_level(level) {
+ return Some(level);
+ }
+ }
+ None
+ })
+ }
+
+ pub fn effective_vis(&self, id: Id) -> Option<&EffectiveVisibility> {
+ self.map.get(&id)
}
- /// See `AccessLevel::Public`.
- pub fn is_public(&self, id: Id) -> bool {
- self.map.get(&id) >= Some(&AccessLevel::Public)
+ pub fn iter(&self) -> impl Iterator<Item = (&Id, &EffectiveVisibility)> {
+ self.map.iter()
+ }
+
+ pub fn map_id<OutId: Hash + Eq + Copy>(
+ &self,
+ f: impl Fn(Id) -> OutId,
+ ) -> EffectiveVisibilities<OutId> {
+ EffectiveVisibilities { map: self.map.iter().map(|(k, v)| (f(*k), *v)).collect() }
+ }
+
+ pub fn set_public_at_level(
+ &mut self,
+ id: Id,
+ default_vis: impl FnOnce() -> Visibility,
+ level: Level,
+ ) {
+ let mut effective_vis = self
+ .effective_vis(id)
+ .copied()
+ .unwrap_or_else(|| EffectiveVisibility::from_vis(default_vis()));
+ for l in Level::all_levels() {
+ if l <= level {
+ *effective_vis.at_level_mut(l) = Visibility::Public;
+ }
+ }
+ self.map.insert(id, effective_vis);
+ }
+}
+
+impl<Id: Hash + Eq + Copy + Into<DefId>> EffectiveVisibilities<Id> {
+ // `parent_id` is not necessarily a parent in the source code tree;
+ // it is the node from which the maximum effective visibility is inherited.
+ pub fn update(
+ &mut self,
+ id: Id,
+ nominal_vis: Visibility,
+ default_vis: impl FnOnce() -> Visibility,
+ parent_id: Id,
+ level: Level,
+ tree: impl DefIdTree,
+ ) -> bool {
+ let mut changed = false;
+ let mut current_effective_vis = self.effective_vis(id).copied().unwrap_or_else(|| {
+ if id.into().is_crate_root() {
+ EffectiveVisibility::from_vis(Visibility::Public)
+ } else {
+ EffectiveVisibility::from_vis(default_vis())
+ }
+ });
+ if let Some(inherited_effective_vis) = self.effective_vis(parent_id) {
+ let mut inherited_effective_vis_at_prev_level =
+ *inherited_effective_vis.at_level(level);
+ let mut calculated_effective_vis = inherited_effective_vis_at_prev_level;
+ for l in Level::all_levels() {
+ if level >= l {
+ let inherited_effective_vis_at_level = *inherited_effective_vis.at_level(l);
+ let current_effective_vis_at_level = current_effective_vis.at_level_mut(l);
+ // The effective visibility for `id` does not need to be recalculated if the
+ // effective visibility inherited from `parent_id` is unchanged at the next level.
+ if !(inherited_effective_vis_at_prev_level == inherited_effective_vis_at_level
+ && level != l)
+ {
+ calculated_effective_vis =
+ if nominal_vis.is_at_least(inherited_effective_vis_at_level, tree) {
+ inherited_effective_vis_at_level
+ } else {
+ nominal_vis
+ };
+ }
+ // The effective visibility can't be decreased by a later `update` call
+ // for the same id.
+ if *current_effective_vis_at_level != calculated_effective_vis
+ && calculated_effective_vis
+ .is_at_least(*current_effective_vis_at_level, tree)
+ {
+ changed = true;
+ *current_effective_vis_at_level = calculated_effective_vis;
+ }
+ inherited_effective_vis_at_prev_level = inherited_effective_vis_at_level;
+ }
+ }
+ }
+ self.map.insert(id, current_effective_vis);
+ changed
}
}
-impl<Id> Default for AccessLevels<Id> {
+impl<Id> Default for EffectiveVisibilities<Id> {
fn default() -> Self {
- AccessLevels { map: Default::default() }
+ EffectiveVisibilities { map: Default::default() }
}
}
-impl<'a> HashStable<StableHashingContext<'a>> for AccessLevels {
+impl<'a> HashStable<StableHashingContext<'a>> for EffectiveVisibilities {
fn hash_stable(&self, hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) {
- let AccessLevels { ref map } = *self;
+ let EffectiveVisibilities { ref map } = *self;
map.hash_stable(hcx, hasher);
}
}
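`EffectiveVisibilities` replaces the single `AccessLevel` per item with one visibility per `Level`, and `set_public_at_level` marks an item public at the given level and at every less direct level implied by it (the derived `Ord` on `Level` runs from least to most direct). A simplified, self-contained sketch of that table, using a boolean flag per level instead of the real `Visibility` type:

```rust
/// Same ordering as in the patch: variants are declared from least to most direct,
/// so `ReachableThroughImplTrait < Reachable < Reexported < Direct`.
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord)]
enum Level {
    ReachableThroughImplTrait,
    Reachable,
    Reexported,
    Direct,
}

const ALL_LEVELS: [Level; 4] =
    [Level::Direct, Level::Reexported, Level::Reachable, Level::ReachableThroughImplTrait];

/// Simplified effective visibility: one flag per level instead of one `Visibility` per level.
#[derive(Default, Debug)]
struct EffectiveVisibility {
    public_at: [bool; 4],
}

impl EffectiveVisibility {
    fn index(level: Level) -> usize {
        level as usize
    }

    fn is_public_at_level(&self, level: Level) -> bool {
        self.public_at[Self::index(level)]
    }

    /// Marking an item public at some level also makes it public at every
    /// less direct level (being `Direct` implies being `Reachable`, and so on).
    fn set_public_at_level(&mut self, level: Level) {
        for l in ALL_LEVELS {
            if l <= level {
                self.public_at[Self::index(l)] = true;
            }
        }
    }
}

fn main() {
    let mut ev = EffectiveVisibility::default();
    ev.set_public_at_level(Level::Reexported);
    assert!(ev.is_public_at_level(Level::Reachable)); // implied by Reexported
    assert!(!ev.is_public_at_level(Level::Direct)); // not directly public
}
```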
diff --git a/compiler/rustc_middle/src/middle/resolve_lifetime.rs b/compiler/rustc_middle/src/middle/resolve_lifetime.rs
index 9b2f44567..c3bf1c717 100644
--- a/compiler/rustc_middle/src/middle/resolve_lifetime.rs
+++ b/compiler/rustc_middle/src/middle/resolve_lifetime.rs
@@ -2,15 +2,15 @@
use crate::ty;
-use rustc_data_structures::fx::{FxHashMap, FxHashSet};
-use rustc_hir::def_id::{DefId, LocalDefId};
-use rustc_hir::ItemLocalId;
+use rustc_data_structures::fx::FxHashMap;
+use rustc_hir::def_id::DefId;
+use rustc_hir::{ItemLocalId, OwnerId};
use rustc_macros::HashStable;
#[derive(Clone, Copy, PartialEq, Eq, Hash, TyEncodable, TyDecodable, Debug, HashStable)]
pub enum Region {
Static,
- EarlyBound(/* index */ u32, /* lifetime decl */ DefId),
+ EarlyBound(/* lifetime decl */ DefId),
LateBound(ty::DebruijnIndex, /* late-bound index */ u32, /* lifetime decl */ DefId),
Free(DefId, /* lifetime decl */ DefId),
}
@@ -35,7 +35,13 @@ impl<T: PartialEq> Set1<T> {
}
}
-pub type ObjectLifetimeDefault = Set1<Region>;
+#[derive(Copy, Clone, Debug, HashStable, Encodable, Decodable)]
+pub enum ObjectLifetimeDefault {
+ Empty,
+ Static,
+ Ambiguous,
+ Param(DefId),
+}
/// Maps the id of each lifetime reference to the lifetime decl
/// that it corresponds to.
@@ -43,12 +49,7 @@ pub type ObjectLifetimeDefault = Set1<Region>;
pub struct ResolveLifetimes {
/// Maps from every use of a named (not anonymous) lifetime to a
/// `Region` describing how that region is bound
- pub defs: FxHashMap<LocalDefId, FxHashMap<ItemLocalId, Region>>,
-
- /// Set of lifetime def ids that are late-bound; a region can
- /// be late-bound if (a) it does NOT appear in a where-clause and
- /// (b) it DOES appear in the arguments.
- pub late_bound: FxHashMap<LocalDefId, FxHashSet<LocalDefId>>,
+ pub defs: FxHashMap<OwnerId, FxHashMap<ItemLocalId, Region>>,
- pub late_bound_vars: FxHashMap<LocalDefId, FxHashMap<ItemLocalId, Vec<ty::BoundVariableKind>>>,
+ pub late_bound_vars: FxHashMap<OwnerId, FxHashMap<ItemLocalId, Vec<ty::BoundVariableKind>>>,
}
diff --git a/compiler/rustc_middle/src/middle/stability.rs b/compiler/rustc_middle/src/middle/stability.rs
index 414912dd0..61bc089e4 100644
--- a/compiler/rustc_middle/src/middle/stability.rs
+++ b/compiler/rustc_middle/src/middle/stability.rs
@@ -5,7 +5,7 @@ pub use self::StabilityLevel::*;
use crate::ty::{self, DefIdTree, TyCtxt};
use rustc_ast::NodeId;
-use rustc_attr::{self as attr, ConstStability, Deprecation, Stability};
+use rustc_attr::{self as attr, ConstStability, DefaultBodyStability, Deprecation, Stability};
use rustc_data_structures::fx::FxHashMap;
use rustc_errors::{Applicability, Diagnostic};
use rustc_feature::GateIssue;
@@ -61,6 +61,7 @@ pub struct Index {
/// are filled by the annotator.
pub stab_map: FxHashMap<LocalDefId, Stability>,
pub const_stab_map: FxHashMap<LocalDefId, ConstStability>,
+ pub default_body_stab_map: FxHashMap<LocalDefId, DefaultBodyStability>,
pub depr_map: FxHashMap<LocalDefId, DeprecationEntry>,
/// Mapping from feature name to feature name based on the `implied_by` field of `#[unstable]`
/// attributes. If a `#[unstable(feature = "implier", implied_by = "impliee")]` attribute
@@ -86,6 +87,10 @@ impl Index {
self.const_stab_map.get(&def_id).copied()
}
+ pub fn local_default_body_stability(&self, def_id: LocalDefId) -> Option<DefaultBodyStability> {
+ self.default_body_stab_map.get(&def_id).copied()
+ }
+
pub fn local_deprecation_entry(&self, def_id: LocalDefId) -> Option<DeprecationEntry> {
self.depr_map.get(&def_id).cloned()
}
@@ -248,13 +253,12 @@ fn late_report_deprecation(
return;
}
let method_span = method_span.unwrap_or(span);
- tcx.struct_span_lint_hir(lint, hir_id, method_span, |lint| {
- let mut diag = lint.build(message);
+ tcx.struct_span_lint_hir(lint, hir_id, method_span, message, |diag| {
if let hir::Node::Expr(_) = tcx.hir().get(hir_id) {
let kind = tcx.def_kind(def_id).descr(def_id);
- deprecation_suggestion(&mut diag, kind, suggestion, method_span);
+ deprecation_suggestion(diag, kind, suggestion, method_span);
}
- diag.emit();
+ diag
});
}
@@ -288,7 +292,7 @@ fn skip_stability_check_due_to_privacy(tcx: TyCtxt<'_>, def_id: DefId) -> bool {
// These are not visible outside crate; therefore
// stability markers are irrelevant, if even present.
- ty::Visibility::Restricted(..) | ty::Visibility::Invisible => true,
+ ty::Visibility::Restricted(..) => true,
}
}
@@ -416,6 +420,12 @@ impl<'tcx> TyCtxt<'tcx> {
return EvalResult::Allow;
}
+ // Only the cross-crate scenario matters when checking unstable APIs
+ let cross_crate = !def_id.is_local();
+ if !cross_crate {
+ return EvalResult::Allow;
+ }
+
let stability = self.lookup_stability(def_id);
debug!(
"stability: \
@@ -423,12 +433,6 @@ impl<'tcx> TyCtxt<'tcx> {
def_id, span, stability
);
- // Only the cross-crate scenario matters when checking unstable APIs
- let cross_crate = !def_id.is_local();
- if !cross_crate {
- return EvalResult::Allow;
- }
-
// Issue #38412: private items lack stability markers.
if skip_stability_check_due_to_privacy(self, def_id) {
return EvalResult::Allow;
@@ -492,6 +496,62 @@ impl<'tcx> TyCtxt<'tcx> {
}
}
+ /// Evaluates the default-impl stability of an item.
+ ///
+ /// Returns `EvalResult::Allow` if the item's default implementation is stable, or unstable but the corresponding
+ /// `#![feature]` has been provided. Otherwise, returns `EvalResult::Deny`, which describes the
+ /// offending unstable feature.
+ pub fn eval_default_body_stability(self, def_id: DefId, span: Span) -> EvalResult {
+ let is_staged_api = self.lookup_stability(def_id.krate.as_def_id()).is_some();
+ if !is_staged_api {
+ return EvalResult::Allow;
+ }
+
+ // Only the cross-crate scenario matters when checking unstable APIs
+ let cross_crate = !def_id.is_local();
+ if !cross_crate {
+ return EvalResult::Allow;
+ }
+
+ let stability = self.lookup_default_body_stability(def_id);
+ debug!(
+ "body stability: inspecting def_id={def_id:?} span={span:?} of stability={stability:?}"
+ );
+
+ // Issue #38412: private items lack stability markers.
+ if skip_stability_check_due_to_privacy(self, def_id) {
+ return EvalResult::Allow;
+ }
+
+ match stability {
+ Some(DefaultBodyStability {
+ level: attr::Unstable { reason, issue, is_soft, .. },
+ feature,
+ }) => {
+ if span.allows_unstable(feature) {
+ debug!("body stability: skipping span={:?} since it is internal", span);
+ return EvalResult::Allow;
+ }
+ if self.features().active(feature) {
+ return EvalResult::Allow;
+ }
+
+ EvalResult::Deny {
+ feature,
+ reason: reason.to_opt_reason(),
+ issue,
+ suggestion: None,
+ is_soft,
+ }
+ }
+ Some(_) => {
+ // Stable APIs are always ok to call
+ EvalResult::Allow
+ }
+ None => EvalResult::Unmarked,
+ }
+ }
+
/// Checks if an item is stable or error out.
///
/// If the item defined by `def_id` is unstable and the corresponding `#![feature]` does not
@@ -560,9 +620,7 @@ impl<'tcx> TyCtxt<'tcx> {
unmarked: impl FnOnce(Span, DefId),
) -> bool {
let soft_handler = |lint, span, msg: &_| {
- self.struct_span_lint_hir(lint, id.unwrap_or(hir::CRATE_HIR_ID), span, |lint| {
- lint.build(msg).emit();
- })
+ self.struct_span_lint_hir(lint, id.unwrap_or(hir::CRATE_HIR_ID), span, msg, |lint| lint)
};
let eval_result =
self.eval_stability_allow_unstable(def_id, id, span, method_span, allow_unstable);
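`eval_default_body_stability` follows the same decision ladder as the existing stability check: allow when the defining crate is not staged-API, allow local items, allow when the feature is active, otherwise deny with the unstable feature's metadata. A stripped-down standalone sketch of that ladder, omitting the span-allows-unstable and privacy checks and using toy types in place of the rustc ones:

```rust
/// Simplified stand-ins for the rustc types involved (illustrative only).
#[derive(Debug, PartialEq)]
enum EvalResult {
    Allow,
    Deny { feature: &'static str },
    Unmarked,
}

struct Item {
    is_local: bool,
    /// `Some(feature)` when the default body is `#[unstable]`, `None` when unmarked.
    unstable_feature: Option<&'static str>,
}

/// Mirrors the decision ladder of `eval_default_body_stability`.
fn eval_default_body_stability(
    item: &Item,
    staged_api: bool,
    active_features: &[&str],
) -> EvalResult {
    if !staged_api {
        return EvalResult::Allow;
    }
    // Only the cross-crate scenario matters when checking unstable APIs.
    if item.is_local {
        return EvalResult::Allow;
    }
    match item.unstable_feature {
        Some(feature) if active_features.contains(&feature) => EvalResult::Allow,
        Some(feature) => EvalResult::Deny { feature },
        None => EvalResult::Unmarked,
    }
}

fn main() {
    let item = Item { is_local: false, unstable_feature: Some("example_unstable_feature") };
    assert_eq!(
        eval_default_body_stability(&item, true, &[]),
        EvalResult::Deny { feature: "example_unstable_feature" }
    );
    assert_eq!(
        eval_default_body_stability(&item, true, &["example_unstable_feature"]),
        EvalResult::Allow
    );
}
```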
diff --git a/compiler/rustc_middle/src/mir/basic_blocks.rs b/compiler/rustc_middle/src/mir/basic_blocks.rs
index 78080fcd5..752cbdeae 100644
--- a/compiler/rustc_middle/src/mir/basic_blocks.rs
+++ b/compiler/rustc_middle/src/mir/basic_blocks.rs
@@ -86,7 +86,7 @@ impl<'tcx> BasicBlocks<'tcx> {
///
/// You will only ever need this if you have also called [`BasicBlocks::as_mut_preserves_cfg`].
/// All other methods that allow you to mutate the basic blocks also call this method
- /// themselves, thereby avoiding any risk of accidentaly cache invalidation.
+ /// themselves, thereby avoiding any risk of accidental cache invalidation.
pub fn invalidate_cfg_cache(&mut self) {
self.predecessor_cache.invalidate();
self.switch_source_cache.invalidate();
diff --git a/compiler/rustc_middle/src/mir/generic_graph.rs b/compiler/rustc_middle/src/mir/generic_graph.rs
index f3621cd99..d1f3561c0 100644
--- a/compiler/rustc_middle/src/mir/generic_graph.rs
+++ b/compiler/rustc_middle/src/mir/generic_graph.rs
@@ -12,14 +12,14 @@ pub fn mir_fn_to_generic_graph<'tcx>(tcx: TyCtxt<'tcx>, body: &Body<'_>) -> Grap
// Nodes
let nodes: Vec<Node> = body
- .basic_blocks()
+ .basic_blocks
.iter_enumerated()
.map(|(block, _)| bb_to_graph_node(block, body, dark_mode))
.collect();
// Edges
let mut edges = Vec::new();
- for (source, _) in body.basic_blocks().iter_enumerated() {
+ for (source, _) in body.basic_blocks.iter_enumerated() {
let def_id = body.source.def_id();
let terminator = body[source].terminator();
let labels = terminator.kind.fmt_successor_labels();
diff --git a/compiler/rustc_middle/src/mir/interpret/allocation.rs b/compiler/rustc_middle/src/mir/interpret/allocation.rs
index db7e0fb8a..37ec04b07 100644
--- a/compiler/rustc_middle/src/mir/interpret/allocation.rs
+++ b/compiler/rustc_middle/src/mir/interpret/allocation.rs
@@ -16,8 +16,8 @@ use rustc_target::abi::{Align, HasDataLayout, Size};
use super::{
read_target_uint, write_target_uint, AllocId, InterpError, InterpResult, Pointer, Provenance,
- ResourceExhaustionInfo, Scalar, ScalarMaybeUninit, ScalarSizeMismatch, UndefinedBehaviorInfo,
- UninitBytesAccess, UnsupportedOpInfo,
+ ResourceExhaustionInfo, Scalar, ScalarSizeMismatch, UndefinedBehaviorInfo, UninitBytesAccess,
+ UnsupportedOpInfo,
};
use crate::ty;
@@ -34,11 +34,11 @@ pub struct Allocation<Prov = AllocId, Extra = ()> {
/// The actual bytes of the allocation.
/// Note that the bytes of a pointer represent the offset of the pointer.
bytes: Box<[u8]>,
- /// Maps from byte addresses to extra data for each pointer.
+ /// Maps from byte addresses to extra provenance data for each pointer.
/// Only the first byte of a pointer is inserted into the map; i.e.,
/// every entry in this map applies to `pointer_size` consecutive bytes starting
/// at the given offset.
- relocations: Relocations<Prov>,
+ provenance: ProvenanceMap<Prov>,
/// Denotes which part of this allocation is initialized.
init_mask: InitMask,
/// The alignment of the allocation to detect unaligned reads.
@@ -84,7 +84,7 @@ impl hash::Hash for Allocation {
}
// Hash the other fields as usual.
- self.relocations.hash(state);
+ self.provenance.hash(state);
self.init_mask.hash(state);
self.align.hash(state);
self.mutability.hash(state);
@@ -130,6 +130,8 @@ pub enum AllocError {
ReadPointerAsBytes,
/// Partially overwriting a pointer.
PartialPointerOverwrite(Size),
+ /// Partially copying a pointer.
+ PartialPointerCopy(Size),
/// Using uninitialized data where it is not allowed.
InvalidUninitBytes(Option<UninitBytesAccess>),
}
@@ -152,6 +154,9 @@ impl AllocError {
PartialPointerOverwrite(offset) => InterpError::Unsupported(
UnsupportedOpInfo::PartialPointerOverwrite(Pointer::new(alloc_id, offset)),
),
+ PartialPointerCopy(offset) => InterpError::Unsupported(
+ UnsupportedOpInfo::PartialPointerCopy(Pointer::new(alloc_id, offset)),
+ ),
InvalidUninitBytes(info) => InterpError::UndefinedBehavior(
UndefinedBehaviorInfo::InvalidUninitBytes(info.map(|b| (alloc_id, b))),
),
@@ -211,7 +216,7 @@ impl<Prov> Allocation<Prov> {
let size = Size::from_bytes(bytes.len());
Self {
bytes,
- relocations: Relocations::new(),
+ provenance: ProvenanceMap::new(),
init_mask: InitMask::new(size, true),
align,
mutability,
@@ -246,7 +251,7 @@ impl<Prov> Allocation<Prov> {
let bytes = unsafe { bytes.assume_init() };
Ok(Allocation {
bytes,
- relocations: Relocations::new(),
+ provenance: ProvenanceMap::new(),
init_mask: InitMask::new(size, false),
align,
mutability: Mutability::Mut,
@@ -266,22 +271,22 @@ impl Allocation {
) -> Result<Allocation<Prov, Extra>, Err> {
// Compute new pointer provenance, which also adjusts the bytes.
let mut bytes = self.bytes;
- let mut new_relocations = Vec::with_capacity(self.relocations.0.len());
+ let mut new_provenance = Vec::with_capacity(self.provenance.0.len());
let ptr_size = cx.data_layout().pointer_size.bytes_usize();
let endian = cx.data_layout().endian;
- for &(offset, alloc_id) in self.relocations.iter() {
+ for &(offset, alloc_id) in self.provenance.iter() {
let idx = offset.bytes_usize();
let ptr_bytes = &mut bytes[idx..idx + ptr_size];
let bits = read_target_uint(endian, ptr_bytes).unwrap();
let (ptr_prov, ptr_offset) =
adjust_ptr(Pointer::new(alloc_id, Size::from_bytes(bits)))?.into_parts();
write_target_uint(endian, ptr_bytes, ptr_offset.bytes().into()).unwrap();
- new_relocations.push((offset, ptr_prov));
+ new_provenance.push((offset, ptr_prov));
}
// Create allocation.
Ok(Allocation {
bytes,
- relocations: Relocations::from_presorted(new_relocations),
+ provenance: ProvenanceMap::from_presorted(new_provenance),
init_mask: self.init_mask,
align: self.align,
mutability: self.mutability,
@@ -300,8 +305,8 @@ impl<Prov, Extra> Allocation<Prov, Extra> {
Size::from_bytes(self.len())
}
- /// Looks at a slice which may describe uninitialized bytes or describe a relocation. This differs
- /// from `get_bytes_with_uninit_and_ptr` in that it does no relocation checks (even on the
+ /// Looks at a slice which may contain uninitialized bytes or provenance. This differs
+ /// from `get_bytes_with_uninit_and_ptr` in that it does no provenance checks (even on the
/// edges) at all.
/// This must not be used for reads affecting the interpreter execution.
pub fn inspect_with_uninit_and_ptr_outside_interpreter(&self, range: Range<usize>) -> &[u8] {
@@ -313,74 +318,47 @@ impl<Prov, Extra> Allocation<Prov, Extra> {
&self.init_mask
}
- /// Returns the relocation list.
- pub fn relocations(&self) -> &Relocations<Prov> {
- &self.relocations
+ /// Returns the provenance map.
+ pub fn provenance(&self) -> &ProvenanceMap<Prov> {
+ &self.provenance
}
}
/// Byte accessors.
impl<Prov: Provenance, Extra> Allocation<Prov, Extra> {
/// This is the entirely abstraction-violating way to just grab the raw bytes without
- /// caring about relocations. It just deduplicates some code between `read_scalar`
- /// and `get_bytes_internal`.
- fn get_bytes_even_more_internal(&self, range: AllocRange) -> &[u8] {
- &self.bytes[range.start.bytes_usize()..range.end().bytes_usize()]
- }
-
- /// The last argument controls whether we error out when there are uninitialized or pointer
- /// bytes. However, we *always* error when there are relocations overlapping the edges of the
- /// range.
- ///
- /// You should never call this, call `get_bytes` or `get_bytes_with_uninit_and_ptr` instead,
+ /// caring about provenance or initialization.
///
/// This function also guarantees that the resulting pointer will remain stable
/// even when new allocations are pushed to the `HashMap`. `mem_copy_repeatedly` relies
/// on that.
- ///
- /// It is the caller's responsibility to check bounds and alignment beforehand.
- fn get_bytes_internal(
- &self,
- cx: &impl HasDataLayout,
- range: AllocRange,
- check_init_and_ptr: bool,
- ) -> AllocResult<&[u8]> {
- if check_init_and_ptr {
- self.check_init(range)?;
- self.check_relocations(cx, range)?;
- } else {
- // We still don't want relocations on the *edges*.
- self.check_relocation_edges(cx, range)?;
- }
-
- Ok(self.get_bytes_even_more_internal(range))
+ #[inline]
+ pub fn get_bytes_unchecked(&self, range: AllocRange) -> &[u8] {
+ &self.bytes[range.start.bytes_usize()..range.end().bytes_usize()]
}
- /// Checks that these bytes are initialized and not pointer bytes, and then return them
- /// as a slice.
+ /// Checks that these bytes are initialized, and then strips provenance (if possible) and returns
+ /// them.
///
/// It is the caller's responsibility to check bounds and alignment beforehand.
/// Most likely, you want to use the `PlaceTy` and `OperandTy`-based methods
/// on `InterpCx` instead.
#[inline]
- pub fn get_bytes(&self, cx: &impl HasDataLayout, range: AllocRange) -> AllocResult<&[u8]> {
- self.get_bytes_internal(cx, range, true)
- }
-
- /// It is the caller's responsibility to handle uninitialized and pointer bytes.
- /// However, this still checks that there are no relocations on the *edges*.
- ///
- /// It is the caller's responsibility to check bounds and alignment beforehand.
- #[inline]
- pub fn get_bytes_with_uninit_and_ptr(
+ pub fn get_bytes_strip_provenance(
&self,
cx: &impl HasDataLayout,
range: AllocRange,
) -> AllocResult<&[u8]> {
- self.get_bytes_internal(cx, range, false)
+ self.check_init(range)?;
+ if !Prov::OFFSET_IS_ADDR {
+ if self.range_has_provenance(cx, range) {
+ return Err(AllocError::ReadPointerAsBytes);
+ }
+ }
+ Ok(self.get_bytes_unchecked(range))
}
- /// Just calling this already marks everything as defined and removes relocations,
+ /// Just calling this already marks everything as defined and removes provenance,
/// so be sure to actually put data there!
///
/// It is the caller's responsibility to check bounds and alignment beforehand.
@@ -392,7 +370,7 @@ impl<Prov: Provenance, Extra> Allocation<Prov, Extra> {
range: AllocRange,
) -> AllocResult<&mut [u8]> {
self.mark_init(range, true);
- self.clear_relocations(cx, range)?;
+ self.clear_provenance(cx, range)?;
Ok(&mut self.bytes[range.start.bytes_usize()..range.end().bytes_usize()])
}
@@ -404,7 +382,7 @@ impl<Prov: Provenance, Extra> Allocation<Prov, Extra> {
range: AllocRange,
) -> AllocResult<*mut [u8]> {
self.mark_init(range, true);
- self.clear_relocations(cx, range)?;
+ self.clear_provenance(cx, range)?;
assert!(range.end().bytes_usize() <= self.bytes.len()); // need to do our own bounds-check
let begin_ptr = self.bytes.as_mut_ptr().wrapping_add(range.start.bytes_usize());
@@ -415,28 +393,6 @@ impl<Prov: Provenance, Extra> Allocation<Prov, Extra> {
/// Reading and writing.
impl<Prov: Provenance, Extra> Allocation<Prov, Extra> {
- /// Validates that `ptr.offset` and `ptr.offset + size` do not point to the middle of a
- /// relocation. If `allow_uninit`/`allow_ptr` is `false`, also enforces that the memory in the
- /// given range contains no uninitialized bytes/relocations.
- pub fn check_bytes(
- &self,
- cx: &impl HasDataLayout,
- range: AllocRange,
- allow_uninit: bool,
- allow_ptr: bool,
- ) -> AllocResult {
- // Check bounds and relocations on the edges.
- self.get_bytes_with_uninit_and_ptr(cx, range)?;
- // Check uninit and ptr.
- if !allow_uninit {
- self.check_init(range)?;
- }
- if !allow_ptr {
- self.check_relocations(cx, range)?;
- }
- Ok(())
- }
-
/// Reads a *non-ZST* scalar.
///
/// If `read_provenance` is `true`, this will also read provenance; otherwise (if the machine
@@ -452,47 +408,55 @@ impl<Prov: Provenance, Extra> Allocation<Prov, Extra> {
cx: &impl HasDataLayout,
range: AllocRange,
read_provenance: bool,
- ) -> AllocResult<ScalarMaybeUninit<Prov>> {
- if read_provenance {
- assert_eq!(range.size, cx.data_layout().pointer_size);
- }
-
+ ) -> AllocResult<Scalar<Prov>> {
// First and foremost, if anything is uninit, bail.
if self.is_init(range).is_err() {
- // This inflates uninitialized bytes to the entire scalar, even if only a few
- // bytes are uninitialized.
- return Ok(ScalarMaybeUninit::Uninit);
+ return Err(AllocError::InvalidUninitBytes(None));
}
- // If we are doing a pointer read, and there is a relocation exactly where we
- // are reading, then we can put data and relocation back together and return that.
- if read_provenance && let Some(&prov) = self.relocations.get(&range.start) {
- // We already checked init and relocations, so we can use this function.
- let bytes = self.get_bytes_even_more_internal(range);
- let bits = read_target_uint(cx.data_layout().endian, bytes).unwrap();
- let ptr = Pointer::new(prov, Size::from_bytes(bits));
- return Ok(ScalarMaybeUninit::from_pointer(ptr, cx));
- }
+ // Get the integer part of the result. We HAVE TO check provenance before returning this!
+ let bytes = self.get_bytes_unchecked(range);
+ let bits = read_target_uint(cx.data_layout().endian, bytes).unwrap();
- // If we are *not* reading a pointer, and we can just ignore relocations,
- // then do exactly that.
- if !read_provenance && Prov::OFFSET_IS_ADDR {
- // We just strip provenance.
- let bytes = self.get_bytes_even_more_internal(range);
- let bits = read_target_uint(cx.data_layout().endian, bytes).unwrap();
- return Ok(ScalarMaybeUninit::Scalar(Scalar::from_uint(bits, range.size)));
+ if read_provenance {
+ assert_eq!(range.size, cx.data_layout().pointer_size);
+
+ // When reading data with provenance, the easy case is finding provenance exactly where we
+ // are reading; then we can put data and provenance back together and return that.
+ if let Some(&prov) = self.provenance.get(&range.start) {
+ // Now we can return the bits, with their appropriate provenance.
+ let ptr = Pointer::new(prov, Size::from_bytes(bits));
+ return Ok(Scalar::from_pointer(ptr, cx));
+ }
+
+ // If we can work on pointers byte-wise, join the byte-wise provenances.
+ if Prov::OFFSET_IS_ADDR {
+ let mut prov = self.offset_get_provenance(cx, range.start);
+ for offset in 1..range.size.bytes() {
+ let this_prov =
+ self.offset_get_provenance(cx, range.start + Size::from_bytes(offset));
+ prov = Prov::join(prov, this_prov);
+ }
+ // Now use this provenance.
+ let ptr = Pointer::new(prov, Size::from_bytes(bits));
+ return Ok(Scalar::from_maybe_pointer(ptr, cx));
+ }
+ } else {
+ // We are *not* reading a pointer.
+ // If we can just ignore provenance, do exactly that.
+ if Prov::OFFSET_IS_ADDR {
+ // We just strip provenance.
+ return Ok(Scalar::from_uint(bits, range.size));
+ }
}
- // It's complicated. Better make sure there is no provenance anywhere.
- // FIXME: If !OFFSET_IS_ADDR, this is the best we can do. But if OFFSET_IS_ADDR, then
- // `read_pointer` is true and we ideally would distinguish the following two cases:
- // - The entire `range` is covered by 2 relocations for the same provenance.
- // Then we should return a pointer with that provenance.
- // - The range has inhomogeneous provenance. Then we should return just the
- // underlying bits.
- let bytes = self.get_bytes(cx, range)?;
- let bits = read_target_uint(cx.data_layout().endian, bytes).unwrap();
- Ok(ScalarMaybeUninit::Scalar(Scalar::from_uint(bits, range.size)))
+ // Fallback path for when we cannot treat provenance bytewise or ignore it.
+ assert!(!Prov::OFFSET_IS_ADDR);
+ if self.range_has_provenance(cx, range) {
+ return Err(AllocError::ReadPointerAsBytes);
+ }
+ // There is no provenance, we can just return the bits.
+ Ok(Scalar::from_uint(bits, range.size))
}
/// Writes a *non-ZST* scalar.
@@ -507,17 +471,10 @@ impl<Prov: Provenance, Extra> Allocation<Prov, Extra> {
&mut self,
cx: &impl HasDataLayout,
range: AllocRange,
- val: ScalarMaybeUninit<Prov>,
+ val: Scalar<Prov>,
) -> AllocResult {
assert!(self.mutability == Mutability::Mut);
- let val = match val {
- ScalarMaybeUninit::Scalar(scalar) => scalar,
- ScalarMaybeUninit::Uninit => {
- return self.write_uninit(cx, range);
- }
- };
-
// `to_bits_or_ptr_internal` is the right method because we just want to store this data
// as-is into memory.
let (bytes, provenance) = match val.to_bits_or_ptr_internal(range.size)? {
@@ -532,9 +489,9 @@ impl<Prov: Provenance, Extra> Allocation<Prov, Extra> {
let dst = self.get_bytes_mut(cx, range)?;
write_target_uint(endian, dst, bytes).unwrap();
- // See if we have to also write a relocation.
+ // See if we have to also store some provenance.
if let Some(provenance) = provenance {
- self.relocations.0.insert(range.start, provenance);
+ self.provenance.0.insert(range.start, provenance);
}
Ok(())
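The rewritten `read_scalar` above no longer produces a "maybe uninit" scalar: uninitialized bytes are now a hard error, and provenance is either reunited with the bits (pointer reads), ignored (when offsets are addresses), or rejected as `ReadPointerAsBytes`. A heavily simplified standalone sketch of that control flow, omitting the byte-wise `OFFSET_IS_ADDR` path and using plain integers for offsets and provenance tags:

```rust
use std::collections::BTreeMap;

#[derive(Debug, PartialEq)]
enum ReadError {
    InvalidUninitBytes,
    ReadPointerAsBytes,
}

#[derive(Debug, PartialEq)]
enum Scalar {
    Int(u64),
    /// A pointer: provenance tag plus offset bits.
    Ptr(u32, u64),
}

/// Toy allocation: raw little-endian bytes, an init flag per byte, and a
/// provenance map keyed by the *first* byte of each stored pointer.
struct Alloc {
    bytes: Vec<u8>,
    init: Vec<bool>,
    provenance: BTreeMap<usize, u32>,
}

impl Alloc {
    /// Mirrors the structure of the new `read_scalar`: bail on uninit, then
    /// reunite bits with provenance (or reject the read) depending on the mode.
    fn read_scalar(&self, start: usize, size: usize, read_provenance: bool) -> Result<Scalar, ReadError> {
        if self.init[start..start + size].iter().any(|&b| !b) {
            return Err(ReadError::InvalidUninitBytes);
        }
        // Get the integer part of the result; provenance is checked before returning it.
        let mut bits = 0u64;
        for (i, b) in self.bytes[start..start + size].iter().enumerate() {
            bits |= (*b as u64) << (8 * i);
        }
        let has_provenance = self.provenance.range(start..start + size).next().is_some();
        if read_provenance {
            if let Some(&prov) = self.provenance.get(&start) {
                // Exact match: put bits and provenance back together.
                return Ok(Scalar::Ptr(prov, bits));
            }
        }
        if has_provenance {
            // Fallback: we can neither ignore nor reconstruct the provenance.
            return Err(ReadError::ReadPointerAsBytes);
        }
        Ok(Scalar::Int(bits))
    }
}

fn main() {
    let alloc = Alloc {
        bytes: vec![42, 0, 0, 0, 0, 0, 0, 0],
        init: vec![true; 8],
        provenance: BTreeMap::from([(0, 7)]),
    };
    assert_eq!(alloc.read_scalar(0, 8, true), Ok(Scalar::Ptr(7, 42)));
    assert_eq!(alloc.read_scalar(0, 8, false), Err(ReadError::ReadPointerAsBytes));
}
```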
@@ -543,64 +500,65 @@ impl<Prov: Provenance, Extra> Allocation<Prov, Extra> {
/// Write "uninit" to the given memory range.
pub fn write_uninit(&mut self, cx: &impl HasDataLayout, range: AllocRange) -> AllocResult {
self.mark_init(range, false);
- self.clear_relocations(cx, range)?;
+ self.clear_provenance(cx, range)?;
return Ok(());
}
}
-/// Relocations.
+/// Provenance.
impl<Prov: Copy, Extra> Allocation<Prov, Extra> {
- /// Returns all relocations overlapping with the given pointer-offset pair.
- fn get_relocations(&self, cx: &impl HasDataLayout, range: AllocRange) -> &[(Size, Prov)] {
+ /// Returns all provenance overlapping with the given pointer-offset pair.
+ fn range_get_provenance(&self, cx: &impl HasDataLayout, range: AllocRange) -> &[(Size, Prov)] {
// We have to go back `pointer_size - 1` bytes, as that one would still overlap with
// the beginning of this range.
let start = range.start.bytes().saturating_sub(cx.data_layout().pointer_size.bytes() - 1);
- self.relocations.range(Size::from_bytes(start)..range.end())
+ self.provenance.range(Size::from_bytes(start)..range.end())
}
- /// Returns whether this allocation has relocations overlapping with the given range.
- ///
- /// Note: this function exists to allow `get_relocations` to be private, in order to somewhat
- /// limit access to relocations outside of the `Allocation` abstraction.
- ///
- pub fn has_relocations(&self, cx: &impl HasDataLayout, range: AllocRange) -> bool {
- !self.get_relocations(cx, range).is_empty()
+ /// Get the provenance of a single byte.
+ fn offset_get_provenance(&self, cx: &impl HasDataLayout, offset: Size) -> Option<Prov> {
+ let prov = self.range_get_provenance(cx, alloc_range(offset, Size::from_bytes(1)));
+ assert!(prov.len() <= 1);
+ prov.first().map(|(_offset, prov)| *prov)
}
- /// Checks that there are no relocations overlapping with the given range.
- #[inline(always)]
- fn check_relocations(&self, cx: &impl HasDataLayout, range: AllocRange) -> AllocResult {
- if self.has_relocations(cx, range) { Err(AllocError::ReadPointerAsBytes) } else { Ok(()) }
+ /// Returns whether this allocation has provenance overlapping with the given range.
+ ///
+ /// Note: this function exists to allow `range_get_provenance` to be private, in order to somewhat
+ /// limit access to provenance outside of the `Allocation` abstraction.
+ ///
+ pub fn range_has_provenance(&self, cx: &impl HasDataLayout, range: AllocRange) -> bool {
+ !self.range_get_provenance(cx, range).is_empty()
}
- /// Removes all relocations inside the given range.
- /// If there are relocations overlapping with the edges, they
+ /// Removes all provenance inside the given range.
+ /// If there is provenance overlapping with the edges, it
 /// is removed as well *and* the bytes it covers are marked as
/// uninitialized. This is a somewhat odd "spooky action at a distance",
/// but it allows strictly more code to run than if we would just error
/// immediately in that case.
- fn clear_relocations(&mut self, cx: &impl HasDataLayout, range: AllocRange) -> AllocResult
+ fn clear_provenance(&mut self, cx: &impl HasDataLayout, range: AllocRange) -> AllocResult
where
Prov: Provenance,
{
- // Find the start and end of the given range and its outermost relocations.
+ // Find the start and end of the given range and its outermost provenance.
let (first, last) = {
- // Find all relocations overlapping the given range.
- let relocations = self.get_relocations(cx, range);
- if relocations.is_empty() {
+ // Find all provenance overlapping the given range.
+ let provenance = self.range_get_provenance(cx, range);
+ if provenance.is_empty() {
return Ok(());
}
(
- relocations.first().unwrap().0,
- relocations.last().unwrap().0 + cx.data_layout().pointer_size,
+ provenance.first().unwrap().0,
+ provenance.last().unwrap().0 + cx.data_layout().pointer_size,
)
};
let start = range.start;
let end = range.end();
- // We need to handle clearing the relocations from parts of a pointer.
- // FIXME: Miri should preserve partial relocations; see
+ // We need to handle clearing the provenance from parts of a pointer.
+ // FIXME: Miri should preserve partial provenance; see
// https://github.com/rust-lang/miri/issues/2181.
if first < start {
if Prov::ERR_ON_PARTIAL_PTR_OVERWRITE {
@@ -623,41 +581,32 @@ impl<Prov: Copy, Extra> Allocation<Prov, Extra> {
self.init_mask.set_range(end, last, false);
}
- // Forget all the relocations.
- // Since relocations do not overlap, we know that removing until `last` (exclusive) is fine,
- // i.e., this will not remove any other relocations just after the ones we care about.
- self.relocations.0.remove_range(first..last);
+ // Forget all the provenance.
+ // Since provenance entries do not overlap, we know that removing until `last` (exclusive) is fine,
+ // i.e., this will not remove any other provenance just after the entries we care about.
+ self.provenance.0.remove_range(first..last);
Ok(())
}
-
- /// Errors if there are relocations overlapping with the edges of the
- /// given memory range.
- #[inline]
- fn check_relocation_edges(&self, cx: &impl HasDataLayout, range: AllocRange) -> AllocResult {
- self.check_relocations(cx, alloc_range(range.start, Size::ZERO))?;
- self.check_relocations(cx, alloc_range(range.end(), Size::ZERO))?;
- Ok(())
- }
}
-/// "Relocations" stores the provenance information of pointers stored in memory.
+/// Stores the provenance information of pointers stored in memory.
#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, TyEncodable, TyDecodable)]
-pub struct Relocations<Prov = AllocId>(SortedMap<Size, Prov>);
+pub struct ProvenanceMap<Prov = AllocId>(SortedMap<Size, Prov>);
-impl<Prov> Relocations<Prov> {
+impl<Prov> ProvenanceMap<Prov> {
pub fn new() -> Self {
- Relocations(SortedMap::new())
+ ProvenanceMap(SortedMap::new())
}
- // The caller must guarantee that the given relocations are already sorted
+ // The caller must guarantee that the given provenance entries are already sorted
// by address and contain no duplicates.
pub fn from_presorted(r: Vec<(Size, Prov)>) -> Self {
- Relocations(SortedMap::from_presorted_elements(r))
+ ProvenanceMap(SortedMap::from_presorted_elements(r))
}
}
-impl<Prov> Deref for Relocations<Prov> {
+impl<Prov> Deref for ProvenanceMap<Prov> {
type Target = SortedMap<Size, Prov>;
fn deref(&self) -> &Self::Target {
@@ -665,36 +614,36 @@ impl<Prov> Deref for Relocations<Prov> {
}
}
-/// A partial, owned list of relocations to transfer into another allocation.
+/// A partial, owned list of provenance to transfer into another allocation.
///
/// Offsets are already adjusted to the destination allocation.
-pub struct AllocationRelocations<Prov> {
- dest_relocations: Vec<(Size, Prov)>,
+pub struct AllocationProvenance<Prov> {
+ dest_provenance: Vec<(Size, Prov)>,
}
impl<Prov: Copy, Extra> Allocation<Prov, Extra> {
- pub fn prepare_relocation_copy(
+ pub fn prepare_provenance_copy(
&self,
cx: &impl HasDataLayout,
src: AllocRange,
dest: Size,
count: u64,
- ) -> AllocationRelocations<Prov> {
- let relocations = self.get_relocations(cx, src);
- if relocations.is_empty() {
- return AllocationRelocations { dest_relocations: Vec::new() };
+ ) -> AllocationProvenance<Prov> {
+ let provenance = self.range_get_provenance(cx, src);
+ if provenance.is_empty() {
+ return AllocationProvenance { dest_provenance: Vec::new() };
}
let size = src.size;
- let mut new_relocations = Vec::with_capacity(relocations.len() * (count as usize));
+ let mut new_provenance = Vec::with_capacity(provenance.len() * (count as usize));
// If `count` is large, this is rather wasteful -- we are allocating a big array here, which
// is mostly filled with redundant information since it's just N copies of the same `Prov`s
- // at slightly adjusted offsets. The reason we do this is so that in `mark_relocation_range`
+ // at slightly adjusted offsets. The reason we do this is so that in `mark_provenance_range`
// we can use `insert_presorted`. That wouldn't work with an `Iterator` that just produces
- // the right sequence of relocations for all N copies.
+ // the right sequence of provenance for all N copies.
for i in 0..count {
- new_relocations.extend(relocations.iter().map(|&(offset, reloc)| {
+ new_provenance.extend(provenance.iter().map(|&(offset, reloc)| {
// compute offset for current repetition
let dest_offset = dest + size * i; // `Size` operations
(
@@ -705,17 +654,17 @@ impl<Prov: Copy, Extra> Allocation<Prov, Extra> {
}));
}
- AllocationRelocations { dest_relocations: new_relocations }
+ AllocationProvenance { dest_provenance: new_provenance }
}
- /// Applies a relocation copy.
- /// The affected range, as defined in the parameters to `prepare_relocation_copy` is expected
- /// to be clear of relocations.
+ /// Applies a provenance copy.
+ /// The affected range, as defined in the parameters to `prepare_provenance_copy` is expected
+ /// to be clear of provenance.
///
/// This is dangerous to use as it can violate internal `Allocation` invariants!
/// It only exists to support an efficient implementation of `mem_copy_repeatedly`.
- pub fn mark_relocation_range(&mut self, relocations: AllocationRelocations<Prov>) {
- self.relocations.0.insert_presorted(relocations.dest_relocations);
+ pub fn mark_provenance_range(&mut self, provenance: AllocationProvenance<Prov>) {
+ self.provenance.0.insert_presorted(provenance.dest_provenance);
}
}
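
As a rough illustration of the range query documented above (looking back `pointer_size - 1` bytes so that a pointer starting before the range is still found), here is a minimal, self-contained sketch. The `ProvenanceMap`-style type and method names are simplified stand-ins for illustration, not the actual rustc types.

use std::collections::BTreeMap;

// Simplified stand-in for the renamed `ProvenanceMap`: start offset of each
// stored pointer -> an AllocId-like tag. Illustrative only.
struct ProvenanceMap {
    entries: BTreeMap<u64, u32>,
}

impl ProvenanceMap {
    // Returns all provenance entries overlapping `start..start + len`,
    // assuming every entry covers `ptr_size` bytes.
    fn range_get_provenance(&self, ptr_size: u64, start: u64, len: u64) -> Vec<(u64, u32)> {
        // An entry beginning up to `ptr_size - 1` bytes before `start` still overlaps it.
        let lookback = start.saturating_sub(ptr_size - 1);
        self.entries.range(lookback..start + len).map(|(&o, &p)| (o, p)).collect()
    }

    fn range_has_provenance(&self, ptr_size: u64, start: u64, len: u64) -> bool {
        !self.range_get_provenance(ptr_size, start, len).is_empty()
    }
}

fn main() {
    let mut entries = BTreeMap::new();
    entries.insert(4, 7); // a pointer stored at offsets 4..12 (ptr_size = 8)
    let map = ProvenanceMap { entries };
    // Reading byte 10 still overlaps the pointer that starts at offset 4.
    assert!(map.range_has_provenance(8, 10, 1));
    // Bytes 12..16 lie entirely past the pointer, so no provenance there.
    assert!(!map.range_has_provenance(8, 12, 4));
}
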
diff --git a/compiler/rustc_middle/src/mir/interpret/error.rs b/compiler/rustc_middle/src/mir/interpret/error.rs
index cecb55578..b5a50cc15 100644
--- a/compiler/rustc_middle/src/mir/interpret/error.rs
+++ b/compiler/rustc_middle/src/mir/interpret/error.rs
@@ -401,14 +401,18 @@ impl fmt::Display for UndefinedBehaviorInfo {
pub enum UnsupportedOpInfo {
/// Free-form case. Only for errors that are never caught!
Unsupported(String),
- /// Encountered a pointer where we needed raw bytes.
- ReadPointerAsBytes,
/// Overwriting parts of a pointer; the resulting state cannot be represented in our
/// `Allocation` data structure. See <https://github.com/rust-lang/miri/issues/2181>.
PartialPointerOverwrite(Pointer<AllocId>),
+ /// Attempting to `copy` parts of a pointer to somewhere else; the resulting state cannot be
+ /// represented in our `Allocation` data structure. See
+ /// <https://github.com/rust-lang/miri/issues/2181>.
+ PartialPointerCopy(Pointer<AllocId>),
//
// The variants below are only reachable from CTFE/const prop, miri will never emit them.
//
+ /// Encountered a pointer where we needed raw bytes.
+ ReadPointerAsBytes,
/// Accessing thread local statics
ThreadLocalStatic(DefId),
/// Accessing an unsupported extern static.
@@ -420,10 +424,13 @@ impl fmt::Display for UnsupportedOpInfo {
use UnsupportedOpInfo::*;
match self {
Unsupported(ref msg) => write!(f, "{msg}"),
- ReadPointerAsBytes => write!(f, "unable to turn pointer into raw bytes"),
PartialPointerOverwrite(ptr) => {
write!(f, "unable to overwrite parts of a pointer in memory at {ptr:?}")
}
+ PartialPointerCopy(ptr) => {
+ write!(f, "unable to copy parts of a pointer from memory at {ptr:?}")
+ }
+ ReadPointerAsBytes => write!(f, "unable to turn pointer into raw bytes"),
ThreadLocalStatic(did) => write!(f, "cannot access thread local static ({did:?})"),
ReadExternStatic(did) => write!(f, "cannot read from extern static ({did:?})"),
}
@@ -472,12 +479,7 @@ impl<T: Any> AsAny for T {
}
/// A trait for machine-specific errors (or other "machine stop" conditions).
-pub trait MachineStopType: AsAny + fmt::Display + Send {
- /// If `true`, emit a hard error instead of going through the `CONST_ERR` lint
- fn is_hard_err(&self) -> bool {
- false
- }
-}
+pub trait MachineStopType: AsAny + fmt::Display + Send {}
impl dyn MachineStopType {
#[inline(always)]
@@ -536,16 +538,4 @@ impl InterpError<'_> {
| InterpError::UndefinedBehavior(UndefinedBehaviorInfo::Ub(_))
)
}
-
- /// Should this error be reported as a hard error, preventing compilation, or a soft error,
- /// causing a deny-by-default lint?
- pub fn is_hard_err(&self) -> bool {
- use InterpError::*;
- match *self {
- MachineStop(ref err) => err.is_hard_err(),
- UndefinedBehavior(_) => true,
- ResourceExhaustion(ResourceExhaustionInfo::MemoryExhausted) => true,
- _ => false,
- }
- }
}
diff --git a/compiler/rustc_middle/src/mir/interpret/mod.rs b/compiler/rustc_middle/src/mir/interpret/mod.rs
index 967f8ece1..5e3dfcbcc 100644
--- a/compiler/rustc_middle/src/mir/interpret/mod.rs
+++ b/compiler/rustc_middle/src/mir/interpret/mod.rs
@@ -124,11 +124,11 @@ pub use self::error::{
UninitBytesAccess, UnsupportedOpInfo,
};
-pub use self::value::{get_slice_bytes, ConstAlloc, ConstValue, Scalar, ScalarMaybeUninit};
+pub use self::value::{get_slice_bytes, ConstAlloc, ConstValue, Scalar};
pub use self::allocation::{
alloc_range, AllocRange, Allocation, ConstAllocation, InitChunk, InitChunkIter, InitMask,
- Relocations,
+ ProvenanceMap,
};
pub use self::pointer::{Pointer, PointerArithmetic, Provenance};
@@ -137,7 +137,7 @@ pub use self::pointer::{Pointer, PointerArithmetic, Provenance};
/// - A constant
/// - A static
#[derive(Copy, Clone, Debug, Eq, PartialEq, Hash, TyEncodable, TyDecodable)]
-#[derive(HashStable, Lift)]
+#[derive(HashStable, Lift, TypeFoldable, TypeVisitable)]
pub struct GlobalId<'tcx> {
/// For a constant or static, the `Instance` of the item itself.
/// For a promoted global, the `Instance` of the function they belong to.
diff --git a/compiler/rustc_middle/src/mir/interpret/pointer.rs b/compiler/rustc_middle/src/mir/interpret/pointer.rs
index 384954cbb..23c2ce647 100644
--- a/compiler/rustc_middle/src/mir/interpret/pointer.rs
+++ b/compiler/rustc_middle/src/mir/interpret/pointer.rs
@@ -43,7 +43,7 @@ pub trait PointerArithmetic: HasDataLayout {
let val = val as i64;
// Now wrap-around into the machine_isize range.
if val > self.machine_isize_max() {
- // This can only happen the the ptr size is < 64, so we know max_usize_plus_1 fits into
+ // This can only happen if the ptr size is < 64, so we know max_usize_plus_1 fits into
// i64.
debug_assert!(self.pointer_size().bits() < 64);
let max_usize_plus_1 = 1u128 << self.pointer_size().bits();
@@ -107,8 +107,12 @@ impl<T: HasDataLayout> PointerArithmetic for T {}
/// pointer), but `derive` adds some unnecessary bounds.
pub trait Provenance: Copy + fmt::Debug {
/// Says whether the `offset` field of `Pointer`s with this provenance is the actual physical address.
- /// If `true, ptr-to-int casts work by simply discarding the provenance.
- /// If `false`, ptr-to-int casts are not supported. The offset *must* be relative in that case.
+ /// - If `false`, the offset *must* be relative. This means the bytes representing a pointer are
+ /// different from what the Abstract Machine prescribes, so the interpreter must prevent any
+ /// operation that would inspect the underlying bytes of a pointer, such as ptr-to-int
+ /// transmutation. A `ReadPointerAsBytes` error will be raised in such situations.
+ /// - If `true`, the interpreter will permit operations to inspect the underlying bytes of a
+ /// pointer, and implement ptr-to-int transmutation by stripping provenance.
const OFFSET_IS_ADDR: bool;
/// We also use this trait to control whether to abort execution when a pointer is being partially overwritten
@@ -125,6 +129,9 @@ pub trait Provenance: Copy + fmt::Debug {
/// Otherwise this function is best-effort (but must agree with `Machine::ptr_get_alloc`).
/// (Identifying the offset in that allocation, however, is harder -- use `Memory::ptr_get_alloc` for that.)
fn get_alloc_id(self) -> Option<AllocId>;
+
+ /// Defines the 'join' of provenance: what happens when doing a pointer load and different bytes have different provenance.
+ fn join(left: Option<Self>, right: Option<Self>) -> Option<Self>;
}
impl Provenance for AllocId {
@@ -152,6 +159,10 @@ impl Provenance for AllocId {
fn get_alloc_id(self) -> Option<AllocId> {
Some(self)
}
+
+ fn join(_left: Option<Self>, _right: Option<Self>) -> Option<Self> {
+ panic!("merging provenance is not supported when `OFFSET_IS_ADDR` is false")
+ }
}
/// Represents a pointer in the Miri engine.
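
The new `Provenance::join` hook above only poses the question: what happens when a pointer load sees bytes with different provenance? One plausible policy, shown purely as a hedged sketch with made-up types (the diff itself does not fix the policy), is to keep provenance only when every part agrees:

// Illustrative only: one possible `join` semantics for pointer loads.
fn join<P: Copy + PartialEq>(left: Option<P>, right: Option<P>) -> Option<P> {
    match (left, right) {
        (Some(l), Some(r)) if l == r => Some(l), // all bytes agree: keep provenance
        _ => None,                               // mixed or missing: strip it
    }
}

fn main() {
    assert_eq!(join(Some(1u32), Some(1)), Some(1));
    assert_eq!(join(Some(1u32), Some(2)), None);
    assert_eq!(join(Some(1u32), None), None);
}
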
diff --git a/compiler/rustc_middle/src/mir/interpret/queries.rs b/compiler/rustc_middle/src/mir/interpret/queries.rs
index 786927e2d..473894ac1 100644
--- a/compiler/rustc_middle/src/mir/interpret/queries.rs
+++ b/compiler/rustc_middle/src/mir/interpret/queries.rs
@@ -4,7 +4,9 @@ use crate::mir;
use crate::ty::subst::InternalSubsts;
use crate::ty::visit::TypeVisitable;
use crate::ty::{self, query::TyCtxtAt, query::TyCtxtEnsure, TyCtxt};
+use rustc_hir::def::DefKind;
use rustc_hir::def_id::DefId;
+use rustc_session::lint;
use rustc_span::{Span, DUMMY_SP};
impl<'tcx> TyCtxt<'tcx> {
@@ -36,7 +38,7 @@ impl<'tcx> TyCtxt<'tcx> {
pub fn const_eval_resolve(
self,
param_env: ty::ParamEnv<'tcx>,
- ct: ty::Unevaluated<'tcx>,
+ ct: mir::UnevaluatedConst<'tcx>,
span: Option<Span>,
) -> EvalToConstValueResult<'tcx> {
// Cannot resolve `Unevaluated` constants that contain inference
@@ -45,11 +47,15 @@ impl<'tcx> TyCtxt<'tcx> {
//
// When trying to evaluate constants containing inference variables,
// use `Infcx::const_eval_resolve` instead.
- if ct.substs.has_infer_types_or_consts() {
+ if ct.substs.has_non_region_infer() {
bug!("did not expect inference variables here");
}
- match ty::Instance::resolve_opt_const_arg(self, param_env, ct.def, ct.substs) {
+ match ty::Instance::resolve_opt_const_arg(
+ self, param_env,
+ // FIXME: maybe have a separate version for resolving mir::UnevaluatedConst?
+ ct.def, ct.substs,
+ ) {
Ok(Some(instance)) => {
let cid = GlobalId { instance, promoted: ct.promoted };
self.const_eval_global_id(param_env, cid, span)
@@ -63,7 +69,7 @@ impl<'tcx> TyCtxt<'tcx> {
pub fn const_eval_resolve_for_typeck(
self,
param_env: ty::ParamEnv<'tcx>,
- ct: ty::Unevaluated<'tcx>,
+ ct: ty::UnevaluatedConst<'tcx>,
span: Option<Span>,
) -> EvalToValTreeResult<'tcx> {
// Cannot resolve `Unevaluated` constants that contain inference
@@ -72,14 +78,36 @@ impl<'tcx> TyCtxt<'tcx> {
//
// When trying to evaluate constants containing inference variables,
// use `Infcx::const_eval_resolve` instead.
- if ct.substs.has_infer_types_or_consts() {
+ if ct.substs.has_non_region_infer() {
bug!("did not expect inference variables here");
}
match ty::Instance::resolve_opt_const_arg(self, param_env, ct.def, ct.substs) {
Ok(Some(instance)) => {
- let cid = GlobalId { instance, promoted: ct.promoted };
- self.const_eval_global_id_for_typeck(param_env, cid, span)
+ let cid = GlobalId { instance, promoted: None };
+ self.const_eval_global_id_for_typeck(param_env, cid, span).inspect(|_| {
+ // We are emitting the lint here instead of in `is_const_evaluatable`
+ // as we normalize obligations before checking them, and normalization
+ // uses this function to evaluate this constant.
+ //
+ // @lcnr believes that successfully evaluating a constant even though it still
+ // uses generic parameters is a bug in evaluation, so checking for that
+ // here does feel somewhat sensible.
+ if !self.features().generic_const_exprs && ct.substs.has_non_region_param() {
+ assert!(matches!(self.def_kind(ct.def.did), DefKind::AnonConst));
+ let mir_body = self.mir_for_ctfe_opt_const_arg(ct.def);
+ if mir_body.is_polymorphic {
+ let Some(local_def_id) = ct.def.did.as_local() else { return };
+ self.struct_span_lint_hir(
+ lint::builtin::CONST_EVALUATABLE_UNCHECKED,
+ self.hir().local_def_id_to_hir_id(local_def_id),
+ self.def_span(ct.def.did),
+ "cannot use constants which depend on generic parameters in types",
+ |err| err,
+ )
+ }
+ }
+ })
}
Ok(None) => Err(ErrorHandled::TooGeneric),
Err(error_reported) => Err(ErrorHandled::Reported(error_reported)),
@@ -211,7 +239,7 @@ impl<'tcx> TyCtxt<'tcx> {
self,
param_env: ty::ParamEnv<'tcx>,
constant: mir::ConstantKind<'tcx>,
- ) -> mir::DestructuredMirConstant<'tcx> {
+ ) -> mir::DestructuredConstant<'tcx> {
self.try_destructure_mir_constant(param_env.and(constant)).unwrap()
}
}
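
The `inspect` call in the typeck path above runs the lint only when evaluation succeeds, while passing the result through unchanged. A minimal sketch of that pattern (using `Result::inspect`, which behaves this way on current stable Rust; everything else here is invented for illustration):

fn evaluate(x: u32) -> Result<u32, String> {
    if x % 2 == 0 { Ok(x / 2) } else { Err("odd input".to_string()) }
}

fn evaluate_and_warn(x: u32, is_suspicious: bool) -> Result<u32, String> {
    evaluate(x).inspect(|_| {
        // The side effect fires only on the Ok path, mirroring how the query
        // emits CONST_EVALUATABLE_UNCHECKED only after a successful evaluation.
        if is_suspicious {
            eprintln!("warning: evaluation of {x} succeeded despite generic parameters");
        }
    })
}

fn main() {
    assert_eq!(evaluate_and_warn(4, true), Ok(2));
    assert!(evaluate_and_warn(3, false).is_err());
}
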
diff --git a/compiler/rustc_middle/src/mir/interpret/value.rs b/compiler/rustc_middle/src/mir/interpret/value.rs
index 834c114ee..ac5fddb7a 100644
--- a/compiler/rustc_middle/src/mir/interpret/value.rs
+++ b/compiler/rustc_middle/src/mir/interpret/value.rs
@@ -8,7 +8,7 @@ use rustc_apfloat::{
use rustc_macros::HashStable;
use rustc_target::abi::{HasDataLayout, Size};
-use crate::ty::{Lift, ParamEnv, ScalarInt, Ty, TyCtxt};
+use crate::ty::{ParamEnv, ScalarInt, Ty, TyCtxt};
use super::{
AllocId, AllocRange, ConstAllocation, InterpResult, Pointer, PointerArithmetic, Provenance,
@@ -27,7 +27,7 @@ pub struct ConstAlloc<'tcx> {
/// Represents a constant value in Rust. `Scalar` and `Slice` are optimizations for
/// array length computations, enum discriminants and the pattern matching logic.
#[derive(Copy, Clone, Debug, Eq, PartialEq, PartialOrd, Ord, TyEncodable, TyDecodable, Hash)]
-#[derive(HashStable)]
+#[derive(HashStable, Lift)]
pub enum ConstValue<'tcx> {
/// Used only for types with `layout::abi::Scalar` ABI.
///
@@ -53,22 +53,6 @@ pub enum ConstValue<'tcx> {
#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
static_assert_size!(ConstValue<'_>, 32);
-impl<'a, 'tcx> Lift<'tcx> for ConstValue<'a> {
- type Lifted = ConstValue<'tcx>;
- fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<ConstValue<'tcx>> {
- Some(match self {
- ConstValue::Scalar(s) => ConstValue::Scalar(s),
- ConstValue::ZeroSized => ConstValue::ZeroSized,
- ConstValue::Slice { data, start, end } => {
- ConstValue::Slice { data: tcx.lift(data)?, start, end }
- }
- ConstValue::ByRef { alloc, offset } => {
- ConstValue::ByRef { alloc: tcx.lift(alloc)?, offset }
- }
- })
- }
-}
-
impl<'tcx> ConstValue<'tcx> {
#[inline]
pub fn try_to_scalar(&self) -> Option<Scalar<AllocId>> {
@@ -79,7 +63,7 @@ impl<'tcx> ConstValue<'tcx> {
}
pub fn try_to_scalar_int(&self) -> Option<ScalarInt> {
- Some(self.try_to_scalar()?.assert_int())
+ self.try_to_scalar()?.try_to_int().ok()
}
pub fn try_to_bits(&self, size: Size) -> Option<u128> {
@@ -130,9 +114,7 @@ pub enum Scalar<Prov = AllocId> {
/// The raw bytes of a simple value.
Int(ScalarInt),
- /// A pointer into an `Allocation`. An `Allocation` in the `memory` module has a list of
- /// relocations, but a `Scalar` is only large enough to contain one, so we just represent the
- /// relocation and its associated offset together as a `Pointer` here.
+ /// A pointer.
///
/// We also store the size of the pointer, such that a `Scalar` always knows how big it is.
/// The size is always the pointer size of the current target, but this is not information
@@ -368,6 +350,7 @@ impl<'tcx, Prov: Provenance> Scalar<Prov> {
}
#[inline(always)]
+ #[cfg_attr(debug_assertions, track_caller)] // only in debug builds due to perf (see #98980)
pub fn assert_int(self) -> ScalarInt {
self.try_to_int().unwrap()
}
@@ -389,6 +372,7 @@ impl<'tcx, Prov: Provenance> Scalar<Prov> {
}
#[inline(always)]
+ #[cfg_attr(debug_assertions, track_caller)] // only in debug builds due to perf (see #98980)
pub fn assert_bits(self, target_size: Size) -> u128 {
self.to_bits(target_size).unwrap()
}
@@ -502,145 +486,12 @@ impl<'tcx, Prov: Provenance> Scalar<Prov> {
}
}
-#[derive(Clone, Copy, Eq, PartialEq, TyEncodable, TyDecodable, HashStable, Hash)]
-pub enum ScalarMaybeUninit<Prov = AllocId> {
- Scalar(Scalar<Prov>),
- Uninit,
-}
-
-#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
-static_assert_size!(ScalarMaybeUninit, 24);
-
-impl<Prov> From<Scalar<Prov>> for ScalarMaybeUninit<Prov> {
- #[inline(always)]
- fn from(s: Scalar<Prov>) -> Self {
- ScalarMaybeUninit::Scalar(s)
- }
-}
-
-// We want the `Debug` output to be readable as it is used by `derive(Debug)` for
-// all the Miri types.
-impl<Prov: Provenance> fmt::Debug for ScalarMaybeUninit<Prov> {
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- match self {
- ScalarMaybeUninit::Uninit => write!(f, "<uninitialized>"),
- ScalarMaybeUninit::Scalar(s) => write!(f, "{:?}", s),
- }
- }
-}
-
-impl<Prov: Provenance> fmt::LowerHex for ScalarMaybeUninit<Prov> {
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- match self {
- ScalarMaybeUninit::Uninit => write!(f, "uninitialized bytes"),
- ScalarMaybeUninit::Scalar(s) => write!(f, "{:x}", s),
- }
- }
-}
-
-impl<Prov> ScalarMaybeUninit<Prov> {
- #[inline]
- pub fn from_pointer(ptr: Pointer<Prov>, cx: &impl HasDataLayout) -> Self {
- ScalarMaybeUninit::Scalar(Scalar::from_pointer(ptr, cx))
- }
-
- #[inline]
- pub fn from_maybe_pointer(ptr: Pointer<Option<Prov>>, cx: &impl HasDataLayout) -> Self {
- ScalarMaybeUninit::Scalar(Scalar::from_maybe_pointer(ptr, cx))
- }
-
- #[inline]
- pub fn check_init<'tcx>(self) -> InterpResult<'tcx, Scalar<Prov>> {
- match self {
- ScalarMaybeUninit::Scalar(scalar) => Ok(scalar),
- ScalarMaybeUninit::Uninit => throw_ub!(InvalidUninitBytes(None)),
- }
- }
-}
-
-impl<'tcx, Prov: Provenance> ScalarMaybeUninit<Prov> {
- #[inline(always)]
- pub fn to_pointer(self, cx: &impl HasDataLayout) -> InterpResult<'tcx, Pointer<Option<Prov>>> {
- self.check_init()?.to_pointer(cx)
- }
-
- #[inline(always)]
- pub fn to_bool(self) -> InterpResult<'tcx, bool> {
- self.check_init()?.to_bool()
- }
-
- #[inline(always)]
- pub fn to_char(self) -> InterpResult<'tcx, char> {
- self.check_init()?.to_char()
- }
-
- #[inline(always)]
- pub fn to_f32(self) -> InterpResult<'tcx, Single> {
- self.check_init()?.to_f32()
- }
-
- #[inline(always)]
- pub fn to_f64(self) -> InterpResult<'tcx, Double> {
- self.check_init()?.to_f64()
- }
-
- #[inline(always)]
- pub fn to_u8(self) -> InterpResult<'tcx, u8> {
- self.check_init()?.to_u8()
- }
-
- #[inline(always)]
- pub fn to_u16(self) -> InterpResult<'tcx, u16> {
- self.check_init()?.to_u16()
- }
-
- #[inline(always)]
- pub fn to_u32(self) -> InterpResult<'tcx, u32> {
- self.check_init()?.to_u32()
- }
-
- #[inline(always)]
- pub fn to_u64(self) -> InterpResult<'tcx, u64> {
- self.check_init()?.to_u64()
- }
-
- #[inline(always)]
- pub fn to_machine_usize(self, cx: &impl HasDataLayout) -> InterpResult<'tcx, u64> {
- self.check_init()?.to_machine_usize(cx)
- }
-
- #[inline(always)]
- pub fn to_i8(self) -> InterpResult<'tcx, i8> {
- self.check_init()?.to_i8()
- }
-
- #[inline(always)]
- pub fn to_i16(self) -> InterpResult<'tcx, i16> {
- self.check_init()?.to_i16()
- }
-
- #[inline(always)]
- pub fn to_i32(self) -> InterpResult<'tcx, i32> {
- self.check_init()?.to_i32()
- }
-
- #[inline(always)]
- pub fn to_i64(self) -> InterpResult<'tcx, i64> {
- self.check_init()?.to_i64()
- }
-
- #[inline(always)]
- pub fn to_machine_isize(self, cx: &impl HasDataLayout) -> InterpResult<'tcx, i64> {
- self.check_init()?.to_machine_isize(cx)
- }
-}
-
/// Gets the bytes of a constant slice value.
pub fn get_slice_bytes<'tcx>(cx: &impl HasDataLayout, val: ConstValue<'tcx>) -> &'tcx [u8] {
if let ConstValue::Slice { data, start, end } = val {
let len = end - start;
data.inner()
- .get_bytes(
+ .get_bytes_strip_provenance(
cx,
AllocRange { start: Size::from_bytes(start), size: Size::from_bytes(len) },
)
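
With `ScalarMaybeUninit` gone, callers go through the fallible `try_to_*` accessors, and the asserting variants gain `#[track_caller]` so a failed assertion points at the caller rather than at `unwrap`. A self-contained sketch of that accessor pattern (the `Scalar` here is a toy, not the interpreter type):

#[derive(Copy, Clone, Debug, PartialEq)]
enum Scalar {
    Int(u128),
    Ptr(usize),
}

impl Scalar {
    fn try_to_int(self) -> Result<u128, Scalar> {
        match self {
            Scalar::Int(i) => Ok(i),
            other => Err(other),
        }
    }

    // `track_caller` makes the panic location point at the caller of
    // `assert_int`, which is why the diff enables it (in debug builds).
    #[track_caller]
    fn assert_int(self) -> u128 {
        self.try_to_int().unwrap()
    }
}

fn main() {
    assert_eq!(Scalar::Int(7).try_to_int().ok(), Some(7));
    assert_eq!(Scalar::Ptr(0xdead).try_to_int().ok(), None);
    assert_eq!(Scalar::Int(7).assert_int(), 7);
}
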
diff --git a/compiler/rustc_middle/src/mir/mod.rs b/compiler/rustc_middle/src/mir/mod.rs
index 7ab71f900..79db35a76 100644
--- a/compiler/rustc_middle/src/mir/mod.rs
+++ b/compiler/rustc_middle/src/mir/mod.rs
@@ -3,22 +3,22 @@
//! [rustc dev guide]: https://rustc-dev-guide.rust-lang.org/mir/index.html
use crate::mir::interpret::{
- AllocRange, ConstAllocation, ConstValue, GlobalAlloc, LitToConstInput, Scalar,
+ AllocRange, ConstAllocation, ConstValue, ErrorHandled, GlobalAlloc, LitToConstInput, Scalar,
};
use crate::mir::visit::MirVisitable;
use crate::ty::codec::{TyDecoder, TyEncoder};
-use crate::ty::fold::{FallibleTypeFolder, TypeFoldable, TypeSuperFoldable};
+use crate::ty::fold::{FallibleTypeFolder, TypeFoldable};
use crate::ty::print::{FmtPrinter, Printer};
-use crate::ty::subst::{GenericArg, InternalSubsts, Subst, SubstsRef};
-use crate::ty::visit::{TypeSuperVisitable, TypeVisitable, TypeVisitor};
+use crate::ty::visit::{TypeVisitable, TypeVisitor};
use crate::ty::{self, List, Ty, TyCtxt};
use crate::ty::{AdtDef, InstanceDef, ScalarInt, UserTypeAnnotationIndex};
+use crate::ty::{GenericArg, InternalSubsts, SubstsRef};
use rustc_data_structures::captures::Captures;
use rustc_errors::ErrorGuaranteed;
use rustc_hir::def::{CtorKind, Namespace};
use rustc_hir::def_id::{DefId, LocalDefId, CRATE_DEF_ID};
-use rustc_hir::{self, GeneratorKind};
+use rustc_hir::{self, GeneratorKind, ImplicitSelfKind};
use rustc_hir::{self as hir, HirId};
use rustc_session::Session;
use rustc_target::abi::{Size, VariantIdx};
@@ -116,11 +116,6 @@ pub trait MirPass<'tcx> {
fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>);
- /// If this pass causes the MIR to enter a new phase, return that phase.
- fn phase_change(&self) -> Option<MirPhase> {
- None
- }
-
fn is_mir_dump_enabled(&self) -> bool {
true
}
@@ -128,8 +123,49 @@ pub trait MirPass<'tcx> {
impl MirPhase {
/// Gets the index of the current MirPhase within the set of all `MirPhase`s.
+ ///
+ /// FIXME(JakobDegen): Return a `(usize, usize)` instead.
pub fn phase_index(&self) -> usize {
- *self as usize
+ const BUILT_PHASE_COUNT: usize = 1;
+ const ANALYSIS_PHASE_COUNT: usize = 2;
+ match self {
+ MirPhase::Built => 1,
+ MirPhase::Analysis(analysis_phase) => {
+ 1 + BUILT_PHASE_COUNT + (*analysis_phase as usize)
+ }
+ MirPhase::Runtime(runtime_phase) => {
+ 1 + BUILT_PHASE_COUNT + ANALYSIS_PHASE_COUNT + (*runtime_phase as usize)
+ }
+ }
+ }
+}
+
+impl Display for MirPhase {
+ fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
+ match self {
+ MirPhase::Built => write!(f, "built"),
+ MirPhase::Analysis(p) => write!(f, "analysis-{}", p),
+ MirPhase::Runtime(p) => write!(f, "runtime-{}", p),
+ }
+ }
+}
+
+impl Display for AnalysisPhase {
+ fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
+ match self {
+ AnalysisPhase::Initial => write!(f, "initial"),
+ AnalysisPhase::PostCleanup => write!(f, "post_cleanup"),
+ }
+ }
+}
+
+impl Display for RuntimePhase {
+ fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
+ match self {
+ RuntimePhase::Initial => write!(f, "initial"),
+ RuntimePhase::PostCleanup => write!(f, "post_cleanup"),
+ RuntimePhase::Optimized => write!(f, "optimized"),
+ }
}
}
@@ -195,6 +231,9 @@ pub struct Body<'tcx> {
/// us to see the difference and forego optimization on the inlined promoted items.
pub phase: MirPhase,
+ /// How many passes we have executed since starting the current phase. Used for debug output.
+ pub pass_count: usize,
+
pub source: MirSource<'tcx>,
/// A list of source scopes; these are referenced by statements
@@ -280,6 +319,7 @@ impl<'tcx> Body<'tcx> {
let mut body = Body {
phase: MirPhase::Built,
+ pass_count: 1,
source,
basic_blocks: BasicBlocks::new(basic_blocks),
source_scopes,
@@ -301,7 +341,7 @@ impl<'tcx> Body<'tcx> {
is_polymorphic: false,
tainted_by_errors,
};
- body.is_polymorphic = body.has_param_types_or_consts();
+ body.is_polymorphic = body.has_non_region_param();
body
}
@@ -313,6 +353,7 @@ impl<'tcx> Body<'tcx> {
pub fn new_cfg_only(basic_blocks: IndexVec<BasicBlock, BasicBlockData<'tcx>>) -> Self {
let mut body = Body {
phase: MirPhase::Built,
+ pass_count: 1,
source: MirSource::item(CRATE_DEF_ID.to_def_id()),
basic_blocks: BasicBlocks::new(basic_blocks),
source_scopes: IndexVec::new(),
@@ -327,16 +368,11 @@ impl<'tcx> Body<'tcx> {
is_polymorphic: false,
tainted_by_errors: None,
};
- body.is_polymorphic = body.has_param_types_or_consts();
+ body.is_polymorphic = body.has_non_region_param();
body
}
#[inline]
- pub fn basic_blocks(&self) -> &IndexVec<BasicBlock, BasicBlockData<'tcx>> {
- &self.basic_blocks
- }
-
- #[inline]
pub fn basic_blocks_mut(&mut self) -> &mut IndexVec<BasicBlock, BasicBlockData<'tcx>> {
self.basic_blocks.as_mut()
}
@@ -490,7 +526,7 @@ impl<'tcx> Index<BasicBlock> for Body<'tcx> {
#[inline]
fn index(&self, index: BasicBlock) -> &BasicBlockData<'tcx> {
- &self.basic_blocks()[index]
+ &self.basic_blocks[index]
}
}
@@ -646,22 +682,6 @@ pub enum BindingForm<'tcx> {
RefForGuard,
}
-/// Represents what type of implicit self a function has, if any.
-#[derive(Clone, Copy, PartialEq, Debug, TyEncodable, TyDecodable, HashStable)]
-pub enum ImplicitSelfKind {
- /// Represents a `fn x(self);`.
- Imm,
- /// Represents a `fn x(mut self);`.
- Mut,
- /// Represents a `fn x(&self);`.
- ImmRef,
- /// Represents a `fn x(&mut self);`.
- MutRef,
- /// Represents when a function does not have a self argument or
- /// when a function has a `self: X` argument.
- None,
-}
-
TrivialTypeTraversalAndLiftImpls! { BindingForm<'tcx>, }
mod binding_form_impl {
@@ -832,10 +852,6 @@ pub struct LocalDecl<'tcx> {
pub source_info: SourceInfo,
}
-// `LocalDecl` is used a lot. Make sure it doesn't unintentionally get bigger.
-#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
-static_assert_size!(LocalDecl<'_>, 56);
-
/// Extra information about some locals that's used for diagnostics and for
/// classifying variables into local variables, statics, etc, which is needed e.g.
/// for unsafety checking.
@@ -1310,10 +1326,6 @@ pub struct Statement<'tcx> {
pub kind: StatementKind<'tcx>,
}
-// `Statement` is used a lot. Make sure it doesn't unintentionally get bigger.
-#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
-static_assert_size!(Statement<'_>, 32);
-
impl Statement<'_> {
/// Changes a statement to a nop. This is both faster than deleting instructions and avoids
/// invalidating statement indices in `Location`s.
@@ -1363,13 +1375,7 @@ impl Debug for Statement<'_> {
write!(fmt, "Coverage::{:?} for {:?}", kind, rgn)
}
Coverage(box ref coverage) => write!(fmt, "Coverage::{:?}", coverage.kind),
- CopyNonOverlapping(box crate::mir::CopyNonOverlapping {
- ref src,
- ref dst,
- ref count,
- }) => {
- write!(fmt, "copy_nonoverlapping(src={:?}, dst={:?}, count={:?})", src, dst, count)
- }
+ Intrinsic(box ref intrinsic) => write!(fmt, "{intrinsic}"),
Nop => write!(fmt, "nop"),
}
}
@@ -1403,6 +1409,7 @@ impl<V, T> ProjectionElem<V, T> {
Self::Field(_, _)
| Self::Index(_)
+ | Self::OpaqueCast(_)
| Self::ConstantIndex { .. }
| Self::Subslice { .. }
| Self::Downcast(_, _) => false,
@@ -1450,7 +1457,7 @@ pub struct PlaceRef<'tcx> {
// Once we stop implementing `Ord` for `DefId`,
// this impl will be unnecessary. Until then, we'll
// leave this impl in place to prevent re-adding a
-// dependnecy on the `Ord` impl for `DefId`
+// dependency on the `Ord` impl for `DefId`
impl<'tcx> !PartialOrd for PlaceRef<'tcx> {}
impl<'tcx> Place<'tcx> {
@@ -1471,7 +1478,9 @@ impl<'tcx> Place<'tcx> {
/// It's guaranteed to be in the first place
pub fn has_deref(&self) -> bool {
// To make sure this is not accidentally used in the wrong MIR phase
- debug_assert!(!self.projection[1..].contains(&PlaceElem::Deref));
+ debug_assert!(
+ self.projection.is_empty() || !self.projection[1..].contains(&PlaceElem::Deref)
+ );
self.projection.first() == Some(&PlaceElem::Deref)
}
@@ -1531,6 +1540,7 @@ impl<'tcx> Place<'tcx> {
}
impl From<Local> for Place<'_> {
+ #[inline]
fn from(local: Local) -> Self {
Place { local, projection: List::empty() }
}
@@ -1594,7 +1604,9 @@ impl Debug for Place<'_> {
fn fmt(&self, fmt: &mut Formatter<'_>) -> fmt::Result {
for elem in self.projection.iter().rev() {
match elem {
- ProjectionElem::Downcast(_, _) | ProjectionElem::Field(_, _) => {
+ ProjectionElem::OpaqueCast(_)
+ | ProjectionElem::Downcast(_, _)
+ | ProjectionElem::Field(_, _) => {
write!(fmt, "(").unwrap();
}
ProjectionElem::Deref => {
@@ -1610,6 +1622,9 @@ impl Debug for Place<'_> {
for elem in self.projection.iter() {
match elem {
+ ProjectionElem::OpaqueCast(ty) => {
+ write!(fmt, " as {})", ty)?;
+ }
ProjectionElem::Downcast(Some(name), _index) => {
write!(fmt, " as {})", name)?;
}
@@ -1847,7 +1862,15 @@ impl<'tcx> Rvalue<'tcx> {
| Rvalue::AddressOf(_, _)
| Rvalue::Len(_)
| Rvalue::Cast(
- CastKind::Misc | CastKind::Pointer(_) | CastKind::PointerFromExposedAddress,
+ CastKind::IntToInt
+ | CastKind::FloatToInt
+ | CastKind::FloatToFloat
+ | CastKind::IntToFloat
+ | CastKind::FnPtrToPtr
+ | CastKind::PtrToPtr
+ | CastKind::Pointer(_)
+ | CastKind::PointerFromExposedAddress
+ | CastKind::DynStar,
_,
_,
)
@@ -2047,6 +2070,7 @@ impl<'tcx> Debug for Rvalue<'tcx> {
/// particular, one must be wary of `NaN`!
#[derive(Clone, Copy, PartialEq, TyEncodable, TyDecodable, Hash, HashStable)]
+#[derive(TypeFoldable, TypeVisitable)]
pub struct Constant<'tcx> {
pub span: Span,
@@ -2061,10 +2085,14 @@ pub struct Constant<'tcx> {
}
#[derive(Clone, Copy, PartialEq, Eq, TyEncodable, TyDecodable, Hash, HashStable, Debug)]
-#[derive(Lift)]
+#[derive(Lift, TypeFoldable, TypeVisitable)]
pub enum ConstantKind<'tcx> {
/// This constant came from the type system
Ty(ty::Const<'tcx>),
+
+ /// An unevaluated mir constant which is not part of the type system.
+ Unevaluated(UnevaluatedConst<'tcx>, Ty<'tcx>),
+
/// This constant cannot go back into the type system, as it represents
/// something the type system cannot handle (e.g. pointers).
Val(interpret::ConstValue<'tcx>, Ty<'tcx>),
@@ -2090,20 +2118,11 @@ impl<'tcx> Constant<'tcx> {
}
impl<'tcx> ConstantKind<'tcx> {
- /// Returns `None` if the constant is not trivially safe for use in the type system.
- #[inline]
- pub fn const_for_ty(&self) -> Option<ty::Const<'tcx>> {
- match self {
- ConstantKind::Ty(c) => Some(*c),
- ConstantKind::Val(..) => None,
- }
- }
-
#[inline(always)]
pub fn ty(&self) -> Ty<'tcx> {
match self {
ConstantKind::Ty(c) => c.ty(),
- ConstantKind::Val(_, ty) => *ty,
+ ConstantKind::Val(_, ty) | ConstantKind::Unevaluated(_, ty) => *ty,
}
}
@@ -2115,6 +2134,7 @@ impl<'tcx> ConstantKind<'tcx> {
_ => None,
},
ConstantKind::Val(val, _) => Some(val),
+ ConstantKind::Unevaluated(..) => None,
}
}
@@ -2129,6 +2149,7 @@ impl<'tcx> ConstantKind<'tcx> {
_ => None,
},
ConstantKind::Val(val, _) => val.try_to_scalar(),
+ ConstantKind::Unevaluated(..) => None,
}
}
@@ -2161,6 +2182,14 @@ impl<'tcx> ConstantKind<'tcx> {
}
}
Self::Val(_, _) => self,
+ Self::Unevaluated(uneval, ty) => {
+ // FIXME: We might want to have a `try_eval`-like function on `Unevaluated`
+ match tcx.const_eval_resolve(param_env, uneval, None) {
+ Ok(val) => Self::Val(val, ty),
+ Err(ErrorHandled::TooGeneric | ErrorHandled::Linted) => self,
+ Err(_) => Self::Ty(tcx.const_error(ty)),
+ }
+ }
}
}
@@ -2186,6 +2215,18 @@ impl<'tcx> ConstantKind<'tcx> {
tcx.layout_of(param_env.with_reveal_all_normalized(tcx).and(ty)).ok()?.size;
val.try_to_bits(size)
}
+ Self::Unevaluated(uneval, ty) => {
+ match tcx.const_eval_resolve(param_env, *uneval, None) {
+ Ok(val) => {
+ let size = tcx
+ .layout_of(param_env.with_reveal_all_normalized(tcx).and(*ty))
+ .ok()?
+ .size;
+ val.try_to_bits(size)
+ }
+ Err(_) => None,
+ }
+ }
}
}
@@ -2194,6 +2235,12 @@ impl<'tcx> ConstantKind<'tcx> {
match self {
Self::Ty(ct) => ct.try_eval_bool(tcx, param_env),
Self::Val(val, _) => val.try_to_bool(),
+ Self::Unevaluated(uneval, _) => {
+ match tcx.const_eval_resolve(param_env, *uneval, None) {
+ Ok(val) => val.try_to_bool(),
+ Err(_) => None,
+ }
+ }
}
}
@@ -2202,6 +2249,12 @@ impl<'tcx> ConstantKind<'tcx> {
match self {
Self::Ty(ct) => ct.try_eval_usize(tcx, param_env),
Self::Val(val, _) => val.try_to_machine_usize(tcx),
+ Self::Unevaluated(uneval, _) => {
+ match tcx.const_eval_resolve(param_env, *uneval, None) {
+ Ok(val) => val.try_to_machine_usize(tcx),
+ Err(_) => None,
+ }
+ }
}
}
@@ -2259,7 +2312,7 @@ impl<'tcx> ConstantKind<'tcx> {
Self::from_opt_const_arg_anon_const(tcx, ty::WithOptConstParam::unknown(def_id), param_env)
}
- #[instrument(skip(tcx), level = "debug")]
+ #[instrument(skip(tcx), level = "debug", ret)]
pub fn from_inline_const(tcx: TyCtxt<'tcx>, def_id: LocalDefId) -> Self {
let hir_id = tcx.hir().local_def_id_to_hir_id(def_id);
let body_id = match tcx.hir().get(hir_id) {
@@ -2297,21 +2350,18 @@ impl<'tcx> ConstantKind<'tcx> {
let substs =
ty::InlineConstSubsts::new(tcx, ty::InlineConstSubstsParts { parent_substs, ty })
.substs;
- let uneval_const = tcx.mk_const(ty::ConstS {
- kind: ty::ConstKind::Unevaluated(ty::Unevaluated {
- def: ty::WithOptConstParam::unknown(def_id).to_global(),
- substs,
- promoted: None,
- }),
- ty,
- });
- debug!(?uneval_const);
- debug_assert!(!uneval_const.has_free_regions());
- Self::Ty(uneval_const)
+ let uneval = UnevaluatedConst {
+ def: ty::WithOptConstParam::unknown(def_id).to_global(),
+ substs,
+ promoted: None,
+ };
+ debug_assert!(!uneval.has_free_regions());
+
+ Self::Unevaluated(uneval, ty)
}
- #[instrument(skip(tcx), level = "debug")]
+ #[instrument(skip(tcx), level = "debug", ret)]
fn from_opt_const_arg_anon_const(
tcx: TyCtxt<'tcx>,
def: ty::WithOptConstParam<LocalDefId>,
@@ -2389,29 +2439,26 @@ impl<'tcx> ConstantKind<'tcx> {
let hir_id = tcx.hir().local_def_id_to_hir_id(def.did);
let span = tcx.hir().span(hir_id);
- let uneval = ty::Unevaluated::new(def.to_global(), substs);
+ let uneval = UnevaluatedConst::new(def.to_global(), substs);
debug!(?span, ?param_env);
match tcx.const_eval_resolve(param_env, uneval, Some(span)) {
Ok(val) => {
- debug!("evaluated const value: {:?}", val);
+ debug!("evaluated const value");
Self::Val(val, ty)
}
Err(_) => {
debug!("error encountered during evaluation");
// Error was handled in `const_eval_resolve`. Here we just create a
// new unevaluated const and error hard later in codegen
- let ty_const = tcx.mk_const(ty::ConstS {
- kind: ty::ConstKind::Unevaluated(ty::Unevaluated {
+ Self::Unevaluated(
+ UnevaluatedConst {
def: def.to_global(),
substs: InternalSubsts::identity_for_item(tcx, def.did.to_def_id()),
promoted: None,
- }),
+ },
ty,
- });
- debug!(?ty_const);
-
- Self::Ty(ty_const)
+ )
}
}
}
@@ -2422,11 +2469,40 @@ impl<'tcx> ConstantKind<'tcx> {
let const_val = tcx.valtree_to_const_val((c.ty(), valtree));
Self::Val(const_val, c.ty())
}
+ ty::ConstKind::Unevaluated(uv) => Self::Unevaluated(uv.expand(), c.ty()),
_ => Self::Ty(c),
}
}
}
+/// An unevaluated (potentially generic) constant used in MIR.
+#[derive(Copy, Clone, Debug, Eq, PartialEq, PartialOrd, Ord, TyEncodable, TyDecodable, Lift)]
+#[derive(Hash, HashStable, TypeFoldable, TypeVisitable)]
+pub struct UnevaluatedConst<'tcx> {
+ pub def: ty::WithOptConstParam<DefId>,
+ pub substs: SubstsRef<'tcx>,
+ pub promoted: Option<Promoted>,
+}
+
+impl<'tcx> UnevaluatedConst<'tcx> {
+ // FIXME: probably should get rid of this method. It's also wrong to
+ // shrink and then later expand a promoted.
+ #[inline]
+ pub fn shrink(self) -> ty::UnevaluatedConst<'tcx> {
+ ty::UnevaluatedConst { def: self.def, substs: self.substs }
+ }
+}
+
+impl<'tcx> UnevaluatedConst<'tcx> {
+ #[inline]
+ pub fn new(
+ def: ty::WithOptConstParam<DefId>,
+ substs: SubstsRef<'tcx>,
+ ) -> UnevaluatedConst<'tcx> {
+ UnevaluatedConst { def, substs, promoted: Default::default() }
+ }
+}
+
/// A collection of projections into user types.
///
/// They are projections because a binding can occur as part of a
@@ -2576,8 +2652,6 @@ impl UserTypeProjection {
}
}
-TrivialTypeTraversalAndLiftImpls! { ProjectionKind, }
-
impl<'tcx> TypeFoldable<'tcx> for UserTypeProjection {
fn try_fold_with<F: FallibleTypeFolder<'tcx>>(self, folder: &mut F) -> Result<Self, F::Error> {
Ok(UserTypeProjection {
@@ -2622,6 +2696,11 @@ impl<'tcx> Display for ConstantKind<'tcx> {
match *self {
ConstantKind::Ty(c) => pretty_print_const(c, fmt, true),
ConstantKind::Val(val, ty) => pretty_print_const_value(val, ty, fmt, true),
+ // FIXME(valtrees): Correctly print mir constants.
+ ConstantKind::Unevaluated(..) => {
+ fmt.write_str("_")?;
+ Ok(())
+ }
}
}
}
@@ -2643,15 +2722,7 @@ fn pretty_print_const<'tcx>(
}
fn pretty_print_byte_str(fmt: &mut Formatter<'_>, byte_str: &[u8]) -> fmt::Result {
- fmt.write_str("b\"")?;
- for &c in byte_str {
- for e in std::ascii::escape_default(c) {
- fmt.write_char(e as char)?;
- }
- }
- fmt.write_str("\"")?;
-
- Ok(())
+ write!(fmt, "b\"{}\"", byte_str.escape_ascii())
}
fn comma_sep<'tcx>(fmt: &mut Formatter<'_>, elems: Vec<ConstantKind<'tcx>>) -> fmt::Result {
@@ -2691,8 +2762,8 @@ fn pretty_print_const_value<'tcx>(
match inner.kind() {
ty::Slice(t) => {
if *t == u8_type {
- // The `inspect` here is okay since we checked the bounds, and there are
- // no relocations (we have an active slice reference here). We don't use
+ // The `inspect` here is okay since we checked the bounds, and `u8` carries
+ // no provenance (we have an active slice reference here). We don't use
// this result to affect interpreter execution.
let byte_str = data
.inner()
@@ -2702,8 +2773,8 @@ fn pretty_print_const_value<'tcx>(
}
}
ty::Str => {
- // The `inspect` here is okay since we checked the bounds, and there are no
- // relocations (we have an active `str` reference here). We don't use this
+ // The `inspect` here is okay since we checked the bounds, and `str` carries
+ // no provenance (we have an active `str` reference here). We don't use this
// result to affect interpreter execution.
let slice = data
.inner()
@@ -2718,14 +2789,14 @@ fn pretty_print_const_value<'tcx>(
let n = n.kind().try_to_bits(tcx.data_layout.pointer_size).unwrap();
// cast is ok because we already checked for pointer size (32 or 64 bit) above
let range = AllocRange { start: offset, size: Size::from_bytes(n) };
- let byte_str = alloc.inner().get_bytes(&tcx, range).unwrap();
+ let byte_str = alloc.inner().get_bytes_strip_provenance(&tcx, range).unwrap();
fmt.write_str("*")?;
pretty_print_byte_str(fmt, byte_str)?;
return Ok(());
}
// Aggregates, printed as array/tuple/struct/variant construction syntax.
//
- // NB: the `has_param_types_or_consts` check ensures that we can use
+ // NB: the `has_non_region_param` check ensures that we can use
// the `destructure_const` query with an empty `ty::ParamEnv` without
// introducing ICEs (e.g. via `layout_of`) from missing bounds.
// E.g. `transmute([0usize; 2]): (u8, *mut T)` needs to know `T: Sized`
@@ -2733,7 +2804,7 @@ fn pretty_print_const_value<'tcx>(
//
// FIXME(eddyb) for `--emit=mir`/`-Z dump-mir`, we should provide the
// correct `ty::ParamEnv` to allow printing *all* constant values.
- (_, ty::Array(..) | ty::Tuple(..) | ty::Adt(..)) if !ty.has_param_types_or_consts() => {
+ (_, ty::Array(..) | ty::Tuple(..) | ty::Adt(..)) if !ty.has_non_region_param() => {
let ct = tcx.lift(ct).unwrap();
let ty = tcx.lift(ty).unwrap();
if let Some(contents) = tcx.try_destructure_mir_constant(
@@ -2898,3 +2969,18 @@ impl Location {
}
}
}
+
+// Some nodes are used a lot. Make sure they don't unintentionally get bigger.
+#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
+mod size_asserts {
+ use super::*;
+ use rustc_data_structures::static_assert_size;
+ // tidy-alphabetical-start
+ static_assert_size!(BasicBlockData<'_>, 144);
+ static_assert_size!(LocalDecl<'_>, 56);
+ static_assert_size!(Statement<'_>, 32);
+ static_assert_size!(StatementKind<'_>, 16);
+ static_assert_size!(Terminator<'_>, 112);
+ static_assert_size!(TerminatorKind<'_>, 96);
+ // tidy-alphabetical-end
+}
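
The `phase_index` rewrite above flattens the nested `MirPhase` into a single 1-based index, and the new `Display` impls name each phase for dump output. A runnable sketch of the same mapping, using the counts from the diff but otherwise simplified, hypothetical types:

#[derive(Copy, Clone)]
enum AnalysisPhase { Initial, PostCleanup }
#[derive(Copy, Clone)]
enum RuntimePhase { Initial, PostCleanup, Optimized }
#[derive(Copy, Clone)]
enum MirPhase { Built, Analysis(AnalysisPhase), Runtime(RuntimePhase) }

impl MirPhase {
    // Same flattening as the diff: a 1-based index over Built, then the
    // analysis phases, then the runtime phases.
    fn phase_index(self) -> usize {
        const BUILT_PHASE_COUNT: usize = 1;
        const ANALYSIS_PHASE_COUNT: usize = 2;
        match self {
            MirPhase::Built => 1,
            MirPhase::Analysis(p) => 1 + BUILT_PHASE_COUNT + p as usize,
            MirPhase::Runtime(p) => 1 + BUILT_PHASE_COUNT + ANALYSIS_PHASE_COUNT + p as usize,
        }
    }
}

fn main() {
    assert_eq!(MirPhase::Built.phase_index(), 1);
    assert_eq!(MirPhase::Analysis(AnalysisPhase::Initial).phase_index(), 2);
    assert_eq!(MirPhase::Analysis(AnalysisPhase::PostCleanup).phase_index(), 3);
    assert_eq!(MirPhase::Runtime(RuntimePhase::Initial).phase_index(), 4);
    assert_eq!(MirPhase::Runtime(RuntimePhase::PostCleanup).phase_index(), 5);
    assert_eq!(MirPhase::Runtime(RuntimePhase::Optimized).phase_index(), 6);
}
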
diff --git a/compiler/rustc_middle/src/mir/mono.rs b/compiler/rustc_middle/src/mir/mono.rs
index 21ae121e1..15a24aa4a 100644
--- a/compiler/rustc_middle/src/mir/mono.rs
+++ b/compiler/rustc_middle/src/mir/mono.rs
@@ -81,7 +81,7 @@ impl<'tcx> MonoItem<'tcx> {
MonoItem::Fn(instance) => tcx.symbol_name(instance),
MonoItem::Static(def_id) => tcx.symbol_name(Instance::mono(tcx, def_id)),
MonoItem::GlobalAsm(item_id) => {
- SymbolName::new(tcx, &format!("global_asm_{:?}", item_id.def_id))
+ SymbolName::new(tcx, &format!("global_asm_{:?}", item_id.owner_id))
}
}
}
@@ -182,7 +182,7 @@ impl<'tcx> MonoItem<'tcx> {
match *self {
MonoItem::Fn(Instance { def, .. }) => def.def_id().as_local(),
MonoItem::Static(def_id) => def_id.as_local(),
- MonoItem::GlobalAsm(item_id) => Some(item_id.def_id),
+ MonoItem::GlobalAsm(item_id) => Some(item_id.owner_id.def_id),
}
.map(|def_id| tcx.def_span(def_id))
}
@@ -373,7 +373,7 @@ impl<'tcx> CodegenUnit<'tcx> {
}
}
MonoItem::Static(def_id) => def_id.as_local().map(Idx::index),
- MonoItem::GlobalAsm(item_id) => Some(item_id.def_id.index()),
+ MonoItem::GlobalAsm(item_id) => Some(item_id.owner_id.def_id.index()),
},
item.symbol_name(tcx),
)
diff --git a/compiler/rustc_middle/src/mir/patch.rs b/compiler/rustc_middle/src/mir/patch.rs
index 15496842d..24fe3b472 100644
--- a/compiler/rustc_middle/src/mir/patch.rs
+++ b/compiler/rustc_middle/src/mir/patch.rs
@@ -19,7 +19,7 @@ pub struct MirPatch<'tcx> {
impl<'tcx> MirPatch<'tcx> {
pub fn new(body: &Body<'tcx>) -> Self {
let mut result = MirPatch {
- patch_map: IndexVec::from_elem(None, body.basic_blocks()),
+ patch_map: IndexVec::from_elem(None, &body.basic_blocks),
new_blocks: vec![],
new_statements: vec![],
new_locals: vec![],
@@ -29,7 +29,7 @@ impl<'tcx> MirPatch<'tcx> {
};
// Check if we already have a resume block
- for (bb, block) in body.basic_blocks().iter_enumerated() {
+ for (bb, block) in body.basic_blocks.iter_enumerated() {
if let TerminatorKind::Resume = block.terminator().kind && block.statements.is_empty() {
result.resume_block = Some(bb);
break;
@@ -61,14 +61,14 @@ impl<'tcx> MirPatch<'tcx> {
}
pub fn terminator_loc(&self, body: &Body<'tcx>, bb: BasicBlock) -> Location {
- let offset = match bb.index().checked_sub(body.basic_blocks().len()) {
+ let offset = match bb.index().checked_sub(body.basic_blocks.len()) {
Some(index) => self.new_blocks[index].statements.len(),
None => body[bb].statements.len(),
};
Location { block: bb, statement_index: offset }
}
- pub fn new_local_with_info(
+ pub fn new_internal_with_info(
&mut self,
ty: Ty<'tcx>,
span: Span,
@@ -76,14 +76,17 @@ impl<'tcx> MirPatch<'tcx> {
) -> Local {
let index = self.next_local;
self.next_local += 1;
- let mut new_decl = LocalDecl::new(ty, span);
+ let mut new_decl = LocalDecl::new(ty, span).internal();
new_decl.local_info = local_info;
self.new_locals.push(new_decl);
Local::new(index as usize)
}
pub fn new_temp(&mut self, ty: Ty<'tcx>, span: Span) -> Local {
- self.new_local_with_info(ty, span, None)
+ let index = self.next_local;
+ self.next_local += 1;
+ self.new_locals.push(LocalDecl::new(ty, span));
+ Local::new(index as usize)
}
pub fn new_internal(&mut self, ty: Ty<'tcx>, span: Span) -> Local {
@@ -126,7 +129,7 @@ impl<'tcx> MirPatch<'tcx> {
debug!(
"MirPatch: {} new blocks, starting from index {}",
self.new_blocks.len(),
- body.basic_blocks().len()
+ body.basic_blocks.len()
);
let bbs = if self.patch_map.is_empty() && self.new_blocks.is_empty() {
body.basic_blocks.as_mut_preserves_cfg()
@@ -147,7 +150,6 @@ impl<'tcx> MirPatch<'tcx> {
let mut delta = 0;
let mut last_bb = START_BLOCK;
- let mut stmts_and_targets: Vec<(Statement<'_>, BasicBlock)> = Vec::new();
for (mut loc, stmt) in new_statements {
if loc.block != last_bb {
delta = 0;
@@ -156,27 +158,11 @@ impl<'tcx> MirPatch<'tcx> {
debug!("MirPatch: adding statement {:?} at loc {:?}+{}", stmt, loc, delta);
loc.statement_index += delta;
let source_info = Self::source_info_for_index(&body[loc.block], loc);
-
- // For mir-opt `Derefer` to work in all cases we need to
- // get terminator's targets and apply the statement to all of them.
- if loc.statement_index > body[loc.block].statements.len() {
- let term = body[loc.block].terminator();
- for i in term.successors() {
- stmts_and_targets.push((Statement { source_info, kind: stmt.clone() }, i));
- }
- delta += 1;
- continue;
- }
-
body[loc.block]
.statements
.insert(loc.statement_index, Statement { source_info, kind: stmt });
delta += 1;
}
-
- for (stmt, target) in stmts_and_targets.into_iter().rev() {
- body[target].statements.insert(0, stmt);
- }
}
pub fn source_info_for_index(data: &BasicBlockData<'_>, loc: Location) -> SourceInfo {
@@ -187,7 +173,7 @@ impl<'tcx> MirPatch<'tcx> {
}
pub fn source_info_for_location(&self, body: &Body<'tcx>, loc: Location) -> SourceInfo {
- let data = match loc.block.index().checked_sub(body.basic_blocks().len()) {
+ let data = match loc.block.index().checked_sub(body.basic_blocks.len()) {
Some(new) => &self.new_blocks[new],
None => &body[loc.block],
};
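
`MirPatch` keeps patched-in blocks in a side table and numbers them after the existing ones, which is why `terminator_loc` and `source_info_for_location` above use `checked_sub` on the block index. A small standalone sketch of that lookup trick (names invented for illustration):

// Blocks added by the patch are numbered after the existing ones, so
// `index - existing.len()` (when it does not underflow) points into `new_blocks`.
fn lookup<'a, T>(existing: &'a [T], new_blocks: &'a [T], index: usize) -> &'a T {
    match index.checked_sub(existing.len()) {
        Some(new_index) => &new_blocks[new_index],
        None => &existing[index],
    }
}

fn main() {
    let existing = ["bb0", "bb1"];
    let new_blocks = ["bb2 (patched in)"];
    assert_eq!(*lookup(&existing, &new_blocks, 1), "bb1");
    assert_eq!(*lookup(&existing, &new_blocks, 2), "bb2 (patched in)");
}
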
diff --git a/compiler/rustc_middle/src/mir/pretty.rs b/compiler/rustc_middle/src/mir/pretty.rs
index 0ce41337b..05dcfba77 100644
--- a/compiler/rustc_middle/src/mir/pretty.rs
+++ b/compiler/rustc_middle/src/mir/pretty.rs
@@ -318,10 +318,10 @@ where
F: FnMut(PassWhere, &mut dyn Write) -> io::Result<()>,
{
write_mir_intro(tcx, body, w)?;
- for block in body.basic_blocks().indices() {
+ for block in body.basic_blocks.indices() {
extra_data(PassWhere::BeforeBlock(block), w)?;
write_basic_block(tcx, block, body, extra_data, w)?;
- if block.index() + 1 != body.basic_blocks().len() {
+ if block.index() + 1 != body.basic_blocks.len() {
writeln!(w)?;
}
}
@@ -464,12 +464,13 @@ impl<'tcx> Visitor<'tcx> for ExtraComments<'tcx> {
let val = match literal {
ConstantKind::Ty(ct) => match ct.kind() {
ty::ConstKind::Param(p) => format!("Param({})", p),
- ty::ConstKind::Unevaluated(uv) => format!(
- "Unevaluated({}, {:?}, {:?})",
- self.tcx.def_path_str(uv.def.did),
- uv.substs,
- uv.promoted,
- ),
+ ty::ConstKind::Unevaluated(uv) => {
+ format!(
+ "Unevaluated({}, {:?})",
+ self.tcx.def_path_str(uv.def.did),
+ uv.substs,
+ )
+ }
ty::ConstKind::Value(val) => format!("Value({})", fmt_valtree(&val)),
ty::ConstKind::Error(_) => "Error".to_string(),
// These variants shouldn't exist in the MIR.
@@ -477,6 +478,14 @@ impl<'tcx> Visitor<'tcx> for ExtraComments<'tcx> {
| ty::ConstKind::Infer(_)
| ty::ConstKind::Bound(..) => bug!("unexpected MIR constant: {:?}", literal),
},
+ ConstantKind::Unevaluated(uv, _) => {
+ format!(
+ "Unevaluated({}, {:?}, {:?})",
+ self.tcx.def_path_str(uv.def.did),
+ uv.substs,
+ uv.promoted,
+ )
+ }
// To keep the diffs small, we render this like we render `ty::Const::Value`.
//
// This changes once `ty::Const::Value` is represented using valtrees.
@@ -676,7 +685,7 @@ pub fn write_allocations<'tcx>(
fn alloc_ids_from_alloc(
alloc: ConstAllocation<'_>,
) -> impl DoubleEndedIterator<Item = AllocId> + '_ {
- alloc.inner().relocations().values().map(|id| *id)
+ alloc.inner().provenance().values().map(|id| *id)
}
fn alloc_ids_from_const_val(val: ConstValue<'_>) -> impl Iterator<Item = AllocId> + '_ {
@@ -696,9 +705,9 @@ pub fn write_allocations<'tcx>(
struct CollectAllocIds(BTreeSet<AllocId>);
impl<'tcx> Visitor<'tcx> for CollectAllocIds {
- fn visit_constant(&mut self, c: &Constant<'tcx>, loc: Location) {
+ fn visit_constant(&mut self, c: &Constant<'tcx>, _: Location) {
match c.literal {
- ConstantKind::Ty(c) => self.visit_const(c, loc),
+ ConstantKind::Ty(_) | ConstantKind::Unevaluated(..) => {}
ConstantKind::Val(val, _) => {
self.0.extend(alloc_ids_from_const_val(val));
}
@@ -778,7 +787,7 @@ pub fn write_allocations<'tcx>(
/// If the allocation is small enough to fit into a single line, no start address is given.
/// After the hex dump, an ascii dump follows, replacing all unprintable characters (control
/// characters or characters whose value is larger than 127) with a `.`
-/// This also prints relocations adequately.
+/// This also prints provenance adequately.
pub fn display_allocation<'a, 'tcx, Prov, Extra>(
tcx: TyCtxt<'tcx>,
alloc: &'a Allocation<Prov, Extra>,
@@ -873,34 +882,34 @@ fn write_allocation_bytes<'tcx, Prov: Provenance, Extra>(
if i != line_start {
write!(w, " ")?;
}
- if let Some(&prov) = alloc.relocations().get(&i) {
- // Memory with a relocation must be defined
+ if let Some(&prov) = alloc.provenance().get(&i) {
+ // Memory with provenance must be defined
assert!(alloc.init_mask().is_range_initialized(i, i + ptr_size).is_ok());
let j = i.bytes_usize();
let offset = alloc
.inspect_with_uninit_and_ptr_outside_interpreter(j..j + ptr_size.bytes_usize());
let offset = read_target_uint(tcx.data_layout.endian, offset).unwrap();
let offset = Size::from_bytes(offset);
- let relocation_width = |bytes| bytes * 3;
+ let provenance_width = |bytes| bytes * 3;
let ptr = Pointer::new(prov, offset);
let mut target = format!("{:?}", ptr);
- if target.len() > relocation_width(ptr_size.bytes_usize() - 1) {
+ if target.len() > provenance_width(ptr_size.bytes_usize() - 1) {
// This is too long, try to save some space.
target = format!("{:#?}", ptr);
}
if ((i - line_start) + ptr_size).bytes_usize() > BYTES_PER_LINE {
- // This branch handles the situation where a relocation starts in the current line
+ // This branch handles the situation where a provenance entry starts in the current line
// but ends in the next one.
let remainder = Size::from_bytes(BYTES_PER_LINE) - (i - line_start);
let overflow = ptr_size - remainder;
- let remainder_width = relocation_width(remainder.bytes_usize()) - 2;
- let overflow_width = relocation_width(overflow.bytes_usize() - 1) + 1;
+ let remainder_width = provenance_width(remainder.bytes_usize()) - 2;
+ let overflow_width = provenance_width(overflow.bytes_usize() - 1) + 1;
ascii.push('╾');
for _ in 0..remainder.bytes() - 1 {
ascii.push('─');
}
if overflow_width > remainder_width && overflow_width >= target.len() {
- // The case where the relocation fits into the part in the next line
+ // The case where the provenance fits into the part in the next line
write!(w, "╾{0:─^1$}", "", remainder_width)?;
line_start =
write_allocation_newline(w, line_start, &ascii, pos_width, prefix)?;
@@ -921,11 +930,11 @@ fn write_allocation_bytes<'tcx, Prov: Provenance, Extra>(
i += ptr_size;
continue;
} else {
- // This branch handles a relocation that starts and ends in the current line.
- let relocation_width = relocation_width(ptr_size.bytes_usize() - 1);
- oversized_ptr(&mut target, relocation_width);
+ // This branch handles a provenance entry that starts and ends in the current line.
+ let provenance_width = provenance_width(ptr_size.bytes_usize() - 1);
+ oversized_ptr(&mut target, provenance_width);
ascii.push('╾');
- write!(w, "╾{0:─^1$}╼", target, relocation_width)?;
+ write!(w, "╾{0:─^1$}╼", target, provenance_width)?;
for _ in 0..ptr_size.bytes() - 2 {
ascii.push('─');
}
@@ -935,7 +944,7 @@ fn write_allocation_bytes<'tcx, Prov: Provenance, Extra>(
} else if alloc.init_mask().is_range_initialized(i, i + Size::from_bytes(1)).is_ok() {
let j = i.bytes_usize();
- // Checked definedness (and thus range) and relocations. This access also doesn't
+ // Checked definedness (and thus range) and provenance. This access also doesn't
// influence interpreter execution but is only for debugging.
let c = alloc.inspect_with_uninit_and_ptr_outside_interpreter(j..j + 1)[0];
write!(w, "{:02x}", c)?;
diff --git a/compiler/rustc_middle/src/mir/query.rs b/compiler/rustc_middle/src/mir/query.rs
index dd9f8795f..efd7357af 100644
--- a/compiler/rustc_middle/src/mir/query.rs
+++ b/compiler/rustc_middle/src/mir/query.rs
@@ -2,7 +2,7 @@
use crate::mir::{Body, ConstantKind, Promoted};
use crate::ty::{self, OpaqueHiddenType, Ty, TyCtxt};
-use rustc_data_structures::fx::FxHashMap;
+use rustc_data_structures::fx::FxHashSet;
use rustc_data_structures::vec_map::VecMap;
use rustc_errors::ErrorGuaranteed;
use rustc_hir as hir;
@@ -115,21 +115,6 @@ pub enum UnusedUnsafe {
/// `unsafe` block nested under another (used) `unsafe` block
/// > ``… because it's nested under this `unsafe` block``
InUnsafeBlock(hir::HirId),
- /// `unsafe` block nested under `unsafe fn`
- /// > ``… because it's nested under this `unsafe fn` ``
- ///
- /// the second HirId here indicates the first usage of the `unsafe` block,
- /// which allows retrieval of the LintLevelSource for why that operation would
- /// have been permitted without the block
- InUnsafeFn(hir::HirId, hir::HirId),
-}
-
-#[derive(Copy, Clone, PartialEq, TyEncodable, TyDecodable, HashStable, Debug)]
-pub enum UsedUnsafeBlockData {
- SomeDisallowedInUnsafeFn,
- // the HirId here indicates the first usage of the `unsafe` block
- // (i.e. the one that's first encountered in the MIR traversal of the unsafety check)
- AllAllowedInUnsafeFn(hir::HirId),
}
#[derive(TyEncodable, TyDecodable, HashStable, Debug)]
@@ -138,10 +123,7 @@ pub struct UnsafetyCheckResult {
pub violations: Vec<UnsafetyViolation>,
/// Used `unsafe` blocks in this function. This is used for the "unused_unsafe" lint.
- ///
- /// The keys are the used `unsafe` blocks, the UnusedUnsafeKind indicates whether
- /// or not any of the usages happen at a place that doesn't allow `unsafe_op_in_unsafe_fn`.
- pub used_unsafe_blocks: FxHashMap<hir::HirId, UsedUnsafeBlockData>,
+ pub used_unsafe_blocks: FxHashSet<hir::HirId>,
/// This is `Some` iff the item is not a closure.
pub unused_unsafes: Option<Vec<(hir::HirId, UnusedUnsafe)>>,
@@ -345,7 +327,7 @@ rustc_data_structures::static_assert_size!(ConstraintCategory<'_>, 16);
///
/// See also `rustc_const_eval::borrow_check::constraints`.
#[derive(Copy, Clone, Debug, Eq, PartialEq, PartialOrd, Ord, Hash)]
-#[derive(TyEncodable, TyDecodable, HashStable)]
+#[derive(TyEncodable, TyDecodable, HashStable, Lift, TypeVisitable, TypeFoldable)]
pub enum ConstraintCategory<'tcx> {
Return(ReturnConstraint),
Yield,
@@ -387,7 +369,7 @@ pub enum ConstraintCategory<'tcx> {
}
#[derive(Copy, Clone, Debug, Eq, PartialEq, PartialOrd, Ord, Hash)]
-#[derive(TyEncodable, TyDecodable, HashStable)]
+#[derive(TyEncodable, TyDecodable, HashStable, TypeVisitable, TypeFoldable)]
pub enum ReturnConstraint {
Normal,
ClosureUpvar(Field),
@@ -410,16 +392,9 @@ pub enum ClosureOutlivesSubject<'tcx> {
Region(ty::RegionVid),
}
-/// The constituent parts of a type level constant of kind ADT or array.
-#[derive(Copy, Clone, Debug, HashStable)]
-pub struct DestructuredConst<'tcx> {
- pub variant: Option<VariantIdx>,
- pub fields: &'tcx [ty::Const<'tcx>],
-}
-
/// The constituent parts of a mir constant of kind ADT or array.
#[derive(Copy, Clone, Debug, HashStable)]
-pub struct DestructuredMirConstant<'tcx> {
+pub struct DestructuredConstant<'tcx> {
pub variant: Option<VariantIdx>,
pub fields: &'tcx [ConstantKind<'tcx>],
}
diff --git a/compiler/rustc_middle/src/mir/spanview.rs b/compiler/rustc_middle/src/mir/spanview.rs
index 4418b848e..4e06d9101 100644
--- a/compiler/rustc_middle/src/mir/spanview.rs
+++ b/compiler/rustc_middle/src/mir/spanview.rs
@@ -105,7 +105,7 @@ where
}
let body_span = hir_body.unwrap().value.span;
let mut span_viewables = Vec::new();
- for (bb, data) in body.basic_blocks().iter_enumerated() {
+ for (bb, data) in body.basic_blocks.iter_enumerated() {
match spanview {
MirSpanview::Statement => {
for (i, statement) in data.statements.iter().enumerate() {
@@ -249,7 +249,7 @@ pub fn statement_kind_name(statement: &Statement<'_>) -> &'static str {
Retag(..) => "Retag",
AscribeUserType(..) => "AscribeUserType",
Coverage(..) => "Coverage",
- CopyNonOverlapping(..) => "CopyNonOverlapping",
+ Intrinsic(..) => "Intrinsic",
Nop => "Nop",
}
}
diff --git a/compiler/rustc_middle/src/mir/syntax.rs b/compiler/rustc_middle/src/mir/syntax.rs
index eb90169d0..85ef51f12 100644
--- a/compiler/rustc_middle/src/mir/syntax.rs
+++ b/compiler/rustc_middle/src/mir/syntax.rs
@@ -23,75 +23,111 @@ use rustc_span::symbol::Symbol;
use rustc_span::Span;
use rustc_target::asm::InlineAsmRegOrRegClass;
-/// The various "big phases" that MIR goes through.
+/// Represents the "flavors" of MIR.
///
-/// These phases all describe dialects of MIR. Since all MIR uses the same datastructures, the
-/// dialects forbid certain variants or values in certain phases. The sections below summarize the
-/// changes, but do not document them thoroughly. The full documentation is found in the appropriate
-/// documentation for the thing the change is affecting.
+/// All flavors of MIR use the same data structure, but there are some important differences. These
+/// differences come in two forms: Dialects and phases.
///
-/// Warning: ordering of variants is significant.
+/// Dialects represent a stronger distinction than phases. This is because the transitions between
+/// dialects are semantic changes, and therefore technically *lowerings* between distinct IRs. In
+/// other words, the same [`Body`](crate::mir::Body) might be well-formed for multiple dialects, but
+/// have different semantic meaning and different behavior at runtime.
+///
+/// Each dialect additionally has a number of phases. However, phase changes never involve semantic
+/// changes. If some MIR is well-formed both before and after a phase change, it is also guaranteed
+/// that it has the same semantic meaning. In this sense, phase changes can only add additional
+/// restrictions on what MIR is well-formed.
+///
+/// When adding phases, remember to update [`MirPhase::phase_index`].
#[derive(Copy, Clone, TyEncodable, TyDecodable, Debug, PartialEq, Eq, PartialOrd, Ord)]
#[derive(HashStable)]
pub enum MirPhase {
- /// The dialect of MIR used during all phases before `DropsLowered` is the same. This is also
- /// the MIR that analysis such as borrowck uses.
- ///
- /// One important thing to remember about the behavior of this section of MIR is that drop terminators
- /// (including drop and replace) are *conditional*. The elaborate drops pass will then replace each
- /// instance of a drop terminator with a nop, an unconditional drop, or a drop conditioned on a drop
- /// flag. Of course, this means that it is important that the drop elaboration can accurately recognize
- /// when things are initialized and when things are de-initialized. That means any code running on this
- /// version of MIR must be sure to produce output that drop elaboration can reason about. See the
- /// section on the drop terminatorss for more details.
- Built = 0,
- // FIXME(oli-obk): it's unclear whether we still need this phase (and its corresponding query).
- // We used to have this for pre-miri MIR based const eval.
- Const = 1,
- /// This phase checks the MIR for promotable elements and takes them out of the main MIR body
- /// by creating a new MIR body per promoted element. After this phase (and thus the termination
- /// of the `mir_promoted` query), these promoted elements are available in the `promoted_mir`
- /// query.
- ConstsPromoted = 2,
- /// After this projections may only contain deref projections as the first element.
- Derefered = 3,
- /// Beginning with this phase, the following variants are disallowed:
- /// * [`TerminatorKind::DropAndReplace`]
+ /// The MIR that is generated by MIR building.
+ ///
+ /// The only things that operate on this dialect are unsafeck, the various MIR lints, and const
+ /// qualifs.
+ ///
+ /// This has no distinct phases.
+ Built,
+ /// The MIR used for most analysis.
+ ///
+ /// The only semantic change between analysis and built MIR is constant promotion. In built MIR,
+ /// sequences of statements that would generally be subject to constant promotion are
+ /// semantically constants, while in analysis MIR all constants are explicit.
+ ///
+ /// The result of const promotion is available from the `mir_promoted` and `promoted_mir` queries.
+ ///
+ /// This is the version of MIR used by borrowck and friends.
+ Analysis(AnalysisPhase),
+ /// The MIR used for CTFE, optimizations, and codegen.
+ ///
+ /// The semantic changes that occur in the lowering from analysis to runtime MIR are as follows:
+ ///
+ /// - Drops: In analysis MIR, `Drop` terminators represent *conditional* drops; roughly speaking,
+ /// if dataflow analysis determines that the place being dropped is uninitialized, the drop will
+ /// not be executed. The exact semantics of this aren't written down anywhere, which means they
+ /// are essentially "what drop elaboration does." In runtime MIR, the drops are unconditional;
+ /// when a `Drop` terminator is reached, if the type has drop glue that drop glue is always
+ /// executed. This may be UB if the underlying place is not initialized.
+ /// - Packed drops: Places might in general be misaligned - in most cases this is UB; the exception
+ /// is fields of packed structs. In analysis MIR, `Drop(P)` for a `P` that might be misaligned
+ /// for this reason implicitly moves `P` to a temporary before dropping. Runtime MIR has no such
+ /// rules, and dropping a misaligned place is simply UB.
+ /// - Unwinding: in analysis MIR, unwinding from a function which may not unwind aborts. In runtime
+ /// MIR, this is UB.
+ /// - Retags: If `-Zmir-emit-retag` is enabled, analysis MIR has "implicit" retags in the same way
+ /// that Rust itself has them. Where exactly these are is generally subject to change, and so we
+ /// don't document this here. Runtime MIR has all retags explicit.
+ /// - Generator bodies: In analysis MIR, locals may actually be behind a pointer that user code has
+ /// access to. This occurs in generator bodies. Such locals do not behave like other locals,
+ /// because they e.g. may be aliased in surprising ways. Runtime MIR has no such special locals -
+ /// all generator bodies are lowered and so all places that look like locals really are locals.
+ ///
+ /// Also note that the lint pass which reports e.g. `200_u8 + 200_u8` as an error is run as a part
+ /// of analysis to runtime MIR lowering. To ensure lints are reported reliably, this means that
+ /// transformations which may suppress such errors should not run on analysis MIR.
+ Runtime(RuntimePhase),
+}
+
+/// See [`MirPhase::Analysis`].
+#[derive(Copy, Clone, TyEncodable, TyDecodable, Debug, PartialEq, Eq, PartialOrd, Ord)]
+#[derive(HashStable)]
+pub enum AnalysisPhase {
+ Initial = 0,
+ /// Beginning in this phase, the following variants are disallowed:
/// * [`TerminatorKind::FalseUnwind`]
/// * [`TerminatorKind::FalseEdge`]
/// * [`StatementKind::FakeRead`]
/// * [`StatementKind::AscribeUserType`]
/// * [`Rvalue::Ref`] with `BorrowKind::Shallow`
///
- /// And the following variant is allowed:
- /// * [`StatementKind::Retag`]
- ///
- /// Furthermore, `Drop` now uses explicit drop flags visible in the MIR and reaching a `Drop`
- /// terminator means that the auto-generated drop glue will be invoked. Also, `Copy` operands
- /// are allowed for non-`Copy` types.
- DropsLowered = 4,
- /// Beginning with this phase, the following variant is disallowed:
+ /// Furthermore, `Deref` projections must be the first projection within any place (if they
+ /// appear at all).
+ PostCleanup = 1,
+}
+
+/// See [`MirPhase::Runtime`].
+#[derive(Copy, Clone, TyEncodable, TyDecodable, Debug, PartialEq, Eq, PartialOrd, Ord)]
+#[derive(HashStable)]
+pub enum RuntimePhase {
+ /// In addition to the semantic changes, beginning with this phase, the following variants are
+ /// disallowed:
+ /// * [`TerminatorKind::DropAndReplace`]
+ /// * [`TerminatorKind::Yield`]
+ /// * [`TerminatorKind::GeneratorDrop`]
/// * [`Rvalue::Aggregate`] for any `AggregateKind` except `Array`
///
- /// And the following variant is allowed:
+ /// And the following variants are allowed:
+ /// * [`StatementKind::Retag`]
/// * [`StatementKind::SetDiscriminant`]
- Deaggregated = 5,
- /// Before this phase, generators are in the "source code" form, featuring `yield` statements
- /// and such. With this phase change, they are transformed into a proper state machine. Running
- /// optimizations before this change can be potentially dangerous because the source code is to
- /// some extent a "lie." In particular, `yield` terminators effectively make the value of all
- /// locals visible to the caller. This means that dead store elimination before them, or code
- /// motion across them, is not correct in general. This is also exasperated by type checking
- /// having pre-computed a list of the types that it thinks are ok to be live across a yield
- /// point - this is necessary to decide eg whether autotraits are implemented. Introducing new
- /// types across a yield point will lead to ICEs becaues of this.
- ///
- /// Beginning with this phase, the following variants are disallowed:
- /// * [`TerminatorKind::Yield`]
- /// * [`TerminatorKind::GeneratorDrop`]
+ /// * [`StatementKind::Deinit`]
+ ///
+ /// Furthermore, `Copy` operands are allowed for non-`Copy` types.
+ Initial = 0,
+ /// Beginning with this phase, the following variant is disallowed:
/// * [`ProjectionElem::Deref`] of `Box`
- GeneratorsLowered = 6,
- Optimized = 7,
+ PostCleanup = 1,
+ Optimized = 2,
}
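
// A minimal, self-contained sketch (not part of this patch) of how the
// dialect/phase split documented above can be ordered: the dialect is compared
// first, and the phase only refines ordering within a dialect. The enums below
// are local stand-ins, not the rustc_middle definitions, and the flat index is
// only an assumed illustration of what `MirPhase::phase_index` might compute.
#[allow(dead_code)]
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Debug)]
enum AnalysisPhase { Initial, PostCleanup }

#[allow(dead_code)]
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Debug)]
enum RuntimePhase { Initial, PostCleanup, Optimized }

#[allow(dead_code)]
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Debug)]
enum MirPhase {
    Built,
    Analysis(AnalysisPhase),
    Runtime(RuntimePhase),
}

impl MirPhase {
    // Flatten the nested enum into a single index, assuming the ordering
    // Built < Analysis(_) < Runtime(_).
    fn phase_index(self) -> usize {
        match self {
            MirPhase::Built => 1,
            MirPhase::Analysis(p) => 2 + p as usize,
            MirPhase::Runtime(p) => 4 + p as usize,
        }
    }
}

fn main() {
    let analysis = MirPhase::Analysis(AnalysisPhase::PostCleanup);
    let runtime = MirPhase::Runtime(RuntimePhase::Initial);
    // Any runtime phase sorts after any analysis phase, both via the derived
    // ordering and via the flattened index.
    assert!(analysis < runtime);
    assert!(analysis.phase_index() < runtime.phase_index());
}
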
///////////////////////////////////////////////////////////////////////////
@@ -292,12 +328,40 @@ pub enum StatementKind<'tcx> {
/// executed.
Coverage(Box<Coverage>),
+ /// Denotes a call to an intrinsic that does not require an unwind path and always returns.
+ /// This avoids adding a new block and a terminator for simple intrinsics.
+ Intrinsic(Box<NonDivergingIntrinsic<'tcx>>),
+
+ /// No-op. Useful for deleting instructions without affecting statement indices.
+ Nop,
+}
+
+#[derive(
+ Clone,
+ TyEncodable,
+ TyDecodable,
+ Debug,
+ PartialEq,
+ Hash,
+ HashStable,
+ TypeFoldable,
+ TypeVisitable
+)]
+pub enum NonDivergingIntrinsic<'tcx> {
+ /// Denotes a call to the intrinsic function `assume`.
+ ///
+ /// The operand must be a boolean. Optimizers may use the value of the boolean to backtrack its
+ /// computation to infer information about other variables. So if the boolean came from a
+ /// `x < y` operation, subsequent operations on `x` and `y` could elide various bound checks.
+ /// If the argument is `false`, this operation is equivalent to `TerminatorKind::Unreachable`.
+ Assume(Operand<'tcx>),
+
/// Denotes a call to the intrinsic function `copy_nonoverlapping`.
///
/// First, all three operands are evaluated. `src` and `dest` must each be a reference, pointer,
/// or `Box` pointing to the same type `T`. `count` must evaluate to a `usize`. Then, `src` and
/// `dest` are dereferenced, and `count * size_of::<T>()` bytes beginning with the first byte of
- /// the `src` place are copied to the continguous range of bytes beginning with the first byte
+ /// the `src` place are copied to the contiguous range of bytes beginning with the first byte
/// of `dest`.
///
/// **Needs clarification**: In what order are operands computed and dereferenced? It should
@@ -305,10 +369,18 @@ pub enum StatementKind<'tcx> {
///
/// **Needs clarification**: Is this typed or not, ie is there a typed load and store involved?
/// I vaguely remember Ralf saying somewhere that he thought it should not be.
- CopyNonOverlapping(Box<CopyNonOverlapping<'tcx>>),
+ CopyNonOverlapping(CopyNonOverlapping<'tcx>),
+}
- /// No-op. Useful for deleting instructions without affecting statement indices.
- Nop,
+impl std::fmt::Display for NonDivergingIntrinsic<'_> {
+ fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+ match self {
+ Self::Assume(op) => write!(f, "assume({op:?})"),
+ Self::CopyNonOverlapping(CopyNonOverlapping { src, dst, count }) => {
+ write!(f, "copy_nonoverlapping(dst = {dst:?}, src = {src:?}, count = {count:?})")
+ }
+ }
+ }
}
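
// A small, self-contained sketch (not part of this patch) of what the two
// non-diverging intrinsics above mean at the source level, written against
// stable std APIs rather than the MIR statements themselves.
use std::hint::unreachable_unchecked;
use std::ptr;

// `Assume(op)`: `op` must be true; a false operand is equivalent to reaching
// `TerminatorKind::Unreachable`, i.e. undefined behavior.
unsafe fn assume(cond: bool) {
    if !cond {
        unsafe { unreachable_unchecked() }
    }
}

fn main() {
    let src = [1u32, 2, 3, 4];
    let mut dst = [0u32; 4];

    unsafe {
        // The optimizer may use this fact; a false condition here would be UB.
        assume(src.len() == dst.len());
        // `CopyNonOverlapping { src, dst, count }`: copies
        // `count * size_of::<T>()` bytes between regions that must be valid
        // for the access and must not overlap.
        ptr::copy_nonoverlapping(src.as_ptr(), dst.as_mut_ptr(), src.len());
    }
    assert_eq!(dst, [1, 2, 3, 4]);
}
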
/// Describes what kind of retag is to be performed.
@@ -343,7 +415,7 @@ pub enum FakeReadCause {
/// Some(closure_def_id).
/// Otherwise, the value of the optional LocalDefId will be None.
//
- // We can use LocaDefId here since fake read statements are removed
+ // We can use LocalDefId here since fake read statements are removed
// before codegen in the `CleanupNonCodegenStatements` pass.
ForMatchedPlace(Option<LocalDefId>),
@@ -417,7 +489,7 @@ pub struct CopyNonOverlapping<'tcx> {
/// must also be `cleanup`. This is a part of the type system and checked statically, so it is
/// still an error to have such an edge in the CFG even if it's known that it won't be taken at
/// runtime.
-#[derive(Clone, TyEncodable, TyDecodable, Hash, HashStable, PartialEq)]
+#[derive(Clone, TyEncodable, TyDecodable, Hash, HashStable, PartialEq, TypeFoldable, TypeVisitable)]
pub enum TerminatorKind<'tcx> {
/// Block has one successor; we continue execution there.
Goto { target: BasicBlock },
@@ -670,7 +742,7 @@ pub enum TerminatorKind<'tcx> {
}
/// Information about an assertion failure.
-#[derive(Clone, TyEncodable, TyDecodable, Hash, HashStable, PartialEq, PartialOrd)]
+#[derive(Clone, TyEncodable, TyDecodable, Hash, HashStable, PartialEq, TypeFoldable, TypeVisitable)]
pub enum AssertKind<O> {
BoundsCheck { len: O, index: O },
Overflow(BinOp, O, O),
@@ -758,6 +830,9 @@ pub type AssertMessage<'tcx> = AssertKind<Operand<'tcx>>;
/// generator has more than one variant, the parent place's variant index must be set, indicating
/// which variant is being used. If it has just one variant, the variant index may or may not be
/// included - the single possible variant is inferred if it is not included.
+/// - [`OpaqueCast`](ProjectionElem::OpaqueCast): This projection changes the place's type to the
+/// given one, and makes no other changes. An `OpaqueCast` projection on any type other than an
+/// opaque type from the current crate is not well-formed.
/// - [`ConstantIndex`](ProjectionElem::ConstantIndex): Computes an offset in units of `T` into the
/// place as described in the documentation for the `ProjectionElem`. The resulting address is
/// the parent's address plus that offset, and the type is `T`. This is only legal if the parent
@@ -792,7 +867,7 @@ pub type AssertMessage<'tcx> = AssertKind<Operand<'tcx>>;
///
/// Rust currently requires that every place obey those two rules. This is checked by MIRI and taken
/// advantage of by codegen (via `gep inbounds`). That is possibly subject to change.
-#[derive(Copy, Clone, PartialEq, Eq, Hash, TyEncodable, HashStable)]
+#[derive(Copy, Clone, PartialEq, Eq, Hash, TyEncodable, HashStable, TypeFoldable, TypeVisitable)]
pub struct Place<'tcx> {
pub local: Local,
@@ -801,7 +876,7 @@ pub struct Place<'tcx> {
}
#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
-#[derive(TyEncodable, TyDecodable, HashStable)]
+#[derive(TyEncodable, TyDecodable, HashStable, TypeFoldable, TypeVisitable)]
pub enum ProjectionElem<V, T> {
Deref,
Field(Field, T),
@@ -857,6 +932,10 @@ pub enum ProjectionElem<V, T> {
///
/// The included Symbol is the name of the variant, used for printing MIR.
Downcast(Option<Symbol>, VariantIdx),
+
+ /// Like an explicit cast from an opaque type to a concrete type, but without
+ /// requiring an intermediate variable.
+ OpaqueCast(T),
}
/// Alias for projections as they appear in places, where the base is a place
@@ -884,7 +963,7 @@ pub type PlaceElem<'tcx> = ProjectionElem<Local, Ty<'tcx>>;
/// **Needs clarification:** Is loading a place that has its variant index set well-formed? Miri
/// currently implements it, but it seems like this may be something to check against in the
/// validator.
-#[derive(Clone, PartialEq, TyEncodable, TyDecodable, Hash, HashStable)]
+#[derive(Clone, PartialEq, TyEncodable, TyDecodable, Hash, HashStable, TypeFoldable, TypeVisitable)]
pub enum Operand<'tcx> {
/// Creates a value by loading the given place.
///
@@ -915,7 +994,7 @@ pub enum Operand<'tcx> {
/// Computing any rvalue begins by evaluating the places and operands in some order (**Needs
/// clarification**: Which order?). These are then used to produce a "value" - the same kind of
/// value that an [`Operand`] produces.
-#[derive(Clone, TyEncodable, TyDecodable, Hash, HashStable, PartialEq)]
+#[derive(Clone, TyEncodable, TyDecodable, Hash, HashStable, PartialEq, TypeFoldable, TypeVisitable)]
pub enum Rvalue<'tcx> {
/// Yields the operand unchanged
Use(Operand<'tcx>),
@@ -1068,11 +1147,18 @@ pub enum CastKind {
/// All sorts of pointer-to-pointer casts. Note that reference-to-raw-ptr casts are
/// translated into `&raw mut/const *r`, i.e., they are not actually casts.
Pointer(PointerCast),
- /// Remaining unclassified casts.
- Misc,
+ /// Cast into a dyn* object.
+ DynStar,
+ IntToInt,
+ FloatToInt,
+ FloatToFloat,
+ IntToFloat,
+ PtrToPtr,
+ FnPtrToPtr,
}
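
// An illustrative, self-contained sketch (not part of this patch) of how a
// former catch-all `Misc` cast can be classified into the finer-grained kinds
// listed above. The types are local stand-ins and the table is intentionally
// partial; it is not the compiler's real cast checking.
#[derive(Debug, PartialEq)]
enum SimpleTy { Int, Float, RawPtr, FnPtr }

#[derive(Debug, PartialEq)]
enum SimpleCastKind { IntToInt, FloatToInt, FloatToFloat, IntToFloat, PtrToPtr, FnPtrToPtr }

fn classify(from: SimpleTy, to: SimpleTy) -> Option<SimpleCastKind> {
    use SimpleCastKind::*;
    use SimpleTy::*;
    Some(match (from, to) {
        (Int, Int) => IntToInt,
        (Float, Int) => FloatToInt,
        (Float, Float) => FloatToFloat,
        (Int, Float) => IntToFloat,
        (RawPtr, RawPtr) => PtrToPtr,
        (FnPtr, RawPtr) => FnPtrToPtr,
        // Other combinations (pointer/integer exposes, unsizing, ...) are
        // omitted from this toy model.
        _ => return None,
    })
}

fn main() {
    assert_eq!(classify(SimpleTy::Int, SimpleTy::Float), Some(SimpleCastKind::IntToFloat));
    assert_eq!(classify(SimpleTy::FnPtr, SimpleTy::RawPtr), Some(SimpleCastKind::FnPtrToPtr));
    assert_eq!(classify(SimpleTy::Float, SimpleTy::FnPtr), None);
}
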
#[derive(Clone, Debug, PartialEq, Eq, TyEncodable, TyDecodable, Hash, HashStable)]
+#[derive(TypeFoldable, TypeVisitable)]
pub enum AggregateKind<'tcx> {
/// The type is of the element
Array(Ty<'tcx>),
@@ -1159,10 +1245,11 @@ pub enum BinOp {
#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
mod size_asserts {
use super::*;
- // These are in alphabetical order, which is easy to maintain.
- static_assert_size!(AggregateKind<'_>, 48);
+ // tidy-alphabetical-start
+ static_assert_size!(AggregateKind<'_>, 40);
static_assert_size!(Operand<'_>, 24);
static_assert_size!(Place<'_>, 16);
static_assert_size!(PlaceElem<'_>, 24);
static_assert_size!(Rvalue<'_>, 40);
+ // tidy-alphabetical-end
}
diff --git a/compiler/rustc_middle/src/mir/tcx.rs b/compiler/rustc_middle/src/mir/tcx.rs
index 405003156..fa3adafd4 100644
--- a/compiler/rustc_middle/src/mir/tcx.rs
+++ b/compiler/rustc_middle/src/mir/tcx.rs
@@ -4,7 +4,6 @@
*/
use crate::mir::*;
-use crate::ty::subst::Subst;
use crate::ty::{self, Ty, TyCtxt};
use rustc_hir as hir;
use rustc_target::abi::VariantIdx;
@@ -57,7 +56,7 @@ impl<'tcx> PlaceTy<'tcx> {
/// `PlaceElem`, where we can just use the `Ty` that is already
/// stored inline on field projection elems.
pub fn projection_ty(self, tcx: TyCtxt<'tcx>, elem: PlaceElem<'tcx>) -> PlaceTy<'tcx> {
- self.projection_ty_core(tcx, ty::ParamEnv::empty(), &elem, |_, _, ty| ty)
+ self.projection_ty_core(tcx, ty::ParamEnv::empty(), &elem, |_, _, ty| ty, |_, ty| ty)
}
/// `place_ty.projection_ty_core(tcx, elem, |...| { ... })`
@@ -71,6 +70,7 @@ impl<'tcx> PlaceTy<'tcx> {
param_env: ty::ParamEnv<'tcx>,
elem: &ProjectionElem<V, T>,
mut handle_field: impl FnMut(&Self, Field, T) -> Ty<'tcx>,
+ mut handle_opaque_cast: impl FnMut(&Self, T) -> Ty<'tcx>,
) -> PlaceTy<'tcx>
where
V: ::std::fmt::Debug,
@@ -109,6 +109,7 @@ impl<'tcx> PlaceTy<'tcx> {
PlaceTy { ty: self.ty, variant_index: Some(index) }
}
ProjectionElem::Field(f, fty) => PlaceTy::from_ty(handle_field(&self, f, fty)),
+ ProjectionElem::OpaqueCast(ty) => PlaceTy::from_ty(handle_opaque_cast(&self, ty)),
};
debug!("projection_ty self: {:?} elem: {:?} yields: {:?}", self, elem, answer);
answer
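
// A simplified, self-contained sketch (not part of this patch) of the callback
// shape used by `projection_ty_core` above: field projections and the new
// `OpaqueCast` projection each get a handler that decides the resulting type.
// All types here are local stand-ins for the rustc_middle ones.
#[derive(Clone, Debug, PartialEq)]
enum SimpleTy { Adt(&'static str), Opaque }

#[allow(dead_code)]
enum Projection {
    Deref,
    Field(usize, SimpleTy),
    OpaqueCast(SimpleTy),
}

fn projection_ty(
    base: SimpleTy,
    elem: &Projection,
    mut handle_field: impl FnMut(&SimpleTy, usize, &SimpleTy) -> SimpleTy,
    mut handle_opaque_cast: impl FnMut(&SimpleTy, &SimpleTy) -> SimpleTy,
) -> SimpleTy {
    match elem {
        // The real code peels references/boxes here; elided in this sketch.
        Projection::Deref => base,
        Projection::Field(f, fty) => handle_field(&base, *f, fty),
        Projection::OpaqueCast(ty) => handle_opaque_cast(&base, ty),
    }
}

fn main() {
    // Trivial handlers, mirroring the `|_, _, ty| ty` / `|_, ty| ty` closures
    // passed in the hunk above.
    let projected = projection_ty(
        SimpleTy::Opaque,
        &Projection::OpaqueCast(SimpleTy::Adt("Concrete")),
        |_, _, ty| ty.clone(),
        |_, ty| ty.clone(),
    );
    assert_eq!(projected, SimpleTy::Adt("Concrete"));
}
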
diff --git a/compiler/rustc_middle/src/mir/terminator.rs b/compiler/rustc_middle/src/mir/terminator.rs
index 9ccf5aea6..4ea333cff 100644
--- a/compiler/rustc_middle/src/mir/terminator.rs
+++ b/compiler/rustc_middle/src/mir/terminator.rs
@@ -14,7 +14,7 @@ use std::slice;
pub use super::query::*;
-#[derive(Debug, Clone, TyEncodable, TyDecodable, Hash, HashStable, PartialEq, PartialOrd)]
+#[derive(Debug, Clone, TyEncodable, TyDecodable, Hash, HashStable, PartialEq)]
pub struct SwitchTargets {
/// Possible values. The locations to branch to in each case
/// are found in the corresponding indices from the `targets` vector.
@@ -102,7 +102,7 @@ impl<'a> Iterator for SwitchTargetsIter<'a> {
impl<'a> ExactSizeIterator for SwitchTargetsIter<'a> {}
-#[derive(Clone, Debug, TyEncodable, TyDecodable, HashStable)]
+#[derive(Clone, Debug, TyEncodable, TyDecodable, HashStable, TypeFoldable, TypeVisitable)]
pub struct Terminator<'tcx> {
pub source_info: SourceInfo,
pub kind: TerminatorKind<'tcx>,
diff --git a/compiler/rustc_middle/src/mir/traversal.rs b/compiler/rustc_middle/src/mir/traversal.rs
index 627dc32f3..55b2c5927 100644
--- a/compiler/rustc_middle/src/mir/traversal.rs
+++ b/compiler/rustc_middle/src/mir/traversal.rs
@@ -37,7 +37,7 @@ impl<'a, 'tcx> Preorder<'a, 'tcx> {
Preorder {
body,
- visited: BitSet::new_empty(body.basic_blocks().len()),
+ visited: BitSet::new_empty(body.basic_blocks.len()),
worklist,
root_is_start_block: root == START_BLOCK,
}
@@ -71,7 +71,7 @@ impl<'a, 'tcx> Iterator for Preorder<'a, 'tcx> {
fn size_hint(&self) -> (usize, Option<usize>) {
// All the blocks, minus the number of blocks we've visited.
- let upper = self.body.basic_blocks().len() - self.visited.count();
+ let upper = self.body.basic_blocks.len() - self.visited.count();
let lower = if self.root_is_start_block {
// We will visit all remaining blocks exactly once.
diff --git a/compiler/rustc_middle/src/mir/type_foldable.rs b/compiler/rustc_middle/src/mir/type_foldable.rs
index 82a6b0c50..4c0974f86 100644
--- a/compiler/rustc_middle/src/mir/type_foldable.rs
+++ b/compiler/rustc_middle/src/mir/type_foldable.rs
@@ -1,8 +1,9 @@
//! `TypeFoldable` implementations for MIR types
+use rustc_ast::InlineAsmTemplatePiece;
+
use super::*;
use crate::ty;
-use rustc_data_structures::functor::IdFunctor;
TrivialTypeTraversalAndLiftImpls! {
BlockTailInfo,
@@ -13,96 +14,33 @@ TrivialTypeTraversalAndLiftImpls! {
SourceScope,
SourceScopeLocalData,
UserTypeAnnotationIndex,
+ BorrowKind,
+ CastKind,
+ BinOp,
+ NullOp,
+ UnOp,
+ hir::Movability,
+ BasicBlock,
+ SwitchTargets,
+ GeneratorKind,
+ GeneratorSavedLocal,
}
-impl<'tcx> TypeFoldable<'tcx> for Terminator<'tcx> {
- fn try_fold_with<F: FallibleTypeFolder<'tcx>>(self, folder: &mut F) -> Result<Self, F::Error> {
- use crate::mir::TerminatorKind::*;
-
- let kind = match self.kind {
- Goto { target } => Goto { target },
- SwitchInt { discr, switch_ty, targets } => SwitchInt {
- discr: discr.try_fold_with(folder)?,
- switch_ty: switch_ty.try_fold_with(folder)?,
- targets,
- },
- Drop { place, target, unwind } => {
- Drop { place: place.try_fold_with(folder)?, target, unwind }
- }
- DropAndReplace { place, value, target, unwind } => DropAndReplace {
- place: place.try_fold_with(folder)?,
- value: value.try_fold_with(folder)?,
- target,
- unwind,
- },
- Yield { value, resume, resume_arg, drop } => Yield {
- value: value.try_fold_with(folder)?,
- resume,
- resume_arg: resume_arg.try_fold_with(folder)?,
- drop,
- },
- Call { func, args, destination, target, cleanup, from_hir_call, fn_span } => Call {
- func: func.try_fold_with(folder)?,
- args: args.try_fold_with(folder)?,
- destination: destination.try_fold_with(folder)?,
- target,
- cleanup,
- from_hir_call,
- fn_span,
- },
- Assert { cond, expected, msg, target, cleanup } => {
- use AssertKind::*;
- let msg = match msg {
- BoundsCheck { len, index } => BoundsCheck {
- len: len.try_fold_with(folder)?,
- index: index.try_fold_with(folder)?,
- },
- Overflow(op, l, r) => {
- Overflow(op, l.try_fold_with(folder)?, r.try_fold_with(folder)?)
- }
- OverflowNeg(op) => OverflowNeg(op.try_fold_with(folder)?),
- DivisionByZero(op) => DivisionByZero(op.try_fold_with(folder)?),
- RemainderByZero(op) => RemainderByZero(op.try_fold_with(folder)?),
- ResumedAfterReturn(_) | ResumedAfterPanic(_) => msg,
- };
- Assert { cond: cond.try_fold_with(folder)?, expected, msg, target, cleanup }
- }
- GeneratorDrop => GeneratorDrop,
- Resume => Resume,
- Abort => Abort,
- Return => Return,
- Unreachable => Unreachable,
- FalseEdge { real_target, imaginary_target } => {
- FalseEdge { real_target, imaginary_target }
- }
- FalseUnwind { real_target, unwind } => FalseUnwind { real_target, unwind },
- InlineAsm { template, operands, options, line_spans, destination, cleanup } => {
- InlineAsm {
- template,
- operands: operands.try_fold_with(folder)?,
- options,
- line_spans,
- destination,
- cleanup,
- }
- }
- };
- Ok(Terminator { source_info: self.source_info, kind })
+TrivialTypeTraversalImpls! {
+ for <'tcx> {
+ ConstValue<'tcx>,
}
}
-impl<'tcx> TypeFoldable<'tcx> for GeneratorKind {
- fn try_fold_with<F: FallibleTypeFolder<'tcx>>(self, _: &mut F) -> Result<Self, F::Error> {
+impl<'tcx> TypeFoldable<'tcx> for &'tcx [InlineAsmTemplatePiece] {
+ fn try_fold_with<F: FallibleTypeFolder<'tcx>>(self, _folder: &mut F) -> Result<Self, F::Error> {
Ok(self)
}
}
-impl<'tcx> TypeFoldable<'tcx> for Place<'tcx> {
- fn try_fold_with<F: FallibleTypeFolder<'tcx>>(self, folder: &mut F) -> Result<Self, F::Error> {
- Ok(Place {
- local: self.local.try_fold_with(folder)?,
- projection: self.projection.try_fold_with(folder)?,
- })
+impl<'tcx> TypeFoldable<'tcx> for &'tcx [Span] {
+ fn try_fold_with<F: FallibleTypeFolder<'tcx>>(self, _folder: &mut F) -> Result<Self, F::Error> {
+ Ok(self)
}
}
@@ -112,129 +50,8 @@ impl<'tcx> TypeFoldable<'tcx> for &'tcx ty::List<PlaceElem<'tcx>> {
}
}
-impl<'tcx> TypeFoldable<'tcx> for Rvalue<'tcx> {
- fn try_fold_with<F: FallibleTypeFolder<'tcx>>(self, folder: &mut F) -> Result<Self, F::Error> {
- use crate::mir::Rvalue::*;
- Ok(match self {
- Use(op) => Use(op.try_fold_with(folder)?),
- Repeat(op, len) => Repeat(op.try_fold_with(folder)?, len.try_fold_with(folder)?),
- ThreadLocalRef(did) => ThreadLocalRef(did.try_fold_with(folder)?),
- Ref(region, bk, place) => {
- Ref(region.try_fold_with(folder)?, bk, place.try_fold_with(folder)?)
- }
- CopyForDeref(place) => CopyForDeref(place.try_fold_with(folder)?),
- AddressOf(mutability, place) => AddressOf(mutability, place.try_fold_with(folder)?),
- Len(place) => Len(place.try_fold_with(folder)?),
- Cast(kind, op, ty) => Cast(kind, op.try_fold_with(folder)?, ty.try_fold_with(folder)?),
- BinaryOp(op, box (rhs, lhs)) => {
- BinaryOp(op, Box::new((rhs.try_fold_with(folder)?, lhs.try_fold_with(folder)?)))
- }
- CheckedBinaryOp(op, box (rhs, lhs)) => CheckedBinaryOp(
- op,
- Box::new((rhs.try_fold_with(folder)?, lhs.try_fold_with(folder)?)),
- ),
- UnaryOp(op, val) => UnaryOp(op, val.try_fold_with(folder)?),
- Discriminant(place) => Discriminant(place.try_fold_with(folder)?),
- NullaryOp(op, ty) => NullaryOp(op, ty.try_fold_with(folder)?),
- Aggregate(kind, fields) => {
- let kind = kind.try_map_id(|kind| {
- Ok(match kind {
- AggregateKind::Array(ty) => AggregateKind::Array(ty.try_fold_with(folder)?),
- AggregateKind::Tuple => AggregateKind::Tuple,
- AggregateKind::Adt(def, v, substs, user_ty, n) => AggregateKind::Adt(
- def,
- v,
- substs.try_fold_with(folder)?,
- user_ty.try_fold_with(folder)?,
- n,
- ),
- AggregateKind::Closure(id, substs) => {
- AggregateKind::Closure(id, substs.try_fold_with(folder)?)
- }
- AggregateKind::Generator(id, substs, movablity) => {
- AggregateKind::Generator(id, substs.try_fold_with(folder)?, movablity)
- }
- })
- })?;
- Aggregate(kind, fields.try_fold_with(folder)?)
- }
- ShallowInitBox(op, ty) => {
- ShallowInitBox(op.try_fold_with(folder)?, ty.try_fold_with(folder)?)
- }
- })
- }
-}
-
-impl<'tcx> TypeFoldable<'tcx> for Operand<'tcx> {
- fn try_fold_with<F: FallibleTypeFolder<'tcx>>(self, folder: &mut F) -> Result<Self, F::Error> {
- Ok(match self {
- Operand::Copy(place) => Operand::Copy(place.try_fold_with(folder)?),
- Operand::Move(place) => Operand::Move(place.try_fold_with(folder)?),
- Operand::Constant(c) => Operand::Constant(c.try_fold_with(folder)?),
- })
- }
-}
-
-impl<'tcx> TypeFoldable<'tcx> for PlaceElem<'tcx> {
- fn try_fold_with<F: FallibleTypeFolder<'tcx>>(self, folder: &mut F) -> Result<Self, F::Error> {
- use crate::mir::ProjectionElem::*;
-
- Ok(match self {
- Deref => Deref,
- Field(f, ty) => Field(f, ty.try_fold_with(folder)?),
- Index(v) => Index(v.try_fold_with(folder)?),
- Downcast(symbol, variantidx) => Downcast(symbol, variantidx),
- ConstantIndex { offset, min_length, from_end } => {
- ConstantIndex { offset, min_length, from_end }
- }
- Subslice { from, to, from_end } => Subslice { from, to, from_end },
- })
- }
-}
-
-impl<'tcx> TypeFoldable<'tcx> for Field {
- fn try_fold_with<F: FallibleTypeFolder<'tcx>>(self, _: &mut F) -> Result<Self, F::Error> {
- Ok(self)
- }
-}
-
-impl<'tcx> TypeFoldable<'tcx> for GeneratorSavedLocal {
- fn try_fold_with<F: FallibleTypeFolder<'tcx>>(self, _: &mut F) -> Result<Self, F::Error> {
- Ok(self)
- }
-}
-
impl<'tcx, R: Idx, C: Idx> TypeFoldable<'tcx> for BitMatrix<R, C> {
fn try_fold_with<F: FallibleTypeFolder<'tcx>>(self, _: &mut F) -> Result<Self, F::Error> {
Ok(self)
}
}
-
-impl<'tcx> TypeFoldable<'tcx> for Constant<'tcx> {
- fn try_fold_with<F: FallibleTypeFolder<'tcx>>(self, folder: &mut F) -> Result<Self, F::Error> {
- Ok(Constant {
- span: self.span,
- user_ty: self.user_ty.try_fold_with(folder)?,
- literal: self.literal.try_fold_with(folder)?,
- })
- }
-}
-
-impl<'tcx> TypeFoldable<'tcx> for ConstantKind<'tcx> {
- #[inline(always)]
- fn try_fold_with<F: FallibleTypeFolder<'tcx>>(self, folder: &mut F) -> Result<Self, F::Error> {
- folder.try_fold_mir_const(self)
- }
-}
-
-impl<'tcx> TypeSuperFoldable<'tcx> for ConstantKind<'tcx> {
- fn try_super_fold_with<F: FallibleTypeFolder<'tcx>>(
- self,
- folder: &mut F,
- ) -> Result<Self, F::Error> {
- match self {
- ConstantKind::Ty(c) => Ok(ConstantKind::Ty(c.try_fold_with(folder)?)),
- ConstantKind::Val(v, t) => Ok(ConstantKind::Val(v, t.try_fold_with(folder)?)),
- }
- }
-}
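
// A tiny, self-contained sketch (not part of this patch) of the pattern behind
// the `TrivialTypeTraversalImpls!` macro and the `TypeFoldable` derives relied
// on above: leaf types with nothing to visit fold to themselves, while
// structural types just fold each field and rebuild the value. The trait and
// types are simplified stand-ins, not the real rustc folding machinery.
trait Folder {
    fn fold_ty(&mut self, ty: String) -> String;
}

trait TypeFoldable: Sized {
    fn fold_with<F: Folder>(self, folder: &mut F) -> Self;
}

// "Trivial" impl: nothing of interest inside, return self unchanged.
impl TypeFoldable for u32 {
    fn fold_with<F: Folder>(self, _folder: &mut F) -> Self {
        self
    }
}

// A type the folder actually cares about.
impl TypeFoldable for String {
    fn fold_with<F: Folder>(self, folder: &mut F) -> Self {
        folder.fold_ty(self)
    }
}

// Structural impl, the kind a `#[derive(TypeFoldable)]` would generate.
struct Place {
    local: u32,
    ty: String,
}

impl TypeFoldable for Place {
    fn fold_with<F: Folder>(self, folder: &mut F) -> Self {
        Place { local: self.local.fold_with(folder), ty: self.ty.fold_with(folder) }
    }
}

struct Renamer;
impl Folder for Renamer {
    fn fold_ty(&mut self, ty: String) -> String {
        ty.replace("opaque", "concrete")
    }
}

fn main() {
    let place = Place { local: 3, ty: "opaque::Foo".to_string() };
    let place = place.fold_with(&mut Renamer);
    assert_eq!(place.local, 3);
    assert_eq!(place.ty, "concrete::Foo");
}
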
diff --git a/compiler/rustc_middle/src/mir/type_visitable.rs b/compiler/rustc_middle/src/mir/type_visitable.rs
index 6a0801cb0..e7cd497b2 100644
--- a/compiler/rustc_middle/src/mir/type_visitable.rs
+++ b/compiler/rustc_middle/src/mir/type_visitable.rs
@@ -1,190 +1,9 @@
//! `TypeVisitable` implementations for MIR types
use super::*;
-use crate::ty;
-
-impl<'tcx> TypeVisitable<'tcx> for Terminator<'tcx> {
- fn visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> ControlFlow<V::BreakTy> {
- use crate::mir::TerminatorKind::*;
-
- match self.kind {
- SwitchInt { ref discr, switch_ty, .. } => {
- discr.visit_with(visitor)?;
- switch_ty.visit_with(visitor)
- }
- Drop { ref place, .. } => place.visit_with(visitor),
- DropAndReplace { ref place, ref value, .. } => {
- place.visit_with(visitor)?;
- value.visit_with(visitor)
- }
- Yield { ref value, .. } => value.visit_with(visitor),
- Call { ref func, ref args, ref destination, .. } => {
- destination.visit_with(visitor)?;
- func.visit_with(visitor)?;
- args.visit_with(visitor)
- }
- Assert { ref cond, ref msg, .. } => {
- cond.visit_with(visitor)?;
- use AssertKind::*;
- match msg {
- BoundsCheck { ref len, ref index } => {
- len.visit_with(visitor)?;
- index.visit_with(visitor)
- }
- Overflow(_, l, r) => {
- l.visit_with(visitor)?;
- r.visit_with(visitor)
- }
- OverflowNeg(op) | DivisionByZero(op) | RemainderByZero(op) => {
- op.visit_with(visitor)
- }
- ResumedAfterReturn(_) | ResumedAfterPanic(_) => ControlFlow::CONTINUE,
- }
- }
- InlineAsm { ref operands, .. } => operands.visit_with(visitor),
- Goto { .. }
- | Resume
- | Abort
- | Return
- | GeneratorDrop
- | Unreachable
- | FalseEdge { .. }
- | FalseUnwind { .. } => ControlFlow::CONTINUE,
- }
- }
-}
-
-impl<'tcx> TypeVisitable<'tcx> for GeneratorKind {
- fn visit_with<V: TypeVisitor<'tcx>>(&self, _: &mut V) -> ControlFlow<V::BreakTy> {
- ControlFlow::CONTINUE
- }
-}
-
-impl<'tcx> TypeVisitable<'tcx> for Place<'tcx> {
- fn visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> ControlFlow<V::BreakTy> {
- self.local.visit_with(visitor)?;
- self.projection.visit_with(visitor)
- }
-}
-
-impl<'tcx> TypeVisitable<'tcx> for &'tcx ty::List<PlaceElem<'tcx>> {
- fn visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> ControlFlow<V::BreakTy> {
- self.iter().try_for_each(|t| t.visit_with(visitor))
- }
-}
-
-impl<'tcx> TypeVisitable<'tcx> for Rvalue<'tcx> {
- fn visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> ControlFlow<V::BreakTy> {
- use crate::mir::Rvalue::*;
- match *self {
- Use(ref op) => op.visit_with(visitor),
- CopyForDeref(ref place) => {
- let op = &Operand::Copy(*place);
- op.visit_with(visitor)
- }
- Repeat(ref op, _) => op.visit_with(visitor),
- ThreadLocalRef(did) => did.visit_with(visitor),
- Ref(region, _, ref place) => {
- region.visit_with(visitor)?;
- place.visit_with(visitor)
- }
- AddressOf(_, ref place) => place.visit_with(visitor),
- Len(ref place) => place.visit_with(visitor),
- Cast(_, ref op, ty) => {
- op.visit_with(visitor)?;
- ty.visit_with(visitor)
- }
- BinaryOp(_, box (ref rhs, ref lhs)) | CheckedBinaryOp(_, box (ref rhs, ref lhs)) => {
- rhs.visit_with(visitor)?;
- lhs.visit_with(visitor)
- }
- UnaryOp(_, ref val) => val.visit_with(visitor),
- Discriminant(ref place) => place.visit_with(visitor),
- NullaryOp(_, ty) => ty.visit_with(visitor),
- Aggregate(ref kind, ref fields) => {
- match **kind {
- AggregateKind::Array(ty) => {
- ty.visit_with(visitor)?;
- }
- AggregateKind::Tuple => {}
- AggregateKind::Adt(_, _, substs, user_ty, _) => {
- substs.visit_with(visitor)?;
- user_ty.visit_with(visitor)?;
- }
- AggregateKind::Closure(_, substs) => {
- substs.visit_with(visitor)?;
- }
- AggregateKind::Generator(_, substs, _) => {
- substs.visit_with(visitor)?;
- }
- }
- fields.visit_with(visitor)
- }
- ShallowInitBox(ref op, ty) => {
- op.visit_with(visitor)?;
- ty.visit_with(visitor)
- }
- }
- }
-}
-
-impl<'tcx> TypeVisitable<'tcx> for Operand<'tcx> {
- fn visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> ControlFlow<V::BreakTy> {
- match *self {
- Operand::Copy(ref place) | Operand::Move(ref place) => place.visit_with(visitor),
- Operand::Constant(ref c) => c.visit_with(visitor),
- }
- }
-}
-
-impl<'tcx> TypeVisitable<'tcx> for PlaceElem<'tcx> {
- fn visit_with<Vs: TypeVisitor<'tcx>>(&self, visitor: &mut Vs) -> ControlFlow<Vs::BreakTy> {
- use crate::mir::ProjectionElem::*;
-
- match self {
- Field(_, ty) => ty.visit_with(visitor),
- Index(v) => v.visit_with(visitor),
- _ => ControlFlow::CONTINUE,
- }
- }
-}
-
-impl<'tcx> TypeVisitable<'tcx> for Field {
- fn visit_with<V: TypeVisitor<'tcx>>(&self, _: &mut V) -> ControlFlow<V::BreakTy> {
- ControlFlow::CONTINUE
- }
-}
-
-impl<'tcx> TypeVisitable<'tcx> for GeneratorSavedLocal {
- fn visit_with<V: TypeVisitor<'tcx>>(&self, _: &mut V) -> ControlFlow<V::BreakTy> {
- ControlFlow::CONTINUE
- }
-}
impl<'tcx, R: Idx, C: Idx> TypeVisitable<'tcx> for BitMatrix<R, C> {
fn visit_with<V: TypeVisitor<'tcx>>(&self, _: &mut V) -> ControlFlow<V::BreakTy> {
ControlFlow::CONTINUE
}
}
-
-impl<'tcx> TypeVisitable<'tcx> for Constant<'tcx> {
- fn visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> ControlFlow<V::BreakTy> {
- self.literal.visit_with(visitor)?;
- self.user_ty.visit_with(visitor)
- }
-}
-
-impl<'tcx> TypeVisitable<'tcx> for ConstantKind<'tcx> {
- fn visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> ControlFlow<V::BreakTy> {
- visitor.visit_mir_const(*self)
- }
-}
-
-impl<'tcx> TypeSuperVisitable<'tcx> for ConstantKind<'tcx> {
- fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> ControlFlow<V::BreakTy> {
- match *self {
- ConstantKind::Ty(c) => c.visit_with(visitor),
- ConstantKind::Val(_, t) => t.visit_with(visitor),
- }
- }
-}
diff --git a/compiler/rustc_middle/src/mir/visit.rs b/compiler/rustc_middle/src/mir/visit.rs
index 891608764..ddcf3711b 100644
--- a/compiler/rustc_middle/src/mir/visit.rs
+++ b/compiler/rustc_middle/src/mir/visit.rs
@@ -80,6 +80,8 @@ macro_rules! make_mir_visitor {
self.super_body(body);
}
+ extra_body_methods!($($mutability)?);
+
fn visit_basic_block_data(
&mut self,
block: BasicBlock,
@@ -235,14 +237,6 @@ macro_rules! make_mir_visitor {
self.super_region(region);
}
- fn visit_const(
- &mut self,
- constant: $(& $mutability)? ty::Const<'tcx>,
- _: Location,
- ) {
- self.super_const(constant);
- }
-
fn visit_substs(
&mut self,
substs: & $($mutability)? SubstsRef<'tcx>,
@@ -287,63 +281,7 @@ macro_rules! make_mir_visitor {
&mut self,
body: &$($mutability)? Body<'tcx>,
) {
- let span = body.span;
- if let Some(gen) = &$($mutability)? body.generator {
- if let Some(yield_ty) = $(& $mutability)? gen.yield_ty {
- self.visit_ty(
- yield_ty,
- TyContext::YieldTy(SourceInfo::outermost(span))
- );
- }
- }
-
- // for best performance, we want to use an iterator rather
- // than a for-loop, to avoid calling `body::Body::invalidate` for
- // each basic block.
- #[allow(unused_macro_rules)]
- macro_rules! basic_blocks {
- (mut) => (body.basic_blocks_mut().iter_enumerated_mut());
- () => (body.basic_blocks().iter_enumerated());
- }
- for (bb, data) in basic_blocks!($($mutability)?) {
- self.visit_basic_block_data(bb, data);
- }
-
- for scope in &$($mutability)? body.source_scopes {
- self.visit_source_scope_data(scope);
- }
-
- self.visit_ty(
- $(& $mutability)? body.return_ty(),
- TyContext::ReturnTy(SourceInfo::outermost(body.span))
- );
-
- for local in body.local_decls.indices() {
- self.visit_local_decl(local, & $($mutability)? body.local_decls[local]);
- }
-
- #[allow(unused_macro_rules)]
- macro_rules! type_annotations {
- (mut) => (body.user_type_annotations.iter_enumerated_mut());
- () => (body.user_type_annotations.iter_enumerated());
- }
-
- for (index, annotation) in type_annotations!($($mutability)?) {
- self.visit_user_type_annotation(
- index, annotation
- );
- }
-
- for var_debug_info in &$($mutability)? body.var_debug_info {
- self.visit_var_debug_info(var_debug_info);
- }
-
- self.visit_span($(& $mutability)? body.span);
-
- for const_ in &$($mutability)? body.required_consts {
- let location = START_BLOCK.start_location();
- self.visit_constant(const_, location);
- }
+ super_body!(self, body, $($mutability, true)?);
}
fn super_basic_block_data(&mut self,
@@ -479,14 +417,15 @@ macro_rules! make_mir_visitor {
location
)
}
- StatementKind::CopyNonOverlapping(box crate::mir::CopyNonOverlapping{
- src,
- dst,
- count,
- }) => {
- self.visit_operand(src, location);
- self.visit_operand(dst, location);
- self.visit_operand(count, location)
+ StatementKind::Intrinsic(box ref $($mutability)? intrinsic) => {
+ match intrinsic {
+ NonDivergingIntrinsic::Assume(op) => self.visit_operand(op, location),
+ NonDivergingIntrinsic::CopyNonOverlapping(CopyNonOverlapping { src, dst, count }) => {
+ self.visit_operand(src, location);
+ self.visit_operand(dst, location);
+ self.visit_operand(count, location);
+ }
+ }
}
StatementKind::Nop => {}
}
@@ -930,8 +869,9 @@ macro_rules! make_mir_visitor {
self.visit_span($(& $mutability)? *span);
drop(user_ty); // no visit method for this
match literal {
- ConstantKind::Ty(ct) => self.visit_const($(& $mutability)? *ct, location),
+ ConstantKind::Ty(_) => {}
ConstantKind::Val(_, ty) => self.visit_ty($(& $mutability)? *ty, TyContext::Location(location)),
+ ConstantKind::Unevaluated(_, ty) => self.visit_ty($(& $mutability)? *ty, TyContext::Location(location)),
}
}
@@ -969,9 +909,6 @@ macro_rules! make_mir_visitor {
fn super_region(&mut self, _region: $(& $mutability)? ty::Region<'tcx>) {
}
- fn super_const(&mut self, _const: $(& $mutability)? ty::Const<'tcx>) {
- }
-
fn super_substs(&mut self, _substs: & $($mutability)? SubstsRef<'tcx>) {
}
@@ -982,12 +919,7 @@ macro_rules! make_mir_visitor {
body: &$($mutability)? Body<'tcx>,
location: Location
) {
- #[allow(unused_macro_rules)]
- macro_rules! basic_blocks {
- (mut) => (body.basic_blocks_mut());
- () => (body.basic_blocks());
- }
- let basic_block = & $($mutability)? basic_blocks!($($mutability)?)[location.block];
+ let basic_block = & $($mutability)? basic_blocks!(body, $($mutability, true)?)[location.block];
if basic_block.statements.len() == location.statement_index {
if let Some(ref $($mutability)? terminator) = basic_block.terminator {
self.visit_terminator(terminator, location)
@@ -1002,6 +934,94 @@ macro_rules! make_mir_visitor {
}
}
+macro_rules! basic_blocks {
+ ($body:ident, mut, true) => {
+ $body.basic_blocks.as_mut()
+ };
+ ($body:ident, mut, false) => {
+ $body.basic_blocks.as_mut_preserves_cfg()
+ };
+ ($body:ident,) => {
+ $body.basic_blocks
+ };
+}
+
+macro_rules! basic_blocks_iter {
+ ($body:ident, mut, $invalidate:tt) => {
+ basic_blocks!($body, mut, $invalidate).iter_enumerated_mut()
+ };
+ ($body:ident,) => {
+ basic_blocks!($body,).iter_enumerated()
+ };
+}
+
+macro_rules! extra_body_methods {
+ (mut) => {
+ fn visit_body_preserves_cfg(&mut self, body: &mut Body<'tcx>) {
+ self.super_body_preserves_cfg(body);
+ }
+
+ fn super_body_preserves_cfg(&mut self, body: &mut Body<'tcx>) {
+ super_body!(self, body, mut, false);
+ }
+ };
+ () => {};
+}
+
+macro_rules! super_body {
+ ($self:ident, $body:ident, $($mutability:ident, $invalidate:tt)?) => {
+ let span = $body.span;
+ if let Some(gen) = &$($mutability)? $body.generator {
+ if let Some(yield_ty) = $(& $mutability)? gen.yield_ty {
+ $self.visit_ty(
+ yield_ty,
+ TyContext::YieldTy(SourceInfo::outermost(span))
+ );
+ }
+ }
+
+ for (bb, data) in basic_blocks_iter!($body, $($mutability, $invalidate)?) {
+ $self.visit_basic_block_data(bb, data);
+ }
+
+ for scope in &$($mutability)? $body.source_scopes {
+ $self.visit_source_scope_data(scope);
+ }
+
+ $self.visit_ty(
+ $(& $mutability)? $body.return_ty(),
+ TyContext::ReturnTy(SourceInfo::outermost($body.span))
+ );
+
+ for local in $body.local_decls.indices() {
+ $self.visit_local_decl(local, & $($mutability)? $body.local_decls[local]);
+ }
+
+ #[allow(unused_macro_rules)]
+ macro_rules! type_annotations {
+ (mut) => ($body.user_type_annotations.iter_enumerated_mut());
+ () => ($body.user_type_annotations.iter_enumerated());
+ }
+
+ for (index, annotation) in type_annotations!($($mutability)?) {
+ $self.visit_user_type_annotation(
+ index, annotation
+ );
+ }
+
+ for var_debug_info in &$($mutability)? $body.var_debug_info {
+ $self.visit_var_debug_info(var_debug_info);
+ }
+
+ $self.visit_span($(& $mutability)? $body.span);
+
+ for const_ in &$($mutability)? $body.required_consts {
+ let location = START_BLOCK.start_location();
+ $self.visit_constant(const_, location);
+ }
+ }
+}
+
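
// A condensed, self-contained sketch (not part of this patch) of the macro
// technique used by `make_mir_visitor`/`super_body!` above: one macro body is
// expanded twice, once without `mut` and once with it, so the traversal logic
// for the shared-borrow and mutable-borrow visitors is written only once.
// Everything below is a toy stand-in, not the real MIR visitor.
struct Body {
    blocks: Vec<String>,
}

macro_rules! make_visitor {
    ($name:ident $(, $mutability:ident)?) => {
        #[allow(dead_code)]
        trait $name {
            fn visit_block(&mut self, block: & $($mutability)? String);

            // The default body is the shared traversal; `$mutability` decides
            // whether it hands out `&String` or `&mut String`.
            fn visit_body(&mut self, body: & $($mutability)? Body) {
                for block in & $($mutability)? body.blocks {
                    self.visit_block(block);
                }
            }
        }
    };
}

make_visitor!(Visitor);
make_visitor!(MutVisitor, mut);

struct Renamer;
impl MutVisitor for Renamer {
    fn visit_block(&mut self, block: &mut String) {
        block.push_str("_renamed");
    }
}

fn main() {
    let mut body = Body { blocks: vec!["bb0".to_string(), "bb1".to_string()] };
    let mut visitor = Renamer;
    visitor.visit_body(&mut body);
    assert_eq!(body.blocks, ["bb0_renamed", "bb1_renamed"]);
}
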
macro_rules! visit_place_fns {
(mut) => {
fn tcx<'a>(&'a self) -> TyCtxt<'tcx>;
@@ -1064,6 +1084,11 @@ macro_rules! visit_place_fns {
self.visit_ty(&mut new_ty, TyContext::Location(location));
if ty != new_ty { Some(PlaceElem::Field(field, new_ty)) } else { None }
}
+ PlaceElem::OpaqueCast(ty) => {
+ let mut new_ty = ty;
+ self.visit_ty(&mut new_ty, TyContext::Location(location));
+ if ty != new_ty { Some(PlaceElem::OpaqueCast(new_ty)) } else { None }
+ }
PlaceElem::Deref
| PlaceElem::ConstantIndex { .. }
| PlaceElem::Subslice { .. }
@@ -1133,7 +1158,7 @@ macro_rules! visit_place_fns {
location: Location,
) {
match elem {
- ProjectionElem::Field(_field, ty) => {
+ ProjectionElem::OpaqueCast(ty) | ProjectionElem::Field(_, ty) => {
self.visit_ty(ty, TyContext::Location(location));
}
ProjectionElem::Index(local) => {
diff --git a/compiler/rustc_middle/src/query/mod.rs b/compiler/rustc_middle/src/query/mod.rs
index d8483e7e4..3d720f09b 100644
--- a/compiler/rustc_middle/src/query/mod.rs
+++ b/compiler/rustc_middle/src/query/mod.rs
@@ -4,6 +4,9 @@
//! ["Queries: demand-driven compilation"](https://rustc-dev-guide.rust-lang.org/query.html).
//! This chapter includes instructions for adding new queries.
+use crate::ty::{self, print::describe_as_module, TyCtxt};
+use rustc_span::def_id::LOCAL_CRATE;
+
// Each of these queries corresponds to a function pointer field in the
// `Providers` struct for requesting a value of that type, and a method
// on `tcx: TyCtxt` (and `tcx.at(span)`) for doing that request in a way
@@ -17,19 +20,19 @@
// as they will raise a fatal error on query cycles instead.
rustc_queries! {
query trigger_delay_span_bug(key: DefId) -> () {
- desc { "trigger a delay span bug" }
+ desc { "triggering a delay span bug" }
}
- query resolutions(_: ()) -> &'tcx ty::ResolverOutputs {
+ query resolutions(_: ()) -> &'tcx ty::ResolverGlobalCtxt {
eval_always
no_hash
- desc { "get the resolver outputs" }
+ desc { "getting the resolver outputs" }
}
query resolver_for_lowering(_: ()) -> &'tcx Steal<ty::ResolverAstLowering> {
eval_always
no_hash
- desc { "get the resolver for lowering" }
+ desc { "getting the resolver for lowering" }
}
/// Return the span for a definition.
@@ -37,7 +40,7 @@ rustc_queries! {
/// This span is meant for dep-tracking rather than diagnostics. It should not be used outside
/// of rustc_middle::hir::source_map.
query source_span(key: LocalDefId) -> Span {
- desc { "get the source span" }
+ desc { "getting the source span" }
}
/// Represents crate as a whole (as distinct from the top-level crate module).
@@ -47,16 +50,16 @@ rustc_queries! {
/// To avoid this fate, do not call `tcx.hir().krate()`; instead,
/// prefer wrappers like `tcx.visit_all_items_in_krate()`.
query hir_crate(key: ()) -> Crate<'tcx> {
- storage(ArenaCacheSelector<'tcx>)
+ arena_cache
eval_always
- desc { "get the crate HIR" }
+ desc { "getting the crate HIR" }
}
/// All items in the crate.
query hir_crate_items(_: ()) -> rustc_middle::hir::ModuleItems {
- storage(ArenaCacheSelector<'tcx>)
+ arena_cache
eval_always
- desc { "get HIR crate items" }
+ desc { "getting HIR crate items" }
}
/// The items in a module.
@@ -64,8 +67,8 @@ rustc_queries! {
/// This can be conveniently accessed by `tcx.hir().visit_item_likes_in_module`.
/// Avoid calling this query directly.
query hir_module_items(key: LocalDefId) -> rustc_middle::hir::ModuleItems {
- storage(ArenaCacheSelector<'tcx>)
- desc { |tcx| "HIR module items in `{}`", tcx.def_path_str(key.to_def_id()) }
+ arena_cache
+ desc { |tcx| "getting HIR module items in `{}`", tcx.def_path_str(key.to_def_id()) }
cache_on_disk_if { true }
}
@@ -73,8 +76,8 @@ rustc_queries! {
///
/// This can be conveniently accessed by methods on `tcx.hir()`.
/// Avoid calling this query directly.
- query hir_owner(key: LocalDefId) -> Option<crate::hir::Owner<'tcx>> {
- desc { |tcx| "HIR owner of `{}`", tcx.def_path_str(key.to_def_id()) }
+ query hir_owner(key: hir::OwnerId) -> Option<crate::hir::Owner<'tcx>> {
+ desc { |tcx| "getting HIR owner of `{}`", tcx.def_path_str(key.to_def_id()) }
}
/// Gives access to the HIR ID for the given `LocalDefId` owner `key`.
@@ -82,31 +85,31 @@ rustc_queries! {
/// This can be conveniently accessed by methods on `tcx.hir()`.
/// Avoid calling this query directly.
query local_def_id_to_hir_id(key: LocalDefId) -> hir::HirId {
- desc { |tcx| "HIR ID of `{}`", tcx.def_path_str(key.to_def_id()) }
+ desc { |tcx| "getting HIR ID of `{}`", tcx.def_path_str(key.to_def_id()) }
}
/// Gives access to the HIR node's parent for the HIR owner `key`.
///
/// This can be conveniently accessed by methods on `tcx.hir()`.
/// Avoid calling this query directly.
- query hir_owner_parent(key: LocalDefId) -> hir::HirId {
- desc { |tcx| "HIR parent of `{}`", tcx.def_path_str(key.to_def_id()) }
+ query hir_owner_parent(key: hir::OwnerId) -> hir::HirId {
+ desc { |tcx| "getting HIR parent of `{}`", tcx.def_path_str(key.to_def_id()) }
}
/// Gives access to the HIR nodes and bodies inside the HIR owner `key`.
///
/// This can be conveniently accessed by methods on `tcx.hir()`.
/// Avoid calling this query directly.
- query hir_owner_nodes(key: LocalDefId) -> hir::MaybeOwner<&'tcx hir::OwnerNodes<'tcx>> {
- desc { |tcx| "HIR owner items in `{}`", tcx.def_path_str(key.to_def_id()) }
+ query hir_owner_nodes(key: hir::OwnerId) -> hir::MaybeOwner<&'tcx hir::OwnerNodes<'tcx>> {
+ desc { |tcx| "getting HIR owner items in `{}`", tcx.def_path_str(key.to_def_id()) }
}
/// Gives access to the HIR attributes inside the HIR owner `key`.
///
/// This can be conveniently accessed by methods on `tcx.hir()`.
/// Avoid calling this query directly.
- query hir_attrs(key: LocalDefId) -> &'tcx hir::AttributeMap<'tcx> {
- desc { |tcx| "HIR owner attributes in `{}`", tcx.def_path_str(key.to_def_id()) }
+ query hir_attrs(key: hir::OwnerId) -> &'tcx hir::AttributeMap<'tcx> {
+ desc { |tcx| "getting HIR owner attributes in `{}`", tcx.def_path_str(key.to_def_id()) }
}
/// Computes the `DefId` of the corresponding const parameter in case the `key` is a
@@ -135,7 +138,7 @@ rustc_queries! {
/// Given the def_id of a const-generic parameter, computes the associated default const
/// parameter. e.g. `fn example<const N: usize=3>` called on `N` would return `3`.
query const_param_default(param: DefId) -> ty::Const<'tcx> {
- desc { |tcx| "compute const default for a given parameter `{}`", tcx.def_path_str(param) }
+ desc { |tcx| "computing const default for a given parameter `{}`", tcx.def_path_str(param) }
cache_on_disk_if { param.is_local() }
separate_provide_extern
}
@@ -161,6 +164,14 @@ rustc_queries! {
separate_provide_extern
}
+ query collect_trait_impl_trait_tys(key: DefId)
+ -> Result<&'tcx FxHashMap<DefId, Ty<'tcx>>, ErrorGuaranteed>
+ {
+ desc { "comparing an impl and trait method signature, inferring any hidden `impl Trait` types in the process" }
+ cache_on_disk_if { key.is_local() }
+ separate_provide_extern
+ }
+
query analysis(key: ()) -> Result<(), ErrorGuaranteed> {
eval_always
desc { "running analysis passes on this crate" }
@@ -189,7 +200,7 @@ rustc_queries! {
/// associated generics.
query generics_of(key: DefId) -> ty::Generics {
desc { |tcx| "computing generics of `{}`", tcx.def_path_str(key) }
- storage(ArenaCacheSelector<'tcx>)
+ arena_cache
cache_on_disk_if { key.is_local() }
separate_provide_extern
}
@@ -261,24 +272,29 @@ rustc_queries! {
}
query native_libraries(_: CrateNum) -> Vec<NativeLib> {
- storage(ArenaCacheSelector<'tcx>)
+ arena_cache
desc { "looking up the native libraries of a linked crate" }
separate_provide_extern
}
- query lint_levels(_: ()) -> LintLevelMap {
- storage(ArenaCacheSelector<'tcx>)
- eval_always
- desc { "computing the lint levels for items in this crate" }
+ query shallow_lint_levels_on(key: hir::OwnerId) -> rustc_middle::lint::ShallowLintLevelMap {
+ eval_always // fetches `resolutions`
+ arena_cache
+ desc { |tcx| "looking up lint levels for `{}`", tcx.def_path_str(key.to_def_id()) }
+ }
+
+ query lint_expectations(_: ()) -> Vec<(LintExpectationId, LintExpectation)> {
+ arena_cache
+ desc { "computing `#[expect]`ed lints in this crate" }
}
query parent_module_from_def_id(key: LocalDefId) -> LocalDefId {
eval_always
- desc { |tcx| "parent module of `{}`", tcx.def_path_str(key.to_def_id()) }
+ desc { |tcx| "getting the parent module of `{}`", tcx.def_path_str(key.to_def_id()) }
}
query expn_that_defined(key: DefId) -> rustc_span::ExpnId {
- desc { |tcx| "expansion that defined `{}`", tcx.def_path_str(key) }
+ desc { |tcx| "getting the expansion that defined `{}`", tcx.def_path_str(key) }
separate_provide_extern
}
@@ -288,6 +304,32 @@ rustc_queries! {
separate_provide_extern
}
+ /// Checks whether a type is representable or infinitely sized
+ query representability(_: LocalDefId) -> rustc_middle::ty::Representability {
+ desc { "checking if `{}` is representable", tcx.def_path_str(key.to_def_id()) }
+ // infinitely sized types will cause a cycle
+ cycle_delay_bug
+ // we don't want recursive representability calls to be forced with
+ // incremental compilation because, if a cycle occurs, we need the
+ // entire cycle to be in memory for diagnostics
+ anon
+ }
+
+ /// An implementation detail for the `representability` query
+ query representability_adt_ty(_: Ty<'tcx>) -> rustc_middle::ty::Representability {
+ desc { "checking if `{}` is representable", key }
+ cycle_delay_bug
+ anon
+ }
+
+ /// Set of param indexes for type params that are in the type's representation
+ query params_in_repr(key: DefId) -> rustc_index::bit_set::BitSet<u32> {
+ desc { "finding type parameters in the representation" }
+ arena_cache
+ no_hash
+ separate_provide_extern
+ }
+
/// Fetch the THIR for a given body. If typeck for that body failed, returns an empty `Thir`.
query thir_body(key: ty::WithOptConstParam<LocalDefId>)
-> Result<(&'tcx Steal<thir::Thir<'tcx>>, thir::ExprId), ErrorGuaranteed>
@@ -300,7 +342,7 @@ rustc_queries! {
/// Create a THIR tree for debugging.
query thir_tree(key: ty::WithOptConstParam<LocalDefId>) -> String {
no_hash
- storage(ArenaCacheSelector<'tcx>)
+ arena_cache
desc { |tcx| "constructing THIR tree for `{}`", tcx.def_path_str(key.did.to_def_id()) }
}
@@ -308,7 +350,7 @@ rustc_queries! {
/// them. This includes all the body owners, but also things like struct
/// constructors.
query mir_keys(_: ()) -> rustc_data_structures::fx::FxIndexSet<LocalDefId> {
- storage(ArenaCacheSelector<'tcx>)
+ arena_cache
desc { "getting a list of all mir_keys" }
}
@@ -341,7 +383,7 @@ rustc_queries! {
/// See the README for the `mir` module for details.
query mir_const(key: ty::WithOptConstParam<LocalDefId>) -> &'tcx Steal<mir::Body<'tcx>> {
desc {
- |tcx| "processing MIR for {}`{}`",
+ |tcx| "preparing {}`{}` for borrow checking",
if key.const_param_did.is_some() { "the const argument " } else { "" },
tcx.def_path_str(key.did.to_def_id()),
}
@@ -353,7 +395,7 @@ rustc_queries! {
key: DefId
) -> Result<Option<&'tcx [ty::abstract_const::Node<'tcx>]>, ErrorGuaranteed> {
desc {
- |tcx| "building an abstract representation for {}", tcx.def_path_str(key),
+ |tcx| "building an abstract representation for `{}`", tcx.def_path_str(key),
}
separate_provide_extern
}
@@ -363,16 +405,16 @@ rustc_queries! {
) -> Result<Option<&'tcx [ty::abstract_const::Node<'tcx>]>, ErrorGuaranteed> {
desc {
|tcx|
- "building an abstract representation for the const argument {}",
+ "building an abstract representation for the const argument `{}`",
tcx.def_path_str(key.0.to_def_id()),
}
}
query try_unify_abstract_consts(key:
- ty::ParamEnvAnd<'tcx, (ty::Unevaluated<'tcx, ()>, ty::Unevaluated<'tcx, ()>
+ ty::ParamEnvAnd<'tcx, (ty::UnevaluatedConst<'tcx>, ty::UnevaluatedConst<'tcx>
)>) -> bool {
desc {
- |tcx| "trying to unify the generic constants {} and {}",
+ |tcx| "trying to unify the generic constants `{}` and `{}`",
tcx.def_path_str(key.value.0.def.did), tcx.def_path_str(key.value.1.def.did)
}
}
@@ -394,7 +436,7 @@ rustc_queries! {
query mir_for_ctfe_of_const_arg(key: (LocalDefId, DefId)) -> &'tcx mir::Body<'tcx> {
desc {
- |tcx| "MIR for CTFE of the const argument `{}`",
+ |tcx| "caching MIR for CTFE of the const argument `{}`",
tcx.def_path_str(key.0.to_def_id())
}
}
@@ -406,7 +448,7 @@ rustc_queries! {
) {
no_hash
desc {
- |tcx| "processing {}`{}`",
+ |tcx| "processing MIR for {}`{}`",
if key.const_param_did.is_some() { "the const argument " } else { "" },
tcx.def_path_str(key.did.to_def_id()),
}
@@ -415,9 +457,9 @@ rustc_queries! {
query symbols_for_closure_captures(
key: (LocalDefId, LocalDefId)
) -> Vec<rustc_span::Symbol> {
- storage(ArenaCacheSelector<'tcx>)
+ arena_cache
desc {
- |tcx| "symbols for captures of closure `{}` in `{}`",
+ |tcx| "finding symbols for captures of closure `{}` in `{}`",
tcx.def_path_str(key.1.to_def_id()),
tcx.def_path_str(key.0.to_def_id())
}
@@ -435,7 +477,7 @@ rustc_queries! {
/// MIR pass (assuming the -Cinstrument-coverage option is enabled).
query coverageinfo(key: ty::InstanceDef<'tcx>) -> mir::CoverageInfo {
desc { |tcx| "retrieving coverage info from MIR for `{}`", tcx.def_path_str(key.def_id()) }
- storage(ArenaCacheSelector<'tcx>)
+ arena_cache
}
/// Returns the `CodeRegions` for a function that has instrumented coverage, in case the
@@ -445,7 +487,7 @@ rustc_queries! {
|tcx| "retrieving the covered `CodeRegion`s, if instrumented, for `{}`",
tcx.def_path_str(key)
}
- storage(ArenaCacheSelector<'tcx>)
+ arena_cache
cache_on_disk_if { key.is_local() }
}
@@ -479,12 +521,12 @@ rustc_queries! {
// queries). Making it anonymous avoids hashing the result, which
// may save a bit of time.
anon
- desc { "erasing regions from `{:?}`", ty }
+ desc { "erasing regions from `{}`", ty }
}
query wasm_import_module_map(_: CrateNum) -> FxHashMap<DefId, String> {
- storage(ArenaCacheSelector<'tcx>)
- desc { "wasm import module map" }
+ arena_cache
+ desc { "getting wasm import module map" }
}
/// Maps from the `DefId` of an item (trait/struct/enum/fn) to the
@@ -559,7 +601,7 @@ rustc_queries! {
query trait_def(key: DefId) -> ty::TraitDef {
desc { |tcx| "computing trait definition for `{}`", tcx.def_path_str(key) }
- storage(ArenaCacheSelector<'tcx>)
+ arena_cache
cache_on_disk_if { key.is_local() }
separate_provide_extern
}
@@ -574,16 +616,8 @@ rustc_queries! {
separate_provide_extern
}
- // The cycle error here should be reported as an error by `check_representable`.
- // We consider the type as Sized in the meanwhile to avoid
- // further errors (done in impl Value for AdtSizedConstraint).
- // Use `cycle_delay_bug` to delay the cycle error here to be emitted later
- // in case we accidentally otherwise don't emit an error.
- query adt_sized_constraint(
- key: DefId
- ) -> AdtSizedConstraint<'tcx> {
+ query adt_sized_constraint(key: DefId) -> &'tcx [Ty<'tcx>] {
desc { |tcx| "computing `Sized` constraints for `{}`", tcx.def_path_str(key) }
- cycle_delay_bug
}
query adt_dtorck_constraint(
@@ -637,7 +671,7 @@ rustc_queries! {
/// Gets a map with the variance of every item; use `item_variance` instead.
query crate_variances(_: ()) -> ty::CrateVariancesMap<'tcx> {
- storage(ArenaCacheSelector<'tcx>)
+ arena_cache
desc { "computing the variances for items in this crate" }
}
@@ -650,7 +684,7 @@ rustc_queries! {
/// Maps from the `DefId` of a type to its (inferred) outlives.
query inferred_outlives_crate(_: ()) -> ty::CratePredicatesMap<'tcx> {
- storage(ArenaCacheSelector<'tcx>)
+ arena_cache
desc { "computing the inferred outlives predicates for items in this crate" }
}
@@ -664,15 +698,15 @@ rustc_queries! {
/// Maps from a trait item to the trait item "descriptor".
query associated_item(key: DefId) -> ty::AssocItem {
desc { |tcx| "computing associated item data for `{}`", tcx.def_path_str(key) }
- storage(ArenaCacheSelector<'tcx>)
+ arena_cache
cache_on_disk_if { key.is_local() }
separate_provide_extern
}
/// Collects the associated items defined on a trait or impl.
query associated_items(key: DefId) -> ty::AssocItems<'tcx> {
- storage(ArenaCacheSelector<'tcx>)
- desc { |tcx| "collecting associated items of {}", tcx.def_path_str(key) }
+ arena_cache
+ desc { |tcx| "collecting associated items of `{}`", tcx.def_path_str(key) }
}
/// Maps from associated items on a trait to the corresponding associated
@@ -697,8 +731,8 @@ rustc_queries! {
/// The map returned for `tcx.impl_item_implementor_ids(impl_id)` would be
///`{ trait_f: impl_f, trait_g: impl_g }`
query impl_item_implementor_ids(impl_id: DefId) -> FxHashMap<DefId, DefId> {
- storage(ArenaCacheSelector<'tcx>)
- desc { |tcx| "comparing impl items against trait for {}", tcx.def_path_str(impl_id) }
+ arena_cache
+ desc { |tcx| "comparing impl items against trait for `{}`", tcx.def_path_str(impl_id) }
}
/// Given an `impl_id`, return the trait it implements.
@@ -765,11 +799,20 @@ rustc_queries! {
desc { |tcx| "processing `{}`", tcx.def_path_str(key.to_def_id()) }
}
+ /// Returns the types assumed to be well formed while "inside" of the given item.
+ ///
+ /// Note that we've liberated the late bound regions of function signatures, so
+ /// this cannot be used to check whether these types are well formed.
+ query assumed_wf_types(key: DefId) -> &'tcx ty::List<Ty<'tcx>> {
+ desc { |tcx| "computing the implied bounds of `{}`", tcx.def_path_str(key) }
+ }
+
/// Computes the signature of the function.
query fn_sig(key: DefId) -> ty::PolyFnSig<'tcx> {
desc { |tcx| "computing function signature of `{}`", tcx.def_path_str(key) }
cache_on_disk_if { key.is_local() }
separate_provide_extern
+ cycle_delay_bug
}
/// Performs lint checking for the module.
@@ -809,8 +852,8 @@ rustc_queries! {
desc { |tcx| "checking privacy in {}", describe_as_module(key, tcx) }
}
- query check_mod_liveness(key: LocalDefId) -> () {
- desc { |tcx| "checking liveness of variables in {}", describe_as_module(key, tcx) }
+ query check_liveness(key: DefId) {
+ desc { |tcx| "checking liveness of variables in `{}`", tcx.def_path_str(key) }
}
/// Return the live symbols in the crate for dead code check.
@@ -821,8 +864,8 @@ rustc_queries! {
FxHashSet<LocalDefId>,
FxHashMap<LocalDefId, Vec<(DefId, DefId)>>
) {
- storage(ArenaCacheSelector<'tcx>)
- desc { "find live symbols in crate" }
+ arena_cache
+ desc { "finding live symbols in crate" }
}
query check_mod_deathness(key: LocalDefId) -> () {
@@ -867,17 +910,10 @@ rustc_queries! {
query diagnostic_only_typeck(key: LocalDefId) -> &'tcx ty::TypeckResults<'tcx> {
desc { |tcx| "type-checking `{}`", tcx.def_path_str(key.to_def_id()) }
cache_on_disk_if { true }
- load_cached(tcx, id) {
- let typeck_results: Option<ty::TypeckResults<'tcx>> = tcx
- .on_disk_cache().as_ref()
- .and_then(|c| c.try_load_query_result(*tcx, id));
-
- typeck_results.map(|x| &*tcx.arena.alloc(x))
- }
}
- query used_trait_imports(key: LocalDefId) -> &'tcx FxHashSet<LocalDefId> {
- desc { |tcx| "used_trait_imports `{}`", tcx.def_path_str(key.to_def_id()) }
+ query used_trait_imports(key: LocalDefId) -> &'tcx UnordSet<LocalDefId> {
+ desc { |tcx| "finding used_trait_imports `{}`", tcx.def_path_str(key.to_def_id()) }
cache_on_disk_if { true }
}
@@ -905,8 +941,8 @@ rustc_queries! {
/// Gets a complete map from all types to their inherent impls.
/// Not meant to be used directly outside of coherence.
query crate_inherent_impls(k: ()) -> CrateInherentImpls {
- storage(ArenaCacheSelector<'tcx>)
- desc { "all inherent impls defined in crate" }
+ arena_cache
+ desc { "finding all inherent impls defined in crate" }
}
/// Checks all types in the crate for overlap in their inherent impls. Reports errors.
@@ -993,8 +1029,10 @@ rustc_queries! {
/// Tries to destructure an `mir::ConstantKind` ADT or array into its variant index
/// and its field values.
- query try_destructure_mir_constant(key: ty::ParamEnvAnd<'tcx, mir::ConstantKind<'tcx>>) -> Option<mir::DestructuredMirConstant<'tcx>> {
- desc { "destructuring mir constant"}
+ query try_destructure_mir_constant(
+ key: ty::ParamEnvAnd<'tcx, mir::ConstantKind<'tcx>>
+ ) -> Option<mir::DestructuredConstant<'tcx>> {
+ desc { "destructuring MIR constant"}
remap_env_constness
}
@@ -1003,12 +1041,12 @@ rustc_queries! {
query deref_mir_constant(
key: ty::ParamEnvAnd<'tcx, mir::ConstantKind<'tcx>>
) -> mir::ConstantKind<'tcx> {
- desc { "dereferencing mir constant" }
+ desc { "dereferencing MIR constant" }
remap_env_constness
}
query const_caller_location(key: (rustc_span::Symbol, u32, u32)) -> ConstValue<'tcx> {
- desc { "get a &core::panic::Location referring to a span" }
+ desc { "getting a &core::panic::Location referring to a span" }
}
// FIXME get rid of this with valtrees
@@ -1027,10 +1065,10 @@ rustc_queries! {
cache_on_disk_if { key.is_local() }
}
- /// Performs part of the privacy check and computes "access levels".
- query privacy_access_levels(_: ()) -> &'tcx AccessLevels {
+ /// Performs part of the privacy check and computes effective visibilities.
+ query effective_visibilities(_: ()) -> &'tcx EffectiveVisibilities {
eval_always
- desc { "privacy access levels" }
+ desc { "checking effective visibilities" }
}
query check_private_in_public(_: ()) -> () {
eval_always
@@ -1038,7 +1076,7 @@ rustc_queries! {
}
query reachable_set(_: ()) -> FxHashSet<LocalDefId> {
- storage(ArenaCacheSelector<'tcx>)
+ arena_cache
desc { "reachability" }
}
@@ -1050,7 +1088,7 @@ rustc_queries! {
/// Generates a MIR body for the shim.
query mir_shims(key: ty::InstanceDef<'tcx>) -> mir::Body<'tcx> {
- storage(ArenaCacheSelector<'tcx>)
+ arena_cache
desc { |tcx| "generating MIR shim for `{}`", tcx.def_path_str(key.def_id()) }
}
@@ -1094,6 +1132,11 @@ rustc_queries! {
separate_provide_extern
}
+ query lookup_default_body_stability(def_id: DefId) -> Option<attr::DefaultBodyStability> {
+ desc { |tcx| "looking up default body stability of `{}`", tcx.def_path_str(def_id) }
+ separate_provide_extern
+ }
+
query should_inherit_track_caller(def_id: DefId) -> bool {
desc { |tcx| "computing should_inherit_track_caller of `{}`", tcx.def_path_str(def_id) }
}
@@ -1109,6 +1152,11 @@ rustc_queries! {
desc { |tcx| "checking whether `{}` is `doc(hidden)`", tcx.def_path_str(def_id) }
}
+ /// Determines whether an item is annotated with `doc(notable_trait)`.
+ query is_doc_notable_trait(def_id: DefId) -> bool {
+ desc { |tcx| "checking whether `{}` is `doc(notable_trait)`", tcx.def_path_str(def_id) }
+ }
+
/// Returns the attributes on the item at `def_id`.
///
/// Do not use this directly, use `tcx.get_attrs` instead.
@@ -1119,7 +1167,7 @@ rustc_queries! {
query codegen_fn_attrs(def_id: DefId) -> CodegenFnAttrs {
desc { |tcx| "computing codegen attributes of `{}`", tcx.def_path_str(def_id) }
- storage(ArenaCacheSelector<'tcx>)
+ arena_cache
cache_on_disk_if { def_id.is_local() }
separate_provide_extern
}
@@ -1136,8 +1184,8 @@ rustc_queries! {
/// Gets the rendered value of the specified constant or associated constant.
/// Used by rustdoc.
query rendered_const(def_id: DefId) -> String {
- storage(ArenaCacheSelector<'tcx>)
- desc { |tcx| "rendering constant intializer of `{}`", tcx.def_path_str(def_id) }
+ arena_cache
+ desc { |tcx| "rendering constant initializer of `{}`", tcx.def_path_str(def_id) }
cache_on_disk_if { def_id.is_local() }
separate_provide_extern
}
@@ -1148,29 +1196,29 @@ rustc_queries! {
}
query is_ctfe_mir_available(key: DefId) -> bool {
- desc { |tcx| "checking if item has ctfe mir available: `{}`", tcx.def_path_str(key) }
+ desc { |tcx| "checking if item has CTFE MIR available: `{}`", tcx.def_path_str(key) }
cache_on_disk_if { key.is_local() }
separate_provide_extern
}
query is_mir_available(key: DefId) -> bool {
- desc { |tcx| "checking if item has mir available: `{}`", tcx.def_path_str(key) }
+ desc { |tcx| "checking if item has MIR available: `{}`", tcx.def_path_str(key) }
cache_on_disk_if { key.is_local() }
separate_provide_extern
}
query own_existential_vtable_entries(
- key: ty::PolyExistentialTraitRef<'tcx>
+ key: DefId
) -> &'tcx [DefId] {
- desc { |tcx| "finding all existential vtable entries for trait {}", tcx.def_path_str(key.def_id()) }
+ desc { |tcx| "finding all existential vtable entries for trait `{}`", tcx.def_path_str(key) }
}
query vtable_entries(key: ty::PolyTraitRef<'tcx>)
-> &'tcx [ty::VtblEntry<'tcx>] {
- desc { |tcx| "finding all vtable entries for trait {}", tcx.def_path_str(key.def_id()) }
+ desc { |tcx| "finding all vtable entries for trait `{}`", tcx.def_path_str(key.def_id()) }
}
- query vtable_trait_upcasting_coercion_new_vptr_slot(key: (ty::Ty<'tcx>, ty::Ty<'tcx>)) -> Option<usize> {
- desc { |tcx| "finding the slot within vtable for trait object {} vtable ptr during trait upcasting coercion from {} vtable",
+ query vtable_trait_upcasting_coercion_new_vptr_slot(key: (Ty<'tcx>, Ty<'tcx>)) -> Option<usize> {
+ desc { |tcx| "finding the slot within vtable for trait object `{}` vtable ptr during trait upcasting coercion from `{}` vtable",
key.1, key.0 }
}
@@ -1181,34 +1229,31 @@ rustc_queries! {
}
}
- query codegen_fulfill_obligation(
+ query codegen_select_candidate(
key: (ty::ParamEnv<'tcx>, ty::PolyTraitRef<'tcx>)
) -> Result<&'tcx ImplSource<'tcx, ()>, traits::CodegenObligationError> {
cache_on_disk_if { true }
- desc { |tcx|
- "checking if `{}` fulfills its obligations",
- tcx.def_path_str(key.1.def_id())
- }
+ desc { |tcx| "computing candidate for `{}`", key.1 }
}
/// Return all `impl` blocks in the current crate.
query all_local_trait_impls(_: ()) -> &'tcx rustc_data_structures::fx::FxIndexMap<DefId, Vec<LocalDefId>> {
- desc { "local trait impls" }
+ desc { "finding local trait impls" }
}
/// Given a trait `trait_id`, return all known `impl` blocks.
query trait_impls_of(trait_id: DefId) -> ty::trait_def::TraitImpls {
- storage(ArenaCacheSelector<'tcx>)
- desc { |tcx| "trait impls of `{}`", tcx.def_path_str(trait_id) }
+ arena_cache
+ desc { |tcx| "finding trait impls of `{}`", tcx.def_path_str(trait_id) }
}
query specialization_graph_of(trait_id: DefId) -> specialization_graph::Graph {
- storage(ArenaCacheSelector<'tcx>)
+ arena_cache
desc { |tcx| "building specialization graph of trait `{}`", tcx.def_path_str(trait_id) }
cache_on_disk_if { true }
}
query object_safety_violations(trait_id: DefId) -> &'tcx [traits::ObjectSafetyViolation] {
- desc { |tcx| "determine object safety of trait `{}`", tcx.def_path_str(trait_id) }
+ desc { |tcx| "determining object safety of trait `{}`", tcx.def_path_str(trait_id) }
}
/// Gets the ParameterEnvironment for a given item; this environment
@@ -1266,7 +1311,7 @@ rustc_queries! {
/// correctly.
query has_structural_eq_impls(ty: Ty<'tcx>) -> bool {
desc {
- "computing whether `{:?}` implements `PartialStructuralEq` and `StructuralEq`",
+ "computing whether `{}` implements `PartialStructuralEq` and `StructuralEq`",
ty
}
}
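
Alongside the rewording, many `desc` strings in this file switch their placeholder from `{:?}` to `{}`, so the query description prints the value the way it is written in source rather than as a debug dump. A self-contained illustration of the difference; `RefTy` is a made-up stand-in for a compiler-internal type:

```rust
use std::fmt;

// `Debug` dumps the structure; `Display` is written to read like surface
// Rust, which is what the backtick-quoted `{}` placeholders rely on.
#[derive(Debug)]
struct RefTy {
    lifetime: &'static str,
    pointee: &'static str,
}

impl fmt::Display for RefTy {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "&{} {}", self.lifetime, self.pointee)
    }
}

fn main() {
    let ty = RefTy { lifetime: "'a", pointee: "str" };
    println!("computing whether `{:?}` implements `StructuralEq`", ty);
    println!("computing whether `{}` implements `StructuralEq`", ty);
}
```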
@@ -1295,6 +1340,7 @@ rustc_queries! {
query layout_of(
key: ty::ParamEnvAnd<'tcx, Ty<'tcx>>
) -> Result<ty::layout::TyAndLayout<'tcx>, ty::layout::LayoutError<'tcx>> {
+ depth_limit
desc { "computing layout of `{}`", key.value }
remap_env_constness
}
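
`depth_limit` is new on `layout_of`; conceptually it makes deeply recursive layout computations fail with a limit error instead of recursing without bound. A toy standalone version of that guard follows; the limit value and all names are invented, and the real modifier lives inside the query system rather than in user code:

```rust
// Arbitrary limit for illustration only.
const DEPTH_LIMIT: usize = 128;

enum Ty {
    Unit,
    Wrapper(Box<Ty>),
}

fn layout_size(ty: &Ty, depth: usize) -> Result<usize, &'static str> {
    if depth > DEPTH_LIMIT {
        return Err("layout computation exceeded the depth limit");
    }
    match ty {
        Ty::Unit => Ok(0),
        Ty::Wrapper(inner) => layout_size(inner, depth + 1),
    }
}

fn main() {
    // Build a type nested deeper than the limit allows.
    let mut ty = Ty::Unit;
    for _ in 0..200 {
        ty = Ty::Wrapper(Box::new(ty));
    }
    assert_eq!(
        layout_size(&ty, 0),
        Err("layout computation exceeded the depth limit")
    );
}
```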
@@ -1324,13 +1370,13 @@ rustc_queries! {
query dylib_dependency_formats(_: CrateNum)
-> &'tcx [(CrateNum, LinkagePreference)] {
- desc { "dylib dependency formats of crate" }
+ desc { "getting dylib dependency formats of crate" }
separate_provide_extern
}
query dependency_formats(_: ()) -> Lrc<crate::middle::dependency_format::Dependencies> {
- storage(ArenaCacheSelector<'tcx>)
- desc { "get the linkage format of all dependencies" }
+ arena_cache
+ desc { "getting the linkage format of all dependencies" }
}
query is_compiler_builtins(_: CrateNum) -> bool {
@@ -1352,31 +1398,31 @@ rustc_queries! {
}
query is_profiler_runtime(_: CrateNum) -> bool {
fatal_cycle
- desc { "query a crate is `#![profiler_runtime]`" }
+ desc { "checking if a crate is `#![profiler_runtime]`" }
separate_provide_extern
}
query has_ffi_unwind_calls(key: LocalDefId) -> bool {
- desc { |tcx| "check if `{}` contains FFI-unwind calls", tcx.def_path_str(key.to_def_id()) }
+ desc { |tcx| "checking if `{}` contains FFI-unwind calls", tcx.def_path_str(key.to_def_id()) }
cache_on_disk_if { true }
}
query required_panic_strategy(_: CrateNum) -> Option<PanicStrategy> {
fatal_cycle
- desc { "query a crate's required panic strategy" }
+ desc { "getting a crate's required panic strategy" }
separate_provide_extern
}
query panic_in_drop_strategy(_: CrateNum) -> PanicStrategy {
fatal_cycle
- desc { "query a crate's configured panic-in-drop strategy" }
+ desc { "getting a crate's configured panic-in-drop strategy" }
separate_provide_extern
}
query is_no_builtins(_: CrateNum) -> bool {
fatal_cycle
- desc { "test whether a crate has `#![no_builtins]`" }
+ desc { "getting whether a crate has `#![no_builtins]`" }
separate_provide_extern
}
query symbol_mangling_version(_: CrateNum) -> SymbolManglingVersion {
fatal_cycle
- desc { "query a crate's symbol mangling version" }
+ desc { "getting a crate's symbol mangling version" }
separate_provide_extern
}
@@ -1389,9 +1435,9 @@ rustc_queries! {
query specializes(_: (DefId, DefId)) -> bool {
desc { "computing whether impls specialize one another" }
}
- query in_scope_traits_map(_: LocalDefId)
+ query in_scope_traits_map(_: hir::OwnerId)
-> Option<&'tcx FxHashMap<ItemLocalId, Box<[TraitCandidate]>>> {
- desc { "traits in scope at a block" }
+ desc { "getting traits in scope at a block" }
}
query module_reexports(def_id: LocalDefId) -> Option<&'tcx [ModChild]> {
@@ -1404,7 +1450,7 @@ rustc_queries! {
separate_provide_extern
}
- query check_well_formed(key: LocalDefId) -> () {
+ query check_well_formed(key: hir::OwnerId) -> () {
desc { |tcx| "checking that `{}` is well-formed", tcx.def_path_str(key.to_def_id()) }
}
@@ -1422,7 +1468,7 @@ rustc_queries! {
// like the compiler-generated `main` function and so on.
query reachable_non_generics(_: CrateNum)
-> DefIdMap<SymbolExportInfo> {
- storage(ArenaCacheSelector<'tcx>)
+ arena_cache
desc { "looking up the exported symbols of a crate" }
separate_provide_extern
}
@@ -1445,7 +1491,7 @@ rustc_queries! {
/// `upstream_monomorphizations_for`, `upstream_drop_glue_for`, or, even
/// better, `Instance::upstream_monomorphization()`.
query upstream_monomorphizations(_: ()) -> DefIdMap<FxHashMap<SubstsRef<'tcx>, CrateNum>> {
- storage(ArenaCacheSelector<'tcx>)
+ arena_cache
desc { "collecting available upstream monomorphizations" }
}
@@ -1459,7 +1505,7 @@ rustc_queries! {
query upstream_monomorphizations_for(def_id: DefId)
-> Option<&'tcx FxHashMap<SubstsRef<'tcx>, CrateNum>>
{
- storage(ArenaCacheSelector<'tcx>)
+ arena_cache
desc { |tcx|
"collecting available upstream monomorphizations for `{}`",
tcx.def_path_str(def_id),
@@ -1487,7 +1533,7 @@ rustc_queries! {
}
query foreign_modules(_: CrateNum) -> FxHashMap<DefId, ForeignModule> {
- storage(ArenaCacheSelector<'tcx>)
+ arena_cache
desc { "looking up the foreign modules of a linked crate" }
separate_provide_extern
}
@@ -1513,13 +1559,13 @@ rustc_queries! {
separate_provide_extern
}
query extra_filename(_: CrateNum) -> String {
- storage(ArenaCacheSelector<'tcx>)
+ arena_cache
eval_always
desc { "looking up the extra filename for a crate" }
separate_provide_extern
}
query crate_extern_paths(_: CrateNum) -> Vec<PathBuf> {
- storage(ArenaCacheSelector<'tcx>)
+ arena_cache
eval_always
desc { "looking up the paths for extern crates" }
separate_provide_extern
@@ -1541,15 +1587,8 @@ rustc_queries! {
separate_provide_extern
}
- query is_dllimport_foreign_item(def_id: DefId) -> bool {
- desc { |tcx| "is_dllimport_foreign_item({})", tcx.def_path_str(def_id) }
- }
- query is_statically_included_foreign_item(def_id: DefId) -> bool {
- desc { |tcx| "is_statically_included_foreign_item({})", tcx.def_path_str(def_id) }
- }
- query native_library_kind(def_id: DefId)
- -> Option<NativeLibKind> {
- desc { |tcx| "native_library_kind({})", tcx.def_path_str(def_id) }
+ query native_library(def_id: DefId) -> Option<&'tcx NativeLib> {
+ desc { |tcx| "getting the native library for `{}`", tcx.def_path_str(def_id) }
}
/// Does lifetime resolution, but does not descend into trait items. This
@@ -1558,48 +1597,59 @@ rustc_queries! {
/// the same lifetimes and is responsible for diagnostics.
/// See `rustc_resolve::late::lifetimes` for details.
query resolve_lifetimes_trait_definition(_: LocalDefId) -> ResolveLifetimes {
- storage(ArenaCacheSelector<'tcx>)
+ arena_cache
desc { "resolving lifetimes for a trait definition" }
}
/// Does lifetime resolution on items. Importantly, we can't resolve
/// lifetimes directly on things like trait methods, because of trait params.
/// See `rustc_resolve::late::lifetimes` for details.
query resolve_lifetimes(_: LocalDefId) -> ResolveLifetimes {
- storage(ArenaCacheSelector<'tcx>)
+ arena_cache
desc { "resolving lifetimes" }
}
- query named_region_map(_: LocalDefId) ->
+ query named_region_map(_: hir::OwnerId) ->
Option<&'tcx FxHashMap<ItemLocalId, Region>> {
desc { "looking up a named region" }
}
query is_late_bound_map(_: LocalDefId) -> Option<&'tcx FxIndexSet<LocalDefId>> {
desc { "testing if a region is late bound" }
}
- /// For a given item (like a struct), gets the default lifetimes to be used
+ /// For a given item's generic parameter, gets the default lifetimes to be used
/// for each parameter if a trait object were to be passed for that parameter.
- /// For example, for `struct Foo<'a, T, U>`, this would be `['static, 'static]`.
- /// For `struct Foo<'a, T: 'a, U>`, this would instead be `['a, 'static]`.
- query object_lifetime_defaults(_: LocalDefId) -> Option<&'tcx [ObjectLifetimeDefault]> {
- desc { "looking up lifetime defaults for a region on an item" }
+ /// For example, for `T` in `struct Foo<'a, T>`, this would be `'static`.
+ /// For `T` in `struct Foo<'a, T: 'a>`, this would instead be `'a`.
+ /// This query will panic if passed something that is not a type parameter.
+ query object_lifetime_default(key: DefId) -> ObjectLifetimeDefault {
+ desc { "looking up lifetime defaults for generic parameter `{}`", tcx.def_path_str(key) }
+ separate_provide_extern
}
- query late_bound_vars_map(_: LocalDefId)
+ query late_bound_vars_map(_: hir::OwnerId)
-> Option<&'tcx FxHashMap<ItemLocalId, Vec<ty::BoundVariableKind>>> {
desc { "looking up late bound vars" }
}
- query visibility(def_id: DefId) -> ty::Visibility {
+ /// Computes the visibility of the provided `def_id`.
+ ///
+ /// If the item from the `def_id` doesn't have a visibility, it will panic. For example
+ /// a generic type parameter will panic if you call this method on it:
+ ///
+ /// ```
+ /// pub trait Foo<T: Debug> {}
+ /// ```
+ ///
+ /// In here, if you call `visibility` on `T`, it'll panic.
+ query visibility(def_id: DefId) -> ty::Visibility<DefId> {
desc { |tcx| "computing visibility of `{}`", tcx.def_path_str(def_id) }
separate_provide_extern
}
- /// Computes the set of modules from which this type is visibly uninhabited.
- /// To check whether a type is uninhabited at all (not just from a given module), you could
- /// check whether the forest is empty.
- query type_uninhabited_from(
- key: ty::ParamEnvAnd<'tcx, Ty<'tcx>>
- ) -> ty::inhabitedness::DefIdForest<'tcx> {
- desc { "computing the inhabitedness of `{:?}`", key }
- remap_env_constness
+ query inhabited_predicate_adt(key: DefId) -> ty::inhabitedness::InhabitedPredicate<'tcx> {
+ desc { "computing the uninhabited predicate of `{:?}`", key }
+ }
+
+ /// Do not call this query directly: invoke `Ty::inhabited_predicate` instead.
+ query inhabited_predicate_type(key: Ty<'tcx>) -> ty::inhabitedness::InhabitedPredicate<'tcx> {
+ desc { "computing the uninhabited predicate of `{}`", key }
}
query dep_kind(_: CrateNum) -> CrateDepKind {
@@ -1623,7 +1673,7 @@ rustc_queries! {
}
query lib_features(_: ()) -> LibFeatures {
- storage(ArenaCacheSelector<'tcx>)
+ arena_cache
desc { "calculating the lib features map" }
}
query defined_lib_features(_: CrateNum) -> &'tcx [(Symbol, Option<Symbol>)] {
@@ -1631,25 +1681,25 @@ rustc_queries! {
separate_provide_extern
}
query stability_implications(_: CrateNum) -> FxHashMap<Symbol, Symbol> {
- storage(ArenaCacheSelector<'tcx>)
+ arena_cache
desc { "calculating the implications between `#[unstable]` features defined in a crate" }
separate_provide_extern
}
/// Whether the function is an intrinsic
query is_intrinsic(def_id: DefId) -> bool {
- desc { |tcx| "is_intrinsic({})", tcx.def_path_str(def_id) }
+ desc { |tcx| "checking whether `{}` is an intrinsic", tcx.def_path_str(def_id) }
separate_provide_extern
}
/// Returns the lang items defined in another crate by loading it from metadata.
query get_lang_items(_: ()) -> LanguageItems {
- storage(ArenaCacheSelector<'tcx>)
+ arena_cache
eval_always
desc { "calculating the lang items map" }
}
/// Returns all diagnostic items defined in all crates.
query all_diagnostic_items(_: ()) -> rustc_hir::diagnostic_items::DiagnosticItems {
- storage(ArenaCacheSelector<'tcx>)
+ arena_cache
eval_always
desc { "calculating the diagnostic items map" }
}
@@ -1662,7 +1712,7 @@ rustc_queries! {
/// Returns the diagnostic items defined in a crate.
query diagnostic_items(_: CrateNum) -> rustc_hir::diagnostic_items::DiagnosticItems {
- storage(ArenaCacheSelector<'tcx>)
+ arena_cache
desc { "calculating the diagnostic items map in a crate" }
separate_provide_extern
}
@@ -1672,11 +1722,11 @@ rustc_queries! {
separate_provide_extern
}
query visible_parent_map(_: ()) -> DefIdMap<DefId> {
- storage(ArenaCacheSelector<'tcx>)
+ arena_cache
desc { "calculating the visible parent map" }
}
query trimmed_def_paths(_: ()) -> FxHashMap<DefId, Symbol> {
- storage(ArenaCacheSelector<'tcx>)
+ arena_cache
desc { "calculating trimmed def paths" }
}
query missing_extern_crate_item(_: CrateNum) -> bool {
@@ -1685,14 +1735,14 @@ rustc_queries! {
separate_provide_extern
}
query used_crate_source(_: CrateNum) -> Lrc<CrateSource> {
- storage(ArenaCacheSelector<'tcx>)
+ arena_cache
eval_always
desc { "looking at the source for a crate" }
separate_provide_extern
}
/// Returns the debugger visualizers defined for this crate.
query debugger_visualizers(_: CrateNum) -> Vec<rustc_span::DebuggerVisualizerFile> {
- storage(ArenaCacheSelector<'tcx>)
+ arena_cache
desc { "looking up the debugger visualizers for this crate" }
separate_provide_extern
}
@@ -1704,12 +1754,12 @@ rustc_queries! {
/// is marked as a private dependency
query is_private_dep(c: CrateNum) -> bool {
eval_always
- desc { "check whether crate {} is a private dependency", c }
+ desc { "checking whether crate `{}` is a private dependency", c }
separate_provide_extern
}
query allocator_kind(_: ()) -> Option<AllocatorKind> {
eval_always
- desc { "allocator kind for the current crate" }
+ desc { "getting the allocator kind for the current crate" }
}
query upvars_mentioned(def_id: DefId) -> Option<&'tcx FxIndexMap<hir::HirId, hir::Upvar>> {
@@ -1722,11 +1772,11 @@ rustc_queries! {
desc { "looking up all possibly unused extern crates" }
}
query names_imported_by_glob_use(def_id: LocalDefId) -> &'tcx FxHashSet<Symbol> {
- desc { |tcx| "names_imported_by_glob_use for `{}`", tcx.def_path_str(def_id.to_def_id()) }
+ desc { |tcx| "finding names imported by glob use for `{}`", tcx.def_path_str(def_id.to_def_id()) }
}
query stability_index(_: ()) -> stability::Index {
- storage(ArenaCacheSelector<'tcx>)
+ arena_cache
eval_always
desc { "calculating the stability index for the local crate" }
}
@@ -1748,7 +1798,7 @@ rustc_queries! {
/// correspond to a publicly visible symbol in `cnum` machine code.
/// - The `exported_symbols` sets of different crates do not intersect.
query exported_symbols(cnum: CrateNum) -> &'tcx [(ExportedSymbol<'tcx>, SymbolExportInfo)] {
- desc { "exported_symbols" }
+ desc { "collecting exported symbols for crate `{}`", cnum}
cache_on_disk_if { *cnum == LOCAL_CRATE }
separate_provide_extern
}
@@ -1757,6 +1807,7 @@ rustc_queries! {
eval_always
desc { "collect_and_partition_mono_items" }
}
+
query is_codegened_item(def_id: DefId) -> bool {
desc { |tcx| "determining whether `{}` needs codegen", tcx.def_path_str(def_id) }
}
@@ -1764,12 +1815,13 @@ rustc_queries! {
/// All items participating in code generation together with items inlined into them.
query codegened_and_inlined_items(_: ()) -> &'tcx DefIdSet {
eval_always
- desc { "codegened_and_inlined_items" }
+ desc { "collecting codegened and inlined items" }
}
- query codegen_unit(_: Symbol) -> &'tcx CodegenUnit<'tcx> {
- desc { "codegen_unit" }
+ query codegen_unit(sym: Symbol) -> &'tcx CodegenUnit<'tcx> {
+ desc { "getting codegen unit `{sym}`" }
}
+
query unused_generic_params(key: ty::InstanceDef<'tcx>) -> FiniteBitSet<u32> {
cache_on_disk_if { key.def_id().is_local() }
desc {
@@ -1778,6 +1830,7 @@ rustc_queries! {
}
separate_provide_extern
}
+
query backend_optimization_level(_: ()) -> OptLevel {
desc { "optimization level used by backend" }
}
@@ -1788,7 +1841,7 @@ rustc_queries! {
/// has been destroyed.
query output_filenames(_: ()) -> &'tcx Arc<OutputFilenames> {
eval_always
- desc { "output_filenames" }
+ desc { "getting output filenames" }
}
/// Do not call this query directly: invoke `normalize` instead.
@@ -1798,7 +1851,7 @@ rustc_queries! {
&'tcx Canonical<'tcx, canonical::QueryResponse<'tcx, NormalizationResult<'tcx>>>,
NoSolution,
> {
- desc { "normalizing `{:?}`", goal }
+ desc { "normalizing `{}`", goal.value.value }
remap_env_constness
}
@@ -1810,21 +1863,13 @@ rustc_queries! {
remap_env_constness
}
- /// Do not call this query directly: invoke `try_normalize_erasing_regions` instead.
- query try_normalize_mir_const_after_erasing_regions(
- goal: ParamEnvAnd<'tcx, mir::ConstantKind<'tcx>>
- ) -> Result<mir::ConstantKind<'tcx>, NoSolution> {
- desc { "normalizing `{}`", goal.value }
- remap_env_constness
- }
-
query implied_outlives_bounds(
goal: CanonicalTyGoal<'tcx>
) -> Result<
&'tcx Canonical<'tcx, canonical::QueryResponse<'tcx, Vec<OutlivesBound<'tcx>>>>,
NoSolution,
> {
- desc { "computing implied outlives bounds for `{:?}`", goal }
+ desc { "computing implied outlives bounds for `{}`", goal.value.value }
remap_env_constness
}
@@ -1836,7 +1881,7 @@ rustc_queries! {
&'tcx Canonical<'tcx, canonical::QueryResponse<'tcx, DropckOutlivesResult<'tcx>>>,
NoSolution,
> {
- desc { "computing dropck types for `{:?}`", goal }
+ desc { "computing dropck types for `{}`", goal.value.value }
remap_env_constness
}
@@ -1864,7 +1909,7 @@ rustc_queries! {
&'tcx Canonical<'tcx, canonical::QueryResponse<'tcx, ()>>,
NoSolution,
> {
- desc { "evaluating `type_op_ascribe_user_type` `{:?}`", goal }
+ desc { "evaluating `type_op_ascribe_user_type` `{:?}`", goal.value.value }
remap_env_constness
}
@@ -1875,7 +1920,7 @@ rustc_queries! {
&'tcx Canonical<'tcx, canonical::QueryResponse<'tcx, ()>>,
NoSolution,
> {
- desc { "evaluating `type_op_eq` `{:?}`", goal }
+ desc { "evaluating `type_op_eq` `{:?}`", goal.value.value }
remap_env_constness
}
@@ -1886,7 +1931,7 @@ rustc_queries! {
&'tcx Canonical<'tcx, canonical::QueryResponse<'tcx, ()>>,
NoSolution,
> {
- desc { "evaluating `type_op_subtype` `{:?}`", goal }
+ desc { "evaluating `type_op_subtype` `{:?}`", goal.value.value }
remap_env_constness
}
@@ -1897,7 +1942,7 @@ rustc_queries! {
&'tcx Canonical<'tcx, canonical::QueryResponse<'tcx, ()>>,
NoSolution,
> {
- desc { "evaluating `type_op_prove_predicate` `{:?}`", goal }
+ desc { "evaluating `type_op_prove_predicate` `{:?}`", goal.value.value }
}
/// Do not call this query directly: part of the `Normalize` type-op
@@ -1907,7 +1952,7 @@ rustc_queries! {
&'tcx Canonical<'tcx, canonical::QueryResponse<'tcx, Ty<'tcx>>>,
NoSolution,
> {
- desc { "normalizing `{:?}`", goal }
+ desc { "normalizing `{}`", goal.value.value.value }
remap_env_constness
}
@@ -1918,7 +1963,7 @@ rustc_queries! {
&'tcx Canonical<'tcx, canonical::QueryResponse<'tcx, ty::Predicate<'tcx>>>,
NoSolution,
> {
- desc { "normalizing `{:?}`", goal }
+ desc { "normalizing `{:?}`", goal.value.value.value }
remap_env_constness
}
@@ -1929,7 +1974,7 @@ rustc_queries! {
&'tcx Canonical<'tcx, canonical::QueryResponse<'tcx, ty::PolyFnSig<'tcx>>>,
NoSolution,
> {
- desc { "normalizing `{:?}`", goal }
+ desc { "normalizing `{:?}`", goal.value.value.value }
remap_env_constness
}
@@ -1940,26 +1985,34 @@ rustc_queries! {
&'tcx Canonical<'tcx, canonical::QueryResponse<'tcx, ty::FnSig<'tcx>>>,
NoSolution,
> {
- desc { "normalizing `{:?}`", goal }
+ desc { "normalizing `{:?}`", goal.value.value.value }
remap_env_constness
}
query subst_and_check_impossible_predicates(key: (DefId, SubstsRef<'tcx>)) -> bool {
desc { |tcx|
- "impossible substituted predicates:`{}`",
+ "checking impossible substituted predicates: `{}`",
tcx.def_path_str(key.0)
}
}
+ query is_impossible_method(key: (DefId, DefId)) -> bool {
+ desc { |tcx|
+ "checking if `{}` is impossible to call within `{}`",
+ tcx.def_path_str(key.1),
+ tcx.def_path_str(key.0),
+ }
+ }
+
query method_autoderef_steps(
goal: CanonicalTyGoal<'tcx>
) -> MethodAutoderefStepsResult<'tcx> {
- desc { "computing autoderef types for `{:?}`", goal }
+ desc { "computing autoderef types for `{}`", goal.value.value }
remap_env_constness
}
query supported_target_features(_: CrateNum) -> FxHashMap<String, Option<Symbol>> {
- storage(ArenaCacheSelector<'tcx>)
+ arena_cache
eval_always
desc { "looking up supported target features" }
}
@@ -2002,7 +2055,7 @@ rustc_queries! {
}
query normalize_opaque_types(key: &'tcx ty::List<ty::Predicate<'tcx>>) -> &'tcx ty::List<ty::Predicate<'tcx>> {
- desc { "normalizing opaque types in {:?}", key }
+ desc { "normalizing opaque types in `{:?}`", key }
}
/// Checks whether a type is definitely uninhabited. This is
@@ -2012,7 +2065,7 @@ rustc_queries! {
/// will be `Abi::Uninhabited`. (Note that uninhabited types may have nonzero
/// size, to account for partial initialisation. See #49298 for details.)
query conservative_is_privately_uninhabited(key: ty::ParamEnvAnd<'tcx, Ty<'tcx>>) -> bool {
- desc { "conservatively checking if {:?} is privately uninhabited", key }
+ desc { "conservatively checking if `{}` is privately uninhabited", key.value }
remap_env_constness
}
@@ -2029,32 +2082,43 @@ rustc_queries! {
/// all of the cases that the normal `ty::Ty`-based wfcheck does. This is fine,
/// because the `ty::Ty`-based wfcheck is always run.
query diagnostic_hir_wf_check(key: (ty::Predicate<'tcx>, traits::WellFormedLoc)) -> Option<traits::ObligationCause<'tcx>> {
- storage(ArenaCacheSelector<'tcx>)
+ arena_cache
eval_always
no_hash
- desc { "performing HIR wf-checking for predicate {:?} at item {:?}", key.0, key.1 }
+ desc { "performing HIR wf-checking for predicate `{:?}` at item `{:?}`", key.0, key.1 }
}
/// The list of backend features computed from CLI flags (`-Ctarget-cpu`, `-Ctarget-feature`,
/// `--target` and similar).
query global_backend_features(_: ()) -> Vec<String> {
- storage(ArenaCacheSelector<'tcx>)
+ arena_cache
eval_always
desc { "computing the backend features for CLI flags" }
}
query generator_diagnostic_data(key: DefId) -> Option<GeneratorDiagnosticData<'tcx>> {
- storage(ArenaCacheSelector<'tcx>)
+ arena_cache
desc { |tcx| "looking up generator diagnostic data of `{}`", tcx.def_path_str(key) }
separate_provide_extern
}
query permits_uninit_init(key: TyAndLayout<'tcx>) -> bool {
- desc { "checking to see if {:?} permits being left uninit", key.ty }
+ desc { "checking to see if `{}` permits being left uninit", key.ty }
}
query permits_zero_init(key: TyAndLayout<'tcx>) -> bool {
- desc { "checking to see if {:?} permits being left zeroed", key.ty }
+ desc { "checking to see if `{}` permits being left zeroed", key.ty }
+ }
+
+ query compare_assoc_const_impl_item_with_trait_item(
+ key: (LocalDefId, DefId)
+ ) -> Result<(), ErrorGuaranteed> {
+ desc { |tcx| "checking assoc const `{}` has the same type as trait item", tcx.def_path_str(key.0.to_def_id()) }
+ }
+
+ query deduced_param_attrs(def_id: DefId) -> &'tcx [ty::DeducedParamAttrs] {
+ desc { |tcx| "deducing parameter attributes for {}", tcx.def_path_str(def_id) }
+ separate_provide_extern
}
}
diff --git a/compiler/rustc_middle/src/thir.rs b/compiler/rustc_middle/src/thir.rs
index b856af1d8..ea7a507d7 100644
--- a/compiler/rustc_middle/src/thir.rs
+++ b/compiler/rustc_middle/src/thir.rs
@@ -15,50 +15,33 @@ use rustc_hir::def_id::DefId;
use rustc_hir::RangeEnd;
use rustc_index::newtype_index;
use rustc_index::vec::IndexVec;
-use rustc_middle::infer::canonical::Canonical;
use rustc_middle::middle::region;
use rustc_middle::mir::interpret::AllocId;
use rustc_middle::mir::{self, BinOp, BorrowKind, FakeReadCause, Field, Mutability, UnOp};
use rustc_middle::ty::adjustment::PointerCast;
use rustc_middle::ty::subst::SubstsRef;
-use rustc_middle::ty::CanonicalUserTypeAnnotation;
-use rustc_middle::ty::{self, AdtDef, Ty, UpvarSubsts, UserType};
-use rustc_span::{Span, Symbol, DUMMY_SP};
+use rustc_middle::ty::{self, AdtDef, Ty, UpvarSubsts};
+use rustc_middle::ty::{CanonicalUserType, CanonicalUserTypeAnnotation};
+use rustc_span::def_id::LocalDefId;
+use rustc_span::{sym, Span, Symbol, DUMMY_SP};
use rustc_target::abi::VariantIdx;
use rustc_target::asm::InlineAsmRegOrRegClass;
-
-use rustc_span::def_id::LocalDefId;
use std::fmt;
use std::ops::Index;
pub mod visit;
-newtype_index! {
- /// An index to an [`Arm`] stored in [`Thir::arms`]
- #[derive(HashStable)]
- pub struct ArmId {
- DEBUG_FORMAT = "a{}"
- }
-}
-
-newtype_index! {
- /// An index to an [`Expr`] stored in [`Thir::exprs`]
- #[derive(HashStable)]
- pub struct ExprId {
- DEBUG_FORMAT = "e{}"
- }
-}
-
-newtype_index! {
- #[derive(HashStable)]
- /// An index to a [`Stmt`] stored in [`Thir::stmts`]
- pub struct StmtId {
- DEBUG_FORMAT = "s{}"
- }
-}
-
macro_rules! thir_with_elements {
- ($($name:ident: $id:ty => $value:ty,)*) => {
+ ($($name:ident: $id:ty => $value:ty => $format:literal,)*) => {
+ $(
+ newtype_index! {
+ #[derive(HashStable)]
+ pub struct $id {
+ DEBUG_FORMAT = $format
+ }
+ }
+ )*
+
/// A container for a THIR body.
///
/// This can be indexed directly by any THIR index (e.g. [`ExprId`]).
@@ -90,10 +73,29 @@ macro_rules! thir_with_elements {
}
}
+pub const UPVAR_ENV_PARAM: ParamId = ParamId::from_u32(0);
+
thir_with_elements! {
- arms: ArmId => Arm<'tcx>,
- exprs: ExprId => Expr<'tcx>,
- stmts: StmtId => Stmt<'tcx>,
+ arms: ArmId => Arm<'tcx> => "a{}",
+ blocks: BlockId => Block => "b{}",
+ exprs: ExprId => Expr<'tcx> => "e{}",
+ stmts: StmtId => Stmt<'tcx> => "s{}",
+ params: ParamId => Param<'tcx> => "p{}",
+}
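
The reworked `thir_with_elements!` now takes the debug-format literal and emits the `newtype_index!` definitions itself, which is why the hand-written `ArmId`/`ExprId`/`StmtId` blocks above disappear and the new `blocks`/`params` entries cost one line each. A rough standalone analog of that shape, using plain `usize` newtypes instead of rustc's `newtype_index!` and invented names throughout:

```rust
macro_rules! body_with_elements {
    ($($field:ident : $id:ident => $value:ty,)*) => {
        $(
            /// Plain-usize stand-in for rustc's `newtype_index!` types.
            #[derive(Copy, Clone, Debug, PartialEq, Eq)]
            pub struct $id(pub usize);
        )*

        /// Container with one vector per element kind, indexable by each id.
        #[derive(Default)]
        pub struct Body {
            $(pub $field: Vec<$value>,)*
        }

        $(
            impl std::ops::Index<$id> for Body {
                type Output = $value;
                fn index(&self, index: $id) -> &Self::Output {
                    &self.$field[index.0]
                }
            }
        )*
    };
}

body_with_elements! {
    exprs: ExprId => String,
    stmts: StmtId => u32,
}

fn main() {
    let mut body = Body::default();
    body.exprs.push("1 + 2".to_string());
    body.stmts.push(42);
    assert_eq!(body[ExprId(0)], "1 + 2");
    assert_eq!(body[StmtId(0)], 42);
}
```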
+
+/// Description of a type-checked function parameter.
+#[derive(Clone, Debug, HashStable)]
+pub struct Param<'tcx> {
+ /// The pattern that appears in the parameter list, or None for implicit parameters.
+ pub pat: Option<Box<Pat<'tcx>>>,
+ /// The possibly inferred type.
+ pub ty: Ty<'tcx>,
+ /// Span of the explicitly provided type, or None if inferred for closures.
+ pub ty_span: Option<Span>,
+ /// Whether this param is `self`, and how it is bound.
+ pub self_kind: Option<hir::ImplicitSelfKind>,
+ /// HirId for lints.
+ pub hir_id: Option<hir::HirId>,
}
#[derive(Copy, Clone, Debug, HashStable)]
@@ -121,8 +123,10 @@ pub struct Block {
pub safety_mode: BlockSafety,
}
+type UserTy<'tcx> = Option<Box<CanonicalUserType<'tcx>>>;
+
#[derive(Clone, Debug, HashStable)]
-pub struct Adt<'tcx> {
+pub struct AdtExpr<'tcx> {
/// The ADT we're constructing.
pub adt_def: AdtDef<'tcx>,
/// The variant of the ADT.
@@ -131,13 +135,30 @@ pub struct Adt<'tcx> {
/// Optional user-given substs: for something like `let x =
/// Bar::<T> { ... }`.
- pub user_ty: Option<Canonical<'tcx, UserType<'tcx>>>,
+ pub user_ty: UserTy<'tcx>,
pub fields: Box<[FieldExpr]>,
/// The base, e.g. `Foo {x: 1, .. base}`.
pub base: Option<FruInfo<'tcx>>,
}
+#[derive(Clone, Debug, HashStable)]
+pub struct ClosureExpr<'tcx> {
+ pub closure_id: LocalDefId,
+ pub substs: UpvarSubsts<'tcx>,
+ pub upvars: Box<[ExprId]>,
+ pub movability: Option<hir::Movability>,
+ pub fake_reads: Vec<(ExprId, FakeReadCause, hir::HirId)>,
+}
+
+#[derive(Clone, Debug, HashStable)]
+pub struct InlineAsmExpr<'tcx> {
+ pub template: &'tcx [InlineAsmTemplatePiece],
+ pub operands: Box<[InlineAsmOperand<'tcx>]>,
+ pub options: InlineAsmOptions,
+ pub line_spans: &'tcx [Span],
+}
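
`Closure` and `InlineAsm` move from inline named fields to `Box<ClosureExpr>` / `Box<InlineAsmExpr>`, and the smaller `Expr`/`ExprKind` sizes asserted at the bottom of this file are the payoff: a large variant otherwise inflates every value of the enum. An illustrative standalone check, with invented types:

```rust
use std::mem::size_of;

#[allow(dead_code)]
struct BigPayload {
    fields: [u64; 6],
}

#[allow(dead_code)]
enum Inline {
    Small(u8),
    Big(BigPayload),
}

#[allow(dead_code)]
enum Boxed {
    Small(u8),
    Big(Box<BigPayload>),
}

fn main() {
    // Every `Inline` value pays for the largest variant; boxing caps it at a pointer.
    assert!(size_of::<Inline>() > size_of::<Boxed>());
    println!(
        "inline variant: {} bytes, boxed variant: {} bytes",
        size_of::<Inline>(),
        size_of::<Boxed>()
    );
}
```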
+
#[derive(Copy, Clone, Debug, HashStable)]
pub enum BlockSafety {
Safe,
@@ -177,13 +198,13 @@ pub enum StmtKind<'tcx> {
/// `let <PAT> = ...`
///
/// If a type annotation is included, it is added as an ascription pattern.
- pattern: Pat<'tcx>,
+ pattern: Box<Pat<'tcx>>,
/// `let pat: ty = <INIT>`
initializer: Option<ExprId>,
- /// `let pat: ty = <INIT> else { <ELSE> }
- else_block: Option<Block>,
+ /// `let pat: ty = <INIT> else { <ELSE> }`
+ else_block: Option<BlockId>,
/// The lint level for this `let` statement.
lint_level: LintLevel,
@@ -298,7 +319,7 @@ pub enum ExprKind<'tcx> {
},
Let {
expr: ExprId,
- pat: Pat<'tcx>,
+ pat: Box<Pat<'tcx>>,
},
/// A `match` expression.
Match {
@@ -307,7 +328,7 @@ pub enum ExprKind<'tcx> {
},
/// A block.
Block {
- body: Block,
+ block: BlockId,
},
/// An assignment: `lhs = rhs`.
Assign {
@@ -387,27 +408,21 @@ pub enum ExprKind<'tcx> {
fields: Box<[ExprId]>,
},
/// An ADT constructor, e.g. `Foo {x: 1, y: 2}`.
- Adt(Box<Adt<'tcx>>),
+ Adt(Box<AdtExpr<'tcx>>),
/// A type ascription on a place.
PlaceTypeAscription {
source: ExprId,
/// Type that the user gave to this expression
- user_ty: Option<Canonical<'tcx, UserType<'tcx>>>,
+ user_ty: UserTy<'tcx>,
},
/// A type ascription on a value, e.g. `42: i32`.
ValueTypeAscription {
source: ExprId,
/// Type that the user gave to this expression
- user_ty: Option<Canonical<'tcx, UserType<'tcx>>>,
+ user_ty: UserTy<'tcx>,
},
/// A closure definition.
- Closure {
- closure_id: LocalDefId,
- substs: UpvarSubsts<'tcx>,
- upvars: Box<[ExprId]>,
- movability: Option<hir::Movability>,
- fake_reads: Vec<(ExprId, FakeReadCause, hir::HirId)>,
- },
+ Closure(Box<ClosureExpr<'tcx>>),
/// A literal.
Literal {
lit: &'tcx hir::Lit,
@@ -416,17 +431,17 @@ pub enum ExprKind<'tcx> {
/// For literals that don't correspond to anything in the HIR
NonHirLiteral {
lit: ty::ScalarInt,
- user_ty: Option<Canonical<'tcx, UserType<'tcx>>>,
+ user_ty: UserTy<'tcx>,
},
/// A literal of a ZST type.
ZstLiteral {
- user_ty: Option<Canonical<'tcx, UserType<'tcx>>>,
+ user_ty: UserTy<'tcx>,
},
/// Associated constants and named constants
NamedConst {
def_id: DefId,
substs: SubstsRef<'tcx>,
- user_ty: Option<Canonical<'tcx, UserType<'tcx>>>,
+ user_ty: UserTy<'tcx>,
},
ConstParam {
param: ty::ParamConst,
@@ -443,12 +458,7 @@ pub enum ExprKind<'tcx> {
def_id: DefId,
},
/// Inline assembly, i.e. `asm!()`.
- InlineAsm {
- template: &'tcx [InlineAsmTemplatePiece],
- operands: Box<[InlineAsmOperand<'tcx>]>,
- options: InlineAsmOptions,
- line_spans: &'tcx [Span],
- },
+ InlineAsm(Box<InlineAsmExpr<'tcx>>),
/// An expression taking a reference to a thread local.
ThreadLocalRef(DefId),
/// A `yield` expression.
@@ -475,7 +485,7 @@ pub struct FruInfo<'tcx> {
/// A `match` arm.
#[derive(Clone, Debug, HashStable)]
pub struct Arm<'tcx> {
- pub pattern: Pat<'tcx>,
+ pub pattern: Box<Pat<'tcx>>,
pub guard: Option<Guard<'tcx>>,
pub body: ExprId,
pub lint_level: LintLevel,
@@ -487,7 +497,7 @@ pub struct Arm<'tcx> {
#[derive(Clone, Debug, HashStable)]
pub enum Guard<'tcx> {
If(ExprId),
- IfLet(Pat<'tcx>, ExprId),
+ IfLet(Box<Pat<'tcx>>, ExprId),
}
#[derive(Copy, Clone, Debug, HashStable)]
@@ -542,19 +552,28 @@ pub enum BindingMode {
#[derive(Clone, Debug, HashStable)]
pub struct FieldPat<'tcx> {
pub field: Field,
- pub pattern: Pat<'tcx>,
+ pub pattern: Box<Pat<'tcx>>,
}
#[derive(Clone, Debug, HashStable)]
pub struct Pat<'tcx> {
pub ty: Ty<'tcx>,
pub span: Span,
- pub kind: Box<PatKind<'tcx>>,
+ pub kind: PatKind<'tcx>,
}
impl<'tcx> Pat<'tcx> {
pub fn wildcard_from_ty(ty: Ty<'tcx>) -> Self {
- Pat { ty, span: DUMMY_SP, kind: Box::new(PatKind::Wild) }
+ Pat { ty, span: DUMMY_SP, kind: PatKind::Wild }
+ }
+
+ pub fn simple_ident(&self) -> Option<Symbol> {
+ match self.kind {
+ PatKind::Binding { name, mode: BindingMode::ByValue, subpattern: None, .. } => {
+ Some(name)
+ }
+ _ => None,
+ }
}
}
@@ -589,7 +608,7 @@ pub enum PatKind<'tcx> {
AscribeUserType {
ascription: Ascription<'tcx>,
- subpattern: Pat<'tcx>,
+ subpattern: Box<Pat<'tcx>>,
},
/// `x`, `ref x`, `x @ P`, etc.
@@ -599,7 +618,7 @@ pub enum PatKind<'tcx> {
mode: BindingMode,
var: LocalVarId,
ty: Ty<'tcx>,
- subpattern: Option<Pat<'tcx>>,
+ subpattern: Option<Box<Pat<'tcx>>>,
/// Is this the leftmost occurrence of the binding, i.e., is `var` the
/// `HirId` of this pattern?
is_primary: bool,
@@ -622,7 +641,7 @@ pub enum PatKind<'tcx> {
/// `box P`, `&P`, `&mut P`, etc.
Deref {
- subpattern: Pat<'tcx>,
+ subpattern: Box<Pat<'tcx>>,
},
/// One of the following:
@@ -636,32 +655,32 @@ pub enum PatKind<'tcx> {
value: mir::ConstantKind<'tcx>,
},
- Range(PatRange<'tcx>),
+ Range(Box<PatRange<'tcx>>),
/// Matches against a slice, checking the length and extracting elements.
/// irrefutable when there is a slice pattern and both `prefix` and `suffix` are empty.
/// e.g., `&[ref xs @ ..]`.
Slice {
- prefix: Vec<Pat<'tcx>>,
- slice: Option<Pat<'tcx>>,
- suffix: Vec<Pat<'tcx>>,
+ prefix: Box<[Box<Pat<'tcx>>]>,
+ slice: Option<Box<Pat<'tcx>>>,
+ suffix: Box<[Box<Pat<'tcx>>]>,
},
/// Fixed match against an array; irrefutable.
Array {
- prefix: Vec<Pat<'tcx>>,
- slice: Option<Pat<'tcx>>,
- suffix: Vec<Pat<'tcx>>,
+ prefix: Box<[Box<Pat<'tcx>>]>,
+ slice: Option<Box<Pat<'tcx>>>,
+ suffix: Box<[Box<Pat<'tcx>>]>,
},
/// An or-pattern, e.g. `p | q`.
/// Invariant: `pats.len() >= 2`.
Or {
- pats: Vec<Pat<'tcx>>,
+ pats: Box<[Box<Pat<'tcx>>]>,
},
}
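
The pattern lists here trade `Vec<Pat>` for `Box<[Box<Pat>]>`: a boxed slice is typically one pointer-width smaller than a `Vec` because it carries no spare capacity, and it makes clear that the list is never grown after construction, while boxing the elements keeps `PatKind` itself small. A quick standalone check of the size point, with placeholder element types:

```rust
use std::mem::size_of;

fn main() {
    // Typically 24 vs 16 bytes on 64-bit: Vec stores pointer + length + capacity,
    // a boxed slice only pointer + length.
    println!("Vec<u32>:   {} bytes", size_of::<Vec<u32>>());
    println!("Box<[u32]>: {} bytes", size_of::<Box<[u32]>>());

    // Fixed after construction and iterated just like the `prefix.iter()`
    // loops further down in this diff.
    let pats: Box<[&str]> = vec!["x", "_", "ref y"].into_boxed_slice();
    for p in pats.iter() {
        println!("{p}");
    }
}
```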
-#[derive(Copy, Clone, Debug, PartialEq, HashStable)]
+#[derive(Clone, Debug, PartialEq, HashStable)]
pub struct PatRange<'tcx> {
pub lo: mir::ConstantKind<'tcx>,
pub hi: mir::ConstantKind<'tcx>,
@@ -682,7 +701,7 @@ impl<'tcx> fmt::Display for Pat<'tcx> {
};
let mut start_or_comma = || start_or_continue(", ");
- match *self.kind {
+ match self.kind {
PatKind::Wild => write!(f, "_"),
PatKind::AscribeUserType { ref subpattern, .. } => write!(f, "{}: _", subpattern),
PatKind::Binding { mutability, name, mode, ref subpattern, .. } => {
@@ -703,17 +722,32 @@ impl<'tcx> fmt::Display for Pat<'tcx> {
Ok(())
}
PatKind::Variant { ref subpatterns, .. } | PatKind::Leaf { ref subpatterns } => {
- let variant = match *self.kind {
- PatKind::Variant { adt_def, variant_index, .. } => {
- Some(adt_def.variant(variant_index))
- }
- _ => self.ty.ty_adt_def().and_then(|adt| {
- if !adt.is_enum() { Some(adt.non_enum_variant()) } else { None }
+ let variant_and_name = match self.kind {
+ PatKind::Variant { adt_def, variant_index, .. } => ty::tls::with(|tcx| {
+ let variant = adt_def.variant(variant_index);
+ let adt_did = adt_def.did();
+ let name = if tcx.get_diagnostic_item(sym::Option) == Some(adt_did)
+ || tcx.get_diagnostic_item(sym::Result) == Some(adt_did)
+ {
+ variant.name.to_string()
+ } else {
+ format!("{}::{}", tcx.def_path_str(adt_def.did()), variant.name)
+ };
+ Some((variant, name))
+ }),
+ _ => self.ty.ty_adt_def().and_then(|adt_def| {
+ if !adt_def.is_enum() {
+ ty::tls::with(|tcx| {
+ Some((adt_def.non_enum_variant(), tcx.def_path_str(adt_def.did())))
+ })
+ } else {
+ None
+ }
}),
};
- if let Some(variant) = variant {
- write!(f, "{}", variant.name)?;
+ if let Some((variant, name)) = &variant_and_name {
+ write!(f, "{}", name)?;
// Only for Adt we can have `S {...}`,
// which we handle separately here.
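
The `Display` impl now reaches for `ty::tls::with` so it can print crate-qualified variant names without threading a `TyCtxt` through `fmt`. A minimal standalone analog of that scoped thread-local pattern; all names below are invented, and rustc's real `ty::tls` is considerably more involved:

```rust
use std::cell::Cell;

thread_local! {
    static CONTEXT: Cell<Option<&'static Context>> = Cell::new(None);
}

struct Context {
    crate_name: &'static str,
}

/// Install a context for the duration of `f`, making it ambient.
fn enter<R>(cx: &'static Context, f: impl FnOnce() -> R) -> R {
    CONTEXT.with(|slot| slot.set(Some(cx)));
    let result = f();
    CONTEXT.with(|slot| slot.set(None));
    result
}

/// Borrow the ambient context, in the spirit of `ty::tls::with` above.
fn with<R>(f: impl FnOnce(&Context) -> R) -> R {
    CONTEXT.with(|slot| f(slot.get().expect("no context installed")))
}

fn describe_variant(variant: &str) -> String {
    // Deep inside formatting code, with no context parameter in sight.
    with(|cx| format!("{}::{}", cx.crate_name, variant))
}

fn main() {
    static CX: Context = Context { crate_name: "my_crate" };
    let rendered = enter(&CX, || describe_variant("Option::Some"));
    assert_eq!(rendered, "my_crate::Option::Some");
}
```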
@@ -722,7 +756,7 @@ impl<'tcx> fmt::Display for Pat<'tcx> {
let mut printed = 0;
for p in subpatterns {
- if let PatKind::Wild = *p.pattern.kind {
+ if let PatKind::Wild = p.pattern.kind {
continue;
}
let name = variant.fields[p.field.index()].name;
@@ -738,8 +772,9 @@ impl<'tcx> fmt::Display for Pat<'tcx> {
}
}
- let num_fields = variant.map_or(subpatterns.len(), |v| v.fields.len());
- if num_fields != 0 || variant.is_none() {
+ let num_fields =
+ variant_and_name.as_ref().map_or(subpatterns.len(), |(v, _)| v.fields.len());
+ if num_fields != 0 || variant_and_name.is_none() {
write!(f, "(")?;
for i in 0..num_fields {
write!(f, "{}", start_or_comma())?;
@@ -775,7 +810,7 @@ impl<'tcx> fmt::Display for Pat<'tcx> {
write!(f, "{}", subpattern)
}
PatKind::Constant { value } => write!(f, "{}", value),
- PatKind::Range(PatRange { lo, hi, end }) => {
+ PatKind::Range(box PatRange { lo, hi, end }) => {
write!(f, "{}", lo)?;
write!(f, "{}", end)?;
write!(f, "{}", hi)
@@ -783,24 +818,24 @@ impl<'tcx> fmt::Display for Pat<'tcx> {
PatKind::Slice { ref prefix, ref slice, ref suffix }
| PatKind::Array { ref prefix, ref slice, ref suffix } => {
write!(f, "[")?;
- for p in prefix {
+ for p in prefix.iter() {
write!(f, "{}{}", start_or_comma(), p)?;
}
if let Some(ref slice) = *slice {
write!(f, "{}", start_or_comma())?;
- match *slice.kind {
+ match slice.kind {
PatKind::Wild => {}
_ => write!(f, "{}", slice)?,
}
write!(f, "..")?;
}
- for p in suffix {
+ for p in suffix.iter() {
write!(f, "{}{}", start_or_comma(), p)?;
}
write!(f, "]")
}
PatKind::Or { ref pats } => {
- for pat in pats {
+ for pat in pats.iter() {
write!(f, "{}{}", start_or_continue(" | "), pat)?;
}
Ok(())
@@ -813,9 +848,13 @@ impl<'tcx> fmt::Display for Pat<'tcx> {
#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
mod size_asserts {
use super::*;
- // These are in alphabetical order, which is easy to maintain.
- rustc_data_structures::static_assert_size!(Block, 56);
- rustc_data_structures::static_assert_size!(Expr<'_>, 104);
- rustc_data_structures::static_assert_size!(Pat<'_>, 24);
- rustc_data_structures::static_assert_size!(Stmt<'_>, 120);
+ // tidy-alphabetical-start
+ static_assert_size!(Block, 56);
+ static_assert_size!(Expr<'_>, 64);
+ static_assert_size!(ExprKind<'_>, 40);
+ static_assert_size!(Pat<'_>, 72);
+ static_assert_size!(PatKind<'_>, 56);
+ static_assert_size!(Stmt<'_>, 48);
+ static_assert_size!(StmtKind<'_>, 40);
+ // tidy-alphabetical-end
}
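
The rewritten assertions, now via a `static_assert_size!` presumably imported elsewhere in the file plus tidy-alphabetical markers, pin the new smaller sizes at compile time so a regression fails the build. A plain-`const` standalone version of the same trick; the `Pat` stand-in below is invented and simply happens to be 72 bytes:

```rust
// Sizes assume a 64-bit target, matching the `cfg` guard above.
macro_rules! assert_size {
    ($ty:ty, $size:expr) => {
        // Mismatched array lengths make this fail to compile.
        const _: [(); $size] = [(); std::mem::size_of::<$ty>()];
    };
}

#[allow(dead_code)]
struct Pat {
    ty: u64,
    span: (u32, u32),
    kind: [u64; 7], // invented payload that happens to be PatKind-sized
}

assert_size!(Pat, 72);
assert_size!(Option<Box<Pat>>, 8); // the boxed form stays one pointer wide

fn main() {
    println!("size assertions held at compile time");
}
```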
diff --git a/compiler/rustc_middle/src/thir/visit.rs b/compiler/rustc_middle/src/thir/visit.rs
index 97249fdd1..79a0e75aa 100644
--- a/compiler/rustc_middle/src/thir/visit.rs
+++ b/compiler/rustc_middle/src/thir/visit.rs
@@ -1,5 +1,6 @@
use super::{
- Arm, Block, Expr, ExprKind, Guard, InlineAsmOperand, Pat, PatKind, Stmt, StmtKind, Thir,
+ AdtExpr, Arm, Block, ClosureExpr, Expr, ExprKind, Guard, InlineAsmExpr, InlineAsmOperand, Pat,
+ PatKind, Stmt, StmtKind, Thir,
};
pub trait Visitor<'a, 'tcx: 'a>: Sized {
@@ -75,7 +76,7 @@ pub fn walk_expr<'a, 'tcx: 'a, V: Visitor<'a, 'tcx>>(visitor: &mut V, expr: &Exp
visitor.visit_arm(&visitor.thir()[arm]);
}
}
- Block { ref body } => visitor.visit_block(body),
+ Block { block } => visitor.visit_block(&visitor.thir()[block]),
Assign { lhs, rhs } | AssignOp { lhs, rhs, op: _ } => {
visitor.visit_expr(&visitor.thir()[lhs]);
visitor.visit_expr(&visitor.thir()[rhs]);
@@ -108,7 +109,7 @@ pub fn walk_expr<'a, 'tcx: 'a, V: Visitor<'a, 'tcx>>(visitor: &mut V, expr: &Exp
visitor.visit_expr(&visitor.thir()[field]);
}
}
- Adt(box crate::thir::Adt {
+ Adt(box AdtExpr {
ref fields,
ref base,
adt_def: _,
@@ -126,14 +127,20 @@ pub fn walk_expr<'a, 'tcx: 'a, V: Visitor<'a, 'tcx>>(visitor: &mut V, expr: &Exp
PlaceTypeAscription { source, user_ty: _ } | ValueTypeAscription { source, user_ty: _ } => {
visitor.visit_expr(&visitor.thir()[source])
}
- Closure { closure_id: _, substs: _, upvars: _, movability: _, fake_reads: _ } => {}
+ Closure(box ClosureExpr {
+ closure_id: _,
+ substs: _,
+ upvars: _,
+ movability: _,
+ fake_reads: _,
+ }) => {}
Literal { lit: _, neg: _ } => {}
NonHirLiteral { lit: _, user_ty: _ } => {}
ZstLiteral { user_ty: _ } => {}
NamedConst { def_id: _, substs: _, user_ty: _ } => {}
ConstParam { param: _, def_id: _ } => {}
StaticRef { alloc_id: _, ty: _, def_id: _ } => {}
- InlineAsm { ref operands, template: _, options: _, line_spans: _ } => {
+ InlineAsm(box InlineAsmExpr { ref operands, template: _, options: _, line_spans: _ }) => {
for op in &**operands {
use InlineAsmOperand::*;
match op {
@@ -174,7 +181,7 @@ pub fn walk_stmt<'a, 'tcx: 'a, V: Visitor<'a, 'tcx>>(visitor: &mut V, stmt: &Stm
}
visitor.visit_pat(pattern);
if let Some(block) = else_block {
- visitor.visit_block(block)
+ visitor.visit_block(&visitor.thir()[*block])
}
}
}
@@ -204,7 +211,7 @@ pub fn walk_arm<'a, 'tcx: 'a, V: Visitor<'a, 'tcx>>(visitor: &mut V, arm: &Arm<'
pub fn walk_pat<'a, 'tcx: 'a, V: Visitor<'a, 'tcx>>(visitor: &mut V, pat: &Pat<'tcx>) {
use PatKind::*;
- match pat.kind.as_ref() {
+ match &pat.kind {
AscribeUserType { subpattern, ascription: _ }
| Deref { subpattern }
| Binding {
@@ -225,18 +232,18 @@ pub fn walk_pat<'a, 'tcx: 'a, V: Visitor<'a, 'tcx>>(visitor: &mut V, pat: &Pat<'
Constant { value: _ } => {}
Range(_) => {}
Slice { prefix, slice, suffix } | Array { prefix, slice, suffix } => {
- for subpattern in prefix {
+ for subpattern in prefix.iter() {
visitor.visit_pat(&subpattern);
}
if let Some(pat) = slice {
- visitor.visit_pat(pat);
+ visitor.visit_pat(&pat);
}
- for subpattern in suffix {
+ for subpattern in suffix.iter() {
visitor.visit_pat(&subpattern);
}
}
Or { pats } => {
- for pat in pats {
+ for pat in pats.iter() {
visitor.visit_pat(&pat);
}
}
diff --git a/compiler/rustc_middle/src/traits/mod.rs b/compiler/rustc_middle/src/traits/mod.rs
index 72b848c3e..e73d44bbb 100644
--- a/compiler/rustc_middle/src/traits/mod.rs
+++ b/compiler/rustc_middle/src/traits/mod.rs
@@ -10,9 +10,10 @@ mod structural_impls;
pub mod util;
use crate::infer::canonical::Canonical;
+use crate::mir::ConstraintCategory;
use crate::ty::abstract_const::NotConstEvaluatable;
use crate::ty::subst::SubstsRef;
-use crate::ty::{self, AdtKind, Predicate, Ty, TyCtxt};
+use crate::ty::{self, AdtKind, Ty, TyCtxt};
use rustc_data_structures::sync::Lrc;
use rustc_errors::{Applicability, Diagnostic};
@@ -183,6 +184,16 @@ impl<'tcx> ObligationCause<'tcx> {
variant(DerivedObligationCause { parent_trait_pred, parent_code: self.code }).into();
self
}
+
+ pub fn to_constraint_category(&self) -> ConstraintCategory<'tcx> {
+ match self.code() {
+ MatchImpl(cause, _) => cause.to_constraint_category(),
+ AscribeUserTypeProvePredicate(predicate_span) => {
+ ConstraintCategory::Predicate(*predicate_span)
+ }
+ _ => ConstraintCategory::BoringNoLocation,
+ }
+ }
}
#[derive(Clone, Debug, PartialEq, Eq, Hash, Lift)]
@@ -234,13 +245,23 @@ pub enum ObligationCauseCode<'tcx> {
/// This is the trait reference from the given projection.
ProjectionWf(ty::ProjectionTy<'tcx>),
- /// In an impl of trait `X` for type `Y`, type `Y` must
- /// also implement all supertraits of `X`.
+ /// Must satisfy all of the where-clause predicates of the
+ /// given item.
ItemObligation(DefId),
- /// Like `ItemObligation`, but with extra detail on the source of the obligation.
+ /// Like `ItemObligation`, but carries the span of the
+ /// predicate when it can be identified.
BindingObligation(DefId, Span),
+ /// Like `ItemObligation`, but carries the `HirId` of the
+ /// expression that caused the obligation, and the `usize`
+ /// indicates exactly which predicate it is in the list of
+ /// instantiated predicates.
+ ExprItemObligation(DefId, rustc_hir::HirId, usize),
+
+ /// Combines `ExprItemObligation` and `BindingObligation`.
+ ExprBindingObligation(DefId, Span, rustc_hir::HirId, usize),
+
/// A type like `&'a T` is WF only if `T: 'a`.
ReferenceOutlivesReferent(Ty<'tcx>),
@@ -406,8 +427,10 @@ pub enum ObligationCauseCode<'tcx> {
BinOp {
rhs_span: Option<Span>,
is_lit: bool,
- output_pred: Option<Predicate<'tcx>>,
+ output_ty: Option<Ty<'tcx>>,
},
+
+ AscribeUserTypeProvePredicate(Span),
}
/// The 'location' at which we try to perform HIR-based wf checking.
@@ -459,6 +482,13 @@ impl<'tcx> ObligationCauseCode<'tcx> {
_ => None,
}
}
+
+ pub fn peel_match_impls(&self) -> &Self {
+ match self {
+ MatchImpl(cause, _) => cause.code(),
+ _ => self,
+ }
+ }
}
// `ObligationCauseCode` is used a lot. Make sure it doesn't unintentionally get bigger.
@@ -568,11 +598,6 @@ pub type SelectionResult<'tcx, T> = Result<Option<T>, SelectionError<'tcx>>;
/// // type parameters, ImplSource will carry resolutions for those as well:
/// concrete.clone(); // ImplSource(Impl_1, [ImplSource(Impl_2, [ImplSource(Impl_3)])])
///
-/// // Case A: ImplSource points at a specific impl. Only possible when
-/// // type is concretely known. If the impl itself has bounded
-/// // type parameters, ImplSource will carry resolutions for those as well:
-/// concrete.clone(); // ImplSource(Impl_1, [ImplSource(Impl_2, [ImplSource(Impl_3)])])
-///
/// // Case B: ImplSource must be provided by caller. This applies when
/// // type is a type parameter.
/// param.clone(); // ImplSource::Param
@@ -648,7 +673,7 @@ impl<'tcx, N> ImplSource<'tcx, N> {
ImplSource::Object(d) => d.nested,
ImplSource::FnPointer(d) => d.nested,
ImplSource::DiscriminantKind(ImplSourceDiscriminantKindData)
- | ImplSource::Pointee(ImplSourcePointeeData) => Vec::new(),
+ | ImplSource::Pointee(ImplSourcePointeeData) => vec![],
ImplSource::TraitAlias(d) => d.nested,
ImplSource::TraitUpcasting(d) => d.nested,
ImplSource::ConstDestruct(i) => i.nested,
@@ -893,6 +918,12 @@ impl ObjectSafetyViolation {
}
ObjectSafetyViolation::Method(
name,
+ MethodViolationCode::ReferencesImplTraitInTrait,
+ _,
+ ) => format!("method `{}` references an `impl Trait` type in its return type", name)
+ .into(),
+ ObjectSafetyViolation::Method(
+ name,
MethodViolationCode::WhereClauseReferencesSelf,
_,
) => {
@@ -997,6 +1028,9 @@ pub enum MethodViolationCode {
/// e.g., `fn foo(&self) -> Self`
ReferencesSelfOutput,
+ /// e.g., `fn foo(&self) -> impl Sized`
+ ReferencesImplTraitInTrait,
+
/// e.g., `fn foo(&self) where Self: Clone`
WhereClauseReferencesSelf,
@@ -1007,7 +1041,7 @@ pub enum MethodViolationCode {
UndispatchableReceiver(Option<Span>),
}
-/// These are the error cases for `codegen_fulfill_obligation`.
+/// These are the error cases for `codegen_select_candidate`.
#[derive(Copy, Clone, Debug, Hash, HashStable, Encodable, Decodable)]
pub enum CodegenObligationError {
/// Ambiguity can happen when monomorphizing during trans
diff --git a/compiler/rustc_middle/src/traits/query.rs b/compiler/rustc_middle/src/traits/query.rs
index 1f9b474ad..fb152b63f 100644
--- a/compiler/rustc_middle/src/traits/query.rs
+++ b/compiler/rustc_middle/src/traits/query.rs
@@ -5,11 +5,12 @@
//! The providers for the queries defined here can be found in
//! `rustc_traits`.
+use crate::error::DropCheckOverflow;
use crate::infer::canonical::{Canonical, QueryResponse};
use crate::ty::error::TypeError;
-use crate::ty::subst::GenericArg;
+use crate::ty::subst::{GenericArg, SubstsRef};
use crate::ty::{self, Ty, TyCtxt};
-use rustc_errors::struct_span_err;
+use rustc_hir::def_id::DefId;
use rustc_span::source_map::Span;
use std::iter::FromIterator;
@@ -117,15 +118,7 @@ pub struct DropckOutlivesResult<'tcx> {
impl<'tcx> DropckOutlivesResult<'tcx> {
pub fn report_overflows(&self, tcx: TyCtxt<'tcx>, span: Span, ty: Ty<'tcx>) {
if let Some(overflow_ty) = self.overflows.get(0) {
- let mut err = struct_span_err!(
- tcx.sess,
- span,
- E0320,
- "overflow while adding drop-check rules for {}",
- ty,
- );
- err.note(&format!("overflowed on {}", overflow_ty));
- err.emit();
+ tcx.sess.emit_err(DropCheckOverflow { span, ty, overflow_ty: *overflow_ty });
}
}
@@ -227,4 +220,5 @@ pub enum OutlivesBound<'tcx> {
RegionSubRegion(ty::Region<'tcx>, ty::Region<'tcx>),
RegionSubParam(ty::Region<'tcx>, ty::ParamTy),
RegionSubProjection(ty::Region<'tcx>, ty::ProjectionTy<'tcx>),
+ RegionSubOpaque(ty::Region<'tcx>, DefId, SubstsRef<'tcx>),
}
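
The `report_overflows` hunk above replaces an inline `struct_span_err!`/`note`/`emit` sequence with a single `emit_err` call on a dedicated `DropCheckOverflow` struct. A standalone sketch of that pattern with local stand-in types, not rustc's `Session` or its diagnostic derive macros:

type Span = std::ops::Range<usize>;

struct DropCheckOverflow<'a> {
    span: Span,
    ty: &'a str,
    overflow_ty: &'a str,
}

trait EmitErr {
    fn emit_err(&self, err: DropCheckOverflow<'_>);
}

struct Session;

impl EmitErr for Session {
    fn emit_err(&self, err: DropCheckOverflow<'_>) {
        // all of the message's data comes from the struct's fields
        eprintln!(
            "error[E0320]: overflow while adding drop-check rules for {} at {:?}\n  note: overflowed on {}",
            err.ty, err.span, err.overflow_ty
        );
    }
}

fn main() {
    Session.emit_err(DropCheckOverflow { span: 10..14, ty: "Foo<T>", overflow_ty: "T" });
}
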
diff --git a/compiler/rustc_middle/src/traits/select.rs b/compiler/rustc_middle/src/traits/select.rs
index e836ba47e..85ead3171 100644
--- a/compiler/rustc_middle/src/traits/select.rs
+++ b/compiler/rustc_middle/src/traits/select.rs
@@ -115,12 +115,13 @@ pub enum SelectionCandidate<'tcx> {
ParamCandidate(ty::PolyTraitPredicate<'tcx>),
ImplCandidate(DefId),
- AutoImplCandidate(DefId),
+ AutoImplCandidate,
/// This is a trait matching with a projected type as `Self`, and we found
/// an applicable bound in the trait definition. The `usize` is an index
- /// into the list returned by `tcx.item_bounds`.
- ProjectionCandidate(usize),
+ /// into the list returned by `tcx.item_bounds`. The constness is the
+ /// constness of the bound in the trait.
+ ProjectionCandidate(usize, ty::BoundConstness),
/// Implementation of a `Fn`-family trait by one of the anonymous types
/// generated for an `||` expression.
@@ -142,7 +143,7 @@ pub enum SelectionCandidate<'tcx> {
/// Builtin implementation of `Pointee`.
PointeeCandidate,
- TraitAliasCandidate(DefId),
+ TraitAliasCandidate,
/// Matching `dyn Trait` with a supertrait of `Trait`. The index is the
/// position in the iterator returned by
diff --git a/compiler/rustc_middle/src/traits/specialization_graph.rs b/compiler/rustc_middle/src/traits/specialization_graph.rs
index 2465f8e25..0a2819fee 100644
--- a/compiler/rustc_middle/src/traits/specialization_graph.rs
+++ b/compiler/rustc_middle/src/traits/specialization_graph.rs
@@ -115,7 +115,7 @@ impl Node {
matches!(self, Node::Trait(..))
}
- /// Trys to find the associated item that implements `trait_item_def_id`
+ /// Tries to find the associated item that implements `trait_item_def_id`
/// defined in this node.
///
/// If this returns `None`, the item can potentially still be found in
diff --git a/compiler/rustc_middle/src/ty/abstract_const.rs b/compiler/rustc_middle/src/ty/abstract_const.rs
index bed809930..1aa4df778 100644
--- a/compiler/rustc_middle/src/ty/abstract_const.rs
+++ b/compiler/rustc_middle/src/ty/abstract_const.rs
@@ -1,7 +1,7 @@
//! A subset of a mir body used for const evaluatability checking.
use crate::mir;
use crate::ty::visit::TypeVisitable;
-use crate::ty::{self, subst::Subst, DelaySpanBugEmitted, EarlyBinder, SubstsRef, Ty, TyCtxt};
+use crate::ty::{self, DelaySpanBugEmitted, EarlyBinder, SubstsRef, Ty, TyCtxt};
use rustc_errors::ErrorGuaranteed;
use rustc_hir::def_id::DefId;
use std::cmp;
@@ -30,7 +30,7 @@ pub struct AbstractConst<'tcx> {
impl<'tcx> AbstractConst<'tcx> {
pub fn new(
tcx: TyCtxt<'tcx>,
- uv: ty::Unevaluated<'tcx, ()>,
+ uv: ty::UnevaluatedConst<'tcx>,
) -> Result<Option<AbstractConst<'tcx>>, ErrorGuaranteed> {
let inner = tcx.thir_abstract_const_opt_const_arg(uv.def)?;
debug!("AbstractConst::new({:?}) = {:?}", uv, inner);
@@ -42,7 +42,7 @@ impl<'tcx> AbstractConst<'tcx> {
ct: ty::Const<'tcx>,
) -> Result<Option<AbstractConst<'tcx>>, ErrorGuaranteed> {
match ct.kind() {
- ty::ConstKind::Unevaluated(uv) => AbstractConst::new(tcx, uv.shrink()),
+ ty::ConstKind::Unevaluated(uv) => AbstractConst::new(tcx, uv),
ty::ConstKind::Error(DelaySpanBugEmitted { reported, .. }) => Err(reported),
_ => Ok(None),
}
@@ -71,16 +71,16 @@ impl<'tcx> AbstractConst<'tcx> {
walk_abstract_const::<!, _>(tcx, self, |node| {
match node.root(tcx) {
Node::Leaf(leaf) => {
- if leaf.has_infer_types_or_consts() {
+ if leaf.has_non_region_infer() {
failure_kind = FailureKind::MentionsInfer;
- } else if leaf.has_param_types_or_consts() {
+ } else if leaf.has_non_region_param() {
failure_kind = cmp::min(failure_kind, FailureKind::MentionsParam);
}
}
Node::Cast(_, _, ty) => {
- if ty.has_infer_types_or_consts() {
+ if ty.has_non_region_infer() {
failure_kind = FailureKind::MentionsInfer;
- } else if ty.has_param_types_or_consts() {
+ } else if ty.has_non_region_param() {
failure_kind = cmp::min(failure_kind, FailureKind::MentionsParam);
}
}
diff --git a/compiler/rustc_middle/src/ty/adjustment.rs b/compiler/rustc_middle/src/ty/adjustment.rs
index d36cf2fe3..4682ac96b 100644
--- a/compiler/rustc_middle/src/ty/adjustment.rs
+++ b/compiler/rustc_middle/src/ty/adjustment.rs
@@ -77,7 +77,7 @@ pub enum PointerCast {
/// At some point, of course, `Box` should move out of the compiler, in which
/// case this is analogous to transforming a struct. E.g., `Box<[i32; 4]>` ->
/// `Box<[i32]>` is an `Adjust::Unsize` with the target `Box<[i32]>`.
-#[derive(Clone, TyEncodable, TyDecodable, HashStable, TypeFoldable, TypeVisitable)]
+#[derive(Clone, TyEncodable, TyDecodable, HashStable, TypeFoldable, TypeVisitable, Lift)]
pub struct Adjustment<'tcx> {
pub kind: Adjust<'tcx>,
pub target: Ty<'tcx>,
@@ -89,7 +89,7 @@ impl<'tcx> Adjustment<'tcx> {
}
}
-#[derive(Clone, Debug, TyEncodable, TyDecodable, HashStable, TypeFoldable, TypeVisitable)]
+#[derive(Clone, Debug, TyEncodable, TyDecodable, HashStable, TypeFoldable, TypeVisitable, Lift)]
pub enum Adjust<'tcx> {
/// Go from ! to any type.
NeverToAny,
@@ -101,6 +101,9 @@ pub enum Adjust<'tcx> {
Borrow(AutoBorrow<'tcx>),
Pointer(PointerCast),
+
+ /// Cast into a dyn* object.
+ DynStar,
}
/// An overloaded autoderef step, representing a `Deref(Mut)::deref(_mut)`
@@ -108,7 +111,7 @@ pub enum Adjust<'tcx> {
/// The target type is `U` in both cases, with the region and mutability
/// being those shared by both the receiver and the returned reference.
#[derive(Copy, Clone, PartialEq, Debug, TyEncodable, TyDecodable, HashStable)]
-#[derive(TypeFoldable, TypeVisitable)]
+#[derive(TypeFoldable, TypeVisitable, Lift)]
pub struct OverloadedDeref<'tcx> {
pub region: ty::Region<'tcx>,
pub mutbl: hir::Mutability,
@@ -167,7 +170,7 @@ impl From<AutoBorrowMutability> for hir::Mutability {
}
#[derive(Copy, Clone, PartialEq, Debug, TyEncodable, TyDecodable, HashStable)]
-#[derive(TypeFoldable, TypeVisitable)]
+#[derive(TypeFoldable, TypeVisitable, Lift)]
pub enum AutoBorrow<'tcx> {
/// Converts from T to &T.
Ref(ty::Region<'tcx>, AutoBorrowMutability),
diff --git a/compiler/rustc_middle/src/ty/adt.rs b/compiler/rustc_middle/src/ty/adt.rs
index 2e596b275..b0a2412ab 100644
--- a/compiler/rustc_middle/src/ty/adt.rs
+++ b/compiler/rustc_middle/src/ty/adt.rs
@@ -26,9 +26,6 @@ use super::{
Destructor, FieldDef, GenericPredicates, ReprOptions, Ty, TyCtxt, VariantDef, VariantDiscr,
};
-#[derive(Copy, Clone, HashStable, Debug)]
-pub struct AdtSizedConstraint<'tcx>(pub &'tcx [Ty<'tcx>]);
-
bitflags! {
#[derive(HashStable, TyEncodable, TyDecodable)]
pub struct AdtFlags: u32 {
@@ -332,13 +329,13 @@ impl<'tcx> AdtDef<'tcx> {
self.flags().contains(AdtFlags::IS_PHANTOM_DATA)
}
- /// Returns `true` if this is Box<T>.
+ /// Returns `true` if this is `Box<T>`.
#[inline]
pub fn is_box(self) -> bool {
self.flags().contains(AdtFlags::IS_BOX)
}
- /// Returns `true` if this is UnsafeCell<T>.
+ /// Returns `true` if this is `UnsafeCell<T>`.
#[inline]
pub fn is_unsafe_cell(self) -> bool {
self.flags().contains(AdtFlags::IS_UNSAFE_CELL)
@@ -438,7 +435,8 @@ impl<'tcx> AdtDef<'tcx> {
| Res::Def(DefKind::Union, _)
| Res::Def(DefKind::TyAlias, _)
| Res::Def(DefKind::AssocTy, _)
- | Res::SelfTy { .. }
+ | Res::SelfTyParam { .. }
+ | Res::SelfTyAlias { .. }
| Res::SelfCtor(..) => self.non_enum_variant(),
_ => bug!("unexpected res {:?} in variant_of_res", res),
}
@@ -457,11 +455,9 @@ impl<'tcx> AdtDef<'tcx> {
Some(Discr { val: b, ty })
} else {
info!("invalid enum discriminant: {:#?}", val);
- crate::mir::interpret::struct_error(
- tcx.at(tcx.def_span(expr_did)),
- "constant evaluation of enum discriminant resulted in non-integer",
- )
- .emit();
+ tcx.sess.emit_err(crate::error::ConstEvalNonIntError {
+ span: tcx.def_span(expr_did),
+ });
None
}
}
@@ -564,6 +560,13 @@ impl<'tcx> AdtDef<'tcx> {
/// Due to normalization being eager, this applies even if
/// the associated type is behind a pointer (e.g., issue #31299).
pub fn sized_constraint(self, tcx: TyCtxt<'tcx>) -> ty::EarlyBinder<&'tcx [Ty<'tcx>]> {
- ty::EarlyBinder(tcx.adt_sized_constraint(self.did()).0)
+ ty::EarlyBinder(tcx.adt_sized_constraint(self.did()))
}
}
+
+#[derive(Clone, Copy, Debug)]
+#[derive(HashStable)]
+pub enum Representability {
+ Representable,
+ Infinite,
+}
diff --git a/compiler/rustc_middle/src/ty/assoc.rs b/compiler/rustc_middle/src/ty/assoc.rs
index c97156ac1..55ee5bd2f 100644
--- a/compiler/rustc_middle/src/ty/assoc.rs
+++ b/compiler/rustc_middle/src/ty/assoc.rs
@@ -42,7 +42,7 @@ impl AssocItem {
}
#[inline]
- pub fn visibility(&self, tcx: TyCtxt<'_>) -> Visibility {
+ pub fn visibility(&self, tcx: TyCtxt<'_>) -> Visibility<DefId> {
tcx.visibility(self.def_id)
}
diff --git a/compiler/rustc_middle/src/ty/binding.rs b/compiler/rustc_middle/src/ty/binding.rs
index 3d65429f2..a5b05a4f9 100644
--- a/compiler/rustc_middle/src/ty/binding.rs
+++ b/compiler/rustc_middle/src/ty/binding.rs
@@ -1,6 +1,4 @@
-use rustc_hir::BindingAnnotation;
-use rustc_hir::BindingAnnotation::*;
-use rustc_hir::Mutability;
+use rustc_hir::{BindingAnnotation, ByRef, Mutability};
#[derive(Clone, PartialEq, TyEncodable, TyDecodable, Debug, Copy, HashStable)]
pub enum BindingMode {
@@ -11,12 +9,10 @@ pub enum BindingMode {
TrivialTypeTraversalAndLiftImpls! { BindingMode, }
impl BindingMode {
- pub fn convert(ba: BindingAnnotation) -> BindingMode {
- match ba {
- Unannotated => BindingMode::BindByValue(Mutability::Not),
- Mutable => BindingMode::BindByValue(Mutability::Mut),
- Ref => BindingMode::BindByReference(Mutability::Not),
- RefMut => BindingMode::BindByReference(Mutability::Mut),
+ pub fn convert(BindingAnnotation(by_ref, mutbl): BindingAnnotation) -> BindingMode {
+ match by_ref {
+ ByRef::No => BindingMode::BindByValue(mutbl),
+ ByRef::Yes => BindingMode::BindByReference(mutbl),
}
}
}
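
The rewritten `BindingMode::convert` above relies on `BindingAnnotation` now being a `(ByRef, Mutability)` pair, so only the by-ref half needs matching and the mutability passes through unchanged. A self-contained model with local stand-in types:

#[derive(Clone, Copy, Debug, PartialEq)]
enum Mutability { Not, Mut }

#[derive(Clone, Copy)]
enum ByRef { No, Yes }

// stand-in for `rustc_hir::BindingAnnotation`, now a (by-ref, mutability) pair
struct BindingAnnotation(ByRef, Mutability);

#[derive(Debug, PartialEq)]
enum BindingMode {
    BindByValue(Mutability),
    BindByReference(Mutability),
}

impl BindingMode {
    fn convert(BindingAnnotation(by_ref, mutbl): BindingAnnotation) -> BindingMode {
        // only the by-ref half is matched; the mutability passes straight through
        match by_ref {
            ByRef::No => BindingMode::BindByValue(mutbl),
            ByRef::Yes => BindingMode::BindByReference(mutbl),
        }
    }
}

fn main() {
    // `ref mut x` binds by reference, mutably
    let a = BindingMode::convert(BindingAnnotation(ByRef::Yes, Mutability::Mut));
    // plain `x` binds by value, immutably
    let b = BindingMode::convert(BindingAnnotation(ByRef::No, Mutability::Not));
    assert_eq!(a, BindingMode::BindByReference(Mutability::Mut));
    assert_eq!(b, BindingMode::BindByValue(Mutability::Not));
}
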
diff --git a/compiler/rustc_middle/src/ty/cast.rs b/compiler/rustc_middle/src/ty/cast.rs
index c4b743dd4..e65585955 100644
--- a/compiler/rustc_middle/src/ty/cast.rs
+++ b/compiler/rustc_middle/src/ty/cast.rs
@@ -2,6 +2,7 @@
// typeck and codegen.
use crate::ty::{self, Ty};
+use rustc_middle::mir;
use rustc_macros::HashStable;
@@ -33,10 +34,12 @@ pub enum CastTy<'tcx> {
FnPtr,
/// Raw pointers.
Ptr(ty::TypeAndMut<'tcx>),
+ /// Casting into a `dyn*` value.
+ DynStar,
}
/// Cast Kind. See [RFC 401](https://rust-lang.github.io/rfcs/0401-coercions.html)
-/// (or librustc_typeck/check/cast.rs).
+/// (or rustc_hir_analysis/check/cast.rs).
#[derive(Copy, Clone, Debug, TyEncodable, TyDecodable, HashStable)]
pub enum CastKind {
CoercionCast,
@@ -50,6 +53,7 @@ pub enum CastKind {
ArrayPtrCast,
FnPtrPtrCast,
FnPtrAddrCast,
+ DynStarCast,
}
impl<'tcx> CastTy<'tcx> {
@@ -67,7 +71,33 @@ impl<'tcx> CastTy<'tcx> {
ty::Adt(d, _) if d.is_enum() && d.is_payloadfree() => Some(CastTy::Int(IntTy::CEnum)),
ty::RawPtr(mt) => Some(CastTy::Ptr(mt)),
ty::FnPtr(..) => Some(CastTy::FnPtr),
+ ty::Dynamic(_, _, ty::DynStar) => Some(CastTy::DynStar),
_ => None,
}
}
}
+
+/// Returns `mir::CastKind` from the given parameters.
+pub fn mir_cast_kind<'tcx>(from_ty: Ty<'tcx>, cast_ty: Ty<'tcx>) -> mir::CastKind {
+ let from = CastTy::from_ty(from_ty);
+ let cast = CastTy::from_ty(cast_ty);
+ let cast_kind = match (from, cast) {
+ (Some(CastTy::Ptr(_) | CastTy::FnPtr), Some(CastTy::Int(_))) => {
+ mir::CastKind::PointerExposeAddress
+ }
+ (Some(CastTy::Int(_)), Some(CastTy::Ptr(_))) => mir::CastKind::PointerFromExposedAddress,
+ (_, Some(CastTy::DynStar)) => mir::CastKind::DynStar,
+ (Some(CastTy::Int(_)), Some(CastTy::Int(_))) => mir::CastKind::IntToInt,
+ (Some(CastTy::FnPtr), Some(CastTy::Ptr(_))) => mir::CastKind::FnPtrToPtr,
+
+ (Some(CastTy::Float), Some(CastTy::Int(_))) => mir::CastKind::FloatToInt,
+ (Some(CastTy::Int(_)), Some(CastTy::Float)) => mir::CastKind::IntToFloat,
+ (Some(CastTy::Float), Some(CastTy::Float)) => mir::CastKind::FloatToFloat,
+ (Some(CastTy::Ptr(_)), Some(CastTy::Ptr(_))) => mir::CastKind::PtrToPtr,
+
+ (_, _) => {
+ bug!("Attempting to cast non-castable types {:?} and {:?}", from_ty, cast_ty)
+ }
+ };
+ cast_kind
+}
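
The new `mir_cast_kind` helper above buckets both sides of a cast into a coarse `CastTy` category and then maps the pair to a MIR cast kind. A simplified, self-contained model of that classification (local enums only, without the `Option` wrapping or raw-pointer payloads):

#[derive(Clone, Copy, Debug)]
enum Cat { Int, Float, FnPtr, Ptr, DynStar }

#[derive(Debug, PartialEq)]
enum Kind {
    PointerExposeAddress,
    PointerFromExposedAddress,
    DynStar,
    IntToInt,
    FnPtrToPtr,
    FloatToInt,
    IntToFloat,
    FloatToFloat,
    PtrToPtr,
}

fn cast_kind(from: Cat, to: Cat) -> Kind {
    use Cat::*;
    // same arm order as the hunk: pointer/int exposes first, then `dyn*` targets
    match (from, to) {
        (Ptr | FnPtr, Int) => Kind::PointerExposeAddress,
        (Int, Ptr) => Kind::PointerFromExposedAddress,
        (_, DynStar) => Kind::DynStar,
        (Int, Int) => Kind::IntToInt,
        (FnPtr, Ptr) => Kind::FnPtrToPtr,
        (Float, Int) => Kind::FloatToInt,
        (Int, Float) => Kind::IntToFloat,
        (Float, Float) => Kind::FloatToFloat,
        (Ptr, Ptr) => Kind::PtrToPtr,
        _ => panic!("non-castable pair {:?} -> {:?}", from, to),
    }
}

fn main() {
    assert_eq!(cast_kind(Cat::Ptr, Cat::Int), Kind::PointerExposeAddress);
    assert_eq!(cast_kind(Cat::FnPtr, Cat::Ptr), Kind::FnPtrToPtr);
    assert_eq!(cast_kind(Cat::Int, Cat::DynStar), Kind::DynStar);
    assert_eq!(cast_kind(Cat::Float, Cat::Int), Kind::FloatToInt);
}
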
diff --git a/compiler/rustc_middle/src/ty/codec.rs b/compiler/rustc_middle/src/ty/codec.rs
index 51137c526..14ec88b7e 100644
--- a/compiler/rustc_middle/src/ty/codec.rs
+++ b/compiler/rustc_middle/src/ty/codec.rs
@@ -455,6 +455,7 @@ impl_arena_copy_decoder! {<'tcx>
rustc_span::def_id::DefId,
rustc_span::def_id::LocalDefId,
(rustc_middle::middle::exported_symbols::ExportedSymbol<'tcx>, rustc_middle::middle::exported_symbols::SymbolExportInfo),
+ ty::DeducedParamAttrs,
}
#[macro_export]
diff --git a/compiler/rustc_middle/src/ty/consts.rs b/compiler/rustc_middle/src/ty/consts.rs
index f8792edc0..f998e6083 100644
--- a/compiler/rustc_middle/src/ty/consts.rs
+++ b/compiler/rustc_middle/src/ty/consts.rs
@@ -1,9 +1,6 @@
use crate::mir::interpret::LitToConstInput;
use crate::mir::ConstantKind;
-use crate::ty::{
- self, InlineConstSubsts, InlineConstSubstsParts, InternalSubsts, ParamEnv, ParamEnvAnd, Ty,
- TyCtxt, TypeVisitable,
-};
+use crate::ty::{self, InternalSubsts, ParamEnv, ParamEnvAnd, Ty, TyCtxt};
use rustc_data_structures::intern::Interned;
use rustc_errors::ErrorGuaranteed;
use rustc_hir as hir;
@@ -41,7 +38,7 @@ pub struct ConstS<'tcx> {
}
#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
-static_assert_size!(ConstS<'_>, 48);
+static_assert_size!(ConstS<'_>, 40);
impl<'tcx> Const<'tcx> {
#[inline]
@@ -65,8 +62,6 @@ impl<'tcx> Const<'tcx> {
tcx: TyCtxt<'tcx>,
def: ty::WithOptConstParam<LocalDefId>,
) -> Self {
- debug!("Const::from_anon_const(def={:?})", def);
-
let body_id = match tcx.hir().get_by_def_id(def.did) {
hir::Node::AnonConst(ac) => ac.body,
_ => span_bug!(
@@ -83,10 +78,9 @@ impl<'tcx> Const<'tcx> {
match Self::try_eval_lit_or_param(tcx, ty, expr) {
Some(v) => v,
None => tcx.mk_const(ty::ConstS {
- kind: ty::ConstKind::Unevaluated(ty::Unevaluated {
+ kind: ty::ConstKind::Unevaluated(ty::UnevaluatedConst {
def: def.to_global(),
substs: InternalSubsts::identity_for_item(tcx, def.did.to_def_id()),
- promoted: None,
}),
ty,
}),
@@ -153,46 +147,6 @@ impl<'tcx> Const<'tcx> {
}
}
- pub fn from_inline_const(tcx: TyCtxt<'tcx>, def_id: LocalDefId) -> Self {
- debug!("Const::from_inline_const(def_id={:?})", def_id);
-
- let hir_id = tcx.hir().local_def_id_to_hir_id(def_id);
-
- let body_id = match tcx.hir().get(hir_id) {
- hir::Node::AnonConst(ac) => ac.body,
- _ => span_bug!(
- tcx.def_span(def_id.to_def_id()),
- "from_inline_const can only process anonymous constants"
- ),
- };
-
- let expr = &tcx.hir().body(body_id).value;
-
- let ty = tcx.typeck(def_id).node_type(hir_id);
-
- let ret = match Self::try_eval_lit_or_param(tcx, ty, expr) {
- Some(v) => v,
- None => {
- let typeck_root_def_id = tcx.typeck_root_def_id(def_id.to_def_id());
- let parent_substs =
- tcx.erase_regions(InternalSubsts::identity_for_item(tcx, typeck_root_def_id));
- let substs =
- InlineConstSubsts::new(tcx, InlineConstSubstsParts { parent_substs, ty })
- .substs;
- tcx.mk_const(ty::ConstS {
- kind: ty::ConstKind::Unevaluated(ty::Unevaluated {
- def: ty::WithOptConstParam::unknown(def_id).to_global(),
- substs,
- promoted: None,
- }),
- ty,
- })
- }
- };
- debug_assert!(!ret.has_free_regions());
- ret
- }
-
/// Interns the given value as a constant.
#[inline]
pub fn from_value(tcx: TyCtxt<'tcx>, val: ty::ValTree<'tcx>, ty: Ty<'tcx>) -> Self {
@@ -309,6 +263,10 @@ impl<'tcx> Const<'tcx> {
self.try_eval_usize(tcx, param_env)
.unwrap_or_else(|| bug!("expected usize, got {:#?}", self))
}
+
+ pub fn is_ct_infer(self) -> bool {
+ matches!(self.kind(), ty::ConstKind::Infer(_))
+ }
}
pub fn const_param_default<'tcx>(tcx: TyCtxt<'tcx>, def_id: DefId) -> Const<'tcx> {
diff --git a/compiler/rustc_middle/src/ty/consts/kind.rs b/compiler/rustc_middle/src/ty/consts/kind.rs
index cb0137d2e..4ab761e07 100644
--- a/compiler/rustc_middle/src/ty/consts/kind.rs
+++ b/compiler/rustc_middle/src/ty/consts/kind.rs
@@ -1,50 +1,52 @@
use std::convert::TryInto;
+use crate::mir;
use crate::mir::interpret::{AllocId, ConstValue, Scalar};
-use crate::mir::Promoted;
use crate::ty::subst::{InternalSubsts, SubstsRef};
use crate::ty::ParamEnv;
use crate::ty::{self, TyCtxt, TypeVisitable};
+use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
use rustc_errors::ErrorGuaranteed;
use rustc_hir::def_id::DefId;
use rustc_macros::HashStable;
use rustc_target::abi::Size;
use super::ScalarInt;
-/// An unevaluated, potentially generic, constant.
+
+/// An unevaluated (potentially generic) constant used in the type-system.
#[derive(Copy, Clone, Debug, Eq, PartialEq, PartialOrd, Ord, TyEncodable, TyDecodable, Lift)]
-#[derive(Hash, HashStable)]
-pub struct Unevaluated<'tcx, P = Option<Promoted>> {
+#[derive(Hash, HashStable, TypeFoldable, TypeVisitable)]
+pub struct UnevaluatedConst<'tcx> {
pub def: ty::WithOptConstParam<DefId>,
pub substs: SubstsRef<'tcx>,
- pub promoted: P,
}
-impl<'tcx> Unevaluated<'tcx> {
- #[inline]
- pub fn shrink(self) -> Unevaluated<'tcx, ()> {
- debug_assert_eq!(self.promoted, None);
- Unevaluated { def: self.def, substs: self.substs, promoted: () }
+impl rustc_errors::IntoDiagnosticArg for UnevaluatedConst<'_> {
+ fn into_diagnostic_arg(self) -> rustc_errors::DiagnosticArgValue<'static> {
+ format!("{:?}", self).into_diagnostic_arg()
}
}
-impl<'tcx> Unevaluated<'tcx, ()> {
+impl<'tcx> UnevaluatedConst<'tcx> {
#[inline]
- pub fn expand(self) -> Unevaluated<'tcx> {
- Unevaluated { def: self.def, substs: self.substs, promoted: None }
+ pub fn expand(self) -> mir::UnevaluatedConst<'tcx> {
+ mir::UnevaluatedConst { def: self.def, substs: self.substs, promoted: None }
}
}
-impl<'tcx, P: Default> Unevaluated<'tcx, P> {
+impl<'tcx> UnevaluatedConst<'tcx> {
#[inline]
- pub fn new(def: ty::WithOptConstParam<DefId>, substs: SubstsRef<'tcx>) -> Unevaluated<'tcx, P> {
- Unevaluated { def, substs, promoted: Default::default() }
+ pub fn new(
+ def: ty::WithOptConstParam<DefId>,
+ substs: SubstsRef<'tcx>,
+ ) -> UnevaluatedConst<'tcx> {
+ UnevaluatedConst { def, substs }
}
}
/// Represents a constant in Rust.
#[derive(Copy, Clone, Debug, Eq, PartialEq, PartialOrd, Ord, TyEncodable, TyDecodable)]
-#[derive(Hash, HashStable)]
+#[derive(Hash, HashStable, TypeFoldable, TypeVisitable)]
pub enum ConstKind<'tcx> {
/// A const generic parameter.
Param(ty::ParamConst),
@@ -60,7 +62,7 @@ pub enum ConstKind<'tcx> {
/// Used in the HIR by using `Unevaluated` everywhere and later normalizing to one of the other
/// variants when the code is monomorphic enough for that.
- Unevaluated(Unevaluated<'tcx>),
+ Unevaluated(UnevaluatedConst<'tcx>),
/// Used to hold computed value.
Value(ty::ValTree<'tcx>),
@@ -71,7 +73,7 @@ pub enum ConstKind<'tcx> {
}
#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
-static_assert_size!(ConstKind<'_>, 40);
+static_assert_size!(ConstKind<'_>, 32);
impl<'tcx> ConstKind<'tcx> {
#[inline]
@@ -107,7 +109,6 @@ impl<'tcx> ConstKind<'tcx> {
/// An inference variable for a const, for use in const generics.
#[derive(Copy, Clone, Debug, Eq, PartialEq, PartialOrd, Ord, TyEncodable, TyDecodable, Hash)]
-#[derive(HashStable)]
pub enum InferConst<'tcx> {
/// Infer the value of the const.
Var(ty::ConstVid<'tcx>),
@@ -115,6 +116,15 @@ pub enum InferConst<'tcx> {
Fresh(u32),
}
+impl<CTX> HashStable<CTX> for InferConst<'_> {
+ fn hash_stable(&self, hcx: &mut CTX, hasher: &mut StableHasher) {
+ match self {
+ InferConst::Var(_) => panic!("const variables should not be hashed: {self:?}"),
+ InferConst::Fresh(i) => i.hash_stable(hcx, hasher),
+ }
+ }
+}
+
enum EvalMode {
Typeck,
Mir,
@@ -174,6 +184,7 @@ impl<'tcx> ConstKind<'tcx> {
param_env: ParamEnv<'tcx>,
eval_mode: EvalMode,
) -> Option<Result<EvalResult<'tcx>, ErrorGuaranteed>> {
+ assert!(!self.has_escaping_bound_vars(), "escaping vars in {self:?}");
if let ConstKind::Unevaluated(unevaluated) = self {
use crate::mir::interpret::ErrorHandled;
@@ -194,10 +205,9 @@ impl<'tcx> ConstKind<'tcx> {
// FIXME(eddyb, skinny121) pass `InferCtxt` into here when it's available, so that
// we can call `infcx.const_eval_resolve` which handles inference variables.
let param_env_and = if param_env_and.needs_infer() {
- tcx.param_env(unevaluated.def.did).and(ty::Unevaluated {
+ tcx.param_env(unevaluated.def.did).and(ty::UnevaluatedConst {
def: unevaluated.def,
substs: InternalSubsts::identity_for_item(tcx, unevaluated.def.did),
- promoted: unevaluated.promoted,
})
} else {
param_env_and
@@ -221,7 +231,7 @@ impl<'tcx> ConstKind<'tcx> {
}
}
EvalMode::Mir => {
- match tcx.const_eval_resolve(param_env, unevaluated, None) {
+ match tcx.const_eval_resolve(param_env, unevaluated.expand(), None) {
// NOTE(eddyb) `val` contains no lifetimes/types/consts,
// and we use the original type, so nothing from `substs`
// (which may be identity substs, see above),
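
The hunks above split the old `Unevaluated<'tcx, P>` type: the type-system form (`ty::UnevaluatedConst`) drops the `promoted` field, and `expand` rebuilds the MIR form with `promoted: None` before calling `const_eval_resolve`. A standalone sketch of that shape using local stand-in types:

type DefId = u32;
type SubstsRef = Vec<&'static str>;
type Promoted = usize;

// type-system form: no `promoted` field any more
struct TyUnevaluatedConst {
    def: DefId,
    substs: SubstsRef,
}

// MIR form: still carries an optional promoted index
struct MirUnevaluatedConst {
    def: DefId,
    substs: SubstsRef,
    promoted: Option<Promoted>,
}

impl TyUnevaluatedConst {
    fn expand(self) -> MirUnevaluatedConst {
        MirUnevaluatedConst { def: self.def, substs: self.substs, promoted: None }
    }
}

fn main() {
    let uv = TyUnevaluatedConst { def: 7, substs: vec!["T"] };
    let mir_uv = uv.expand();
    println!(
        "def = {}, substs = {:?}, promoted = {:?}",
        mir_uv.def, mir_uv.substs, mir_uv.promoted
    );
}
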
diff --git a/compiler/rustc_middle/src/ty/consts/valtree.rs b/compiler/rustc_middle/src/ty/consts/valtree.rs
index 93707bb18..a803fca0d 100644
--- a/compiler/rustc_middle/src/ty/consts/valtree.rs
+++ b/compiler/rustc_middle/src/ty/consts/valtree.rs
@@ -18,7 +18,7 @@ use rustc_macros::{HashStable, TyDecodable, TyEncodable};
/// `ValTree` does not have this problem with representation, as it only contains integers or
/// lists of (nested) `ValTree`.
pub enum ValTree<'tcx> {
- /// ZSTs, integers, `bool`, `char` are represented as scalars.
+ /// integers, `bool`, `char` are represented as scalars.
/// See the `ScalarInt` documentation for how `ScalarInt` guarantees that equal values
/// of these types have the same representation.
Leaf(ScalarInt),
@@ -27,8 +27,11 @@ pub enum ValTree<'tcx> {
// dont use SliceOrStr for now
/// The fields of any kind of aggregate. Structs, tuples and arrays are represented by
/// listing their fields' values in order.
+ ///
/// Enums are represented by storing their discriminant as a field, followed by all
/// the fields of the variant.
+ ///
+ /// ZST types are represented as an empty slice.
Branch(&'tcx [ValTree<'tcx>]),
}
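
The updated `ValTree` docs above describe scalars as leaves, aggregates as branches listing their fields in order, and ZSTs as an empty branch. A minimal standalone model, with `u128` standing in for `ScalarInt` and `Vec` for the interned slice:

#[derive(Debug)]
enum ValTree {
    // scalars: integers, `bool`, `char`
    Leaf(u128),
    // aggregates: fields in order (enums prepend their discriminant)
    Branch(Vec<ValTree>),
}

fn main() {
    // a value of type `(u8, bool)`, e.g. `(3, true)`
    let pair = ValTree::Branch(vec![ValTree::Leaf(3), ValTree::Leaf(1)]);
    // a zero-sized value such as `()` becomes an empty branch
    let unit = ValTree::Branch(Vec::new());
    println!("{pair:?} {unit:?}");
}
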
diff --git a/compiler/rustc_middle/src/ty/context.rs b/compiler/rustc_middle/src/ty/context.rs
index 0a0f45ce1..3d7e2a083 100644
--- a/compiler/rustc_middle/src/ty/context.rs
+++ b/compiler/rustc_middle/src/ty/context.rs
@@ -1,10 +1,10 @@
//! Type context book-keeping.
use crate::arena::Arena;
-use crate::dep_graph::{DepGraph, DepKind, DepKindStruct};
+use crate::dep_graph::{DepGraph, DepKindStruct};
use crate::hir::place::Place as HirPlace;
use crate::infer::canonical::{Canonical, CanonicalVarInfo, CanonicalVarInfos};
-use crate::lint::{struct_lint_level, LintLevelSource};
+use crate::lint::struct_lint_level;
use crate::middle::codegen_fn_attrs::CodegenFnAttrs;
use crate::middle::resolve_lifetime;
use crate::middle::stability;
@@ -15,14 +15,15 @@ use crate::mir::{
use crate::thir::Thir;
use crate::traits;
use crate::ty::query::{self, TyCtxtAt};
-use crate::ty::subst::{GenericArg, GenericArgKind, InternalSubsts, Subst, SubstsRef, UserSubsts};
use crate::ty::{
self, AdtDef, AdtDefData, AdtKind, Binder, BindingMode, BoundVar, CanonicalPolyFnSig,
ClosureSizeProfileData, Const, ConstS, ConstVid, DefIdTree, ExistentialPredicate, FloatTy,
FloatVar, FloatVid, GenericParamDefKind, InferConst, InferTy, IntTy, IntVar, IntVid, List,
ParamConst, ParamTy, PolyFnSig, Predicate, PredicateKind, PredicateS, ProjectionTy, Region,
RegionKind, ReprOptions, TraitObjectVisitor, Ty, TyKind, TyS, TyVar, TyVid, TypeAndMut, UintTy,
+ Visibility,
};
+use crate::ty::{GenericArg, GenericArgKind, InternalSubsts, SubstsRef, UserSubsts};
use rustc_ast as ast;
use rustc_data_structures::fingerprint::Fingerprint;
use rustc_data_structures::fx::{FxHashMap, FxHashSet};
@@ -33,12 +34,16 @@ use rustc_data_structures::sharded::{IntoPointer, ShardedHashMap};
use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
use rustc_data_structures::steal::Steal;
use rustc_data_structures::sync::{self, Lock, Lrc, ReadGuard, RwLock, WorkerLocal};
+use rustc_data_structures::unord::UnordSet;
use rustc_data_structures::vec_map::VecMap;
-use rustc_errors::{DecorateLint, ErrorGuaranteed, LintDiagnosticBuilder, MultiSpan};
+use rustc_errors::{
+ DecorateLint, DiagnosticBuilder, DiagnosticMessage, ErrorGuaranteed, MultiSpan,
+};
use rustc_hir as hir;
use rustc_hir::def::{DefKind, Res};
use rustc_hir::def_id::{CrateNum, DefId, DefIdMap, LocalDefId, LOCAL_CRATE};
use rustc_hir::definitions::Definitions;
+use rustc_hir::hir_id::OwnerId;
use rustc_hir::intravisit::Visitor;
use rustc_hir::lang_items::LangItem;
use rustc_hir::{
@@ -52,7 +57,7 @@ use rustc_query_system::ich::StableHashingContext;
use rustc_serialize::opaque::{FileEncodeResult, FileEncoder};
use rustc_session::config::{CrateType, OutputFilenames};
use rustc_session::cstore::CrateStoreDyn;
-use rustc_session::lint::{Level, Lint};
+use rustc_session::lint::Lint;
use rustc_session::Limit;
use rustc_session::Session;
use rustc_span::def_id::{DefPathHash, StableCrateId};
@@ -62,7 +67,7 @@ use rustc_span::{Span, DUMMY_SP};
use rustc_target::abi::{Layout, LayoutS, TargetDataLayout, VariantIdx};
use rustc_target::spec::abi;
use rustc_type_ir::sty::TyKind::*;
-use rustc_type_ir::{InternAs, InternIteratorElement, Interner, TypeFlags};
+use rustc_type_ir::{DynKind, InternAs, InternIteratorElement, Interner, TypeFlags};
use std::any::Any;
use std::borrow::Borrow;
@@ -75,7 +80,7 @@ use std::mem;
use std::ops::{Bound, Deref};
use std::sync::Arc;
-use super::{ImplPolarity, RvalueScopes};
+use super::{ImplPolarity, ResolverOutputs, RvalueScopes};
pub trait OnDiskCache<'tcx>: rustc_data_structures::sync::Sync {
/// Creates a new `OnDiskCache` instance from the serialized data in `data`.
@@ -194,9 +199,9 @@ impl<'tcx> CtxtInterners<'tcx> {
.intern(kind, |kind| {
let flags = super::flags::FlagComputation::for_kind(&kind);
- // It's impossible to hash inference regions (and will ICE), so we don't need to try to cache them.
+ // It's impossible to hash inference variables (and will ICE), so we don't need to try to cache them.
// Without incremental, we rarely stable-hash types, so let's not do it proactively.
- let stable_hash = if flags.flags.intersects(TypeFlags::HAS_RE_INFER)
+ let stable_hash = if flags.flags.intersects(TypeFlags::NEEDS_INFER)
|| sess.opts.incremental.is_none()
{
Fingerprint::ZERO
@@ -275,9 +280,6 @@ pub struct CommonTypes<'tcx> {
}
pub struct CommonLifetimes<'tcx> {
- /// `ReEmpty` in the root universe.
- pub re_root_empty: Region<'tcx>,
-
/// `ReStatic`
pub re_static: Region<'tcx>,
@@ -290,7 +292,7 @@ pub struct CommonConsts<'tcx> {
}
pub struct LocalTableInContext<'a, V> {
- hir_owner: LocalDefId,
+ hir_owner: OwnerId,
data: &'a ItemLocalMap<V>,
}
@@ -302,7 +304,7 @@ pub struct LocalTableInContext<'a, V> {
/// would result in lookup errors, or worse, in silently wrong data being
/// stored/returned.
#[inline]
-fn validate_hir_id_for_typeck_results(hir_owner: LocalDefId, hir_id: hir::HirId) {
+fn validate_hir_id_for_typeck_results(hir_owner: OwnerId, hir_id: hir::HirId) {
if hir_id.owner != hir_owner {
invalid_hir_id_for_typeck_results(hir_owner, hir_id);
}
@@ -310,7 +312,7 @@ fn validate_hir_id_for_typeck_results(hir_owner: LocalDefId, hir_id: hir::HirId)
#[cold]
#[inline(never)]
-fn invalid_hir_id_for_typeck_results(hir_owner: LocalDefId, hir_id: hir::HirId) {
+fn invalid_hir_id_for_typeck_results(hir_owner: OwnerId, hir_id: hir::HirId) {
ty::tls::with(|tcx| {
bug!(
"node {} with HirId::owner {:?} cannot be placed in TypeckResults with hir_owner {:?}",
@@ -346,7 +348,7 @@ impl<'a, V> ::std::ops::Index<hir::HirId> for LocalTableInContext<'a, V> {
}
pub struct LocalTableInContextMut<'a, V> {
- hir_owner: LocalDefId,
+ hir_owner: OwnerId,
data: &'a mut ItemLocalMap<V>,
}
@@ -418,7 +420,7 @@ pub struct GeneratorDiagnosticData<'tcx> {
#[derive(TyEncodable, TyDecodable, Debug, HashStable)]
pub struct TypeckResults<'tcx> {
/// The `HirId::owner` all `ItemLocalId`s in this table are relative to.
- pub hir_owner: LocalDefId,
+ pub hir_owner: OwnerId,
/// Resolved definitions for `<T>::X` associated paths and
/// method calls, including those of overloaded operators.
@@ -530,19 +532,17 @@ pub struct TypeckResults<'tcx> {
/// This is used for warning unused imports. During type
/// checking, this `Lrc` should not be cloned: it must have a ref-count
/// of 1 so that we can insert things into the set mutably.
- pub used_trait_imports: Lrc<FxHashSet<LocalDefId>>,
+ pub used_trait_imports: Lrc<UnordSet<LocalDefId>>,
/// If any errors occurred while type-checking this body,
/// this field will be set to `Some(ErrorGuaranteed)`.
pub tainted_by_errors: Option<ErrorGuaranteed>,
/// All the opaque types that have hidden types set
- /// by this function. For return-position-impl-trait we also store the
- /// type here, so that mir-borrowck can figure out hidden types,
+ /// by this function. We also store the
+ /// type here, so that mir-borrowck can use it as a hint for figuring out hidden types,
/// even if they are only set in dead code (which doesn't show up in MIR).
- /// For type-alias-impl-trait, this map is only used to prevent query cycles,
- /// so the hidden types are all `None`.
- pub concrete_opaque_types: VecMap<LocalDefId, Option<Ty<'tcx>>>,
+ pub concrete_opaque_types: VecMap<LocalDefId, ty::OpaqueHiddenType<'tcx>>,
/// Tracks the minimum captures required for a closure;
/// see `MinCaptureInformationMap` for more details.
@@ -574,7 +574,7 @@ pub struct TypeckResults<'tcx> {
/// Tracks the rvalue scoping rules which defines finer scoping for rvalue expressions
/// by applying extended parameter rules.
- /// Details may be find in `rustc_typeck::check::rvalue_scopes`.
+ /// Details may be found in `rustc_hir_analysis::check::rvalue_scopes`.
pub rvalue_scopes: RvalueScopes,
/// Stores the type, expression, span and optional scope span of all types
@@ -593,7 +593,7 @@ pub struct TypeckResults<'tcx> {
}
impl<'tcx> TypeckResults<'tcx> {
- pub fn new(hir_owner: LocalDefId) -> TypeckResults<'tcx> {
+ pub fn new(hir_owner: OwnerId) -> TypeckResults<'tcx> {
TypeckResults {
hir_owner,
type_dependent_defs: Default::default(),
@@ -874,7 +874,7 @@ pub type CanonicalUserTypeAnnotations<'tcx> =
#[derive(Clone, Debug, TyEncodable, TyDecodable, HashStable, TypeFoldable, TypeVisitable, Lift)]
pub struct CanonicalUserTypeAnnotation<'tcx> {
- pub user_ty: CanonicalUserType<'tcx>,
+ pub user_ty: Box<CanonicalUserType<'tcx>>,
pub span: Span,
pub inferred_ty: Ty<'tcx>,
}
@@ -986,11 +986,7 @@ impl<'tcx> CommonLifetimes<'tcx> {
))
};
- CommonLifetimes {
- re_root_empty: mk(ty::ReEmpty(ty::UniverseIndex::ROOT)),
- re_static: mk(ty::ReStatic),
- re_erased: mk(ty::ReErased),
- }
+ CommonLifetimes { re_static: mk(ty::ReStatic), re_erased: mk(ty::ReErased) }
}
}
@@ -1072,10 +1068,9 @@ pub struct GlobalCtxt<'tcx> {
pub consts: CommonConsts<'tcx>,
definitions: RwLock<Definitions>,
- cstore: Box<CrateStoreDyn>,
/// Output of the resolver.
- pub(crate) untracked_resolutions: ty::ResolverOutputs,
+ pub(crate) untracked_resolutions: ty::ResolverGlobalCtxt,
untracked_resolver_for_lowering: Steal<ty::ResolverAstLowering>,
/// The entire crate as AST. This field serves as the input for the hir_crate query,
/// which lowers it from AST to HIR. It must not be read or used by anything else.
@@ -1089,7 +1084,7 @@ pub struct GlobalCtxt<'tcx> {
pub queries: &'tcx dyn query::QueryEngine<'tcx>,
pub query_caches: query::QueryCaches<'tcx>,
- query_kinds: &'tcx [DepKindStruct],
+ pub(crate) query_kinds: &'tcx [DepKindStruct<'tcx>],
// Internal caches for metadata decoding. No need to track deps on this.
pub ty_rcache: Lock<FxHashMap<ty::CReaderCacheKey, Ty<'tcx>>>,
@@ -1238,27 +1233,29 @@ impl<'tcx> TyCtxt<'tcx> {
lint_store: Lrc<dyn Any + sync::Send + sync::Sync>,
arena: &'tcx WorkerLocal<Arena<'tcx>>,
hir_arena: &'tcx WorkerLocal<hir::Arena<'tcx>>,
- definitions: Definitions,
- cstore: Box<CrateStoreDyn>,
- untracked_resolutions: ty::ResolverOutputs,
- untracked_resolver_for_lowering: ty::ResolverAstLowering,
+ resolver_outputs: ResolverOutputs,
krate: Lrc<ast::Crate>,
dep_graph: DepGraph,
on_disk_cache: Option<&'tcx dyn OnDiskCache<'tcx>>,
queries: &'tcx dyn query::QueryEngine<'tcx>,
- query_kinds: &'tcx [DepKindStruct],
+ query_kinds: &'tcx [DepKindStruct<'tcx>],
crate_name: &str,
output_filenames: OutputFilenames,
) -> GlobalCtxt<'tcx> {
+ let ResolverOutputs {
+ definitions,
+ global_ctxt: untracked_resolutions,
+ ast_lowering: untracked_resolver_for_lowering,
+ } = resolver_outputs;
let data_layout = TargetDataLayout::parse(&s.target).unwrap_or_else(|err| {
- s.fatal(&err);
+ s.emit_fatal(err);
});
let interners = CtxtInterners::new(arena);
let common_types = CommonTypes::new(
&interners,
s,
&definitions,
- &*cstore,
+ &*untracked_resolutions.cstore,
// This is only used to create a stable hashing context.
&untracked_resolutions.source_span,
);
@@ -1273,7 +1270,6 @@ impl<'tcx> TyCtxt<'tcx> {
interners,
dep_graph,
definitions: RwLock::new(definitions),
- cstore,
prof: s.prof.clone(),
types: common_types,
lifetimes: common_lifetimes,
@@ -1296,10 +1292,6 @@ impl<'tcx> TyCtxt<'tcx> {
}
}
- pub(crate) fn query_kind(self, k: DepKind) -> &'tcx DepKindStruct {
- &self.query_kinds[k as usize]
- }
-
/// Constructs a `TyKind::Error` type and registers a `delay_span_bug` to ensure it gets used.
#[track_caller]
pub fn ty_error(self) -> Ty<'tcx> {
@@ -1378,7 +1370,7 @@ impl<'tcx> TyCtxt<'tcx> {
if let Some(id) = id.as_local() {
self.definitions_untracked().def_key(id)
} else {
- self.cstore.def_key(id)
+ self.untracked_resolutions.cstore.def_key(id)
}
}
@@ -1392,7 +1384,7 @@ impl<'tcx> TyCtxt<'tcx> {
if let Some(id) = id.as_local() {
self.definitions_untracked().def_path(id)
} else {
- self.cstore.def_path(id)
+ self.untracked_resolutions.cstore.def_path(id)
}
}
@@ -1402,7 +1394,7 @@ impl<'tcx> TyCtxt<'tcx> {
if let Some(def_id) = def_id.as_local() {
self.definitions_untracked().def_path_hash(def_id)
} else {
- self.cstore.def_path_hash(def_id)
+ self.untracked_resolutions.cstore.def_path_hash(def_id)
}
}
@@ -1411,7 +1403,7 @@ impl<'tcx> TyCtxt<'tcx> {
if crate_num == LOCAL_CRATE {
self.sess.local_stable_crate_id()
} else {
- self.cstore.stable_crate_id(crate_num)
+ self.untracked_resolutions.cstore.stable_crate_id(crate_num)
}
}
@@ -1422,7 +1414,7 @@ impl<'tcx> TyCtxt<'tcx> {
if stable_crate_id == self.sess.local_stable_crate_id() {
LOCAL_CRATE
} else {
- self.cstore.stable_crate_id_to_crate_num(stable_crate_id)
+ self.untracked_resolutions.cstore.stable_crate_id_to_crate_num(stable_crate_id)
}
}
@@ -1441,8 +1433,9 @@ impl<'tcx> TyCtxt<'tcx> {
} else {
// If this is a DefPathHash from an upstream crate, let the CrateStore map
// it to a DefId.
- let cnum = self.cstore.stable_crate_id_to_crate_num(stable_crate_id);
- self.cstore.def_path_hash_to_def_id(cnum, hash)
+ let cstore = &*self.untracked_resolutions.cstore;
+ let cnum = cstore.stable_crate_id_to_crate_num(stable_crate_id);
+ cstore.def_path_hash_to_def_id(cnum, hash)
}
}
@@ -1454,7 +1447,7 @@ impl<'tcx> TyCtxt<'tcx> {
let (crate_name, stable_crate_id) = if def_id.is_local() {
(self.crate_name, self.sess.local_stable_crate_id())
} else {
- let cstore = &self.cstore;
+ let cstore = &*self.untracked_resolutions.cstore;
(cstore.crate_name(def_id.krate), cstore.stable_crate_id(def_id.krate))
};
@@ -1498,17 +1491,17 @@ impl<'tcx> TyCtxt<'tcx> {
// Create a dependency to the crate to be sure we re-execute this when the amount of
// definitions change.
self.ensure().hir_crate(());
- // Leak a read lock once we start iterating on definitions, to prevent adding new onces
+ // Leak a read lock once we start iterating on definitions, to prevent adding new ones
// while iterating. If some query needs to add definitions, it should be `ensure`d above.
let definitions = self.definitions.leak();
definitions.iter_local_def_id()
}
pub fn def_path_table(self) -> &'tcx rustc_hir::definitions::DefPathTable {
- // Create a dependency to the crate to be sure we reexcute this when the amount of
+ // Create a dependency to the crate to be sure we re-execute this when the amount of
// definitions change.
self.ensure().hir_crate(());
- // Leak a read lock once we start iterating on definitions, to prevent adding new onces
+ // Leak a read lock once we start iterating on definitions, to prevent adding new ones
// while iterating. If some query needs to add definitions, it should be `ensure`d above.
let definitions = self.definitions.leak();
definitions.def_path_table()
@@ -1517,10 +1510,10 @@ impl<'tcx> TyCtxt<'tcx> {
pub fn def_path_hash_to_def_index_map(
self,
) -> &'tcx rustc_hir::def_path_hash_map::DefPathHashMap {
- // Create a dependency to the crate to be sure we reexcute this when the amount of
+ // Create a dependency to the crate to be sure we re-execute this when the amount of
// definitions change.
self.ensure().hir_crate(());
- // Leak a read lock once we start iterating on definitions, to prevent adding new onces
+ // Leak a read lock once we start iterating on definitions, to prevent adding new ones
// while iterating. If some query needs to add definitions, it should be `ensure`d above.
let definitions = self.definitions.leak();
definitions.def_path_hash_to_def_index_map()
@@ -1529,7 +1522,7 @@ impl<'tcx> TyCtxt<'tcx> {
/// Note that this is *untracked* and should only be used within the query
/// system if the result is otherwise tracked through queries
pub fn cstore_untracked(self) -> &'tcx CrateStoreDyn {
- &*self.cstore
+ &*self.untracked_resolutions.cstore
}
/// Note that this is *untracked* and should only be used within the query
@@ -1555,7 +1548,7 @@ impl<'tcx> TyCtxt<'tcx> {
let hcx = StableHashingContext::new(
self.sess,
&*definitions,
- &*self.cstore,
+ &*self.untracked_resolutions.cstore,
&self.untracked_resolutions.source_span,
);
f(hcx)
@@ -1596,7 +1589,7 @@ impl<'tcx> TyCtxt<'tcx> {
})
}
- // Returns the `DefId` and the `BoundRegionKind` corresponding to the given region.
+ /// Returns the `DefId` and the `BoundRegionKind` corresponding to the given region.
pub fn is_suitable_region(self, region: Region<'tcx>) -> Option<FreeRegionInfo> {
let (suitable_region_binding_scope, bound_region) = match *region {
ty::ReFree(ref free_region) => {
@@ -1728,6 +1721,11 @@ impl<'tcx> TyCtxt<'tcx> {
.chain(self.crates(()).iter().copied())
.flat_map(move |cnum| self.traits_in_crate(cnum).iter().copied())
}
+
+ #[inline]
+ pub fn local_visibility(self, def_id: LocalDefId) -> Visibility {
+ self.visibility(def_id).expect_local()
+ }
}
/// A trait implemented for all `X<'a>` types that can be safely and
@@ -1821,7 +1819,9 @@ nop_list_lift! {bound_variable_kinds; ty::BoundVariableKind => ty::BoundVariable
// This is the impl for `&'a InternalSubsts<'a>`.
nop_list_lift! {substs; GenericArg<'a> => GenericArg<'tcx>}
-CloneLiftImpls! { for<'tcx> { Constness, traits::WellFormedLoc, } }
+CloneLiftImpls! { for<'tcx> {
+ Constness, traits::WellFormedLoc, ImplPolarity, crate::mir::ReturnConstraint,
+} }
pub mod tls {
use super::{ptr_eq, GlobalCtxt, TyCtxt};
@@ -1829,9 +1829,9 @@ pub mod tls {
use crate::dep_graph::TaskDepsRef;
use crate::ty::query;
use rustc_data_structures::sync::{self, Lock};
- use rustc_data_structures::thin_vec::ThinVec;
use rustc_errors::Diagnostic;
use std::mem;
+ use thin_vec::ThinVec;
#[cfg(not(parallel_compiler))]
use std::cell::Cell;
@@ -1857,8 +1857,8 @@ pub mod tls {
/// This is updated by `JobOwner::start` in `ty::query::plumbing` when executing a query.
pub diagnostics: Option<&'a Lock<ThinVec<Diagnostic>>>,
- /// Used to prevent layout from recursing too deeply.
- pub layout_depth: usize,
+ /// Used to prevent queries from calling too deeply.
+ pub query_depth: usize,
/// The current dep graph task. This is used to add dependencies to queries
/// when executing them.
@@ -1872,7 +1872,7 @@ pub mod tls {
tcx,
query: None,
diagnostics: None,
- layout_depth: 0,
+ query_depth: 0,
task_deps: TaskDepsRef::Ignore,
}
}
@@ -2366,7 +2366,7 @@ impl<'tcx> TyCtxt<'tcx> {
st,
self.sess,
&self.definitions.read(),
- &*self.cstore,
+ &*self.untracked_resolutions.cstore,
// This is only used to create a stable hashing context.
&self.untracked_resolutions.source_span,
)
@@ -2546,8 +2546,9 @@ impl<'tcx> TyCtxt<'tcx> {
self,
obj: &'tcx List<ty::Binder<'tcx, ExistentialPredicate<'tcx>>>,
reg: ty::Region<'tcx>,
+ repr: DynKind,
) -> Ty<'tcx> {
- self.mk_ty(Dynamic(obj, reg))
+ self.mk_ty(Dynamic(obj, reg, repr))
}
#[inline]
@@ -2810,44 +2811,6 @@ impl<'tcx> TyCtxt<'tcx> {
iter.intern_with(|xs| self.intern_bound_variable_kinds(xs))
}
- /// Walks upwards from `id` to find a node which might change lint levels with attributes.
- /// It stops at `bound` and just returns it if reached.
- pub fn maybe_lint_level_root_bounded(self, mut id: HirId, bound: HirId) -> HirId {
- let hir = self.hir();
- loop {
- if id == bound {
- return bound;
- }
-
- if hir.attrs(id).iter().any(|attr| Level::from_attr(attr).is_some()) {
- return id;
- }
- let next = hir.get_parent_node(id);
- if next == id {
- bug!("lint traversal reached the root of the crate");
- }
- id = next;
- }
- }
-
- pub fn lint_level_at_node(
- self,
- lint: &'static Lint,
- mut id: hir::HirId,
- ) -> (Level, LintLevelSource) {
- let sets = self.lint_levels(());
- loop {
- if let Some(pair) = sets.level_and_source(lint, id, self.sess) {
- return pair;
- }
- let next = self.hir().get_parent_node(id);
- if next == id {
- bug!("lint traversal reached the root of the crate");
- }
- id = next;
- }
- }
-
/// Emit a lint at `span` from a lint struct (some type that implements `DecorateLint`,
/// typically generated by `#[derive(LintDiagnostic)]`).
pub fn emit_spanned_lint(
@@ -2857,18 +2820,28 @@ impl<'tcx> TyCtxt<'tcx> {
span: impl Into<MultiSpan>,
decorator: impl for<'a> DecorateLint<'a, ()>,
) {
- self.struct_span_lint_hir(lint, hir_id, span, |diag| decorator.decorate_lint(diag))
+ self.struct_span_lint_hir(lint, hir_id, span, decorator.msg(), |diag| {
+ decorator.decorate_lint(diag)
+ })
}
+ /// Emit a lint at the appropriate level for a hir node, with an associated span.
+ ///
+ /// Return value of the `decorate` closure is ignored, see [`struct_lint_level`] for a detailed explanation.
+ ///
+ /// [`struct_lint_level`]: rustc_middle::lint::struct_lint_level#decorate-signature
pub fn struct_span_lint_hir(
self,
lint: &'static Lint,
hir_id: HirId,
span: impl Into<MultiSpan>,
- decorate: impl for<'a> FnOnce(LintDiagnosticBuilder<'a, ()>),
+ msg: impl Into<DiagnosticMessage>,
+ decorate: impl for<'a, 'b> FnOnce(
+ &'b mut DiagnosticBuilder<'a, ()>,
+ ) -> &'b mut DiagnosticBuilder<'a, ()>,
) {
let (level, src) = self.lint_level_at_node(lint, hir_id);
- struct_lint_level(self.sess, lint, level, src, Some(span.into()), decorate);
+ struct_lint_level(self.sess, lint, level, src, Some(span.into()), msg, decorate);
}
/// Emit a lint from a lint struct (some type that implements `DecorateLint`, typically
@@ -2879,17 +2852,25 @@ impl<'tcx> TyCtxt<'tcx> {
id: HirId,
decorator: impl for<'a> DecorateLint<'a, ()>,
) {
- self.struct_lint_node(lint, id, |diag| decorator.decorate_lint(diag))
+ self.struct_lint_node(lint, id, decorator.msg(), |diag| decorator.decorate_lint(diag))
}
+ /// Emit a lint at the appropriate level for a hir node.
+ ///
+ /// Return value of the `decorate` closure is ignored, see [`struct_lint_level`] for a detailed explanation.
+ ///
+ /// [`struct_lint_level`]: rustc_middle::lint::struct_lint_level#decorate-signature
pub fn struct_lint_node(
self,
lint: &'static Lint,
id: HirId,
- decorate: impl for<'a> FnOnce(LintDiagnosticBuilder<'a, ()>),
+ msg: impl Into<DiagnosticMessage>,
+ decorate: impl for<'a, 'b> FnOnce(
+ &'b mut DiagnosticBuilder<'a, ()>,
+ ) -> &'b mut DiagnosticBuilder<'a, ()>,
) {
let (level, src) = self.lint_level_at_node(lint, id);
- struct_lint_level(self.sess, lint, level, src, None, decorate);
+ struct_lint_level(self.sess, lint, level, src, None, msg, decorate);
}
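
The lint hunks above change `struct_span_lint_hir` and `struct_lint_node` to take the message separately plus a `decorate` closure that receives `&mut DiagnosticBuilder` and returns it so calls can be chained, with the return value otherwise ignored. A standalone model of that closure shape, using a local `Builder` stand-in rather than rustc's types:

#[derive(Default)]
struct Builder {
    notes: Vec<String>,
}

impl Builder {
    fn note(&mut self, s: &str) -> &mut Self {
        self.notes.push(s.to_string());
        self
    }
}

fn struct_lint(msg: &str, decorate: impl FnOnce(&mut Builder) -> &mut Builder) {
    let mut diag = Builder::default();
    // the closure only adds decorations; its return value exists for chaining
    decorate(&mut diag);
    println!("warning: {msg}");
    for note in &diag.notes {
        println!("  note: {note}");
    }
}

fn main() {
    struct_lint("unused import: `std::fmt`", |diag| {
        diag.note("remove the unused import")
    });
}
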
pub fn in_scope_traits(self, id: HirId) -> Option<&'tcx [TraitCandidate]> {
@@ -2904,7 +2885,7 @@ impl<'tcx> TyCtxt<'tcx> {
}
pub fn is_late_bound(self, id: HirId) -> bool {
- self.is_late_bound_map(id.owner).map_or(false, |set| {
+ self.is_late_bound_map(id.owner.def_id).map_or(false, |set| {
let def_id = self.hir().local_def_id(id);
set.contains(&def_id)
})
@@ -2975,6 +2956,21 @@ impl<'tcx> TyCtxtAt<'tcx> {
}
}
+/// Parameter attributes that can only be determined by examining the body of a function instead
+/// of just its signature.
+///
+/// These can be useful for optimization purposes when a function is directly called. We compute
+/// them and store them into the crate metadata so that downstream crates can make use of them.
+///
+/// Right now, we only have `read_only`, but `no_capture` and `no_alias` might be useful in the
+/// future.
+#[derive(Clone, Copy, PartialEq, Debug, Default, TyDecodable, TyEncodable, HashStable)]
+pub struct DeducedParamAttrs {
+ /// The parameter is marked immutable in the function and contains no `UnsafeCell` (i.e. its
+ /// type is freeze).
+ pub read_only: bool,
+}
+
// We are comparing types with different invariant lifetimes, so `ptr::eq`
// won't work for us.
fn ptr_eq<T, U>(t: *const T, u: *const U) -> bool {
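
The new `DeducedParamAttrs` struct above records facts that only an inspection of the function body can establish, currently just `read_only`. A hedged illustration of the kind of parameter it describes; the type and function below are hypothetical examples, not tied to the actual analysis:

struct Config {
    threads: usize,
    name: String,
}

// `cfg` is never mutated and `Config` contains no `UnsafeCell`, so a body-based
// analysis could record this parameter as `read_only: true`
fn summarize(cfg: Config) -> String {
    format!("{} ({} threads)", cfg.name, cfg.threads)
}

fn main() {
    let cfg = Config { threads: 4, name: "build".to_string() };
    println!("{}", summarize(cfg));
}
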
diff --git a/compiler/rustc_middle/src/ty/diagnostics.rs b/compiler/rustc_middle/src/ty/diagnostics.rs
index dd2f43210..b8fd01e6a 100644
--- a/compiler/rustc_middle/src/ty/diagnostics.rs
+++ b/compiler/rustc_middle/src/ty/diagnostics.rs
@@ -102,13 +102,25 @@ pub fn suggest_arbitrary_trait_bound<'tcx>(
generics: &hir::Generics<'_>,
err: &mut Diagnostic,
trait_pred: PolyTraitPredicate<'tcx>,
+ associated_ty: Option<(&'static str, Ty<'tcx>)>,
) -> bool {
if !trait_pred.is_suggestable(tcx, false) {
return false;
}
let param_name = trait_pred.skip_binder().self_ty().to_string();
- let constraint = trait_pred.print_modifiers_and_trait_path().to_string();
+ let mut constraint = trait_pred.print_modifiers_and_trait_path().to_string();
+
+ if let Some((name, term)) = associated_ty {
+ // FIXME: this case overlaps with code in TyCtxt::note_and_explain_type_err.
+ // That should be extracted into a helper function.
+ if constraint.ends_with('>') {
+ constraint = format!("{}, {} = {}>", &constraint[..constraint.len() - 1], name, term);
+ } else {
+ constraint.push_str(&format!("<{} = {}>", name, term));
+ }
+ }
+
let param = generics.params.iter().find(|p| p.name.ident().as_str() == param_name);
// Skip, there is a param named Self
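
The change to `suggest_arbitrary_trait_bound` above appends an associated-type binding to the suggested constraint, either splicing it before a trailing `>` or adding a fresh `<...>` list. The same string handling in isolation (the `Convert`/`Output` names below are placeholders):

fn add_assoc_constraint(mut constraint: String, name: &str, term: &str) -> String {
    if constraint.ends_with('>') {
        // `Trait<A>` + `Item = Ty`  ->  `Trait<A, Item = Ty>`
        constraint = format!("{}, {} = {}>", &constraint[..constraint.len() - 1], name, term);
    } else {
        // `Trait` + `Item = Ty`  ->  `Trait<Item = Ty>`
        constraint.push_str(&format!("<{} = {}>", name, term));
    }
    constraint
}

fn main() {
    assert_eq!(
        add_assoc_constraint("Iterator".to_string(), "Item", "u8"),
        "Iterator<Item = u8>"
    );
    assert_eq!(
        add_assoc_constraint("Convert<u8>".to_string(), "Output", "String"),
        "Convert<u8, Output = String>"
    );
}
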
@@ -396,7 +408,7 @@ impl<'v> hir::intravisit::Visitor<'v> for TraitObjectVisitor<'v> {
) => {
self.0.push(ty);
}
- hir::TyKind::OpaqueDef(item_id, _) => {
+ hir::TyKind::OpaqueDef(item_id, _, _) => {
self.0.push(ty);
let item = self.1.item(item_id);
hir::intravisit::walk_item(self, item);
@@ -455,7 +467,7 @@ impl<'tcx> TypeVisitor<'tcx> for IsSuggestableVisitor<'tcx> {
}
}
- Dynamic(dty, _) => {
+ Dynamic(dty, _, _) => {
for pred in *dty {
match pred.skip_binder() {
ExistentialPredicate::Trait(_) | ExistentialPredicate::Projection(_) => {
@@ -499,3 +511,11 @@ impl<'tcx> TypeVisitor<'tcx> for IsSuggestableVisitor<'tcx> {
c.super_visit_with(self)
}
}
+
+#[derive(Diagnostic)]
+#[diag(borrowck_const_not_used_in_type_alias)]
+pub(super) struct ConstNotUsedTraitAlias {
+ pub ct: String,
+ #[primary_span]
+ pub span: Span,
+}
diff --git a/compiler/rustc_middle/src/ty/erase_regions.rs b/compiler/rustc_middle/src/ty/erase_regions.rs
index 3226950e7..ffdac93bc 100644
--- a/compiler/rustc_middle/src/ty/erase_regions.rs
+++ b/compiler/rustc_middle/src/ty/erase_regions.rs
@@ -1,4 +1,3 @@
-use crate::mir;
use crate::ty::fold::{TypeFoldable, TypeFolder, TypeSuperFoldable};
use crate::ty::visit::TypeVisitable;
use crate::ty::{self, Ty, TyCtxt, TypeFlags};
@@ -67,8 +66,4 @@ impl<'tcx> TypeFolder<'tcx> for RegionEraserVisitor<'tcx> {
_ => self.tcx.lifetimes.re_erased,
}
}
-
- fn fold_mir_const(&mut self, c: mir::ConstantKind<'tcx>) -> mir::ConstantKind<'tcx> {
- c.super_fold_with(self)
- }
}
diff --git a/compiler/rustc_middle/src/ty/error.rs b/compiler/rustc_middle/src/ty/error.rs
index 4b0bc3c11..4e6cdb786 100644
--- a/compiler/rustc_middle/src/ty/error.rs
+++ b/compiler/rustc_middle/src/ty/error.rs
@@ -2,6 +2,7 @@ use crate::traits::{ObligationCause, ObligationCauseCode};
use crate::ty::diagnostics::suggest_constraining_type_param;
use crate::ty::print::{FmtPrinter, Printer};
use crate::ty::{self, BoundRegionKind, Region, Ty, TyCtxt};
+use hir::def::DefKind;
use rustc_errors::Applicability::{MachineApplicable, MaybeIncorrect};
use rustc_errors::{pluralize, Diagnostic, MultiSpan};
use rustc_hir as hir;
@@ -13,7 +14,7 @@ use rustc_target::spec::abi;
use std::borrow::Cow;
use std::fmt;
-#[derive(Clone, Copy, Debug, PartialEq, Eq, TypeFoldable, TypeVisitable)]
+#[derive(Clone, Copy, Debug, PartialEq, Eq, TypeFoldable, TypeVisitable, Lift)]
pub struct ExpectedFound<T> {
pub expected: T,
pub found: T,
@@ -30,7 +31,8 @@ impl<T> ExpectedFound<T> {
}
// Data structures used in type unification
-#[derive(Clone, Debug, TypeFoldable, TypeVisitable)]
+#[derive(Copy, Clone, Debug, TypeFoldable, TypeVisitable, Lift)]
+#[rustc_pass_by_value]
pub enum TypeError<'tcx> {
Mismatch,
ConstnessMismatch(ExpectedFound<ty::BoundConstness>),
@@ -73,6 +75,18 @@ pub enum TypeError<'tcx> {
TargetFeatureCast(DefId),
}
+impl TypeError<'_> {
+ pub fn involves_regions(self) -> bool {
+ match self {
+ TypeError::RegionsDoesNotOutlive(_, _)
+ | TypeError::RegionsInsufficientlyPolymorphic(_, _)
+ | TypeError::RegionsOverlyPolymorphic(_, _)
+ | TypeError::RegionsPlaceholderMismatch => true,
+ _ => false,
+ }
+ }
+}
+
/// Explains the source of a type err in a short, human readable way. This is meant to be placed
/// in parentheses after some larger message. You should also invoke `note_and_explain_type_err()`
/// afterwards to present additional details, particularly when it comes to lifetime-related
@@ -211,7 +225,7 @@ impl<'tcx> fmt::Display for TypeError<'tcx> {
}
impl<'tcx> TypeError<'tcx> {
- pub fn must_include_note(&self) -> bool {
+ pub fn must_include_note(self) -> bool {
use self::TypeError::*;
match self {
CyclicTy(_) | CyclicConst(_) | UnsafetyMismatch(_) | ConstnessMismatch(_)
@@ -263,10 +277,23 @@ impl<'tcx> Ty<'tcx> {
}
ty::Slice(ty) if ty.is_simple_ty() => format!("slice `{}`", self).into(),
ty::Slice(_) => "slice".into(),
- ty::RawPtr(_) => "*-ptr".into(),
+ ty::RawPtr(tymut) => {
+ let tymut_string = match tymut.mutbl {
+ hir::Mutability::Mut => tymut.to_string(),
+ hir::Mutability::Not => format!("const {}", tymut.ty),
+ };
+
+ if tymut_string != "_" && (tymut.ty.is_simple_text() || tymut_string.len() < "const raw pointer".len()) {
+ format!("`*{}`", tymut_string).into()
+ } else {
+ // Unknown type name, it's long or has type arguments
+ "raw pointer".into()
+ }
+ },
ty::Ref(_, ty, mutbl) => {
let tymut = ty::TypeAndMut { ty, mutbl };
let tymut_string = tymut.to_string();
+
if tymut_string != "_"
&& (ty.is_simple_text() || tymut_string.len() < "mutable reference".len())
{
@@ -347,7 +374,7 @@ impl<'tcx> TyCtxt<'tcx> {
pub fn note_and_explain_type_err(
self,
diag: &mut Diagnostic,
- err: &TypeError<'tcx>,
+ err: TypeError<'tcx>,
cause: &ObligationCause<'tcx>,
sp: Span,
body_owner_def_id: DefId,
@@ -512,7 +539,7 @@ impl<T> Trait<T> for X {
diag.span_label(p_span, "this type parameter");
}
}
- (ty::Projection(proj_ty), _) => {
+ (ty::Projection(proj_ty), _) if self.def_kind(proj_ty.item_def_id) != DefKind::ImplTraitPlaceholder => {
self.expected_projection(
diag,
proj_ty,
@@ -521,7 +548,7 @@ impl<T> Trait<T> for X {
cause.code(),
);
}
- (_, ty::Projection(proj_ty)) => {
+ (_, ty::Projection(proj_ty)) if self.def_kind(proj_ty.item_def_id) != DefKind::ImplTraitPlaceholder => {
let msg = format!(
"consider constraining the associated type `{}` to `{}`",
values.found, values.expected,
@@ -568,7 +595,7 @@ impl<T> Trait<T> for X {
}
TargetFeatureCast(def_id) => {
let target_spans =
- self.get_attrs(*def_id, sym::target_feature).map(|attr| attr.span);
+ self.get_attrs(def_id, sym::target_feature).map(|attr| attr.span);
diag.note(
"functions with `#[target_feature]` can only be coerced to `unsafe` function pointers"
);
@@ -640,7 +667,7 @@ impl<T> Trait<T> for X {
self,
diag: &mut Diagnostic,
proj_ty: &ty::ProjectionTy<'tcx>,
- values: &ExpectedFound<Ty<'tcx>>,
+ values: ExpectedFound<Ty<'tcx>>,
body_owner_def_id: DefId,
cause_code: &ObligationCauseCode<'_>,
) {
@@ -834,7 +861,7 @@ fn foo(&self) -> Self::T { String::new() }
// When `body_owner` is an `impl` or `trait` item, look in its associated types for
// `expected` and point at it.
let parent_id = self.hir().get_parent_item(hir_id);
- let item = self.hir().find_by_def_id(parent_id);
+ let item = self.hir().find_by_def_id(parent_id.def_id);
debug!("expected_projection parent item {:?}", item);
match item {
Some(hir::Node::Item(hir::Item { kind: hir::ItemKind::Trait(.., items), .. })) => {
@@ -845,9 +872,9 @@ fn foo(&self) -> Self::T { String::new() }
// FIXME: account for returning some type in a trait fn impl that has
// an assoc type as a return type (#72076).
if let hir::Defaultness::Default { has_value: true } =
- self.impl_defaultness(item.id.def_id)
+ self.impl_defaultness(item.id.owner_id)
{
- if self.type_of(item.id.def_id) == found {
+ if self.type_of(item.id.owner_id) == found {
diag.span_label(
item.span,
"associated type defaults can't be assumed inside the \
@@ -867,7 +894,7 @@ fn foo(&self) -> Self::T { String::new() }
})) => {
for item in &items[..] {
if let hir::AssocItemKind::Type = item.kind {
- if self.type_of(item.id.def_id) == found {
+ if self.type_of(item.id.owner_id) == found {
diag.span_label(item.span, "expected this associated type");
return true;
}
diff --git a/compiler/rustc_middle/src/ty/fast_reject.rs b/compiler/rustc_middle/src/ty/fast_reject.rs
index 8d019a3ba..3be0bc4de 100644
--- a/compiler/rustc_middle/src/ty/fast_reject.rs
+++ b/compiler/rustc_middle/src/ty/fast_reject.rs
@@ -132,7 +132,7 @@ pub fn simplify_type<'tcx>(
// don't unify with anything else as long as they are fully normalized.
//
// We will have to be careful with lazy normalization here.
- TreatParams::AsPlaceholder if !ty.has_infer_types_or_consts() => {
+ TreatParams::AsPlaceholder if !ty.has_non_region_infer() => {
debug!("treating `{}` as a placeholder", ty);
Some(PlaceholderSimplifiedType)
}
@@ -384,14 +384,7 @@ impl DeepRejectCtxt {
// they might unify with any value.
ty::ConstKind::Unevaluated(_) | ty::ConstKind::Error(_) => true,
ty::ConstKind::Value(obl) => match k {
- ty::ConstKind::Value(imp) => {
- // FIXME(valtrees): Once we have valtrees, we can just
- // compare them directly here.
- match (obl.try_to_scalar_int(), imp.try_to_scalar_int()) {
- (Some(obl), Some(imp)) => obl == imp,
- _ => true,
- }
- }
+ ty::ConstKind::Value(imp) => obl == imp,
_ => true,
},
diff --git a/compiler/rustc_middle/src/ty/flags.rs b/compiler/rustc_middle/src/ty/flags.rs
index ea6bb8a7a..7201737be 100644
--- a/compiler/rustc_middle/src/ty/flags.rs
+++ b/compiler/rustc_middle/src/ty/flags.rs
@@ -1,5 +1,5 @@
use crate::ty::subst::{GenericArg, GenericArgKind};
-use crate::ty::{self, InferConst, Term, Ty, TypeFlags};
+use crate::ty::{self, InferConst, Ty, TypeFlags};
use std::slice;
#[derive(Debug)]
@@ -34,12 +34,6 @@ impl FlagComputation {
result.flags
}
- pub fn for_unevaluated_const(uv: ty::Unevaluated<'_>) -> TypeFlags {
- let mut result = FlagComputation::new();
- result.add_unevaluated_const(uv);
- result.flags
- }
-
fn add_flags(&mut self, flags: TypeFlags) {
self.flags = self.flags | flags;
}
@@ -171,7 +165,7 @@ impl FlagComputation {
self.add_substs(substs);
}
- &ty::Dynamic(obj, r) => {
+ &ty::Dynamic(obj, r, _) => {
for predicate in obj.iter() {
self.bound_computation(predicate, |computation, predicate| match predicate {
ty::ExistentialPredicate::Trait(tr) => computation.add_substs(tr.substs),
@@ -243,9 +237,9 @@ impl FlagComputation {
}
ty::PredicateKind::Projection(ty::ProjectionPredicate { projection_ty, term }) => {
self.add_projection_ty(projection_ty);
- match term {
- Term::Ty(ty) => self.add_ty(ty),
- Term::Const(c) => self.add_const(c),
+ match term.unpack() {
+ ty::TermKind::Ty(ty) => self.add_ty(ty),
+ ty::TermKind::Const(c) => self.add_const(c),
}
}
ty::PredicateKind::WellFormed(arg) => {
@@ -256,7 +250,7 @@ impl FlagComputation {
self.add_substs(substs);
}
ty::PredicateKind::ConstEvaluatable(uv) => {
- self.add_unevaluated_const(uv);
+ self.add_const(uv);
}
ty::PredicateKind::ConstEquate(expected, found) => {
self.add_const(expected);
@@ -289,7 +283,10 @@ impl FlagComputation {
fn add_const(&mut self, c: ty::Const<'_>) {
self.add_ty(c.ty());
match c.kind() {
- ty::ConstKind::Unevaluated(unevaluated) => self.add_unevaluated_const(unevaluated),
+ ty::ConstKind::Unevaluated(uv) => {
+ self.add_substs(uv.substs);
+ self.add_flags(TypeFlags::HAS_CT_PROJECTION);
+ }
ty::ConstKind::Infer(infer) => {
self.add_flags(TypeFlags::STILL_FURTHER_SPECIALIZABLE);
match infer {
@@ -313,16 +310,11 @@ impl FlagComputation {
}
}
- fn add_unevaluated_const<P>(&mut self, ct: ty::Unevaluated<'_, P>) {
- self.add_substs(ct.substs);
- self.add_flags(TypeFlags::HAS_CT_PROJECTION);
- }
-
fn add_existential_projection(&mut self, projection: &ty::ExistentialProjection<'_>) {
self.add_substs(projection.substs);
- match projection.term {
- ty::Term::Ty(ty) => self.add_ty(ty),
- ty::Term::Const(ct) => self.add_const(ct),
+ match projection.term.unpack() {
+ ty::TermKind::Ty(ty) => self.add_ty(ty),
+ ty::TermKind::Const(ct) => self.add_const(ct),
}
}
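Two mechanical patterns drive this file's changes: projection terms are now consumed through `term.unpack()` into a `ty::TermKind`, and the unevaluated-const case adds its substs and `HAS_CT_PROJECTION` inline now that `add_unevaluated_const` is gone. The sketch below only models the `unpack()` shape; the tagging scheme is purely illustrative and not how rustc actually packs `Term`:

```rust
// Illustrative stand-ins, not rustc's types: a packed value that only exposes an
// enum view on demand via `unpack()`.
#[derive(Clone, Copy)]
struct Term(u32); // imagine an interned, tagged pointer in the real compiler

enum TermKind {
    Ty(u32),
    Const(u32),
}

impl Term {
    fn unpack(self) -> TermKind {
        // Hypothetical tag: low bit selects the variant.
        if self.0 & 1 == 0 { TermKind::Ty(self.0 >> 1) } else { TermKind::Const(self.0 >> 1) }
    }
}

// Mirrors the shape of the rewritten match arms in `FlagComputation`.
fn add_flags_for(term: Term) {
    match term.unpack() {
        TermKind::Ty(ty) => println!("add_ty({ty})"),
        TermKind::Const(ct) => println!("add_const({ct})"),
    }
}

fn main() {
    add_flags_for(Term(4)); // a "type" term
    add_flags_for(Term(5)); // a "const" term
}
```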
diff --git a/compiler/rustc_middle/src/ty/fold.rs b/compiler/rustc_middle/src/ty/fold.rs
index 5e96e278b..54f1499eb 100644
--- a/compiler/rustc_middle/src/ty/fold.rs
+++ b/compiler/rustc_middle/src/ty/fold.rs
@@ -13,8 +13,7 @@
//!
//! There are three groups of traits involved in each traversal.
//! - `TypeFoldable`. This is implemented once for many types, including:
-//! - Types of interest, for which the the methods delegate to the
-//! folder.
+//! - Types of interest, for which the methods delegate to the folder.
//! - All other types, including generic containers like `Vec` and `Option`.
//! It defines a "skeleton" of how they should be folded.
//! - `TypeSuperFoldable`. This is implemented only for each type of interest,
@@ -43,7 +42,6 @@
//! - ty.super_fold_with(folder)
//! - u.fold_with(folder)
//! ```
-use crate::mir;
use crate::ty::{self, Binder, BoundTy, Ty, TyCtxt, TypeVisitable};
use rustc_data_structures::fx::FxIndexMap;
use rustc_hir::def_id::DefId;
@@ -128,17 +126,9 @@ pub trait TypeFolder<'tcx>: FallibleTypeFolder<'tcx, Error = !> {
c.super_fold_with(self)
}
- fn fold_unevaluated(&mut self, uv: ty::Unevaluated<'tcx>) -> ty::Unevaluated<'tcx> {
- uv.super_fold_with(self)
- }
-
fn fold_predicate(&mut self, p: ty::Predicate<'tcx>) -> ty::Predicate<'tcx> {
p.super_fold_with(self)
}
-
- fn fold_mir_const(&mut self, c: mir::ConstantKind<'tcx>) -> mir::ConstantKind<'tcx> {
- bug!("most type folders should not be folding MIR datastructures: {:?}", c)
- }
}
/// This trait is implemented for every folding traversal. There is a fold
@@ -172,26 +162,12 @@ pub trait FallibleTypeFolder<'tcx>: Sized {
c.try_super_fold_with(self)
}
- fn try_fold_unevaluated(
- &mut self,
- c: ty::Unevaluated<'tcx>,
- ) -> Result<ty::Unevaluated<'tcx>, Self::Error> {
- c.try_super_fold_with(self)
- }
-
fn try_fold_predicate(
&mut self,
p: ty::Predicate<'tcx>,
) -> Result<ty::Predicate<'tcx>, Self::Error> {
p.try_super_fold_with(self)
}
-
- fn try_fold_mir_const(
- &mut self,
- c: mir::ConstantKind<'tcx>,
- ) -> Result<mir::ConstantKind<'tcx>, Self::Error> {
- bug!("most type folders should not be folding MIR datastructures: {:?}", c)
- }
}
// This blanket implementation of the fallible trait for infallible folders
@@ -225,23 +201,9 @@ where
Ok(self.fold_const(c))
}
- fn try_fold_unevaluated(
- &mut self,
- c: ty::Unevaluated<'tcx>,
- ) -> Result<ty::Unevaluated<'tcx>, !> {
- Ok(self.fold_unevaluated(c))
- }
-
fn try_fold_predicate(&mut self, p: ty::Predicate<'tcx>) -> Result<ty::Predicate<'tcx>, !> {
Ok(self.fold_predicate(p))
}
-
- fn try_fold_mir_const(
- &mut self,
- c: mir::ConstantKind<'tcx>,
- ) -> Result<mir::ConstantKind<'tcx>, !> {
- Ok(self.fold_mir_const(c))
- }
}
///////////////////////////////////////////////////////////////////////////
@@ -302,6 +264,17 @@ impl<'tcx> TyCtxt<'tcx> {
{
value.fold_with(&mut RegionFolder::new(self, &mut f))
}
+
+ pub fn super_fold_regions<T>(
+ self,
+ value: T,
+ mut f: impl FnMut(ty::Region<'tcx>, ty::DebruijnIndex) -> ty::Region<'tcx>,
+ ) -> T
+ where
+ T: TypeSuperFoldable<'tcx>,
+ {
+ value.super_fold_with(&mut RegionFolder::new(self, &mut f))
+ }
}
/// Folds over the substructure of a type, visiting its component
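The added `super_fold_regions` is identical to `fold_regions` except that it enters through `super_fold_with`, so the folder's own override is skipped for the outermost value and only its components are visited. A minimal standalone model of that delegation (toy `Node`/`Folder` types, not the real `TypeFoldable`/`TypeFolder` traits):

```rust
// Toy fold/super_fold pair mirroring the delegation described in this file's
// module docs: `fold_with` goes through the folder, `super_fold_with` recurses
// directly into the value's parts.
#[derive(Debug, PartialEq, Clone)]
enum Node {
    Leaf(u32),
    Pair(Box<Node>, Box<Node>),
}

trait Folder {
    fn fold_node(&mut self, n: Node) -> Node {
        n.super_fold_with(self)
    }
}

impl Node {
    fn fold_with<F: Folder + ?Sized>(self, f: &mut F) -> Node {
        f.fold_node(self) // give the folder a chance to replace `self` wholesale
    }
    fn super_fold_with<F: Folder + ?Sized>(self, f: &mut F) -> Node {
        match self {
            Node::Leaf(x) => Node::Leaf(x),
            Node::Pair(a, b) => Node::Pair(Box::new(a.fold_with(f)), Box::new(b.fold_with(f))),
        }
    }
}

struct DoubleLeaves;
impl Folder for DoubleLeaves {
    fn fold_node(&mut self, n: Node) -> Node {
        match n {
            Node::Leaf(x) => Node::Leaf(x * 2),
            other => other.super_fold_with(self),
        }
    }
}

fn main() {
    let leaf = Node::Leaf(3);
    // `fold_with` lets the folder rewrite the outermost node too...
    assert_eq!(leaf.clone().fold_with(&mut DoubleLeaves), Node::Leaf(6));
    // ...while `super_fold_with` only visits what is *inside* it.
    assert_eq!(leaf.super_fold_with(&mut DoubleLeaves), Node::Leaf(3));
}
```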
@@ -353,7 +326,7 @@ impl<'a, 'tcx> TypeFolder<'tcx> for RegionFolder<'a, 'tcx> {
t
}
- #[instrument(skip(self), level = "debug")]
+ #[instrument(skip(self), level = "debug", ret)]
fn fold_region(&mut self, r: ty::Region<'tcx>) -> ty::Region<'tcx> {
match *r {
ty::ReLateBound(debruijn, _) if debruijn < self.current_index => {
@@ -377,17 +350,13 @@ pub trait BoundVarReplacerDelegate<'tcx> {
fn replace_const(&mut self, bv: ty::BoundVar, ty: Ty<'tcx>) -> ty::Const<'tcx>;
}
-pub struct FnMutDelegate<R, T, C> {
- pub regions: R,
- pub types: T,
- pub consts: C,
+pub struct FnMutDelegate<'a, 'tcx> {
+ pub regions: &'a mut (dyn FnMut(ty::BoundRegion) -> ty::Region<'tcx> + 'a),
+ pub types: &'a mut (dyn FnMut(ty::BoundTy) -> Ty<'tcx> + 'a),
+ pub consts: &'a mut (dyn FnMut(ty::BoundVar, Ty<'tcx>) -> ty::Const<'tcx> + 'a),
}
-impl<'tcx, R, T, C> BoundVarReplacerDelegate<'tcx> for FnMutDelegate<R, T, C>
-where
- R: FnMut(ty::BoundRegion) -> ty::Region<'tcx>,
- T: FnMut(ty::BoundTy) -> Ty<'tcx>,
- C: FnMut(ty::BoundVar, Ty<'tcx>) -> ty::Const<'tcx>,
-{
+
+impl<'a, 'tcx> BoundVarReplacerDelegate<'tcx> for FnMutDelegate<'a, 'tcx> {
fn replace_region(&mut self, br: ty::BoundRegion) -> ty::Region<'tcx> {
(self.regions)(br)
}
@@ -511,7 +480,7 @@ impl<'tcx> TyCtxt<'tcx> {
pub fn replace_late_bound_regions_uncached<T, F>(
self,
value: Binder<'tcx, T>,
- replace_regions: F,
+ mut replace_regions: F,
) -> T
where
F: FnMut(ty::BoundRegion) -> ty::Region<'tcx>,
@@ -522,9 +491,9 @@ impl<'tcx> TyCtxt<'tcx> {
value
} else {
let delegate = FnMutDelegate {
- regions: replace_regions,
- types: |b| bug!("unexpected bound ty in binder: {b:?}"),
- consts: |b, ty| bug!("unexpected bound ct in binder: {b:?} {ty}"),
+ regions: &mut replace_regions,
+ types: &mut |b| bug!("unexpected bound ty in binder: {b:?}"),
+ consts: &mut |b, ty| bug!("unexpected bound ct in binder: {b:?} {ty}"),
};
let mut replacer = BoundVarReplacer::new(self, delegate);
value.fold_with(&mut replacer)
@@ -584,19 +553,19 @@ impl<'tcx> TyCtxt<'tcx> {
self.replace_escaping_bound_vars_uncached(
value,
FnMutDelegate {
- regions: |r: ty::BoundRegion| {
+ regions: &mut |r: ty::BoundRegion| {
self.mk_region(ty::ReLateBound(
ty::INNERMOST,
ty::BoundRegion { var: shift_bv(r.var), kind: r.kind },
))
},
- types: |t: ty::BoundTy| {
+ types: &mut |t: ty::BoundTy| {
self.mk_ty(ty::Bound(
ty::INNERMOST,
ty::BoundTy { var: shift_bv(t.var), kind: t.kind },
))
},
- consts: |c, ty: Ty<'tcx>| {
+ consts: &mut |c, ty: Ty<'tcx>| {
self.mk_const(ty::ConstS {
kind: ty::ConstKind::Bound(ty::INNERMOST, shift_bv(c)),
ty,
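`FnMutDelegate` goes from three closure type parameters to three `&mut dyn FnMut` fields, which is why every construction site above now writes `regions: &mut replace_regions`, `types: &mut |…|`, and so on. The usual motivation for this shape is to stop instantiating the bound-var replacer once per closure combination; the standalone sketch below (illustrative field types, not rustc's) shows the API surface callers see:

```rust
// A delegate whose callbacks are `&mut dyn FnMut` fields rather than generic
// parameters, matching the shape the diff gives `FnMutDelegate<'a, 'tcx>`.
struct FnMutDelegate<'a> {
    regions: &'a mut (dyn FnMut(u32) -> String + 'a),
    types: &'a mut (dyn FnMut(u32) -> String + 'a),
}

// Because the delegate is a single concrete type, this function is compiled once
// and shared by every caller, whichever closures they plug in.
fn replace_all(delegate: &mut FnMutDelegate<'_>, regions: &[u32]) -> Vec<String> {
    regions.iter().map(|&r| (delegate.regions)(r)).collect()
}

fn main() {
    let mut replace_regions = |r: u32| format!("'r{r}");
    let mut delegate = FnMutDelegate {
        regions: &mut replace_regions,
        types: &mut |t| format!("T{t}"),
    };
    assert_eq!(replace_all(&mut delegate, &[0, 1]), ["'r0", "'r1"]);
    assert_eq!((delegate.types)(2), "T2");
}
```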
diff --git a/compiler/rustc_middle/src/ty/generics.rs b/compiler/rustc_middle/src/ty/generics.rs
index add2df258..19754d145 100644
--- a/compiler/rustc_middle/src/ty/generics.rs
+++ b/compiler/rustc_middle/src/ty/generics.rs
@@ -1,7 +1,5 @@
-use crate::middle::resolve_lifetime::ObjectLifetimeDefault;
use crate::ty;
-use crate::ty::subst::{Subst, SubstsRef};
-use crate::ty::EarlyBinder;
+use crate::ty::{EarlyBinder, SubstsRef};
use rustc_ast as ast;
use rustc_data_structures::fx::FxHashMap;
use rustc_hir::def_id::DefId;
@@ -13,7 +11,7 @@ use super::{EarlyBoundRegion, InstantiatedPredicates, ParamConst, ParamTy, Predi
#[derive(Clone, Debug, TyEncodable, TyDecodable, HashStable)]
pub enum GenericParamDefKind {
Lifetime,
- Type { has_default: bool, object_lifetime_default: ObjectLifetimeDefault, synthetic: bool },
+ Type { has_default: bool, synthetic: bool },
Const { has_default: bool },
}
@@ -28,8 +26,9 @@ impl GenericParamDefKind {
pub fn to_ord(&self) -> ast::ParamKindOrd {
match self {
GenericParamDefKind::Lifetime => ast::ParamKindOrd::Lifetime,
- GenericParamDefKind::Type { .. } => ast::ParamKindOrd::Type,
- GenericParamDefKind::Const { .. } => ast::ParamKindOrd::Const,
+ GenericParamDefKind::Type { .. } | GenericParamDefKind::Const { .. } => {
+ ast::ParamKindOrd::TypeOrConst
+ }
}
}
@@ -122,6 +121,21 @@ pub struct Generics {
}
impl<'tcx> Generics {
+ /// Looks through the generics and all parents to find the index of the
+ /// given param def-id. This is in comparison to the `param_def_id_to_index`
+ /// struct member, which only stores information about this item's own
+ /// generics.
+ pub fn param_def_id_to_index(&self, tcx: TyCtxt<'tcx>, def_id: DefId) -> Option<u32> {
+ if let Some(idx) = self.param_def_id_to_index.get(&def_id) {
+ Some(*idx)
+ } else if let Some(parent) = self.parent {
+ let parent = tcx.generics_of(parent);
+ parent.param_def_id_to_index(tcx, def_id)
+ } else {
+ None
+ }
+ }
+
#[inline]
pub fn count(&self) -> usize {
self.parent_count + self.params.len()
@@ -252,7 +266,7 @@ impl<'tcx> Generics {
// Filter the default arguments.
//
// This currently uses structural equality instead
- // of semantic equivalance. While not ideal, that's
+ // of semantic equivalence. While not ideal, that's
// good enough for now as this should only be used
// for diagnostics anyways.
own_params.end -= self
@@ -314,6 +328,7 @@ impl<'tcx> GenericPredicates<'tcx> {
}
}
+ #[instrument(level = "debug", skip(self, tcx))]
fn instantiate_into(
&self,
tcx: TyCtxt<'tcx>,
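The new `param_def_id_to_index` method (same name as the field it wraps) checks the item's own map first and otherwise recurses through `tcx.generics_of(self.parent)`. A self-contained model of that parent-chain lookup, with toy maps and a slice index standing in for `DefId`s and the `generics_of` query:

```rust
use std::collections::HashMap;

// Toy stand-in for `ty::Generics`: `parent` is an index into the `all` slice.
struct Generics {
    parent: Option<usize>,
    param_def_id_to_index: HashMap<u32, u32>,
}

// Mirrors the new method: own map first, then walk up through the parents.
fn param_index(all: &[Generics], item: usize, def_id: u32) -> Option<u32> {
    let g = &all[item];
    if let Some(&idx) = g.param_def_id_to_index.get(&def_id) {
        Some(idx)
    } else if let Some(parent) = g.parent {
        param_index(all, parent, def_id) // like `tcx.generics_of(parent)` + recurse
    } else {
        None
    }
}

fn main() {
    let parent = Generics { parent: None, param_def_id_to_index: HashMap::from([(7, 0)]) };
    let child = Generics { parent: Some(0), param_def_id_to_index: HashMap::from([(9, 1)]) };
    let all = [parent, child];
    assert_eq!(param_index(&all, 1, 9), Some(1)); // found on the item itself
    assert_eq!(param_index(&all, 1, 7), Some(0)); // found on the parent
    assert_eq!(param_index(&all, 1, 3), None);    // not a generic param at all
}
```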
diff --git a/compiler/rustc_middle/src/ty/impls_ty.rs b/compiler/rustc_middle/src/ty/impls_ty.rs
index cd00b26b8..d1c0d62ac 100644
--- a/compiler/rustc_middle/src/ty/impls_ty.rs
+++ b/compiler/rustc_middle/src/ty/impls_ty.rs
@@ -113,7 +113,7 @@ impl<'a> HashStable<StableHashingContext<'a>> for mir::interpret::AllocId {
}
// `Relocations` with default type parameters is a sorted map.
-impl<'a, Prov> HashStable<StableHashingContext<'a>> for mir::interpret::Relocations<Prov>
+impl<'a, Prov> HashStable<StableHashingContext<'a>> for mir::interpret::ProvenanceMap<Prov>
where
Prov: HashStable<StableHashingContext<'a>>,
{
diff --git a/compiler/rustc_middle/src/ty/inhabitedness/def_id_forest.rs b/compiler/rustc_middle/src/ty/inhabitedness/def_id_forest.rs
deleted file mode 100644
index c4ad698ba..000000000
--- a/compiler/rustc_middle/src/ty/inhabitedness/def_id_forest.rs
+++ /dev/null
@@ -1,145 +0,0 @@
-use crate::ty::context::TyCtxt;
-use crate::ty::{DefId, DefIdTree};
-use rustc_span::def_id::CRATE_DEF_ID;
-use smallvec::SmallVec;
-use std::mem;
-
-use DefIdForest::*;
-
-/// Represents a forest of `DefId`s closed under the ancestor relation. That is,
-/// if a `DefId` representing a module is contained in the forest then all
-/// `DefId`s defined in that module or submodules are also implicitly contained
-/// in the forest.
-///
-/// This is used to represent a set of modules in which a type is visibly
-/// uninhabited.
-///
-/// We store the minimal set of `DefId`s required to represent the whole set. If A and B are
-/// `DefId`s in the `DefIdForest`, and A is a parent of B, then only A will be stored. When this is
-/// used with `type_uninhabited_from`, there will very rarely be more than one `DefId` stored.
-#[derive(Copy, Clone, HashStable, Debug)]
-pub enum DefIdForest<'a> {
- Empty,
- Single(DefId),
- /// This variant is very rare.
- /// Invariant: >1 elements
- Multiple(&'a [DefId]),
-}
-
-/// Tests whether a slice of roots contains a given DefId.
-#[inline]
-fn slice_contains<'tcx>(tcx: TyCtxt<'tcx>, slice: &[DefId], id: DefId) -> bool {
- slice.iter().any(|root_id| tcx.is_descendant_of(id, *root_id))
-}
-
-impl<'tcx> DefIdForest<'tcx> {
- /// Creates an empty forest.
- pub fn empty() -> DefIdForest<'tcx> {
- DefIdForest::Empty
- }
-
- /// Creates a forest consisting of a single tree representing the entire
- /// crate.
- #[inline]
- pub fn full() -> DefIdForest<'tcx> {
- DefIdForest::from_id(CRATE_DEF_ID.to_def_id())
- }
-
- /// Creates a forest containing a `DefId` and all its descendants.
- pub fn from_id(id: DefId) -> DefIdForest<'tcx> {
- DefIdForest::Single(id)
- }
-
- fn as_slice(&self) -> &[DefId] {
- match self {
- Empty => &[],
- Single(id) => std::slice::from_ref(id),
- Multiple(root_ids) => root_ids,
- }
- }
-
- // Only allocates in the rare `Multiple` case.
- fn from_vec(tcx: TyCtxt<'tcx>, root_ids: SmallVec<[DefId; 1]>) -> DefIdForest<'tcx> {
- match &root_ids[..] {
- [] => Empty,
- [id] => Single(*id),
- _ => DefIdForest::Multiple(tcx.arena.alloc_from_iter(root_ids)),
- }
- }
-
- /// Tests whether the forest is empty.
- pub fn is_empty(&self) -> bool {
- match self {
- Empty => true,
- Single(..) | Multiple(..) => false,
- }
- }
-
- /// Iterate over the set of roots.
- fn iter(&self) -> impl Iterator<Item = DefId> + '_ {
- self.as_slice().iter().copied()
- }
-
- /// Tests whether the forest contains a given DefId.
- pub fn contains(&self, tcx: TyCtxt<'tcx>, id: DefId) -> bool {
- slice_contains(tcx, self.as_slice(), id)
- }
-
- /// Calculate the intersection of a collection of forests.
- pub fn intersection<I>(tcx: TyCtxt<'tcx>, iter: I) -> DefIdForest<'tcx>
- where
- I: IntoIterator<Item = DefIdForest<'tcx>>,
- {
- let mut iter = iter.into_iter();
- let mut ret: SmallVec<[_; 1]> = if let Some(first) = iter.next() {
- SmallVec::from_slice(first.as_slice())
- } else {
- return DefIdForest::full();
- };
-
- let mut next_ret: SmallVec<[_; 1]> = SmallVec::new();
- for next_forest in iter {
- // No need to continue if the intersection is already empty.
- if ret.is_empty() || next_forest.is_empty() {
- return DefIdForest::empty();
- }
-
- // We keep the elements in `ret` that are also in `next_forest`.
- next_ret.extend(ret.iter().copied().filter(|&id| next_forest.contains(tcx, id)));
- // We keep the elements in `next_forest` that are also in `ret`.
- next_ret.extend(next_forest.iter().filter(|&id| slice_contains(tcx, &ret, id)));
-
- mem::swap(&mut next_ret, &mut ret);
- next_ret.clear();
- }
- DefIdForest::from_vec(tcx, ret)
- }
-
- /// Calculate the union of a collection of forests.
- pub fn union<I>(tcx: TyCtxt<'tcx>, iter: I) -> DefIdForest<'tcx>
- where
- I: IntoIterator<Item = DefIdForest<'tcx>>,
- {
- let mut ret: SmallVec<[_; 1]> = SmallVec::new();
- let mut next_ret: SmallVec<[_; 1]> = SmallVec::new();
- for next_forest in iter {
- // Union with the empty set is a no-op.
- if next_forest.is_empty() {
- continue;
- }
-
- // We add everything in `ret` that is not in `next_forest`.
- next_ret.extend(ret.iter().copied().filter(|&id| !next_forest.contains(tcx, id)));
- // We add everything in `next_forest` that we haven't added yet.
- for id in next_forest.iter() {
- if !slice_contains(tcx, &next_ret, id) {
- next_ret.push(id);
- }
- }
-
- mem::swap(&mut next_ret, &mut ret);
- next_ret.clear();
- }
- DefIdForest::from_vec(tcx, ret)
- }
-}
diff --git a/compiler/rustc_middle/src/ty/inhabitedness/inhabited_predicate.rs b/compiler/rustc_middle/src/ty/inhabitedness/inhabited_predicate.rs
new file mode 100644
index 000000000..b7aa45572
--- /dev/null
+++ b/compiler/rustc_middle/src/ty/inhabitedness/inhabited_predicate.rs
@@ -0,0 +1,204 @@
+use crate::ty::context::TyCtxt;
+use crate::ty::{self, DefId, DefIdTree, ParamEnv, Ty};
+
+/// Represents whether some type is inhabited in a given context.
+/// Examples of uninhabited types are `!`, `enum Void {}`, or a struct
+/// containing either of those types.
+/// A type's inhabitedness may depend on the `ParamEnv` as well as what types
+/// are visible in the current module.
+#[derive(Clone, Copy, Debug, PartialEq, HashStable)]
+pub enum InhabitedPredicate<'tcx> {
+ /// Inhabited
+ True,
+ /// Uninhabited
+ False,
+ /// Uninhabited when a const value is non-zero. This occurs when there is an
+ /// array of uninhabited items, but the array is inhabited if it is empty.
+ ConstIsZero(ty::Const<'tcx>),
+ /// Uninhabited if within a certain module. This occurs when an uninhabited
+ /// type has restricted visibility.
+ NotInModule(DefId),
+ /// Inhabited if some generic type is inhabited.
+ /// These are replaced by calling [`Self::subst`].
+ GenericType(Ty<'tcx>),
+ /// A AND B
+ And(&'tcx [InhabitedPredicate<'tcx>; 2]),
+ /// A OR B
+ Or(&'tcx [InhabitedPredicate<'tcx>; 2]),
+}
+
+impl<'tcx> InhabitedPredicate<'tcx> {
+ /// Returns true if the corresponding type is inhabited in the given
+ /// `ParamEnv` and module
+ pub fn apply(self, tcx: TyCtxt<'tcx>, param_env: ParamEnv<'tcx>, module_def_id: DefId) -> bool {
+ let Ok(result) = self
+ .apply_inner::<!>(tcx, param_env, &|id| Ok(tcx.is_descendant_of(module_def_id, id)));
+ result
+ }
+
+ /// Same as `apply`, but returns `None` if self contains a module predicate
+ pub fn apply_any_module(self, tcx: TyCtxt<'tcx>, param_env: ParamEnv<'tcx>) -> Option<bool> {
+ self.apply_inner(tcx, param_env, &|_| Err(())).ok()
+ }
+
+ fn apply_inner<E>(
+ self,
+ tcx: TyCtxt<'tcx>,
+ param_env: ParamEnv<'tcx>,
+ in_module: &impl Fn(DefId) -> Result<bool, E>,
+ ) -> Result<bool, E> {
+ match self {
+ Self::False => Ok(false),
+ Self::True => Ok(true),
+ Self::ConstIsZero(const_) => match const_.try_eval_usize(tcx, param_env) {
+ None | Some(0) => Ok(true),
+ Some(1..) => Ok(false),
+ },
+ Self::NotInModule(id) => in_module(id).map(|in_mod| !in_mod),
+ Self::GenericType(_) => Ok(true),
+ Self::And([a, b]) => try_and(a, b, |x| x.apply_inner(tcx, param_env, in_module)),
+ Self::Or([a, b]) => try_or(a, b, |x| x.apply_inner(tcx, param_env, in_module)),
+ }
+ }
+
+ pub fn and(self, tcx: TyCtxt<'tcx>, other: Self) -> Self {
+ self.reduce_and(tcx, other).unwrap_or_else(|| Self::And(tcx.arena.alloc([self, other])))
+ }
+
+ pub fn or(self, tcx: TyCtxt<'tcx>, other: Self) -> Self {
+ self.reduce_or(tcx, other).unwrap_or_else(|| Self::Or(tcx.arena.alloc([self, other])))
+ }
+
+ pub fn all(tcx: TyCtxt<'tcx>, iter: impl IntoIterator<Item = Self>) -> Self {
+ let mut result = Self::True;
+ for pred in iter {
+ if matches!(pred, Self::False) {
+ return Self::False;
+ }
+ result = result.and(tcx, pred);
+ }
+ result
+ }
+
+ pub fn any(tcx: TyCtxt<'tcx>, iter: impl IntoIterator<Item = Self>) -> Self {
+ let mut result = Self::False;
+ for pred in iter {
+ if matches!(pred, Self::True) {
+ return Self::True;
+ }
+ result = result.or(tcx, pred);
+ }
+ result
+ }
+
+ fn reduce_and(self, tcx: TyCtxt<'tcx>, other: Self) -> Option<Self> {
+ match (self, other) {
+ (Self::True, a) | (a, Self::True) => Some(a),
+ (Self::False, _) | (_, Self::False) => Some(Self::False),
+ (Self::ConstIsZero(a), Self::ConstIsZero(b)) if a == b => Some(Self::ConstIsZero(a)),
+ (Self::NotInModule(a), Self::NotInModule(b)) if a == b => Some(Self::NotInModule(a)),
+ (Self::NotInModule(a), Self::NotInModule(b)) if tcx.is_descendant_of(a, b) => {
+ Some(Self::NotInModule(b))
+ }
+ (Self::NotInModule(a), Self::NotInModule(b)) if tcx.is_descendant_of(b, a) => {
+ Some(Self::NotInModule(a))
+ }
+ (Self::GenericType(a), Self::GenericType(b)) if a == b => Some(Self::GenericType(a)),
+ (Self::And(&[a, b]), c) | (c, Self::And(&[a, b])) => {
+ if let Some(ac) = a.reduce_and(tcx, c) {
+ Some(ac.and(tcx, b))
+ } else if let Some(bc) = b.reduce_and(tcx, c) {
+ Some(Self::And(tcx.arena.alloc([a, bc])))
+ } else {
+ None
+ }
+ }
+ _ => None,
+ }
+ }
+
+ fn reduce_or(self, tcx: TyCtxt<'tcx>, other: Self) -> Option<Self> {
+ match (self, other) {
+ (Self::True, _) | (_, Self::True) => Some(Self::True),
+ (Self::False, a) | (a, Self::False) => Some(a),
+ (Self::ConstIsZero(a), Self::ConstIsZero(b)) if a == b => Some(Self::ConstIsZero(a)),
+ (Self::NotInModule(a), Self::NotInModule(b)) if a == b => Some(Self::NotInModule(a)),
+ (Self::NotInModule(a), Self::NotInModule(b)) if tcx.is_descendant_of(a, b) => {
+ Some(Self::NotInModule(a))
+ }
+ (Self::NotInModule(a), Self::NotInModule(b)) if tcx.is_descendant_of(b, a) => {
+ Some(Self::NotInModule(b))
+ }
+ (Self::GenericType(a), Self::GenericType(b)) if a == b => Some(Self::GenericType(a)),
+ (Self::Or(&[a, b]), c) | (c, Self::Or(&[a, b])) => {
+ if let Some(ac) = a.reduce_or(tcx, c) {
+ Some(ac.or(tcx, b))
+ } else if let Some(bc) = b.reduce_or(tcx, c) {
+ Some(Self::Or(tcx.arena.alloc([a, bc])))
+ } else {
+ None
+ }
+ }
+ _ => None,
+ }
+ }
+
+    /// Replaces each generic type with its corresponding predicate
+ pub fn subst(self, tcx: TyCtxt<'tcx>, substs: ty::SubstsRef<'tcx>) -> Self {
+ self.subst_opt(tcx, substs).unwrap_or(self)
+ }
+
+ fn subst_opt(self, tcx: TyCtxt<'tcx>, substs: ty::SubstsRef<'tcx>) -> Option<Self> {
+ match self {
+ Self::ConstIsZero(c) => {
+ let c = ty::EarlyBinder(c).subst(tcx, substs);
+ let pred = match c.kind().try_to_machine_usize(tcx) {
+ Some(0) => Self::True,
+ Some(1..) => Self::False,
+ None => Self::ConstIsZero(c),
+ };
+ Some(pred)
+ }
+ Self::GenericType(t) => {
+ Some(ty::EarlyBinder(t).subst(tcx, substs).inhabited_predicate(tcx))
+ }
+ Self::And(&[a, b]) => match a.subst_opt(tcx, substs) {
+ None => b.subst_opt(tcx, substs).map(|b| a.and(tcx, b)),
+ Some(InhabitedPredicate::False) => Some(InhabitedPredicate::False),
+ Some(a) => Some(a.and(tcx, b.subst_opt(tcx, substs).unwrap_or(b))),
+ },
+ Self::Or(&[a, b]) => match a.subst_opt(tcx, substs) {
+ None => b.subst_opt(tcx, substs).map(|b| a.or(tcx, b)),
+ Some(InhabitedPredicate::True) => Some(InhabitedPredicate::True),
+ Some(a) => Some(a.or(tcx, b.subst_opt(tcx, substs).unwrap_or(b))),
+ },
+ _ => None,
+ }
+ }
+}
+
+// this is basically like `f(a)? && f(b)?` but different in the case of
+// `Ok(false) && Err(_) -> Ok(false)`
+fn try_and<T, E>(a: T, b: T, f: impl Fn(T) -> Result<bool, E>) -> Result<bool, E> {
+ let a = f(a);
+ if matches!(a, Ok(false)) {
+ return Ok(false);
+ }
+ match (a, f(b)) {
+ (_, Ok(false)) | (Ok(false), _) => Ok(false),
+ (Ok(true), Ok(true)) => Ok(true),
+ (Err(e), _) | (_, Err(e)) => Err(e),
+ }
+}
+
+fn try_or<T, E>(a: T, b: T, f: impl Fn(T) -> Result<bool, E>) -> Result<bool, E> {
+ let a = f(a);
+ if matches!(a, Ok(true)) {
+ return Ok(true);
+ }
+ match (a, f(b)) {
+ (_, Ok(true)) | (Ok(true), _) => Ok(true),
+ (Ok(false), Ok(false)) => Ok(false),
+ (Err(e), _) | (_, Err(e)) => Err(e),
+ }
+}
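The two helpers at the bottom of the new file are self-contained, so their short-circuiting can be checked in isolation. The snippet below copies them verbatim and exercises the property the comment calls out: a definite `Ok(false)` from either operand of `try_and` wins even if the other operand errors, and dually `Ok(true)` wins for `try_or`:

```rust
// Copied verbatim from the new inhabited_predicate.rs above.
fn try_and<T, E>(a: T, b: T, f: impl Fn(T) -> Result<bool, E>) -> Result<bool, E> {
    let a = f(a);
    if matches!(a, Ok(false)) {
        return Ok(false);
    }
    match (a, f(b)) {
        (_, Ok(false)) | (Ok(false), _) => Ok(false),
        (Ok(true), Ok(true)) => Ok(true),
        (Err(e), _) | (_, Err(e)) => Err(e),
    }
}

fn try_or<T, E>(a: T, b: T, f: impl Fn(T) -> Result<bool, E>) -> Result<bool, E> {
    let a = f(a);
    if matches!(a, Ok(true)) {
        return Ok(true);
    }
    match (a, f(b)) {
        (_, Ok(true)) | (Ok(true), _) => Ok(true),
        (Ok(false), Ok(false)) => Ok(false),
        (Err(e), _) | (_, Err(e)) => Err(e),
    }
}

fn main() {
    // Toy evaluator: 0 => Ok(false), 1 => Ok(true), anything else => Err(()).
    let eval = |x: u8| match x {
        0 => Ok(false),
        1 => Ok(true),
        _ => Err(()),
    };
    assert_eq!(try_and(2, 0, eval), Ok(false)); // Err(_) && Ok(false) -> Ok(false)
    assert_eq!(try_or(2, 1, eval), Ok(true));   // Err(_) || Ok(true)  -> Ok(true)
    assert_eq!(try_and(1, 2, eval), Err(()));   // no definite answer, error surfaces
}
```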
diff --git a/compiler/rustc_middle/src/ty/inhabitedness/mod.rs b/compiler/rustc_middle/src/ty/inhabitedness/mod.rs
index 3d22f5a04..279a728ea 100644
--- a/compiler/rustc_middle/src/ty/inhabitedness/mod.rs
+++ b/compiler/rustc_middle/src/ty/inhabitedness/mod.rs
@@ -1,57 +1,60 @@
-pub use self::def_id_forest::DefIdForest;
+//! This module contains logic for determining whether a type is inhabited or
+//! uninhabited. The [`InhabitedPredicate`] type captures the minimum
+//! information needed to determine whether a type is inhabited given a
+//! `ParamEnv` and module ID.
+//!
+//! # Example
+//! ```rust
+//! enum Void {}
+//! mod a {
+//! pub mod b {
+//! pub struct SecretlyUninhabited {
+//! _priv: !,
+//! }
+//! }
+//! }
+//!
+//! mod c {
+//! pub struct AlsoSecretlyUninhabited {
+//! _priv: Void,
+//! }
+//! mod d {
+//! }
+//! }
+//!
+//! struct Foo {
+//! x: a::b::SecretlyUninhabited,
+//! y: c::AlsoSecretlyUninhabited,
+//! }
+//! ```
+//! In this code, the type `Foo` will only be visibly uninhabited inside the
+//! modules `b`, `c` and `d`. Calling `inhabited_predicate` on `Foo` will
+//! return `NotInModule(b) AND NotInModule(c)`.
+//!
+//! We need this information for pattern-matching on `Foo` or types that contain
+//! `Foo`.
+//!
+//! # Example
+//! ```rust
+//! let foo_result: Result<T, Foo> = ... ;
+//! let Ok(t) = foo_result;
+//! ```
+//! This code should only compile in modules where the uninhabitedness of `Foo`
+//! is visible.
-use crate::ty;
use crate::ty::context::TyCtxt;
-use crate::ty::{AdtDef, FieldDef, Ty, VariantDef};
-use crate::ty::{AdtKind, Visibility};
-use crate::ty::{DefId, SubstsRef};
+use crate::ty::{self, DefId, Ty, VariantDef, Visibility};
use rustc_type_ir::sty::TyKind::*;
-mod def_id_forest;
+pub mod inhabited_predicate;
-// The methods in this module calculate `DefIdForest`s of modules in which an
-// `AdtDef`/`VariantDef`/`FieldDef` is visibly uninhabited.
-//
-// # Example
-// ```rust
-// enum Void {}
-// mod a {
-// pub mod b {
-// pub struct SecretlyUninhabited {
-// _priv: !,
-// }
-// }
-// }
-//
-// mod c {
-// pub struct AlsoSecretlyUninhabited {
-// _priv: Void,
-// }
-// mod d {
-// }
-// }
-//
-// struct Foo {
-// x: a::b::SecretlyUninhabited,
-// y: c::AlsoSecretlyUninhabited,
-// }
-// ```
-// In this code, the type `Foo` will only be visibly uninhabited inside the
-// modules `b`, `c` and `d`. Calling `uninhabited_from` on `Foo` or its `AdtDef` will
-// return the forest of modules {`b`, `c`->`d`} (represented in a `DefIdForest` by the
-// set {`b`, `c`}).
-//
-// We need this information for pattern-matching on `Foo` or types that contain
-// `Foo`.
-//
-// # Example
-// ```rust
-// let foo_result: Result<T, Foo> = ... ;
-// let Ok(t) = foo_result;
-// ```
-// This code should only compile in modules where the uninhabitedness of `Foo` is
-// visible.
+pub use inhabited_predicate::InhabitedPredicate;
+
+pub(crate) fn provide(providers: &mut ty::query::Providers) {
+ *providers =
+ ty::query::Providers { inhabited_predicate_adt, inhabited_predicate_type, ..*providers };
+}
impl<'tcx> TyCtxt<'tcx> {
/// Checks whether a type is visibly uninhabited from a particular module.
@@ -100,135 +103,92 @@ impl<'tcx> TyCtxt<'tcx> {
ty: Ty<'tcx>,
param_env: ty::ParamEnv<'tcx>,
) -> bool {
- // To check whether this type is uninhabited at all (not just from the
- // given node), you could check whether the forest is empty.
- // ```
- // forest.is_empty()
- // ```
- ty.uninhabited_from(self, param_env).contains(self, module)
+ !ty.inhabited_predicate(self).apply(self, param_env, module)
}
}
-impl<'tcx> AdtDef<'tcx> {
- /// Calculates the forest of `DefId`s from which this ADT is visibly uninhabited.
- fn uninhabited_from(
- self,
- tcx: TyCtxt<'tcx>,
- substs: SubstsRef<'tcx>,
- param_env: ty::ParamEnv<'tcx>,
- ) -> DefIdForest<'tcx> {
- // Non-exhaustive ADTs from other crates are always considered inhabited.
- if self.is_variant_list_non_exhaustive() && !self.did().is_local() {
- DefIdForest::empty()
- } else {
- DefIdForest::intersection(
- tcx,
- self.variants()
- .iter()
- .map(|v| v.uninhabited_from(tcx, substs, self.adt_kind(), param_env)),
- )
+/// Returns an `InhabitedPredicate` that is generic over type parameters and
+/// requires calling [`InhabitedPredicate::subst`]
+fn inhabited_predicate_adt(tcx: TyCtxt<'_>, def_id: DefId) -> InhabitedPredicate<'_> {
+ if let Some(def_id) = def_id.as_local() {
+ if matches!(tcx.representability(def_id), ty::Representability::Infinite) {
+ return InhabitedPredicate::True;
}
}
+ let adt = tcx.adt_def(def_id);
+ InhabitedPredicate::any(
+ tcx,
+ adt.variants().iter().map(|variant| variant.inhabited_predicate(tcx, adt)),
+ )
}
impl<'tcx> VariantDef {
/// Calculates the forest of `DefId`s from which this variant is visibly uninhabited.
- pub fn uninhabited_from(
+ pub fn inhabited_predicate(
&self,
tcx: TyCtxt<'tcx>,
- substs: SubstsRef<'tcx>,
- adt_kind: AdtKind,
- param_env: ty::ParamEnv<'tcx>,
- ) -> DefIdForest<'tcx> {
- let is_enum = match adt_kind {
- // For now, `union`s are never considered uninhabited.
- // The precise semantics of inhabitedness with respect to unions is currently undecided.
- AdtKind::Union => return DefIdForest::empty(),
- AdtKind::Enum => true,
- AdtKind::Struct => false,
- };
- // Non-exhaustive variants from other crates are always considered inhabited.
+ adt: ty::AdtDef<'_>,
+ ) -> InhabitedPredicate<'tcx> {
+ debug_assert!(!adt.is_union());
if self.is_field_list_non_exhaustive() && !self.def_id.is_local() {
- DefIdForest::empty()
- } else {
- DefIdForest::union(
- tcx,
- self.fields.iter().map(|f| f.uninhabited_from(tcx, substs, is_enum, param_env)),
- )
+ // Non-exhaustive variants from other crates are always considered inhabited.
+ return InhabitedPredicate::True;
}
- }
-}
-
-impl<'tcx> FieldDef {
- /// Calculates the forest of `DefId`s from which this field is visibly uninhabited.
- fn uninhabited_from(
- &self,
- tcx: TyCtxt<'tcx>,
- substs: SubstsRef<'tcx>,
- is_enum: bool,
- param_env: ty::ParamEnv<'tcx>,
- ) -> DefIdForest<'tcx> {
- let data_uninhabitedness = move || self.ty(tcx, substs).uninhabited_from(tcx, param_env);
- // FIXME(canndrew): Currently enum fields are (incorrectly) stored with
- // `Visibility::Invisible` so we need to override `self.vis` if we're
- // dealing with an enum.
- if is_enum {
- data_uninhabitedness()
- } else {
- match self.vis {
- Visibility::Invisible => DefIdForest::empty(),
- Visibility::Restricted(from) => {
- let forest = DefIdForest::from_id(from);
- let iter = Some(forest).into_iter().chain(Some(data_uninhabitedness()));
- DefIdForest::intersection(tcx, iter)
+ InhabitedPredicate::all(
+ tcx,
+ self.fields.iter().map(|field| {
+ let pred = tcx.type_of(field.did).inhabited_predicate(tcx);
+ if adt.is_enum() {
+ return pred;
}
- Visibility::Public => data_uninhabitedness(),
- }
- }
+ match field.vis {
+ Visibility::Public => pred,
+ Visibility::Restricted(from) => {
+ pred.or(tcx, InhabitedPredicate::NotInModule(from))
+ }
+ }
+ }),
+ )
}
}
impl<'tcx> Ty<'tcx> {
- /// Calculates the forest of `DefId`s from which this type is visibly uninhabited.
- fn uninhabited_from(
- self,
- tcx: TyCtxt<'tcx>,
- param_env: ty::ParamEnv<'tcx>,
- ) -> DefIdForest<'tcx> {
- tcx.type_uninhabited_from(param_env.and(self))
+ pub fn inhabited_predicate(self, tcx: TyCtxt<'tcx>) -> InhabitedPredicate<'tcx> {
+ match self.kind() {
+            // For now, `union`s are always considered inhabited
+ Adt(adt, _) if adt.is_union() => InhabitedPredicate::True,
+ // Non-exhaustive ADTs from other crates are always considered inhabited
+ Adt(adt, _) if adt.is_variant_list_non_exhaustive() && !adt.did().is_local() => {
+ InhabitedPredicate::True
+ }
+ Never => InhabitedPredicate::False,
+ Param(_) | Projection(_) => InhabitedPredicate::GenericType(self),
+ Tuple(tys) if tys.is_empty() => InhabitedPredicate::True,
+ // use a query for more complex cases
+ Adt(..) | Array(..) | Tuple(_) => tcx.inhabited_predicate_type(self),
+ // references and other types are inhabited
+ _ => InhabitedPredicate::True,
+ }
}
}
-// Query provider for `type_uninhabited_from`.
-pub(crate) fn type_uninhabited_from<'tcx>(
- tcx: TyCtxt<'tcx>,
- key: ty::ParamEnvAnd<'tcx, Ty<'tcx>>,
-) -> DefIdForest<'tcx> {
- let ty = key.value;
- let param_env = key.param_env;
+/// N.B. this query should only be called through `Ty::inhabited_predicate`
+fn inhabited_predicate_type<'tcx>(tcx: TyCtxt<'tcx>, ty: Ty<'tcx>) -> InhabitedPredicate<'tcx> {
match *ty.kind() {
- Adt(def, substs) => def.uninhabited_from(tcx, substs, param_env),
+ Adt(adt, substs) => tcx.inhabited_predicate_adt(adt.did()).subst(tcx, substs),
- Never => DefIdForest::full(),
-
- Tuple(ref tys) => {
- DefIdForest::union(tcx, tys.iter().map(|ty| ty.uninhabited_from(tcx, param_env)))
+ Tuple(tys) => {
+ InhabitedPredicate::all(tcx, tys.iter().map(|ty| ty.inhabited_predicate(tcx)))
}
- Array(ty, len) => match len.try_eval_usize(tcx, param_env) {
- Some(0) | None => DefIdForest::empty(),
- // If the array is definitely non-empty, it's uninhabited if
- // the type of its elements is uninhabited.
- Some(1..) => ty.uninhabited_from(tcx, param_env),
+ // If we can evaluate the array length before having a `ParamEnv`, then
+ // we can simplify the predicate. This is an optimization.
+ Array(ty, len) => match len.kind().try_to_machine_usize(tcx) {
+ Some(0) => InhabitedPredicate::True,
+ Some(1..) => ty.inhabited_predicate(tcx),
+ None => ty.inhabited_predicate(tcx).or(tcx, InhabitedPredicate::ConstIsZero(len)),
},
- // References to uninitialised memory are valid for any type, including
- // uninhabited types, in unsafe code, so we treat all references as
- // inhabited.
- // The precise semantics of inhabitedness with respect to references is currently
- // undecided.
- Ref(..) => DefIdForest::empty(),
-
- _ => DefIdForest::empty(),
+ _ => bug!("unexpected TyKind, use `Ty::inhabited_predicate`"),
}
}
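End to end, the module now answers "is this type visibly uninhabited from here?" by building one predicate per type and evaluating it per module, rather than materializing a forest of `DefId`s. A toy model of the doc-comment example above, with string paths standing in for `DefId`s and only the variants that example needs:

```rust
// Minimal stand-in for `InhabitedPredicate`: evaluating to true means "inhabited".
enum Pred {
    True,
    NotInModule(&'static str), // uninhabited when viewed from inside this module
    And(Box<Pred>, Box<Pred>),
}

// Stands in for `tcx.is_descendant_of(module, ancestor)`.
fn is_descendant_of(module: &str, ancestor: &str) -> bool {
    module == ancestor || module.starts_with(&format!("{ancestor}::"))
}

impl Pred {
    // Counterpart of `InhabitedPredicate::apply`: is the type inhabited as seen
    // from `module`?
    fn apply(&self, module: &str) -> bool {
        match self {
            Pred::True => true,
            Pred::NotInModule(m) => !is_descendant_of(module, m),
            Pred::And(a, b) => a.apply(module) && b.apply(module),
        }
    }
}

fn main() {
    // `Foo` from the module docs: NotInModule(a::b) AND NotInModule(c).
    let foo = Pred::And(
        Box::new(Pred::NotInModule("crate::a::b")),
        Box::new(Pred::NotInModule("crate::c")),
    );
    assert!(foo.apply("crate"));            // inhabited as far as the crate root can tell
    assert!(!foo.apply("crate::a::b"));     // visibly uninhabited inside `b`
    assert!(!foo.apply("crate::c::d"));     // ...and inside `c`'s submodule `d`
    assert!(Pred::True.apply("crate::a::b")); // e.g. `u32`: inhabited everywhere
}
```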
diff --git a/compiler/rustc_middle/src/ty/instance.rs b/compiler/rustc_middle/src/ty/instance.rs
index 53218225d..6c1414f7b 100644
--- a/compiler/rustc_middle/src/ty/instance.rs
+++ b/compiler/rustc_middle/src/ty/instance.rs
@@ -1,9 +1,7 @@
use crate::middle::codegen_fn_attrs::CodegenFnAttrFlags;
use crate::ty::print::{FmtPrinter, Printer};
-use crate::ty::subst::{InternalSubsts, Subst};
-use crate::ty::{
- self, EarlyBinder, SubstsRef, Ty, TyCtxt, TypeFoldable, TypeSuperFoldable, TypeVisitable,
-};
+use crate::ty::{self, Ty, TyCtxt, TypeFoldable, TypeSuperFoldable, TypeVisitable};
+use crate::ty::{EarlyBinder, InternalSubsts, SubstsRef};
use rustc_errors::ErrorGuaranteed;
use rustc_hir::def::Namespace;
use rustc_hir::def_id::{CrateNum, DefId};
@@ -20,14 +18,14 @@ use std::fmt;
/// simply couples a potentially generic `InstanceDef` with some substs, and codegen and const eval
/// will do all required substitution as they run.
#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug, TyEncodable, TyDecodable)]
-#[derive(HashStable, Lift)]
+#[derive(HashStable, Lift, TypeFoldable, TypeVisitable)]
pub struct Instance<'tcx> {
pub def: InstanceDef<'tcx>,
pub substs: SubstsRef<'tcx>,
}
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
-#[derive(TyEncodable, TyDecodable, HashStable, TypeFoldable, TypeVisitable)]
+#[derive(TyEncodable, TyDecodable, HashStable, TypeFoldable, TypeVisitable, Lift)]
pub enum InstanceDef<'tcx> {
/// A user-defined callable item.
///
diff --git a/compiler/rustc_middle/src/ty/layout.rs b/compiler/rustc_middle/src/ty/layout.rs
index ad78d24e9..3312f44c6 100644
--- a/compiler/rustc_middle/src/ty/layout.rs
+++ b/compiler/rustc_middle/src/ty/layout.rs
@@ -1,38 +1,23 @@
use crate::middle::codegen_fn_attrs::CodegenFnAttrFlags;
-use crate::mir::{GeneratorLayout, GeneratorSavedLocal};
use crate::ty::normalize_erasing_regions::NormalizationError;
-use crate::ty::subst::Subst;
-use crate::ty::{self, subst::SubstsRef, EarlyBinder, ReprOptions, Ty, TyCtxt, TypeVisitable};
+use crate::ty::{self, ReprOptions, Ty, TyCtxt, TypeVisitable};
use rustc_ast as ast;
use rustc_attr as attr;
+use rustc_errors::{DiagnosticBuilder, Handler, IntoDiagnostic};
use rustc_hir as hir;
use rustc_hir::def_id::DefId;
-use rustc_hir::lang_items::LangItem;
-use rustc_index::bit_set::BitSet;
-use rustc_index::vec::{Idx, IndexVec};
-use rustc_session::{config::OptLevel, DataTypeKind, FieldInfo, SizeKind, VariantInfo};
-use rustc_span::symbol::Symbol;
+use rustc_index::vec::Idx;
+use rustc_session::config::OptLevel;
use rustc_span::{Span, DUMMY_SP};
-use rustc_target::abi::call::{
- ArgAbi, ArgAttribute, ArgAttributes, ArgExtension, Conv, FnAbi, PassMode, Reg, RegKind,
-};
+use rustc_target::abi::call::FnAbi;
use rustc_target::abi::*;
use rustc_target::spec::{abi::Abi as SpecAbi, HasTargetSpec, PanicStrategy, Target};
-use std::cmp;
+use std::cmp::{self};
use std::fmt;
-use std::iter;
use std::num::NonZeroUsize;
use std::ops::Bound;
-use rand::{seq::SliceRandom, SeedableRng};
-use rand_xoshiro::Xoshiro128StarStar;
-
-pub fn provide(providers: &mut ty::query::Providers) {
- *providers =
- ty::query::Providers { layout_of, fn_abi_of_fn_ptr, fn_abi_of_instance, ..*providers };
-}
-
pub trait IntegerExt {
fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>, signed: bool) -> Ty<'tcx>;
fn from_attr<C: HasDataLayout>(cx: &C, ity: attr::IntType) -> Integer;
@@ -204,6 +189,31 @@ pub enum LayoutError<'tcx> {
NormalizationFailure(Ty<'tcx>, NormalizationError<'tcx>),
}
+impl<'a> IntoDiagnostic<'a, !> for LayoutError<'a> {
+ fn into_diagnostic(self, handler: &'a Handler) -> DiagnosticBuilder<'a, !> {
+ let mut diag = handler.struct_fatal("");
+
+ match self {
+ LayoutError::Unknown(ty) => {
+ diag.set_arg("ty", ty);
+ diag.set_primary_message(rustc_errors::fluent::middle_unknown_layout);
+ }
+ LayoutError::SizeOverflow(ty) => {
+ diag.set_arg("ty", ty);
+ diag.set_primary_message(rustc_errors::fluent::middle_values_too_big);
+ }
+ LayoutError::NormalizationFailure(ty, e) => {
+ diag.set_arg("ty", ty);
+ diag.set_arg("failure_ty", e.get_type_for_failure());
+ diag.set_primary_message(rustc_errors::fluent::middle_cannot_be_normalized);
+ }
+ }
+ diag
+ }
+}
+
+// FIXME: Once the other errors that embed this error have been converted to translateable
+// diagnostics, this Display impl should be removed.
impl<'tcx> fmt::Display for LayoutError<'tcx> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match *self {
@@ -221,1842 +231,12 @@ impl<'tcx> fmt::Display for LayoutError<'tcx> {
}
}
-/// Enforce some basic invariants on layouts.
-fn sanity_check_layout<'tcx>(
- tcx: TyCtxt<'tcx>,
- param_env: ty::ParamEnv<'tcx>,
- layout: &TyAndLayout<'tcx>,
-) {
- // Type-level uninhabitedness should always imply ABI uninhabitedness.
- if tcx.conservative_is_privately_uninhabited(param_env.and(layout.ty)) {
- assert!(layout.abi.is_uninhabited());
- }
-
- if layout.size.bytes() % layout.align.abi.bytes() != 0 {
- bug!("size is not a multiple of align, in the following layout:\n{layout:#?}");
- }
-
- if cfg!(debug_assertions) {
- fn check_layout_abi<'tcx>(tcx: TyCtxt<'tcx>, layout: Layout<'tcx>) {
- match layout.abi() {
- Abi::Scalar(scalar) => {
- // No padding in scalars.
- assert_eq!(
- layout.align().abi,
- scalar.align(&tcx).abi,
- "alignment mismatch between ABI and layout in {layout:#?}"
- );
- assert_eq!(
- layout.size(),
- scalar.size(&tcx),
- "size mismatch between ABI and layout in {layout:#?}"
- );
- }
- Abi::Vector { count, element } => {
- // No padding in vectors. Alignment can be strengthened, though.
- assert!(
- layout.align().abi >= element.align(&tcx).abi,
- "alignment mismatch between ABI and layout in {layout:#?}"
- );
- let size = element.size(&tcx) * count;
- assert_eq!(
- layout.size(),
- size.align_to(tcx.data_layout().vector_align(size).abi),
- "size mismatch between ABI and layout in {layout:#?}"
- );
- }
- Abi::ScalarPair(scalar1, scalar2) => {
- // Sanity-check scalar pairs. These are a bit more flexible and support
- // padding, but we can at least ensure both fields actually fit into the layout
- // and the alignment requirement has not been weakened.
- let align1 = scalar1.align(&tcx).abi;
- let align2 = scalar2.align(&tcx).abi;
- assert!(
- layout.align().abi >= cmp::max(align1, align2),
- "alignment mismatch between ABI and layout in {layout:#?}",
- );
- let field2_offset = scalar1.size(&tcx).align_to(align2);
- assert!(
- layout.size() >= field2_offset + scalar2.size(&tcx),
- "size mismatch between ABI and layout in {layout:#?}"
- );
- }
- Abi::Uninhabited | Abi::Aggregate { .. } => {} // Nothing to check.
- }
- }
-
- check_layout_abi(tcx, layout.layout);
-
- if let Variants::Multiple { variants, .. } = &layout.variants {
- for variant in variants {
- check_layout_abi(tcx, *variant);
- // No nested "multiple".
- assert!(matches!(variant.variants(), Variants::Single { .. }));
- // Skip empty variants.
- if variant.size() == Size::ZERO
- || variant.fields().count() == 0
- || variant.abi().is_uninhabited()
- {
- // These are never actually accessed anyway, so we can skip them. (Note that
- // sometimes, variants with fields have size 0, and sometimes, variants without
- // fields have non-0 size.)
- continue;
- }
- // Variants should have the same or a smaller size as the full thing.
- if variant.size() > layout.size {
- bug!(
- "Type with size {} bytes has variant with size {} bytes: {layout:#?}",
- layout.size.bytes(),
- variant.size().bytes(),
- )
- }
- // The top-level ABI and the ABI of the variants should be coherent.
- let abi_coherent = match (layout.abi, variant.abi()) {
- (Abi::Scalar(..), Abi::Scalar(..)) => true,
- (Abi::ScalarPair(..), Abi::ScalarPair(..)) => true,
- (Abi::Uninhabited, _) => true,
- (Abi::Aggregate { .. }, _) => true,
- _ => false,
- };
- if !abi_coherent {
- bug!(
- "Variant ABI is incompatible with top-level ABI:\nvariant={:#?}\nTop-level: {layout:#?}",
- variant
- );
- }
- }
- }
- }
-}
-
-#[instrument(skip(tcx, query), level = "debug")]
-fn layout_of<'tcx>(
- tcx: TyCtxt<'tcx>,
- query: ty::ParamEnvAnd<'tcx, Ty<'tcx>>,
-) -> Result<TyAndLayout<'tcx>, LayoutError<'tcx>> {
- ty::tls::with_related_context(tcx, move |icx| {
- let (param_env, ty) = query.into_parts();
- debug!(?ty);
-
- if !tcx.recursion_limit().value_within_limit(icx.layout_depth) {
- tcx.sess.fatal(&format!("overflow representing the type `{}`", ty));
- }
-
- // Update the ImplicitCtxt to increase the layout_depth
- let icx = ty::tls::ImplicitCtxt { layout_depth: icx.layout_depth + 1, ..icx.clone() };
-
- ty::tls::enter_context(&icx, |_| {
- let param_env = param_env.with_reveal_all_normalized(tcx);
- let unnormalized_ty = ty;
-
- // FIXME: We might want to have two different versions of `layout_of`:
- // One that can be called after typecheck has completed and can use
- // `normalize_erasing_regions` here and another one that can be called
- // before typecheck has completed and uses `try_normalize_erasing_regions`.
- let ty = match tcx.try_normalize_erasing_regions(param_env, ty) {
- Ok(t) => t,
- Err(normalization_error) => {
- return Err(LayoutError::NormalizationFailure(ty, normalization_error));
- }
- };
-
- if ty != unnormalized_ty {
- // Ensure this layout is also cached for the normalized type.
- return tcx.layout_of(param_env.and(ty));
- }
-
- let cx = LayoutCx { tcx, param_env };
-
- let layout = cx.layout_of_uncached(ty)?;
- let layout = TyAndLayout { ty, layout };
-
- cx.record_layout_for_printing(layout);
-
- sanity_check_layout(tcx, param_env, &layout);
-
- Ok(layout)
- })
- })
-}
-
+#[derive(Clone, Copy)]
pub struct LayoutCx<'tcx, C> {
pub tcx: C,
pub param_env: ty::ParamEnv<'tcx>,
}
-#[derive(Copy, Clone, Debug)]
-enum StructKind {
- /// A tuple, closure, or univariant which cannot be coerced to unsized.
- AlwaysSized,
- /// A univariant, the last field of which may be coerced to unsized.
- MaybeUnsized,
- /// A univariant, but with a prefix of an arbitrary size & alignment (e.g., enum tag).
- Prefixed(Size, Align),
-}
-
-// Invert a bijective mapping, i.e. `invert(map)[y] = x` if `map[x] = y`.
-// This is used to go between `memory_index` (source field order to memory order)
-// and `inverse_memory_index` (memory order to source field order).
-// See also `FieldsShape::Arbitrary::memory_index` for more details.
-// FIXME(eddyb) build a better abstraction for permutations, if possible.
-fn invert_mapping(map: &[u32]) -> Vec<u32> {
- let mut inverse = vec![0; map.len()];
- for i in 0..map.len() {
- inverse[map[i] as usize] = i as u32;
- }
- inverse
-}
-
-impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
- fn scalar_pair(&self, a: Scalar, b: Scalar) -> LayoutS<'tcx> {
- let dl = self.data_layout();
- let b_align = b.align(dl);
- let align = a.align(dl).max(b_align).max(dl.aggregate_align);
- let b_offset = a.size(dl).align_to(b_align.abi);
- let size = (b_offset + b.size(dl)).align_to(align.abi);
-
- // HACK(nox): We iter on `b` and then `a` because `max_by_key`
- // returns the last maximum.
- let largest_niche = Niche::from_scalar(dl, b_offset, b)
- .into_iter()
- .chain(Niche::from_scalar(dl, Size::ZERO, a))
- .max_by_key(|niche| niche.available(dl));
-
- LayoutS {
- variants: Variants::Single { index: VariantIdx::new(0) },
- fields: FieldsShape::Arbitrary {
- offsets: vec![Size::ZERO, b_offset],
- memory_index: vec![0, 1],
- },
- abi: Abi::ScalarPair(a, b),
- largest_niche,
- align,
- size,
- }
- }
-
- fn univariant_uninterned(
- &self,
- ty: Ty<'tcx>,
- fields: &[TyAndLayout<'_>],
- repr: &ReprOptions,
- kind: StructKind,
- ) -> Result<LayoutS<'tcx>, LayoutError<'tcx>> {
- let dl = self.data_layout();
- let pack = repr.pack;
- if pack.is_some() && repr.align.is_some() {
- self.tcx.sess.delay_span_bug(DUMMY_SP, "struct cannot be packed and aligned");
- return Err(LayoutError::Unknown(ty));
- }
-
- let mut align = if pack.is_some() { dl.i8_align } else { dl.aggregate_align };
-
- let mut inverse_memory_index: Vec<u32> = (0..fields.len() as u32).collect();
-
- let optimize = !repr.inhibit_struct_field_reordering_opt();
- if optimize {
- let end =
- if let StructKind::MaybeUnsized = kind { fields.len() - 1 } else { fields.len() };
- let optimizing = &mut inverse_memory_index[..end];
- let field_align = |f: &TyAndLayout<'_>| {
- if let Some(pack) = pack { f.align.abi.min(pack) } else { f.align.abi }
- };
-
- // If `-Z randomize-layout` was enabled for the type definition we can shuffle
- // the field ordering to try and catch some code making assumptions about layouts
- // we don't guarantee
- if repr.can_randomize_type_layout() {
- // `ReprOptions.layout_seed` is a deterministic seed that we can use to
- // randomize field ordering with
- let mut rng = Xoshiro128StarStar::seed_from_u64(repr.field_shuffle_seed);
-
- // Shuffle the ordering of the fields
- optimizing.shuffle(&mut rng);
-
- // Otherwise we just leave things alone and actually optimize the type's fields
- } else {
- match kind {
- StructKind::AlwaysSized | StructKind::MaybeUnsized => {
- optimizing.sort_by_key(|&x| {
- // Place ZSTs first to avoid "interesting offsets",
- // especially with only one or two non-ZST fields.
- let f = &fields[x as usize];
- (!f.is_zst(), cmp::Reverse(field_align(f)))
- });
- }
-
- StructKind::Prefixed(..) => {
- // Sort in ascending alignment so that the layout stays optimal
- // regardless of the prefix
- optimizing.sort_by_key(|&x| field_align(&fields[x as usize]));
- }
- }
-
- // FIXME(Kixiron): We can always shuffle fields within a given alignment class
- // regardless of the status of `-Z randomize-layout`
- }
- }
-
- // inverse_memory_index holds field indices by increasing memory offset.
- // That is, if field 5 has offset 0, the first element of inverse_memory_index is 5.
- // We now write field offsets to the corresponding offset slot;
- // field 5 with offset 0 puts 0 in offsets[5].
- // At the bottom of this function, we invert `inverse_memory_index` to
- // produce `memory_index` (see `invert_mapping`).
-
- let mut sized = true;
- let mut offsets = vec![Size::ZERO; fields.len()];
- let mut offset = Size::ZERO;
- let mut largest_niche = None;
- let mut largest_niche_available = 0;
-
- if let StructKind::Prefixed(prefix_size, prefix_align) = kind {
- let prefix_align =
- if let Some(pack) = pack { prefix_align.min(pack) } else { prefix_align };
- align = align.max(AbiAndPrefAlign::new(prefix_align));
- offset = prefix_size.align_to(prefix_align);
- }
-
- for &i in &inverse_memory_index {
- let field = fields[i as usize];
- if !sized {
- self.tcx.sess.delay_span_bug(
- DUMMY_SP,
- &format!(
- "univariant: field #{} of `{}` comes after unsized field",
- offsets.len(),
- ty
- ),
- );
- }
-
- if field.is_unsized() {
- sized = false;
- }
-
- // Invariant: offset < dl.obj_size_bound() <= 1<<61
- let field_align = if let Some(pack) = pack {
- field.align.min(AbiAndPrefAlign::new(pack))
- } else {
- field.align
- };
- offset = offset.align_to(field_align.abi);
- align = align.max(field_align);
-
- debug!("univariant offset: {:?} field: {:#?}", offset, field);
- offsets[i as usize] = offset;
-
- if let Some(mut niche) = field.largest_niche {
- let available = niche.available(dl);
- if available > largest_niche_available {
- largest_niche_available = available;
- niche.offset += offset;
- largest_niche = Some(niche);
- }
- }
-
- offset = offset.checked_add(field.size, dl).ok_or(LayoutError::SizeOverflow(ty))?;
- }
-
- if let Some(repr_align) = repr.align {
- align = align.max(AbiAndPrefAlign::new(repr_align));
- }
-
- debug!("univariant min_size: {:?}", offset);
- let min_size = offset;
-
- // As stated above, inverse_memory_index holds field indices by increasing offset.
- // This makes it an already-sorted view of the offsets vec.
- // To invert it, consider:
- // If field 5 has offset 0, offsets[0] is 5, and memory_index[5] should be 0.
- // Field 5 would be the first element, so memory_index is i:
- // Note: if we didn't optimize, it's already right.
-
- let memory_index =
- if optimize { invert_mapping(&inverse_memory_index) } else { inverse_memory_index };
-
- let size = min_size.align_to(align.abi);
- let mut abi = Abi::Aggregate { sized };
-
- // Unpack newtype ABIs and find scalar pairs.
- if sized && size.bytes() > 0 {
- // All other fields must be ZSTs.
- let mut non_zst_fields = fields.iter().enumerate().filter(|&(_, f)| !f.is_zst());
-
- match (non_zst_fields.next(), non_zst_fields.next(), non_zst_fields.next()) {
- // We have exactly one non-ZST field.
- (Some((i, field)), None, None) => {
- // Field fills the struct and it has a scalar or scalar pair ABI.
- if offsets[i].bytes() == 0 && align.abi == field.align.abi && size == field.size
- {
- match field.abi {
- // For plain scalars, or vectors of them, we can't unpack
- // newtypes for `#[repr(C)]`, as that affects C ABIs.
- Abi::Scalar(_) | Abi::Vector { .. } if optimize => {
- abi = field.abi;
- }
- // But scalar pairs are Rust-specific and get
- // treated as aggregates by C ABIs anyway.
- Abi::ScalarPair(..) => {
- abi = field.abi;
- }
- _ => {}
- }
- }
- }
-
- // Two non-ZST fields, and they're both scalars.
- (Some((i, a)), Some((j, b)), None) => {
- match (a.abi, b.abi) {
- (Abi::Scalar(a), Abi::Scalar(b)) => {
- // Order by the memory placement, not source order.
- let ((i, a), (j, b)) = if offsets[i] < offsets[j] {
- ((i, a), (j, b))
- } else {
- ((j, b), (i, a))
- };
- let pair = self.scalar_pair(a, b);
- let pair_offsets = match pair.fields {
- FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
- assert_eq!(memory_index, &[0, 1]);
- offsets
- }
- _ => bug!(),
- };
- if offsets[i] == pair_offsets[0]
- && offsets[j] == pair_offsets[1]
- && align == pair.align
- && size == pair.size
- {
- // We can use `ScalarPair` only when it matches our
- // already computed layout (including `#[repr(C)]`).
- abi = pair.abi;
- }
- }
- _ => {}
- }
- }
-
- _ => {}
- }
- }
-
- if fields.iter().any(|f| f.abi.is_uninhabited()) {
- abi = Abi::Uninhabited;
- }
-
- Ok(LayoutS {
- variants: Variants::Single { index: VariantIdx::new(0) },
- fields: FieldsShape::Arbitrary { offsets, memory_index },
- abi,
- largest_niche,
- align,
- size,
- })
- }
-
- fn layout_of_uncached(&self, ty: Ty<'tcx>) -> Result<Layout<'tcx>, LayoutError<'tcx>> {
- let tcx = self.tcx;
- let param_env = self.param_env;
- let dl = self.data_layout();
- let scalar_unit = |value: Primitive| {
- let size = value.size(dl);
- assert!(size.bits() <= 128);
- Scalar::Initialized { value, valid_range: WrappingRange::full(size) }
- };
- let scalar =
- |value: Primitive| tcx.intern_layout(LayoutS::scalar(self, scalar_unit(value)));
-
- let univariant = |fields: &[TyAndLayout<'_>], repr: &ReprOptions, kind| {
- Ok(tcx.intern_layout(self.univariant_uninterned(ty, fields, repr, kind)?))
- };
- debug_assert!(!ty.has_infer_types_or_consts());
-
- Ok(match *ty.kind() {
- // Basic scalars.
- ty::Bool => tcx.intern_layout(LayoutS::scalar(
- self,
- Scalar::Initialized {
- value: Int(I8, false),
- valid_range: WrappingRange { start: 0, end: 1 },
- },
- )),
- ty::Char => tcx.intern_layout(LayoutS::scalar(
- self,
- Scalar::Initialized {
- value: Int(I32, false),
- valid_range: WrappingRange { start: 0, end: 0x10FFFF },
- },
- )),
- ty::Int(ity) => scalar(Int(Integer::from_int_ty(dl, ity), true)),
- ty::Uint(ity) => scalar(Int(Integer::from_uint_ty(dl, ity), false)),
- ty::Float(fty) => scalar(match fty {
- ty::FloatTy::F32 => F32,
- ty::FloatTy::F64 => F64,
- }),
- ty::FnPtr(_) => {
- let mut ptr = scalar_unit(Pointer);
- ptr.valid_range_mut().start = 1;
- tcx.intern_layout(LayoutS::scalar(self, ptr))
- }
-
- // The never type.
- ty::Never => tcx.intern_layout(LayoutS {
- variants: Variants::Single { index: VariantIdx::new(0) },
- fields: FieldsShape::Primitive,
- abi: Abi::Uninhabited,
- largest_niche: None,
- align: dl.i8_align,
- size: Size::ZERO,
- }),
-
- // Potentially-wide pointers.
- ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
- let mut data_ptr = scalar_unit(Pointer);
- if !ty.is_unsafe_ptr() {
- data_ptr.valid_range_mut().start = 1;
- }
-
- let pointee = tcx.normalize_erasing_regions(param_env, pointee);
- if pointee.is_sized(tcx.at(DUMMY_SP), param_env) {
- return Ok(tcx.intern_layout(LayoutS::scalar(self, data_ptr)));
- }
-
- let unsized_part = tcx.struct_tail_erasing_lifetimes(pointee, param_env);
- let metadata = match unsized_part.kind() {
- ty::Foreign(..) => {
- return Ok(tcx.intern_layout(LayoutS::scalar(self, data_ptr)));
- }
- ty::Slice(_) | ty::Str => scalar_unit(Int(dl.ptr_sized_integer(), false)),
- ty::Dynamic(..) => {
- let mut vtable = scalar_unit(Pointer);
- vtable.valid_range_mut().start = 1;
- vtable
- }
- _ => return Err(LayoutError::Unknown(unsized_part)),
- };
-
- // Effectively a (ptr, meta) tuple.
- tcx.intern_layout(self.scalar_pair(data_ptr, metadata))
- }
-
- // Arrays and slices.
- ty::Array(element, mut count) => {
- if count.has_projections() {
- count = tcx.normalize_erasing_regions(param_env, count);
- if count.has_projections() {
- return Err(LayoutError::Unknown(ty));
- }
- }
-
- let count = count.try_eval_usize(tcx, param_env).ok_or(LayoutError::Unknown(ty))?;
- let element = self.layout_of(element)?;
- let size =
- element.size.checked_mul(count, dl).ok_or(LayoutError::SizeOverflow(ty))?;
-
- let abi =
- if count != 0 && tcx.conservative_is_privately_uninhabited(param_env.and(ty)) {
- Abi::Uninhabited
- } else {
- Abi::Aggregate { sized: true }
- };
-
- let largest_niche = if count != 0 { element.largest_niche } else { None };
-
- tcx.intern_layout(LayoutS {
- variants: Variants::Single { index: VariantIdx::new(0) },
- fields: FieldsShape::Array { stride: element.size, count },
- abi,
- largest_niche,
- align: element.align,
- size,
- })
- }
- ty::Slice(element) => {
- let element = self.layout_of(element)?;
- tcx.intern_layout(LayoutS {
- variants: Variants::Single { index: VariantIdx::new(0) },
- fields: FieldsShape::Array { stride: element.size, count: 0 },
- abi: Abi::Aggregate { sized: false },
- largest_niche: None,
- align: element.align,
- size: Size::ZERO,
- })
- }
- ty::Str => tcx.intern_layout(LayoutS {
- variants: Variants::Single { index: VariantIdx::new(0) },
- fields: FieldsShape::Array { stride: Size::from_bytes(1), count: 0 },
- abi: Abi::Aggregate { sized: false },
- largest_niche: None,
- align: dl.i8_align,
- size: Size::ZERO,
- }),
-
- // Odd unit types.
- ty::FnDef(..) => univariant(&[], &ReprOptions::default(), StructKind::AlwaysSized)?,
- ty::Dynamic(..) | ty::Foreign(..) => {
- let mut unit = self.univariant_uninterned(
- ty,
- &[],
- &ReprOptions::default(),
- StructKind::AlwaysSized,
- )?;
- match unit.abi {
- Abi::Aggregate { ref mut sized } => *sized = false,
- _ => bug!(),
- }
- tcx.intern_layout(unit)
- }
-
- ty::Generator(def_id, substs, _) => self.generator_layout(ty, def_id, substs)?,
-
- ty::Closure(_, ref substs) => {
- let tys = substs.as_closure().upvar_tys();
- univariant(
- &tys.map(|ty| self.layout_of(ty)).collect::<Result<Vec<_>, _>>()?,
- &ReprOptions::default(),
- StructKind::AlwaysSized,
- )?
- }
-
- ty::Tuple(tys) => {
- let kind =
- if tys.len() == 0 { StructKind::AlwaysSized } else { StructKind::MaybeUnsized };
-
- univariant(
- &tys.iter().map(|k| self.layout_of(k)).collect::<Result<Vec<_>, _>>()?,
- &ReprOptions::default(),
- kind,
- )?
- }
-
- // SIMD vector types.
- ty::Adt(def, substs) if def.repr().simd() => {
- if !def.is_struct() {
- // Should have yielded E0517 by now.
- tcx.sess.delay_span_bug(
- DUMMY_SP,
- "#[repr(simd)] was applied to an ADT that is not a struct",
- );
- return Err(LayoutError::Unknown(ty));
- }
-
- // Supported SIMD vectors are homogeneous ADTs with at least one field:
- //
- // * #[repr(simd)] struct S(T, T, T, T);
- // * #[repr(simd)] struct S { x: T, y: T, z: T, w: T }
- // * #[repr(simd)] struct S([T; 4])
- //
- // where T is a primitive scalar (integer/float/pointer).
-
- // SIMD vectors with zero fields are not supported.
- // (should be caught by typeck)
- if def.non_enum_variant().fields.is_empty() {
- tcx.sess.fatal(&format!("monomorphising SIMD type `{}` of zero length", ty));
- }
-
- // Type of the first ADT field:
- let f0_ty = def.non_enum_variant().fields[0].ty(tcx, substs);
-
- // Heterogeneous SIMD vectors are not supported:
- // (should be caught by typeck)
- for fi in &def.non_enum_variant().fields {
- if fi.ty(tcx, substs) != f0_ty {
- tcx.sess.fatal(&format!("monomorphising heterogeneous SIMD type `{}`", ty));
- }
- }
-
- // The element type and number of elements of the SIMD vector
- // are obtained from:
- //
- // * the element type and length of the single array field, if
- // the first field is of array type, or
- //
-                //    * the homogeneous field type and the number of fields.
- let (e_ty, e_len, is_array) = if let ty::Array(e_ty, _) = f0_ty.kind() {
- // First ADT field is an array:
-
- // SIMD vectors with multiple array fields are not supported:
- // (should be caught by typeck)
- if def.non_enum_variant().fields.len() != 1 {
- tcx.sess.fatal(&format!(
- "monomorphising SIMD type `{}` with more than one array field",
- ty
- ));
- }
-
- // Extract the number of elements from the layout of the array field:
- let FieldsShape::Array { count, .. } = self.layout_of(f0_ty)?.layout.fields() else {
- return Err(LayoutError::Unknown(ty));
- };
-
- (*e_ty, *count, true)
- } else {
- // First ADT field is not an array:
- (f0_ty, def.non_enum_variant().fields.len() as _, false)
- };
-
- // SIMD vectors of zero length are not supported.
- // Additionally, lengths are capped at 2^16 as a fixed maximum backends must
- // support.
- //
- // Can't be caught in typeck if the array length is generic.
- if e_len == 0 {
- tcx.sess.fatal(&format!("monomorphising SIMD type `{}` of zero length", ty));
- } else if e_len > MAX_SIMD_LANES {
- tcx.sess.fatal(&format!(
- "monomorphising SIMD type `{}` of length greater than {}",
- ty, MAX_SIMD_LANES,
- ));
- }
-
- // Compute the ABI of the element type:
- let e_ly = self.layout_of(e_ty)?;
- let Abi::Scalar(e_abi) = e_ly.abi else {
- // This error isn't caught in typeck, e.g., if
- // the element type of the vector is generic.
- tcx.sess.fatal(&format!(
- "monomorphising SIMD type `{}` with a non-primitive-scalar \
- (integer/float/pointer) element type `{}`",
- ty, e_ty
- ))
- };
-
- // Compute the size and alignment of the vector:
- let size = e_ly.size.checked_mul(e_len, dl).ok_or(LayoutError::SizeOverflow(ty))?;
- let align = dl.vector_align(size);
- let size = size.align_to(align.abi);
-
- // Compute the placement of the vector fields:
- let fields = if is_array {
- FieldsShape::Arbitrary { offsets: vec![Size::ZERO], memory_index: vec![0] }
- } else {
- FieldsShape::Array { stride: e_ly.size, count: e_len }
- };
-
- tcx.intern_layout(LayoutS {
- variants: Variants::Single { index: VariantIdx::new(0) },
- fields,
- abi: Abi::Vector { element: e_abi, count: e_len },
- largest_niche: e_ly.largest_niche,
- size,
- align,
- })
- }
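
The arithmetic in the SIMD arm above reduces to: multiply the element size by the lane count, then round the result up to the target's vector alignment. A standalone sketch, with a hypothetical 16-byte vector alignment standing in for `dl.vector_align(size)`:

    /// Minimal sketch of the SIMD size/align computation above.
    /// `elem_size` and `lanes` mirror `e_ly.size` and `e_len`; the fixed
    /// `vector_align` is an assumed stand-in for `dl.vector_align(size)`.
    fn simd_layout(elem_size: u64, lanes: u64) -> (u64, u64) {
        let vector_align = 16u64; // hypothetical target vector alignment
        let unaligned = elem_size.checked_mul(lanes).expect("size overflow");
        // Round the size up to a multiple of the alignment, as `size.align_to(align.abi)` does.
        let size = (unaligned + vector_align - 1) / vector_align * vector_align;
        (size, vector_align)
    }

    fn main() {
        // e.g. #[repr(simd)] struct F32x4([f32; 4]): 4 * 4 = 16 bytes, align 16.
        assert_eq!(simd_layout(4, 4), (16, 16));
    }
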
-
- // ADTs.
- ty::Adt(def, substs) => {
- // Cache the field layouts.
- let variants = def
- .variants()
- .iter()
- .map(|v| {
- v.fields
- .iter()
- .map(|field| self.layout_of(field.ty(tcx, substs)))
- .collect::<Result<Vec<_>, _>>()
- })
- .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
-
- if def.is_union() {
- if def.repr().pack.is_some() && def.repr().align.is_some() {
- self.tcx.sess.delay_span_bug(
- tcx.def_span(def.did()),
- "union cannot be packed and aligned",
- );
- return Err(LayoutError::Unknown(ty));
- }
-
- let mut align =
- if def.repr().pack.is_some() { dl.i8_align } else { dl.aggregate_align };
-
- if let Some(repr_align) = def.repr().align {
- align = align.max(AbiAndPrefAlign::new(repr_align));
- }
-
- let optimize = !def.repr().inhibit_union_abi_opt();
- let mut size = Size::ZERO;
- let mut abi = Abi::Aggregate { sized: true };
- let index = VariantIdx::new(0);
- for field in &variants[index] {
- assert!(!field.is_unsized());
- align = align.max(field.align);
-
- // If all non-ZST fields have the same ABI, forward this ABI
- if optimize && !field.is_zst() {
- // Discard valid range information and allow undef
- let field_abi = match field.abi {
- Abi::Scalar(x) => Abi::Scalar(x.to_union()),
- Abi::ScalarPair(x, y) => {
- Abi::ScalarPair(x.to_union(), y.to_union())
- }
- Abi::Vector { element: x, count } => {
- Abi::Vector { element: x.to_union(), count }
- }
- Abi::Uninhabited | Abi::Aggregate { .. } => {
- Abi::Aggregate { sized: true }
- }
- };
-
- if size == Size::ZERO {
- // first non ZST: initialize 'abi'
- abi = field_abi;
- } else if abi != field_abi {
- // different fields have different ABI: reset to Aggregate
- abi = Abi::Aggregate { sized: true };
- }
- }
-
- size = cmp::max(size, field.size);
- }
-
- if let Some(pack) = def.repr().pack {
- align = align.min(AbiAndPrefAlign::new(pack));
- }
-
- return Ok(tcx.intern_layout(LayoutS {
- variants: Variants::Single { index },
- fields: FieldsShape::Union(
- NonZeroUsize::new(variants[index].len())
- .ok_or(LayoutError::Unknown(ty))?,
- ),
- abi,
- largest_niche: None,
- align,
- size: size.align_to(align.abi),
- }));
- }
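
The union branch above forwards a common scalar ABI shared by all non-ZST fields and otherwise falls back to `Aggregate` (it also widens scalars to union scalars and discards valid ranges, which this simplified stand-in omits):

    // Simplified stand-in for rustc's `Abi`, just enough to show the merging rule.
    #[derive(Clone, Copy, PartialEq, Debug)]
    enum Abi {
        Scalar(u8), // payload models the primitive kind, e.g. its size in bytes
        Aggregate,
    }

    /// Merge the ABIs of a union's non-ZST fields, as the removed loop does:
    /// the first non-ZST field initializes the ABI, any mismatch resets to Aggregate.
    fn union_abi(non_zst_fields: &[Abi]) -> Abi {
        let mut abi = Abi::Aggregate;
        for (i, &field_abi) in non_zst_fields.iter().enumerate() {
            if i == 0 {
                abi = field_abi;
            } else if abi != field_abi {
                return Abi::Aggregate;
            }
        }
        abi
    }

    fn main() {
        assert_eq!(union_abi(&[Abi::Scalar(4), Abi::Scalar(4)]), Abi::Scalar(4));
        assert_eq!(union_abi(&[Abi::Scalar(4), Abi::Scalar(8)]), Abi::Aggregate);
    }
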
-
- // A variant is absent if it's uninhabited and only has ZST fields.
- // Present uninhabited variants only require space for their fields,
- // but *not* an encoding of the discriminant (e.g., a tag value).
- // See issue #49298 for more details on the need to leave space
- // for non-ZST uninhabited data (mostly partial initialization).
- let absent = |fields: &[TyAndLayout<'_>]| {
- let uninhabited = fields.iter().any(|f| f.abi.is_uninhabited());
- let is_zst = fields.iter().all(|f| f.is_zst());
- uninhabited && is_zst
- };
- let (present_first, present_second) = {
- let mut present_variants = variants
- .iter_enumerated()
- .filter_map(|(i, v)| if absent(v) { None } else { Some(i) });
- (present_variants.next(), present_variants.next())
- };
- let present_first = match present_first {
- Some(present_first) => present_first,
- // Uninhabited because it has no variants, or only absent ones.
- None if def.is_enum() => {
- return Ok(tcx.layout_of(param_env.and(tcx.types.never))?.layout);
- }
- // If it's a struct, still compute a layout so that we can still compute the
- // field offsets.
- None => VariantIdx::new(0),
- };
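
The effect of treating uninhabited, all-ZST variants as absent can be seen with standard-library types; the exact sizes below reflect current compiler behaviour rather than a documented guarantee:

    use std::convert::Infallible;
    use std::mem::size_of;

    fn main() {
        // `Infallible` is an empty enum: no variants, hence no values and no size.
        assert_eq!(size_of::<Infallible>(), 0);
        // An absent (uninhabited, zero-sized) variant takes no space and needs no
        // tag, so with the current compiler `Result<u32, Infallible>` is just a u32.
        assert_eq!(size_of::<Result<u32, Infallible>>(), 4);
    }
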
-
- let is_struct = !def.is_enum() ||
- // Only one variant is present.
- (present_second.is_none() &&
- // Representation optimizations are allowed.
- !def.repr().inhibit_enum_layout_opt());
- if is_struct {
- // Struct, or univariant enum equivalent to a struct.
- // (Typechecking will reject discriminant-sizing attrs.)
-
- let v = present_first;
- let kind = if def.is_enum() || variants[v].is_empty() {
- StructKind::AlwaysSized
- } else {
- let param_env = tcx.param_env(def.did());
- let last_field = def.variant(v).fields.last().unwrap();
- let always_sized =
- tcx.type_of(last_field.did).is_sized(tcx.at(DUMMY_SP), param_env);
- if !always_sized {
- StructKind::MaybeUnsized
- } else {
- StructKind::AlwaysSized
- }
- };
-
- let mut st = self.univariant_uninterned(ty, &variants[v], &def.repr(), kind)?;
- st.variants = Variants::Single { index: v };
-
- if def.is_unsafe_cell() {
- let hide_niches = |scalar: &mut _| match scalar {
- Scalar::Initialized { value, valid_range } => {
- *valid_range = WrappingRange::full(value.size(dl))
- }
- // Already doesn't have any niches
- Scalar::Union { .. } => {}
- };
- match &mut st.abi {
- Abi::Uninhabited => {}
- Abi::Scalar(scalar) => hide_niches(scalar),
- Abi::ScalarPair(a, b) => {
- hide_niches(a);
- hide_niches(b);
- }
- Abi::Vector { element, count: _ } => hide_niches(element),
- Abi::Aggregate { sized: _ } => {}
- }
- st.largest_niche = None;
- return Ok(tcx.intern_layout(st));
- }
-
- let (start, end) = self.tcx.layout_scalar_valid_range(def.did());
- match st.abi {
- Abi::Scalar(ref mut scalar) | Abi::ScalarPair(ref mut scalar, _) => {
- // the asserts ensure that we are not using the
- // `#[rustc_layout_scalar_valid_range(n)]`
- // attribute to widen the range of anything as that would probably
- // result in UB somewhere
- // FIXME(eddyb) the asserts are probably not needed,
- // as larger validity ranges would result in missed
- // optimizations, *not* wrongly assuming the inner
- // value is valid. e.g. unions enlarge validity ranges,
- // because the values may be uninitialized.
- if let Bound::Included(start) = start {
- // FIXME(eddyb) this might be incorrect - it doesn't
- // account for wrap-around (end < start) ranges.
- let valid_range = scalar.valid_range_mut();
- assert!(valid_range.start <= start);
- valid_range.start = start;
- }
- if let Bound::Included(end) = end {
- // FIXME(eddyb) this might be incorrect - it doesn't
- // account for wrap-around (end < start) ranges.
- let valid_range = scalar.valid_range_mut();
- assert!(valid_range.end >= end);
- valid_range.end = end;
- }
-
- // Update `largest_niche` if we have introduced a larger niche.
- let niche = Niche::from_scalar(dl, Size::ZERO, *scalar);
- if let Some(niche) = niche {
- match st.largest_niche {
- Some(largest_niche) => {
- // Replace the existing niche even if they're equal,
- // because this one is at a lower offset.
- if largest_niche.available(dl) <= niche.available(dl) {
- st.largest_niche = Some(niche);
- }
- }
- None => st.largest_niche = Some(niche),
- }
- }
- }
- _ => assert!(
- start == Bound::Unbounded && end == Bound::Unbounded,
- "nonscalar layout for layout_scalar_valid_range type {:?}: {:#?}",
- def,
- st,
- ),
- }
-
- return Ok(tcx.intern_layout(st));
- }
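
The `layout_scalar_valid_range` narrowing handled above is what gives the standard `NonZero*` types their niche; a quick check of the resulting layout (standard-library types, not part of this diff):

    use std::mem::size_of;
    use std::num::NonZeroU32;

    fn main() {
        // The narrowed valid range (1..=u32::MAX) leaves 0 as a niche,
        // so `Option<NonZeroU32>` needs no separate tag.
        assert_eq!(size_of::<NonZeroU32>(), 4);
        assert_eq!(size_of::<Option<NonZeroU32>>(), 4);
    }
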
-
- // At this point, we have handled all unions and
- // structs. (We have also handled univariant enums
- // that allow representation optimization.)
- assert!(def.is_enum());
-
- // The current code for niche-filling relies on variant indices
- // instead of actual discriminants, so dataful enums with
- // explicit discriminants (RFC #2363) would misbehave.
- let no_explicit_discriminants = def
- .variants()
- .iter_enumerated()
- .all(|(i, v)| v.discr == ty::VariantDiscr::Relative(i.as_u32()));
-
- let mut niche_filling_layout = None;
-
- // Niche-filling enum optimization.
- if !def.repr().inhibit_enum_layout_opt() && no_explicit_discriminants {
- let mut dataful_variant = None;
- let mut niche_variants = VariantIdx::MAX..=VariantIdx::new(0);
-
- // Find one non-ZST variant.
- 'variants: for (v, fields) in variants.iter_enumerated() {
- if absent(fields) {
- continue 'variants;
- }
- for f in fields {
- if !f.is_zst() {
- if dataful_variant.is_none() {
- dataful_variant = Some(v);
- continue 'variants;
- } else {
- dataful_variant = None;
- break 'variants;
- }
- }
- }
- niche_variants = *niche_variants.start().min(&v)..=v;
- }
-
- if niche_variants.start() > niche_variants.end() {
- dataful_variant = None;
- }
-
- if let Some(i) = dataful_variant {
- let count = (niche_variants.end().as_u32()
- - niche_variants.start().as_u32()
- + 1) as u128;
-
- // Find the field with the largest niche
- let niche_candidate = variants[i]
- .iter()
- .enumerate()
- .filter_map(|(j, field)| Some((j, field.largest_niche?)))
- .max_by_key(|(_, niche)| niche.available(dl));
-
- if let Some((field_index, niche, (niche_start, niche_scalar))) =
- niche_candidate.and_then(|(field_index, niche)| {
- Some((field_index, niche, niche.reserve(self, count)?))
- })
- {
- let mut align = dl.aggregate_align;
- let st = variants
- .iter_enumerated()
- .map(|(j, v)| {
- let mut st = self.univariant_uninterned(
- ty,
- v,
- &def.repr(),
- StructKind::AlwaysSized,
- )?;
- st.variants = Variants::Single { index: j };
-
- align = align.max(st.align);
-
- Ok(tcx.intern_layout(st))
- })
- .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
-
- let offset = st[i].fields().offset(field_index) + niche.offset;
-
- // Align the total size to the largest alignment.
- let size = st[i].size().align_to(align.abi);
-
- let abi = if st.iter().all(|v| v.abi().is_uninhabited()) {
- Abi::Uninhabited
- } else if align == st[i].align() && size == st[i].size() {
- // When the total alignment and size match, we can use the
- // same ABI as the scalar variant with the reserved niche.
- match st[i].abi() {
- Abi::Scalar(_) => Abi::Scalar(niche_scalar),
- Abi::ScalarPair(first, second) => {
- // Only the niche is guaranteed to be initialised,
- // so use union layout for the other primitive.
- if offset.bytes() == 0 {
- Abi::ScalarPair(niche_scalar, second.to_union())
- } else {
- Abi::ScalarPair(first.to_union(), niche_scalar)
- }
- }
- _ => Abi::Aggregate { sized: true },
- }
- } else {
- Abi::Aggregate { sized: true }
- };
-
- let largest_niche = Niche::from_scalar(dl, offset, niche_scalar);
-
- niche_filling_layout = Some(LayoutS {
- variants: Variants::Multiple {
- tag: niche_scalar,
- tag_encoding: TagEncoding::Niche {
- dataful_variant: i,
- niche_variants,
- niche_start,
- },
- tag_field: 0,
- variants: st,
- },
- fields: FieldsShape::Arbitrary {
- offsets: vec![offset],
- memory_index: vec![0],
- },
- abi,
- largest_niche,
- size,
- align,
- });
- }
- }
- }
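
The niche-filling encoding set up above is observable from the outside: variants other than the dataful one are stored as otherwise-invalid values of the niche field, so no separate tag byte is needed. A small illustration (sizes reflect current compiler behaviour):

    use std::mem::size_of;

    // The `bool` field has a niche (valid range 0..=1 within a byte), so the
    // unit variants B and C can be encoded as otherwise-invalid values of that
    // same byte instead of requiring a separate tag.
    #[allow(dead_code)]
    enum E {
        A(bool), // the "dataful" variant
        B,
        C,
    }

    fn main() {
        assert_eq!(size_of::<bool>(), 1);
        assert_eq!(size_of::<E>(), 1);
        // The classic case: the null niche of a reference encodes `None`.
        assert_eq!(size_of::<Option<&u8>>(), size_of::<&u8>());
    }
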
-
- let (mut min, mut max) = (i128::MAX, i128::MIN);
- let discr_type = def.repr().discr_type();
- let bits = Integer::from_attr(self, discr_type).size().bits();
- for (i, discr) in def.discriminants(tcx) {
- if variants[i].iter().any(|f| f.abi.is_uninhabited()) {
- continue;
- }
- let mut x = discr.val as i128;
- if discr_type.is_signed() {
- // sign extend the raw representation to be an i128
- x = (x << (128 - bits)) >> (128 - bits);
- }
- if x < min {
- min = x;
- }
- if x > max {
- max = x;
- }
- }
- // We might have no inhabited variants, so pretend there's at least one.
- if (min, max) == (i128::MAX, i128::MIN) {
- min = 0;
- max = 0;
- }
- assert!(min <= max, "discriminant range is {}...{}", min, max);
- let (min_ity, signed) = Integer::repr_discr(tcx, ty, &def.repr(), min, max);
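
The sign-extension step above recovers a signed discriminant from its raw, truncated representation; isolated, the shift trick looks like this:

    /// Sign-extend the low `bits` bits of `x` to a full i128,
    /// exactly as the removed loop does for signed repr types.
    fn sign_extend(x: i128, bits: u32) -> i128 {
        (x << (128 - bits)) >> (128 - bits)
    }

    fn main() {
        // A `repr(i8)` discriminant of -1 is stored as the raw value 0xFF.
        assert_eq!(sign_extend(0xFF, 8), -1);
        // Positive values are unchanged.
        assert_eq!(sign_extend(0x7F, 8), 127);
    }
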
-
- let mut align = dl.aggregate_align;
- let mut size = Size::ZERO;
-
- // We're interested in the smallest alignment, so start large.
- let mut start_align = Align::from_bytes(256).unwrap();
- assert_eq!(Integer::for_align(dl, start_align), None);
-
- // repr(C) on an enum tells us to make a (tag, union) layout,
- // so we need to grow the prefix alignment to be at least
- // the alignment of the union. (This value is used both for
-                // determining the alignment of the overall enum, and for
- // determining the alignment of the payload after the tag.)
- let mut prefix_align = min_ity.align(dl).abi;
- if def.repr().c() {
- for fields in &variants {
- for field in fields {
- prefix_align = prefix_align.max(field.align.abi);
- }
- }
- }
-
- // Create the set of structs that represent each variant.
- let mut layout_variants = variants
- .iter_enumerated()
- .map(|(i, field_layouts)| {
- let mut st = self.univariant_uninterned(
- ty,
- &field_layouts,
- &def.repr(),
- StructKind::Prefixed(min_ity.size(), prefix_align),
- )?;
- st.variants = Variants::Single { index: i };
- // Find the first field we can't move later
- // to make room for a larger discriminant.
- for field in
- st.fields.index_by_increasing_offset().map(|j| field_layouts[j])
- {
- if !field.is_zst() || field.align.abi.bytes() != 1 {
- start_align = start_align.min(field.align.abi);
- break;
- }
- }
- size = cmp::max(size, st.size);
- align = align.max(st.align);
- Ok(st)
- })
- .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
-
- // Align the maximum variant size to the largest alignment.
- size = size.align_to(align.abi);
-
- if size.bytes() >= dl.obj_size_bound() {
- return Err(LayoutError::SizeOverflow(ty));
- }
-
- let typeck_ity = Integer::from_attr(dl, def.repr().discr_type());
- if typeck_ity < min_ity {
-                    // It is a bug if layout decides on a discriminant size greater than what typeck
-                    // chose (based on the values the discriminant can take on), because this
-                    // discriminant will be loaded and then stored into a variable of the type
-                    // calculated by typeck. Consider such a case (a bug): typeck decided on a
-                    // byte-sized discriminant, but layout thinks 16 bits are needed to store all
-                    // discriminant values. Then, in codegen, storing that 16-bit discriminant into
-                    // an 8-bit temporary would have to discard some of the bits needed to
-                    // represent it (or layout is wrong in thinking it needs 16 bits).
- bug!(
- "layout decided on a larger discriminant type ({:?}) than typeck ({:?})",
- min_ity,
- typeck_ity
- );
- // However, it is fine to make discr type however large (as an optimisation)
- // after this point – we’ll just truncate the value we load in codegen.
- }
-
- // Check to see if we should use a different type for the
- // discriminant. We can safely use a type with the same size
- // as the alignment of the first field of each variant.
- // We increase the size of the discriminant to avoid LLVM copying
- // padding when it doesn't need to. This normally causes unaligned
- // load/stores and excessive memcpy/memset operations. By using a
- // bigger integer size, LLVM can be sure about its contents and
- // won't be so conservative.
-
- // Use the initial field alignment
- let mut ity = if def.repr().c() || def.repr().int.is_some() {
- min_ity
- } else {
- Integer::for_align(dl, start_align).unwrap_or(min_ity)
- };
-
- // If the alignment is not larger than the chosen discriminant size,
- // don't use the alignment as the final size.
- if ity <= min_ity {
- ity = min_ity;
- } else {
- // Patch up the variants' first few fields.
- let old_ity_size = min_ity.size();
- let new_ity_size = ity.size();
- for variant in &mut layout_variants {
- match variant.fields {
- FieldsShape::Arbitrary { ref mut offsets, .. } => {
- for i in offsets {
- if *i <= old_ity_size {
- assert_eq!(*i, old_ity_size);
- *i = new_ity_size;
- }
- }
- // We might be making the struct larger.
- if variant.size <= old_ity_size {
- variant.size = new_ity_size;
- }
- }
- _ => bug!(),
- }
- }
- }
-
- let tag_mask = ity.size().unsigned_int_max();
- let tag = Scalar::Initialized {
- value: Int(ity, signed),
- valid_range: WrappingRange {
- start: (min as u128 & tag_mask),
- end: (max as u128 & tag_mask),
- },
- };
- let mut abi = Abi::Aggregate { sized: true };
-
- if layout_variants.iter().all(|v| v.abi.is_uninhabited()) {
- abi = Abi::Uninhabited;
- } else if tag.size(dl) == size {
- // Make sure we only use scalar layout when the enum is entirely its
- // own tag (i.e. it has no padding nor any non-ZST variant fields).
- abi = Abi::Scalar(tag);
- } else {
- // Try to use a ScalarPair for all tagged enums.
- let mut common_prim = None;
- let mut common_prim_initialized_in_all_variants = true;
- for (field_layouts, layout_variant) in iter::zip(&variants, &layout_variants) {
- let FieldsShape::Arbitrary { ref offsets, .. } = layout_variant.fields else {
- bug!();
- };
- let mut fields =
- iter::zip(field_layouts, offsets).filter(|p| !p.0.is_zst());
- let (field, offset) = match (fields.next(), fields.next()) {
- (None, None) => {
- common_prim_initialized_in_all_variants = false;
- continue;
- }
- (Some(pair), None) => pair,
- _ => {
- common_prim = None;
- break;
- }
- };
- let prim = match field.abi {
- Abi::Scalar(scalar) => {
- common_prim_initialized_in_all_variants &=
- matches!(scalar, Scalar::Initialized { .. });
- scalar.primitive()
- }
- _ => {
- common_prim = None;
- break;
- }
- };
- if let Some(pair) = common_prim {
- // This is pretty conservative. We could go fancier
- // by conflating things like i32 and u32, or even
- // realising that (u8, u8) could just cohabit with
- // u16 or even u32.
- if pair != (prim, offset) {
- common_prim = None;
- break;
- }
- } else {
- common_prim = Some((prim, offset));
- }
- }
- if let Some((prim, offset)) = common_prim {
- let prim_scalar = if common_prim_initialized_in_all_variants {
- scalar_unit(prim)
- } else {
- // Common prim might be uninit.
- Scalar::Union { value: prim }
- };
- let pair = self.scalar_pair(tag, prim_scalar);
- let pair_offsets = match pair.fields {
- FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
- assert_eq!(memory_index, &[0, 1]);
- offsets
- }
- _ => bug!(),
- };
- if pair_offsets[0] == Size::ZERO
- && pair_offsets[1] == *offset
- && align == pair.align
- && size == pair.size
- {
- // We can use `ScalarPair` only when it matches our
- // already computed layout (including `#[repr(C)]`).
- abi = pair.abi;
- }
- }
- }
-
- // If we pick a "clever" (by-value) ABI, we might have to adjust the ABI of the
- // variants to ensure they are consistent. This is because a downcast is
- // semantically a NOP, and thus should not affect layout.
- if matches!(abi, Abi::Scalar(..) | Abi::ScalarPair(..)) {
- for variant in &mut layout_variants {
- // We only do this for variants with fields; the others are not accessed anyway.
- // Also do not overwrite any already existing "clever" ABIs.
- if variant.fields.count() > 0
- && matches!(variant.abi, Abi::Aggregate { .. })
- {
- variant.abi = abi;
- // Also need to bump up the size and alignment, so that the entire value fits in here.
- variant.size = cmp::max(variant.size, size);
- variant.align.abi = cmp::max(variant.align.abi, align.abi);
- }
- }
- }
-
- let largest_niche = Niche::from_scalar(dl, Size::ZERO, tag);
-
- let layout_variants =
- layout_variants.into_iter().map(|v| tcx.intern_layout(v)).collect();
-
- let tagged_layout = LayoutS {
- variants: Variants::Multiple {
- tag,
- tag_encoding: TagEncoding::Direct,
- tag_field: 0,
- variants: layout_variants,
- },
- fields: FieldsShape::Arbitrary {
- offsets: vec![Size::ZERO],
- memory_index: vec![0],
- },
- largest_niche,
- abi,
- align,
- size,
- };
-
- let best_layout = match (tagged_layout, niche_filling_layout) {
- (tagged_layout, Some(niche_filling_layout)) => {
- // Pick the smaller layout; otherwise,
- // pick the layout with the larger niche; otherwise,
- // pick tagged as it has simpler codegen.
- cmp::min_by_key(tagged_layout, niche_filling_layout, |layout| {
- let niche_size = layout.largest_niche.map_or(0, |n| n.available(dl));
- (layout.size, cmp::Reverse(niche_size))
- })
- }
- (tagged_layout, None) => tagged_layout,
- };
-
- tcx.intern_layout(best_layout)
- }
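
The final selection above prefers the smaller layout, breaks ties by the larger niche, and keeps the tagged layout on a full tie (since `cmp::min_by_key` returns its first argument when the keys compare equal). A minimal sketch with a stand-in `Candidate` type:

    use std::cmp::{self, Reverse};

    struct Candidate {
        name: &'static str,
        size: u64,
        niche: u64,
    }

    /// The selection rule used above: smaller size wins; on a size tie the
    /// larger niche wins; on a full tie the first argument (the tagged layout,
    /// with its simpler codegen) is kept.
    fn pick(tagged: Candidate, niche_filling: Candidate) -> Candidate {
        cmp::min_by_key(tagged, niche_filling, |c| (c.size, Reverse(c.niche)))
    }

    fn main() {
        let tagged = Candidate { name: "tagged", size: 8, niche: 0 };
        let niched = Candidate { name: "niche", size: 8, niche: 255 };
        assert_eq!(pick(tagged, niched).name, "niche");

        // Full tie: the tagged layout is kept.
        let t2 = Candidate { name: "tagged", size: 8, niche: 0 };
        let n2 = Candidate { name: "niche", size: 8, niche: 0 };
        assert_eq!(pick(t2, n2).name, "tagged");
    }
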
-
- // Types with no meaningful known layout.
- ty::Projection(_) | ty::Opaque(..) => {
- // NOTE(eddyb) `layout_of` query should've normalized these away,
- // if that was possible, so there's no reason to try again here.
- return Err(LayoutError::Unknown(ty));
- }
-
- ty::Placeholder(..) | ty::GeneratorWitness(..) | ty::Infer(_) => {
- bug!("Layout::compute: unexpected type `{}`", ty)
- }
-
- ty::Bound(..) | ty::Param(_) | ty::Error(_) => {
- return Err(LayoutError::Unknown(ty));
- }
- })
- }
-}
-
-/// Overlap eligibility and variant assignment for each GeneratorSavedLocal.
-#[derive(Clone, Debug, PartialEq)]
-enum SavedLocalEligibility {
- Unassigned,
- Assigned(VariantIdx),
- // FIXME: Use newtype_index so we aren't wasting bytes
- Ineligible(Option<u32>),
-}
-
-// When laying out generators, we divide our saved local fields into two
-// categories: overlap-eligible and overlap-ineligible.
-//
-// Those fields which are ineligible for overlap go in a "prefix" at the
-// beginning of the layout, and always have space reserved for them.
-//
-// Overlap-eligible fields are only assigned to one variant, so we lay
-// those fields out for each variant and put them right after the
-// prefix.
-//
-// Finally, in the layout details, we point to the fields from the
-// variants they are assigned to. It is possible for some fields to be
-// included in multiple variants. No field ever "moves around" in the
-// layout; its offset is always the same.
-//
-// Also included in the layout are the upvars and the discriminant.
-// These are included as fields on the "outer" layout; they are not part
-// of any variant.
-impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
- /// Compute the eligibility and assignment of each local.
- fn generator_saved_local_eligibility(
- &self,
- info: &GeneratorLayout<'tcx>,
- ) -> (BitSet<GeneratorSavedLocal>, IndexVec<GeneratorSavedLocal, SavedLocalEligibility>) {
- use SavedLocalEligibility::*;
-
- let mut assignments: IndexVec<GeneratorSavedLocal, SavedLocalEligibility> =
- IndexVec::from_elem_n(Unassigned, info.field_tys.len());
-
- // The saved locals not eligible for overlap. These will get
- // "promoted" to the prefix of our generator.
- let mut ineligible_locals = BitSet::new_empty(info.field_tys.len());
-
- // Figure out which of our saved locals are fields in only
- // one variant. The rest are deemed ineligible for overlap.
- for (variant_index, fields) in info.variant_fields.iter_enumerated() {
- for local in fields {
- match assignments[*local] {
- Unassigned => {
- assignments[*local] = Assigned(variant_index);
- }
- Assigned(idx) => {
- // We've already seen this local at another suspension
- // point, so it is no longer a candidate.
- trace!(
- "removing local {:?} in >1 variant ({:?}, {:?})",
- local,
- variant_index,
- idx
- );
- ineligible_locals.insert(*local);
- assignments[*local] = Ineligible(None);
- }
- Ineligible(_) => {}
- }
- }
- }
-
- // Next, check every pair of eligible locals to see if they
- // conflict.
- for local_a in info.storage_conflicts.rows() {
- let conflicts_a = info.storage_conflicts.count(local_a);
- if ineligible_locals.contains(local_a) {
- continue;
- }
-
- for local_b in info.storage_conflicts.iter(local_a) {
- // local_a and local_b are storage live at the same time, therefore they
- // cannot overlap in the generator layout. The only way to guarantee
- // this is if they are in the same variant, or one is ineligible
- // (which means it is stored in every variant).
- if ineligible_locals.contains(local_b)
- || assignments[local_a] == assignments[local_b]
- {
- continue;
- }
-
- // If they conflict, we will choose one to make ineligible.
- // This is not always optimal; it's just a greedy heuristic that
- // seems to produce good results most of the time.
- let conflicts_b = info.storage_conflicts.count(local_b);
- let (remove, other) =
- if conflicts_a > conflicts_b { (local_a, local_b) } else { (local_b, local_a) };
- ineligible_locals.insert(remove);
- assignments[remove] = Ineligible(None);
- trace!("removing local {:?} due to conflict with {:?}", remove, other);
- }
- }
-
- // Count the number of variants in use. If only one of them, then it is
- // impossible to overlap any locals in our layout. In this case it's
- // always better to make the remaining locals ineligible, so we can
- // lay them out with the other locals in the prefix and eliminate
- // unnecessary padding bytes.
- {
- let mut used_variants = BitSet::new_empty(info.variant_fields.len());
- for assignment in &assignments {
- if let Assigned(idx) = assignment {
- used_variants.insert(*idx);
- }
- }
- if used_variants.count() < 2 {
- for assignment in assignments.iter_mut() {
- *assignment = Ineligible(None);
- }
- ineligible_locals.insert_all();
- }
- }
-
- // Write down the order of our locals that will be promoted to the prefix.
- {
- for (idx, local) in ineligible_locals.iter().enumerate() {
- assignments[local] = Ineligible(Some(idx as u32));
- }
- }
- debug!("generator saved local assignments: {:?}", assignments);
-
- (ineligible_locals, assignments)
- }
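
A simplified model of the greedy demotion in the conflict loop above (the same-variant shortcut and the bitset bookkeeping are omitted): whenever two still-eligible locals conflict, the one involved in more conflicts is moved to the ineligible prefix.

    use std::collections::HashSet;

    /// Greedy demotion sketch: `conflicts` lists pairs of saved locals that are
    /// storage-live at the same time; the returned set is the ineligible prefix.
    fn resolve_conflicts(conflicts: &[(usize, usize)]) -> HashSet<usize> {
        let count = |l: usize| conflicts.iter().filter(|&&(a, b)| a == l || b == l).count();
        let mut ineligible = HashSet::new();
        for &(a, b) in conflicts {
            if ineligible.contains(&a) || ineligible.contains(&b) {
                continue; // already demoted, nothing left to resolve for this pair
            }
            // Demote the local with the larger total conflict count.
            let remove = if count(a) > count(b) { a } else { b };
            ineligible.insert(remove);
        }
        ineligible
    }

    fn main() {
        // Local 0 conflicts with both 1 and 2; demoting 0 lets 1 and 2 overlap.
        assert_eq!(resolve_conflicts(&[(0, 1), (0, 2)]), HashSet::from([0]));
    }
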
-
- /// Compute the full generator layout.
- fn generator_layout(
- &self,
- ty: Ty<'tcx>,
- def_id: hir::def_id::DefId,
- substs: SubstsRef<'tcx>,
- ) -> Result<Layout<'tcx>, LayoutError<'tcx>> {
- use SavedLocalEligibility::*;
- let tcx = self.tcx;
- let subst_field = |ty: Ty<'tcx>| EarlyBinder(ty).subst(tcx, substs);
-
- let Some(info) = tcx.generator_layout(def_id) else {
- return Err(LayoutError::Unknown(ty));
- };
- let (ineligible_locals, assignments) = self.generator_saved_local_eligibility(&info);
-
- // Build a prefix layout, including "promoting" all ineligible
- // locals as part of the prefix. We compute the layout of all of
- // these fields at once to get optimal packing.
- let tag_index = substs.as_generator().prefix_tys().count();
-
- // `info.variant_fields` already accounts for the reserved variants, so no need to add them.
- let max_discr = (info.variant_fields.len() - 1) as u128;
- let discr_int = Integer::fit_unsigned(max_discr);
- let discr_int_ty = discr_int.to_ty(tcx, false);
- let tag = Scalar::Initialized {
- value: Primitive::Int(discr_int, false),
- valid_range: WrappingRange { start: 0, end: max_discr },
- };
- let tag_layout = self.tcx.intern_layout(LayoutS::scalar(self, tag));
- let tag_layout = TyAndLayout { ty: discr_int_ty, layout: tag_layout };
-
- let promoted_layouts = ineligible_locals
- .iter()
- .map(|local| subst_field(info.field_tys[local]))
- .map(|ty| tcx.mk_maybe_uninit(ty))
- .map(|ty| self.layout_of(ty));
- let prefix_layouts = substs
- .as_generator()
- .prefix_tys()
- .map(|ty| self.layout_of(ty))
- .chain(iter::once(Ok(tag_layout)))
- .chain(promoted_layouts)
- .collect::<Result<Vec<_>, _>>()?;
- let prefix = self.univariant_uninterned(
- ty,
- &prefix_layouts,
- &ReprOptions::default(),
- StructKind::AlwaysSized,
- )?;
-
- let (prefix_size, prefix_align) = (prefix.size, prefix.align);
-
- // Split the prefix layout into the "outer" fields (upvars and
- // discriminant) and the "promoted" fields. Promoted fields will
- // get included in each variant that requested them in
- // GeneratorLayout.
- debug!("prefix = {:#?}", prefix);
- let (outer_fields, promoted_offsets, promoted_memory_index) = match prefix.fields {
- FieldsShape::Arbitrary { mut offsets, memory_index } => {
- let mut inverse_memory_index = invert_mapping(&memory_index);
-
- // "a" (`0..b_start`) and "b" (`b_start..`) correspond to
- // "outer" and "promoted" fields respectively.
- let b_start = (tag_index + 1) as u32;
- let offsets_b = offsets.split_off(b_start as usize);
- let offsets_a = offsets;
-
- // Disentangle the "a" and "b" components of `inverse_memory_index`
- // by preserving the order but keeping only one disjoint "half" each.
- // FIXME(eddyb) build a better abstraction for permutations, if possible.
- let inverse_memory_index_b: Vec<_> =
- inverse_memory_index.iter().filter_map(|&i| i.checked_sub(b_start)).collect();
- inverse_memory_index.retain(|&i| i < b_start);
- let inverse_memory_index_a = inverse_memory_index;
-
- // Since `inverse_memory_index_{a,b}` each only refer to their
- // respective fields, they can be safely inverted
- let memory_index_a = invert_mapping(&inverse_memory_index_a);
- let memory_index_b = invert_mapping(&inverse_memory_index_b);
-
- let outer_fields =
- FieldsShape::Arbitrary { offsets: offsets_a, memory_index: memory_index_a };
- (outer_fields, offsets_b, memory_index_b)
- }
- _ => bug!(),
- };
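
`invert_mapping` treats `memory_index` as a permutation from field order to memory order and inverts it; a self-contained version of that helper, reproduced here for illustration:

    /// `map[i]` is the memory slot of field `i`; the inverse maps a memory slot
    /// back to the field index stored there.
    fn invert_mapping(map: &[u32]) -> Vec<u32> {
        let mut inverse = vec![0; map.len()];
        for (field, &slot) in map.iter().enumerate() {
            inverse[slot as usize] = field as u32;
        }
        inverse
    }

    fn main() {
        // Field 0 is stored in slot 2, field 1 in slot 0, field 2 in slot 1...
        let memory_index = [2, 0, 1];
        assert_eq!(invert_mapping(&memory_index), vec![1, 2, 0]);
        // ...and inverting twice recovers the original mapping.
        assert_eq!(invert_mapping(&invert_mapping(&memory_index)), memory_index.to_vec());
    }
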
-
- let mut size = prefix.size;
- let mut align = prefix.align;
- let variants = info
- .variant_fields
- .iter_enumerated()
- .map(|(index, variant_fields)| {
- // Only include overlap-eligible fields when we compute our variant layout.
- let variant_only_tys = variant_fields
- .iter()
- .filter(|local| match assignments[**local] {
- Unassigned => bug!(),
- Assigned(v) if v == index => true,
- Assigned(_) => bug!("assignment does not match variant"),
- Ineligible(_) => false,
- })
- .map(|local| subst_field(info.field_tys[*local]));
-
- let mut variant = self.univariant_uninterned(
- ty,
- &variant_only_tys
- .map(|ty| self.layout_of(ty))
- .collect::<Result<Vec<_>, _>>()?,
- &ReprOptions::default(),
- StructKind::Prefixed(prefix_size, prefix_align.abi),
- )?;
- variant.variants = Variants::Single { index };
-
- let FieldsShape::Arbitrary { offsets, memory_index } = variant.fields else {
- bug!();
- };
-
- // Now, stitch the promoted and variant-only fields back together in
- // the order they are mentioned by our GeneratorLayout.
- // Because we only use some subset (that can differ between variants)
- // of the promoted fields, we can't just pick those elements of the
- // `promoted_memory_index` (as we'd end up with gaps).
- // So instead, we build an "inverse memory_index", as if all of the
- // promoted fields were being used, but leave the elements not in the
- // subset as `INVALID_FIELD_IDX`, which we can filter out later to
- // obtain a valid (bijective) mapping.
- const INVALID_FIELD_IDX: u32 = !0;
- let mut combined_inverse_memory_index =
- vec![INVALID_FIELD_IDX; promoted_memory_index.len() + memory_index.len()];
- let mut offsets_and_memory_index = iter::zip(offsets, memory_index);
- let combined_offsets = variant_fields
- .iter()
- .enumerate()
- .map(|(i, local)| {
- let (offset, memory_index) = match assignments[*local] {
- Unassigned => bug!(),
- Assigned(_) => {
- let (offset, memory_index) =
- offsets_and_memory_index.next().unwrap();
- (offset, promoted_memory_index.len() as u32 + memory_index)
- }
- Ineligible(field_idx) => {
- let field_idx = field_idx.unwrap() as usize;
- (promoted_offsets[field_idx], promoted_memory_index[field_idx])
- }
- };
- combined_inverse_memory_index[memory_index as usize] = i as u32;
- offset
- })
- .collect();
-
- // Remove the unused slots and invert the mapping to obtain the
- // combined `memory_index` (also see previous comment).
- combined_inverse_memory_index.retain(|&i| i != INVALID_FIELD_IDX);
- let combined_memory_index = invert_mapping(&combined_inverse_memory_index);
-
- variant.fields = FieldsShape::Arbitrary {
- offsets: combined_offsets,
- memory_index: combined_memory_index,
- };
-
- size = size.max(variant.size);
- align = align.max(variant.align);
- Ok(tcx.intern_layout(variant))
- })
- .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
-
- size = size.align_to(align.abi);
-
- let abi =
- if prefix.abi.is_uninhabited() || variants.iter().all(|v| v.abi().is_uninhabited()) {
- Abi::Uninhabited
- } else {
- Abi::Aggregate { sized: true }
- };
-
- let layout = tcx.intern_layout(LayoutS {
- variants: Variants::Multiple {
- tag,
- tag_encoding: TagEncoding::Direct,
- tag_field: tag_index,
- variants,
- },
- fields: outer_fields,
- abi,
- largest_niche: prefix.largest_niche,
- size,
- align,
- });
- debug!("generator layout ({:?}): {:#?}", ty, layout);
- Ok(layout)
- }
-
- /// This is invoked by the `layout_of` query to record the final
- /// layout of each type.
- #[inline(always)]
- fn record_layout_for_printing(&self, layout: TyAndLayout<'tcx>) {
- // If we are running with `-Zprint-type-sizes`, maybe record layouts
- // for dumping later.
- if self.tcx.sess.opts.unstable_opts.print_type_sizes {
- self.record_layout_for_printing_outlined(layout)
- }
- }
-
- fn record_layout_for_printing_outlined(&self, layout: TyAndLayout<'tcx>) {
- // Ignore layouts that are done with non-empty environments or
- // non-monomorphic layouts, as the user only wants to see the stuff
- // resulting from the final codegen session.
- if layout.ty.has_param_types_or_consts() || !self.param_env.caller_bounds().is_empty() {
- return;
- }
-
- // (delay format until we actually need it)
- let record = |kind, packed, opt_discr_size, variants| {
- let type_desc = format!("{:?}", layout.ty);
- self.tcx.sess.code_stats.record_type_size(
- kind,
- type_desc,
- layout.align.abi,
- layout.size,
- packed,
- opt_discr_size,
- variants,
- );
- };
-
- let adt_def = match *layout.ty.kind() {
- ty::Adt(ref adt_def, _) => {
- debug!("print-type-size t: `{:?}` process adt", layout.ty);
- adt_def
- }
-
- ty::Closure(..) => {
- debug!("print-type-size t: `{:?}` record closure", layout.ty);
- record(DataTypeKind::Closure, false, None, vec![]);
- return;
- }
-
- _ => {
- debug!("print-type-size t: `{:?}` skip non-nominal", layout.ty);
- return;
- }
- };
-
- let adt_kind = adt_def.adt_kind();
- let adt_packed = adt_def.repr().pack.is_some();
-
- let build_variant_info = |n: Option<Symbol>, flds: &[Symbol], layout: TyAndLayout<'tcx>| {
- let mut min_size = Size::ZERO;
- let field_info: Vec<_> = flds
- .iter()
- .enumerate()
- .map(|(i, &name)| {
- let field_layout = layout.field(self, i);
- let offset = layout.fields.offset(i);
- let field_end = offset + field_layout.size;
- if min_size < field_end {
- min_size = field_end;
- }
- FieldInfo {
- name,
- offset: offset.bytes(),
- size: field_layout.size.bytes(),
- align: field_layout.align.abi.bytes(),
- }
- })
- .collect();
-
- VariantInfo {
- name: n,
- kind: if layout.is_unsized() { SizeKind::Min } else { SizeKind::Exact },
- align: layout.align.abi.bytes(),
- size: if min_size.bytes() == 0 { layout.size.bytes() } else { min_size.bytes() },
- fields: field_info,
- }
- };
-
- match layout.variants {
- Variants::Single { index } => {
- if !adt_def.variants().is_empty() && layout.fields != FieldsShape::Primitive {
- debug!(
- "print-type-size `{:#?}` variant {}",
- layout,
- adt_def.variant(index).name
- );
- let variant_def = &adt_def.variant(index);
- let fields: Vec<_> = variant_def.fields.iter().map(|f| f.name).collect();
- record(
- adt_kind.into(),
- adt_packed,
- None,
- vec![build_variant_info(Some(variant_def.name), &fields, layout)],
- );
- } else {
- // (This case arises for *empty* enums; so give it
- // zero variants.)
- record(adt_kind.into(), adt_packed, None, vec![]);
- }
- }
-
- Variants::Multiple { tag, ref tag_encoding, .. } => {
- debug!(
- "print-type-size `{:#?}` adt general variants def {}",
- layout.ty,
- adt_def.variants().len()
- );
- let variant_infos: Vec<_> = adt_def
- .variants()
- .iter_enumerated()
- .map(|(i, variant_def)| {
- let fields: Vec<_> = variant_def.fields.iter().map(|f| f.name).collect();
- build_variant_info(
- Some(variant_def.name),
- &fields,
- layout.for_variant(self, i),
- )
- })
- .collect();
- record(
- adt_kind.into(),
- adt_packed,
- match tag_encoding {
- TagEncoding::Direct => Some(tag.size(self)),
- _ => None,
- },
- variant_infos,
- );
- }
- }
- }
-}
-
/// Type size "skeleton", i.e., the only information determining a type's size.
/// While this is conservative (aside from constant sizes, only pointers,
/// newtypes thereof and null pointer optimized enums are allowed), it is
@@ -2083,7 +263,7 @@ impl<'tcx> SizeSkeleton<'tcx> {
tcx: TyCtxt<'tcx>,
param_env: ty::ParamEnv<'tcx>,
) -> Result<SizeSkeleton<'tcx>, LayoutError<'tcx>> {
- debug_assert!(!ty.has_infer_types_or_consts());
+ debug_assert!(!ty.has_non_region_infer());
// First try computing a static layout.
let err = match tcx.layout_of(param_env.and(ty)) {
@@ -2099,7 +279,7 @@ impl<'tcx> SizeSkeleton<'tcx> {
let tail = tcx.struct_tail_erasing_lifetimes(pointee, param_env);
match tail.kind() {
ty::Param(_) | ty::Projection(_) => {
- debug_assert!(tail.has_param_types_or_consts());
+ debug_assert!(tail.has_non_region_param());
Ok(SizeSkeleton::Pointer { non_zero, tail: tcx.erase_regions(tail) })
}
_ => bug!(
@@ -2468,7 +648,9 @@ where
| ty::FnDef(..)
| ty::GeneratorWitness(..)
| ty::Foreign(..)
- | ty::Dynamic(..) => bug!("TyAndLayout::field({:?}): not applicable", this),
+ | ty::Dynamic(_, _, ty::Dyn) => {
+ bug!("TyAndLayout::field({:?}): not applicable", this)
+ }
// Potentially-fat pointers.
ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
@@ -2497,7 +679,7 @@ where
match tcx.struct_tail_erasing_lifetimes(pointee, cx.param_env()).kind() {
ty::Slice(_) | ty::Str => TyMaybeWithLayout::Ty(tcx.types.usize),
- ty::Dynamic(_, _) => {
+ ty::Dynamic(_, _, ty::Dyn) => {
TyMaybeWithLayout::Ty(tcx.mk_imm_ref(
tcx.lifetimes.re_static,
tcx.mk_array(tcx.types.usize, 3),
@@ -2566,6 +748,22 @@ where
}
}
+ ty::Dynamic(_, _, ty::DynStar) => {
+ if i == 0 {
+ TyMaybeWithLayout::Ty(tcx.types.usize)
+ } else if i == 1 {
+ // FIXME(dyn-star) same FIXME as above applies here too
+ TyMaybeWithLayout::Ty(
+ tcx.mk_imm_ref(
+ tcx.lifetimes.re_static,
+ tcx.mk_array(tcx.types.usize, 3),
+ ),
+ )
+ } else {
+ bug!("no field {i} on dyn*")
+ }
+ }
+
ty::Projection(_)
| ty::Bound(..)
| ty::Placeholder(..)
@@ -2632,7 +830,7 @@ where
} else {
match mt {
hir::Mutability::Not => {
- if ty.is_freeze(tcx.at(DUMMY_SP), cx.param_env()) {
+ if ty.is_freeze(tcx, cx.param_env()) {
PointerKind::Frozen
} else {
PointerKind::SharedMutable
@@ -2643,7 +841,7 @@ where
// noalias, as another pointer to the structure can be obtained, that
// is not based-on the original reference. We consider all !Unpin
// types to be potentially self-referential here.
- if ty.is_unpin(tcx.at(DUMMY_SP), cx.param_env()) {
+ if ty.is_unpin(tcx, cx.param_env()) {
PointerKind::UniqueBorrowed
} else {
PointerKind::UniqueBorrowedPinned
@@ -2674,11 +872,11 @@ where
// using more niches than just null (e.g., the first page of
// the address space, or unaligned pointers).
Variants::Multiple {
- tag_encoding: TagEncoding::Niche { dataful_variant, .. },
+ tag_encoding: TagEncoding::Niche { untagged_variant, .. },
tag_field,
..
} if this.fields.offset(tag_field) == offset => {
- Some(this.for_variant(cx, dataful_variant))
+ Some(this.for_variant(cx, untagged_variant))
}
_ => Some(this),
};
@@ -2755,111 +953,6 @@ where
}
}
-impl<'tcx> ty::Instance<'tcx> {
- // NOTE(eddyb) this is private to avoid using it from outside of
- // `fn_abi_of_instance` - any other uses are either too high-level
- // for `Instance` (e.g. typeck would use `Ty::fn_sig` instead),
- // or should go through `FnAbi` instead, to avoid losing any
- // adjustments `fn_abi_of_instance` might be performing.
- fn fn_sig_for_fn_abi(
- &self,
- tcx: TyCtxt<'tcx>,
- param_env: ty::ParamEnv<'tcx>,
- ) -> ty::PolyFnSig<'tcx> {
- let ty = self.ty(tcx, param_env);
- match *ty.kind() {
- ty::FnDef(..) => {
- // HACK(davidtwco,eddyb): This is a workaround for polymorphization considering
- // parameters unused if they show up in the signature, but not in the `mir::Body`
- // (i.e. due to being inside a projection that got normalized, see
- // `src/test/ui/polymorphization/normalized_sig_types.rs`), and codegen not keeping
- // track of a polymorphization `ParamEnv` to allow normalizing later.
- let mut sig = match *ty.kind() {
- ty::FnDef(def_id, substs) => tcx
- .normalize_erasing_regions(tcx.param_env(def_id), tcx.bound_fn_sig(def_id))
- .subst(tcx, substs),
- _ => unreachable!(),
- };
-
- if let ty::InstanceDef::VTableShim(..) = self.def {
- // Modify `fn(self, ...)` to `fn(self: *mut Self, ...)`.
- sig = sig.map_bound(|mut sig| {
- let mut inputs_and_output = sig.inputs_and_output.to_vec();
- inputs_and_output[0] = tcx.mk_mut_ptr(inputs_and_output[0]);
- sig.inputs_and_output = tcx.intern_type_list(&inputs_and_output);
- sig
- });
- }
- sig
- }
- ty::Closure(def_id, substs) => {
- let sig = substs.as_closure().sig();
-
- let bound_vars = tcx.mk_bound_variable_kinds(
- sig.bound_vars()
- .iter()
- .chain(iter::once(ty::BoundVariableKind::Region(ty::BrEnv))),
- );
- let br = ty::BoundRegion {
- var: ty::BoundVar::from_usize(bound_vars.len() - 1),
- kind: ty::BoundRegionKind::BrEnv,
- };
- let env_region = ty::ReLateBound(ty::INNERMOST, br);
- let env_ty = tcx.closure_env_ty(def_id, substs, env_region).unwrap();
-
- let sig = sig.skip_binder();
- ty::Binder::bind_with_vars(
- tcx.mk_fn_sig(
- iter::once(env_ty).chain(sig.inputs().iter().cloned()),
- sig.output(),
- sig.c_variadic,
- sig.unsafety,
- sig.abi,
- ),
- bound_vars,
- )
- }
- ty::Generator(_, substs, _) => {
- let sig = substs.as_generator().poly_sig();
-
- let bound_vars = tcx.mk_bound_variable_kinds(
- sig.bound_vars()
- .iter()
- .chain(iter::once(ty::BoundVariableKind::Region(ty::BrEnv))),
- );
- let br = ty::BoundRegion {
- var: ty::BoundVar::from_usize(bound_vars.len() - 1),
- kind: ty::BoundRegionKind::BrEnv,
- };
- let env_region = ty::ReLateBound(ty::INNERMOST, br);
- let env_ty = tcx.mk_mut_ref(tcx.mk_region(env_region), ty);
-
- let pin_did = tcx.require_lang_item(LangItem::Pin, None);
- let pin_adt_ref = tcx.adt_def(pin_did);
- let pin_substs = tcx.intern_substs(&[env_ty.into()]);
- let env_ty = tcx.mk_adt(pin_adt_ref, pin_substs);
-
- let sig = sig.skip_binder();
- let state_did = tcx.require_lang_item(LangItem::GeneratorState, None);
- let state_adt_ref = tcx.adt_def(state_did);
- let state_substs = tcx.intern_substs(&[sig.yield_ty.into(), sig.return_ty.into()]);
- let ret_ty = tcx.mk_adt(state_adt_ref, state_substs);
- ty::Binder::bind_with_vars(
- tcx.mk_fn_sig(
- [env_ty, sig.resume_ty].iter(),
- &ret_ty,
- false,
- hir::Unsafety::Normal,
- rustc_target::spec::abi::Abi::Rust,
- ),
- bound_vars,
- )
- }
- _ => bug!("unexpected type {:?} in Instance::fn_sig", ty),
- }
- }
-}
-
/// Calculates whether a function's ABI can unwind or not.
///
/// This takes two primary parameters:
@@ -2907,6 +1000,7 @@ impl<'tcx> ty::Instance<'tcx> {
/// with `-Cpanic=abort` will look like they can't unwind when in fact they
/// might (from a foreign exception or similar).
#[inline]
+#[tracing::instrument(level = "debug", skip(tcx))]
pub fn fn_can_unwind<'tcx>(tcx: TyCtxt<'tcx>, fn_def_id: Option<DefId>, abi: SpecAbi) -> bool {
if let Some(did) = fn_def_id {
// Special attribute for functions which can't unwind.
@@ -3001,40 +1095,6 @@ pub fn fn_can_unwind<'tcx>(tcx: TyCtxt<'tcx>, fn_def_id: Option<DefId>, abi: Spe
}
}
-#[inline]
-pub fn conv_from_spec_abi(tcx: TyCtxt<'_>, abi: SpecAbi) -> Conv {
- use rustc_target::spec::abi::Abi::*;
- match tcx.sess.target.adjust_abi(abi) {
- RustIntrinsic | PlatformIntrinsic | Rust | RustCall => Conv::Rust,
- RustCold => Conv::RustCold,
-
- // It's the ABI's job to select this, not ours.
- System { .. } => bug!("system abi should be selected elsewhere"),
- EfiApi => bug!("eficall abi should be selected elsewhere"),
-
- Stdcall { .. } => Conv::X86Stdcall,
- Fastcall { .. } => Conv::X86Fastcall,
- Vectorcall { .. } => Conv::X86VectorCall,
- Thiscall { .. } => Conv::X86ThisCall,
- C { .. } => Conv::C,
- Unadjusted => Conv::C,
- Win64 { .. } => Conv::X86_64Win64,
- SysV64 { .. } => Conv::X86_64SysV,
- Aapcs { .. } => Conv::ArmAapcs,
- CCmseNonSecureCall => Conv::CCmseNonSecureCall,
- PtxKernel => Conv::PtxKernel,
- Msp430Interrupt => Conv::Msp430Intr,
- X86Interrupt => Conv::X86Intr,
- AmdGpuKernel => Conv::AmdGpuKernel,
- AvrInterrupt => Conv::AvrInterrupt,
- AvrNonBlockingInterrupt => Conv::AvrNonBlockingInterrupt,
- Wasm => Conv::C,
-
- // These API constants ought to be more specific...
- Cdecl { .. } => Conv::C,
- }
-}
-
/// Error produced by attempting to compute or adjust a `FnAbi`.
#[derive(Copy, Clone, Debug, HashStable)]
pub enum FnAbiError<'tcx> {
@@ -3066,6 +1126,12 @@ impl<'tcx> fmt::Display for FnAbiError<'tcx> {
}
}
+impl<'tcx> IntoDiagnostic<'tcx, !> for FnAbiError<'tcx> {
+ fn into_diagnostic(self, handler: &'tcx Handler) -> DiagnosticBuilder<'tcx, !> {
+ handler.struct_fatal(self.to_string())
+ }
+}
+
// FIXME(eddyb) maybe use something like this for an unified `fn_abi_of`, not
// just for error handling.
#[derive(Debug)]
@@ -3123,6 +1189,7 @@ pub trait FnAbiOf<'tcx>: FnAbiOfHelpers<'tcx> {
/// NB: that includes virtual calls, which are represented by "direct calls"
/// to an `InstanceDef::Virtual` instance (of `<dyn Trait as Trait>::fn`).
#[inline]
+ #[tracing::instrument(level = "debug", skip(self))]
fn fn_abi_of_instance(
&self,
instance: ty::Instance<'tcx>,
@@ -3146,359 +1213,3 @@ pub trait FnAbiOf<'tcx>: FnAbiOfHelpers<'tcx> {
}
impl<'tcx, C: FnAbiOfHelpers<'tcx>> FnAbiOf<'tcx> for C {}
-
-fn fn_abi_of_fn_ptr<'tcx>(
- tcx: TyCtxt<'tcx>,
- query: ty::ParamEnvAnd<'tcx, (ty::PolyFnSig<'tcx>, &'tcx ty::List<Ty<'tcx>>)>,
-) -> Result<&'tcx FnAbi<'tcx, Ty<'tcx>>, FnAbiError<'tcx>> {
- let (param_env, (sig, extra_args)) = query.into_parts();
-
- LayoutCx { tcx, param_env }.fn_abi_new_uncached(sig, extra_args, None, None, false)
-}
-
-fn fn_abi_of_instance<'tcx>(
- tcx: TyCtxt<'tcx>,
- query: ty::ParamEnvAnd<'tcx, (ty::Instance<'tcx>, &'tcx ty::List<Ty<'tcx>>)>,
-) -> Result<&'tcx FnAbi<'tcx, Ty<'tcx>>, FnAbiError<'tcx>> {
- let (param_env, (instance, extra_args)) = query.into_parts();
-
- let sig = instance.fn_sig_for_fn_abi(tcx, param_env);
-
- let caller_location = if instance.def.requires_caller_location(tcx) {
- Some(tcx.caller_location_ty())
- } else {
- None
- };
-
- LayoutCx { tcx, param_env }.fn_abi_new_uncached(
- sig,
- extra_args,
- caller_location,
- Some(instance.def_id()),
- matches!(instance.def, ty::InstanceDef::Virtual(..)),
- )
-}
-
-impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
- // FIXME(eddyb) perhaps group the signature/type-containing (or all of them?)
- // arguments of this method, into a separate `struct`.
- fn fn_abi_new_uncached(
- &self,
- sig: ty::PolyFnSig<'tcx>,
- extra_args: &[Ty<'tcx>],
- caller_location: Option<Ty<'tcx>>,
- fn_def_id: Option<DefId>,
- // FIXME(eddyb) replace this with something typed, like an `enum`.
- force_thin_self_ptr: bool,
- ) -> Result<&'tcx FnAbi<'tcx, Ty<'tcx>>, FnAbiError<'tcx>> {
- debug!("fn_abi_new_uncached({:?}, {:?})", sig, extra_args);
-
- let sig = self.tcx.normalize_erasing_late_bound_regions(self.param_env, sig);
-
- let conv = conv_from_spec_abi(self.tcx(), sig.abi);
-
- let mut inputs = sig.inputs();
- let extra_args = if sig.abi == RustCall {
- assert!(!sig.c_variadic && extra_args.is_empty());
-
- if let Some(input) = sig.inputs().last() {
- if let ty::Tuple(tupled_arguments) = input.kind() {
- inputs = &sig.inputs()[0..sig.inputs().len() - 1];
- tupled_arguments
- } else {
- bug!(
- "argument to function with \"rust-call\" ABI \
- is not a tuple"
- );
- }
- } else {
- bug!(
- "argument to function with \"rust-call\" ABI \
- is not a tuple"
- );
- }
- } else {
- assert!(sig.c_variadic || extra_args.is_empty());
- extra_args
- };
-
- let target = &self.tcx.sess.target;
- let target_env_gnu_like = matches!(&target.env[..], "gnu" | "musl" | "uclibc");
- let win_x64_gnu = target.os == "windows" && target.arch == "x86_64" && target.env == "gnu";
- let linux_s390x_gnu_like =
- target.os == "linux" && target.arch == "s390x" && target_env_gnu_like;
- let linux_sparc64_gnu_like =
- target.os == "linux" && target.arch == "sparc64" && target_env_gnu_like;
- let linux_powerpc_gnu_like =
- target.os == "linux" && target.arch == "powerpc" && target_env_gnu_like;
- use SpecAbi::*;
- let rust_abi = matches!(sig.abi, RustIntrinsic | PlatformIntrinsic | Rust | RustCall);
-
- // Handle safe Rust thin and fat pointers.
- let adjust_for_rust_scalar = |attrs: &mut ArgAttributes,
- scalar: Scalar,
- layout: TyAndLayout<'tcx>,
- offset: Size,
- is_return: bool| {
- // Booleans are always a noundef i1 that needs to be zero-extended.
- if scalar.is_bool() {
- attrs.ext(ArgExtension::Zext);
- attrs.set(ArgAttribute::NoUndef);
- return;
- }
-
- // Scalars which have invalid values cannot be undef.
- if !scalar.is_always_valid(self) {
- attrs.set(ArgAttribute::NoUndef);
- }
-
- // Only pointer types handled below.
- let Scalar::Initialized { value: Pointer, valid_range} = scalar else { return };
-
- if !valid_range.contains(0) {
- attrs.set(ArgAttribute::NonNull);
- }
-
- if let Some(pointee) = layout.pointee_info_at(self, offset) {
- if let Some(kind) = pointee.safe {
- attrs.pointee_align = Some(pointee.align);
-
- // `Box` (`UniqueBorrowed`) are not necessarily dereferenceable
- // for the entire duration of the function as they can be deallocated
- // at any time. Same for shared mutable references. If LLVM had a
- // way to say "dereferenceable on entry" we could use it here.
- attrs.pointee_size = match kind {
- PointerKind::UniqueBorrowed
- | PointerKind::UniqueBorrowedPinned
- | PointerKind::Frozen => pointee.size,
- PointerKind::SharedMutable | PointerKind::UniqueOwned => Size::ZERO,
- };
-
- // `Box`, `&T`, and `&mut T` cannot be undef.
- // Note that this only applies to the value of the pointer itself;
- // this attribute doesn't make it UB for the pointed-to data to be undef.
- attrs.set(ArgAttribute::NoUndef);
-
- // The aliasing rules for `Box<T>` are still not decided, but currently we emit
- // `noalias` for it. This can be turned off using an unstable flag.
- // See https://github.com/rust-lang/unsafe-code-guidelines/issues/326
- let noalias_for_box =
- self.tcx().sess.opts.unstable_opts.box_noalias.unwrap_or(true);
-
- // `&mut` pointer parameters never alias other parameters,
- // or mutable global data
- //
- // `&T` where `T` contains no `UnsafeCell<U>` is immutable,
- // and can be marked as both `readonly` and `noalias`, as
- // LLVM's definition of `noalias` is based solely on memory
- // dependencies rather than pointer equality
- //
- // Due to past miscompiles in LLVM, we apply a separate NoAliasMutRef attribute
- // for UniqueBorrowed arguments, so that the codegen backend can decide whether
- // or not to actually emit the attribute. It can also be controlled with the
- // `-Zmutable-noalias` debugging option.
- let no_alias = match kind {
- PointerKind::SharedMutable
- | PointerKind::UniqueBorrowed
- | PointerKind::UniqueBorrowedPinned => false,
- PointerKind::UniqueOwned => noalias_for_box,
- PointerKind::Frozen => !is_return,
- };
- if no_alias {
- attrs.set(ArgAttribute::NoAlias);
- }
-
- if kind == PointerKind::Frozen && !is_return {
- attrs.set(ArgAttribute::ReadOnly);
- }
-
- if kind == PointerKind::UniqueBorrowed && !is_return {
- attrs.set(ArgAttribute::NoAliasMutRef);
- }
- }
- }
- };
-
- let arg_of = |ty: Ty<'tcx>, arg_idx: Option<usize>| -> Result<_, FnAbiError<'tcx>> {
- let is_return = arg_idx.is_none();
-
- let layout = self.layout_of(ty)?;
- let layout = if force_thin_self_ptr && arg_idx == Some(0) {
- // Don't pass the vtable, it's not an argument of the virtual fn.
- // Instead, pass just the data pointer, but give it the type `*const/mut dyn Trait`
- // or `&/&mut dyn Trait` because this is special-cased elsewhere in codegen
- make_thin_self_ptr(self, layout)
- } else {
- layout
- };
-
- let mut arg = ArgAbi::new(self, layout, |layout, scalar, offset| {
- let mut attrs = ArgAttributes::new();
- adjust_for_rust_scalar(&mut attrs, scalar, *layout, offset, is_return);
- attrs
- });
-
- if arg.layout.is_zst() {
- // For some forsaken reason, x86_64-pc-windows-gnu
- // doesn't ignore zero-sized struct arguments.
- // The same is true for {s390x,sparc64,powerpc}-unknown-linux-{gnu,musl,uclibc}.
- if is_return
- || rust_abi
- || (!win_x64_gnu
- && !linux_s390x_gnu_like
- && !linux_sparc64_gnu_like
- && !linux_powerpc_gnu_like)
- {
- arg.mode = PassMode::Ignore;
- }
- }
-
- Ok(arg)
- };
-
- let mut fn_abi = FnAbi {
- ret: arg_of(sig.output(), None)?,
- args: inputs
- .iter()
- .copied()
- .chain(extra_args.iter().copied())
- .chain(caller_location)
- .enumerate()
- .map(|(i, ty)| arg_of(ty, Some(i)))
- .collect::<Result<_, _>>()?,
- c_variadic: sig.c_variadic,
- fixed_count: inputs.len(),
- conv,
- can_unwind: fn_can_unwind(self.tcx(), fn_def_id, sig.abi),
- };
- self.fn_abi_adjust_for_abi(&mut fn_abi, sig.abi)?;
- debug!("fn_abi_new_uncached = {:?}", fn_abi);
- Ok(self.tcx.arena.alloc(fn_abi))
- }
-
- fn fn_abi_adjust_for_abi(
- &self,
- fn_abi: &mut FnAbi<'tcx, Ty<'tcx>>,
- abi: SpecAbi,
- ) -> Result<(), FnAbiError<'tcx>> {
- if abi == SpecAbi::Unadjusted {
- return Ok(());
- }
-
- if abi == SpecAbi::Rust
- || abi == SpecAbi::RustCall
- || abi == SpecAbi::RustIntrinsic
- || abi == SpecAbi::PlatformIntrinsic
- {
- let fixup = |arg: &mut ArgAbi<'tcx, Ty<'tcx>>| {
- if arg.is_ignore() {
- return;
- }
-
- match arg.layout.abi {
- Abi::Aggregate { .. } => {}
-
- // This is a fun case! The gist of what this is doing is
- // that we want callers and callees to always agree on the
- // ABI of how they pass SIMD arguments. If we were to *not*
- // make these arguments indirect then they'd be immediates
-                    // in LLVM, which means that they'd use whatever the
- // appropriate ABI is for the callee and the caller. That
- // means, for example, if the caller doesn't have AVX
- // enabled but the callee does, then passing an AVX argument
- // across this boundary would cause corrupt data to show up.
- //
- // This problem is fixed by unconditionally passing SIMD
- // arguments through memory between callers and callees
- // which should get them all to agree on ABI regardless of
- // target feature sets. Some more information about this
- // issue can be found in #44367.
- //
- // Note that the platform intrinsic ABI is exempt here as
- // that's how we connect up to LLVM and it's unstable
-                    // anyway; we control all calls to it in libstd.
- Abi::Vector { .. }
- if abi != SpecAbi::PlatformIntrinsic
- && self.tcx.sess.target.simd_types_indirect =>
- {
- arg.make_indirect();
- return;
- }
-
- _ => return,
- }
-
- let size = arg.layout.size;
- if arg.layout.is_unsized() || size > Pointer.size(self) {
- arg.make_indirect();
- } else {
- // We want to pass small aggregates as immediates, but using
- // a LLVM aggregate type for this leads to bad optimizations,
- // so we pick an appropriately sized integer type instead.
- arg.cast_to(Reg { kind: RegKind::Integer, size });
- }
- };
- fixup(&mut fn_abi.ret);
- for arg in &mut fn_abi.args {
- fixup(arg);
- }
- } else {
- fn_abi.adjust_for_foreign_abi(self, abi)?;
- }
-
- Ok(())
- }
-}
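
The size-based rule at the end of `fn_abi_adjust_for_abi` above boils down to: unsized or larger-than-a-pointer aggregates are passed indirectly, smaller ones are cast to an integer of their own size. A reduced sketch assuming a hypothetical 64-bit target:

    /// Reduced model of the pass-mode decision above; only the sizes matter here.
    #[derive(Debug, PartialEq)]
    enum PassMode {
        Indirect,
        CastToInt { size: u64 },
    }

    fn rust_abi_pass_mode(size: u64, is_unsized: bool) -> PassMode {
        let pointer_size = 8; // assumed 64-bit target
        if is_unsized || size > pointer_size {
            PassMode::Indirect
        } else {
            // Small aggregates are passed as an appropriately sized integer
            // rather than as an LLVM aggregate type.
            PassMode::CastToInt { size }
        }
    }

    fn main() {
        assert_eq!(rust_abi_pass_mode(4, false), PassMode::CastToInt { size: 4 });
        assert_eq!(rust_abi_pass_mode(24, false), PassMode::Indirect);
        assert_eq!(rust_abi_pass_mode(0, true), PassMode::Indirect);
    }
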
-
-fn make_thin_self_ptr<'tcx>(
- cx: &(impl HasTyCtxt<'tcx> + HasParamEnv<'tcx>),
- layout: TyAndLayout<'tcx>,
-) -> TyAndLayout<'tcx> {
- let tcx = cx.tcx();
- let fat_pointer_ty = if layout.is_unsized() {
- // unsized `self` is passed as a pointer to `self`
- // FIXME (mikeyhew) change this to use &own if it is ever added to the language
- tcx.mk_mut_ptr(layout.ty)
- } else {
- match layout.abi {
- Abi::ScalarPair(..) => (),
- _ => bug!("receiver type has unsupported layout: {:?}", layout),
- }
-
- // In the case of Rc<Self>, we need to explicitly pass a *mut RcBox<Self>
- // with a Scalar (not ScalarPair) ABI. This is a hack that is understood
- // elsewhere in the compiler as a method on a `dyn Trait`.
- // To get the type `*mut RcBox<Self>`, we just keep unwrapping newtypes until we
- // get a built-in pointer type
- let mut fat_pointer_layout = layout;
- 'descend_newtypes: while !fat_pointer_layout.ty.is_unsafe_ptr()
- && !fat_pointer_layout.ty.is_region_ptr()
- {
- for i in 0..fat_pointer_layout.fields.count() {
- let field_layout = fat_pointer_layout.field(cx, i);
-
- if !field_layout.is_zst() {
- fat_pointer_layout = field_layout;
- continue 'descend_newtypes;
- }
- }
-
- bug!("receiver has no non-zero-sized fields {:?}", fat_pointer_layout);
- }
-
- fat_pointer_layout.ty
- };
-
- // we now have a type like `*mut RcBox<dyn Trait>`
- // change its layout to that of `*mut ()`, a thin pointer, but keep the same type
- // this is understood as a special case elsewhere in the compiler
- let unit_ptr_ty = tcx.mk_mut_ptr(tcx.mk_unit());
-
- TyAndLayout {
- ty: fat_pointer_ty,
-
- // NOTE(eddyb) using an empty `ParamEnv`, and `unwrap`-ing the `Result`
- // should always work because the type is always `*mut ()`.
- ..tcx.layout_of(ty::ParamEnv::reveal_all().and(unit_ptr_ty)).unwrap()
- }
-}
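Aside on the removed `fn_abi_adjust_for_abi`: for the Rust ABIs it applies a simple size rule to aggregate arguments, which can be restated as a minimal standalone sketch (the function and strings below are illustrative only, not compiler internals): unsized or larger-than-pointer aggregates are passed indirectly, while smaller ones are cast to a single integer register of the same size.

    // Illustrative stand-in for the size rule; not rustc code.
    fn pass_mode(arg_size: u64, arg_is_unsized: bool, pointer_size: u64) -> &'static str {
        if arg_is_unsized || arg_size > pointer_size {
            "indirect (behind a pointer)"
        } else {
            "cast to an integer of the argument's size"
        }
    }

    fn main() {
        // On a 64-bit target: a 16-byte struct goes indirect,
        // an 8-byte struct is packed into one integer register.
        assert_eq!(pass_mode(16, false, 8), "indirect (behind a pointer)");
        assert_eq!(pass_mode(8, false, 8), "cast to an integer of the argument's size");
    }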
diff --git a/compiler/rustc_middle/src/ty/list.rs b/compiler/rustc_middle/src/ty/list.rs
index db3b5cfd1..79365ef28 100644
--- a/compiler/rustc_middle/src/ty/list.rs
+++ b/compiler/rustc_middle/src/ty/list.rs
@@ -65,6 +65,10 @@ impl<T> List<T> {
pub fn len(&self) -> usize {
self.len
}
+
+ pub fn as_slice(&self) -> &[T] {
+ self
+ }
}
impl<T: Copy> List<T> {
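The new `List::as_slice` can return `self` directly because `List<T>` dereferences to `[T]`, so the body goes through a deref coercion. A minimal sketch of the same pattern with a toy type (`MyList` is illustrative only):

    use std::ops::Deref;

    struct MyList(Vec<u32>);

    impl Deref for MyList {
        type Target = [u32];
        fn deref(&self) -> &[u32] {
            &self.0
        }
    }

    impl MyList {
        // Mirrors the shape of `List::as_slice`: the body is just `self`,
        // coerced from `&MyList` to `&[u32]` via `Deref`.
        fn as_slice(&self) -> &[u32] {
            self
        }
    }

    fn main() {
        let l = MyList(vec![1, 2, 3]);
        assert_eq!(l.as_slice(), &[1, 2, 3]);
    }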
diff --git a/compiler/rustc_middle/src/ty/mod.rs b/compiler/rustc_middle/src/ty/mod.rs
index 02da02568..a42d05706 100644
--- a/compiler/rustc_middle/src/ty/mod.rs
+++ b/compiler/rustc_middle/src/ty/mod.rs
@@ -15,8 +15,9 @@ pub use self::AssocItemContainer::*;
pub use self::BorrowKind::*;
pub use self::IntVarValue::*;
pub use self::Variance::*;
+use crate::error::{OpaqueHiddenTypeMismatch, TypeMismatchReason};
use crate::metadata::ModChild;
-use crate::middle::privacy::AccessLevels;
+use crate::middle::privacy::EffectiveVisibilities;
use crate::mir::{Body, GeneratorLayout};
use crate::traits::{self, Reveal};
use crate::ty;
@@ -25,6 +26,7 @@ use crate::ty::util::Discr;
pub use adt::*;
pub use assoc::*;
pub use generics::*;
+use hir::OpaqueTyOrigin;
use rustc_ast as ast;
use rustc_ast::node_id::NodeMap;
use rustc_attr as attr;
@@ -36,10 +38,13 @@ use rustc_data_structures::tagged_ptr::CopyTaggedPtr;
use rustc_hir as hir;
use rustc_hir::def::{CtorKind, CtorOf, DefKind, LifetimeRes, Res};
use rustc_hir::def_id::{CrateNum, DefId, LocalDefId, LocalDefIdMap};
+use rustc_hir::definitions::Definitions;
use rustc_hir::Node;
use rustc_index::vec::IndexVec;
use rustc_macros::HashStable;
use rustc_query_system::ich::StableHashingContext;
+use rustc_serialize::{Decodable, Encodable};
+use rustc_session::cstore::CrateStoreDyn;
use rustc_span::hygiene::MacroKind;
use rustc_span::symbol::{kw, sym, Ident, Symbol};
use rustc_span::{ExpnId, Span};
@@ -49,10 +54,14 @@ pub use vtable::*;
use std::fmt::Debug;
use std::hash::{Hash, Hasher};
+use std::marker::PhantomData;
+use std::mem;
+use std::num::NonZeroUsize;
use std::ops::ControlFlow;
use std::{fmt, str};
pub use crate::ty::diagnostics::*;
+pub use rustc_type_ir::DynKind::*;
pub use rustc_type_ir::InferTy::*;
pub use rustc_type_ir::RegionKind::*;
pub use rustc_type_ir::TyKind::*;
@@ -67,11 +76,11 @@ pub use self::closure::{
CAPTURE_STRUCT_LOCAL,
};
pub use self::consts::{
- Const, ConstInt, ConstKind, ConstS, InferConst, ScalarInt, Unevaluated, ValTree,
+ Const, ConstInt, ConstKind, ConstS, InferConst, ScalarInt, UnevaluatedConst, ValTree,
};
pub use self::context::{
tls, CanonicalUserType, CanonicalUserTypeAnnotation, CanonicalUserTypeAnnotations,
- CtxtInterners, DelaySpanBugEmitted, FreeRegionInfo, GeneratorDiagnosticData,
+ CtxtInterners, DeducedParamAttrs, DelaySpanBugEmitted, FreeRegionInfo, GeneratorDiagnosticData,
GeneratorInteriorTypeCause, GlobalCtxt, Lift, OnDiskCache, TyCtxt, TypeckResults, UserType,
UserTypeAnnotationIndex,
};
@@ -83,9 +92,9 @@ pub use self::sty::BoundRegionKind::*;
pub use self::sty::{
Article, Binder, BoundRegion, BoundRegionKind, BoundTy, BoundTyKind, BoundVar,
BoundVariableKind, CanonicalPolyFnSig, ClosureSubsts, ClosureSubstsParts, ConstVid,
- EarlyBinder, EarlyBoundRegion, ExistentialPredicate, ExistentialProjection,
- ExistentialTraitRef, FnSig, FreeRegion, GenSig, GeneratorSubsts, GeneratorSubstsParts,
- InlineConstSubsts, InlineConstSubstsParts, ParamConst, ParamTy, PolyExistentialProjection,
+ EarlyBoundRegion, ExistentialPredicate, ExistentialProjection, ExistentialTraitRef, FnSig,
+ FreeRegion, GenSig, GeneratorSubsts, GeneratorSubstsParts, InlineConstSubsts,
+ InlineConstSubstsParts, ParamConst, ParamTy, PolyExistentialProjection,
PolyExistentialTraitRef, PolyFnSig, PolyGenSig, PolyTraitRef, ProjectionTy, Region, RegionKind,
RegionVid, TraitRef, TyKind, TypeAndMut, UpvarSubsts, VarianceDiagInfo,
};
@@ -125,6 +134,7 @@ mod generics;
mod impls_ty;
mod instance;
mod list;
+mod opaque_types;
mod parameterized;
mod rvalue_scopes;
mod structural_impls;
@@ -134,8 +144,15 @@ mod sty;
pub type RegisteredTools = FxHashSet<Ident>;
-#[derive(Debug)]
pub struct ResolverOutputs {
+ pub definitions: Definitions,
+ pub global_ctxt: ResolverGlobalCtxt,
+ pub ast_lowering: ResolverAstLowering,
+}
+
+#[derive(Debug)]
+pub struct ResolverGlobalCtxt {
+ pub cstore: Box<CrateStoreDyn>,
pub visibilities: FxHashMap<LocalDefId, Visibility>,
/// This field is used to decide whether we should make `PRIVATE_IN_PUBLIC` a hard error.
pub has_pub_restricted: bool,
@@ -143,7 +160,7 @@ pub struct ResolverOutputs {
pub expn_that_defined: FxHashMap<LocalDefId, ExpnId>,
/// Reference span for definitions.
pub source_span: IndexVec<LocalDefId, Span>,
- pub access_levels: AccessLevels,
+ pub effective_visibilities: EffectiveVisibilities,
pub extern_crate_map: FxHashMap<LocalDefId, CrateNum>,
pub maybe_unused_trait_imports: FxIndexSet<LocalDefId>,
pub maybe_unused_extern_crates: Vec<(LocalDefId, Span)>,
@@ -177,11 +194,6 @@ pub struct ResolverAstLowering {
pub label_res_map: NodeMap<ast::NodeId>,
/// Resolutions for lifetimes.
pub lifetimes_res_map: NodeMap<LifetimeRes>,
- /// Mapping from generics `def_id`s to TAIT generics `def_id`s.
- /// For each captured lifetime (e.g., 'a), we create a new lifetime parameter that is a generic
- /// defined on the TAIT, so we have type Foo<'a1> = ... and we establish a mapping in this
- /// field from the original parameter 'a to the new parameter 'a1.
- pub generics_def_id_map: Vec<FxHashMap<LocalDefId, LocalDefId>>,
/// Lifetime parameters that lowering will have to introduce.
pub extra_lifetime_params_map: NodeMap<Vec<(Ident, ast::NodeId, LifetimeRes)>>,
@@ -262,13 +274,11 @@ impl fmt::Display for ImplPolarity {
}
#[derive(Clone, Debug, PartialEq, Eq, Copy, Hash, Encodable, Decodable, HashStable)]
-pub enum Visibility {
+pub enum Visibility<Id = LocalDefId> {
/// Visible everywhere (including in other crates).
Public,
/// Visible only in the given crate-local module.
- Restricted(DefId),
- /// Not visible anywhere in the local crate. This is the visibility of private external items.
- Invisible,
+ Restricted(Id),
}
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, HashStable, TyEncodable, TyDecodable)]
@@ -359,31 +369,45 @@ impl<'tcx> DefIdTree for TyCtxt<'tcx> {
}
}
-impl Visibility {
- /// Returns `true` if an item with this visibility is accessible from the given block.
- pub fn is_accessible_from<T: DefIdTree>(self, module: DefId, tree: T) -> bool {
- let restriction = match self {
- // Public items are visible everywhere.
- Visibility::Public => return true,
- // Private items from other crates are visible nowhere.
- Visibility::Invisible => return false,
- // Restricted items are visible in an arbitrary local module.
- Visibility::Restricted(other) if other.krate != module.krate => return false,
- Visibility::Restricted(module) => module,
- };
+impl<Id> Visibility<Id> {
+ pub fn is_public(self) -> bool {
+ matches!(self, Visibility::Public)
+ }
- tree.is_descendant_of(module, restriction)
+ pub fn map_id<OutId>(self, f: impl FnOnce(Id) -> OutId) -> Visibility<OutId> {
+ match self {
+ Visibility::Public => Visibility::Public,
+ Visibility::Restricted(id) => Visibility::Restricted(f(id)),
+ }
+ }
+}
+
+impl<Id: Into<DefId>> Visibility<Id> {
+ pub fn to_def_id(self) -> Visibility<DefId> {
+ self.map_id(Into::into)
+ }
+
+ /// Returns `true` if an item with this visibility is accessible from the given module.
+ pub fn is_accessible_from(self, module: impl Into<DefId>, tree: impl DefIdTree) -> bool {
+ match self {
+ // Public items are visible everywhere.
+ Visibility::Public => true,
+ Visibility::Restricted(id) => tree.is_descendant_of(module.into(), id.into()),
+ }
}
/// Returns `true` if this visibility is at least as accessible as the given visibility
- pub fn is_at_least<T: DefIdTree>(self, vis: Visibility, tree: T) -> bool {
- let vis_restriction = match vis {
- Visibility::Public => return self == Visibility::Public,
- Visibility::Invisible => return true,
- Visibility::Restricted(module) => module,
- };
+ pub fn is_at_least(self, vis: Visibility<impl Into<DefId>>, tree: impl DefIdTree) -> bool {
+ match vis {
+ Visibility::Public => self.is_public(),
+ Visibility::Restricted(id) => self.is_accessible_from(id, tree),
+ }
+ }
+}
- self.is_accessible_from(vis_restriction, tree)
+impl Visibility<DefId> {
+ pub fn expect_local(self) -> Visibility {
+ self.map_id(|id| id.expect_local())
}
// Returns `true` if this item is visible anywhere in the local crate.
@@ -391,13 +415,8 @@ impl Visibility {
match self {
Visibility::Public => true,
Visibility::Restricted(def_id) => def_id.is_local(),
- Visibility::Invisible => false,
}
}
-
- pub fn is_public(self) -> bool {
- matches!(self, Visibility::Public)
- }
}
/// The crate variances map is computed during typeck and contains the
@@ -468,15 +487,6 @@ pub(crate) struct TyS<'tcx> {
outer_exclusive_binder: ty::DebruijnIndex,
}
-// `TyS` is used a lot. Make sure it doesn't unintentionally get bigger.
-#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
-static_assert_size!(TyS<'_>, 40);
-
-// We are actually storing a stable hash cache next to the type, so let's
-// also check the full size
-#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
-static_assert_size!(WithStableHash<TyS<'_>>, 56);
-
/// Use this rather than `TyS`, whenever possible.
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, HashStable)]
#[rustc_diagnostic_item = "Ty"]
@@ -533,10 +543,6 @@ pub(crate) struct PredicateS<'tcx> {
outer_exclusive_binder: ty::DebruijnIndex,
}
-// This type is used a lot. Make sure it doesn't unintentionally get bigger.
-#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
-static_assert_size!(PredicateS<'_>, 56);
-
/// Use this rather than `PredicateS`, whenever possible.
#[derive(Clone, Copy, PartialEq, Eq, Hash)]
#[rustc_pass_by_value]
@@ -593,6 +599,29 @@ impl<'tcx> Predicate<'tcx> {
}
self
}
+
+ /// Whether this projection can be soundly normalized.
+ ///
+ /// Wf predicates must not be normalized, as normalization
+ /// can remove required bounds which would cause us to
+ /// unsoundly accept some programs. See #91068.
+ #[inline]
+ pub fn allow_normalization(self) -> bool {
+ match self.kind().skip_binder() {
+ PredicateKind::WellFormed(_) => false,
+ PredicateKind::Trait(_)
+ | PredicateKind::RegionOutlives(_)
+ | PredicateKind::TypeOutlives(_)
+ | PredicateKind::Projection(_)
+ | PredicateKind::ObjectSafe(_)
+ | PredicateKind::ClosureKind(_, _, _)
+ | PredicateKind::Subtype(_)
+ | PredicateKind::Coerce(_)
+ | PredicateKind::ConstEvaluatable(_)
+ | PredicateKind::ConstEquate(_, _)
+ | PredicateKind::TypeWellFormedFromEnv(_) => true,
+ }
+ }
}
impl<'a, 'tcx> HashStable<StableHashingContext<'a>> for Predicate<'tcx> {
@@ -617,7 +646,7 @@ impl rustc_errors::IntoDiagnosticArg for Predicate<'_> {
}
#[derive(Clone, Copy, PartialEq, Eq, Hash, TyEncodable, TyDecodable)]
-#[derive(HashStable, TypeFoldable, TypeVisitable)]
+#[derive(HashStable, TypeFoldable, TypeVisitable, Lift)]
pub enum PredicateKind<'tcx> {
/// Corresponds to `where Foo: Bar<A, B, C>`. `Foo` here would be
/// the `Self` type of the trait reference and `A`, `B`, and `C`
@@ -663,7 +692,7 @@ pub enum PredicateKind<'tcx> {
Coerce(CoercePredicate<'tcx>),
/// Constant initializer must evaluate successfully.
- ConstEvaluatable(ty::Unevaluated<'tcx, ()>),
+ ConstEvaluatable(ty::Const<'tcx>),
/// Constants must be equal. The first component is the const that is expected.
ConstEquate(Const<'tcx>, Const<'tcx>),
@@ -789,7 +818,7 @@ impl<'tcx> Predicate<'tcx> {
}
#[derive(Clone, Copy, PartialEq, Eq, Hash, TyEncodable, TyDecodable)]
-#[derive(HashStable, TypeFoldable, TypeVisitable)]
+#[derive(HashStable, TypeFoldable, TypeVisitable, Lift)]
pub struct TraitPredicate<'tcx> {
pub trait_ref: TraitRef<'tcx>,
@@ -842,6 +871,11 @@ impl<'tcx> TraitPredicate<'tcx> {
(BoundConstness::ConstIfConst, hir::Constness::NotConst) => false,
}
}
+
+ pub fn without_const(mut self) -> Self {
+ self.constness = BoundConstness::NotConst;
+ self
+ }
}
impl<'tcx> PolyTraitPredicate<'tcx> {
@@ -869,7 +903,7 @@ impl<'tcx> PolyTraitPredicate<'tcx> {
}
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, TyEncodable, TyDecodable)]
-#[derive(HashStable, TypeFoldable, TypeVisitable)]
+#[derive(HashStable, TypeFoldable, TypeVisitable, Lift)]
pub struct OutlivesPredicate<A, B>(pub A, pub B); // `A: B`
pub type RegionOutlivesPredicate<'tcx> = OutlivesPredicate<ty::Region<'tcx>, ty::Region<'tcx>>;
pub type TypeOutlivesPredicate<'tcx> = OutlivesPredicate<Ty<'tcx>, ty::Region<'tcx>>;
@@ -880,7 +914,7 @@ pub type PolyTypeOutlivesPredicate<'tcx> = ty::Binder<'tcx, TypeOutlivesPredicat
/// whether the `a` type is the type that we should label as "expected" when
/// presenting user diagnostics.
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, TyEncodable, TyDecodable)]
-#[derive(HashStable, TypeFoldable, TypeVisitable)]
+#[derive(HashStable, TypeFoldable, TypeVisitable, Lift)]
pub struct SubtypePredicate<'tcx> {
pub a_is_expected: bool,
pub a: Ty<'tcx>,
@@ -890,49 +924,142 @@ pub type PolySubtypePredicate<'tcx> = ty::Binder<'tcx, SubtypePredicate<'tcx>>;
/// Encodes that we have to coerce *from* the `a` type to the `b` type.
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, TyEncodable, TyDecodable)]
-#[derive(HashStable, TypeFoldable, TypeVisitable)]
+#[derive(HashStable, TypeFoldable, TypeVisitable, Lift)]
pub struct CoercePredicate<'tcx> {
pub a: Ty<'tcx>,
pub b: Ty<'tcx>,
}
pub type PolyCoercePredicate<'tcx> = ty::Binder<'tcx, CoercePredicate<'tcx>>;
-#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash, PartialOrd, Ord, TyEncodable, TyDecodable)]
-#[derive(HashStable, TypeFoldable, TypeVisitable)]
-pub enum Term<'tcx> {
- Ty(Ty<'tcx>),
- Const(Const<'tcx>),
+#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
+pub struct Term<'tcx> {
+ ptr: NonZeroUsize,
+ marker: PhantomData<(Ty<'tcx>, Const<'tcx>)>,
+}
+
+impl Debug for Term<'_> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ let data = if let Some(ty) = self.ty() {
+ format!("Term::Ty({:?})", ty)
+ } else if let Some(ct) = self.ct() {
+ format!("Term::Ct({:?})", ct)
+ } else {
+ unreachable!()
+ };
+ f.write_str(&data)
+ }
}
impl<'tcx> From<Ty<'tcx>> for Term<'tcx> {
fn from(ty: Ty<'tcx>) -> Self {
- Term::Ty(ty)
+ TermKind::Ty(ty).pack()
}
}
impl<'tcx> From<Const<'tcx>> for Term<'tcx> {
fn from(c: Const<'tcx>) -> Self {
- Term::Const(c)
+ TermKind::Const(c).pack()
+ }
+}
+
+impl<'a, 'tcx> HashStable<StableHashingContext<'a>> for Term<'tcx> {
+ fn hash_stable(&self, hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) {
+ self.unpack().hash_stable(hcx, hasher);
+ }
+}
+
+impl<'tcx> TypeFoldable<'tcx> for Term<'tcx> {
+ fn try_fold_with<F: FallibleTypeFolder<'tcx>>(self, folder: &mut F) -> Result<Self, F::Error> {
+ Ok(self.unpack().try_fold_with(folder)?.pack())
+ }
+}
+
+impl<'tcx> TypeVisitable<'tcx> for Term<'tcx> {
+ fn visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> ControlFlow<V::BreakTy> {
+ self.unpack().visit_with(visitor)
+ }
+}
+
+impl<'tcx, E: TyEncoder<I = TyCtxt<'tcx>>> Encodable<E> for Term<'tcx> {
+ fn encode(&self, e: &mut E) {
+ self.unpack().encode(e)
+ }
+}
+
+impl<'tcx, D: TyDecoder<I = TyCtxt<'tcx>>> Decodable<D> for Term<'tcx> {
+ fn decode(d: &mut D) -> Self {
+ let res: TermKind<'tcx> = Decodable::decode(d);
+ res.pack()
}
}
impl<'tcx> Term<'tcx> {
+ #[inline]
+ pub fn unpack(self) -> TermKind<'tcx> {
+ let ptr = self.ptr.get();
+ // SAFETY: use of `Interned::new_unchecked` here is ok because these
+ // pointers were originally created from `Interned` types in `pack()`,
+ // and this is just going in the other direction.
+ unsafe {
+ match ptr & TAG_MASK {
+ TYPE_TAG => TermKind::Ty(Ty(Interned::new_unchecked(
+ &*((ptr & !TAG_MASK) as *const WithStableHash<ty::TyS<'tcx>>),
+ ))),
+ CONST_TAG => TermKind::Const(ty::Const(Interned::new_unchecked(
+ &*((ptr & !TAG_MASK) as *const ty::ConstS<'tcx>),
+ ))),
+ _ => core::intrinsics::unreachable(),
+ }
+ }
+ }
+
pub fn ty(&self) -> Option<Ty<'tcx>> {
- if let Term::Ty(ty) = self { Some(*ty) } else { None }
+ if let TermKind::Ty(ty) = self.unpack() { Some(ty) } else { None }
}
pub fn ct(&self) -> Option<Const<'tcx>> {
- if let Term::Const(c) = self { Some(*c) } else { None }
+ if let TermKind::Const(c) = self.unpack() { Some(c) } else { None }
}
pub fn into_arg(self) -> GenericArg<'tcx> {
- match self {
- Term::Ty(ty) => ty.into(),
- Term::Const(c) => c.into(),
+ match self.unpack() {
+ TermKind::Ty(ty) => ty.into(),
+ TermKind::Const(c) => c.into(),
}
}
}
+const TAG_MASK: usize = 0b11;
+const TYPE_TAG: usize = 0b00;
+const CONST_TAG: usize = 0b01;
+
+#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash, PartialOrd, Ord, TyEncodable, TyDecodable)]
+#[derive(HashStable, TypeFoldable, TypeVisitable)]
+pub enum TermKind<'tcx> {
+ Ty(Ty<'tcx>),
+ Const(Const<'tcx>),
+}
+
+impl<'tcx> TermKind<'tcx> {
+ #[inline]
+ fn pack(self) -> Term<'tcx> {
+ let (tag, ptr) = match self {
+ TermKind::Ty(ty) => {
+ // Ensure we can use the tag bits.
+ assert_eq!(mem::align_of_val(&*ty.0.0) & TAG_MASK, 0);
+ (TYPE_TAG, ty.0.0 as *const WithStableHash<ty::TyS<'tcx>> as usize)
+ }
+ TermKind::Const(ct) => {
+ // Ensure we can use the tag bits.
+ assert_eq!(mem::align_of_val(&*ct.0.0) & TAG_MASK, 0);
+ (CONST_TAG, ct.0.0 as *const ty::ConstS<'tcx> as usize)
+ }
+ };
+
+ Term { ptr: unsafe { NonZeroUsize::new_unchecked(ptr | tag) }, marker: PhantomData }
+ }
+}
+
/// This kind of predicate has no *direct* correspondent in the
/// syntax, but it roughly corresponds to the syntactic forms:
///
@@ -946,7 +1073,7 @@ impl<'tcx> Term<'tcx> {
/// Form #2 eventually yields one of these `ProjectionPredicate`
/// instances to normalize the LHS.
#[derive(Copy, Clone, PartialEq, Eq, Hash, TyEncodable, TyDecodable)]
-#[derive(HashStable, TypeFoldable, TypeVisitable)]
+#[derive(HashStable, TypeFoldable, TypeVisitable, Lift)]
pub struct ProjectionPredicate<'tcx> {
pub projection_ty: ProjectionTy<'tcx>,
pub term: Term<'tcx>,
@@ -1002,6 +1129,12 @@ pub trait ToPredicate<'tcx> {
fn to_predicate(self, tcx: TyCtxt<'tcx>) -> Predicate<'tcx>;
}
+impl<'tcx> ToPredicate<'tcx> for Predicate<'tcx> {
+ fn to_predicate(self, _tcx: TyCtxt<'tcx>) -> Predicate<'tcx> {
+ self
+ }
+}
+
impl<'tcx> ToPredicate<'tcx> for Binder<'tcx, PredicateKind<'tcx>> {
#[inline(always)]
fn to_predicate(self, tcx: TyCtxt<'tcx>) -> Predicate<'tcx> {
@@ -1166,20 +1299,117 @@ pub struct OpaqueHiddenType<'tcx> {
impl<'tcx> OpaqueHiddenType<'tcx> {
pub fn report_mismatch(&self, other: &Self, tcx: TyCtxt<'tcx>) {
// Found different concrete types for the opaque type.
- let mut err = tcx.sess.struct_span_err(
- other.span,
- "concrete type differs from previous defining opaque type use",
- );
- err.span_label(other.span, format!("expected `{}`, got `{}`", self.ty, other.ty));
- if self.span == other.span {
- err.span_label(
- self.span,
- "this expression supplies two conflicting concrete types for the same opaque type",
- );
+ let sub_diag = if self.span == other.span {
+ TypeMismatchReason::ConflictType { span: self.span }
} else {
- err.span_note(self.span, "previous use here");
- }
- err.emit();
+ TypeMismatchReason::PreviousUse { span: self.span }
+ };
+ tcx.sess.emit_err(OpaqueHiddenTypeMismatch {
+ self_ty: self.ty,
+ other_ty: other.ty,
+ other_span: other.span,
+ sub: sub_diag,
+ });
+ }
+
+ #[instrument(level = "debug", skip(tcx), ret)]
+ pub fn remap_generic_params_to_declaration_params(
+ self,
+ opaque_type_key: OpaqueTypeKey<'tcx>,
+ tcx: TyCtxt<'tcx>,
+ // typeck errors have subpar spans for opaque types, so delay error reporting until borrowck.
+ ignore_errors: bool,
+ origin: OpaqueTyOrigin,
+ ) -> Self {
+ let OpaqueTypeKey { def_id, substs } = opaque_type_key;
+
+ // Use substs to build up a reverse map from regions to their
+ // identity mappings. This is necessary because `impl
+ // Trait` lifetimes are computed by replacing existing
+ // lifetimes with 'static and remapping only those used in the
+ // `impl Trait` return type, resulting in the parameters
+ // shifting.
+ let id_substs = InternalSubsts::identity_for_item(tcx, def_id.to_def_id());
+ debug!(?id_substs);
+
+ let map = substs.iter().zip(id_substs);
+
+ let map: FxHashMap<GenericArg<'tcx>, GenericArg<'tcx>> = match origin {
+ // HACK: The HIR lowering for async fn does not generate
+ // any `+ Captures<'x>` bounds for the `impl Future<...>`, so all async fns with lifetimes
+ // would now fail to compile. We should probably just make hir lowering fill this in properly.
+ OpaqueTyOrigin::AsyncFn(_) => map.collect(),
+ OpaqueTyOrigin::FnReturn(_) | OpaqueTyOrigin::TyAlias => {
+ // Opaque types may only use regions that are bound. So for
+ // ```rust
+ // type Foo<'a, 'b, 'c> = impl Trait<'a> + 'b;
+ // ```
+ // we may not use `'c` in the hidden type.
+ struct OpaqueTypeLifetimeCollector<'tcx> {
+ lifetimes: FxHashSet<ty::Region<'tcx>>,
+ }
+
+ impl<'tcx> ty::TypeVisitor<'tcx> for OpaqueTypeLifetimeCollector<'tcx> {
+ fn visit_region(&mut self, r: ty::Region<'tcx>) -> ControlFlow<Self::BreakTy> {
+ self.lifetimes.insert(r);
+ r.super_visit_with(self)
+ }
+ }
+
+ let mut collector = OpaqueTypeLifetimeCollector { lifetimes: Default::default() };
+
+ for pred in tcx.bound_explicit_item_bounds(def_id.to_def_id()).transpose_iter() {
+ let pred = pred.map_bound(|(pred, _)| *pred).subst(tcx, id_substs);
+
+ trace!(pred=?pred.kind());
+
+ // We only ignore opaque type substs if the opaque type is the outermost type.
+ // The opaque type may be nested within itself via recursion in e.g.
+ // type Foo<'a> = impl PartialEq<Foo<'a>>;
+ // which mentions `'a` and should thus accept hidden types that borrow 'a
+ // instead of requiring an additional `+ 'a`.
+ match pred.kind().skip_binder() {
+ ty::PredicateKind::Trait(TraitPredicate {
+ trait_ref: ty::TraitRef { def_id: _, substs },
+ constness: _,
+ polarity: _,
+ }) => {
+ trace!(?substs);
+ for subst in &substs[1..] {
+ subst.visit_with(&mut collector);
+ }
+ }
+ ty::PredicateKind::Projection(ty::ProjectionPredicate {
+ projection_ty: ty::ProjectionTy { substs, item_def_id: _ },
+ term,
+ }) => {
+ for subst in &substs[1..] {
+ subst.visit_with(&mut collector);
+ }
+ term.visit_with(&mut collector);
+ }
+ _ => {
+ pred.visit_with(&mut collector);
+ }
+ }
+ }
+ let lifetimes = collector.lifetimes;
+ trace!(?lifetimes);
+ map.filter(|(_, v)| {
+ let ty::GenericArgKind::Lifetime(lt) = v.unpack() else {
+ return true;
+ };
+ lifetimes.contains(&lt)
+ })
+ .collect()
+ }
+ };
+ debug!("map = {:#?}", map);
+
+ // Convert the type from the function into a type valid outside
+ // the function, by replacing invalid regions with 'static,
+ // after producing an error for each of them.
+ self.fold_with(&mut opaque_types::ReverseMapper::new(tcx, map, self.span, ignore_errors))
}
}
@@ -1411,7 +1641,7 @@ impl<'tcx> TypeFoldable<'tcx> for ParamEnv<'tcx> {
Ok(ParamEnv::new(
self.caller_bounds().try_fold_with(folder)?,
self.reveal().try_fold_with(folder)?,
- self.constness().try_fold_with(folder)?,
+ self.constness(),
))
}
}
@@ -1419,8 +1649,7 @@ impl<'tcx> TypeFoldable<'tcx> for ParamEnv<'tcx> {
impl<'tcx> TypeVisitable<'tcx> for ParamEnv<'tcx> {
fn visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> ControlFlow<V::BreakTy> {
self.caller_bounds().visit_with(visitor)?;
- self.reveal().visit_with(visitor)?;
- self.constness().visit_with(visitor)
+ self.reveal().visit_with(visitor)
}
}
@@ -1577,7 +1806,7 @@ impl<'tcx> PolyTraitRef<'tcx> {
}
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, TypeFoldable, TypeVisitable)]
-#[derive(HashStable)]
+#[derive(HashStable, Lift)]
pub struct ParamEnvAnd<'tcx, T> {
pub param_env: ParamEnv<'tcx>,
pub value: T,
@@ -1779,7 +2008,7 @@ pub enum VariantDiscr {
pub struct FieldDef {
pub did: DefId,
pub name: Symbol,
- pub vis: Visibility,
+ pub vis: Visibility<DefId>,
}
impl PartialEq for FieldDef {
@@ -2256,7 +2485,11 @@ impl<'tcx> TyCtxt<'tcx> {
}
pub fn get_attr(self, did: DefId, attr: Symbol) -> Option<&'tcx ast::Attribute> {
- self.get_attrs(did, attr).next()
+ if cfg!(debug_assertions) && !rustc_feature::is_valid_for_get_attr(attr) {
+ bug!("get_attr: unexpected called with DefId `{:?}`, attr `{:?}`", did, attr);
+ } else {
+ self.get_attrs(did, attr).next()
+ }
}
/// Determines whether an item is annotated with an attribute.
@@ -2358,6 +2591,25 @@ impl<'tcx> TyCtxt<'tcx> {
(ident, scope)
}
+ /// Returns `true` if the debuginfo for `span` should be collapsed to the outermost expansion
+ /// site. Only applies when `Span` is the result of macro expansion.
+ ///
+ /// - If the `collapse_debuginfo` feature is enabled, debuginfo is not collapsed by default;
+ /// it is collapsed only when the macro definition is annotated with `#[collapse_debuginfo]`.
+ /// - If `collapse_debuginfo` is not enabled, then debuginfo is collapsed by default.
+ ///
+ /// When `-Zdebug-macros` is provided then debuginfo will never be collapsed.
+ pub fn should_collapse_debuginfo(self, span: Span) -> bool {
+ !self.sess.opts.unstable_opts.debug_macros
+ && if self.features().collapse_debuginfo {
+ span.in_macro_expansion_with_collapse_debuginfo()
+ } else {
+ // Inlined spans should not be collapsed as that leads to all of the
+ // inlined code being attributed to the inline callsite.
+ span.from_expansion() && !span.is_inlined()
+ }
+ }
+
pub fn is_object_safe(self, key: DefId) -> bool {
self.object_safety_violations(key).is_empty()
}
@@ -2372,6 +2624,14 @@ impl<'tcx> TyCtxt<'tcx> {
pub fn is_const_default_method(self, def_id: DefId) -> bool {
matches!(self.trait_of_item(def_id), Some(trait_id) if self.has_attr(trait_id, sym::const_trait))
}
+
+ pub fn impl_trait_in_trait_parent(self, mut def_id: DefId) -> DefId {
+ while let def_kind = self.def_kind(def_id) && def_kind != DefKind::AssocFn {
+ debug_assert_eq!(def_kind, DefKind::ImplTraitPlaceholder);
+ def_id = self.parent(def_id);
+ }
+ def_id
+ }
}
/// Yields the parent function's `LocalDefId` if `def_id` is an `impl Trait` definition.
@@ -2445,7 +2705,7 @@ pub fn provide(providers: &mut ty::query::Providers) {
closure::provide(providers);
context::provide(providers);
erase_regions::provide(providers);
- layout::provide(providers);
+ inhabitedness::provide(providers);
util::provide(providers);
print::provide(providers);
super::util::bug::provide(providers);
@@ -2453,7 +2713,6 @@ pub fn provide(providers: &mut ty::query::Providers) {
*providers = ty::query::Providers {
trait_impls_of: trait_def::trait_impls_of_provider,
incoherent_impls: trait_def::incoherent_impls_provider,
- type_uninhabited_from: inhabitedness::type_uninhabited_from,
const_param_default: consts::const_param_default,
vtable_allocation: vtable::vtable_allocation_provider,
..*providers
@@ -2516,3 +2775,15 @@ pub struct DestructuredConst<'tcx> {
pub variant: Option<VariantIdx>,
pub fields: &'tcx [ty::Const<'tcx>],
}
+
+// Some types are used a lot. Make sure they don't unintentionally get bigger.
+#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
+mod size_asserts {
+ use super::*;
+ use rustc_data_structures::static_assert_size;
+ // tidy-alphabetical-start
+ static_assert_size!(PredicateS<'_>, 48);
+ static_assert_size!(TyS<'_>, 40);
+ static_assert_size!(WithStableHash<TyS<'_>>, 56);
+ // tidy-alphabetical-end
+}
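The `Term`/`TermKind` split above replaces the old two-variant enum with a tagged pointer: `Ty` and `Const` are both interned behind well-aligned pointers, so the low bits of the address are free to carry the discriminant, and `pack`/`unpack` convert between the compact and the ergonomic form. A self-contained sketch of the same packing idea (the types, tags, and names below are illustrative, not the rustc ones):

    use std::marker::PhantomData;
    use std::num::NonZeroUsize;

    const TAG_MASK: usize = 0b11;
    const A_TAG: usize = 0b00;
    const B_TAG: usize = 0b01;

    enum Kind<'a> {
        A(&'a u64),
        B(&'a i64),
    }

    struct Packed<'a> {
        ptr: NonZeroUsize,
        marker: PhantomData<(&'a u64, &'a i64)>,
    }

    impl<'a> Kind<'a> {
        fn pack(self) -> Packed<'a> {
            let (tag, addr) = match self {
                Kind::A(r) => (A_TAG, r as *const u64 as usize),
                Kind::B(r) => (B_TAG, r as *const i64 as usize),
            };
            // Both referents are at least 4-byte aligned, so the low two bits are free.
            assert_eq!(addr & TAG_MASK, 0);
            Packed { ptr: NonZeroUsize::new(addr | tag).unwrap(), marker: PhantomData }
        }
    }

    impl<'a> Packed<'a> {
        fn unpack(&self) -> Kind<'a> {
            let raw = self.ptr.get();
            // SAFETY: the address was produced from a live reference in `pack`,
            // and the tag tells us which pointee type it refers to.
            unsafe {
                match raw & TAG_MASK {
                    A_TAG => Kind::A(&*((raw & !TAG_MASK) as *const u64)),
                    B_TAG => Kind::B(&*((raw & !TAG_MASK) as *const i64)),
                    _ => unreachable!(),
                }
            }
        }
    }

    fn main() {
        let x = 7u64;
        match Kind::A(&x).pack().unpack() {
            Kind::A(v) => assert_eq!(*v, 7),
            Kind::B(_) => unreachable!(),
        }
        let y = -3i64;
        match Kind::B(&y).pack().unpack() {
            Kind::B(v) => assert_eq!(*v, -3),
            Kind::A(_) => unreachable!(),
        }
    }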
diff --git a/compiler/rustc_middle/src/ty/normalize_erasing_regions.rs b/compiler/rustc_middle/src/ty/normalize_erasing_regions.rs
index 9d8a81165..ee13920d5 100644
--- a/compiler/rustc_middle/src/ty/normalize_erasing_regions.rs
+++ b/compiler/rustc_middle/src/ty/normalize_erasing_regions.rs
@@ -10,8 +10,7 @@
use crate::mir;
use crate::traits::query::NoSolution;
use crate::ty::fold::{FallibleTypeFolder, TypeFoldable, TypeFolder};
-use crate::ty::subst::{Subst, SubstsRef};
-use crate::ty::{self, EarlyBinder, Ty, TyCtxt};
+use crate::ty::{self, EarlyBinder, SubstsRef, Ty, TyCtxt};
#[derive(Debug, Copy, Clone, HashStable, TyEncodable, TyDecodable)]
pub enum NormalizationError<'tcx> {
@@ -36,6 +35,7 @@ impl<'tcx> TyCtxt<'tcx> {
///
/// This should only be used outside of type inference. For example,
/// it assumes that normalization will succeed.
+ #[tracing::instrument(level = "debug", skip(self, param_env))]
pub fn normalize_erasing_regions<T>(self, param_env: ty::ParamEnv<'tcx>, value: T) -> T
where
T: TypeFoldable<'tcx>,
@@ -100,6 +100,7 @@ impl<'tcx> TyCtxt<'tcx> {
/// N.B., currently, higher-ranked type bounds inhibit
/// normalization. Therefore, each time we erase them in
/// codegen, we need to normalize the contents.
+ #[tracing::instrument(level = "debug", skip(self, param_env))]
pub fn normalize_erasing_late_bound_regions<T>(
self,
param_env: ty::ParamEnv<'tcx>,
@@ -188,13 +189,11 @@ struct NormalizeAfterErasingRegionsFolder<'tcx> {
}
impl<'tcx> NormalizeAfterErasingRegionsFolder<'tcx> {
- #[instrument(skip(self), level = "debug")]
fn normalize_generic_arg_after_erasing_regions(
&self,
arg: ty::GenericArg<'tcx>,
) -> ty::GenericArg<'tcx> {
let arg = self.param_env.and(arg);
- debug!(?arg);
self.tcx.try_normalize_generic_arg_after_erasing_regions(arg).unwrap_or_else(|_| bug!(
"Failed to normalize {:?}, maybe try to call `try_normalize_erasing_regions` instead",
@@ -215,15 +214,6 @@ impl<'tcx> TypeFolder<'tcx> for NormalizeAfterErasingRegionsFolder<'tcx> {
fn fold_const(&mut self, c: ty::Const<'tcx>) -> ty::Const<'tcx> {
self.normalize_generic_arg_after_erasing_regions(c.into()).expect_const()
}
-
- #[inline]
- fn fold_mir_const(&mut self, c: mir::ConstantKind<'tcx>) -> mir::ConstantKind<'tcx> {
- // FIXME: This *probably* needs canonicalization too!
- let arg = self.param_env.and(c);
- self.tcx
- .try_normalize_mir_const_after_erasing_regions(arg)
- .unwrap_or_else(|_| bug!("failed to normalize {:?}", c))
- }
}
struct TryNormalizeAfterErasingRegionsFolder<'tcx> {
@@ -268,16 +258,4 @@ impl<'tcx> FallibleTypeFolder<'tcx> for TryNormalizeAfterErasingRegionsFolder<'t
Err(_) => Err(NormalizationError::Const(c)),
}
}
-
- fn try_fold_mir_const(
- &mut self,
- c: mir::ConstantKind<'tcx>,
- ) -> Result<mir::ConstantKind<'tcx>, Self::Error> {
- // FIXME: This *probably* needs canonicalization too!
- let arg = self.param_env.and(c);
- match self.tcx.try_normalize_mir_const_after_erasing_regions(arg) {
- Ok(c) => Ok(c),
- Err(_) => Err(NormalizationError::ConstantKind(c)),
- }
- }
}
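On the `#[tracing::instrument]` attributes added to the normalization entry points: the attribute opens a span per call and records the non-skipped arguments as span fields. A minimal sketch of the idiom, assuming the `tracing` and `tracing-subscriber` crates (the function below is illustrative only):

    use tracing::instrument;

    #[instrument(level = "debug", skip(big_context))]
    fn normalize(value: u32, big_context: &[u8]) -> u32 {
        // `value` is recorded as a span field; `big_context` is skipped.
        tracing::debug!("normalizing");
        value + big_context.len() as u32
    }

    fn main() {
        tracing_subscriber::fmt().with_max_level(tracing::Level::DEBUG).init();
        assert_eq!(normalize(1, &[0, 0, 0, 0]), 5);
    }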
diff --git a/compiler/rustc_middle/src/ty/opaque_types.rs b/compiler/rustc_middle/src/ty/opaque_types.rs
new file mode 100644
index 000000000..b05c63109
--- /dev/null
+++ b/compiler/rustc_middle/src/ty/opaque_types.rs
@@ -0,0 +1,218 @@
+use rustc_data_structures::fx::FxHashMap;
+use rustc_middle::ty::fold::{TypeFolder, TypeSuperFoldable};
+use rustc_middle::ty::subst::{GenericArg, GenericArgKind};
+use rustc_middle::ty::{self, Ty, TyCtxt, TypeFoldable};
+use rustc_span::Span;
+
+/// Converts generic params of a TypeFoldable from one
+/// item's generics to another. Usually from a function's generics
+/// list to the opaque type's own generics.
+pub(super) struct ReverseMapper<'tcx> {
+ tcx: TyCtxt<'tcx>,
+ map: FxHashMap<GenericArg<'tcx>, GenericArg<'tcx>>,
+ /// See the call sites of `fold_kind_no_missing_regions_error`
+ /// for an explanation of this field.
+ do_not_error: bool,
+
+ /// We do not want to emit any errors in typeck because
+ /// the spans in typeck are subpar at the moment.
+ /// Borrowck will do the same work again (this time with
+ /// lifetime information) and thus report better errors.
+ ignore_errors: bool,
+
+ /// Span of function being checked.
+ span: Span,
+}
+
+impl<'tcx> ReverseMapper<'tcx> {
+ pub(super) fn new(
+ tcx: TyCtxt<'tcx>,
+ map: FxHashMap<GenericArg<'tcx>, GenericArg<'tcx>>,
+ span: Span,
+ ignore_errors: bool,
+ ) -> Self {
+ Self { tcx, map, do_not_error: false, ignore_errors, span }
+ }
+
+ fn fold_kind_no_missing_regions_error(&mut self, kind: GenericArg<'tcx>) -> GenericArg<'tcx> {
+ assert!(!self.do_not_error);
+ self.do_not_error = true;
+ let kind = kind.fold_with(self);
+ self.do_not_error = false;
+ kind
+ }
+
+ fn fold_kind_normally(&mut self, kind: GenericArg<'tcx>) -> GenericArg<'tcx> {
+ assert!(!self.do_not_error);
+ kind.fold_with(self)
+ }
+}
+
+impl<'tcx> TypeFolder<'tcx> for ReverseMapper<'tcx> {
+ fn tcx(&self) -> TyCtxt<'tcx> {
+ self.tcx
+ }
+
+ #[instrument(skip(self), level = "debug")]
+ fn fold_region(&mut self, r: ty::Region<'tcx>) -> ty::Region<'tcx> {
+ match *r {
+ // Ignore bound regions and `'static` regions that appear in the
+ // type, we only need to remap regions that reference lifetimes
+ // from the function declaration.
+ // This would ignore `'r` in a type like `for<'r> fn(&'r u32)`.
+ ty::ReLateBound(..) | ty::ReStatic => return r,
+
+ // If regions have been erased (by writeback), don't try to unerase
+ // them.
+ ty::ReErased => return r,
+
+ // The regions that we expect from borrow checking.
+ ty::ReEarlyBound(_) | ty::ReFree(_) => {}
+
+ ty::RePlaceholder(_) | ty::ReVar(_) => {
+ // All of the regions in the type should either have been
+ // erased by writeback, or mapped back to named regions by
+ // borrow checking.
+ bug!("unexpected region kind in opaque type: {:?}", r);
+ }
+ }
+
+ match self.map.get(&r.into()).map(|k| k.unpack()) {
+ Some(GenericArgKind::Lifetime(r1)) => r1,
+ Some(u) => panic!("region mapped to unexpected kind: {:?}", u),
+ None if self.do_not_error => self.tcx.lifetimes.re_static,
+ None => {
+ self.tcx
+ .sess
+ .struct_span_err(self.span, "non-defining opaque type use in defining scope")
+ .span_label(
+ self.span,
+ format!(
+ "lifetime `{}` is part of concrete type but not used in \
+ parameter list of the `impl Trait` type alias",
+ r
+ ),
+ )
+ .emit();
+
+ self.tcx().lifetimes.re_static
+ }
+ }
+ }
+
+ fn fold_ty(&mut self, ty: Ty<'tcx>) -> Ty<'tcx> {
+ match *ty.kind() {
+ ty::Closure(def_id, substs) => {
+ // I am a horrible monster and I pray for death. When
+ // we encounter a closure here, it is always a closure
+ // from within the function that we are currently
+ // type-checking -- one that is now being encapsulated
+ // in an opaque type. Ideally, we would
+ // go through the types/lifetimes that it references
+ // and treat them just like we would any other type,
+ // which means we would error out if we find any
+ // reference to a type/region that is not in the
+ // "reverse map".
+ //
+ // **However,** in the case of closures, there is a
+ // somewhat subtle (read: hacky) consideration. The
+ // problem is that our closure types currently include
+ // all the lifetime parameters declared on the
+ // enclosing function, even if they are unused by the
+ // closure itself. We can't readily filter them out,
+ // so here we replace those values with `'empty`. This
+ // can't really make a difference to the rest of the
+ // compiler; those regions are ignored for the
+ // outlives relation, and hence don't affect trait
+ // selection or auto traits, and they are erased
+ // during codegen.
+
+ let generics = self.tcx.generics_of(def_id);
+ let substs = self.tcx.mk_substs(substs.iter().enumerate().map(|(index, kind)| {
+ if index < generics.parent_count {
+ // Accommodate missing regions in the parent kinds...
+ self.fold_kind_no_missing_regions_error(kind)
+ } else {
+ // ...but not elsewhere.
+ self.fold_kind_normally(kind)
+ }
+ }));
+
+ self.tcx.mk_closure(def_id, substs)
+ }
+
+ ty::Generator(def_id, substs, movability) => {
+ let generics = self.tcx.generics_of(def_id);
+ let substs = self.tcx.mk_substs(substs.iter().enumerate().map(|(index, kind)| {
+ if index < generics.parent_count {
+ // Accommodate missing regions in the parent kinds...
+ self.fold_kind_no_missing_regions_error(kind)
+ } else {
+ // ...but not elsewhere.
+ self.fold_kind_normally(kind)
+ }
+ }));
+
+ self.tcx.mk_generator(def_id, substs, movability)
+ }
+
+ ty::Param(param) => {
+ // Look it up in the substitution list.
+ match self.map.get(&ty.into()).map(|k| k.unpack()) {
+ // Found it in the substitution list; replace with the parameter from the
+ // opaque type.
+ Some(GenericArgKind::Type(t1)) => t1,
+ Some(u) => panic!("type mapped to unexpected kind: {:?}", u),
+ None => {
+ debug!(?param, ?self.map);
+ if !self.ignore_errors {
+ self.tcx
+ .sess
+ .struct_span_err(
+ self.span,
+ &format!(
+ "type parameter `{}` is part of concrete type but not \
+ used in parameter list for the `impl Trait` type alias",
+ ty
+ ),
+ )
+ .emit();
+ }
+
+ self.tcx().ty_error()
+ }
+ }
+ }
+
+ _ => ty.super_fold_with(self),
+ }
+ }
+
+ fn fold_const(&mut self, ct: ty::Const<'tcx>) -> ty::Const<'tcx> {
+ trace!("checking const {:?}", ct);
+ // Find a const parameter
+ match ct.kind() {
+ ty::ConstKind::Param(..) => {
+ // Look it up in the substitution list.
+ match self.map.get(&ct.into()).map(|k| k.unpack()) {
+ // Found it in the substitution list, replace with the parameter from the
+ // opaque type.
+ Some(GenericArgKind::Const(c1)) => c1,
+ Some(u) => panic!("const mapped to unexpected kind: {:?}", u),
+ None => {
+ if !self.ignore_errors {
+ self.tcx.sess.emit_err(ty::ConstNotUsedTraitAlias {
+ ct: ct.to_string(),
+ span: self.span,
+ });
+ }
+
+ self.tcx().const_error(ct.ty())
+ }
+ }
+ }
+
+ _ => ct,
+ }
+ }
+}
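The new `ReverseMapper` walks a hidden type and rewrites every generic argument the opaque type was instantiated with back to the parameter the opaque type declares, erroring on anything that was not captured. The mapping step itself can be pictured with a standalone sketch (string-based and illustrative only):

    use std::collections::HashMap;

    // Arguments the opaque type was used with, mapped back to its own parameters.
    fn reverse_map<'a>(substs: &[&'a str], identity: &[&'a str]) -> HashMap<&'a str, &'a str> {
        substs.iter().copied().zip(identity.iter().copied()).collect()
    }

    fn main() {
        // For `type Foo<'x> = impl Trait<'x>;` used as `Foo<'a>`,
        // the concrete region 'a must be rewritten back to the declared 'x.
        let map = reverse_map(&["'a"], &["'x"]);
        assert_eq!(map.get("'a"), Some(&"'x"));
        // A region with no entry was not captured; ReverseMapper reports
        // a "non-defining opaque type use" error (or substitutes 'static).
        assert_eq!(map.get("'b"), None);
    }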
diff --git a/compiler/rustc_middle/src/ty/parameterized.rs b/compiler/rustc_middle/src/ty/parameterized.rs
index e189ee2fc..e1e705a92 100644
--- a/compiler/rustc_middle/src/ty/parameterized.rs
+++ b/compiler/rustc_middle/src/ty/parameterized.rs
@@ -1,4 +1,5 @@
-use rustc_hir::def_id::DefId;
+use rustc_data_structures::fx::FxHashMap;
+use rustc_hir::def_id::{DefId, DefIndex};
use rustc_index::vec::{Idx, IndexVec};
use crate::middle::exported_symbols::ExportedSymbol;
@@ -29,6 +30,10 @@ impl<I: Idx + 'static, T: ParameterizedOverTcx> ParameterizedOverTcx for IndexVe
type Value<'tcx> = IndexVec<I, T::Value<'tcx>>;
}
+impl<I: 'static, T: ParameterizedOverTcx> ParameterizedOverTcx for FxHashMap<I, T> {
+ type Value<'tcx> = FxHashMap<I, T::Value<'tcx>>;
+}
+
impl<T: ParameterizedOverTcx> ParameterizedOverTcx for ty::Binder<'static, T> {
type Value<'tcx> = ty::Binder<'tcx, T::Value<'tcx>>;
}
@@ -53,17 +58,21 @@ trivially_parameterized_over_tcx! {
crate::metadata::ModChild,
crate::middle::codegen_fn_attrs::CodegenFnAttrs,
crate::middle::exported_symbols::SymbolExportInfo,
+ crate::middle::resolve_lifetime::ObjectLifetimeDefault,
crate::mir::ConstQualifs,
+ ty::AssocItemContainer,
+ ty::DeducedParamAttrs,
ty::Generics,
ty::ImplPolarity,
ty::ReprOptions,
ty::TraitDef,
- ty::Visibility,
+ ty::Visibility<DefIndex>,
ty::adjustment::CoerceUnsizedInfo,
ty::fast_reject::SimplifiedTypeGen<DefId>,
rustc_ast::Attribute,
rustc_ast::MacArgs,
rustc_attr::ConstStability,
+ rustc_attr::DefaultBodyStability,
rustc_attr::Deprecation,
rustc_attr::Stability,
rustc_hir::Constness,
@@ -74,6 +83,7 @@ trivially_parameterized_over_tcx! {
rustc_hir::def::DefKind,
rustc_hir::def_id::DefIndex,
rustc_hir::definitions::DefKey,
+ rustc_index::bit_set::BitSet<u32>,
rustc_index::bit_set::FiniteBitSet<u32>,
rustc_session::cstore::ForeignModule,
rustc_session::cstore::LinkagePreference,
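`ParameterizedOverTcx` (which the new `FxHashMap` and `Visibility<DefIndex>` impls extend) is built on a lifetime-generic associated type. A standalone sketch of that pattern, with illustrative types rather than the rustc ones:

    trait Parameterized {
        type Value<'a>;
    }

    // Trivially parameterized: the value ignores the lifetime entirely,
    // like the types listed in `trivially_parameterized_over_tcx!`.
    struct Plain;
    impl Parameterized for Plain {
        type Value<'a> = u32;
    }

    // Genuinely parameterized: the value borrows for the chosen lifetime.
    struct Borrowed;
    impl Parameterized for Borrowed {
        type Value<'a> = &'a str;
    }

    fn main() {
        let n: <Plain as Parameterized>::Value<'static> = 5;
        let s: <Borrowed as Parameterized>::Value<'_> = "hi";
        assert_eq!(n, 5);
        assert_eq!(s, "hi");
    }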
diff --git a/compiler/rustc_middle/src/ty/print/mod.rs b/compiler/rustc_middle/src/ty/print/mod.rs
index d57cf8f01..44b9548db 100644
--- a/compiler/rustc_middle/src/ty/print/mod.rs
+++ b/compiler/rustc_middle/src/ty/print/mod.rs
@@ -1,9 +1,9 @@
-use crate::ty::subst::{GenericArg, Subst};
+use crate::ty::GenericArg;
use crate::ty::{self, DefIdTree, Ty, TyCtxt};
use rustc_data_structures::fx::FxHashSet;
use rustc_data_structures::sso::SsoHashSet;
-use rustc_hir::def_id::{CrateNum, DefId};
+use rustc_hir::def_id::{CrateNum, DefId, LocalDefId};
use rustc_hir::definitions::{DefPathData, DisambiguatedDefPathData};
// `pretty` is a separate module only for organization.
@@ -325,3 +325,12 @@ impl<'tcx, P: Printer<'tcx>> Print<'tcx, P> for ty::Const<'tcx> {
cx.print_const(*self)
}
}
+
+// This is only used by query descriptions
+pub fn describe_as_module(def_id: LocalDefId, tcx: TyCtxt<'_>) -> String {
+ if def_id.is_top_level_module() {
+ "top-level module".to_string()
+ } else {
+ format!("module `{}`", tcx.def_path_str(def_id.to_def_id()))
+ }
+}
diff --git a/compiler/rustc_middle/src/ty/print/pretty.rs b/compiler/rustc_middle/src/ty/print/pretty.rs
index 7f2e81a71..ef9aa236b 100644
--- a/compiler/rustc_middle/src/ty/print/pretty.rs
+++ b/compiler/rustc_middle/src/ty/print/pretty.rs
@@ -1,9 +1,9 @@
use crate::mir::interpret::{AllocRange, GlobalAlloc, Pointer, Provenance, Scalar};
-use crate::ty::subst::{GenericArg, GenericArgKind, Subst};
use crate::ty::{
- self, ConstInt, DefIdTree, ParamConst, ScalarInt, Term, Ty, TyCtxt, TypeFoldable,
+ self, ConstInt, DefIdTree, ParamConst, ScalarInt, Term, TermKind, Ty, TyCtxt, TypeFoldable,
TypeSuperFoldable, TypeSuperVisitable, TypeVisitable,
};
+use crate::ty::{GenericArg, GenericArgKind};
use rustc_apfloat::ieee::{Double, Single};
use rustc_data_structures::fx::{FxHashMap, FxIndexMap};
use rustc_data_structures::sso::SsoHashSet;
@@ -16,6 +16,7 @@ use rustc_session::cstore::{ExternCrate, ExternCrateSource};
use rustc_span::symbol::{kw, Ident, Symbol};
use rustc_target::abi::Size;
use rustc_target::spec::abi::Abi;
+use smallvec::SmallVec;
use std::cell::Cell;
use std::char;
@@ -62,6 +63,7 @@ thread_local! {
static NO_TRIMMED_PATH: Cell<bool> = const { Cell::new(false) };
static NO_QUERIES: Cell<bool> = const { Cell::new(false) };
static NO_VISIBLE_PATH: Cell<bool> = const { Cell::new(false) };
+ static NO_VERBOSE_CONSTANTS: Cell<bool> = const { Cell::new(false) };
}
macro_rules! define_helper {
@@ -116,6 +118,9 @@ define_helper!(
/// Prevent selection of visible paths. `Display` impl of DefId will prefer
/// visible (public) reexports of types as paths.
fn with_no_visible_paths(NoVisibleGuard, NO_VISIBLE_PATH);
+ /// Prevent verbose printing of constants. Verbose printing of constants is
+ /// not desirable in some contexts, such as `std::any::type_name`.
+ fn with_no_verbose_constants(NoVerboseConstantsGuard, NO_VERBOSE_CONSTANTS);
);
/// The "region highlights" are used to control region printing during
@@ -619,12 +624,16 @@ pub trait PrettyPrinter<'tcx>:
ty::Adt(def, substs) => {
p!(print_def_path(def.did(), substs));
}
- ty::Dynamic(data, r) => {
+ ty::Dynamic(data, r, repr) => {
let print_r = self.should_print_region(r);
if print_r {
p!("(");
}
- p!("dyn ", print(data));
+ match repr {
+ ty::Dyn => p!("dyn "),
+ ty::DynStar => p!("dyn* "),
+ }
+ p!(print(data));
if print_r {
p!(" + ", print(r), ")");
}
@@ -632,7 +641,15 @@ pub trait PrettyPrinter<'tcx>:
ty::Foreign(def_id) => {
p!(print_def_path(def_id, &[]));
}
- ty::Projection(ref data) => p!(print(data)),
+ ty::Projection(ref data) => {
+ if !(self.tcx().sess.verbose() || NO_QUERIES.with(|q| q.get()))
+ && self.tcx().def_kind(data.item_def_id) == DefKind::ImplTraitPlaceholder
+ {
+ return self.pretty_print_opaque_impl_type(data.item_def_id, data.substs);
+ } else {
+ p!(print(data))
+ }
+ }
ty::Placeholder(placeholder) => p!(write("Placeholder({:?})", placeholder)),
ty::Opaque(def_id, substs) => {
// FIXME(eddyb) print this with `print_def_path`.
@@ -746,7 +763,7 @@ pub trait PrettyPrinter<'tcx>:
}
ty::Array(ty, sz) => {
p!("[", print(ty), "; ");
- if self.tcx().sess.verbose() {
+ if !NO_VERBOSE_CONSTANTS.with(|flag| flag.get()) && self.tcx().sess.verbose() {
p!(write("{:?}", sz));
} else if let ty::ConstKind::Unevaluated(..) = sz.kind() {
// Do not try to evaluate unevaluated constants. If we are const evaluating an
@@ -782,9 +799,9 @@ pub trait PrettyPrinter<'tcx>:
let mut traits = FxIndexMap::default();
let mut fn_traits = FxIndexMap::default();
let mut is_sized = false;
+ let mut lifetimes = SmallVec::<[ty::Region<'tcx>; 1]>::new();
- for predicate in bounds.transpose_iter().map(|e| e.map_bound(|(p, _)| *p)) {
- let predicate = predicate.subst(tcx, substs);
+ for (predicate, _) in bounds.subst_iter_copied(tcx, substs) {
let bound_predicate = predicate.kind();
match bound_predicate.skip_binder() {
@@ -813,6 +830,9 @@ pub trait PrettyPrinter<'tcx>:
&mut fn_traits,
);
}
+ ty::PredicateKind::TypeOutlives(outlives) => {
+ lifetimes.push(outlives.1);
+ }
_ => {}
}
}
@@ -855,7 +875,7 @@ pub trait PrettyPrinter<'tcx>:
}
p!(")");
- if let Term::Ty(ty) = return_ty.skip_binder() {
+ if let Some(ty) = return_ty.skip_binder().ty() {
if !ty.is_unit() {
p!(" -> ", print(return_ty));
}
@@ -916,12 +936,14 @@ pub trait PrettyPrinter<'tcx>:
// Skip printing `<[generator@] as Generator<_>>::Return` from async blocks,
// unless we can find out what generator return type it comes from.
let term = if let Some(ty) = term.skip_binder().ty()
- && let ty::Projection(ty::ProjectionTy { item_def_id, substs }) = ty.kind()
- && Some(*item_def_id) == tcx.lang_items().generator_return()
+ && let ty::Projection(proj) = ty.kind()
+ && let Some(assoc) = tcx.opt_associated_item(proj.item_def_id)
+ && assoc.trait_container(tcx) == tcx.lang_items().gen_trait()
+ && assoc.name == rustc_span::sym::Return
{
if let ty::Generator(_, substs, _) = substs.type_at(0).kind() {
let return_ty = substs.as_generator().return_ty();
- if !return_ty.is_ty_infer() {
+ if !return_ty.is_ty_var() {
return_ty.into()
} else {
continue;
@@ -942,13 +964,9 @@ pub trait PrettyPrinter<'tcx>:
p!(write("{} = ", tcx.associated_item(assoc_item_def_id).name));
- match term {
- Term::Ty(ty) => {
- p!(print(ty))
- }
- Term::Const(c) => {
- p!(print(c));
- }
+ match term.unpack() {
+ TermKind::Ty(ty) => p!(print(ty)),
+ TermKind::Const(c) => p!(print(c)),
};
}
@@ -968,6 +986,11 @@ pub trait PrettyPrinter<'tcx>:
write!(self, "Sized")?;
}
+ for re in lifetimes {
+ write!(self, " + ")?;
+ self = self.print_region(re)?;
+ }
+
Ok(self)
}
@@ -1080,17 +1103,9 @@ pub trait PrettyPrinter<'tcx>:
.generics_of(principal.def_id)
.own_substs_no_defaults(cx.tcx(), principal.substs);
- // Don't print `'_` if there's no unerased regions.
- let print_regions = args.iter().any(|arg| match arg.unpack() {
- GenericArgKind::Lifetime(r) => !r.is_erased(),
- _ => false,
- });
- let mut args = args.iter().cloned().filter(|arg| match arg.unpack() {
- GenericArgKind::Lifetime(_) => print_regions,
- _ => true,
- });
let mut projections = predicates.projection_bounds();
+ let mut args = args.iter().cloned();
let arg0 = args.next();
let projection0 = projections.next();
if arg0.is_some() || projection0.is_some() {
@@ -1170,7 +1185,7 @@ pub trait PrettyPrinter<'tcx>:
) -> Result<Self::Const, Self::Error> {
define_scoped_cx!(self);
- if self.tcx().sess.verbose() {
+ if !NO_VERBOSE_CONSTANTS.with(|flag| flag.get()) && self.tcx().sess.verbose() {
p!(write("Const({:?}: {:?})", ct.kind(), ct.ty()));
return Ok(self);
}
@@ -1193,15 +1208,7 @@ pub trait PrettyPrinter<'tcx>:
}
match ct.kind() {
- ty::ConstKind::Unevaluated(ty::Unevaluated {
- def,
- substs,
- promoted: Some(promoted),
- }) => {
- p!(print_value_path(def.did, substs));
- p!(write("::{:?}", promoted));
- }
- ty::ConstKind::Unevaluated(ty::Unevaluated { def, substs, promoted: None }) => {
+ ty::ConstKind::Unevaluated(ty::UnevaluatedConst { def, substs }) => {
match self.tcx().def_kind(def.did) {
DefKind::Static(..) | DefKind::Const | DefKind::AssocConst => {
p!(print_value_path(def.did, substs))
@@ -1275,7 +1282,7 @@ pub trait PrettyPrinter<'tcx>:
let range =
AllocRange { start: offset, size: Size::from_bytes(len) };
if let Ok(byte_str) =
- alloc.inner().get_bytes(&self.tcx(), range)
+ alloc.inner().get_bytes_strip_provenance(&self.tcx(), range)
{
p!(pretty_print_byte_str(byte_str))
} else {
@@ -1401,14 +1408,7 @@ pub trait PrettyPrinter<'tcx>:
}
fn pretty_print_byte_str(mut self, byte_str: &'tcx [u8]) -> Result<Self::Const, Self::Error> {
- define_scoped_cx!(self);
- p!("b\"");
- for &c in byte_str {
- for e in std::ascii::escape_default(c) {
- self.write_char(e as char)?;
- }
- }
- p!("\"");
+ write!(self, "b\"{}\"", byte_str.escape_ascii())?;
Ok(self)
}
@@ -1420,7 +1420,7 @@ pub trait PrettyPrinter<'tcx>:
) -> Result<Self::Const, Self::Error> {
define_scoped_cx!(self);
- if self.tcx().sess.verbose() {
+ if !NO_VERBOSE_CONSTANTS.with(|flag| flag.get()) && self.tcx().sess.verbose() {
p!(write("ValTree({:?}: ", valtree), print(ty), ")");
return Ok(self);
}
@@ -1513,6 +1513,10 @@ pub trait PrettyPrinter<'tcx>:
}
return Ok(self);
}
+ (ty::ValTree::Leaf(leaf), ty::Ref(_, inner_ty, _)) => {
+ p!(write("&"));
+ return self.pretty_print_const_scalar_int(leaf, *inner_ty, print_ty);
+ }
(ty::ValTree::Leaf(leaf), _) => {
return self.pretty_print_const_scalar_int(leaf, ty, print_ty);
}
@@ -1532,6 +1536,34 @@ pub trait PrettyPrinter<'tcx>:
}
Ok(self)
}
+
+ fn pretty_closure_as_impl(
+ mut self,
+ closure: ty::ClosureSubsts<'tcx>,
+ ) -> Result<Self::Const, Self::Error> {
+ let sig = closure.sig();
+ let kind = closure.kind_ty().to_opt_closure_kind().unwrap_or(ty::ClosureKind::Fn);
+
+ write!(self, "impl ")?;
+ self.wrap_binder(&sig, |sig, mut cx| {
+ define_scoped_cx!(cx);
+
+ p!(print(kind), "(");
+ for (i, arg) in sig.inputs()[0].tuple_fields().iter().enumerate() {
+ if i > 0 {
+ p!(", ");
+ }
+ p!(print(arg));
+ }
+ p!(")");
+
+ if !sig.output().is_unit() {
+ p!(" -> ", print(sig.output()));
+ }
+
+ Ok(cx)
+ })
+ }
}
// HACK(eddyb) boxed to avoid moving around a large struct by-value.
@@ -1545,7 +1577,9 @@ pub struct FmtPrinterData<'a, 'tcx> {
in_value: bool,
pub print_alloc_ids: bool,
+ // Names of all named (non-anonymous) regions occurring in the value being printed.
used_region_names: FxHashSet<Symbol>,
+
region_index: usize,
binder_depth: usize,
printed_type_count: usize,
@@ -1820,22 +1854,11 @@ impl<'tcx> Printer<'tcx> for FmtPrinter<'_, 'tcx> {
) -> Result<Self::Path, Self::Error> {
self = print_prefix(self)?;
- // Don't print `'_` if there's no unerased regions.
- let print_regions = self.tcx.sess.verbose()
- || args.iter().any(|arg| match arg.unpack() {
- GenericArgKind::Lifetime(r) => !r.is_erased(),
- _ => false,
- });
- let args = args.iter().cloned().filter(|arg| match arg.unpack() {
- GenericArgKind::Lifetime(_) => print_regions,
- _ => true,
- });
-
- if args.clone().next().is_some() {
+ if args.first().is_some() {
if self.in_value {
write!(self, "::")?;
}
- self.generic_delimiters(|cx| cx.comma_sep(args))
+ self.generic_delimiters(|cx| cx.comma_sep(args.iter().cloned()))
} else {
Ok(self)
}
@@ -1950,7 +1973,7 @@ impl<'tcx> PrettyPrinter<'tcx> for FmtPrinter<'_, 'tcx> {
ty::ReVar(_) | ty::ReErased => false,
- ty::ReStatic | ty::ReEmpty(_) => true,
+ ty::ReStatic => true,
}
}
@@ -2034,14 +2057,6 @@ impl<'tcx> FmtPrinter<'_, 'tcx> {
p!("'static");
return Ok(self);
}
- ty::ReEmpty(ty::UniverseIndex::ROOT) => {
- p!("'<empty>");
- return Ok(self);
- }
- ty::ReEmpty(ui) => {
- p!(write("'<empty:{:?}>", ui));
- return Ok(self);
- }
}
p!("'_");
@@ -2055,7 +2070,14 @@ struct RegionFolder<'a, 'tcx> {
tcx: TyCtxt<'tcx>,
current_index: ty::DebruijnIndex,
region_map: BTreeMap<ty::BoundRegion, ty::Region<'tcx>>,
- name: &'a mut (dyn FnMut(ty::BoundRegion) -> ty::Region<'tcx> + 'a),
+ name: &'a mut (
+ dyn FnMut(
+ Option<ty::DebruijnIndex>, // Debruijn index of the folded late-bound region
+ ty::DebruijnIndex, // Index corresponding to binder level
+ ty::BoundRegion,
+ ) -> ty::Region<'tcx>
+ + 'a
+ ),
}
impl<'a, 'tcx> ty::TypeFolder<'tcx> for RegionFolder<'a, 'tcx> {
@@ -2086,7 +2108,9 @@ impl<'a, 'tcx> ty::TypeFolder<'tcx> for RegionFolder<'a, 'tcx> {
fn fold_region(&mut self, r: ty::Region<'tcx>) -> ty::Region<'tcx> {
let name = &mut self.name;
let region = match *r {
- ty::ReLateBound(_, br) => *self.region_map.entry(br).or_insert_with(|| name(br)),
+ ty::ReLateBound(db, br) if db >= self.current_index => {
+ *self.region_map.entry(br).or_insert_with(|| name(Some(db), self.current_index, br))
+ }
ty::RePlaceholder(ty::PlaceholderRegion { name: kind, .. }) => {
// If this is an anonymous placeholder, don't rename. Otherwise, in some
// async fns, we get a `for<'r> Send` bound
@@ -2095,7 +2119,10 @@ impl<'a, 'tcx> ty::TypeFolder<'tcx> for RegionFolder<'a, 'tcx> {
_ => {
// Index doesn't matter, since this is just for naming and these never get bound
let br = ty::BoundRegion { var: ty::BoundVar::from_u32(0), kind };
- *self.region_map.entry(br).or_insert_with(|| name(br))
+ *self
+ .region_map
+ .entry(br)
+ .or_insert_with(|| name(None, self.current_index, br))
}
}
}
@@ -2120,23 +2147,31 @@ impl<'tcx> FmtPrinter<'_, 'tcx> {
where
T: Print<'tcx, Self, Output = Self, Error = fmt::Error> + TypeFoldable<'tcx>,
{
- fn name_by_region_index(index: usize) -> Symbol {
- match index {
- 0 => Symbol::intern("'r"),
- 1 => Symbol::intern("'s"),
- i => Symbol::intern(&format!("'t{}", i - 2)),
+ fn name_by_region_index(
+ index: usize,
+ available_names: &mut Vec<Symbol>,
+ num_available: usize,
+ ) -> Symbol {
+ if let Some(name) = available_names.pop() {
+ name
+ } else {
+ Symbol::intern(&format!("'z{}", index - num_available))
}
}
+ debug!("name_all_regions");
+
// Replace any anonymous late-bound regions with named
// variants, using new unique identifiers, so that we can
// clearly differentiate between named and unnamed regions in
// the output. We'll probably want to tweak this over time to
// decide just how much information to give.
if self.binder_depth == 0 {
- self.prepare_late_bound_region_info(value);
+ self.prepare_region_info(value);
}
+ debug!("self.used_region_names: {:?}", &self.used_region_names);
+
let mut empty = true;
let mut start_or_continue = |cx: &mut Self, start: &str, cont: &str| {
let w = if empty {
@@ -2153,13 +2188,30 @@ impl<'tcx> FmtPrinter<'_, 'tcx> {
define_scoped_cx!(self);
+ let possible_names =
+ ('a'..='z').rev().map(|s| Symbol::intern(&format!("'{s}"))).collect::<Vec<_>>();
+
+ let mut available_names = possible_names
+ .into_iter()
+ .filter(|name| !self.used_region_names.contains(&name))
+ .collect::<Vec<_>>();
+ debug!(?available_names);
+ let num_available = available_names.len();
+
let mut region_index = self.region_index;
- let mut next_name = |this: &Self| loop {
- let name = name_by_region_index(region_index);
- region_index += 1;
- if !this.used_region_names.contains(&name) {
- break name;
+ let mut next_name = |this: &Self| {
+ let mut name;
+
+ loop {
+ name = name_by_region_index(region_index, &mut available_names, num_available);
+ region_index += 1;
+
+ if !this.used_region_names.contains(&name) {
+ break;
+ }
}
+
+ name
};
// If we want to print verbosely, then print *all* binders, even if they
@@ -2180,6 +2232,7 @@ impl<'tcx> FmtPrinter<'_, 'tcx> {
ty::BrAnon(_) | ty::BrEnv => {
start_or_continue(&mut self, "for<", ", ");
let name = next_name(&self);
+ debug!(?name);
do_continue(&mut self, name);
ty::BrNamed(CRATE_DEF_ID.to_def_id(), name)
}
@@ -2208,24 +2261,63 @@ impl<'tcx> FmtPrinter<'_, 'tcx> {
})
} else {
let tcx = self.tcx;
- let mut name = |br: ty::BoundRegion| {
- start_or_continue(&mut self, "for<", ", ");
- let kind = match br.kind {
+
+ // Closure used in `RegionFolder` to create names for anonymous late-bound
+ // regions. We use two `DebruijnIndex`es (one for the currently folded
+ // late-bound region and the other for the binder level) to determine
+ // whether a name has already been created for the currently folded region;
+ // see issue #102392.
+ let mut name = |lifetime_idx: Option<ty::DebruijnIndex>,
+ binder_level_idx: ty::DebruijnIndex,
+ br: ty::BoundRegion| {
+ let (name, kind) = match br.kind {
ty::BrAnon(_) | ty::BrEnv => {
let name = next_name(&self);
- do_continue(&mut self, name);
- ty::BrNamed(CRATE_DEF_ID.to_def_id(), name)
+
+ if let Some(lt_idx) = lifetime_idx {
+ if lt_idx > binder_level_idx {
+ let kind = ty::BrNamed(CRATE_DEF_ID.to_def_id(), name);
+ return tcx.mk_region(ty::ReLateBound(
+ ty::INNERMOST,
+ ty::BoundRegion { var: br.var, kind },
+ ));
+ }
+ }
+
+ (name, ty::BrNamed(CRATE_DEF_ID.to_def_id(), name))
}
ty::BrNamed(def_id, kw::UnderscoreLifetime) => {
let name = next_name(&self);
- do_continue(&mut self, name);
- ty::BrNamed(def_id, name)
+
+ if let Some(lt_idx) = lifetime_idx {
+ if lt_idx > binder_level_idx {
+ let kind = ty::BrNamed(def_id, name);
+ return tcx.mk_region(ty::ReLateBound(
+ ty::INNERMOST,
+ ty::BoundRegion { var: br.var, kind },
+ ));
+ }
+ }
+
+ (name, ty::BrNamed(def_id, name))
}
ty::BrNamed(_, name) => {
- do_continue(&mut self, name);
- br.kind
+ if let Some(lt_idx) = lifetime_idx {
+ if lt_idx > binder_level_idx {
+ let kind = br.kind;
+ return tcx.mk_region(ty::ReLateBound(
+ ty::INNERMOST,
+ ty::BoundRegion { var: br.var, kind },
+ ));
+ }
+ }
+
+ (name, br.kind)
}
};
+
+ start_or_continue(&mut self, "for<", ", ");
+ do_continue(&mut self, name);
tcx.mk_region(ty::ReLateBound(ty::INNERMOST, ty::BoundRegion { var: br.var, kind }))
};
let mut folder = RegionFolder {
@@ -2273,29 +2365,37 @@ impl<'tcx> FmtPrinter<'_, 'tcx> {
Ok(inner)
}
- fn prepare_late_bound_region_info<T>(&mut self, value: &ty::Binder<'tcx, T>)
+ fn prepare_region_info<T>(&mut self, value: &ty::Binder<'tcx, T>)
where
T: TypeVisitable<'tcx>,
{
- struct LateBoundRegionNameCollector<'a, 'tcx> {
- used_region_names: &'a mut FxHashSet<Symbol>,
+ struct RegionNameCollector<'tcx> {
+ used_region_names: FxHashSet<Symbol>,
type_collector: SsoHashSet<Ty<'tcx>>,
}
- impl<'tcx> ty::visit::TypeVisitor<'tcx> for LateBoundRegionNameCollector<'_, 'tcx> {
+ impl<'tcx> RegionNameCollector<'tcx> {
+ fn new() -> Self {
+ RegionNameCollector {
+ used_region_names: Default::default(),
+ type_collector: SsoHashSet::new(),
+ }
+ }
+ }
+
+ impl<'tcx> ty::visit::TypeVisitor<'tcx> for RegionNameCollector<'tcx> {
type BreakTy = ();
fn visit_region(&mut self, r: ty::Region<'tcx>) -> ControlFlow<Self::BreakTy> {
trace!("address: {:p}", r.0.0);
- if let ty::ReLateBound(_, ty::BoundRegion { kind: ty::BrNamed(_, name), .. }) = *r {
- self.used_region_names.insert(name);
- } else if let ty::RePlaceholder(ty::PlaceholderRegion {
- name: ty::BrNamed(_, name),
- ..
- }) = *r
- {
+
+ // Collect all named lifetimes. These allow us to prevent duplication
+ // of already existing lifetime names when introducing names for
+ // anonymous late-bound regions.
+ if let Some(name) = r.get_name() {
self.used_region_names.insert(name);
}
+
r.super_visit_with(self)
}
@@ -2311,12 +2411,9 @@ impl<'tcx> FmtPrinter<'_, 'tcx> {
}
}
- self.used_region_names.clear();
- let mut collector = LateBoundRegionNameCollector {
- used_region_names: &mut self.used_region_names,
- type_collector: SsoHashSet::new(),
- };
+ let mut collector = RegionNameCollector::new();
value.visit_with(&mut collector);
+ self.used_region_names = collector.used_region_names;
self.region_index = 0;
}
}
@@ -2446,6 +2543,11 @@ impl<'tcx> ty::PolyTraitPredicate<'tcx> {
}
}
+#[derive(Debug, Copy, Clone, TypeFoldable, TypeVisitable, Lift)]
+pub struct PrintClosureAsImpl<'tcx> {
+ pub closure: ty::ClosureSubsts<'tcx>,
+}
+
forward_display_to_print! {
ty::Region<'tcx>,
Ty<'tcx>,
@@ -2538,6 +2640,10 @@ define_print_and_forward_display! {
p!(print(self.0.trait_ref.print_only_trait_path()));
}
+ PrintClosureAsImpl<'tcx> {
+ p!(pretty_closure_as_impl(self.closure))
+ }
+
ty::ParamTy {
p!(write("{}", self.name))
}
@@ -2567,9 +2673,9 @@ define_print_and_forward_display! {
}
ty::Term<'tcx> {
- match self {
- ty::Term::Ty(ty) => p!(print(ty)),
- ty::Term::Const(c) => p!(print(c)),
+ match self.unpack() {
+ ty::TermKind::Ty(ty) => p!(print(ty)),
+ ty::TermKind::Const(c) => p!(print(c)),
}
}
@@ -2609,8 +2715,8 @@ define_print_and_forward_display! {
print_value_path(closure_def_id, &[]),
write("` implements the trait `{}`", kind))
}
- ty::PredicateKind::ConstEvaluatable(uv) => {
- p!("the constant `", print_value_path(uv.def.did, uv.substs), "` can be evaluated")
+ ty::PredicateKind::ConstEvaluatable(ct) => {
+ p!("the constant `", print(ct), "` can be evaluated")
}
ty::PredicateKind::ConstEquate(c1, c2) => {
p!("the constant `", print(c1), "` equals `", print(c2), "`")
@@ -2634,7 +2740,7 @@ fn for_each_def(tcx: TyCtxt<'_>, mut collect_fn: impl for<'b> FnMut(&'b Ident, N
// Iterate all local crate items no matter where they are defined.
let hir = tcx.hir();
for id in hir.items() {
- if matches!(tcx.def_kind(id.def_id), DefKind::Use) {
+ if matches!(tcx.def_kind(id.owner_id), DefKind::Use) {
continue;
}
@@ -2643,7 +2749,7 @@ fn for_each_def(tcx: TyCtxt<'_>, mut collect_fn: impl for<'b> FnMut(&'b Ident, N
continue;
}
- let def_id = item.def_id.to_def_id();
+ let def_id = item.owner_id.to_def_id();
let ns = tcx.def_kind(def_id).ns().unwrap_or(Namespace::TypeNS);
collect_fn(&item.ident, ns, def_id);
}
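The naming scheme these pretty-printer hunks introduce is easier to follow outside the printer, so here is a minimal, self-contained sketch of it. `RegionNamer`, plain `String`s, and `HashSet` are stand-ins rather than rustc types (the real code uses `Symbol`, `FxHashSet`, and the printer's own fields); only the shape of `name_by_region_index` plus the `next_name` loop is taken from the patch.

use std::collections::HashSet;

// Hand out unused single-letter lifetimes first (the pool is built in
// reverse so `pop()` yields 'a before 'b), then fall back to 'z0, 'z1, ...
// once the pool runs dry.
struct RegionNamer {
    used: HashSet<String>,
    available: Vec<String>,
    num_available: usize,
    region_index: usize,
}

impl RegionNamer {
    fn new(used: HashSet<String>) -> Self {
        let available: Vec<String> = ('a'..='z')
            .rev()
            .map(|c| format!("'{c}"))
            .filter(|name| !used.contains(name))
            .collect();
        let num_available = available.len();
        RegionNamer { used, available, num_available, region_index: 0 }
    }

    fn next_name(&mut self) -> String {
        loop {
            let name = match self.available.pop() {
                Some(name) => name,
                // Pool exhausted: synthesize 'z0, 'z1, ...
                None => format!("'z{}", self.region_index - self.num_available),
            };
            self.region_index += 1;
            // Skip anything the user already spelled out; this only matters
            // for the synthesized names, since the pool was pre-filtered.
            if !self.used.contains(&name) {
                return name;
            }
        }
    }
}

fn main() {
    let used: HashSet<String> = ["'a", "'c"].iter().map(|s| s.to_string()).collect();
    let mut namer = RegionNamer::new(used);
    // 'a and 'c are taken, so anonymous regions get 'b, then 'd, and so on.
    assert_eq!(namer.next_name(), "'b");
    assert_eq!(namer.next_name(), "'d");
}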
diff --git a/compiler/rustc_middle/src/ty/query.rs b/compiler/rustc_middle/src/ty/query.rs
index 2452bcf6a..ec90590ad 100644
--- a/compiler/rustc_middle/src/ty/query.rs
+++ b/compiler/rustc_middle/src/ty/query.rs
@@ -1,11 +1,11 @@
use crate::dep_graph;
use crate::infer::canonical::{self, Canonical};
-use crate::lint::LintLevelMap;
+use crate::lint::LintExpectation;
use crate::metadata::ModChild;
use crate::middle::codegen_fn_attrs::CodegenFnAttrs;
use crate::middle::exported_symbols::{ExportedSymbol, SymbolExportInfo};
use crate::middle::lib_features::LibFeatures;
-use crate::middle::privacy::AccessLevels;
+use crate::middle::privacy::EffectiveVisibilities;
use crate::middle::resolve_lifetime::{ObjectLifetimeDefault, Region, ResolveLifetimes};
use crate::middle::stability::{self, DeprecationEntry};
use crate::mir;
@@ -32,7 +32,7 @@ use crate::ty::layout::TyAndLayout;
use crate::ty::subst::{GenericArg, SubstsRef};
use crate::ty::util::AlwaysRequiresDrop;
use crate::ty::GeneratorDiagnosticData;
-use crate::ty::{self, AdtSizedConstraint, CrateInherentImpls, ParamEnvAnd, Ty, TyCtxt};
+use crate::ty::{self, CrateInherentImpls, ParamEnvAnd, Ty, TyCtxt};
use rustc_ast as ast;
use rustc_ast::expand::allocator::AllocatorKind;
use rustc_attr as attr;
@@ -40,17 +40,19 @@ use rustc_data_structures::fx::{FxHashMap, FxHashSet, FxIndexMap, FxIndexSet};
use rustc_data_structures::steal::Steal;
use rustc_data_structures::svh::Svh;
use rustc_data_structures::sync::Lrc;
+use rustc_data_structures::unord::UnordSet;
use rustc_errors::ErrorGuaranteed;
use rustc_hir as hir;
use rustc_hir::def::DefKind;
use rustc_hir::def_id::{CrateNum, DefId, DefIdMap, DefIdSet, LocalDefId};
+use rustc_hir::hir_id::OwnerId;
use rustc_hir::lang_items::{LangItem, LanguageItems};
use rustc_hir::{Crate, ItemLocalId, TraitCandidate};
use rustc_index::{bit_set::FiniteBitSet, vec::IndexVec};
use rustc_session::config::{EntryFnType, OptLevel, OutputFilenames, SymbolManglingVersion};
use rustc_session::cstore::{CrateDepKind, CrateSource};
use rustc_session::cstore::{ExternCrate, ForeignModule, LinkagePreference, NativeLib};
-use rustc_session::utils::NativeLibKind;
+use rustc_session::lint::LintExpectationId;
use rustc_session::Limits;
use rustc_span::symbol::Symbol;
use rustc_span::{Span, DUMMY_SP};
@@ -121,8 +123,8 @@ macro_rules! query_storage {
([][$K:ty, $V:ty]) => {
<DefaultCacheSelector as CacheSelector<$K, $V>>::Cache
};
- ([(storage $ty:ty) $($rest:tt)*][$K:ty, $V:ty]) => {
- <$ty as CacheSelector<$K, $V>>::Cache
+ ([(arena_cache) $($rest:tt)*][$K:ty, $V:ty]) => {
+ <ArenaCacheSelector<'tcx> as CacheSelector<$K, $V>>::Cache
};
([$other:tt $($modifiers:tt)*][$($args:tt)*]) => {
query_storage!([$($modifiers)*][$($args)*])
@@ -173,7 +175,7 @@ macro_rules! opt_remap_env_constness {
}
macro_rules! define_callbacks {
- (<$tcx:tt>
+ (
$($(#[$attr:meta])*
[$($modifiers:tt)*] fn $name:ident($($K:tt)*) -> $V:ty,)*) => {
@@ -187,33 +189,33 @@ macro_rules! define_callbacks {
pub mod query_keys {
use super::*;
- $(pub type $name<$tcx> = $($K)*;)*
+ $(pub type $name<'tcx> = $($K)*;)*
}
#[allow(nonstandard_style, unused_lifetimes)]
pub mod query_values {
use super::*;
- $(pub type $name<$tcx> = $V;)*
+ $(pub type $name<'tcx> = $V;)*
}
#[allow(nonstandard_style, unused_lifetimes)]
pub mod query_storage {
use super::*;
- $(pub type $name<$tcx> = query_storage!([$($modifiers)*][$($K)*, $V]);)*
+ $(pub type $name<'tcx> = query_storage!([$($modifiers)*][$($K)*, $V]);)*
}
#[allow(nonstandard_style, unused_lifetimes)]
pub mod query_stored {
use super::*;
- $(pub type $name<$tcx> = <query_storage::$name<$tcx> as QueryStorage>::Stored;)*
+ $(pub type $name<'tcx> = <query_storage::$name<'tcx> as QueryStorage>::Stored;)*
}
#[derive(Default)]
- pub struct QueryCaches<$tcx> {
- $($(#[$attr])* pub $name: query_storage::$name<$tcx>,)*
+ pub struct QueryCaches<'tcx> {
+ $($(#[$attr])* pub $name: query_storage::$name<'tcx>,)*
}
- impl<$tcx> TyCtxtEnsure<$tcx> {
+ impl<'tcx> TyCtxtEnsure<'tcx> {
$($(#[$attr])*
#[inline(always)]
pub fn $name(self, key: query_helper_param_ty!($($K)*)) {
@@ -231,20 +233,20 @@ macro_rules! define_callbacks {
})*
}
- impl<$tcx> TyCtxt<$tcx> {
+ impl<'tcx> TyCtxt<'tcx> {
$($(#[$attr])*
#[inline(always)]
#[must_use]
- pub fn $name(self, key: query_helper_param_ty!($($K)*)) -> query_stored::$name<$tcx>
+ pub fn $name(self, key: query_helper_param_ty!($($K)*)) -> query_stored::$name<'tcx>
{
self.at(DUMMY_SP).$name(key)
})*
}
- impl<$tcx> TyCtxtAt<$tcx> {
+ impl<'tcx> TyCtxtAt<'tcx> {
$($(#[$attr])*
#[inline(always)]
- pub fn $name(self, key: query_helper_param_ty!($($K)*)) -> query_stored::$name<$tcx>
+ pub fn $name(self, key: query_helper_param_ty!($($K)*)) -> query_stored::$name<'tcx>
{
let key = key.into_query_param();
opt_remap_env_constness!([$($modifiers)*][key]);
@@ -275,8 +277,9 @@ macro_rules! define_callbacks {
fn default() -> Self {
Providers {
$($name: |_, key| bug!(
- "`tcx.{}({:?})` unsupported by its crate; \
- perhaps the `{}` query was never assigned a provider function",
+ "`tcx.{}({:?})` is not supported for external or local crate;\n
+ hint: Queries can be either made to the local crate, or the external crate. This error means you tried to use it for one that's not supported (likely the local crate).\n
+ If that's not the case, {} was likely never assigned to a provider function.\n",
stringify!($name),
key,
stringify!($name),
@@ -311,11 +314,11 @@ macro_rules! define_callbacks {
$($(#[$attr])*
fn $name(
&'tcx self,
- tcx: TyCtxt<$tcx>,
+ tcx: TyCtxt<'tcx>,
span: Span,
- key: query_keys::$name<$tcx>,
+ key: query_keys::$name<'tcx>,
mode: QueryMode,
- ) -> Option<query_stored::$name<$tcx>>;)*
+ ) -> Option<query_stored::$name<'tcx>>;)*
}
};
}
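The `define_callbacks!` change above drops the `$tcx:tt` parameter and spells `'tcx` directly in the generated modules and impls. A toy macro with the same shape, purely illustrative (`define_lookups!` and `Tables` are invented names, not rustc items):

macro_rules! define_lookups {
    ($($name:ident: $V:ty,)*) => {
        pub struct Tables<'tcx> {
            $(pub $name: Vec<&'tcx $V>,)*
        }

        impl<'tcx> Tables<'tcx> {
            pub fn new() -> Self {
                Tables { $($name: Vec::new(),)* }
            }
        }
    };
}

// The caller no longer passes the lifetime; the macro supplies `'tcx` itself.
define_lookups! {
    type_of: str,
    generics_of: [u8],
}

fn main() {
    let mut tables = Tables::new();
    tables.type_of.push("u32");
    tables.generics_of.push(&b"ab"[..]);
    assert_eq!(tables.type_of.len(), 1);
}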
@@ -332,10 +335,10 @@ macro_rules! define_callbacks {
// Queries marked with `fatal_cycle` do not need the latter implementation,
// as they will raise a fatal error on query cycles instead.
-rustc_query_append! { [define_callbacks!][<'tcx>] }
+rustc_query_append! { define_callbacks! }
mod sealed {
- use super::{DefId, LocalDefId};
+ use super::{DefId, LocalDefId, OwnerId};
/// An analogue of the `Into` trait that's intended only for query parameters.
///
@@ -365,6 +368,13 @@ mod sealed {
self.to_def_id()
}
}
+
+ impl IntoQueryParam<DefId> for OwnerId {
+ #[inline(always)]
+ fn into_query_param(self) -> DefId {
+ self.to_def_id()
+ }
+ }
}
use sealed::IntoQueryParam;
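The `OwnerId` impl added above slots into the sealed `IntoQueryParam` conversion described in the surrounding module. A reduced, runnable sketch of that pattern follows; `DefId`, `LocalDefId`, `OwnerId`, and `def_kind` here are toy stand-ins rather than the rustc definitions, and only the trait shape mirrors the patch.

mod sealed {
    #[derive(Debug, Clone, Copy, PartialEq)]
    pub struct DefId(pub u32);
    #[derive(Debug, Clone, Copy)]
    pub struct LocalDefId(pub u32);
    #[derive(Debug, Clone, Copy)]
    pub struct OwnerId(pub LocalDefId);

    /// An `Into`-like conversion that downstream code can call but not
    /// implement, because the trait lives in a module that is not re-exported.
    pub trait IntoQueryParam<P> {
        fn into_query_param(self) -> P;
    }

    impl<P> IntoQueryParam<P> for P {
        fn into_query_param(self) -> P {
            self
        }
    }

    impl IntoQueryParam<DefId> for LocalDefId {
        fn into_query_param(self) -> DefId {
            DefId(self.0)
        }
    }

    // The impl the patch adds: `DefId`-keyed queries now accept an `OwnerId`.
    impl IntoQueryParam<DefId> for OwnerId {
        fn into_query_param(self) -> DefId {
            self.0.into_query_param()
        }
    }
}

use sealed::{DefId, IntoQueryParam, LocalDefId, OwnerId};

// Stand-in for a query accessor generated by `define_callbacks!`.
fn def_kind(key: impl IntoQueryParam<DefId>) -> DefId {
    key.into_query_param()
}

fn main() {
    assert_eq!(def_kind(DefId(7)), DefId(7));
    assert_eq!(def_kind(OwnerId(LocalDefId(7))), DefId(7));
}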
diff --git a/compiler/rustc_middle/src/ty/relate.rs b/compiler/rustc_middle/src/ty/relate.rs
index 818affa71..b25b4bd4f 100644
--- a/compiler/rustc_middle/src/ty/relate.rs
+++ b/compiler/rustc_middle/src/ty/relate.rs
@@ -5,8 +5,8 @@
//! subtyping, type equality, etc.
use crate::ty::error::{ExpectedFound, TypeError};
-use crate::ty::subst::{GenericArg, GenericArgKind, Subst, SubstsRef};
-use crate::ty::{self, ImplSubject, Term, Ty, TyCtxt, TypeFoldable};
+use crate::ty::{self, ImplSubject, Term, TermKind, Ty, TyCtxt, TypeFoldable};
+use crate::ty::{GenericArg, GenericArgKind, SubstsRef};
use rustc_hir as ast;
use rustc_hir::def_id::DefId;
use rustc_span::DUMMY_SP;
@@ -441,7 +441,9 @@ pub fn super_relate_tys<'tcx, R: TypeRelation<'tcx>>(
(&ty::Foreign(a_id), &ty::Foreign(b_id)) if a_id == b_id => Ok(tcx.mk_foreign(a_id)),
- (&ty::Dynamic(a_obj, a_region), &ty::Dynamic(b_obj, b_region)) => {
+ (&ty::Dynamic(a_obj, a_region, a_repr), &ty::Dynamic(b_obj, b_region, b_repr))
+ if a_repr == b_repr =>
+ {
let region_bound = relation.with_cause(Cause::ExistentialRegionBound, |relation| {
relation.relate_with_variance(
ty::Contravariant,
@@ -450,7 +452,7 @@ pub fn super_relate_tys<'tcx, R: TypeRelation<'tcx>>(
b_region,
)
})?;
- Ok(tcx.mk_dynamic(relation.relate(a_obj, b_obj)?, region_bound))
+ Ok(tcx.mk_dynamic(relation.relate(a_obj, b_obj)?, region_bound, a_repr))
}
(&ty::Generator(a_id, a_substs, movability), &ty::Generator(b_id, b_substs, _))
@@ -572,8 +574,8 @@ pub fn super_relate_tys<'tcx, R: TypeRelation<'tcx>>(
/// it.
pub fn super_relate_consts<'tcx, R: TypeRelation<'tcx>>(
relation: &mut R,
- a: ty::Const<'tcx>,
- b: ty::Const<'tcx>,
+ mut a: ty::Const<'tcx>,
+ mut b: ty::Const<'tcx>,
) -> RelateResult<'tcx, ty::Const<'tcx>> {
debug!("{}.super_relate_consts(a = {:?}, b = {:?})", relation.tag(), a, b);
let tcx = relation.tcx();
@@ -594,9 +596,16 @@ pub fn super_relate_consts<'tcx, R: TypeRelation<'tcx>>(
);
}
- let eagerly_eval = |x: ty::Const<'tcx>| x.eval(tcx, relation.param_env());
- let a = eagerly_eval(a);
- let b = eagerly_eval(b);
+ // HACK(const_generics): We still need to eagerly evaluate consts when
+ // relating them because during `normalize_param_env_or_error`,
+    // we may relate an evaluated constant in an obligation against
+ // an unnormalized (i.e. unevaluated) const in the param-env.
+ // FIXME(generic_const_exprs): Once we always lazily unify unevaluated constants
+ // these `eval` calls can be removed.
+ if !relation.tcx().features().generic_const_exprs {
+ a = a.eval(tcx, relation.param_env());
+ b = b.eval(tcx, relation.param_env());
+ }
// Currently, the values that can be unified are primitive types,
// and those that derive both `PartialEq` and `Eq`, corresponding
@@ -617,15 +626,13 @@ pub fn super_relate_consts<'tcx, R: TypeRelation<'tcx>>(
(ty::ConstKind::Unevaluated(au), ty::ConstKind::Unevaluated(bu))
if tcx.features().generic_const_exprs =>
{
- tcx.try_unify_abstract_consts(relation.param_env().and((au.shrink(), bu.shrink())))
+ tcx.try_unify_abstract_consts(relation.param_env().and((au, bu)))
}
// While this is slightly incorrect, it shouldn't matter for `min_const_generics`
// and is the better alternative to waiting until `generic_const_exprs` can
// be stabilized.
- (ty::ConstKind::Unevaluated(au), ty::ConstKind::Unevaluated(bu))
- if au.def == bu.def && au.promoted == bu.promoted =>
- {
+ (ty::ConstKind::Unevaluated(au), ty::ConstKind::Unevaluated(bu)) if au.def == bu.def => {
let substs = relation.relate_with_variance(
ty::Variance::Invariant,
ty::VarianceDiagInfo::default(),
@@ -633,11 +640,7 @@ pub fn super_relate_consts<'tcx, R: TypeRelation<'tcx>>(
bu.substs,
)?;
return Ok(tcx.mk_const(ty::ConstS {
- kind: ty::ConstKind::Unevaluated(ty::Unevaluated {
- def: au.def,
- substs,
- promoted: au.promoted,
- }),
+ kind: ty::ConstKind::Unevaluated(ty::UnevaluatedConst { def: au.def, substs }),
ty: a.ty(),
}));
}
@@ -803,15 +806,15 @@ impl<'tcx> Relate<'tcx> for ty::TraitPredicate<'tcx> {
}
}
-impl<'tcx> Relate<'tcx> for ty::Term<'tcx> {
+impl<'tcx> Relate<'tcx> for Term<'tcx> {
fn relate<R: TypeRelation<'tcx>>(
relation: &mut R,
a: Self,
b: Self,
) -> RelateResult<'tcx, Self> {
- Ok(match (a, b) {
- (Term::Ty(a), Term::Ty(b)) => relation.relate(a, b)?.into(),
- (Term::Const(a), Term::Const(b)) => relation.relate(a, b)?.into(),
+ Ok(match (a.unpack(), b.unpack()) {
+ (TermKind::Ty(a), TermKind::Ty(b)) => relation.relate(a, b)?.into(),
+ (TermKind::Const(a), TermKind::Const(b)) => relation.relate(a, b)?.into(),
_ => return Err(TypeError::Mismatch),
})
}
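The `Term`/`TermKind` switch in the `Relate` impl above (and again in the pretty-printing and `Lift` code elsewhere in this patch) follows one pattern: never match on the stored `Term`, always inspect it through `unpack()`, and rebuild with `pack()`. Below is a toy model of that shape with stand-in `Ty`/`Const` types; the real `Term` packs a tagged pointer rather than wrapping an enum.

#[derive(Clone, Copy, Debug, PartialEq)]
struct Ty(u32);
#[derive(Clone, Copy, Debug, PartialEq)]
struct Const(u32);

#[derive(Clone, Copy, Debug, PartialEq)]
enum TermKind {
    Ty(Ty),
    Const(Const),
}

// Opaque at rest; only `unpack()` exposes the kind.
#[derive(Clone, Copy, Debug, PartialEq)]
struct Term(TermKind);

impl Term {
    fn unpack(self) -> TermKind {
        self.0
    }
}

impl TermKind {
    fn pack(self) -> Term {
        Term(self)
    }
}

// Mirrors the new `Relate` impl: match on the unpacked kinds, relate the
// parts, and repack the result.
fn relate(a: Term, b: Term) -> Result<Term, &'static str> {
    match (a.unpack(), b.unpack()) {
        (TermKind::Ty(x), TermKind::Ty(y)) if x == y => Ok(TermKind::Ty(x).pack()),
        (TermKind::Const(x), TermKind::Const(y)) if x == y => Ok(TermKind::Const(x).pack()),
        _ => Err("mismatch"),
    }
}

fn main() {
    let t = TermKind::Ty(Ty(1)).pack();
    assert_eq!(relate(t, t), Ok(t));
    assert!(relate(t, TermKind::Const(Const(1)).pack()).is_err());
}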
diff --git a/compiler/rustc_middle/src/ty/rvalue_scopes.rs b/compiler/rustc_middle/src/ty/rvalue_scopes.rs
index e86dafae3..e79b79a25 100644
--- a/compiler/rustc_middle/src/ty/rvalue_scopes.rs
+++ b/compiler/rustc_middle/src/ty/rvalue_scopes.rs
@@ -3,7 +3,7 @@ use rustc_data_structures::fx::FxHashMap;
use rustc_hir as hir;
/// `RvalueScopes` is a mapping from sub-expressions to _extended_ lifetime as determined by
-/// rules laid out in `rustc_typeck::check::rvalue_scopes`.
+/// rules laid out in `rustc_hir_analysis::check::rvalue_scopes`.
#[derive(TyEncodable, TyDecodable, Clone, Debug, Default, Eq, PartialEq, HashStable)]
pub struct RvalueScopes {
map: FxHashMap<hir::ItemLocalId, Option<Scope>>,
diff --git a/compiler/rustc_middle/src/ty/structural_impls.rs b/compiler/rustc_middle/src/ty/structural_impls.rs
index 7660a2f3a..2cad333e3 100644
--- a/compiler/rustc_middle/src/ty/structural_impls.rs
+++ b/compiler/rustc_middle/src/ty/structural_impls.rs
@@ -3,13 +3,12 @@
//! hand, though we've recently added some macros and proc-macros to help with the tedium.
use crate::mir::interpret;
-use crate::mir::ProjectionKind;
+use crate::mir::{Field, ProjectionKind};
use crate::ty::fold::{FallibleTypeFolder, TypeFoldable, TypeSuperFoldable};
use crate::ty::print::{with_no_trimmed_paths, FmtPrinter, Printer};
use crate::ty::visit::{TypeSuperVisitable, TypeVisitable, TypeVisitor};
-use crate::ty::{self, InferConst, Lift, Term, Ty, TyCtxt};
+use crate::ty::{self, InferConst, Lift, Term, TermKind, Ty, TyCtxt};
use rustc_data_structures::functor::IdFunctor;
-use rustc_hir as hir;
use rustc_hir::def::Namespace;
use rustc_index::vec::{Idx, IndexVec};
@@ -167,8 +166,8 @@ impl<'tcx> fmt::Debug for ty::PredicateKind<'tcx> {
ty::PredicateKind::ClosureKind(closure_def_id, closure_substs, kind) => {
write!(f, "ClosureKind({:?}, {:?}, {:?})", closure_def_id, closure_substs, kind)
}
- ty::PredicateKind::ConstEvaluatable(uv) => {
- write!(f, "ConstEvaluatable({:?}, {:?})", uv.def, uv.substs)
+ ty::PredicateKind::ConstEvaluatable(ct) => {
+ write!(f, "ConstEvaluatable({ct:?})")
}
ty::PredicateKind::ConstEquate(c1, c2) => write!(f, "ConstEquate({:?}, {:?})", c1, c2),
ty::PredicateKind::TypeWellFormedFromEnv(ty) => {
@@ -238,12 +237,24 @@ TrivialTypeTraversalAndLiftImpls! {
crate::ty::Variance,
::rustc_span::Span,
::rustc_errors::ErrorGuaranteed,
+ Field,
+ interpret::Scalar,
+ rustc_target::abi::Size,
+ ty::DelaySpanBugEmitted,
+ rustc_type_ir::DebruijnIndex,
+ ty::BoundVar,
+ ty::Placeholder<ty::BoundVar>,
+}
+
+TrivialTypeTraversalAndLiftImpls! {
+ for<'tcx> {
+ ty::ValTree<'tcx>,
+ }
}
///////////////////////////////////////////////////////////////////////////
// Lift implementations
-// FIXME(eddyb) replace all the uses of `Option::map` with `?`.
impl<'tcx, A: Lift<'tcx>, B: Lift<'tcx>> Lift<'tcx> for (A, B) {
type Lifted = (A::Lifted, B::Lifted);
fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
@@ -261,10 +272,10 @@ impl<'tcx, A: Lift<'tcx>, B: Lift<'tcx>, C: Lift<'tcx>> Lift<'tcx> for (A, B, C)
impl<'tcx, T: Lift<'tcx>> Lift<'tcx> for Option<T> {
type Lifted = Option<T::Lifted>;
fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
- match self {
- Some(x) => tcx.lift(x).map(Some),
- None => Some(None),
- }
+ Some(match self {
+ Some(x) => Some(tcx.lift(x)?),
+ None => None,
+ })
}
}
@@ -281,21 +292,21 @@ impl<'tcx, T: Lift<'tcx>, E: Lift<'tcx>> Lift<'tcx> for Result<T, E> {
impl<'tcx, T: Lift<'tcx>> Lift<'tcx> for Box<T> {
type Lifted = Box<T::Lifted>;
fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
- tcx.lift(*self).map(Box::new)
+ Some(Box::new(tcx.lift(*self)?))
}
}
impl<'tcx, T: Lift<'tcx> + Clone> Lift<'tcx> for Rc<T> {
type Lifted = Rc<T::Lifted>;
fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
- tcx.lift(self.as_ref().clone()).map(Rc::new)
+ Some(Rc::new(tcx.lift(self.as_ref().clone())?))
}
}
impl<'tcx, T: Lift<'tcx> + Clone> Lift<'tcx> for Arc<T> {
type Lifted = Arc<T::Lifted>;
fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
- tcx.lift(self.as_ref().clone()).map(Arc::new)
+ Some(Arc::new(tcx.lift(self.as_ref().clone())?))
}
}
impl<'tcx, T: Lift<'tcx>> Lift<'tcx> for Vec<T> {
@@ -312,159 +323,18 @@ impl<'tcx, I: Idx, T: Lift<'tcx>> Lift<'tcx> for IndexVec<I, T> {
}
}
-impl<'a, 'tcx> Lift<'tcx> for ty::TraitRef<'a> {
- type Lifted = ty::TraitRef<'tcx>;
- fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
- tcx.lift(self.substs).map(|substs| ty::TraitRef { def_id: self.def_id, substs })
- }
-}
-
-impl<'a, 'tcx> Lift<'tcx> for ty::ExistentialTraitRef<'a> {
- type Lifted = ty::ExistentialTraitRef<'tcx>;
- fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
- tcx.lift(self.substs).map(|substs| ty::ExistentialTraitRef { def_id: self.def_id, substs })
- }
-}
-
-impl<'a, 'tcx> Lift<'tcx> for ty::ExistentialPredicate<'a> {
- type Lifted = ty::ExistentialPredicate<'tcx>;
- fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
- match self {
- ty::ExistentialPredicate::Trait(x) => tcx.lift(x).map(ty::ExistentialPredicate::Trait),
- ty::ExistentialPredicate::Projection(x) => {
- tcx.lift(x).map(ty::ExistentialPredicate::Projection)
- }
- ty::ExistentialPredicate::AutoTrait(def_id) => {
- Some(ty::ExistentialPredicate::AutoTrait(def_id))
- }
- }
- }
-}
-
impl<'a, 'tcx> Lift<'tcx> for Term<'a> {
type Lifted = ty::Term<'tcx>;
fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
- Some(match self {
- Term::Ty(ty) => Term::Ty(tcx.lift(ty)?),
- Term::Const(c) => Term::Const(tcx.lift(c)?),
- })
- }
-}
-
-impl<'a, 'tcx> Lift<'tcx> for ty::TraitPredicate<'a> {
- type Lifted = ty::TraitPredicate<'tcx>;
- fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<ty::TraitPredicate<'tcx>> {
- tcx.lift(self.trait_ref).map(|trait_ref| ty::TraitPredicate {
- trait_ref,
- constness: self.constness,
- polarity: self.polarity,
- })
- }
-}
-
-impl<'a, 'tcx> Lift<'tcx> for ty::SubtypePredicate<'a> {
- type Lifted = ty::SubtypePredicate<'tcx>;
- fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<ty::SubtypePredicate<'tcx>> {
- tcx.lift((self.a, self.b)).map(|(a, b)| ty::SubtypePredicate {
- a_is_expected: self.a_is_expected,
- a,
- b,
- })
- }
-}
-
-impl<'a, 'tcx> Lift<'tcx> for ty::CoercePredicate<'a> {
- type Lifted = ty::CoercePredicate<'tcx>;
- fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<ty::CoercePredicate<'tcx>> {
- tcx.lift((self.a, self.b)).map(|(a, b)| ty::CoercePredicate { a, b })
- }
-}
-
-impl<'tcx, A: Copy + Lift<'tcx>, B: Copy + Lift<'tcx>> Lift<'tcx> for ty::OutlivesPredicate<A, B> {
- type Lifted = ty::OutlivesPredicate<A::Lifted, B::Lifted>;
- fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
- tcx.lift((self.0, self.1)).map(|(a, b)| ty::OutlivesPredicate(a, b))
- }
-}
-
-impl<'a, 'tcx> Lift<'tcx> for ty::ProjectionTy<'a> {
- type Lifted = ty::ProjectionTy<'tcx>;
- fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<ty::ProjectionTy<'tcx>> {
- tcx.lift(self.substs)
- .map(|substs| ty::ProjectionTy { item_def_id: self.item_def_id, substs })
- }
-}
-
-impl<'a, 'tcx> Lift<'tcx> for ty::ProjectionPredicate<'a> {
- type Lifted = ty::ProjectionPredicate<'tcx>;
- fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<ty::ProjectionPredicate<'tcx>> {
- tcx.lift((self.projection_ty, self.term))
- .map(|(projection_ty, term)| ty::ProjectionPredicate { projection_ty, term })
- }
-}
-
-impl<'a, 'tcx> Lift<'tcx> for ty::ExistentialProjection<'a> {
- type Lifted = ty::ExistentialProjection<'tcx>;
- fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
- tcx.lift(self.substs).map(|substs| ty::ExistentialProjection {
- substs,
- term: tcx.lift(self.term).expect("type must lift when substs do"),
- item_def_id: self.item_def_id,
- })
- }
-}
-
-impl<'a, 'tcx> Lift<'tcx> for ty::PredicateKind<'a> {
- type Lifted = ty::PredicateKind<'tcx>;
- fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
- match self {
- ty::PredicateKind::Trait(data) => tcx.lift(data).map(ty::PredicateKind::Trait),
- ty::PredicateKind::Subtype(data) => tcx.lift(data).map(ty::PredicateKind::Subtype),
- ty::PredicateKind::Coerce(data) => tcx.lift(data).map(ty::PredicateKind::Coerce),
- ty::PredicateKind::RegionOutlives(data) => {
- tcx.lift(data).map(ty::PredicateKind::RegionOutlives)
- }
- ty::PredicateKind::TypeOutlives(data) => {
- tcx.lift(data).map(ty::PredicateKind::TypeOutlives)
- }
- ty::PredicateKind::Projection(data) => {
- tcx.lift(data).map(ty::PredicateKind::Projection)
- }
- ty::PredicateKind::WellFormed(ty) => tcx.lift(ty).map(ty::PredicateKind::WellFormed),
- ty::PredicateKind::ClosureKind(closure_def_id, closure_substs, kind) => {
- tcx.lift(closure_substs).map(|closure_substs| {
- ty::PredicateKind::ClosureKind(closure_def_id, closure_substs, kind)
- })
+ Some(
+ match self.unpack() {
+ TermKind::Ty(ty) => TermKind::Ty(tcx.lift(ty)?),
+ TermKind::Const(c) => TermKind::Const(tcx.lift(c)?),
}
- ty::PredicateKind::ObjectSafe(trait_def_id) => {
- Some(ty::PredicateKind::ObjectSafe(trait_def_id))
- }
- ty::PredicateKind::ConstEvaluatable(uv) => {
- tcx.lift(uv).map(|uv| ty::PredicateKind::ConstEvaluatable(uv))
- }
- ty::PredicateKind::ConstEquate(c1, c2) => {
- tcx.lift((c1, c2)).map(|(c1, c2)| ty::PredicateKind::ConstEquate(c1, c2))
- }
- ty::PredicateKind::TypeWellFormedFromEnv(ty) => {
- tcx.lift(ty).map(ty::PredicateKind::TypeWellFormedFromEnv)
- }
- }
- }
-}
-
-impl<'a, 'tcx, T: Lift<'tcx>> Lift<'tcx> for ty::Binder<'a, T>
-where
- <T as Lift<'tcx>>::Lifted: TypeVisitable<'tcx>,
-{
- type Lifted = ty::Binder<'tcx, T::Lifted>;
- fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
- let bound_vars = tcx.lift(self.bound_vars());
- tcx.lift(self.skip_binder())
- .zip(bound_vars)
- .map(|(value, vars)| ty::Binder::bind_with_vars(value, vars))
+ .pack(),
+ )
}
}
-
impl<'a, 'tcx> Lift<'tcx> for ty::ParamEnv<'a> {
type Lifted = ty::ParamEnv<'tcx>;
fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
@@ -473,178 +343,6 @@ impl<'a, 'tcx> Lift<'tcx> for ty::ParamEnv<'a> {
}
}
-impl<'a, 'tcx, T: Lift<'tcx>> Lift<'tcx> for ty::ParamEnvAnd<'a, T> {
- type Lifted = ty::ParamEnvAnd<'tcx, T::Lifted>;
- fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
- tcx.lift(self.param_env).and_then(|param_env| {
- tcx.lift(self.value).map(|value| ty::ParamEnvAnd { param_env, value })
- })
- }
-}
-
-impl<'a, 'tcx> Lift<'tcx> for ty::ClosureSubsts<'a> {
- type Lifted = ty::ClosureSubsts<'tcx>;
- fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
- tcx.lift(self.substs).map(|substs| ty::ClosureSubsts { substs })
- }
-}
-
-impl<'a, 'tcx> Lift<'tcx> for ty::GeneratorSubsts<'a> {
- type Lifted = ty::GeneratorSubsts<'tcx>;
- fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
- tcx.lift(self.substs).map(|substs| ty::GeneratorSubsts { substs })
- }
-}
-
-impl<'a, 'tcx> Lift<'tcx> for ty::adjustment::Adjustment<'a> {
- type Lifted = ty::adjustment::Adjustment<'tcx>;
- fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
- let ty::adjustment::Adjustment { kind, target } = self;
- tcx.lift(kind).and_then(|kind| {
- tcx.lift(target).map(|target| ty::adjustment::Adjustment { kind, target })
- })
- }
-}
-
-impl<'a, 'tcx> Lift<'tcx> for ty::adjustment::Adjust<'a> {
- type Lifted = ty::adjustment::Adjust<'tcx>;
- fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
- match self {
- ty::adjustment::Adjust::NeverToAny => Some(ty::adjustment::Adjust::NeverToAny),
- ty::adjustment::Adjust::Pointer(ptr) => Some(ty::adjustment::Adjust::Pointer(ptr)),
- ty::adjustment::Adjust::Deref(overloaded) => {
- tcx.lift(overloaded).map(ty::adjustment::Adjust::Deref)
- }
- ty::adjustment::Adjust::Borrow(autoref) => {
- tcx.lift(autoref).map(ty::adjustment::Adjust::Borrow)
- }
- }
- }
-}
-
-impl<'a, 'tcx> Lift<'tcx> for ty::adjustment::OverloadedDeref<'a> {
- type Lifted = ty::adjustment::OverloadedDeref<'tcx>;
- fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
- tcx.lift(self.region).map(|region| ty::adjustment::OverloadedDeref {
- region,
- mutbl: self.mutbl,
- span: self.span,
- })
- }
-}
-
-impl<'a, 'tcx> Lift<'tcx> for ty::adjustment::AutoBorrow<'a> {
- type Lifted = ty::adjustment::AutoBorrow<'tcx>;
- fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
- match self {
- ty::adjustment::AutoBorrow::Ref(r, m) => {
- tcx.lift(r).map(|r| ty::adjustment::AutoBorrow::Ref(r, m))
- }
- ty::adjustment::AutoBorrow::RawPtr(m) => Some(ty::adjustment::AutoBorrow::RawPtr(m)),
- }
- }
-}
-
-impl<'a, 'tcx> Lift<'tcx> for ty::GenSig<'a> {
- type Lifted = ty::GenSig<'tcx>;
- fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
- tcx.lift((self.resume_ty, self.yield_ty, self.return_ty))
- .map(|(resume_ty, yield_ty, return_ty)| ty::GenSig { resume_ty, yield_ty, return_ty })
- }
-}
-
-impl<'a, 'tcx> Lift<'tcx> for ty::FnSig<'a> {
- type Lifted = ty::FnSig<'tcx>;
- fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
- tcx.lift(self.inputs_and_output).map(|x| ty::FnSig {
- inputs_and_output: x,
- c_variadic: self.c_variadic,
- unsafety: self.unsafety,
- abi: self.abi,
- })
- }
-}
-
-impl<'tcx, T: Lift<'tcx>> Lift<'tcx> for ty::error::ExpectedFound<T> {
- type Lifted = ty::error::ExpectedFound<T::Lifted>;
- fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
- let ty::error::ExpectedFound { expected, found } = self;
- tcx.lift(expected).and_then(|expected| {
- tcx.lift(found).map(|found| ty::error::ExpectedFound { expected, found })
- })
- }
-}
-
-impl<'a, 'tcx> Lift<'tcx> for ty::error::TypeError<'a> {
- type Lifted = ty::error::TypeError<'tcx>;
- fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
- use crate::ty::error::TypeError::*;
-
- Some(match self {
- Mismatch => Mismatch,
- ConstnessMismatch(x) => ConstnessMismatch(x),
- PolarityMismatch(x) => PolarityMismatch(x),
- UnsafetyMismatch(x) => UnsafetyMismatch(x),
- AbiMismatch(x) => AbiMismatch(x),
- Mutability => Mutability,
- ArgumentMutability(i) => ArgumentMutability(i),
- TupleSize(x) => TupleSize(x),
- FixedArraySize(x) => FixedArraySize(x),
- ArgCount => ArgCount,
- FieldMisMatch(x, y) => FieldMisMatch(x, y),
- RegionsDoesNotOutlive(a, b) => {
- return tcx.lift((a, b)).map(|(a, b)| RegionsDoesNotOutlive(a, b));
- }
- RegionsInsufficientlyPolymorphic(a, b) => {
- return tcx.lift(b).map(|b| RegionsInsufficientlyPolymorphic(a, b));
- }
- RegionsOverlyPolymorphic(a, b) => {
- return tcx.lift(b).map(|b| RegionsOverlyPolymorphic(a, b));
- }
- RegionsPlaceholderMismatch => RegionsPlaceholderMismatch,
- IntMismatch(x) => IntMismatch(x),
- FloatMismatch(x) => FloatMismatch(x),
- Traits(x) => Traits(x),
- VariadicMismatch(x) => VariadicMismatch(x),
- CyclicTy(t) => return tcx.lift(t).map(|t| CyclicTy(t)),
- CyclicConst(ct) => return tcx.lift(ct).map(|ct| CyclicConst(ct)),
- ProjectionMismatched(x) => ProjectionMismatched(x),
- ArgumentSorts(x, i) => return tcx.lift(x).map(|x| ArgumentSorts(x, i)),
- Sorts(x) => return tcx.lift(x).map(Sorts),
- ExistentialMismatch(x) => return tcx.lift(x).map(ExistentialMismatch),
- ConstMismatch(x) => return tcx.lift(x).map(ConstMismatch),
- IntrinsicCast => IntrinsicCast,
- TargetFeatureCast(x) => TargetFeatureCast(x),
- ObjectUnsafeCoercion(x) => return tcx.lift(x).map(ObjectUnsafeCoercion),
- })
- }
-}
-
-impl<'a, 'tcx> Lift<'tcx> for ty::InstanceDef<'a> {
- type Lifted = ty::InstanceDef<'tcx>;
- fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
- match self {
- ty::InstanceDef::Item(def_id) => Some(ty::InstanceDef::Item(def_id)),
- ty::InstanceDef::VTableShim(def_id) => Some(ty::InstanceDef::VTableShim(def_id)),
- ty::InstanceDef::ReifyShim(def_id) => Some(ty::InstanceDef::ReifyShim(def_id)),
- ty::InstanceDef::Intrinsic(def_id) => Some(ty::InstanceDef::Intrinsic(def_id)),
- ty::InstanceDef::FnPtrShim(def_id, ty) => {
- Some(ty::InstanceDef::FnPtrShim(def_id, tcx.lift(ty)?))
- }
- ty::InstanceDef::Virtual(def_id, n) => Some(ty::InstanceDef::Virtual(def_id, n)),
- ty::InstanceDef::ClosureOnceShim { call_once, track_caller } => {
- Some(ty::InstanceDef::ClosureOnceShim { call_once, track_caller })
- }
- ty::InstanceDef::DropGlue(def_id, ty) => {
- Some(ty::InstanceDef::DropGlue(def_id, tcx.lift(ty)?))
- }
- ty::InstanceDef::CloneShim(def_id, ty) => {
- Some(ty::InstanceDef::CloneShim(def_id, tcx.lift(ty)?))
- }
- }
- }
-}
-
///////////////////////////////////////////////////////////////////////////
// TypeFoldable implementations.
@@ -844,27 +542,21 @@ impl<'tcx, T: TypeVisitable<'tcx>> TypeVisitable<'tcx> for Vec<T> {
}
}
-impl<'tcx, T: TypeFoldable<'tcx>> TypeFoldable<'tcx> for Box<[T]> {
- fn try_fold_with<F: FallibleTypeFolder<'tcx>>(self, folder: &mut F) -> Result<Self, F::Error> {
- self.try_map_id(|t| t.try_fold_with(folder))
- }
-}
-
-impl<'tcx, T: TypeVisitable<'tcx>> TypeVisitable<'tcx> for Box<[T]> {
+impl<'tcx, T: TypeVisitable<'tcx>> TypeVisitable<'tcx> for &[T] {
fn visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> ControlFlow<V::BreakTy> {
self.iter().try_for_each(|t| t.visit_with(visitor))
}
}
-impl<'tcx, T: TypeFoldable<'tcx>> TypeFoldable<'tcx> for ty::EarlyBinder<T> {
+impl<'tcx, T: TypeFoldable<'tcx>> TypeFoldable<'tcx> for Box<[T]> {
fn try_fold_with<F: FallibleTypeFolder<'tcx>>(self, folder: &mut F) -> Result<Self, F::Error> {
- self.try_map_bound(|ty| ty.try_fold_with(folder))
+ self.try_map_id(|t| t.try_fold_with(folder))
}
}
-impl<'tcx, T: TypeVisitable<'tcx>> TypeVisitable<'tcx> for ty::EarlyBinder<T> {
+impl<'tcx, T: TypeVisitable<'tcx>> TypeVisitable<'tcx> for Box<[T]> {
fn visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> ControlFlow<V::BreakTy> {
- self.as_ref().0.visit_with(visitor)
+ self.iter().try_for_each(|t| t.visit_with(visitor))
}
}
@@ -901,88 +593,12 @@ impl<'tcx> TypeFoldable<'tcx> for &'tcx ty::List<ty::Binder<'tcx, ty::Existentia
}
}
-impl<'tcx> TypeVisitable<'tcx>
- for &'tcx ty::List<ty::Binder<'tcx, ty::ExistentialPredicate<'tcx>>>
-{
- fn visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> ControlFlow<V::BreakTy> {
- self.iter().try_for_each(|p| p.visit_with(visitor))
- }
-}
-
impl<'tcx> TypeFoldable<'tcx> for &'tcx ty::List<ProjectionKind> {
fn try_fold_with<F: FallibleTypeFolder<'tcx>>(self, folder: &mut F) -> Result<Self, F::Error> {
ty::util::fold_list(self, folder, |tcx, v| tcx.intern_projs(v))
}
}
-impl<'tcx> TypeVisitable<'tcx> for &'tcx ty::List<ProjectionKind> {
- fn visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> ControlFlow<V::BreakTy> {
- self.iter().try_for_each(|t| t.visit_with(visitor))
- }
-}
-
-impl<'tcx> TypeFoldable<'tcx> for ty::instance::Instance<'tcx> {
- fn try_fold_with<F: FallibleTypeFolder<'tcx>>(self, folder: &mut F) -> Result<Self, F::Error> {
- use crate::ty::InstanceDef::*;
- Ok(Self {
- substs: self.substs.try_fold_with(folder)?,
- def: match self.def {
- Item(def) => Item(def.try_fold_with(folder)?),
- VTableShim(did) => VTableShim(did.try_fold_with(folder)?),
- ReifyShim(did) => ReifyShim(did.try_fold_with(folder)?),
- Intrinsic(did) => Intrinsic(did.try_fold_with(folder)?),
- FnPtrShim(did, ty) => {
- FnPtrShim(did.try_fold_with(folder)?, ty.try_fold_with(folder)?)
- }
- Virtual(did, i) => Virtual(did.try_fold_with(folder)?, i),
- ClosureOnceShim { call_once, track_caller } => {
- ClosureOnceShim { call_once: call_once.try_fold_with(folder)?, track_caller }
- }
- DropGlue(did, ty) => {
- DropGlue(did.try_fold_with(folder)?, ty.try_fold_with(folder)?)
- }
- CloneShim(did, ty) => {
- CloneShim(did.try_fold_with(folder)?, ty.try_fold_with(folder)?)
- }
- },
- })
- }
-}
-
-impl<'tcx> TypeVisitable<'tcx> for ty::instance::Instance<'tcx> {
- fn visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> ControlFlow<V::BreakTy> {
- use crate::ty::InstanceDef::*;
- self.substs.visit_with(visitor)?;
- match self.def {
- Item(def) => def.visit_with(visitor),
- VTableShim(did) | ReifyShim(did) | Intrinsic(did) | Virtual(did, _) => {
- did.visit_with(visitor)
- }
- FnPtrShim(did, ty) | CloneShim(did, ty) => {
- did.visit_with(visitor)?;
- ty.visit_with(visitor)
- }
- DropGlue(did, ty) => {
- did.visit_with(visitor)?;
- ty.visit_with(visitor)
- }
- ClosureOnceShim { call_once, track_caller: _ } => call_once.visit_with(visitor),
- }
- }
-}
-
-impl<'tcx> TypeFoldable<'tcx> for interpret::GlobalId<'tcx> {
- fn try_fold_with<F: FallibleTypeFolder<'tcx>>(self, folder: &mut F) -> Result<Self, F::Error> {
- Ok(Self { instance: self.instance.try_fold_with(folder)?, promoted: self.promoted })
- }
-}
-
-impl<'tcx> TypeVisitable<'tcx> for interpret::GlobalId<'tcx> {
- fn visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> ControlFlow<V::BreakTy> {
- self.instance.visit_with(visitor)
- }
-}
-
impl<'tcx> TypeFoldable<'tcx> for Ty<'tcx> {
fn try_fold_with<F: FallibleTypeFolder<'tcx>>(self, folder: &mut F) -> Result<Self, F::Error> {
folder.try_fold_ty(self)
@@ -1005,9 +621,11 @@ impl<'tcx> TypeSuperFoldable<'tcx> for Ty<'tcx> {
ty::Array(typ, sz) => ty::Array(typ.try_fold_with(folder)?, sz.try_fold_with(folder)?),
ty::Slice(typ) => ty::Slice(typ.try_fold_with(folder)?),
ty::Adt(tid, substs) => ty::Adt(tid, substs.try_fold_with(folder)?),
- ty::Dynamic(trait_ty, region) => {
- ty::Dynamic(trait_ty.try_fold_with(folder)?, region.try_fold_with(folder)?)
- }
+ ty::Dynamic(trait_ty, region, representation) => ty::Dynamic(
+ trait_ty.try_fold_with(folder)?,
+ region.try_fold_with(folder)?,
+ representation,
+ ),
ty::Tuple(ts) => ty::Tuple(ts.try_fold_with(folder)?),
ty::FnDef(def_id, substs) => ty::FnDef(def_id, substs.try_fold_with(folder)?),
ty::FnPtr(f) => ty::FnPtr(f.try_fold_with(folder)?),
@@ -1051,7 +669,7 @@ impl<'tcx> TypeSuperVisitable<'tcx> for Ty<'tcx> {
}
ty::Slice(typ) => typ.visit_with(visitor),
ty::Adt(_, substs) => substs.visit_with(visitor),
- ty::Dynamic(ref trait_ty, ref reg) => {
+ ty::Dynamic(ref trait_ty, ref reg, _) => {
trait_ty.visit_with(visitor)?;
reg.visit_with(visitor)
}
@@ -1156,12 +774,6 @@ impl<'tcx> TypeFoldable<'tcx> for &'tcx ty::List<ty::Predicate<'tcx>> {
}
}
-impl<'tcx> TypeVisitable<'tcx> for &'tcx ty::List<ty::Predicate<'tcx>> {
- fn visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> ControlFlow<V::BreakTy> {
- self.iter().try_for_each(|p| p.visit_with(visitor))
- }
-}
-
impl<'tcx, T: TypeFoldable<'tcx>, I: Idx> TypeFoldable<'tcx> for IndexVec<I, T> {
fn try_fold_with<F: FallibleTypeFolder<'tcx>>(self, folder: &mut F) -> Result<Self, F::Error> {
self.try_map_id(|x| x.try_fold_with(folder))
@@ -1208,34 +820,6 @@ impl<'tcx> TypeSuperVisitable<'tcx> for ty::Const<'tcx> {
}
}
-impl<'tcx> TypeFoldable<'tcx> for ty::ConstKind<'tcx> {
- fn try_fold_with<F: FallibleTypeFolder<'tcx>>(self, folder: &mut F) -> Result<Self, F::Error> {
- Ok(match self {
- ty::ConstKind::Infer(ic) => ty::ConstKind::Infer(ic.try_fold_with(folder)?),
- ty::ConstKind::Param(p) => ty::ConstKind::Param(p.try_fold_with(folder)?),
- ty::ConstKind::Unevaluated(uv) => ty::ConstKind::Unevaluated(uv.try_fold_with(folder)?),
- ty::ConstKind::Value(_)
- | ty::ConstKind::Bound(..)
- | ty::ConstKind::Placeholder(..)
- | ty::ConstKind::Error(_) => self,
- })
- }
-}
-
-impl<'tcx> TypeVisitable<'tcx> for ty::ConstKind<'tcx> {
- fn visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> ControlFlow<V::BreakTy> {
- match *self {
- ty::ConstKind::Infer(ic) => ic.visit_with(visitor),
- ty::ConstKind::Param(p) => p.visit_with(visitor),
- ty::ConstKind::Unevaluated(uv) => uv.visit_with(visitor),
- ty::ConstKind::Value(_)
- | ty::ConstKind::Bound(..)
- | ty::ConstKind::Placeholder(_)
- | ty::ConstKind::Error(_) => ControlFlow::CONTINUE,
- }
- }
-}
-
impl<'tcx> TypeFoldable<'tcx> for InferConst<'tcx> {
fn try_fold_with<F: FallibleTypeFolder<'tcx>>(self, _folder: &mut F) -> Result<Self, F::Error> {
Ok(self)
@@ -1248,57 +832,8 @@ impl<'tcx> TypeVisitable<'tcx> for InferConst<'tcx> {
}
}
-impl<'tcx> TypeFoldable<'tcx> for ty::Unevaluated<'tcx> {
- fn try_fold_with<F: FallibleTypeFolder<'tcx>>(self, folder: &mut F) -> Result<Self, F::Error> {
- folder.try_fold_unevaluated(self)
- }
-}
-
-impl<'tcx> TypeVisitable<'tcx> for ty::Unevaluated<'tcx> {
- fn visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> ControlFlow<V::BreakTy> {
- visitor.visit_unevaluated(*self)
- }
-}
-
-impl<'tcx> TypeSuperFoldable<'tcx> for ty::Unevaluated<'tcx> {
- fn try_super_fold_with<F: FallibleTypeFolder<'tcx>>(
- self,
- folder: &mut F,
- ) -> Result<Self, F::Error> {
- Ok(ty::Unevaluated {
- def: self.def,
- substs: self.substs.try_fold_with(folder)?,
- promoted: self.promoted,
- })
- }
-}
-
-impl<'tcx> TypeSuperVisitable<'tcx> for ty::Unevaluated<'tcx> {
+impl<'tcx> TypeSuperVisitable<'tcx> for ty::UnevaluatedConst<'tcx> {
fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> ControlFlow<V::BreakTy> {
self.substs.visit_with(visitor)
}
}
-
-impl<'tcx> TypeFoldable<'tcx> for ty::Unevaluated<'tcx, ()> {
- fn try_fold_with<F: FallibleTypeFolder<'tcx>>(self, folder: &mut F) -> Result<Self, F::Error> {
- Ok(self.expand().try_fold_with(folder)?.shrink())
- }
-}
-
-impl<'tcx> TypeVisitable<'tcx> for ty::Unevaluated<'tcx, ()> {
- fn visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> ControlFlow<V::BreakTy> {
- self.expand().visit_with(visitor)
- }
-}
-
-impl<'tcx> TypeFoldable<'tcx> for hir::Constness {
- fn try_fold_with<F: FallibleTypeFolder<'tcx>>(self, _: &mut F) -> Result<Self, F::Error> {
- Ok(self)
- }
-}
-
-impl<'tcx> TypeVisitable<'tcx> for hir::Constness {
- fn visit_with<V: TypeVisitor<'tcx>>(&self, _: &mut V) -> ControlFlow<V::BreakTy> {
- ControlFlow::CONTINUE
- }
-}
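The `Lift` impls above are rewritten from `map`/`and_then` chains into `?` inside an up-front constructor. A freestanding illustration of that shape with a toy `Lift` trait (the real trait threads a `TyCtxt` and returns interned values; nothing here is rustc API):

trait Lift: Sized {
    type Lifted;
    fn lift(self) -> Option<Self::Lifted>;
}

#[derive(Debug, PartialEq)]
struct Raw(i32);
#[derive(Debug, PartialEq)]
struct Lifted(i32);

impl Lift for Raw {
    type Lifted = Lifted;
    fn lift(self) -> Option<Lifted> {
        // Pretend negative values cannot be lifted into the target arena.
        (self.0 >= 0).then(|| Lifted(self.0))
    }
}

// Old style: `tcx.lift(*self).map(Box::new)`.
// New style: build the `Some(..)` up front and let `?` short-circuit.
impl<T: Lift> Lift for Box<T> {
    type Lifted = Box<T::Lifted>;
    fn lift(self) -> Option<Box<T::Lifted>> {
        Some(Box::new((*self).lift()?))
    }
}

impl<T: Lift> Lift for Option<T> {
    type Lifted = Option<T::Lifted>;
    fn lift(self) -> Option<Option<T::Lifted>> {
        Some(match self {
            Some(x) => Some(x.lift()?),
            None => None,
        })
    }
}

fn main() {
    assert_eq!(Box::new(Raw(3)).lift(), Some(Box::new(Lifted(3))));
    assert_eq!(Some(Raw(-1)).lift(), None);
    assert_eq!(None::<Raw>.lift(), Some(None));
}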
diff --git a/compiler/rustc_middle/src/ty/sty.rs b/compiler/rustc_middle/src/ty/sty.rs
index 52c3a3886..cf420bafe 100644
--- a/compiler/rustc_middle/src/ty/sty.rs
+++ b/compiler/rustc_middle/src/ty/sty.rs
@@ -3,7 +3,7 @@
#![allow(rustc::usage_of_ty_tykind)]
use crate::infer::canonical::Canonical;
-use crate::ty::subst::{GenericArg, InternalSubsts, Subst, SubstsRef};
+use crate::ty::subst::{GenericArg, InternalSubsts, SubstsRef};
use crate::ty::visit::ValidateBoundVars;
use crate::ty::InferTy::*;
use crate::ty::{
@@ -11,6 +11,7 @@ use crate::ty::{
TypeVisitor,
};
use crate::ty::{List, ParamEnv};
+use hir::def::DefKind;
use polonius_engine::Atom;
use rustc_data_structures::captures::Captures;
use rustc_data_structures::intern::Interned;
@@ -18,7 +19,7 @@ use rustc_hir as hir;
use rustc_hir::def_id::DefId;
use rustc_index::vec::Idx;
use rustc_macros::HashStable;
-use rustc_span::symbol::{kw, Symbol};
+use rustc_span::symbol::{kw, sym, Symbol};
use rustc_target::abi::VariantIdx;
use rustc_target::spec::abi;
use std::borrow::Cow;
@@ -84,6 +85,17 @@ impl BoundRegionKind {
_ => false,
}
}
+
+ pub fn get_name(&self) -> Option<Symbol> {
+ if self.is_named() {
+ match *self {
+ BoundRegionKind::BrNamed(_, name) => return Some(name),
+ _ => unreachable!(),
+ }
+ }
+
+ None
+ }
}
pub trait Article {
@@ -201,7 +213,7 @@ static_assert_size!(TyKind<'_>, 32);
/// * `GR`: The "return type", which is the type of value returned upon
/// completion of the generator.
/// * `GW`: The "generator witness".
-#[derive(Copy, Clone, Debug, TypeFoldable, TypeVisitable)]
+#[derive(Copy, Clone, Debug, TypeFoldable, TypeVisitable, Lift)]
pub struct ClosureSubsts<'tcx> {
/// Lifetime and type parameters from the enclosing function,
/// concatenated with a tuple containing the types of the upvars.
@@ -303,7 +315,7 @@ impl<'tcx> ClosureSubsts<'tcx> {
/// closure.
// FIXME(eddyb) this should be unnecessary, as the shallowly resolved
// type is known at the time of the creation of `ClosureSubsts`,
- // see `rustc_typeck::check::closure`.
+ // see `rustc_hir_analysis::check::closure`.
pub fn sig_as_fn_ptr_ty(self) -> Ty<'tcx> {
self.split().closure_sig_as_fn_ptr_ty.expect_ty()
}
@@ -325,10 +337,14 @@ impl<'tcx> ClosureSubsts<'tcx> {
_ => bug!("closure_sig_as_fn_ptr_ty is not a fn-ptr: {:?}", ty.kind()),
}
}
+
+ pub fn print_as_impl_trait(self) -> ty::print::PrintClosureAsImpl<'tcx> {
+ ty::print::PrintClosureAsImpl { closure: self }
+ }
}
/// Similar to `ClosureSubsts`; see the above documentation for more.
-#[derive(Copy, Clone, Debug, TypeFoldable, TypeVisitable)]
+#[derive(Copy, Clone, Debug, TypeFoldable, TypeVisitable, Lift)]
pub struct GeneratorSubsts<'tcx> {
pub substs: SubstsRef<'tcx>,
}
@@ -546,7 +562,7 @@ impl<'tcx> GeneratorSubsts<'tcx> {
layout.variant_fields.iter().map(move |variant| {
variant
.iter()
- .map(move |field| EarlyBinder(layout.field_tys[*field]).subst(tcx, self.substs))
+ .map(move |field| ty::EarlyBinder(layout.field_tys[*field]).subst(tcx, self.substs))
})
}
@@ -655,7 +671,7 @@ impl<'tcx> InlineConstSubsts<'tcx> {
}
#[derive(Debug, Copy, Clone, PartialEq, PartialOrd, Ord, Eq, Hash, TyEncodable, TyDecodable)]
-#[derive(HashStable, TypeFoldable, TypeVisitable)]
+#[derive(HashStable, TypeFoldable, TypeVisitable, Lift)]
pub enum ExistentialPredicate<'tcx> {
/// E.g., `Iterator`.
Trait(ExistentialTraitRef<'tcx>),
@@ -687,6 +703,9 @@ impl<'tcx> ExistentialPredicate<'tcx> {
}
impl<'tcx> Binder<'tcx, ExistentialPredicate<'tcx>> {
+ /// Given an existential predicate like `?Self: PartialEq<u32>` (e.g., derived from `dyn PartialEq<u32>`),
+ /// and a concrete type `self_ty`, returns a full predicate where the existentially quantified variable `?Self`
+ /// has been replaced with `self_ty` (e.g., `self_ty: PartialEq<u32>`, in our example).
pub fn with_self_ty(&self, tcx: TyCtxt<'tcx>, self_ty: Ty<'tcx>) -> ty::Predicate<'tcx> {
use crate::ty::ToPredicate;
match self.skip_binder() {
@@ -781,7 +800,7 @@ impl<'tcx> List<ty::Binder<'tcx, ExistentialPredicate<'tcx>>> {
/// Trait references also appear in object types like `Foo<U>`, but in
/// that case the `Self` parameter is absent from the substitutions.
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, TyEncodable, TyDecodable)]
-#[derive(HashStable, TypeFoldable, TypeVisitable)]
+#[derive(HashStable, TypeFoldable, TypeVisitable, Lift)]
pub struct TraitRef<'tcx> {
pub def_id: DefId,
pub substs: SubstsRef<'tcx>,
@@ -845,6 +864,12 @@ impl<'tcx> PolyTraitRef<'tcx> {
}
}
+impl rustc_errors::IntoDiagnosticArg for PolyTraitRef<'_> {
+ fn into_diagnostic_arg(self) -> rustc_errors::DiagnosticArgValue<'static> {
+ self.to_string().into_diagnostic_arg()
+ }
+}
+
/// An existential reference to a trait, where `Self` is erased.
/// For example, the trait object `Trait<'a, 'b, X, Y>` is:
/// ```ignore (illustrative)
@@ -853,7 +878,7 @@ impl<'tcx> PolyTraitRef<'tcx> {
/// The substitutions don't include the erased `Self`, only trait
/// type and lifetime parameters (`[X, Y]` and `['a, 'b]` above).
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, TyEncodable, TyDecodable)]
-#[derive(HashStable, TypeFoldable, TypeVisitable)]
+#[derive(HashStable, TypeFoldable, TypeVisitable, Lift)]
pub struct ExistentialTraitRef<'tcx> {
pub def_id: DefId,
pub substs: SubstsRef<'tcx>,
@@ -901,73 +926,6 @@ impl<'tcx> PolyExistentialTraitRef<'tcx> {
}
}
-#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
-#[derive(Encodable, Decodable, HashStable)]
-pub struct EarlyBinder<T>(pub T);
-
-impl<T> EarlyBinder<T> {
- pub fn as_ref(&self) -> EarlyBinder<&T> {
- EarlyBinder(&self.0)
- }
-
- pub fn map_bound_ref<F, U>(&self, f: F) -> EarlyBinder<U>
- where
- F: FnOnce(&T) -> U,
- {
- self.as_ref().map_bound(f)
- }
-
- pub fn map_bound<F, U>(self, f: F) -> EarlyBinder<U>
- where
- F: FnOnce(T) -> U,
- {
- let value = f(self.0);
- EarlyBinder(value)
- }
-
- pub fn try_map_bound<F, U, E>(self, f: F) -> Result<EarlyBinder<U>, E>
- where
- F: FnOnce(T) -> Result<U, E>,
- {
- let value = f(self.0)?;
- Ok(EarlyBinder(value))
- }
-
- pub fn rebind<U>(&self, value: U) -> EarlyBinder<U> {
- EarlyBinder(value)
- }
-}
-
-impl<T> EarlyBinder<Option<T>> {
- pub fn transpose(self) -> Option<EarlyBinder<T>> {
- self.0.map(|v| EarlyBinder(v))
- }
-}
-
-impl<T, U> EarlyBinder<(T, U)> {
- pub fn transpose_tuple2(self) -> (EarlyBinder<T>, EarlyBinder<U>) {
- (EarlyBinder(self.0.0), EarlyBinder(self.0.1))
- }
-}
-
-pub struct EarlyBinderIter<T> {
- t: T,
-}
-
-impl<T: IntoIterator> EarlyBinder<T> {
- pub fn transpose_iter(self) -> EarlyBinderIter<T::IntoIter> {
- EarlyBinderIter { t: self.0.into_iter() }
- }
-}
-
-impl<T: Iterator> Iterator for EarlyBinderIter<T> {
- type Item = EarlyBinder<T::Item>;
-
- fn next(&mut self) -> Option<Self::Item> {
- self.t.next().map(|i| EarlyBinder(i))
- }
-}
-
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, TyEncodable, TyDecodable)]
#[derive(HashStable)]
pub enum BoundVariableKind {
@@ -1009,7 +967,7 @@ impl BoundVariableKind {
///
/// `Decodable` and `Encodable` are implemented for `Binder<T>` using the `impl_binder_encode_decode!` macro.
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
-#[derive(HashStable)]
+#[derive(HashStable, Lift)]
pub struct Binder<'tcx, T>(T, &'tcx List<BoundVariableKind>);
impl<'tcx, T> Binder<'tcx, T>
@@ -1171,7 +1129,7 @@ impl<'tcx, T> Binder<'tcx, Option<T>> {
/// Represents the projection of an associated type. In explicit UFCS
/// form this would be written `<T as Trait<..>>::N`.
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, TyEncodable, TyDecodable)]
-#[derive(HashStable, TypeFoldable, TypeVisitable)]
+#[derive(HashStable, TypeFoldable, TypeVisitable, Lift)]
pub struct ProjectionTy<'tcx> {
/// The parameters of the associated item.
pub substs: SubstsRef<'tcx>,
@@ -1186,7 +1144,13 @@ pub struct ProjectionTy<'tcx> {
impl<'tcx> ProjectionTy<'tcx> {
pub fn trait_def_id(&self, tcx: TyCtxt<'tcx>) -> DefId {
- tcx.parent(self.item_def_id)
+ match tcx.def_kind(self.item_def_id) {
+ DefKind::AssocTy | DefKind::AssocConst => tcx.parent(self.item_def_id),
+ DefKind::ImplTraitPlaceholder => {
+ tcx.parent(tcx.impl_trait_in_trait_parent(self.item_def_id))
+ }
+ kind => bug!("unexpected DefKind in ProjectionTy: {kind:?}"),
+ }
}
/// Extracts the underlying trait reference and own substs from this projection.
@@ -1197,6 +1161,7 @@ impl<'tcx> ProjectionTy<'tcx> {
tcx: TyCtxt<'tcx>,
) -> (ty::TraitRef<'tcx>, &'tcx [ty::GenericArg<'tcx>]) {
let def_id = tcx.parent(self.item_def_id);
+ assert_eq!(tcx.def_kind(def_id), DefKind::Trait);
let trait_generics = tcx.generics_of(def_id);
(
ty::TraitRef { def_id, substs: self.substs.truncate_to(tcx, trait_generics) },
@@ -1221,7 +1186,7 @@ impl<'tcx> ProjectionTy<'tcx> {
}
}
-#[derive(Copy, Clone, Debug, TypeFoldable, TypeVisitable)]
+#[derive(Copy, Clone, Debug, TypeFoldable, TypeVisitable, Lift)]
pub struct GenSig<'tcx> {
pub resume_ty: Ty<'tcx>,
pub yield_ty: Ty<'tcx>,
@@ -1237,7 +1202,7 @@ pub type PolyGenSig<'tcx> = Binder<'tcx, GenSig<'tcx>>;
/// - `output`: is the return type.
/// - `c_variadic`: indicates whether this is a C-variadic function.
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, TyEncodable, TyDecodable)]
-#[derive(HashStable, TypeFoldable, TypeVisitable)]
+#[derive(HashStable, TypeFoldable, TypeVisitable, Lift)]
pub struct FnSig<'tcx> {
pub inputs_and_output: &'tcx List<Ty<'tcx>>,
pub c_variadic: bool,
@@ -1419,7 +1384,7 @@ impl From<BoundVar> for BoundTy {
/// A `ProjectionPredicate` for an `ExistentialTraitRef`.
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, TyEncodable, TyDecodable)]
-#[derive(HashStable, TypeFoldable, TypeVisitable)]
+#[derive(HashStable, TypeFoldable, TypeVisitable, Lift)]
pub struct ExistentialProjection<'tcx> {
pub item_def_id: DefId,
pub substs: SubstsRef<'tcx>,
@@ -1492,6 +1457,23 @@ impl<'tcx> Region<'tcx> {
*self.0.0
}
+ pub fn get_name(self) -> Option<Symbol> {
+ if self.has_name() {
+ let name = match *self {
+ ty::ReEarlyBound(ebr) => Some(ebr.name),
+ ty::ReLateBound(_, br) => br.kind.get_name(),
+ ty::ReFree(fr) => fr.bound_region.get_name(),
+ ty::ReStatic => Some(kw::StaticLifetime),
+ ty::RePlaceholder(placeholder) => placeholder.name.get_name(),
+ _ => None,
+ };
+
+ return name;
+ }
+
+ None
+ }
+
/// Is this region named by the user?
pub fn has_name(self) -> bool {
match *self {
@@ -1501,7 +1483,6 @@ impl<'tcx> Region<'tcx> {
ty::ReStatic => true,
ty::ReVar(..) => false,
ty::RePlaceholder(placeholder) => placeholder.name.is_named(),
- ty::ReEmpty(_) => false,
ty::ReErased => false,
}
}
@@ -1527,11 +1508,6 @@ impl<'tcx> Region<'tcx> {
}
#[inline]
- pub fn is_empty(self) -> bool {
- matches!(*self, ty::ReEmpty(..))
- }
-
- #[inline]
pub fn bound_at_or_above_binder(self, index: ty::DebruijnIndex) -> bool {
match *self {
ty::ReLateBound(debruijn, _) => debruijn >= index,
@@ -1562,7 +1538,7 @@ impl<'tcx> Region<'tcx> {
flags = flags | TypeFlags::HAS_FREE_REGIONS;
flags = flags | TypeFlags::HAS_FREE_LOCAL_REGIONS;
}
- ty::ReEmpty(_) | ty::ReStatic => {
+ ty::ReStatic => {
flags = flags | TypeFlags::HAS_FREE_REGIONS;
}
ty::ReLateBound(..) => {
@@ -1617,6 +1593,10 @@ impl<'tcx> Region<'tcx> {
_ => self.is_free(),
}
}
+
+ pub fn is_var(self) -> bool {
+ matches!(self.kind(), ty::ReVar(_))
+ }
}
/// Type utilities
@@ -1838,7 +1818,12 @@ impl<'tcx> Ty<'tcx> {
#[inline]
pub fn is_trait(self) -> bool {
- matches!(self.kind(), Dynamic(..))
+ matches!(self.kind(), Dynamic(_, _, ty::Dyn))
+ }
+
+ #[inline]
+ pub fn is_dyn_star(self) -> bool {
+ matches!(self.kind(), Dynamic(_, _, ty::DynStar))
}
#[inline]
@@ -2137,7 +2122,7 @@ impl<'tcx> Ty<'tcx> {
///
/// Note that during type checking, we use an inference variable
/// to represent the closure kind, because it has not yet been
- /// inferred. Once upvar inference (in `rustc_typeck/src/check/upvar.rs`)
+ /// inferred. Once upvar inference (in `rustc_hir_analysis/src/check/upvar.rs`)
/// is complete, that type variable will be unified.
pub fn to_opt_closure_kind(self) -> Option<ty::ClosureKind> {
match self.kind() {
@@ -2220,7 +2205,10 @@ impl<'tcx> Ty<'tcx> {
// These aren't even `Clone`
ty::Str | ty::Slice(..) | ty::Foreign(..) | ty::Dynamic(..) => false,
- ty::Int(..) | ty::Uint(..) | ty::Float(..) => true,
+ ty::Infer(ty::InferTy::FloatVar(_) | ty::InferTy::IntVar(_))
+ | ty::Int(..)
+ | ty::Uint(..)
+ | ty::Float(..) => true,
// The voldemort ZSTs are fine.
ty::FnDef(..) => true,
@@ -2255,6 +2243,35 @@ impl<'tcx> Ty<'tcx> {
}
}
}
+
+ // If `self` is a primitive, return its [`Symbol`].
+ pub fn primitive_symbol(self) -> Option<Symbol> {
+ match self.kind() {
+ ty::Bool => Some(sym::bool),
+ ty::Char => Some(sym::char),
+ ty::Float(f) => match f {
+ ty::FloatTy::F32 => Some(sym::f32),
+ ty::FloatTy::F64 => Some(sym::f64),
+ },
+ ty::Int(f) => match f {
+ ty::IntTy::Isize => Some(sym::isize),
+ ty::IntTy::I8 => Some(sym::i8),
+ ty::IntTy::I16 => Some(sym::i16),
+ ty::IntTy::I32 => Some(sym::i32),
+ ty::IntTy::I64 => Some(sym::i64),
+ ty::IntTy::I128 => Some(sym::i128),
+ },
+ ty::Uint(f) => match f {
+ ty::UintTy::Usize => Some(sym::usize),
+ ty::UintTy::U8 => Some(sym::u8),
+ ty::UintTy::U16 => Some(sym::u16),
+ ty::UintTy::U32 => Some(sym::u32),
+ ty::UintTy::U64 => Some(sym::u64),
+ ty::UintTy::U128 => Some(sym::u128),
+ },
+ _ => None,
+ }
+ }
}
/// Extra information about why we ended up with a particular variance.
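`Region::get_name` and `BoundRegionKind::get_name`, added in the hunks above, are what the new `RegionNameCollector` earlier in this patch leans on to gather every lifetime name already in scope. A reduced model with stand-in enums; the variant set and the `&'static str` names are illustrative only, not the rustc definitions.

#[derive(Clone, Copy)]
enum BoundRegionKind {
    Anon,
    Named(&'static str),
    Env,
}

impl BoundRegionKind {
    fn is_named(&self) -> bool {
        matches!(self, BoundRegionKind::Named(name) if *name != "'_")
    }

    fn get_name(&self) -> Option<&'static str> {
        if self.is_named() {
            match *self {
                BoundRegionKind::Named(name) => return Some(name),
                _ => unreachable!(),
            }
        }
        None
    }
}

#[derive(Clone, Copy)]
enum Region {
    Static,
    LateBound(BoundRegionKind),
    Var(u32),
}

impl Region {
    fn get_name(self) -> Option<&'static str> {
        match self {
            Region::Static => Some("'static"),
            Region::LateBound(br) => br.get_name(),
            Region::Var(_) => None,
        }
    }
}

fn main() {
    // Only named regions contribute to the "used names" set.
    let used: Vec<_> = [
        Region::LateBound(BoundRegionKind::Named("'a")),
        Region::LateBound(BoundRegionKind::Anon),
        Region::LateBound(BoundRegionKind::Env),
        Region::Static,
        Region::Var(0),
    ]
    .iter()
    .filter_map(|r| r.get_name())
    .collect();
    assert_eq!(used, ["'a", "'static"]);
}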
diff --git a/compiler/rustc_middle/src/ty/subst.rs b/compiler/rustc_middle/src/ty/subst.rs
index 6262aa180..0660e9b79 100644
--- a/compiler/rustc_middle/src/ty/subst.rs
+++ b/compiler/rustc_middle/src/ty/subst.rs
@@ -1,12 +1,12 @@
// Type substitutions.
-use crate::mir;
use crate::ty::codec::{TyDecoder, TyEncoder};
use crate::ty::fold::{FallibleTypeFolder, TypeFoldable, TypeFolder, TypeSuperFoldable};
use crate::ty::sty::{ClosureSubsts, GeneratorSubsts, InlineConstSubsts};
use crate::ty::visit::{TypeVisitable, TypeVisitor};
use crate::ty::{self, Lift, List, ParamConst, Ty, TyCtxt};
+use rustc_data_structures::captures::Captures;
use rustc_data_structures::intern::{Interned, WithStableHash};
use rustc_hir::def_id::DefId;
use rustc_macros::HashStable;
@@ -189,6 +189,14 @@ impl<'tcx> GenericArg<'tcx> {
_ => bug!("expected a const, but found another kind"),
}
}
+
+ pub fn is_non_region_infer(self) -> bool {
+ match self.unpack() {
+ GenericArgKind::Lifetime(_) => false,
+ GenericArgKind::Type(ty) => ty.is_ty_infer(),
+ GenericArgKind::Const(ct) => ct.is_ct_infer(),
+ }
+ }
}
impl<'a, 'tcx> Lift<'tcx> for GenericArg<'a> {
@@ -459,12 +467,6 @@ impl<'tcx> TypeFoldable<'tcx> for SubstsRef<'tcx> {
}
}
-impl<'tcx> TypeVisitable<'tcx> for SubstsRef<'tcx> {
- fn visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> ControlFlow<V::BreakTy> {
- self.iter().try_for_each(|t| t.visit_with(visitor))
- }
-}
-
impl<'tcx> TypeFoldable<'tcx> for &'tcx ty::List<Ty<'tcx>> {
fn try_fold_with<F: FallibleTypeFolder<'tcx>>(self, folder: &mut F) -> Result<Self, F::Error> {
// This code is fairly hot, though not as hot as `SubstsRef`.
@@ -497,24 +499,108 @@ impl<'tcx> TypeFoldable<'tcx> for &'tcx ty::List<Ty<'tcx>> {
}
}
-impl<'tcx> TypeVisitable<'tcx> for &'tcx ty::List<Ty<'tcx>> {
+impl<'tcx, T: TypeVisitable<'tcx>> TypeVisitable<'tcx> for &'tcx ty::List<T> {
+ #[inline]
fn visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> ControlFlow<V::BreakTy> {
self.iter().try_for_each(|t| t.visit_with(visitor))
}
}
-// Just call `foo.subst(tcx, substs)` to perform a substitution across `foo`.
-#[rustc_on_unimplemented(message = "Calling `subst` must now be done through an `EarlyBinder`")]
-pub trait Subst<'tcx>: Sized {
- type Inner;
+#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
+#[derive(Encodable, Decodable, HashStable)]
+pub struct EarlyBinder<T>(pub T);
+
+/// For early binders, you should first call `subst` before using any visitors.
+impl<'tcx, T> !TypeFoldable<'tcx> for ty::EarlyBinder<T> {}
+impl<'tcx, T> !TypeVisitable<'tcx> for ty::EarlyBinder<T> {}
+
+impl<T> EarlyBinder<T> {
+ pub fn as_ref(&self) -> EarlyBinder<&T> {
+ EarlyBinder(&self.0)
+ }
+
+ pub fn map_bound_ref<F, U>(&self, f: F) -> EarlyBinder<U>
+ where
+ F: FnOnce(&T) -> U,
+ {
+ self.as_ref().map_bound(f)
+ }
+
+ pub fn map_bound<F, U>(self, f: F) -> EarlyBinder<U>
+ where
+ F: FnOnce(T) -> U,
+ {
+ let value = f(self.0);
+ EarlyBinder(value)
+ }
+
+ pub fn try_map_bound<F, U, E>(self, f: F) -> Result<EarlyBinder<U>, E>
+ where
+ F: FnOnce(T) -> Result<U, E>,
+ {
+ let value = f(self.0)?;
+ Ok(EarlyBinder(value))
+ }
+
+ pub fn rebind<U>(&self, value: U) -> EarlyBinder<U> {
+ EarlyBinder(value)
+ }
+}
+
+impl<T> EarlyBinder<Option<T>> {
+ pub fn transpose(self) -> Option<EarlyBinder<T>> {
+ self.0.map(|v| EarlyBinder(v))
+ }
+}
+
+impl<T, U> EarlyBinder<(T, U)> {
+ pub fn transpose_tuple2(self) -> (EarlyBinder<T>, EarlyBinder<U>) {
+ (EarlyBinder(self.0.0), EarlyBinder(self.0.1))
+ }
+}
- fn subst(self, tcx: TyCtxt<'tcx>, substs: &[GenericArg<'tcx>]) -> Self::Inner;
+impl<'tcx, 's, T: IntoIterator<Item = I>, I: TypeFoldable<'tcx>> EarlyBinder<T> {
+ pub fn subst_iter(
+ self,
+ tcx: TyCtxt<'tcx>,
+ substs: &'s [GenericArg<'tcx>],
+ ) -> impl Iterator<Item = I> + Captures<'s> + Captures<'tcx> {
+ self.0.into_iter().map(move |t| EarlyBinder(t).subst(tcx, substs))
+ }
}
-impl<'tcx, T: TypeFoldable<'tcx>> Subst<'tcx> for ty::EarlyBinder<T> {
- type Inner = T;
+impl<'tcx, 's, 'a, T: IntoIterator<Item = &'a I>, I: Copy + TypeFoldable<'tcx> + 'a>
+ EarlyBinder<T>
+{
+ pub fn subst_iter_copied(
+ self,
+ tcx: TyCtxt<'tcx>,
+ substs: &'s [GenericArg<'tcx>],
+ ) -> impl Iterator<Item = I> + Captures<'s> + Captures<'tcx> + Captures<'a> {
+ self.0.into_iter().map(move |t| EarlyBinder(*t).subst(tcx, substs))
+ }
+}
+
+pub struct EarlyBinderIter<T> {
+ t: T,
+}
+
+impl<T: IntoIterator> EarlyBinder<T> {
+ pub fn transpose_iter(self) -> EarlyBinderIter<T::IntoIter> {
+ EarlyBinderIter { t: self.0.into_iter() }
+ }
+}
- fn subst(self, tcx: TyCtxt<'tcx>, substs: &[GenericArg<'tcx>]) -> Self::Inner {
+impl<T: Iterator> Iterator for EarlyBinderIter<T> {
+ type Item = EarlyBinder<T::Item>;
+
+ fn next(&mut self) -> Option<Self::Item> {
+ self.t.next().map(|i| EarlyBinder(i))
+ }
+}
+
+impl<'tcx, T: TypeFoldable<'tcx>> ty::EarlyBinder<T> {
+ pub fn subst(self, tcx: TyCtxt<'tcx>, substs: &[GenericArg<'tcx>]) -> T {
let mut folder = SubstFolder { tcx, substs, binders_passed: 0 };
self.0.fold_with(&mut folder)
}
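A minimal standalone sketch of the `EarlyBinder` pattern introduced above (illustrative stand-ins, not rustc APIs): a wrapper that withholds its contents until the parameters are explicitly substituted, and that reports an out-of-range parameter much like the `region_param_out_of_range` helper updated in the next hunk.

```rust
// Sketch only: `EarlyBound` and `Term` stand in for `EarlyBinder` and the folded value.
#[derive(Debug, Clone)]
enum Term {
    Int(i64),
    // An early-bound parameter, identified by its index into the substitution
    // list, much as `ParamTy::index` indexes into `SubstsRef`.
    Param(usize),
    Add(Box<Term>, Box<Term>),
}

struct EarlyBound<T>(T);

impl EarlyBound<Term> {
    // Analogous to `EarlyBinder::subst`: consume the binder and replace every
    // parameter with the corresponding argument.
    fn subst(self, args: &[Term]) -> Term {
        fn fold(t: Term, args: &[Term]) -> Term {
            match t {
                Term::Param(i) => args
                    .get(i)
                    .cloned()
                    .unwrap_or_else(|| panic!("parameter {i} out of range, args = {args:?}")),
                Term::Add(a, b) => Term::Add(Box::new(fold(*a, args)), Box::new(fold(*b, args))),
                leaf => leaf,
            }
        }
        fold(self.0, args)
    }
}

fn main() {
    // "P0 + 1", unusable until instantiated.
    let bound = EarlyBound(Term::Add(Box::new(Term::Param(0)), Box::new(Term::Int(1))));
    println!("{:?}", bound.subst(&[Term::Int(41)])); // Add(Int(41), Int(1))
}
```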
@@ -550,9 +636,21 @@ impl<'a, 'tcx> TypeFolder<'tcx> for SubstFolder<'a, 'tcx> {
fn fold_region(&mut self, r: ty::Region<'tcx>) -> ty::Region<'tcx> {
#[cold]
#[inline(never)]
- fn region_param_out_of_range(data: ty::EarlyBoundRegion) -> ! {
+ fn region_param_out_of_range(data: ty::EarlyBoundRegion, substs: &[GenericArg<'_>]) -> ! {
+ bug!(
+ "Region parameter out of range when substituting in region {} (index={}, substs = {:?})",
+ data.name,
+ data.index,
+ substs,
+ )
+ }
+
+ #[cold]
+ #[inline(never)]
+ fn region_param_invalid(data: ty::EarlyBoundRegion, other: GenericArgKind<'_>) -> ! {
bug!(
- "Region parameter out of range when substituting in region {} (index={})",
+ "Unexpected parameter {:?} when substituting in region {} (index={})",
+ other,
data.name,
data.index
)
@@ -568,7 +666,8 @@ impl<'a, 'tcx> TypeFolder<'tcx> for SubstFolder<'a, 'tcx> {
let rk = self.substs.get(data.index as usize).map(|k| k.unpack());
match rk {
Some(GenericArgKind::Lifetime(lt)) => self.shift_region_through_binders(lt),
- _ => region_param_out_of_range(data),
+ Some(other) => region_param_invalid(data, other),
+ None => region_param_out_of_range(data, self.substs),
}
}
_ => r,
@@ -593,11 +692,6 @@ impl<'a, 'tcx> TypeFolder<'tcx> for SubstFolder<'a, 'tcx> {
c.super_fold_with(self)
}
}
-
- #[inline]
- fn fold_mir_const(&mut self, c: mir::ConstantKind<'tcx>) -> mir::ConstantKind<'tcx> {
- c.super_fold_with(self)
- }
}
impl<'a, 'tcx> SubstFolder<'a, 'tcx> {
diff --git a/compiler/rustc_middle/src/ty/trait_def.rs b/compiler/rustc_middle/src/ty/trait_def.rs
index 541dace5c..ac79949fc 100644
--- a/compiler/rustc_middle/src/ty/trait_def.rs
+++ b/compiler/rustc_middle/src/ty/trait_def.rs
@@ -256,7 +256,6 @@ pub(super) fn trait_impls_of_provider(tcx: TyCtxt<'_>, trait_id: DefId) -> Trait
}
// Query provider for `incoherent_impls`.
-#[instrument(level = "debug", skip(tcx))]
pub(super) fn incoherent_impls_provider(tcx: TyCtxt<'_>, simp: SimplifiedType) -> &[DefId] {
let mut impls = Vec::new();
diff --git a/compiler/rustc_middle/src/ty/util.rs b/compiler/rustc_middle/src/ty/util.rs
index 591bb7831..f72e236ed 100644
--- a/compiler/rustc_middle/src/ty/util.rs
+++ b/compiler/rustc_middle/src/ty/util.rs
@@ -2,12 +2,11 @@
use crate::middle::codegen_fn_attrs::CodegenFnAttrFlags;
use crate::ty::layout::IntegerExt;
-use crate::ty::query::TyCtxtAt;
-use crate::ty::subst::{GenericArgKind, Subst, SubstsRef};
use crate::ty::{
self, DefIdTree, FallibleTypeFolder, Ty, TyCtxt, TypeFoldable, TypeFolder, TypeSuperFoldable,
TypeVisitable,
};
+use crate::ty::{GenericArgKind, SubstsRef};
use rustc_apfloat::Float as _;
use rustc_ast as ast;
use rustc_attr::{self as attr, SignedInt, UnsignedInt};
@@ -627,7 +626,7 @@ impl<'tcx> TyCtxt<'tcx> {
}
/// Expands the given impl trait type, stopping if the type is recursive.
- #[instrument(skip(self), level = "debug")]
+ #[instrument(skip(self), level = "debug", ret)]
pub fn try_expand_impl_trait_type(
self,
def_id: DefId,
@@ -644,7 +643,6 @@ impl<'tcx> TyCtxt<'tcx> {
};
let expanded_type = visitor.expand_opaque_ty(def_id, substs).unwrap();
- trace!(?expanded_type);
if visitor.found_recursion { Err(expanded_type) } else { Ok(expanded_type) }
}
@@ -652,6 +650,13 @@ impl<'tcx> TyCtxt<'tcx> {
ty::EarlyBinder(self.type_of(def_id))
}
+ pub fn bound_trait_impl_trait_tys(
+ self,
+ def_id: DefId,
+ ) -> ty::EarlyBinder<Result<&'tcx FxHashMap<DefId, Ty<'tcx>>, ErrorGuaranteed>> {
+ ty::EarlyBinder(self.collect_trait_impl_trait_tys(def_id))
+ }
+
pub fn bound_fn_sig(self, def_id: DefId) -> ty::EarlyBinder<ty::PolyFnSig<'tcx>> {
ty::EarlyBinder(self.fn_sig(def_id))
}
@@ -815,12 +820,8 @@ impl<'tcx> Ty<'tcx> {
/// does copies even when the type actually doesn't satisfy the
/// full requirements for the `Copy` trait (cc #29149) -- this
/// winds up being reported as an error during NLL borrow check.
- pub fn is_copy_modulo_regions(
- self,
- tcx_at: TyCtxtAt<'tcx>,
- param_env: ty::ParamEnv<'tcx>,
- ) -> bool {
- self.is_trivially_pure_clone_copy() || tcx_at.is_copy_raw(param_env.and(self))
+ pub fn is_copy_modulo_regions(self, tcx: TyCtxt<'tcx>, param_env: ty::ParamEnv<'tcx>) -> bool {
+ self.is_trivially_pure_clone_copy() || tcx.is_copy_raw(param_env.and(self))
}
/// Checks whether values of this type `T` have a size known at
@@ -829,8 +830,8 @@ impl<'tcx> Ty<'tcx> {
/// over-approximation in generic contexts, where one can have
/// strange rules like `<T as Foo<'static>>::Bar: Sized` that
/// actually carry lifetime requirements.
- pub fn is_sized(self, tcx_at: TyCtxtAt<'tcx>, param_env: ty::ParamEnv<'tcx>) -> bool {
- self.is_trivially_sized(tcx_at.tcx) || tcx_at.is_sized_raw(param_env.and(self))
+ pub fn is_sized(self, tcx: TyCtxt<'tcx>, param_env: ty::ParamEnv<'tcx>) -> bool {
+ self.is_trivially_sized(tcx) || tcx.is_sized_raw(param_env.and(self))
}
/// Checks whether values of this type `T` implement the `Freeze`
@@ -840,8 +841,8 @@ impl<'tcx> Ty<'tcx> {
/// optimization as well as the rules around static values. Note
/// that the `Freeze` trait is not exposed to end users and is
/// effectively an implementation detail.
- pub fn is_freeze(self, tcx_at: TyCtxtAt<'tcx>, param_env: ty::ParamEnv<'tcx>) -> bool {
- self.is_trivially_freeze() || tcx_at.is_freeze_raw(param_env.and(self))
+ pub fn is_freeze(self, tcx: TyCtxt<'tcx>, param_env: ty::ParamEnv<'tcx>) -> bool {
+ self.is_trivially_freeze() || tcx.is_freeze_raw(param_env.and(self))
}
/// Fast path helper for testing if a type is `Freeze`.
@@ -880,8 +881,8 @@ impl<'tcx> Ty<'tcx> {
}
/// Checks whether values of this type `T` implement the `Unpin` trait.
- pub fn is_unpin(self, tcx_at: TyCtxtAt<'tcx>, param_env: ty::ParamEnv<'tcx>) -> bool {
- self.is_trivially_unpin() || tcx_at.is_unpin_raw(param_env.and(self))
+ pub fn is_unpin(self, tcx: TyCtxt<'tcx>, param_env: ty::ParamEnv<'tcx>) -> bool {
+ self.is_trivially_unpin() || tcx.is_unpin_raw(param_env.and(self))
}
/// Fast path helper for testing if a type is `Unpin`.
@@ -952,7 +953,7 @@ impl<'tcx> Ty<'tcx> {
}
}
- /// Checks if `ty` has has a significant drop.
+ /// Checks if `ty` has a significant drop.
///
/// Note that this method can return false even if `ty` has a destructor
/// attached; even if that is the case then the adt has been marked with
@@ -1283,12 +1284,24 @@ pub fn is_doc_hidden(tcx: TyCtxt<'_>, def_id: DefId) -> bool {
.any(|items| items.iter().any(|item| item.has_name(sym::hidden)))
}
+/// Determines whether an item is annotated with `doc(notable_trait)`.
+pub fn is_doc_notable_trait(tcx: TyCtxt<'_>, def_id: DefId) -> bool {
+ tcx.get_attrs(def_id, sym::doc)
+ .filter_map(|attr| attr.meta_item_list())
+ .any(|items| items.iter().any(|item| item.has_name(sym::notable_trait)))
+}
+
/// Determines whether an item is an intrinsic by Abi.
pub fn is_intrinsic(tcx: TyCtxt<'_>, def_id: DefId) -> bool {
matches!(tcx.fn_sig(def_id).abi(), Abi::RustIntrinsic | Abi::PlatformIntrinsic)
}
pub fn provide(providers: &mut ty::query::Providers) {
- *providers =
- ty::query::Providers { normalize_opaque_types, is_doc_hidden, is_intrinsic, ..*providers }
+ *providers = ty::query::Providers {
+ normalize_opaque_types,
+ is_doc_hidden,
+ is_doc_notable_trait,
+ is_intrinsic,
+ ..*providers
+ }
}
diff --git a/compiler/rustc_middle/src/ty/visit.rs b/compiler/rustc_middle/src/ty/visit.rs
index 536506720..c09f71f9a 100644
--- a/compiler/rustc_middle/src/ty/visit.rs
+++ b/compiler/rustc_middle/src/ty/visit.rs
@@ -10,8 +10,7 @@
//!
//! There are three groups of traits involved in each traversal.
//! - `TypeVisitable`. This is implemented once for many types, including:
-//! - Types of interest, for which the the methods delegate to the
-//! visitor.
+//! - Types of interest, for which the methods delegate to the visitor.
//! - All other types, including generic containers like `Vec` and `Option`.
//! It defines a "skeleton" of how they should be visited.
//! - `TypeSuperVisitable`. This is implemented only for each type of interest,
@@ -39,7 +38,6 @@
//! - ty.super_visit_with(visitor)
//! - u.visit_with(visitor)
//! ```
-use crate::mir;
use crate::ty::{self, flags::FlagComputation, Binder, Ty, TyCtxt, TypeFlags};
use rustc_errors::ErrorGuaranteed;
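The module docs above describe the split between `TypeVisitable`, `TypeSuperVisitable`, and `TypeVisitor`. Below is a minimal standalone sketch of that double dispatch, using illustrative names rather than the real rustc traits.

```rust
use std::ops::ControlFlow;

// "Skeleton" trait: containers and types of interest implement it.
trait Visitable {
    fn visit_with<V: Visitor>(&self, v: &mut V) -> ControlFlow<()>;
}

// The visitor; its default method just recurses, mirroring `TypeVisitor`.
trait Visitor: Sized {
    fn visit_ty(&mut self, ty: &Ty) -> ControlFlow<()> {
        ty.super_visit_with(self)
    }
}

enum Ty {
    Leaf(&'static str),
    Tuple(Vec<Ty>),
}

impl Ty {
    // Mirrors `TypeSuperVisitable`: recurse into the contents of a type of interest.
    fn super_visit_with<V: Visitor>(&self, v: &mut V) -> ControlFlow<()> {
        match self {
            Ty::Leaf(_) => ControlFlow::Continue(()),
            Ty::Tuple(parts) => parts.iter().try_for_each(|t| t.visit_with(v)),
        }
    }
}

impl Visitable for Ty {
    // A type of interest delegates to the visitor, which decides whether to recurse.
    fn visit_with<V: Visitor>(&self, v: &mut V) -> ControlFlow<()> {
        v.visit_ty(self)
    }
}

// Breaks on the first matching leaf, like `HasTypeFlagsVisitor` breaking on
// the first type whose flags intersect the ones being searched for.
struct FindLeaf(&'static str);

impl Visitor for FindLeaf {
    fn visit_ty(&mut self, ty: &Ty) -> ControlFlow<()> {
        if let Ty::Leaf(name) = ty {
            if *name == self.0 {
                return ControlFlow::Break(());
            }
        }
        ty.super_visit_with(self)
    }
}

fn main() {
    let ty = Ty::Tuple(vec![Ty::Leaf("i32"), Ty::Tuple(vec![Ty::Leaf("bool")])]);
    assert!(ty.visit_with(&mut FindLeaf("bool")).is_break());
    assert!(!ty.visit_with(&mut FindLeaf("f64")).is_break());
    println!("ok");
}
```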
@@ -84,7 +82,7 @@ pub trait TypeVisitable<'tcx>: fmt::Debug + Clone {
self.has_vars_bound_at_or_above(ty::INNERMOST)
}
- #[instrument(level = "trace")]
+ #[instrument(level = "trace", ret)]
fn has_type_flags(&self, flags: TypeFlags) -> bool {
self.visit_with(&mut HasTypeFlagsVisitor { flags }).break_value() == Some(FoundFlags)
}
@@ -104,8 +102,8 @@ pub trait TypeVisitable<'tcx>: fmt::Debug + Clone {
None
}
}
- fn has_param_types_or_consts(&self) -> bool {
- self.has_type_flags(TypeFlags::HAS_TY_PARAM | TypeFlags::HAS_CT_PARAM)
+ fn has_non_region_param(&self) -> bool {
+ self.has_type_flags(TypeFlags::NEEDS_SUBST - TypeFlags::HAS_RE_PARAM)
}
fn has_infer_regions(&self) -> bool {
self.has_type_flags(TypeFlags::HAS_RE_INFER)
@@ -113,8 +111,8 @@ pub trait TypeVisitable<'tcx>: fmt::Debug + Clone {
fn has_infer_types(&self) -> bool {
self.has_type_flags(TypeFlags::HAS_TY_INFER)
}
- fn has_infer_types_or_consts(&self) -> bool {
- self.has_type_flags(TypeFlags::HAS_TY_INFER | TypeFlags::HAS_CT_INFER)
+ fn has_non_region_infer(&self) -> bool {
+ self.has_type_flags(TypeFlags::NEEDS_INFER - TypeFlags::HAS_RE_INFER)
}
fn needs_infer(&self) -> bool {
self.has_type_flags(TypeFlags::NEEDS_INFER)
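The renamed `has_non_region_param`/`has_non_region_infer` helpers above are defined by set difference on `TypeFlags`. A standalone sketch of that arithmetic with made-up bit values (not rustc's real flag constants):

```rust
// Illustrative bit values only; not rustc's real `TypeFlags` constants.
const HAS_TY_PARAM: u32 = 1 << 0;
const HAS_RE_PARAM: u32 = 1 << 1;
const HAS_CT_PARAM: u32 = 1 << 2;
const NEEDS_SUBST: u32 = HAS_TY_PARAM | HAS_RE_PARAM | HAS_CT_PARAM;

// `TypeFlags::NEEDS_SUBST - TypeFlags::HAS_RE_PARAM` is a set difference:
// every "needs substitution" bit except the region one.
fn has_non_region_param(flags: u32) -> bool {
    flags & (NEEDS_SUBST & !HAS_RE_PARAM) != 0
}

fn main() {
    assert!(has_non_region_param(HAS_TY_PARAM));
    assert!(has_non_region_param(HAS_CT_PARAM | HAS_RE_PARAM));
    assert!(!has_non_region_param(HAS_RE_PARAM)); // region params alone don't count
    println!("ok");
}
```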
@@ -199,17 +197,9 @@ pub trait TypeVisitor<'tcx>: Sized {
c.super_visit_with(self)
}
- fn visit_unevaluated(&mut self, uv: ty::Unevaluated<'tcx>) -> ControlFlow<Self::BreakTy> {
- uv.super_visit_with(self)
- }
-
fn visit_predicate(&mut self, p: ty::Predicate<'tcx>) -> ControlFlow<Self::BreakTy> {
p.super_visit_with(self)
}
-
- fn visit_mir_const(&mut self, c: mir::ConstantKind<'tcx>) -> ControlFlow<Self::BreakTy> {
- c.super_visit_with(self)
- }
}
///////////////////////////////////////////////////////////////////////////
@@ -560,7 +550,7 @@ impl<'tcx> TypeVisitor<'tcx> for HasTypeFlagsVisitor {
type BreakTy = FoundFlags;
#[inline]
- #[instrument(skip(self), level = "trace")]
+ #[instrument(skip(self), level = "trace", ret)]
fn visit_ty(&mut self, t: Ty<'tcx>) -> ControlFlow<Self::BreakTy> {
let flags = t.flags();
trace!(t.flags=?t.flags());
@@ -572,7 +562,7 @@ impl<'tcx> TypeVisitor<'tcx> for HasTypeFlagsVisitor {
}
#[inline]
- #[instrument(skip(self), level = "trace")]
+ #[instrument(skip(self), level = "trace", ret)]
fn visit_region(&mut self, r: ty::Region<'tcx>) -> ControlFlow<Self::BreakTy> {
let flags = r.type_flags();
trace!(r.flags=?flags);
@@ -584,7 +574,7 @@ impl<'tcx> TypeVisitor<'tcx> for HasTypeFlagsVisitor {
}
#[inline]
- #[instrument(level = "trace")]
+ #[instrument(level = "trace", ret)]
fn visit_const(&mut self, c: ty::Const<'tcx>) -> ControlFlow<Self::BreakTy> {
let flags = FlagComputation::for_const(c);
trace!(r.flags=?flags);
@@ -596,19 +586,7 @@ impl<'tcx> TypeVisitor<'tcx> for HasTypeFlagsVisitor {
}
#[inline]
- #[instrument(level = "trace")]
- fn visit_unevaluated(&mut self, uv: ty::Unevaluated<'tcx>) -> ControlFlow<Self::BreakTy> {
- let flags = FlagComputation::for_unevaluated_const(uv);
- trace!(r.flags=?flags);
- if flags.intersects(self.flags) {
- ControlFlow::Break(FoundFlags)
- } else {
- ControlFlow::CONTINUE
- }
- }
-
- #[inline]
- #[instrument(level = "trace")]
+ #[instrument(level = "trace", ret)]
fn visit_predicate(&mut self, predicate: ty::Predicate<'tcx>) -> ControlFlow<Self::BreakTy> {
debug!(
"HasTypeFlagsVisitor: predicate={:?} predicate.flags={:?} self.flags={:?}",
@@ -666,7 +644,7 @@ impl<'tcx> TypeVisitor<'tcx> for LateBoundRegionsCollector {
// ignore the inputs to a projection, as they may not appear
// in the normalized form
if self.just_constrained {
- if let ty::Projection(..) = t.kind() {
+ if let ty::Projection(..) | ty::Opaque(..) = t.kind() {
return ControlFlow::CONTINUE;
}
}
diff --git a/compiler/rustc_middle/src/ty/vtable.rs b/compiler/rustc_middle/src/ty/vtable.rs
index 04a9fd1f7..5ca51c25a 100644
--- a/compiler/rustc_middle/src/ty/vtable.rs
+++ b/compiler/rustc_middle/src/ty/vtable.rs
@@ -1,7 +1,7 @@
use std::convert::TryFrom;
use std::fmt;
-use crate::mir::interpret::{alloc_range, AllocId, Allocation, Pointer, Scalar, ScalarMaybeUninit};
+use crate::mir::interpret::{alloc_range, AllocId, Allocation, Pointer, Scalar};
use crate::ty::{self, Instance, PolyTraitRef, Ty, TyCtxt};
use rustc_ast::Mutability;
@@ -87,7 +87,7 @@ pub(super) fn vtable_allocation_provider<'tcx>(
let instance = ty::Instance::resolve_drop_in_place(tcx, ty);
let fn_alloc_id = tcx.create_fn_alloc(instance);
let fn_ptr = Pointer::from(fn_alloc_id);
- ScalarMaybeUninit::from_pointer(fn_ptr, &tcx)
+ Scalar::from_pointer(fn_ptr, &tcx)
}
VtblEntry::MetadataSize => Scalar::from_uint(size, ptr_size).into(),
VtblEntry::MetadataAlign => Scalar::from_uint(align, ptr_size).into(),
@@ -97,14 +97,14 @@ pub(super) fn vtable_allocation_provider<'tcx>(
let instance = instance.polymorphize(tcx);
let fn_alloc_id = tcx.create_fn_alloc(instance);
let fn_ptr = Pointer::from(fn_alloc_id);
- ScalarMaybeUninit::from_pointer(fn_ptr, &tcx)
+ Scalar::from_pointer(fn_ptr, &tcx)
}
VtblEntry::TraitVPtr(trait_ref) => {
let super_trait_ref = trait_ref
.map_bound(|trait_ref| ty::ExistentialTraitRef::erase_self_ty(tcx, trait_ref));
let supertrait_alloc_id = tcx.vtable_allocation((ty, Some(super_trait_ref)));
let vptr = Pointer::from(supertrait_alloc_id);
- ScalarMaybeUninit::from_pointer(vptr, &tcx)
+ Scalar::from_pointer(vptr, &tcx)
}
};
vtable
diff --git a/compiler/rustc_middle/src/ty/walk.rs b/compiler/rustc_middle/src/ty/walk.rs
index 02fe1f3a7..91db9698c 100644
--- a/compiler/rustc_middle/src/ty/walk.rs
+++ b/compiler/rustc_middle/src/ty/walk.rs
@@ -112,6 +112,22 @@ impl<'tcx> Ty<'tcx> {
}
}
+impl<'tcx> ty::Const<'tcx> {
+ /// Iterator that walks `self` and any types reachable from
+ /// `self`, in depth-first order. Note that just walks the types
+ /// that appear in `self`, it does not descend into the fields of
+ /// structs or variants. For example:
+ ///
+ /// ```text
+ /// isize => { isize }
+ /// Foo<Bar<isize>> => { Foo<Bar<isize>>, Bar<isize>, isize }
+ /// [isize] => { [isize], isize }
+ /// ```
+ pub fn walk(self) -> TypeWalker<'tcx> {
+ TypeWalker::new(self.into())
+ }
+}
+
/// We push `GenericArg`s on the stack in reverse order so as to
/// maintain a pre-order traversal. As of the time of this
/// writing, the fact that the traversal is pre-order is not
@@ -152,7 +168,7 @@ fn push_inner<'tcx>(stack: &mut TypeWalkerStack<'tcx>, parent: GenericArg<'tcx>)
ty::Projection(data) => {
stack.extend(data.substs.iter().rev());
}
- ty::Dynamic(obj, lt) => {
+ ty::Dynamic(obj, lt, _) => {
stack.push(lt.into());
stack.extend(obj.iter().rev().flat_map(|predicate| {
let (substs, opt_ty) = match predicate.skip_binder() {
@@ -165,9 +181,9 @@ fn push_inner<'tcx>(stack: &mut TypeWalkerStack<'tcx>, parent: GenericArg<'tcx>)
}
};
- substs.iter().rev().chain(opt_ty.map(|term| match term {
- ty::Term::Ty(ty) => ty.into(),
- ty::Term::Const(ct) => ct.into(),
+ substs.iter().rev().chain(opt_ty.map(|term| match term.unpack() {
+ ty::TermKind::Ty(ty) => ty.into(),
+ ty::TermKind::Const(ct) => ct.into(),
}))
}));
}
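The `walk` helpers above, together with the comment about pushing `GenericArg`s in reverse, implement a stack-based pre-order traversal. A standalone sketch of the same strategy with an illustrative `Node` type standing in for `GenericArg`:

```rust
// `Node` stands in for `GenericArg`; the point is the reverse push.
#[derive(Debug)]
enum Node {
    Leaf(&'static str),
    Branch(&'static str, Vec<Node>),
}

fn walk(root: &Node) -> Vec<&'static str> {
    let mut stack = vec![root];
    let mut visited = Vec::new();
    while let Some(node) = stack.pop() {
        match node {
            Node::Leaf(name) => visited.push(*name),
            Node::Branch(name, children) => {
                visited.push(*name);
                // Pushing children in reverse keeps the left-most child on top
                // of the stack, so popping yields a pre-order traversal.
                stack.extend(children.iter().rev());
            }
        }
    }
    visited
}

fn main() {
    // Foo<Bar<isize>, usize>  =>  Foo, Bar, isize, usize
    let ty = Node::Branch(
        "Foo",
        vec![Node::Branch("Bar", vec![Node::Leaf("isize")]), Node::Leaf("usize")],
    );
    assert_eq!(walk(&ty), ["Foo", "Bar", "isize", "usize"]);
    println!("{:?}", walk(&ty));
}
```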
diff --git a/compiler/rustc_middle/src/values.rs b/compiler/rustc_middle/src/values.rs
new file mode 100644
index 000000000..f4b4c3fb0
--- /dev/null
+++ b/compiler/rustc_middle/src/values.rs
@@ -0,0 +1,202 @@
+use rustc_data_structures::fx::FxHashSet;
+use rustc_errors::{pluralize, struct_span_err, Applicability, MultiSpan};
+use rustc_hir as hir;
+use rustc_hir::def::DefKind;
+use rustc_middle::ty::Representability;
+use rustc_middle::ty::{self, DefIdTree, Ty, TyCtxt};
+use rustc_query_system::query::QueryInfo;
+use rustc_query_system::Value;
+use rustc_span::def_id::LocalDefId;
+use rustc_span::Span;
+
+use std::fmt::Write;
+
+impl<'tcx> Value<TyCtxt<'tcx>> for Ty<'_> {
+ fn from_cycle_error(tcx: TyCtxt<'tcx>, _: &[QueryInfo]) -> Self {
+ // SAFETY: This is never called when `Self` is not `Ty<'tcx>`.
+ // FIXME: Represent the above fact in the trait system somehow.
+ unsafe { std::mem::transmute::<Ty<'tcx>, Ty<'_>>(tcx.ty_error()) }
+ }
+}
+
+impl<'tcx> Value<TyCtxt<'tcx>> for ty::SymbolName<'_> {
+ fn from_cycle_error(tcx: TyCtxt<'tcx>, _: &[QueryInfo]) -> Self {
+ // SAFETY: This is never called when `Self` is not `SymbolName<'tcx>`.
+ // FIXME: Represent the above fact in the trait system somehow.
+ unsafe {
+ std::mem::transmute::<ty::SymbolName<'tcx>, ty::SymbolName<'_>>(ty::SymbolName::new(
+ tcx, "<error>",
+ ))
+ }
+ }
+}
+
+impl<'tcx> Value<TyCtxt<'tcx>> for ty::Binder<'_, ty::FnSig<'_>> {
+ fn from_cycle_error(tcx: TyCtxt<'tcx>, _: &[QueryInfo]) -> Self {
+ let err = tcx.ty_error();
+ // FIXME(compiler-errors): It would be nice if we could get the
+ // query key, so we could at least generate a fn signature that
+ // has the right arity.
+ let fn_sig = ty::Binder::dummy(tcx.mk_fn_sig(
+ [].into_iter(),
+ err,
+ false,
+ rustc_hir::Unsafety::Normal,
+ rustc_target::spec::abi::Abi::Rust,
+ ));
+
+ // SAFETY: This is never called when `Self` is not `ty::Binder<'tcx, ty::FnSig<'tcx>>`.
+ // FIXME: Represent the above fact in the trait system somehow.
+ unsafe { std::mem::transmute::<ty::PolyFnSig<'tcx>, ty::Binder<'_, ty::FnSig<'_>>>(fn_sig) }
+ }
+}
+
+impl<'tcx> Value<TyCtxt<'tcx>> for Representability {
+ fn from_cycle_error(tcx: TyCtxt<'tcx>, cycle: &[QueryInfo]) -> Self {
+ let mut item_and_field_ids = Vec::new();
+ let mut representable_ids = FxHashSet::default();
+ for info in cycle {
+ if info.query.name == "representability"
+ && let Some(field_id) = info.query.def_id
+ && let Some(field_id) = field_id.as_local()
+ && let Some(DefKind::Field) = info.query.def_kind
+ {
+ let parent_id = tcx.parent(field_id.to_def_id());
+ let item_id = match tcx.def_kind(parent_id) {
+ DefKind::Variant => tcx.parent(parent_id),
+ _ => parent_id,
+ };
+ item_and_field_ids.push((item_id.expect_local(), field_id));
+ }
+ }
+ for info in cycle {
+ if info.query.name == "representability_adt_ty"
+ && let Some(def_id) = info.query.ty_adt_id
+ && let Some(def_id) = def_id.as_local()
+ && !item_and_field_ids.iter().any(|&(id, _)| id == def_id)
+ {
+ representable_ids.insert(def_id);
+ }
+ }
+ recursive_type_error(tcx, item_and_field_ids, &representable_ids);
+ Representability::Infinite
+ }
+}
+
+// item_and_field_ids should form a cycle where each field contains the
+// type in the next element in the list
+pub fn recursive_type_error(
+ tcx: TyCtxt<'_>,
+ mut item_and_field_ids: Vec<(LocalDefId, LocalDefId)>,
+ representable_ids: &FxHashSet<LocalDefId>,
+) {
+ const ITEM_LIMIT: usize = 5;
+
+ // Rotate the cycle so that the item with the lowest span is first
+ let start_index = item_and_field_ids
+ .iter()
+ .enumerate()
+ .min_by_key(|&(_, &(id, _))| tcx.def_span(id))
+ .unwrap()
+ .0;
+ item_and_field_ids.rotate_left(start_index);
+
+ let cycle_len = item_and_field_ids.len();
+ let show_cycle_len = cycle_len.min(ITEM_LIMIT);
+
+ let mut err_span = MultiSpan::from_spans(
+ item_and_field_ids[..show_cycle_len]
+ .iter()
+ .map(|(id, _)| tcx.def_span(id.to_def_id()))
+ .collect(),
+ );
+ let mut suggestion = Vec::with_capacity(show_cycle_len * 2);
+ for i in 0..show_cycle_len {
+ let (_, field_id) = item_and_field_ids[i];
+ let (next_item_id, _) = item_and_field_ids[(i + 1) % cycle_len];
+ // Find the span(s) that contain the next item in the cycle
+ let hir_id = tcx.hir().local_def_id_to_hir_id(field_id);
+ let hir::Node::Field(field) = tcx.hir().get(hir_id) else { bug!("expected field") };
+ let mut found = Vec::new();
+ find_item_ty_spans(tcx, field.ty, next_item_id, &mut found, representable_ids);
+
+ // Couldn't find the type. Maybe it's behind a type alias?
+ // In any case, we'll just suggest boxing the whole field.
+ if found.is_empty() {
+ found.push(field.ty.span);
+ }
+
+ for span in found {
+ err_span.push_span_label(span, "recursive without indirection");
+ // FIXME(compiler-errors): This suggestion might be erroneous if Box is shadowed
+ suggestion.push((span.shrink_to_lo(), "Box<".to_string()));
+ suggestion.push((span.shrink_to_hi(), ">".to_string()));
+ }
+ }
+ let items_list = {
+ let mut s = String::new();
+ for (i, (item_id, _)) in item_and_field_ids.iter().enumerate() {
+ let path = tcx.def_path_str(item_id.to_def_id());
+ write!(&mut s, "`{path}`").unwrap();
+ if i == (ITEM_LIMIT - 1) && cycle_len > ITEM_LIMIT {
+ write!(&mut s, " and {} more", cycle_len - 5).unwrap();
+ break;
+ }
+ if cycle_len > 1 && i < cycle_len - 2 {
+ s.push_str(", ");
+ } else if cycle_len > 1 && i == cycle_len - 2 {
+ s.push_str(" and ")
+ }
+ }
+ s
+ };
+ let mut err = struct_span_err!(
+ tcx.sess,
+ err_span,
+ E0072,
+ "recursive type{} {} {} infinite size",
+ pluralize!(cycle_len),
+ items_list,
+ pluralize!("has", cycle_len),
+ );
+ err.multipart_suggestion(
+ "insert some indirection (e.g., a `Box`, `Rc`, or `&`) to break the cycle",
+ suggestion,
+ Applicability::HasPlaceholders,
+ );
+ err.emit();
+}
+
+fn find_item_ty_spans(
+ tcx: TyCtxt<'_>,
+ ty: &hir::Ty<'_>,
+ needle: LocalDefId,
+ spans: &mut Vec<Span>,
+ seen_representable: &FxHashSet<LocalDefId>,
+) {
+ match ty.kind {
+ hir::TyKind::Path(hir::QPath::Resolved(_, path)) => {
+ if let Some(def_id) = path.res.opt_def_id() {
+ let check_params = def_id.as_local().map_or(true, |def_id| {
+ if def_id == needle {
+ spans.push(ty.span);
+ }
+ seen_representable.contains(&def_id)
+ });
+ if check_params && let Some(args) = path.segments.last().unwrap().args {
+ let params_in_repr = tcx.params_in_repr(def_id);
+ for (i, arg) in args.args.iter().enumerate() {
+ if let hir::GenericArg::Type(ty) = arg && params_in_repr.contains(i as u32) {
+ find_item_ty_spans(tcx, ty, needle, spans, seen_representable);
+ }
+ }
+ }
+ }
+ }
+ hir::TyKind::Array(ty, _) => find_item_ty_spans(tcx, ty, needle, spans, seen_representable),
+ hir::TyKind::Tup(tys) => {
+ tys.iter().for_each(|ty| find_item_ty_spans(tcx, ty, needle, spans, seen_representable))
+ }
+ _ => {}
+ }
+}
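For context, a hedged example of the source pattern the new `recursive_type_error` path reports, and the shape of the suggested fix; the diagnostic wording comes from the E0072 format string above, and the `Option` in the fixed form is added here only so a finite value can be constructed.

```rust
// What the cycle recovery reports (shown as a comment so the example compiles):
//
//     struct Node {
//         next: Node, // error[E0072]: recursive type `Node` has infinite size
//     }
//
// The multipart suggestion inserts `Box<`/`>` around the recursive field to
// break the cycle with indirection.
struct Node {
    next: Option<Box<Node>>,
}

fn main() {
    let list = Node { next: Some(Box::new(Node { next: None })) };
    assert!(list.next.is_some());
    println!("ok");
}
```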
diff --git a/compiler/rustc_mir_build/Cargo.toml b/compiler/rustc_mir_build/Cargo.toml
index 30f90e383..c726fa3a3 100644
--- a/compiler/rustc_mir_build/Cargo.toml
+++ b/compiler/rustc_mir_build/Cargo.toml
@@ -4,7 +4,6 @@ version = "0.0.0"
edition = "2021"
[lib]
-doctest = false
[dependencies]
rustc_arena = { path = "../rustc_arena" }
diff --git a/compiler/rustc_mir_build/src/build/block.rs b/compiler/rustc_mir_build/src/build/block.rs
index 687560012..183db56d7 100644
--- a/compiler/rustc_mir_build/src/build/block.rs
+++ b/compiler/rustc_mir_build/src/build/block.rs
@@ -1,6 +1,7 @@
use crate::build::matches::ArmHasGuard;
use crate::build::ForGuard::OutsideGuard;
use crate::build::{BlockAnd, BlockAndExtension, BlockFrame, Builder};
+use rustc_middle::middle::region::Scope;
use rustc_middle::thir::*;
use rustc_middle::{mir::*, ty};
use rustc_span::Span;
@@ -10,7 +11,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
&mut self,
destination: Place<'tcx>,
block: BasicBlock,
- ast_block: &Block,
+ ast_block: BlockId,
source_info: SourceInfo,
) -> BlockAnd<()> {
let Block {
@@ -21,7 +22,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
expr,
targeted_by_break,
safety_mode,
- } = *ast_block;
+ } = self.thir[ast_block];
let expr = expr.map(|expr| &self.thir[expr]);
self.in_opt_scope(opt_destruction_scope.map(|de| (de, source_info)), move |this| {
this.in_scope((region_scope, source_info), LintLevel::Inherited, move |this| {
@@ -34,10 +35,19 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
&stmts,
expr,
safety_mode,
+ region_scope,
))
})
} else {
- this.ast_block_stmts(destination, block, span, &stmts, expr, safety_mode)
+ this.ast_block_stmts(
+ destination,
+ block,
+ span,
+ &stmts,
+ expr,
+ safety_mode,
+ region_scope,
+ )
}
})
})
@@ -51,6 +61,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
stmts: &[StmtId],
expr: Option<&Expr<'tcx>>,
safety_mode: BlockSafety,
+ region_scope: Scope,
) -> BlockAnd<()> {
let this = self;
@@ -73,6 +84,11 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
let mut let_scope_stack = Vec::with_capacity(8);
let outer_source_scope = this.source_scope;
let outer_in_scope_unsafe = this.in_scope_unsafe;
+ // This scope information is kept for breaking out of the parent remainder scope in case
+ // the pattern match of a let-else statement fails.
+ // By doing so, we can be sure that even temporaries that receive extended lifetime
+ // assignments are dropped, too.
+ let mut last_remainder_scope = region_scope;
this.update_source_scope_for_safety_mode(span, safety_mode);
let source_info = this.source_info(span);
@@ -96,12 +112,175 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
StmtKind::Let {
remainder_scope,
init_scope,
+ pattern,
+ initializer: Some(initializer),
+ lint_level,
+ else_block: Some(else_block),
+ } => {
+ // When lowering the statement `let <pat> = <expr> else { <else> };`,
+ // the `<else>` block is nested in the parent scope enclosing this statement.
+ // That scope is usually either the enclosing block scope,
+ // or the remainder scope of the last statement.
+ // This is to make sure that temporaries instantiated in `<expr>` are dropped
+ // as well.
+ // In addition, even though bindings in `<pat>` only come into scope if
+ // the pattern matching passes, in the MIR building the storages for them
+ // are declared as live anyway.
+ // This is similar to `let x;` statements without an initializer expression,
+ // where the value of `x` in this example may or may not be assigned,
+ // because the storage for their values may not be live after all due to
+ // failure in pattern matching.
+ // For this reason, we declare those storages as live but we do not schedule
+ // any drop yet; they are scheduled later, after the pattern matching.
+ // The generated MIR will have `StorageDead` whenever the control flow breaks out
+ // of the parent scope, regardless of the result of the pattern matching.
+ // However, the drops are inserted in MIR only when the control flow breaks out of
+ // the scope of the remainder scope associated with this `let .. else` statement.
+ // Pictorial explanation of the scope structure:
+ // ┌─────────────────────────────────┐
+ // │ Scope of the enclosing block, │
+ // │ or the last remainder scope │
+ // │ ┌───────────────────────────┐ │
+ // │ │ Scope for <else> block │ │
+ // │ └───────────────────────────┘ │
+ // │ ┌───────────────────────────┐ │
+ // │ │ Remainder scope of │ │
+ // │ │ this let-else statement │ │
+ // │ │ ┌─────────────────────┐ │ │
+ // │ │ │ <expr> scope │ │ │
+ // │ │ └─────────────────────┘ │ │
+ // │ │ extended temporaries in │ │
+ // │ │ <expr> lives in this │ │
+ // │ │ scope │ │
+ // │ │ ┌─────────────────────┐ │ │
+ // │ │ │ Scopes for the rest │ │ │
+ // │ │ └─────────────────────┘ │ │
+ // │ └───────────────────────────┘ │
+ // └─────────────────────────────────┘
+ // Generated control flow:
+ // │ let Some(x) = y() else { return; }
+ // │
+ // ┌────────▼───────┐
+ // │ evaluate y() │
+ // └────────┬───────┘
+ // │ ┌────────────────┐
+ // ┌────────▼───────┐ │Drop temporaries│
+ // │Test the pattern├──────►in y() │
+ // └────────┬───────┘ │because breaking│
+ // │ │out of <expr> │
+ // ┌────────▼───────┐ │scope │
+ // │Move value into │ └───────┬────────┘
+ // │binding x │ │
+ // └────────┬───────┘ ┌───────▼────────┐
+ // │ │Drop extended │
+ // ┌────────▼───────┐ │temporaries in │
+ // │Drop temporaries│ │<expr> because │
+ // │in y() │ │breaking out of │
+ // │because breaking│ │remainder scope │
+ // │out of <expr> │ └───────┬────────┘
+ // │scope │ │
+ // └────────┬───────┘ ┌───────▼────────┐
+ // │ │Enter <else> ├────────►
+ // ┌────────▼───────┐ │block │ return;
+ // │Continue... │ └────────────────┘
+ // └────────────────┘
+
+ let ignores_expr_result = matches!(pattern.kind, PatKind::Wild);
+ this.block_context.push(BlockFrame::Statement { ignores_expr_result });
+
+ // Lower the `else` block first because its parent scope is actually
+ // enclosing the rest of the `let .. else ..` parts.
+ let else_block_span = this.thir[*else_block].span;
+ // This place is not really used because this destination place
+ // should never be used to take values at the end of the failure
+ // block.
+ let dummy_place = this.temp(this.tcx.types.never, else_block_span);
+ let failure_entry = this.cfg.start_new_block();
+ let failure_block;
+ unpack!(
+ failure_block = this.ast_block(
+ dummy_place,
+ failure_entry,
+ *else_block,
+ this.source_info(else_block_span),
+ )
+ );
+ this.cfg.terminate(
+ failure_block,
+ this.source_info(else_block_span),
+ TerminatorKind::Unreachable,
+ );
+
+ // Declare the bindings, which may create a source scope.
+ let remainder_span = remainder_scope.span(this.tcx, this.region_scope_tree);
+ this.push_scope((*remainder_scope, source_info));
+ let_scope_stack.push(remainder_scope);
+
+ let visibility_scope =
+ Some(this.new_source_scope(remainder_span, LintLevel::Inherited, None));
+
+ let init = &this.thir[*initializer];
+ let initializer_span = init.span;
+ let failure = unpack!(
+ block = this.in_opt_scope(
+ opt_destruction_scope.map(|de| (de, source_info)),
+ |this| {
+ let scope = (*init_scope, source_info);
+ this.in_scope(scope, *lint_level, |this| {
+ this.declare_bindings(
+ visibility_scope,
+ remainder_span,
+ pattern,
+ ArmHasGuard(false),
+ Some((None, initializer_span)),
+ );
+ this.visit_primary_bindings(
+ pattern,
+ UserTypeProjections::none(),
+ &mut |this, _, _, _, node, span, _, _| {
+ this.storage_live_binding(
+ block,
+ node,
+ span,
+ OutsideGuard,
+ true,
+ );
+ },
+ );
+ this.ast_let_else(
+ block,
+ init,
+ initializer_span,
+ *else_block,
+ &last_remainder_scope,
+ pattern,
+ )
+ })
+ }
+ )
+ );
+ this.cfg.goto(failure, source_info, failure_entry);
+
+ if let Some(source_scope) = visibility_scope {
+ this.source_scope = source_scope;
+ }
+ last_remainder_scope = *remainder_scope;
+ }
+ StmtKind::Let { init_scope, initializer: None, else_block: Some(_), .. } => {
+ span_bug!(
+ init_scope.span(this.tcx, this.region_scope_tree),
+ "initializer is missing, but else block is present in this let binding",
+ )
+ }
+ StmtKind::Let {
+ remainder_scope,
+ init_scope,
ref pattern,
initializer,
lint_level,
- else_block,
+ else_block: None,
} => {
- let ignores_expr_result = matches!(*pattern.kind, PatKind::Wild);
+ let ignores_expr_result = matches!(pattern.kind, PatKind::Wild);
this.block_context.push(BlockFrame::Statement { ignores_expr_result });
// Enter the remainder scope, i.e., the bindings' destruction scope.
@@ -125,27 +304,15 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
|this| {
let scope = (*init_scope, source_info);
this.in_scope(scope, *lint_level, |this| {
- if let Some(else_block) = else_block {
- this.ast_let_else(
- block,
- init,
- initializer_span,
- else_block,
- visibility_scope,
- *remainder_scope,
- remainder_span,
- pattern,
- )
- } else {
- this.declare_bindings(
- visibility_scope,
- remainder_span,
- pattern,
- ArmHasGuard(false),
- Some((None, initializer_span)),
- );
- this.expr_into_pattern(block, pattern.clone(), init) // irrefutable pattern
- }
+ this.declare_bindings(
+ visibility_scope,
+ remainder_span,
+ pattern,
+ ArmHasGuard(false),
+ Some((None, initializer_span)),
+ );
+ this.expr_into_pattern(block, &pattern, init)
+ // irrefutable pattern
})
},
)
@@ -178,6 +345,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
if let Some(source_scope) = visibility_scope {
this.source_scope = source_scope;
}
+ last_remainder_scope = *remainder_scope;
}
}
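A small surface-level example of the behaviour the scope diagram above describes, assuming the stabilized `let ... else` semantics: on the failure path, initializer temporaries are dropped before control enters the `else` block. `Noisy`, `maybe`, and `demo` are illustrative names, not part of this change.

```rust
struct Noisy(&'static str);

impl Drop for Noisy {
    fn drop(&mut self) {
        println!("dropping {}", self.0);
    }
}

fn maybe(n: &Noisy, give: bool) -> Option<&'static str> {
    if give { Some(n.0) } else { None }
}

fn demo(give: bool) -> &'static str {
    // `Noisy("temp")` is a temporary of `<expr>`; on the failure path it is
    // dropped before the `else` block runs, matching the control-flow sketch
    // in the comment block above.
    let Some(name) = maybe(&Noisy("temp"), give) else {
        println!("pattern failed, returning early");
        return "none";
    };
    name
}

fn main() {
    println!("{}", demo(true));
    println!("{}", demo(false));
}
```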
diff --git a/compiler/rustc_mir_build/src/build/expr/as_constant.rs b/compiler/rustc_mir_build/src/build/expr/as_constant.rs
index 648d10b9e..37dc1ad9f 100644
--- a/compiler/rustc_mir_build/src/build/expr/as_constant.rs
+++ b/compiler/rustc_mir_build/src/build/expr/as_constant.rs
@@ -2,26 +2,19 @@
use crate::build::{parse_float_into_constval, Builder};
use rustc_ast as ast;
-use rustc_hir::def_id::DefId;
+use rustc_middle::mir;
use rustc_middle::mir::interpret::{
Allocation, ConstValue, LitToConstError, LitToConstInput, Scalar,
};
use rustc_middle::mir::*;
use rustc_middle::thir::*;
-use rustc_middle::ty::subst::SubstsRef;
-use rustc_middle::ty::{self, CanonicalUserTypeAnnotation, Ty, TyCtxt};
+use rustc_middle::ty::{self, CanonicalUserTypeAnnotation, TyCtxt};
use rustc_target::abi::Size;
impl<'a, 'tcx> Builder<'a, 'tcx> {
/// Compile `expr`, yielding a compile-time constant. Assumes that
/// `expr` is a valid compile-time constant!
pub(crate) fn as_constant(&mut self, expr: &Expr<'tcx>) -> Constant<'tcx> {
- let create_uneval_from_def_id =
- |tcx: TyCtxt<'tcx>, def_id: DefId, ty: Ty<'tcx>, substs: SubstsRef<'tcx>| {
- let uneval = ty::Unevaluated::new(ty::WithOptConstParam::unknown(def_id), substs);
- tcx.mk_const(ty::ConstS { kind: ty::ConstKind::Unevaluated(uneval), ty })
- };
-
let this = self;
let tcx = this.tcx;
let Expr { ty, temp_lifetime: _, span, ref kind } = *expr;
@@ -41,11 +34,11 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
Constant { span, user_ty: None, literal }
}
- ExprKind::NonHirLiteral { lit, user_ty } => {
- let user_ty = user_ty.map(|user_ty| {
+ ExprKind::NonHirLiteral { lit, ref user_ty } => {
+ let user_ty = user_ty.as_ref().map(|user_ty| {
this.canonical_user_type_annotations.push(CanonicalUserTypeAnnotation {
span,
- user_ty,
+ user_ty: user_ty.clone(),
inferred_ty: ty,
})
});
@@ -53,11 +46,11 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
Constant { span, user_ty: user_ty, literal }
}
- ExprKind::ZstLiteral { user_ty } => {
- let user_ty = user_ty.map(|user_ty| {
+ ExprKind::ZstLiteral { ref user_ty } => {
+ let user_ty = user_ty.as_ref().map(|user_ty| {
this.canonical_user_type_annotations.push(CanonicalUserTypeAnnotation {
span,
- user_ty,
+ user_ty: user_ty.clone(),
inferred_ty: ty,
})
});
@@ -65,15 +58,18 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
Constant { span, user_ty: user_ty, literal }
}
- ExprKind::NamedConst { def_id, substs, user_ty } => {
- let user_ty = user_ty.map(|user_ty| {
+ ExprKind::NamedConst { def_id, substs, ref user_ty } => {
+ let user_ty = user_ty.as_ref().map(|user_ty| {
this.canonical_user_type_annotations.push(CanonicalUserTypeAnnotation {
span,
- user_ty,
+ user_ty: user_ty.clone(),
inferred_ty: ty,
})
});
- let literal = ConstantKind::Ty(create_uneval_from_def_id(tcx, def_id, ty, substs));
+
+ let uneval =
+ mir::UnevaluatedConst::new(ty::WithOptConstParam::unknown(def_id), substs);
+ let literal = ConstantKind::Unevaluated(uneval, ty);
Constant { user_ty, span, literal }
}
@@ -85,7 +81,9 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
Constant { user_ty: None, span, literal }
}
ExprKind::ConstBlock { did: def_id, substs } => {
- let literal = ConstantKind::Ty(create_uneval_from_def_id(tcx, def_id, ty, substs));
+ let uneval =
+ mir::UnevaluatedConst::new(ty::WithOptConstParam::unknown(def_id), substs);
+ let literal = ConstantKind::Unevaluated(uneval, ty);
Constant { user_ty: None, span, literal }
}
@@ -144,7 +142,7 @@ pub(crate) fn lit_to_mir_constant<'tcx>(
}
(ast::LitKind::Bool(b), ty::Bool) => ConstValue::Scalar(Scalar::from_bool(*b)),
(ast::LitKind::Char(c), ty::Char) => ConstValue::Scalar(Scalar::from_char(*c)),
- (ast::LitKind::Err(_), _) => return Err(LitToConstError::Reported),
+ (ast::LitKind::Err, _) => return Err(LitToConstError::Reported),
_ => return Err(LitToConstError::TypeError),
};
diff --git a/compiler/rustc_mir_build/src/build/expr/as_operand.rs b/compiler/rustc_mir_build/src/build/expr/as_operand.rs
index e707c373f..c8610af70 100644
--- a/compiler/rustc_mir_build/src/build/expr/as_operand.rs
+++ b/compiler/rustc_mir_build/src/build/expr/as_operand.rs
@@ -153,12 +153,11 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
if tcx.features().unsized_fn_params {
let ty = expr.ty;
- let span = expr.span;
let param_env = this.param_env;
- if !ty.is_sized(tcx.at(span), param_env) {
+ if !ty.is_sized(tcx, param_env) {
// !sized means !copy, so this is an unsized move
- assert!(!ty.is_copy_modulo_regions(tcx.at(span), param_env));
+ assert!(!ty.is_copy_modulo_regions(tcx, param_env));
// As described above, detect the case where we are passing a value of unsized
// type, and that value is coming from the deref of a box.
diff --git a/compiler/rustc_mir_build/src/build/expr/as_place.rs b/compiler/rustc_mir_build/src/build/expr/as_place.rs
index 0c06aad4e..396782d45 100644
--- a/compiler/rustc_mir_build/src/build/expr/as_place.rs
+++ b/compiler/rustc_mir_build/src/build/expr/as_place.rs
@@ -2,7 +2,7 @@
use crate::build::expr::category::Category;
use crate::build::ForGuard::{OutsideGuard, RefWithinGuard};
-use crate::build::{BlockAnd, BlockAndExtension, Builder};
+use crate::build::{BlockAnd, BlockAndExtension, Builder, Capture, CaptureMap};
use rustc_hir::def_id::LocalDefId;
use rustc_middle::hir::place::Projection as HirProjection;
use rustc_middle::hir::place::ProjectionKind as HirProjectionKind;
@@ -17,6 +17,7 @@ use rustc_target::abi::VariantIdx;
use rustc_index::vec::Idx;
+use std::assert_matches::assert_matches;
use std::iter;
/// The "outermost" place that holds this value.
@@ -59,8 +60,6 @@ pub(crate) enum PlaceBase {
var_hir_id: LocalVarId,
/// DefId of the closure
closure_def_id: LocalDefId,
- /// The trait closure implements, `Fn`, `FnMut`, `FnOnce`
- closure_kind: ty::ClosureKind,
},
}
@@ -71,7 +70,7 @@ pub(crate) enum PlaceBase {
/// This is used internally when building a place for an expression like `a.b.c`. The fields `b`
/// and `c` can be progressively pushed onto the place builder that is created when converting `a`.
#[derive(Clone, Debug, PartialEq)]
-pub(crate) struct PlaceBuilder<'tcx> {
+pub(in crate::build) struct PlaceBuilder<'tcx> {
base: PlaceBase,
projection: Vec<PlaceElem<'tcx>>,
}
@@ -104,6 +103,8 @@ fn convert_to_hir_projections_and_truncate_for_capture<'tcx>(
variant = Some(*idx);
continue;
}
+ // These do not affect anything, they just make sure we know the right type.
+ ProjectionElem::OpaqueCast(_) => continue,
ProjectionElem::Index(..)
| ProjectionElem::ConstantIndex { .. }
| ProjectionElem::Subslice { .. } => {
@@ -145,27 +146,6 @@ fn is_ancestor_or_same_capture(
iter::zip(proj_possible_ancestor, proj_capture).all(|(a, b)| a == b)
}
-/// Computes the index of a capture within the desugared closure provided the closure's
-/// `closure_min_captures` and the capture's index of the capture in the
-/// `ty::MinCaptureList` of the root variable `var_hir_id`.
-fn compute_capture_idx<'tcx>(
- closure_min_captures: &ty::RootVariableMinCaptureList<'tcx>,
- var_hir_id: LocalVarId,
- root_var_idx: usize,
-) -> usize {
- let mut res = 0;
- for (var_id, capture_list) in closure_min_captures {
- if *var_id == var_hir_id.0 {
- res += root_var_idx;
- break;
- } else {
- res += capture_list.len();
- }
- }
-
- res
-}
-
/// Given a closure, returns the index of a capture within the desugared closure struct and the
/// `ty::CapturedPlace` which is the ancestor of the Place represented using the `var_hir_id`
/// and `projection`.
@@ -174,58 +154,39 @@ fn compute_capture_idx<'tcx>(
///
/// Returns None, when the ancestor is not found.
fn find_capture_matching_projections<'a, 'tcx>(
- typeck_results: &'a ty::TypeckResults<'tcx>,
+ upvars: &'a CaptureMap<'tcx>,
var_hir_id: LocalVarId,
- closure_def_id: LocalDefId,
projections: &[PlaceElem<'tcx>],
-) -> Option<(usize, &'a ty::CapturedPlace<'tcx>)> {
- let closure_min_captures = typeck_results.closure_min_captures.get(&closure_def_id)?;
- let root_variable_min_captures = closure_min_captures.get(&var_hir_id.0)?;
-
+) -> Option<(usize, &'a Capture<'tcx>)> {
let hir_projections = convert_to_hir_projections_and_truncate_for_capture(projections);
- // If an ancestor is found, `idx` is the index within the list of captured places
- // for root variable `var_hir_id` and `capture` is the `ty::CapturedPlace` itself.
- let (idx, capture) = root_variable_min_captures.iter().enumerate().find(|(_, capture)| {
+ upvars.get_by_key_enumerated(var_hir_id.0).find(|(_, capture)| {
let possible_ancestor_proj_kinds: Vec<_> =
- capture.place.projections.iter().map(|proj| proj.kind).collect();
+ capture.captured_place.place.projections.iter().map(|proj| proj.kind).collect();
is_ancestor_or_same_capture(&possible_ancestor_proj_kinds, &hir_projections)
- })?;
-
- // Convert index to be from the perspective of the entire closure_min_captures map
- // instead of just the root variable capture list
- Some((compute_capture_idx(closure_min_captures, var_hir_id, idx), capture))
+ })
}
/// Takes a PlaceBuilder and resolves the upvar (if any) within it, so that the
/// `PlaceBuilder` now starts from `PlaceBase::Local`.
///
/// Returns a Result with the error being the PlaceBuilder (`from_builder`) that was not found.
-fn to_upvars_resolved_place_builder<'a, 'tcx>(
+#[instrument(level = "trace", skip(cx), ret)]
+fn to_upvars_resolved_place_builder<'tcx>(
from_builder: PlaceBuilder<'tcx>,
- tcx: TyCtxt<'tcx>,
- typeck_results: &'a ty::TypeckResults<'tcx>,
+ cx: &Builder<'_, 'tcx>,
) -> Result<PlaceBuilder<'tcx>, PlaceBuilder<'tcx>> {
match from_builder.base {
PlaceBase::Local(_) => Ok(from_builder),
- PlaceBase::Upvar { var_hir_id, closure_def_id, closure_kind } => {
- let mut upvar_resolved_place_builder = PlaceBuilder::from(ty::CAPTURE_STRUCT_LOCAL);
- match closure_kind {
- ty::ClosureKind::Fn | ty::ClosureKind::FnMut => {
- upvar_resolved_place_builder = upvar_resolved_place_builder.deref();
- }
- ty::ClosureKind::FnOnce => {}
- }
-
+ PlaceBase::Upvar { var_hir_id, closure_def_id } => {
let Some((capture_index, capture)) =
find_capture_matching_projections(
- typeck_results,
+ &cx.upvars,
var_hir_id,
- closure_def_id,
&from_builder.projection,
) else {
- let closure_span = tcx.def_span(closure_def_id);
- if !enable_precise_capture(tcx, closure_span) {
+ let closure_span = cx.tcx.def_span(closure_def_id);
+ if !enable_precise_capture(cx.tcx, closure_span) {
bug!(
"No associated capture found for {:?}[{:#?}] even though \
capture_disjoint_fields isn't enabled",
@@ -241,39 +202,18 @@ fn to_upvars_resolved_place_builder<'a, 'tcx>(
return Err(from_builder);
};
- // We won't be building MIR if the closure wasn't local
- let closure_hir_id = tcx.hir().local_def_id_to_hir_id(closure_def_id);
- let closure_ty = typeck_results.node_type(closure_hir_id);
-
- let substs = match closure_ty.kind() {
- ty::Closure(_, substs) => ty::UpvarSubsts::Closure(substs),
- ty::Generator(_, substs, _) => ty::UpvarSubsts::Generator(substs),
- _ => bug!("Lowering capture for non-closure type {:?}", closure_ty),
- };
-
// Access the capture by accessing the field within the Closure struct.
- //
- // We must have inferred the capture types since we are building MIR, therefore
- // it's safe to call `tuple_element_ty` and we can unwrap here because
- // we know that the capture exists and is the `capture_index`-th capture.
- let var_ty = substs.tupled_upvars_ty().tuple_fields()[capture_index];
-
- upvar_resolved_place_builder =
- upvar_resolved_place_builder.field(Field::new(capture_index), var_ty);
-
- // If the variable is captured via ByRef(Immutable/Mutable) Borrow,
- // we need to deref it
- upvar_resolved_place_builder = match capture.info.capture_kind {
- ty::UpvarCapture::ByRef(_) => upvar_resolved_place_builder.deref(),
- ty::UpvarCapture::ByValue => upvar_resolved_place_builder,
- };
+ let capture_info = &cx.upvars[capture_index];
+
+ let mut upvar_resolved_place_builder = PlaceBuilder::from(capture_info.use_place);
// We used some of the projections to build the capture itself,
// now we apply the remaining to the upvar resolved place.
+ trace!(?capture.captured_place, ?from_builder.projection);
let remaining_projections = strip_prefix(
- capture.place.base_ty,
+ capture.captured_place.place.base_ty,
from_builder.projection,
- &capture.place.projections,
+ &capture.captured_place.place.projections,
);
upvar_resolved_place_builder.projection.extend(remaining_projections);
@@ -293,17 +233,20 @@ fn strip_prefix<'tcx>(
projections: Vec<PlaceElem<'tcx>>,
prefix_projections: &[HirProjection<'tcx>],
) -> impl Iterator<Item = PlaceElem<'tcx>> {
- let mut iter = projections.into_iter();
+ let mut iter = projections
+ .into_iter()
+ // Filter out opaque casts, they are unnecessary in the prefix.
+ .filter(|elem| !matches!(elem, ProjectionElem::OpaqueCast(..)));
for projection in prefix_projections {
match projection.kind {
HirProjectionKind::Deref => {
- assert!(matches!(iter.next(), Some(ProjectionElem::Deref)));
+ assert_matches!(iter.next(), Some(ProjectionElem::Deref));
}
HirProjectionKind::Field(..) => {
if base_ty.is_enum() {
- assert!(matches!(iter.next(), Some(ProjectionElem::Downcast(..))));
+ assert_matches!(iter.next(), Some(ProjectionElem::Downcast(..)));
}
- assert!(matches!(iter.next(), Some(ProjectionElem::Field(..))));
+ assert_matches!(iter.next(), Some(ProjectionElem::Field(..)));
}
HirProjectionKind::Index | HirProjectionKind::Subslice => {
bug!("unexpected projection kind: {:?}", projection);
@@ -315,24 +258,16 @@ fn strip_prefix<'tcx>(
}
impl<'tcx> PlaceBuilder<'tcx> {
- pub(crate) fn into_place<'a>(
- self,
- tcx: TyCtxt<'tcx>,
- typeck_results: &'a ty::TypeckResults<'tcx>,
- ) -> Place<'tcx> {
+ pub(in crate::build) fn into_place(self, cx: &Builder<'_, 'tcx>) -> Place<'tcx> {
if let PlaceBase::Local(local) = self.base {
- Place { local, projection: tcx.intern_place_elems(&self.projection) }
+ Place { local, projection: cx.tcx.intern_place_elems(&self.projection) }
} else {
- self.expect_upvars_resolved(tcx, typeck_results).into_place(tcx, typeck_results)
+ self.expect_upvars_resolved(cx).into_place(cx)
}
}
- fn expect_upvars_resolved<'a>(
- self,
- tcx: TyCtxt<'tcx>,
- typeck_results: &'a ty::TypeckResults<'tcx>,
- ) -> PlaceBuilder<'tcx> {
- to_upvars_resolved_place_builder(self, tcx, typeck_results).unwrap()
+ fn expect_upvars_resolved(self, cx: &Builder<'_, 'tcx>) -> PlaceBuilder<'tcx> {
+ to_upvars_resolved_place_builder(self, cx).unwrap()
}
/// Attempts to resolve the `PlaceBuilder`.
@@ -346,18 +281,21 @@ impl<'tcx> PlaceBuilder<'tcx> {
/// not captured. This can happen because the final mir that will be
/// generated doesn't require a read for this place. Failures will only
/// happen inside closures.
- pub(crate) fn try_upvars_resolved<'a>(
+ pub(in crate::build) fn try_upvars_resolved(
self,
- tcx: TyCtxt<'tcx>,
- typeck_results: &'a ty::TypeckResults<'tcx>,
+ cx: &Builder<'_, 'tcx>,
) -> Result<PlaceBuilder<'tcx>, PlaceBuilder<'tcx>> {
- to_upvars_resolved_place_builder(self, tcx, typeck_results)
+ to_upvars_resolved_place_builder(self, cx)
}
pub(crate) fn base(&self) -> PlaceBase {
self.base
}
+ pub(crate) fn projection(&self) -> &[PlaceElem<'tcx>] {
+ &self.projection
+ }
+
pub(crate) fn field(self, f: Field, ty: Ty<'tcx>) -> Self {
self.project(PlaceElem::Field(f, ty))
}
@@ -392,6 +330,12 @@ impl<'tcx> From<PlaceBase> for PlaceBuilder<'tcx> {
}
}
+impl<'tcx> From<Place<'tcx>> for PlaceBuilder<'tcx> {
+ fn from(p: Place<'tcx>) -> Self {
+ Self { base: PlaceBase::Local(p.local), projection: p.projection.to_vec() }
+ }
+}
+
impl<'a, 'tcx> Builder<'a, 'tcx> {
/// Compile `expr`, yielding a place that we can move from etc.
///
@@ -411,7 +355,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
expr: &Expr<'tcx>,
) -> BlockAnd<Place<'tcx>> {
let place_builder = unpack!(block = self.as_place_builder(block, expr));
- block.and(place_builder.into_place(self.tcx, self.typeck_results))
+ block.and(place_builder.into_place(self))
}
/// This is used when constructing a compound `Place`, so that we can avoid creating
@@ -435,7 +379,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
expr: &Expr<'tcx>,
) -> BlockAnd<Place<'tcx>> {
let place_builder = unpack!(block = self.as_read_only_place_builder(block, expr));
- block.and(place_builder.into_place(self.tcx, self.typeck_results))
+ block.and(place_builder.into_place(self))
}
/// This is used when constructing a compound `Place`, so that we can avoid creating
@@ -513,7 +457,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
block.and(place_builder)
}
- ExprKind::PlaceTypeAscription { source, user_ty } => {
+ ExprKind::PlaceTypeAscription { source, ref user_ty } => {
let place_builder = unpack!(
block = this.expr_as_place(
block,
@@ -526,11 +470,11 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
let annotation_index =
this.canonical_user_type_annotations.push(CanonicalUserTypeAnnotation {
span: source_info.span,
- user_ty,
+ user_ty: user_ty.clone(),
inferred_ty: expr.ty,
});
- let place = place_builder.clone().into_place(this.tcx, this.typeck_results);
+ let place = place_builder.clone().into_place(this);
this.cfg.push(
block,
Statement {
@@ -547,7 +491,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
}
block.and(place_builder)
}
- ExprKind::ValueTypeAscription { source, user_ty } => {
+ ExprKind::ValueTypeAscription { source, ref user_ty } => {
let source = &this.thir[source];
let temp =
unpack!(block = this.as_temp(block, source.temp_lifetime, source, mutability));
@@ -555,7 +499,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
let annotation_index =
this.canonical_user_type_annotations.push(CanonicalUserTypeAnnotation {
span: source_info.span,
- user_ty,
+ user_ty: user_ty.clone(),
inferred_ty: expr.ty,
});
this.cfg.push(
@@ -629,17 +573,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
closure_def_id: LocalDefId,
var_hir_id: LocalVarId,
) -> BlockAnd<PlaceBuilder<'tcx>> {
- let closure_ty =
- self.typeck_results.node_type(self.tcx.hir().local_def_id_to_hir_id(closure_def_id));
-
- let closure_kind = if let ty::Closure(_, closure_substs) = closure_ty.kind() {
- self.infcx.closure_kind(closure_substs).unwrap()
- } else {
- // Generators are considered FnOnce.
- ty::ClosureKind::FnOnce
- };
-
- block.and(PlaceBuilder::from(PlaceBase::Upvar { var_hir_id, closure_def_id, closure_kind }))
+ block.and(PlaceBuilder::from(PlaceBase::Upvar { var_hir_id, closure_def_id }))
}
/// Lower an index expression
@@ -678,7 +612,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
if is_outermost_index {
self.read_fake_borrows(block, fake_borrow_temps, source_info)
} else {
- base_place = base_place.expect_upvars_resolved(self.tcx, self.typeck_results);
+ base_place = base_place.expect_upvars_resolved(self);
self.add_fake_borrows_of_base(
&base_place,
block,
@@ -706,12 +640,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
let lt = self.temp(bool_ty, expr_span);
// len = len(slice)
- self.cfg.push_assign(
- block,
- source_info,
- len,
- Rvalue::Len(slice.into_place(self.tcx, self.typeck_results)),
- );
+ self.cfg.push_assign(block, source_info, len, Rvalue::Len(slice.into_place(self)));
// lt = idx < len
self.cfg.push_assign(
block,
@@ -791,6 +720,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
}
ProjectionElem::Field(..)
| ProjectionElem::Downcast(..)
+ | ProjectionElem::OpaqueCast(..)
| ProjectionElem::ConstantIndex { .. }
| ProjectionElem::Subslice { .. } => (),
}
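A surface-level illustration of the precise closure captures that `find_capture_matching_projections` resolves back to fields of the closure struct, assuming edition 2021 capture rules; the struct and closure here are illustrative only.

```rust
// Illustration only; assumes edition-2021 precise capture rules.
struct Point {
    x: i32,
    y: i32,
}

fn main() {
    let mut point = Point { x: 1, y: 2 };
    // The closure captures only the place `point.x` (by mutable reference);
    // the lowering above maps such a captured place to a field of the
    // compiler-generated closure struct.
    let mut bump_x = || point.x += 1;
    // The disjoint place `point.y` remains readable while `bump_x` is live.
    println!("y = {}", point.y);
    bump_x();
    println!("x = {}", point.x);
}
```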
diff --git a/compiler/rustc_mir_build/src/build/expr/as_rvalue.rs b/compiler/rustc_mir_build/src/build/expr/as_rvalue.rs
index 15f2d17c4..3dafdcb78 100644
--- a/compiler/rustc_mir_build/src/build/expr/as_rvalue.rs
+++ b/compiler/rustc_mir_build/src/build/expr/as_rvalue.rs
@@ -12,7 +12,7 @@ use rustc_middle::mir::AssertKind;
use rustc_middle::mir::Place;
use rustc_middle::mir::*;
use rustc_middle::thir::*;
-use rustc_middle::ty::cast::CastTy;
+use rustc_middle::ty::cast::{mir_cast_kind, CastTy};
use rustc_middle::ty::{self, Ty, UpvarSubsts};
use rustc_span::Span;
@@ -197,13 +197,13 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
// create all the steps directly in MIR with operations all backends need to support anyway.
let (source, ty) = if let ty::Adt(adt_def, ..) = source.ty.kind() && adt_def.is_enum() {
let discr_ty = adt_def.repr().discr_type().to_ty(this.tcx);
- let place = unpack!(block = this.as_place(block, source));
+ let temp = unpack!(block = this.as_temp(block, scope, source, Mutability::Not));
let discr = this.temp(discr_ty, source.span);
this.cfg.push_assign(
block,
source_info,
discr,
- Rvalue::Discriminant(place),
+ Rvalue::Discriminant(temp.into()),
);
(Operand::Move(discr), discr_ty)
@@ -216,15 +216,8 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
};
let from_ty = CastTy::from_ty(ty);
let cast_ty = CastTy::from_ty(expr.ty);
- let cast_kind = match (from_ty, cast_ty) {
- (Some(CastTy::Ptr(_) | CastTy::FnPtr), Some(CastTy::Int(_))) => {
- CastKind::PointerExposeAddress
- }
- (Some(CastTy::Int(_)), Some(CastTy::Ptr(_))) => {
- CastKind::PointerFromExposedAddress
- }
- (_, _) => CastKind::Misc,
- };
+ debug!("ExprKind::Cast from_ty={from_ty:?}, cast_ty={:?}/{cast_ty:?}", expr.ty,);
+ let cast_kind = mir_cast_kind(ty, expr.ty);
block.and(Rvalue::Cast(cast_kind, source, expr.ty))
}
ExprKind::Pointer { cast, source } => {
@@ -302,7 +295,13 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
block.and(Rvalue::Aggregate(Box::new(AggregateKind::Tuple), fields))
}
- ExprKind::Closure { closure_id, substs, ref upvars, movability, ref fake_reads } => {
+ ExprKind::Closure(box ClosureExpr {
+ closure_id,
+ substs,
+ ref upvars,
+ movability,
+ ref fake_reads,
+ }) => {
// Convert the closure fake reads, if any, from `ExprRef` to mir `Place`
// and push the fake reads.
// This must come before creating the operands. This is required in case
@@ -321,11 +320,8 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
let place_builder =
unpack!(block = this.as_place_builder(block, &this.thir[*thir_place]));
- if let Ok(place_builder_resolved) =
- place_builder.try_upvars_resolved(this.tcx, this.typeck_results)
- {
- let mir_place =
- place_builder_resolved.into_place(this.tcx, this.typeck_results);
+ if let Ok(place_builder_resolved) = place_builder.try_upvars_resolved(this) {
+ let mir_place = place_builder_resolved.into_place(this);
this.cfg.push_fake_read(
block,
this.source_info(this.tcx.hir().span(*hir_id)),
@@ -616,8 +612,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
// by the parent itself. The mutability of the current capture
// is same as that of the capture in the parent closure.
PlaceBase::Upvar { .. } => {
- let enclosing_upvars_resolved =
- arg_place_builder.clone().into_place(this.tcx, this.typeck_results);
+ let enclosing_upvars_resolved = arg_place_builder.clone().into_place(this);
match enclosing_upvars_resolved.as_ref() {
PlaceRef {
@@ -637,12 +632,12 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
);
// Not in a closure
debug_assert!(
- this.upvar_mutbls.len() > upvar_index.index(),
- "Unexpected capture place, upvar_mutbls={:#?}, upvar_index={:?}",
- this.upvar_mutbls,
+ this.upvars.len() > upvar_index.index(),
+ "Unexpected capture place, upvars={:#?}, upvar_index={:?}",
+ this.upvars,
upvar_index
);
- this.upvar_mutbls[upvar_index.index()]
+ this.upvars[upvar_index.index()].mutability
}
_ => bug!("Unexpected capture place"),
}
@@ -654,7 +649,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
Mutability::Mut => BorrowKind::Mut { allow_two_phase_borrow: false },
};
- let arg_place = arg_place_builder.into_place(this.tcx, this.typeck_results);
+ let arg_place = arg_place_builder.into_place(this);
this.cfg.push_assign(
block,
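
Editor's sketch: the as_rvalue.rs hunk above replaces the open-coded match on `(from_ty, cast_ty)` with the shared `mir_cast_kind` helper imported from `rustc_middle::ty::cast`, so the decision lives in one place. The following stand-alone sketch only mirrors the shape of that dispatch; the enums here are simplified stand-ins, not the real `CastTy`/`CastKind` types.

    // Simplified stand-ins for CastTy/CastKind; the point is centralizing the
    // cast-kind decision in one helper instead of matching at each call site.
    #[allow(dead_code)]
    #[derive(Clone, Copy, Debug, PartialEq)]
    enum CastTy { Int, Float, Ptr, FnPtr }

    #[derive(Clone, Copy, Debug, PartialEq)]
    enum CastKind { PointerExposeAddress, PointerFromExposedAddress, Misc }

    fn mir_cast_kind(from: Option<CastTy>, to: Option<CastTy>) -> CastKind {
        match (from, to) {
            (Some(CastTy::Ptr | CastTy::FnPtr), Some(CastTy::Int)) => CastKind::PointerExposeAddress,
            (Some(CastTy::Int), Some(CastTy::Ptr)) => CastKind::PointerFromExposedAddress,
            _ => CastKind::Misc,
        }
    }

    fn main() {
        assert_eq!(mir_cast_kind(Some(CastTy::Ptr), Some(CastTy::Int)), CastKind::PointerExposeAddress);
        assert_eq!(mir_cast_kind(Some(CastTy::Int), Some(CastTy::Float)), CastKind::Misc);
    }
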
diff --git a/compiler/rustc_mir_build/src/build/expr/as_temp.rs b/compiler/rustc_mir_build/src/build/expr/as_temp.rs
index 724b72f87..0ca4e3745 100644
--- a/compiler/rustc_mir_build/src/build/expr/as_temp.rs
+++ b/compiler/rustc_mir_build/src/build/expr/as_temp.rs
@@ -23,6 +23,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
ensure_sufficient_stack(|| self.as_temp_inner(block, temp_lifetime, expr, mutability))
}
+ #[instrument(skip(self), level = "debug")]
fn as_temp_inner(
&mut self,
mut block: BasicBlock,
@@ -30,10 +31,6 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
expr: &Expr<'tcx>,
mutability: Mutability,
) -> BlockAnd<Local> {
- debug!(
- "as_temp(block={:?}, temp_lifetime={:?}, expr={:?}, mutability={:?})",
- block, temp_lifetime, expr, mutability
- );
let this = self;
let expr_span = expr.span;
@@ -83,8 +80,9 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
// Don't bother with StorageLive and Dead for these temporaries,
// they are never assigned.
ExprKind::Break { .. } | ExprKind::Continue { .. } | ExprKind::Return { .. } => (),
- ExprKind::Block { body: Block { expr: None, targeted_by_break: false, .. } }
- if expr_ty.is_never() => {}
+ ExprKind::Block { block }
+ if let Block { expr: None, targeted_by_break: false, .. } = this.thir[block]
+ && expr_ty.is_never() => {}
_ => {
this.cfg
.push(block, Statement { source_info, kind: StatementKind::StorageLive(temp) });
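
Editor's sketch: the as_temp.rs hunk above drops a hand-written `debug!` of the arguments in favour of a `#[instrument]` attribute, which records the non-skipped arguments on a tracing span automatically. A minimal sketch of that pattern with the `tracing` crate (assuming `tracing` and `tracing-subscriber` as dependencies; `Builder` and `Expr` here are trivial stand-ins, not the MIR builder types):

    use tracing::{debug, instrument, Level};

    #[allow(dead_code)]
    #[derive(Debug)]
    struct Expr {
        span: (u32, u32),
    }

    struct Builder;

    impl Builder {
        #[instrument(skip(self), level = "debug")]
        fn as_temp_inner(&mut self, block: u32, expr: &Expr) -> u32 {
            // `#[instrument]` already records `block` and `expr` on the span,
            // so only genuinely new information needs an explicit event.
            debug!("lowering to a fresh temporary");
            block + 1
        }
    }

    fn main() {
        tracing_subscriber::fmt().with_max_level(Level::DEBUG).init();
        let mut builder = Builder;
        let expr = Expr { span: (0, 4) };
        let _ = builder.as_temp_inner(0, &expr);
    }
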
diff --git a/compiler/rustc_mir_build/src/build/expr/into.rs b/compiler/rustc_mir_build/src/build/expr/into.rs
index 017d43d10..24ecd0a53 100644
--- a/compiler/rustc_mir_build/src/build/expr/into.rs
+++ b/compiler/rustc_mir_build/src/build/expr/into.rs
@@ -15,14 +15,13 @@ use std::iter;
impl<'a, 'tcx> Builder<'a, 'tcx> {
/// Compile `expr`, storing the result into `destination`, which
/// is assumed to be uninitialized.
+ #[instrument(level = "debug", skip(self))]
pub(crate) fn expr_into_dest(
&mut self,
destination: Place<'tcx>,
mut block: BasicBlock,
expr: &Expr<'tcx>,
) -> BlockAnd<()> {
- debug!("expr_into_dest(destination={:?}, block={:?}, expr={:?})", destination, block, expr);
-
// since we frequently have to reference `self` from within a
// closure, where `self` would be shadowed, it's easier to
// just use the name `this` uniformly
@@ -46,7 +45,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
})
})
}
- ExprKind::Block { body: ref ast_block } => {
+ ExprKind::Block { block: ast_block } => {
this.ast_block(destination, block, ast_block, source_info)
}
ExprKind::Match { scrutinee, ref arms } => {
@@ -75,7 +74,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
this.source_info(then_expr.span)
};
let (then_block, else_block) =
- this.in_if_then_scope(condition_scope, |this| {
+ this.in_if_then_scope(condition_scope, then_expr.span, |this| {
let then_blk = unpack!(this.then_else_break(
block,
&this.thir[cond],
@@ -108,7 +107,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
}
ExprKind::Let { expr, ref pat } => {
let scope = this.local_scope();
- let (true_block, false_block) = this.in_if_then_scope(scope, |this| {
+ let (true_block, false_block) = this.in_if_then_scope(scope, expr_span, |this| {
this.lower_let_expr(block, &this.thir[expr], pat, scope, None, expr_span)
});
@@ -314,11 +313,11 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
this.cfg.push_assign(block, source_info, destination, address_of);
block.unit()
}
- ExprKind::Adt(box Adt {
+ ExprKind::Adt(box AdtExpr {
adt_def,
variant_index,
substs,
- user_ty,
+ ref user_ty,
ref fields,
ref base,
}) => {
@@ -366,9 +365,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
None => {
let place_builder = place_builder.clone();
this.consume_by_copy_or_move(
- place_builder
- .field(n, *ty)
- .into_place(this.tcx, this.typeck_results),
+ place_builder.field(n, *ty).into_place(this),
)
}
})
@@ -378,10 +375,10 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
};
let inferred_ty = expr.ty;
- let user_ty = user_ty.map(|ty| {
+ let user_ty = user_ty.as_ref().map(|user_ty| {
this.canonical_user_type_annotations.push(CanonicalUserTypeAnnotation {
span: source_info.span,
- user_ty: ty,
+ user_ty: user_ty.clone(),
inferred_ty,
})
});
@@ -400,7 +397,12 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
);
block.unit()
}
- ExprKind::InlineAsm { template, ref operands, options, line_spans } => {
+ ExprKind::InlineAsm(box InlineAsmExpr {
+ template,
+ ref operands,
+ options,
+ line_spans,
+ }) => {
use rustc_middle::{mir, thir};
let operands = operands
.into_iter()
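
Editor's sketch: several THIR variants in the into.rs hunks above now carry boxed payload structs (`AdtExpr`, `InlineAsmExpr`, `ClosureExpr`). A short, runnable illustration of why boxing the bulky variant keeps the whole enum small; the types below are hypothetical stand-ins and the real payload sizes differ.

    use std::mem::size_of;

    #[allow(dead_code)]
    struct BigPayload {
        fields: [u64; 8],
    }

    #[allow(dead_code)]
    enum Unboxed {
        Small(u8),
        Big(BigPayload),
    }

    #[allow(dead_code)]
    enum Boxed {
        Small(u8),
        Big(Box<BigPayload>),
    }

    fn main() {
        // The enum's size is driven by its largest variant, so boxing shrinks it.
        println!("unboxed stand-in: {} bytes", size_of::<Unboxed>());
        println!("boxed stand-in:   {} bytes", size_of::<Boxed>());
    }
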
diff --git a/compiler/rustc_mir_build/src/build/expr/stmt.rs b/compiler/rustc_mir_build/src/build/expr/stmt.rs
index a7e1331aa..00dbcaeb0 100644
--- a/compiler/rustc_mir_build/src/build/expr/stmt.rs
+++ b/compiler/rustc_mir_build/src/build/expr/stmt.rs
@@ -116,14 +116,22 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
// it is usually better to focus on `the_value` rather
// than the entirety of block(s) surrounding it.
let adjusted_span = (|| {
- if let ExprKind::Block { body } = &expr.kind && let Some(tail_ex) = body.expr {
+ if let ExprKind::Block { block } = expr.kind
+ && let Some(tail_ex) = this.thir[block].expr
+ {
let mut expr = &this.thir[tail_ex];
- while let ExprKind::Block {
- body: Block { expr: Some(nested_expr), .. },
- }
- | ExprKind::Scope { value: nested_expr, .. } = expr.kind
- {
- expr = &this.thir[nested_expr];
+ loop {
+ match expr.kind {
+ ExprKind::Block { block }
+ if let Some(nested_expr) = this.thir[block].expr =>
+ {
+ expr = &this.thir[nested_expr];
+ }
+ ExprKind::Scope { value: nested_expr, .. } => {
+ expr = &this.thir[nested_expr];
+ }
+ _ => break,
+ }
}
this.block_context.push(BlockFrame::TailExpr {
tail_result_is_ignored: true,
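
Editor's sketch: the stmt.rs hunk above rewrites a `while let` over an or-pattern into an explicit `loop { match ... }`, because the block arm now needs its own condition. A simplified, runnable version of the same "peel nested wrappers" traversal over a hypothetical expression tree (not the real THIR):

    #[allow(dead_code)]
    #[derive(Debug)]
    enum Expr {
        Scope(Box<Expr>),
        Block(Option<Box<Expr>>),
        Value(i32),
    }

    fn innermost(mut expr: &Expr) -> &Expr {
        loop {
            match expr {
                // Each arm can carry its own extra condition, unlike a single
                // or-pattern in a `while let`.
                Expr::Block(Some(inner)) => expr = &**inner,
                Expr::Scope(inner) => expr = &**inner,
                _ => break,
            }
        }
        expr
    }

    fn main() {
        let e = Expr::Scope(Box::new(Expr::Block(Some(Box::new(Expr::Value(7))))));
        println!("{:?}", innermost(&e)); // prints Value(7)
    }
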
diff --git a/compiler/rustc_mir_build/src/build/matches/mod.rs b/compiler/rustc_mir_build/src/build/matches/mod.rs
index 58b1564cc..3f813e0af 100644
--- a/compiler/rustc_mir_build/src/build/matches/mod.rs
+++ b/compiler/rustc_mir_build/src/build/matches/mod.rs
@@ -155,7 +155,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
///
/// * From each pre-binding block to the next pre-binding block.
/// * From each otherwise block to the next pre-binding block.
- #[tracing::instrument(level = "debug", skip(self, arms))]
+ #[instrument(level = "debug", skip(self, arms))]
pub(crate) fn match_expr(
&mut self,
destination: Place<'tcx>,
@@ -170,7 +170,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
let mut arm_candidates = self.create_match_candidates(scrutinee_place.clone(), &arms);
- let match_has_guard = arms.iter().copied().any(|arm| self.thir[arm].guard.is_some());
+ let match_has_guard = arm_candidates.iter().any(|(_, candidate)| candidate.has_guard);
let mut candidates =
arm_candidates.iter_mut().map(|(_, candidate)| candidate).collect::<Vec<_>>();
@@ -220,10 +220,8 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
let cause_matched_place = FakeReadCause::ForMatchedPlace(None);
let source_info = self.source_info(scrutinee_span);
- if let Ok(scrutinee_builder) =
- scrutinee_place_builder.clone().try_upvars_resolved(self.tcx, self.typeck_results)
- {
- let scrutinee_place = scrutinee_builder.into_place(self.tcx, self.typeck_results);
+ if let Ok(scrutinee_builder) = scrutinee_place_builder.clone().try_upvars_resolved(self) {
+ let scrutinee_place = scrutinee_builder.into_place(self);
self.cfg.push_fake_read(block, source_info, cause_matched_place, scrutinee_place);
}
@@ -246,7 +244,8 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
.map(|arm| {
let arm = &self.thir[arm];
let arm_has_guard = arm.guard.is_some();
- let arm_candidate = Candidate::new(scrutinee.clone(), &arm.pattern, arm_has_guard);
+ let arm_candidate =
+ Candidate::new(scrutinee.clone(), &arm.pattern, arm_has_guard, self);
(arm, arm_candidate)
})
.collect()
@@ -348,12 +347,10 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
// ```
let mut opt_scrutinee_place: Option<(Option<&Place<'tcx>>, Span)> = None;
let scrutinee_place: Place<'tcx>;
- if let Ok(scrutinee_builder) = scrutinee_place_builder
- .clone()
- .try_upvars_resolved(this.tcx, this.typeck_results)
+ if let Ok(scrutinee_builder) =
+ scrutinee_place_builder.clone().try_upvars_resolved(this)
{
- scrutinee_place =
- scrutinee_builder.into_place(this.tcx, this.typeck_results);
+ scrutinee_place = scrutinee_builder.into_place(this);
opt_scrutinee_place = Some((Some(&scrutinee_place), scrutinee_span));
}
let scope = this.declare_bindings(
@@ -373,6 +370,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
Some(arm.span),
Some(arm.scope),
Some(match_scope),
+ false,
);
if let Some(source_scope) = scope {
@@ -418,6 +416,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
arm_span: Option<Span>,
arm_scope: Option<region::Scope>,
match_scope: Option<region::Scope>,
+ storages_alive: bool,
) -> BasicBlock {
if candidate.subcandidates.is_empty() {
// Avoid generating another `BasicBlock` when we only have one
@@ -431,6 +430,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
arm_span,
match_scope,
true,
+ storages_alive,
)
} else {
// It's helpful to avoid scheduling drops multiple times to save
@@ -468,6 +468,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
arm_span,
match_scope,
schedule_drops,
+ storages_alive,
);
if arm_scope.is_none() {
schedule_drops = false;
@@ -490,10 +491,10 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
pub(super) fn expr_into_pattern(
&mut self,
mut block: BasicBlock,
- irrefutable_pat: Pat<'tcx>,
+ irrefutable_pat: &Pat<'tcx>,
initializer: &Expr<'tcx>,
) -> BlockAnd<()> {
- match *irrefutable_pat.kind {
+ match irrefutable_pat.kind {
// Optimize the case of `let x = ...` to write directly into `x`
PatKind::Binding { mode: BindingMode::ByValue, var, subpattern: None, .. } => {
let place =
@@ -518,17 +519,14 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
// broken.
PatKind::AscribeUserType {
subpattern:
- Pat {
+ box Pat {
kind:
- box PatKind::Binding {
- mode: BindingMode::ByValue,
- var,
- subpattern: None,
- ..
+ PatKind::Binding {
+ mode: BindingMode::ByValue, var, subpattern: None, ..
},
..
},
- ascription: thir::Ascription { annotation, variance: _ },
+ ascription: thir::Ascription { ref annotation, variance: _ },
} => {
let place =
self.storage_live_binding(block, var, irrefutable_pat.span, OutsideGuard, true);
@@ -541,7 +539,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
let ty_source_info = self.source_info(annotation.span);
- let base = self.canonical_user_type_annotations.push(annotation);
+ let base = self.canonical_user_type_annotations.push(annotation.clone());
self.cfg.push(
block,
Statement {
@@ -573,7 +571,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
_ => {
let place_builder = unpack!(block = self.as_place_builder(block, initializer));
- self.place_into_pattern(block, irrefutable_pat, place_builder, true)
+ self.place_into_pattern(block, &irrefutable_pat, place_builder, true)
}
}
}
@@ -581,11 +579,11 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
pub(crate) fn place_into_pattern(
&mut self,
block: BasicBlock,
- irrefutable_pat: Pat<'tcx>,
+ irrefutable_pat: &Pat<'tcx>,
initializer: PlaceBuilder<'tcx>,
set_match_place: bool,
) -> BlockAnd<()> {
- let mut candidate = Candidate::new(initializer.clone(), &irrefutable_pat, false);
+ let mut candidate = Candidate::new(initializer.clone(), &irrefutable_pat, false, self);
let fake_borrow_temps = self.lower_match_tree(
block,
irrefutable_pat.span,
@@ -602,12 +600,6 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
while let Some(next) = {
for binding in &candidate_ref.bindings {
let local = self.var_local_id(binding.var_id, OutsideGuard);
-
- let Some(box LocalInfo::User(ClearCrossCrate::Set(BindingForm::Var(
- VarBindingForm { opt_match_place: Some((ref mut match_place, _)), .. },
- )))) = self.local_decls[local].local_info else {
- bug!("Let binding to non-user variable.")
- };
// `try_upvars_resolved` may fail if it is unable to resolve the given
// `PlaceBuilder` inside a closure. In this case, we don't want to include
// a scrutinee place. `scrutinee_place_builder` will fail for destructured
@@ -622,10 +614,14 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
// let (v1, v2) = foo;
// };
// ```
- if let Ok(match_pair_resolved) =
- initializer.clone().try_upvars_resolved(self.tcx, self.typeck_results)
- {
- let place = match_pair_resolved.into_place(self.tcx, self.typeck_results);
+ if let Ok(match_pair_resolved) = initializer.clone().try_upvars_resolved(self) {
+ let place = match_pair_resolved.into_place(self);
+
+ let Some(box LocalInfo::User(ClearCrossCrate::Set(BindingForm::Var(
+ VarBindingForm { opt_match_place: Some((ref mut match_place, _)), .. },
+ )))) = self.local_decls[local].local_info else {
+ bug!("Let binding to non-user variable.")
+ };
*match_place = Some(place);
}
}
@@ -646,6 +642,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
None,
None,
None,
+ false,
)
.unit()
}
@@ -654,6 +651,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
/// scope for the bindings in these patterns, if such a scope had to be
/// created. NOTE: Declaring the bindings should always be done in their
/// drop scope.
+ #[instrument(skip(self), level = "debug")]
pub(crate) fn declare_bindings(
&mut self,
mut visibility_scope: Option<SourceScope>,
@@ -662,7 +660,6 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
has_guard: ArmHasGuard,
opt_match_place: Option<(Option<&Place<'tcx>>, Span)>,
) -> Option<SourceScope> {
- debug!("declare_bindings: pattern={:?}", pattern);
self.visit_primary_bindings(
&pattern,
UserTypeProjections::none(),
@@ -702,9 +699,9 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
let local_id = self.var_local_id(var, for_guard);
let source_info = self.source_info(span);
self.cfg.push(block, Statement { source_info, kind: StatementKind::StorageLive(local_id) });
- // Altough there is almost always scope for given variable in corner cases
+ // Although there is almost always scope for given variable in corner cases
// like #92893 we might get variable with no scope.
- if let Some(region_scope) = self.region_scope_tree.var_scope(var.0.local_id) && schedule_drop{
+ if let Some(region_scope) = self.region_scope_tree.var_scope(var.0.local_id) && schedule_drop {
self.schedule_drop(span, region_scope, local_id, DropKind::Storage);
}
Place::from(local_id)
@@ -744,7 +741,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
"visit_primary_bindings: pattern={:?} pattern_user_ty={:?}",
pattern, pattern_user_ty
);
- match *pattern.kind {
+ match pattern.kind {
PatKind::Binding {
mutability,
name,
@@ -767,7 +764,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
| PatKind::Slice { ref prefix, ref slice, ref suffix } => {
let from = u64::try_from(prefix.len()).unwrap();
let to = u64::try_from(suffix.len()).unwrap();
- for subpattern in prefix {
+ for subpattern in prefix.iter() {
self.visit_primary_bindings(subpattern, pattern_user_ty.clone().index(), f);
}
for subpattern in slice {
@@ -777,7 +774,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
f,
);
}
- for subpattern in suffix {
+ for subpattern in suffix.iter() {
self.visit_primary_bindings(subpattern, pattern_user_ty.clone().index(), f);
}
}
@@ -830,7 +827,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
// may not all be in the leftmost subpattern. For example in
// `let (x | y) = ...`, the primary binding of `y` occurs in
// the right subpattern
- for subpattern in pats {
+ for subpattern in pats.iter() {
self.visit_primary_bindings(subpattern, pattern_user_ty.clone(), f);
}
}
@@ -868,11 +865,16 @@ struct Candidate<'pat, 'tcx> {
}
impl<'tcx, 'pat> Candidate<'pat, 'tcx> {
- fn new(place: PlaceBuilder<'tcx>, pattern: &'pat Pat<'tcx>, has_guard: bool) -> Self {
+ fn new(
+ place: PlaceBuilder<'tcx>,
+ pattern: &'pat Pat<'tcx>,
+ has_guard: bool,
+ cx: &Builder<'_, 'tcx>,
+ ) -> Self {
Candidate {
span: pattern.span,
has_guard,
- match_pairs: smallvec![MatchPair { place, pattern }],
+ match_pairs: smallvec![MatchPair::new(place, pattern, cx)],
bindings: Vec::new(),
ascriptions: Vec::new(),
subcandidates: Vec::new(),
@@ -982,7 +984,7 @@ enum TestKind<'tcx> {
},
/// Test whether the value falls within an inclusive or exclusive range
- Range(PatRange<'tcx>),
+ Range(Box<PatRange<'tcx>>),
/// Test that the length of the slice is equal to `len`.
Len { len: u64, op: BinOp },
@@ -1048,6 +1050,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
/// if `x.0` matches `false` (for the third arm). In the (impossible at
/// runtime) case when `x.0` is now `true`, we branch to
/// `otherwise_block`.
+ #[instrument(skip(self, fake_borrows), level = "debug")]
fn match_candidates<'pat>(
&mut self,
span: Span,
@@ -1057,11 +1060,6 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
candidates: &mut [&mut Candidate<'pat, 'tcx>],
fake_borrows: &mut Option<FxIndexSet<Place<'tcx>>>,
) {
- debug!(
- "matched_candidate(span={:?}, candidates={:?}, start_block={:?}, otherwise_block={:?})",
- span, candidates, start_block, otherwise_block,
- );
-
// Start by simplifying candidates. Once this process is complete, all
// the match pairs which remain require some form of test, whether it
// be a switch or pattern comparison.
@@ -1330,7 +1328,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
// All of the or-patterns have been sorted to the end, so if the first
// pattern is an or-pattern we only have or-patterns.
- match *first_candidate.match_pairs[0].pattern.kind {
+ match first_candidate.match_pairs[0].pattern.kind {
PatKind::Or { .. } => (),
_ => {
self.test_candidates(
@@ -1350,7 +1348,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
let mut otherwise = None;
for match_pair in match_pairs {
- let PatKind::Or { ref pats } = &*match_pair.pattern.kind else {
+ let PatKind::Or { ref pats } = &match_pair.pattern.kind else {
bug!("Or-patterns should have been sorted to the end");
};
let or_span = match_pair.pattern.span;
@@ -1380,19 +1378,23 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
)
}
+ #[instrument(
+ skip(self, otherwise, or_span, place, fake_borrows, candidate, pats),
+ level = "debug"
+ )]
fn test_or_pattern<'pat>(
&mut self,
candidate: &mut Candidate<'pat, 'tcx>,
otherwise: &mut Option<BasicBlock>,
- pats: &'pat [Pat<'tcx>],
+ pats: &'pat [Box<Pat<'tcx>>],
or_span: Span,
place: PlaceBuilder<'tcx>,
fake_borrows: &mut Option<FxIndexSet<Place<'tcx>>>,
) {
- debug!("test_or_pattern:\ncandidate={:#?}\npats={:#?}", candidate, pats);
+ debug!("candidate={:#?}\npats={:#?}", candidate, pats);
let mut or_candidates: Vec<_> = pats
.iter()
- .map(|pat| Candidate::new(place.clone(), pat, candidate.has_guard))
+ .map(|pat| Candidate::new(place.clone(), pat, candidate.has_guard, self))
.collect();
let mut or_candidate_refs: Vec<_> = or_candidates.iter_mut().collect();
let otherwise = if candidate.otherwise_block.is_some() {
@@ -1605,9 +1607,9 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
// Insert a Shallow borrow of any places that is switched on.
if let Some(fb) = fake_borrows && let Ok(match_place_resolved) =
- match_place.clone().try_upvars_resolved(self.tcx, self.typeck_results)
+ match_place.clone().try_upvars_resolved(self)
{
- let resolved_place = match_place_resolved.into_place(self.tcx, self.typeck_results);
+ let resolved_place = match_place_resolved.into_place(self);
fb.insert(resolved_place);
}
@@ -1634,9 +1636,14 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
candidates = rest;
}
// at least the first candidate ought to be tested
- assert!(total_candidate_count > candidates.len());
- debug!("test_candidates: tested_candidates: {}", total_candidate_count - candidates.len());
- debug!("test_candidates: untested_candidates: {}", candidates.len());
+ assert!(
+ total_candidate_count > candidates.len(),
+ "{}, {:#?}",
+ total_candidate_count,
+ candidates
+ );
+ debug!("tested_candidates: {}", total_candidate_count - candidates.len());
+ debug!("untested_candidates: {}", candidates.len());
// HACK(matthewjasper) This is a closure so that we can let the test
// create its blocks before the rest of the match. This currently
@@ -1783,8 +1790,9 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
let expr_span = expr.span;
let expr_place_builder = unpack!(block = self.lower_scrutinee(block, expr, expr_span));
let wildcard = Pat::wildcard_from_ty(pat.ty);
- let mut guard_candidate = Candidate::new(expr_place_builder.clone(), &pat, false);
- let mut otherwise_candidate = Candidate::new(expr_place_builder.clone(), &wildcard, false);
+ let mut guard_candidate = Candidate::new(expr_place_builder.clone(), &pat, false, self);
+ let mut otherwise_candidate =
+ Candidate::new(expr_place_builder.clone(), &wildcard, false, self);
let fake_borrow_temps = self.lower_match_tree(
block,
pat.span,
@@ -1794,10 +1802,8 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
);
let mut opt_expr_place: Option<(Option<&Place<'tcx>>, Span)> = None;
let expr_place: Place<'tcx>;
- if let Ok(expr_builder) =
- expr_place_builder.try_upvars_resolved(self.tcx, self.typeck_results)
- {
- expr_place = expr_builder.into_place(self.tcx, self.typeck_results);
+ if let Ok(expr_builder) = expr_place_builder.try_upvars_resolved(self) {
+ expr_place = expr_builder.into_place(self);
opt_expr_place = Some((Some(&expr_place), expr_span));
}
let otherwise_post_guard_block = otherwise_candidate.pre_binding_block.unwrap();
@@ -1820,6 +1826,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
None,
None,
None,
+ false,
);
post_guard_block.unit()
@@ -1843,6 +1850,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
arm_span: Option<Span>,
match_scope: Option<region::Scope>,
schedule_drops: bool,
+ storages_alive: bool,
) -> BasicBlock {
debug!("bind_and_guard_matched_candidate(candidate={:?})", candidate);
@@ -1978,7 +1986,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
let mut guard_span = rustc_span::DUMMY_SP;
let (post_guard_block, otherwise_post_guard_block) =
- self.in_if_then_scope(match_scope, |this| match *guard {
+ self.in_if_then_scope(match_scope, guard_span, |this| match *guard {
Guard::If(e) => {
let e = &this.thir[e];
guard_span = e.span;
@@ -2058,7 +2066,12 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
self.cfg.push_fake_read(post_guard_block, guard_end, cause, Place::from(local_id));
}
assert!(schedule_drops, "patterns with guards must schedule drops");
- self.bind_matched_candidate_for_arm_body(post_guard_block, true, by_value_bindings);
+ self.bind_matched_candidate_for_arm_body(
+ post_guard_block,
+ true,
+ by_value_bindings,
+ storages_alive,
+ );
post_guard_block
} else {
@@ -2072,6 +2085,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
.iter()
.flat_map(|(bindings, _)| bindings)
.chain(&candidate.bindings),
+ storages_alive,
);
block
}
@@ -2161,6 +2175,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
block: BasicBlock,
schedule_drops: bool,
bindings: impl IntoIterator<Item = &'b Binding<'tcx>>,
+ storages_alive: bool,
) where
'tcx: 'b,
{
@@ -2170,13 +2185,20 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
// Assign each of the bindings. This may trigger moves out of the candidate.
for binding in bindings {
let source_info = self.source_info(binding.span);
- let local = self.storage_live_binding(
- block,
- binding.var_id,
- binding.span,
- OutsideGuard,
- schedule_drops,
- );
+ let local = if storages_alive {
+ // Here storages are already alive, probably because this is a binding
+ // from let-else.
+ // We just need to schedule drop for the value.
+ self.var_local_id(binding.var_id, OutsideGuard).into()
+ } else {
+ self.storage_live_binding(
+ block,
+ binding.var_id,
+ binding.span,
+ OutsideGuard,
+ schedule_drops,
+ )
+ };
if schedule_drops {
self.schedule_drop_for_binding(binding.var_id, binding.span, OutsideGuard);
}
@@ -2195,6 +2217,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
/// first local is a binding for occurrences of `var` in the guard, which
/// will have type `&T`. The second local is a binding for occurrences of
/// `var` in the arm body, which will have type `T`.
+ #[instrument(skip(self), level = "debug")]
fn declare_binding(
&mut self,
source_info: SourceInfo,
@@ -2209,19 +2232,12 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
opt_match_place: Option<(Option<Place<'tcx>>, Span)>,
pat_span: Span,
) {
- debug!(
- "declare_binding(var_id={:?}, name={:?}, mode={:?}, var_ty={:?}, \
- visibility_scope={:?}, source_info={:?})",
- var_id, name, mode, var_ty, visibility_scope, source_info
- );
-
let tcx = self.tcx;
let debug_source_info = SourceInfo { span: source_info.span, scope: visibility_scope };
let binding_mode = match mode {
BindingMode::ByValue => ty::BindingMode::BindByValue(mutability),
BindingMode::ByRef(_) => ty::BindingMode::BindByReference(mutability),
};
- debug!("declare_binding: user_ty={:?}", user_ty);
let local = LocalDecl::<'tcx> {
mutability,
ty: var_ty,
@@ -2271,7 +2287,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
} else {
LocalsForNode::One(for_arm_body)
};
- debug!("declare_binding: vars={:?}", locals);
+ debug!(?locals);
self.var_indices.insert(var_id, locals);
}
@@ -2280,24 +2296,16 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
mut block: BasicBlock,
init: &Expr<'tcx>,
initializer_span: Span,
- else_block: &Block,
- visibility_scope: Option<SourceScope>,
- remainder_scope: region::Scope,
- remainder_span: Span,
+ else_block: BlockId,
+ let_else_scope: &region::Scope,
pattern: &Pat<'tcx>,
- ) -> BlockAnd<()> {
- let (matching, failure) = self.in_if_then_scope(remainder_scope, |this| {
+ ) -> BlockAnd<BasicBlock> {
+ let else_block_span = self.thir[else_block].span;
+ let (matching, failure) = self.in_if_then_scope(*let_else_scope, else_block_span, |this| {
let scrutinee = unpack!(block = this.lower_scrutinee(block, init, initializer_span));
- let pat = Pat { ty: init.ty, span: else_block.span, kind: Box::new(PatKind::Wild) };
- let mut wildcard = Candidate::new(scrutinee.clone(), &pat, false);
- this.declare_bindings(
- visibility_scope,
- remainder_span,
- pattern,
- ArmHasGuard(false),
- Some((None, initializer_span)),
- );
- let mut candidate = Candidate::new(scrutinee.clone(), pattern, false);
+ let pat = Pat { ty: init.ty, span: else_block_span, kind: PatKind::Wild };
+ let mut wildcard = Candidate::new(scrutinee.clone(), &pat, false, this);
+ let mut candidate = Candidate::new(scrutinee.clone(), pattern, false, this);
let fake_borrow_temps = this.lower_match_tree(
block,
initializer_span,
@@ -2315,10 +2323,11 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
None,
None,
None,
+ true,
);
// This block is for the failure case
let failure = this.bind_pattern(
- this.source_info(else_block.span),
+ this.source_info(else_block_span),
wildcard,
None,
&fake_borrow_temps,
@@ -2326,29 +2335,11 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
None,
None,
None,
+ true,
);
- this.break_for_else(failure, remainder_scope, this.source_info(initializer_span));
+ this.break_for_else(failure, *let_else_scope, this.source_info(initializer_span));
matching.unit()
});
-
- // This place is not really used because this destination place
- // should never be used to take values at the end of the failure
- // block.
- let dummy_place = Place { local: RETURN_PLACE, projection: ty::List::empty() };
- let failure_block;
- unpack!(
- failure_block = self.ast_block(
- dummy_place,
- failure,
- else_block,
- self.source_info(else_block.span),
- )
- );
- self.cfg.terminate(
- failure_block,
- self.source_info(else_block.span),
- TerminatorKind::Unreachable,
- );
- matching.unit()
+ matching.and(failure)
}
}
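
Editor's sketch: across matches/mod.rs, `try_upvars_resolved`, `into_place`, and `Candidate::new` now take the whole `Builder` instead of the `(tcx, typeck_results)` pair, and a `storages_alive` flag is threaded through so let-else bindings whose storage is already live are not given a second StorageLive. The sketch below only shows the context-threading half of that change, with hypothetical stand-in types rather than the real builder.

    #[allow(dead_code)]
    struct Tcx;
    #[allow(dead_code)]
    struct TypeckResults;

    #[allow(dead_code)]
    struct Cx {
        tcx: Tcx,
        typeck_results: TypeckResults,
    }

    struct PlaceBuilder(String);

    impl PlaceBuilder {
        // Before: fn into_place(self, tcx: &Tcx, typeck_results: &TypeckResults).
        // Taking the whole context means new shared state does not ripple
        // through every caller's signature.
        fn into_place(self, _cx: &Cx) -> String {
            self.0
        }
    }

    fn main() {
        let cx = Cx { tcx: Tcx, typeck_results: TypeckResults };
        let place = PlaceBuilder("_1.0".to_string()).into_place(&cx);
        println!("{place}");
    }
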
diff --git a/compiler/rustc_mir_build/src/build/matches/simplify.rs b/compiler/rustc_mir_build/src/build/matches/simplify.rs
index c62989041..924d2f555 100644
--- a/compiler/rustc_mir_build/src/build/matches/simplify.rs
+++ b/compiler/rustc_mir_build/src/build/matches/simplify.rs
@@ -37,12 +37,13 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
///
/// only generates a single switch. If this happens this method returns
/// `true`.
+ #[instrument(skip(self, candidate), level = "debug")]
pub(super) fn simplify_candidate<'pat>(
&mut self,
candidate: &mut Candidate<'pat, 'tcx>,
) -> bool {
// repeatedly simplify match pairs until fixed point is reached
- debug!(?candidate, "simplify_candidate");
+ debug!("{candidate:#?}");
// existing_bindings and new_bindings exists to keep the semantics in order.
// Reversing the binding order for bindings after `@` changes the binding order in places
@@ -67,7 +68,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
loop {
let match_pairs = mem::take(&mut candidate.match_pairs);
- if let [MatchPair { pattern: Pat { kind: box PatKind::Or { pats }, .. }, place }] =
+ if let [MatchPair { pattern: Pat { kind: PatKind::Or { pats }, .. }, place }] =
&*match_pairs
{
existing_bindings.extend_from_slice(&new_bindings);
@@ -113,7 +114,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
// late as possible.
candidate
.match_pairs
- .sort_by_key(|pair| matches!(*pair.pattern.kind, PatKind::Or { .. }));
+ .sort_by_key(|pair| matches!(pair.pattern.kind, PatKind::Or { .. }));
debug!(simplified = ?candidate, "simplify_candidate");
return false; // if we were not able to simplify any, done.
}
@@ -127,11 +128,11 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
&mut self,
candidate: &Candidate<'pat, 'tcx>,
place: PlaceBuilder<'tcx>,
- pats: &'pat [Pat<'tcx>],
+ pats: &'pat [Box<Pat<'tcx>>],
) -> Vec<Candidate<'pat, 'tcx>> {
pats.iter()
- .map(|pat| {
- let mut candidate = Candidate::new(place.clone(), pat, candidate.has_guard);
+ .map(|box pat| {
+ let mut candidate = Candidate::new(place.clone(), pat, candidate.has_guard, self);
self.simplify_candidate(&mut candidate);
candidate
})
@@ -149,23 +150,21 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
candidate: &mut Candidate<'pat, 'tcx>,
) -> Result<(), MatchPair<'pat, 'tcx>> {
let tcx = self.tcx;
- match *match_pair.pattern.kind {
+ match match_pair.pattern.kind {
PatKind::AscribeUserType {
ref subpattern,
ascription: thir::Ascription { ref annotation, variance },
} => {
// Apply the type ascription to the value at `match_pair.place`, which is the
- if let Ok(place_resolved) =
- match_pair.place.clone().try_upvars_resolved(self.tcx, self.typeck_results)
- {
+ if let Ok(place_resolved) = match_pair.place.clone().try_upvars_resolved(self) {
candidate.ascriptions.push(Ascription {
annotation: annotation.clone(),
- source: place_resolved.into_place(self.tcx, self.typeck_results),
+ source: place_resolved.into_place(self),
variance,
});
}
- candidate.match_pairs.push(MatchPair::new(match_pair.place, subpattern));
+ candidate.match_pairs.push(MatchPair::new(match_pair.place, subpattern, self));
Ok(())
}
@@ -184,12 +183,10 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
ref subpattern,
is_primary: _,
} => {
- if let Ok(place_resolved) =
- match_pair.place.clone().try_upvars_resolved(self.tcx, self.typeck_results)
- {
+ if let Ok(place_resolved) = match_pair.place.clone().try_upvars_resolved(self) {
candidate.bindings.push(Binding {
span: match_pair.pattern.span,
- source: place_resolved.into_place(self.tcx, self.typeck_results),
+ source: place_resolved.into_place(self),
var_id: var,
binding_mode: mode,
});
@@ -197,7 +194,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
if let Some(subpattern) = subpattern.as_ref() {
// this is the `x @ P` case; have to keep matching against `P` now
- candidate.match_pairs.push(MatchPair::new(match_pair.place, subpattern));
+ candidate.match_pairs.push(MatchPair::new(match_pair.place, subpattern, self));
}
Ok(())
@@ -208,7 +205,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
Err(match_pair)
}
- PatKind::Range(PatRange { lo, hi, end }) => {
+ PatKind::Range(box PatRange { lo, hi, end }) => {
let (range, bias) = match *lo.ty().kind() {
ty::Char => {
(Some(('\u{0000}' as u128, '\u{10FFFF}' as u128, Size::from_bits(32))), 0)
@@ -254,7 +251,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
&mut candidate.match_pairs,
&match_pair.place,
prefix,
- slice.as_ref(),
+ slice,
suffix,
);
Ok(())
@@ -267,14 +264,10 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
let irrefutable = adt_def.variants().iter_enumerated().all(|(i, v)| {
i == variant_index || {
self.tcx.features().exhaustive_patterns
- && !v
- .uninhabited_from(
- self.tcx,
- substs,
- adt_def.adt_kind(),
- self.param_env,
- )
- .is_empty()
+ && v.inhabited_predicate(self.tcx, adt_def)
+ .subst(self.tcx, substs)
+ .apply_any_module(self.tcx, self.param_env)
+ != Some(true)
}
}) && (adt_def.did().is_local()
|| !adt_def.is_variant_list_non_exhaustive());
@@ -294,7 +287,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
&mut candidate.match_pairs,
&match_pair.place,
prefix,
- slice.as_ref(),
+ slice,
suffix,
);
Ok(())
@@ -308,7 +301,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
PatKind::Deref { ref subpattern } => {
let place_builder = match_pair.place.deref();
- candidate.match_pairs.push(MatchPair::new(place_builder, subpattern));
+ candidate.match_pairs.push(MatchPair::new(place_builder, subpattern, self));
Ok(())
}
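
Editor's sketch: in simplify.rs, or-pattern alternatives are now stored as `&[Box<Pat>]` and ranges as boxed `PatRange`s. A small illustration, with a hypothetical `Pat` stand-in, of how `Box`'s auto-deref keeps most use sites looking unchanged:

    #[derive(Debug)]
    struct Pat {
        name: &'static str,
    }

    fn first_name(pats: &[Box<Pat>]) -> Option<&str> {
        // `Box<Pat>` auto-derefs, so field access reads the same as with `&[Pat]`.
        pats.first().map(|pat| pat.name)
    }

    fn main() {
        let pats = vec![Box::new(Pat { name: "Some(x)" }), Box::new(Pat { name: "None" })];
        assert_eq!(first_name(&pats), Some("Some(x)"));
    }
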
diff --git a/compiler/rustc_mir_build/src/build/matches/test.rs b/compiler/rustc_mir_build/src/build/matches/test.rs
index 598da80c5..b597ecfaa 100644
--- a/compiler/rustc_mir_build/src/build/matches/test.rs
+++ b/compiler/rustc_mir_build/src/build/matches/test.rs
@@ -14,8 +14,8 @@ use rustc_hir::{LangItem, RangeEnd};
use rustc_index::bit_set::BitSet;
use rustc_middle::mir::*;
use rustc_middle::thir::*;
-use rustc_middle::ty::subst::{GenericArg, Subst};
use rustc_middle::ty::util::IntTypeExt;
+use rustc_middle::ty::GenericArg;
use rustc_middle::ty::{self, adjustment::PointerCast, Ty, TyCtxt};
use rustc_span::def_id::DefId;
use rustc_span::symbol::{sym, Symbol};
@@ -29,7 +29,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
///
/// It is a bug to call this with a not-fully-simplified pattern.
pub(super) fn test<'pat>(&mut self, match_pair: &MatchPair<'pat, 'tcx>) -> Test<'tcx> {
- match *match_pair.pattern.kind {
+ match match_pair.pattern.kind {
PatKind::Variant { adt_def, substs: _, variant_index: _, subpatterns: _ } => Test {
span: match_pair.pattern.span,
kind: TestKind::Switch {
@@ -58,10 +58,10 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
kind: TestKind::Eq { value, ty: match_pair.pattern.ty },
},
- PatKind::Range(range) => {
+ PatKind::Range(ref range) => {
assert_eq!(range.lo.ty(), match_pair.pattern.ty);
assert_eq!(range.hi.ty(), match_pair.pattern.ty);
- Test { span: match_pair.pattern.span, kind: TestKind::Range(range) }
+ Test { span: match_pair.pattern.span, kind: TestKind::Range(range.clone()) }
}
PatKind::Slice { ref prefix, ref slice, ref suffix } => {
@@ -92,7 +92,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
return false;
};
- match *match_pair.pattern.kind {
+ match match_pair.pattern.kind {
PatKind::Constant { value } => {
options
.entry(value)
@@ -102,9 +102,9 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
PatKind::Variant { .. } => {
panic!("you should have called add_variants_to_switch instead!");
}
- PatKind::Range(range) => {
+ PatKind::Range(ref range) => {
// Check that none of the switch values are in the range.
- self.values_not_contained_in_range(range, options).unwrap_or(false)
+ self.values_not_contained_in_range(&*range, options).unwrap_or(false)
}
PatKind::Slice { .. }
| PatKind::Array { .. }
@@ -130,7 +130,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
return false;
};
- match *match_pair.pattern.kind {
+ match match_pair.pattern.kind {
PatKind::Variant { adt_def: _, variant_index, .. } => {
// We have a pattern testing for variant `variant_index`
// set the corresponding index to true
@@ -144,6 +144,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
}
}
+ #[instrument(skip(self, make_target_blocks, place_builder), level = "debug")]
pub(super) fn perform_test(
&mut self,
match_start_span: Span,
@@ -153,21 +154,9 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
test: &Test<'tcx>,
make_target_blocks: impl FnOnce(&mut Self) -> Vec<BasicBlock>,
) {
- let place: Place<'tcx>;
- if let Ok(test_place_builder) =
- place_builder.try_upvars_resolved(self.tcx, self.typeck_results)
- {
- place = test_place_builder.into_place(self.tcx, self.typeck_results);
- } else {
- return;
- }
- debug!(
- "perform_test({:?}, {:?}: {:?}, {:?})",
- block,
- place,
- place.ty(&self.local_decls, self.tcx),
- test
- );
+ let place = place_builder.into_place(self);
+ let place_ty = place.ty(&self.local_decls, self.tcx);
+ debug!(?place, ?place_ty,);
let source_info = self.source_info(test.span);
match test.kind {
@@ -272,7 +261,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
}
}
- TestKind::Range(PatRange { lo, hi, ref end }) => {
+ TestKind::Range(box PatRange { lo, hi, ref end }) => {
let lower_bound_success = self.cfg.start_new_block();
let target_blocks = make_target_blocks(self);
@@ -506,7 +495,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
let (match_pair_index, match_pair) =
candidate.match_pairs.iter().enumerate().find(|&(_, mp)| mp.place == *test_place)?;
- match (&test.kind, &*match_pair.pattern.kind) {
+ match (&test.kind, &match_pair.pattern.kind) {
// If we are performing a variant switch, then this
// informs variant patterns, but nothing else.
(
@@ -540,9 +529,9 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
Some(index)
}
- (&TestKind::SwitchInt { switch_ty: _, ref options }, &PatKind::Range(range)) => {
+ (&TestKind::SwitchInt { switch_ty: _, ref options }, &PatKind::Range(ref range)) => {
let not_contained =
- self.values_not_contained_in_range(range, options).unwrap_or(false);
+ self.values_not_contained_in_range(&*range, options).unwrap_or(false);
if not_contained {
// No switch values are contained in the pattern range,
@@ -569,7 +558,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
match_pair_index,
candidate,
prefix,
- slice.as_ref(),
+ slice,
suffix,
);
Some(0)
@@ -607,7 +596,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
match_pair_index,
candidate,
prefix,
- slice.as_ref(),
+ slice,
suffix,
);
Some(0)
@@ -631,7 +620,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
}
}
- (&TestKind::Range(test), &PatKind::Range(pat)) => {
+ (&TestKind::Range(ref test), &PatKind::Range(ref pat)) => {
use std::cmp::Ordering::*;
if test == pat {
@@ -658,8 +647,8 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
no_overlap
}
- (&TestKind::Range(range), &PatKind::Constant { value }) => {
- if let Some(false) = self.const_range_contains(range, value) {
+ (&TestKind::Range(ref range), &PatKind::Constant { value }) => {
+ if let Some(false) = self.const_range_contains(&*range, value) {
// `value` is not contained in the testing range,
// so `value` can be matched only if this test fails.
Some(1)
@@ -678,7 +667,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
// However, at this point we can still encounter or-patterns that were extracted
// from previous calls to `sort_candidate`, so we need to manually address that
// case to avoid panicking in `self.test()`.
- if let PatKind::Or { .. } = &*match_pair.pattern.kind {
+ if let PatKind::Or { .. } = &match_pair.pattern.kind {
return None;
}
@@ -708,9 +697,9 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
&mut self,
match_pair_index: usize,
candidate: &mut Candidate<'pat, 'tcx>,
- prefix: &'pat [Pat<'tcx>],
- opt_slice: Option<&'pat Pat<'tcx>>,
- suffix: &'pat [Pat<'tcx>],
+ prefix: &'pat [Box<Pat<'tcx>>],
+ opt_slice: &'pat Option<Box<Pat<'tcx>>>,
+ suffix: &'pat [Box<Pat<'tcx>>],
) {
let removed_place = candidate.match_pairs.remove(match_pair_index).place;
self.prefix_slice_suffix(
@@ -735,14 +724,12 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
// So, if we have a match-pattern like `x @ Enum::Variant(P1, P2)`,
// we want to create a set of derived match-patterns like
// `(x as Variant).0 @ P1` and `(x as Variant).1 @ P1`.
- let elem =
- ProjectionElem::Downcast(Some(adt_def.variant(variant_index).name), variant_index);
- let downcast_place = match_pair.place.project(elem); // `(x as Variant)`
+ let downcast_place = match_pair.place.downcast(adt_def, variant_index); // `(x as Variant)`
let consequent_match_pairs = subpatterns.iter().map(|subpattern| {
// e.g., `(x as Variant).0`
let place = downcast_place.clone().field(subpattern.field, subpattern.pattern.ty);
// e.g., `(x as Variant).0 @ P1`
- MatchPair::new(place, &subpattern.pattern)
+ MatchPair::new(place, &subpattern.pattern, self)
});
candidate.match_pairs.extend(consequent_match_pairs);
@@ -754,7 +741,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
fn const_range_contains(
&self,
- range: PatRange<'tcx>,
+ range: &PatRange<'tcx>,
value: ConstantKind<'tcx>,
) -> Option<bool> {
use std::cmp::Ordering::*;
@@ -772,7 +759,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
fn values_not_contained_in_range(
&self,
- range: PatRange<'tcx>,
+ range: &PatRange<'tcx>,
options: &FxIndexMap<ConstantKind<'tcx>, u128>,
) -> Option<bool> {
for &val in options.keys() {
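
Editor's sketch: the test.rs hunks above change `const_range_contains` and `values_not_contained_in_range` to borrow a `PatRange`, now that `TestKind::Range` holds a `Box<PatRange>`. A minimal sketch of the by-reference signature with a simplified stand-in range type (the real comparison goes through `ConstantKind`, not plain integers):

    struct PatRange {
        lo: i64,
        hi: i64,
    }

    fn const_range_contains(range: &PatRange, value: i64) -> bool {
        range.lo <= value && value <= range.hi
    }

    fn main() {
        // With a boxed range, callers hold the Box and lend out a reference;
        // `&range` deref-coerces from `&Box<PatRange>` to `&PatRange`.
        let range = Box::new(PatRange { lo: 0, hi: 10 });
        assert!(const_range_contains(&range, 3));
        assert!(!const_range_contains(&range, 42));
    }
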
diff --git a/compiler/rustc_mir_build/src/build/matches/util.rs b/compiler/rustc_mir_build/src/build/matches/util.rs
index 9a1e98d3b..b854ba47f 100644
--- a/compiler/rustc_mir_build/src/build/matches/util.rs
+++ b/compiler/rustc_mir_build/src/build/matches/util.rs
@@ -1,9 +1,11 @@
+use crate::build::expr::as_place::PlaceBase;
use crate::build::expr::as_place::PlaceBuilder;
use crate::build::matches::MatchPair;
use crate::build::Builder;
use rustc_middle::mir::*;
use rustc_middle::thir::*;
use rustc_middle::ty;
+use rustc_middle::ty::TypeVisitable;
use smallvec::SmallVec;
use std::convert::TryInto;
@@ -17,7 +19,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
.iter()
.map(|fieldpat| {
let place = place.clone().field(fieldpat.field, fieldpat.pattern.ty);
- MatchPair::new(place, &fieldpat.pattern)
+ MatchPair::new(place, &fieldpat.pattern, self)
})
.collect()
}
@@ -26,32 +28,26 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
&mut self,
match_pairs: &mut SmallVec<[MatchPair<'pat, 'tcx>; 1]>,
place: &PlaceBuilder<'tcx>,
- prefix: &'pat [Pat<'tcx>],
- opt_slice: Option<&'pat Pat<'tcx>>,
- suffix: &'pat [Pat<'tcx>],
+ prefix: &'pat [Box<Pat<'tcx>>],
+ opt_slice: &'pat Option<Box<Pat<'tcx>>>,
+ suffix: &'pat [Box<Pat<'tcx>>],
) {
let tcx = self.tcx;
- let (min_length, exact_size) = if let Ok(place_resolved) =
- place.clone().try_upvars_resolved(tcx, self.typeck_results)
- {
- match place_resolved
- .into_place(tcx, self.typeck_results)
- .ty(&self.local_decls, tcx)
- .ty
- .kind()
- {
- ty::Array(_, length) => (length.eval_usize(tcx, self.param_env), true),
- _ => ((prefix.len() + suffix.len()).try_into().unwrap(), false),
- }
- } else {
- ((prefix.len() + suffix.len()).try_into().unwrap(), false)
- };
+ let (min_length, exact_size) =
+ if let Ok(place_resolved) = place.clone().try_upvars_resolved(self) {
+ match place_resolved.into_place(self).ty(&self.local_decls, tcx).ty.kind() {
+ ty::Array(_, length) => (length.eval_usize(tcx, self.param_env), true),
+ _ => ((prefix.len() + suffix.len()).try_into().unwrap(), false),
+ }
+ } else {
+ ((prefix.len() + suffix.len()).try_into().unwrap(), false)
+ };
match_pairs.extend(prefix.iter().enumerate().map(|(idx, subpattern)| {
let elem =
ProjectionElem::ConstantIndex { offset: idx as u64, min_length, from_end: false };
let place = place.clone().project(elem);
- MatchPair::new(place, subpattern)
+ MatchPair::new(place, subpattern, self)
}));
if let Some(subslice_pat) = opt_slice {
@@ -61,7 +57,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
to: if exact_size { min_length - suffix_len } else { suffix_len },
from_end: !exact_size,
});
- match_pairs.push(MatchPair::new(subslice, subslice_pat));
+ match_pairs.push(MatchPair::new(subslice, subslice_pat, self));
}
match_pairs.extend(suffix.iter().rev().enumerate().map(|(idx, subpattern)| {
@@ -72,7 +68,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
from_end: !exact_size,
};
let place = place.clone().project(elem);
- MatchPair::new(place, subpattern)
+ MatchPair::new(place, subpattern, self)
}));
}
@@ -100,10 +96,29 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
}
impl<'pat, 'tcx> MatchPair<'pat, 'tcx> {
- pub(crate) fn new(
+ pub(in crate::build) fn new(
place: PlaceBuilder<'tcx>,
pattern: &'pat Pat<'tcx>,
+ cx: &Builder<'_, 'tcx>,
) -> MatchPair<'pat, 'tcx> {
+ // Force the place type to the pattern's type.
+ // FIXME(oli-obk): can we use this to simplify slice/array pattern hacks?
+ let mut place = match place.try_upvars_resolved(cx) {
+ Ok(val) | Err(val) => val,
+ };
+
+ // Only add the OpaqueCast projection if the given place is an opaque type and the
+ // expected type from the pattern is not.
+ let may_need_cast = match place.base() {
+ PlaceBase::Local(local) => {
+ let ty = Place::ty_from(local, place.projection(), &cx.local_decls, cx.tcx).ty;
+ ty != pattern.ty && ty.has_opaque_types()
+ }
+ _ => true,
+ };
+ if may_need_cast {
+ place = place.project(ProjectionElem::OpaqueCast(pattern.ty));
+ }
MatchPair { place, pattern }
}
}
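
Editor's sketch: `MatchPair::new` in util.rs now takes the builder and appends an `OpaqueCast` projection only when the place's type may still be opaque while the pattern's type is concrete. The sketch below is a rough simplification of just that decision, using hypothetical `Ty`/`Step` stand-ins rather than real MIR projections or the actual `has_opaque_types` query.

    #[derive(Debug, Clone, Copy, PartialEq)]
    enum Ty {
        Concrete(&'static str),
        Opaque,
    }

    #[allow(dead_code)]
    #[derive(Debug)]
    enum Step {
        Field(usize),
        OpaqueCast(Ty),
    }

    fn match_pair_projection(place_ty: Ty, pattern_ty: Ty, mut proj: Vec<Step>) -> Vec<Step> {
        // Mirrors the shape of the check in the hunk: cast only when the types
        // differ and the place side may be an opaque type.
        let may_need_cast = place_ty != pattern_ty && place_ty == Ty::Opaque;
        if may_need_cast {
            proj.push(Step::OpaqueCast(pattern_ty));
        }
        proj
    }

    fn main() {
        let proj = match_pair_projection(Ty::Opaque, Ty::Concrete("i32"), vec![Step::Field(0)]);
        println!("{proj:?}"); // [Field(0), OpaqueCast(Concrete("i32"))]
    }
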
diff --git a/compiler/rustc_mir_build/src/build/mod.rs b/compiler/rustc_mir_build/src/build/mod.rs
index 12b8ceede..cbcf9cd12 100644
--- a/compiler/rustc_mir_build/src/build/mod.rs
+++ b/compiler/rustc_mir_build/src/build/mod.rs
@@ -1,15 +1,14 @@
-use crate::build;
pub(crate) use crate::build::expr::as_constant::lit_to_mir_constant;
use crate::build::expr::as_place::PlaceBuilder;
use crate::build::scope::DropKind;
-use crate::thir::pattern::pat_from_hir;
use rustc_apfloat::ieee::{Double, Single};
use rustc_apfloat::Float;
use rustc_data_structures::fx::FxHashMap;
+use rustc_data_structures::sorted_map::SortedIndexMultiMap;
use rustc_errors::ErrorGuaranteed;
use rustc_hir as hir;
+use rustc_hir::def::DefKind;
use rustc_hir::def_id::{DefId, LocalDefId};
-use rustc_hir::lang_items::LangItem;
use rustc_hir::{GeneratorKind, Node};
use rustc_index::vec::{Idx, IndexVec};
use rustc_infer::infer::{InferCtxt, TyCtxtInferExt};
@@ -18,8 +17,9 @@ use rustc_middle::middle::region;
use rustc_middle::mir::interpret::ConstValue;
use rustc_middle::mir::interpret::Scalar;
use rustc_middle::mir::*;
-use rustc_middle::thir::{BindingMode, Expr, ExprId, LintLevel, LocalVarId, PatKind, Thir};
-use rustc_middle::ty::subst::Subst;
+use rustc_middle::thir::{
+ self, BindingMode, Expr, ExprId, LintLevel, LocalVarId, Param, ParamId, PatKind, Thir,
+};
use rustc_middle::ty::{self, Ty, TyCtxt, TypeVisitable, TypeckResults};
use rustc_span::symbol::sym;
use rustc_span::Span;
@@ -47,9 +47,7 @@ pub(crate) fn mir_built<'tcx>(
/// Construct the MIR for a given `DefId`.
fn mir_build(tcx: TyCtxt<'_>, def: ty::WithOptConstParam<LocalDefId>) -> Body<'_> {
- let id = tcx.hir().local_def_id_to_hir_id(def.did);
let body_owner_kind = tcx.hir().body_owner_kind(def.did);
- let typeck_results = tcx.typeck_opt_const_arg(def);
// Ensure unsafeck and abstract const building is ran before we steal the THIR.
// We can't use `ensure()` for `thir_abstract_const` as it doesn't compute the query
@@ -66,235 +64,42 @@ fn mir_build(tcx: TyCtxt<'_>, def: ty::WithOptConstParam<LocalDefId>) -> Body<'_
}
}
- // Figure out what primary body this item has.
- let (body_id, return_ty_span, span_with_body) = match tcx.hir().get(id) {
- Node::Expr(hir::Expr {
- kind: hir::ExprKind::Closure(hir::Closure { fn_decl, body, .. }),
- ..
- }) => (*body, fn_decl.output.span(), None),
- Node::Item(hir::Item {
- kind: hir::ItemKind::Fn(hir::FnSig { decl, .. }, _, body_id),
- span,
- ..
- })
- | Node::ImplItem(hir::ImplItem {
- kind: hir::ImplItemKind::Fn(hir::FnSig { decl, .. }, body_id),
- span,
- ..
- })
- | Node::TraitItem(hir::TraitItem {
- kind: hir::TraitItemKind::Fn(hir::FnSig { decl, .. }, hir::TraitFn::Provided(body_id)),
- span,
- ..
- }) => {
- // Use the `Span` of the `Item/ImplItem/TraitItem` as the body span,
- // since the def span of a function does not include the body
- (*body_id, decl.output.span(), Some(*span))
- }
- Node::Item(hir::Item {
- kind: hir::ItemKind::Static(ty, _, body_id) | hir::ItemKind::Const(ty, body_id),
- ..
- })
- | Node::ImplItem(hir::ImplItem { kind: hir::ImplItemKind::Const(ty, body_id), .. })
- | Node::TraitItem(hir::TraitItem {
- kind: hir::TraitItemKind::Const(ty, Some(body_id)),
- ..
- }) => (*body_id, ty.span, None),
- Node::AnonConst(hir::AnonConst { body, hir_id, .. }) => {
- (*body, tcx.hir().span(*hir_id), None)
- }
-
- _ => span_bug!(tcx.hir().span(id), "can't build MIR for {:?}", def.did),
- };
-
- // If we don't have a specialized span for the body, just use the
- // normal def span.
- let span_with_body = span_with_body.unwrap_or_else(|| tcx.hir().span(id));
-
- tcx.infer_ctxt().enter(|infcx| {
- let body = if let Some(error_reported) = typeck_results.tainted_by_errors {
- build::construct_error(&infcx, def, id, body_id, body_owner_kind, error_reported)
- } else if body_owner_kind.is_fn_or_closure() {
- // fetch the fully liberated fn signature (that is, all bound
- // types/lifetimes replaced)
- let fn_sig = typeck_results.liberated_fn_sigs()[id];
- let fn_def_id = tcx.hir().local_def_id(id);
-
- let safety = match fn_sig.unsafety {
- hir::Unsafety::Normal => Safety::Safe,
- hir::Unsafety::Unsafe => Safety::FnUnsafe,
- };
-
- let body = tcx.hir().body(body_id);
- let (thir, expr) = tcx
- .thir_body(def)
- .unwrap_or_else(|_| (tcx.alloc_steal_thir(Thir::new()), ExprId::from_u32(0)));
+ let body = match tcx.thir_body(def) {
+ Err(error_reported) => construct_error(tcx, def.did, body_owner_kind, error_reported),
+ Ok((thir, expr)) => {
// We ran all queries that depended on THIR at the beginning
// of `mir_build`, so now we can steal it
let thir = thir.steal();
- let ty = tcx.type_of(fn_def_id);
- let mut abi = fn_sig.abi;
- let implicit_argument = match ty.kind() {
- ty::Closure(..) => {
- // HACK(eddyb) Avoid having RustCall on closures,
- // as it adds unnecessary (and wrong) auto-tupling.
- abi = Abi::Rust;
- vec![ArgInfo(liberated_closure_env_ty(tcx, id, body_id), None, None, None)]
- }
- ty::Generator(..) => {
- let gen_ty = tcx.typeck_body(body_id).node_type(id);
-
- // The resume argument may be missing, in that case we need to provide it here.
- // It will always be `()` in this case.
- if body.params.is_empty() {
- vec![
- ArgInfo(gen_ty, None, None, None),
- ArgInfo(tcx.mk_unit(), None, None, None),
- ]
- } else {
- vec![ArgInfo(gen_ty, None, None, None)]
- }
- }
- _ => vec![],
- };
-
- let explicit_arguments = body.params.iter().enumerate().map(|(index, arg)| {
- let owner_id = tcx.hir().body_owner(body_id);
- let opt_ty_info;
- let self_arg;
- if let Some(ref fn_decl) = tcx.hir().fn_decl_by_hir_id(owner_id) {
- opt_ty_info = fn_decl
- .inputs
- .get(index)
- // Make sure that inferred closure args have no type span
- .and_then(|ty| if arg.pat.span != ty.span { Some(ty.span) } else { None });
- self_arg = if index == 0 && fn_decl.implicit_self.has_implicit_self() {
- match fn_decl.implicit_self {
- hir::ImplicitSelfKind::Imm => Some(ImplicitSelfKind::Imm),
- hir::ImplicitSelfKind::Mut => Some(ImplicitSelfKind::Mut),
- hir::ImplicitSelfKind::ImmRef => Some(ImplicitSelfKind::ImmRef),
- hir::ImplicitSelfKind::MutRef => Some(ImplicitSelfKind::MutRef),
- _ => None,
- }
- } else {
- None
- };
- } else {
- opt_ty_info = None;
- self_arg = None;
- }
-
- // C-variadic fns also have a `VaList` input that's not listed in `fn_sig`
- // (as it's created inside the body itself, not passed in from outside).
- let ty = if fn_sig.c_variadic && index == fn_sig.inputs().len() {
- let va_list_did = tcx.require_lang_item(LangItem::VaList, Some(arg.span));
- tcx.bound_type_of(va_list_did).subst(tcx, &[tcx.lifetimes.re_erased.into()])
- } else {
- fn_sig.inputs()[index]
- };
-
- ArgInfo(ty, opt_ty_info, Some(&arg), self_arg)
- });
-
- let arguments = implicit_argument.into_iter().chain(explicit_arguments);
-
- let (yield_ty, return_ty) = if body.generator_kind.is_some() {
- let gen_ty = tcx.typeck_body(body_id).node_type(id);
- let gen_sig = match gen_ty.kind() {
- ty::Generator(_, gen_substs, ..) => gen_substs.as_generator().sig(),
- _ => span_bug!(tcx.hir().span(id), "generator w/o generator type: {:?}", ty),
- };
- (Some(gen_sig.yield_ty), gen_sig.return_ty)
+ if body_owner_kind.is_fn_or_closure() {
+ construct_fn(tcx, def, &thir, expr)
} else {
- (None, fn_sig.output())
- };
-
- let mut mir = build::construct_fn(
- &thir,
- &infcx,
- def,
- id,
- arguments,
- safety,
- abi,
- return_ty,
- return_ty_span,
- body,
- expr,
- span_with_body,
- );
- if yield_ty.is_some() {
- mir.generator.as_mut().unwrap().yield_ty = yield_ty;
+ construct_const(tcx, def, &thir, expr)
}
- mir
- } else {
- // Get the revealed type of this const. This is *not* the adjusted
- // type of its body, which may be a subtype of this type. For
- // example:
- //
- // fn foo(_: &()) {}
- // static X: fn(&'static ()) = foo;
- //
- // The adjusted type of the body of X is `for<'a> fn(&'a ())` which
- // is not the same as the type of X. We need the type of the return
- // place to be the type of the constant because NLL typeck will
- // equate them.
-
- let return_ty = typeck_results.node_type(id);
-
- let (thir, expr) = tcx
- .thir_body(def)
- .unwrap_or_else(|_| (tcx.alloc_steal_thir(Thir::new()), ExprId::from_u32(0)));
- // We ran all queries that depended on THIR at the beginning
- // of `mir_build`, so now we can steal it
- let thir = thir.steal();
-
- build::construct_const(&thir, &infcx, expr, def, id, return_ty, return_ty_span)
- };
+ }
+ };
- lints::check(tcx, &body);
-
- // The borrow checker will replace all the regions here with its own
- // inference variables. There's no point having non-erased regions here.
- // The exception is `body.user_type_annotations`, which is used unmodified
- // by borrow checking.
- debug_assert!(
- !(body.local_decls.has_free_regions()
- || body.basic_blocks().has_free_regions()
- || body.var_debug_info.has_free_regions()
- || body.yield_ty().has_free_regions()),
- "Unexpected free regions in MIR: {:?}",
- body,
- );
+ lints::check(tcx, &body);
+
+ // The borrow checker will replace all the regions here with its own
+ // inference variables. There's no point having non-erased regions here.
+ // The exception is `body.user_type_annotations`, which is used unmodified
+ // by borrow checking.
+ debug_assert!(
+ !(body.local_decls.has_free_regions()
+ || body.basic_blocks.has_free_regions()
+ || body.var_debug_info.has_free_regions()
+ || body.yield_ty().has_free_regions()),
+ "Unexpected free regions in MIR: {:?}",
+ body,
+ );
- body
- })
+ body
}
///////////////////////////////////////////////////////////////////////////
// BuildMir -- walks a crate, looking for fn items and methods to build MIR from
-fn liberated_closure_env_ty(
- tcx: TyCtxt<'_>,
- closure_expr_id: hir::HirId,
- body_id: hir::BodyId,
-) -> Ty<'_> {
- let closure_ty = tcx.typeck_body(body_id).node_type(closure_expr_id);
-
- let ty::Closure(closure_def_id, closure_substs) = *closure_ty.kind() else {
- bug!("closure expr does not have closure type: {:?}", closure_ty);
- };
-
- let bound_vars =
- tcx.mk_bound_variable_kinds(std::iter::once(ty::BoundVariableKind::Region(ty::BrEnv)));
- let br =
- ty::BoundRegion { var: ty::BoundVar::from_usize(bound_vars.len() - 1), kind: ty::BrEnv };
- let env_region = ty::ReLateBound(ty::INNERMOST, br);
- let closure_env_ty = tcx.closure_env_ty(closure_def_id, closure_substs, env_region).unwrap();
- tcx.erase_late_bound_regions(ty::Binder::bind_with_vars(closure_env_ty, bound_vars))
-}
-
#[derive(Debug, PartialEq, Eq)]
enum BlockFrame {
/// Evaluation is currently within a statement.
@@ -352,7 +157,7 @@ struct BlockContext(Vec<BlockFrame>);
struct Builder<'a, 'tcx> {
tcx: TyCtxt<'tcx>,
- infcx: &'a InferCtxt<'a, 'tcx>,
+ infcx: InferCtxt<'tcx>,
typeck_results: &'tcx TypeckResults<'tcx>,
region_scope_tree: &'tcx region::ScopeTree,
param_env: ty::ParamEnv<'tcx>,
@@ -404,12 +209,21 @@ struct Builder<'a, 'tcx> {
var_indices: FxHashMap<LocalVarId, LocalsForNode>,
local_decls: IndexVec<Local, LocalDecl<'tcx>>,
canonical_user_type_annotations: ty::CanonicalUserTypeAnnotations<'tcx>,
- upvar_mutbls: Vec<Mutability>,
+ upvars: CaptureMap<'tcx>,
unit_temp: Option<Place<'tcx>>,
var_debug_info: Vec<VarDebugInfo<'tcx>>,
}
+type CaptureMap<'tcx> = SortedIndexMultiMap<usize, hir::HirId, Capture<'tcx>>;
+
+#[derive(Debug)]
+struct Capture<'tcx> {
+ captured_place: &'tcx ty::CapturedPlace<'tcx>,
+ use_place: Place<'tcx>,
+ mutability: Mutability,
+}
+
impl<'a, 'tcx> Builder<'a, 'tcx> {
fn is_bound_var_in_guard(&self, id: LocalVarId) -> bool {
self.guard_context.iter().any(|frame| frame.locals.iter().any(|local| local.id == id))
@@ -615,35 +429,59 @@ macro_rules! unpack {
///////////////////////////////////////////////////////////////////////////
/// the main entry point for building MIR for a function
-struct ArgInfo<'tcx>(
- Ty<'tcx>,
- Option<Span>,
- Option<&'tcx hir::Param<'tcx>>,
- Option<ImplicitSelfKind>,
-);
-
-fn construct_fn<'tcx, A>(
- thir: &Thir<'tcx>,
- infcx: &InferCtxt<'_, 'tcx>,
+fn construct_fn<'tcx>(
+ tcx: TyCtxt<'tcx>,
fn_def: ty::WithOptConstParam<LocalDefId>,
- fn_id: hir::HirId,
- arguments: A,
- safety: Safety,
- abi: Abi,
- return_ty: Ty<'tcx>,
- return_ty_span: Span,
- body: &'tcx hir::Body<'tcx>,
+ thir: &Thir<'tcx>,
expr: ExprId,
- span_with_body: Span,
-) -> Body<'tcx>
-where
- A: Iterator<Item = ArgInfo<'tcx>>,
-{
- let arguments: Vec<_> = arguments.collect();
+) -> Body<'tcx> {
+ let span = tcx.def_span(fn_def.did);
+ let fn_id = tcx.hir().local_def_id_to_hir_id(fn_def.did);
+ let generator_kind = tcx.generator_kind(fn_def.did);
- let tcx = infcx.tcx;
- let span = tcx.hir().span(fn_id);
+ // Figure out what primary body this item has.
+ let body_id = tcx.hir().body_owned_by(fn_def.did);
+ let span_with_body = tcx.hir().span_with_body(fn_id);
+ let return_ty_span = tcx
+ .hir()
+ .fn_decl_by_hir_id(fn_id)
+ .unwrap_or_else(|| span_bug!(span, "can't build MIR for {:?}", fn_def.did))
+ .output
+ .span();
+
+ // fetch the fully liberated fn signature (that is, all bound
+ // types/lifetimes replaced)
+ let typeck_results = tcx.typeck_opt_const_arg(fn_def);
+ let fn_sig = typeck_results.liberated_fn_sigs()[fn_id];
+
+ let safety = match fn_sig.unsafety {
+ hir::Unsafety::Normal => Safety::Safe,
+ hir::Unsafety::Unsafe => Safety::FnUnsafe,
+ };
+ let mut abi = fn_sig.abi;
+ if let DefKind::Closure = tcx.def_kind(fn_def.did) {
+ // HACK(eddyb) Avoid having RustCall on closures,
+ // as it adds unnecessary (and wrong) auto-tupling.
+ abi = Abi::Rust;
+ }
+
+ let arguments = &thir.params;
+
+ let (yield_ty, return_ty) = if generator_kind.is_some() {
+ let gen_ty = arguments[thir::UPVAR_ENV_PARAM].ty;
+ let gen_sig = match gen_ty.kind() {
+ ty::Generator(_, gen_substs, ..) => gen_substs.as_generator().sig(),
+ _ => {
+ span_bug!(span, "generator w/o generator type: {:?}", gen_ty)
+ }
+ };
+ (Some(gen_sig.yield_ty), gen_sig.return_ty)
+ } else {
+ (None, fn_sig.output())
+ };
+
+ let infcx = tcx.infer_ctxt().build();
let mut builder = Builder::new(
thir,
infcx,
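
The `Abi::Rust` override for closures earlier in this hunk avoids the rust-call calling convention on closure bodies, and a later hunk sets `spread_arg` to untuple the last argument for functions that keep it. As an illustrative sketch (plain user code, not part of the patch) of the auto-tupling that ABI performs at call sites:

    // A closure call like `c(1, "x")` is sugar for `Fn::call(&c, (1, "x"))`:
    // the explicit arguments are packed into one tuple ("rust-call" ABI).
    // Building the closure *body* under that ABI would wrongly expect a single
    // tupled parameter, hence the override to `Abi::Rust`, while `spread_arg`
    // tells later MIR stages to untuple the last argument again.
    fn demo() {
        let c = |a: i32, b: &str| println!("{a} {b}");
        c(1, "x");
    }
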
@@ -654,13 +492,13 @@ where
safety,
return_ty,
return_ty_span,
- body.generator_kind,
+ generator_kind,
);
let call_site_scope =
- region::Scope { id: body.value.hir_id.local_id, data: region::ScopeData::CallSite };
+ region::Scope { id: body_id.hir_id.local_id, data: region::ScopeData::CallSite };
let arg_scope =
- region::Scope { id: body.value.hir_id.local_id, data: region::ScopeData::Arguments };
+ region::Scope { id: body_id.hir_id.local_id, data: region::ScopeData::Arguments };
let source_info = builder.source_info(span);
let call_site_s = (call_site_scope, source_info);
unpack!(builder.in_scope(call_site_s, LintLevel::Inherited, |builder| {
@@ -673,7 +511,7 @@ where
builder.args_and_body(
START_BLOCK,
fn_def.did,
- &arguments,
+ arguments,
arg_scope,
&thir[expr],
)
@@ -685,29 +523,63 @@ where
return_block.unit()
}));
- let spread_arg = if abi == Abi::RustCall {
+ let mut body = builder.finish();
+
+ body.spread_arg = if abi == Abi::RustCall {
// RustCall pseudo-ABI untuples the last argument.
Some(Local::new(arguments.len()))
} else {
None
};
-
- let mut body = builder.finish();
- body.spread_arg = spread_arg;
+ if yield_ty.is_some() {
+ body.generator.as_mut().unwrap().yield_ty = yield_ty;
+ }
body
}
fn construct_const<'a, 'tcx>(
+ tcx: TyCtxt<'tcx>,
+ def: ty::WithOptConstParam<LocalDefId>,
thir: &'a Thir<'tcx>,
- infcx: &'a InferCtxt<'a, 'tcx>,
expr: ExprId,
- def: ty::WithOptConstParam<LocalDefId>,
- hir_id: hir::HirId,
- const_ty: Ty<'tcx>,
- const_ty_span: Span,
) -> Body<'tcx> {
- let tcx = infcx.tcx;
- let span = tcx.hir().span(hir_id);
+ let hir_id = tcx.hir().local_def_id_to_hir_id(def.did);
+
+ // Figure out what primary body this item has.
+ let (span, const_ty_span) = match tcx.hir().get(hir_id) {
+ Node::Item(hir::Item {
+ kind: hir::ItemKind::Static(ty, _, _) | hir::ItemKind::Const(ty, _),
+ span,
+ ..
+ })
+ | Node::ImplItem(hir::ImplItem { kind: hir::ImplItemKind::Const(ty, _), span, .. })
+ | Node::TraitItem(hir::TraitItem {
+ kind: hir::TraitItemKind::Const(ty, Some(_)),
+ span,
+ ..
+ }) => (*span, ty.span),
+ Node::AnonConst(_) => {
+ let span = tcx.def_span(def.did);
+ (span, span)
+ }
+ _ => span_bug!(tcx.def_span(def.did), "can't build MIR for {:?}", def.did),
+ };
+
+ // Get the revealed type of this const. This is *not* the adjusted
+ // type of its body, which may be a subtype of this type. For
+ // example:
+ //
+ // fn foo(_: &()) {}
+ // static X: fn(&'static ()) = foo;
+ //
+ // The adjusted type of the body of X is `for<'a> fn(&'a ())` which
+ // is not the same as the type of X. We need the type of the return
+ // place to be the type of the constant because NLL typeck will
+ // equate them.
+ let typeck_results = tcx.typeck_opt_const_arg(def);
+ let const_ty = typeck_results.node_type(hir_id);
+
+ let infcx = tcx.infer_ctxt().build();
let mut builder = Builder::new(
thir,
infcx,
@@ -736,28 +608,27 @@ fn construct_const<'a, 'tcx>(
///
/// This is required because we may still want to run MIR passes on an item
/// with type errors, but normal MIR construction can't handle that in general.
-fn construct_error<'a, 'tcx>(
- infcx: &'a InferCtxt<'a, 'tcx>,
- def: ty::WithOptConstParam<LocalDefId>,
- hir_id: hir::HirId,
- body_id: hir::BodyId,
+fn construct_error<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ def: LocalDefId,
body_owner_kind: hir::BodyOwnerKind,
err: ErrorGuaranteed,
) -> Body<'tcx> {
- let tcx = infcx.tcx;
- let span = tcx.hir().span(hir_id);
+ let span = tcx.def_span(def);
+ let hir_id = tcx.hir().local_def_id_to_hir_id(def);
+ let generator_kind = tcx.generator_kind(def);
+
let ty = tcx.ty_error();
- let generator_kind = tcx.hir().body(body_id).generator_kind;
let num_params = match body_owner_kind {
- hir::BodyOwnerKind::Fn => tcx.hir().fn_decl_by_hir_id(hir_id).unwrap().inputs.len(),
+ hir::BodyOwnerKind::Fn => tcx.fn_sig(def).inputs().skip_binder().len(),
hir::BodyOwnerKind::Closure => {
- if generator_kind.is_some() {
- // Generators have an implicit `self` parameter *and* a possibly
- // implicit resume parameter.
- 2
- } else {
- // The implicit self parameter adds another local in MIR.
- 1 + tcx.hir().fn_decl_by_hir_id(hir_id).unwrap().inputs.len()
+ let ty = tcx.type_of(def);
+ match ty.kind() {
+ ty::Closure(_, substs) => {
+ 1 + substs.as_closure().sig().inputs().skip_binder().len()
+ }
+ ty::Generator(..) => 2,
+ _ => bug!("expected closure or generator, found {ty:?}"),
}
}
hir::BodyOwnerKind::Const => 0,
@@ -788,7 +659,7 @@ fn construct_error<'a, 'tcx>(
cfg.terminate(START_BLOCK, source_info, TerminatorKind::Unreachable);
let mut body = Body::new(
- MirSource::item(def.did.to_def_id()),
+ MirSource::item(def.to_def_id()),
cfg.basic_blocks,
source_scopes,
local_decls,
@@ -806,7 +677,7 @@ fn construct_error<'a, 'tcx>(
impl<'a, 'tcx> Builder<'a, 'tcx> {
fn new(
thir: &'a Thir<'tcx>,
- infcx: &'a InferCtxt<'a, 'tcx>,
+ infcx: InferCtxt<'tcx>,
def: ty::WithOptConstParam<LocalDefId>,
hir_id: hir::HirId,
span: Span,
@@ -855,7 +726,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
in_scope_unsafe: safety,
local_decls: IndexVec::from_elem_n(LocalDecl::new(return_ty, return_span), 1),
canonical_user_type_annotations: IndexVec::new(),
- upvar_mutbls: vec![],
+ upvars: CaptureMap::new(),
var_indices: Default::default(),
unit_temp: None,
var_debug_info: vec![],
@@ -896,20 +767,21 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
&mut self,
mut block: BasicBlock,
fn_def_id: LocalDefId,
- arguments: &[ArgInfo<'tcx>],
+ arguments: &IndexVec<ParamId, Param<'tcx>>,
argument_scope: region::Scope,
expr: &Expr<'tcx>,
) -> BlockAnd<()> {
// Allocate locals for the function arguments
- for &ArgInfo(ty, _, arg_opt, _) in arguments.iter() {
+ for param in arguments.iter() {
let source_info =
- SourceInfo::outermost(arg_opt.map_or(self.fn_span, |arg| arg.pat.span));
- let arg_local = self.local_decls.push(LocalDecl::with_source_info(ty, source_info));
+ SourceInfo::outermost(param.pat.as_ref().map_or(self.fn_span, |pat| pat.span));
+ let arg_local =
+ self.local_decls.push(LocalDecl::with_source_info(param.ty, source_info));
// If this is a simple binding pattern, give debuginfo a nice name.
- if let Some(arg) = arg_opt && let Some(ident) = arg.pat.simple_ident() {
+ if let Some(ref pat) = param.pat && let Some(name) = pat.simple_ident() {
self.var_debug_info.push(VarDebugInfo {
- name: ident.name,
+ name,
source_info,
value: VarDebugInfoContents::Place(arg_local.into()),
});
@@ -924,7 +796,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
// indexed closure and we stored in a map called closure_min_captures in TypeckResults
// with the closure's DefId. Here, we run through that vec of UpvarIds for
// the given closure and use the necessary information to create upvar
- // debuginfo and to fill `self.upvar_mutbls`.
+ // debuginfo and to fill `self.upvars`.
if hir_typeck_results.closure_min_captures.get(&fn_def_id).is_some() {
let mut closure_env_projs = vec![];
let mut closure_ty = self.local_decls[ty::CAPTURE_STRUCT_LOCAL].ty;
@@ -944,7 +816,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
.closure_min_captures_flattened(fn_def_id)
.zip(capture_tys.zip(capture_syms));
- self.upvar_mutbls = captures_with_tys
+ self.upvars = captures_with_tys
.enumerate()
.map(|(i, (captured_place, (ty, sym)))| {
let capture = captured_place.info.capture_kind;
@@ -964,48 +836,46 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
}
};
+ let use_place = Place {
+ local: ty::CAPTURE_STRUCT_LOCAL,
+ projection: tcx.intern_place_elems(&projs),
+ };
self.var_debug_info.push(VarDebugInfo {
name: *sym,
source_info: SourceInfo::outermost(tcx_hir.span(var_id)),
- value: VarDebugInfoContents::Place(Place {
- local: ty::CAPTURE_STRUCT_LOCAL,
- projection: tcx.intern_place_elems(&projs),
- }),
+ value: VarDebugInfoContents::Place(use_place),
});
- mutability
+ let capture = Capture { captured_place, use_place, mutability };
+ (var_id, capture)
})
.collect();
}
let mut scope = None;
// Bind the argument patterns
- for (index, arg_info) in arguments.iter().enumerate() {
+ for (index, param) in arguments.iter().enumerate() {
// Function arguments always get the first Local indices after the return place
let local = Local::new(index + 1);
let place = Place::from(local);
- let &ArgInfo(_, opt_ty_info, arg_opt, ref self_binding) = arg_info;
// Make sure we drop (parts of) the argument even when not matched on.
self.schedule_drop(
- arg_opt.as_ref().map_or(expr.span, |arg| arg.pat.span),
+ param.pat.as_ref().map_or(expr.span, |pat| pat.span),
argument_scope,
local,
DropKind::Value,
);
- let Some(arg) = arg_opt else {
+ let Some(ref pat) = param.pat else {
continue;
};
- let pat = match tcx.hir().get(arg.pat.hir_id) {
- Node::Pat(pat) => pat,
- node => bug!("pattern became {:?}", node),
- };
- let pattern = pat_from_hir(tcx, self.param_env, self.typeck_results, pat);
let original_source_scope = self.source_scope;
- let span = pattern.span;
- self.set_correct_source_scope_for_arg(arg.hir_id, original_source_scope, span);
- match *pattern.kind {
+ let span = pat.span;
+ if let Some(arg_hir_id) = param.hir_id {
+ self.set_correct_source_scope_for_arg(arg_hir_id, original_source_scope, span);
+ }
+ match pat.kind {
// Don't introduce extra copies for simple bindings
PatKind::Binding {
mutability,
@@ -1016,17 +886,17 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
} => {
self.local_decls[local].mutability = mutability;
self.local_decls[local].source_info.scope = self.source_scope;
- self.local_decls[local].local_info = if let Some(kind) = self_binding {
+ self.local_decls[local].local_info = if let Some(kind) = param.self_kind {
Some(Box::new(LocalInfo::User(ClearCrossCrate::Set(
- BindingForm::ImplicitSelf(*kind),
+ BindingForm::ImplicitSelf(kind),
))))
} else {
let binding_mode = ty::BindingMode::BindByValue(mutability);
Some(Box::new(LocalInfo::User(ClearCrossCrate::Set(BindingForm::Var(
VarBindingForm {
binding_mode,
- opt_ty_info,
- opt_match_place: Some((Some(place), span)),
+ opt_ty_info: param.ty_span,
+ opt_match_place: Some((None, span)),
pat_span: span,
},
)))))
@@ -1037,12 +907,12 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
scope = self.declare_bindings(
scope,
expr.span,
- &pattern,
+ &pat,
matches::ArmHasGuard(false),
Some((Some(&place), span)),
);
let place_builder = PlaceBuilder::from(local);
- unpack!(block = self.place_into_pattern(block, pattern, place_builder, false));
+ unpack!(block = self.place_into_pattern(block, &pat, place_builder, false));
}
}
self.source_scope = original_source_scope;
diff --git a/compiler/rustc_mir_build/src/build/scope.rs b/compiler/rustc_mir_build/src/build/scope.rs
index b2fd9f25b..3cebd5ebe 100644
--- a/compiler/rustc_mir_build/src/build/scope.rs
+++ b/compiler/rustc_mir_build/src/build/scope.rs
@@ -466,9 +466,10 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
let normal_exit_block = f(self);
let breakable_scope = self.scopes.breakable_scopes.pop().unwrap();
assert!(breakable_scope.region_scope == region_scope);
- let break_block = self.build_exit_tree(breakable_scope.break_drops, None);
+ let break_block =
+ self.build_exit_tree(breakable_scope.break_drops, region_scope, span, None);
if let Some(drops) = breakable_scope.continue_drops {
- self.build_exit_tree(drops, loop_block);
+ self.build_exit_tree(drops, region_scope, span, loop_block);
}
match (normal_exit_block, break_block) {
(Some(block), None) | (None, Some(block)) => block,
@@ -510,6 +511,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
pub(crate) fn in_if_then_scope<F>(
&mut self,
region_scope: region::Scope,
+ span: Span,
f: F,
) -> (BasicBlock, BasicBlock)
where
@@ -524,7 +526,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
assert!(if_then_scope.region_scope == region_scope);
let else_block = self
- .build_exit_tree(if_then_scope.else_drops, None)
+ .build_exit_tree(if_then_scope.else_drops, region_scope, span, None)
.map_or_else(|| self.cfg.start_new_block(), |else_block_and| unpack!(else_block_and));
(then_block, else_block)
@@ -553,6 +555,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
/// Convenience wrapper that pushes a scope and then executes `f`
/// to build its contents, popping the scope afterwards.
+ #[instrument(skip(self, f), level = "debug")]
pub(crate) fn in_scope<F, R>(
&mut self,
region_scope: (region::Scope, SourceInfo),
@@ -562,7 +565,6 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
where
F: FnOnce(&mut Builder<'a, 'tcx>) -> BlockAnd<R>,
{
- debug!("in_scope(region_scope={:?})", region_scope);
let source_scope = self.source_scope;
let tcx = self.tcx;
if let LintLevel::Explicit(current_hir_id) = lint_level {
@@ -589,7 +591,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
let rv = unpack!(block = f(self));
unpack!(block = self.pop_scope(region_scope, block));
self.source_scope = source_scope;
- debug!("in_scope: exiting region_scope={:?} block={:?}", region_scope, block);
+ debug!(?block);
block.and(rv)
}
@@ -997,10 +999,18 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
/// Returns the [DropIdx] for the innermost drop if the function unwound at
/// this point. The `DropIdx` will be created if it doesn't already exist.
fn diverge_cleanup(&mut self) -> DropIdx {
- let is_generator = self.generator_kind.is_some();
- let (uncached_scope, mut cached_drop) = self
- .scopes
- .scopes
+ // It is okay to use a dummy span because getting the scope index of the topmost scope
+ // must always succeed.
+ self.diverge_cleanup_target(self.scopes.topmost(), DUMMY_SP)
+ }
+
+ /// This is similar to [diverge_cleanup](Self::diverge_cleanup) except its target is set to
+ /// some ancestor scope instead of the current scope.
+ /// It is possible to unwind to some ancestor scope if a drop panics while
+ /// the program breaks out of an if-then scope.
+ fn diverge_cleanup_target(&mut self, target_scope: region::Scope, span: Span) -> DropIdx {
+ let target = self.scopes.scope_index(target_scope, span);
+ let (uncached_scope, mut cached_drop) = self.scopes.scopes[..=target]
.iter()
.enumerate()
.rev()
@@ -1009,7 +1019,12 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
})
.unwrap_or((0, ROOT_NODE));
- for scope in &mut self.scopes.scopes[uncached_scope..] {
+ if uncached_scope > target {
+ return cached_drop;
+ }
+
+ let is_generator = self.generator_kind.is_some();
+ for scope in &mut self.scopes.scopes[uncached_scope..=target] {
for drop in &scope.drops {
if is_generator || drop.kind == DropKind::Value {
cached_drop = self.scopes.unwind_drops.add_drop(*drop, cached_drop);
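
The doc comment on `diverge_cleanup_target` above describes unwinding into an ancestor scope when a drop panics while leaving an if-then scope. A minimal sketch of such user code (hypothetical, not from the patch):

    // `Guard`'s destructor runs when control leaves the then-block; if it
    // panics, cleanup has to continue with the drops of an enclosing scope,
    // which is the ancestor scope that `diverge_cleanup_target` resolves.
    struct Guard;

    impl Drop for Guard {
        fn drop(&mut self) {
            println!("dropping Guard");
        }
    }

    fn demo(opt: Option<Guard>) -> bool {
        if let Some(_guard) = opt {
            return true;
        }
        false
    }
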
@@ -1222,21 +1237,24 @@ impl<'a, 'tcx: 'a> Builder<'a, 'tcx> {
fn build_exit_tree(
&mut self,
mut drops: DropTree,
+ else_scope: region::Scope,
+ span: Span,
continue_block: Option<BasicBlock>,
) -> Option<BlockAnd<()>> {
let mut blocks = IndexVec::from_elem(None, &drops.drops);
blocks[ROOT_NODE] = continue_block;
drops.build_mir::<ExitScopes>(&mut self.cfg, &mut blocks);
+ let is_generator = self.generator_kind.is_some();
// Link the exit drop tree to unwind drop tree.
if drops.drops.iter().any(|(drop, _)| drop.kind == DropKind::Value) {
- let unwind_target = self.diverge_cleanup();
+ let unwind_target = self.diverge_cleanup_target(else_scope, span);
let mut unwind_indices = IndexVec::from_elem_n(unwind_target, 1);
for (drop_idx, drop_data) in drops.drops.iter_enumerated().skip(1) {
match drop_data.0.kind {
DropKind::Storage => {
- if self.generator_kind.is_some() {
+ if is_generator {
let unwind_drop = self
.scopes
.unwind_drops
diff --git a/compiler/rustc_mir_build/src/check_unsafety.rs b/compiler/rustc_mir_build/src/check_unsafety.rs
index 864caf0ba..fb1ea9ed3 100644
--- a/compiler/rustc_mir_build/src/check_unsafety.rs
+++ b/compiler/rustc_mir_build/src/check_unsafety.rs
@@ -75,10 +75,11 @@ impl<'tcx> UnsafetyVisitor<'_, 'tcx> {
match self.safety_context {
SafetyContext::BuiltinUnsafeBlock => {}
SafetyContext::UnsafeBlock { ref mut used, .. } => {
- if !self.body_unsafety.is_unsafe() || !unsafe_op_in_unsafe_fn_allowed {
- // Mark this block as useful
- *used = true;
- }
+ // Mark this block as useful (even inside `unsafe fn`, where it is technically
+ // redundant -- but we want to eventually enable `unsafe_op_in_unsafe_fn` by
+ // default, which will require those blocks:
+ // https://github.com/rust-lang/rust/issues/71668#issuecomment-1203075594).
+ *used = true;
}
SafetyContext::UnsafeFn if unsafe_op_in_unsafe_fn_allowed => {}
SafetyContext::UnsafeFn => {
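
The comment in the hunk above concerns `unsafe` blocks written inside an `unsafe fn`. A small sketch of the situation (the `unsafe_op_in_unsafe_fn` lint is opted into explicitly here; it is not yet the default):

    // Today the inner block is technically redundant, but under the lint below
    // it becomes required, which is why such blocks are now always marked as
    // used instead of being reported by `unused_unsafe`.
    #![deny(unsafe_op_in_unsafe_fn)]

    unsafe fn read(p: *const u8) -> u8 {
        // Without this block the raw-pointer deref would be denied by the lint.
        unsafe { *p }
    }
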
@@ -88,15 +89,8 @@ impl<'tcx> UnsafetyVisitor<'_, 'tcx> {
UNSAFE_OP_IN_UNSAFE_FN,
self.hir_context,
span,
- |lint| {
- lint.build(&format!(
- "{} is unsafe and requires unsafe block (error E0133)",
- description,
- ))
- .span_label(span, kind.simple_description())
- .note(note)
- .emit();
- },
+ format!("{} is unsafe and requires unsafe block (error E0133)", description,),
+ |lint| lint.span_label(span, kind.simple_description()).note(note),
)
}
SafetyContext::Safe => {
@@ -124,14 +118,13 @@ impl<'tcx> UnsafetyVisitor<'_, 'tcx> {
enclosing_unsafe: Option<(Span, &'static str)>,
) {
let block_span = self.tcx.sess.source_map().guess_head_span(block_span);
- self.tcx.struct_span_lint_hir(UNUSED_UNSAFE, hir_id, block_span, |lint| {
- let msg = "unnecessary `unsafe` block";
- let mut db = lint.build(msg);
- db.span_label(block_span, msg);
+ let msg = "unnecessary `unsafe` block";
+ self.tcx.struct_span_lint_hir(UNUSED_UNSAFE, hir_id, block_span, msg, |lint| {
+ lint.span_label(block_span, msg);
if let Some((span, kind)) = enclosing_unsafe {
- db.span_label(span, format!("because it's nested under this `unsafe` {}", kind));
+ lint.span_label(span, format!("because it's nested under this `unsafe` {}", kind));
}
- db.emit();
+ lint
});
}
@@ -213,7 +206,7 @@ impl<'a, 'tcx> Visitor<'a, 'tcx> for UnsafetyVisitor<'a, 'tcx> {
fn visit_pat(&mut self, pat: &Pat<'tcx>) {
if self.in_union_destructure {
- match *pat.kind {
+ match pat.kind {
// binding to a variable allows getting stuff out of variable
PatKind::Binding { .. }
// match is conditional on having this value
@@ -235,7 +228,7 @@ impl<'a, 'tcx> Visitor<'a, 'tcx> for UnsafetyVisitor<'a, 'tcx> {
}
};
- match &*pat.kind {
+ match &pat.kind {
PatKind::Leaf { .. } => {
if let ty::Adt(adt_def, ..) = pat.ty.kind() {
if adt_def.is_union() {
@@ -267,7 +260,7 @@ impl<'a, 'tcx> Visitor<'a, 'tcx> for UnsafetyVisitor<'a, 'tcx> {
};
match borrow_kind {
BorrowKind::Shallow | BorrowKind::Shared | BorrowKind::Unique => {
- if !ty.is_freeze(self.tcx.at(pat.span), self.param_env) {
+ if !ty.is_freeze(self.tcx, self.param_env) {
self.requires_unsafe(pat.span, BorrowOfLayoutConstrainedField);
}
}
@@ -363,7 +356,7 @@ impl<'a, 'tcx> Visitor<'a, 'tcx> for UnsafetyVisitor<'a, 'tcx> {
// If the called function has target features the calling function hasn't,
// the call requires `unsafe`. Don't check this on wasm
// targets, though. For more information on wasm see the
- // is_like_wasm check in typeck/src/collect.rs
+ // is_like_wasm check in hir_analysis/src/collect.rs
if !self.tcx.sess.target.options.is_like_wasm
&& !self
.tcx
@@ -390,7 +383,7 @@ impl<'a, 'tcx> Visitor<'a, 'tcx> for UnsafetyVisitor<'a, 'tcx> {
ExprKind::InlineAsm { .. } => {
self.requires_unsafe(expr.span, UseOfInlineAssembly);
}
- ExprKind::Adt(box Adt {
+ ExprKind::Adt(box AdtExpr {
adt_def,
variant_index: _,
substs: _,
@@ -401,13 +394,13 @@ impl<'a, 'tcx> Visitor<'a, 'tcx> for UnsafetyVisitor<'a, 'tcx> {
(Bound::Unbounded, Bound::Unbounded) => {}
_ => self.requires_unsafe(expr.span, InitializingTypeWith),
},
- ExprKind::Closure {
+ ExprKind::Closure(box ClosureExpr {
closure_id,
substs: _,
upvars: _,
movability: _,
fake_reads: _,
- } => {
+ }) => {
let closure_def = if let Some((did, const_param_id)) =
ty::WithOptConstParam::try_lookup(closure_id, self.tcx)
{
@@ -464,9 +457,7 @@ impl<'a, 'tcx> Visitor<'a, 'tcx> for UnsafetyVisitor<'a, 'tcx> {
if visitor.found {
match borrow_kind {
BorrowKind::Shallow | BorrowKind::Shared | BorrowKind::Unique
- if !self.thir[arg]
- .ty
- .is_freeze(self.tcx.at(self.thir[arg].span), self.param_env) =>
+ if !self.thir[arg].ty.is_freeze(self.tcx, self.param_env) =>
{
self.requires_unsafe(expr.span, BorrowOfLayoutConstrainedField)
}
diff --git a/compiler/rustc_mir_build/src/lib.rs b/compiler/rustc_mir_build/src/lib.rs
index 11cd2a9aa..b53bd3d07 100644
--- a/compiler/rustc_mir_build/src/lib.rs
+++ b/compiler/rustc_mir_build/src/lib.rs
@@ -2,11 +2,11 @@
//!
//! This crate also contains the match exhaustiveness and usefulness checking.
#![allow(rustc::potential_query_instability)]
+#![feature(assert_matches)]
#![feature(box_patterns)]
#![feature(control_flow_enum)]
#![feature(if_let_guard)]
#![feature(let_chains)]
-#![feature(let_else)]
#![feature(min_specialization)]
#![feature(once_cell)]
#![recursion_limit = "256"]
diff --git a/compiler/rustc_mir_build/src/lints.rs b/compiler/rustc_mir_build/src/lints.rs
index 54d549fd6..b21f30efc 100644
--- a/compiler/rustc_mir_build/src/lints.rs
+++ b/compiler/rustc_mir_build/src/lints.rs
@@ -36,16 +36,20 @@ pub(crate) fn check<'tcx>(tcx: TyCtxt<'tcx>, body: &Body<'tcx>) {
let sp = tcx.def_span(def_id);
let hir_id = tcx.hir().local_def_id_to_hir_id(def_id);
- tcx.struct_span_lint_hir(UNCONDITIONAL_RECURSION, hir_id, sp, |lint| {
- let mut db = lint.build("function cannot return without recursing");
- db.span_label(sp, "cannot return without recursing");
- // offer some help to the programmer.
- for call_span in vis.reachable_recursive_calls {
- db.span_label(call_span, "recursive call site");
- }
- db.help("a `loop` may express intention better if this is on purpose");
- db.emit();
- });
+ tcx.struct_span_lint_hir(
+ UNCONDITIONAL_RECURSION,
+ hir_id,
+ sp,
+ "function cannot return without recursing",
+ |lint| {
+ lint.span_label(sp, "cannot return without recursing");
+ // offer some help to the programmer.
+ for call_span in vis.reachable_recursive_calls {
+ lint.span_label(call_span, "recursive call site");
+ }
+ lint.help("a `loop` may express intention better if this is on purpose")
+ },
+ );
}
}
diff --git a/compiler/rustc_mir_build/src/thir/constant.rs b/compiler/rustc_mir_build/src/thir/constant.rs
index a7e4403a2..f626571b5 100644
--- a/compiler/rustc_mir_build/src/thir/constant.rs
+++ b/compiler/rustc_mir_build/src/thir/constant.rs
@@ -44,7 +44,7 @@ pub(crate) fn lit_to_const<'tcx>(
}
(ast::LitKind::Bool(b), ty::Bool) => ty::ValTree::from_scalar_int((*b).into()),
(ast::LitKind::Char(c), ty::Char) => ty::ValTree::from_scalar_int((*c).into()),
- (ast::LitKind::Err(_), _) => return Err(LitToConstError::Reported),
+ (ast::LitKind::Err, _) => return Err(LitToConstError::Reported),
_ => return Err(LitToConstError::TypeError),
};
diff --git a/compiler/rustc_mir_build/src/thir/cx/block.rs b/compiler/rustc_mir_build/src/thir/cx/block.rs
index dccaa61ed..321353ca2 100644
--- a/compiler/rustc_mir_build/src/thir/cx/block.rs
+++ b/compiler/rustc_mir_build/src/thir/cx/block.rs
@@ -9,13 +9,13 @@ use rustc_index::vec::Idx;
use rustc_middle::ty::CanonicalUserTypeAnnotation;
impl<'tcx> Cx<'tcx> {
- pub(crate) fn mirror_block(&mut self, block: &'tcx hir::Block<'tcx>) -> Block {
+ pub(crate) fn mirror_block(&mut self, block: &'tcx hir::Block<'tcx>) -> BlockId {
// We have to eagerly lower the "spine" of the statements
// in order to get the lexical scoping correctly.
let stmts = self.mirror_stmts(block.hir_id.local_id, block.stmts);
let opt_destruction_scope =
self.region_scope_tree.opt_destruction_scope(block.hir_id.local_id);
- Block {
+ let block = Block {
targeted_by_break: block.targeted_by_break,
region_scope: region::Scope {
id: block.hir_id.local_id,
@@ -34,7 +34,9 @@ impl<'tcx> Cx<'tcx> {
BlockSafety::ExplicitUnsafe(block.hir_id)
}
},
- }
+ };
+
+ self.thir.blocks.push(block)
}
fn mirror_stmts(
@@ -85,21 +87,21 @@ impl<'tcx> Cx<'tcx> {
{
debug!("mirror_stmts: user_ty={:?}", user_ty);
let annotation = CanonicalUserTypeAnnotation {
- user_ty,
+ user_ty: Box::new(user_ty),
span: ty.span,
inferred_ty: self.typeck_results.node_type(ty.hir_id),
};
- pattern = Pat {
+ pattern = Box::new(Pat {
ty: pattern.ty,
span: pattern.span,
- kind: Box::new(PatKind::AscribeUserType {
+ kind: PatKind::AscribeUserType {
ascription: Ascription {
annotation,
variance: ty::Variance::Covariant,
},
subpattern: pattern,
- }),
- };
+ },
+ });
}
}
diff --git a/compiler/rustc_mir_build/src/thir/cx/expr.rs b/compiler/rustc_mir_build/src/thir/cx/expr.rs
index 985601712..c7a7c3e3f 100644
--- a/compiler/rustc_mir_build/src/thir/cx/expr.rs
+++ b/compiler/rustc_mir_build/src/thir/cx/expr.rs
@@ -48,6 +48,8 @@ impl<'tcx> Cx<'tcx> {
_ => None,
};
+ trace!(?expr.ty);
+
// Now apply adjustments, if any.
for adjustment in self.typeck_results.expr_adjustments(hir_expr) {
trace!(?expr, ?adjustment);
@@ -56,6 +58,8 @@ impl<'tcx> Cx<'tcx> {
self.apply_adjustment(hir_expr, expr, adjustment, adjustment_span.unwrap_or(span));
}
+ trace!(?expr.ty, "after adjustments");
+
// Next, wrap this up in the expr's scope.
expr = Expr {
temp_lifetime,
@@ -108,8 +112,8 @@ impl<'tcx> Cx<'tcx> {
// // ^ error message points at this expression.
// }
let mut adjust_span = |expr: &mut Expr<'tcx>| {
- if let ExprKind::Block { body } = &expr.kind {
- if let Some(last_expr) = body.expr {
+ if let ExprKind::Block { block } = expr.kind {
+ if let Some(last_expr) = self.thir[block].expr {
span = self.thir[last_expr].span;
expr.span = span;
}
@@ -155,6 +159,7 @@ impl<'tcx> Cx<'tcx> {
Adjust::Borrow(AutoBorrow::RawPtr(mutability)) => {
ExprKind::AddressOf { mutability, arg: self.thir.exprs.push(expr) }
}
+ Adjust::DynStar => ExprKind::Cast { source: self.thir.exprs.push(expr) },
};
Expr { temp_lifetime, ty: adjustment.target, span, kind }
@@ -261,15 +266,19 @@ impl<'tcx> Cx<'tcx> {
let kind = match expr.kind {
// Here comes the interesting stuff:
- hir::ExprKind::MethodCall(segment, ref args, fn_span) => {
+ hir::ExprKind::MethodCall(segment, receiver, ref args, fn_span) => {
// Rewrite a.b(c) into UFCS form like Trait::b(a, c)
let expr = self.method_callee(expr, segment.ident.span, None);
// When we apply adjustments to the receiver, use the span of
// the overall method call for better diagnostics. args[0]
// is guaranteed to exist, since a method call always has a receiver.
- let old_adjustment_span = self.adjustment_span.replace((args[0].hir_id, expr_span));
- tracing::info!("Using method span: {:?}", expr.span);
- let args = self.mirror_exprs(args);
+ let old_adjustment_span =
+ self.adjustment_span.replace((receiver.hir_id, expr_span));
+ info!("Using method span: {:?}", expr.span);
+ let args = std::iter::once(receiver)
+ .chain(args.iter())
+ .map(|expr| self.mirror_expr(expr))
+ .collect();
self.adjustment_span = old_adjustment_span;
ExprKind::Call {
ty: expr.ty,
@@ -329,7 +338,7 @@ impl<'tcx> Cx<'tcx> {
if let UserType::TypeOf(ref mut did, _) = &mut u_ty.value {
*did = adt_def.did();
}
- u_ty
+ Box::new(u_ty)
});
debug!("make_mirror_unadjusted: (call) user_ty={:?}", user_ty);
@@ -341,7 +350,7 @@ impl<'tcx> Cx<'tcx> {
expr: self.mirror_expr(e),
})
.collect();
- ExprKind::Adt(Box::new(Adt {
+ ExprKind::Adt(Box::new(AdtExpr {
adt_def,
substs,
variant_index: index,
@@ -369,7 +378,7 @@ impl<'tcx> Cx<'tcx> {
ExprKind::AddressOf { mutability, arg: self.mirror_expr(arg) }
}
- hir::ExprKind::Block(ref blk, _) => ExprKind::Block { body: self.mirror_block(blk) },
+ hir::ExprKind::Block(ref blk, _) => ExprKind::Block { block: self.mirror_block(blk) },
hir::ExprKind::Assign(ref lhs, ref rhs, _) => {
ExprKind::Assign { lhs: self.mirror_expr(lhs), rhs: self.mirror_expr(rhs) }
@@ -464,9 +473,9 @@ impl<'tcx> Cx<'tcx> {
ty::Adt(adt, substs) => match adt.adt_kind() {
AdtKind::Struct | AdtKind::Union => {
let user_provided_types = self.typeck_results().user_provided_types();
- let user_ty = user_provided_types.get(expr.hir_id).copied();
+ let user_ty = user_provided_types.get(expr.hir_id).copied().map(Box::new);
debug!("make_mirror_unadjusted: (struct/union) user_ty={:?}", user_ty);
- ExprKind::Adt(Box::new(Adt {
+ ExprKind::Adt(Box::new(AdtExpr {
adt_def: *adt,
variant_index: VariantIdx::new(0),
substs,
@@ -490,9 +499,10 @@ impl<'tcx> Cx<'tcx> {
let index = adt.variant_index_with_id(variant_id);
let user_provided_types =
self.typeck_results().user_provided_types();
- let user_ty = user_provided_types.get(expr.hir_id).copied();
+ let user_ty =
+ user_provided_types.get(expr.hir_id).copied().map(Box::new);
debug!("make_mirror_unadjusted: (variant) user_ty={:?}", user_ty);
- ExprKind::Adt(Box::new(Adt {
+ ExprKind::Adt(Box::new(AdtExpr {
adt_def: *adt,
variant_index: index,
substs,
@@ -547,7 +557,13 @@ impl<'tcx> Cx<'tcx> {
None => Vec::new(),
};
- ExprKind::Closure { closure_id: def_id, substs, upvars, movability, fake_reads }
+ ExprKind::Closure(Box::new(ClosureExpr {
+ closure_id: def_id,
+ substs,
+ upvars,
+ movability,
+ fake_reads,
+ }))
}
hir::ExprKind::Path(ref qpath) => {
@@ -555,7 +571,7 @@ impl<'tcx> Cx<'tcx> {
self.convert_path_expr(expr, res)
}
- hir::ExprKind::InlineAsm(ref asm) => ExprKind::InlineAsm {
+ hir::ExprKind::InlineAsm(ref asm) => ExprKind::InlineAsm(Box::new(InlineAsmExpr {
template: asm.template,
operands: asm
.operands
@@ -614,7 +630,7 @@ impl<'tcx> Cx<'tcx> {
.collect(),
options: asm.options,
line_spans: asm.line_spans,
- },
+ })),
hir::ExprKind::ConstBlock(ref anon_const) => {
let ty = self.typeck_results().node_type(anon_const.hir_id);
@@ -679,8 +695,8 @@ impl<'tcx> Cx<'tcx> {
let body = self.thir.exprs.push(Expr {
ty: block_ty,
temp_lifetime,
- span: block.span,
- kind: ExprKind::Block { body: block },
+ span: self.thir[block].span,
+ kind: ExprKind::Block { block },
});
ExprKind::Loop { body }
}
@@ -712,14 +728,17 @@ impl<'tcx> Cx<'tcx> {
});
debug!("make_mirror_unadjusted: (cast) user_ty={:?}", user_ty);
- ExprKind::ValueTypeAscription { source: cast_expr, user_ty: Some(*user_ty) }
+ ExprKind::ValueTypeAscription {
+ source: cast_expr,
+ user_ty: Some(Box::new(*user_ty)),
+ }
} else {
cast
}
}
hir::ExprKind::Type(ref source, ref ty) => {
let user_provided_types = self.typeck_results.user_provided_types();
- let user_ty = user_provided_types.get(ty.hir_id).copied();
+ let user_ty = user_provided_types.get(ty.hir_id).copied().map(Box::new);
debug!("make_mirror_unadjusted: (type) user_ty={:?}", user_ty);
let mirrored = self.mirror_expr(source);
if source.is_syntactic_place_expr() {
@@ -748,7 +767,7 @@ impl<'tcx> Cx<'tcx> {
&mut self,
hir_id: hir::HirId,
res: Res,
- ) -> Option<ty::CanonicalUserType<'tcx>> {
+ ) -> Option<Box<ty::CanonicalUserType<'tcx>>> {
debug!("user_substs_applied_to_res: res={:?}", res);
let user_provided_type = match res {
// A reference to something callable -- e.g., a fn, method, or
@@ -759,7 +778,7 @@ impl<'tcx> Cx<'tcx> {
| Res::Def(DefKind::Ctor(_, CtorKind::Fn), _)
| Res::Def(DefKind::Const, _)
| Res::Def(DefKind::AssocConst, _) => {
- self.typeck_results().user_provided_types().get(hir_id).copied()
+ self.typeck_results().user_provided_types().get(hir_id).copied().map(Box::new)
}
// A unit struct/variant which is used as a value (e.g.,
@@ -767,11 +786,11 @@ impl<'tcx> Cx<'tcx> {
// this variant -- but with the substitutions given by the
// user.
Res::Def(DefKind::Ctor(_, CtorKind::Const), _) => {
- self.user_substs_applied_to_ty_of_hir_id(hir_id)
+ self.user_substs_applied_to_ty_of_hir_id(hir_id).map(Box::new)
}
// `Self` is used in expression as a tuple struct constructor or a unit struct constructor
- Res::SelfCtor(_) => self.user_substs_applied_to_ty_of_hir_id(hir_id),
+ Res::SelfCtor(_) => self.user_substs_applied_to_ty_of_hir_id(hir_id).map(Box::new),
_ => bug!("user_substs_applied_to_res: unexpected res {:?} at {:?}", res, hir_id),
};
@@ -846,22 +865,22 @@ impl<'tcx> Cx<'tcx> {
Res::Def(DefKind::Const, def_id) | Res::Def(DefKind::AssocConst, def_id) => {
let user_ty = self.user_substs_applied_to_res(expr.hir_id, res);
- ExprKind::NamedConst { def_id, substs, user_ty: user_ty }
+ ExprKind::NamedConst { def_id, substs, user_ty }
}
Res::Def(DefKind::Ctor(_, CtorKind::Const), def_id) => {
let user_provided_types = self.typeck_results.user_provided_types();
- let user_provided_type = user_provided_types.get(expr.hir_id).copied();
- debug!("convert_path_expr: user_provided_type={:?}", user_provided_type);
+ let user_ty = user_provided_types.get(expr.hir_id).copied().map(Box::new);
+ debug!("convert_path_expr: user_ty={:?}", user_ty);
let ty = self.typeck_results().node_type(expr.hir_id);
match ty.kind() {
// A unit struct/variant which is used as a value.
// We return a completely different ExprKind here to account for this special case.
- ty::Adt(adt_def, substs) => ExprKind::Adt(Box::new(Adt {
+ ty::Adt(adt_def, substs) => ExprKind::Adt(Box::new(AdtExpr {
adt_def: *adt_def,
variant_index: adt_def.variant_index_with_ctor_id(def_id),
substs,
- user_ty: user_provided_type,
+ user_ty,
fields: Box::new([]),
base: None,
})),
@@ -980,7 +999,7 @@ impl<'tcx> Cx<'tcx> {
.temporary_scope(self.region_scope_tree, closure_expr.hir_id.local_id);
let var_ty = place.base_ty;
- // The result of capture analysis in `rustc_typeck/check/upvar.rs`represents a captured path
+ // The result of capture analysis in `rustc_hir_analysis/check/upvar.rs` represents a captured path
// as it's seen for use within the closure and not at the time of closure creation.
//
// That is, we expect to see it start from a captured upvar and not something that is local
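
For illustration of the captured-path handling described in the comment above, a sketch in plain user code (assumes edition 2021 precise captures; not part of the patch):

    // The closure captures only the place `point.x`; inside the closure body
    // that place is addressed relative to the captured upvar, which is what
    // the projection-stripping logic accounts for.
    struct Point {
        x: String,
        y: String,
    }

    fn demo() {
        let point = Point { x: String::from("a"), y: String::from("b") };
        let print_x = move || println!("{}", point.x); // moves only `point.x`
        print_x();
        println!("{}", point.y); // `point.y` was never captured, still usable
    }
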
diff --git a/compiler/rustc_mir_build/src/thir/cx/mod.rs b/compiler/rustc_mir_build/src/thir/cx/mod.rs
index f7351a4ca..1d95d6b53 100644
--- a/compiler/rustc_mir_build/src/thir/cx/mod.rs
+++ b/compiler/rustc_mir_build/src/thir/cx/mod.rs
@@ -8,7 +8,9 @@ use crate::thir::util::UserAnnotatedTyHelpers;
use rustc_data_structures::steal::Steal;
use rustc_errors::ErrorGuaranteed;
use rustc_hir as hir;
+use rustc_hir::def::DefKind;
use rustc_hir::def_id::{DefId, LocalDefId};
+use rustc_hir::lang_items::LangItem;
use rustc_hir::HirId;
use rustc_hir::Node;
use rustc_middle::middle::region;
@@ -27,6 +29,26 @@ pub(crate) fn thir_body<'tcx>(
return Err(reported);
}
let expr = cx.mirror_expr(&body.value);
+
+ let owner_id = hir.local_def_id_to_hir_id(owner_def.did);
+ if let Some(ref fn_decl) = hir.fn_decl_by_hir_id(owner_id) {
+ let closure_env_param = cx.closure_env_param(owner_def.did, owner_id);
+ let explicit_params = cx.explicit_params(owner_id, fn_decl, body);
+ cx.thir.params = closure_env_param.into_iter().chain(explicit_params).collect();
+
+ // The resume argument may be missing; in that case, we need to provide it here.
+ // It will always be `()` in this case.
+ if tcx.def_kind(owner_def.did) == DefKind::Generator && body.params.is_empty() {
+ cx.thir.params.push(Param {
+ ty: tcx.mk_unit(),
+ pat: None,
+ ty_span: None,
+ self_kind: None,
+ hir_id: None,
+ });
+ }
+ }
+
Ok((tcx.alloc_steal_thir(cx.thir), expr))
}
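
The synthesized `()` resume parameter added above only applies to generators that declare no parameter of their own. A hedged sketch in user code (unstable `generators`/`generator_trait` features, nightly only; not part of the patch):

    #![feature(generators, generator_trait)]

    use std::ops::{Generator, GeneratorState};
    use std::pin::Pin;

    fn demo() {
        // No parameter list: THIR gets the synthesized `()` resume param.
        let mut no_resume = || {
            yield 1;
        };
        assert!(matches!(
            Pin::new(&mut no_resume).resume(()),
            GeneratorState::Yielded(1)
        ));

        // Explicit resume argument: the parameter is already present.
        let mut with_resume = |initial: u32| {
            let _next: u32 = yield initial;
        };
        assert!(matches!(
            Pin::new(&mut with_resume).resume(3),
            GeneratorState::Yielded(3)
        ));
    }
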
@@ -44,11 +66,11 @@ struct Cx<'tcx> {
tcx: TyCtxt<'tcx>,
thir: Thir<'tcx>,
- pub(crate) param_env: ty::ParamEnv<'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
- pub(crate) region_scope_tree: &'tcx region::ScopeTree,
- pub(crate) typeck_results: &'tcx ty::TypeckResults<'tcx>,
- pub(crate) rvalue_scopes: &'tcx RvalueScopes,
+ region_scope_tree: &'tcx region::ScopeTree,
+ typeck_results: &'tcx ty::TypeckResults<'tcx>,
+ rvalue_scopes: &'tcx RvalueScopes,
/// When applying adjustments to the expression
/// with the given `HirId`, use the given `Span`,
@@ -77,14 +99,94 @@ impl<'tcx> Cx<'tcx> {
}
}
- #[tracing::instrument(level = "debug", skip(self))]
- pub(crate) fn pattern_from_hir(&mut self, p: &hir::Pat<'_>) -> Pat<'tcx> {
+ #[instrument(level = "debug", skip(self))]
+ fn pattern_from_hir(&mut self, p: &hir::Pat<'_>) -> Box<Pat<'tcx>> {
let p = match self.tcx.hir().get(p.hir_id) {
Node::Pat(p) => p,
node => bug!("pattern became {:?}", node),
};
pat_from_hir(self.tcx, self.param_env, self.typeck_results(), p)
}
+
+ fn closure_env_param(&self, owner_def: LocalDefId, owner_id: HirId) -> Option<Param<'tcx>> {
+ match self.tcx.def_kind(owner_def) {
+ DefKind::Closure => {
+ let closure_ty = self.typeck_results.node_type(owner_id);
+
+ let ty::Closure(closure_def_id, closure_substs) = *closure_ty.kind() else {
+ bug!("closure expr does not have closure type: {:?}", closure_ty);
+ };
+
+ let bound_vars = self.tcx.mk_bound_variable_kinds(std::iter::once(
+ ty::BoundVariableKind::Region(ty::BrEnv),
+ ));
+ let br = ty::BoundRegion {
+ var: ty::BoundVar::from_usize(bound_vars.len() - 1),
+ kind: ty::BrEnv,
+ };
+ let env_region = ty::ReLateBound(ty::INNERMOST, br);
+ let closure_env_ty =
+ self.tcx.closure_env_ty(closure_def_id, closure_substs, env_region).unwrap();
+ let liberated_closure_env_ty = self.tcx.erase_late_bound_regions(
+ ty::Binder::bind_with_vars(closure_env_ty, bound_vars),
+ );
+ let env_param = Param {
+ ty: liberated_closure_env_ty,
+ pat: None,
+ ty_span: None,
+ self_kind: None,
+ hir_id: None,
+ };
+
+ Some(env_param)
+ }
+ DefKind::Generator => {
+ let gen_ty = self.typeck_results.node_type(owner_id);
+ let gen_param =
+ Param { ty: gen_ty, pat: None, ty_span: None, self_kind: None, hir_id: None };
+ Some(gen_param)
+ }
+ _ => None,
+ }
+ }
+
+ fn explicit_params<'a>(
+ &'a mut self,
+ owner_id: HirId,
+ fn_decl: &'tcx hir::FnDecl<'tcx>,
+ body: &'tcx hir::Body<'tcx>,
+ ) -> impl Iterator<Item = Param<'tcx>> + 'a {
+ let fn_sig = self.typeck_results.liberated_fn_sigs()[owner_id];
+
+ body.params.iter().enumerate().map(move |(index, param)| {
+ let ty_span = fn_decl
+ .inputs
+ .get(index)
+ // Make sure that inferred closure args have no type span
+ .and_then(|ty| if param.pat.span != ty.span { Some(ty.span) } else { None });
+
+ let self_kind = if index == 0 && fn_decl.implicit_self.has_implicit_self() {
+ Some(fn_decl.implicit_self)
+ } else {
+ None
+ };
+
+ // C-variadic fns also have a `VaList` input that's not listed in `fn_sig`
+ // (as it's created inside the body itself, not passed in from outside).
+ let ty = if fn_decl.c_variadic && index == fn_decl.inputs.len() {
+ let va_list_did = self.tcx.require_lang_item(LangItem::VaList, Some(param.span));
+
+ self.tcx
+ .bound_type_of(va_list_did)
+ .subst(self.tcx, &[self.tcx.lifetimes.re_erased.into()])
+ } else {
+ fn_sig.inputs()[index]
+ };
+
+ let pat = self.pattern_from_hir(param.pat);
+ Param { pat: Some(pat), ty, ty_span, self_kind, hir_id: Some(param.hir_id) }
+ })
+ }
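
The `VaList` handling in `explicit_params` above concerns C-variadic functions. A hedged sketch of such a function in user code (unstable `c_variadic` feature, nightly only; not part of the patch):

    // The `...` parameter has no entry in `fn_sig.inputs()`; the `VaList` is
    // materialized inside the body, which is why its type is looked up through
    // the `VaList` lang item rather than taken from the signature.
    #![feature(c_variadic)]

    pub unsafe extern "C" fn sum(n: usize, mut args: ...) -> usize {
        let mut total = 0;
        for _ in 0..n {
            total += args.arg::<usize>();
        }
        total
    }
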
}
impl<'tcx> UserAnnotatedTyHelpers<'tcx> for Cx<'tcx> {
diff --git a/compiler/rustc_mir_build/src/thir/pattern/check_match.rs b/compiler/rustc_mir_build/src/thir/pattern/check_match.rs
index 063c07647..858129c74 100644
--- a/compiler/rustc_mir_build/src/thir/pattern/check_match.rs
+++ b/compiler/rustc_mir_build/src/thir/pattern/check_match.rs
@@ -7,7 +7,7 @@ use super::{PatCtxt, PatternError};
use rustc_arena::TypedArena;
use rustc_ast::Mutability;
use rustc_errors::{
- error_code, pluralize, struct_span_err, Applicability, Diagnostic, DiagnosticBuilder,
+ error_code, pluralize, struct_span_err, Applicability, DelayDm, Diagnostic, DiagnosticBuilder,
ErrorGuaranteed, MultiSpan,
};
use rustc_hir as hir;
@@ -347,27 +347,35 @@ impl<'p, 'tcx> MatchVisitor<'_, 'p, 'tcx> {
let span_end = affix.last().unwrap().unwrap().0;
let span = span_start.to(span_end);
let cnt = affix.len();
- cx.tcx.struct_span_lint_hir(IRREFUTABLE_LET_PATTERNS, top, span, |lint| {
- let s = pluralize!(cnt);
- let mut diag = lint.build(&format!("{kind} irrefutable pattern{s} in let chain"));
- diag.note(&format!(
- "{these} pattern{s} will always match",
- these = pluralize!("this", cnt),
- ));
- diag.help(&format!(
- "consider moving {} {suggestion}",
- if cnt > 1 { "them" } else { "it" }
- ));
- diag.emit()
- });
+ let s = pluralize!(cnt);
+ cx.tcx.struct_span_lint_hir(
+ IRREFUTABLE_LET_PATTERNS,
+ top,
+ span,
+ format!("{kind} irrefutable pattern{s} in let chain"),
+ |lint| {
+ lint.note(format!(
+ "{these} pattern{s} will always match",
+ these = pluralize!("this", cnt),
+ ))
+ .help(format!(
+ "consider moving {} {suggestion}",
+ if cnt > 1 { "them" } else { "it" }
+ ))
+ },
+ );
};
if let Some(until) = chain_refutabilities.iter().position(|r| !matches!(*r, Some((_, false)))) && until > 0 {
// The chain has a non-zero prefix of irrefutable `let` statements.
// Check if the let source is while, for there is no alternative place to put a prefix,
// and we shouldn't lint.
+ // For let guards inside a match, prefixes might use bindings of the match pattern,
+ // so they can't always be moved out.
+ // FIXME: Add checking whether the bindings are actually used in the prefix,
+ // and lint if they are not.
let let_source = let_source_parent(self.tcx, top, None);
- if !matches!(let_source, LetSource::WhileLet) {
+ if !matches!(let_source, LetSource::WhileLet | LetSource::IfLetGuard) {
// Emit the lint
let prefix = &chain_refutabilities[..until];
lint_affix(prefix, "leading", "outside of the construct");
@@ -487,12 +495,12 @@ impl<'p, 'tcx> MatchVisitor<'_, 'p, 'tcx> {
],
Applicability::HasPlaceholders,
);
- if !bindings.is_empty() && cx.tcx.sess.is_nightly_build() {
+ if !bindings.is_empty() {
err.span_suggestion_verbose(
semi_span.shrink_to_lo(),
&format!(
- "alternatively, on nightly, you might want to use \
- `#![feature(let_else)]` to handle the variant{} that {} matched",
+ "alternatively, you might want to use \
+ let else to handle the variant{} that {} matched",
pluralize!(witnesses.len()),
match witnesses.len() {
1 => "isn't",
@@ -561,26 +569,28 @@ fn check_for_bindings_named_same_as_variants(
BINDINGS_WITH_VARIANT_NAME,
p.hir_id,
p.span,
+ DelayDm(|| format!(
+ "pattern binding `{}` is named the same as one \
+ of the variants of the type `{}`",
+ ident, cx.tcx.def_path_str(edef.did())
+ )),
|lint| {
let ty_path = cx.tcx.def_path_str(edef.did());
- let mut err = lint.build(&format!(
- "pattern binding `{}` is named the same as one \
- of the variants of the type `{}`",
- ident, ty_path
- ));
- err.code(error_code!(E0170));
+ lint.code(error_code!(E0170));
+
// If this is an irrefutable pattern, and there's > 1 variant,
// then we can't actually match on this. Applying the below
// suggestion would produce code that breaks on `check_irrefutable`.
if rf == Refutable || variant_count == 1 {
- err.span_suggestion(
+ lint.span_suggestion(
p.span,
"to match on the variant, qualify the path",
format!("{}::{}", ty_path, ident),
Applicability::MachineApplicable,
);
}
- err.emit();
+
+ lint
},
)
}
@@ -598,14 +608,13 @@ fn pat_is_catchall(pat: &DeconstructedPat<'_, '_>) -> bool {
}
fn unreachable_pattern(tcx: TyCtxt<'_>, span: Span, id: HirId, catchall: Option<Span>) {
- tcx.struct_span_lint_hir(UNREACHABLE_PATTERNS, id, span, |lint| {
- let mut err = lint.build("unreachable pattern");
+ tcx.struct_span_lint_hir(UNREACHABLE_PATTERNS, id, span, "unreachable pattern", |lint| {
if let Some(catchall) = catchall {
// We had a catchall pattern, hint at that.
- err.span_label(span, "unreachable pattern");
- err.span_label(catchall, "matches any value");
+ lint.span_label(span, "unreachable pattern");
+ lint.span_label(catchall, "matches any value");
}
- err.emit();
+ lint
});
}
@@ -621,6 +630,11 @@ fn irrefutable_let_patterns(
count: usize,
span: Span,
) {
+ let span = match source {
+ LetSource::LetElse(span) => span,
+ _ => span,
+ };
+
macro_rules! emit_diag {
(
$lint:expr,
@@ -630,18 +644,23 @@ fn irrefutable_let_patterns(
) => {{
let s = pluralize!(count);
let these = pluralize!("this", count);
- let mut diag = $lint.build(&format!("irrefutable {} pattern{s}", $source_name));
- diag.note(&format!("{these} pattern{s} will always match, so the {}", $note_sufix));
- diag.help(concat!("consider ", $help_sufix));
- diag.emit()
+ tcx.struct_span_lint_hir(
+ IRREFUTABLE_LET_PATTERNS,
+ id,
+ span,
+ format!("irrefutable {} pattern{s}", $source_name),
+ |lint| {
+ lint.note(&format!(
+ "{these} pattern{s} will always match, so the {}",
+ $note_sufix
+ ))
+ .help(concat!("consider ", $help_sufix))
+ },
+ )
}};
}
- let span = match source {
- LetSource::LetElse(span) => span,
- _ => span,
- };
- tcx.struct_span_lint_hir(IRREFUTABLE_LET_PATTERNS, id, span, |lint| match source {
+ match source {
LetSource::GenericLet => {
emit_diag!(lint, "`let`", "`let` is useless", "removing `let`");
}
@@ -677,7 +696,7 @@ fn irrefutable_let_patterns(
"instead using a `loop { ... }` with a `let` inside it"
);
}
- });
+ };
}
fn is_let_irrefutable<'p, 'tcx>(
@@ -849,22 +868,22 @@ fn non_exhaustive_match<'p, 'tcx>(
));
}
[.., prev, last] if prev.span.eq_ctxt(last.span) => {
- if let Ok(snippet) = sm.span_to_snippet(prev.span.between(last.span)) {
- let comma = if matches!(last.body.kind, hir::ExprKind::Block(..))
- && last.span.eq_ctxt(last.body.span)
- {
- ""
- } else {
- ","
- };
+ let comma = if matches!(last.body.kind, hir::ExprKind::Block(..))
+ && last.span.eq_ctxt(last.body.span)
+ {
+ ""
+ } else {
+ ","
+ };
+ let spacing = if sm.is_multiline(prev.span.between(last.span)) {
+ sm.indentation_before(last.span).map(|indent| format!("\n{indent}"))
+ } else {
+ Some(" ".to_string())
+ };
+ if let Some(spacing) = spacing {
suggestion = Some((
last.span.shrink_to_hi(),
- format!(
- "{}{}{} => todo!()",
- comma,
- snippet.strip_prefix(',').unwrap_or(&snippet),
- pattern
- ),
+ format!("{}{}{} => todo!()", comma, spacing, pattern),
));
}
}
@@ -985,8 +1004,8 @@ fn maybe_point_at_variant<'a, 'p: 'a, 'tcx: 'a>(
}
/// Check whether a binding is a by-move binding, i.e. check that the binding's type is not `Copy`.
-fn is_binding_by_move(cx: &MatchVisitor<'_, '_, '_>, hir_id: HirId, span: Span) -> bool {
- !cx.typeck_results.node_type(hir_id).is_copy_modulo_regions(cx.tcx.at(span), cx.param_env)
+fn is_binding_by_move(cx: &MatchVisitor<'_, '_, '_>, hir_id: HirId) -> bool {
+ !cx.typeck_results.node_type(hir_id).is_copy_modulo_regions(cx.tcx, cx.param_env)
}
/// Check that there are no borrow or move conflicts in `binding @ subpat` patterns.
@@ -1012,7 +1031,7 @@ fn check_borrow_conflicts_in_at_patterns(cx: &MatchVisitor<'_, '_, '_>, pat: &Pa
// Get the binding move, extract the mutability if by-ref.
let mut_outer = match typeck_results.extract_binding_mode(sess, pat.hir_id, pat.span) {
- Some(ty::BindByValue(_)) if is_binding_by_move(cx, pat.hir_id, pat.span) => {
+ Some(ty::BindByValue(_)) if is_binding_by_move(cx, pat.hir_id) => {
// We have `x @ pat` where `x` is by-move. Reject all borrows in `pat`.
let mut conflicts_ref = Vec::new();
sub.each_binding(|_, hir_id, span, _| {
@@ -1051,7 +1070,7 @@ fn check_borrow_conflicts_in_at_patterns(cx: &MatchVisitor<'_, '_, '_>, pat: &Pa
(Mutability::Mut, Mutability::Mut) => conflicts_mut_mut.push((span, name)), // 2x `ref mut`.
_ => conflicts_mut_ref.push((span, name)), // `ref` + `ref mut` in either direction.
},
- Some(ty::BindByValue(_)) if is_binding_by_move(cx, hir_id, span) => {
+ Some(ty::BindByValue(_)) if is_binding_by_move(cx, hir_id) => {
conflicts_move.push((span, name)) // `ref mut?` + by-move conflict.
}
Some(ty::BindByValue(_)) | None => {} // `ref mut?` + by-copy is fine.
@@ -1136,10 +1155,14 @@ fn let_source_parent(tcx: TyCtxt<'_>, parent: HirId, pat_id: Option<HirId>) -> L
let parent_parent = hir.get_parent_node(parent);
let parent_parent_node = hir.get(parent_parent);
- if let hir::Node::Stmt(hir::Stmt { kind: hir::StmtKind::Local(_), span, .. }) =
- parent_parent_node
- {
- return LetSource::LetElse(*span);
+ match parent_parent_node {
+ hir::Node::Stmt(hir::Stmt { kind: hir::StmtKind::Local(_), span, .. }) => {
+ return LetSource::LetElse(*span);
+ }
+ hir::Node::Arm(hir::Arm { guard: Some(hir::Guard::If(_)), .. }) => {
+ return LetSource::IfLetGuard;
+ }
+ _ => {}
}
let parent_parent_parent = hir.get_parent_node(parent_parent);
diff --git a/compiler/rustc_mir_build/src/thir/pattern/const_to_pat.rs b/compiler/rustc_mir_build/src/thir/pattern/const_to_pat.rs
index d6dd0f017..ad12e0116 100644
--- a/compiler/rustc_mir_build/src/thir/pattern/const_to_pat.rs
+++ b/compiler/rustc_mir_build/src/thir/pattern/const_to_pat.rs
@@ -1,3 +1,4 @@
+use rustc_errors::DelayDm;
use rustc_hir as hir;
use rustc_index::vec::Idx;
use rustc_infer::infer::{InferCtxt, TyCtxtInferExt};
@@ -19,25 +20,21 @@ impl<'a, 'tcx> PatCtxt<'a, 'tcx> {
/// Converts an evaluated constant to a pattern (if possible).
/// This means aggregate values (like structs and enums) are converted
/// to a pattern that matches the value (as if you'd compared via structural equality).
- #[instrument(level = "debug", skip(self))]
+ #[instrument(level = "debug", skip(self), ret)]
pub(super) fn const_to_pat(
&self,
cv: mir::ConstantKind<'tcx>,
id: hir::HirId,
span: Span,
mir_structural_match_violation: bool,
- ) -> Pat<'tcx> {
- let pat = self.tcx.infer_ctxt().enter(|infcx| {
- let mut convert = ConstToPat::new(self, id, span, infcx);
- convert.to_pat(cv, mir_structural_match_violation)
- });
-
- debug!(?pat);
- pat
+ ) -> Box<Pat<'tcx>> {
+ let infcx = self.tcx.infer_ctxt().build();
+ let mut convert = ConstToPat::new(self, id, span, infcx);
+ convert.to_pat(cv, mir_structural_match_violation)
}
}
-struct ConstToPat<'a, 'tcx> {
+struct ConstToPat<'tcx> {
id: hir::HirId,
span: Span,
param_env: ty::ParamEnv<'tcx>,
@@ -57,7 +54,7 @@ struct ConstToPat<'a, 'tcx> {
behind_reference: Cell<bool>,
// inference context used for checking `T: Structural` bounds.
- infcx: InferCtxt<'a, 'tcx>,
+ infcx: InferCtxt<'tcx>,
include_lint_checks: bool,
@@ -73,21 +70,19 @@ mod fallback_to_const_ref {
/// hoops to get a reference to the value.
pub(super) struct FallbackToConstRef(());
- pub(super) fn fallback_to_const_ref<'a, 'tcx>(
- c2p: &super::ConstToPat<'a, 'tcx>,
- ) -> FallbackToConstRef {
+ pub(super) fn fallback_to_const_ref<'tcx>(c2p: &super::ConstToPat<'tcx>) -> FallbackToConstRef {
assert!(c2p.behind_reference.get());
FallbackToConstRef(())
}
}
use fallback_to_const_ref::{fallback_to_const_ref, FallbackToConstRef};
-impl<'a, 'tcx> ConstToPat<'a, 'tcx> {
+impl<'tcx> ConstToPat<'tcx> {
fn new(
pat_ctxt: &PatCtxt<'_, 'tcx>,
id: hir::HirId,
span: Span,
- infcx: InferCtxt<'a, 'tcx>,
+ infcx: InferCtxt<'tcx>,
) -> Self {
trace!(?pat_ctxt.typeck_results.hir_owner);
ConstToPat {
@@ -159,7 +154,7 @@ impl<'a, 'tcx> ConstToPat<'a, 'tcx> {
&mut self,
cv: mir::ConstantKind<'tcx>,
mir_structural_match_violation: bool,
- ) -> Pat<'tcx> {
+ ) -> Box<Pat<'tcx>> {
trace!(self.treat_byte_string_as_slice);
// This method is just a wrapper handling a validity check; the heavy lifting is
// performed by the recursive `recur` method, which is not meant to be
@@ -168,7 +163,14 @@ impl<'a, 'tcx> ConstToPat<'a, 'tcx> {
// once indirect_structural_match is a full-fledged error, this
// level of indirection can be eliminated
- let inlined_const_as_pat = self.recur(cv, mir_structural_match_violation).unwrap();
+ let inlined_const_as_pat =
+ self.recur(cv, mir_structural_match_violation).unwrap_or_else(|_| {
+ Box::new(Pat {
+ span: self.span,
+ ty: cv.ty(),
+ kind: PatKind::Constant { value: cv },
+ })
+ });
if self.include_lint_checks && !self.saw_const_match_error.get() {
// If we were able to successfully convert the const to some pat,
@@ -201,9 +203,8 @@ impl<'a, 'tcx> ConstToPat<'a, 'tcx> {
lint::builtin::INDIRECT_STRUCTURAL_MATCH,
self.id,
self.span,
- |lint| {
- lint.build(&msg).emit();
- },
+ msg,
+ |lint| lint,
);
} else {
debug!(
@@ -269,7 +270,7 @@ impl<'a, 'tcx> ConstToPat<'a, 'tcx> {
&self,
cv: mir::ConstantKind<'tcx>,
mir_structural_match_violation: bool,
- ) -> Result<Pat<'tcx>, FallbackToConstRef> {
+ ) -> Result<Box<Pat<'tcx>>, FallbackToConstRef> {
let id = self.id;
let span = self.span;
let tcx = self.tcx();
@@ -282,9 +283,8 @@ impl<'a, 'tcx> ConstToPat<'a, 'tcx> {
lint::builtin::ILLEGAL_FLOATING_POINT_LITERAL_PATTERN,
id,
span,
- |lint| {
- lint.build("floating-point types cannot be used in patterns").emit();
- },
+ "floating-point types cannot be used in patterns",
+ |lint| lint,
);
}
PatKind::Constant { value: cv }
@@ -336,15 +336,15 @@ impl<'a, 'tcx> ConstToPat<'a, 'tcx> {
lint::builtin::INDIRECT_STRUCTURAL_MATCH,
id,
span,
- |lint| {
- let msg = format!(
+ DelayDm(|| {
+ format!(
"to use a constant of type `{}` in a pattern, \
`{}` must be annotated with `#[derive(PartialEq, Eq)]`",
cv.ty(),
cv.ty(),
- );
- lint.build(&msg).emit();
- },
+ )
+ }),
+ |lint| lint,
);
}
// Since we are behind a reference, we can just bubble the error up so we get a
@@ -396,7 +396,7 @@ impl<'a, 'tcx> ConstToPat<'a, 'tcx> {
.map(|val| self.recur(*val, false))
.collect::<Result<_, _>>()?,
slice: None,
- suffix: Vec::new(),
+ suffix: Box::new([]),
},
ty::Ref(_, pointee_ty, ..) => match *pointee_ty.kind() {
// These are not allowed and will error elsewhere anyway.
@@ -423,8 +423,8 @@ impl<'a, 'tcx> ConstToPat<'a, 'tcx> {
let old = self.behind_reference.replace(true);
let array = tcx.deref_mir_constant(self.param_env.and(cv));
let val = PatKind::Deref {
- subpattern: Pat {
- kind: Box::new(PatKind::Array {
+ subpattern: Box::new(Pat {
+ kind: PatKind::Array {
prefix: tcx
.destructure_mir_constant(param_env, array)
.fields
@@ -432,11 +432,11 @@ impl<'a, 'tcx> ConstToPat<'a, 'tcx> {
.map(|val| self.recur(*val, false))
.collect::<Result<_, _>>()?,
slice: None,
- suffix: vec![],
- }),
+ suffix: Box::new([]),
+ },
span,
ty: *pointee_ty,
- },
+ }),
};
self.behind_reference.set(old);
val
@@ -449,8 +449,8 @@ impl<'a, 'tcx> ConstToPat<'a, 'tcx> {
let old = self.behind_reference.replace(true);
let array = tcx.deref_mir_constant(self.param_env.and(cv));
let val = PatKind::Deref {
- subpattern: Pat {
- kind: Box::new(PatKind::Slice {
+ subpattern: Box::new(Pat {
+ kind: PatKind::Slice {
prefix: tcx
.destructure_mir_constant(param_env, array)
.fields
@@ -458,11 +458,11 @@ impl<'a, 'tcx> ConstToPat<'a, 'tcx> {
.map(|val| self.recur(*val, false))
.collect::<Result<_, _>>()?,
slice: None,
- suffix: vec![],
- }),
+ suffix: Box::new([]),
+ },
span,
ty: tcx.mk_slice(elem_ty),
- },
+ }),
};
self.behind_reference.set(old);
val
@@ -484,7 +484,8 @@ impl<'a, 'tcx> ConstToPat<'a, 'tcx> {
lint::builtin::INDIRECT_STRUCTURAL_MATCH,
self.id,
self.span,
- |lint| {lint.build(&msg).emit();},
+ msg,
+ |lint| lint,
);
}
PatKind::Constant { value: cv }
@@ -505,7 +506,7 @@ impl<'a, 'tcx> ConstToPat<'a, 'tcx> {
// convert the dereferenced constant to a pattern that is the sub-pattern of the
// deref pattern.
_ => {
- if !pointee_ty.is_sized(tcx.at(span), param_env) {
+ if !pointee_ty.is_sized(tcx, param_env) {
// `tcx.deref_mir_constant()` below will ICE with an unsized type
// (except slices, which are handled in a separate arm above).
let msg = format!("cannot use unsized non-slice type `{}` in constant patterns", pointee_ty);
@@ -533,7 +534,7 @@ impl<'a, 'tcx> ConstToPat<'a, 'tcx> {
ty::Bool | ty::Char | ty::Int(_) | ty::Uint(_) | ty::FnDef(..) => {
PatKind::Constant { value: cv }
}
- ty::RawPtr(pointee) if pointee.ty.is_sized(tcx.at(span), param_env) => {
+ ty::RawPtr(pointee) if pointee.ty.is_sized(tcx, param_env) => {
PatKind::Constant { value: cv }
}
// FIXME: these can have very surprising behaviour where optimization levels or other
@@ -552,9 +553,8 @@ impl<'a, 'tcx> ConstToPat<'a, 'tcx> {
lint::builtin::POINTER_STRUCTURAL_MATCH,
id,
span,
- |lint| {
- lint.build(msg).emit();
- },
+ msg,
+ |lint| lint,
);
}
PatKind::Constant { value: cv }
@@ -590,12 +590,11 @@ impl<'a, 'tcx> ConstToPat<'a, 'tcx> {
lint::builtin::NONTRIVIAL_STRUCTURAL_MATCH,
id,
span,
- |lint| {
- lint.build(&msg).emit();
- },
+ msg,
+ |lint| lint,
);
}
- Ok(Pat { span, ty: cv.ty(), kind: Box::new(kind) })
+ Ok(Box::new(Pat { span, ty: cv.ty(), kind }))
}
}
diff --git a/compiler/rustc_mir_build/src/thir/pattern/deconstruct_pat.rs b/compiler/rustc_mir_build/src/thir/pattern/deconstruct_pat.rs
index 8d6f8efb6..595abc8f6 100644
--- a/compiler/rustc_mir_build/src/thir/pattern/deconstruct_pat.rs
+++ b/compiler/rustc_mir_build/src/thir/pattern/deconstruct_pat.rs
@@ -71,9 +71,9 @@ use std::ops::RangeInclusive;
/// Recursively expand this pattern into its subpatterns. Only useful for or-patterns.
fn expand_or_pat<'p, 'tcx>(pat: &'p Pat<'tcx>) -> Vec<&'p Pat<'tcx>> {
fn expand<'p, 'tcx>(pat: &'p Pat<'tcx>, vec: &mut Vec<&'p Pat<'tcx>>) {
- if let PatKind::Or { pats } = pat.kind.as_ref() {
- for pat in pats {
- expand(pat, vec);
+ if let PatKind::Or { pats } = &pat.kind {
+ for pat in pats.iter() {
+ expand(&pat, vec);
}
} else {
vec.push(pat)
@@ -252,10 +252,14 @@ impl IntRange {
let kind = if lo == hi {
PatKind::Constant { value: lo_const }
} else {
- PatKind::Range(PatRange { lo: lo_const, hi: hi_const, end: RangeEnd::Included })
+ PatKind::Range(Box::new(PatRange {
+ lo: lo_const,
+ hi: hi_const,
+ end: RangeEnd::Included,
+ }))
};
- Pat { ty, span: DUMMY_SP, kind: Box::new(kind) }
+ Pat { ty, span: DUMMY_SP, kind }
}
/// Lint on likely incorrect range patterns (#63987)
@@ -295,10 +299,10 @@ impl IntRange {
lint::builtin::OVERLAPPING_RANGE_ENDPOINTS,
hir_id,
pcx.span,
+ "multiple patterns overlap on their endpoints",
|lint| {
- let mut err = lint.build("multiple patterns overlap on their endpoints");
for (int_range, span) in overlaps {
- err.span_label(
+ lint.span_label(
span,
&format!(
"this range overlaps on `{}`...",
@@ -306,9 +310,9 @@ impl IntRange {
),
);
}
- err.span_label(pcx.span, "... with this range");
- err.note("you likely meant to write mutually exclusive ranges");
- err.emit();
+ lint.span_label(pcx.span, "... with this range");
+ lint.note("you likely meant to write mutually exclusive ranges");
+ lint
},
);
}
@@ -984,10 +988,12 @@ impl<'tcx> SplitWildcard<'tcx> {
.filter(|(_, v)| {
// If `exhaustive_patterns` is enabled, we exclude variants known to be
// uninhabited.
- let is_uninhabited = is_exhaustive_pat_feature
- && v.uninhabited_from(cx.tcx, substs, def.adt_kind(), cx.param_env)
- .contains(cx.tcx, cx.module);
- !is_uninhabited
+ !is_exhaustive_pat_feature
+ || v.inhabited_predicate(cx.tcx, *def).subst(cx.tcx, substs).apply(
+ cx.tcx,
+ cx.param_env,
+ cx.module,
+ )
})
.map(|(idx, _)| Variant(idx))
.collect();
@@ -1297,7 +1303,7 @@ impl<'p, 'tcx> DeconstructedPat<'p, 'tcx> {
let mkpat = |pat| DeconstructedPat::from_pat(cx, pat);
let ctor;
let fields;
- match pat.kind.as_ref() {
+ match &pat.kind {
PatKind::AscribeUserType { subpattern, .. } => return mkpat(subpattern),
PatKind::Binding { subpattern: Some(subpat), .. } => return mkpat(subpat),
PatKind::Binding { subpattern: None, .. } | PatKind::Wild => {
@@ -1342,9 +1348,9 @@ impl<'p, 'tcx> DeconstructedPat<'p, 'tcx> {
fields = Fields::singleton(cx, pat);
}
ty::Adt(adt, _) => {
- ctor = match pat.kind.as_ref() {
+ ctor = match pat.kind {
PatKind::Leaf { .. } => Single,
- PatKind::Variant { variant_index, .. } => Variant(*variant_index),
+ PatKind::Variant { variant_index, .. } => Variant(variant_index),
_ => bug!(),
};
let variant = &adt.variant(ctor.variant_index_for_adt(*adt));
@@ -1402,7 +1408,7 @@ impl<'p, 'tcx> DeconstructedPat<'p, 'tcx> {
}
}
}
- &PatKind::Range(PatRange { lo, hi, end }) => {
+ &PatKind::Range(box PatRange { lo, hi, end }) => {
let ty = lo.ty();
ctor = if let Some(int_range) = IntRange::from_range(
cx.tcx,
@@ -1429,7 +1435,8 @@ impl<'p, 'tcx> DeconstructedPat<'p, 'tcx> {
FixedLen(prefix.len() + suffix.len())
};
ctor = Slice(Slice::new(array_len, kind));
- fields = Fields::from_iter(cx, prefix.iter().chain(suffix).map(mkpat));
+ fields =
+ Fields::from_iter(cx, prefix.iter().chain(suffix.iter()).map(|p| mkpat(&*p)));
}
PatKind::Or { .. } => {
ctor = Or;
@@ -1442,15 +1449,15 @@ impl<'p, 'tcx> DeconstructedPat<'p, 'tcx> {
pub(crate) fn to_pat(&self, cx: &MatchCheckCtxt<'p, 'tcx>) -> Pat<'tcx> {
let is_wildcard = |pat: &Pat<'_>| {
- matches!(*pat.kind, PatKind::Binding { subpattern: None, .. } | PatKind::Wild)
+ matches!(pat.kind, PatKind::Binding { subpattern: None, .. } | PatKind::Wild)
};
- let mut subpatterns = self.iter_fields().map(|p| p.to_pat(cx));
- let pat = match &self.ctor {
+ let mut subpatterns = self.iter_fields().map(|p| Box::new(p.to_pat(cx)));
+ let kind = match &self.ctor {
Single | Variant(_) => match self.ty.kind() {
ty::Tuple(..) => PatKind::Leaf {
subpatterns: subpatterns
.enumerate()
- .map(|(i, p)| FieldPat { field: Field::new(i), pattern: p })
+ .map(|(i, pattern)| FieldPat { field: Field::new(i), pattern })
.collect(),
},
ty::Adt(adt_def, _) if adt_def.is_box() => {
@@ -1485,7 +1492,7 @@ impl<'p, 'tcx> DeconstructedPat<'p, 'tcx> {
FixedLen(_) => PatKind::Slice {
prefix: subpatterns.collect(),
slice: None,
- suffix: vec![],
+ suffix: Box::new([]),
},
VarLen(prefix, _) => {
let mut subpatterns = subpatterns.peekable();
@@ -1504,14 +1511,18 @@ impl<'p, 'tcx> DeconstructedPat<'p, 'tcx> {
subpatterns.next();
}
}
- let suffix: Vec<_> = subpatterns.collect();
+ let suffix: Box<[_]> = subpatterns.collect();
let wild = Pat::wildcard_from_ty(self.ty);
- PatKind::Slice { prefix, slice: Some(wild), suffix }
+ PatKind::Slice {
+ prefix: prefix.into_boxed_slice(),
+ slice: Some(Box::new(wild)),
+ suffix,
+ }
}
}
}
&Str(value) => PatKind::Constant { value },
- &FloatRange(lo, hi, end) => PatKind::Range(PatRange { lo, hi, end }),
+ &FloatRange(lo, hi, end) => PatKind::Range(Box::new(PatRange { lo, hi, end })),
IntRange(range) => return range.to_pat(cx.tcx, self.ty),
Wildcard | NonExhaustive => PatKind::Wild,
Missing { .. } => bug!(
@@ -1523,7 +1534,7 @@ impl<'p, 'tcx> DeconstructedPat<'p, 'tcx> {
}
};
- Pat { ty: self.ty, span: DUMMY_SP, kind: Box::new(pat) }
+ Pat { ty: self.ty, span: DUMMY_SP, kind }
}
pub(super) fn is_or_pat(&self) -> bool {
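
Several hunks in these files also switch lint emission to the newer `struct_span_lint_hir` shape: the primary message is passed as its own argument, and the closure only decorates the diagnostic and returns it rather than calling `lint.build(msg)` and `.emit()` itself. The call shape, taken almost verbatim from the overlapping-range hunk above (not standalone code; the per-overlap span labels are elided):

tcx.struct_span_lint_hir(
    lint::builtin::OVERLAPPING_RANGE_ENDPOINTS,
    hir_id,
    pcx.span,
    "multiple patterns overlap on their endpoints",
    |lint| {
        // ... per-overlap `span_label` calls elided ...
        lint.span_label(pcx.span, "... with this range");
        lint.note("you likely meant to write mutually exclusive ranges");
        lint // returned to the caller, which emits it; no `.emit()` here
    },
);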
diff --git a/compiler/rustc_mir_build/src/thir/pattern/mod.rs b/compiler/rustc_mir_build/src/thir/pattern/mod.rs
index a13748a2d..2526522a2 100644
--- a/compiler/rustc_mir_build/src/thir/pattern/mod.rs
+++ b/compiler/rustc_mir_build/src/thir/pattern/mod.rs
@@ -29,27 +29,27 @@ use rustc_span::{Span, Symbol};
use std::cmp::Ordering;
#[derive(Clone, Debug)]
-pub(crate) enum PatternError {
+enum PatternError {
AssocConstInPattern(Span),
ConstParamInPattern(Span),
StaticInPattern(Span),
NonConstPath(Span),
}
-pub(crate) struct PatCtxt<'a, 'tcx> {
- pub(crate) tcx: TyCtxt<'tcx>,
- pub(crate) param_env: ty::ParamEnv<'tcx>,
- pub(crate) typeck_results: &'a ty::TypeckResults<'tcx>,
- pub(crate) errors: Vec<PatternError>,
+struct PatCtxt<'a, 'tcx> {
+ tcx: TyCtxt<'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ typeck_results: &'a ty::TypeckResults<'tcx>,
+ errors: Vec<PatternError>,
include_lint_checks: bool,
}
-pub(crate) fn pat_from_hir<'a, 'tcx>(
+pub(super) fn pat_from_hir<'a, 'tcx>(
tcx: TyCtxt<'tcx>,
param_env: ty::ParamEnv<'tcx>,
typeck_results: &'a ty::TypeckResults<'tcx>,
pat: &'tcx hir::Pat<'tcx>,
-) -> Pat<'tcx> {
+) -> Box<Pat<'tcx>> {
let mut pcx = PatCtxt::new(tcx, param_env, typeck_results);
let result = pcx.lower_pattern(pat);
if !pcx.errors.is_empty() {
@@ -61,7 +61,7 @@ pub(crate) fn pat_from_hir<'a, 'tcx>(
}
impl<'a, 'tcx> PatCtxt<'a, 'tcx> {
- pub(crate) fn new(
+ fn new(
tcx: TyCtxt<'tcx>,
param_env: ty::ParamEnv<'tcx>,
typeck_results: &'a ty::TypeckResults<'tcx>,
@@ -69,12 +69,12 @@ impl<'a, 'tcx> PatCtxt<'a, 'tcx> {
PatCtxt { tcx, param_env, typeck_results, errors: vec![], include_lint_checks: false }
}
- pub(crate) fn include_lint_checks(&mut self) -> &mut Self {
+ fn include_lint_checks(&mut self) -> &mut Self {
self.include_lint_checks = true;
self
}
- pub(crate) fn lower_pattern(&mut self, pat: &'tcx hir::Pat<'tcx>) -> Pat<'tcx> {
+ fn lower_pattern(&mut self, pat: &'tcx hir::Pat<'tcx>) -> Box<Pat<'tcx>> {
// When implicit dereferences have been inserted in this pattern, the unadjusted lowered
// pattern has the type that results *after* dereferencing. For example, in this code:
//
@@ -86,7 +86,7 @@ impl<'a, 'tcx> PatCtxt<'a, 'tcx> {
// ```
//
// the type assigned to `Some(n)` in `unadjusted_pat` would be `Option<i32>` (this is
- // determined in rustc_typeck::check::match). The adjustments would be
+ // determined in rustc_hir_analysis::check::match). The adjustments would be
//
// `vec![&&Option<i32>, &Option<i32>]`.
//
@@ -97,13 +97,13 @@ impl<'a, 'tcx> PatCtxt<'a, 'tcx> {
let unadjusted_pat = self.lower_pattern_unadjusted(pat);
self.typeck_results.pat_adjustments().get(pat.hir_id).unwrap_or(&vec![]).iter().rev().fold(
unadjusted_pat,
- |pat, ref_ty| {
+ |pat: Box<_>, ref_ty| {
debug!("{:?}: wrapping pattern with type {:?}", pat, ref_ty);
- Pat {
+ Box::new(Pat {
span: pat.span,
ty: *ref_ty,
- kind: Box::new(PatKind::Deref { subpattern: pat }),
- }
+ kind: PatKind::Deref { subpattern: pat },
+ })
},
)
}
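
To make the comment above concrete: for a scrutinee of type `&&Option<i32>` matched against `Some(n)`, the recorded adjustments `[&&Option<i32>, &Option<i32>]` are folded in reverse, wrapping the unadjusted pattern once per implicit dereference. Schematically (nesting only, field details elided, not exact THIR debug output):

// Deref {                          // outer wrapper, typed `&&Option<i32>`
//     subpattern: Deref {          // inner wrapper, typed `&Option<i32>`
//         subpattern: Some(n),     // the unadjusted pattern, typed `Option<i32>`
//     },
// }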
@@ -113,7 +113,7 @@ impl<'a, 'tcx> PatCtxt<'a, 'tcx> {
expr: &'tcx hir::Expr<'tcx>,
) -> (PatKind<'tcx>, Option<Ascription<'tcx>>) {
match self.lower_lit(expr) {
- PatKind::AscribeUserType { ascription, subpattern: Pat { kind: box kind, .. } } => {
+ PatKind::AscribeUserType { ascription, subpattern: box Pat { kind, .. } } => {
(kind, Some(ascription))
}
kind => (kind, None),
@@ -134,7 +134,9 @@ impl<'a, 'tcx> PatCtxt<'a, 'tcx> {
match (end, cmp) {
// `x..y` where `x < y`.
// Non-empty because the range includes at least `x`.
- (RangeEnd::Excluded, Some(Ordering::Less)) => PatKind::Range(PatRange { lo, hi, end }),
+ (RangeEnd::Excluded, Some(Ordering::Less)) => {
+ PatKind::Range(Box::new(PatRange { lo, hi, end }))
+ }
// `x..y` where `x >= y`. The range is empty => error.
(RangeEnd::Excluded, _) => {
struct_span_err!(
@@ -149,7 +151,9 @@ impl<'a, 'tcx> PatCtxt<'a, 'tcx> {
// `x..=y` where `x == y`.
(RangeEnd::Included, Some(Ordering::Equal)) => PatKind::Constant { value: lo },
// `x..=y` where `x < y`.
- (RangeEnd::Included, Some(Ordering::Less)) => PatKind::Range(PatRange { lo, hi, end }),
+ (RangeEnd::Included, Some(Ordering::Less)) => {
+ PatKind::Range(Box::new(PatRange { lo, hi, end }))
+ }
// `x..=y` where `x > y` hence the range is empty => error.
(RangeEnd::Included, _) => {
let mut err = struct_span_err!(
@@ -196,8 +200,10 @@ impl<'a, 'tcx> PatCtxt<'a, 'tcx> {
}
}
- fn lower_pattern_unadjusted(&mut self, pat: &'tcx hir::Pat<'tcx>) -> Pat<'tcx> {
+ #[instrument(skip(self), level = "debug")]
+ fn lower_pattern_unadjusted(&mut self, pat: &'tcx hir::Pat<'tcx>) -> Box<Pat<'tcx>> {
let mut ty = self.typeck_results.node_type(pat.hir_id);
+ let mut span = pat.span;
let kind = match pat.kind {
hir::PatKind::Wild => PatKind::Wild,
@@ -228,7 +234,7 @@ impl<'a, 'tcx> PatCtxt<'a, 'tcx> {
// constants somewhere. Have them on the range pattern.
for end in &[lo, hi] {
if let Some((_, Some(ascription))) = end {
- let subpattern = Pat { span: pat.span, ty, kind: Box::new(kind) };
+ let subpattern = Box::new(Pat { span: pat.span, ty, kind });
kind =
PatKind::AscribeUserType { ascription: ascription.clone(), subpattern };
}
@@ -258,6 +264,10 @@ impl<'a, 'tcx> PatCtxt<'a, 'tcx> {
}
hir::PatKind::Binding(_, id, ident, ref sub) => {
+ if let Some(ident_span) = ident.span.find_ancestor_inside(span) {
+ span = span.with_hi(ident_span.hi());
+ }
+
let bm = *self
.typeck_results
.pat_binding_modes()
@@ -322,14 +332,14 @@ impl<'a, 'tcx> PatCtxt<'a, 'tcx> {
hir::PatKind::Or(ref pats) => PatKind::Or { pats: self.lower_patterns(pats) },
};
- Pat { span: pat.span, ty, kind: Box::new(kind) }
+ Box::new(Pat { span, ty, kind })
}
fn lower_tuple_subpats(
&mut self,
pats: &'tcx [hir::Pat<'tcx>],
expected_len: usize,
- gap_pos: Option<usize>,
+ gap_pos: hir::DotDotPos,
) -> Vec<FieldPat<'tcx>> {
pats.iter()
.enumerate_and_adjust(expected_len, gap_pos)
@@ -340,11 +350,14 @@ impl<'a, 'tcx> PatCtxt<'a, 'tcx> {
.collect()
}
- fn lower_patterns(&mut self, pats: &'tcx [hir::Pat<'tcx>]) -> Vec<Pat<'tcx>> {
+ fn lower_patterns(&mut self, pats: &'tcx [hir::Pat<'tcx>]) -> Box<[Box<Pat<'tcx>>]> {
pats.iter().map(|p| self.lower_pattern(p)).collect()
}
- fn lower_opt_pattern(&mut self, pat: &'tcx Option<&'tcx hir::Pat<'tcx>>) -> Option<Pat<'tcx>> {
+ fn lower_opt_pattern(
+ &mut self,
+ pat: &'tcx Option<&'tcx hir::Pat<'tcx>>,
+ ) -> Option<Box<Pat<'tcx>>> {
pat.as_ref().map(|p| self.lower_pattern(p))
}
@@ -420,7 +433,8 @@ impl<'a, 'tcx> PatCtxt<'a, 'tcx> {
| DefKind::AssocTy,
_,
)
- | Res::SelfTy { .. }
+ | Res::SelfTyParam { .. }
+ | Res::SelfTyAlias { .. }
| Res::SelfCtor(..) => PatKind::Leaf { subpatterns },
_ => {
let pattern_error = match res {
@@ -436,12 +450,12 @@ impl<'a, 'tcx> PatCtxt<'a, 'tcx> {
if let Some(user_ty) = self.user_substs_applied_to_ty_of_hir_id(hir_id) {
debug!("lower_variant_or_leaf: kind={:?} user_ty={:?} span={:?}", kind, user_ty, span);
let annotation = CanonicalUserTypeAnnotation {
- user_ty,
+ user_ty: Box::new(user_ty),
span,
inferred_ty: self.typeck_results.node_type(hir_id),
};
kind = PatKind::AscribeUserType {
- subpattern: Pat { span, ty, kind: Box::new(kind) },
+ subpattern: Box::new(Pat { span, ty, kind }),
ascription: Ascription { annotation, variance: ty::Variance::Covariant },
};
}
@@ -453,11 +467,11 @@ impl<'a, 'tcx> PatCtxt<'a, 'tcx> {
/// it to `const_to_pat`. Any other path (like enum variants without fields)
/// is converted to the corresponding pattern via `lower_variant_or_leaf`.
#[instrument(skip(self), level = "debug")]
- fn lower_path(&mut self, qpath: &hir::QPath<'_>, id: hir::HirId, span: Span) -> Pat<'tcx> {
+ fn lower_path(&mut self, qpath: &hir::QPath<'_>, id: hir::HirId, span: Span) -> Box<Pat<'tcx>> {
let ty = self.typeck_results.node_type(id);
let res = self.typeck_results.qpath_res(qpath, id);
- let pat_from_kind = |kind| Pat { span, ty, kind: Box::new(kind) };
+ let pat_from_kind = |kind| Box::new(Pat { span, ty, kind });
let (def_id, is_associated_const) = match res {
Res::Def(DefKind::Const, def_id) => (def_id, false),
@@ -469,7 +483,12 @@ impl<'a, 'tcx> PatCtxt<'a, 'tcx> {
// Use `Reveal::All` here because patterns are always monomorphic even if their function
// isn't.
let param_env_reveal_all = self.param_env.with_reveal_all_normalized(self.tcx);
- let substs = self.typeck_results.node_substs(id);
+ // N.B. There is no guarantee that substs collected in typeck results are fully normalized,
+ // so they need to be normalized in order to pass to `Instance::resolve`, which will ICE
+ // if given unnormalized types.
+ let substs = self
+ .tcx
+ .normalize_erasing_regions(param_env_reveal_all, self.typeck_results.node_substs(id));
let instance = match ty::Instance::resolve(self.tcx, param_env_reveal_all, def_id, substs) {
Ok(Some(i)) => i,
Ok(None) => {
@@ -505,13 +524,13 @@ impl<'a, 'tcx> PatCtxt<'a, 'tcx> {
let user_provided_types = self.typeck_results().user_provided_types();
if let Some(&user_ty) = user_provided_types.get(id) {
let annotation = CanonicalUserTypeAnnotation {
- user_ty,
+ user_ty: Box::new(user_ty),
span,
inferred_ty: self.typeck_results().node_type(id),
};
- Pat {
+ Box::new(Pat {
span,
- kind: Box::new(PatKind::AscribeUserType {
+ kind: PatKind::AscribeUserType {
subpattern: pattern,
ascription: Ascription {
annotation,
@@ -519,9 +538,9 @@ impl<'a, 'tcx> PatCtxt<'a, 'tcx> {
/// `variance` field documentation for details.
variance: ty::Variance::Contravariant,
},
- }),
+ },
ty: const_.ty(),
- }
+ })
} else {
pattern
}
@@ -553,23 +572,19 @@ impl<'a, 'tcx> PatCtxt<'a, 'tcx> {
let value = value.eval(self.tcx, self.param_env);
match value {
- mir::ConstantKind::Ty(c) => {
- match c.kind() {
- ConstKind::Param(_) => {
- self.errors.push(PatternError::ConstParamInPattern(span));
- return PatKind::Wild;
- }
- ConstKind::Unevaluated(_) => {
- // If we land here it means the const can't be evaluated because it's `TooGeneric`.
- self.tcx
- .sess
- .span_err(span, "constant pattern depends on a generic parameter");
- return PatKind::Wild;
- }
- _ => bug!("Expected either ConstKind::Param or ConstKind::Unevaluated"),
+ mir::ConstantKind::Ty(c) => match c.kind() {
+ ConstKind::Param(_) => {
+ self.errors.push(PatternError::ConstParamInPattern(span));
+ return PatKind::Wild;
}
+ _ => bug!("Expected ConstKind::Param"),
+ },
+ mir::ConstantKind::Val(_, _) => self.const_to_pat(value, id, span, false).kind,
+ mir::ConstantKind::Unevaluated(..) => {
+ // If we land here it means the const can't be evaluated because it's `TooGeneric`.
+ self.tcx.sess.span_err(span, "constant pattern depends on a generic parameter");
+ return PatKind::Wild;
}
- mir::ConstantKind::Val(_, _) => *self.const_to_pat(value, id, span, false).kind,
}
}
@@ -580,7 +595,7 @@ impl<'a, 'tcx> PatCtxt<'a, 'tcx> {
fn lower_lit(&mut self, expr: &'tcx hir::Expr<'tcx>) -> PatKind<'tcx> {
let (lit, neg) = match expr.kind {
hir::ExprKind::Path(ref qpath) => {
- return *self.lower_path(qpath, expr.hir_id, expr.span).kind;
+ return self.lower_path(qpath, expr.hir_id, expr.span).kind;
}
hir::ExprKind::ConstBlock(ref anon_const) => {
return self.lower_inline_const(anon_const, expr.hir_id, expr.span);
@@ -598,7 +613,7 @@ impl<'a, 'tcx> PatCtxt<'a, 'tcx> {
let lit_input =
LitToConstInput { lit: &lit.node, ty: self.typeck_results.expr_ty(expr), neg };
match self.tcx.at(expr.span).lit_to_mir_constant(lit_input) {
- Ok(constant) => *self.const_to_pat(constant, expr.hir_id, lit.span, false).kind,
+ Ok(constant) => self.const_to_pat(constant, expr.hir_id, lit.span, false).kind,
Err(LitToConstError::Reported) => PatKind::Wild,
Err(LitToConstError::TypeError) => bug!("lower_lit: had type error"),
}
@@ -615,7 +630,7 @@ impl<'tcx> UserAnnotatedTyHelpers<'tcx> for PatCtxt<'_, 'tcx> {
}
}
-pub(crate) trait PatternFoldable<'tcx>: Sized {
+trait PatternFoldable<'tcx>: Sized {
fn fold_with<F: PatternFolder<'tcx>>(&self, folder: &mut F) -> Self {
self.super_fold_with(folder)
}
@@ -623,7 +638,7 @@ pub(crate) trait PatternFoldable<'tcx>: Sized {
fn super_fold_with<F: PatternFolder<'tcx>>(&self, folder: &mut F) -> Self;
}
-pub(crate) trait PatternFolder<'tcx>: Sized {
+trait PatternFolder<'tcx>: Sized {
fn fold_pattern(&mut self, pattern: &Pat<'tcx>) -> Pat<'tcx> {
pattern.super_fold_with(self)
}
@@ -646,6 +661,12 @@ impl<'tcx, T: PatternFoldable<'tcx>> PatternFoldable<'tcx> for Vec<T> {
}
}
+impl<'tcx, T: PatternFoldable<'tcx>> PatternFoldable<'tcx> for Box<[T]> {
+ fn super_fold_with<F: PatternFolder<'tcx>>(&self, folder: &mut F) -> Self {
+ self.iter().map(|t| t.fold_with(folder)).collect()
+ }
+}
+
impl<'tcx, T: PatternFoldable<'tcx>> PatternFoldable<'tcx> for Option<T> {
fn super_fold_with<F: PatternFolder<'tcx>>(&self, folder: &mut F) -> Self {
self.as_ref().map(|t| t.fold_with(folder))
@@ -665,7 +686,7 @@ macro_rules! ClonePatternFoldableImpls {
}
ClonePatternFoldableImpls! { <'tcx>
- Span, Field, Mutability, Symbol, LocalVarId, usize, ty::Const<'tcx>,
+ Span, Field, Mutability, Symbol, LocalVarId, usize,
Region<'tcx>, Ty<'tcx>, BindingMode, AdtDef<'tcx>,
SubstsRef<'tcx>, &'tcx GenericArg<'tcx>, UserType<'tcx>,
UserTypeProjection, CanonicalUserTypeAnnotation<'tcx>
@@ -732,7 +753,7 @@ impl<'tcx> PatternFoldable<'tcx> for PatKind<'tcx> {
PatKind::Deref { subpattern: subpattern.fold_with(folder) }
}
PatKind::Constant { value } => PatKind::Constant { value },
- PatKind::Range(range) => PatKind::Range(range),
+ PatKind::Range(ref range) => PatKind::Range(range.clone()),
PatKind::Slice { ref prefix, ref slice, ref suffix } => PatKind::Slice {
prefix: prefix.fold_with(folder),
slice: slice.fold_with(folder),
diff --git a/compiler/rustc_mir_build/src/thir/pattern/usefulness.rs b/compiler/rustc_mir_build/src/thir/pattern/usefulness.rs
index 0a660ef30..8dc9976ea 100644
--- a/compiler/rustc_mir_build/src/thir/pattern/usefulness.rs
+++ b/compiler/rustc_mir_build/src/thir/pattern/usefulness.rs
@@ -364,8 +364,8 @@ impl<'a, 'p, 'tcx> fmt::Debug for PatCtxt<'a, 'p, 'tcx> {
/// A row of a matrix. Rows of len 1 are very common, which is why `SmallVec[_; 2]`
/// works well.
#[derive(Clone)]
-struct PatStack<'p, 'tcx> {
- pats: SmallVec<[&'p DeconstructedPat<'p, 'tcx>; 2]>,
+pub(crate) struct PatStack<'p, 'tcx> {
+ pub(crate) pats: SmallVec<[&'p DeconstructedPat<'p, 'tcx>; 2]>,
}
impl<'p, 'tcx> PatStack<'p, 'tcx> {
@@ -403,6 +403,21 @@ impl<'p, 'tcx> PatStack<'p, 'tcx> {
})
}
+ // Recursively expand all patterns into their subpatterns and push each `PatStack` to matrix.
+ fn expand_and_extend<'a>(&'a self, matrix: &mut Matrix<'p, 'tcx>) {
+ if !self.is_empty() && self.head().is_or_pat() {
+ for pat in self.head().iter_fields() {
+ let mut new_patstack = PatStack::from_pattern(pat);
+ new_patstack.pats.extend_from_slice(&self.pats[1..]);
+ if !new_patstack.is_empty() && new_patstack.head().is_or_pat() {
+ new_patstack.expand_and_extend(matrix);
+ } else if !new_patstack.is_empty() {
+ matrix.push(new_patstack);
+ }
+ }
+ }
+ }
+
/// This computes `S(self.head().ctor(), self)`. See top of the file for explanations.
///
/// Structure patterns with a partial wild pattern (Foo { a: 42, .. }) have their missing
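
A worked example of the `expand_and_extend` helper added above (schematic, using toy pattern names rather than the real `DeconstructedPat` values): pushing the row `[ A | (B | C), p2 ]` splits the or-pattern head into one row per alternative and recurses into nested or-pattern heads, so the matrix gains the rows:

//     [ A, p2 ]
//     [ B, p2 ]   // from the nested `B | C`, expanded recursively
//     [ C, p2 ]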
@@ -436,7 +451,7 @@ impl<'p, 'tcx> fmt::Debug for PatStack<'p, 'tcx> {
/// A 2D matrix.
#[derive(Clone)]
pub(super) struct Matrix<'p, 'tcx> {
- patterns: Vec<PatStack<'p, 'tcx>>,
+ pub patterns: Vec<PatStack<'p, 'tcx>>,
}
impl<'p, 'tcx> Matrix<'p, 'tcx> {
@@ -453,7 +468,7 @@ impl<'p, 'tcx> Matrix<'p, 'tcx> {
/// expands it.
fn push(&mut self, row: PatStack<'p, 'tcx>) {
if !row.is_empty() && row.head().is_or_pat() {
- self.patterns.extend(row.expand_or_pat());
+ row.expand_and_extend(self);
} else {
self.patterns.push(row);
}
@@ -731,7 +746,7 @@ impl<'p, 'tcx> Witness<'p, 'tcx> {
/// Report that a match of a `non_exhaustive` enum marked with `non_exhaustive_omitted_patterns`
/// is not exhaustive enough.
///
-/// NB: The partner lint for structs lives in `compiler/rustc_typeck/src/check/pat.rs`.
+/// NB: The partner lint for structs lives in `compiler/rustc_hir_analysis/src/check/pat.rs`.
fn lint_non_exhaustive_omitted_patterns<'p, 'tcx>(
cx: &MatchCheckCtxt<'p, 'tcx>,
scrut_ty: Ty<'tcx>,
@@ -739,9 +754,8 @@ fn lint_non_exhaustive_omitted_patterns<'p, 'tcx>(
hir_id: HirId,
witnesses: Vec<DeconstructedPat<'p, 'tcx>>,
) {
- let joined_patterns = joined_uncovered_patterns(cx, &witnesses);
- cx.tcx.struct_span_lint_hir(NON_EXHAUSTIVE_OMITTED_PATTERNS, hir_id, sp, |build| {
- let mut lint = build.build("some variants are not matched explicitly");
+ cx.tcx.struct_span_lint_hir(NON_EXHAUSTIVE_OMITTED_PATTERNS, hir_id, sp, "some variants are not matched explicitly", |lint| {
+ let joined_patterns = joined_uncovered_patterns(cx, &witnesses);
lint.span_label(sp, pattern_not_covered_label(&witnesses, &joined_patterns));
lint.help(
"ensure that all variants are matched explicitly by adding the suggested match arms",
@@ -750,7 +764,7 @@ fn lint_non_exhaustive_omitted_patterns<'p, 'tcx>(
"the matched value is of type `{}` and the `non_exhaustive_omitted_patterns` attribute was found",
scrut_ty,
));
- lint.emit();
+ lint
});
}
@@ -776,7 +790,7 @@ fn lint_non_exhaustive_omitted_patterns<'p, 'tcx>(
/// `is_under_guard` is used to inform if the pattern has a guard. If it
/// has one it must not be inserted into the matrix. This shouldn't be
/// relied on for soundness.
-#[instrument(level = "debug", skip(cx, matrix, hir_id))]
+#[instrument(level = "debug", skip(cx, matrix, hir_id), ret)]
fn is_useful<'p, 'tcx>(
cx: &MatchCheckCtxt<'p, 'tcx>,
matrix: &Matrix<'p, 'tcx>,
@@ -827,7 +841,15 @@ fn is_useful<'p, 'tcx>(
}
}
} else {
- let ty = v.head().ty();
+ let mut ty = v.head().ty();
+
+ // Opaque types can't get destructured/split, but the patterns can
+ // actually hint at hidden types, so we use the patterns' types instead.
+ if let ty::Opaque(..) = ty.kind() {
+ if let Some(row) = rows.first() {
+ ty = row.head().ty();
+ }
+ }
let is_non_exhaustive = cx.is_foreign_non_exhaustive_enum(ty);
debug!("v.head: {:?}, v.span: {:?}", v.head(), v.head().span());
let pcx = &PatCtxt { cx, ty, span: v.head().span(), is_top_level, is_non_exhaustive };
@@ -863,7 +885,7 @@ fn is_useful<'p, 'tcx>(
// that has the potential to trigger the `non_exhaustive_omitted_patterns` lint.
// To understand the workings checkout `Constructor::split` and `SplitWildcard::new/into_ctors`
if is_non_exhaustive_and_wild
- // We check that the match has a wildcard pattern and that that wildcard is useful,
+ // We check that the match has a wildcard pattern and that wildcard is useful,
// meaning there are variants that are covered by the wildcard. Without the check
// for `witness_preference` the lint would trigger on `if let NonExhaustiveEnum::A = foo {}`
&& usefulness.is_useful() && matches!(witness_preference, RealArm)
@@ -902,7 +924,6 @@ fn is_useful<'p, 'tcx>(
v.head().set_reachable();
}
- debug!(?ret);
ret
}
diff --git a/compiler/rustc_mir_dataflow/Cargo.toml b/compiler/rustc_mir_dataflow/Cargo.toml
index baf9735fb..324644b67 100644
--- a/compiler/rustc_mir_dataflow/Cargo.toml
+++ b/compiler/rustc_mir_dataflow/Cargo.toml
@@ -4,7 +4,6 @@ version = "0.0.0"
edition = "2021"
[lib]
-doctest = false
[dependencies]
polonius-engine = "0.13.0"
@@ -13,10 +12,13 @@ smallvec = { version = "1.8.1", features = ["union", "may_dangle"] }
tracing = "0.1"
rustc_ast = { path = "../rustc_ast" }
rustc_data_structures = { path = "../rustc_data_structures" }
+rustc_errors = { path = "../rustc_errors" }
rustc_graphviz = { path = "../rustc_graphviz" }
rustc_hir = { path = "../rustc_hir" }
rustc_index = { path = "../rustc_index" }
+rustc_macros = { path = "../rustc_macros" }
rustc_middle = { path = "../rustc_middle" }
rustc_serialize = { path = "../rustc_serialize" }
+rustc_session = { path = "../rustc_session" }
rustc_target = { path = "../rustc_target" }
rustc_span = { path = "../rustc_span" }
diff --git a/compiler/rustc_mir_dataflow/src/elaborate_drops.rs b/compiler/rustc_mir_dataflow/src/elaborate_drops.rs
index c0b0cc3c5..23403628c 100644
--- a/compiler/rustc_mir_dataflow/src/elaborate_drops.rs
+++ b/compiler/rustc_mir_dataflow/src/elaborate_drops.rs
@@ -823,9 +823,10 @@ where
// tmp = &raw mut P;
// cur = tmp as *mut T;
// end = Offset(cur, len);
+ let mir_cast_kind = ty::cast::mir_cast_kind(iter_ty, tmp_ty);
vec![
self.assign(tmp, Rvalue::AddressOf(Mutability::Mut, self.place)),
- self.assign(cur, Rvalue::Cast(CastKind::Misc, Operand::Move(tmp), iter_ty)),
+ self.assign(cur, Rvalue::Cast(mir_cast_kind, Operand::Move(tmp), iter_ty)),
self.assign(
length_or_end,
Rvalue::BinaryOp(
@@ -893,7 +894,7 @@ where
}
ty::Slice(ety) => self.open_drop_for_array(*ety, None),
- _ => bug!("open drop from non-ADT `{:?}`", ty),
+ _ => span_bug!(self.source_info.span, "open drop from non-ADT `{:?}`", ty),
}
}
diff --git a/compiler/rustc_mir_dataflow/src/errors.rs b/compiler/rustc_mir_dataflow/src/errors.rs
new file mode 100644
index 000000000..cfacc0ec3
--- /dev/null
+++ b/compiler/rustc_mir_dataflow/src/errors.rs
@@ -0,0 +1,71 @@
+use rustc_macros::Diagnostic;
+use rustc_span::{Span, Symbol};
+
+#[derive(Diagnostic)]
+#[diag(mir_dataflow_path_must_end_in_filename)]
+pub(crate) struct PathMustEndInFilename {
+ #[primary_span]
+ pub span: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(mir_dataflow_unknown_formatter)]
+pub(crate) struct UnknownFormatter {
+ #[primary_span]
+ pub span: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(mir_dataflow_duplicate_values_for)]
+pub(crate) struct DuplicateValuesFor {
+ #[primary_span]
+ pub span: Span,
+ pub name: Symbol,
+}
+
+#[derive(Diagnostic)]
+#[diag(mir_dataflow_requires_an_argument)]
+pub(crate) struct RequiresAnArgument {
+ #[primary_span]
+ pub span: Span,
+ pub name: Symbol,
+}
+
+#[derive(Diagnostic)]
+#[diag(mir_dataflow_stop_after_dataflow_ended_compilation)]
+pub(crate) struct StopAfterDataFlowEndedCompilation;
+
+#[derive(Diagnostic)]
+#[diag(mir_dataflow_peek_must_be_place_or_ref_place)]
+pub(crate) struct PeekMustBePlaceOrRefPlace {
+ #[primary_span]
+ pub span: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(mir_dataflow_peek_must_be_not_temporary)]
+pub(crate) struct PeekMustBeNotTemporary {
+ #[primary_span]
+ pub span: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(mir_dataflow_peek_bit_not_set)]
+pub(crate) struct PeekBitNotSet {
+ #[primary_span]
+ pub span: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(mir_dataflow_peek_argument_not_a_local)]
+pub(crate) struct PeekArgumentNotALocal {
+ #[primary_span]
+ pub span: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(mir_dataflow_peek_argument_untracked)]
+pub(crate) struct PeekArgumentUntracked {
+ #[primary_span]
+ pub span: Span,
+}
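
These derived structs replace the hand-written `span_err` calls that later hunks in this diff remove; they are emitted through the session, and the `#[diag(...)]` slug is presumably resolved against a Fluent message catalog that is not part of this excerpt. Usage shape as it appears further down in this diff (not a standalone program):

// The struct is built with its span and handed to the session, which renders
// the message keyed by the #[diag(...)] slug.
tcx.sess.emit_err(PathMustEndInFilename { span: attr.span() });
tcx.sess.emit_fatal(StopAfterDataFlowEndedCompilation);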
diff --git a/compiler/rustc_mir_dataflow/src/framework/engine.rs b/compiler/rustc_mir_dataflow/src/framework/engine.rs
index f374658ce..bc75645e7 100644
--- a/compiler/rustc_mir_dataflow/src/framework/engine.rs
+++ b/compiler/rustc_mir_dataflow/src/framework/engine.rs
@@ -1,5 +1,8 @@
//! A solver for dataflow problems.
+use crate::errors::{
+ DuplicateValuesFor, PathMustEndInFilename, RequiresAnArgument, UnknownFormatter,
+};
use crate::framework::BitSetExt;
use std::ffi::OsString;
@@ -108,9 +111,9 @@ where
// Otherwise, compute and store the cumulative transfer function for each block.
let identity = GenKillSet::identity(analysis.bottom_value(body).domain_size());
- let mut trans_for_block = IndexVec::from_elem(identity, body.basic_blocks());
+ let mut trans_for_block = IndexVec::from_elem(identity, &body.basic_blocks);
- for (block, block_data) in body.basic_blocks().iter_enumerated() {
+ for (block, block_data) in body.basic_blocks.iter_enumerated() {
let trans = &mut trans_for_block[block];
A::Direction::gen_kill_effects_in_block(&analysis, trans, block, block_data);
}
@@ -144,7 +147,7 @@ where
apply_trans_for_block: Option<Box<dyn Fn(BasicBlock, &mut A::Domain)>>,
) -> Self {
let bottom_value = analysis.bottom_value(body);
- let mut entry_sets = IndexVec::from_elem(bottom_value.clone(), body.basic_blocks());
+ let mut entry_sets = IndexVec::from_elem(bottom_value.clone(), &body.basic_blocks);
analysis.initialize_start_block(body, &mut entry_sets[mir::START_BLOCK]);
if A::Direction::IS_BACKWARD && entry_sets[mir::START_BLOCK] != bottom_value {
@@ -197,8 +200,7 @@ where
..
} = self;
- let mut dirty_queue: WorkQueue<BasicBlock> =
- WorkQueue::with_none(body.basic_blocks().len());
+ let mut dirty_queue: WorkQueue<BasicBlock> = WorkQueue::with_none(body.basic_blocks.len());
if A::Direction::IS_FORWARD {
for (bb, _) in traversal::reverse_postorder(body) {
@@ -347,7 +349,7 @@ impl RustcMirAttrs {
match path.file_name() {
Some(_) => Ok(path),
None => {
- tcx.sess.span_err(attr.span(), "path must end in a filename");
+ tcx.sess.emit_err(PathMustEndInFilename { span: attr.span() });
Err(())
}
}
@@ -356,7 +358,7 @@ impl RustcMirAttrs {
Self::set_field(&mut ret.formatter, tcx, &attr, |s| match s {
sym::gen_kill | sym::two_phase => Ok(s),
_ => {
- tcx.sess.span_err(attr.span(), "unknown formatter");
+ tcx.sess.emit_err(UnknownFormatter { span: attr.span() });
Err(())
}
})
@@ -377,8 +379,7 @@ impl RustcMirAttrs {
mapper: impl FnOnce(Symbol) -> Result<T, ()>,
) -> Result<(), ()> {
if field.is_some() {
- tcx.sess
- .span_err(attr.span(), &format!("duplicate values for `{}`", attr.name_or_empty()));
+ tcx.sess.emit_err(DuplicateValuesFor { span: attr.span(), name: attr.name_or_empty() });
return Err(());
}
@@ -387,8 +388,7 @@ impl RustcMirAttrs {
*field = Some(mapper(s)?);
Ok(())
} else {
- tcx.sess
- .span_err(attr.span(), &format!("`{}` requires an argument", attr.name_or_empty()));
+ tcx.sess.emit_err(RequiresAnArgument { span: attr.span(), name: attr.name_or_empty() });
Err(())
}
}
diff --git a/compiler/rustc_mir_dataflow/src/framework/graphviz.rs b/compiler/rustc_mir_dataflow/src/framework/graphviz.rs
index c94198c56..579fe68a1 100644
--- a/compiler/rustc_mir_dataflow/src/framework/graphviz.rs
+++ b/compiler/rustc_mir_dataflow/src/framework/graphviz.rs
@@ -108,12 +108,12 @@ where
type Edge = CfgEdge;
fn nodes(&self) -> dot::Nodes<'_, Self::Node> {
- self.body.basic_blocks().indices().collect::<Vec<_>>().into()
+ self.body.basic_blocks.indices().collect::<Vec<_>>().into()
}
fn edges(&self) -> dot::Edges<'_, Self::Edge> {
self.body
- .basic_blocks()
+ .basic_blocks
.indices()
.flat_map(|bb| dataflow_successors(self.body, bb))
.collect::<Vec<_>>()
diff --git a/compiler/rustc_mir_dataflow/src/framework/mod.rs b/compiler/rustc_mir_dataflow/src/framework/mod.rs
index f9fd6c9c5..d9aff94fe 100644
--- a/compiler/rustc_mir_dataflow/src/framework/mod.rs
+++ b/compiler/rustc_mir_dataflow/src/framework/mod.rs
@@ -256,6 +256,7 @@ pub trait Analysis<'tcx>: AnalysisDomain<'tcx> {
/// .iterate_to_fixpoint()
/// .into_results_cursor(body);
/// ```
+ #[inline]
fn into_engine<'mir>(
self,
tcx: TyCtxt<'tcx>,
@@ -413,7 +414,7 @@ where
}
/* Extension methods */
-
+ #[inline]
fn into_engine<'mir>(
self,
tcx: TyCtxt<'tcx>,
diff --git a/compiler/rustc_mir_dataflow/src/framework/tests.rs b/compiler/rustc_mir_dataflow/src/framework/tests.rs
index d9461fd3a..17102454a 100644
--- a/compiler/rustc_mir_dataflow/src/framework/tests.rs
+++ b/compiler/rustc_mir_dataflow/src/framework/tests.rs
@@ -100,9 +100,9 @@ impl<D: Direction> MockAnalysis<'_, D> {
fn mock_entry_sets(&self) -> IndexVec<BasicBlock, BitSet<usize>> {
let empty = self.bottom_value(self.body);
- let mut ret = IndexVec::from_elem(empty, &self.body.basic_blocks());
+ let mut ret = IndexVec::from_elem(empty, &self.body.basic_blocks);
- for (bb, _) in self.body.basic_blocks().iter_enumerated() {
+ for (bb, _) in self.body.basic_blocks.iter_enumerated() {
ret[bb] = self.mock_entry_set(bb);
}
@@ -169,7 +169,7 @@ impl<'tcx, D: Direction> AnalysisDomain<'tcx> for MockAnalysis<'tcx, D> {
const NAME: &'static str = "mock";
fn bottom_value(&self, body: &mir::Body<'tcx>) -> Self::Domain {
- BitSet::new_empty(Self::BASIC_BLOCK_OFFSET + body.basic_blocks().len())
+ BitSet::new_empty(Self::BASIC_BLOCK_OFFSET + body.basic_blocks.len())
}
fn initialize_start_block(&self, _: &mir::Body<'tcx>, _: &mut Self::Domain) {
@@ -271,9 +271,7 @@ fn test_cursor<D: Direction>(analysis: MockAnalysis<'_, D>) {
cursor.allow_unreachable();
let every_target = || {
- body.basic_blocks()
- .iter_enumerated()
- .flat_map(|(bb, _)| SeekTarget::iter_in_block(body, bb))
+ body.basic_blocks.iter_enumerated().flat_map(|(bb, _)| SeekTarget::iter_in_block(body, bb))
};
let mut seek_to_target = |targ| {
diff --git a/compiler/rustc_mir_dataflow/src/impls/liveness.rs b/compiler/rustc_mir_dataflow/src/impls/liveness.rs
index 21132eb99..3e08a8799 100644
--- a/compiler/rustc_mir_dataflow/src/impls/liveness.rs
+++ b/compiler/rustc_mir_dataflow/src/impls/liveness.rs
@@ -23,12 +23,6 @@ use crate::{Analysis, AnalysisDomain, Backward, CallReturnPlaces, GenKill, GenKi
/// [liveness]: https://en.wikipedia.org/wiki/Live_variable_analysis
pub struct MaybeLiveLocals;
-impl MaybeLiveLocals {
- fn transfer_function<'a, T>(&self, trans: &'a mut T) -> TransferFunction<'a, T> {
- TransferFunction(trans)
- }
-}
-
impl<'tcx> AnalysisDomain<'tcx> for MaybeLiveLocals {
type Domain = ChunkedBitSet<Local>;
type Direction = Backward;
@@ -54,7 +48,7 @@ impl<'tcx> GenKillAnalysis<'tcx> for MaybeLiveLocals {
statement: &mir::Statement<'tcx>,
location: Location,
) {
- self.transfer_function(trans).visit_statement(statement, location);
+ TransferFunction(trans).visit_statement(statement, location);
}
fn terminator_effect(
@@ -63,7 +57,7 @@ impl<'tcx> GenKillAnalysis<'tcx> for MaybeLiveLocals {
terminator: &mir::Terminator<'tcx>,
location: Location,
) {
- self.transfer_function(trans).visit_terminator(terminator, location);
+ TransferFunction(trans).visit_terminator(terminator, location);
}
fn call_return_effect(
@@ -85,9 +79,11 @@ impl<'tcx> GenKillAnalysis<'tcx> for MaybeLiveLocals {
_resume_block: mir::BasicBlock,
resume_place: mir::Place<'tcx>,
) {
- if let Some(local) = resume_place.as_local() {
- trans.kill(local);
- }
+ YieldResumeEffect(trans).visit_place(
+ &resume_place,
+ PlaceContext::MutatingUse(MutatingUseContext::Yield),
+ Location::START,
+ )
}
}
@@ -98,28 +94,51 @@ where
T: GenKill<Local>,
{
fn visit_place(&mut self, place: &mir::Place<'tcx>, context: PlaceContext, location: Location) {
- let local = place.local;
-
- // We purposefully do not call `super_place` here to avoid calling `visit_local` for this
- // place with one of the `Projection` variants of `PlaceContext`.
- self.visit_projection(place.as_ref(), context, location);
+ if let PlaceContext::MutatingUse(MutatingUseContext::Yield) = context {
+ // The resume place is evaluated and assigned to only after generator resumes, so its
+ // effect is handled separately in `yield_resume_effect`.
+ return;
+ }
match DefUse::for_place(*place, context) {
- Some(DefUse::Def) => self.0.kill(local),
- Some(DefUse::Use) => self.0.gen(local),
+ Some(DefUse::Def) => {
+ if let PlaceContext::MutatingUse(
+ MutatingUseContext::Call | MutatingUseContext::AsmOutput,
+ ) = context
+ {
+ // For the associated terminators, this is only a `Def` when the terminator returns
+ // "successfully." As such, we handle this case separately in `call_return_effect`
+ // above. However, if the place looks like `*_5`, this is still unconditionally a use of
+ // `_5`.
+ } else {
+ self.0.kill(place.local);
+ }
+ }
+ Some(DefUse::Use) => self.0.gen(place.local),
None => {}
}
+
+ self.visit_projection(place.as_ref(), context, location);
}
fn visit_local(&mut self, local: Local, context: PlaceContext, _: Location) {
- // Because we do not call `super_place` above, `visit_local` is only called for locals that
- // do not appear as part of a `Place` in the MIR. This handles cases like the implicit use
- // of the return place in a `Return` terminator or the index in an `Index` projection.
- match DefUse::for_place(local.into(), context) {
- Some(DefUse::Def) => self.0.kill(local),
- Some(DefUse::Use) => self.0.gen(local),
- None => {}
- }
+ DefUse::apply(self.0, local.into(), context);
+ }
+}
+
+struct YieldResumeEffect<'a, T>(&'a mut T);
+
+impl<'tcx, T> Visitor<'tcx> for YieldResumeEffect<'_, T>
+where
+ T: GenKill<Local>,
+{
+ fn visit_place(&mut self, place: &mir::Place<'tcx>, context: PlaceContext, location: Location) {
+ DefUse::apply(self.0, *place, context);
+ self.visit_projection(place.as_ref(), context, location);
+ }
+
+ fn visit_local(&mut self, local: Local, context: PlaceContext, _: Location) {
+ DefUse::apply(self.0, local.into(), context);
}
}
@@ -130,11 +149,25 @@ enum DefUse {
}
impl DefUse {
+ fn apply<'tcx>(trans: &mut impl GenKill<Local>, place: Place<'tcx>, context: PlaceContext) {
+ match DefUse::for_place(place, context) {
+ Some(DefUse::Def) => trans.kill(place.local),
+ Some(DefUse::Use) => trans.gen(place.local),
+ None => {}
+ }
+ }
+
fn for_place<'tcx>(place: Place<'tcx>, context: PlaceContext) -> Option<DefUse> {
match context {
PlaceContext::NonUse(_) => None,
- PlaceContext::MutatingUse(MutatingUseContext::Store | MutatingUseContext::Deinit) => {
+ PlaceContext::MutatingUse(
+ MutatingUseContext::Call
+ | MutatingUseContext::Yield
+ | MutatingUseContext::AsmOutput
+ | MutatingUseContext::Store
+ | MutatingUseContext::Deinit,
+ ) => {
if place.is_indirect() {
// Treat derefs as a use of the base local. `*p = 4` is not a def of `p` but a
// use.
@@ -152,16 +185,6 @@ impl DefUse {
place.is_indirect().then_some(DefUse::Use)
}
- // For the associated terminators, this is only a `Def` when the terminator returns
- // "successfully." As such, we handle this case separately in `call_return_effect`
- // above. However, if the place looks like `*_5`, this is still unconditionally a use of
- // `_5`.
- PlaceContext::MutatingUse(
- MutatingUseContext::Call
- | MutatingUseContext::Yield
- | MutatingUseContext::AsmOutput,
- ) => place.is_indirect().then_some(DefUse::Use),
-
// All other contexts are uses...
PlaceContext::MutatingUse(
MutatingUseContext::AddressOf
@@ -247,7 +270,7 @@ impl<'a, 'tcx> Analysis<'tcx> for MaybeTransitiveLiveLocals<'a> {
| StatementKind::Retag(..)
| StatementKind::AscribeUserType(..)
| StatementKind::Coverage(..)
- | StatementKind::CopyNonOverlapping(..)
+ | StatementKind::Intrinsic(..)
| StatementKind::Nop => None,
};
if let Some(destination) = destination {
@@ -290,8 +313,10 @@ impl<'a, 'tcx> Analysis<'tcx> for MaybeTransitiveLiveLocals<'a> {
_resume_block: mir::BasicBlock,
resume_place: mir::Place<'tcx>,
) {
- if let Some(local) = resume_place.as_local() {
- trans.remove(local);
- }
+ YieldResumeEffect(trans).visit_place(
+ &resume_place,
+ PlaceContext::MutatingUse(MutatingUseContext::Yield),
+ Location::START,
+ )
}
}
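
The refactor above centralizes the gen/kill decision in `DefUse::apply`: roughly, overwriting a whole local kills it, reading it gens it, and a write through a pointer (`*p = 4`) counts as a use of the base local rather than a def. A toy, self-contained sketch of that transfer-function idea, using a `HashSet<usize>` in place of the real `GenKill<Local>` domain (illustration only, not rustc code):

use std::collections::HashSet;

enum DefUse { Def, Use }

fn apply(live: &mut HashSet<usize>, local: usize, access: Option<DefUse>) {
    match access {
        Some(DefUse::Def) => { live.remove(&local); } // overwritten: kill
        Some(DefUse::Use) => { live.insert(local); }  // read: gen
        None => {}
    }
}

fn main() {
    let mut live = HashSet::new();
    // Walking a block backwards: a use makes the local live, a later-seen
    // (earlier-in-program) full definition kills it again.
    apply(&mut live, 5, Some(DefUse::Use));
    apply(&mut live, 5, Some(DefUse::Def));
    assert!(!live.contains(&5));
}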
diff --git a/compiler/rustc_mir_dataflow/src/impls/storage_liveness.rs b/compiler/rustc_mir_dataflow/src/impls/storage_liveness.rs
index f6b5af90a..18760b6c6 100644
--- a/compiler/rustc_mir_dataflow/src/impls/storage_liveness.rs
+++ b/compiler/rustc_mir_dataflow/src/impls/storage_liveness.rs
@@ -142,7 +142,7 @@ impl<'mir, 'tcx> crate::GenKillAnalysis<'tcx> for MaybeRequiresStorage<'mir, 'tc
| StatementKind::FakeRead(..)
| StatementKind::Nop
| StatementKind::Retag(..)
- | StatementKind::CopyNonOverlapping(..)
+ | StatementKind::Intrinsic(..)
| StatementKind::StorageLive(..) => {}
}
}
diff --git a/compiler/rustc_mir_dataflow/src/lib.rs b/compiler/rustc_mir_dataflow/src/lib.rs
index 5793a286b..b471d04fd 100644
--- a/compiler/rustc_mir_dataflow/src/lib.rs
+++ b/compiler/rustc_mir_dataflow/src/lib.rs
@@ -1,12 +1,13 @@
#![feature(associated_type_defaults)]
#![feature(box_patterns)]
#![feature(exact_size_is_empty)]
-#![feature(let_else)]
#![feature(min_specialization)]
#![feature(once_cell)]
#![feature(stmt_expr_attributes)]
#![feature(trusted_step)]
#![recursion_limit = "256"]
+#![deny(rustc::untranslatable_diagnostic)]
+#![deny(rustc::diagnostic_outside_of_impl)]
#[macro_use]
extern crate tracing;
@@ -33,6 +34,7 @@ use self::move_paths::MoveData;
pub mod drop_flag_effects;
pub mod elaborate_drops;
+mod errors;
mod framework;
pub mod impls;
pub mod move_paths;
diff --git a/compiler/rustc_mir_dataflow/src/move_paths/abs_domain.rs b/compiler/rustc_mir_dataflow/src/move_paths/abs_domain.rs
index 28936274b..7806e8f45 100644
--- a/compiler/rustc_mir_dataflow/src/move_paths/abs_domain.rs
+++ b/compiler/rustc_mir_dataflow/src/move_paths/abs_domain.rs
@@ -48,6 +48,7 @@ impl<'tcx> Lift for PlaceElem<'tcx> {
match *self {
ProjectionElem::Deref => ProjectionElem::Deref,
ProjectionElem::Field(f, ty) => ProjectionElem::Field(f, ty.lift()),
+ ProjectionElem::OpaqueCast(ty) => ProjectionElem::OpaqueCast(ty.lift()),
ProjectionElem::Index(ref i) => ProjectionElem::Index(i.lift()),
ProjectionElem::Subslice { from, to, from_end } => {
ProjectionElem::Subslice { from, to, from_end }
diff --git a/compiler/rustc_mir_dataflow/src/move_paths/builder.rs b/compiler/rustc_mir_dataflow/src/move_paths/builder.rs
index 116e5c1f3..f46fd118b 100644
--- a/compiler/rustc_mir_dataflow/src/move_paths/builder.rs
+++ b/compiler/rustc_mir_dataflow/src/move_paths/builder.rs
@@ -243,7 +243,7 @@ pub(super) fn gather_moves<'tcx>(
builder.gather_args();
- for (bb, block) in body.basic_blocks().iter_enumerated() {
+ for (bb, block) in body.basic_blocks.iter_enumerated() {
for (i, stmt) in block.statements.iter().enumerate() {
let source = Location { block: bb, statement_index: i };
builder.gather_statement(source, stmt);
@@ -330,7 +330,7 @@ impl<'b, 'a, 'tcx> Gatherer<'b, 'a, 'tcx> {
StatementKind::Retag { .. }
| StatementKind::AscribeUserType(..)
| StatementKind::Coverage(..)
- | StatementKind::CopyNonOverlapping(..)
+ | StatementKind::Intrinsic(..)
| StatementKind::Nop => {}
}
}
diff --git a/compiler/rustc_mir_dataflow/src/move_paths/mod.rs b/compiler/rustc_mir_dataflow/src/move_paths/mod.rs
index a951c5b0b..b36e268cf 100644
--- a/compiler/rustc_mir_dataflow/src/move_paths/mod.rs
+++ b/compiler/rustc_mir_dataflow/src/move_paths/mod.rs
@@ -217,7 +217,7 @@ where
fn new(body: &Body<'_>) -> Self {
LocationMap {
map: body
- .basic_blocks()
+ .basic_blocks
.iter()
.map(|block| vec![T::default(); block.statements.len() + 1])
.collect(),
diff --git a/compiler/rustc_mir_dataflow/src/rustc_peek.rs b/compiler/rustc_mir_dataflow/src/rustc_peek.rs
index f2471f37a..7cae68efb 100644
--- a/compiler/rustc_mir_dataflow/src/rustc_peek.rs
+++ b/compiler/rustc_mir_dataflow/src/rustc_peek.rs
@@ -6,6 +6,10 @@ use rustc_middle::mir::MirPass;
use rustc_middle::mir::{self, Body, Local, Location};
use rustc_middle::ty::{self, Ty, TyCtxt};
+use crate::errors::{
+ PeekArgumentNotALocal, PeekArgumentUntracked, PeekBitNotSet, PeekMustBeNotTemporary,
+ PeekMustBePlaceOrRefPlace, StopAfterDataFlowEndedCompilation,
+};
use crate::framework::BitSetExt;
use crate::impls::{
DefinitelyInitializedPlaces, MaybeInitializedPlaces, MaybeLiveLocals, MaybeUninitializedPlaces,
@@ -64,7 +68,7 @@ impl<'tcx> MirPass<'tcx> for SanityCheck {
}
if has_rustc_mir_with(tcx, def_id, sym::stop_after_dataflow).is_some() {
- tcx.sess.fatal("stop_after_dataflow ended compilation");
+ tcx.sess.emit_fatal(StopAfterDataFlowEndedCompilation);
}
}
}
@@ -97,7 +101,7 @@ pub fn sanity_check_via_rustc_peek<'tcx, A>(
let mut cursor = ResultsCursor::new(body, results);
- let peek_calls = body.basic_blocks().iter_enumerated().filter_map(|(bb, block_data)| {
+ let peek_calls = body.basic_blocks.iter_enumerated().filter_map(|(bb, block_data)| {
PeekCall::from_terminator(tcx, block_data.terminator()).map(|call| (bb, block_data, call))
});
@@ -133,9 +137,7 @@ pub fn sanity_check_via_rustc_peek<'tcx, A>(
}
_ => {
- let msg = "rustc_peek: argument expression \
- must be either `place` or `&place`";
- tcx.sess.span_err(call.span, msg);
+ tcx.sess.emit_err(PeekMustBePlaceOrRefPlace { span: call.span });
}
}
}
@@ -204,18 +206,12 @@ impl PeekCall {
if let Some(local) = place.as_local() {
local
} else {
- tcx.sess.diagnostic().span_err(
- span,
- "dataflow::sanity_check cannot feed a non-temp to rustc_peek.",
- );
+ tcx.sess.emit_err(PeekMustBeNotTemporary { span });
return None;
}
}
_ => {
- tcx.sess.diagnostic().span_err(
- span,
- "dataflow::sanity_check cannot feed a non-temp to rustc_peek.",
- );
+ tcx.sess.emit_err(PeekMustBeNotTemporary { span });
return None;
}
};
@@ -255,12 +251,12 @@ where
let bit_state = flow_state.contains(peek_mpi);
debug!("rustc_peek({:?} = &{:?}) bit_state: {}", call.arg, place, bit_state);
if !bit_state {
- tcx.sess.span_err(call.span, "rustc_peek: bit not set");
+ tcx.sess.emit_err(PeekBitNotSet { span: call.span });
}
}
LookupResult::Parent(..) => {
- tcx.sess.span_err(call.span, "rustc_peek: argument untracked");
+ tcx.sess.emit_err(PeekArgumentUntracked { span: call.span });
}
}
}
@@ -276,12 +272,12 @@ impl<'tcx> RustcPeekAt<'tcx> for MaybeLiveLocals {
) {
info!(?place, "peek_at");
let Some(local) = place.as_local() else {
- tcx.sess.span_err(call.span, "rustc_peek: argument was not a local");
+ tcx.sess.emit_err(PeekArgumentNotALocal { span: call.span });
return;
};
if !flow_state.contains(local) {
- tcx.sess.span_err(call.span, "rustc_peek: bit not set");
+ tcx.sess.emit_err(PeekBitNotSet { span: call.span });
}
}
}
diff --git a/compiler/rustc_mir_dataflow/src/storage.rs b/compiler/rustc_mir_dataflow/src/storage.rs
index c909648ea..e5a0e1d31 100644
--- a/compiler/rustc_mir_dataflow/src/storage.rs
+++ b/compiler/rustc_mir_dataflow/src/storage.rs
@@ -7,7 +7,7 @@ use rustc_middle::mir::{self, Local};
pub fn always_storage_live_locals(body: &mir::Body<'_>) -> BitSet<Local> {
let mut always_live_locals = BitSet::new_filled(body.local_decls.len());
- for block in body.basic_blocks() {
+ for block in &*body.basic_blocks {
for statement in &block.statements {
use mir::StatementKind::{StorageDead, StorageLive};
if let StorageLive(l) | StorageDead(l) = statement.kind {
diff --git a/compiler/rustc_mir_transform/Cargo.toml b/compiler/rustc_mir_transform/Cargo.toml
index 85b7a4af5..53545cff0 100644
--- a/compiler/rustc_mir_transform/Cargo.toml
+++ b/compiler/rustc_mir_transform/Cargo.toml
@@ -4,7 +4,6 @@ version = "0.0.0"
edition = "2021"
[lib]
-doctest = false
[dependencies]
itertools = "0.10.1"
diff --git a/compiler/rustc_mir_transform/src/abort_unwinding_calls.rs b/compiler/rustc_mir_transform/src/abort_unwinding_calls.rs
index 2502e8b60..d8f85d2e3 100644
--- a/compiler/rustc_mir_transform/src/abort_unwinding_calls.rs
+++ b/compiler/rustc_mir_transform/src/abort_unwinding_calls.rs
@@ -56,7 +56,7 @@ impl<'tcx> MirPass<'tcx> for AbortUnwindingCalls {
// example.
let mut calls_to_terminate = Vec::new();
let mut cleanups_to_remove = Vec::new();
- for (id, block) in body.basic_blocks().iter_enumerated() {
+ for (id, block) in body.basic_blocks.iter_enumerated() {
if block.is_cleanup {
continue;
}
diff --git a/compiler/rustc_mir_transform/src/add_call_guards.rs b/compiler/rustc_mir_transform/src/add_call_guards.rs
index f12c8560c..30966d22e 100644
--- a/compiler/rustc_mir_transform/src/add_call_guards.rs
+++ b/compiler/rustc_mir_transform/src/add_call_guards.rs
@@ -45,7 +45,7 @@ impl AddCallGuards {
// We need a place to store the new blocks generated
let mut new_blocks = Vec::new();
- let cur_len = body.basic_blocks().len();
+ let cur_len = body.basic_blocks.len();
for block in body.basic_blocks_mut() {
match block.terminator {
diff --git a/compiler/rustc_mir_transform/src/add_moves_for_packed_drops.rs b/compiler/rustc_mir_transform/src/add_moves_for_packed_drops.rs
index 8de0aad04..ffb5d8c6d 100644
--- a/compiler/rustc_mir_transform/src/add_moves_for_packed_drops.rs
+++ b/compiler/rustc_mir_transform/src/add_moves_for_packed_drops.rs
@@ -55,7 +55,7 @@ fn add_moves_for_packed_drops_patch<'tcx>(tcx: TyCtxt<'tcx>, body: &Body<'tcx>)
let mut patch = MirPatch::new(body);
let param_env = tcx.param_env(def_id);
- for (bb, data) in body.basic_blocks().iter_enumerated() {
+ for (bb, data) in body.basic_blocks.iter_enumerated() {
let loc = Location { block: bb, statement_index: data.statements.len() };
let terminator = data.terminator();
diff --git a/compiler/rustc_mir_transform/src/add_retag.rs b/compiler/rustc_mir_transform/src/add_retag.rs
index 9c5896c4e..036b55898 100644
--- a/compiler/rustc_mir_transform/src/add_retag.rs
+++ b/compiler/rustc_mir_transform/src/add_retag.rs
@@ -66,7 +66,6 @@ impl<'tcx> MirPass<'tcx> for AddRetag {
// We need an `AllCallEdges` pass before we can do any work.
super::add_call_guards::AllCallEdges.run_pass(tcx, body);
- let (span, arg_count) = (body.span, body.arg_count);
let basic_blocks = body.basic_blocks.as_mut();
let local_decls = &body.local_decls;
let needs_retag = |place: &Place<'tcx>| {
@@ -90,20 +89,18 @@ impl<'tcx> MirPass<'tcx> for AddRetag {
// PART 1
// Retag arguments at the beginning of the start block.
{
- // FIXME: Consider using just the span covering the function
- // argument declaration.
- let source_info = SourceInfo::outermost(span);
// Gather all arguments, skip return value.
- let places = local_decls
- .iter_enumerated()
- .skip(1)
- .take(arg_count)
- .map(|(local, _)| Place::from(local))
- .filter(needs_retag);
+ let places = local_decls.iter_enumerated().skip(1).take(body.arg_count).filter_map(
+ |(local, decl)| {
+ let place = Place::from(local);
+ needs_retag(&place).then_some((place, decl.source_info))
+ },
+ );
+
// Emit their retags.
basic_blocks[START_BLOCK].statements.splice(
0..0,
- places.map(|place| Statement {
+ places.map(|(place, source_info)| Statement {
source_info,
kind: StatementKind::Retag(RetagKind::FnEntry, Box::new(place)),
}),
diff --git a/compiler/rustc_mir_transform/src/check_const_item_mutation.rs b/compiler/rustc_mir_transform/src/check_const_item_mutation.rs
index 8838b14c5..fa5f392fa 100644
--- a/compiler/rustc_mir_transform/src/check_const_item_mutation.rs
+++ b/compiler/rustc_mir_transform/src/check_const_item_mutation.rs
@@ -1,4 +1,4 @@
-use rustc_errors::{DiagnosticBuilder, LintDiagnosticBuilder};
+use rustc_errors::{DiagnosticBuilder, DiagnosticMessage};
use rustc_middle::mir::visit::Visitor;
use rustc_middle::mir::*;
use rustc_middle::ty::TyCtxt;
@@ -63,7 +63,10 @@ impl<'tcx> ConstMutationChecker<'_, 'tcx> {
place: &Place<'tcx>,
const_item: DefId,
location: Location,
- decorate: impl for<'b> FnOnce(LintDiagnosticBuilder<'b, ()>) -> DiagnosticBuilder<'b, ()>,
+ msg: impl Into<DiagnosticMessage>,
+ decorate: impl for<'a, 'b> FnOnce(
+ &'a mut DiagnosticBuilder<'b, ()>,
+ ) -> &'a mut DiagnosticBuilder<'b, ()>,
) {
// Don't lint on borrowing/assigning when a dereference is involved.
// If we 'leave' the temporary via a dereference, we must
@@ -84,10 +87,10 @@ impl<'tcx> ConstMutationChecker<'_, 'tcx> {
CONST_ITEM_MUTATION,
lint_root,
source_info.span,
+ msg,
|lint| {
decorate(lint)
.span_note(self.tcx.def_span(const_item), "`const` item defined here")
- .emit();
},
);
}
@@ -102,10 +105,8 @@ impl<'tcx> Visitor<'tcx> for ConstMutationChecker<'_, 'tcx> {
// so emitting a lint would be redundant.
if !lhs.projection.is_empty() {
if let Some(def_id) = self.is_const_item_without_destructor(lhs.local) {
- self.lint_const_item_usage(&lhs, def_id, loc, |lint| {
- let mut lint = lint.build("attempting to modify a `const` item");
- lint.note("each usage of a `const` item creates a new temporary; the original `const` item will not be modified");
- lint
+ self.lint_const_item_usage(&lhs, def_id, loc, "attempting to modify a `const` item",|lint| {
+ lint.note("each usage of a `const` item creates a new temporary; the original `const` item will not be modified")
})
}
}
@@ -137,8 +138,7 @@ impl<'tcx> Visitor<'tcx> for ConstMutationChecker<'_, 'tcx> {
});
let lint_loc =
if method_did.is_some() { self.body.terminator_loc(loc.block) } else { loc };
- self.lint_const_item_usage(place, def_id, lint_loc, |lint| {
- let mut lint = lint.build("taking a mutable reference to a `const` item");
+ self.lint_const_item_usage(place, def_id, lint_loc, "taking a mutable reference to a `const` item", |lint| {
lint
.note("each usage of a `const` item creates a new temporary")
.note("the mutable reference will refer to this temporary, not the original `const` item");
diff --git a/compiler/rustc_mir_transform/src/check_packed_ref.rs b/compiler/rustc_mir_transform/src/check_packed_ref.rs
index 3b7ba3f9a..51abcf511 100644
--- a/compiler/rustc_mir_transform/src/check_packed_ref.rs
+++ b/compiler/rustc_mir_transform/src/check_packed_ref.rs
@@ -33,21 +33,27 @@ struct PackedRefChecker<'a, 'tcx> {
fn unsafe_derive_on_repr_packed(tcx: TyCtxt<'_>, def_id: LocalDefId) {
let lint_hir_id = tcx.hir().local_def_id_to_hir_id(def_id);
- tcx.struct_span_lint_hir(UNALIGNED_REFERENCES, lint_hir_id, tcx.def_span(def_id), |lint| {
- // FIXME: when we make this a hard error, this should have its
- // own error code.
- let extra = if tcx.generics_of(def_id).own_requires_monomorphization() {
- "with type or const parameters"
- } else {
- "that does not derive `Copy`"
- };
- let message = format!(
- "`{}` can't be derived on this `#[repr(packed)]` struct {}",
- tcx.item_name(tcx.trait_id_of_impl(def_id.to_def_id()).expect("derived trait name")),
- extra
- );
- lint.build(message).emit();
- });
+ // FIXME: when we make this a hard error, this should have its
+ // own error code.
+
+ let extra = if tcx.generics_of(def_id).own_requires_monomorphization() {
+ "with type or const parameters"
+ } else {
+ "that does not derive `Copy`"
+ };
+ let message = format!(
+ "`{}` can't be derived on this `#[repr(packed)]` struct {}",
+ tcx.item_name(tcx.trait_id_of_impl(def_id.to_def_id()).expect("derived trait name")),
+ extra
+ );
+
+ tcx.struct_span_lint_hir(
+ UNALIGNED_REFERENCES,
+ lint_hir_id,
+ tcx.def_span(def_id),
+ message,
+ |lint| lint,
+ );
}
impl<'tcx> Visitor<'tcx> for PackedRefChecker<'_, 'tcx> {
@@ -86,8 +92,9 @@ impl<'tcx> Visitor<'tcx> for PackedRefChecker<'_, 'tcx> {
UNALIGNED_REFERENCES,
lint_root,
source_info.span,
+ "reference to packed field is unaligned",
|lint| {
- lint.build("reference to packed field is unaligned")
+ lint
.note(
"fields of packed structs are not properly aligned, and creating \
a misaligned reference is undefined behavior (even if that \
@@ -98,7 +105,6 @@ impl<'tcx> Visitor<'tcx> for PackedRefChecker<'_, 'tcx> {
reference with a raw pointer and use `read_unaligned`/`write_unaligned` \
(loads and stores via `*p` must be properly aligned even when using raw pointers)"
)
- .emit();
},
);
}
diff --git a/compiler/rustc_mir_transform/src/check_unsafety.rs b/compiler/rustc_mir_transform/src/check_unsafety.rs
index d564f4801..f8f04214a 100644
--- a/compiler/rustc_mir_transform/src/check_unsafety.rs
+++ b/compiler/rustc_mir_transform/src/check_unsafety.rs
@@ -1,17 +1,16 @@
-use rustc_data_structures::fx::FxHashMap;
+use rustc_data_structures::fx::FxHashSet;
use rustc_errors::struct_span_err;
use rustc_hir as hir;
use rustc_hir::def_id::{DefId, LocalDefId};
use rustc_hir::hir_id::HirId;
use rustc_hir::intravisit;
use rustc_middle::mir::visit::{MutatingUseContext, PlaceContext, Visitor};
+use rustc_middle::mir::*;
use rustc_middle::ty::query::Providers;
use rustc_middle::ty::{self, TyCtxt};
-use rustc_middle::{lint, mir::*};
use rustc_session::lint::builtin::{UNSAFE_OP_IN_UNSAFE_FN, UNUSED_UNSAFE};
use rustc_session::lint::Level;
-use std::collections::hash_map;
use std::ops::Bound;
pub struct UnsafetyChecker<'a, 'tcx> {
@@ -23,10 +22,7 @@ pub struct UnsafetyChecker<'a, 'tcx> {
param_env: ty::ParamEnv<'tcx>,
/// Used `unsafe` blocks in this function. This is used for the "unused_unsafe" lint.
- ///
- /// The keys are the used `unsafe` blocks, the UnusedUnsafeKind indicates whether
- /// or not any of the usages happen at a place that doesn't allow `unsafe_op_in_unsafe_fn`.
- used_unsafe_blocks: FxHashMap<HirId, UsedUnsafeBlockData>,
+ used_unsafe_blocks: FxHashSet<HirId>,
}
impl<'a, 'tcx> UnsafetyChecker<'a, 'tcx> {
@@ -109,7 +105,8 @@ impl<'tcx> Visitor<'tcx> for UnsafetyChecker<'_, 'tcx> {
// safe (at least as emitted during MIR construction)
}
- StatementKind::CopyNonOverlapping(..) => unreachable!(),
+ // Move to above list once mir construction uses it.
+ StatementKind::Intrinsic(..) => unreachable!(),
}
self.super_statement(statement, location);
}
@@ -130,10 +127,7 @@ impl<'tcx> Visitor<'tcx> for UnsafetyChecker<'_, 'tcx> {
&AggregateKind::Closure(def_id, _) | &AggregateKind::Generator(def_id, _, _) => {
let UnsafetyCheckResult { violations, used_unsafe_blocks, .. } =
self.tcx.unsafety_check_result(def_id);
- self.register_violations(
- violations,
- used_unsafe_blocks.iter().map(|(&h, &d)| (h, d)),
- );
+ self.register_violations(violations, used_unsafe_blocks.iter().copied());
}
},
_ => {}
@@ -257,22 +251,8 @@ impl<'tcx> UnsafetyChecker<'_, 'tcx> {
fn register_violations<'a>(
&mut self,
violations: impl IntoIterator<Item = &'a UnsafetyViolation>,
- new_used_unsafe_blocks: impl IntoIterator<Item = (HirId, UsedUnsafeBlockData)>,
+ new_used_unsafe_blocks: impl IntoIterator<Item = HirId>,
) {
- use UsedUnsafeBlockData::{AllAllowedInUnsafeFn, SomeDisallowedInUnsafeFn};
-
- let update_entry = |this: &mut Self, hir_id, new_usage| {
- match this.used_unsafe_blocks.entry(hir_id) {
- hash_map::Entry::Occupied(mut entry) => {
- if new_usage == SomeDisallowedInUnsafeFn {
- *entry.get_mut() = SomeDisallowedInUnsafeFn;
- }
- }
- hash_map::Entry::Vacant(entry) => {
- entry.insert(new_usage);
- }
- };
- };
let safety = self.body.source_scopes[self.source_info.scope]
.local_data
.as_ref()
@@ -299,22 +279,14 @@ impl<'tcx> UnsafetyChecker<'_, 'tcx> {
}
}),
Safety::BuiltinUnsafe => {}
- Safety::ExplicitUnsafe(hir_id) => violations.into_iter().for_each(|violation| {
- update_entry(
- self,
- hir_id,
- match self.tcx.lint_level_at_node(UNSAFE_OP_IN_UNSAFE_FN, violation.lint_root).0
- {
- Level::Allow => AllAllowedInUnsafeFn(violation.lint_root),
- _ => SomeDisallowedInUnsafeFn,
- },
- )
+ Safety::ExplicitUnsafe(hir_id) => violations.into_iter().for_each(|_violation| {
+ self.used_unsafe_blocks.insert(hir_id);
}),
};
- new_used_unsafe_blocks
- .into_iter()
- .for_each(|(hir_id, usage_data)| update_entry(self, hir_id, usage_data));
+ new_used_unsafe_blocks.into_iter().for_each(|hir_id| {
+ self.used_unsafe_blocks.insert(hir_id);
+ });
}
fn check_mut_borrowing_layout_constrained_field(
&mut self,
@@ -340,7 +312,7 @@ impl<'tcx> UnsafetyChecker<'_, 'tcx> {
} else if !place
.ty(self.body, self.tcx)
.ty
- .is_freeze(self.tcx.at(self.source_info.span), self.param_env)
+ .is_freeze(self.tcx, self.param_env)
{
UnsafetyViolationDetails::BorrowOfLayoutConstrainedField
} else {
@@ -411,34 +383,28 @@ enum Context {
struct UnusedUnsafeVisitor<'a, 'tcx> {
tcx: TyCtxt<'tcx>,
- used_unsafe_blocks: &'a FxHashMap<HirId, UsedUnsafeBlockData>,
+ used_unsafe_blocks: &'a FxHashSet<HirId>,
context: Context,
unused_unsafes: &'a mut Vec<(HirId, UnusedUnsafe)>,
}
impl<'tcx> intravisit::Visitor<'tcx> for UnusedUnsafeVisitor<'_, 'tcx> {
fn visit_block(&mut self, block: &'tcx hir::Block<'tcx>) {
- use UsedUnsafeBlockData::{AllAllowedInUnsafeFn, SomeDisallowedInUnsafeFn};
-
if let hir::BlockCheckMode::UnsafeBlock(hir::UnsafeSource::UserProvided) = block.rules {
let used = match self.tcx.lint_level_at_node(UNUSED_UNSAFE, block.hir_id) {
- (Level::Allow, _) => Some(SomeDisallowedInUnsafeFn),
- _ => self.used_unsafe_blocks.get(&block.hir_id).copied(),
+ (Level::Allow, _) => true,
+ _ => self.used_unsafe_blocks.contains(&block.hir_id),
};
let unused_unsafe = match (self.context, used) {
- (_, None) => UnusedUnsafe::Unused,
- (Context::Safe, Some(_))
- | (Context::UnsafeFn(_), Some(SomeDisallowedInUnsafeFn)) => {
+ (_, false) => UnusedUnsafe::Unused,
+ (Context::Safe, true) | (Context::UnsafeFn(_), true) => {
let previous_context = self.context;
self.context = Context::UnsafeBlock(block.hir_id);
intravisit::walk_block(self, block);
self.context = previous_context;
return;
}
- (Context::UnsafeFn(hir_id), Some(AllAllowedInUnsafeFn(lint_root))) => {
- UnusedUnsafe::InUnsafeFn(hir_id, lint_root)
- }
- (Context::UnsafeBlock(hir_id), Some(_)) => UnusedUnsafe::InUnsafeBlock(hir_id),
+ (Context::UnsafeBlock(hir_id), true) => UnusedUnsafe::InUnsafeBlock(hir_id),
};
self.unused_unsafes.push((block.hir_id, unused_unsafe));
}
@@ -462,7 +428,7 @@ impl<'tcx> intravisit::Visitor<'tcx> for UnusedUnsafeVisitor<'_, 'tcx> {
fn check_unused_unsafe(
tcx: TyCtxt<'_>,
def_id: LocalDefId,
- used_unsafe_blocks: &FxHashMap<HirId, UsedUnsafeBlockData>,
+ used_unsafe_blocks: &FxHashSet<HirId>,
) -> Vec<(HirId, UnusedUnsafe)> {
let body_id = tcx.hir().maybe_body_owned_by(def_id);
@@ -523,40 +489,20 @@ fn unsafety_check_result<'tcx>(
fn report_unused_unsafe(tcx: TyCtxt<'_>, kind: UnusedUnsafe, id: HirId) {
let span = tcx.sess.source_map().guess_head_span(tcx.hir().span(id));
- tcx.struct_span_lint_hir(UNUSED_UNSAFE, id, span, |lint| {
- let msg = "unnecessary `unsafe` block";
- let mut db = lint.build(msg);
- db.span_label(span, msg);
+ let msg = "unnecessary `unsafe` block";
+ tcx.struct_span_lint_hir(UNUSED_UNSAFE, id, span, msg, |lint| {
+ lint.span_label(span, msg);
match kind {
UnusedUnsafe::Unused => {}
UnusedUnsafe::InUnsafeBlock(id) => {
- db.span_label(
+ lint.span_label(
tcx.sess.source_map().guess_head_span(tcx.hir().span(id)),
"because it's nested under this `unsafe` block",
);
}
- UnusedUnsafe::InUnsafeFn(id, usage_lint_root) => {
- db.span_label(
- tcx.sess.source_map().guess_head_span(tcx.hir().span(id)),
- "because it's nested under this `unsafe` fn",
- )
- .note(
- "this `unsafe` block does contain unsafe operations, \
- but those are already allowed in an `unsafe fn`",
- );
- let (level, source) =
- tcx.lint_level_at_node(UNSAFE_OP_IN_UNSAFE_FN, usage_lint_root);
- assert_eq!(level, Level::Allow);
- lint::explain_lint_level_source(
- UNSAFE_OP_IN_UNSAFE_FN,
- Level::Allow,
- source,
- &mut db,
- );
- }
}
- db.emit();
+ lint
});
}
@@ -596,15 +542,8 @@ pub fn check_unsafety(tcx: TyCtxt<'_>, def_id: LocalDefId) {
UNSAFE_OP_IN_UNSAFE_FN,
lint_root,
source_info.span,
- |lint| {
- lint.build(&format!(
- "{} is unsafe and requires unsafe block (error E0133)",
- description,
- ))
- .span_label(source_info.span, description)
- .note(note)
- .emit();
- },
+ format!("{} is unsafe and requires unsafe block (error E0133)", description,),
+ |lint| lint.span_label(source_info.span, description).note(note),
),
}
}
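
The check_unsafety.rs hunks above replace `FxHashMap<HirId, UsedUnsafeBlockData>` with a plain `FxHashSet<HirId>`: once the `UnusedUnsafe::InUnsafeFn` special case is gone, the checker only needs to know whether an `unsafe` block was used at all. A small standalone sketch of that simplification, using std's `HashSet` and an integer in place of `FxHashSet<HirId>`:

    use std::collections::HashSet;

    fn main() {
        // Stand-in for `used_unsafe_blocks: FxHashSet<HirId>`.
        let mut used_unsafe_blocks: HashSet<u32> = HashSet::new();

        // Previously each usage went through a map `entry()` that merged a
        // `UsedUnsafeBlockData` value; now a plain insert records membership,
        // and repeated insertions are harmless.
        for block_id in [3, 7, 3] {
            used_unsafe_blocks.insert(block_id);
        }

        // The unused-unsafe visitor only ever asks `contains`.
        assert!(used_unsafe_blocks.contains(&3));
        assert!(!used_unsafe_blocks.contains(&5));
    }
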
diff --git a/compiler/rustc_mir_transform/src/cleanup_post_borrowck.rs b/compiler/rustc_mir_transform/src/cleanup_post_borrowck.rs
index 611d29a4e..3378923c2 100644
--- a/compiler/rustc_mir_transform/src/cleanup_post_borrowck.rs
+++ b/compiler/rustc_mir_transform/src/cleanup_post_borrowck.rs
@@ -33,7 +33,7 @@ pub struct DeleteNonCodegenStatements<'tcx> {
impl<'tcx> MirPass<'tcx> for CleanupNonCodegenStatements {
fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
let mut delete = DeleteNonCodegenStatements { tcx };
- delete.visit_body(body);
+ delete.visit_body_preserves_cfg(body);
body.user_type_annotations.raw.clear();
for decl in &mut body.local_decls {
diff --git a/compiler/rustc_mir_transform/src/const_goto.rs b/compiler/rustc_mir_transform/src/const_goto.rs
index 5acf939f0..0a305a402 100644
--- a/compiler/rustc_mir_transform/src/const_goto.rs
+++ b/compiler/rustc_mir_transform/src/const_goto.rs
@@ -61,14 +61,14 @@ impl<'tcx> Visitor<'tcx> for ConstGotoOptimizationFinder<'_, 'tcx> {
let _: Option<_> = try {
let target = terminator.kind.as_goto()?;
// We only apply this optimization if the last statement is a const assignment
- let last_statement = self.body.basic_blocks()[location.block].statements.last()?;
+ let last_statement = self.body.basic_blocks[location.block].statements.last()?;
if let (place, Rvalue::Use(Operand::Constant(_const))) =
last_statement.kind.as_assign()?
{
// We found a constant being assigned to `place`.
// Now check that the target of this Goto switches on this place.
- let target_bb = &self.body.basic_blocks()[target];
+ let target_bb = &self.body.basic_blocks[target];
// The `StorageDead(..)` statement does not affect the functionality of mir.
// We can move this part of the statement up to the predecessor.
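
The `basic_blocks()` calls removed here, and throughout the rest of this diff, reflect `Body::basic_blocks` becoming a directly accessible field with cache-aware mutable accessors. A rustc-internal sketch of the resulting access patterns as they appear in these hunks (it will not compile outside the compiler tree):

    use rustc_middle::mir::Body;

    // Read-only access: index or iterate the field directly.
    fn count_non_cleanup(body: &Body<'_>) -> usize {
        body.basic_blocks.iter().filter(|bb| !bb.is_cleanup).count()
    }

    // Mutation that leaves the CFG unchanged goes through the cache-preserving
    // accessor instead of the old `basic_blocks_mut()`.
    fn strip_statements(body: &mut Body<'_>) {
        for data in body.basic_blocks.as_mut_preserves_cfg() {
            data.statements.clear();
        }
    }
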
diff --git a/compiler/rustc_mir_transform/src/const_prop.rs b/compiler/rustc_mir_transform/src/const_prop.rs
index fbc0a767f..4e4515888 100644
--- a/compiler/rustc_mir_transform/src/const_prop.rs
+++ b/compiler/rustc_mir_transform/src/const_prop.rs
@@ -17,7 +17,7 @@ use rustc_middle::mir::{
RETURN_PLACE,
};
use rustc_middle::ty::layout::{LayoutError, LayoutOf, LayoutOfHelpers, TyAndLayout};
-use rustc_middle::ty::subst::{InternalSubsts, Subst};
+use rustc_middle::ty::InternalSubsts;
use rustc_middle::ty::{self, ConstKind, Instance, ParamEnv, Ty, TyCtxt, TypeVisitable};
use rustc_span::{def_id::DefId, Span};
use rustc_target::abi::{self, HasDataLayout, Size, TargetDataLayout};
@@ -28,7 +28,7 @@ use crate::MirPass;
use rustc_const_eval::interpret::{
self, compile_time_machine, AllocId, ConstAllocation, ConstValue, CtfeValidationMode, Frame,
ImmTy, Immediate, InterpCx, InterpResult, LocalState, LocalValue, MemoryKind, OpTy, PlaceTy,
- Pointer, Scalar, ScalarMaybeUninit, StackPopCleanup, StackPopUnwind,
+ Pointer, Scalar, StackPopCleanup, StackPopUnwind,
};
/// The maximum number of bytes that we'll allocate space for a local or the return value.
@@ -131,7 +131,7 @@ impl<'tcx> MirPass<'tcx> for ConstProp {
let dummy_body = &Body::new(
body.source,
- body.basic_blocks().clone(),
+ (*body.basic_blocks).clone(),
body.source_scopes.clone(),
body.local_decls.clone(),
Default::default(),
@@ -183,6 +183,18 @@ impl<'mir, 'tcx> interpret::Machine<'mir, 'tcx> for ConstPropMachine<'mir, 'tcx>
type MemoryKind = !;
+ #[inline(always)]
+ fn enforce_alignment(_ecx: &InterpCx<'mir, 'tcx, Self>) -> bool {
+ // We do not check for alignment to avoid having to carry an `Align`
+ // in `ConstValue::ByRef`.
+ false
+ }
+
+ #[inline(always)]
+ fn enforce_validity(_ecx: &InterpCx<'mir, 'tcx, Self>) -> bool {
+ false // for now, we don't enforce validity
+ }
+
fn load_mir(
_ecx: &InterpCx<'mir, 'tcx, Self>,
_instance: ty::InstanceDef<'tcx>,
@@ -231,24 +243,6 @@ impl<'mir, 'tcx> interpret::Machine<'mir, 'tcx> for ConstPropMachine<'mir, 'tcx>
throw_machine_stop_str!("pointer arithmetic or comparisons aren't supported in ConstProp")
}
- fn access_local<'a>(
- frame: &'a Frame<'mir, 'tcx, Self::Provenance, Self::FrameExtra>,
- local: Local,
- ) -> InterpResult<'tcx, &'a interpret::Operand<Self::Provenance>> {
- let l = &frame.locals[local];
-
- if matches!(
- l.value,
- LocalValue::Live(interpret::Operand::Immediate(interpret::Immediate::Uninit))
- ) {
- // For us "uninit" means "we don't know its value, might be initiailized or not".
- // So stop here.
- throw_machine_stop_str!("tried to access alocal with unknown value ")
- }
-
- l.access()
- }
-
fn access_local_mut<'a>(
ecx: &'a mut InterpCx<'mir, 'tcx, Self>,
frame: usize,
@@ -419,7 +413,13 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
fn get_const(&self, place: Place<'tcx>) -> Option<OpTy<'tcx>> {
let op = match self.ecx.eval_place_to_op(place, None) {
- Ok(op) => op,
+ Ok(op) => {
+ if matches!(*op, interpret::Operand::Immediate(Immediate::Uninit)) {
+ // Make sure nobody accidentally uses this value.
+ return None;
+ }
+ op
+ }
Err(e) => {
trace!("get_const failed: {}", e);
return None;
@@ -428,7 +428,7 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
// Try to read the local as an immediate so that if it is representable as a scalar, we can
// handle it as such, but otherwise, just return the value as is.
- Some(match self.ecx.read_immediate_raw(&op, /*force*/ false) {
+ Some(match self.ecx.read_immediate_raw(&op) {
Ok(Ok(imm)) => imm.into(),
_ => op,
})
@@ -471,7 +471,7 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
return None;
}
- self.ecx.mir_const_to_op(&c.literal, None).ok()
+ self.ecx.const_to_op(&c.literal, None).ok()
}
/// Returns the value, if any, of evaluating `place`.
@@ -520,8 +520,7 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
let left_ty = left.ty(self.local_decls, self.tcx);
let left_size = self.ecx.layout_of(left_ty).ok()?.size;
let right_size = r.layout.size;
- let r_bits = r.to_scalar().ok();
- let r_bits = r_bits.and_then(|r| r.to_bits(right_size).ok());
+ let r_bits = r.to_scalar().to_bits(right_size).ok();
if r_bits.map_or(false, |b| b >= left_size.bits() as u128) {
return None;
}
@@ -550,7 +549,7 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
// and use it to do const-prop here and everywhere else
// where it makes sense.
if let interpret::Operand::Immediate(interpret::Immediate::Scalar(
- ScalarMaybeUninit::Scalar(scalar),
+ scalar,
)) = *value
{
*operand = self.operand_from_scalar(
@@ -632,6 +631,14 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
if rvalue.needs_subst() {
return None;
}
+ if !rvalue
+ .ty(&self.ecx.frame().body.local_decls, *self.ecx.tcx)
+ .is_sized(*self.ecx.tcx, self.param_env)
+ {
+ // the interpreter doesn't support unsized locals (only unsized arguments),
+ // but rustc does (in a kinda broken way), so we have to skip them here
+ return None;
+ }
if self.tcx.sess.mir_opt_level() >= 4 {
self.eval_rvalue_with_identities(rvalue, place)
@@ -649,21 +656,23 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
self.use_ecx(|this| match rvalue {
Rvalue::BinaryOp(op, box (left, right))
| Rvalue::CheckedBinaryOp(op, box (left, right)) => {
- let l = this.ecx.eval_operand(left, None);
- let r = this.ecx.eval_operand(right, None);
+ let l = this.ecx.eval_operand(left, None).and_then(|x| this.ecx.read_immediate(&x));
+ let r =
+ this.ecx.eval_operand(right, None).and_then(|x| this.ecx.read_immediate(&x));
let const_arg = match (l, r) {
- (Ok(ref x), Err(_)) | (Err(_), Ok(ref x)) => this.ecx.read_immediate(x)?,
- (Err(e), Err(_)) => return Err(e),
- (Ok(_), Ok(_)) => return this.ecx.eval_rvalue_into_place(rvalue, place),
+ (Ok(x), Err(_)) | (Err(_), Ok(x)) => x, // exactly one side is known
+ (Err(e), Err(_)) => return Err(e), // neither side is known
+ (Ok(_), Ok(_)) => return this.ecx.eval_rvalue_into_place(rvalue, place), // both sides are known
};
if !matches!(const_arg.layout.abi, abi::Abi::Scalar(..)) {
// We cannot handle Scalar Pair stuff.
- return this.ecx.eval_rvalue_into_place(rvalue, place);
+ // No point in calling `eval_rvalue_into_place`, since only one side is known
+ throw_machine_stop_str!("cannot optimize this")
}
- let arg_value = const_arg.to_scalar()?.to_bits(const_arg.layout.size)?;
+ let arg_value = const_arg.to_scalar().to_bits(const_arg.layout.size)?;
let dest = this.ecx.eval_place(place)?;
match op {
@@ -677,7 +686,7 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
BinOp::Mul if const_arg.layout.ty.is_integral() && arg_value == 0 => {
if let Rvalue::CheckedBinaryOp(_, _) = rvalue {
let val = Immediate::ScalarPair(
- const_arg.to_scalar()?.into(),
+ const_arg.to_scalar().into(),
Scalar::from_bool(false).into(),
);
this.ecx.write_immediate(val, &dest)
@@ -685,7 +694,7 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
this.ecx.write_immediate(*const_arg, &dest)
}
}
- _ => this.ecx.eval_rvalue_into_place(rvalue, place),
+ _ => throw_machine_stop_str!("cannot optimize this"),
}
}
_ => this.ecx.eval_rvalue_into_place(rvalue, place),
@@ -731,21 +740,18 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
}
// FIXME> figure out what to do when read_immediate_raw fails
- let imm = self.use_ecx(|this| this.ecx.read_immediate_raw(value, /*force*/ false));
+ let imm = self.use_ecx(|this| this.ecx.read_immediate_raw(value));
if let Some(Ok(imm)) = imm {
match *imm {
- interpret::Immediate::Scalar(ScalarMaybeUninit::Scalar(scalar)) => {
+ interpret::Immediate::Scalar(scalar) => {
*rval = Rvalue::Use(self.operand_from_scalar(
scalar,
value.layout.ty,
source_info.span,
));
}
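
The `FUNCTION_ITEM_REFERENCES` hunk above only reworks how the suggestion is emitted; for context, a standalone example of code that trips this lint and the cast the suggestion produces:

    fn foo(x: u32) -> u32 {
        x
    }

    fn main() {
        // Lint: taking a reference to a function item does not give a function
        // pointer; `&foo` points at a zero-sized function item value.
        println!("{:p}", &foo);
        // Suggested fix: cast to obtain an actual function pointer.
        println!("{:p}", foo as fn(u32) -> u32);
    }
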
- Immediate::ScalarPair(
- ScalarMaybeUninit::Scalar(_),
- ScalarMaybeUninit::Scalar(_),
- ) => {
+ Immediate::ScalarPair(..) => {
// Found a value represented as a pair. For now only do const-prop if the type
// of `rvalue` is also a tuple with two scalars.
// FIXME: enable the general case stated above ^.
@@ -800,13 +806,10 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
}
match **op {
- interpret::Operand::Immediate(Immediate::Scalar(ScalarMaybeUninit::Scalar(s))) => {
- s.try_to_int().is_ok()
+ interpret::Operand::Immediate(Immediate::Scalar(s)) => s.try_to_int().is_ok(),
+ interpret::Operand::Immediate(Immediate::ScalarPair(l, r)) => {
+ l.try_to_int().is_ok() && r.try_to_int().is_ok()
}
- interpret::Operand::Immediate(Immediate::ScalarPair(
- ScalarMaybeUninit::Scalar(l),
- ScalarMaybeUninit::Scalar(r),
- )) => l.try_to_int().is_ok() && r.try_to_int().is_ok(),
_ => false,
}
}
@@ -951,7 +954,7 @@ impl<'tcx> MutVisitor<'tcx> for ConstPropagator<'_, 'tcx> {
}
fn visit_body(&mut self, body: &mut Body<'tcx>) {
- for (bb, data) in body.basic_blocks_mut().iter_enumerated_mut() {
+ for (bb, data) in body.basic_blocks.as_mut_preserves_cfg().iter_enumerated_mut() {
self.visit_basic_block_data(bb, data);
}
}
@@ -1063,26 +1066,28 @@ impl<'tcx> MutVisitor<'tcx> for ConstPropagator<'_, 'tcx> {
let source_info = terminator.source_info;
self.source_info = Some(source_info);
self.super_terminator(terminator, location);
+ // Do NOT early return in this function, it does some crucial fixup of the state at the end!
match &mut terminator.kind {
TerminatorKind::Assert { expected, ref mut cond, .. } => {
if let Some(ref value) = self.eval_operand(&cond) {
trace!("assertion on {:?} should be {:?}", value, expected);
- let expected = ScalarMaybeUninit::from(Scalar::from_bool(*expected));
- let value_const = self.ecx.read_scalar(&value).unwrap();
- if expected != value_const {
- // Poison all places this operand references so that further code
- // doesn't use the invalid value
- match cond {
- Operand::Move(ref place) | Operand::Copy(ref place) => {
- Self::remove_const(&mut self.ecx, place.local);
+ let expected = Scalar::from_bool(*expected);
+                    // FIXME: should use use_ecx rather than a local match... but we have
+ // quite a few of these read_scalar/read_immediate that need fixing.
+ if let Ok(value_const) = self.ecx.read_scalar(&value) {
+ if expected != value_const {
+ // Poison all places this operand references so that further code
+ // doesn't use the invalid value
+ match cond {
+ Operand::Move(ref place) | Operand::Copy(ref place) => {
+ Self::remove_const(&mut self.ecx, place.local);
+ }
+ Operand::Constant(_) => {}
}
- Operand::Constant(_) => {}
- }
- } else {
- if self.should_const_prop(value) {
- if let ScalarMaybeUninit::Scalar(scalar) = value_const {
+ } else {
+ if self.should_const_prop(value) {
*cond = self.operand_from_scalar(
- scalar,
+ value_const,
self.tcx.types.bool,
source_info.span,
);
diff --git a/compiler/rustc_mir_transform/src/const_prop_lint.rs b/compiler/rustc_mir_transform/src/const_prop_lint.rs
index c2ea55af4..479c4e577 100644
--- a/compiler/rustc_mir_transform/src/const_prop_lint.rs
+++ b/compiler/rustc_mir_transform/src/const_prop_lint.rs
@@ -6,9 +6,9 @@ use crate::const_prop::ConstPropMachine;
use crate::const_prop::ConstPropMode;
use crate::MirLint;
use rustc_const_eval::const_eval::ConstEvalErr;
+use rustc_const_eval::interpret::Immediate;
use rustc_const_eval::interpret::{
- self, InterpCx, InterpResult, LocalState, LocalValue, MemoryKind, OpTy, Scalar,
- ScalarMaybeUninit, StackPopCleanup,
+ self, InterpCx, InterpResult, LocalState, LocalValue, MemoryKind, OpTy, Scalar, StackPopCleanup,
};
use rustc_hir::def::DefKind;
use rustc_hir::HirId;
@@ -16,15 +16,13 @@ use rustc_index::bit_set::BitSet;
use rustc_index::vec::IndexVec;
use rustc_middle::mir::visit::Visitor;
use rustc_middle::mir::{
- AssertKind, BinOp, Body, Constant, ConstantKind, Local, LocalDecl, Location, Operand, Place,
- Rvalue, SourceInfo, SourceScope, SourceScopeData, Statement, StatementKind, Terminator,
- TerminatorKind, UnOp, RETURN_PLACE,
+ AssertKind, BinOp, Body, Constant, Local, LocalDecl, Location, Operand, Place, Rvalue,
+ SourceInfo, SourceScope, SourceScopeData, Statement, StatementKind, Terminator, TerminatorKind,
+ UnOp, RETURN_PLACE,
};
use rustc_middle::ty::layout::{LayoutError, LayoutOf, LayoutOfHelpers, TyAndLayout};
-use rustc_middle::ty::subst::{InternalSubsts, Subst};
-use rustc_middle::ty::{
- self, ConstInt, ConstKind, Instance, ParamEnv, ScalarInt, Ty, TyCtxt, TypeVisitable,
-};
+use rustc_middle::ty::InternalSubsts;
+use rustc_middle::ty::{self, ConstInt, Instance, ParamEnv, ScalarInt, Ty, TyCtxt, TypeVisitable};
use rustc_session::lint;
use rustc_span::Span;
use rustc_target::abi::{HasDataLayout, Size, TargetDataLayout};
@@ -106,7 +104,7 @@ impl<'tcx> MirLint<'tcx> for ConstProp {
let dummy_body = &Body::new(
body.source,
- body.basic_blocks().clone(),
+ (*body.basic_blocks).clone(),
body.source_scopes.clone(),
body.local_decls.clone(),
Default::default(),
@@ -230,7 +228,13 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
fn get_const(&self, place: Place<'tcx>) -> Option<OpTy<'tcx>> {
let op = match self.ecx.eval_place_to_op(place, None) {
- Ok(op) => op,
+ Ok(op) => {
+ if matches!(*op, interpret::Operand::Immediate(Immediate::Uninit)) {
+ // Make sure nobody accidentally uses this value.
+ return None;
+ }
+ op
+ }
Err(e) => {
trace!("get_const failed: {}", e);
return None;
@@ -239,7 +243,7 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
// Try to read the local as an immediate so that if it is representable as a scalar, we can
// handle it as such, but otherwise, just return the value as is.
- Some(match self.ecx.read_immediate_raw(&op, /*force*/ false) {
+ Some(match self.ecx.read_immediate_raw(&op) {
Ok(Ok(imm)) => imm.into(),
_ => op,
})
@@ -282,42 +286,22 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
}
/// Returns the value, if any, of evaluating `c`.
- fn eval_constant(&mut self, c: &Constant<'tcx>, source_info: SourceInfo) -> Option<OpTy<'tcx>> {
+ fn eval_constant(
+ &mut self,
+ c: &Constant<'tcx>,
+ _source_info: SourceInfo,
+ ) -> Option<OpTy<'tcx>> {
// FIXME we need to revisit this for #67176
if c.needs_subst() {
return None;
}
- match self.ecx.mir_const_to_op(&c.literal, None) {
+ match self.ecx.const_to_op(&c.literal, None) {
Ok(op) => Some(op),
Err(error) => {
let tcx = self.ecx.tcx.at(c.span);
let err = ConstEvalErr::new(&self.ecx, error, Some(c.span));
- if let Some(lint_root) = self.lint_root(source_info) {
- let lint_only = match c.literal {
- ConstantKind::Ty(ct) => match ct.kind() {
- // Promoteds must lint and not error as the user didn't ask for them
- ConstKind::Unevaluated(ty::Unevaluated {
- def: _,
- substs: _,
- promoted: Some(_),
- }) => true,
- // Out of backwards compatibility we cannot report hard errors in unused
- // generic functions using associated constants of the generic parameters.
- _ => c.literal.needs_subst(),
- },
- ConstantKind::Val(_, ty) => ty.needs_subst(),
- };
- if lint_only {
- // Out of backwards compatibility we cannot report hard errors in unused
- // generic functions using associated constants of the generic parameters.
- err.report_as_lint(tcx, "erroneous constant used", lint_root, Some(c.span));
- } else {
- err.report_as_error(tcx, "erroneous constant used");
- }
- } else {
- err.report_as_error(tcx, "erroneous constant used");
- }
+ err.report_as_error(tcx, "erroneous constant used");
None
}
}
@@ -346,10 +330,8 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
panic: AssertKind<impl std::fmt::Debug>,
) {
if let Some(lint_root) = self.lint_root(source_info) {
- self.tcx.struct_span_lint_hir(lint, lint_root, source_info.span, |lint| {
- let mut err = lint.build(message);
- err.span_label(source_info.span, format!("{:?}", panic));
- err.emit();
+ self.tcx.struct_span_lint_hir(lint, lint_root, source_info.span, message, |lint| {
+ lint.span_label(source_info.span, format!("{:?}", panic))
});
}
}
@@ -401,8 +383,7 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
let left_ty = left.ty(self.local_decls, self.tcx);
let left_size = self.ecx.layout_of(left_ty).ok()?.size;
let right_size = r.layout.size;
- let r_bits = r.to_scalar().ok();
- let r_bits = r_bits.and_then(|r| r.to_bits(right_size).ok());
+ let r_bits = r.to_scalar().to_bits(right_size).ok();
if r_bits.map_or(false, |b| b >= left_size.bits() as u128) {
debug!("check_binary_op: reporting assert for {:?}", source_info);
self.report_assert_as_lint(
@@ -517,6 +498,14 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
if rvalue.needs_subst() {
return None;
}
+ if !rvalue
+ .ty(&self.ecx.frame().body.local_decls, *self.ecx.tcx)
+ .is_sized(*self.ecx.tcx, self.param_env)
+ {
+ // the interpreter doesn't support unsized locals (only unsized arguments),
+ // but rustc does (in a kinda broken way), so we have to skip them here
+ return None;
+ }
self.use_ecx(source_info, |this| this.ecx.eval_rvalue_into_place(rvalue, place))
}
@@ -524,7 +513,7 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
impl<'tcx> Visitor<'tcx> for ConstPropagator<'_, 'tcx> {
fn visit_body(&mut self, body: &Body<'tcx>) {
- for (bb, data) in body.basic_blocks().iter_enumerated() {
+ for (bb, data) in body.basic_blocks.iter_enumerated() {
self.visit_basic_block_data(bb, data);
}
}
@@ -625,8 +614,12 @@ impl<'tcx> Visitor<'tcx> for ConstPropagator<'_, 'tcx> {
TerminatorKind::Assert { expected, ref msg, ref cond, .. } => {
if let Some(ref value) = self.eval_operand(&cond, source_info) {
trace!("assertion on {:?} should be {:?}", value, expected);
- let expected = ScalarMaybeUninit::from(Scalar::from_bool(*expected));
- let value_const = self.ecx.read_scalar(&value).unwrap();
+ let expected = Scalar::from_bool(*expected);
+ let Ok(value_const) = self.ecx.read_scalar(&value) else {
+                        // FIXME: should use use_ecx rather than a local match... but we have
+ // quite a few of these read_scalar/read_immediate that need fixing.
+ return
+ };
if expected != value_const {
enum DbgVal<T> {
Val(T),
@@ -643,9 +636,9 @@ impl<'tcx> Visitor<'tcx> for ConstPropagator<'_, 'tcx> {
let mut eval_to_int = |op| {
// This can be `None` if the lhs wasn't const propagated and we just
// triggered the assert on the value of the rhs.
- self.eval_operand(op, source_info).map_or(DbgVal::Underscore, |op| {
- DbgVal::Val(self.ecx.read_immediate(&op).unwrap().to_const_int())
- })
+ self.eval_operand(op, source_info)
+ .and_then(|op| self.ecx.read_immediate(&op).ok())
+ .map_or(DbgVal::Underscore, |op| DbgVal::Val(op.to_const_int()))
};
let msg = match msg {
AssertKind::DivisionByZero(op) => {
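
The assertion handling above is what turns statically known `Assert` terminators into diagnostics. A standalone example of the kind of code it rejects at compile time via the deny-by-default `unconditional_panic` lint:

    fn main() {
        let divisor = 0;
        // Const propagation proves this division always panics, so the pass above
        // reports it during compilation instead of leaving a runtime panic:
        // "this operation will panic at runtime: attempt to divide ... by zero".
        let _quotient = 1 / divisor;
    }
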
diff --git a/compiler/rustc_mir_transform/src/coverage/graph.rs b/compiler/rustc_mir_transform/src/coverage/graph.rs
index 759ea7cd3..782129be0 100644
--- a/compiler/rustc_mir_transform/src/coverage/graph.rs
+++ b/compiler/rustc_mir_transform/src/coverage/graph.rs
@@ -713,7 +713,7 @@ impl<
ShortCircuitPreorder {
body,
- visited: BitSet::new_empty(body.basic_blocks().len()),
+ visited: BitSet::new_empty(body.basic_blocks.len()),
worklist,
filtered_successors,
}
@@ -747,7 +747,7 @@ impl<
}
fn size_hint(&self) -> (usize, Option<usize>) {
- let size = self.body.basic_blocks().len() - self.visited.count();
+ let size = self.body.basic_blocks.len() - self.visited.count();
(size, Some(size))
}
}
diff --git a/compiler/rustc_mir_transform/src/coverage/mod.rs b/compiler/rustc_mir_transform/src/coverage/mod.rs
index 2619626a5..604810144 100644
--- a/compiler/rustc_mir_transform/src/coverage/mod.rs
+++ b/compiler/rustc_mir_transform/src/coverage/mod.rs
@@ -80,7 +80,7 @@ impl<'tcx> MirPass<'tcx> for InstrumentCoverage {
return;
}
- match mir_body.basic_blocks()[mir::START_BLOCK].terminator().kind {
+ match mir_body.basic_blocks[mir::START_BLOCK].terminator().kind {
TerminatorKind::Unreachable => {
trace!("InstrumentCoverage skipped for unreachable `START_BLOCK`");
return;
@@ -541,7 +541,7 @@ fn fn_sig_and_body<'tcx>(
// to HIR for it.
let hir_node = tcx.hir().get_if_local(def_id).expect("expected DefId is local");
let fn_body_id = hir::map::associated_body(hir_node).expect("HIR node is a function with body");
- (hir::map::fn_sig(hir_node), tcx.hir().body(fn_body_id))
+ (hir_node.fn_sig(), tcx.hir().body(fn_body_id))
}
fn get_body_span<'tcx>(
diff --git a/compiler/rustc_mir_transform/src/coverage/query.rs b/compiler/rustc_mir_transform/src/coverage/query.rs
index 9d02f58ae..dc1e68b25 100644
--- a/compiler/rustc_mir_transform/src/coverage/query.rs
+++ b/compiler/rustc_mir_transform/src/coverage/query.rs
@@ -84,7 +84,7 @@ impl CoverageVisitor {
}
fn visit_body(&mut self, body: &Body<'_>) {
- for bb_data in body.basic_blocks().iter() {
+ for bb_data in body.basic_blocks.iter() {
for statement in bb_data.statements.iter() {
if let StatementKind::Coverage(box ref coverage) = statement.kind {
if is_inlined(body, statement) {
@@ -138,7 +138,7 @@ fn coverageinfo<'tcx>(tcx: TyCtxt<'tcx>, instance_def: ty::InstanceDef<'tcx>) ->
fn covered_code_regions<'tcx>(tcx: TyCtxt<'tcx>, def_id: DefId) -> Vec<&'tcx CodeRegion> {
let body = mir_body(tcx, def_id);
- body.basic_blocks()
+ body.basic_blocks
.iter()
.flat_map(|data| {
data.statements.iter().filter_map(|statement| match statement.kind {
diff --git a/compiler/rustc_mir_transform/src/coverage/spans.rs b/compiler/rustc_mir_transform/src/coverage/spans.rs
index 423e78317..9f842c929 100644
--- a/compiler/rustc_mir_transform/src/coverage/spans.rs
+++ b/compiler/rustc_mir_transform/src/coverage/spans.rs
@@ -825,7 +825,7 @@ pub(super) fn filtered_statement_span(statement: &Statement<'_>) -> Option<Span>
// Retain spans from all other statements
StatementKind::FakeRead(box (_, _)) // Not including `ForGuardBinding`
- | StatementKind::CopyNonOverlapping(..)
+ | StatementKind::Intrinsic(..)
| StatementKind::Assign(_)
| StatementKind::SetDiscriminant { .. }
| StatementKind::Deinit(..)
diff --git a/compiler/rustc_mir_transform/src/coverage/test_macros/Cargo.toml b/compiler/rustc_mir_transform/src/coverage/test_macros/Cargo.toml
index f5e8b6565..f753caa91 100644
--- a/compiler/rustc_mir_transform/src/coverage/test_macros/Cargo.toml
+++ b/compiler/rustc_mir_transform/src/coverage/test_macros/Cargo.toml
@@ -5,4 +5,3 @@ edition = "2021"
[lib]
proc-macro = true
-doctest = false
diff --git a/compiler/rustc_mir_transform/src/coverage/tests.rs b/compiler/rustc_mir_transform/src/coverage/tests.rs
index 6380f0352..9c9ed5fa5 100644
--- a/compiler/rustc_mir_transform/src/coverage/tests.rs
+++ b/compiler/rustc_mir_transform/src/coverage/tests.rs
@@ -176,7 +176,7 @@ fn debug_basic_blocks<'tcx>(mir_body: &Body<'tcx>) -> String {
format!(
"{:?}",
mir_body
- .basic_blocks()
+ .basic_blocks
.iter_enumerated()
.map(|(bb, data)| {
let term = &data.terminator();
@@ -213,7 +213,7 @@ fn print_mir_graphviz(name: &str, mir_body: &Body<'_>) {
"digraph {} {{\n{}\n}}",
name,
mir_body
- .basic_blocks()
+ .basic_blocks
.iter_enumerated()
.map(|(bb, data)| {
format!(
@@ -653,7 +653,7 @@ fn test_traverse_coverage_with_loops() {
fn synthesize_body_span_from_terminators(mir_body: &Body<'_>) -> Span {
let mut some_span: Option<Span> = None;
- for (_, data) in mir_body.basic_blocks().iter_enumerated() {
+ for (_, data) in mir_body.basic_blocks.iter_enumerated() {
let term_span = data.terminator().source_info.span;
if let Some(span) = some_span.as_mut() {
*span = span.to(term_span);
diff --git a/compiler/rustc_mir_transform/src/dead_store_elimination.rs b/compiler/rustc_mir_transform/src/dead_store_elimination.rs
index 9163672f5..3f3870cc7 100644
--- a/compiler/rustc_mir_transform/src/dead_store_elimination.rs
+++ b/compiler/rustc_mir_transform/src/dead_store_elimination.rs
@@ -52,7 +52,7 @@ pub fn eliminate<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>, borrowed: &BitS
| StatementKind::StorageLive(_)
| StatementKind::StorageDead(_)
| StatementKind::Coverage(_)
- | StatementKind::CopyNonOverlapping(_)
+ | StatementKind::Intrinsic(_)
| StatementKind::Nop => (),
StatementKind::FakeRead(_) | StatementKind::AscribeUserType(_, _) => {
diff --git a/compiler/rustc_mir_transform/src/deaggregator.rs b/compiler/rustc_mir_transform/src/deaggregator.rs
index b93fe5879..fe272de20 100644
--- a/compiler/rustc_mir_transform/src/deaggregator.rs
+++ b/compiler/rustc_mir_transform/src/deaggregator.rs
@@ -6,10 +6,6 @@ use rustc_middle::ty::TyCtxt;
pub struct Deaggregator;
impl<'tcx> MirPass<'tcx> for Deaggregator {
- fn phase_change(&self) -> Option<MirPhase> {
- Some(MirPhase::Deaggregated)
- }
-
fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
let basic_blocks = body.basic_blocks.as_mut_preserves_cfg();
for bb in basic_blocks {
diff --git a/compiler/rustc_mir_transform/src/deduce_param_attrs.rs b/compiler/rustc_mir_transform/src/deduce_param_attrs.rs
new file mode 100644
index 000000000..28b1c5a48
--- /dev/null
+++ b/compiler/rustc_mir_transform/src/deduce_param_attrs.rs
@@ -0,0 +1,248 @@
+//! Deduces supplementary parameter attributes from MIR.
+//!
+//! Deduced parameter attributes are those that can only be soundly determined by examining the
+//! body of the function instead of just the signature. These can be useful for optimization
+//! purposes on a best-effort basis. We compute them here and store them into the crate metadata so
+//! dependent crates can use them.
+
+use rustc_hir::def_id::DefId;
+use rustc_index::bit_set::BitSet;
+use rustc_middle::mir::visit::{NonMutatingUseContext, PlaceContext, Visitor};
+use rustc_middle::mir::{Body, Local, Location, Operand, Terminator, TerminatorKind, RETURN_PLACE};
+use rustc_middle::ty::{self, DeducedParamAttrs, ParamEnv, Ty, TyCtxt};
+use rustc_session::config::OptLevel;
+
+/// A visitor that determines which arguments have been mutated. We can't use the mutability field
+/// on LocalDecl for this because it has no meaning post-optimization.
+struct DeduceReadOnly {
+ /// Each bit is indexed by argument number, starting at zero (so 0 corresponds to local decl
+ /// 1). The bit is true if the argument may have been mutated or false if we know it hasn't
+ /// been up to the point we're at.
+ mutable_args: BitSet<usize>,
+}
+
+impl DeduceReadOnly {
+ /// Returns a new DeduceReadOnly instance.
+ fn new(arg_count: usize) -> Self {
+ Self { mutable_args: BitSet::new_empty(arg_count) }
+ }
+}
+
+impl<'tcx> Visitor<'tcx> for DeduceReadOnly {
+ fn visit_local(&mut self, local: Local, mut context: PlaceContext, _: Location) {
+ // We're only interested in arguments.
+ if local == RETURN_PLACE || local.index() > self.mutable_args.domain_size() {
+ return;
+ }
+
+ // Replace place contexts that are moves with copies. This is safe in all cases except
+ // function argument position, which we already handled in `visit_terminator()` by using the
+ // ArgumentChecker. See the comment in that method for more details.
+ //
+ // In the future, we might want to move this out into a separate pass, but for now let's
+ // just do it on the fly because that's faster.
+ if matches!(context, PlaceContext::NonMutatingUse(NonMutatingUseContext::Move)) {
+ context = PlaceContext::NonMutatingUse(NonMutatingUseContext::Copy);
+ }
+
+ match context {
+ PlaceContext::MutatingUse(..)
+ | PlaceContext::NonMutatingUse(NonMutatingUseContext::Move) => {
+ // This is a mutation, so mark it as such.
+ self.mutable_args.insert(local.index() - 1);
+ }
+ PlaceContext::NonMutatingUse(..) | PlaceContext::NonUse(..) => {
+ // Not mutating, so it's fine.
+ }
+ }
+ }
+
+ fn visit_terminator(&mut self, terminator: &Terminator<'tcx>, location: Location) {
+ // OK, this is subtle. Suppose that we're trying to deduce whether `x` in `f` is read-only
+ // and we have the following:
+ //
+ // fn f(x: BigStruct) { g(x) }
+ // fn g(mut y: BigStruct) { y.foo = 1 }
+ //
+ // If, at the generated MIR level, `f` turned into something like:
+ //
+ // fn f(_1: BigStruct) -> () {
+ // let mut _0: ();
+ // bb0: {
+ // _0 = g(move _1) -> bb1;
+ // }
+ // ...
+ // }
+ //
+ // then it would be incorrect to mark `x` (i.e. `_1`) as `readonly`, because `g`'s write to
+ // its copy of the indirect parameter would actually be a write directly to the pointer that
+ // `f` passes. Note that function arguments are the only situation in which this problem can
+ // arise: every other use of `move` in MIR doesn't actually write to the value it moves
+ // from.
+ //
+ // Anyway, right now this situation doesn't actually arise in practice. Instead, the MIR for
+ // that function looks like this:
+ //
+ // fn f(_1: BigStruct) -> () {
+ // let mut _0: ();
+ // let mut _2: BigStruct;
+ // bb0: {
+ // _2 = move _1;
+ // _0 = g(move _2) -> bb1;
+ // }
+ // ...
+ // }
+ //
+ // Because of that extra move that MIR construction inserts, `x` (i.e. `_1`) can *in
+ // practice* safely be marked `readonly`.
+ //
+ // To handle the possibility that other optimizations (for example, destination propagation)
+ // might someday generate MIR like the first example above, we panic upon seeing an argument
+ // to *our* function that is directly moved into *another* function as an argument. Having
+ // eliminated that problematic case, we can safely treat moves as copies in this analysis.
+ //
+ // In the future, if MIR optimizations cause arguments of a caller to be directly moved into
+ // the argument of a callee, we can just add that argument to `mutated_args` instead of
+ // panicking.
+ //
+ // Note that, because the problematic MIR is never actually generated, we can't add a test
+ // case for this.
+
+ if let TerminatorKind::Call { ref args, .. } = terminator.kind {
+ for arg in args {
+ if let Operand::Move(_) = *arg {
+ // ArgumentChecker panics if a direct move of an argument from a caller to a
+ // callee was detected.
+ //
+ // If, in the future, MIR optimizations cause arguments to be moved directly
+ // from callers to callees, change the panic to instead add the argument in
+ // question to `mutating_uses`.
+ ArgumentChecker::new(self.mutable_args.domain_size())
+ .visit_operand(arg, location)
+ }
+ }
+ };
+
+ self.super_terminator(terminator, location);
+ }
+}
+
+/// A visitor that simply panics if a direct move of an argument from a caller to a callee was
+/// detected.
+struct ArgumentChecker {
+ /// The number of arguments to the calling function.
+ arg_count: usize,
+}
+
+impl ArgumentChecker {
+ /// Creates a new ArgumentChecker.
+ fn new(arg_count: usize) -> Self {
+ Self { arg_count }
+ }
+}
+
+impl<'tcx> Visitor<'tcx> for ArgumentChecker {
+ fn visit_local(&mut self, local: Local, context: PlaceContext, _: Location) {
+ // Check to make sure that, if this local is an argument, we didn't move directly from it.
+ if matches!(context, PlaceContext::NonMutatingUse(NonMutatingUseContext::Move))
+ && local != RETURN_PLACE
+ && local.index() <= self.arg_count
+ {
+ // If, in the future, MIR optimizations cause arguments to be moved directly from
+ // callers to callees, change this panic to instead add the argument in question to
+ // `mutating_uses`.
+ panic!("Detected a direct move from a caller's argument to a callee's argument!")
+ }
+ }
+}
+
+/// Returns true if values of a given type will never be passed indirectly, regardless of ABI.
+fn type_will_always_be_passed_directly<'tcx>(ty: Ty<'tcx>) -> bool {
+ matches!(
+ ty.kind(),
+ ty::Bool
+ | ty::Char
+ | ty::Float(..)
+ | ty::Int(..)
+ | ty::RawPtr(..)
+ | ty::Ref(..)
+ | ty::Slice(..)
+ | ty::Uint(..)
+ )
+}
+
+/// Returns the deduced parameter attributes for a function.
+///
+/// Deduced parameter attributes are those that can only be soundly determined by examining the
+/// body of the function instead of just the signature. These can be useful for optimization
+/// purposes on a best-effort basis. We compute them here and store them into the crate metadata so
+/// dependent crates can use them.
+pub fn deduced_param_attrs<'tcx>(tcx: TyCtxt<'tcx>, def_id: DefId) -> &'tcx [DeducedParamAttrs] {
+ // This computation is unfortunately rather expensive, so don't do it unless we're optimizing.
+ // Also skip it in incremental mode.
+ if tcx.sess.opts.optimize == OptLevel::No || tcx.sess.opts.incremental.is_some() {
+ return &[];
+ }
+
+ // If the Freeze language item isn't present, then don't bother.
+ if tcx.lang_items().freeze_trait().is_none() {
+ return &[];
+ }
+
+ // Codegen won't use this information for anything if all the function parameters are passed
+ // directly. Detect that and bail, for compilation speed.
+ let fn_ty = tcx.type_of(def_id);
+ if matches!(fn_ty.kind(), ty::FnDef(..)) {
+ if fn_ty
+ .fn_sig(tcx)
+ .inputs()
+ .skip_binder()
+ .iter()
+ .cloned()
+ .all(type_will_always_be_passed_directly)
+ {
+ return &[];
+ }
+ }
+
+ // Don't deduce any attributes for functions that have no MIR.
+ if !tcx.is_mir_available(def_id) {
+ return &[];
+ }
+
+ // Deduced attributes for other crates should be read from the metadata instead of via this
+ // function.
+ debug_assert!(def_id.is_local());
+
+ // Grab the optimized MIR. Analyze it to determine which arguments have been mutated.
+ let body: &Body<'tcx> = tcx.optimized_mir(def_id);
+ let mut deduce_read_only = DeduceReadOnly::new(body.arg_count);
+ deduce_read_only.visit_body(body);
+
+ // Set the `readonly` attribute for every argument that we concluded is immutable and that
+ // contains no UnsafeCells.
+ //
+ // FIXME: This is overly conservative around generic parameters: `is_freeze()` will always
+ // return false for them. For a description of alternatives that could do a better job here,
+ // see [1].
+ //
+ // [1]: https://github.com/rust-lang/rust/pull/103172#discussion_r999139997
+ let mut deduced_param_attrs = tcx.arena.alloc_from_iter(
+ body.local_decls.iter().skip(1).take(body.arg_count).enumerate().map(
+ |(arg_index, local_decl)| DeducedParamAttrs {
+ read_only: !deduce_read_only.mutable_args.contains(arg_index)
+ && local_decl.ty.is_freeze(tcx, ParamEnv::reveal_all()),
+ },
+ ),
+ );
+
+ // Trailing parameters past the size of the `deduced_param_attrs` array are assumed to have the
+ // default set of attributes, so we don't have to store them explicitly. Pop them off to save a
+ // few bytes in metadata.
+ while deduced_param_attrs.last() == Some(&DeducedParamAttrs::default()) {
+ let last_index = deduced_param_attrs.len() - 1;
+ deduced_param_attrs = &mut deduced_param_attrs[0..last_index];
+ }
+
+ deduced_param_attrs
+}
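
As a standalone illustration (not part of the new file above) of the property this pass deduces: a by-value argument that is never mutated and contains no `UnsafeCell` can be marked `read_only` in the crate metadata, while a mutated one cannot. Whether anything is actually recorded also depends on the opt-level and non-incremental checks at the top of `deduced_param_attrs`.

    // Large enough that most ABIs pass it indirectly, and freeze (no UnsafeCell).
    struct Big {
        words: [u64; 16],
    }

    // `b` is never mutated, so the pass can deduce `read_only: true` for it.
    fn checksum(b: Big) -> u64 {
        b.words.iter().sum()
    }

    // `b` is mutated through the parameter, so no `readonly` attribute is deduced.
    fn bump(mut b: Big) -> u64 {
        b.words[0] += 1;
        b.words[0]
    }

    fn main() {
        let big = Big { words: [1; 16] };
        println!("{} {}", checksum(Big { words: [2; 16] }), bump(big));
    }
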
diff --git a/compiler/rustc_mir_transform/src/deduplicate_blocks.rs b/compiler/rustc_mir_transform/src/deduplicate_blocks.rs
index d1977ed49..909116a77 100644
--- a/compiler/rustc_mir_transform/src/deduplicate_blocks.rs
+++ b/compiler/rustc_mir_transform/src/deduplicate_blocks.rs
@@ -58,7 +58,7 @@ fn find_duplicates(body: &Body<'_>) -> FxHashMap<BasicBlock, BasicBlock> {
let mut duplicates = FxHashMap::default();
let bbs_to_go_through =
- body.basic_blocks().iter_enumerated().filter(|(_, bbd)| !bbd.is_cleanup).count();
+ body.basic_blocks.iter_enumerated().filter(|(_, bbd)| !bbd.is_cleanup).count();
let mut same_hashes =
FxHashMap::with_capacity_and_hasher(bbs_to_go_through, Default::default());
@@ -71,8 +71,7 @@ fn find_duplicates(body: &Body<'_>) -> FxHashMap<BasicBlock, BasicBlock> {
// When we see bb1, we see that it is a duplicate of bb3, and therefore insert it in the duplicates list
// with replacement bb3.
// When the duplicates are removed, we will end up with only bb3.
- for (bb, bbd) in body.basic_blocks().iter_enumerated().rev().filter(|(_, bbd)| !bbd.is_cleanup)
- {
+ for (bb, bbd) in body.basic_blocks.iter_enumerated().rev().filter(|(_, bbd)| !bbd.is_cleanup) {
// Basic blocks can get really big, so to avoid checking for duplicates in basic blocks
// that are unlikely to have duplicates, we stop early. The early bail number has been
// found experimentally by eprintln while compiling the crates in the rustc-perf suite.
diff --git a/compiler/rustc_mir_transform/src/deref_separator.rs b/compiler/rustc_mir_transform/src/deref_separator.rs
index a00bb16f7..7508df92d 100644
--- a/compiler/rustc_mir_transform/src/deref_separator.rs
+++ b/compiler/rustc_mir_transform/src/deref_separator.rs
@@ -28,8 +28,6 @@ impl<'tcx> MutVisitor<'tcx> for DerefChecker<'tcx> {
let mut last_len = 0;
let mut last_deref_idx = 0;
- let mut prev_temp: Option<Local> = None;
-
for (idx, elem) in place.projection[0..].iter().enumerate() {
if *elem == ProjectionElem::Deref {
last_deref_idx = idx;
@@ -39,14 +37,12 @@ impl<'tcx> MutVisitor<'tcx> for DerefChecker<'tcx> {
for (idx, (p_ref, p_elem)) in place.iter_projections().enumerate() {
if !p_ref.projection.is_empty() && p_elem == ProjectionElem::Deref {
let ty = p_ref.ty(&self.local_decls, self.tcx).ty;
- let temp = self.patcher.new_local_with_info(
+ let temp = self.patcher.new_internal_with_info(
ty,
self.local_decls[p_ref.local].source_info.span,
Some(Box::new(LocalInfo::DerefTemp)),
);
- self.patcher.add_statement(loc, StatementKind::StorageLive(temp));
-
// We are adding current p_ref's projections to our
// temp value, excluding projections we already covered.
let deref_place = Place::from(place_local)
@@ -66,22 +62,8 @@ impl<'tcx> MutVisitor<'tcx> for DerefChecker<'tcx> {
Place::from(temp).project_deeper(&place.projection[idx..], self.tcx);
*place = temp_place;
}
-
- // We are destroying the previous temp since it's no longer used.
- if let Some(prev_temp) = prev_temp {
- self.patcher.add_statement(loc, StatementKind::StorageDead(prev_temp));
- }
-
- prev_temp = Some(temp);
}
}
-
- // Since we won't be able to reach final temp, we destroy it outside the loop.
- if let Some(prev_temp) = prev_temp {
- let last_loc =
- Location { block: loc.block, statement_index: loc.statement_index + 1 };
- self.patcher.add_statement(last_loc, StatementKind::StorageDead(prev_temp));
- }
}
}
}
@@ -90,7 +72,7 @@ pub fn deref_finder<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
let patch = MirPatch::new(body);
let mut checker = DerefChecker { tcx, patcher: patch, local_decls: body.local_decls.clone() };
- for (bb, data) in body.basic_blocks_mut().iter_enumerated_mut() {
+ for (bb, data) in body.basic_blocks.as_mut_preserves_cfg().iter_enumerated_mut() {
checker.visit_basic_block_data(bb, data);
}
@@ -100,6 +82,5 @@ pub fn deref_finder<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
impl<'tcx> MirPass<'tcx> for Derefer {
fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
deref_finder(tcx, body);
- body.phase = MirPhase::Derefered;
}
}
diff --git a/compiler/rustc_mir_transform/src/dest_prop.rs b/compiler/rustc_mir_transform/src/dest_prop.rs
index 33572068f..9bc47613e 100644
--- a/compiler/rustc_mir_transform/src/dest_prop.rs
+++ b/compiler/rustc_mir_transform/src/dest_prop.rs
@@ -150,7 +150,7 @@ impl<'tcx> MirPass<'tcx> for DestinationPropagation {
def_id,
body.local_decls.len(),
relevant,
- body.basic_blocks().len()
+ body.basic_blocks.len()
);
if relevant > MAX_LOCALS {
warn!(
@@ -159,11 +159,11 @@ impl<'tcx> MirPass<'tcx> for DestinationPropagation {
);
return;
}
- if body.basic_blocks().len() > MAX_BLOCKS {
+ if body.basic_blocks.len() > MAX_BLOCKS {
warn!(
"too many blocks in {:?} ({}, max is {}), not optimizing",
def_id,
- body.basic_blocks().len(),
+ body.basic_blocks.len(),
MAX_BLOCKS
);
return;
@@ -537,7 +537,7 @@ impl<'a> Conflicts<'a> {
| StatementKind::FakeRead(..)
| StatementKind::AscribeUserType(..)
| StatementKind::Coverage(..)
- | StatementKind::CopyNonOverlapping(..)
+ | StatementKind::Intrinsic(..)
| StatementKind::Nop => {}
}
}
diff --git a/compiler/rustc_mir_transform/src/early_otherwise_branch.rs b/compiler/rustc_mir_transform/src/early_otherwise_branch.rs
index dba42f7af..32e738bbc 100644
--- a/compiler/rustc_mir_transform/src/early_otherwise_branch.rs
+++ b/compiler/rustc_mir_transform/src/early_otherwise_branch.rs
@@ -104,8 +104,8 @@ impl<'tcx> MirPass<'tcx> for EarlyOtherwiseBranch {
let mut should_cleanup = false;
// Also consider newly generated bbs in the same pass
- for i in 0..body.basic_blocks().len() {
- let bbs = body.basic_blocks();
+ for i in 0..body.basic_blocks.len() {
+ let bbs = &*body.basic_blocks;
let parent = BasicBlock::from_usize(i);
let Some(opt_data) = evaluate_candidate(tcx, body, parent) else {
continue
@@ -316,7 +316,7 @@ fn evaluate_candidate<'tcx>(
body: &Body<'tcx>,
parent: BasicBlock,
) -> Option<OptimizationData<'tcx>> {
- let bbs = body.basic_blocks();
+ let bbs = &body.basic_blocks;
let TerminatorKind::SwitchInt {
targets,
switch_ty: parent_ty,
diff --git a/compiler/rustc_mir_transform/src/elaborate_box_derefs.rs b/compiler/rustc_mir_transform/src/elaborate_box_derefs.rs
index 44e3945d6..ef8d6bb65 100644
--- a/compiler/rustc_mir_transform/src/elaborate_box_derefs.rs
+++ b/compiler/rustc_mir_transform/src/elaborate_box_derefs.rs
@@ -8,7 +8,6 @@ use rustc_index::vec::Idx;
use rustc_middle::mir::patch::MirPatch;
use rustc_middle::mir::visit::MutVisitor;
use rustc_middle::mir::*;
-use rustc_middle::ty::subst::Subst;
use rustc_middle::ty::{Ty, TyCtxt};
/// Constructs the types used when accessing a Box's pointer
@@ -69,10 +68,7 @@ impl<'tcx, 'a> MutVisitor<'tcx> for ElaborateBoxDerefVisitor<'tcx, 'a> {
let (unique_ty, nonnull_ty, ptr_ty) =
build_ptr_tys(tcx, base_ty.boxed_ty(), self.unique_did, self.nonnull_did);
- let ptr_local = self.patch.new_temp(ptr_ty, source_info.span);
- self.local_decls.push(LocalDecl::new(ptr_ty, source_info.span));
-
- self.patch.add_statement(location, StatementKind::StorageLive(ptr_local));
+ let ptr_local = self.patch.new_internal(ptr_ty, source_info.span);
self.patch.add_assign(
location,
@@ -84,11 +80,6 @@ impl<'tcx, 'a> MutVisitor<'tcx> for ElaborateBoxDerefVisitor<'tcx, 'a> {
);
place.local = ptr_local;
-
- self.patch.add_statement(
- Location { block: location.block, statement_index: location.statement_index + 1 },
- StatementKind::StorageDead(ptr_local),
- );
}
self.super_place(place, context, location);
@@ -115,34 +106,8 @@ impl<'tcx> MirPass<'tcx> for ElaborateBoxDerefs {
let mut visitor =
ElaborateBoxDerefVisitor { tcx, unique_did, nonnull_did, local_decls, patch };
- for (block, BasicBlockData { statements, terminator, .. }) in
- body.basic_blocks.as_mut().iter_enumerated_mut()
- {
- let mut index = 0;
- for statement in statements {
- let location = Location { block, statement_index: index };
- visitor.visit_statement(statement, location);
- index += 1;
- }
-
- if let Some(terminator) = terminator
- && !matches!(terminator.kind, TerminatorKind::Yield{..})
- {
- let location = Location { block, statement_index: index };
- visitor.visit_terminator(terminator, location);
- }
-
- let location = Location { block, statement_index: index };
- match terminator {
- // yielding into a box is handled when lowering generators
- Some(Terminator { kind: TerminatorKind::Yield { value, .. }, .. }) => {
- visitor.visit_operand(value, location);
- }
- Some(terminator) => {
- visitor.visit_terminator(terminator, location);
- }
- None => {}
- }
+ for (block, data) in body.basic_blocks.as_mut_preserves_cfg().iter_enumerated_mut() {
+ visitor.visit_basic_block_data(block, data);
}
visitor.patch.apply(body);
diff --git a/compiler/rustc_mir_transform/src/elaborate_drops.rs b/compiler/rustc_mir_transform/src/elaborate_drops.rs
index 9c1fcbaa6..65f4956d2 100644
--- a/compiler/rustc_mir_transform/src/elaborate_drops.rs
+++ b/compiler/rustc_mir_transform/src/elaborate_drops.rs
@@ -21,10 +21,6 @@ use std::fmt;
pub struct ElaborateDrops;
impl<'tcx> MirPass<'tcx> for ElaborateDrops {
- fn phase_change(&self) -> Option<MirPhase> {
- Some(MirPhase::DropsLowered)
- }
-
fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
debug!("elaborate_drops({:?} @ {:?})", body.source, body.span);
@@ -89,13 +85,13 @@ fn find_dead_unwinds<'tcx>(
debug!("find_dead_unwinds({:?})", body.span);
// We only need to do this pass once, because unwind edges can only
// reach cleanup blocks, which can't have unwind edges themselves.
- let mut dead_unwinds = BitSet::new_empty(body.basic_blocks().len());
+ let mut dead_unwinds = BitSet::new_empty(body.basic_blocks.len());
let mut flow_inits = MaybeInitializedPlaces::new(tcx, body, &env)
.into_engine(tcx, body)
.pass_name("find_dead_unwinds")
.iterate_to_fixpoint()
.into_results_cursor(body);
- for (bb, bb_data) in body.basic_blocks().iter_enumerated() {
+ for (bb, bb_data) in body.basic_blocks.iter_enumerated() {
let place = match bb_data.terminator().kind {
TerminatorKind::Drop { ref place, unwind: Some(_), .. }
| TerminatorKind::DropAndReplace { ref place, unwind: Some(_), .. } => {
@@ -303,7 +299,7 @@ impl<'b, 'tcx> ElaborateDropsCtxt<'b, 'tcx> {
}
fn collect_drop_flags(&mut self) {
- for (bb, data) in self.body.basic_blocks().iter_enumerated() {
+ for (bb, data) in self.body.basic_blocks.iter_enumerated() {
let terminator = data.terminator();
let place = match terminator.kind {
TerminatorKind::Drop { ref place, .. }
@@ -358,7 +354,7 @@ impl<'b, 'tcx> ElaborateDropsCtxt<'b, 'tcx> {
}
fn elaborate_drops(&mut self) {
- for (bb, data) in self.body.basic_blocks().iter_enumerated() {
+ for (bb, data) in self.body.basic_blocks.iter_enumerated() {
let loc = Location { block: bb, statement_index: data.statements.len() };
let terminator = data.terminator();
@@ -515,7 +511,7 @@ impl<'b, 'tcx> ElaborateDropsCtxt<'b, 'tcx> {
}
fn drop_flags_for_fn_rets(&mut self) {
- for (bb, data) in self.body.basic_blocks().iter_enumerated() {
+ for (bb, data) in self.body.basic_blocks.iter_enumerated() {
if let TerminatorKind::Call {
destination, target: Some(tgt), cleanup: Some(_), ..
} = data.terminator().kind
@@ -550,7 +546,7 @@ impl<'b, 'tcx> ElaborateDropsCtxt<'b, 'tcx> {
// drop flags by themselves, to avoid the drop flags being
// clobbered before they are read.
- for (bb, data) in self.body.basic_blocks().iter_enumerated() {
+ for (bb, data) in self.body.basic_blocks.iter_enumerated() {
debug!("drop_flags_for_locs({:?})", data);
for i in 0..(data.statements.len() + 1) {
debug!("drop_flag_for_locs: stmt {}", i);
diff --git a/compiler/rustc_mir_transform/src/ffi_unwind_calls.rs b/compiler/rustc_mir_transform/src/ffi_unwind_calls.rs
index 7728fdaff..1244c1802 100644
--- a/compiler/rustc_mir_transform/src/ffi_unwind_calls.rs
+++ b/compiler/rustc_mir_transform/src/ffi_unwind_calls.rs
@@ -65,7 +65,7 @@ fn has_ffi_unwind_calls(tcx: TyCtxt<'_>, local_def_id: LocalDefId) -> bool {
let mut tainted = false;
- for block in body.basic_blocks() {
+ for block in body.basic_blocks.iter() {
if block.is_cleanup {
continue;
}
@@ -106,14 +106,12 @@ fn has_ffi_unwind_calls(tcx: TyCtxt<'_>, local_def_id: LocalDefId) -> bool {
.lint_root;
let span = terminator.source_info.span;
- tcx.struct_span_lint_hir(FFI_UNWIND_CALLS, lint_root, span, |lint| {
- let msg = match fn_def_id {
- Some(_) => "call to foreign function with FFI-unwind ABI",
- None => "call to function pointer with FFI-unwind ABI",
- };
- let mut db = lint.build(msg);
- db.span_label(span, msg);
- db.emit();
+ let msg = match fn_def_id {
+ Some(_) => "call to foreign function with FFI-unwind ABI",
+ None => "call to function pointer with FFI-unwind ABI",
+ };
+ tcx.struct_span_lint_hir(FFI_UNWIND_CALLS, lint_root, span, msg, |lint| {
+ lint.span_label(span, msg)
});
tainted = true;
diff --git a/compiler/rustc_mir_transform/src/function_item_references.rs b/compiler/rustc_mir_transform/src/function_item_references.rs
index 2e4fe1e3e..469566694 100644
--- a/compiler/rustc_mir_transform/src/function_item_references.rs
+++ b/compiler/rustc_mir_transform/src/function_item_references.rs
@@ -3,11 +3,7 @@ use rustc_errors::Applicability;
use rustc_hir::def_id::DefId;
use rustc_middle::mir::visit::Visitor;
use rustc_middle::mir::*;
-use rustc_middle::ty::{
- self,
- subst::{GenericArgKind, Subst, SubstsRef},
- EarlyBinder, PredicateKind, Ty, TyCtxt,
-};
+use rustc_middle::ty::{self, EarlyBinder, GenericArgKind, PredicateKind, SubstsRef, Ty, TyCtxt};
use rustc_session::lint::builtin::FUNCTION_ITEM_REFERENCES;
use rustc_span::{symbol::sym, Span};
use rustc_target::spec::abi::Abi;
@@ -183,11 +179,15 @@ impl<'tcx> FunctionItemRefChecker<'_, 'tcx> {
let num_args = fn_sig.inputs().map_bound(|inputs| inputs.len()).skip_binder();
let variadic = if fn_sig.c_variadic() { ", ..." } else { "" };
let ret = if fn_sig.output().skip_binder().is_unit() { "" } else { " -> _" };
- self.tcx.struct_span_lint_hir(FUNCTION_ITEM_REFERENCES, lint_root, span, |lint| {
- lint.build("taking a reference to a function item does not give a function pointer")
- .span_suggestion(
+ self.tcx.struct_span_lint_hir(
+ FUNCTION_ITEM_REFERENCES,
+ lint_root,
+ span,
+ "taking a reference to a function item does not give a function pointer",
+ |lint| {
+ lint.span_suggestion(
span,
- &format!("cast `{}` to obtain a function pointer", ident),
+ format!("cast `{}` to obtain a function pointer", ident),
format!(
"{} as {}{}fn({}{}){}",
if params.is_empty() { ident } else { format!("{}::<{}>", ident, params) },
@@ -199,7 +199,7 @@ impl<'tcx> FunctionItemRefChecker<'_, 'tcx> {
),
Applicability::Unspecified,
)
- .emit();
- });
+ },
+ );
}
}
diff --git a/compiler/rustc_mir_transform/src/generator.rs b/compiler/rustc_mir_transform/src/generator.rs
index 91ecf3879..c833de3a8 100644
--- a/compiler/rustc_mir_transform/src/generator.rs
+++ b/compiler/rustc_mir_transform/src/generator.rs
@@ -61,9 +61,8 @@ use rustc_index::vec::{Idx, IndexVec};
use rustc_middle::mir::dump_mir;
use rustc_middle::mir::visit::{MutVisitor, PlaceContext, Visitor};
use rustc_middle::mir::*;
-use rustc_middle::ty::subst::{Subst, SubstsRef};
-use rustc_middle::ty::GeneratorSubsts;
use rustc_middle::ty::{self, AdtDef, Ty, TyCtxt};
+use rustc_middle::ty::{GeneratorSubsts, SubstsRef};
use rustc_mir_dataflow::impls::{
MaybeBorrowedLocals, MaybeLiveLocals, MaybeRequiresStorage, MaybeStorageLive,
};
@@ -490,12 +489,12 @@ fn locals_live_across_suspend_points<'tcx>(
.iterate_to_fixpoint()
.into_results_cursor(body_ref);
- let mut storage_liveness_map = IndexVec::from_elem(None, body.basic_blocks());
+ let mut storage_liveness_map = IndexVec::from_elem(None, &body.basic_blocks);
let mut live_locals_at_suspension_points = Vec::new();
let mut source_info_at_suspension_points = Vec::new();
let mut live_locals_at_any_suspension_point = BitSet::new_empty(body.local_decls.len());
- for (block, data) in body.basic_blocks().iter_enumerated() {
+ for (block, data) in body.basic_blocks.iter_enumerated() {
if let TerminatorKind::Yield { .. } = data.terminator().kind {
let loc = Location { block, statement_index: data.statements.len() };
@@ -704,7 +703,7 @@ impl<'mir, 'tcx> rustc_mir_dataflow::ResultsVisitor<'mir, 'tcx>
impl StorageConflictVisitor<'_, '_, '_> {
fn apply_state(&mut self, flow_state: &BitSet<Local>, loc: Location) {
// Ignore unreachable blocks.
- if self.body.basic_blocks()[loc.block].terminator().kind == TerminatorKind::Unreachable {
+ if self.body.basic_blocks[loc.block].terminator().kind == TerminatorKind::Unreachable {
return;
}
@@ -886,7 +885,7 @@ fn elaborate_generator_drops<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
let mut elaborator = DropShimElaborator { body, patch: MirPatch::new(body), tcx, param_env };
- for (block, block_data) in body.basic_blocks().iter_enumerated() {
+ for (block, block_data) in body.basic_blocks.iter_enumerated() {
let (target, unwind, source_info) = match block_data.terminator() {
Terminator { source_info, kind: TerminatorKind::Drop { place, target, unwind } } => {
if let Some(local) = place.as_local() {
@@ -991,7 +990,7 @@ fn insert_panic_block<'tcx>(
body: &mut Body<'tcx>,
message: AssertMessage<'tcx>,
) -> BasicBlock {
- let assert_block = BasicBlock::new(body.basic_blocks().len());
+ let assert_block = BasicBlock::new(body.basic_blocks.len());
let term = TerminatorKind::Assert {
cond: Operand::Constant(Box::new(Constant {
span: body.span,
@@ -1021,7 +1020,7 @@ fn can_return<'tcx>(tcx: TyCtxt<'tcx>, body: &Body<'tcx>, param_env: ty::ParamEn
}
// If there's a return terminator the function may return.
- for block in body.basic_blocks() {
+ for block in body.basic_blocks.iter() {
if let TerminatorKind::Return = block.terminator().kind {
return true;
}
@@ -1038,7 +1037,7 @@ fn can_unwind<'tcx>(tcx: TyCtxt<'tcx>, body: &Body<'tcx>) -> bool {
}
// Unwinds can only start at certain terminators.
- for block in body.basic_blocks() {
+ for block in body.basic_blocks.iter() {
match block.terminator().kind {
// These never unwind.
TerminatorKind::Goto { .. }
@@ -1182,8 +1181,6 @@ fn create_cases<'tcx>(
transform: &TransformVisitor<'tcx>,
operation: Operation,
) -> Vec<(usize, BasicBlock)> {
- let tcx = transform.tcx;
-
let source_info = SourceInfo::outermost(body.span);
transform
@@ -1216,85 +1213,13 @@ fn create_cases<'tcx>(
if operation == Operation::Resume {
// Move the resume argument to the destination place of the `Yield` terminator
let resume_arg = Local::new(2); // 0 = return, 1 = self
-
- // handle `box yield` properly
- let box_place = if let [projection @ .., ProjectionElem::Deref] =
- &**point.resume_arg.projection
- {
- let box_place =
- Place::from(point.resume_arg.local).project_deeper(projection, tcx);
-
- let box_ty = box_place.ty(&body.local_decls, tcx).ty;
-
- if box_ty.is_box() { Some((box_place, box_ty)) } else { None }
- } else {
- None
- };
-
- if let Some((box_place, box_ty)) = box_place {
- let unique_did = box_ty
- .ty_adt_def()
- .expect("expected Box to be an Adt")
- .non_enum_variant()
- .fields[0]
- .did;
-
- let Some(nonnull_def) = tcx.type_of(unique_did).ty_adt_def() else {
- span_bug!(tcx.def_span(unique_did), "expected Box to contain Unique")
- };
-
- let nonnull_did = nonnull_def.non_enum_variant().fields[0].did;
-
- let (unique_ty, nonnull_ty, ptr_ty) =
- crate::elaborate_box_derefs::build_ptr_tys(
- tcx,
- box_ty.boxed_ty(),
- unique_did,
- nonnull_did,
- );
-
- let ptr_local = body.local_decls.push(LocalDecl::new(ptr_ty, body.span));
-
- statements.push(Statement {
- source_info,
- kind: StatementKind::StorageLive(ptr_local),
- });
-
- statements.push(Statement {
- source_info,
- kind: StatementKind::Assign(Box::new((
- Place::from(ptr_local),
- Rvalue::Use(Operand::Copy(box_place.project_deeper(
- &crate::elaborate_box_derefs::build_projection(
- unique_ty, nonnull_ty, ptr_ty,
- ),
- tcx,
- ))),
- ))),
- });
-
- statements.push(Statement {
- source_info,
- kind: StatementKind::Assign(Box::new((
- Place::from(ptr_local)
- .project_deeper(&[ProjectionElem::Deref], tcx),
- Rvalue::Use(Operand::Move(resume_arg.into())),
- ))),
- });
-
- statements.push(Statement {
- source_info,
- kind: StatementKind::StorageDead(ptr_local),
- });
- } else {
- statements.push(Statement {
- source_info,
- kind: StatementKind::Assign(Box::new((
- point.resume_arg,
- Rvalue::Use(Operand::Move(resume_arg.into())),
- ))),
- });
- }
+ statements.push(Statement {
+ source_info,
+ kind: StatementKind::Assign(Box::new((
+ point.resume_arg,
+ Rvalue::Use(Operand::Move(resume_arg.into())),
+ ))),
+ });
}
// Then jump to the real target
@@ -1314,10 +1239,6 @@ fn create_cases<'tcx>(
}
impl<'tcx> MirPass<'tcx> for StateTransform {
- fn phase_change(&self) -> Option<MirPhase> {
- Some(MirPhase::GeneratorsLowered)
- }
-
fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
let Some(yield_ty) = body.yield_ty() else {
// This only applies to generators
@@ -1530,7 +1451,7 @@ impl<'tcx> Visitor<'tcx> for EnsureGeneratorFieldAssignmentsNeverAlias<'_> {
| StatementKind::Retag(..)
| StatementKind::AscribeUserType(..)
| StatementKind::Coverage(..)
- | StatementKind::CopyNonOverlapping(..)
+ | StatementKind::Intrinsic(..)
| StatementKind::Nop => {}
}
}
diff --git a/compiler/rustc_mir_transform/src/inline.rs b/compiler/rustc_mir_transform/src/inline.rs
index 76b1522f3..780b91d92 100644
--- a/compiler/rustc_mir_transform/src/inline.rs
+++ b/compiler/rustc_mir_transform/src/inline.rs
@@ -7,9 +7,11 @@ use rustc_index::vec::Idx;
use rustc_middle::middle::codegen_fn_attrs::{CodegenFnAttrFlags, CodegenFnAttrs};
use rustc_middle::mir::visit::*;
use rustc_middle::mir::*;
-use rustc_middle::ty::subst::Subst;
-use rustc_middle::ty::{self, ConstKind, Instance, InstanceDef, ParamEnv, Ty, TyCtxt};
+use rustc_middle::ty::{self, Instance, InstanceDef, ParamEnv, Ty, TyCtxt};
+use rustc_session::config::OptLevel;
+use rustc_span::def_id::DefId;
use rustc_span::{hygiene::ExpnKind, ExpnData, LocalExpnId, Span};
+use rustc_target::abi::VariantIdx;
use rustc_target::spec::abi::Abi;
use super::simplify::{remove_dead_blocks, CfgSimplifier};
@@ -43,8 +45,15 @@ impl<'tcx> MirPass<'tcx> for Inline {
return enabled;
}
- // rust-lang/rust#101004: reverted to old inlining decision logic
- sess.mir_opt_level() >= 3
+ match sess.mir_opt_level() {
+ 0 | 1 => false,
+ 2 => {
+ (sess.opts.optimize == OptLevel::Default
+ || sess.opts.optimize == OptLevel::Aggressive)
+ && sess.opts.incremental == None
+ }
+ _ => true,
+ }
}
fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
@@ -85,7 +94,7 @@ fn inline<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) -> bool {
history: Vec::new(),
changed: false,
};
- let blocks = BasicBlock::new(0)..body.basic_blocks().next_index();
+ let blocks = BasicBlock::new(0)..body.basic_blocks.next_index();
this.process_blocks(body, blocks);
this.changed
}
@@ -95,8 +104,12 @@ struct Inliner<'tcx> {
param_env: ParamEnv<'tcx>,
/// Caller codegen attributes.
codegen_fn_attrs: &'tcx CodegenFnAttrs,
- /// Stack of inlined Instances.
- history: Vec<ty::Instance<'tcx>>,
+ /// Stack of inlined instances.
+ /// We only check the `DefId` and not the substs because we want to
+ /// avoid inlining cases of polymorphic recursion.
+ /// The number of `DefId`s is finite, so checking history is enough
+ /// to ensure that we do not loop endlessly while inlining.
+ history: Vec<DefId>,
/// Indicates that the caller body has been modified.
changed: bool,
}
@@ -124,7 +137,7 @@ impl<'tcx> Inliner<'tcx> {
Ok(new_blocks) => {
debug!("inlined {}", callsite.callee);
self.changed = true;
- self.history.push(callsite.callee);
+ self.history.push(callsite.callee.def_id());
self.process_blocks(caller_body, new_blocks);
self.history.pop();
}
@@ -203,9 +216,9 @@ impl<'tcx> Inliner<'tcx> {
}
}
- let old_blocks = caller_body.basic_blocks().next_index();
+ let old_blocks = caller_body.basic_blocks.next_index();
self.inline_call(caller_body, &callsite, callee_body);
- let new_blocks = old_blocks..caller_body.basic_blocks().next_index();
+ let new_blocks = old_blocks..caller_body.basic_blocks.next_index();
Ok(new_blocks)
}
@@ -300,7 +313,7 @@ impl<'tcx> Inliner<'tcx> {
return None;
}
- if self.history.contains(&callee) {
+ if self.history.contains(&callee.def_id()) {
return None;
}
@@ -395,124 +408,66 @@ impl<'tcx> Inliner<'tcx> {
// Give a bonus functions with a small number of blocks,
// We normally have two or three blocks for even
// very small functions.
- if callee_body.basic_blocks().len() <= 3 {
+ if callee_body.basic_blocks.len() <= 3 {
threshold += threshold / 4;
}
debug!(" final inline threshold = {}", threshold);
// FIXME: Give a bonus to functions with only a single caller
- let mut first_block = true;
- let mut cost = 0;
+ let diverges = matches!(
+ callee_body.basic_blocks[START_BLOCK].terminator().kind,
+ TerminatorKind::Unreachable | TerminatorKind::Call { target: None, .. }
+ );
+ if diverges && !matches!(callee_attrs.inline, InlineAttr::Always) {
+ return Err("callee diverges unconditionally");
+ }
- // Traverse the MIR manually so we can account for the effects of
- // inlining on the CFG.
+ let mut checker = CostChecker {
+ tcx: self.tcx,
+ param_env: self.param_env,
+ instance: callsite.callee,
+ callee_body,
+ cost: 0,
+ validation: Ok(()),
+ };
+
+ // Traverse the MIR manually so we can account for the effects of inlining on the CFG.
let mut work_list = vec![START_BLOCK];
- let mut visited = BitSet::new_empty(callee_body.basic_blocks().len());
+ let mut visited = BitSet::new_empty(callee_body.basic_blocks.len());
while let Some(bb) = work_list.pop() {
if !visited.insert(bb.index()) {
continue;
}
- let blk = &callee_body.basic_blocks()[bb];
-
- for stmt in &blk.statements {
- // Don't count StorageLive/StorageDead in the inlining cost.
- match stmt.kind {
- StatementKind::StorageLive(_)
- | StatementKind::StorageDead(_)
- | StatementKind::Deinit(_)
- | StatementKind::Nop => {}
- _ => cost += INSTR_COST,
- }
- }
- let term = blk.terminator();
- let mut is_drop = false;
- match term.kind {
- TerminatorKind::Drop { ref place, target, unwind }
- | TerminatorKind::DropAndReplace { ref place, target, unwind, .. } => {
- is_drop = true;
- work_list.push(target);
- // If the place doesn't actually need dropping, treat it like
- // a regular goto.
- let ty = callsite.callee.subst_mir(self.tcx, &place.ty(callee_body, tcx).ty);
- if ty.needs_drop(tcx, self.param_env) {
- cost += CALL_PENALTY;
- if let Some(unwind) = unwind {
- cost += LANDINGPAD_PENALTY;
- work_list.push(unwind);
- }
- } else {
- cost += INSTR_COST;
- }
- }
- TerminatorKind::Unreachable | TerminatorKind::Call { target: None, .. }
- if first_block =>
- {
- // If the function always diverges, don't inline
- // unless the cost is zero
- threshold = 0;
- }
+ let blk = &callee_body.basic_blocks[bb];
+ checker.visit_basic_block_data(bb, blk);
- TerminatorKind::Call { func: Operand::Constant(ref f), cleanup, .. } => {
- if let ty::FnDef(def_id, _) =
- *callsite.callee.subst_mir(self.tcx, &f.literal.ty()).kind()
- {
- // Don't give intrinsics the extra penalty for calls
- if tcx.is_intrinsic(def_id) {
- cost += INSTR_COST;
- } else {
- cost += CALL_PENALTY;
- }
- } else {
- cost += CALL_PENALTY;
- }
- if cleanup.is_some() {
- cost += LANDINGPAD_PENALTY;
- }
- }
- TerminatorKind::Assert { cleanup, .. } => {
- cost += CALL_PENALTY;
-
- if cleanup.is_some() {
- cost += LANDINGPAD_PENALTY;
- }
- }
- TerminatorKind::Resume => cost += RESUME_PENALTY,
- TerminatorKind::InlineAsm { cleanup, .. } => {
- cost += INSTR_COST;
+ let term = blk.terminator();
+ if let TerminatorKind::Drop { ref place, target, unwind }
+ | TerminatorKind::DropAndReplace { ref place, target, unwind, .. } = term.kind
+ {
+ work_list.push(target);
- if cleanup.is_some() {
- cost += LANDINGPAD_PENALTY;
+ // If the place doesn't actually need dropping, treat it like a regular goto.
+ let ty = callsite.callee.subst_mir(self.tcx, &place.ty(callee_body, tcx).ty);
+ if ty.needs_drop(tcx, self.param_env) && let Some(unwind) = unwind {
+ work_list.push(unwind);
}
- }
- _ => cost += INSTR_COST,
- }
-
- if !is_drop {
- for succ in term.successors() {
- work_list.push(succ);
- }
+ } else {
+ work_list.extend(term.successors())
}
-
- first_block = false;
}
// Count up the cost of local variables and temps, if we know the size
// use that, otherwise we use a moderately-large dummy cost.
-
- let ptr_size = tcx.data_layout.pointer_size.bytes();
-
for v in callee_body.vars_and_temps_iter() {
- let ty = callsite.callee.subst_mir(self.tcx, &callee_body.local_decls[v].ty);
- // Cost of the var is the size in machine-words, if we know
- // it.
- if let Some(size) = type_size_of(tcx, self.param_env, ty) {
- cost += ((size + ptr_size - 1) / ptr_size) as usize;
- } else {
- cost += UNKNOWN_SIZE_COST;
- }
+ checker.visit_local_decl(v, &callee_body.local_decls[v]);
}
+ // Abort if type validation found anything fishy.
+ checker.validation?;
+
+ let cost = checker.cost;
if let InlineAttr::Always = callee_attrs.inline {
debug!("INLINING {:?} because inline(always) [cost={}]", callsite, cost);
Ok(())
@@ -585,7 +540,7 @@ impl<'tcx> Inliner<'tcx> {
args: &args,
new_locals: Local::new(caller_body.local_decls.len())..,
new_scopes: SourceScope::new(caller_body.source_scopes.len())..,
- new_blocks: BasicBlock::new(caller_body.basic_blocks().len())..,
+ new_blocks: BasicBlock::new(caller_body.basic_blocks.len())..,
destination: dest,
callsite_scope: caller_body.source_scopes[callsite.source_info.scope].clone(),
callsite,
@@ -603,7 +558,9 @@ impl<'tcx> Inliner<'tcx> {
// If there are any locals without storage markers, give them storage only for the
// duration of the call.
for local in callee_body.vars_and_temps_iter() {
- if integrator.always_live_locals.contains(local) {
+ if !callee_body.local_decls[local].internal
+ && integrator.always_live_locals.contains(local)
+ {
let new_local = integrator.map_local(local);
caller_body[callsite.block].statements.push(Statement {
source_info: callsite.source_info,
@@ -616,7 +573,9 @@ impl<'tcx> Inliner<'tcx> {
// the slice once.
let mut n = 0;
for local in callee_body.vars_and_temps_iter().rev() {
- if integrator.always_live_locals.contains(local) {
+ if !callee_body.local_decls[local].internal
+ && integrator.always_live_locals.contains(local)
+ {
let new_local = integrator.map_local(local);
caller_body[block].statements.push(Statement {
source_info: callsite.source_info,
@@ -644,11 +603,11 @@ impl<'tcx> Inliner<'tcx> {
// `required_consts`, here we may not only have `ConstKind::Unevaluated`
// because we are calling `subst_and_normalize_erasing_regions`.
caller_body.required_consts.extend(
- callee_body.required_consts.iter().copied().filter(|&ct| {
- match ct.literal.const_for_ty() {
- Some(ct) => matches!(ct.kind(), ConstKind::Unevaluated(_)),
- None => true,
+ callee_body.required_consts.iter().copied().filter(|&ct| match ct.literal {
+ ConstantKind::Ty(_) => {
+ bug!("should never encounter ty::UnevaluatedConst in `required_consts`")
}
+ ConstantKind::Val(..) | ConstantKind::Unevaluated(..) => true,
}),
);
}
@@ -782,6 +741,193 @@ fn type_size_of<'tcx>(
tcx.layout_of(param_env.and(ty)).ok().map(|layout| layout.size.bytes())
}
+/// Verify that the callee body is compatible with the caller.
+///
+/// This visitor mostly computes the inlining cost,
+/// but also needs to verify that types match because of normalization failure.
+struct CostChecker<'b, 'tcx> {
+ tcx: TyCtxt<'tcx>,
+ param_env: ParamEnv<'tcx>,
+ cost: usize,
+ callee_body: &'b Body<'tcx>,
+ instance: ty::Instance<'tcx>,
+ validation: Result<(), &'static str>,
+}
+
+impl<'tcx> Visitor<'tcx> for CostChecker<'_, 'tcx> {
+ fn visit_statement(&mut self, statement: &Statement<'tcx>, location: Location) {
+ // Don't count StorageLive/StorageDead in the inlining cost.
+ match statement.kind {
+ StatementKind::StorageLive(_)
+ | StatementKind::StorageDead(_)
+ | StatementKind::Deinit(_)
+ | StatementKind::Nop => {}
+ _ => self.cost += INSTR_COST,
+ }
+
+ self.super_statement(statement, location);
+ }
+
+ fn visit_terminator(&mut self, terminator: &Terminator<'tcx>, location: Location) {
+ let tcx = self.tcx;
+ match terminator.kind {
+ TerminatorKind::Drop { ref place, unwind, .. }
+ | TerminatorKind::DropAndReplace { ref place, unwind, .. } => {
+ // If the place doesn't actually need dropping, treat it like a regular goto.
+ let ty = self.instance.subst_mir(tcx, &place.ty(self.callee_body, tcx).ty);
+ if ty.needs_drop(tcx, self.param_env) {
+ self.cost += CALL_PENALTY;
+ if unwind.is_some() {
+ self.cost += LANDINGPAD_PENALTY;
+ }
+ } else {
+ self.cost += INSTR_COST;
+ }
+ }
+ TerminatorKind::Call { func: Operand::Constant(ref f), cleanup, .. } => {
+ let fn_ty = self.instance.subst_mir(tcx, &f.literal.ty());
+ self.cost += if let ty::FnDef(def_id, _) = *fn_ty.kind() && tcx.is_intrinsic(def_id) {
+ // Don't give intrinsics the extra penalty for calls
+ INSTR_COST
+ } else {
+ CALL_PENALTY
+ };
+ if cleanup.is_some() {
+ self.cost += LANDINGPAD_PENALTY;
+ }
+ }
+ TerminatorKind::Assert { cleanup, .. } => {
+ self.cost += CALL_PENALTY;
+ if cleanup.is_some() {
+ self.cost += LANDINGPAD_PENALTY;
+ }
+ }
+ TerminatorKind::Resume => self.cost += RESUME_PENALTY,
+ TerminatorKind::InlineAsm { cleanup, .. } => {
+ self.cost += INSTR_COST;
+ if cleanup.is_some() {
+ self.cost += LANDINGPAD_PENALTY;
+ }
+ }
+ _ => self.cost += INSTR_COST,
+ }
+
+ self.super_terminator(terminator, location);
+ }
+
+ /// Count up the cost of local variables and temps, if we know the size
+ /// use that, otherwise we use a moderately-large dummy cost.
+ fn visit_local_decl(&mut self, local: Local, local_decl: &LocalDecl<'tcx>) {
+ let tcx = self.tcx;
+ let ptr_size = tcx.data_layout.pointer_size.bytes();
+
+ let ty = self.instance.subst_mir(tcx, &local_decl.ty);
+ // Cost of the var is the size in machine-words, if we know
+ // it.
+ if let Some(size) = type_size_of(tcx, self.param_env, ty) {
+ self.cost += ((size + ptr_size - 1) / ptr_size) as usize;
+ } else {
+ self.cost += UNKNOWN_SIZE_COST;
+ }
+
+ self.super_local_decl(local, local_decl)
+ }
+
+ /// This method duplicates code from MIR validation in an attempt to detect type mismatches due
+ /// to normalization failure.
+ fn visit_projection_elem(
+ &mut self,
+ local: Local,
+ proj_base: &[PlaceElem<'tcx>],
+ elem: PlaceElem<'tcx>,
+ context: PlaceContext,
+ location: Location,
+ ) {
+ if let ProjectionElem::Field(f, ty) = elem {
+ let parent = Place { local, projection: self.tcx.intern_place_elems(proj_base) };
+ let parent_ty = parent.ty(&self.callee_body.local_decls, self.tcx);
+ let check_equal = |this: &mut Self, f_ty| {
+ if !equal_up_to_regions(this.tcx, this.param_env, ty, f_ty) {
+ trace!(?ty, ?f_ty);
+ this.validation = Err("failed to normalize projection type");
+ return;
+ }
+ };
+
+ let kind = match parent_ty.ty.kind() {
+ &ty::Opaque(def_id, substs) => {
+ self.tcx.bound_type_of(def_id).subst(self.tcx, substs).kind()
+ }
+ kind => kind,
+ };
+
+ match kind {
+ ty::Tuple(fields) => {
+ let Some(f_ty) = fields.get(f.as_usize()) else {
+ self.validation = Err("malformed MIR");
+ return;
+ };
+ check_equal(self, *f_ty);
+ }
+ ty::Adt(adt_def, substs) => {
+ let var = parent_ty.variant_index.unwrap_or(VariantIdx::from_u32(0));
+ let Some(field) = adt_def.variant(var).fields.get(f.as_usize()) else {
+ self.validation = Err("malformed MIR");
+ return;
+ };
+ check_equal(self, field.ty(self.tcx, substs));
+ }
+ ty::Closure(_, substs) => {
+ let substs = substs.as_closure();
+ let Some(f_ty) = substs.upvar_tys().nth(f.as_usize()) else {
+ self.validation = Err("malformed MIR");
+ return;
+ };
+ check_equal(self, f_ty);
+ }
+ &ty::Generator(def_id, substs, _) => {
+ let f_ty = if let Some(var) = parent_ty.variant_index {
+ let gen_body = if def_id == self.callee_body.source.def_id() {
+ self.callee_body
+ } else {
+ self.tcx.optimized_mir(def_id)
+ };
+
+ let Some(layout) = gen_body.generator_layout() else {
+ self.validation = Err("malformed MIR");
+ return;
+ };
+
+ let Some(&local) = layout.variant_fields[var].get(f) else {
+ self.validation = Err("malformed MIR");
+ return;
+ };
+
+ let Some(&f_ty) = layout.field_tys.get(local) else {
+ self.validation = Err("malformed MIR");
+ return;
+ };
+
+ f_ty
+ } else {
+ let Some(f_ty) = substs.as_generator().prefix_tys().nth(f.index()) else {
+ self.validation = Err("malformed MIR");
+ return;
+ };
+
+ f_ty
+ };
+
+ check_equal(self, f_ty);
+ }
+ _ => self.validation = Err("malformed MIR"),
+ }
+ }
+
+ self.super_projection_elem(local, proj_base, elem, context, location);
+ }
+}
+
/**
* Integrator.
*
@@ -831,6 +977,21 @@ impl Integrator<'_, '_> {
trace!("mapping block `{:?}` to `{:?}`", block, new);
new
}
+
+ fn map_unwind(&self, unwind: Option<BasicBlock>) -> Option<BasicBlock> {
+ if self.in_cleanup_block {
+ if unwind.is_some() {
+ bug!("cleanup on cleanup block");
+ }
+ return unwind;
+ }
+
+ match unwind {
+ Some(target) => Some(self.map_block(target)),
+ // Add an unwind edge to the original call's cleanup block
+ None => self.cleanup_block,
+ }
+ }
}
impl<'tcx> MutVisitor<'tcx> for Integrator<'_, 'tcx> {
@@ -939,35 +1100,17 @@ impl<'tcx> MutVisitor<'tcx> for Integrator<'_, 'tcx> {
TerminatorKind::Drop { ref mut target, ref mut unwind, .. }
| TerminatorKind::DropAndReplace { ref mut target, ref mut unwind, .. } => {
*target = self.map_block(*target);
- if let Some(tgt) = *unwind {
- *unwind = Some(self.map_block(tgt));
- } else if !self.in_cleanup_block {
- // Unless this drop is in a cleanup block, add an unwind edge to
- // the original call's cleanup block
- *unwind = self.cleanup_block;
- }
+ *unwind = self.map_unwind(*unwind);
}
TerminatorKind::Call { ref mut target, ref mut cleanup, .. } => {
if let Some(ref mut tgt) = *target {
*tgt = self.map_block(*tgt);
}
- if let Some(tgt) = *cleanup {
- *cleanup = Some(self.map_block(tgt));
- } else if !self.in_cleanup_block {
- // Unless this call is in a cleanup block, add an unwind edge to
- // the original call's cleanup block
- *cleanup = self.cleanup_block;
- }
+ *cleanup = self.map_unwind(*cleanup);
}
TerminatorKind::Assert { ref mut target, ref mut cleanup, .. } => {
*target = self.map_block(*target);
- if let Some(tgt) = *cleanup {
- *cleanup = Some(self.map_block(tgt));
- } else if !self.in_cleanup_block {
- // Unless this assert is in a cleanup block, add an unwind edge to
- // the original call's cleanup block
- *cleanup = self.cleanup_block;
- }
+ *cleanup = self.map_unwind(*cleanup);
}
TerminatorKind::Return => {
terminator.kind = if let Some(tgt) = self.callsite.target {
@@ -995,11 +1138,8 @@ impl<'tcx> MutVisitor<'tcx> for Integrator<'_, 'tcx> {
TerminatorKind::InlineAsm { ref mut destination, ref mut cleanup, .. } => {
if let Some(ref mut tgt) = *destination {
*tgt = self.map_block(*tgt);
- } else if !self.in_cleanup_block {
- // Unless this inline asm is in a cleanup block, add an unwind edge to
- // the original call's cleanup block
- *cleanup = self.cleanup_block;
}
+ *cleanup = self.map_unwind(*cleanup);
}
}
}
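The comment added alongside `history: Vec<DefId>` above carries the key termination argument: by recording only the `DefId` of each inlined callee, polymorphically recursive instantiations of the same function hit the history check just like ordinary recursion, and since there are finitely many `DefId`s the inlining walk cannot loop forever. A self-contained sketch of that guard (the `Id` type and the call graph below are invented for illustration, not the rustc data structures):

use std::collections::HashMap;

// `Id` stands in for rustc's `DefId`; the graph maps a function to its callees.
type Id = u32;

fn walk(graph: &HashMap<Id, Vec<Id>>, current: Id, history: &mut Vec<Id>) {
    let Some(callees) = graph.get(&current) else { return };
    for &callee in callees {
        // Checking only the id (not its generic arguments) is what bounds the
        // stack: there are finitely many ids, so the walk cannot loop forever,
        // and polymorphic recursion is rejected like ordinary recursion.
        if history.contains(&callee) {
            continue;
        }
        history.push(callee);
        walk(graph, callee, history);
        history.pop();
    }
}

fn main() {
    let mut graph = HashMap::new();
    graph.insert(1, vec![2]);
    graph.insert(2, vec![1]); // mutual recursion: the history check breaks the cycle
    walk(&graph, 1, &mut vec![1]);
    println!("inlining walk terminated");
}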
diff --git a/compiler/rustc_mir_transform/src/inline/cycle.rs b/compiler/rustc_mir_transform/src/inline/cycle.rs
index 7810218fd..b027f9492 100644
--- a/compiler/rustc_mir_transform/src/inline/cycle.rs
+++ b/compiler/rustc_mir_transform/src/inline/cycle.rs
@@ -153,7 +153,7 @@ pub(crate) fn mir_inliner_callees<'tcx>(
_ => tcx.instance_mir(instance),
};
let mut calls = FxIndexSet::default();
- for bb_data in body.basic_blocks() {
+ for bb_data in body.basic_blocks.iter() {
let terminator = bb_data.terminator();
if let TerminatorKind::Call { func, .. } = &terminator.kind {
let ty = func.ty(&body.local_decls, tcx);
diff --git a/compiler/rustc_mir_transform/src/lib.rs b/compiler/rustc_mir_transform/src/lib.rs
index d968a4885..5be223254 100644
--- a/compiler/rustc_mir_transform/src/lib.rs
+++ b/compiler/rustc_mir_transform/src/lib.rs
@@ -1,7 +1,6 @@
#![allow(rustc::potential_query_instability)]
#![feature(box_patterns)]
#![feature(let_chains)]
-#![feature(let_else)]
#![feature(map_try_insert)]
#![feature(min_specialization)]
#![feature(never_type)]
@@ -10,6 +9,7 @@
#![feature(trusted_step)]
#![feature(try_blocks)]
#![feature(yeet_expr)]
+#![feature(if_let_guard)]
#![recursion_limit = "256"]
#[macro_use]
@@ -26,10 +26,14 @@ use rustc_hir::def_id::{DefId, LocalDefId};
use rustc_hir::intravisit::{self, Visitor};
use rustc_index::vec::IndexVec;
use rustc_middle::mir::visit::Visitor as _;
-use rustc_middle::mir::{traversal, Body, ConstQualifs, MirPass, MirPhase, Promoted};
+use rustc_middle::mir::{
+ traversal, AnalysisPhase, Body, ConstQualifs, Constant, LocalDecl, MirPass, MirPhase, Operand,
+ Place, ProjectionElem, Promoted, RuntimePhase, Rvalue, SourceInfo, Statement, StatementKind,
+ TerminatorKind,
+};
use rustc_middle::ty::query::Providers;
use rustc_middle::ty::{self, TyCtxt, TypeVisitable};
-use rustc_span::{Span, Symbol};
+use rustc_span::sym;
#[macro_use]
mod pass_manager;
@@ -52,6 +56,7 @@ mod const_prop_lint;
mod coverage;
mod dead_store_elimination;
mod deaggregator;
+mod deduce_param_attrs;
mod deduplicate_blocks;
mod deref_separator;
mod dest_prop;
@@ -66,7 +71,6 @@ mod inline;
mod instcombine;
mod lower_intrinsics;
mod lower_slice_len;
-mod marker;
mod match_branches;
mod multiple_return_terminators;
mod normalize_array_len;
@@ -135,10 +139,69 @@ pub fn provide(providers: &mut Providers) {
promoted_mir_of_const_arg: |tcx, (did, param_did)| {
promoted_mir(tcx, ty::WithOptConstParam { did, const_param_did: Some(param_did) })
},
+ deduced_param_attrs: deduce_param_attrs::deduced_param_attrs,
..*providers
};
}
+fn remap_mir_for_const_eval_select<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ mut body: Body<'tcx>,
+ context: hir::Constness,
+) -> Body<'tcx> {
+ for bb in body.basic_blocks.as_mut().iter_mut() {
+ let terminator = bb.terminator.as_mut().expect("invalid terminator");
+ match terminator.kind {
+ TerminatorKind::Call {
+ func: Operand::Constant(box Constant { ref literal, .. }),
+ ref mut args,
+ destination,
+ target,
+ cleanup,
+ fn_span,
+ ..
+ } if let ty::FnDef(def_id, _) = *literal.ty().kind()
+ && tcx.item_name(def_id) == sym::const_eval_select
+ && tcx.is_intrinsic(def_id) =>
+ {
+ let [tupled_args, called_in_const, called_at_rt]: [_; 3] = std::mem::take(args).try_into().unwrap();
+ let ty = tupled_args.ty(&body.local_decls, tcx);
+ let fields = ty.tuple_fields();
+ let num_args = fields.len();
+ let func = if context == hir::Constness::Const { called_in_const } else { called_at_rt };
+ let (method, place): (fn(Place<'tcx>) -> Operand<'tcx>, Place<'tcx>) = match tupled_args {
+ Operand::Constant(_) => {
+ // there is no good way of extracting a tuple arg from a constant (const generic stuff)
+ // so we just create a temporary and deconstruct that.
+ let local = body.local_decls.push(LocalDecl::new(ty, fn_span));
+ bb.statements.push(Statement {
+ source_info: SourceInfo::outermost(fn_span),
+ kind: StatementKind::Assign(Box::new((local.into(), Rvalue::Use(tupled_args.clone())))),
+ });
+ (Operand::Move, local.into())
+ }
+ Operand::Move(place) => (Operand::Move, place),
+ Operand::Copy(place) => (Operand::Copy, place),
+ };
+ let place_elems = place.projection;
+ let arguments = (0..num_args).map(|x| {
+ let mut place_elems = place_elems.to_vec();
+ place_elems.push(ProjectionElem::Field(x.into(), fields[x]));
+ let projection = tcx.intern_place_elems(&place_elems);
+ let place = Place {
+ local: place.local,
+ projection,
+ };
+ method(place)
+ }).collect();
+ terminator.kind = TerminatorKind::Call { func, args: arguments, destination, target, cleanup, from_hir_call: false, fn_span };
+ }
+ _ => {}
+ }
+ }
+ body
+}
+
fn is_mir_available(tcx: TyCtxt<'_>, def_id: DefId) -> bool {
let def_id = def_id.expect_local();
tcx.mir_keys(()).contains(&def_id)
@@ -159,14 +222,7 @@ fn mir_keys(tcx: TyCtxt<'_>, (): ()) -> FxIndexSet<LocalDefId> {
set: &'a mut FxIndexSet<LocalDefId>,
}
impl<'tcx> Visitor<'tcx> for GatherCtors<'_, 'tcx> {
- fn visit_variant_data(
- &mut self,
- v: &'tcx hir::VariantData<'tcx>,
- _: Symbol,
- _: &'tcx hir::Generics<'tcx>,
- _: hir::HirId,
- _: Span,
- ) {
+ fn visit_variant_data(&mut self, v: &'tcx hir::VariantData<'tcx>) {
if let hir::VariantData::Tuple(_, hir_id) = *v {
self.set.insert(self.tcx.hir().local_def_id(hir_id));
}
@@ -208,6 +264,8 @@ fn mir_const_qualif(tcx: TyCtxt<'_>, def: ty::WithOptConstParam<LocalDefId>) ->
}
/// Make MIR ready for const evaluation. This is run on all MIR, not just on consts!
+/// FIXME(oli-obk): it's unclear whether we still need this phase (and its corresponding query).
+/// We used to have this for pre-miri MIR based const eval.
fn mir_const<'tcx>(
tcx: TyCtxt<'tcx>,
def: ty::WithOptConstParam<LocalDefId>,
@@ -243,8 +301,8 @@ fn mir_const<'tcx>(
// What we need to do constant evaluation.
&simplify::SimplifyCfg::new("initial"),
&rustc_peek::SanityCheck, // Just a lint
- &marker::PhaseChange(MirPhase::Const),
],
+ None,
);
tcx.alloc_steal_mir(body)
}
@@ -284,6 +342,7 @@ fn mir_promoted<'tcx>(
&simplify::SimplifyCfg::new("promote-consts"),
&coverage::InstrumentCoverage,
],
+ Some(MirPhase::Analysis(AnalysisPhase::Initial)),
);
let promoted = promote_pass.promoted_fragments.into_inner();
@@ -330,7 +389,9 @@ fn inner_mir_for_ctfe(tcx: TyCtxt<'_>, def: ty::WithOptConstParam<LocalDefId>) -
.body_const_context(def.did)
.expect("mir_for_ctfe should not be used for runtime functions");
- let mut body = tcx.mir_drops_elaborated_and_const_checked(def).borrow().clone();
+ let body = tcx.mir_drops_elaborated_and_const_checked(def).borrow().clone();
+
+ let mut body = remap_mir_for_const_eval_select(tcx, body, hir::Constness::Const);
match context {
// Do not const prop functions, either they get executed at runtime or exported to metadata,
@@ -349,7 +410,8 @@ fn inner_mir_for_ctfe(tcx: TyCtxt<'_>, def: ty::WithOptConstParam<LocalDefId>) -
pm::run_passes(
tcx,
&mut body,
- &[&const_prop::ConstProp, &marker::PhaseChange(MirPhase::Optimized)],
+ &[&const_prop::ConstProp],
+ Some(MirPhase::Runtime(RuntimePhase::Optimized)),
);
}
}
@@ -389,38 +451,61 @@ fn mir_drops_elaborated_and_const_checked<'tcx>(
body.tainted_by_errors = Some(error_reported);
}
- // IMPORTANT
- pm::run_passes(tcx, &mut body, &[&remove_false_edges::RemoveFalseEdges]);
+ run_analysis_to_runtime_passes(tcx, &mut body);
+
+ tcx.alloc_steal_mir(body)
+}
+
+fn run_analysis_to_runtime_passes<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
+ assert!(body.phase == MirPhase::Analysis(AnalysisPhase::Initial));
+ let did = body.source.def_id();
+
+ debug!("analysis_mir_cleanup({:?})", did);
+ run_analysis_cleanup_passes(tcx, body);
+ assert!(body.phase == MirPhase::Analysis(AnalysisPhase::PostCleanup));
// Do a little drop elaboration before const-checking if `const_precise_live_drops` is enabled.
if check_consts::post_drop_elaboration::checking_enabled(&ConstCx::new(tcx, &body)) {
pm::run_passes(
tcx,
- &mut body,
+ body,
&[
- &simplify::SimplifyCfg::new("remove-false-edges"),
&remove_uninit_drops::RemoveUninitDrops,
+ &simplify::SimplifyCfg::new("remove-false-edges"),
],
+ None,
);
check_consts::post_drop_elaboration::check_live_drops(tcx, &body); // FIXME: make this a MIR lint
}
- run_post_borrowck_cleanup_passes(tcx, &mut body);
- assert!(body.phase == MirPhase::Deaggregated);
- tcx.alloc_steal_mir(body)
+ debug!("runtime_mir_lowering({:?})", did);
+ run_runtime_lowering_passes(tcx, body);
+ assert!(body.phase == MirPhase::Runtime(RuntimePhase::Initial));
+
+ debug!("runtime_mir_cleanup({:?})", did);
+ run_runtime_cleanup_passes(tcx, body);
+ assert!(body.phase == MirPhase::Runtime(RuntimePhase::PostCleanup));
}
-/// After this series of passes, no lifetime analysis based on borrowing can be done.
-fn run_post_borrowck_cleanup_passes<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
- debug!("post_borrowck_cleanup({:?})", body.source.def_id());
+// FIXME(JakobDegen): Can we make these lists of passes consts?
- let post_borrowck_cleanup: &[&dyn MirPass<'tcx>] = &[
- // Remove all things only needed by analysis
+/// After this series of passes, no lifetime analysis based on borrowing can be done.
+fn run_analysis_cleanup_passes<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
+ let passes: &[&dyn MirPass<'tcx>] = &[
+ &remove_false_edges::RemoveFalseEdges,
&simplify_branches::SimplifyConstCondition::new("initial"),
&remove_noop_landing_pads::RemoveNoopLandingPads,
&cleanup_post_borrowck::CleanupNonCodegenStatements,
&simplify::SimplifyCfg::new("early-opt"),
&deref_separator::Derefer,
+ ];
+
+ pm::run_passes(tcx, body, passes, Some(MirPhase::Analysis(AnalysisPhase::PostCleanup)));
+}
+
+/// Returns the sequence of passes that lowers analysis to runtime MIR.
+fn run_runtime_lowering_passes<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
+ let passes: &[&dyn MirPass<'tcx>] = &[
// These next passes must be executed together
&add_call_guards::CriticalCallEdges,
&elaborate_drops::ElaborateDrops,
@@ -434,16 +519,25 @@ fn run_post_borrowck_cleanup_passes<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tc
// `AddRetag` needs to run after `ElaborateDrops`. Otherwise it should run fairly late,
// but before optimizations begin.
&elaborate_box_derefs::ElaborateBoxDerefs,
+ &generator::StateTransform,
&add_retag::AddRetag,
- &lower_intrinsics::LowerIntrinsics,
- &simplify::SimplifyCfg::new("elaborate-drops"),
- // `Deaggregator` is conceptually part of MIR building, some backends rely on it happening
- // and it can help optimizations.
+ // Deaggregator is necessary for const prop. We may want to consider implementing
+ // CTFE support for aggregates.
&deaggregator::Deaggregator,
&Lint(const_prop_lint::ConstProp),
];
+ pm::run_passes_no_validate(tcx, body, passes, Some(MirPhase::Runtime(RuntimePhase::Initial)));
+}
+
+/// Returns the sequence of passes that do the initial cleanup of runtime MIR.
+fn run_runtime_cleanup_passes<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
+ let passes: &[&dyn MirPass<'tcx>] = &[
+ &elaborate_box_derefs::ElaborateBoxDerefs,
+ &lower_intrinsics::LowerIntrinsics,
+ &simplify::SimplifyCfg::new("elaborate-drops"),
+ ];
- pm::run_passes(tcx, body, post_borrowck_cleanup);
+ pm::run_passes(tcx, body, passes, Some(MirPhase::Runtime(RuntimePhase::PostCleanup)));
}
fn run_optimization_passes<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
@@ -451,9 +545,7 @@ fn run_optimization_passes<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
WithMinOptLevel(1, x)
}
- // Lowering generator control-flow and variables has to happen before we do anything else
- // to them. We run some optimizations before that, because they may be harder to do on the state
- // machine than on MIR with async primitives.
+ // The main optimizations that we do on MIR.
pm::run_passes(
tcx,
body,
@@ -465,17 +557,6 @@ fn run_optimization_passes<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
&uninhabited_enum_branching::UninhabitedEnumBranching,
&o1(simplify::SimplifyCfg::new("after-uninhabited-enum-branching")),
&inline::Inline,
- &generator::StateTransform,
- ],
- );
-
- assert!(body.phase == MirPhase::GeneratorsLowered);
-
- // The main optimizations that we do on MIR.
- pm::run_passes(
- tcx,
- body,
- &[
&remove_storage_markers::RemoveStorageMarkers,
&remove_zsts::RemoveZsts,
&const_goto::ConstGoto,
@@ -507,10 +588,10 @@ fn run_optimization_passes<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
&deduplicate_blocks::DeduplicateBlocks,
// Some cleanup necessary at least for LLVM and potentially other codegen backends.
&add_call_guards::CriticalCallEdges,
- &marker::PhaseChange(MirPhase::Optimized),
// Dump the end result for testing and debugging purposes.
&dump_mir::Marker("PreCodegen"),
],
+ Some(MirPhase::Runtime(RuntimePhase::Optimized)),
);
}
@@ -539,8 +620,9 @@ fn inner_optimized_mir(tcx: TyCtxt<'_>, did: LocalDefId) -> Body<'_> {
Some(other) => panic!("do not use `optimized_mir` for constants: {:?}", other),
}
debug!("about to call mir_drops_elaborated...");
- let mut body =
+ let body =
tcx.mir_drops_elaborated_and_const_checked(ty::WithOptConstParam::unknown(did)).steal();
+ let mut body = remap_mir_for_const_eval_select(tcx, body, hir::Constness::NotConst);
debug!("body: {:#?}", body);
run_optimization_passes(tcx, &mut body);
@@ -566,7 +648,7 @@ fn promoted_mir<'tcx>(
if let Some(error_reported) = tainted_by_errors {
body.tainted_by_errors = Some(error_reported);
}
- run_post_borrowck_cleanup_passes(tcx, body);
+ run_analysis_to_runtime_passes(tcx, body);
}
debug_assert!(!promoted.has_free_regions(), "Free regions in promoted MIR");
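The rewritten driver above makes the lowering pipeline an explicit sequence of asserted stages: the body enters at Analysis(Initial), is cleaned up to Analysis(PostCleanup), lowered to Runtime(Initial), and cleaned up again to Runtime(PostCleanup), with the target phase handed to the pass manager instead of being set by a marker pass. A toy model of that strictly-forward progression (the `Phase` and `Body` types below are simplified stand-ins for rustc's `MirPhase` machinery):

#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Debug)]
enum Phase {
    AnalysisInitial,
    AnalysisPostCleanup,
    RuntimeInitial,
    RuntimePostCleanup,
}

struct Body {
    phase: Phase,
}

// Each stage checks the phase it expects, then moves strictly forward.
fn run_stage(body: &mut Body, expected: Phase, next: Phase) {
    assert_eq!(body.phase, expected, "stage started in the wrong phase");
    assert!(next > body.phase, "invalid phase transition {:?} -> {:?}", body.phase, next);
    // ...the stage's passes would run here...
    body.phase = next;
}

fn main() {
    let mut body = Body { phase: Phase::AnalysisInitial };
    run_stage(&mut body, Phase::AnalysisInitial, Phase::AnalysisPostCleanup);
    run_stage(&mut body, Phase::AnalysisPostCleanup, Phase::RuntimeInitial);
    run_stage(&mut body, Phase::RuntimeInitial, Phase::RuntimePostCleanup);
    println!("final phase: {:?}", body.phase);
}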
diff --git a/compiler/rustc_mir_transform/src/lower_intrinsics.rs b/compiler/rustc_mir_transform/src/lower_intrinsics.rs
index b7ba61651..9892580e6 100644
--- a/compiler/rustc_mir_transform/src/lower_intrinsics.rs
+++ b/compiler/rustc_mir_transform/src/lower_intrinsics.rs
@@ -46,12 +46,31 @@ impl<'tcx> MirPass<'tcx> for LowerIntrinsics {
let mut args = args.drain(..);
block.statements.push(Statement {
source_info: terminator.source_info,
- kind: StatementKind::CopyNonOverlapping(Box::new(
- rustc_middle::mir::CopyNonOverlapping {
- src: args.next().unwrap(),
- dst: args.next().unwrap(),
- count: args.next().unwrap(),
- },
+ kind: StatementKind::Intrinsic(Box::new(
+ NonDivergingIntrinsic::CopyNonOverlapping(
+ rustc_middle::mir::CopyNonOverlapping {
+ src: args.next().unwrap(),
+ dst: args.next().unwrap(),
+ count: args.next().unwrap(),
+ },
+ ),
+ )),
+ });
+ assert_eq!(
+ args.next(),
+ None,
+ "Extra argument for copy_non_overlapping intrinsic"
+ );
+ drop(args);
+ terminator.kind = TerminatorKind::Goto { target };
+ }
+ sym::assume => {
+ let target = target.unwrap();
+ let mut args = args.drain(..);
+ block.statements.push(Statement {
+ source_info: terminator.source_info,
+ kind: StatementKind::Intrinsic(Box::new(
+ NonDivergingIntrinsic::Assume(args.next().unwrap()),
)),
});
assert_eq!(
diff --git a/compiler/rustc_mir_transform/src/marker.rs b/compiler/rustc_mir_transform/src/marker.rs
deleted file mode 100644
index 06819fc1d..000000000
--- a/compiler/rustc_mir_transform/src/marker.rs
+++ /dev/null
@@ -1,20 +0,0 @@
-use std::borrow::Cow;
-
-use crate::MirPass;
-use rustc_middle::mir::{Body, MirPhase};
-use rustc_middle::ty::TyCtxt;
-
-/// Changes the MIR phase without changing the MIR itself.
-pub struct PhaseChange(pub MirPhase);
-
-impl<'tcx> MirPass<'tcx> for PhaseChange {
- fn phase_change(&self) -> Option<MirPhase> {
- Some(self.0)
- }
-
- fn name(&self) -> Cow<'_, str> {
- Cow::from(format!("PhaseChange-{:?}", self.0))
- }
-
- fn run_pass(&self, _: TyCtxt<'tcx>, _body: &mut Body<'tcx>) {}
-}
diff --git a/compiler/rustc_mir_transform/src/multiple_return_terminators.rs b/compiler/rustc_mir_transform/src/multiple_return_terminators.rs
index 22b6dead9..3957cd92c 100644
--- a/compiler/rustc_mir_transform/src/multiple_return_terminators.rs
+++ b/compiler/rustc_mir_transform/src/multiple_return_terminators.rs
@@ -15,7 +15,7 @@ impl<'tcx> MirPass<'tcx> for MultipleReturnTerminators {
fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
// find basic blocks with no statement and a return terminator
- let mut bbs_simple_returns = BitSet::new_empty(body.basic_blocks().len());
+ let mut bbs_simple_returns = BitSet::new_empty(body.basic_blocks.len());
let def_id = body.source.def_id();
let bbs = body.basic_blocks_mut();
for idx in bbs.indices() {
diff --git a/compiler/rustc_mir_transform/src/normalize_array_len.rs b/compiler/rustc_mir_transform/src/normalize_array_len.rs
index c0217a105..a159e6171 100644
--- a/compiler/rustc_mir_transform/src/normalize_array_len.rs
+++ b/compiler/rustc_mir_transform/src/normalize_array_len.rs
@@ -21,10 +21,10 @@ impl<'tcx> MirPass<'tcx> for NormalizeArrayLen {
fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
// early returns for edge cases of highly unrolled functions
- if body.basic_blocks().len() > MAX_NUM_BLOCKS {
+ if body.basic_blocks.len() > MAX_NUM_BLOCKS {
return;
}
- if body.local_decls().len() > MAX_NUM_LOCALS {
+ if body.local_decls.len() > MAX_NUM_LOCALS {
return;
}
normalize_array_len_calls(tcx, body)
diff --git a/compiler/rustc_mir_transform/src/nrvo.rs b/compiler/rustc_mir_transform/src/nrvo.rs
index bb063915f..4291e81c7 100644
--- a/compiler/rustc_mir_transform/src/nrvo.rs
+++ b/compiler/rustc_mir_transform/src/nrvo.rs
@@ -53,10 +53,10 @@ impl<'tcx> MirPass<'tcx> for RenameReturnPlace {
def_id, returned_local
);
- RenameToReturnPlace { tcx, to_rename: returned_local }.visit_body(body);
+ RenameToReturnPlace { tcx, to_rename: returned_local }.visit_body_preserves_cfg(body);
// Clean up the `NOP`s we inserted for statements made useless by our renaming.
- for block_data in body.basic_blocks_mut() {
+ for block_data in body.basic_blocks.as_mut_preserves_cfg() {
block_data.statements.retain(|stmt| stmt.kind != mir::StatementKind::Nop);
}
@@ -89,7 +89,7 @@ fn local_eligible_for_nrvo(body: &mut mir::Body<'_>) -> Option<Local> {
}
let mut copied_to_return_place = None;
- for block in body.basic_blocks().indices() {
+ for block in body.basic_blocks.indices() {
// Look for blocks with a `Return` terminator.
if !matches!(body[block].terminator().kind, mir::TerminatorKind::Return) {
continue;
@@ -122,7 +122,7 @@ fn find_local_assigned_to_return_place(
body: &mut mir::Body<'_>,
) -> Option<Local> {
let mut block = start;
- let mut seen = HybridBitSet::new_empty(body.basic_blocks().len());
+ let mut seen = HybridBitSet::new_empty(body.basic_blocks.len());
// Iterate as long as `block` has exactly one predecessor that we have not yet visited.
while seen.insert(block) {
diff --git a/compiler/rustc_mir_transform/src/pass_manager.rs b/compiler/rustc_mir_transform/src/pass_manager.rs
index e27d4ab16..230c6a7cb 100644
--- a/compiler/rustc_mir_transform/src/pass_manager.rs
+++ b/compiler/rustc_mir_transform/src/pass_manager.rs
@@ -1,6 +1,6 @@
use std::borrow::Cow;
-use rustc_middle::mir::{self, Body, MirPhase};
+use rustc_middle::mir::{self, Body, MirPhase, RuntimePhase};
use rustc_middle::ty::TyCtxt;
use rustc_session::Session;
@@ -66,69 +66,90 @@ where
fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
self.1.run_pass(tcx, body)
}
+}
- fn phase_change(&self) -> Option<MirPhase> {
- self.1.phase_change()
- }
+/// Run the sequence of passes without validating the MIR after each pass. The MIR is still
+/// validated at the end.
+pub fn run_passes_no_validate<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ body: &mut Body<'tcx>,
+ passes: &[&dyn MirPass<'tcx>],
+ phase_change: Option<MirPhase>,
+) {
+ run_passes_inner(tcx, body, passes, phase_change, false);
}
-pub fn run_passes<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>, passes: &[&dyn MirPass<'tcx>]) {
- let start_phase = body.phase;
- let mut cnt = 0;
+/// The optional `phase_change` is applied after executing all the passes, if present
+pub fn run_passes<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ body: &mut Body<'tcx>,
+ passes: &[&dyn MirPass<'tcx>],
+ phase_change: Option<MirPhase>,
+) {
+ run_passes_inner(tcx, body, passes, phase_change, true);
+}
- let validate = tcx.sess.opts.unstable_opts.validate_mir;
+fn run_passes_inner<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ body: &mut Body<'tcx>,
+ passes: &[&dyn MirPass<'tcx>],
+ phase_change: Option<MirPhase>,
+ validate_each: bool,
+) {
+ let validate = validate_each & tcx.sess.opts.unstable_opts.validate_mir;
let overridden_passes = &tcx.sess.opts.unstable_opts.mir_enable_passes;
trace!(?overridden_passes);
- if validate {
- validate_body(tcx, body, format!("start of phase transition from {:?}", start_phase));
- }
-
for pass in passes {
let name = pass.name();
- if let Some((_, polarity)) = overridden_passes.iter().rev().find(|(s, _)| s == &*name) {
- trace!(
- pass = %name,
- "{} as requested by flag",
- if *polarity { "Running" } else { "Not running" },
- );
- if !polarity {
- continue;
- }
- } else {
- if !pass.is_enabled(&tcx.sess) {
- continue;
- }
+ let overridden =
+ overridden_passes.iter().rev().find(|(s, _)| s == &*name).map(|(_name, polarity)| {
+ trace!(
+ pass = %name,
+ "{} as requested by flag",
+ if *polarity { "Running" } else { "Not running" },
+ );
+ *polarity
+ });
+ if !overridden.unwrap_or_else(|| pass.is_enabled(&tcx.sess)) {
+ continue;
}
+
let dump_enabled = pass.is_mir_dump_enabled();
if dump_enabled {
- dump_mir(tcx, body, start_phase, &name, cnt, false);
+ dump_mir_for_pass(tcx, body, &name, false);
+ }
+ if validate {
+ validate_body(tcx, body, format!("before pass {}", name));
}
pass.run_pass(tcx, body);
if dump_enabled {
- dump_mir(tcx, body, start_phase, &name, cnt, true);
- cnt += 1;
+ dump_mir_for_pass(tcx, body, &name, true);
+ }
+ if validate {
+ validate_body(tcx, body, format!("after pass {}", name));
}
- if let Some(new_phase) = pass.phase_change() {
- if body.phase >= new_phase {
- panic!("Invalid MIR phase transition from {:?} to {:?}", body.phase, new_phase);
- }
+ body.pass_count += 1;
+ }
- body.phase = new_phase;
+ if let Some(new_phase) = phase_change {
+ if body.phase >= new_phase {
+ panic!("Invalid MIR phase transition from {:?} to {:?}", body.phase, new_phase);
}
- if validate {
- validate_body(tcx, body, format!("after pass {}", pass.name()));
+ body.phase = new_phase;
+
+ dump_mir_for_phase_change(tcx, body);
+ if validate || new_phase == MirPhase::Runtime(RuntimePhase::Optimized) {
+ validate_body(tcx, body, format!("after phase change to {}", new_phase));
}
- }
- if validate || body.phase == MirPhase::Optimized {
- validate_body(tcx, body, format!("end of phase transition to {:?}", body.phase));
+ body.pass_count = 1;
}
}
@@ -136,22 +157,33 @@ pub fn validate_body<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>, when: Strin
validate::Validator { when, mir_phase: body.phase }.run_pass(tcx, body);
}
-pub fn dump_mir<'tcx>(
+pub fn dump_mir_for_pass<'tcx>(
tcx: TyCtxt<'tcx>,
body: &Body<'tcx>,
- phase: MirPhase,
pass_name: &str,
- cnt: usize,
is_after: bool,
) {
- let phase_index = phase as u32;
+ let phase_index = body.phase.phase_index();
mir::dump_mir(
tcx,
- Some(&format_args!("{:03}-{:03}", phase_index, cnt)),
+ Some(&format_args!("{:03}-{:03}", phase_index, body.pass_count)),
pass_name,
if is_after { &"after" } else { &"before" },
body,
|_, _| Ok(()),
);
}
+
+pub fn dump_mir_for_phase_change<'tcx>(tcx: TyCtxt<'tcx>, body: &Body<'tcx>) {
+ let phase_index = body.phase.phase_index();
+
+ mir::dump_mir(
+ tcx,
+ Some(&format_args!("{:03}-000", phase_index)),
+ &format!("{}", body.phase),
+ &"after",
+ body,
+ |_, _| Ok(()),
+ )
+}
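Pulling the new pass-manager contract together: passes no longer declare a phase change themselves; the caller supplies an optional target phase, per-pass validation can be switched off (`run_passes_no_validate`) while validation still runs around phase changes, and `pass_count` is reset when the phase advances. A rough standalone analogue, with `Pass`, `Body`, and `validate` as invented stand-ins rather than rustc types:

struct Body {
    phase: u32,
    pass_count: usize,
    text: String,
}

trait Pass {
    fn name(&self) -> &str;
    fn is_enabled(&self) -> bool {
        true
    }
    fn run(&self, body: &mut Body);
}

// Stand-in for MIR validation: just checks a trivial invariant.
fn validate(body: &Body, when: &str) {
    assert!(!body.text.is_empty(), "empty body {when}");
}

fn run_passes(
    body: &mut Body,
    passes: &[&dyn Pass],
    phase_change: Option<u32>,
    validate_each: bool,
) {
    for pass in passes {
        if !pass.is_enabled() {
            continue;
        }
        if validate_each {
            validate(body, &format!("before pass {}", pass.name()));
        }
        pass.run(body);
        if validate_each {
            validate(body, &format!("after pass {}", pass.name()));
        }
        body.pass_count += 1;
    }
    if let Some(new_phase) = phase_change {
        assert!(body.phase < new_phase, "invalid phase transition");
        body.phase = new_phase;
        // The diff also validates unconditionally when the new phase is the final
        // (Optimized) one; this sketch keeps only the per-flag form.
        if validate_each {
            validate(body, &format!("after phase change to {new_phase}"));
        }
        body.pass_count = 1;
    }
}

struct Uppercase;

impl Pass for Uppercase {
    fn name(&self) -> &str {
        "uppercase"
    }
    fn run(&self, body: &mut Body) {
        body.text = body.text.to_uppercase();
    }
}

fn main() {
    let mut body = Body { phase: 0, pass_count: 0, text: "mir".into() };
    let passes: &[&dyn Pass] = &[&Uppercase];
    run_passes(&mut body, passes, Some(1), true);
    println!("{} (phase {}, pass_count {})", body.text, body.phase, body.pass_count);
}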
diff --git a/compiler/rustc_mir_transform/src/remove_noop_landing_pads.rs b/compiler/rustc_mir_transform/src/remove_noop_landing_pads.rs
index 5c441c5b1..f1bbf2ea7 100644
--- a/compiler/rustc_mir_transform/src/remove_noop_landing_pads.rs
+++ b/compiler/rustc_mir_transform/src/remove_noop_landing_pads.rs
@@ -51,7 +51,7 @@ impl RemoveNoopLandingPads {
StatementKind::Assign { .. }
| StatementKind::SetDiscriminant { .. }
| StatementKind::Deinit(..)
- | StatementKind::CopyNonOverlapping(..)
+ | StatementKind::Intrinsic(..)
| StatementKind::Retag { .. } => {
return false;
}
@@ -94,7 +94,7 @@ impl RemoveNoopLandingPads {
let mut jumps_folded = 0;
let mut landing_pads_removed = 0;
- let mut nop_landing_pads = BitSet::new_empty(body.basic_blocks().len());
+ let mut nop_landing_pads = BitSet::new_empty(body.basic_blocks.len());
// This is a post-order traversal, so that if A post-dominates B
// then A will be visited before B.
diff --git a/compiler/rustc_mir_transform/src/remove_uninit_drops.rs b/compiler/rustc_mir_transform/src/remove_uninit_drops.rs
index 96b715402..78b6f714a 100644
--- a/compiler/rustc_mir_transform/src/remove_uninit_drops.rs
+++ b/compiler/rustc_mir_transform/src/remove_uninit_drops.rs
@@ -35,7 +35,7 @@ impl<'tcx> MirPass<'tcx> for RemoveUninitDrops {
.into_results_cursor(body);
let mut to_remove = vec![];
- for (bb, block) in body.basic_blocks().iter_enumerated() {
+ for (bb, block) in body.basic_blocks.iter_enumerated() {
let terminator = block.terminator();
let (TerminatorKind::Drop { place, .. } | TerminatorKind::DropAndReplace { place, .. })
= &terminator.kind
diff --git a/compiler/rustc_mir_transform/src/required_consts.rs b/compiler/rustc_mir_transform/src/required_consts.rs
index 827ce0c02..cc75947d9 100644
--- a/compiler/rustc_mir_transform/src/required_consts.rs
+++ b/compiler/rustc_mir_transform/src/required_consts.rs
@@ -1,5 +1,5 @@
use rustc_middle::mir::visit::Visitor;
-use rustc_middle::mir::{Constant, Location};
+use rustc_middle::mir::{Constant, ConstantKind, Location};
use rustc_middle::ty::ConstKind;
pub struct RequiredConstsVisitor<'a, 'tcx> {
@@ -15,8 +15,13 @@ impl<'a, 'tcx> RequiredConstsVisitor<'a, 'tcx> {
impl<'tcx> Visitor<'tcx> for RequiredConstsVisitor<'_, 'tcx> {
fn visit_constant(&mut self, constant: &Constant<'tcx>, _: Location) {
let literal = constant.literal;
- if let Some(ct) = literal.const_for_ty() && let ConstKind::Unevaluated(_) = ct.kind() {
- self.required_consts.push(*constant);
+ match literal {
+ ConstantKind::Ty(c) => match c.kind() {
+ ConstKind::Param(_) => {}
+ _ => bug!("only ConstKind::Param should be encountered here, got {:#?}", c),
+ },
+ ConstantKind::Unevaluated(..) => self.required_consts.push(*constant),
+ ConstantKind::Val(..) => {}
}
}
}
diff --git a/compiler/rustc_mir_transform/src/reveal_all.rs b/compiler/rustc_mir_transform/src/reveal_all.rs
index 4919ad400..abe6cb285 100644
--- a/compiler/rustc_mir_transform/src/reveal_all.rs
+++ b/compiler/rustc_mir_transform/src/reveal_all.rs
@@ -19,7 +19,7 @@ impl<'tcx> MirPass<'tcx> for RevealAll {
}
let param_env = tcx.param_env_reveal_all_normalized(body.source.def_id());
- RevealAllVisitor { tcx, param_env }.visit_body(body);
+ RevealAllVisitor { tcx, param_env }.visit_body_preserves_cfg(body);
}
}
diff --git a/compiler/rustc_mir_transform/src/separate_const_switch.rs b/compiler/rustc_mir_transform/src/separate_const_switch.rs
index 925eb10a1..2f116aaa9 100644
--- a/compiler/rustc_mir_transform/src/separate_const_switch.rs
+++ b/compiler/rustc_mir_transform/src/separate_const_switch.rs
@@ -62,7 +62,7 @@ impl<'tcx> MirPass<'tcx> for SeparateConstSwitch {
pub fn separate_const_switch(body: &mut Body<'_>) -> usize {
let mut new_blocks: SmallVec<[(BasicBlock, BasicBlock); 6]> = SmallVec::new();
let predecessors = body.basic_blocks.predecessors();
- 'block_iter: for (block_id, block) in body.basic_blocks().iter_enumerated() {
+ 'block_iter: for (block_id, block) in body.basic_blocks.iter_enumerated() {
if let TerminatorKind::SwitchInt {
discr: Operand::Copy(switch_place) | Operand::Move(switch_place),
..
@@ -90,7 +90,7 @@ pub fn separate_const_switch(body: &mut Body<'_>) -> usize {
let mut predecessors_left = predecessors[block_id].len();
'predec_iter: for predecessor_id in predecessors[block_id].iter().copied() {
- let predecessor = &body.basic_blocks()[predecessor_id];
+ let predecessor = &body.basic_blocks[predecessor_id];
// First we make sure the predecessor jumps
// in a reasonable way
@@ -249,7 +249,7 @@ fn is_likely_const<'tcx>(mut tracked_place: Place<'tcx>, block: &BasicBlockData<
| StatementKind::AscribeUserType(_, _)
| StatementKind::Coverage(_)
| StatementKind::StorageDead(_)
- | StatementKind::CopyNonOverlapping(_)
+ | StatementKind::Intrinsic(_)
| StatementKind::Nop => {}
}
}
@@ -317,7 +317,7 @@ fn find_determining_place<'tcx>(
| StatementKind::Retag(_, _)
| StatementKind::AscribeUserType(_, _)
| StatementKind::Coverage(_)
- | StatementKind::CopyNonOverlapping(_)
+ | StatementKind::Intrinsic(_)
| StatementKind::Nop => {}
// If the discriminant is set, it is always set
diff --git a/compiler/rustc_mir_transform/src/shim.rs b/compiler/rustc_mir_transform/src/shim.rs
index 3620e94be..4e8798b7a 100644
--- a/compiler/rustc_mir_transform/src/shim.rs
+++ b/compiler/rustc_mir_transform/src/shim.rs
@@ -3,8 +3,8 @@ use rustc_hir::def_id::DefId;
use rustc_hir::lang_items::LangItem;
use rustc_middle::mir::*;
use rustc_middle::ty::query::Providers;
-use rustc_middle::ty::subst::{InternalSubsts, Subst};
-use rustc_middle::ty::{self, EarlyBinder, Ty, TyCtxt};
+use rustc_middle::ty::InternalSubsts;
+use rustc_middle::ty::{self, EarlyBinder, GeneratorSubsts, Ty, TyCtxt};
use rustc_target::abi::VariantIdx;
use rustc_index::vec::{Idx, IndexVec};
@@ -17,8 +17,8 @@ use std::iter;
use crate::util::expand_aggregate;
use crate::{
- abort_unwinding_calls, add_call_guards, add_moves_for_packed_drops, marker, pass_manager as pm,
- remove_noop_landing_pads, simplify,
+ abort_unwinding_calls, add_call_guards, add_moves_for_packed_drops, deref_separator,
+ pass_manager as pm, remove_noop_landing_pads, simplify,
};
use rustc_middle::mir::patch::MirPatch;
use rustc_mir_dataflow::elaborate_drops::{self, DropElaborator, DropFlagMode, DropStyle};
@@ -92,12 +92,13 @@ fn make_shim<'tcx>(tcx: TyCtxt<'tcx>, instance: ty::InstanceDef<'tcx>) -> Body<'
&mut result,
&[
&add_moves_for_packed_drops::AddMovesForPackedDrops,
+ &deref_separator::Derefer,
&remove_noop_landing_pads::RemoveNoopLandingPads,
&simplify::SimplifyCfg::new("make_shim"),
&add_call_guards::CriticalCallEdges,
&abort_unwinding_calls::AbortUnwindingCalls,
- &marker::PhaseChange(MirPhase::Const),
],
+ Some(MirPhase::Runtime(RuntimePhase::Optimized)),
);
debug!("make_shim({:?}) = {:?}", instance, result);
@@ -311,7 +312,7 @@ fn build_clone_shim<'tcx>(tcx: TyCtxt<'tcx>, def_id: DefId, self_ty: Ty<'tcx>) -
let param_env = tcx.param_env(def_id);
let mut builder = CloneShimBuilder::new(tcx, def_id, self_ty);
- let is_copy = self_ty.is_copy_modulo_regions(tcx.at(builder.span), param_env);
+ let is_copy = self_ty.is_copy_modulo_regions(tcx, param_env);
let dest = Place::return_place();
let src = tcx.mk_place_deref(Place::from(Local::new(1 + 0)));
@@ -322,6 +323,9 @@ fn build_clone_shim<'tcx>(tcx: TyCtxt<'tcx>, def_id: DefId, self_ty: Ty<'tcx>) -
builder.tuple_like_shim(dest, src, substs.as_closure().upvar_tys())
}
ty::Tuple(..) => builder.tuple_like_shim(dest, src, self_ty.tuple_fields()),
+ ty::Generator(gen_def_id, substs, hir::Movability::Movable) => {
+ builder.generator_shim(dest, src, *gen_def_id, substs.as_generator())
+ }
_ => bug!("clone shim for `{:?}` which is not `Copy` and is not an aggregate", self_ty),
};
@@ -387,7 +391,7 @@ impl<'tcx> CloneShimBuilder<'tcx> {
/// offset=0 will give you the index of the next BasicBlock,
/// offset=1 will give the index of the next-to-next block,
/// offset=-1 will give you the index of the last-created block
- fn block_index_offset(&mut self, offset: usize) -> BasicBlock {
+ fn block_index_offset(&self, offset: usize) -> BasicBlock {
BasicBlock::new(self.blocks.len() + offset)
}
@@ -460,49 +464,106 @@ impl<'tcx> CloneShimBuilder<'tcx> {
);
}
- fn tuple_like_shim<I>(&mut self, dest: Place<'tcx>, src: Place<'tcx>, tys: I)
+ fn clone_fields<I>(
+ &mut self,
+ dest: Place<'tcx>,
+ src: Place<'tcx>,
+ target: BasicBlock,
+ mut unwind: BasicBlock,
+ tys: I,
+ ) -> BasicBlock
where
I: IntoIterator<Item = Ty<'tcx>>,
{
- let mut previous_field = None;
+ // For an iterator of length n, create 2*n + 1 blocks.
for (i, ity) in tys.into_iter().enumerate() {
+ // Each iteration creates two blocks, referred to here as block 2*i and block 2*i + 1.
+ //
+ // Block 2*i attempts to clone the field. If successful it branches to 2*i + 2 (the
+ // next clone block). If unsuccessful it branches to the previous unwind block, which
+ // is initially the `unwind` argument passed to this function.
+ //
+ // Block 2*i + 1 is the unwind block for this iteration. It drops the cloned value
+ // created by block 2*i. We store this block in `unwind` so that the next clone block
+ // will unwind to it if cloning fails.
+
let field = Field::new(i);
let src_field = self.tcx.mk_place_field(src, field, ity);
let dest_field = self.tcx.mk_place_field(dest, field, ity);
- // #(2i + 1) is the cleanup block for the previous clone operation
- let cleanup_block = self.block_index_offset(1);
- // #(2i + 2) is the next cloning block
- // (or the Return terminator if this is the last block)
+ let next_unwind = self.block_index_offset(1);
let next_block = self.block_index_offset(2);
+ self.make_clone_call(dest_field, src_field, ity, next_block, unwind);
+ self.block(
+ vec![],
+ TerminatorKind::Drop { place: dest_field, target: unwind, unwind: None },
+ true,
+ );
+ unwind = next_unwind;
+ }
+ // If all clones succeed then we end up here.
+ self.block(vec![], TerminatorKind::Goto { target }, false);
+ unwind
+ }
- // BB #(2i)
- // `dest.i = Clone::clone(&src.i);`
- // Goto #(2i + 2) if ok, #(2i + 1) if unwinding happens.
- self.make_clone_call(dest_field, src_field, ity, next_block, cleanup_block);
-
- // BB #(2i + 1) (cleanup)
- if let Some((previous_field, previous_cleanup)) = previous_field.take() {
- // Drop previous field and goto previous cleanup block.
- self.block(
- vec![],
- TerminatorKind::Drop {
- place: previous_field,
- target: previous_cleanup,
- unwind: None,
- },
- true,
- );
- } else {
- // Nothing to drop, just resume.
- self.block(vec![], TerminatorKind::Resume, true);
- }
+ fn tuple_like_shim<I>(&mut self, dest: Place<'tcx>, src: Place<'tcx>, tys: I)
+ where
+ I: IntoIterator<Item = Ty<'tcx>>,
+ {
+ self.block(vec![], TerminatorKind::Goto { target: self.block_index_offset(3) }, false);
+ let unwind = self.block(vec![], TerminatorKind::Resume, true);
+ let target = self.block(vec![], TerminatorKind::Return, false);
- previous_field = Some((dest_field, cleanup_block));
- }
+ let _final_cleanup_block = self.clone_fields(dest, src, target, unwind, tys);
+ }
- self.block(vec![], TerminatorKind::Return, false);
+ fn generator_shim(
+ &mut self,
+ dest: Place<'tcx>,
+ src: Place<'tcx>,
+ gen_def_id: DefId,
+ substs: GeneratorSubsts<'tcx>,
+ ) {
+ self.block(vec![], TerminatorKind::Goto { target: self.block_index_offset(3) }, false);
+ let unwind = self.block(vec![], TerminatorKind::Resume, true);
+ // This will get overwritten with a switch once we know the target blocks
+ let switch = self.block(vec![], TerminatorKind::Unreachable, false);
+ let unwind = self.clone_fields(dest, src, switch, unwind, substs.upvar_tys());
+ let target = self.block(vec![], TerminatorKind::Return, false);
+ let unreachable = self.block(vec![], TerminatorKind::Unreachable, false);
+ let mut cases = Vec::with_capacity(substs.state_tys(gen_def_id, self.tcx).count());
+ for (index, state_tys) in substs.state_tys(gen_def_id, self.tcx).enumerate() {
+ let variant_index = VariantIdx::new(index);
+ let dest = self.tcx.mk_place_downcast_unnamed(dest, variant_index);
+ let src = self.tcx.mk_place_downcast_unnamed(src, variant_index);
+ let clone_block = self.block_index_offset(1);
+ let start_block = self.block(
+ vec![self.make_statement(StatementKind::SetDiscriminant {
+ place: Box::new(Place::return_place()),
+ variant_index,
+ })],
+ TerminatorKind::Goto { target: clone_block },
+ false,
+ );
+ cases.push((index as u128, start_block));
+ let _final_cleanup_block = self.clone_fields(dest, src, target, unwind, state_tys);
+ }
+ let discr_ty = substs.discr_ty(self.tcx);
+ let temp = self.make_place(Mutability::Mut, discr_ty);
+ let rvalue = Rvalue::Discriminant(src);
+ let statement = self.make_statement(StatementKind::Assign(Box::new((temp, rvalue))));
+ match &mut self.blocks[switch] {
+ BasicBlockData { statements, terminator: Some(Terminator { kind, .. }), .. } => {
+ statements.push(statement);
+ *kind = TerminatorKind::SwitchInt {
+ discr: Operand::Move(temp),
+ switch_ty: discr_ty,
+ targets: SwitchTargets::new(cases.into_iter(), unreachable),
+ };
+ }
+ BasicBlockData { terminator: None, .. } => unreachable!(),
+ }
}
}
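
As an aside for readers following the new `clone_fields` helper above: the "2*n + 1 blocks" layout its comments describe can be modeled outside rustc. The sketch below is plain Rust with no rustc APIs; `BlockKind` and `clone_fields_layout` are hypothetical names used only to mirror the block indexing documented in the patch.

```rust
// Standalone model (not rustc code) of the block layout `clone_fields` builds for n
// fields: block 2*i clones field i, block 2*i + 1 is its unwind/drop block, and one
// final block jumps to the caller-supplied target once every clone has succeeded.
#[derive(Debug)]
enum BlockKind {
    Clone { field: usize, on_success: usize, on_unwind: usize },
    DropOnUnwind { field: usize, then_unwind_to: usize },
    GotoTarget,
}

fn clone_fields_layout(n_fields: usize, first_block: usize, mut unwind: usize) -> Vec<BlockKind> {
    let mut blocks = Vec::with_capacity(2 * n_fields + 1);
    for i in 0..n_fields {
        let clone_block = first_block + 2 * i;
        // Clone block: on success, continue to the next clone block (or the final
        // goto); on unwind, drop everything cloned so far via the current unwind chain.
        blocks.push(BlockKind::Clone {
            field: i,
            on_success: clone_block + 2,
            on_unwind: unwind,
        });
        // Unwind block: drop this field's clone, then keep unwinding.
        blocks.push(BlockKind::DropOnUnwind { field: i, then_unwind_to: unwind });
        unwind = clone_block + 1;
    }
    blocks.push(BlockKind::GotoTarget);
    blocks
}

fn main() {
    // Three fields, unwind chain rooted at block 1, clones starting at block 3,
    // mirroring the Goto/Resume/Return preamble that `tuple_like_shim` emits first.
    for (offset, block) in clone_fields_layout(3, 3, 1).iter().enumerate() {
        println!("bb{}: {:?}", offset + 3, block);
    }
}
```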
diff --git a/compiler/rustc_mir_transform/src/simplify.rs b/compiler/rustc_mir_transform/src/simplify.rs
index 180f4c7dc..57d372fda 100644
--- a/compiler/rustc_mir_transform/src/simplify.rs
+++ b/compiler/rustc_mir_transform/src/simplify.rs
@@ -74,7 +74,7 @@ pub struct CfgSimplifier<'a, 'tcx> {
impl<'a, 'tcx> CfgSimplifier<'a, 'tcx> {
pub fn new(body: &'a mut Body<'tcx>) -> Self {
- let mut pred_count = IndexVec::from_elem(0u32, body.basic_blocks());
+ let mut pred_count = IndexVec::from_elem(0u32, &body.basic_blocks);
// we can't use mir.predecessors() here because that counts
// dead blocks, which we don't want to.
@@ -263,7 +263,7 @@ impl<'a, 'tcx> CfgSimplifier<'a, 'tcx> {
pub fn remove_dead_blocks<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
let reachable = traversal::reachable_as_bitset(body);
- let num_blocks = body.basic_blocks().len();
+ let num_blocks = body.basic_blocks.len();
if num_blocks == reachable.count() {
return;
}
@@ -412,7 +412,7 @@ pub fn simplify_locals<'tcx>(body: &mut Body<'tcx>, tcx: TyCtxt<'tcx>) {
if map.iter().any(Option::is_none) {
// Update references to all vars and tmps now
let mut updater = LocalUpdater { map, tcx };
- updater.visit_body(body);
+ updater.visit_body_preserves_cfg(body);
body.local_decls.shrink_to_fit();
}
@@ -499,7 +499,7 @@ impl UsedLocals {
impl<'tcx> Visitor<'tcx> for UsedLocals {
fn visit_statement(&mut self, statement: &Statement<'tcx>, location: Location) {
match statement.kind {
- StatementKind::CopyNonOverlapping(..)
+ StatementKind::Intrinsic(..)
| StatementKind::Retag(..)
| StatementKind::Coverage(..)
| StatementKind::FakeRead(..)
@@ -548,7 +548,7 @@ fn remove_unused_definitions(used_locals: &mut UsedLocals, body: &mut Body<'_>)
while modified {
modified = false;
- for data in body.basic_blocks_mut() {
+ for data in body.basic_blocks.as_mut_preserves_cfg() {
// Remove unnecessary StorageLive and StorageDead annotations.
data.statements.retain(|statement| {
let keep = match &statement.kind {
diff --git a/compiler/rustc_mir_transform/src/simplify_comparison_integral.rs b/compiler/rustc_mir_transform/src/simplify_comparison_integral.rs
index bbfaace70..321d8c63b 100644
--- a/compiler/rustc_mir_transform/src/simplify_comparison_integral.rs
+++ b/compiler/rustc_mir_transform/src/simplify_comparison_integral.rs
@@ -151,7 +151,7 @@ struct OptimizationFinder<'a, 'tcx> {
impl<'tcx> OptimizationFinder<'_, 'tcx> {
fn find_optimizations(&self) -> Vec<OptimizationInfo<'tcx>> {
self.body
- .basic_blocks()
+ .basic_blocks
.iter_enumerated()
.filter_map(|(bb_idx, bb)| {
// find switch
diff --git a/compiler/rustc_mir_transform/src/simplify_try.rs b/compiler/rustc_mir_transform/src/simplify_try.rs
index d52f1261b..baeb620ef 100644
--- a/compiler/rustc_mir_transform/src/simplify_try.rs
+++ b/compiler/rustc_mir_transform/src/simplify_try.rs
@@ -596,7 +596,7 @@ struct SimplifyBranchSameOptimizationFinder<'a, 'tcx> {
impl<'tcx> SimplifyBranchSameOptimizationFinder<'_, 'tcx> {
fn find(&self) -> Vec<SimplifyBranchSameOptimization> {
self.body
- .basic_blocks()
+ .basic_blocks
.iter_enumerated()
.filter_map(|(bb_idx, bb)| {
let (discr_switched_on, targets_and_values) = match &bb.terminator().kind {
@@ -632,7 +632,7 @@ impl<'tcx> SimplifyBranchSameOptimizationFinder<'_, 'tcx> {
let mut iter_bbs_reachable = targets_and_values
.iter()
- .map(|target_and_value| (target_and_value, &self.body.basic_blocks()[target_and_value.target]))
+ .map(|target_and_value| (target_and_value, &self.body.basic_blocks[target_and_value.target]))
.filter(|(_, bb)| {
// Reaching `unreachable` is UB so assume it doesn't happen.
bb.terminator().kind != TerminatorKind::Unreachable
diff --git a/compiler/rustc_mir_transform/src/uninhabited_enum_branching.rs b/compiler/rustc_mir_transform/src/uninhabited_enum_branching.rs
index 30be64f5b..96ea15f1b 100644
--- a/compiler/rustc_mir_transform/src/uninhabited_enum_branching.rs
+++ b/compiler/rustc_mir_transform/src/uninhabited_enum_branching.rs
@@ -79,7 +79,7 @@ fn ensure_otherwise_unreachable<'tcx>(
targets: &SwitchTargets,
) -> Option<BasicBlockData<'tcx>> {
let otherwise = targets.otherwise();
- let bb = &body.basic_blocks()[otherwise];
+ let bb = &body.basic_blocks[otherwise];
if bb.terminator().kind == TerminatorKind::Unreachable
&& bb.statements.iter().all(|s| matches!(&s.kind, StatementKind::StorageDead(_)))
{
@@ -102,10 +102,10 @@ impl<'tcx> MirPass<'tcx> for UninhabitedEnumBranching {
fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
trace!("UninhabitedEnumBranching starting for {:?}", body.source);
- for bb in body.basic_blocks().indices() {
+ for bb in body.basic_blocks.indices() {
trace!("processing block {:?}", bb);
- let Some(discriminant_ty) = get_switched_on_type(&body.basic_blocks()[bb], tcx, body) else {
+ let Some(discriminant_ty) = get_switched_on_type(&body.basic_blocks[bb], tcx, body) else {
continue;
};
diff --git a/compiler/rustc_mir_transform/src/unreachable_prop.rs b/compiler/rustc_mir_transform/src/unreachable_prop.rs
index f916ca362..95fda2eaf 100644
--- a/compiler/rustc_mir_transform/src/unreachable_prop.rs
+++ b/compiler/rustc_mir_transform/src/unreachable_prop.rs
@@ -12,9 +12,8 @@ pub struct UnreachablePropagation;
impl MirPass<'_> for UnreachablePropagation {
fn is_enabled(&self, sess: &rustc_session::Session) -> bool {
- // Enable only under -Zmir-opt-level=4 as in some cases (check the deeply-nested-opt
- // perf benchmark) LLVM may spend quite a lot of time optimizing the generated code.
- sess.mir_opt_level() >= 4
+ // Enable only under -Zmir-opt-level=2 as this can make programs less debuggable.
+ sess.mir_opt_level() >= 2
}
fn run_pass<'tcx>(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
@@ -38,7 +37,19 @@ impl MirPass<'_> for UnreachablePropagation {
}
}
+ // We do want to keep some unreachable blocks, but make them empty.
+ for bb in unreachable_blocks {
+ if !tcx.consider_optimizing(|| {
+ format!("UnreachablePropagation {:?} ", body.source.def_id())
+ }) {
+ break;
+ }
+
+ body.basic_blocks_mut()[bb].statements.clear();
+ }
+
let replaced = !replacements.is_empty();
+
for (bb, terminator_kind) in replacements {
if !tcx.consider_optimizing(|| {
format!("UnreachablePropagation {:?} ", body.source.def_id())
@@ -57,42 +68,55 @@ impl MirPass<'_> for UnreachablePropagation {
fn remove_successors<'tcx, F>(
terminator_kind: &TerminatorKind<'tcx>,
- predicate: F,
+ is_unreachable: F,
) -> Option<TerminatorKind<'tcx>>
where
F: Fn(BasicBlock) -> bool,
{
- let terminator = match *terminator_kind {
- TerminatorKind::Goto { target } if predicate(target) => TerminatorKind::Unreachable,
- TerminatorKind::SwitchInt { ref discr, switch_ty, ref targets } => {
+ let terminator = match terminator_kind {
+ // This will unconditionally run into an unreachable and is therefore unreachable as well.
+ TerminatorKind::Goto { target } if is_unreachable(*target) => TerminatorKind::Unreachable,
+ TerminatorKind::SwitchInt { targets, discr, switch_ty } => {
let otherwise = targets.otherwise();
- let original_targets_len = targets.iter().len() + 1;
- let (mut values, mut targets): (Vec<_>, Vec<_>) =
- targets.iter().filter(|(_, bb)| !predicate(*bb)).unzip();
+ // If all targets are unreachable, we can be unreachable as well.
+ if targets.all_targets().iter().all(|bb| is_unreachable(*bb)) {
+ TerminatorKind::Unreachable
+ } else if is_unreachable(otherwise) {
+ // If there are multiple targets, don't delete unreachable branches (like an unreachable otherwise)
+ // unless the otherwise target is itself unreachable. In that case, deleting a normal branch merges
+ // its values into the otherwise, which keeps them unreachable.
+ // Deleting them in any other case loses information about reachability, causing worse codegen.
+ // For example (see src/test/codegen/match-optimizes-away.rs)
+ //
+ // pub enum Two { A, B }
+ // pub fn identity(x: Two) -> Two {
+ // match x {
+ // Two::A => Two::A,
+ // Two::B => Two::B,
+ // }
+ // }
+ //
+ // This generates a `switchInt() -> [0: 0, 1: 1, otherwise: unreachable]`, which allows us or LLVM to
+ // turn it into just `x` later. Without the unreachable, such a transformation would be illegal.
+ // If the otherwise branch is unreachable, we can delete all other unreachable targets, as they will
+ // still point to the unreachable otherwise and therefore not lose reachability information.
+ let reachable_iter = targets.iter().filter(|(_, bb)| !is_unreachable(*bb));
- if !predicate(otherwise) {
- targets.push(otherwise);
- } else {
- values.pop();
- }
+ let new_targets = SwitchTargets::new(reachable_iter, otherwise);
- let retained_targets_len = targets.len();
+ // No unreachable branches were removed.
+ if new_targets.all_targets().len() == targets.all_targets().len() {
+ return None;
+ }
- if targets.is_empty() {
- TerminatorKind::Unreachable
- } else if targets.len() == 1 {
- TerminatorKind::Goto { target: targets[0] }
- } else if original_targets_len != retained_targets_len {
TerminatorKind::SwitchInt {
discr: discr.clone(),
- switch_ty,
- targets: SwitchTargets::new(
- values.iter().copied().zip(targets.iter().copied()),
- *targets.last().unwrap(),
- ),
+ switch_ty: *switch_ty,
+ targets: new_targets,
}
} else {
+ // If the otherwise branch is reachable, we don't want to delete any unreachable branches.
return None;
}
}
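
To make the pruning rule in the comments above concrete, here is a minimal standalone sketch; it is not the rustc `SwitchTargets` API, just the decision logic the comments describe (prune only when `otherwise` is unreachable), with a hypothetical `prune_switch_targets` helper.

```rust
// Standalone sketch (not the rustc API): unreachable SwitchInt targets may only be
// removed when `otherwise` is itself unreachable, so the removed values still fall
// into an unreachable block and no reachability information is lost.
fn prune_switch_targets(
    targets: &[(u128, usize)],
    otherwise: usize,
    is_unreachable: impl Fn(usize) -> bool,
) -> Option<Vec<(u128, usize)>> {
    if !is_unreachable(otherwise) {
        // A reachable otherwise would absorb the removed values and lose the
        // "this value cannot occur" information, so keep every branch.
        return None;
    }
    let pruned: Vec<_> =
        targets.iter().copied().filter(|&(_, bb)| !is_unreachable(bb)).collect();
    // Only report a change if something was actually removed.
    (pruned.len() != targets.len()).then_some(pruned)
}

fn main() {
    // Block ids 0..4; blocks 0 and 3 are unreachable.
    let unreachable = [true, false, false, true];
    // Otherwise (bb3) is unreachable, so the dead branch to bb0 may be dropped.
    let pruned = prune_switch_targets(&[(0u128, 0usize), (1, 1)], 3, |bb| unreachable[bb]);
    assert_eq!(pruned, Some(vec![(1u128, 1usize)]));
    // With a reachable otherwise (bb2), nothing may be pruned.
    assert_eq!(prune_switch_targets(&[(0u128, 0usize), (1, 1)], 2, |bb| unreachable[bb]), None);
}
```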
diff --git a/compiler/rustc_monomorphize/Cargo.toml b/compiler/rustc_monomorphize/Cargo.toml
index 41ba4d4b6..6ee5330b6 100644
--- a/compiler/rustc_monomorphize/Cargo.toml
+++ b/compiler/rustc_monomorphize/Cargo.toml
@@ -4,14 +4,15 @@ version = "0.0.0"
edition = "2021"
[lib]
-doctest = false
[dependencies]
-smallvec = { version = "1.8.1", features = ["union", "may_dangle"] }
+smallvec = { version = "1.8.1", features = [ "union", "may_dangle" ] }
tracing = "0.1"
rustc_data_structures = { path = "../rustc_data_structures" }
+rustc_errors = { path = "../rustc_errors" }
rustc_hir = { path = "../rustc_hir" }
rustc_index = { path = "../rustc_index" }
+rustc_macros = { path = "../rustc_macros" }
rustc_middle = { path = "../rustc_middle" }
rustc_session = { path = "../rustc_session" }
rustc_span = { path = "../rustc_span" }
diff --git a/compiler/rustc_monomorphize/src/collector.rs b/compiler/rustc_monomorphize/src/collector.rs
index 68b65658c..a71218e69 100644
--- a/compiler/rustc_monomorphize/src/collector.rs
+++ b/compiler/rustc_monomorphize/src/collector.rs
@@ -112,12 +112,6 @@
//! method in operand position, we treat it as a neighbor of the current
//! mono item. Calls are just a special case of that.
//!
-//! #### Closures
-//! In a way, closures are a simple case. Since every closure object needs to be
-//! constructed somewhere, we can reliably discover them by observing
-//! `RValue::Aggregate` expressions with `AggregateKind::Closure`. This is also
-//! true for closures inlined from other crates.
-//!
//! #### Drop glue
//! Drop glue mono items are introduced by MIR drop-statements. The
//! generated mono item will again have drop-glue item neighbors if the
@@ -128,7 +122,7 @@
//! #### Unsizing Casts
//! A subtle way of introducing neighbor edges is by casting to a trait object.
//! Since the resulting fat-pointer contains a reference to a vtable, we need to
-//! instantiate all object-save methods of the trait, as we need to store
+//! instantiate all object-safe methods of the trait, as we need to store
//! pointers to these functions even if they never get called anywhere. This can
//! be seen as a special case of taking a function reference.
//!
@@ -207,6 +201,8 @@ use std::iter;
use std::ops::Range;
use std::path::PathBuf;
+use crate::errors::{LargeAssignmentsLint, RecursionLimit, RequiresLangItem, TypeLengthLimit};
+
#[derive(PartialEq)]
pub enum MonoItemCollectionMode {
Eager,
@@ -417,7 +413,6 @@ fn collect_items_rec<'tcx>(
// We've been here already, no need to search again.
return;
}
- debug!("BEGIN collect_items_rec({})", starting_point.node);
let mut neighbors = MonoItems { compute_inlining: true, tcx, items: Vec::new() };
let recursion_depth_reset;
@@ -461,7 +456,7 @@ fn collect_items_rec<'tcx>(
recursion_depth_reset = None;
if let Ok(alloc) = tcx.eval_static_initializer(def_id) {
- for &id in alloc.inner().relocations().values() {
+ for &id in alloc.inner().provenance().values() {
collect_miri(tcx, id, &mut neighbors);
}
}
@@ -543,8 +538,6 @@ fn collect_items_rec<'tcx>(
if let Some((def_id, depth)) = recursion_depth_reset {
recursion_depths.insert(def_id, depth);
}
-
- debug!("END collect_items_rec({})", starting_point.node);
}
/// Format instance name that is already known to be too long for rustc.
@@ -604,17 +597,24 @@ fn check_recursion_limit<'tcx>(
// more than the recursion limit is assumed to be causing an
// infinite expansion.
if !recursion_limit.value_within_limit(adjusted_recursion_depth) {
+ let def_span = tcx.def_span(def_id);
+ let def_path_str = tcx.def_path_str(def_id);
let (shrunk, written_to_path) = shrunk_instance_name(tcx, &instance, 32, 32);
- let error = format!("reached the recursion limit while instantiating `{}`", shrunk);
- let mut err = tcx.sess.struct_span_fatal(span, &error);
- err.span_note(
- tcx.def_span(def_id),
- &format!("`{}` defined here", tcx.def_path_str(def_id)),
- );
- if let Some(path) = written_to_path {
- err.note(&format!("the full type name has been written to '{}'", path.display()));
- }
- err.emit()
+ let mut path = PathBuf::new();
+ let was_written = if written_to_path.is_some() {
+ path = written_to_path.unwrap();
+ Some(())
+ } else {
+ None
+ };
+ tcx.sess.emit_fatal(RecursionLimit {
+ span,
+ shrunk,
+ def_span,
+ def_path_str,
+ was_written,
+ path,
+ });
}
recursion_depths.insert(def_id, recursion_depth + 1);
@@ -642,16 +642,15 @@ fn check_type_length_limit<'tcx>(tcx: TyCtxt<'tcx>, instance: Instance<'tcx>) {
// Bail out in these cases to avoid that bad user experience.
if !tcx.type_length_limit().value_within_limit(type_length) {
let (shrunk, written_to_path) = shrunk_instance_name(tcx, &instance, 32, 32);
- let msg = format!("reached the type-length limit while instantiating `{}`", shrunk);
- let mut diag = tcx.sess.struct_span_fatal(tcx.def_span(instance.def_id()), &msg);
- if let Some(path) = written_to_path {
- diag.note(&format!("the full type name has been written to '{}'", path.display()));
- }
- diag.help(&format!(
- "consider adding a `#![type_length_limit=\"{}\"]` attribute to your crate",
- type_length
- ));
- diag.emit()
+ let span = tcx.def_span(instance.def_id());
+ let mut path = PathBuf::new();
+ let was_written = if written_to_path.is_some() {
+ path = written_to_path.unwrap();
+ Some(())
+ } else {
+ None
+ };
+ tcx.sess.emit_fatal(TypeLengthLimit { span, shrunk, was_written, path, type_length });
}
}
@@ -690,7 +689,8 @@ impl<'a, 'tcx> MirVisitor<'tcx> for MirNeighborCollector<'a, 'tcx> {
mir::CastKind::Pointer(PointerCast::Unsize),
ref operand,
target_ty,
- ) => {
+ )
+ | mir::Rvalue::Cast(mir::CastKind::DynStar, ref operand, target_ty) => {
let target_ty = self.monomorphize(target_ty);
let source_ty = operand.ty(self.body, self.tcx);
let source_ty = self.monomorphize(source_ty);
@@ -699,7 +699,9 @@ impl<'a, 'tcx> MirVisitor<'tcx> for MirNeighborCollector<'a, 'tcx> {
// This could also be a different Unsize instruction, like
// from a fixed sized array to a slice. But we are only
// interested in things that produce a vtable.
- if target_ty.is_trait() && !source_ty.is_trait() {
+ if (target_ty.is_trait() && !source_ty.is_trait())
+ || (target_ty.is_dyn_star() && !source_ty.is_dyn_star())
+ {
create_mono_items_for_vtable_methods(
self.tcx,
target_ty,
@@ -768,7 +770,7 @@ impl<'a, 'tcx> MirVisitor<'tcx> for MirNeighborCollector<'a, 'tcx> {
ty::ConstKind::Unevaluated(ct) => {
debug!(?ct);
let param_env = ty::ParamEnv::reveal_all();
- match self.tcx.const_eval_resolve(param_env, ct, None) {
+ match self.tcx.const_eval_resolve(param_env, ct.expand(), None) {
// The `monomorphize` call should have evaluated that constant already.
Ok(val) => val,
Err(ErrorHandled::Reported(_) | ErrorHandled::Linted) => return,
@@ -781,44 +783,22 @@ impl<'a, 'tcx> MirVisitor<'tcx> for MirNeighborCollector<'a, 'tcx> {
}
_ => return,
},
- };
- collect_const_value(self.tcx, val, self.output);
- self.visit_ty(literal.ty(), TyContext::Location(location));
- }
-
- #[instrument(skip(self), level = "debug")]
- fn visit_const(&mut self, constant: ty::Const<'tcx>, location: Location) {
- debug!("visiting const {:?} @ {:?}", constant, location);
-
- let substituted_constant = self.monomorphize(constant);
- let param_env = ty::ParamEnv::reveal_all();
-
- match substituted_constant.kind() {
- ty::ConstKind::Value(val) => {
- let const_val = self.tcx.valtree_to_const_val((constant.ty(), val));
- collect_const_value(self.tcx, const_val, self.output)
- }
- ty::ConstKind::Unevaluated(unevaluated) => {
- match self.tcx.const_eval_resolve(param_env, unevaluated, None) {
+ mir::ConstantKind::Unevaluated(uv, _) => {
+ let param_env = ty::ParamEnv::reveal_all();
+ match self.tcx.const_eval_resolve(param_env, uv, None) {
// The `monomorphize` call should have evaluated that constant already.
- Ok(val) => span_bug!(
- self.body.source_info(location).span,
- "collection encountered the unevaluated constant {} which evaluated to {:?}",
- substituted_constant,
- val
- ),
- Err(ErrorHandled::Reported(_) | ErrorHandled::Linted) => {}
+ Ok(val) => val,
+ Err(ErrorHandled::Reported(_) | ErrorHandled::Linted) => return,
Err(ErrorHandled::TooGeneric) => span_bug!(
self.body.source_info(location).span,
- "collection encountered polymorphic constant: {}",
- substituted_constant
+ "collection encountered polymorphic constant: {:?}",
+ literal
),
}
}
- _ => {}
- }
-
- self.super_const(constant);
+ };
+ collect_const_value(self.tcx, val, self.output);
+ MirVisitor::visit_ty(self, literal.ty(), TyContext::Location(location));
}
fn visit_terminator(&mut self, terminator: &mir::Terminator<'tcx>, location: Location) {
@@ -830,7 +810,7 @@ impl<'a, 'tcx> MirVisitor<'tcx> for MirNeighborCollector<'a, 'tcx> {
mir::TerminatorKind::Call { ref func, .. } => {
let callee_ty = func.ty(self.body, tcx);
let callee_ty = self.monomorphize(callee_ty);
- visit_fn_use(self.tcx, callee_ty, true, source, &mut self.output);
+ visit_fn_use(self.tcx, callee_ty, true, source, &mut self.output)
}
mir::TerminatorKind::Drop { ref place, .. }
| mir::TerminatorKind::DropAndReplace { ref place, .. } => {
@@ -914,17 +894,16 @@ impl<'a, 'tcx> MirVisitor<'tcx> for MirNeighborCollector<'a, 'tcx> {
// but correct span? This would make the lint at least accept crate-level lint attributes.
return;
};
- self.tcx.struct_span_lint_hir(
+ self.tcx.emit_spanned_lint(
LARGE_ASSIGNMENTS,
lint_root,
source_info.span,
- |lint| {
- let mut err = lint.build(&format!("moving {} bytes", layout.size.bytes()));
- err.span_label(source_info.span, "value moved from here");
- err.note(&format!(r#"The current maximum size is {}, but it can be customized with the move_size_limit attribute: `#![move_size_limit = "..."]`"#, limit.bytes()));
- err.emit();
+ LargeAssignmentsLint {
+ span: source_info.span,
+ size: layout.size.bytes(),
+ limit: limit.bytes(),
},
- );
+ )
}
}
}
@@ -1027,6 +1006,11 @@ fn should_codegen_locally<'tcx>(tcx: TyCtxt<'tcx>, instance: &Instance<'tcx>) ->
return false;
}
+ if let DefKind::Static(_) = tcx.def_kind(def_id) {
+ // We cannot monomorphize statics from upstream crates.
+ return false;
+ }
+
if !tcx.is_mir_available(def_id) {
bug!("no MIR available for {:?}", def_id);
}
@@ -1039,10 +1023,12 @@ fn should_codegen_locally<'tcx>(tcx: TyCtxt<'tcx>, instance: &Instance<'tcx>) ->
/// them.
///
/// For example, the source type might be `&SomeStruct` and the target type
-/// might be `&SomeTrait` in a cast like:
+/// might be `&dyn SomeTrait` in a cast like:
///
+/// ```rust,ignore (not real code)
/// let src: &SomeStruct = ...;
-/// let target = src as &SomeTrait;
+/// let target = src as &dyn SomeTrait;
+/// ```
///
/// Then the output of this function would be (SomeStruct, SomeTrait) since for
/// constructing the `target` fat-pointer we need the vtable for that pair.
@@ -1063,8 +1049,10 @@ fn should_codegen_locally<'tcx>(tcx: TyCtxt<'tcx>, instance: &Instance<'tcx>) ->
/// for the pair of `T` (which is a trait) and the concrete type that `T` was
/// originally coerced from:
///
+/// ```rust,ignore (not real code)
/// let src: &ComplexStruct<SomeStruct> = ...;
-/// let target = src as &ComplexStruct<SomeTrait>;
+/// let target = src as &ComplexStruct<dyn SomeTrait>;
+/// ```
///
/// Again, we want this `find_vtable_types_for_unsizing()` to provide the pair
/// `(SomeStruct, SomeTrait)`.
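
The doc comment above can be exercised with ordinary Rust. The snippet below is only an illustration (not collector code) of a cast that yields such a `(SomeStruct, SomeTrait)` pair; the trait methods are hypothetical.

```rust
// Hedged illustration of the unsizing cast the doc comment describes: creating a
// `&dyn` fat pointer requires the vtable, so every object-safe method of the trait
// becomes a mono item even if it is never called.
trait SomeTrait {
    fn used(&self) -> u32;
    fn never_called(&self) -> u32 {
        0
    }
}

struct SomeStruct;

impl SomeTrait for SomeStruct {
    fn used(&self) -> u32 {
        1
    }
}

fn main() {
    let src: &SomeStruct = &SomeStruct;
    // The cast below is the kind of unsizing that `find_vtable_types_for_unsizing`
    // resolves to the pair (SomeStruct, SomeTrait); the vtable references both methods.
    let target: &dyn SomeTrait = src;
    assert_eq!(target.used(), 1);
}
```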
@@ -1079,7 +1067,7 @@ fn find_vtable_types_for_unsizing<'tcx>(
let ptr_vtable = |inner_source: Ty<'tcx>, inner_target: Ty<'tcx>| {
let param_env = ty::ParamEnv::reveal_all();
let type_has_metadata = |ty: Ty<'tcx>| -> bool {
- if ty.is_sized(tcx.at(DUMMY_SP), param_env) {
+ if ty.is_sized(tcx, param_env) {
return false;
}
let tail = tcx.struct_tail_erasing_lifetimes(ty, param_env);
@@ -1105,6 +1093,9 @@ fn find_vtable_types_for_unsizing<'tcx>(
ptr_vtable(source_ty.boxed_ty(), target_ty.boxed_ty())
}
+ // T as dyn* Trait
+ (_, &ty::Dynamic(_, _, ty::DynStar)) => ptr_vtable(source_ty, target_ty),
+
(&ty::Adt(source_adt_def, source_substs), &ty::Adt(target_adt_def, target_substs)) => {
assert_eq!(source_adt_def, target_adt_def);
@@ -1132,23 +1123,18 @@ fn find_vtable_types_for_unsizing<'tcx>(
}
}
-#[instrument(skip(tcx), level = "debug")]
+#[instrument(skip(tcx), level = "debug", ret)]
fn create_fn_mono_item<'tcx>(
tcx: TyCtxt<'tcx>,
instance: Instance<'tcx>,
source: Span,
) -> Spanned<MonoItem<'tcx>> {
- debug!("create_fn_mono_item(instance={})", instance);
-
let def_id = instance.def_id();
if tcx.sess.opts.unstable_opts.profile_closures && def_id.is_local() && tcx.is_closure(def_id) {
crate::util::dump_closure_profile(tcx, instance);
}
- let respanned = respan(source, MonoItem::Fn(instance.polymorphize(tcx)));
- debug!(?respanned);
-
- respanned
+ respan(source, MonoItem::Fn(instance.polymorphize(tcx)))
}
/// Creates a `MonoItem` for each method that is referenced by the vtable for
@@ -1206,7 +1192,7 @@ struct RootCollector<'a, 'tcx> {
impl<'v> RootCollector<'_, 'v> {
fn process_item(&mut self, id: hir::ItemId) {
- match self.tcx.def_kind(id.def_id) {
+ match self.tcx.def_kind(id.owner_id) {
DefKind::Enum | DefKind::Struct | DefKind::Union => {
let item = self.tcx.hir().item(id);
match item.kind {
@@ -1217,12 +1203,14 @@ impl<'v> RootCollector<'_, 'v> {
if self.mode == MonoItemCollectionMode::Eager {
debug!(
"RootCollector: ADT drop-glue for {}",
- self.tcx.def_path_str(item.def_id.to_def_id())
+ self.tcx.def_path_str(item.owner_id.to_def_id())
);
- let ty =
- Instance::new(item.def_id.to_def_id(), InternalSubsts::empty())
- .ty(self.tcx, ty::ParamEnv::reveal_all());
+ let ty = Instance::new(
+ item.owner_id.to_def_id(),
+ InternalSubsts::empty(),
+ )
+ .ty(self.tcx, ty::ParamEnv::reveal_all());
visit_drop_use(self.tcx, ty, true, DUMMY_SP, self.output);
}
}
@@ -1233,23 +1221,23 @@ impl<'v> RootCollector<'_, 'v> {
DefKind::GlobalAsm => {
debug!(
"RootCollector: ItemKind::GlobalAsm({})",
- self.tcx.def_path_str(id.def_id.to_def_id())
+ self.tcx.def_path_str(id.owner_id.to_def_id())
);
self.output.push(dummy_spanned(MonoItem::GlobalAsm(id)));
}
DefKind::Static(..) => {
debug!(
"RootCollector: ItemKind::Static({})",
- self.tcx.def_path_str(id.def_id.to_def_id())
+ self.tcx.def_path_str(id.owner_id.to_def_id())
);
- self.output.push(dummy_spanned(MonoItem::Static(id.def_id.to_def_id())));
+ self.output.push(dummy_spanned(MonoItem::Static(id.owner_id.to_def_id())));
}
DefKind::Const => {
// const items only generate mono items if they are
// actually used somewhere. Just declaring them is insufficient.
// but even just declaring them must collect the items they refer to
- if let Ok(val) = self.tcx.const_eval_poly(id.def_id.to_def_id()) {
+ if let Ok(val) = self.tcx.const_eval_poly(id.owner_id.to_def_id()) {
collect_const_value(self.tcx, val, &mut self.output);
}
}
@@ -1260,15 +1248,15 @@ impl<'v> RootCollector<'_, 'v> {
}
}
DefKind::Fn => {
- self.push_if_root(id.def_id);
+ self.push_if_root(id.owner_id.def_id);
}
_ => {}
}
}
fn process_impl_item(&mut self, id: hir::ImplItemId) {
- if matches!(self.tcx.def_kind(id.def_id), DefKind::AssocFn) {
- self.push_if_root(id.def_id);
+ if matches!(self.tcx.def_kind(id.owner_id), DefKind::AssocFn) {
+ self.push_if_root(id.owner_id.def_id);
}
}
@@ -1293,7 +1281,7 @@ impl<'v> RootCollector<'_, 'v> {
#[instrument(skip(self), level = "debug")]
fn push_if_root(&mut self, def_id: LocalDefId) {
if self.is_root(def_id) {
- debug!("RootCollector::push_if_root: found root def_id={:?}", def_id);
+ debug!("found root");
let instance = Instance::mono(self.tcx, def_id.to_def_id());
self.output.push(create_fn_mono_item(self.tcx, instance, DUMMY_SP));
@@ -1306,13 +1294,17 @@ impl<'v> RootCollector<'_, 'v> {
/// the return type of `main`. This is not needed when
/// the user writes their own `start` manually.
fn push_extra_entry_roots(&mut self) {
- let Some((main_def_id, EntryFnType::Main)) = self.entry_fn else {
+ let Some((main_def_id, EntryFnType::Main { .. })) = self.entry_fn else {
return;
};
let start_def_id = match self.tcx.lang_items().require(LangItem::Start) {
Ok(s) => s,
- Err(err) => self.tcx.sess.fatal(&err),
+ Err(lang_item_err) => {
+ self.tcx
+ .sess
+ .emit_fatal(RequiresLangItem { lang_item: lang_item_err.0.name().to_string() });
+ }
};
let main_ret_ty = self.tcx.fn_sig(main_def_id).output();
@@ -1362,13 +1354,13 @@ fn create_mono_items_for_default_impls<'tcx>(
debug!(
"create_mono_items_for_default_impls(item={})",
- tcx.def_path_str(item.def_id.to_def_id())
+ tcx.def_path_str(item.owner_id.to_def_id())
);
- if let Some(trait_ref) = tcx.impl_trait_ref(item.def_id) {
+ if let Some(trait_ref) = tcx.impl_trait_ref(item.owner_id) {
let param_env = ty::ParamEnv::reveal_all();
let trait_ref = tcx.normalize_erasing_regions(param_env, trait_ref);
- let overridden_methods = tcx.impl_item_implementor_ids(item.def_id);
+ let overridden_methods = tcx.impl_item_implementor_ids(item.owner_id);
for method in tcx.provided_trait_methods(trait_ref.def_id) {
if overridden_methods.contains_key(&method.def_id) {
continue;
@@ -1415,7 +1407,7 @@ fn collect_miri<'tcx>(tcx: TyCtxt<'tcx>, alloc_id: AllocId, output: &mut MonoIte
}
GlobalAlloc::Memory(alloc) => {
trace!("collecting {:?} with {:#?}", alloc_id, alloc);
- for &inner in alloc.inner().relocations().values() {
+ for &inner in alloc.inner().provenance().values() {
rustc_data_structures::stack::ensure_sufficient_stack(|| {
collect_miri(tcx, inner, output);
});
@@ -1454,7 +1446,7 @@ fn collect_const_value<'tcx>(
match value {
ConstValue::Scalar(Scalar::Ptr(ptr, _size)) => collect_miri(tcx, ptr.provenance, output),
ConstValue::Slice { data: alloc, start: _, end: _ } | ConstValue::ByRef { alloc, .. } => {
- for &id in alloc.inner().relocations().values() {
+ for &id in alloc.inner().provenance().values() {
collect_miri(tcx, id, output);
}
}
diff --git a/compiler/rustc_monomorphize/src/errors.rs b/compiler/rustc_monomorphize/src/errors.rs
new file mode 100644
index 000000000..ce097b8d8
--- /dev/null
+++ b/compiler/rustc_monomorphize/src/errors.rs
@@ -0,0 +1,84 @@
+use std::path::PathBuf;
+
+use rustc_errors::ErrorGuaranteed;
+use rustc_errors::IntoDiagnostic;
+use rustc_macros::{Diagnostic, LintDiagnostic};
+use rustc_span::Span;
+
+#[derive(Diagnostic)]
+#[diag(monomorphize_recursion_limit)]
+pub struct RecursionLimit {
+ #[primary_span]
+ pub span: Span,
+ pub shrunk: String,
+ #[note]
+ pub def_span: Span,
+ pub def_path_str: String,
+ #[note(monomorphize_written_to_path)]
+ pub was_written: Option<()>,
+ pub path: PathBuf,
+}
+
+#[derive(Diagnostic)]
+#[diag(monomorphize_type_length_limit)]
+#[help(monomorphize_consider_type_length_limit)]
+pub struct TypeLengthLimit {
+ #[primary_span]
+ pub span: Span,
+ pub shrunk: String,
+ #[note(monomorphize_written_to_path)]
+ pub was_written: Option<()>,
+ pub path: PathBuf,
+ pub type_length: usize,
+}
+
+#[derive(Diagnostic)]
+#[diag(monomorphize_requires_lang_item)]
+pub struct RequiresLangItem {
+ pub lang_item: String,
+}
+
+pub struct UnusedGenericParams {
+ pub span: Span,
+ pub param_spans: Vec<Span>,
+ pub param_names: Vec<String>,
+}
+
+impl IntoDiagnostic<'_> for UnusedGenericParams {
+ fn into_diagnostic(
+ self,
+ handler: &'_ rustc_errors::Handler,
+ ) -> rustc_errors::DiagnosticBuilder<'_, ErrorGuaranteed> {
+ let mut diag = handler.struct_err(rustc_errors::fluent::monomorphize_unused_generic_params);
+ diag.set_span(self.span);
+ for (span, name) in self.param_spans.into_iter().zip(self.param_names) {
+ // FIXME: I can figure out how to do a label with a fluent string with a fixed message,
+ // or a label with a dynamic value in a hard-coded string, but I haven't figured out
+ // how to combine the two. 😢
+ diag.span_label(span, format!("generic parameter `{}` is unused", name));
+ }
+ diag
+ }
+}
+
+#[derive(LintDiagnostic)]
+#[diag(monomorphize_large_assignments)]
+#[note]
+pub struct LargeAssignmentsLint {
+ #[label]
+ pub span: Span,
+ pub size: u64,
+ pub limit: u64,
+}
+
+#[derive(Diagnostic)]
+#[diag(monomorphize_unknown_partition_strategy)]
+pub struct UnknownPartitionStrategy;
+
+#[derive(Diagnostic)]
+#[diag(monomorphize_symbol_already_defined)]
+pub struct SymbolAlreadyDefined {
+ #[primary_span]
+ pub span: Option<Span>,
+ pub symbol: String,
+}
diff --git a/compiler/rustc_monomorphize/src/lib.rs b/compiler/rustc_monomorphize/src/lib.rs
index ef4560b5e..42781bd25 100644
--- a/compiler/rustc_monomorphize/src/lib.rs
+++ b/compiler/rustc_monomorphize/src/lib.rs
@@ -1,8 +1,9 @@
#![feature(array_windows)]
#![feature(control_flow_enum)]
-#![feature(let_else)]
#![recursion_limit = "256"]
#![allow(rustc::potential_query_instability)]
+#![deny(rustc::untranslatable_diagnostic)]
+#![deny(rustc::diagnostic_outside_of_impl)]
#[macro_use]
extern crate tracing;
@@ -16,6 +17,7 @@ use rustc_middle::ty::query::Providers;
use rustc_middle::ty::{self, Ty, TyCtxt};
mod collector;
+mod errors;
mod partitioning;
mod polymorphize;
mod util;
@@ -32,7 +34,7 @@ fn custom_coerce_unsize_info<'tcx>(
substs: tcx.mk_substs_trait(source_ty, &[target_ty.into()]),
});
- match tcx.codegen_fulfill_obligation((ty::ParamEnv::reveal_all(), trait_ref)) {
+ match tcx.codegen_select_candidate((ty::ParamEnv::reveal_all(), trait_ref)) {
Ok(traits::ImplSource::UserDefined(traits::ImplSourceUserDefinedData {
impl_def_id,
..
diff --git a/compiler/rustc_monomorphize/src/partitioning/default.rs b/compiler/rustc_monomorphize/src/partitioning/default.rs
index 15276569c..29009c480 100644
--- a/compiler/rustc_monomorphize/src/partitioning/default.rs
+++ b/compiler/rustc_monomorphize/src/partitioning/default.rs
@@ -319,7 +319,7 @@ fn characteristic_def_id_of_mono_item<'tcx>(
Some(def_id)
}
MonoItem::Static(def_id) => Some(def_id),
- MonoItem::GlobalAsm(item_id) => Some(item_id.def_id.to_def_id()),
+ MonoItem::GlobalAsm(item_id) => Some(item_id.owner_id.to_def_id()),
}
}
@@ -411,9 +411,9 @@ fn mono_item_visibility<'tcx>(
};
}
MonoItem::GlobalAsm(item_id) => {
- return if tcx.is_reachable_non_generic(item_id.def_id) {
+ return if tcx.is_reachable_non_generic(item_id.owner_id) {
*can_be_internalized = false;
- default_visibility(tcx, item_id.def_id.to_def_id(), false)
+ default_visibility(tcx, item_id.owner_id.to_def_id(), false)
} else {
Visibility::Hidden
};
diff --git a/compiler/rustc_monomorphize/src/partitioning/mod.rs b/compiler/rustc_monomorphize/src/partitioning/mod.rs
index ff2d38693..932edc667 100644
--- a/compiler/rustc_monomorphize/src/partitioning/mod.rs
+++ b/compiler/rustc_monomorphize/src/partitioning/mod.rs
@@ -108,6 +108,7 @@ use rustc_span::symbol::Symbol;
use crate::collector::InliningMap;
use crate::collector::{self, MonoItemCollectionMode};
+use crate::errors::{SymbolAlreadyDefined, UnknownPartitionStrategy};
pub struct PartitioningCx<'a, 'tcx> {
tcx: TyCtxt<'tcx>,
@@ -149,7 +150,9 @@ fn get_partitioner<'tcx>(tcx: TyCtxt<'tcx>) -> Box<dyn Partitioner<'tcx>> {
match strategy {
"default" => Box::new(default::DefaultPartitioning),
- _ => tcx.sess.fatal("unknown partitioning strategy"),
+ _ => {
+ tcx.sess.emit_fatal(UnknownPartitionStrategy);
+ }
}
}
@@ -331,13 +334,7 @@ where
(span1, span2) => span1.or(span2),
};
- let error_message = format!("symbol `{}` is already defined", sym1);
-
- if let Some(span) = span {
- tcx.sess.span_fatal(span, &error_message)
- } else {
- tcx.sess.fatal(&error_message)
- }
+ tcx.sess.emit_fatal(SymbolAlreadyDefined { span, symbol: sym1.to_string() });
}
}
}
@@ -481,7 +478,7 @@ fn codegened_and_inlined_items<'tcx>(tcx: TyCtxt<'tcx>, (): ()) -> &'tcx DefIdSe
continue;
}
let body = tcx.instance_mir(instance.def);
- for block in body.basic_blocks() {
+ for block in body.basic_blocks.iter() {
for statement in &block.statements {
let mir::StatementKind::Coverage(_) = statement.kind else { continue };
let scope = statement.source_info.scope;
diff --git a/compiler/rustc_monomorphize/src/polymorphize.rs b/compiler/rustc_monomorphize/src/polymorphize.rs
index 394843e51..650076c22 100644
--- a/compiler/rustc_monomorphize/src/polymorphize.rs
+++ b/compiler/rustc_monomorphize/src/polymorphize.rs
@@ -8,8 +8,9 @@
use rustc_hir::{def::DefKind, def_id::DefId, ConstContext};
use rustc_index::bit_set::FiniteBitSet;
use rustc_middle::mir::{
+ self,
visit::{TyContext, Visitor},
- Local, LocalDecl, Location,
+ Constant, ConstantKind, Local, LocalDecl, Location,
};
use rustc_middle::ty::{
self,
@@ -22,6 +23,8 @@ use rustc_span::symbol::sym;
use std::convert::TryInto;
use std::ops::ControlFlow;
+use crate::errors::UnusedGenericParams;
+
/// Provide implementations of queries relating to polymorphization analysis.
pub fn provide(providers: &mut Providers) {
providers.unused_generic_params = unused_generic_params;
@@ -31,7 +34,6 @@ pub fn provide(providers: &mut Providers) {
///
/// Returns a bitset where bits representing unused parameters are set (`is_empty` indicates all
/// parameters are used).
-#[instrument(level = "debug", skip(tcx))]
fn unused_generic_params<'tcx>(
tcx: TyCtxt<'tcx>,
instance: ty::InstanceDef<'tcx>,
@@ -169,6 +171,7 @@ fn mark_used_by_default_parameters<'tcx>(
| DefKind::AnonConst
| DefKind::InlineConst
| DefKind::OpaqueTy
+ | DefKind::ImplTraitPlaceholder
| DefKind::Field
| DefKind::LifetimeParam
| DefKind::GlobalAsm
@@ -206,22 +209,23 @@ fn emit_unused_generic_params_error<'tcx>(
_ => tcx.def_span(def_id),
};
- let mut err = tcx.sess.struct_span_err(fn_span, "item has unused generic parameters");
-
+ let mut param_spans = Vec::new();
+ let mut param_names = Vec::new();
let mut next_generics = Some(generics);
while let Some(generics) = next_generics {
for param in &generics.params {
if unused_parameters.contains(param.index).unwrap_or(false) {
debug!(?param);
let def_span = tcx.def_span(param.def_id);
- err.span_label(def_span, &format!("generic parameter `{}` is unused", param.name));
+ param_spans.push(def_span);
+ param_names.push(param.name.to_string());
}
}
next_generics = generics.parent.map(|did| tcx.generics_of(did));
}
- err.emit();
+ tcx.sess.emit_err(UnusedGenericParams { span: fn_span, param_spans, param_names });
}
/// Visitor used to aggregate generic parameter uses.
@@ -267,8 +271,27 @@ impl<'a, 'tcx> Visitor<'tcx> for MarkUsedGenericParams<'a, 'tcx> {
self.super_local_decl(local, local_decl);
}
- fn visit_const(&mut self, c: Const<'tcx>, _: Location) {
- c.visit_with(self);
+ fn visit_constant(&mut self, ct: &Constant<'tcx>, location: Location) {
+ match ct.literal {
+ ConstantKind::Ty(c) => {
+ c.visit_with(self);
+ }
+ ConstantKind::Unevaluated(mir::UnevaluatedConst { def, substs: _, promoted }, ty) => {
+ // Avoid considering `T` unused when constants are of the form:
+ // `<Self as Foo<T>>::foo::promoted[p]`
+ if let Some(p) = promoted {
+ if self.def_id == def.did && !self.tcx.generics_of(def.did).has_self {
+ // If there is a promoted, don't look at the substs, since they will always contain
+ // the generic parameters; instead, traverse the promoted MIR.
+ let promoted = self.tcx.promoted_mir(def.did);
+ self.visit_body(&promoted[p]);
+ }
+ }
+
+ Visitor::visit_ty(self, ty, TyContext::Location(location));
+ }
+ ConstantKind::Val(_, ty) => Visitor::visit_ty(self, ty, TyContext::Location(location)),
+ }
}
fn visit_ty(&mut self, ty: Ty<'tcx>, _: TyContext) {
@@ -279,7 +302,7 @@ impl<'a, 'tcx> Visitor<'tcx> for MarkUsedGenericParams<'a, 'tcx> {
impl<'a, 'tcx> TypeVisitor<'tcx> for MarkUsedGenericParams<'a, 'tcx> {
#[instrument(level = "debug", skip(self))]
fn visit_const(&mut self, c: Const<'tcx>) -> ControlFlow<Self::BreakTy> {
- if !c.has_param_types_or_consts() {
+ if !c.has_non_region_param() {
return ControlFlow::CONTINUE;
}
@@ -289,21 +312,10 @@ impl<'a, 'tcx> TypeVisitor<'tcx> for MarkUsedGenericParams<'a, 'tcx> {
self.unused_parameters.clear(param.index);
ControlFlow::CONTINUE
}
- ty::ConstKind::Unevaluated(ty::Unevaluated { def, substs: _, promoted: Some(p)})
- // Avoid considering `T` unused when constants are of the form:
- // `<Self as Foo<T>>::foo::promoted[p]`
- if self.def_id == def.did && !self.tcx.generics_of(def.did).has_self =>
- {
- // If there is a promoted, don't look at the substs - since it will always contain
- // the generic parameters, instead, traverse the promoted MIR.
- let promoted = self.tcx.promoted_mir(def.did);
- self.visit_body(&promoted[p]);
- ControlFlow::CONTINUE
- }
- ty::ConstKind::Unevaluated(uv)
- if matches!(self.tcx.def_kind(uv.def.did), DefKind::AnonConst | DefKind::InlineConst) =>
+ ty::ConstKind::Unevaluated(ty::UnevaluatedConst { def, substs })
+ if matches!(self.tcx.def_kind(def.did), DefKind::AnonConst) =>
{
- self.visit_child_body(uv.def.did, uv.substs);
+ self.visit_child_body(def.did, substs);
ControlFlow::CONTINUE
}
_ => c.super_visit_with(self),
@@ -312,7 +324,7 @@ impl<'a, 'tcx> TypeVisitor<'tcx> for MarkUsedGenericParams<'a, 'tcx> {
#[instrument(level = "debug", skip(self))]
fn visit_ty(&mut self, ty: Ty<'tcx>) -> ControlFlow<Self::BreakTy> {
- if !ty.has_param_types_or_consts() {
+ if !ty.has_non_region_param() {
return ControlFlow::CONTINUE;
}
@@ -349,7 +361,7 @@ impl<'a, 'tcx> TypeVisitor<'tcx> for HasUsedGenericParams<'a> {
#[instrument(level = "debug", skip(self))]
fn visit_const(&mut self, c: Const<'tcx>) -> ControlFlow<Self::BreakTy> {
- if !c.has_param_types_or_consts() {
+ if !c.has_non_region_param() {
return ControlFlow::CONTINUE;
}
@@ -367,7 +379,7 @@ impl<'a, 'tcx> TypeVisitor<'tcx> for HasUsedGenericParams<'a> {
#[instrument(level = "debug", skip(self))]
fn visit_ty(&mut self, ty: Ty<'tcx>) -> ControlFlow<Self::BreakTy> {
- if !ty.has_param_types_or_consts() {
+ if !ty.has_non_region_param() {
return ControlFlow::CONTINUE;
}
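
As context for the `unused_generic_params` changes above: the query's bitset contract ("bits set = unused, empty set = all parameters used") can be modeled with a plain integer. The sketch below uses hypothetical names and no rustc types; it is not the `FiniteBitSet` API.

```rust
// Standalone model of how the polymorphization pass tracks unused generic parameters:
// every bit starts set ("possibly unused") and is cleared whenever the visitor sees
// the corresponding parameter used in a type or constant.
struct UnusedParams(u64);

impl UnusedParams {
    fn new(param_count: u32) -> Self {
        assert!(param_count <= 64, "model only supports up to 64 parameters");
        Self(if param_count == 64 { u64::MAX } else { (1u64 << param_count) - 1 })
    }
    fn mark_used(&mut self, index: u32) {
        self.0 &= !(1u64 << index);
    }
    // Mirrors the query contract: an empty set means every parameter is used.
    fn all_used(&self) -> bool {
        self.0 == 0
    }
}

fn main() {
    // e.g. fn foo<T, U, V>(..): the visitor saw T and V but never U.
    let mut unused = UnusedParams::new(3);
    unused.mark_used(0);
    unused.mark_used(2);
    assert!(!unused.all_used()); // U is unused, so the instance can be polymorphized.
}
```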
diff --git a/compiler/rustc_monomorphize/src/util.rs b/compiler/rustc_monomorphize/src/util.rs
index 847e64dc2..6a4d2df1e 100644
--- a/compiler/rustc_monomorphize/src/util.rs
+++ b/compiler/rustc_monomorphize/src/util.rs
@@ -13,7 +13,7 @@ pub(crate) fn dump_closure_profile<'tcx>(tcx: TyCtxt<'tcx>, closure_instance: In
.append(true)
.open(&format!("closure_profile_{}.csv", std::process::id()))
else {
- eprintln!("Cound't open file for writing closure profile");
+ eprintln!("Couldn't open file for writing closure profile");
return;
};
diff --git a/compiler/rustc_parse/Cargo.toml b/compiler/rustc_parse/Cargo.toml
index c6ca260e9..a5c94e164 100644
--- a/compiler/rustc_parse/Cargo.toml
+++ b/compiler/rustc_parse/Cargo.toml
@@ -4,7 +4,6 @@ version = "0.0.0"
edition = "2021"
[lib]
-doctest = false
[dependencies]
bitflags = "1.0"
diff --git a/compiler/rustc_parse/src/errors.rs b/compiler/rustc_parse/src/errors.rs
new file mode 100644
index 000000000..9b177c518
--- /dev/null
+++ b/compiler/rustc_parse/src/errors.rs
@@ -0,0 +1,1237 @@
+use rustc_ast::token::Token;
+use rustc_ast::Path;
+use rustc_errors::{fluent, AddToDiagnostic, Applicability, EmissionGuarantee, IntoDiagnostic};
+use rustc_macros::{Diagnostic, Subdiagnostic};
+use rustc_session::errors::ExprParenthesesNeeded;
+use rustc_span::symbol::Ident;
+use rustc_span::{Span, Symbol};
+
+use crate::parser::TokenDescription;
+
+#[derive(Diagnostic)]
+#[diag(parser_maybe_report_ambiguous_plus)]
+pub(crate) struct AmbiguousPlus {
+ pub sum_ty: String,
+ #[primary_span]
+ #[suggestion(code = "({sum_ty})")]
+ pub span: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(parser_maybe_recover_from_bad_type_plus, code = "E0178")]
+pub(crate) struct BadTypePlus {
+ pub ty: String,
+ #[primary_span]
+ pub span: Span,
+ #[subdiagnostic]
+ pub sub: BadTypePlusSub,
+}
+
+#[derive(Subdiagnostic)]
+pub(crate) enum BadTypePlusSub {
+ #[suggestion(
+ parser_add_paren,
+ code = "{sum_with_parens}",
+ applicability = "machine-applicable"
+ )]
+ AddParen {
+ sum_with_parens: String,
+ #[primary_span]
+ span: Span,
+ },
+ #[label(parser_forgot_paren)]
+ ForgotParen {
+ #[primary_span]
+ span: Span,
+ },
+ #[label(parser_expect_path)]
+ ExpectPath {
+ #[primary_span]
+ span: Span,
+ },
+}
+
+#[derive(Diagnostic)]
+#[diag(parser_maybe_recover_from_bad_qpath_stage_2)]
+pub(crate) struct BadQPathStage2 {
+ #[primary_span]
+ #[suggestion(code = "", applicability = "maybe-incorrect")]
+ pub span: Span,
+ pub ty: String,
+}
+
+#[derive(Diagnostic)]
+#[diag(parser_incorrect_semicolon)]
+pub(crate) struct IncorrectSemicolon<'a> {
+ #[primary_span]
+ #[suggestion_short(code = "", applicability = "machine-applicable")]
+ pub span: Span,
+ #[help]
+ pub opt_help: Option<()>,
+ pub name: &'a str,
+}
+
+#[derive(Diagnostic)]
+#[diag(parser_incorrect_use_of_await)]
+pub(crate) struct IncorrectUseOfAwait {
+ #[primary_span]
+ #[suggestion(parentheses_suggestion, code = "", applicability = "machine-applicable")]
+ pub span: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(parser_incorrect_use_of_await)]
+pub(crate) struct IncorrectAwait {
+ #[primary_span]
+ pub span: Span,
+ #[suggestion(postfix_suggestion, code = "{expr}.await{question_mark}")]
+ pub sugg_span: (Span, Applicability),
+ pub expr: String,
+ pub question_mark: &'static str,
+}
+
+#[derive(Diagnostic)]
+#[diag(parser_in_in_typo)]
+pub(crate) struct InInTypo {
+ #[primary_span]
+ pub span: Span,
+ #[suggestion(code = "", applicability = "machine-applicable")]
+ pub sugg_span: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(parser_invalid_variable_declaration)]
+pub(crate) struct InvalidVariableDeclaration {
+ #[primary_span]
+ pub span: Span,
+ #[subdiagnostic]
+ pub sub: InvalidVariableDeclarationSub,
+}
+
+#[derive(Subdiagnostic)]
+pub(crate) enum InvalidVariableDeclarationSub {
+ #[suggestion(parser_switch_mut_let_order, applicability = "maybe-incorrect", code = "let mut")]
+ SwitchMutLetOrder(#[primary_span] Span),
+ #[suggestion(
+ parser_missing_let_before_mut,
+ applicability = "machine-applicable",
+ code = "let mut"
+ )]
+ MissingLet(#[primary_span] Span),
+ #[suggestion(parser_use_let_not_auto, applicability = "machine-applicable", code = "let")]
+ UseLetNotAuto(#[primary_span] Span),
+ #[suggestion(parser_use_let_not_var, applicability = "machine-applicable", code = "let")]
+ UseLetNotVar(#[primary_span] Span),
+}
+
+#[derive(Diagnostic)]
+#[diag(parser_invalid_comparison_operator)]
+pub(crate) struct InvalidComparisonOperator {
+ #[primary_span]
+ pub span: Span,
+ pub invalid: String,
+ #[subdiagnostic]
+ pub sub: InvalidComparisonOperatorSub,
+}
+
+#[derive(Subdiagnostic)]
+pub(crate) enum InvalidComparisonOperatorSub {
+ #[suggestion_short(use_instead, applicability = "machine-applicable", code = "{correct}")]
+ Correctable {
+ #[primary_span]
+ span: Span,
+ invalid: String,
+ correct: String,
+ },
+ #[label(spaceship_operator_invalid)]
+ Spaceship(#[primary_span] Span),
+}
+
+#[derive(Diagnostic)]
+#[diag(parser_invalid_logical_operator)]
+#[note]
+pub(crate) struct InvalidLogicalOperator {
+ #[primary_span]
+ pub span: Span,
+ pub incorrect: String,
+ #[subdiagnostic]
+ pub sub: InvalidLogicalOperatorSub,
+}
+
+#[derive(Subdiagnostic)]
+pub(crate) enum InvalidLogicalOperatorSub {
+ #[suggestion_short(
+ use_amp_amp_for_conjunction,
+ applicability = "machine-applicable",
+ code = "&&"
+ )]
+ Conjunction(#[primary_span] Span),
+ #[suggestion_short(
+ use_pipe_pipe_for_disjunction,
+ applicability = "machine-applicable",
+ code = "||"
+ )]
+ Disjunction(#[primary_span] Span),
+}
+
+#[derive(Diagnostic)]
+#[diag(parser_tilde_is_not_unary_operator)]
+pub(crate) struct TildeAsUnaryOperator(
+ #[primary_span]
+ #[suggestion_short(applicability = "machine-applicable", code = "!")]
+ pub Span,
+);
+
+#[derive(Diagnostic)]
+#[diag(parser_unexpected_token_after_not)]
+pub(crate) struct NotAsNegationOperator {
+ #[primary_span]
+ pub negated: Span,
+ pub negated_desc: String,
+ #[subdiagnostic]
+ pub sub: NotAsNegationOperatorSub,
+}
+
+#[derive(Subdiagnostic)]
+pub enum NotAsNegationOperatorSub {
+ #[suggestion_short(
+ parser_unexpected_token_after_not_default,
+ applicability = "machine-applicable",
+ code = "!"
+ )]
+ SuggestNotDefault(#[primary_span] Span),
+
+ #[suggestion_short(
+ parser_unexpected_token_after_not_bitwise,
+ applicability = "machine-applicable",
+ code = "!"
+ )]
+ SuggestNotBitwise(#[primary_span] Span),
+
+ #[suggestion_short(
+ parser_unexpected_token_after_not_logical,
+ applicability = "machine-applicable",
+ code = "!"
+ )]
+ SuggestNotLogical(#[primary_span] Span),
+}
+
+#[derive(Diagnostic)]
+#[diag(parser_malformed_loop_label)]
+pub(crate) struct MalformedLoopLabel {
+ #[primary_span]
+ #[suggestion(applicability = "machine-applicable", code = "{correct_label}")]
+ pub span: Span,
+ pub correct_label: Ident,
+}
+
+#[derive(Diagnostic)]
+#[diag(parser_lifetime_in_borrow_expression)]
+pub(crate) struct LifetimeInBorrowExpression {
+ #[primary_span]
+ pub span: Span,
+ #[suggestion(applicability = "machine-applicable", code = "")]
+ #[label]
+ pub lifetime_span: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(parser_field_expression_with_generic)]
+pub(crate) struct FieldExpressionWithGeneric(#[primary_span] pub Span);
+
+#[derive(Diagnostic)]
+#[diag(parser_macro_invocation_with_qualified_path)]
+pub(crate) struct MacroInvocationWithQualifiedPath(#[primary_span] pub Span);
+
+#[derive(Diagnostic)]
+#[diag(parser_unexpected_token_after_label)]
+pub(crate) struct UnexpectedTokenAfterLabel {
+ #[primary_span]
+ #[label(parser_unexpected_token_after_label)]
+ pub span: Span,
+ #[suggestion_verbose(suggestion_remove_label, code = "")]
+ pub remove_label: Option<Span>,
+ #[subdiagnostic]
+ pub enclose_in_block: Option<UnexpectedTokenAfterLabelSugg>,
+}
+
+#[derive(Subdiagnostic)]
+#[multipart_suggestion(suggestion_enclose_in_block, applicability = "machine-applicable")]
+pub(crate) struct UnexpectedTokenAfterLabelSugg {
+ #[suggestion_part(code = "{{ ")]
+ pub left: Span,
+ #[suggestion_part(code = " }}")]
+ pub right: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(parser_require_colon_after_labeled_expression)]
+#[note]
+pub(crate) struct RequireColonAfterLabeledExpression {
+ #[primary_span]
+ pub span: Span,
+ #[label]
+ pub label: Span,
+ #[suggestion_short(applicability = "machine-applicable", code = ": ")]
+ pub label_end: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(parser_do_catch_syntax_removed)]
+#[note]
+pub(crate) struct DoCatchSyntaxRemoved {
+ #[primary_span]
+ #[suggestion(applicability = "machine-applicable", code = "try")]
+ pub span: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(parser_float_literal_requires_integer_part)]
+pub(crate) struct FloatLiteralRequiresIntegerPart {
+ #[primary_span]
+ #[suggestion(applicability = "machine-applicable", code = "{correct}")]
+ pub span: Span,
+ pub correct: String,
+}
+
+#[derive(Diagnostic)]
+#[diag(parser_invalid_int_literal_width)]
+#[help]
+pub(crate) struct InvalidIntLiteralWidth {
+ #[primary_span]
+ pub span: Span,
+ pub width: String,
+}
+
+#[derive(Diagnostic)]
+#[diag(parser_invalid_num_literal_base_prefix)]
+#[note]
+pub(crate) struct InvalidNumLiteralBasePrefix {
+ #[primary_span]
+ #[suggestion(applicability = "maybe-incorrect", code = "{fixed}")]
+ pub span: Span,
+ pub fixed: String,
+}
+
+#[derive(Diagnostic)]
+#[diag(parser_invalid_num_literal_suffix)]
+#[help]
+pub(crate) struct InvalidNumLiteralSuffix {
+ #[primary_span]
+ #[label]
+ pub span: Span,
+ pub suffix: String,
+}
+
+#[derive(Diagnostic)]
+#[diag(parser_invalid_float_literal_width)]
+#[help]
+pub(crate) struct InvalidFloatLiteralWidth {
+ #[primary_span]
+ pub span: Span,
+ pub width: String,
+}
+
+#[derive(Diagnostic)]
+#[diag(parser_invalid_float_literal_suffix)]
+#[help]
+pub(crate) struct InvalidFloatLiteralSuffix {
+ #[primary_span]
+ #[label]
+ pub span: Span,
+ pub suffix: String,
+}
+
+#[derive(Diagnostic)]
+#[diag(parser_int_literal_too_large)]
+pub(crate) struct IntLiteralTooLarge {
+ #[primary_span]
+ pub span: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(parser_missing_semicolon_before_array)]
+pub(crate) struct MissingSemicolonBeforeArray {
+ #[primary_span]
+ pub open_delim: Span,
+ #[suggestion_verbose(applicability = "maybe-incorrect", code = ";")]
+ pub semicolon: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(parser_invalid_block_macro_segment)]
+pub(crate) struct InvalidBlockMacroSegment {
+ #[primary_span]
+ pub span: Span,
+ #[label]
+ pub context: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(parser_if_expression_missing_then_block)]
+pub(crate) struct IfExpressionMissingThenBlock {
+ #[primary_span]
+ pub if_span: Span,
+ #[subdiagnostic]
+ pub sub: IfExpressionMissingThenBlockSub,
+}
+
+#[derive(Subdiagnostic)]
+pub(crate) enum IfExpressionMissingThenBlockSub {
+ #[help(condition_possibly_unfinished)]
+ UnfinishedCondition(#[primary_span] Span),
+ #[help(add_then_block)]
+ AddThenBlock(#[primary_span] Span),
+}
+
+#[derive(Diagnostic)]
+#[diag(parser_if_expression_missing_condition)]
+pub(crate) struct IfExpressionMissingCondition {
+ #[primary_span]
+ #[label(condition_label)]
+ pub if_span: Span,
+ #[label(block_label)]
+ pub block_span: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(parser_expected_expression_found_let)]
+pub(crate) struct ExpectedExpressionFoundLet {
+ #[primary_span]
+ pub span: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(parser_expected_else_block)]
+pub(crate) struct ExpectedElseBlock {
+ #[primary_span]
+ pub first_tok_span: Span,
+ pub first_tok: String,
+ #[label]
+ pub else_span: Span,
+ #[suggestion(applicability = "maybe-incorrect", code = "if ")]
+ pub condition_start: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(parser_outer_attribute_not_allowed_on_if_else)]
+pub(crate) struct OuterAttributeNotAllowedOnIfElse {
+ #[primary_span]
+ pub last: Span,
+
+ #[label(branch_label)]
+ pub branch_span: Span,
+
+ #[label(ctx_label)]
+ pub ctx_span: Span,
+ pub ctx: String,
+
+ #[suggestion(applicability = "machine-applicable", code = "")]
+ pub attributes: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(parser_missing_in_in_for_loop)]
+pub(crate) struct MissingInInForLoop {
+ #[primary_span]
+ pub span: Span,
+ #[subdiagnostic]
+ pub sub: MissingInInForLoopSub,
+}
+
+#[derive(Subdiagnostic)]
+pub(crate) enum MissingInInForLoopSub {
+ // Has been misleading, at least in the past (closed Issue #48492), thus maybe-incorrect
+ #[suggestion_short(use_in_not_of, applicability = "maybe-incorrect", code = "in")]
+ InNotOf(#[primary_span] Span),
+ #[suggestion_short(add_in, applicability = "maybe-incorrect", code = " in ")]
+ AddIn(#[primary_span] Span),
+}
+
+#[derive(Diagnostic)]
+#[diag(parser_missing_comma_after_match_arm)]
+pub(crate) struct MissingCommaAfterMatchArm {
+ #[primary_span]
+ #[suggestion(applicability = "machine-applicable", code = ",")]
+ pub span: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(parser_catch_after_try)]
+#[help]
+pub(crate) struct CatchAfterTry {
+ #[primary_span]
+ pub span: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(parser_comma_after_base_struct)]
+#[note]
+pub(crate) struct CommaAfterBaseStruct {
+ #[primary_span]
+ pub span: Span,
+ #[suggestion_short(applicability = "machine-applicable", code = "")]
+ pub comma: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(parser_eq_field_init)]
+pub(crate) struct EqFieldInit {
+ #[primary_span]
+ pub span: Span,
+ #[suggestion(applicability = "machine-applicable", code = ":")]
+ pub eq: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(parser_dotdotdot)]
+pub(crate) struct DotDotDot {
+ #[primary_span]
+ #[suggestion(suggest_exclusive_range, applicability = "maybe-incorrect", code = "..")]
+ #[suggestion(suggest_inclusive_range, applicability = "maybe-incorrect", code = "..=")]
+ pub span: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(parser_left_arrow_operator)]
+pub(crate) struct LeftArrowOperator {
+ #[primary_span]
+ #[suggestion(applicability = "maybe-incorrect", code = "< -")]
+ pub span: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(parser_remove_let)]
+pub(crate) struct RemoveLet {
+ #[primary_span]
+ #[suggestion(applicability = "machine-applicable", code = "")]
+ pub span: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(parser_use_eq_instead)]
+pub(crate) struct UseEqInstead {
+ #[primary_span]
+ #[suggestion_short(applicability = "machine-applicable", code = "=")]
+ pub span: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(parser_use_empty_block_not_semi)]
+pub(crate) struct UseEmptyBlockNotSemi {
+ #[primary_span]
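+ // `{{` and `}}` are escaped braces: suggestion `code` strings are format
+ // strings, so the emitted suggestion here is a literal `{}`.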
+ #[suggestion_hidden(applicability = "machine-applicable", code = "{{}}")]
+ pub span: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(parser_comparison_interpreted_as_generic)]
+pub(crate) struct ComparisonInterpretedAsGeneric {
+ #[primary_span]
+ #[label(label_comparison)]
+ pub comparison: Span,
+ pub r#type: Path,
+ #[label(label_args)]
+ pub args: Span,
+ #[subdiagnostic]
+ pub suggestion: ComparisonOrShiftInterpretedAsGenericSugg,
+}
+
+#[derive(Diagnostic)]
+#[diag(parser_shift_interpreted_as_generic)]
+pub(crate) struct ShiftInterpretedAsGeneric {
+ #[primary_span]
+ #[label(label_comparison)]
+ pub shift: Span,
+ pub r#type: Path,
+ #[label(label_args)]
+ pub args: Span,
+ #[subdiagnostic]
+ pub suggestion: ComparisonOrShiftInterpretedAsGenericSugg,
+}
+
+#[derive(Subdiagnostic)]
+#[multipart_suggestion(suggestion, applicability = "machine-applicable")]
+pub(crate) struct ComparisonOrShiftInterpretedAsGenericSugg {
+ #[suggestion_part(code = "(")]
+ pub left: Span,
+ #[suggestion_part(code = ")")]
+ pub right: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(parser_found_expr_would_be_stmt)]
+pub(crate) struct FoundExprWouldBeStmt {
+ #[primary_span]
+ #[label]
+ pub span: Span,
+ pub token: Token,
+ #[subdiagnostic]
+ pub suggestion: ExprParenthesesNeeded,
+}
+
+#[derive(Diagnostic)]
+#[diag(parser_leading_plus_not_supported)]
+pub(crate) struct LeadingPlusNotSupported {
+ #[primary_span]
+ #[label]
+ pub span: Span,
+ #[suggestion_verbose(suggestion_remove_plus, code = "", applicability = "machine-applicable")]
+ pub remove_plus: Option<Span>,
+ #[subdiagnostic]
+ pub add_parentheses: Option<ExprParenthesesNeeded>,
+}
+
+#[derive(Diagnostic)]
+#[diag(parser_parentheses_with_struct_fields)]
+pub(crate) struct ParenthesesWithStructFields {
+ #[primary_span]
+ pub span: Span,
+ pub r#type: Path,
+ #[subdiagnostic]
+ pub braces_for_struct: BracesForStructLiteral,
+ #[subdiagnostic]
+ pub no_fields_for_fn: NoFieldsForFnCall,
+}
+
+#[derive(Subdiagnostic)]
+#[multipart_suggestion(suggestion_braces_for_struct, applicability = "maybe-incorrect")]
+pub(crate) struct BracesForStructLiteral {
+ #[suggestion_part(code = " {{ ")]
+ pub first: Span,
+ #[suggestion_part(code = " }}")]
+ pub second: Span,
+}
+
+#[derive(Subdiagnostic)]
+#[multipart_suggestion(suggestion_no_fields_for_fn, applicability = "maybe-incorrect")]
+pub(crate) struct NoFieldsForFnCall {
+ #[suggestion_part(code = "")]
+ pub fields: Vec<Span>,
+}
+
+#[derive(Diagnostic)]
+#[diag(parser_labeled_loop_in_break)]
+pub(crate) struct LabeledLoopInBreak {
+ #[primary_span]
+ pub span: Span,
+ #[subdiagnostic]
+ pub sub: WrapExpressionInParentheses,
+}
+
+#[derive(Subdiagnostic)]
+#[multipart_suggestion(
+ parser_sugg_wrap_expression_in_parentheses,
+ applicability = "machine-applicable"
+)]
+pub(crate) struct WrapExpressionInParentheses {
+ #[suggestion_part(code = "(")]
+ pub left: Span,
+ #[suggestion_part(code = ")")]
+ pub right: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(parser_array_brackets_instead_of_braces)]
+pub(crate) struct ArrayBracketsInsteadOfSpaces {
+ #[primary_span]
+ pub span: Span,
+ #[subdiagnostic]
+ pub sub: ArrayBracketsInsteadOfSpacesSugg,
+}
+
+#[derive(Subdiagnostic)]
+#[multipart_suggestion(suggestion, applicability = "maybe-incorrect")]
+pub(crate) struct ArrayBracketsInsteadOfSpacesSugg {
+ #[suggestion_part(code = "[")]
+ pub left: Span,
+ #[suggestion_part(code = "]")]
+ pub right: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(parser_match_arm_body_without_braces)]
+pub(crate) struct MatchArmBodyWithoutBraces {
+ #[primary_span]
+ #[label(label_statements)]
+ pub statements: Span,
+ #[label(label_arrow)]
+ pub arrow: Span,
+ pub num_statements: usize,
+ #[subdiagnostic]
+ pub sub: MatchArmBodyWithoutBracesSugg,
+}
+
+#[derive(Subdiagnostic)]
+pub(crate) enum MatchArmBodyWithoutBracesSugg {
+ #[multipart_suggestion(suggestion_add_braces, applicability = "machine-applicable")]
+ AddBraces {
+ #[suggestion_part(code = "{{ ")]
+ left: Span,
+ #[suggestion_part(code = " }}")]
+ right: Span,
+ },
+ #[suggestion(
+ suggestion_use_comma_not_semicolon,
+ code = ",",
+ applicability = "machine-applicable"
+ )]
+ UseComma {
+ #[primary_span]
+ semicolon: Span,
+ },
+}
+
+#[derive(Diagnostic)]
+#[diag(parser_struct_literal_not_allowed_here)]
+pub(crate) struct StructLiteralNotAllowedHere {
+ #[primary_span]
+ pub span: Span,
+ #[subdiagnostic]
+ pub sub: StructLiteralNotAllowedHereSugg,
+}
+
+#[derive(Subdiagnostic)]
+#[multipart_suggestion(suggestion, applicability = "machine-applicable")]
+pub(crate) struct StructLiteralNotAllowedHereSugg {
+ #[suggestion_part(code = "(")]
+ pub left: Span,
+ #[suggestion_part(code = ")")]
+ pub right: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(parser_invalid_interpolated_expression)]
+pub(crate) struct InvalidInterpolatedExpression {
+ #[primary_span]
+ pub span: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(parser_hexadecimal_float_literal_not_supported)]
+pub(crate) struct HexadecimalFloatLiteralNotSupported {
+ #[primary_span]
+ #[label(parser_not_supported)]
+ pub span: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(parser_octal_float_literal_not_supported)]
+pub(crate) struct OctalFloatLiteralNotSupported {
+ #[primary_span]
+ #[label(parser_not_supported)]
+ pub span: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(parser_binary_float_literal_not_supported)]
+pub(crate) struct BinaryFloatLiteralNotSupported {
+ #[primary_span]
+ #[label(parser_not_supported)]
+ pub span: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(parser_invalid_literal_suffix)]
+pub(crate) struct InvalidLiteralSuffix {
+ #[primary_span]
+ #[label]
+ pub span: Span,
+ // FIXME(#100717)
+ pub kind: String,
+ pub suffix: Symbol,
+}
+
+#[derive(Diagnostic)]
+#[diag(parser_invalid_literal_suffix_on_tuple_index)]
+pub(crate) struct InvalidLiteralSuffixOnTupleIndex {
+ #[primary_span]
+ #[label]
+ pub span: Span,
+ pub suffix: Symbol,
+ #[help(tuple_exception_line_1)]
+ #[help(tuple_exception_line_2)]
+ #[help(tuple_exception_line_3)]
+ pub exception: Option<()>,
+}
+
+#[derive(Diagnostic)]
+#[diag(parser_non_string_abi_literal)]
+pub(crate) struct NonStringAbiLiteral {
+ #[primary_span]
+ #[suggestion(code = "\"C\"", applicability = "maybe-incorrect")]
+ pub span: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(parser_mismatched_closing_delimiter)]
+pub(crate) struct MismatchedClosingDelimiter {
+ #[primary_span]
+ pub spans: Vec<Span>,
+ pub delimiter: String,
+ #[label(label_unmatched)]
+ pub unmatched: Span,
+ #[label(label_opening_candidate)]
+ pub opening_candidate: Option<Span>,
+ #[label(label_unclosed)]
+ pub unclosed: Option<Span>,
+}
+
+#[derive(Diagnostic)]
+#[diag(parser_incorrect_visibility_restriction, code = "E0704")]
+#[help]
+pub(crate) struct IncorrectVisibilityRestriction {
+ #[primary_span]
+ #[suggestion(code = "in {inner_str}", applicability = "machine-applicable")]
+ pub span: Span,
+ pub inner_str: String,
+}
+
+#[derive(Diagnostic)]
+#[diag(parser_assignment_else_not_allowed)]
+pub(crate) struct AssignmentElseNotAllowed {
+ #[primary_span]
+ pub span: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(parser_expected_statement_after_outer_attr)]
+pub(crate) struct ExpectedStatementAfterOuterAttr {
+ #[primary_span]
+ pub span: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(parser_doc_comment_does_not_document_anything, code = "E0585")]
+#[help]
+pub(crate) struct DocCommentDoesNotDocumentAnything {
+ #[primary_span]
+ pub span: Span,
+ #[suggestion(code = ",", applicability = "machine-applicable")]
+ pub missing_comma: Option<Span>,
+}
+
+#[derive(Diagnostic)]
+#[diag(parser_const_let_mutually_exclusive)]
+pub(crate) struct ConstLetMutuallyExclusive {
+ #[primary_span]
+ #[suggestion(code = "const", applicability = "maybe-incorrect")]
+ pub span: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(parser_invalid_expression_in_let_else)]
+pub(crate) struct InvalidExpressionInLetElse {
+ #[primary_span]
+ pub span: Span,
+ pub operator: &'static str,
+ #[subdiagnostic]
+ pub sugg: WrapExpressionInParentheses,
+}
+
+#[derive(Diagnostic)]
+#[diag(parser_invalid_curly_in_let_else)]
+pub(crate) struct InvalidCurlyInLetElse {
+ #[primary_span]
+ pub span: Span,
+ #[subdiagnostic]
+ pub sugg: WrapExpressionInParentheses,
+}
+
+#[derive(Diagnostic)]
+#[diag(parser_compound_assignment_expression_in_let)]
+#[help]
+pub(crate) struct CompoundAssignmentExpressionInLet {
+ #[primary_span]
+ #[suggestion_short(code = "=", applicability = "maybe-incorrect")]
+ pub span: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(parser_suffixed_literal_in_attribute)]
+#[help]
+pub(crate) struct SuffixedLiteralInAttribute {
+ #[primary_span]
+ pub span: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(parser_invalid_meta_item)]
+pub(crate) struct InvalidMetaItem {
+ #[primary_span]
+ pub span: Span,
+ pub token: Token,
+}
+
+#[derive(Subdiagnostic)]
+#[suggestion_verbose(
+ parser_sugg_escape_to_use_as_identifier,
+ applicability = "maybe-incorrect",
+ code = "r#"
+)]
+pub(crate) struct SuggEscapeToUseAsIdentifier {
+ #[primary_span]
+ pub span: Span,
+ pub ident_name: String,
+}
+
+#[derive(Subdiagnostic)]
+#[suggestion(parser_sugg_remove_comma, applicability = "machine-applicable", code = "")]
+pub(crate) struct SuggRemoveComma {
+ #[primary_span]
+ pub span: Span,
+}
+
+#[derive(Subdiagnostic)]
+pub(crate) enum ExpectedIdentifierFound {
+ #[label(parser_expected_identifier_found_reserved_identifier)]
+ ReservedIdentifier(#[primary_span] Span),
+ #[label(parser_expected_identifier_found_keyword)]
+ Keyword(#[primary_span] Span),
+ #[label(parser_expected_identifier_found_reserved_keyword)]
+ ReservedKeyword(#[primary_span] Span),
+ #[label(parser_expected_identifier_found_doc_comment)]
+ DocComment(#[primary_span] Span),
+ #[label(parser_expected_identifier)]
+ Other(#[primary_span] Span),
+}
+
+impl ExpectedIdentifierFound {
+ pub fn new(token_descr: Option<TokenDescription>, span: Span) -> Self {
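+ // The `match` selects a variant's tuple constructor, which is then
+ // applied to `span`.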
+ (match token_descr {
+ Some(TokenDescription::ReservedIdentifier) => {
+ ExpectedIdentifierFound::ReservedIdentifier
+ }
+ Some(TokenDescription::Keyword) => ExpectedIdentifierFound::Keyword,
+ Some(TokenDescription::ReservedKeyword) => ExpectedIdentifierFound::ReservedKeyword,
+ Some(TokenDescription::DocComment) => ExpectedIdentifierFound::DocComment,
+ None => ExpectedIdentifierFound::Other,
+ })(span)
+ }
+}
+
+pub(crate) struct ExpectedIdentifier {
+ pub span: Span,
+ pub token: Token,
+ pub suggest_raw: Option<SuggEscapeToUseAsIdentifier>,
+ pub suggest_remove_comma: Option<SuggRemoveComma>,
+}
+
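+// `ExpectedIdentifier` (and `ExpectedSemi` below) pick their primary message
+// based on the kind of token that was found, so `IntoDiagnostic` is
+// implemented by hand rather than derived.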
+impl<'a, G: EmissionGuarantee> IntoDiagnostic<'a, G> for ExpectedIdentifier {
+ fn into_diagnostic(
+ self,
+ handler: &'a rustc_errors::Handler,
+ ) -> rustc_errors::DiagnosticBuilder<'a, G> {
+ let token_descr = super::parser::TokenDescription::from_token(&self.token);
+
+ let mut diag = handler.struct_diagnostic(match token_descr {
+ Some(TokenDescription::ReservedIdentifier) => {
+ fluent::parser_expected_identifier_found_reserved_identifier_str
+ }
+ Some(TokenDescription::Keyword) => fluent::parser_expected_identifier_found_keyword_str,
+ Some(TokenDescription::ReservedKeyword) => {
+ fluent::parser_expected_identifier_found_reserved_keyword_str
+ }
+ Some(TokenDescription::DocComment) => {
+ fluent::parser_expected_identifier_found_doc_comment_str
+ }
+ None => fluent::parser_expected_identifier_found_str,
+ });
+ diag.set_span(self.span);
+ diag.set_arg("token", self.token);
+
+ if let Some(sugg) = self.suggest_raw {
+ sugg.add_to_diagnostic(&mut diag);
+ }
+
+ ExpectedIdentifierFound::new(token_descr, self.span).add_to_diagnostic(&mut diag);
+
+ if let Some(sugg) = self.suggest_remove_comma {
+ sugg.add_to_diagnostic(&mut diag);
+ }
+
+ diag
+ }
+}
+
+pub(crate) struct ExpectedSemi {
+ pub span: Span,
+ pub token: Token,
+
+ pub unexpected_token_label: Option<Span>,
+ pub sugg: ExpectedSemiSugg,
+}
+
+impl<'a, G: EmissionGuarantee> IntoDiagnostic<'a, G> for ExpectedSemi {
+ fn into_diagnostic(
+ self,
+ handler: &'a rustc_errors::Handler,
+ ) -> rustc_errors::DiagnosticBuilder<'a, G> {
+ let token_descr = super::parser::TokenDescription::from_token(&self.token);
+
+ let mut diag = handler.struct_diagnostic(match token_descr {
+ Some(TokenDescription::ReservedIdentifier) => {
+ fluent::parser_expected_semi_found_reserved_identifier_str
+ }
+ Some(TokenDescription::Keyword) => fluent::parser_expected_semi_found_keyword_str,
+ Some(TokenDescription::ReservedKeyword) => {
+ fluent::parser_expected_semi_found_reserved_keyword_str
+ }
+ Some(TokenDescription::DocComment) => {
+ fluent::parser_expected_semi_found_doc_comment_str
+ }
+ None => fluent::parser_expected_semi_found_str,
+ });
+ diag.set_span(self.span);
+ diag.set_arg("token", self.token);
+
+ if let Some(unexpected_token_label) = self.unexpected_token_label {
+ diag.span_label(unexpected_token_label, fluent::parser_label_unexpected_token);
+ }
+
+ self.sugg.add_to_diagnostic(&mut diag);
+
+ diag
+ }
+}
+
+#[derive(Subdiagnostic)]
+pub(crate) enum ExpectedSemiSugg {
+ #[suggestion(
+ parser_sugg_change_this_to_semi,
+ code = ";",
+ applicability = "machine-applicable"
+ )]
+ ChangeToSemi(#[primary_span] Span),
+ #[suggestion_short(parser_sugg_add_semi, code = ";", applicability = "machine-applicable")]
+ AddSemi(#[primary_span] Span),
+}
+
+#[derive(Diagnostic)]
+#[diag(parser_struct_literal_body_without_path)]
+pub(crate) struct StructLiteralBodyWithoutPath {
+ #[primary_span]
+ pub span: Span,
+ #[subdiagnostic]
+ pub sugg: StructLiteralBodyWithoutPathSugg,
+}
+
+#[derive(Subdiagnostic)]
+#[multipart_suggestion(suggestion, applicability = "has-placeholders")]
+pub(crate) struct StructLiteralBodyWithoutPathSugg {
+ #[suggestion_part(code = "{{ SomeStruct ")]
+ pub before: Span,
+ #[suggestion_part(code = " }}")]
+ pub after: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(parser_unmatched_angle_brackets)]
+pub(crate) struct UnmatchedAngleBrackets {
+ #[primary_span]
+ #[suggestion(code = "", applicability = "machine-applicable")]
+ pub span: Span,
+ pub num_extra_brackets: usize,
+}
+
+#[derive(Diagnostic)]
+#[diag(parser_generic_parameters_without_angle_brackets)]
+pub(crate) struct GenericParamsWithoutAngleBrackets {
+ #[primary_span]
+ pub span: Span,
+ #[subdiagnostic]
+ pub sugg: GenericParamsWithoutAngleBracketsSugg,
+}
+
+#[derive(Subdiagnostic)]
+#[multipart_suggestion(suggestion, applicability = "machine-applicable")]
+pub(crate) struct GenericParamsWithoutAngleBracketsSugg {
+ #[suggestion_part(code = "<")]
+ pub left: Span,
+ #[suggestion_part(code = ">")]
+ pub right: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(parser_comparison_operators_cannot_be_chained)]
+pub(crate) struct ComparisonOperatorsCannotBeChained {
+ #[primary_span]
+ pub span: Vec<Span>,
+ #[suggestion_verbose(
+ parser_sugg_turbofish_syntax,
+ code = "::",
+ applicability = "maybe-incorrect"
+ )]
+ pub suggest_turbofish: Option<Span>,
+ #[help(parser_sugg_turbofish_syntax)]
+ #[help(sugg_parentheses_for_function_args)]
+ pub help_turbofish: Option<()>,
+ #[subdiagnostic]
+ pub chaining_sugg: Option<ComparisonOperatorsCannotBeChainedSugg>,
+}
+
+#[derive(Subdiagnostic)]
+pub(crate) enum ComparisonOperatorsCannotBeChainedSugg {
+ #[suggestion_verbose(
+ sugg_split_comparison,
+ code = " && {middle_term}",
+ applicability = "maybe-incorrect"
+ )]
+ SplitComparison {
+ #[primary_span]
+ span: Span,
+ middle_term: String,
+ },
+ #[multipart_suggestion(sugg_parenthesize, applicability = "maybe-incorrect")]
+ Parenthesize {
+ #[suggestion_part(code = "(")]
+ left: Span,
+ #[suggestion_part(code = ")")]
+ right: Span,
+ },
+}
+
+#[derive(Diagnostic)]
+#[diag(parser_question_mark_in_type)]
+pub(crate) struct QuestionMarkInType {
+ #[primary_span]
+ #[label]
+ pub span: Span,
+ #[subdiagnostic]
+ pub sugg: QuestionMarkInTypeSugg,
+}
+
+#[derive(Subdiagnostic)]
+#[multipart_suggestion(suggestion, applicability = "machine-applicable")]
+pub(crate) struct QuestionMarkInTypeSugg {
+ #[suggestion_part(code = "Option<")]
+ pub left: Span,
+ #[suggestion_part(code = ">")]
+ pub right: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(parser_unexpected_parentheses_in_for_head)]
+pub(crate) struct ParenthesesInForHead {
+ #[primary_span]
+ pub span: Vec<Span>,
+ #[subdiagnostic]
+ pub sugg: ParenthesesInForHeadSugg,
+}
+
+#[derive(Subdiagnostic)]
+#[multipart_suggestion(suggestion, applicability = "machine-applicable")]
+pub(crate) struct ParenthesesInForHeadSugg {
+ #[suggestion_part(code = "")]
+ pub left: Span,
+ #[suggestion_part(code = "")]
+ pub right: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(parser_doc_comment_on_param_type)]
+pub(crate) struct DocCommentOnParamType {
+ #[primary_span]
+ #[label]
+ pub span: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(parser_attribute_on_param_type)]
+pub(crate) struct AttributeOnParamType {
+ #[primary_span]
+ #[label]
+ pub span: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(parser_pattern_method_param_without_body, code = "E0642")]
+pub(crate) struct PatternMethodParamWithoutBody {
+ #[primary_span]
+ #[suggestion(code = "_", applicability = "machine-applicable")]
+ pub span: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(parser_self_param_not_first)]
+pub(crate) struct SelfParamNotFirst {
+ #[primary_span]
+ #[label]
+ pub span: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(parser_const_generic_without_braces)]
+pub(crate) struct ConstGenericWithoutBraces {
+ #[primary_span]
+ pub span: Span,
+ #[subdiagnostic]
+ pub sugg: ConstGenericWithoutBracesSugg,
+}
+
+#[derive(Subdiagnostic)]
+#[multipart_suggestion(suggestion, applicability = "machine-applicable")]
+pub(crate) struct ConstGenericWithoutBracesSugg {
+ #[suggestion_part(code = "{{ ")]
+ pub left: Span,
+ #[suggestion_part(code = " }}")]
+ pub right: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(parser_unexpected_const_param_declaration)]
+pub(crate) struct UnexpectedConstParamDeclaration {
+ #[primary_span]
+ #[label]
+ pub span: Span,
+ #[subdiagnostic]
+ pub sugg: Option<UnexpectedConstParamDeclarationSugg>,
+}
+
+#[derive(Subdiagnostic)]
+pub(crate) enum UnexpectedConstParamDeclarationSugg {
+ #[multipart_suggestion(suggestion, applicability = "machine-applicable")]
+ AddParam {
+ #[suggestion_part(code = "<{snippet}>")]
+ impl_generics: Span,
+ #[suggestion_part(code = "{ident}")]
+ incorrect_decl: Span,
+ snippet: String,
+ ident: String,
+ },
+ #[multipart_suggestion(suggestion, applicability = "machine-applicable")]
+ AppendParam {
+ #[suggestion_part(code = ", {snippet}")]
+ impl_generics_end: Span,
+ #[suggestion_part(code = "{ident}")]
+ incorrect_decl: Span,
+ snippet: String,
+ ident: String,
+ },
+}
+
+#[derive(Diagnostic)]
+#[diag(parser_unexpected_const_in_generic_param)]
+pub(crate) struct UnexpectedConstInGenericParam {
+ #[primary_span]
+ pub span: Span,
+ #[suggestion_verbose(code = "", applicability = "maybe-incorrect")]
+ pub to_remove: Option<Span>,
+}
+
+#[derive(Diagnostic)]
+#[diag(parser_async_move_order_incorrect)]
+pub(crate) struct AsyncMoveOrderIncorrect {
+ #[primary_span]
+ #[suggestion_verbose(code = "async move", applicability = "maybe-incorrect")]
+ pub span: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(parser_double_colon_in_bound)]
+pub(crate) struct DoubleColonInBound {
+ #[primary_span]
+ pub span: Span,
+ #[suggestion(code = ": ", applicability = "machine-applicable")]
+ pub between: Span,
+}
diff --git a/compiler/rustc_parse/src/lexer/mod.rs b/compiler/rustc_parse/src/lexer/mod.rs
index 848e142e5..462bce16a 100644
--- a/compiler/rustc_parse/src/lexer/mod.rs
+++ b/compiler/rustc_parse/src/lexer/mod.rs
@@ -1,10 +1,13 @@
use crate::lexer::unicode_chars::UNICODE_ARRAY;
use rustc_ast::ast::{self, AttrStyle};
use rustc_ast::token::{self, CommentKind, Delimiter, Token, TokenKind};
-use rustc_ast::tokenstream::{Spacing, TokenStream};
+use rustc_ast::tokenstream::TokenStream;
use rustc_ast::util::unicode::contains_text_flow_control_chars;
-use rustc_errors::{error_code, Applicability, DiagnosticBuilder, ErrorGuaranteed, PResult};
+use rustc_errors::{
+ error_code, Applicability, DiagnosticBuilder, ErrorGuaranteed, PResult, StashKey,
+};
use rustc_lexer::unescape::{self, Mode};
+use rustc_lexer::Cursor;
use rustc_lexer::{Base, DocStyle, RawStrError};
use rustc_session::lint::builtin::{
RUST_2021_PREFIXES_INCOMPATIBLE_SYNTAX, TEXT_DIRECTION_CODEPOINT_IN_COMMENT,
@@ -14,8 +17,6 @@ use rustc_session::parse::ParseSess;
use rustc_span::symbol::{sym, Symbol};
use rustc_span::{edition::Edition, BytePos, Pos, Span};
-use tracing::debug;
-
mod tokentrees;
mod unescape_error_reporting;
mod unicode_chars;
@@ -40,11 +41,20 @@ pub struct UnmatchedBrace {
pub(crate) fn parse_token_trees<'a>(
sess: &'a ParseSess,
- src: &'a str,
- start_pos: BytePos,
+ mut src: &'a str,
+ mut start_pos: BytePos,
override_span: Option<Span>,
) -> (PResult<'a, TokenStream>, Vec<UnmatchedBrace>) {
- StringReader { sess, start_pos, pos: start_pos, src, override_span }.into_token_trees()
+ // Skip `#!`, if present.
+ if let Some(shebang_len) = rustc_lexer::strip_shebang(src) {
+ src = &src[shebang_len..];
+ start_pos = start_pos + BytePos::from_usize(shebang_len);
+ }
+
+ let cursor = Cursor::new(src);
+ let string_reader =
+ StringReader { sess, start_pos, pos: start_pos, src, cursor, override_span };
+ tokentrees::TokenTreesReader::parse_all_token_trees(string_reader)
}
struct StringReader<'a> {
@@ -55,6 +65,8 @@ struct StringReader<'a> {
pos: BytePos,
/// Source text to tokenize.
src: &'a str,
+ /// Cursor for getting lexer tokens.
+ cursor: Cursor<'a>,
override_span: Option<Span>,
}
@@ -63,42 +75,198 @@ impl<'a> StringReader<'a> {
self.override_span.unwrap_or_else(|| Span::with_root_ctxt(lo, hi))
}
- /// Returns the next token, and info about preceding whitespace, if any.
- fn next_token(&mut self) -> (Spacing, Token) {
- let mut spacing = Spacing::Joint;
-
- // Skip `#!` at the start of the file
- if self.pos == self.start_pos
- && let Some(shebang_len) = rustc_lexer::strip_shebang(self.src)
- {
- self.pos = self.pos + BytePos::from_usize(shebang_len);
- spacing = Spacing::Alone;
- }
+ /// Returns the next token, paired with a bool indicating if the token was
+ /// preceded by whitespace.
+ fn next_token(&mut self) -> (Token, bool) {
+ let mut preceded_by_whitespace = false;
// Skip trivial (whitespace & comments) tokens
loop {
- let start_src_index = self.src_index(self.pos);
- let text: &str = &self.src[start_src_index..];
-
- if text.is_empty() {
- let span = self.mk_sp(self.pos, self.pos);
- return (spacing, Token::new(token::Eof, span));
- }
-
- let token = rustc_lexer::first_token(text);
-
+ let token = self.cursor.advance_token();
let start = self.pos;
self.pos = self.pos + BytePos(token.len);
debug!("next_token: {:?}({:?})", token.kind, self.str_from(start));
- match self.cook_lexer_token(token.kind, start) {
- Some(kind) => {
+ // Now "cook" the token, converting the simple `rustc_lexer::TokenKind` enum into a
+ // rich `rustc_ast::TokenKind`. This turns strings into interned symbols and runs
+ // additional validation.
+ let kind = match token.kind {
+ rustc_lexer::TokenKind::LineComment { doc_style } => {
+ // Skip non-doc comments
+ let Some(doc_style) = doc_style else {
+ self.lint_unicode_text_flow(start);
+ preceded_by_whitespace = true;
+ continue;
+ };
+
+ // The opening delimiter (3 characters) is not included in the symbol.
+ let content_start = start + BytePos(3);
+ let content = self.str_from(content_start);
+ self.cook_doc_comment(content_start, content, CommentKind::Line, doc_style)
+ }
+ rustc_lexer::TokenKind::BlockComment { doc_style, terminated } => {
+ if !terminated {
+ self.report_unterminated_block_comment(start, doc_style);
+ }
+
+ // Skip non-doc comments
+ let Some(doc_style) = doc_style else {
+ self.lint_unicode_text_flow(start);
+ preceded_by_whitespace = true;
+ continue;
+ };
+
+ // The opening delimiter (3 characters) and the closing delimiter
+ // (2 characters) are not included in the symbol.
+ let content_start = start + BytePos(3);
+ let content_end = self.pos - BytePos(if terminated { 2 } else { 0 });
+ let content = self.str_from_to(content_start, content_end);
+ self.cook_doc_comment(content_start, content, CommentKind::Block, doc_style)
+ }
+ rustc_lexer::TokenKind::Whitespace => {
+ preceded_by_whitespace = true;
+ continue;
+ }
+ rustc_lexer::TokenKind::Ident => {
+ let sym = nfc_normalize(self.str_from(start));
let span = self.mk_sp(start, self.pos);
- return (spacing, Token::new(kind, span));
+ self.sess.symbol_gallery.insert(sym, span);
+ token::Ident(sym, false)
}
- None => spacing = Spacing::Alone,
- }
+ rustc_lexer::TokenKind::RawIdent => {
+ let sym = nfc_normalize(self.str_from(start + BytePos(2)));
+ let span = self.mk_sp(start, self.pos);
+ self.sess.symbol_gallery.insert(sym, span);
+ if !sym.can_be_raw() {
+ self.err_span(span, &format!("`{}` cannot be a raw identifier", sym));
+ }
+ self.sess.raw_identifier_spans.borrow_mut().push(span);
+ token::Ident(sym, true)
+ }
+ rustc_lexer::TokenKind::UnknownPrefix => {
+ self.report_unknown_prefix(start);
+ let sym = nfc_normalize(self.str_from(start));
+ let span = self.mk_sp(start, self.pos);
+ self.sess.symbol_gallery.insert(sym, span);
+ token::Ident(sym, false)
+ }
+ rustc_lexer::TokenKind::InvalidIdent
+ // Do not recover an identifier with emoji if the codepoint is a confusable
+ // with a recoverable substitution token, like `➖`.
+ if !UNICODE_ARRAY
+ .iter()
+ .any(|&(c, _, _)| {
+ let sym = self.str_from(start);
+ sym.chars().count() == 1 && c == sym.chars().next().unwrap()
+ }) =>
+ {
+ let sym = nfc_normalize(self.str_from(start));
+ let span = self.mk_sp(start, self.pos);
+ self.sess.bad_unicode_identifiers.borrow_mut().entry(sym).or_default()
+ .push(span);
+ token::Ident(sym, false)
+ }
+ rustc_lexer::TokenKind::Literal { kind, suffix_start } => {
+ let suffix_start = start + BytePos(suffix_start);
+ let (kind, symbol) = self.cook_lexer_literal(start, suffix_start, kind);
+ let suffix = if suffix_start < self.pos {
+ let string = self.str_from(suffix_start);
+ if string == "_" {
+ self.sess
+ .span_diagnostic
+ .struct_span_warn(
+ self.mk_sp(suffix_start, self.pos),
+ "underscore literal suffix is not allowed",
+ )
+ .warn(
+ "this was previously accepted by the compiler but is \
+ being phased out; it will become a hard error in \
+ a future release!",
+ )
+ .note(
+ "see issue #42326 \
+ <https://github.com/rust-lang/rust/issues/42326> \
+ for more information",
+ )
+ .emit();
+ None
+ } else {
+ Some(Symbol::intern(string))
+ }
+ } else {
+ None
+ };
+ token::Literal(token::Lit { kind, symbol, suffix })
+ }
+ rustc_lexer::TokenKind::Lifetime { starts_with_number } => {
+ // Include the leading `'` in the real identifier, for macro
+ // expansion purposes. See #12512 for the gory details of why
+ // this is necessary.
+ let lifetime_name = self.str_from(start);
+ if starts_with_number {
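+ // Stash the error instead of emitting it immediately, so the parser
+ // can steal or cancel it later if this turns out to be an unclosed
+ // char literal such as `'1` rather than a lifetime.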
+ let span = self.mk_sp(start, self.pos);
+ let mut diag = self.sess.struct_err("lifetimes cannot start with a number");
+ diag.set_span(span);
+ diag.stash(span, StashKey::LifetimeIsChar);
+ }
+ let ident = Symbol::intern(lifetime_name);
+ token::Lifetime(ident)
+ }
+ rustc_lexer::TokenKind::Semi => token::Semi,
+ rustc_lexer::TokenKind::Comma => token::Comma,
+ rustc_lexer::TokenKind::Dot => token::Dot,
+ rustc_lexer::TokenKind::OpenParen => token::OpenDelim(Delimiter::Parenthesis),
+ rustc_lexer::TokenKind::CloseParen => token::CloseDelim(Delimiter::Parenthesis),
+ rustc_lexer::TokenKind::OpenBrace => token::OpenDelim(Delimiter::Brace),
+ rustc_lexer::TokenKind::CloseBrace => token::CloseDelim(Delimiter::Brace),
+ rustc_lexer::TokenKind::OpenBracket => token::OpenDelim(Delimiter::Bracket),
+ rustc_lexer::TokenKind::CloseBracket => token::CloseDelim(Delimiter::Bracket),
+ rustc_lexer::TokenKind::At => token::At,
+ rustc_lexer::TokenKind::Pound => token::Pound,
+ rustc_lexer::TokenKind::Tilde => token::Tilde,
+ rustc_lexer::TokenKind::Question => token::Question,
+ rustc_lexer::TokenKind::Colon => token::Colon,
+ rustc_lexer::TokenKind::Dollar => token::Dollar,
+ rustc_lexer::TokenKind::Eq => token::Eq,
+ rustc_lexer::TokenKind::Bang => token::Not,
+ rustc_lexer::TokenKind::Lt => token::Lt,
+ rustc_lexer::TokenKind::Gt => token::Gt,
+ rustc_lexer::TokenKind::Minus => token::BinOp(token::Minus),
+ rustc_lexer::TokenKind::And => token::BinOp(token::And),
+ rustc_lexer::TokenKind::Or => token::BinOp(token::Or),
+ rustc_lexer::TokenKind::Plus => token::BinOp(token::Plus),
+ rustc_lexer::TokenKind::Star => token::BinOp(token::Star),
+ rustc_lexer::TokenKind::Slash => token::BinOp(token::Slash),
+ rustc_lexer::TokenKind::Caret => token::BinOp(token::Caret),
+ rustc_lexer::TokenKind::Percent => token::BinOp(token::Percent),
+
+ rustc_lexer::TokenKind::Unknown | rustc_lexer::TokenKind::InvalidIdent => {
+ let c = self.str_from(start).chars().next().unwrap();
+ let mut err =
+ self.struct_err_span_char(start, self.pos, "unknown start of token", c);
+ // FIXME: the lexer could turn unicode homoglyphs into the
+ // corresponding ASCII tokens directly, instead of keeping a table in
+ // `check_for_substitution`. Ideally, this should be inside
+ // `rustc_lexer`. However, we should first remove compound tokens
+ // like `<<` from `rustc_lexer`, and then add fancier error recovery
+ // to it, as there will be less overall work to do this way.
+ let token = unicode_chars::check_for_substitution(self, start, c, &mut err);
+ if c == '\x00' {
+ err.help("source files must contain UTF-8 encoded text, unexpected null bytes might occur when a different encoding is used");
+ }
+ err.emit();
+ if let Some(token) = token {
+ token
+ } else {
+ preceded_by_whitespace = true;
+ continue;
+ }
+ }
+ rustc_lexer::TokenKind::Eof => token::Eof,
+ };
+ let span = self.mk_sp(start, self.pos);
+ return (Token::new(kind, span), preceded_by_whitespace);
}
}
@@ -164,171 +332,6 @@ impl<'a> StringReader<'a> {
}
}
- /// Turns simple `rustc_lexer::TokenKind` enum into a rich
- /// `rustc_ast::TokenKind`. This turns strings into interned
- /// symbols and runs additional validation.
- fn cook_lexer_token(&self, token: rustc_lexer::TokenKind, start: BytePos) -> Option<TokenKind> {
- Some(match token {
- rustc_lexer::TokenKind::LineComment { doc_style } => {
- // Skip non-doc comments
- let Some(doc_style) = doc_style else {
- self.lint_unicode_text_flow(start);
- return None;
- };
-
- // Opening delimiter of the length 3 is not included into the symbol.
- let content_start = start + BytePos(3);
- let content = self.str_from(content_start);
- self.cook_doc_comment(content_start, content, CommentKind::Line, doc_style)
- }
- rustc_lexer::TokenKind::BlockComment { doc_style, terminated } => {
- if !terminated {
- self.report_unterminated_block_comment(start, doc_style);
- }
-
- // Skip non-doc comments
- let Some(doc_style) = doc_style else {
- self.lint_unicode_text_flow(start);
- return None;
- };
-
- // Opening delimiter of the length 3 and closing delimiter of the length 2
- // are not included into the symbol.
- let content_start = start + BytePos(3);
- let content_end = self.pos - BytePos(if terminated { 2 } else { 0 });
- let content = self.str_from_to(content_start, content_end);
- self.cook_doc_comment(content_start, content, CommentKind::Block, doc_style)
- }
- rustc_lexer::TokenKind::Whitespace => return None,
- rustc_lexer::TokenKind::Ident
- | rustc_lexer::TokenKind::RawIdent
- | rustc_lexer::TokenKind::UnknownPrefix => {
- let is_raw_ident = token == rustc_lexer::TokenKind::RawIdent;
- let is_unknown_prefix = token == rustc_lexer::TokenKind::UnknownPrefix;
- let mut ident_start = start;
- if is_raw_ident {
- ident_start = ident_start + BytePos(2);
- }
- if is_unknown_prefix {
- self.report_unknown_prefix(start);
- }
- let sym = nfc_normalize(self.str_from(ident_start));
- let span = self.mk_sp(start, self.pos);
- self.sess.symbol_gallery.insert(sym, span);
- if is_raw_ident {
- if !sym.can_be_raw() {
- self.err_span(span, &format!("`{}` cannot be a raw identifier", sym));
- }
- self.sess.raw_identifier_spans.borrow_mut().push(span);
- }
- token::Ident(sym, is_raw_ident)
- }
- rustc_lexer::TokenKind::InvalidIdent
- // Do not recover an identifier with emoji if the codepoint is a confusable
- // with a recoverable substitution token, like `➖`.
- if !UNICODE_ARRAY
- .iter()
- .any(|&(c, _, _)| {
- let sym = self.str_from(start);
- sym.chars().count() == 1 && c == sym.chars().next().unwrap()
- })
- =>
- {
- let sym = nfc_normalize(self.str_from(start));
- let span = self.mk_sp(start, self.pos);
- self.sess.bad_unicode_identifiers.borrow_mut().entry(sym).or_default().push(span);
- token::Ident(sym, false)
- }
- rustc_lexer::TokenKind::Literal { kind, suffix_start } => {
- let suffix_start = start + BytePos(suffix_start);
- let (kind, symbol) = self.cook_lexer_literal(start, suffix_start, kind);
- let suffix = if suffix_start < self.pos {
- let string = self.str_from(suffix_start);
- if string == "_" {
- self.sess
- .span_diagnostic
- .struct_span_warn(
- self.mk_sp(suffix_start, self.pos),
- "underscore literal suffix is not allowed",
- )
- .warn(
- "this was previously accepted by the compiler but is \
- being phased out; it will become a hard error in \
- a future release!",
- )
- .note(
- "see issue #42326 \
- <https://github.com/rust-lang/rust/issues/42326> \
- for more information",
- )
- .emit();
- None
- } else {
- Some(Symbol::intern(string))
- }
- } else {
- None
- };
- token::Literal(token::Lit { kind, symbol, suffix })
- }
- rustc_lexer::TokenKind::Lifetime { starts_with_number } => {
- // Include the leading `'` in the real identifier, for macro
- // expansion purposes. See #12512 for the gory details of why
- // this is necessary.
- let lifetime_name = self.str_from(start);
- if starts_with_number {
- self.err_span_(start, self.pos, "lifetimes cannot start with a number");
- }
- let ident = Symbol::intern(lifetime_name);
- token::Lifetime(ident)
- }
- rustc_lexer::TokenKind::Semi => token::Semi,
- rustc_lexer::TokenKind::Comma => token::Comma,
- rustc_lexer::TokenKind::Dot => token::Dot,
- rustc_lexer::TokenKind::OpenParen => token::OpenDelim(Delimiter::Parenthesis),
- rustc_lexer::TokenKind::CloseParen => token::CloseDelim(Delimiter::Parenthesis),
- rustc_lexer::TokenKind::OpenBrace => token::OpenDelim(Delimiter::Brace),
- rustc_lexer::TokenKind::CloseBrace => token::CloseDelim(Delimiter::Brace),
- rustc_lexer::TokenKind::OpenBracket => token::OpenDelim(Delimiter::Bracket),
- rustc_lexer::TokenKind::CloseBracket => token::CloseDelim(Delimiter::Bracket),
- rustc_lexer::TokenKind::At => token::At,
- rustc_lexer::TokenKind::Pound => token::Pound,
- rustc_lexer::TokenKind::Tilde => token::Tilde,
- rustc_lexer::TokenKind::Question => token::Question,
- rustc_lexer::TokenKind::Colon => token::Colon,
- rustc_lexer::TokenKind::Dollar => token::Dollar,
- rustc_lexer::TokenKind::Eq => token::Eq,
- rustc_lexer::TokenKind::Bang => token::Not,
- rustc_lexer::TokenKind::Lt => token::Lt,
- rustc_lexer::TokenKind::Gt => token::Gt,
- rustc_lexer::TokenKind::Minus => token::BinOp(token::Minus),
- rustc_lexer::TokenKind::And => token::BinOp(token::And),
- rustc_lexer::TokenKind::Or => token::BinOp(token::Or),
- rustc_lexer::TokenKind::Plus => token::BinOp(token::Plus),
- rustc_lexer::TokenKind::Star => token::BinOp(token::Star),
- rustc_lexer::TokenKind::Slash => token::BinOp(token::Slash),
- rustc_lexer::TokenKind::Caret => token::BinOp(token::Caret),
- rustc_lexer::TokenKind::Percent => token::BinOp(token::Percent),
-
- rustc_lexer::TokenKind::Unknown | rustc_lexer::TokenKind::InvalidIdent => {
- let c = self.str_from(start).chars().next().unwrap();
- let mut err =
- self.struct_err_span_char(start, self.pos, "unknown start of token", c);
- // FIXME: the lexer could be used to turn the ASCII version of unicode homoglyphs,
- // instead of keeping a table in `check_for_substitution`into the token. Ideally,
- // this should be inside `rustc_lexer`. However, we should first remove compound
- // tokens like `<<` from `rustc_lexer`, and then add fancier error recovery to it,
- // as there will be less overall work to do this way.
- let token = unicode_chars::check_for_substitution(self, start, c, &mut err);
- if c == '\x00' {
- err.help("source files must contain UTF-8 encoded text, unexpected null bytes might occur when a different encoding is used");
- }
- err.emit();
- token?
- }
- })
- }
-
fn cook_doc_comment(
&self,
content_start: BytePos,
diff --git a/compiler/rustc_parse/src/lexer/tokentrees.rs b/compiler/rustc_parse/src/lexer/tokentrees.rs
index aa70912dc..b2701817d 100644
--- a/compiler/rustc_parse/src/lexer/tokentrees.rs
+++ b/compiler/rustc_parse/src/lexer/tokentrees.rs
@@ -1,31 +1,15 @@
use super::{StringReader, UnmatchedBrace};
-
use rustc_ast::token::{self, Delimiter, Token};
use rustc_ast::tokenstream::{DelimSpan, Spacing, TokenStream, TokenTree};
use rustc_ast_pretty::pprust::token_to_string;
use rustc_data_structures::fx::FxHashMap;
-use rustc_errors::PResult;
+use rustc_errors::{PErr, PResult};
use rustc_span::Span;
-impl<'a> StringReader<'a> {
- pub(super) fn into_token_trees(self) -> (PResult<'a, TokenStream>, Vec<UnmatchedBrace>) {
- let mut tt_reader = TokenTreesReader {
- string_reader: self,
- token: Token::dummy(),
- open_braces: Vec::new(),
- unmatched_braces: Vec::new(),
- matching_delim_spans: Vec::new(),
- last_unclosed_found_span: None,
- last_delim_empty_block_spans: FxHashMap::default(),
- matching_block_spans: Vec::new(),
- };
- let res = tt_reader.parse_all_token_trees();
- (res, tt_reader.unmatched_braces)
- }
-}
-
-struct TokenTreesReader<'a> {
+pub(super) struct TokenTreesReader<'a> {
string_reader: StringReader<'a>,
+ /// The "next" token, which has been obtained from the `StringReader` but
+ /// not yet handled by the `TokenTreesReader`.
token: Token,
/// Stack of open delimiters and their spans. Used for error message.
open_braces: Vec<(Delimiter, Span)>,
@@ -43,254 +27,232 @@ struct TokenTreesReader<'a> {
}
impl<'a> TokenTreesReader<'a> {
- // Parse a stream of tokens into a list of `TokenTree`s, up to an `Eof`.
- fn parse_all_token_trees(&mut self) -> PResult<'a, TokenStream> {
- let mut buf = TokenStreamBuilder::default();
-
- self.bump();
- while self.token != token::Eof {
- buf.push(self.parse_token_tree()?);
- }
-
- Ok(buf.into_token_stream())
+ pub(super) fn parse_all_token_trees(
+ string_reader: StringReader<'a>,
+ ) -> (PResult<'a, TokenStream>, Vec<UnmatchedBrace>) {
+ let mut tt_reader = TokenTreesReader {
+ string_reader,
+ token: Token::dummy(),
+ open_braces: Vec::new(),
+ unmatched_braces: Vec::new(),
+ matching_delim_spans: Vec::new(),
+ last_unclosed_found_span: None,
+ last_delim_empty_block_spans: FxHashMap::default(),
+ matching_block_spans: Vec::new(),
+ };
+ let res = tt_reader.parse_token_trees(/* is_delimited */ false);
+ (res, tt_reader.unmatched_braces)
}
- // Parse a stream of tokens into a list of `TokenTree`s, up to a `CloseDelim`.
- fn parse_token_trees_until_close_delim(&mut self) -> TokenStream {
- let mut buf = TokenStreamBuilder::default();
+ // Parse a stream of tokens into a list of `TokenTree`s.
+ fn parse_token_trees(&mut self, is_delimited: bool) -> PResult<'a, TokenStream> {
+ self.token = self.string_reader.next_token().0;
+ let mut buf = Vec::new();
loop {
- if let token::CloseDelim(..) = self.token.kind {
- return buf.into_token_stream();
- }
-
- match self.parse_token_tree() {
- Ok(tree) => buf.push(tree),
- Err(mut e) => {
- e.emit();
- return buf.into_token_stream();
+ match self.token.kind {
+ token::OpenDelim(delim) => buf.push(self.parse_token_tree_open_delim(delim)),
+ token::CloseDelim(delim) => {
+ return if is_delimited {
+ Ok(TokenStream::new(buf))
+ } else {
+ Err(self.close_delim_err(delim))
+ };
+ }
+ token::Eof => {
+ if is_delimited {
+ self.eof_err().emit();
+ }
+ return Ok(TokenStream::new(buf));
+ }
+ _ => {
+ // Get the next normal token. This might require getting multiple adjacent
+ // single-char tokens and joining them together.
+ let (this_spacing, next_tok) = loop {
+ let (next_tok, is_next_tok_preceded_by_whitespace) =
+ self.string_reader.next_token();
+ if !is_next_tok_preceded_by_whitespace {
+ if let Some(glued) = self.token.glue(&next_tok) {
+ self.token = glued;
+ } else {
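+ // No gluing possible: the current token gets `Joint` spacing only
+ // if the next token is an operator immediately following it, with
+ // no whitespace in between.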
+ let this_spacing =
+ if next_tok.is_op() { Spacing::Joint } else { Spacing::Alone };
+ break (this_spacing, next_tok);
+ }
+ } else {
+ break (Spacing::Alone, next_tok);
+ }
+ };
+ let this_tok = std::mem::replace(&mut self.token, next_tok);
+ buf.push(TokenTree::Token(this_tok, this_spacing));
}
}
}
}
- fn parse_token_tree(&mut self) -> PResult<'a, TokenTree> {
- let sm = self.string_reader.sess.source_map();
-
- match self.token.kind {
- token::Eof => {
- let msg = "this file contains an unclosed delimiter";
- let mut err =
- self.string_reader.sess.span_diagnostic.struct_span_err(self.token.span, msg);
- for &(_, sp) in &self.open_braces {
- err.span_label(sp, "unclosed delimiter");
- self.unmatched_braces.push(UnmatchedBrace {
- expected_delim: Delimiter::Brace,
- found_delim: None,
- found_span: self.token.span,
- unclosed_span: Some(sp),
- candidate_span: None,
- });
- }
+ fn eof_err(&mut self) -> PErr<'a> {
+ let msg = "this file contains an unclosed delimiter";
+ let mut err = self.string_reader.sess.span_diagnostic.struct_span_err(self.token.span, msg);
+ for &(_, sp) in &self.open_braces {
+ err.span_label(sp, "unclosed delimiter");
+ self.unmatched_braces.push(UnmatchedBrace {
+ expected_delim: Delimiter::Brace,
+ found_delim: None,
+ found_span: self.token.span,
+ unclosed_span: Some(sp),
+ candidate_span: None,
+ });
+ }
- if let Some((delim, _)) = self.open_braces.last() {
- if let Some((_, open_sp, close_sp)) =
- self.matching_delim_spans.iter().find(|(d, open_sp, close_sp)| {
- if let Some(close_padding) = sm.span_to_margin(*close_sp) {
- if let Some(open_padding) = sm.span_to_margin(*open_sp) {
- return delim == d && close_padding != open_padding;
- }
- }
- false
- })
- // these are in reverse order as they get inserted on close, but
- {
- // we want the last open/first close
- err.span_label(*open_sp, "this delimiter might not be properly closed...");
- err.span_label(
- *close_sp,
- "...as it matches this but it has different indentation",
- );
+ if let Some((delim, _)) = self.open_braces.last() {
+ if let Some((_, open_sp, close_sp)) =
+ self.matching_delim_spans.iter().find(|(d, open_sp, close_sp)| {
+ let sm = self.string_reader.sess.source_map();
+ if let Some(close_padding) = sm.span_to_margin(*close_sp) {
+ if let Some(open_padding) = sm.span_to_margin(*open_sp) {
+ return delim == d && close_padding != open_padding;
+ }
}
- }
- Err(err)
+ false
+ })
+ // these are in reverse order as they get inserted on close, but
+ {
+ // we want the last open/first close
+ err.span_label(*open_sp, "this delimiter might not be properly closed...");
+ err.span_label(*close_sp, "...as it matches this but it has different indentation");
}
- token::OpenDelim(delim) => {
- // The span for beginning of the delimited section
- let pre_span = self.token.span;
-
- // Parse the open delimiter.
- self.open_braces.push((delim, self.token.span));
- self.bump();
+ }
+ err
+ }
- // Parse the token trees within the delimiters.
- // We stop at any delimiter so we can try to recover if the user
- // uses an incorrect delimiter.
- let tts = self.parse_token_trees_until_close_delim();
+ fn parse_token_tree_open_delim(&mut self, open_delim: Delimiter) -> TokenTree {
+ // The span for beginning of the delimited section
+ let pre_span = self.token.span;
- // Expand to cover the entire delimited token tree
- let delim_span = DelimSpan::from_pair(pre_span, self.token.span);
+ self.open_braces.push((open_delim, self.token.span));
- match self.token.kind {
- // Correct delimiter.
- token::CloseDelim(d) if d == delim => {
- let (open_brace, open_brace_span) = self.open_braces.pop().unwrap();
- let close_brace_span = self.token.span;
+ // Parse the token trees within the delimiters.
+ // We stop at any delimiter so we can try to recover if the user
+ // uses an incorrect delimiter.
+ let tts = self.parse_token_trees(/* is_delimited */ true).unwrap();
- if tts.is_empty() {
- let empty_block_span = open_brace_span.to(close_brace_span);
- if !sm.is_multiline(empty_block_span) {
- // Only track if the block is in the form of `{}`, otherwise it is
- // likely that it was written on purpose.
- self.last_delim_empty_block_spans.insert(delim, empty_block_span);
- }
- }
+ // Expand to cover the entire delimited token tree
+ let delim_span = DelimSpan::from_pair(pre_span, self.token.span);
- //only add braces
- if let (Delimiter::Brace, Delimiter::Brace) = (open_brace, delim) {
- self.matching_block_spans.push((open_brace_span, close_brace_span));
- }
+ match self.token.kind {
+ // Correct delimiter.
+ token::CloseDelim(close_delim) if close_delim == open_delim => {
+ let (open_brace, open_brace_span) = self.open_braces.pop().unwrap();
+ let close_brace_span = self.token.span;
- if self.open_braces.is_empty() {
- // Clear up these spans to avoid suggesting them as we've found
- // properly matched delimiters so far for an entire block.
- self.matching_delim_spans.clear();
- } else {
- self.matching_delim_spans.push((
- open_brace,
- open_brace_span,
- close_brace_span,
- ));
- }
- // Parse the closing delimiter.
- self.bump();
+ if tts.is_empty() {
+ let empty_block_span = open_brace_span.to(close_brace_span);
+ let sm = self.string_reader.sess.source_map();
+ if !sm.is_multiline(empty_block_span) {
+ // Only track if the block is in the form of `{}`, otherwise it is
+ // likely that it was written on purpose.
+ self.last_delim_empty_block_spans.insert(open_delim, empty_block_span);
}
- // Incorrect delimiter.
- token::CloseDelim(other) => {
- let mut unclosed_delimiter = None;
- let mut candidate = None;
-
- if self.last_unclosed_found_span != Some(self.token.span) {
- // do not complain about the same unclosed delimiter multiple times
- self.last_unclosed_found_span = Some(self.token.span);
- // This is a conservative error: only report the last unclosed
- // delimiter. The previous unclosed delimiters could actually be
- // closed! The parser just hasn't gotten to them yet.
- if let Some(&(_, sp)) = self.open_braces.last() {
- unclosed_delimiter = Some(sp);
- };
- if let Some(current_padding) = sm.span_to_margin(self.token.span) {
- for (brace, brace_span) in &self.open_braces {
- if let Some(padding) = sm.span_to_margin(*brace_span) {
- // high likelihood of these two corresponding
- if current_padding == padding && brace == &other {
- candidate = Some(*brace_span);
- }
- }
- }
- }
- let (tok, _) = self.open_braces.pop().unwrap();
- self.unmatched_braces.push(UnmatchedBrace {
- expected_delim: tok,
- found_delim: Some(other),
- found_span: self.token.span,
- unclosed_span: unclosed_delimiter,
- candidate_span: candidate,
- });
- } else {
- self.open_braces.pop();
- }
+ }
- // If the incorrect delimiter matches an earlier opening
- // delimiter, then don't consume it (it can be used to
- // close the earlier one). Otherwise, consume it.
- // E.g., we try to recover from:
- // fn foo() {
- // bar(baz(
- // } // Incorrect delimiter but matches the earlier `{`
- if !self.open_braces.iter().any(|&(b, _)| b == other) {
- self.bump();
- }
- }
- token::Eof => {
- // Silently recover, the EOF token will be seen again
- // and an error emitted then. Thus we don't pop from
- // self.open_braces here.
- }
- _ => {}
+ // only add braces
+ if let (Delimiter::Brace, Delimiter::Brace) = (open_brace, open_delim) {
+ self.matching_block_spans.push((open_brace_span, close_brace_span));
}
- Ok(TokenTree::Delimited(delim_span, delim, tts))
+ if self.open_braces.is_empty() {
+ // Clear up these spans to avoid suggesting them as we've found
+ // properly matched delimiters so far for an entire block.
+ self.matching_delim_spans.clear();
+ } else {
+ self.matching_delim_spans.push((open_brace, open_brace_span, close_brace_span));
+ }
+ // Move past the closing delimiter.
+ self.token = self.string_reader.next_token().0;
}
- token::CloseDelim(delim) => {
- // An unexpected closing delimiter (i.e., there is no
- // matching opening delimiter).
- let token_str = token_to_string(&self.token);
- let msg = format!("unexpected closing delimiter: `{}`", token_str);
- let mut err =
- self.string_reader.sess.span_diagnostic.struct_span_err(self.token.span, &msg);
+ // Incorrect delimiter.
+ token::CloseDelim(close_delim) => {
+ let mut unclosed_delimiter = None;
+ let mut candidate = None;
- // Braces are added at the end, so the last element is the biggest block
- if let Some(parent) = self.matching_block_spans.last() {
- if let Some(span) = self.last_delim_empty_block_spans.remove(&delim) {
- // Check if the (empty block) is in the last properly closed block
- if (parent.0.to(parent.1)).contains(span) {
- err.span_label(
- span,
- "block is empty, you might have not meant to close it",
- );
- } else {
- err.span_label(parent.0, "this opening brace...");
-
- err.span_label(parent.1, "...matches this closing brace");
+ if self.last_unclosed_found_span != Some(self.token.span) {
+ // do not complain about the same unclosed delimiter multiple times
+ self.last_unclosed_found_span = Some(self.token.span);
+ // This is a conservative error: only report the last unclosed
+ // delimiter. The previous unclosed delimiters could actually be
+ // closed! The parser just hasn't gotten to them yet.
+ if let Some(&(_, sp)) = self.open_braces.last() {
+ unclosed_delimiter = Some(sp);
+ };
+ let sm = self.string_reader.sess.source_map();
+ if let Some(current_padding) = sm.span_to_margin(self.token.span) {
+ for (brace, brace_span) in &self.open_braces {
+ if let Some(padding) = sm.span_to_margin(*brace_span) {
+ // high likelihood of these two corresponding
+ if current_padding == padding && brace == &close_delim {
+ candidate = Some(*brace_span);
+ }
+ }
}
- } else {
- err.span_label(parent.0, "this opening brace...");
-
- err.span_label(parent.1, "...matches this closing brace");
}
+ let (tok, _) = self.open_braces.pop().unwrap();
+ self.unmatched_braces.push(UnmatchedBrace {
+ expected_delim: tok,
+ found_delim: Some(close_delim),
+ found_span: self.token.span,
+ unclosed_span: unclosed_delimiter,
+ candidate_span: candidate,
+ });
+ } else {
+ self.open_braces.pop();
}
- err.span_label(self.token.span, "unexpected closing delimiter");
- Err(err)
- }
- _ => {
- let tok = self.token.take();
- let mut spacing = self.bump();
- if !self.token.is_op() {
- spacing = Spacing::Alone;
+ // If the incorrect delimiter matches an earlier opening
+ // delimiter, then don't consume it (it can be used to
+ // close the earlier one). Otherwise, consume it.
+ // E.g., we try to recover from:
+ // fn foo() {
+ // bar(baz(
+ // } // Incorrect delimiter but matches the earlier `{`
+ if !self.open_braces.iter().any(|&(b, _)| b == close_delim) {
+ self.token = self.string_reader.next_token().0;
}
- Ok(TokenTree::Token(tok, spacing))
}
+ token::Eof => {
+ // Silently recover, the EOF token will be seen again
+ // and an error emitted then. Thus we don't pop from
+ // self.open_braces here.
+ }
+ _ => unreachable!(),
}
- }
- fn bump(&mut self) -> Spacing {
- let (spacing, token) = self.string_reader.next_token();
- self.token = token;
- spacing
+ TokenTree::Delimited(delim_span, open_delim, tts)
}
-}
-#[derive(Default)]
-struct TokenStreamBuilder {
- buf: Vec<TokenTree>,
-}
+ fn close_delim_err(&mut self, delim: Delimiter) -> PErr<'a> {
+ // An unexpected closing delimiter (i.e., there is no
+ // matching opening delimiter).
+ let token_str = token_to_string(&self.token);
+ let msg = format!("unexpected closing delimiter: `{}`", token_str);
+ let mut err =
+ self.string_reader.sess.span_diagnostic.struct_span_err(self.token.span, &msg);
-impl TokenStreamBuilder {
- #[inline(always)]
- fn push(&mut self, tree: TokenTree) {
- if let Some(TokenTree::Token(prev_token, Spacing::Joint)) = self.buf.last()
- && let TokenTree::Token(token, joint) = &tree
- && let Some(glued) = prev_token.glue(token)
- {
- self.buf.pop();
- self.buf.push(TokenTree::Token(glued, *joint));
- } else {
- self.buf.push(tree)
+ // Braces are added at the end, so the last element is the biggest block
+ if let Some(parent) = self.matching_block_spans.last() {
+ if let Some(span) = self.last_delim_empty_block_spans.remove(&delim) {
+ // Check if the (empty block) is in the last properly closed block
+ if (parent.0.to(parent.1)).contains(span) {
+ err.span_label(span, "block is empty, you might have not meant to close it");
+ } else {
+ err.span_label(parent.0, "this opening brace...");
+ err.span_label(parent.1, "...matches this closing brace");
+ }
+ } else {
+ err.span_label(parent.0, "this opening brace...");
+ err.span_label(parent.1, "...matches this closing brace");
+ }
}
- }
- fn into_token_stream(self) -> TokenStream {
- TokenStream::new(self.buf)
+ err.span_label(self.token.span, "unexpected closing delimiter");
+ err
}
}
diff --git a/compiler/rustc_parse/src/lexer/unescape_error_reporting.rs b/compiler/rustc_parse/src/lexer/unescape_error_reporting.rs
index 273827864..f075de714 100644
--- a/compiler/rustc_parse/src/lexer/unescape_error_reporting.rs
+++ b/compiler/rustc_parse/src/lexer/unescape_error_reporting.rs
@@ -20,13 +20,9 @@ pub(crate) fn emit_unescape_error(
range: Range<usize>,
error: EscapeError,
) {
- tracing::debug!(
+ debug!(
"emit_unescape_error: {:?}, {:?}, {:?}, {:?}, {:?}",
- lit,
- span_with_quotes,
- mode,
- range,
- error
+ lit, span_with_quotes, mode, range, error
);
let last_char = || {
let c = lit[range.clone()].chars().rev().next().unwrap();
@@ -117,11 +113,26 @@ pub(crate) fn emit_unescape_error(
} else {
("", "if you meant to write a `str` literal, use double quotes")
};
-
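+ // Escape bare double quotes in the literal (keeping existing `\"`
+ // escapes) so the suggested `str` literal below is valid.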
+ let mut escaped = String::with_capacity(lit.len());
+ let mut chrs = lit.chars().peekable();
+ while let Some(first) = chrs.next() {
+ match (first, chrs.peek()) {
+ ('\\', Some('"')) => {
+ escaped.push('\\');
+ escaped.push('"');
+ chrs.next();
+ }
+ ('"', _) => {
+ escaped.push('\\');
+ escaped.push('"')
+ }
+ (c, _) => escaped.push(c),
+ };
+ }
handler.span_suggestion(
span_with_quotes,
msg,
- format!("{}\"{}\"", prefix, lit),
+ format!("{prefix}\"{escaped}\""),
Applicability::MachineApplicable,
);
}
diff --git a/compiler/rustc_parse/src/lib.rs b/compiler/rustc_parse/src/lib.rs
index 8c087c65c..3dcadb4c9 100644
--- a/compiler/rustc_parse/src/lib.rs
+++ b/compiler/rustc_parse/src/lib.rs
@@ -4,7 +4,6 @@
#![feature(box_patterns)]
#![feature(if_let_guard)]
#![feature(let_chains)]
-#![feature(let_else)]
#![feature(never_type)]
#![feature(rustc_attrs)]
#![recursion_limit = "256"]
@@ -33,12 +32,15 @@ use parser::{emit_unclosed_delims, make_unclosed_delims_error, Parser};
pub mod lexer;
pub mod validate_attr;
+mod errors;
+
// A bunch of utility functions of the form `parse_<thing>_from_<source>`
// where <thing> includes crate, expr, item, stmt, tts, and one that
// uses a HOF to parse anything, and <source> includes file and
// `source_str`.
-/// A variant of 'panictry!' that works on a Vec<Diagnostic> instead of a single DiagnosticBuilder.
+/// A variant of 'panictry!' that works on a `Vec<Diagnostic>` instead of a single
+/// `DiagnosticBuilder`.
macro_rules! panictry_buffer {
($handler:expr, $e:expr) => {{
use rustc_errors::FatalError;
@@ -63,7 +65,7 @@ pub fn parse_crate_from_file<'a>(input: &Path, sess: &'a ParseSess) -> PResult<'
pub fn parse_crate_attrs_from_file<'a>(
input: &Path,
sess: &'a ParseSess,
-) -> PResult<'a, Vec<ast::Attribute>> {
+) -> PResult<'a, ast::AttrVec> {
let mut parser = new_parser_from_file(sess, input, None);
parser.parse_inner_attributes()
}
@@ -80,7 +82,7 @@ pub fn parse_crate_attrs_from_source_str(
name: FileName,
source: String,
sess: &ParseSess,
-) -> PResult<'_, Vec<ast::Attribute>> {
+) -> PResult<'_, ast::AttrVec> {
new_parser_from_source_str(sess, name, source).parse_inner_attributes()
}
diff --git a/compiler/rustc_parse/src/parser/attr.rs b/compiler/rustc_parse/src/parser/attr.rs
index acdbddf40..9e4565694 100644
--- a/compiler/rustc_parse/src/parser/attr.rs
+++ b/compiler/rustc_parse/src/parser/attr.rs
@@ -1,29 +1,26 @@
+use crate::errors::{InvalidMetaItem, SuffixedLiteralInAttribute};
+
use super::{AttrWrapper, Capturing, FnParseMode, ForceCollect, Parser, PathStyle};
use rustc_ast as ast;
use rustc_ast::attr;
use rustc_ast::token::{self, Delimiter, Nonterminal};
-use rustc_ast_pretty::pprust;
-use rustc_errors::{error_code, Diagnostic, PResult};
+use rustc_errors::{error_code, fluent, Diagnostic, IntoDiagnostic, PResult};
use rustc_span::{sym, BytePos, Span};
use std::convert::TryInto;
-use tracing::debug;
-
// Public for rustfmt usage
#[derive(Debug)]
-pub enum InnerAttrPolicy<'a> {
+pub enum InnerAttrPolicy {
Permitted,
- Forbidden { reason: &'a str, saw_doc_comment: bool, prev_outer_attr_sp: Option<Span> },
+ Forbidden(Option<InnerAttrForbiddenReason>),
}
-const DEFAULT_UNEXPECTED_INNER_ATTR_ERR_MSG: &str = "an inner attribute is not \
- permitted in this context";
-
-pub(super) const DEFAULT_INNER_ATTR_FORBIDDEN: InnerAttrPolicy<'_> = InnerAttrPolicy::Forbidden {
- reason: DEFAULT_UNEXPECTED_INNER_ATTR_ERR_MSG,
- saw_doc_comment: false,
- prev_outer_attr_sp: None,
-};
+#[derive(Clone, Copy, Debug)]
+pub enum InnerAttrForbiddenReason {
+ InCodeBlock,
+ AfterOuterDocComment { prev_doc_comment_span: Span },
+ AfterOuterAttribute { prev_outer_attr_sp: Span },
+}
enum OuterAttributeType {
DocComment,
@@ -34,7 +31,7 @@ enum OuterAttributeType {
impl<'a> Parser<'a> {
/// Parses attributes that appear before an item.
pub(super) fn parse_outer_attributes(&mut self) -> PResult<'a, AttrWrapper> {
- let mut outer_attrs: Vec<ast::Attribute> = Vec::new();
+ let mut outer_attrs = ast::AttrVec::new();
let mut just_parsed_doc_comment = false;
let start_pos = self.token_cursor.num_next_calls;
loop {
@@ -42,17 +39,15 @@ impl<'a> Parser<'a> {
let prev_outer_attr_sp = outer_attrs.last().map(|attr| attr.span);
let inner_error_reason = if just_parsed_doc_comment {
- "an inner attribute is not permitted following an outer doc comment"
- } else if prev_outer_attr_sp.is_some() {
- "an inner attribute is not permitted following an outer attribute"
+ Some(InnerAttrForbiddenReason::AfterOuterDocComment {
+ prev_doc_comment_span: prev_outer_attr_sp.unwrap(),
+ })
+ } else if let Some(prev_outer_attr_sp) = prev_outer_attr_sp {
+ Some(InnerAttrForbiddenReason::AfterOuterAttribute { prev_outer_attr_sp })
} else {
- DEFAULT_UNEXPECTED_INNER_ATTR_ERR_MSG
- };
- let inner_parse_policy = InnerAttrPolicy::Forbidden {
- reason: inner_error_reason,
- saw_doc_comment: just_parsed_doc_comment,
- prev_outer_attr_sp,
+ None
};
+ let inner_parse_policy = InnerAttrPolicy::Forbidden(inner_error_reason);
just_parsed_doc_comment = false;
Some(self.parse_attribute(inner_parse_policy)?)
} else if let token::DocComment(comment_kind, attr_style, data) = self.token.kind {
@@ -60,7 +55,7 @@ impl<'a> Parser<'a> {
let span = self.token.span;
let mut err = self.sess.span_diagnostic.struct_span_err_with_code(
span,
- "expected outer doc comment",
+ fluent::parser_inner_doc_comment_not_permitted,
error_code!(E0753),
);
if let Some(replacement_span) = self.annotate_following_item_if_applicable(
@@ -71,13 +66,10 @@ impl<'a> Parser<'a> {
token::CommentKind::Block => OuterAttributeType::DocBlockComment,
},
) {
- err.note(
- "inner doc comments like this (starting with `//!` or `/*!`) can \
- only appear before items",
- );
+ err.note(fluent::note);
err.span_suggestion_verbose(
replacement_span,
- "you might have meant to write a regular comment",
+ fluent::suggestion,
"",
rustc_errors::Applicability::MachineApplicable,
);
@@ -89,6 +81,7 @@ impl<'a> Parser<'a> {
// Always make an outer attribute - this allows us to recover from a misplaced
// inner attribute.
Some(attr::mk_doc_comment(
+ &self.sess.attr_id_generator,
comment_kind,
ast::AttrStyle::Outer,
data,
@@ -106,7 +99,7 @@ impl<'a> Parser<'a> {
break;
}
}
- Ok(AttrWrapper::new(outer_attrs.into(), start_pos))
+ Ok(AttrWrapper::new(outer_attrs, start_pos))
}
/// Matches `attribute = # ! [ meta_item ]`.
@@ -114,7 +107,7 @@ impl<'a> Parser<'a> {
// Public for rustfmt usage.
pub fn parse_attribute(
&mut self,
- inner_parse_policy: InnerAttrPolicy<'_>,
+ inner_parse_policy: InnerAttrPolicy,
) -> PResult<'a, ast::Attribute> {
debug!(
"parse_attribute: inner_parse_policy={:?} self.token={:?}",
@@ -123,29 +116,22 @@ impl<'a> Parser<'a> {
let lo = self.token.span;
// Attributes can't have attributes of their own [Editor's note: not with that attitude]
self.collect_tokens_no_attrs(|this| {
- if this.eat(&token::Pound) {
- let style = if this.eat(&token::Not) {
- ast::AttrStyle::Inner
- } else {
- ast::AttrStyle::Outer
- };
+ assert!(this.eat(&token::Pound), "parse_attribute called in non-attribute position");
- this.expect(&token::OpenDelim(Delimiter::Bracket))?;
- let item = this.parse_attr_item(false)?;
- this.expect(&token::CloseDelim(Delimiter::Bracket))?;
- let attr_sp = lo.to(this.prev_token.span);
+ let style =
+ if this.eat(&token::Not) { ast::AttrStyle::Inner } else { ast::AttrStyle::Outer };
- // Emit error if inner attribute is encountered and forbidden.
- if style == ast::AttrStyle::Inner {
- this.error_on_forbidden_inner_attr(attr_sp, inner_parse_policy);
- }
+ this.expect(&token::OpenDelim(Delimiter::Bracket))?;
+ let item = this.parse_attr_item(false)?;
+ this.expect(&token::CloseDelim(Delimiter::Bracket))?;
+ let attr_sp = lo.to(this.prev_token.span);
- Ok(attr::mk_attr_from_item(item, None, style, attr_sp))
- } else {
- let token_str = pprust::token_to_string(&this.token);
- let msg = &format!("expected `#`, found `{token_str}`");
- Err(this.struct_span_err(this.token.span, msg))
+ // Emit error if inner attribute is encountered and forbidden.
+ if style == ast::AttrStyle::Inner {
+ this.error_on_forbidden_inner_attr(attr_sp, inner_parse_policy);
}
+
+ Ok(attr::mk_attr_from_item(&self.sess.attr_id_generator, item, None, style, attr_sp))
})
}
@@ -185,21 +171,12 @@ impl<'a> Parser<'a> {
ForceCollect::No,
) {
Ok(Some(item)) => {
- let attr_name = match attr_type {
- OuterAttributeType::Attribute => "attribute",
- _ => "doc comment",
- };
- err.span_label(
- item.span,
- &format!("the inner {} doesn't annotate this {}", attr_name, item.kind.descr()),
- );
+ // FIXME(#100717)
+ err.set_arg("item", item.kind.descr());
+ err.span_label(item.span, fluent::label_does_not_annotate_this);
err.span_suggestion_verbose(
replacement_span,
- &format!(
- "to annotate the {}, change the {} from inner to outer style",
- item.kind.descr(),
- attr_name
- ),
+ fluent::sugg_change_inner_to_outer,
match attr_type {
OuterAttributeType::Attribute => "",
OuterAttributeType::DocBlockComment => "*",
@@ -217,22 +194,33 @@ impl<'a> Parser<'a> {
Some(replacement_span)
}
- pub(super) fn error_on_forbidden_inner_attr(&self, attr_sp: Span, policy: InnerAttrPolicy<'_>) {
- if let InnerAttrPolicy::Forbidden { reason, saw_doc_comment, prev_outer_attr_sp } = policy {
- let prev_outer_attr_note =
- if saw_doc_comment { "previous doc comment" } else { "previous outer attribute" };
-
- let mut diag = self.struct_span_err(attr_sp, reason);
-
- if let Some(prev_outer_attr_sp) = prev_outer_attr_sp {
- diag.span_label(attr_sp, "not permitted following an outer attribute")
- .span_label(prev_outer_attr_sp, prev_outer_attr_note);
- }
+ pub(super) fn error_on_forbidden_inner_attr(&self, attr_sp: Span, policy: InnerAttrPolicy) {
+ if let InnerAttrPolicy::Forbidden(reason) = policy {
+ let mut diag = match reason.as_ref().copied() {
+ Some(InnerAttrForbiddenReason::AfterOuterDocComment { prev_doc_comment_span }) => {
+ let mut diag = self.struct_span_err(
+ attr_sp,
+ fluent::parser_inner_attr_not_permitted_after_outer_doc_comment,
+ );
+ diag.span_label(attr_sp, fluent::label_attr)
+ .span_label(prev_doc_comment_span, fluent::label_prev_doc_comment);
+ diag
+ }
+ Some(InnerAttrForbiddenReason::AfterOuterAttribute { prev_outer_attr_sp }) => {
+ let mut diag = self.struct_span_err(
+ attr_sp,
+ fluent::parser_inner_attr_not_permitted_after_outer_attr,
+ );
+ diag.span_label(attr_sp, fluent::label_attr)
+ .span_label(prev_outer_attr_sp, fluent::label_prev_attr);
+ diag
+ }
+ Some(InnerAttrForbiddenReason::InCodeBlock) | None => {
+ self.struct_span_err(attr_sp, fluent::parser_inner_attr_not_permitted)
+ }
+ };
- diag.note(
- "inner attributes, like `#![no_std]`, annotate the item enclosing them, and \
- are usually found at the beginning of source files",
- );
+ diag.note(fluent::parser_inner_attr_explanation);
if self
.annotate_following_item_if_applicable(
&mut diag,
@@ -241,7 +229,7 @@ impl<'a> Parser<'a> {
)
.is_some()
{
- diag.note("outer attributes, like `#[test]`, annotate the item following them");
+ diag.note(fluent::parser_outer_attr_explanation);
};
diag.emit();
}
@@ -283,8 +271,8 @@ impl<'a> Parser<'a> {
/// terminated by a semicolon.
///
/// Matches `inner_attrs*`.
- pub(crate) fn parse_inner_attributes(&mut self) -> PResult<'a, Vec<ast::Attribute>> {
- let mut attrs: Vec<ast::Attribute> = vec![];
+ pub(crate) fn parse_inner_attributes(&mut self) -> PResult<'a, ast::AttrVec> {
+ let mut attrs = ast::AttrVec::new();
loop {
let start_pos: u32 = self.token_cursor.num_next_calls.try_into().unwrap();
// Only try to parse if it is an inner attribute (has `!`).
@@ -293,7 +281,13 @@ impl<'a> Parser<'a> {
} else if let token::DocComment(comment_kind, attr_style, data) = self.token.kind {
if attr_style == ast::AttrStyle::Inner {
self.bump();
- Some(attr::mk_doc_comment(comment_kind, attr_style, data, self.prev_token.span))
+ Some(attr::mk_doc_comment(
+ &self.sess.attr_id_generator,
+ comment_kind,
+ attr_style,
+ data,
+ self.prev_token.span,
+ ))
} else {
None
}
@@ -303,9 +297,9 @@ impl<'a> Parser<'a> {
if let Some(attr) = attr {
let end_pos: u32 = self.token_cursor.num_next_calls.try_into().unwrap();
// If we are currently capturing tokens, mark the location of this inner attribute.
- // If capturing ends up creating a `LazyTokenStream`, we will include
+ // If capturing ends up creating a `LazyAttrTokenStream`, we will include
// this replace range with it, removing the inner attribute from the final
- // `AttrAnnotatedTokenStream`. Inner attributes are stored in the parsed AST note.
+ // `AttrTokenStream`. Inner attributes are stored in the parsed AST node.
// During macro expansion, they are selectively inserted back into the
// token stream (the first inner attribute is removed each time we invoke the
// corresponding macro).
@@ -326,12 +320,7 @@ impl<'a> Parser<'a> {
debug!("checking if {:?} is unusuffixed", lit);
if !lit.kind.is_unsuffixed() {
- self.struct_span_err(lit.span, "suffixed literals are not allowed in attributes")
- .help(
- "instead of using a suffixed literal (`1u8`, `1.0f32`, etc.), \
- use an unsuffixed version (`1`, `1.0`, etc.)",
- )
- .emit();
+ self.sess.emit_err(SuffixedLiteralInAttribute { span: lit.span });
}
Ok(lit)
@@ -424,9 +413,8 @@ impl<'a> Parser<'a> {
Err(err) => err.cancel(),
}
- let found = pprust::token_to_string(&self.token);
- let msg = format!("expected unsuffixed literal or identifier, found `{found}`");
- Err(self.struct_span_err(self.token.span, &msg))
+ Err(InvalidMetaItem { span: self.token.span, token: self.token.clone() }
+ .into_diagnostic(&self.sess.span_diagnostic))
}
}
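
The hunks above replace the old string-based fields of `InnerAttrPolicy::Forbidden` with a structured `InnerAttrForbiddenReason`, so each case can map onto its own Fluent diagnostic. A condensed standalone sketch of the new shape (spans reduced to `u32`; illustrative only, not the compiler's definitions):

    #[derive(Clone, Copy, Debug)]
    enum InnerAttrForbiddenReason {
        InCodeBlock,
        AfterOuterDocComment { prev_doc_comment_span: u32 },
        AfterOuterAttribute { prev_outer_attr_sp: u32 },
    }

    enum InnerAttrPolicy {
        Permitted,
        // `None` plays the role of the old catch-all "not permitted in this context" message.
        Forbidden(Option<InnerAttrForbiddenReason>),
    }

    // Each reason selects its own translatable message instead of an inline string constant.
    fn message(policy: &InnerAttrPolicy) -> &'static str {
        match policy {
            InnerAttrPolicy::Permitted => "inner attribute permitted",
            InnerAttrPolicy::Forbidden(Some(InnerAttrForbiddenReason::AfterOuterDocComment { .. })) => {
                "an inner attribute is not permitted following an outer doc comment"
            }
            InnerAttrPolicy::Forbidden(Some(InnerAttrForbiddenReason::AfterOuterAttribute { .. })) => {
                "an inner attribute is not permitted following an outer attribute"
            }
            InnerAttrPolicy::Forbidden(_) => "an inner attribute is not permitted in this context",
        }
    }
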
diff --git a/compiler/rustc_parse/src/parser/attr_wrapper.rs b/compiler/rustc_parse/src/parser/attr_wrapper.rs
index 6c750ff42..1b16ecb5e 100644
--- a/compiler/rustc_parse/src/parser/attr_wrapper.rs
+++ b/compiler/rustc_parse/src/parser/attr_wrapper.rs
@@ -1,7 +1,7 @@
use super::{Capturing, FlatToken, ForceCollect, Parser, ReplaceRange, TokenCursor, TrailingToken};
use rustc_ast::token::{self, Delimiter, Token, TokenKind};
-use rustc_ast::tokenstream::{AttrAnnotatedTokenStream, AttributesData, CreateTokenStream};
-use rustc_ast::tokenstream::{AttrAnnotatedTokenTree, DelimSpan, LazyTokenStream, Spacing};
+use rustc_ast::tokenstream::{AttrTokenStream, AttributesData, ToAttrTokenStream};
+use rustc_ast::tokenstream::{AttrTokenTree, DelimSpan, LazyAttrTokenStream, Spacing};
use rustc_ast::{self as ast};
use rustc_ast::{AttrVec, Attribute, HasAttrs, HasTokens};
use rustc_errors::PResult;
@@ -15,11 +15,11 @@ use std::ops::Range;
/// for the attribute target. This allows us to perform cfg-expansion on
/// a token stream before we invoke a derive proc-macro.
///
-/// This wrapper prevents direct access to the underlying `Vec<ast::Attribute>`.
+/// This wrapper prevents direct access to the underlying `ast::AttrVec`.
/// Parsing code can only get access to the underlying attributes
/// by passing an `AttrWrapper` to `collect_tokens_trailing_tokens`.
/// This makes it difficult to accidentally construct an AST node
-/// (which stores a `Vec<ast::Attribute>`) without first collecting tokens.
+/// (which stores an `ast::AttrVec`) without first collecting tokens.
///
/// This struct has its own module, to ensure that the parser code
/// cannot directly access the `attrs` field
@@ -32,11 +32,6 @@ pub struct AttrWrapper {
start_pos: usize,
}
-// This struct is passed around very frequently,
-// so make sure it doesn't accidentally get larger
-#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
-rustc_data_structures::static_assert_size!(AttrWrapper, 16);
-
impl AttrWrapper {
pub(super) fn new(attrs: AttrVec, start_pos: usize) -> AttrWrapper {
AttrWrapper { attrs, start_pos }
@@ -49,9 +44,10 @@ impl AttrWrapper {
self.attrs
}
+ // Prepend `self.attrs` to `attrs`.
// FIXME: require passing an NT to prevent misuse of this method
- pub(crate) fn prepend_to_nt_inner(self, attrs: &mut Vec<Attribute>) {
- let mut self_attrs: Vec<_> = self.attrs.into();
+ pub(crate) fn prepend_to_nt_inner(self, attrs: &mut AttrVec) {
+ let mut self_attrs = self.attrs;
std::mem::swap(attrs, &mut self_attrs);
attrs.extend(self_attrs);
}
@@ -87,7 +83,7 @@ fn has_cfg_or_cfg_attr(attrs: &[Attribute]) -> bool {
// This also makes `Parser` very cheap to clone, since
// there is no intermediate collection buffer to clone.
#[derive(Clone)]
-struct LazyTokenStreamImpl {
+struct LazyAttrTokenStreamImpl {
start_token: (Token, Spacing),
cursor_snapshot: TokenCursor,
num_calls: usize,
@@ -95,11 +91,8 @@ struct LazyTokenStreamImpl {
replace_ranges: Box<[ReplaceRange]>,
}
-#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
-rustc_data_structures::static_assert_size!(LazyTokenStreamImpl, 144);
-
-impl CreateTokenStream for LazyTokenStreamImpl {
- fn create_token_stream(&self) -> AttrAnnotatedTokenStream {
+impl ToAttrTokenStream for LazyAttrTokenStreamImpl {
+ fn to_attr_token_stream(&self) -> AttrTokenStream {
// The token produced by the final call to `{,inlined_}next` was not
// actually consumed by the callback. The combination of chaining the
// initial token and using `take` produces the desired result - we
@@ -116,7 +109,7 @@ impl CreateTokenStream for LazyTokenStreamImpl {
if !self.replace_ranges.is_empty() {
let mut tokens: Vec<_> = tokens.collect();
- let mut replace_ranges = self.replace_ranges.clone();
+ let mut replace_ranges = self.replace_ranges.to_vec();
replace_ranges.sort_by_key(|(range, _)| range.start);
#[cfg(debug_assertions)]
@@ -146,7 +139,7 @@ impl CreateTokenStream for LazyTokenStreamImpl {
// start position, we ensure that any replace range which encloses
// another replace range will capture the *replaced* tokens for the inner
// range, not the original tokens.
- for (range, new_tokens) in replace_ranges.iter().rev() {
+ for (range, new_tokens) in replace_ranges.into_iter().rev() {
assert!(!range.is_empty(), "Cannot replace an empty range: {:?}", range);
// Replace ranges are only allowed to decrease the number of tokens.
assert!(
@@ -165,7 +158,7 @@ impl CreateTokenStream for LazyTokenStreamImpl {
tokens.splice(
(range.start as usize)..(range.end as usize),
- new_tokens.clone().into_iter().chain(filler),
+ new_tokens.into_iter().chain(filler),
);
}
make_token_stream(tokens.into_iter(), self.break_last_token)
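
The replace-range handling above sorts by start position and then applies replacements from the greatest start downward, padding with `FlatToken::Empty` so indices stay stable and an enclosing range sees the already-replaced contents of any nested range. A toy standalone version of that strategy (with `None` standing in for the empty filler; illustrative, not the patch's code):

    use std::ops::Range;

    fn apply_replacements(tokens: &mut Vec<Option<char>>, mut ranges: Vec<(Range<usize>, Vec<char>)>) {
        ranges.sort_by_key(|(range, _)| range.start);
        // Walk from the greatest start position downward so an enclosing range sees the
        // already-replaced contents of any range nested inside it.
        for (range, new) in ranges.into_iter().rev() {
            assert!(new.len() <= range.len(), "replacements may only shrink a range");
            // Pad with `None` so the overall length (and every other range's indices)
            // is preserved, mirroring the `FlatToken::Empty` filler in the real code.
            let filler = std::iter::repeat(None).take(range.len() - new.len());
            tokens.splice(range, new.into_iter().map(Some).chain(filler));
        }
    }
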
@@ -178,7 +171,7 @@ impl CreateTokenStream for LazyTokenStreamImpl {
impl<'a> Parser<'a> {
/// Records all tokens consumed by the provided callback,
/// including the current token. These tokens are collected
- /// into a `LazyTokenStream`, and returned along with the result
+ /// into a `LazyAttrTokenStream`, and returned along with the result
/// of the callback.
///
/// Note: If your callback consumes an opening delimiter
@@ -196,7 +189,7 @@ impl<'a> Parser<'a> {
&mut self,
attrs: AttrWrapper,
force_collect: ForceCollect,
- f: impl FnOnce(&mut Self, Vec<ast::Attribute>) -> PResult<'a, (R, TrailingToken)>,
+ f: impl FnOnce(&mut Self, ast::AttrVec) -> PResult<'a, (R, TrailingToken)>,
) -> PResult<'a, R> {
// We only bail out when nothing could possibly observe the collected tokens:
// 1. We cannot be force collecting tokens (since force-collecting requires tokens
@@ -212,7 +205,7 @@ impl<'a> Parser<'a> {
// or `#[cfg_attr]` attributes.
&& !self.capture_cfg
{
- return Ok(f(self, attrs.attrs.into())?.0);
+ return Ok(f(self, attrs.attrs)?.0);
}
let start_token = (self.token.clone(), self.token_spacing);
@@ -222,7 +215,7 @@ impl<'a> Parser<'a> {
let prev_capturing = std::mem::replace(&mut self.capture_state.capturing, Capturing::Yes);
let replace_ranges_start = self.capture_state.replace_ranges.len();
- let ret = f(self, attrs.attrs.into());
+ let ret = f(self, attrs.attrs);
self.capture_state.capturing = prev_capturing;
@@ -280,30 +273,33 @@ impl<'a> Parser<'a> {
let cursor_snapshot_next_calls = cursor_snapshot.num_next_calls;
let mut end_pos = self.token_cursor.num_next_calls;
+ let mut captured_trailing = false;
+
// Capture a trailing token if requested by the callback 'f'
match trailing {
TrailingToken::None => {}
+ TrailingToken::Gt => {
+ assert_eq!(self.token.kind, token::Gt);
+ }
TrailingToken::Semi => {
assert_eq!(self.token.kind, token::Semi);
end_pos += 1;
+ captured_trailing = true;
}
TrailingToken::MaybeComma => {
if self.token.kind == token::Comma {
end_pos += 1;
+ captured_trailing = true;
}
}
}
// If we 'broke' the last token (e.g. breaking a '>>' token to two '>' tokens),
// then extend the range of captured tokens to include it, since the parser
- // was not actually bumped past it. When the `LazyTokenStream` gets converted
- // into an `AttrAnnotatedTokenStream`, we will create the proper token.
+ // was not actually bumped past it. When the `LazyAttrTokenStream` gets converted
+ // into an `AttrTokenStream`, we will create the proper token.
if self.token_cursor.break_last_token {
- assert_eq!(
- trailing,
- TrailingToken::None,
- "Cannot set `break_last_token` and have trailing token"
- );
+ assert!(!captured_trailing, "Cannot set break_last_token and have trailing token");
end_pos += 1;
}
@@ -315,20 +311,20 @@ impl<'a> Parser<'a> {
Box::new([])
} else {
// Grab any replace ranges that occur *inside* the current AST node.
- // We will perform the actual replacement when we convert the `LazyTokenStream`
- // to an `AttrAnnotatedTokenStream`
+ // We will perform the actual replacement when we convert the `LazyAttrTokenStream`
+ // to an `AttrTokenStream`.
let start_calls: u32 = cursor_snapshot_next_calls.try_into().unwrap();
self.capture_state.replace_ranges[replace_ranges_start..replace_ranges_end]
.iter()
.cloned()
- .chain(inner_attr_replace_ranges.clone().into_iter())
+ .chain(inner_attr_replace_ranges.iter().cloned())
.map(|(range, tokens)| {
((range.start - start_calls)..(range.end - start_calls), tokens)
})
.collect()
};
- let tokens = LazyTokenStream::new(LazyTokenStreamImpl {
+ let tokens = LazyAttrTokenStream::new(LazyAttrTokenStreamImpl {
start_token,
num_calls,
cursor_snapshot,
@@ -352,9 +348,9 @@ impl<'a> Parser<'a> {
// on the captured token stream.
if self.capture_cfg
&& matches!(self.capture_state.capturing, Capturing::Yes)
- && has_cfg_or_cfg_attr(&final_attrs)
+ && has_cfg_or_cfg_attr(final_attrs)
{
- let attr_data = AttributesData { attrs: final_attrs.to_vec().into(), tokens };
+ let attr_data = AttributesData { attrs: final_attrs.iter().cloned().collect(), tokens };
// Replace the entire AST node that we just parsed, including attributes,
// with a `FlatToken::AttrTarget`. If this AST node is inside an item
@@ -391,12 +387,12 @@ impl<'a> Parser<'a> {
fn make_token_stream(
mut iter: impl Iterator<Item = (FlatToken, Spacing)>,
break_last_token: bool,
-) -> AttrAnnotatedTokenStream {
+) -> AttrTokenStream {
#[derive(Debug)]
struct FrameData {
// This is `None` for the first frame, `Some` for all others.
open_delim_sp: Option<(Delimiter, Span)>,
- inner: Vec<(AttrAnnotatedTokenTree, Spacing)>,
+ inner: Vec<AttrTokenTree>,
}
let mut stack = vec![FrameData { open_delim_sp: None, inner: vec![] }];
let mut token_and_spacing = iter.next();
@@ -417,48 +413,57 @@ fn make_token_stream(
open_delim, span
);
let dspan = DelimSpan::from_pair(open_sp, span);
- let stream = AttrAnnotatedTokenStream::new(frame_data.inner);
- let delimited = AttrAnnotatedTokenTree::Delimited(dspan, delim, stream);
+ let stream = AttrTokenStream::new(frame_data.inner);
+ let delimited = AttrTokenTree::Delimited(dspan, delim, stream);
stack
.last_mut()
.unwrap_or_else(|| {
panic!("Bottom token frame is missing for token: {:?}", token)
})
.inner
- .push((delimited, Spacing::Alone));
+ .push(delimited);
}
FlatToken::Token(token) => stack
.last_mut()
.expect("Bottom token frame is missing!")
.inner
- .push((AttrAnnotatedTokenTree::Token(token), spacing)),
+ .push(AttrTokenTree::Token(token, spacing)),
FlatToken::AttrTarget(data) => stack
.last_mut()
.expect("Bottom token frame is missing!")
.inner
- .push((AttrAnnotatedTokenTree::Attributes(data), spacing)),
+ .push(AttrTokenTree::Attributes(data)),
FlatToken::Empty => {}
}
token_and_spacing = iter.next();
}
let mut final_buf = stack.pop().expect("Missing final buf!");
if break_last_token {
- let (last_token, spacing) = final_buf.inner.pop().unwrap();
- if let AttrAnnotatedTokenTree::Token(last_token) = last_token {
+ let last_token = final_buf.inner.pop().unwrap();
+ if let AttrTokenTree::Token(last_token, spacing) = last_token {
let unglued_first = last_token.kind.break_two_token_op().unwrap().0;
// An 'unglued' token is always two ASCII characters
let mut first_span = last_token.span.shrink_to_lo();
first_span = first_span.with_hi(first_span.lo() + rustc_span::BytePos(1));
- final_buf.inner.push((
- AttrAnnotatedTokenTree::Token(Token::new(unglued_first, first_span)),
- spacing,
- ));
+ final_buf
+ .inner
+ .push(AttrTokenTree::Token(Token::new(unglued_first, first_span), spacing));
} else {
panic!("Unexpected last token {:?}", last_token)
}
}
- assert!(stack.is_empty(), "Stack should be empty: final_buf={:?} stack={:?}", final_buf, stack);
- AttrAnnotatedTokenStream::new(final_buf.inner)
+ AttrTokenStream::new(final_buf.inner)
+}
+
+// Some types are used a lot. Make sure they don't unintentionally get bigger.
+#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
+mod size_asserts {
+ use super::*;
+ use rustc_data_structures::static_assert_size;
+ // tidy-alphabetical-start
+ static_assert_size!(AttrWrapper, 16);
+ static_assert_size!(LazyAttrTokenStreamImpl, 144);
+ // tidy-alphabetical-end
}
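
The size assertions moved to the end of this file follow rustc's usual guard against frequently-passed types growing. A standalone analogue of the `static_assert_size!` pattern (the `Wrapper` type and the 32-byte figure below are illustrative, not the real `AttrWrapper`):

    // Fails to compile if the type's size drifts from the expected value.
    macro_rules! static_assert_size {
        ($ty:ty, $size:expr) => {
            const _: [(); $size] = [(); std::mem::size_of::<$ty>()];
        };
    }

    struct Wrapper {
        attrs: Vec<u8>, // stand-in for the real `AttrVec` field
        start_pos: usize,
    }

    #[cfg(target_pointer_width = "64")]
    static_assert_size!(Wrapper, 32); // Vec is 3 words, usize is 1, on a 64-bit target
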
diff --git a/compiler/rustc_parse/src/parser/diagnostics.rs b/compiler/rustc_parse/src/parser/diagnostics.rs
index a2155ac1d..309717350 100644
--- a/compiler/rustc_parse/src/parser/diagnostics.rs
+++ b/compiler/rustc_parse/src/parser/diagnostics.rs
@@ -3,6 +3,19 @@ use super::{
BlockMode, CommaRecoveryMode, Parser, PathStyle, Restrictions, SemiColonMode, SeqSep,
TokenExpectType, TokenType,
};
+use crate::errors::{
+ AmbiguousPlus, AttributeOnParamType, BadQPathStage2, BadTypePlus, BadTypePlusSub,
+ ComparisonOperatorsCannotBeChained, ComparisonOperatorsCannotBeChainedSugg,
+ ConstGenericWithoutBraces, ConstGenericWithoutBracesSugg, DocCommentOnParamType,
+ DoubleColonInBound, ExpectedIdentifier, ExpectedSemi, ExpectedSemiSugg,
+ GenericParamsWithoutAngleBrackets, GenericParamsWithoutAngleBracketsSugg, InInTypo,
+ IncorrectAwait, IncorrectSemicolon, IncorrectUseOfAwait, ParenthesesInForHead,
+ ParenthesesInForHeadSugg, PatternMethodParamWithoutBody, QuestionMarkInType,
+ QuestionMarkInTypeSugg, SelfParamNotFirst, StructLiteralBodyWithoutPath,
+ StructLiteralBodyWithoutPathSugg, SuggEscapeToUseAsIdentifier, SuggRemoveComma,
+ UnexpectedConstInGenericParam, UnexpectedConstParamDeclaration,
+ UnexpectedConstParamDeclarationSugg, UnmatchedAngleBrackets, UseEqInstead,
+};
use crate::lexer::UnmatchedBrace;
use rustc_ast as ast;
@@ -10,35 +23,31 @@ use rustc_ast::ptr::P;
use rustc_ast::token::{self, Delimiter, Lit, LitKind, TokenKind};
use rustc_ast::util::parser::AssocOp;
use rustc_ast::{
- AngleBracketedArg, AngleBracketedArgs, AnonConst, AttrVec, BinOpKind, BindingMode, Block,
- BlockCheckMode, Expr, ExprKind, GenericArg, Generics, Item, ItemKind, Mutability, Param, Pat,
- PatKind, Path, PathSegment, QSelf, Ty, TyKind,
+ AngleBracketedArg, AngleBracketedArgs, AnonConst, AttrVec, BinOpKind, BindingAnnotation, Block,
+ BlockCheckMode, Expr, ExprKind, GenericArg, Generics, Item, ItemKind, Param, Pat, PatKind,
+ Path, PathSegment, QSelf, Ty, TyKind,
};
use rustc_ast_pretty::pprust;
use rustc_data_structures::fx::FxHashSet;
use rustc_errors::{
fluent, Applicability, DiagnosticBuilder, DiagnosticMessage, Handler, MultiSpan, PResult,
};
-use rustc_errors::{pluralize, struct_span_err, Diagnostic, EmissionGuarantee, ErrorGuaranteed};
-use rustc_macros::{SessionDiagnostic, SessionSubdiagnostic};
+use rustc_errors::{pluralize, Diagnostic, ErrorGuaranteed, IntoDiagnostic};
+use rustc_session::errors::ExprParenthesesNeeded;
use rustc_span::source_map::Spanned;
-use rustc_span::symbol::{kw, Ident};
+use rustc_span::symbol::{kw, sym, Ident};
use rustc_span::{Span, SpanSnippetError, DUMMY_SP};
use std::ops::{Deref, DerefMut};
use std::mem::take;
use crate::parser;
-use tracing::{debug, trace};
-
-const TURBOFISH_SUGGESTION_STR: &str =
- "use `::<...>` instead of `<...>` to specify lifetime, type, or const arguments";
/// Creates a placeholder argument.
pub(super) fn dummy_arg(ident: Ident) -> Param {
let pat = P(Pat {
id: ast::DUMMY_NODE_ID,
- kind: PatKind::Ident(BindingMode::ByValue(Mutability::Not), ident, None),
+ kind: PatKind::Ident(BindingAnnotation::NONE, ident, None),
span: ident.span,
tokens: None,
});
@@ -53,34 +62,6 @@ pub(super) fn dummy_arg(ident: Ident) -> Param {
}
}
-pub enum Error {
- UselessDocComment,
-}
-
-impl Error {
- fn span_err(
- self,
- sp: impl Into<MultiSpan>,
- handler: &Handler,
- ) -> DiagnosticBuilder<'_, ErrorGuaranteed> {
- match self {
- Error::UselessDocComment => {
- let mut err = struct_span_err!(
- handler,
- sp,
- E0585,
- "found a documentation comment that doesn't document anything",
- );
- err.help(
- "doc comments must come before what they document, maybe a comment was \
- intended with `//`?",
- );
- err
- }
- }
- }
-}
-
pub(super) trait RecoverQPath: Sized + 'static {
const PATH_STYLE: PathStyle = PathStyle::Expr;
fn to_ty(&self) -> Option<P<Ty>>;
@@ -228,13 +209,13 @@ struct MultiSugg {
}
impl MultiSugg {
- fn emit<G: EmissionGuarantee>(self, err: &mut DiagnosticBuilder<'_, G>) {
+ fn emit(self, err: &mut Diagnostic) {
err.multipart_suggestion(&self.msg, self.patches, self.applicability);
}
/// Overrides individual messages and applicabilities.
- fn emit_many<G: EmissionGuarantee>(
- err: &mut DiagnosticBuilder<'_, G>,
+ fn emit_many(
+ err: &mut Diagnostic,
msg: &str,
applicability: Applicability,
suggestions: impl Iterator<Item = Self>,
@@ -243,97 +224,6 @@ impl MultiSugg {
}
}
-#[derive(SessionDiagnostic)]
-#[error(parser::maybe_report_ambiguous_plus)]
-struct AmbiguousPlus {
- pub sum_ty: String,
- #[primary_span]
- #[suggestion(code = "({sum_ty})")]
- pub span: Span,
-}
-
-#[derive(SessionDiagnostic)]
-#[error(parser::maybe_recover_from_bad_type_plus, code = "E0178")]
-struct BadTypePlus {
- pub ty: String,
- #[primary_span]
- pub span: Span,
- #[subdiagnostic]
- pub sub: BadTypePlusSub,
-}
-
-#[derive(SessionSubdiagnostic)]
-pub enum BadTypePlusSub {
- #[suggestion(
- parser::add_paren,
- code = "{sum_with_parens}",
- applicability = "machine-applicable"
- )]
- AddParen {
- sum_with_parens: String,
- #[primary_span]
- span: Span,
- },
- #[label(parser::forgot_paren)]
- ForgotParen {
- #[primary_span]
- span: Span,
- },
- #[label(parser::expect_path)]
- ExpectPath {
- #[primary_span]
- span: Span,
- },
-}
-
-#[derive(SessionDiagnostic)]
-#[error(parser::maybe_recover_from_bad_qpath_stage_2)]
-struct BadQPathStage2 {
- #[primary_span]
- #[suggestion(applicability = "maybe-incorrect")]
- span: Span,
- ty: String,
-}
-
-#[derive(SessionDiagnostic)]
-#[error(parser::incorrect_semicolon)]
-struct IncorrectSemicolon<'a> {
- #[primary_span]
- #[suggestion_short(applicability = "machine-applicable")]
- span: Span,
- #[help]
- opt_help: Option<()>,
- name: &'a str,
-}
-
-#[derive(SessionDiagnostic)]
-#[error(parser::incorrect_use_of_await)]
-struct IncorrectUseOfAwait {
- #[primary_span]
- #[suggestion(parser::parentheses_suggestion, applicability = "machine-applicable")]
- span: Span,
-}
-
-#[derive(SessionDiagnostic)]
-#[error(parser::incorrect_use_of_await)]
-struct IncorrectAwait {
- #[primary_span]
- span: Span,
- #[suggestion(parser::postfix_suggestion, code = "{expr}.await{question_mark}")]
- sugg_span: (Span, Applicability),
- expr: String,
- question_mark: &'static str,
-}
-
-#[derive(SessionDiagnostic)]
-#[error(parser::in_in_typo)]
-struct InInTypo {
- #[primary_span]
- span: Span,
- #[suggestion(applicability = "machine-applicable")]
- sugg_span: Span,
-}
-
// SnapshotParser is used to create a snapshot of the parser
// without causing duplicate errors to be emitted when the `Parser`
// is dropped.
@@ -358,15 +248,6 @@ impl<'a> DerefMut for SnapshotParser<'a> {
impl<'a> Parser<'a> {
#[rustc_lint_diagnostics]
- pub(super) fn span_err<S: Into<MultiSpan>>(
- &self,
- sp: S,
- err: Error,
- ) -> DiagnosticBuilder<'a, ErrorGuaranteed> {
- err.span_err(sp, self.diagnostic())
- }
-
- #[rustc_lint_diagnostics]
pub fn struct_span_err<S: Into<MultiSpan>>(
&self,
sp: S,
@@ -387,7 +268,7 @@ impl<'a> Parser<'a> {
/// This is to avoid losing unclosed delims errors `create_snapshot_for_diagnostic` clears.
pub(super) fn restore_snapshot(&mut self, snapshot: SnapshotParser<'a>) {
*self = snapshot.parser;
- self.unclosed_delims.extend(snapshot.unclosed_delims.clone());
+ self.unclosed_delims.extend(snapshot.unclosed_delims);
}
pub fn unclosed_delims(&self) -> &[UnmatchedBrace] {
@@ -411,10 +292,6 @@ impl<'a> Parser<'a> {
}
pub(super) fn expected_ident_found(&self) -> DiagnosticBuilder<'a, ErrorGuaranteed> {
- let mut err = self.struct_span_err(
- self.token.span,
- &format!("expected identifier, found {}", super::token_descr(&self.token)),
- );
let valid_follow = &[
TokenKind::Eq,
TokenKind::Colon,
@@ -426,34 +303,35 @@ impl<'a> Parser<'a> {
TokenKind::CloseDelim(Delimiter::Brace),
TokenKind::CloseDelim(Delimiter::Parenthesis),
];
- match self.token.ident() {
+ let suggest_raw = match self.token.ident() {
Some((ident, false))
if ident.is_raw_guess()
&& self.look_ahead(1, |t| valid_follow.contains(&t.kind)) =>
{
- err.span_suggestion_verbose(
- ident.span.shrink_to_lo(),
- &format!("escape `{}` to use it as an identifier", ident.name),
- "r#",
- Applicability::MaybeIncorrect,
- );
+ Some(SuggEscapeToUseAsIdentifier {
+ span: ident.span.shrink_to_lo(),
+ // `Symbol::to_string()` is different from `Symbol::into_diagnostic_arg()`,
+ // which uses `Symbol::to_ident_string()` and "helpfully" adds an implicit `r#`
+ ident_name: ident.name.to_string(),
+ })
}
- _ => {}
- }
- if let Some(token_descr) = super::token_descr_opt(&self.token) {
- err.span_label(self.token.span, format!("expected identifier, found {}", token_descr));
- } else {
- err.span_label(self.token.span, "expected identifier");
+ _ => None,
+ };
+
+ let suggest_remove_comma =
if self.token == token::Comma && self.look_ahead(1, |t| t.is_ident()) {
- err.span_suggestion(
- self.token.span,
- "remove this comma",
- "",
- Applicability::MachineApplicable,
- );
- }
- }
- err
+ Some(SuggRemoveComma { span: self.token.span })
+ } else {
+ None
+ };
+
+ let err = ExpectedIdentifier {
+ span: self.token.span,
+ token: self.token.clone(),
+ suggest_raw,
+ suggest_remove_comma,
+ };
+ err.into_diagnostic(&self.sess.span_diagnostic)
}
pub(super) fn expected_one_of_not_found(
@@ -518,8 +396,8 @@ impl<'a> Parser<'a> {
expected.dedup();
let sm = self.sess.source_map();
- let msg = format!("expected `;`, found {}", super::token_descr(&self.token));
- let appl = Applicability::MachineApplicable;
+
+ // Special-case "expected `;`" errors
if expected.contains(&TokenType::Token(token::Semi)) {
if self.token.span == DUMMY_SP || self.prev_token.span == DUMMY_SP {
// Likely inside a macro, can't provide meaningful suggestions.
@@ -547,18 +425,22 @@ impl<'a> Parser<'a> {
//
// let x = 32:
// let y = 42;
+ self.sess.emit_err(ExpectedSemi {
+ span: self.token.span,
+ token: self.token.clone(),
+ unexpected_token_label: None,
+ sugg: ExpectedSemiSugg::ChangeToSemi(self.token.span),
+ });
self.bump();
- let sp = self.prev_token.span;
- self.struct_span_err(sp, &msg)
- .span_suggestion_short(sp, "change this to `;`", ";", appl)
- .emit();
return Ok(true);
} else if self.look_ahead(0, |t| {
t == &token::CloseDelim(Delimiter::Brace)
- || (t.can_begin_expr() && t != &token::Semi && t != &token::Pound)
+ || ((t.can_begin_expr() || t.can_begin_item())
+ && t != &token::Semi
+ && t != &token::Pound)
// Avoid triggering with too many trailing `#` in raw string.
|| (sm.is_multiline(
- self.prev_token.span.shrink_to_hi().until(self.token.span.shrink_to_lo())
+ self.prev_token.span.shrink_to_hi().until(self.token.span.shrink_to_lo()),
) && t == &token::Pound)
}) && !expected.contains(&TokenType::Token(token::Comma))
{
@@ -567,15 +449,25 @@ impl<'a> Parser<'a> {
//
// let x = 32
// let y = 42;
- let sp = self.prev_token.span.shrink_to_hi();
- self.struct_span_err(sp, &msg)
- .span_label(self.token.span, "unexpected token")
- .span_suggestion_short(sp, "add `;` here", ";", appl)
- .emit();
+ let span = self.prev_token.span.shrink_to_hi();
+ self.sess.emit_err(ExpectedSemi {
+ span,
+ token: self.token.clone(),
+ unexpected_token_label: Some(self.token.span),
+ sugg: ExpectedSemiSugg::AddSemi(span),
+ });
return Ok(true);
}
}
+ if self.token.kind == TokenKind::EqEq
+ && self.prev_token.is_ident()
+ && expected.iter().any(|tok| matches!(tok, TokenType::Token(TokenKind::Eq)))
+ {
+ // Likely typo: `=` → `==` in let expr or enum item
+ return Err(self.sess.create_err(UseEqInstead { span: self.token.span }));
+ }
+
let expect = tokens_to_string(&expected);
let actual = super::token_descr(&self.token);
let (msg_exp, (label_sp, label_exp)) = if expected.len() > 1 {
@@ -590,7 +482,7 @@ impl<'a> Parser<'a> {
)
} else if expected.is_empty() {
(
- format!("unexpected token: {}", actual),
+ format!("unexpected token: {actual}"),
(self.prev_token.span, "unexpected token after this".to_string()),
)
} else {
@@ -600,19 +492,33 @@ impl<'a> Parser<'a> {
)
};
self.last_unexpected_token_span = Some(self.token.span);
+ // FIXME: translation requires list formatting (for `expect`)
let mut err = self.struct_span_err(self.token.span, &msg_exp);
if let TokenKind::Ident(symbol, _) = &self.prev_token.kind {
- if symbol.as_str() == "public" {
+ if ["def", "fun", "func", "function"].contains(&symbol.as_str()) {
err.span_suggestion_short(
self.prev_token.span,
- "write `pub` instead of `public` to make the item public",
- "pub",
- appl,
+ &format!("write `fn` instead of `{symbol}` to declare a function"),
+ "fn",
+ Applicability::MachineApplicable,
);
}
}
+ // `pub` may be used for an item or `pub(crate)`
+ if self.prev_token.is_ident_named(sym::public)
+ && (self.token.can_begin_item()
+ || self.token.kind == TokenKind::OpenDelim(Delimiter::Parenthesis))
+ {
+ err.span_suggestion_short(
+ self.prev_token.span,
+ "write `pub` instead of `public` to make the item public",
+ "pub",
+ Applicability::MachineApplicable,
+ );
+ }
+
// Add suggestion for a missing closing angle bracket if '>' is included in expected_tokens
// there are unclosed angle brackets
if self.unmatched_angle_bracket_count > 0
@@ -734,7 +640,7 @@ impl<'a> Parser<'a> {
let mut snapshot = self.create_snapshot_for_diagnostic();
let path =
Path { segments: vec![], span: self.prev_token.span.shrink_to_lo(), tokens: None };
- let struct_expr = snapshot.parse_struct_expr(None, path, AttrVec::new(), false);
+ let struct_expr = snapshot.parse_struct_expr(None, path, false);
let block_tail = self.parse_block_tail(lo, s, AttemptLocalParseRecovery::No);
return Some(match (struct_expr, block_tail) {
(Ok(expr), Err(mut err)) => {
@@ -747,19 +653,13 @@ impl<'a> Parser<'a> {
// field: value,
// } }
err.delay_as_bug();
- self.struct_span_err(
- expr.span,
- fluent::parser::struct_literal_body_without_path,
- )
- .multipart_suggestion(
- fluent::parser::suggestion,
- vec![
- (expr.span.shrink_to_lo(), "{ SomeStruct ".to_string()),
- (expr.span.shrink_to_hi(), " }".to_string()),
- ],
- Applicability::MaybeIncorrect,
- )
- .emit();
+ self.sess.emit_err(StructLiteralBodyWithoutPath {
+ span: expr.span,
+ sugg: StructLiteralBodyWithoutPathSugg {
+ before: expr.span.shrink_to_lo(),
+ after: expr.span.shrink_to_hi(),
+ },
+ });
self.restore_snapshot(snapshot);
let mut tail = self.mk_block(
vec![self.mk_stmt_err(expr.span)],
@@ -953,18 +853,8 @@ impl<'a> Parser<'a> {
self.eat_to_tokens(end);
let span = lo.until(self.token.span);
- let total_num_of_gt = number_of_gt + number_of_shr * 2;
- self.struct_span_err(
- span,
- &format!("unmatched angle bracket{}", pluralize!(total_num_of_gt)),
- )
- .span_suggestion(
- span,
- &format!("remove extra angle bracket{}", pluralize!(total_num_of_gt)),
- "",
- Applicability::MachineApplicable,
- )
- .emit();
+ let num_extra_brackets = number_of_gt + number_of_shr * 2;
+ self.sess.emit_err(UnmatchedAngleBrackets { span, num_extra_brackets });
return true;
}
false
@@ -993,19 +883,13 @@ impl<'a> Parser<'a> {
let args = AngleBracketedArgs { args, span }.into();
segment.args = args;
- self.struct_span_err(
+ self.sess.emit_err(GenericParamsWithoutAngleBrackets {
span,
- "generic parameters without surrounding angle brackets",
- )
- .multipart_suggestion(
- "surround the type parameters with angle brackets",
- vec![
- (span.shrink_to_lo(), "<".to_string()),
- (trailing_span, ">".to_string()),
- ],
- Applicability::MachineApplicable,
- )
- .emit();
+ sugg: GenericParamsWithoutAngleBracketsSugg {
+ left: span.shrink_to_lo(),
+ right: trailing_span,
+ },
+ });
} else {
// This doesn't look like an invalid turbofish, can't recover parse state.
self.restore_snapshot(snapshot);
@@ -1042,7 +926,7 @@ impl<'a> Parser<'a> {
if self.eat(&token::Gt) {
e.span_suggestion_verbose(
binop.span.shrink_to_lo(),
- TURBOFISH_SUGGESTION_STR,
+ fluent::parser_sugg_turbofish_syntax,
"::",
Applicability::MaybeIncorrect,
)
@@ -1074,7 +958,7 @@ impl<'a> Parser<'a> {
/// parenthesising the leftmost comparison.
fn attempt_chained_comparison_suggestion(
&mut self,
- err: &mut Diagnostic,
+ err: &mut ComparisonOperatorsCannotBeChained,
inner_op: &Expr,
outer_op: &Spanned<AssocOp>,
) -> bool /* advanced the cursor */ {
@@ -1087,16 +971,6 @@ impl<'a> Parser<'a> {
// suggestion being the only one to apply is high.
return false;
}
- let mut enclose = |left: Span, right: Span| {
- err.multipart_suggestion(
- "parenthesize the comparison",
- vec![
- (left.shrink_to_lo(), "(".to_string()),
- (right.shrink_to_hi(), ")".to_string()),
- ],
- Applicability::MaybeIncorrect,
- );
- };
return match (op.node, &outer_op.node) {
// `x == y == z`
(BinOpKind::Eq, AssocOp::Equal) |
@@ -1110,12 +984,10 @@ impl<'a> Parser<'a> {
self.span_to_snippet(e.span)
.unwrap_or_else(|_| pprust::expr_to_string(&e))
};
- err.span_suggestion_verbose(
- inner_op.span.shrink_to_hi(),
- "split the comparison into two",
- format!(" && {}", expr_to_str(&r1)),
- Applicability::MaybeIncorrect,
- );
+ err.chaining_sugg = Some(ComparisonOperatorsCannotBeChainedSugg::SplitComparison {
+ span: inner_op.span.shrink_to_hi(),
+ middle_term: expr_to_str(&r1),
+ });
false // Keep the current parse behavior, where the AST is `(x < y) < z`.
}
// `x == y < z`
@@ -1126,7 +998,10 @@ impl<'a> Parser<'a> {
Ok(r2) => {
// We are sure that outer-op-rhs could be consumed, the suggestion is
// likely correct.
- enclose(r1.span, r2.span);
+ err.chaining_sugg = Some(ComparisonOperatorsCannotBeChainedSugg::Parenthesize {
+ left: r1.span.shrink_to_lo(),
+ right: r2.span.shrink_to_hi(),
+ });
true
}
Err(expr_err) => {
@@ -1143,7 +1018,10 @@ impl<'a> Parser<'a> {
// further checks are necessary.
match self.parse_expr() {
Ok(_) => {
- enclose(l1.span, r1.span);
+ err.chaining_sugg = Some(ComparisonOperatorsCannotBeChainedSugg::Parenthesize {
+ left: l1.span.shrink_to_lo(),
+ right: r1.span.shrink_to_hi(),
+ });
true
}
Err(expr_err) => {
@@ -1188,23 +1066,15 @@ impl<'a> Parser<'a> {
outer_op.node,
);
- let mk_err_expr =
- |this: &Self, span| Ok(Some(this.mk_expr(span, ExprKind::Err, AttrVec::new())));
+ let mk_err_expr = |this: &Self, span| Ok(Some(this.mk_expr(span, ExprKind::Err)));
match inner_op.kind {
ExprKind::Binary(op, ref l1, ref r1) if op.node.is_comparison() => {
- let mut err = self.struct_span_err(
- vec![op.span, self.prev_token.span],
- "comparison operators cannot be chained",
- );
-
- let suggest = |err: &mut Diagnostic| {
- err.span_suggestion_verbose(
- op.span.shrink_to_lo(),
- TURBOFISH_SUGGESTION_STR,
- "::",
- Applicability::MaybeIncorrect,
- );
+ let mut err = ComparisonOperatorsCannotBeChained {
+ span: vec![op.span, self.prev_token.span],
+ suggest_turbofish: None,
+ help_turbofish: None,
+ chaining_sugg: None,
};
// Include `<` to provide this recommendation even in a case like
@@ -1231,7 +1101,7 @@ impl<'a> Parser<'a> {
return if token::ModSep == self.token.kind {
// We have some certainty that this was a bad turbofish at this point.
// `foo< bar >::`
- suggest(&mut err);
+ err.suggest_turbofish = Some(op.span.shrink_to_lo());
let snapshot = self.create_snapshot_for_diagnostic();
self.bump(); // `::`
@@ -1240,7 +1110,7 @@ impl<'a> Parser<'a> {
match self.parse_expr() {
Ok(_) => {
// 99% certain that the suggestion is correct, continue parsing.
- err.emit();
+ self.sess.emit_err(err);
// FIXME: actually check that the two expressions in the binop are
// paths and resynthesize new fn call expression instead of using
// `ExprKind::Err` placeholder.
@@ -1251,18 +1121,18 @@ impl<'a> Parser<'a> {
// Not entirely sure now, but we bubble the error up with the
// suggestion.
self.restore_snapshot(snapshot);
- Err(err)
+ Err(err.into_diagnostic(&self.sess.span_diagnostic))
}
}
} else if token::OpenDelim(Delimiter::Parenthesis) == self.token.kind {
// We have high certainty that this was a bad turbofish at this point.
// `foo< bar >(`
- suggest(&mut err);
+ err.suggest_turbofish = Some(op.span.shrink_to_lo());
// Consume the fn call arguments.
match self.consume_fn_args() {
- Err(()) => Err(err),
+ Err(()) => Err(err.into_diagnostic(&self.sess.span_diagnostic)),
Ok(()) => {
- err.emit();
+ self.sess.emit_err(err);
// FIXME: actually check that the two expressions in the binop are
// paths and resynthesize new fn call expression instead of using
// `ExprKind::Err` placeholder.
@@ -1275,25 +1145,24 @@ impl<'a> Parser<'a> {
{
// All we know is that this is `foo < bar >` and *nothing* else. Try to
// be helpful, but don't attempt to recover.
- err.help(TURBOFISH_SUGGESTION_STR);
- err.help("or use `(...)` if you meant to specify fn arguments");
+ err.help_turbofish = Some(());
}
// If it looks like a genuine attempt to chain operators (as opposed to a
// misformatted turbofish, for instance), suggest a correct form.
if self.attempt_chained_comparison_suggestion(&mut err, inner_op, outer_op)
{
- err.emit();
+ self.sess.emit_err(err);
mk_err_expr(self, inner_op.span.to(self.prev_token.span))
} else {
// These cases cause too many knock-down errors, bail out (#61329).
- Err(err)
+ Err(err.into_diagnostic(&self.sess.span_diagnostic))
}
};
}
let recover =
self.attempt_chained_comparison_suggestion(&mut err, inner_op, outer_op);
- err.emit();
+ self.sess.emit_err(err);
if recover {
return mk_err_expr(self, inner_op.span.to(self.prev_token.span));
}
@@ -1334,17 +1203,13 @@ impl<'a> Parser<'a> {
pub(super) fn maybe_recover_from_question_mark(&mut self, ty: P<Ty>) -> P<Ty> {
if self.token == token::Question {
self.bump();
- self.struct_span_err(self.prev_token.span, "invalid `?` in type")
- .span_label(self.prev_token.span, "`?` is only allowed on expressions, not types")
- .multipart_suggestion(
- "if you meant to express that the type might not contain a value, use the `Option` wrapper type",
- vec![
- (ty.span.shrink_to_lo(), "Option<".to_string()),
- (self.prev_token.span, ">".to_string()),
- ],
- Applicability::MachineApplicable,
- )
- .emit();
+ self.sess.emit_err(QuestionMarkInType {
+ span: self.prev_token.span,
+ sugg: QuestionMarkInTypeSugg {
+ left: ty.span.shrink_to_lo(),
+ right: self.prev_token.span,
+ },
+ });
self.mk_ty(ty.span.to(self.prev_token.span), TyKind::Err)
} else {
ty
@@ -1497,7 +1362,7 @@ impl<'a> Parser<'a> {
MultiSugg {
msg: format!("use `{}= 1` instead", kind.op.chr()),
patches: vec![
- (pre_span, format!("{{ let {} = ", tmp_var)),
+ (pre_span, format!("{{ let {tmp_var} = ")),
(post_span, format!("; {} {}= 1; {} }}", base_src, kind.op.chr(), tmp_var)),
],
applicability: Applicability::HasPlaceholders,
@@ -1509,9 +1374,17 @@ impl<'a> Parser<'a> {
kind: IncDecRecovery,
(pre_span, post_span): (Span, Span),
) -> MultiSugg {
+ let mut patches = Vec::new();
+
+ if !pre_span.is_empty() {
+ patches.push((pre_span, String::new()));
+ }
+
+ patches.push((post_span, format!(" {}= 1", kind.op.chr())));
+
MultiSugg {
msg: format!("use `{}= 1` instead", kind.op.chr()),
- patches: vec![(pre_span, String::new()), (post_span, format!(" {}= 1", kind.op.chr()))],
+ patches,
applicability: Applicability::MachineApplicable,
}
}
@@ -1596,7 +1469,7 @@ impl<'a> Parser<'a> {
let (prev_sp, sp) = match (&self.token.kind, self.subparser_name) {
// Point at the end of the macro call when reaching end of macro arguments.
(token::Eof, Some(_)) => {
- let sp = self.sess.source_map().next_point(self.prev_token.span);
+ let sp = self.prev_token.span.shrink_to_hi();
(sp, sp)
}
// We don't want to point at the following span after DUMMY_SP.
@@ -1647,7 +1520,6 @@ impl<'a> Parser<'a> {
&mut self,
lo: Span,
await_sp: Span,
- attrs: AttrVec,
) -> PResult<'a, P<Expr>> {
let (hi, expr, is_question) = if self.token == token::Not {
// Handle `await!(<expr>)`.
@@ -1662,7 +1534,7 @@ impl<'a> Parser<'a> {
ExprKind::Try(_) => ExprKind::Err,
_ => ExprKind::Await(expr),
};
- let expr = self.mk_expr(lo.to(sp), kind, attrs);
+ let expr = self.mk_expr(lo.to(sp), kind);
self.maybe_recover_from_bad_qpath(expr)
}
@@ -1680,7 +1552,7 @@ impl<'a> Parser<'a> {
// Handle `await { <expr> }`.
// This needs to be handled separately from the next arm to avoid
// interpreting `await { <expr> }?` as `<expr>?.await`.
- self.parse_block_expr(None, self.token.span, BlockCheckMode::Default, AttrVec::new())
+ self.parse_block_expr(None, self.token.span, BlockCheckMode::Default)
} else {
self.parse_expr()
}
@@ -1769,19 +1641,16 @@ impl<'a> Parser<'a> {
(token::CloseDelim(Delimiter::Parenthesis), Some(begin_par_sp)) => {
self.bump();
- self.struct_span_err(
- MultiSpan::from_spans(vec![begin_par_sp, self.prev_token.span]),
- "unexpected parentheses surrounding `for` loop head",
- )
- .multipart_suggestion(
- "remove parentheses in `for` loop",
- vec![(begin_par_sp, String::new()), (self.prev_token.span, String::new())],
+ self.sess.emit_err(ParenthesesInForHead {
+ span: vec![begin_par_sp, self.prev_token.span],
// With e.g. `for (x) in y)` this would replace `(x) in y)`
// with `x) in y)` which is syntactically invalid.
// However, this is prevented before we get here.
- Applicability::MachineApplicable,
- )
- .emit();
+ sugg: ParenthesesInForHeadSugg {
+ left: begin_par_sp,
+ right: self.prev_token.span,
+ },
+ });
// Unwrap `(pat)` into `pat` to avoid the `unused_parens` lint.
pat.and_then(|pat| match pat.kind {
@@ -1823,7 +1692,7 @@ impl<'a> Parser<'a> {
err.emit();
// Recover from parse error, callers expect the closing delim to be consumed.
self.consume_block(delim, ConsumeClosingDelim::Yes);
- self.mk_expr(lo.to(self.prev_token.span), ExprKind::Err, AttrVec::new())
+ self.mk_expr(lo.to(self.prev_token.span), ExprKind::Err)
}
}
}
@@ -2000,12 +1869,7 @@ impl<'a> Parser<'a> {
pub(super) fn eat_incorrect_doc_comment_for_param_type(&mut self) {
if let token::DocComment(..) = self.token.kind {
- self.struct_span_err(
- self.token.span,
- "documentation comments cannot be applied to a function parameter's type",
- )
- .span_label(self.token.span, "doc comments are not allowed here")
- .emit();
+ self.sess.emit_err(DocCommentOnParamType { span: self.token.span });
self.bump();
} else if self.token == token::Pound
&& self.look_ahead(1, |t| *t == token::OpenDelim(Delimiter::Bracket))
@@ -2017,9 +1881,7 @@ impl<'a> Parser<'a> {
}
let sp = lo.to(self.token.span);
self.bump();
- self.struct_span_err(sp, "attributes cannot be applied to a function parameter's type")
- .span_label(sp, "attributes are not allowed here")
- .emit();
+ self.sess.emit_err(AttributeOnParamType { span: sp });
}
}
@@ -2140,19 +2002,7 @@ impl<'a> Parser<'a> {
self.expect(&token::Colon)?;
let ty = self.parse_ty()?;
- struct_span_err!(
- self.diagnostic(),
- pat.span,
- E0642,
- "patterns aren't allowed in methods without bodies",
- )
- .span_suggestion_short(
- pat.span,
- "give this argument a name or use an underscore to ignore it",
- "_",
- Applicability::MachineApplicable,
- )
- .emit();
+ self.sess.emit_err(PatternMethodParamWithoutBody { span: pat.span });
// Pretend the pattern is `_`, to avoid duplicate errors from AST validation.
let pat =
@@ -2161,11 +2011,9 @@ impl<'a> Parser<'a> {
}
pub(super) fn recover_bad_self_param(&mut self, mut param: Param) -> PResult<'a, Param> {
- let sp = param.pat.span;
+ let span = param.pat.span;
param.ty.kind = TyKind::Err;
- self.struct_span_err(sp, "unexpected `self` parameter in function")
- .span_label(sp, "must be the first parameter of an associated function")
- .emit();
+ self.sess.emit_err(SelfParamNotFirst { span });
Ok(param)
}
@@ -2199,7 +2047,7 @@ impl<'a> Parser<'a> {
pub(super) fn expected_expression_found(&self) -> DiagnosticBuilder<'a, ErrorGuaranteed> {
let (span, msg) = match (&self.token.kind, self.subparser_name) {
(&token::Eof, Some(origin)) => {
- let sp = self.sess.source_map().next_point(self.prev_token.span);
+ let sp = self.prev_token.span.shrink_to_hi();
(sp, format!("expected expression, found end of {origin}"))
}
_ => (
@@ -2210,7 +2058,7 @@ impl<'a> Parser<'a> {
let mut err = self.struct_span_err(span, &msg);
let sp = self.sess.source_map().start_point(self.token.span);
if let Some(sp) = self.sess.ambiguous_block_expr_parse.borrow().get(&sp) {
- self.sess.expr_parentheses_needed(&mut err, *sp);
+ err.subdiagnostic(ExprParenthesesNeeded::surrounding(*sp));
}
err.span_label(span, "expected expression");
err
@@ -2314,27 +2162,20 @@ impl<'a> Parser<'a> {
err
})?;
if !self.expr_is_valid_const_arg(&expr) {
- self.struct_span_err(
- expr.span,
- "expressions must be enclosed in braces to be used as const generic \
- arguments",
- )
- .multipart_suggestion(
- "enclose the `const` expression in braces",
- vec![
- (expr.span.shrink_to_lo(), "{ ".to_string()),
- (expr.span.shrink_to_hi(), " }".to_string()),
- ],
- Applicability::MachineApplicable,
- )
- .emit();
+ self.sess.emit_err(ConstGenericWithoutBraces {
+ span: expr.span,
+ sugg: ConstGenericWithoutBracesSugg {
+ left: expr.span.shrink_to_lo(),
+ right: expr.span.shrink_to_hi(),
+ },
+ });
}
Ok(expr)
}
fn recover_const_param_decl(&mut self, ty_generics: Option<&Generics>) -> Option<GenericArg> {
let snapshot = self.create_snapshot_for_diagnostic();
- let param = match self.parse_const_param(vec![]) {
+ let param = match self.parse_const_param(AttrVec::new()) {
Ok(param) => param,
Err(err) => {
err.cancel();
@@ -2342,24 +2183,30 @@ impl<'a> Parser<'a> {
return None;
}
};
- let mut err =
- self.struct_span_err(param.span(), "unexpected `const` parameter declaration");
- err.span_label(param.span(), "expected a `const` expression, not a parameter declaration");
- if let (Some(generics), Ok(snippet)) =
- (ty_generics, self.sess.source_map().span_to_snippet(param.span()))
- {
- let (span, sugg) = match &generics.params[..] {
- [] => (generics.span, format!("<{snippet}>")),
- [.., generic] => (generic.span().shrink_to_hi(), format!(", {snippet}")),
- };
- err.multipart_suggestion(
- "`const` parameters must be declared for the `impl`",
- vec![(span, sugg), (param.span(), param.ident.to_string())],
- Applicability::MachineApplicable,
- );
- }
+
+ let ident = param.ident.to_string();
+ let sugg = match (ty_generics, self.sess.source_map().span_to_snippet(param.span())) {
+ (Some(Generics { params, span: impl_generics, .. }), Ok(snippet)) => {
+ Some(match &params[..] {
+ [] => UnexpectedConstParamDeclarationSugg::AddParam {
+ impl_generics: *impl_generics,
+ incorrect_decl: param.span(),
+ snippet,
+ ident,
+ },
+ [.., generic] => UnexpectedConstParamDeclarationSugg::AppendParam {
+ impl_generics_end: generic.span().shrink_to_hi(),
+ incorrect_decl: param.span(),
+ snippet,
+ ident,
+ },
+ })
+ }
+ _ => None,
+ };
+ self.sess.emit_err(UnexpectedConstParamDeclaration { span: param.span(), sugg });
+
let value = self.mk_expr_err(param.span());
- err.emit();
Some(GenericArg::Const(AnonConst { id: ast::DUMMY_NODE_ID, value }))
}
@@ -2377,20 +2224,15 @@ impl<'a> Parser<'a> {
self.bump(); // `const`
// Detect and recover from the old, pre-RFC2000 syntax for const generics.
- let mut err = self
- .struct_span_err(start, "expected lifetime, type, or constant, found keyword `const`");
+ let mut err = UnexpectedConstInGenericParam { span: start, to_remove: None };
if self.check_const_arg() {
- err.span_suggestion_verbose(
- start.until(self.token.span),
- "the `const` keyword is only needed in the definition of the type",
- "",
- Applicability::MaybeIncorrect,
- );
- err.emit();
+ err.to_remove = Some(start.until(self.token.span));
+ self.sess.emit_err(err);
Ok(Some(GenericArg::Const(self.parse_const_arg()?)))
} else {
let after_kw_const = self.token.span;
- self.recover_const_arg(after_kw_const, err).map(Some)
+ self.recover_const_arg(after_kw_const, err.into_diagnostic(&self.sess.span_diagnostic))
+ .map(Some)
}
}
@@ -2398,7 +2240,7 @@ impl<'a> Parser<'a> {
///
/// When encountering code like `foo::< bar + 3 >` or `foo::< bar - baz >` we suggest
/// `foo::<{ bar + 3 }>` and `foo::<{ bar - baz }>`, respectively. We only provide a suggestion
- /// if we think that that the resulting expression would be well formed.
+ /// if we think that the resulting expression would be well formed.
pub fn recover_const_arg(
&mut self,
start: Span,
@@ -2496,24 +2338,6 @@ impl<'a> Parser<'a> {
GenericArg::Const(AnonConst { id: ast::DUMMY_NODE_ID, value })
}
- /// Get the diagnostics for the cases where `move async` is found.
- ///
- /// `move_async_span` starts at the 'm' of the move keyword and ends with the 'c' of the async keyword
- pub(super) fn incorrect_move_async_order_found(
- &self,
- move_async_span: Span,
- ) -> DiagnosticBuilder<'a, ErrorGuaranteed> {
- let mut err =
- self.struct_span_err(move_async_span, "the order of `move` and `async` is incorrect");
- err.span_suggestion_verbose(
- move_async_span,
- "try switching the order",
- "async move",
- Applicability::MaybeIncorrect,
- );
- err
- }
-
/// Some special error handling for the "top-level" patterns in a match arm,
/// `for` loop, `let`, &c. (in contrast to subpatterns within such).
pub(crate) fn maybe_recover_colon_colon_in_pat_typo(
@@ -2577,7 +2401,7 @@ impl<'a> Parser<'a> {
}
_ => {}
},
- PatKind::Ident(BindingMode::ByValue(Mutability::Not), ident, None) => {
+ PatKind::Ident(BindingAnnotation::NONE, ident, None) => {
match &first_pat.kind {
PatKind::Ident(_, old_ident, _) => {
let path = PatKind::Path(
@@ -2632,11 +2456,15 @@ impl<'a> Parser<'a> {
}
pub(crate) fn maybe_recover_unexpected_block_label(&mut self) -> bool {
- let Some(label) = self.eat_label().filter(|_| {
- self.eat(&token::Colon) && self.token.kind == token::OpenDelim(Delimiter::Brace)
- }) else {
+ // Check for `'a : {`
+ if !(self.check_lifetime()
+ && self.look_ahead(1, |tok| tok.kind == token::Colon)
+ && self.look_ahead(2, |tok| tok.kind == token::OpenDelim(Delimiter::Brace)))
+ {
return false;
- };
+ }
+ let label = self.eat_label().expect("just checked if a label exists");
+ self.bump(); // eat `:`
let span = label.ident.span.to(self.prev_token.span);
let mut err = self.struct_span_err(span, "block label not supported here");
err.span_label(span, "not supported here");
@@ -2709,17 +2537,11 @@ impl<'a> Parser<'a> {
let (a_span, b_span) = (a.span(), b.span());
let between_span = a_span.shrink_to_hi().to(b_span.shrink_to_lo());
if self.span_to_snippet(between_span).as_ref().map(|a| &a[..]) == Ok(":: ") {
- let mut err = self.struct_span_err(
- path.span.shrink_to_hi(),
- "expected `:` followed by trait or lifetime",
- );
- err.span_suggestion(
- between_span,
- "use single colon",
- ": ",
- Applicability::MachineApplicable,
- );
- return Err(err);
+ return Err(DoubleColonInBound {
+ span: path.span.shrink_to_hi(),
+ between: between_span,
+ }
+ .into_diagnostic(&self.sess.span_diagnostic));
}
}
}
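
Across this file, hand-built `struct_span_err` chains are replaced by typed error structs from the new `crate::errors` module, which are either emitted immediately via `self.sess.emit_err(..)` or converted with `into_diagnostic` and returned to the caller. A minimal standalone analogue of that split, using toy stand-ins rather than the real `rustc_errors` API:

    struct Handler;

    struct DiagnosticBuilder {
        msg: String,
    }

    impl DiagnosticBuilder {
        fn emit(self) {
            eprintln!("error: {}", self.msg);
        }
        fn cancel(self) {} // drop without emitting
    }

    // Stand-in for the trait that typed errors such as `ExpectedSemi` implement.
    trait IntoDiagnostic {
        fn into_diagnostic(self, handler: &Handler) -> DiagnosticBuilder;
    }

    struct ExpectedSemi {
        found: String,
    }

    impl IntoDiagnostic for ExpectedSemi {
        fn into_diagnostic(self, _handler: &Handler) -> DiagnosticBuilder {
            DiagnosticBuilder { msg: format!("expected `;`, found {}", self.found) }
        }
    }

    impl Handler {
        // `emit_err` is "convert, then emit right away"; call sites that need to keep
        // recovering instead build the error and return `Err(err.into_diagnostic(..))`.
        fn emit_err(&self, err: impl IntoDiagnostic) {
            err.into_diagnostic(self).emit();
        }
    }
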
diff --git a/compiler/rustc_parse/src/parser/expr.rs b/compiler/rustc_parse/src/parser/expr.rs
index 0719a0ef0..a781748ef 100644
--- a/compiler/rustc_parse/src/parser/expr.rs
+++ b/compiler/rustc_parse/src/parser/expr.rs
@@ -5,6 +5,28 @@ use super::{
AttrWrapper, BlockMode, ClosureSpans, ForceCollect, Parser, PathStyle, Restrictions,
SemiColonMode, SeqSep, TokenExpectType, TokenType, TrailingToken,
};
+use crate::errors::{
+ ArrayBracketsInsteadOfSpaces, ArrayBracketsInsteadOfSpacesSugg, AsyncMoveOrderIncorrect,
+ BinaryFloatLiteralNotSupported, BracesForStructLiteral, CatchAfterTry, CommaAfterBaseStruct,
+ ComparisonInterpretedAsGeneric, ComparisonOrShiftInterpretedAsGenericSugg,
+ DoCatchSyntaxRemoved, DotDotDot, EqFieldInit, ExpectedElseBlock, ExpectedExpressionFoundLet,
+ FieldExpressionWithGeneric, FloatLiteralRequiresIntegerPart, FoundExprWouldBeStmt,
+ HexadecimalFloatLiteralNotSupported, IfExpressionMissingCondition,
+ IfExpressionMissingThenBlock, IfExpressionMissingThenBlockSub, IntLiteralTooLarge,
+ InvalidBlockMacroSegment, InvalidComparisonOperator, InvalidComparisonOperatorSub,
+ InvalidFloatLiteralSuffix, InvalidFloatLiteralWidth, InvalidIntLiteralWidth,
+ InvalidInterpolatedExpression, InvalidLiteralSuffix, InvalidLiteralSuffixOnTupleIndex,
+ InvalidLogicalOperator, InvalidLogicalOperatorSub, InvalidNumLiteralBasePrefix,
+ InvalidNumLiteralSuffix, LabeledLoopInBreak, LeadingPlusNotSupported, LeftArrowOperator,
+ LifetimeInBorrowExpression, MacroInvocationWithQualifiedPath, MalformedLoopLabel,
+ MatchArmBodyWithoutBraces, MatchArmBodyWithoutBracesSugg, MissingCommaAfterMatchArm,
+ MissingInInForLoop, MissingInInForLoopSub, MissingSemicolonBeforeArray, NoFieldsForFnCall,
+ NotAsNegationOperator, NotAsNegationOperatorSub, OctalFloatLiteralNotSupported,
+ OuterAttributeNotAllowedOnIfElse, ParenthesesWithStructFields,
+ RequireColonAfterLabeledExpression, ShiftInterpretedAsGeneric, StructLiteralNotAllowedHere,
+ StructLiteralNotAllowedHereSugg, TildeAsUnaryOperator, UnexpectedTokenAfterLabel,
+ UnexpectedTokenAfterLabelSugg, WrapExpressionInParentheses,
+};
use crate::maybe_recover_from_interpolated_ty_qpath;
use core::mem;
@@ -20,8 +42,11 @@ use rustc_ast::{AnonConst, BinOp, BinOpKind, FnDecl, FnRetTy, MacCall, Param, Ty
use rustc_ast::{Arm, Async, BlockCheckMode, Expr, ExprKind, Label, Movability, RangeLimits};
use rustc_ast::{ClosureBinder, StmtKind};
use rustc_ast_pretty::pprust;
-use rustc_data_structures::thin_vec::ThinVec;
-use rustc_errors::{Applicability, Diagnostic, DiagnosticBuilder, ErrorGuaranteed, PResult};
+use rustc_errors::{
+ Applicability, Diagnostic, DiagnosticBuilder, ErrorGuaranteed, IntoDiagnostic, PResult,
+ StashKey,
+};
+use rustc_session::errors::ExprParenthesesNeeded;
use rustc_session::lint::builtin::BREAK_WITH_LABEL_AND_LOOP;
use rustc_session::lint::BuiltinLintDiagnostics;
use rustc_span::source_map::{self, Span, Spanned};
@@ -45,20 +70,12 @@ macro_rules! maybe_whole_expr {
token::NtPath(path) => {
let path = (**path).clone();
$p.bump();
- return Ok($p.mk_expr(
- $p.prev_token.span,
- ExprKind::Path(None, path),
- AttrVec::new(),
- ));
+ return Ok($p.mk_expr($p.prev_token.span, ExprKind::Path(None, path)));
}
token::NtBlock(block) => {
let block = block.clone();
$p.bump();
- return Ok($p.mk_expr(
- $p.prev_token.span,
- ExprKind::Block(block, None),
- AttrVec::new(),
- ));
+ return Ok($p.mk_expr($p.prev_token.span, ExprKind::Block(block, None)));
}
_ => {}
};
@@ -120,7 +137,7 @@ impl<'a> Parser<'a> {
// Special-case handling of `foo(_, _, _)`
err.emit();
self.bump();
- Ok(self.mk_expr(self.prev_token.span, ExprKind::Err, AttrVec::new()))
+ Ok(self.mk_expr(self.prev_token.span, ExprKind::Err))
}
_ => Err(err),
},
@@ -225,15 +242,18 @@ impl<'a> Parser<'a> {
AssocOp::Equal => "==",
AssocOp::NotEqual => "!=",
_ => unreachable!(),
- };
- self.struct_span_err(sp, &format!("invalid comparison operator `{sugg}=`"))
- .span_suggestion_short(
- sp,
- &format!("`{s}=` is not a valid comparison operator, use `{s}`", s = sugg),
- sugg,
- Applicability::MachineApplicable,
- )
- .emit();
+ }
+ .into();
+ let invalid = format!("{}=", &sugg);
+ self.sess.emit_err(InvalidComparisonOperator {
+ span: sp,
+ invalid: invalid.clone(),
+ sub: InvalidComparisonOperatorSub::Correctable {
+ span: sp,
+ invalid,
+ correct: sugg,
+ },
+ });
self.bump();
}
@@ -243,14 +263,15 @@ impl<'a> Parser<'a> {
&& self.prev_token.span.hi() == self.token.span.lo()
{
let sp = op.span.to(self.token.span);
- self.struct_span_err(sp, "invalid comparison operator `<>`")
- .span_suggestion_short(
- sp,
- "`<>` is not a valid comparison operator, use `!=`",
- "!=",
- Applicability::MachineApplicable,
- )
- .emit();
+ self.sess.emit_err(InvalidComparisonOperator {
+ span: sp,
+ invalid: "<>".into(),
+ sub: InvalidComparisonOperatorSub::Correctable {
+ span: sp,
+ invalid: "<>".into(),
+ correct: "!=".into(),
+ },
+ });
self.bump();
}
@@ -260,12 +281,11 @@ impl<'a> Parser<'a> {
&& self.prev_token.span.hi() == self.token.span.lo()
{
let sp = op.span.to(self.token.span);
- self.struct_span_err(sp, "invalid comparison operator `<=>`")
- .span_label(
- sp,
- "`<=>` is not a valid comparison operator, use `std::cmp::Ordering`",
- )
- .emit();
+ self.sess.emit_err(InvalidComparisonOperator {
+ span: sp,
+ invalid: "<=>".into(),
+ sub: InvalidComparisonOperatorSub::Spaceship(sp),
+ });
self.bump();
}
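
The three comparison-operator hunks above all funnel into one `InvalidComparisonOperator` error whose subdiagnostic enum picks the follow-up: a correctable replacement for `==`/`!=`/`<>` typos, or a note for `<=>`. A standalone sketch of that enum-driven shape (illustrative names only; the real subdiagnostic also carries spans):

// One error type, with an enum selecting the follow-up: a machine-applicable
// replacement for `<>`-style typos, or a plain note for `<=>`.
enum InvalidComparisonOperatorSub {
    Correctable { invalid: String, correct: String },
    Spaceship,
}

struct InvalidComparisonOperator {
    invalid: String,
    sub: InvalidComparisonOperatorSub,
}

fn render(err: InvalidComparisonOperator) -> String {
    let mut out = format!("invalid comparison operator `{}`", err.invalid);
    match err.sub {
        InvalidComparisonOperatorSub::Correctable { invalid, correct } => {
            out.push_str(&format!("; `{invalid}` is not valid, use `{correct}`"));
        }
        InvalidComparisonOperatorSub::Spaceship => {
            out.push_str("; `<=>` is not valid, use `std::cmp::Ordering`");
        }
    }
    out
}

fn main() {
    let err = InvalidComparisonOperator {
        invalid: "<>".into(),
        sub: InvalidComparisonOperatorSub::Correctable {
            invalid: "<>".into(),
            correct: "!=".into(),
        },
    };
    println!("{}", render(err));
}
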
@@ -329,11 +349,9 @@ impl<'a> Parser<'a> {
| AssocOp::GreaterEqual => {
let ast_op = op.to_ast_binop().unwrap();
let binary = self.mk_binary(source_map::respan(cur_op_span, ast_op), lhs, rhs);
- self.mk_expr(span, binary, AttrVec::new())
- }
- AssocOp::Assign => {
- self.mk_expr(span, ExprKind::Assign(lhs, rhs, cur_op_span), AttrVec::new())
+ self.mk_expr(span, binary)
}
+ AssocOp::Assign => self.mk_expr(span, ExprKind::Assign(lhs, rhs, cur_op_span)),
AssocOp::AssignOp(k) => {
let aop = match k {
token::Plus => BinOpKind::Add,
@@ -348,7 +366,7 @@ impl<'a> Parser<'a> {
token::Shr => BinOpKind::Shr,
};
let aopexpr = self.mk_assign_op(source_map::respan(cur_op_span, aop), lhs, rhs);
- self.mk_expr(span, aopexpr, AttrVec::new())
+ self.mk_expr(span, aopexpr)
}
AssocOp::As | AssocOp::Colon | AssocOp::DotDot | AssocOp::DotDotEq => {
self.span_bug(span, "AssocOp should have been handled by special case")
@@ -412,13 +430,11 @@ impl<'a> Parser<'a> {
/// but the next token implies this should be parsed as an expression.
/// For example: `if let Some(x) = x { x } else { 0 } / 2`.
fn error_found_expr_would_be_stmt(&self, lhs: &Expr) {
- let mut err = self.struct_span_err(
- self.token.span,
- &format!("expected expression, found `{}`", pprust::token_to_string(&self.token),),
- );
- err.span_label(self.token.span, "expected expression");
- self.sess.expr_parentheses_needed(&mut err, lhs.span);
- err.emit();
+ self.sess.emit_err(FoundExprWouldBeStmt {
+ span: self.token.span,
+ token: self.token.clone(),
+ suggestion: ExprParenthesesNeeded::surrounding(lhs.span),
+ });
}
/// Possibly translate the current token to an associative operator.
@@ -441,11 +457,19 @@ impl<'a> Parser<'a> {
}
(Some(op), _) => (op, self.token.span),
(None, Some((Ident { name: sym::and, span }, false))) => {
- self.error_bad_logical_op("and", "&&", "conjunction");
+ self.sess.emit_err(InvalidLogicalOperator {
+ span: self.token.span,
+ incorrect: "and".into(),
+ sub: InvalidLogicalOperatorSub::Conjunction(self.token.span),
+ });
(AssocOp::LAnd, span)
}
(None, Some((Ident { name: sym::or, span }, false))) => {
- self.error_bad_logical_op("or", "||", "disjunction");
+ self.sess.emit_err(InvalidLogicalOperator {
+ span: self.token.span,
+ incorrect: "or".into(),
+ sub: InvalidLogicalOperatorSub::Disjunction(self.token.span),
+ });
(AssocOp::LOr, span)
}
_ => return None,
@@ -453,19 +477,6 @@ impl<'a> Parser<'a> {
Some(source_map::respan(span, op))
}
- /// Error on `and` and `or` suggesting `&&` and `||` respectively.
- fn error_bad_logical_op(&self, bad: &str, good: &str, english: &str) {
- self.struct_span_err(self.token.span, &format!("`{bad}` is not a logical operator"))
- .span_suggestion_short(
- self.token.span,
- &format!("use `{good}` to perform logical {english}"),
- good,
- Applicability::MachineApplicable,
- )
- .note("unlike in e.g., python and PHP, `&&` and `||` are used for logical operators")
- .emit();
- }
-
/// Checks if this expression is a successfully parsed statement.
fn expr_is_complete(&self, e: &Expr) -> bool {
self.restrictions.contains(Restrictions::STMT_EXPR)
@@ -491,7 +502,7 @@ impl<'a> Parser<'a> {
let limits =
if op == AssocOp::DotDot { RangeLimits::HalfOpen } else { RangeLimits::Closed };
let range = self.mk_range(Some(lhs), rhs, limits);
- Ok(self.mk_expr(span, range, AttrVec::new()))
+ Ok(self.mk_expr(span, range))
}
fn is_at_start_of_range_notation_rhs(&self) -> bool {
@@ -540,7 +551,7 @@ impl<'a> Parser<'a> {
(lo, None)
};
let range = this.mk_range(None, opt_end, limits);
- Ok(this.mk_expr(span, range, attrs.into()))
+ Ok(this.mk_expr_with_attrs(span, range, attrs))
})
}
@@ -553,7 +564,7 @@ impl<'a> Parser<'a> {
($this:ident, $attrs:expr, |this, _| $body:expr) => {
$this.collect_tokens_for_expr($attrs, |$this, attrs| {
let (hi, ex) = $body?;
- Ok($this.mk_expr(lo.to(hi), ex, attrs.into()))
+ Ok($this.mk_expr_with_attrs(lo.to(hi), ex, attrs))
})
};
}
@@ -574,21 +585,16 @@ impl<'a> Parser<'a> {
make_it!(this, attrs, |this, _| this.parse_borrow_expr(lo))
}
token::BinOp(token::Plus) if this.look_ahead(1, |tok| tok.is_numeric_lit()) => {
- let mut err = this.struct_span_err(lo, "leading `+` is not supported");
- err.span_label(lo, "unexpected `+`");
+ let mut err =
+ LeadingPlusNotSupported { span: lo, remove_plus: None, add_parentheses: None };
// a block on the LHS might have been intended to be an expression instead
if let Some(sp) = this.sess.ambiguous_block_expr_parse.borrow().get(&lo) {
- this.sess.expr_parentheses_needed(&mut err, *sp);
+ err.add_parentheses = Some(ExprParenthesesNeeded::surrounding(*sp));
} else {
- err.span_suggestion_verbose(
- lo,
- "try removing the `+`",
- "",
- Applicability::MachineApplicable,
- );
+ err.remove_plus = Some(lo);
}
- err.emit();
+ this.sess.emit_err(err);
this.bump();
this.parse_prefix_expr(None)
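
`LeadingPlusNotSupported` above shows another recurring shape: the error struct is created first with empty `Option` suggestion slots, and the parser fills exactly one of them depending on context before emitting. A toy model of that shape, with stand-in types:

#[derive(Clone, Copy)]
struct Span(u32);

struct LeadingPlusNotSupported {
    span: Span,
    remove_plus: Option<Span>,
    add_parentheses: Option<Span>,
}

fn report(err: &LeadingPlusNotSupported) {
    eprintln!("error: leading `+` is not supported (offset {})", err.span.0);
    if let Some(sp) = err.remove_plus {
        eprintln!("help: try removing the `+` (offset {})", sp.0);
    }
    if let Some(sp) = err.add_parentheses {
        eprintln!("help: parentheses are required to parse this as an expression (offset {})", sp.0);
    }
}

fn main() {
    // The error is built first, then exactly one suggestion slot is filled
    // depending on what the parser knows about the surrounding code.
    let mut err = LeadingPlusNotSupported { span: Span(7), remove_plus: None, add_parentheses: None };
    let ambiguous_block_on_lhs = false; // stand-in for the ambiguous-block lookup
    if ambiguous_block_on_lhs {
        err.add_parentheses = Some(Span(0));
    } else {
        err.remove_plus = Some(Span(7));
    }
    report(&err);
}
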
@@ -630,14 +636,7 @@ impl<'a> Parser<'a> {
// Recover on `!` suggesting for bitwise negation instead.
fn recover_tilde_expr(&mut self, lo: Span) -> PResult<'a, (Span, ExprKind)> {
- self.struct_span_err(lo, "`~` cannot be used as a unary operator")
- .span_suggestion_short(
- lo,
- "use `!` to perform bitwise not",
- "!",
- Applicability::MachineApplicable,
- )
- .emit();
+ self.sess.emit_err(TildeAsUnaryOperator(lo));
self.parse_unary_expr(lo, UnOp::Not)
}
@@ -663,20 +662,25 @@ impl<'a> Parser<'a> {
/// Recover on `not expr` in favor of `!expr`.
fn recover_not_expr(&mut self, lo: Span) -> PResult<'a, (Span, ExprKind)> {
// Emit the error...
- let not_token = self.look_ahead(1, |t| t.clone());
- self.struct_span_err(
- not_token.span,
- &format!("unexpected {} after identifier", super::token_descr(&not_token)),
- )
- .span_suggestion_short(
+ let negated_token = self.look_ahead(1, |t| t.clone());
+
+ let sub_diag = if negated_token.is_numeric_lit() {
+ NotAsNegationOperatorSub::SuggestNotBitwise
+ } else if negated_token.is_bool_lit() {
+ NotAsNegationOperatorSub::SuggestNotLogical
+ } else {
+ NotAsNegationOperatorSub::SuggestNotDefault
+ };
+
+ self.sess.emit_err(NotAsNegationOperator {
+ negated: negated_token.span,
+ negated_desc: super::token_descr(&negated_token),
// Span the `not` plus trailing whitespace to avoid
// trailing whitespace after the `!` in our suggestion
- self.sess.source_map().span_until_non_whitespace(lo.to(not_token.span)),
- "use `!` to perform logical negation",
- "!",
- Applicability::MachineApplicable,
- )
- .emit();
+ sub: sub_diag(
+ self.sess.source_map().span_until_non_whitespace(lo.to(negated_token.span)),
+ ),
+ });
// ...and recover!
self.parse_unary_expr(lo, UnOp::Not)
@@ -705,11 +709,7 @@ impl<'a> Parser<'a> {
expr_kind: fn(P<Expr>, P<Ty>) -> ExprKind,
) -> PResult<'a, P<Expr>> {
let mk_expr = |this: &mut Self, lhs: P<Expr>, rhs: P<Ty>| {
- this.mk_expr(
- this.mk_expr_sp(&lhs, lhs_span, rhs.span),
- expr_kind(lhs, rhs),
- AttrVec::new(),
- )
+ this.mk_expr(this.mk_expr_sp(&lhs, lhs_span, rhs.span), expr_kind(lhs, rhs))
};
// Save the state of the parser before parsing type normally, in case there is a
@@ -737,17 +737,13 @@ impl<'a> Parser<'a> {
segments[0].ident.span,
),
};
- match self.parse_labeled_expr(label, AttrVec::new(), false) {
+ match self.parse_labeled_expr(label, false) {
Ok(expr) => {
type_err.cancel();
- self.struct_span_err(label.ident.span, "malformed loop label")
- .span_suggestion(
- label.ident.span,
- "use the correct loop label format",
- label.ident,
- Applicability::MachineApplicable,
- )
- .emit();
+ self.sess.emit_err(MalformedLoopLabel {
+ span: label.ident.span,
+ correct_label: label.ident,
+ });
return Ok(expr);
}
Err(err) => {
@@ -761,9 +757,34 @@ impl<'a> Parser<'a> {
match self.parse_path(PathStyle::Expr) {
Ok(path) => {
- let (op_noun, op_verb) = match self.token.kind {
- token::Lt => ("comparison", "comparing"),
- token::BinOp(token::Shl) => ("shift", "shifting"),
+ let span_after_type = parser_snapshot_after_type.token.span;
+ let expr = mk_expr(
+ self,
+ lhs,
+ self.mk_ty(path.span, TyKind::Path(None, path.clone())),
+ );
+
+ let args_span = self.look_ahead(1, |t| t.span).to(span_after_type);
+ let suggestion = ComparisonOrShiftInterpretedAsGenericSugg {
+ left: expr.span.shrink_to_lo(),
+ right: expr.span.shrink_to_hi(),
+ };
+
+ match self.token.kind {
+ token::Lt => self.sess.emit_err(ComparisonInterpretedAsGeneric {
+ comparison: self.token.span,
+ r#type: path,
+ args: args_span,
+ suggestion,
+ }),
+ token::BinOp(token::Shl) => {
+ self.sess.emit_err(ShiftInterpretedAsGeneric {
+ shift: self.token.span,
+ r#type: path,
+ args: args_span,
+ suggestion,
+ })
+ }
_ => {
// We can end up here even without `<` being the next token, for
// example because `parse_ty_no_plus` returns `Err` on keywords,
@@ -777,33 +798,7 @@ impl<'a> Parser<'a> {
// Successfully parsed the type path leaving a `<` yet to parse.
type_err.cancel();
- // Report non-fatal diagnostics, keep `x as usize` as an expression
- // in AST and continue parsing.
- let msg = format!(
- "`<` is interpreted as a start of generic arguments for `{}`, not a {}",
- pprust::path_to_string(&path),
- op_noun,
- );
- let span_after_type = parser_snapshot_after_type.token.span;
- let expr =
- mk_expr(self, lhs, self.mk_ty(path.span, TyKind::Path(None, path)));
-
- self.struct_span_err(self.token.span, &msg)
- .span_label(
- self.look_ahead(1, |t| t.span).to(span_after_type),
- "interpreted as generic arguments",
- )
- .span_label(self.token.span, format!("not interpreted as {op_noun}"))
- .multipart_suggestion(
- &format!("try {op_verb} the cast value"),
- vec![
- (expr.span.shrink_to_lo(), "(".to_string()),
- (expr.span.shrink_to_hi(), ")".to_string()),
- ],
- Applicability::MachineApplicable,
- )
- .emit();
-
+ // Keep `x as usize` as an expression in AST and continue parsing.
expr
}
Err(path_err) => {
@@ -850,7 +845,7 @@ impl<'a> Parser<'a> {
ExprKind::Index(_, _) => "indexing",
ExprKind::Try(_) => "`?`",
ExprKind::Field(_, _) => "a field access",
- ExprKind::MethodCall(_, _, _) => "a method call",
+ ExprKind::MethodCall(_, _, _, _) => "a method call",
ExprKind::Call(_, _) => "a function call",
ExprKind::Await(_) => "`.await`",
ExprKind::Err => return Ok(with_postfix),
@@ -859,7 +854,7 @@ impl<'a> Parser<'a> {
);
let mut err = self.struct_span_err(span, &msg);
- let suggest_parens = |err: &mut DiagnosticBuilder<'_, _>| {
+ let suggest_parens = |err: &mut Diagnostic| {
let suggestions = vec![
(span.shrink_to_lo(), "(".to_string()),
(span.shrink_to_hi(), ")".to_string()),
@@ -925,15 +920,7 @@ impl<'a> Parser<'a> {
}
fn error_remove_borrow_lifetime(&self, span: Span, lt_span: Span) {
- self.struct_span_err(span, "borrow expressions cannot be annotated with lifetimes")
- .span_label(lt_span, "annotated with lifetime here")
- .span_suggestion(
- lt_span,
- "remove the lifetime annotation",
- "",
- Applicability::MachineApplicable,
- )
- .emit();
+ self.sess.emit_err(LifetimeInBorrowExpression { span, lifetime_span: lt_span });
}
/// Parse `mut?` or `raw [ const | mut ]`.
@@ -965,18 +952,23 @@ impl<'a> Parser<'a> {
&mut self,
e0: P<Expr>,
lo: Span,
- mut attrs: Vec<ast::Attribute>,
+ mut attrs: ast::AttrVec,
) -> PResult<'a, P<Expr>> {
// Stitch the list of outer attributes onto the return value.
// A little bit ugly, but the best way given the current code
// structure
- self.parse_dot_or_call_expr_with_(e0, lo).map(|expr| {
- expr.map(|mut expr| {
- attrs.extend::<Vec<_>>(expr.attrs.into());
- expr.attrs = attrs.into();
- expr
+ let res = self.parse_dot_or_call_expr_with_(e0, lo);
+ if attrs.is_empty() {
+ res
+ } else {
+ res.map(|expr| {
+ expr.map(|mut expr| {
+ attrs.extend(expr.attrs);
+ expr.attrs = attrs;
+ expr
+ })
})
- })
+ }
}
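
Alongside the diagnostics work, this file drops the `attrs` parameter from `mk_expr`: attributes are stitched on afterwards, and only when the outer list is non-empty. A simplified sketch of that split, with toy stand-ins for `Expr` and `AttrVec`:

#[derive(Debug)]
struct Expr { attrs: Vec<String>, kind: String }

fn mk_expr(kind: &str) -> Expr {
    // The common constructor no longer takes an (almost always empty) attribute list.
    Expr { attrs: Vec::new(), kind: kind.to_string() }
}

fn mk_expr_with_attrs(kind: &str, attrs: Vec<String>) -> Expr {
    Expr { attrs, kind: kind.to_string() }
}

fn parse_dot_or_call(mut outer_attrs: Vec<String>) -> Expr {
    let expr = mk_expr("call");
    if outer_attrs.is_empty() {
        expr
    } else {
        // Only pay for the stitching when outer attributes actually exist.
        outer_attrs.extend(expr.attrs);
        mk_expr_with_attrs(&expr.kind, outer_attrs)
    }
}

fn main() {
    println!("{:?}", parse_dot_or_call(vec!["#[allow(unused)]".to_string()]));
    println!("{:?}", parse_dot_or_call(Vec::new()));
}
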
fn parse_dot_or_call_expr_with_(&mut self, mut e: P<Expr>, lo: Span) -> PResult<'a, P<Expr>> {
@@ -990,7 +982,7 @@ impl<'a> Parser<'a> {
};
if has_question {
// `expr?`
- e = self.mk_expr(lo.to(self.prev_token.span), ExprKind::Try(e), AttrVec::new());
+ e = self.mk_expr(lo.to(self.prev_token.span), ExprKind::Try(e));
continue;
}
let has_dot = if self.prev_token.kind == TokenKind::Ident(kw::Return, false) {
@@ -1167,8 +1159,10 @@ impl<'a> Parser<'a> {
}
let span = self.prev_token.span;
let field = ExprKind::Field(base, Ident::new(field, span));
- self.expect_no_suffix(span, "a tuple index", suffix);
- self.mk_expr(lo.to(span), field, AttrVec::new())
+ if let Some(suffix) = suffix {
+ self.expect_no_tuple_index_suffix(span, suffix);
+ }
+ self.mk_expr(lo.to(span), field)
}
/// Parse a function call expression, `expr(...)`.
@@ -1182,9 +1176,9 @@ impl<'a> Parser<'a> {
};
let open_paren = self.token.span;
- let mut seq = self.parse_paren_expr_seq().map(|args| {
- self.mk_expr(lo.to(self.prev_token.span), self.mk_call(fun, args), AttrVec::new())
- });
+ let mut seq = self
+ .parse_paren_expr_seq()
+ .map(|args| self.mk_expr(lo.to(self.prev_token.span), self.mk_call(fun, args)));
if let Some(expr) =
self.maybe_recover_struct_lit_bad_delims(lo, open_paren, &mut seq, snapshot)
{
@@ -1205,9 +1199,8 @@ impl<'a> Parser<'a> {
) -> Option<P<Expr>> {
match (seq.as_mut(), snapshot) {
(Err(err), Some((mut snapshot, ExprKind::Path(None, path)))) => {
- let name = pprust::path_to_string(&path);
snapshot.bump(); // `(`
- match snapshot.parse_struct_fields(path, false, Delimiter::Parenthesis) {
+ match snapshot.parse_struct_fields(path.clone(), false, Delimiter::Parenthesis) {
Ok((fields, ..))
if snapshot.eat(&token::CloseDelim(Delimiter::Parenthesis)) =>
{
@@ -1217,29 +1210,25 @@ impl<'a> Parser<'a> {
let close_paren = self.prev_token.span;
let span = lo.to(self.prev_token.span);
if !fields.is_empty() {
- let replacement_err = self.struct_span_err(
+ let mut replacement_err = ParenthesesWithStructFields {
span,
- "invalid `struct` delimiters or `fn` call arguments",
- );
- mem::replace(err, replacement_err).cancel();
-
- err.multipart_suggestion(
- &format!("if `{name}` is a struct, use braces as delimiters"),
- vec![
- (open_paren, " { ".to_string()),
- (close_paren, " }".to_string()),
- ],
- Applicability::MaybeIncorrect,
- );
- err.multipart_suggestion(
- &format!("if `{name}` is a function, use the arguments directly"),
- fields
- .into_iter()
- .map(|field| (field.span.until(field.expr.span), String::new()))
- .collect(),
- Applicability::MaybeIncorrect,
- );
- err.emit();
+ r#type: path,
+ braces_for_struct: BracesForStructLiteral {
+ first: open_paren,
+ second: close_paren,
+ },
+ no_fields_for_fn: NoFieldsForFnCall {
+ fields: fields
+ .into_iter()
+ .map(|field| field.span.until(field.expr.span))
+ .collect(),
+ },
+ }
+ .into_diagnostic(&self.sess.span_diagnostic);
+ replacement_err.emit();
+
+ let old_err = mem::replace(err, replacement_err);
+ old_err.cancel();
} else {
err.emit();
}
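
The struct-literal recovery above also keeps the replace-and-cancel idiom: a more specific diagnostic is built, swapped into the slot the caller already holds, and the generic one is cancelled so it never reaches the user. A minimal standalone model of that dance (the `Diag` type here is a stand-in, not `DiagnosticBuilder`):

use std::mem;

struct Diag { msg: String, cancelled: bool }

impl Diag {
    fn new(msg: &str) -> Self { Diag { msg: msg.to_string(), cancelled: false } }
    fn cancel(&mut self) { self.cancelled = true; }
    fn emit(&self) { if !self.cancelled { eprintln!("error: {}", self.msg); } }
}

// Emit the specific error, hand it to the caller, and silence the generic one.
fn upgrade_error(err: &mut Diag) {
    let replacement = Diag::new("invalid `struct` delimiters or `fn` call arguments");
    replacement.emit();
    let mut old = mem::replace(err, replacement);
    old.cancel();
}

fn main() {
    let mut err = Diag::new("generic error about unexpected tokens");
    upgrade_error(&mut err);
}
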
@@ -1258,10 +1247,13 @@ impl<'a> Parser<'a> {
/// Parse an indexing expression `expr[...]`.
fn parse_index_expr(&mut self, lo: Span, base: P<Expr>) -> PResult<'a, P<Expr>> {
+ let prev_span = self.prev_token.span;
+ let open_delim_span = self.token.span;
self.bump(); // `[`
let index = self.parse_expr()?;
+ self.suggest_missing_semicolon_before_array(prev_span, open_delim_span)?;
self.expect(&token::CloseDelim(Delimiter::Bracket))?;
- Ok(self.mk_expr(lo.to(self.prev_token.span), self.mk_index(base, index), AttrVec::new()))
+ Ok(self.mk_expr(lo.to(self.prev_token.span), self.mk_index(base, index)))
}
/// Assuming we have just parsed `.`, continue parsing into an expression.
@@ -1277,24 +1269,18 @@ impl<'a> Parser<'a> {
if self.check(&token::OpenDelim(Delimiter::Parenthesis)) {
// Method call `expr.f()`
- let mut args = self.parse_paren_expr_seq()?;
- args.insert(0, self_arg);
-
+ let args = self.parse_paren_expr_seq()?;
let fn_span = fn_span_lo.to(self.prev_token.span);
let span = lo.to(self.prev_token.span);
- Ok(self.mk_expr(span, ExprKind::MethodCall(segment, args, fn_span), AttrVec::new()))
+ Ok(self.mk_expr(span, ExprKind::MethodCall(segment, self_arg, args, fn_span)))
} else {
// Field access `expr.f`
if let Some(args) = segment.args {
- self.struct_span_err(
- args.span(),
- "field expressions cannot have generic arguments",
- )
- .emit();
+ self.sess.emit_err(FieldExpressionWithGeneric(args.span()));
}
let span = lo.to(self.prev_token.span);
- Ok(self.mk_expr(span, ExprKind::Field(self_arg, segment.ident), AttrVec::new()))
+ Ok(self.mk_expr(span, ExprKind::Field(self_arg, segment.ident)))
}
}
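
The method-call hunk above reflects an AST change: `ExprKind::MethodCall` now carries the receiver in its own slot instead of prepending it as `args[0]`. A toy model of the new shape (simplified types, not the real `ast::ExprKind`):

#[derive(Debug)]
enum Expr {
    Lit(i64),
    // The receiver now has its own slot instead of being stored as `args[0]`.
    MethodCall { name: String, receiver: Box<Expr>, args: Vec<Expr> },
}

fn mk_method_call(receiver: Expr, name: &str, args: Vec<Expr>) -> Expr {
    Expr::MethodCall { name: name.to_string(), receiver: Box::new(receiver), args }
}

fn main() {
    // Roughly models `1.max(2)`.
    let call = mk_method_call(Expr::Lit(1), "max", vec![Expr::Lit(2)]);
    println!("{:?}", call);
}
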
@@ -1309,10 +1295,6 @@ impl<'a> Parser<'a> {
// Outer attributes are already parsed and will be
// added to the return value after the fact.
- //
- // Therefore, prevent sub-parser from parsing
- // attributes by giving them an empty "already-parsed" list.
- let attrs = AttrVec::new();
// Note: when adding new syntax here, don't forget to adjust `TokenKind::can_begin_expr()`.
let lo = self.token.span;
@@ -1320,80 +1302,81 @@ impl<'a> Parser<'a> {
// This match arm is a special-case of the `_` match arm below and
// could be removed without changing functionality, but it's faster
// to have it here, especially for programs with large constants.
- self.parse_lit_expr(attrs)
+ self.parse_lit_expr()
} else if self.check(&token::OpenDelim(Delimiter::Parenthesis)) {
- self.parse_tuple_parens_expr(attrs)
+ self.parse_tuple_parens_expr()
} else if self.check(&token::OpenDelim(Delimiter::Brace)) {
- self.parse_block_expr(None, lo, BlockCheckMode::Default, attrs)
+ self.parse_block_expr(None, lo, BlockCheckMode::Default)
} else if self.check(&token::BinOp(token::Or)) || self.check(&token::OrOr) {
- self.parse_closure_expr(attrs).map_err(|mut err| {
+ self.parse_closure_expr().map_err(|mut err| {
// If the input is something like `if a { 1 } else { 2 } | if a { 3 } else { 4 }`
// then suggest parens around the lhs.
if let Some(sp) = self.sess.ambiguous_block_expr_parse.borrow().get(&lo) {
- self.sess.expr_parentheses_needed(&mut err, *sp);
+ err.subdiagnostic(ExprParenthesesNeeded::surrounding(*sp));
}
err
})
} else if self.check(&token::OpenDelim(Delimiter::Bracket)) {
- self.parse_array_or_repeat_expr(attrs, Delimiter::Bracket)
+ self.parse_array_or_repeat_expr(Delimiter::Bracket)
} else if self.check_path() {
- self.parse_path_start_expr(attrs)
+ self.parse_path_start_expr()
} else if self.check_keyword(kw::Move) || self.check_keyword(kw::Static) {
- self.parse_closure_expr(attrs)
+ self.parse_closure_expr()
} else if self.eat_keyword(kw::If) {
- self.parse_if_expr(attrs)
+ self.parse_if_expr()
} else if self.check_keyword(kw::For) {
if self.choose_generics_over_qpath(1) {
- self.parse_closure_expr(attrs)
+ self.parse_closure_expr()
} else {
assert!(self.eat_keyword(kw::For));
- self.parse_for_expr(None, self.prev_token.span, attrs)
+ self.parse_for_expr(None, self.prev_token.span)
}
} else if self.eat_keyword(kw::While) {
- self.parse_while_expr(None, self.prev_token.span, attrs)
+ self.parse_while_expr(None, self.prev_token.span)
} else if let Some(label) = self.eat_label() {
- self.parse_labeled_expr(label, attrs, true)
+ self.parse_labeled_expr(label, true)
} else if self.eat_keyword(kw::Loop) {
let sp = self.prev_token.span;
- self.parse_loop_expr(None, self.prev_token.span, attrs).map_err(|mut err| {
+ self.parse_loop_expr(None, self.prev_token.span).map_err(|mut err| {
err.span_label(sp, "while parsing this `loop` expression");
err
})
} else if self.eat_keyword(kw::Continue) {
let kind = ExprKind::Continue(self.eat_label());
- Ok(self.mk_expr(lo.to(self.prev_token.span), kind, attrs))
+ Ok(self.mk_expr(lo.to(self.prev_token.span), kind))
} else if self.eat_keyword(kw::Match) {
let match_sp = self.prev_token.span;
- self.parse_match_expr(attrs).map_err(|mut err| {
+ self.parse_match_expr().map_err(|mut err| {
err.span_label(match_sp, "while parsing this `match` expression");
err
})
} else if self.eat_keyword(kw::Unsafe) {
let sp = self.prev_token.span;
- self.parse_block_expr(None, lo, BlockCheckMode::Unsafe(ast::UserProvided), attrs)
- .map_err(|mut err| {
+ self.parse_block_expr(None, lo, BlockCheckMode::Unsafe(ast::UserProvided)).map_err(
+ |mut err| {
err.span_label(sp, "while parsing this `unsafe` expression");
err
- })
+ },
+ )
} else if self.check_inline_const(0) {
self.parse_const_block(lo.to(self.token.span), false)
} else if self.is_do_catch_block() {
- self.recover_do_catch(attrs)
+ self.recover_do_catch()
} else if self.is_try_block() {
self.expect_keyword(kw::Try)?;
- self.parse_try_block(lo, attrs)
+ self.parse_try_block(lo)
} else if self.eat_keyword(kw::Return) {
- self.parse_return_expr(attrs)
+ self.parse_return_expr()
} else if self.eat_keyword(kw::Break) {
- self.parse_break_expr(attrs)
+ self.parse_break_expr()
} else if self.eat_keyword(kw::Yield) {
- self.parse_yield_expr(attrs)
+ self.parse_yield_expr()
} else if self.is_do_yeet() {
- self.parse_yeet_expr(attrs)
+ self.parse_yeet_expr()
} else if self.check_keyword(kw::Let) {
- self.parse_let_expr(attrs)
+ self.parse_let_expr()
} else if self.eat_keyword(kw::Underscore) {
- Ok(self.mk_expr(self.prev_token.span, ExprKind::Underscore, attrs))
+ Ok(self.mk_expr(self.prev_token.span, ExprKind::Underscore))
} else if !self.unclosed_delims.is_empty() && self.check(&token::Semi) {
// Don't complain about bare semicolons after unclosed braces
// recovery in order to keep the error count down. Fixing the
@@ -1412,32 +1395,32 @@ impl<'a> Parser<'a> {
if self.check_keyword(kw::Async) {
if self.is_async_block() {
// Check for `async {` and `async move {`.
- self.parse_async_block(attrs)
+ self.parse_async_block()
} else {
- self.parse_closure_expr(attrs)
+ self.parse_closure_expr()
}
} else if self.eat_keyword(kw::Await) {
- self.recover_incorrect_await_syntax(lo, self.prev_token.span, attrs)
+ self.recover_incorrect_await_syntax(lo, self.prev_token.span)
} else {
- self.parse_lit_expr(attrs)
+ self.parse_lit_expr()
}
} else {
- self.parse_lit_expr(attrs)
+ self.parse_lit_expr()
}
}
- fn parse_lit_expr(&mut self, attrs: AttrVec) -> PResult<'a, P<Expr>> {
+ fn parse_lit_expr(&mut self) -> PResult<'a, P<Expr>> {
let lo = self.token.span;
match self.parse_opt_lit() {
Some(literal) => {
- let expr = self.mk_expr(lo.to(self.prev_token.span), ExprKind::Lit(literal), attrs);
+ let expr = self.mk_expr(lo.to(self.prev_token.span), ExprKind::Lit(literal));
self.maybe_recover_from_bad_qpath(expr)
}
None => self.try_macro_suggestion(),
}
}
- fn parse_tuple_parens_expr(&mut self, attrs: AttrVec) -> PResult<'a, P<Expr>> {
+ fn parse_tuple_parens_expr(&mut self) -> PResult<'a, P<Expr>> {
let lo = self.token.span;
self.expect(&token::OpenDelim(Delimiter::Parenthesis))?;
let (es, trailing_comma) = match self.parse_seq_to_end(
@@ -1457,15 +1440,11 @@ impl<'a> Parser<'a> {
// `(e,)` is a tuple with only one field, `e`.
ExprKind::Tup(es)
};
- let expr = self.mk_expr(lo.to(self.prev_token.span), kind, attrs);
+ let expr = self.mk_expr(lo.to(self.prev_token.span), kind);
self.maybe_recover_from_bad_qpath(expr)
}
- fn parse_array_or_repeat_expr(
- &mut self,
- attrs: AttrVec,
- close_delim: Delimiter,
- ) -> PResult<'a, P<Expr>> {
+ fn parse_array_or_repeat_expr(&mut self, close_delim: Delimiter) -> PResult<'a, P<Expr>> {
let lo = self.token.span;
self.bump(); // `[` or other open delim
@@ -1494,81 +1473,94 @@ impl<'a> Parser<'a> {
ExprKind::Array(vec![first_expr])
}
};
- let expr = self.mk_expr(lo.to(self.prev_token.span), kind, attrs);
+ let expr = self.mk_expr(lo.to(self.prev_token.span), kind);
self.maybe_recover_from_bad_qpath(expr)
}
- fn parse_path_start_expr(&mut self, attrs: AttrVec) -> PResult<'a, P<Expr>> {
+ fn parse_path_start_expr(&mut self) -> PResult<'a, P<Expr>> {
let (qself, path) = if self.eat_lt() {
let (qself, path) = self.parse_qpath(PathStyle::Expr)?;
(Some(qself), path)
} else {
(None, self.parse_path(PathStyle::Expr)?)
};
- let lo = path.span;
// `!`, as an operator, is prefix, so we know this isn't that.
- let (hi, kind) = if self.eat(&token::Not) {
+ let (span, kind) = if self.eat(&token::Not) {
// MACRO INVOCATION expression
if qself.is_some() {
- self.struct_span_err(path.span, "macros cannot use qualified paths").emit();
+ self.sess.emit_err(MacroInvocationWithQualifiedPath(path.span));
}
- let mac = MacCall {
+ let lo = path.span;
+ let mac = P(MacCall {
path,
args: self.parse_mac_args()?,
prior_type_ascription: self.last_type_ascription,
- };
- (self.prev_token.span, ExprKind::MacCall(mac))
- } else if self.check(&token::OpenDelim(Delimiter::Brace)) {
- if let Some(expr) = self.maybe_parse_struct_expr(qself.as_ref(), &path, &attrs) {
+ });
+ (lo.to(self.prev_token.span), ExprKind::MacCall(mac))
+ } else if self.check(&token::OpenDelim(Delimiter::Brace)) &&
+ let Some(expr) = self.maybe_parse_struct_expr(qself.as_ref(), &path) {
if qself.is_some() {
self.sess.gated_spans.gate(sym::more_qualified_paths, path.span);
}
return expr;
- } else {
- (path.span, ExprKind::Path(qself, path))
- }
} else {
(path.span, ExprKind::Path(qself, path))
};
- let expr = self.mk_expr(lo.to(hi), kind, attrs);
+ let expr = self.mk_expr(span, kind);
self.maybe_recover_from_bad_qpath(expr)
}
/// Parse `'label: $expr`. The label is already parsed.
fn parse_labeled_expr(
&mut self,
- label: Label,
- attrs: AttrVec,
+ label_: Label,
mut consume_colon: bool,
) -> PResult<'a, P<Expr>> {
- let lo = label.ident.span;
- let label = Some(label);
+ let lo = label_.ident.span;
+ let label = Some(label_);
let ate_colon = self.eat(&token::Colon);
let expr = if self.eat_keyword(kw::While) {
- self.parse_while_expr(label, lo, attrs)
+ self.parse_while_expr(label, lo)
} else if self.eat_keyword(kw::For) {
- self.parse_for_expr(label, lo, attrs)
+ self.parse_for_expr(label, lo)
} else if self.eat_keyword(kw::Loop) {
- self.parse_loop_expr(label, lo, attrs)
+ self.parse_loop_expr(label, lo)
} else if self.check_noexpect(&token::OpenDelim(Delimiter::Brace))
|| self.token.is_whole_block()
{
- self.parse_block_expr(label, lo, BlockCheckMode::Default, attrs)
+ self.parse_block_expr(label, lo, BlockCheckMode::Default)
+ } else if !ate_colon
+ && (matches!(self.token.kind, token::CloseDelim(_) | token::Comma)
+ || self.token.is_op())
+ {
+ let lit = self.recover_unclosed_char(label_.ident, |self_| {
+ self_.sess.create_err(UnexpectedTokenAfterLabel {
+ span: self_.token.span,
+ remove_label: None,
+ enclose_in_block: None,
+ })
+ });
+ consume_colon = false;
+ Ok(self.mk_expr(lo, ExprKind::Lit(lit)))
} else if !ate_colon
&& (self.check_noexpect(&TokenKind::Comma) || self.check_noexpect(&TokenKind::Gt))
{
// We're probably inside of a `Path<'a>` that needs a turbofish
- let msg = "expected `while`, `for`, `loop` or `{` after a label";
- self.struct_span_err(self.token.span, msg).span_label(self.token.span, msg).emit();
+ self.sess.emit_err(UnexpectedTokenAfterLabel {
+ span: self.token.span,
+ remove_label: None,
+ enclose_in_block: None,
+ });
consume_colon = false;
Ok(self.mk_expr_err(lo))
} else {
- let msg = "expected `while`, `for`, `loop` or `{` after a label";
-
- let mut err = self.struct_span_err(self.token.span, msg);
- err.span_label(self.token.span, msg);
+ let mut err = UnexpectedTokenAfterLabel {
+ span: self.token.span,
+ remove_label: None,
+ enclose_in_block: None,
+ };
// Continue as an expression in an effort to recover on `'label: non_block_expr`.
let expr = self.parse_expr().map(|expr| {
@@ -1595,78 +1587,81 @@ impl<'a> Parser<'a> {
// If there are no breaks that may use this label, suggest removing the label and
// recover to the unmodified expression.
if !found_labeled_breaks {
- let msg = "consider removing the label";
- err.span_suggestion_verbose(
- lo.until(span),
- msg,
- "",
- Applicability::MachineApplicable,
- );
+ err.remove_label = Some(lo.until(span));
return expr;
}
- let sugg_msg = "consider enclosing expression in a block";
- let suggestions = vec![
- (span.shrink_to_lo(), "{ ".to_owned()),
- (span.shrink_to_hi(), " }".to_owned()),
- ];
-
- err.multipart_suggestion_verbose(
- sugg_msg,
- suggestions,
- Applicability::MachineApplicable,
- );
+ err.enclose_in_block = Some(UnexpectedTokenAfterLabelSugg {
+ left: span.shrink_to_lo(),
+ right: span.shrink_to_hi(),
+ });
- // Replace `'label: non_block_expr` with `'label: {non_block_expr}` in order to supress future errors about `break 'label`.
+ // Replace `'label: non_block_expr` with `'label: {non_block_expr}` in order to suppress future errors about `break 'label`.
let stmt = self.mk_stmt(span, StmtKind::Expr(expr));
let blk = self.mk_block(vec![stmt], BlockCheckMode::Default, span);
- self.mk_expr(span, ExprKind::Block(blk, label), ThinVec::new())
+ self.mk_expr(span, ExprKind::Block(blk, label))
});
- err.emit();
+ self.sess.emit_err(err);
expr
}?;
if !ate_colon && consume_colon {
- self.error_labeled_expr_must_be_followed_by_colon(lo, expr.span);
+ self.sess.emit_err(RequireColonAfterLabeledExpression {
+ span: expr.span,
+ label: lo,
+ label_end: lo.shrink_to_hi(),
+ });
}
Ok(expr)
}
- fn error_labeled_expr_must_be_followed_by_colon(&self, lo: Span, span: Span) {
- self.struct_span_err(span, "labeled expression must be followed by `:`")
- .span_label(lo, "the label")
- .span_suggestion_short(
- lo.shrink_to_hi(),
- "add `:` after the label",
- ": ",
- Applicability::MachineApplicable,
+ /// Emit an error when a char is parsed as a lifetime because of a missing quote
+ pub(super) fn recover_unclosed_char(
+ &mut self,
+ lifetime: Ident,
+ err: impl FnOnce(&mut Self) -> DiagnosticBuilder<'a, ErrorGuaranteed>,
+ ) -> ast::Lit {
+ if let Some(mut diag) =
+ self.sess.span_diagnostic.steal_diagnostic(lifetime.span, StashKey::LifetimeIsChar)
+ {
+ diag.span_suggestion_verbose(
+ lifetime.span.shrink_to_hi(),
+ "add `'` to close the char literal",
+ "'",
+ Applicability::MaybeIncorrect,
)
- .note("labels are used before loops and blocks, allowing e.g., `break 'label` to them")
.emit();
+ } else {
+ err(self)
+ .span_suggestion_verbose(
+ lifetime.span.shrink_to_hi(),
+ "add `'` to close the char literal",
+ "'",
+ Applicability::MaybeIncorrect,
+ )
+ .emit();
+ }
+ ast::Lit {
+ token_lit: token::Lit::new(token::LitKind::Char, lifetime.name, None),
+ kind: ast::LitKind::Char(lifetime.name.as_str().chars().next().unwrap_or('_')),
+ span: lifetime.span,
+ }
}
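
`recover_unclosed_char` above leans on stashed diagnostics: an earlier stage parks an error under `StashKey::LifetimeIsChar`, and this recovery path either steals and improves it or builds a fresh one via the `err` closure. A self-contained sketch of the stash-and-steal idea, using toy types in place of rustc's `Handler`:

use std::collections::HashMap;

#[derive(Hash, PartialEq, Eq)]
enum StashKey { LifetimeIsChar }

#[derive(Default)]
struct Handler { stashed: HashMap<(u32, StashKey), String> }

impl Handler {
    fn stash(&mut self, span: u32, key: StashKey, msg: &str) {
        self.stashed.insert((span, key), msg.to_string());
    }
    fn steal(&mut self, span: u32, key: StashKey) -> Option<String> {
        self.stashed.remove(&(span, key))
    }
}

// Either steal and upgrade the parked diagnostic, or build a fresh one.
fn recover_unclosed_char(handler: &mut Handler, span: u32) {
    let msg = handler
        .steal(span, StashKey::LifetimeIsChar)
        .unwrap_or_else(|| "unexpected token".to_string());
    eprintln!("error: {msg}");
    eprintln!("help: add `'` to close the char literal");
}

fn main() {
    let mut handler = Handler::default();
    handler.stash(5, StashKey::LifetimeIsChar, "lifetimes cannot be used here");
    recover_unclosed_char(&mut handler, 5);
}
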
/// Recover on the syntax `do catch { ... }` suggesting `try { ... }` instead.
- fn recover_do_catch(&mut self, attrs: AttrVec) -> PResult<'a, P<Expr>> {
+ fn recover_do_catch(&mut self) -> PResult<'a, P<Expr>> {
let lo = self.token.span;
self.bump(); // `do`
self.bump(); // `catch`
- let span_dc = lo.to(self.prev_token.span);
- self.struct_span_err(span_dc, "found removed `do catch` syntax")
- .span_suggestion(
- span_dc,
- "replace with the new syntax",
- "try",
- Applicability::MachineApplicable,
- )
- .note("following RFC #2388, the new non-placeholder syntax is `try`")
- .emit();
+ let span = lo.to(self.prev_token.span);
+ self.sess.emit_err(DoCatchSyntaxRemoved { span });
- self.parse_try_block(lo, attrs)
+ self.parse_try_block(lo)
}
/// Parse an expression if the token can begin one.
@@ -1675,15 +1670,15 @@ impl<'a> Parser<'a> {
}
/// Parse `"return" expr?`.
- fn parse_return_expr(&mut self, attrs: AttrVec) -> PResult<'a, P<Expr>> {
+ fn parse_return_expr(&mut self) -> PResult<'a, P<Expr>> {
let lo = self.prev_token.span;
let kind = ExprKind::Ret(self.parse_expr_opt()?);
- let expr = self.mk_expr(lo.to(self.prev_token.span), kind, attrs);
+ let expr = self.mk_expr(lo.to(self.prev_token.span), kind);
self.maybe_recover_from_bad_qpath(expr)
}
/// Parse `"do" "yeet" expr?`.
- fn parse_yeet_expr(&mut self, attrs: AttrVec) -> PResult<'a, P<Expr>> {
+ fn parse_yeet_expr(&mut self) -> PResult<'a, P<Expr>> {
let lo = self.token.span;
self.bump(); // `do`
@@ -1693,7 +1688,7 @@ impl<'a> Parser<'a> {
let span = lo.to(self.prev_token.span);
self.sess.gated_spans.gate(sym::yeet_expr, span);
- let expr = self.mk_expr(span, kind, attrs);
+ let expr = self.mk_expr(span, kind);
self.maybe_recover_from_bad_qpath(expr)
}
@@ -1705,26 +1700,20 @@ impl<'a> Parser<'a> {
/// `break 'lbl: loop {}`); a labeled break with an unlabeled loop as its value
/// expression only gets a warning for compatibility reasons; and a labeled break
/// with a labeled loop does not even get a warning because there is no ambiguity.
- fn parse_break_expr(&mut self, attrs: AttrVec) -> PResult<'a, P<Expr>> {
+ fn parse_break_expr(&mut self) -> PResult<'a, P<Expr>> {
let lo = self.prev_token.span;
let mut label = self.eat_label();
let kind = if label.is_some() && self.token == token::Colon {
// The value expression can be a labeled loop, see issue #86948, e.g.:
// `loop { break 'label: loop { break 'label 42; }; }`
- let lexpr = self.parse_labeled_expr(label.take().unwrap(), AttrVec::new(), true)?;
- self.struct_span_err(
- lexpr.span,
- "parentheses are required around this expression to avoid confusion with a labeled break expression",
- )
- .multipart_suggestion(
- "wrap the expression in parentheses",
- vec![
- (lexpr.span.shrink_to_lo(), "(".to_string()),
- (lexpr.span.shrink_to_hi(), ")".to_string()),
- ],
- Applicability::MachineApplicable,
- )
- .emit();
+ let lexpr = self.parse_labeled_expr(label.take().unwrap(), true)?;
+ self.sess.emit_err(LabeledLoopInBreak {
+ span: lexpr.span,
+ sub: WrapExpressionInParentheses {
+ left: lexpr.span.shrink_to_lo(),
+ right: lexpr.span.shrink_to_hi(),
+ },
+ });
Some(lexpr)
} else if self.token != token::OpenDelim(Delimiter::Brace)
|| !self.restrictions.contains(Restrictions::NO_STRUCT_LITERAL)
@@ -1753,17 +1742,17 @@ impl<'a> Parser<'a> {
} else {
None
};
- let expr = self.mk_expr(lo.to(self.prev_token.span), ExprKind::Break(label, kind), attrs);
+ let expr = self.mk_expr(lo.to(self.prev_token.span), ExprKind::Break(label, kind));
self.maybe_recover_from_bad_qpath(expr)
}
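
For reference, the parenthesized form that the `LabeledLoopInBreak` suggestion above produces is valid stable Rust; without the parentheses the label would bind to the `break` itself rather than to the loop:

fn main() {
    // Without the parentheses, `break 'inner: loop { ... }` would read as a
    // `break` to a label named `'inner`; the suggested parentheses make the
    // labeled loop the break's value expression instead.
    let x = loop {
        break ('inner: loop {
            break 'inner 42;
        });
    };
    assert_eq!(x, 42);
}
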
/// Parse `"yield" expr?`.
- fn parse_yield_expr(&mut self, attrs: AttrVec) -> PResult<'a, P<Expr>> {
+ fn parse_yield_expr(&mut self) -> PResult<'a, P<Expr>> {
let lo = self.prev_token.span;
let kind = ExprKind::Yield(self.parse_expr_opt()?);
let span = lo.to(self.prev_token.span);
self.sess.gated_spans.gate(sym::generators, span);
- let expr = self.mk_expr(span, kind, attrs);
+ let expr = self.mk_expr(span, kind);
self.maybe_recover_from_bad_qpath(expr)
}
@@ -1775,8 +1764,8 @@ impl<'a> Parser<'a> {
Some(lit) => match lit.kind {
ast::LitKind::Str(symbol_unescaped, style) => Ok(ast::StrLit {
style,
- symbol: lit.token.symbol,
- suffix: lit.token.suffix,
+ symbol: lit.token_lit.symbol,
+ suffix: lit.token_lit.suffix,
span: lit.span,
symbol_unescaped,
}),
@@ -1787,7 +1776,7 @@ impl<'a> Parser<'a> {
}
pub(super) fn parse_lit(&mut self) -> PResult<'a, Lit> {
- self.parse_opt_lit().ok_or_else(|| {
+ self.parse_opt_lit().ok_or(()).or_else(|()| {
if let token::Interpolated(inner) = &self.token.kind {
let expr = match inner.as_ref() {
token::NtExpr(expr) => Some(expr),
@@ -1796,16 +1785,25 @@ impl<'a> Parser<'a> {
};
if let Some(expr) = expr {
if matches!(expr.kind, ExprKind::Err) {
- let mut err = self
- .diagnostic()
- .struct_span_err(self.token.span, "invalid interpolated expression");
+ let mut err = InvalidInterpolatedExpression { span: self.token.span }
+ .into_diagnostic(&self.sess.span_diagnostic);
err.downgrade_to_delayed_bug();
- return err;
+ return Err(err);
}
}
}
- let msg = format!("unexpected token: {}", super::token_descr(&self.token));
- self.struct_span_err(self.token.span, &msg)
+ let token = self.token.clone();
+ let err = |self_: &mut Self| {
+ let msg = format!("unexpected token: {}", super::token_descr(&token));
+ self_.struct_span_err(token.span, &msg)
+ };
+ // On an error path, eagerly consider a lifetime to be an unclosed character lit
+ if self.token.is_lifetime() {
+ let lt = self.expect_lifetime();
+ Ok(self.recover_unclosed_char(lt.ident, err))
+ } else {
+ Err(err(self))
+ }
})
}
@@ -1830,7 +1828,10 @@ impl<'a> Parser<'a> {
});
if let Some(token) = &recovered {
self.bump();
- self.error_float_lits_must_have_int_part(&token);
+ self.sess.emit_err(FloatLiteralRequiresIntegerPart {
+ span: token.span,
+ correct: pprust::token_to_string(token).into_owned(),
+ });
}
}
@@ -1853,22 +1854,11 @@ impl<'a> Parser<'a> {
let suffixless_lit = token::Lit::new(lit.kind, lit.symbol, None);
let symbol = Symbol::intern(&suffixless_lit.to_string());
let lit = token::Lit::new(token::Err, symbol, lit.suffix);
- Some(Lit::from_lit_token(lit, span).unwrap_or_else(|_| unreachable!()))
+ Some(Lit::from_token_lit(lit, span).unwrap_or_else(|_| unreachable!()))
}
}
}
- fn error_float_lits_must_have_int_part(&self, token: &Token) {
- self.struct_span_err(token.span, "float literals must have an integer part")
- .span_suggestion(
- token.span,
- "must have an integer part",
- pprust::token_to_string(token),
- Applicability::MachineApplicable,
- )
- .emit();
- }
-
fn report_lit_error(&self, err: LitError, lit: token::Lit, span: Span) {
// Checks if `s` looks like i32 or u1234 etc.
fn looks_like_width_suffix(first_chars: &[char], s: &str) -> bool {
@@ -1897,39 +1887,24 @@ impl<'a> Parser<'a> {
// by lexer, so here we don't report it the second time.
LitError::LexerError => {}
LitError::InvalidSuffix => {
- self.expect_no_suffix(
- span,
- &format!("{} {} literal", kind.article(), kind.descr()),
- suffix,
- );
+ if let Some(suffix) = suffix {
+ self.sess.emit_err(InvalidLiteralSuffix {
+ span,
+ kind: format!("{}", kind.descr()),
+ suffix,
+ });
+ }
}
LitError::InvalidIntSuffix => {
let suf = suffix.expect("suffix error with no suffix");
let suf = suf.as_str();
if looks_like_width_suffix(&['i', 'u'], &suf) {
// If it looks like a width, try to be helpful.
- let msg = format!("invalid width `{}` for integer literal", &suf[1..]);
- self.struct_span_err(span, &msg)
- .help("valid widths are 8, 16, 32, 64 and 128")
- .emit();
+ self.sess.emit_err(InvalidIntLiteralWidth { span, width: suf[1..].into() });
} else if let Some(fixed) = fix_base_capitalisation(suf) {
- let msg = "invalid base prefix for number literal";
-
- self.struct_span_err(span, msg)
- .note("base prefixes (`0xff`, `0b1010`, `0o755`) are lowercase")
- .span_suggestion(
- span,
- "try making the prefix lowercase",
- fixed,
- Applicability::MaybeIncorrect,
- )
- .emit();
+ self.sess.emit_err(InvalidNumLiteralBasePrefix { span, fixed });
} else {
- let msg = format!("invalid suffix `{suf}` for number literal");
- self.struct_span_err(span, &msg)
- .span_label(span, format!("invalid suffix `{suf}`"))
- .help("the suffix must be one of the numeric types (`u32`, `isize`, `f32`, etc.)")
- .emit();
+ self.sess.emit_err(InvalidNumLiteralSuffix { span, suffix: suf.to_string() });
}
}
LitError::InvalidFloatSuffix => {
@@ -1937,65 +1912,37 @@ impl<'a> Parser<'a> {
let suf = suf.as_str();
if looks_like_width_suffix(&['f'], suf) {
// If it looks like a width, try to be helpful.
- let msg = format!("invalid width `{}` for float literal", &suf[1..]);
- self.struct_span_err(span, &msg).help("valid widths are 32 and 64").emit();
+ self.sess
+ .emit_err(InvalidFloatLiteralWidth { span, width: suf[1..].to_string() });
} else {
- let msg = format!("invalid suffix `{suf}` for float literal");
- self.struct_span_err(span, &msg)
- .span_label(span, format!("invalid suffix `{suf}`"))
- .help("valid suffixes are `f32` and `f64`")
- .emit();
+ self.sess.emit_err(InvalidFloatLiteralSuffix { span, suffix: suf.to_string() });
}
}
LitError::NonDecimalFloat(base) => {
- let descr = match base {
- 16 => "hexadecimal",
- 8 => "octal",
- 2 => "binary",
+ match base {
+ 16 => self.sess.emit_err(HexadecimalFloatLiteralNotSupported { span }),
+ 8 => self.sess.emit_err(OctalFloatLiteralNotSupported { span }),
+ 2 => self.sess.emit_err(BinaryFloatLiteralNotSupported { span }),
_ => unreachable!(),
};
- self.struct_span_err(span, &format!("{descr} float literal is not supported"))
- .span_label(span, "not supported")
- .emit();
}
LitError::IntTooLarge => {
- self.struct_span_err(span, "integer literal is too large").emit();
+ self.sess.emit_err(IntLiteralTooLarge { span });
}
}
}
- pub(super) fn expect_no_suffix(&self, sp: Span, kind: &str, suffix: Option<Symbol>) {
- if let Some(suf) = suffix {
- let mut err = if kind == "a tuple index"
- && [sym::i32, sym::u32, sym::isize, sym::usize].contains(&suf)
- {
- // #59553: warn instead of reject out of hand to allow the fix to percolate
- // through the ecosystem when people fix their macros
- let mut err = self
- .sess
- .span_diagnostic
- .struct_span_warn(sp, &format!("suffixes on {kind} are invalid"));
- err.note(&format!(
- "`{}` is *temporarily* accepted on tuple index fields as it was \
- incorrectly accepted on stable for a few releases",
- suf,
- ));
- err.help(
- "on proc macros, you'll want to use `syn::Index::from` or \
- `proc_macro::Literal::*_unsuffixed` for code that will desugar \
- to tuple field access",
- );
- err.note(
- "see issue #60210 <https://github.com/rust-lang/rust/issues/60210> \
- for more information",
- );
- err
- } else {
- self.struct_span_err(sp, &format!("suffixes on {kind} are invalid"))
- .forget_guarantee()
- };
- err.span_label(sp, format!("invalid suffix `{suf}`"));
- err.emit();
+ pub(super) fn expect_no_tuple_index_suffix(&self, span: Span, suffix: Symbol) {
+ if [sym::i32, sym::u32, sym::isize, sym::usize].contains(&suffix) {
+ // #59553: warn instead of reject out of hand to allow the fix to percolate
+ // through the ecosystem when people fix their macros
+ self.sess.emit_warning(InvalidLiteralSuffixOnTupleIndex {
+ span,
+ suffix,
+ exception: Some(()),
+ });
+ } else {
+ self.sess.emit_err(InvalidLiteralSuffixOnTupleIndex { span, suffix, exception: None });
}
}
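
`expect_no_tuple_index_suffix` above keeps the #59553 compromise: the four integer-type suffixes only warn so existing macro output keeps compiling, while anything else is a hard error. A trivial standalone sketch of that split (plain printing stands in for `emit_warning`/`emit_err`):

fn check_tuple_index_suffix(suffix: &str) {
    // The same four suffixes that the code above treats specially.
    const TEMPORARILY_ACCEPTED: [&str; 4] = ["i32", "u32", "isize", "usize"];
    if TEMPORARILY_ACCEPTED.contains(&suffix) {
        eprintln!("warning: suffixes on a tuple index are invalid (`{suffix}` is temporarily accepted)");
    } else {
        eprintln!("error: suffixes on a tuple index are invalid: `{suffix}`");
    }
}

fn main() {
    check_tuple_index_suffix("u32"); // warns, for backwards compatibility
    check_tuple_index_suffix("foo"); // hard error
}
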
@@ -2007,14 +1954,10 @@ impl<'a> Parser<'a> {
let lo = self.token.span;
let minus_present = self.eat(&token::BinOp(token::Minus));
let lit = self.parse_lit()?;
- let expr = self.mk_expr(lit.span, ExprKind::Lit(lit), AttrVec::new());
+ let expr = self.mk_expr(lit.span, ExprKind::Lit(lit));
if minus_present {
- Ok(self.mk_expr(
- lo.to(self.prev_token.span),
- self.mk_unary(UnOp::Neg, expr),
- AttrVec::new(),
- ))
+ Ok(self.mk_expr(lo.to(self.prev_token.span), self.mk_unary(UnOp::Neg, expr)))
} else {
Ok(expr)
}
@@ -2029,22 +1972,17 @@ impl<'a> Parser<'a> {
/// Emits a suggestion if it looks like the user meant an array but
/// accidentally used braces, causing the code to be interpreted as a block
/// expression.
- fn maybe_suggest_brackets_instead_of_braces(
- &mut self,
- lo: Span,
- attrs: AttrVec,
- ) -> Option<P<Expr>> {
+ fn maybe_suggest_brackets_instead_of_braces(&mut self, lo: Span) -> Option<P<Expr>> {
let mut snapshot = self.create_snapshot_for_diagnostic();
- match snapshot.parse_array_or_repeat_expr(attrs, Delimiter::Brace) {
+ match snapshot.parse_array_or_repeat_expr(Delimiter::Brace) {
Ok(arr) => {
- let hi = snapshot.prev_token.span;
- self.struct_span_err(arr.span, "this is a block expression, not an array")
- .multipart_suggestion(
- "to make an array, use square brackets instead of curly braces",
- vec![(lo, "[".to_owned()), (hi, "]".to_owned())],
- Applicability::MaybeIncorrect,
- )
- .emit();
+ self.sess.emit_err(ArrayBracketsInsteadOfSpaces {
+ span: arr.span,
+ sub: ArrayBracketsInsteadOfSpacesSugg {
+ left: lo,
+ right: snapshot.prev_token.span,
+ },
+ });
self.restore_snapshot(snapshot);
Some(self.mk_expr_err(arr.span))
@@ -2056,43 +1994,76 @@ impl<'a> Parser<'a> {
}
}
+ fn suggest_missing_semicolon_before_array(
+ &self,
+ prev_span: Span,
+ open_delim_span: Span,
+ ) -> PResult<'a, ()> {
+ if self.token.kind == token::Comma {
+ if !self.sess.source_map().is_multiline(prev_span.until(self.token.span)) {
+ return Ok(());
+ }
+ let mut snapshot = self.create_snapshot_for_diagnostic();
+ snapshot.bump();
+ match snapshot.parse_seq_to_before_end(
+ &token::CloseDelim(Delimiter::Bracket),
+ SeqSep::trailing_allowed(token::Comma),
+ |p| p.parse_expr(),
+ ) {
+ Ok(_)
+ // When the close delim is `)`, `token.kind` is expected to be `token::CloseDelim(Delimiter::Parenthesis)`,
+ // but the actual `token.kind` is `token::CloseDelim(Delimiter::Bracket)`.
+ // This is because the `token.kind` of the close delim is treated as the same as
+ // that of the open delim in `TokenTreesReader::parse_token_tree`, even if the delimiters of them are different.
+ // Therefore, `token.kind` should not be compared here.
+ if snapshot
+ .span_to_snippet(snapshot.token.span)
+ .map_or(false, |snippet| snippet == "]") =>
+ {
+ return Err(MissingSemicolonBeforeArray {
+ open_delim: open_delim_span,
+ semicolon: prev_span.shrink_to_hi(),
+ }.into_diagnostic(&self.sess.span_diagnostic));
+ }
+ Ok(_) => (),
+ Err(err) => err.cancel(),
+ }
+ }
+ Ok(())
+ }
+
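
The new `suggest_missing_semicolon_before_array` above is a snapshot-and-probe recovery: clone the parser state, try to parse the rest as an array, and only suggest the missing `;` if that speculative parse reaches a closing `]`. A toy model of that shape, with a character-level stand-in for the token stream:

#[derive(Clone)]
struct Parser { toks: Vec<char>, pos: usize }

impl Parser {
    fn snapshot(&self) -> Parser { self.clone() }

    // Consume comma-separated single-character "expressions" until `]`.
    fn probe_array_tail(mut self) -> bool {
        while let Some(&c) = self.toks.get(self.pos) {
            match c {
                ']' => return true,
                ',' | 'e' => self.pos += 1,
                _ => return false,
            }
        }
        false
    }
}

fn suggest_missing_semicolon(p: &Parser) {
    // Probe on a clone; the real parser state is untouched either way.
    if p.snapshot().probe_array_tail() {
        eprintln!("error: expected `;`, found `[`");
        eprintln!("help: consider adding `;` before this statement's trailing expression");
    }
}

fn main() {
    // Models the tail of `let x = foo\n[e, e, e]` where a `;` is missing.
    let p = Parser { toks: "e,e,e]".chars().collect(), pos: 0 };
    suggest_missing_semicolon(&p);
}
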
/// Parses a block or unsafe block.
pub(super) fn parse_block_expr(
&mut self,
opt_label: Option<Label>,
lo: Span,
blk_mode: BlockCheckMode,
- mut attrs: AttrVec,
) -> PResult<'a, P<Expr>> {
if self.is_array_like_block() {
- if let Some(arr) = self.maybe_suggest_brackets_instead_of_braces(lo, attrs.clone()) {
+ if let Some(arr) = self.maybe_suggest_brackets_instead_of_braces(lo) {
return Ok(arr);
}
}
- if let Some(label) = opt_label {
- self.sess.gated_spans.gate(sym::label_break_value, label.ident.span);
- }
-
if self.token.is_whole_block() {
- self.struct_span_err(self.token.span, "cannot use a `block` macro fragment here")
- .span_label(lo.to(self.token.span), "the `block` fragment is within this context")
- .emit();
+ self.sess.emit_err(InvalidBlockMacroSegment {
+ span: self.token.span,
+ context: lo.to(self.token.span),
+ });
}
- let (inner_attrs, blk) = self.parse_block_common(lo, blk_mode)?;
- attrs.extend(inner_attrs);
- Ok(self.mk_expr(blk.span, ExprKind::Block(blk, opt_label), attrs))
+ let (attrs, blk) = self.parse_block_common(lo, blk_mode)?;
+ Ok(self.mk_expr_with_attrs(blk.span, ExprKind::Block(blk, opt_label), attrs))
}
/// Parse a block which takes no attributes and has no label
fn parse_simple_block(&mut self) -> PResult<'a, P<Expr>> {
let blk = self.parse_block()?;
- Ok(self.mk_expr(blk.span, ExprKind::Block(blk, None), AttrVec::new()))
+ Ok(self.mk_expr(blk.span, ExprKind::Block(blk, None)))
}
/// Parses a closure expression (e.g., `move |args| expr`).
- fn parse_closure_expr(&mut self, attrs: AttrVec) -> PResult<'a, P<Expr>> {
+ fn parse_closure_expr(&mut self) -> PResult<'a, P<Expr>> {
let lo = self.token.span;
let binder = if self.check_keyword(kw::For) {
@@ -2127,7 +2098,7 @@ impl<'a> Parser<'a> {
_ => {
// If an explicit return type is given, require a block to appear (RFC 968).
let body_lo = self.token.span;
- self.parse_block_expr(None, body_lo, BlockCheckMode::Default, AttrVec::new())?
+ self.parse_block_expr(None, body_lo, BlockCheckMode::Default)?
}
};
@@ -2138,6 +2109,12 @@ impl<'a> Parser<'a> {
if self.token.kind == TokenKind::Semi
&& matches!(self.token_cursor.frame.delim_sp, Some((Delimiter::Parenthesis, _)))
+ // HACK: This is needed so we can detect whether we're inside a macro,
+ // where regular assumptions about what tokens can follow other tokens
+ // don't necessarily apply.
+ && self.may_recover()
+ // FIXME(Nilstrieb): Remove this check once `may_recover` actually stops recovery
+ && self.subparser_name.is_none()
{
// It is likely that the closure body is a block but where the
// braces have been removed. We will recover and eat the next
@@ -2158,7 +2135,6 @@ impl<'a> Parser<'a> {
body,
lo.to(decl_hi),
),
- attrs,
);
// Disable recovery for closure body
@@ -2175,7 +2151,8 @@ impl<'a> Parser<'a> {
// Check for `move async` and recover
if self.check_keyword(kw::Async) {
let move_async_span = self.token.span.with_lo(self.prev_token.span.data().lo);
- Err(self.incorrect_move_async_order_found(move_async_span))
+ Err(AsyncMoveOrderIncorrect { span: move_async_span }
+ .into_diagnostic(&self.sess.span_diagnostic))
} else {
Ok(CaptureBy::Value)
}
@@ -2221,10 +2198,10 @@ impl<'a> Parser<'a> {
Ok((
Param {
- attrs: attrs.into(),
+ attrs,
ty,
pat,
- span: lo.to(this.token.span),
+ span: lo.to(this.prev_token.span),
id: DUMMY_NODE_ID,
is_placeholder: false,
},
@@ -2234,19 +2211,13 @@ impl<'a> Parser<'a> {
}
/// Parses an `if` expression (`if` token already eaten).
- fn parse_if_expr(&mut self, attrs: AttrVec) -> PResult<'a, P<Expr>> {
+ fn parse_if_expr(&mut self) -> PResult<'a, P<Expr>> {
let lo = self.prev_token.span;
let cond = self.parse_cond_expr()?;
-
- self.parse_if_after_cond(attrs, lo, cond)
+ self.parse_if_after_cond(lo, cond)
}
- fn parse_if_after_cond(
- &mut self,
- attrs: AttrVec,
- lo: Span,
- mut cond: P<Expr>,
- ) -> PResult<'a, P<Expr>> {
+ fn parse_if_after_cond(&mut self, lo: Span, mut cond: P<Expr>) -> PResult<'a, P<Expr>> {
let cond_span = cond.span;
// Tries to interpret `cond` as either a missing expression if it's a block,
// or as an unfinished expression if it's a binop and the RHS is a block.
@@ -2255,11 +2226,19 @@ impl<'a> Parser<'a> {
let block = match &mut cond.kind {
ExprKind::Binary(Spanned { span: binop_span, .. }, _, right)
if let ExprKind::Block(_, None) = right.kind => {
- this.error_missing_if_then_block(lo, cond_span.shrink_to_lo().to(*binop_span), true).emit();
+ self.sess.emit_err(IfExpressionMissingThenBlock {
+ if_span: lo,
+ sub: IfExpressionMissingThenBlockSub::UnfinishedCondition(
+ cond_span.shrink_to_lo().to(*binop_span)
+ ),
+ });
std::mem::replace(right, this.mk_expr_err(binop_span.shrink_to_hi()))
},
ExprKind::Block(_, None) => {
- this.error_missing_if_cond(lo, cond_span).emit();
+ self.sess.emit_err(IfExpressionMissingCondition {
+ if_span: lo.shrink_to_hi(),
+ block_span: self.sess.source_map().start_point(cond_span),
+ });
std::mem::replace(&mut cond, this.mk_expr_err(cond_span.shrink_to_hi()))
}
_ => {
@@ -2277,7 +2256,10 @@ impl<'a> Parser<'a> {
if let Some(block) = recover_block_from_condition(self) {
block
} else {
- self.error_missing_if_then_block(lo, cond_span, false).emit();
+ self.sess.emit_err(IfExpressionMissingThenBlock {
+ if_span: lo,
+ sub: IfExpressionMissingThenBlockSub::AddThenBlock(cond_span.shrink_to_hi()),
+ });
self.mk_block_err(cond_span.shrink_to_hi())
}
} else {
@@ -2302,45 +2284,13 @@ impl<'a> Parser<'a> {
block
};
let els = if self.eat_keyword(kw::Else) { Some(self.parse_else_expr()?) } else { None };
- Ok(self.mk_expr(lo.to(self.prev_token.span), ExprKind::If(cond, thn, els), attrs))
- }
-
- fn error_missing_if_then_block(
- &self,
- if_span: Span,
- cond_span: Span,
- is_unfinished: bool,
- ) -> DiagnosticBuilder<'a, ErrorGuaranteed> {
- let mut err = self.struct_span_err(
- if_span,
- "this `if` expression is missing a block after the condition",
- );
- if is_unfinished {
- err.span_help(cond_span, "this binary operation is possibly unfinished");
- } else {
- err.span_help(cond_span.shrink_to_hi(), "add a block here");
- }
- err
- }
-
- fn error_missing_if_cond(
- &self,
- lo: Span,
- span: Span,
- ) -> DiagnosticBuilder<'a, ErrorGuaranteed> {
- let next_span = self.sess.source_map().next_point(lo);
- let mut err = self.struct_span_err(next_span, "missing condition for `if` expression");
- err.span_label(next_span, "expected condition here");
- err.span_label(
- self.sess.source_map().start_point(span),
- "if this block is the condition of the `if` expression, then it must be followed by another block"
- );
- err
+ Ok(self.mk_expr(lo.to(self.prev_token.span), ExprKind::If(cond, thn, els)))
}
/// Parses the condition of a `if` or `while` expression.
fn parse_cond_expr(&mut self) -> PResult<'a, P<Expr>> {
- let cond = self.parse_expr_res(Restrictions::NO_STRUCT_LITERAL | Restrictions::ALLOW_LET, None)?;
+ let cond =
+ self.parse_expr_res(Restrictions::NO_STRUCT_LITERAL | Restrictions::ALLOW_LET, None)?;
if let ExprKind::Let(..) = cond.kind {
// Remove the last feature gating of a `let` expression since it's stable.
@@ -2351,7 +2301,7 @@ impl<'a> Parser<'a> {
}
/// Parses a `let $pat = $expr` pseudo-expression.
- fn parse_let_expr(&mut self, attrs: AttrVec) -> PResult<'a, P<Expr>> {
+ fn parse_let_expr(&mut self) -> PResult<'a, P<Expr>> {
// This is a *approximate* heuristic that detects if `let` chains are
// being parsed in the right position. It's approximate because it
// doesn't deny all invalid `let` expressions, just completely wrong usages.
@@ -2360,8 +2310,7 @@ impl<'a> Parser<'a> {
TokenKind::AndAnd | TokenKind::Ident(kw::If, _) | TokenKind::Ident(kw::While, _)
);
if !self.restrictions.contains(Restrictions::ALLOW_LET) || not_in_chain {
- self.struct_span_err(self.token.span, "expected expression, found `let` statement")
- .emit();
+ self.sess.emit_err(ExpectedExpressionFoundLet { span: self.token.span });
}
self.bump(); // Eat `let` token
@@ -2378,7 +2327,7 @@ impl<'a> Parser<'a> {
})?;
let span = lo.to(expr.span);
self.sess.gated_spans.gate(sym::let_chains, span);
- Ok(self.mk_expr(span, ExprKind::Let(pat, expr, span), attrs))
+ Ok(self.mk_expr(span, ExprKind::Let(pat, expr, span)))
}
/// Parses an `else { ... }` expression (`else` token already eaten).
@@ -2386,7 +2335,7 @@ impl<'a> Parser<'a> {
let else_span = self.prev_token.span; // `else`
let attrs = self.parse_outer_attributes()?.take_for_recovery(); // For recovery.
let expr = if self.eat_keyword(kw::If) {
- self.parse_if_expr(AttrVec::new())?
+ self.parse_if_expr()?
} else if self.check(&TokenKind::OpenDelim(Delimiter::Brace)) {
self.parse_simple_block()?
} else {
@@ -2400,16 +2349,13 @@ impl<'a> Parser<'a> {
if self.check(&TokenKind::OpenDelim(Delimiter::Brace))
&& classify::expr_requires_semi_to_be_stmt(&cond) =>
{
- self.struct_span_err(first_tok_span, format!("expected `{{`, found {first_tok}"))
- .span_label(else_span, "expected an `if` or a block after this `else`")
- .span_suggestion(
- cond.span.shrink_to_lo(),
- "add an `if` if this is the condition of a chained `else if` statement",
- "if ",
- Applicability::MaybeIncorrect,
- )
- .emit();
- self.parse_if_after_cond(AttrVec::new(), cond.span.shrink_to_lo(), cond)?
+ self.sess.emit_err(ExpectedElseBlock {
+ first_tok_span,
+ first_tok,
+ else_span,
+ condition_start: cond.span.shrink_to_lo(),
+ });
+ self.parse_if_after_cond(cond.span.shrink_to_lo(), cond)?
}
Err(e) => {
e.cancel();
@@ -2433,25 +2379,22 @@ impl<'a> Parser<'a> {
branch_span: Span,
attrs: &[ast::Attribute],
) {
- let (span, last) = match attrs {
+ let (attributes, last) = match attrs {
[] => return,
[x0 @ xn] | [x0, .., xn] => (x0.span.to(xn.span), xn.span),
};
let ctx = if is_ctx_else { "else" } else { "if" };
- self.struct_span_err(last, "outer attributes are not allowed on `if` and `else` branches")
- .span_label(branch_span, "the attributes are attached to this branch")
- .span_label(ctx_span, format!("the branch belongs to this `{ctx}`"))
- .span_suggestion(span, "remove the attributes", "", Applicability::MachineApplicable)
- .emit();
+ self.sess.emit_err(OuterAttributeNotAllowedOnIfElse {
+ last,
+ branch_span,
+ ctx_span,
+ ctx: ctx.to_string(),
+ attributes,
+ });
}
/// Parses `for <src_pat> in <src_expr> <src_loop_block>` (`for` token already eaten).
- fn parse_for_expr(
- &mut self,
- opt_label: Option<Label>,
- lo: Span,
- mut attrs: AttrVec,
- ) -> PResult<'a, P<Expr>> {
+ fn parse_for_expr(&mut self, opt_label: Option<Label>, lo: Span) -> PResult<'a, P<Expr>> {
// Record whether we are about to parse `for (`.
// This is used below for recovery in case of `for ( $stuff ) $block`
// in which case we will suggest `for $stuff $block`.
@@ -2474,63 +2417,51 @@ impl<'a> Parser<'a> {
let pat = self.recover_parens_around_for_head(pat, begin_paren);
- let (iattrs, loop_block) = self.parse_inner_attrs_and_block()?;
- attrs.extend(iattrs);
+ let (attrs, loop_block) = self.parse_inner_attrs_and_block()?;
let kind = ExprKind::ForLoop(pat, expr, loop_block, opt_label);
- Ok(self.mk_expr(lo.to(self.prev_token.span), kind, attrs))
+ Ok(self.mk_expr_with_attrs(lo.to(self.prev_token.span), kind, attrs))
}
fn error_missing_in_for_loop(&mut self) {
- let (span, msg, sugg) = if self.token.is_ident_named(sym::of) {
+ let (span, sub): (_, fn(_) -> _) = if self.token.is_ident_named(sym::of) {
// Possibly using JS syntax (#75311).
let span = self.token.span;
self.bump();
- (span, "try using `in` here instead", "in")
+ (span, MissingInInForLoopSub::InNotOf)
} else {
- (self.prev_token.span.between(self.token.span), "try adding `in` here", " in ")
+ (self.prev_token.span.between(self.token.span), MissingInInForLoopSub::AddIn)
};
- self.struct_span_err(span, "missing `in` in `for` loop")
- .span_suggestion_short(
- span,
- msg,
- sugg,
- // Has been misleading, at least in the past (closed Issue #48492).
- Applicability::MaybeIncorrect,
- )
- .emit();
+
+ self.sess.emit_err(MissingInInForLoop { span, sub: sub(span) });
}
/// Parses a `while` or `while let` expression (`while` token already eaten).
- fn parse_while_expr(
- &mut self,
- opt_label: Option<Label>,
- lo: Span,
- mut attrs: AttrVec,
- ) -> PResult<'a, P<Expr>> {
+ fn parse_while_expr(&mut self, opt_label: Option<Label>, lo: Span) -> PResult<'a, P<Expr>> {
let cond = self.parse_cond_expr().map_err(|mut err| {
err.span_label(lo, "while parsing the condition of this `while` expression");
err
})?;
- let (iattrs, body) = self.parse_inner_attrs_and_block().map_err(|mut err| {
+ let (attrs, body) = self.parse_inner_attrs_and_block().map_err(|mut err| {
err.span_label(lo, "while parsing the body of this `while` expression");
err.span_label(cond.span, "this `while` condition successfully parsed");
err
})?;
- attrs.extend(iattrs);
- Ok(self.mk_expr(lo.to(self.prev_token.span), ExprKind::While(cond, body, opt_label), attrs))
+ Ok(self.mk_expr_with_attrs(
+ lo.to(self.prev_token.span),
+ ExprKind::While(cond, body, opt_label),
+ attrs,
+ ))
}
/// Parses `loop { ... }` (`loop` token already eaten).
- fn parse_loop_expr(
- &mut self,
- opt_label: Option<Label>,
- lo: Span,
- mut attrs: AttrVec,
- ) -> PResult<'a, P<Expr>> {
- let (iattrs, body) = self.parse_inner_attrs_and_block()?;
- attrs.extend(iattrs);
- Ok(self.mk_expr(lo.to(self.prev_token.span), ExprKind::Loop(body, opt_label), attrs))
+ fn parse_loop_expr(&mut self, opt_label: Option<Label>, lo: Span) -> PResult<'a, P<Expr>> {
+ let (attrs, body) = self.parse_inner_attrs_and_block()?;
+ Ok(self.mk_expr_with_attrs(
+ lo.to(self.prev_token.span),
+ ExprKind::Loop(body, opt_label),
+ attrs,
+ ))
}
pub(crate) fn eat_label(&mut self) -> Option<Label> {
@@ -2541,7 +2472,7 @@ impl<'a> Parser<'a> {
}
/// Parses a `match ... { ... }` expression (`match` token already eaten).
- fn parse_match_expr(&mut self, mut attrs: AttrVec) -> PResult<'a, P<Expr>> {
+ fn parse_match_expr(&mut self) -> PResult<'a, P<Expr>> {
let match_span = self.prev_token.span;
let lo = self.prev_token.span;
let scrutinee = self.parse_expr_res(Restrictions::NO_STRUCT_LITERAL, None)?;
@@ -2561,7 +2492,7 @@ impl<'a> Parser<'a> {
return Err(e);
}
}
- attrs.extend(self.parse_inner_attributes()?);
+ let attrs = self.parse_inner_attributes()?;
let mut arms: Vec<Arm> = Vec::new();
while self.token != token::CloseDelim(Delimiter::Brace) {
@@ -2575,13 +2506,17 @@ impl<'a> Parser<'a> {
if self.token == token::CloseDelim(Delimiter::Brace) {
self.bump();
}
- return Ok(self.mk_expr(span, ExprKind::Match(scrutinee, arms), attrs));
+ return Ok(self.mk_expr_with_attrs(
+ span,
+ ExprKind::Match(scrutinee, arms),
+ attrs,
+ ));
}
}
}
let hi = self.token.span;
self.bump();
- Ok(self.mk_expr(lo.to(hi), ExprKind::Match(scrutinee, arms), attrs))
+ Ok(self.mk_expr_with_attrs(lo.to(hi), ExprKind::Match(scrutinee, arms), attrs))
}
/// Attempt to recover from match arm body with statements and no surrounding braces.
@@ -2598,39 +2533,22 @@ impl<'a> Parser<'a> {
self.bump(); // `;`
let mut stmts =
vec![self.mk_stmt(first_expr.span, ast::StmtKind::Expr(first_expr.clone()))];
- let err = |this: &mut Parser<'_>, stmts: Vec<ast::Stmt>| {
+ let err = |this: &Parser<'_>, stmts: Vec<ast::Stmt>| {
let span = stmts[0].span.to(stmts[stmts.len() - 1].span);
- let mut err = this.struct_span_err(span, "`match` arm body without braces");
- let (these, s, are) =
- if stmts.len() > 1 { ("these", "s", "are") } else { ("this", "", "is") };
- err.span_label(
- span,
- &format!(
- "{these} statement{s} {are} not surrounded by a body",
- these = these,
- s = s,
- are = are
- ),
- );
- err.span_label(arrow_span, "while parsing the `match` arm starting here");
- if stmts.len() > 1 {
- err.multipart_suggestion(
- &format!("surround the statement{s} with a body"),
- vec![
- (span.shrink_to_lo(), "{ ".to_string()),
- (span.shrink_to_hi(), " }".to_string()),
- ],
- Applicability::MachineApplicable,
- );
- } else {
- err.span_suggestion(
- semi_sp,
- "use a comma to end a `match` arm expression",
- ",",
- Applicability::MachineApplicable,
- );
- }
- err.emit();
+
+ this.sess.emit_err(MatchArmBodyWithoutBraces {
+ statements: span,
+ arrow: arrow_span,
+ num_statements: stmts.len(),
+ sub: if stmts.len() > 1 {
+ MatchArmBodyWithoutBracesSugg::AddBraces {
+ left: span.shrink_to_lo(),
+ right: span.shrink_to_hi(),
+ }
+ } else {
+ MatchArmBodyWithoutBracesSugg::UseComma { semicolon: semi_sp }
+ },
+ });
this.mk_expr_err(span)
};
// We might have either a `,` -> `;` typo, or a block without braces. We need
@@ -2681,7 +2599,7 @@ impl<'a> Parser<'a> {
}
pub(super) fn parse_arm(&mut self) -> PResult<'a, Arm> {
- // Used to check the `let_chains` and `if_let_guard` features mostly by scaning
+ // Used to check the `let_chains` and `if_let_guard` features mostly by scanning
// `&&` tokens.
fn check_let_expr(expr: &Expr) -> (bool, bool) {
match expr.kind {
@@ -2756,7 +2674,7 @@ impl<'a> Parser<'a> {
let span = body.span;
return Ok((
ast::Arm {
- attrs: attrs.into(),
+ attrs,
pat,
guard,
body,
@@ -2811,17 +2729,9 @@ impl<'a> Parser<'a> {
.is_ok();
if pattern_follows && snapshot.check(&TokenKind::FatArrow) {
err.cancel();
- this.struct_span_err(
- hi.shrink_to_hi(),
- "expected `,` following `match` arm",
- )
- .span_suggestion(
- hi.shrink_to_hi(),
- "missing a comma here to end this `match` arm",
- ",",
- Applicability::MachineApplicable,
- )
- .emit();
+ this.sess.emit_err(MissingCommaAfterMatchArm {
+ span: hi.shrink_to_hi(),
+ });
return Ok(true);
}
}
@@ -2834,7 +2744,7 @@ impl<'a> Parser<'a> {
Ok((
ast::Arm {
- attrs: attrs.into(),
+ attrs,
pat,
guard,
body: expr,
@@ -2848,21 +2758,15 @@ impl<'a> Parser<'a> {
}
/// Parses a `try {...}` expression (`try` token already eaten).
- fn parse_try_block(&mut self, span_lo: Span, mut attrs: AttrVec) -> PResult<'a, P<Expr>> {
- let (iattrs, body) = self.parse_inner_attrs_and_block()?;
- attrs.extend(iattrs);
+ fn parse_try_block(&mut self, span_lo: Span) -> PResult<'a, P<Expr>> {
+ let (attrs, body) = self.parse_inner_attrs_and_block()?;
if self.eat_keyword(kw::Catch) {
- let mut error = self.struct_span_err(
- self.prev_token.span,
- "keyword `catch` cannot follow a `try` block",
- );
- error.help("try using `match` on the result of the `try` block instead");
- error.emit();
- Err(error)
+ Err(CatchAfterTry { span: self.prev_token.span }
+ .into_diagnostic(&self.sess.span_diagnostic))
} else {
let span = span_lo.to(body.span);
self.sess.gated_spans.gate(sym::try_blocks, span);
- Ok(self.mk_expr(span, ExprKind::TryBlock(body), attrs))
+ Ok(self.mk_expr_with_attrs(span, ExprKind::TryBlock(body), attrs))
}
}
@@ -2884,14 +2788,13 @@ impl<'a> Parser<'a> {
}
/// Parses an `async move? {...}` expression.
- fn parse_async_block(&mut self, mut attrs: AttrVec) -> PResult<'a, P<Expr>> {
+ fn parse_async_block(&mut self) -> PResult<'a, P<Expr>> {
let lo = self.token.span;
self.expect_keyword(kw::Async)?;
let capture_clause = self.parse_capture_clause()?;
- let (iattrs, body) = self.parse_inner_attrs_and_block()?;
- attrs.extend(iattrs);
+ let (attrs, body) = self.parse_inner_attrs_and_block()?;
let kind = ExprKind::Async(capture_clause, DUMMY_NODE_ID, body);
- Ok(self.mk_expr(lo.to(self.prev_token.span), kind, attrs))
+ Ok(self.mk_expr_with_attrs(lo.to(self.prev_token.span), kind, attrs))
}
fn is_async_block(&self) -> bool {
@@ -2925,33 +2828,28 @@ impl<'a> Parser<'a> {
&mut self,
qself: Option<&ast::QSelf>,
path: &ast::Path,
- attrs: &AttrVec,
) -> Option<PResult<'a, P<Expr>>> {
let struct_allowed = !self.restrictions.contains(Restrictions::NO_STRUCT_LITERAL);
if struct_allowed || self.is_certainly_not_a_block() {
if let Err(err) = self.expect(&token::OpenDelim(Delimiter::Brace)) {
return Some(Err(err));
}
- let expr = self.parse_struct_expr(qself.cloned(), path.clone(), attrs.clone(), true);
+ let expr = self.parse_struct_expr(qself.cloned(), path.clone(), true);
if let (Ok(expr), false) = (&expr, struct_allowed) {
// This is a struct literal, but we can't accept them here.
- self.error_struct_lit_not_allowed_here(path.span, expr.span);
+ self.sess.emit_err(StructLiteralNotAllowedHere {
+ span: expr.span,
+ sub: StructLiteralNotAllowedHereSugg {
+ left: path.span.shrink_to_lo(),
+ right: expr.span.shrink_to_hi(),
+ },
+ });
}
return Some(expr);
}
None
}
- fn error_struct_lit_not_allowed_here(&self, lo: Span, sp: Span) {
- self.struct_span_err(sp, "struct literals are not allowed here")
- .multipart_suggestion(
- "surround the struct literal with parentheses",
- vec![(lo.shrink_to_lo(), "(".to_string()), (sp.shrink_to_hi(), ")".to_string())],
- Applicability::MachineApplicable,
- )
- .emit();
- }
-
pub(super) fn parse_struct_fields(
&mut self,
pth: ast::Path,
@@ -3069,7 +2967,6 @@ impl<'a> Parser<'a> {
&mut self,
qself: Option<ast::QSelf>,
pth: ast::Path,
- attrs: AttrVec,
recover: bool,
) -> PResult<'a, P<Expr>> {
let lo = pth.span;
@@ -3082,7 +2979,7 @@ impl<'a> Parser<'a> {
} else {
ExprKind::Struct(P(ast::StructExpr { qself, path: pth, fields, rest: base }))
};
- Ok(self.mk_expr(span, expr, attrs))
+ Ok(self.mk_expr(span, expr))
}
/// Use in case of error after field-looking code: `S { foo: () with a }`.
@@ -3110,18 +3007,10 @@ impl<'a> Parser<'a> {
if self.token != token::Comma {
return;
}
- self.struct_span_err(
- span.to(self.prev_token.span),
- "cannot use a comma after the base struct",
- )
- .span_suggestion_short(
- self.token.span,
- "remove this comma",
- "",
- Applicability::MachineApplicable,
- )
- .note("the base struct must always be the last field")
- .emit();
+ self.sess.emit_err(CommaAfterBaseStruct {
+ span: span.to(self.prev_token.span),
+ comma: self.token.span,
+ });
self.recover_stmt();
}
@@ -3137,7 +3026,7 @@ impl<'a> Parser<'a> {
// Mimic `x: x` for the `x` field shorthand.
let ident = this.parse_ident_common(false)?;
let path = ast::Path::from_ident(ident);
- (ident, this.mk_expr(ident.span, ExprKind::Path(None, path), AttrVec::new()))
+ (ident, this.mk_expr(ident.span, ExprKind::Path(None, path)))
} else {
let ident = this.parse_field_name()?;
this.error_on_eq_field_init(ident);
@@ -3151,7 +3040,7 @@ impl<'a> Parser<'a> {
span: lo.to(expr.span),
expr,
is_shorthand,
- attrs: attrs.into(),
+ attrs,
id: DUMMY_NODE_ID,
is_placeholder: false,
},
@@ -3167,43 +3056,18 @@ impl<'a> Parser<'a> {
return;
}
- self.struct_span_err(self.token.span, "expected `:`, found `=`")
- .span_suggestion(
- field_name.span.shrink_to_hi().to(self.token.span),
- "replace equals symbol with a colon",
- ":",
- Applicability::MachineApplicable,
- )
- .emit();
+ self.sess.emit_err(EqFieldInit {
+ span: self.token.span,
+ eq: field_name.span.shrink_to_hi().to(self.token.span),
+ });
}
fn err_dotdotdot_syntax(&self, span: Span) {
- self.struct_span_err(span, "unexpected token: `...`")
- .span_suggestion(
- span,
- "use `..` for an exclusive range",
- "..",
- Applicability::MaybeIncorrect,
- )
- .span_suggestion(
- span,
- "or `..=` for an inclusive range",
- "..=",
- Applicability::MaybeIncorrect,
- )
- .emit();
+ self.sess.emit_err(DotDotDot { span });
}
fn err_larrow_operator(&self, span: Span) {
- self.struct_span_err(span, "unexpected token: `<-`")
- .span_suggestion(
- span,
- "if you meant to write a comparison against a negative value, add a \
- space in between `<` and `-`",
- "< -",
- Applicability::MaybeIncorrect,
- )
- .emit();
+ self.sess.emit_err(LeftArrowOperator { span });
}
fn mk_assign_op(&self, binop: BinOp, lhs: P<Expr>, rhs: P<Expr>) -> ExprKind {
@@ -3242,17 +3106,21 @@ impl<'a> Parser<'a> {
fn mk_await_expr(&mut self, self_arg: P<Expr>, lo: Span) -> P<Expr> {
let span = lo.to(self.prev_token.span);
- let await_expr = self.mk_expr(span, ExprKind::Await(self_arg), AttrVec::new());
+ let await_expr = self.mk_expr(span, ExprKind::Await(self_arg));
self.recover_from_await_method_call();
await_expr
}
- pub(crate) fn mk_expr(&self, span: Span, kind: ExprKind, attrs: AttrVec) -> P<Expr> {
+ pub(crate) fn mk_expr_with_attrs(&self, span: Span, kind: ExprKind, attrs: AttrVec) -> P<Expr> {
P(Expr { kind, span, attrs, id: DUMMY_NODE_ID, tokens: None })
}
+ pub(crate) fn mk_expr(&self, span: Span, kind: ExprKind) -> P<Expr> {
+ P(Expr { kind, span, attrs: AttrVec::new(), id: DUMMY_NODE_ID, tokens: None })
+ }
+
pub(super) fn mk_expr_err(&self, span: Span) -> P<Expr> {
- self.mk_expr(span, ExprKind::Err, AttrVec::new())
+ self.mk_expr(span, ExprKind::Err)
}
/// Create expression span ensuring the span of the parent node
@@ -3268,7 +3136,7 @@ impl<'a> Parser<'a> {
fn collect_tokens_for_expr(
&mut self,
attrs: AttrWrapper,
- f: impl FnOnce(&mut Self, Vec<ast::Attribute>) -> PResult<'a, P<Expr>>,
+ f: impl FnOnce(&mut Self, ast::AttrVec) -> PResult<'a, P<Expr>>,
) -> PResult<'a, P<Expr>> {
self.collect_tokens_trailing_token(attrs, ForceCollect::No, |this, attrs| {
let res = f(this, attrs)?;
@@ -3276,6 +3144,8 @@ impl<'a> Parser<'a> {
&& this.token.kind == token::Semi
{
TrailingToken::Semi
+ } else if this.token.kind == token::Gt {
+ TrailingToken::Gt
} else {
// FIXME - pass this through from the place where we know
// we need a comma, rather than assuming that `#[attr] expr,`
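The expr.rs hunks above follow two recurring patterns: hand-rolled `struct_span_err` builder chains are replaced by typed diagnostic structs emitted through `self.sess.emit_err(..)` (or `into_diagnostic(..)` when the caller still needs the `DiagnosticBuilder`), and expression construction is split into an attribute-less `mk_expr` plus `mk_expr_with_attrs` for nodes that actually carry inner attributes. A minimal sketch of the constructor split, restating only what the hunks above define and not runnable outside the parser:

    // The two helpers differ only in the attribute list they attach.
    pub(crate) fn mk_expr_with_attrs(&self, span: Span, kind: ExprKind, attrs: AttrVec) -> P<Expr> {
        P(Expr { kind, span, attrs, id: DUMMY_NODE_ID, tokens: None })
    }

    pub(crate) fn mk_expr(&self, span: Span, kind: ExprKind) -> P<Expr> {
        // Equivalent to mk_expr_with_attrs(span, kind, AttrVec::new()).
        self.mk_expr_with_attrs(span, kind, AttrVec::new())
    }

    // Call sites that used to thread an empty AttrVec now simply write:
    let await_expr = self.mk_expr(span, ExprKind::Await(self_arg));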
diff --git a/compiler/rustc_parse/src/parser/generics.rs b/compiler/rustc_parse/src/parser/generics.rs
index 1acfd93d8..fa75670b2 100644
--- a/compiler/rustc_parse/src/parser/generics.rs
+++ b/compiler/rustc_parse/src/parser/generics.rs
@@ -2,7 +2,7 @@ use super::{ForceCollect, Parser, TrailingToken};
use rustc_ast::token;
use rustc_ast::{
- self as ast, Attribute, GenericBounds, GenericParam, GenericParamKind, WhereClause,
+ self as ast, AttrVec, GenericBounds, GenericParam, GenericParamKind, TyKind, WhereClause,
};
use rustc_errors::{Applicability, PResult};
use rustc_span::symbol::kw;
@@ -26,24 +26,54 @@ impl<'a> Parser<'a> {
}
/// Matches `typaram = IDENT (`?` unbound)? optbounds ( EQ ty )?`.
- fn parse_ty_param(&mut self, preceding_attrs: Vec<Attribute>) -> PResult<'a, GenericParam> {
+ fn parse_ty_param(&mut self, preceding_attrs: AttrVec) -> PResult<'a, GenericParam> {
let ident = self.parse_ident()?;
// Parse optional colon and param bounds.
let mut colon_span = None;
let bounds = if self.eat(&token::Colon) {
colon_span = Some(self.prev_token.span);
+ // recover from `impl Trait` in type param bound
+ if self.token.is_keyword(kw::Impl) {
+ let impl_span = self.token.span;
+ let snapshot = self.create_snapshot_for_diagnostic();
+ match self.parse_ty() {
+ Ok(p) => {
+ if let TyKind::ImplTrait(_, bounds) = &(*p).kind {
+ let span = impl_span.to(self.token.span.shrink_to_lo());
+ let mut err = self.struct_span_err(
+ span,
+ "expected trait bound, found `impl Trait` type",
+ );
+ err.span_label(span, "not a trait");
+ if let [bound, ..] = &bounds[..] {
+ err.span_suggestion_verbose(
+ impl_span.until(bound.span()),
+ "use the trait bounds directly",
+ String::new(),
+ Applicability::MachineApplicable,
+ );
+ }
+ err.emit();
+ return Err(err);
+ }
+ }
+ Err(err) => {
+ err.cancel();
+ }
+ }
+ self.restore_snapshot(snapshot);
+ }
self.parse_generic_bounds(colon_span)?
} else {
Vec::new()
};
let default = if self.eat(&token::Eq) { Some(self.parse_ty()?) } else { None };
-
Ok(GenericParam {
ident,
id: ast::DUMMY_NODE_ID,
- attrs: preceding_attrs.into(),
+ attrs: preceding_attrs,
bounds,
kind: GenericParamKind::Type { default },
is_placeholder: false,
@@ -53,7 +83,7 @@ impl<'a> Parser<'a> {
pub(crate) fn parse_const_param(
&mut self,
- preceding_attrs: Vec<Attribute>,
+ preceding_attrs: AttrVec,
) -> PResult<'a, GenericParam> {
let const_span = self.token.span;
@@ -68,7 +98,7 @@ impl<'a> Parser<'a> {
Ok(GenericParam {
ident,
id: ast::DUMMY_NODE_ID,
- attrs: preceding_attrs.into(),
+ attrs: preceding_attrs,
bounds: Vec::new(),
kind: GenericParamKind::Const { ty, kw_span: const_span, default },
is_placeholder: false,
@@ -109,7 +139,7 @@ impl<'a> Parser<'a> {
Some(ast::GenericParam {
ident: lifetime.ident,
id: lifetime.id,
- attrs: attrs.into(),
+ attrs,
bounds,
kind: ast::GenericParamKind::Lifetime,
is_placeholder: false,
@@ -314,7 +344,6 @@ impl<'a> Parser<'a> {
span: lo.to(self.prev_token.span),
lhs_ty: ty,
rhs_ty,
- id: ast::DUMMY_NODE_ID,
}))
} else {
self.maybe_recover_bounds_doubled_colon(&ty)?;
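The `parse_ty_param` recovery added above targets `impl Trait` written in type-parameter bound position. A hypothetical input of the shape it handles, with the result of the MachineApplicable "use the trait bounds directly" suggestion underneath:

    // Rejected: `impl Clone` is an `impl Trait` type, not a trait bound.
    fn first<T: impl Clone>(items: &[T]) -> Option<&T> { items.first() }

    // After applying the suggestion (the `impl ` prefix is removed, the bounds are kept):
    fn first<T: Clone>(items: &[T]) -> Option<&T> { items.first() }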
diff --git a/compiler/rustc_parse/src/parser/item.rs b/compiler/rustc_parse/src/parser/item.rs
index 567072925..bda301c52 100644
--- a/compiler/rustc_parse/src/parser/item.rs
+++ b/compiler/rustc_parse/src/parser/item.rs
@@ -1,4 +1,6 @@
-use super::diagnostics::{dummy_arg, ConsumeClosingDelim, Error};
+use crate::errors::{DocCommentDoesNotDocumentAnything, UseEmptyBlockNotSemi};
+
+use super::diagnostics::{dummy_arg, ConsumeClosingDelim};
use super::ty::{AllowPlus, RecoverQPath, RecoverReturnSign};
use super::{AttrWrapper, FollowedByType, ForceCollect, Parser, PathStyle, TrailingToken};
@@ -8,12 +10,12 @@ use rustc_ast::token::{self, Delimiter, TokenKind};
use rustc_ast::tokenstream::{DelimSpan, TokenStream, TokenTree};
use rustc_ast::{self as ast, AttrVec, Attribute, DUMMY_NODE_ID};
use rustc_ast::{Async, Const, Defaultness, IsAuto, Mutability, Unsafe, UseTree, UseTreeKind};
-use rustc_ast::{BindingMode, Block, FnDecl, FnSig, Param, SelfKind};
+use rustc_ast::{BindingAnnotation, Block, FnDecl, FnSig, Param, SelfKind};
use rustc_ast::{EnumDef, FieldDef, Generics, TraitRef, Ty, TyKind, Variant, VariantData};
use rustc_ast::{FnHeader, ForeignItem, Path, PathSegment, Visibility, VisibilityKind};
use rustc_ast::{MacArgs, MacCall, MacDelimiter};
use rustc_ast_pretty::pprust;
-use rustc_errors::{struct_span_err, Applicability, PResult, StashKey};
+use rustc_errors::{struct_span_err, Applicability, IntoDiagnostic, PResult, StashKey};
use rustc_span::edition::Edition;
use rustc_span::lev_distance::lev_distance;
use rustc_span::source_map::{self, Span};
@@ -22,7 +24,6 @@ use rustc_span::DUMMY_SP;
use std::convert::TryFrom;
use std::mem;
-use tracing::debug;
impl<'a> Parser<'a> {
/// Parses a source module as a crate. This is the main entry point for the parser.
@@ -32,7 +33,7 @@ impl<'a> Parser<'a> {
}
/// Parses a `mod <foo> { ... }` or `mod <foo>;` item.
- fn parse_item_mod(&mut self, attrs: &mut Vec<Attribute>) -> PResult<'a, ItemInfo> {
+ fn parse_item_mod(&mut self, attrs: &mut AttrVec) -> PResult<'a, ItemInfo> {
let unsafety = self.parse_unsafety();
self.expect_keyword(kw::Mod)?;
let id = self.parse_ident()?;
@@ -40,9 +41,9 @@ impl<'a> Parser<'a> {
ModKind::Unloaded
} else {
self.expect(&token::OpenDelim(Delimiter::Brace))?;
- let (mut inner_attrs, items, inner_span) =
+ let (inner_attrs, items, inner_span) =
self.parse_mod(&token::CloseDelim(Delimiter::Brace))?;
- attrs.append(&mut inner_attrs);
+ attrs.extend(inner_attrs);
ModKind::Loaded(items, Inline::Yes, inner_span)
};
Ok((id, ItemKind::Mod(unsafety, mod_kind)))
@@ -52,7 +53,7 @@ impl<'a> Parser<'a> {
pub fn parse_mod(
&mut self,
term: &TokenKind,
- ) -> PResult<'a, (Vec<Attribute>, Vec<P<Item>>, ModSpans)> {
+ ) -> PResult<'a, (AttrVec, Vec<P<Item>>, ModSpans)> {
let lo = self.token.span;
let attrs = self.parse_inner_attributes()?;
@@ -68,7 +69,12 @@ impl<'a> Parser<'a> {
if !self.maybe_consume_incorrect_semicolon(&items) {
let msg = &format!("expected item, found {token_str}");
let mut err = self.struct_span_err(self.token.span, msg);
- err.span_label(self.token.span, "expected item");
+ let label = if self.is_kw_followed_by_ident(kw::Let) {
+ "consider using `const` or `static` instead of `let` for global variables"
+ } else {
+ "expected item"
+ };
+ err.span_label(self.token.span, label);
return Err(err);
}
}
@@ -129,7 +135,7 @@ impl<'a> Parser<'a> {
fn parse_item_common_(
&mut self,
- mut attrs: Vec<Attribute>,
+ mut attrs: AttrVec,
mac_allowed: bool,
attrs_allowed: bool,
fn_parse_mode: FnParseMode,
@@ -193,7 +199,7 @@ impl<'a> Parser<'a> {
/// Parses one of the items allowed by the flags.
fn parse_item_kind(
&mut self,
- attrs: &mut Vec<Attribute>,
+ attrs: &mut AttrVec,
macros_allowed: bool,
lo: Span,
vis: &Visibility,
@@ -271,7 +277,10 @@ impl<'a> Parser<'a> {
// MACRO_RULES ITEM
self.parse_item_macro_rules(vis, has_bang)?
} else if self.isnt_macro_invocation()
- && (self.token.is_ident_named(sym::import) || self.token.is_ident_named(sym::using))
+ && (self.token.is_ident_named(sym::import)
+ || self.token.is_ident_named(sym::using)
+ || self.token.is_ident_named(sym::include)
+ || self.token.is_ident_named(sym::require))
{
return self.recover_import_as_use();
} else if self.isnt_macro_invocation() && vis.kind.is_pub() {
@@ -279,7 +288,7 @@ impl<'a> Parser<'a> {
return Ok(None);
} else if macros_allowed && self.check_path() {
// MACRO INVOCATION ITEM
- (Ident::empty(), ItemKind::MacCall(self.parse_item_macro(vis)?))
+ (Ident::empty(), ItemKind::MacCall(P(self.parse_item_macro(vis)?)))
} else {
return Ok(None);
};
@@ -526,7 +535,7 @@ impl<'a> Parser<'a> {
/// ```
fn parse_item_impl(
&mut self,
- attrs: &mut Vec<Attribute>,
+ attrs: &mut AttrVec,
defaultness: Defaultness,
) -> PResult<'a, ItemInfo> {
let unsafety = self.parse_unsafety();
@@ -653,12 +662,20 @@ impl<'a> Parser<'a> {
fn parse_item_list<T>(
&mut self,
- attrs: &mut Vec<Attribute>,
+ attrs: &mut AttrVec,
mut parse_item: impl FnMut(&mut Parser<'a>) -> PResult<'a, Option<Option<T>>>,
) -> PResult<'a, Vec<T>> {
let open_brace_span = self.token.span;
+
+ // Recover `impl Ty;` instead of `impl Ty {}`
+ if self.token == TokenKind::Semi {
+ self.sess.emit_err(UseEmptyBlockNotSemi { span: self.token.span });
+ self.bump();
+ return Ok(vec![]);
+ }
+
self.expect(&token::OpenDelim(Delimiter::Brace))?;
- attrs.append(&mut self.parse_inner_attributes()?);
+ attrs.extend(self.parse_inner_attributes()?);
let mut items = Vec::new();
while !self.eat(&token::CloseDelim(Delimiter::Brace)) {
@@ -667,14 +684,55 @@ impl<'a> Parser<'a> {
}
match parse_item(self) {
Ok(None) => {
+ let is_unnecessary_semicolon = !items.is_empty()
+ // When the close delim is `)` in a case like the following, `token.kind` is expected to be `token::CloseDelim(Delimiter::Parenthesis)`,
+ // but the actual `token.kind` is `token::CloseDelim(Delimiter::Brace)`.
+ // This is because the `token.kind` of the close delim is treated as the same as
+ // that of the open delim in `TokenTreesReader::parse_token_tree`, even if the delimiters of them are different.
+ // Therefore, `token.kind` should not be compared here.
+ //
+ // issue-60075.rs
+ // ```
+ // trait T {
+ // fn qux() -> Option<usize> {
+ // let _ = if true {
+ // });
+ // ^ this close delim
+ // Some(4)
+ // }
+ // ```
+ && self
+ .span_to_snippet(self.prev_token.span)
+ .map_or(false, |snippet| snippet == "}")
+ && self.token.kind == token::Semi;
+ let semicolon_span = self.token.span;
// We have to bail or we'll potentially never make progress.
let non_item_span = self.token.span;
+ let is_let = self.token.is_keyword(kw::Let);
+
+ let mut err = self.struct_span_err(non_item_span, "non-item in item list");
self.consume_block(Delimiter::Brace, ConsumeClosingDelim::Yes);
- self.struct_span_err(non_item_span, "non-item in item list")
- .span_label(open_brace_span, "item list starts here")
- .span_label(non_item_span, "non-item starts here")
- .span_label(self.prev_token.span, "item list ends here")
- .emit();
+ if is_let {
+ err.span_suggestion(
+ non_item_span,
+ "consider using `const` instead of `let` for associated const",
+ "const",
+ Applicability::MachineApplicable,
+ );
+ } else {
+ err.span_label(open_brace_span, "item list starts here")
+ .span_label(non_item_span, "non-item starts here")
+ .span_label(self.prev_token.span, "item list ends here");
+ }
+ if is_unnecessary_semicolon {
+ err.span_suggestion(
+ semicolon_span,
+ "consider removing this semicolon",
+ "",
+ Applicability::MaybeIncorrect,
+ );
+ }
+ err.emit();
break;
}
Ok(Some(item)) => items.extend(item),
@@ -702,8 +760,8 @@ impl<'a> Parser<'a> {
)
.span_label(self.token.span, "this doc comment doesn't document anything")
.help(
- "doc comments must come before what they document, maybe a \
- comment was intended with `//`?",
+ "doc comments must come before what they document, if a comment was \
+ intended use `//`",
)
.emit();
self.bump();
@@ -737,7 +795,7 @@ impl<'a> Parser<'a> {
}
/// Parses `unsafe? auto? trait Foo { ... }` or `trait Foo = Bar;`.
- fn parse_item_trait(&mut self, attrs: &mut Vec<Attribute>, lo: Span) -> PResult<'a, ItemInfo> {
+ fn parse_item_trait(&mut self, attrs: &mut AttrVec, lo: Span) -> PResult<'a, ItemInfo> {
let unsafety = self.parse_unsafety();
// Parse optional `auto` prefix.
let is_auto = if self.eat_keyword(kw::Auto) { IsAuto::Yes } else { IsAuto::No };
@@ -1023,7 +1081,7 @@ impl<'a> Parser<'a> {
/// ```
fn parse_item_foreign_mod(
&mut self,
- attrs: &mut Vec<Attribute>,
+ attrs: &mut AttrVec,
mut unsafety: Unsafe,
) -> PResult<'a, ItemInfo> {
let abi = self.parse_abi(); // ABI?
@@ -1124,6 +1182,16 @@ impl<'a> Parser<'a> {
Applicability::MaybeIncorrect,
)
.emit();
+ } else if self.eat_keyword(kw::Let) {
+ let span = self.prev_token.span;
+ self.struct_span_err(const_span.to(span), "`const` and `let` are mutually exclusive")
+ .span_suggestion(
+ const_span.to(span),
+ "remove `let`",
+ "const",
+ Applicability::MaybeIncorrect,
+ )
+ .emit();
}
}
@@ -1131,7 +1199,7 @@ impl<'a> Parser<'a> {
fn recover_const_impl(
&mut self,
const_span: Span,
- attrs: &mut Vec<Attribute>,
+ attrs: &mut AttrVec,
defaultness: Defaultness,
) -> PResult<'a, ItemInfo> {
let impl_span = self.token.span;
@@ -1179,10 +1247,11 @@ impl<'a> Parser<'a> {
// Parse the type of a `const` or `static mut?` item.
// That is, the `":" $ty` fragment.
- let ty = if self.eat(&token::Colon) {
- self.parse_ty()?
- } else {
- self.recover_missing_const_type(id, m)
+ let ty = match (self.eat(&token::Colon), self.check(&token::Eq) | self.check(&token::Semi))
+ {
+ // If there wasn't a `:` or the colon was followed by a `=` or `;` recover a missing type.
+ (true, false) => self.parse_ty()?,
+ (colon, _) => self.recover_missing_const_type(colon, m),
};
let expr = if self.eat(&token::Eq) { Some(self.parse_expr()?) } else { None };
@@ -1190,9 +1259,9 @@ impl<'a> Parser<'a> {
Ok((id, ty, expr))
}
- /// We were supposed to parse `:` but the `:` was missing.
+ /// We were supposed to parse `":" $ty` but the `:` or the type was missing.
/// This means that the type is missing.
- fn recover_missing_const_type(&mut self, id: Ident, m: Option<Mutability>) -> P<Ty> {
+ fn recover_missing_const_type(&mut self, colon_present: bool, m: Option<Mutability>) -> P<Ty> {
// Construct the error and stash it away with the hope
// that typeck will later enrich the error with a type.
let kind = match m {
@@ -1200,29 +1269,34 @@ impl<'a> Parser<'a> {
Some(Mutability::Not) => "static",
None => "const",
};
- let mut err = self.struct_span_err(id.span, &format!("missing type for `{kind}` item"));
+
+ let colon = match colon_present {
+ true => "",
+ false => ":",
+ };
+
+ let span = self.prev_token.span.shrink_to_hi();
+ let mut err = self.struct_span_err(span, &format!("missing type for `{kind}` item"));
err.span_suggestion(
- id.span,
+ span,
"provide a type for the item",
- format!("{id}: <type>"),
+ format!("{colon} <type>"),
Applicability::HasPlaceholders,
);
- err.stash(id.span, StashKey::ItemNoType);
+ err.stash(span, StashKey::ItemNoType);
// The user intended that the type be inferred,
// so treat this as if the user wrote e.g. `const A: _ = expr;`.
- P(Ty { kind: TyKind::Infer, span: id.span, id: ast::DUMMY_NODE_ID, tokens: None })
+ P(Ty { kind: TyKind::Infer, span, id: ast::DUMMY_NODE_ID, tokens: None })
}
/// Parses an enum declaration.
fn parse_item_enum(&mut self) -> PResult<'a, ItemInfo> {
if self.token.is_keyword(kw::Struct) {
- let mut err = self.struct_span_err(
- self.prev_token.span.to(self.token.span),
- "`enum` and `struct` are mutually exclusive",
- );
+ let span = self.prev_token.span.to(self.token.span);
+ let mut err = self.struct_span_err(span, "`enum` and `struct` are mutually exclusive");
err.span_suggestion(
- self.prev_token.span.to(self.token.span),
+ span,
"replace `enum struct` with",
"enum",
Applicability::MachineApplicable,
@@ -1239,12 +1313,20 @@ impl<'a> Parser<'a> {
let mut generics = self.parse_generics()?;
generics.where_clause = self.parse_where_clause()?;
- let (variants, _) = self
- .parse_delim_comma_seq(Delimiter::Brace, |p| p.parse_enum_variant())
- .map_err(|e| {
- self.recover_stmt();
- e
- })?;
+ // Possibly recover `enum Foo;` instead of `enum Foo {}`
+ let (variants, _) = if self.token == TokenKind::Semi {
+ self.sess.emit_err(UseEmptyBlockNotSemi { span: self.token.span });
+ self.bump();
+ (vec![], false)
+ } else {
+ self.parse_delim_comma_seq(Delimiter::Brace, |p| p.parse_enum_variant()).map_err(
+ |mut e| {
+ e.span_label(id.span, "while parsing this enum");
+ self.recover_stmt();
+ e
+ },
+ )?
+ };
let enum_definition = EnumDef { variants: variants.into_iter().flatten().collect() };
Ok((id, ItemKind::Enum(enum_definition, generics)))
@@ -1266,7 +1348,8 @@ impl<'a> Parser<'a> {
let struct_def = if this.check(&token::OpenDelim(Delimiter::Brace)) {
// Parse a struct variant.
- let (fields, recovered) = this.parse_record_struct_body("struct", false)?;
+ let (fields, recovered) =
+ this.parse_record_struct_body("struct", ident.span, false)?;
VariantData::Struct(fields, recovered)
} else if this.check(&token::OpenDelim(Delimiter::Parenthesis)) {
VariantData::Tuple(this.parse_tuple_struct_body()?, DUMMY_NODE_ID)
@@ -1281,7 +1364,7 @@ impl<'a> Parser<'a> {
ident,
vis,
id: DUMMY_NODE_ID,
- attrs: variant_attrs.into(),
+ attrs: variant_attrs,
data: struct_def,
disr_expr,
span: vlo.to(this.prev_token.span),
@@ -1320,8 +1403,11 @@ impl<'a> Parser<'a> {
VariantData::Unit(DUMMY_NODE_ID)
} else {
// If we see: `struct Foo<T> where T: Copy { ... }`
- let (fields, recovered) =
- self.parse_record_struct_body("struct", generics.where_clause.has_where_token)?;
+ let (fields, recovered) = self.parse_record_struct_body(
+ "struct",
+ class_name.span,
+ generics.where_clause.has_where_token,
+ )?;
VariantData::Struct(fields, recovered)
}
// No `where` so: `struct Foo<T>;`
@@ -1329,8 +1415,11 @@ impl<'a> Parser<'a> {
VariantData::Unit(DUMMY_NODE_ID)
// Record-style struct definition
} else if self.token == token::OpenDelim(Delimiter::Brace) {
- let (fields, recovered) =
- self.parse_record_struct_body("struct", generics.where_clause.has_where_token)?;
+ let (fields, recovered) = self.parse_record_struct_body(
+ "struct",
+ class_name.span,
+ generics.where_clause.has_where_token,
+ )?;
VariantData::Struct(fields, recovered)
// Tuple-style struct definition with optional where-clause.
} else if self.token == token::OpenDelim(Delimiter::Parenthesis) {
@@ -1359,12 +1448,18 @@ impl<'a> Parser<'a> {
let vdata = if self.token.is_keyword(kw::Where) {
generics.where_clause = self.parse_where_clause()?;
- let (fields, recovered) =
- self.parse_record_struct_body("union", generics.where_clause.has_where_token)?;
+ let (fields, recovered) = self.parse_record_struct_body(
+ "union",
+ class_name.span,
+ generics.where_clause.has_where_token,
+ )?;
VariantData::Struct(fields, recovered)
} else if self.token == token::OpenDelim(Delimiter::Brace) {
- let (fields, recovered) =
- self.parse_record_struct_body("union", generics.where_clause.has_where_token)?;
+ let (fields, recovered) = self.parse_record_struct_body(
+ "union",
+ class_name.span,
+ generics.where_clause.has_where_token,
+ )?;
VariantData::Struct(fields, recovered)
} else {
let token_str = super::token_descr(&self.token);
@@ -1380,6 +1475,7 @@ impl<'a> Parser<'a> {
fn parse_record_struct_body(
&mut self,
adt_ty: &str,
+ ident_span: Span,
parsed_where: bool,
) -> PResult<'a, (Vec<FieldDef>, /* recovered */ bool)> {
let mut fields = Vec::new();
@@ -1394,6 +1490,7 @@ impl<'a> Parser<'a> {
match field {
Ok(field) => fields.push(field),
Err(mut err) => {
+ err.span_label(ident_span, format!("while parsing this {adt_ty}"));
err.emit();
break;
}
@@ -1438,7 +1535,7 @@ impl<'a> Parser<'a> {
ident: None,
id: DUMMY_NODE_ID,
ty,
- attrs: attrs.into(),
+ attrs,
is_placeholder: false,
},
TrailingToken::MaybeComma,
@@ -1464,13 +1561,24 @@ impl<'a> Parser<'a> {
adt_ty: &str,
lo: Span,
vis: Visibility,
- attrs: Vec<Attribute>,
+ attrs: AttrVec,
) -> PResult<'a, FieldDef> {
let mut seen_comma: bool = false;
let a_var = self.parse_name_and_ty(adt_ty, lo, vis, attrs)?;
if self.token == token::Comma {
seen_comma = true;
}
+ if self.eat(&token::Semi) {
+ let sp = self.prev_token.span;
+ let mut err = self.struct_span_err(sp, format!("{adt_ty} fields are separated by `,`"));
+ err.span_suggestion_short(
+ sp,
+ "replace `;` with `,`",
+ ",",
+ Applicability::MachineApplicable,
+ );
+ return Err(err);
+ }
match self.token.kind {
token::Comma => {
self.bump();
@@ -1478,7 +1586,10 @@ impl<'a> Parser<'a> {
token::CloseDelim(Delimiter::Brace) => {}
token::DocComment(..) => {
let previous_span = self.prev_token.span;
- let mut err = self.span_err(self.token.span, Error::UselessDocComment);
+ let mut err = DocCommentDoesNotDocumentAnything {
+ span: self.token.span,
+ missing_comma: None,
+ };
self.bump(); // consume the doc comment
let comma_after_doc_seen = self.eat(&token::Comma);
// `seen_comma` is always false, because we are inside doc block
@@ -1487,18 +1598,13 @@ impl<'a> Parser<'a> {
seen_comma = true;
}
if comma_after_doc_seen || self.token == token::CloseDelim(Delimiter::Brace) {
- err.emit();
+ self.sess.emit_err(err);
} else {
if !seen_comma {
- let sp = self.sess.source_map().next_point(previous_span);
- err.span_suggestion(
- sp,
- "missing comma here",
- ",",
- Applicability::MachineApplicable,
- );
+ let sp = previous_span.shrink_to_hi();
+ err.missing_comma = Some(sp);
}
- return Err(err);
+ return Err(err.into_diagnostic(&self.sess.span_diagnostic));
}
}
_ => {
@@ -1528,8 +1634,12 @@ impl<'a> Parser<'a> {
}
}
- if self.token.is_ident() {
- // This is likely another field; emit the diagnostic and keep going
+ if self.token.is_ident()
+ || (self.token.kind == TokenKind::Pound
+ && (self.look_ahead(1, |t| t == &token::OpenDelim(Delimiter::Bracket))))
+ {
+ // This is likely another field; `TokenKind::Pound` indicates a `#[..]` attribute on the next field,
+ // so emit the diagnostic and keep going
err.span_suggestion(
sp,
"try adding a comma",
@@ -1590,7 +1700,7 @@ impl<'a> Parser<'a> {
adt_ty: &str,
lo: Span,
vis: Visibility,
- attrs: Vec<Attribute>,
+ attrs: AttrVec,
) -> PResult<'a, FieldDef> {
let name = self.parse_field_ident(adt_ty, lo)?;
self.expect_field_ty_separator()?;
@@ -1624,7 +1734,7 @@ impl<'a> Parser<'a> {
vis,
id: DUMMY_NODE_ID,
ty,
- attrs: attrs.into(),
+ attrs,
is_placeholder: false,
})
}
@@ -1634,6 +1744,7 @@ impl<'a> Parser<'a> {
fn parse_field_ident(&mut self, adt_ty: &str, lo: Span) -> PResult<'a, Ident> {
let (ident, is_raw) = self.ident_or_err()?;
if !is_raw && ident.is_reserved() {
+ let snapshot = self.create_snapshot_for_diagnostic();
let err = if self.check_fn_front_matter(false) {
let inherited_vis = Visibility {
span: rustc_span::DUMMY_SP,
@@ -1642,20 +1753,63 @@ impl<'a> Parser<'a> {
};
// We use `parse_fn` to get a span for the function
let fn_parse_mode = FnParseMode { req_name: |_| true, req_body: true };
- if let Err(mut db) =
- self.parse_fn(&mut Vec::new(), fn_parse_mode, lo, &inherited_vis)
+ match self.parse_fn(&mut AttrVec::new(), fn_parse_mode, lo, &inherited_vis) {
+ Ok(_) => {
+ let mut err = self.struct_span_err(
+ lo.to(self.prev_token.span),
+ &format!("functions are not allowed in {adt_ty} definitions"),
+ );
+ err.help(
+ "unlike in C++, Java, and C#, functions are declared in `impl` blocks",
+ );
+ err.help("see https://doc.rust-lang.org/book/ch05-03-method-syntax.html for more information");
+ err
+ }
+ Err(err) => {
+ err.cancel();
+ self.restore_snapshot(snapshot);
+ self.expected_ident_found()
+ }
+ }
+ } else if self.eat_keyword(kw::Struct) {
+ match self.parse_item_struct() {
+ Ok((ident, _)) => {
+ let mut err = self.struct_span_err(
+ lo.with_hi(ident.span.hi()),
+ &format!("structs are not allowed in {adt_ty} definitions"),
+ );
+ err.help("consider creating a new `struct` definition instead of nesting");
+ err
+ }
+ Err(err) => {
+ err.cancel();
+ self.restore_snapshot(snapshot);
+ self.expected_ident_found()
+ }
+ }
+ } else {
+ let mut err = self.expected_ident_found();
+ if self.eat_keyword_noexpect(kw::Let)
+ && let removal_span = self.prev_token.span.until(self.token.span)
+ && let Ok(ident) = self.parse_ident_common(false)
+ // Cancel this error, we don't need it.
+ .map_err(|err| err.cancel())
+ && self.token.kind == TokenKind::Colon
{
- db.delay_as_bug();
+ err.span_suggestion(
+ removal_span,
+ "remove this `let` keyword",
+ String::new(),
+ Applicability::MachineApplicable,
+ );
+ err.note("the `let` keyword is not allowed in `struct` fields");
+ err.note("see <https://doc.rust-lang.org/book/ch05-01-defining-structs.html> for more information");
+ err.emit();
+ return Ok(ident);
+ } else {
+ self.restore_snapshot(snapshot);
}
- let mut err = self.struct_span_err(
- lo.to(self.prev_token.span),
- &format!("functions are not allowed in {adt_ty} definitions"),
- );
- err.help("unlike in C++, Java, and C#, functions are declared in `impl` blocks");
- err.help("see https://doc.rust-lang.org/book/ch05-03-method-syntax.html for more information");
err
- } else {
- self.expected_ident_found()
};
return Err(err);
}
@@ -1919,7 +2073,7 @@ impl<'a> Parser<'a> {
/// Parse a function starting from the front matter (`const ...`) to the body `{ ... }` or `;`.
fn parse_fn(
&mut self,
- attrs: &mut Vec<Attribute>,
+ attrs: &mut AttrVec,
fn_parse_mode: FnParseMode,
sig_lo: Span,
vis: &Visibility,
@@ -1942,7 +2096,7 @@ impl<'a> Parser<'a> {
/// or e.g. a block when the function is a provided one.
fn parse_fn_body(
&mut self,
- attrs: &mut Vec<Attribute>,
+ attrs: &mut AttrVec,
ident: &Ident,
sig_hi: &mut Span,
req_body: bool,
@@ -1957,7 +2111,7 @@ impl<'a> Parser<'a> {
// Include the trailing semicolon in the span of the signature
self.expect_semi()?;
*sig_hi = self.prev_token.span;
- (Vec::new(), None)
+ (AttrVec::new(), None)
} else if self.check(&token::OpenDelim(Delimiter::Brace)) || self.token.is_whole_block() {
self.parse_inner_attrs_and_block().map(|(attrs, body)| (attrs, Some(body)))?
} else if self.token.kind == token::Eq {
@@ -1974,7 +2128,7 @@ impl<'a> Parser<'a> {
Applicability::MachineApplicable,
)
.emit();
- (Vec::new(), Some(self.mk_block_err(span)))
+ (AttrVec::new(), Some(self.mk_block_err(span)))
} else {
let expected = if req_body {
&[token::OpenDelim(Delimiter::Brace)][..]
@@ -1991,7 +2145,7 @@ impl<'a> Parser<'a> {
return Err(err);
}
}
- (Vec::new(), None)
+ (AttrVec::new(), None)
};
attrs.extend(inner_attrs);
Ok(body)
@@ -2220,7 +2374,7 @@ impl<'a> Parser<'a> {
self.collect_tokens_trailing_token(attrs, ForceCollect::No, |this, attrs| {
// Possibly parse `self`. Recover if we parsed it and it wasn't allowed here.
if let Some(mut param) = this.parse_self_param()? {
- param.attrs = attrs.into();
+ param.attrs = attrs;
let res = if first_param { Ok(param) } else { this.recover_bad_self_param(param) };
return Ok((res?, TrailingToken::None));
}
@@ -2249,7 +2403,7 @@ impl<'a> Parser<'a> {
(pat, this.parse_ty_for_param()?)
} else {
debug!("parse_param_general ident_to_pat");
- let parser_snapshot_before_ty = this.clone();
+ let parser_snapshot_before_ty = this.create_snapshot_for_diagnostic();
this.eat_incorrect_doc_comment_for_param_type();
let mut ty = this.parse_ty_for_param();
if ty.is_ok()
@@ -2263,7 +2417,7 @@ impl<'a> Parser<'a> {
match ty {
Ok(ty) => {
let ident = Ident::new(kw::Empty, this.prev_token.span);
- let bm = BindingMode::ByValue(Mutability::Not);
+ let bm = BindingAnnotation::NONE;
let pat = this.mk_pat_ident(ty.span, bm, ident);
(pat, ty)
}
@@ -2272,23 +2426,16 @@ impl<'a> Parser<'a> {
// Recover from attempting to parse the argument as a type without pattern.
Err(err) => {
err.cancel();
- *this = parser_snapshot_before_ty;
+ this.restore_snapshot(parser_snapshot_before_ty);
this.recover_arg_parse()?
}
}
};
- let span = lo.until(this.token.span);
+ let span = lo.to(this.prev_token.span);
Ok((
- Param {
- attrs: attrs.into(),
- id: ast::DUMMY_NODE_ID,
- is_placeholder: false,
- pat,
- span,
- ty,
- },
+ Param { attrs, id: ast::DUMMY_NODE_ID, is_placeholder: false, pat, span, ty },
TrailingToken::None,
))
})
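Several of the item.rs recoveries above are easiest to read from the inputs they accept; the snippets below are illustrative (not taken from the test suite) and now get targeted diagnostics instead of a generic parse error:

    impl Foo;            // `UseEmptyBlockNotSemi`: suggests `impl Foo {}`
    enum Bar;            // same recovery: suggests `enum Bar {}`

    const A = 5;         // missing type: the suggestion now lands after `A`, e.g. `const A: <type> = 5;`

    struct S {
        x: u32;          // "struct fields are separated by `,`": suggests replacing `;` with `,`
        y: u32,
    }

    impl Baz {
        let N = 0;       // non-item in item list: suggests `const` instead of `let` for an associated const
    }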
diff --git a/compiler/rustc_parse/src/parser/mod.rs b/compiler/rustc_parse/src/parser/mod.rs
index 0c523ad22..5fe29062b 100644
--- a/compiler/rustc_parse/src/parser/mod.rs
+++ b/compiler/rustc_parse/src/parser/mod.rs
@@ -13,7 +13,6 @@ mod ty;
use crate::lexer::UnmatchedBrace;
pub use attr_wrapper::AttrWrapper;
pub use diagnostics::AttemptLocalParseRecovery;
-use diagnostics::Error;
pub(crate) use item::FnParseMode;
pub use pat::{CommaRecoveryMode, RecoverColon, RecoverComma};
pub use path::PathStyle;
@@ -32,16 +31,20 @@ use rustc_ast_pretty::pprust;
use rustc_data_structures::fx::FxHashMap;
use rustc_errors::PResult;
use rustc_errors::{
- struct_span_err, Applicability, DiagnosticBuilder, ErrorGuaranteed, FatalError, MultiSpan,
+ Applicability, DiagnosticBuilder, ErrorGuaranteed, FatalError, IntoDiagnostic, MultiSpan,
};
use rustc_session::parse::ParseSess;
use rustc_span::source_map::{Span, DUMMY_SP};
use rustc_span::symbol::{kw, sym, Ident, Symbol};
-use tracing::debug;
use std::ops::Range;
use std::{cmp, mem, slice};
+use crate::errors::{
+ DocCommentDoesNotDocumentAnything, IncorrectVisibilityRestriction, MismatchedClosingDelimiter,
+ NonStringAbiLiteral,
+};
+
bitflags::bitflags! {
struct Restrictions: u8 {
const STMT_EXPR = 1 << 0;
@@ -76,6 +79,7 @@ pub enum ForceCollect {
pub enum TrailingToken {
None,
Semi,
+ Gt,
/// If the trailing token is a comma, then capture it
/// Otherwise, ignore the trailing token
MaybeComma,
@@ -111,6 +115,12 @@ macro_rules! maybe_recover_from_interpolated_ty_qpath {
};
}
+#[derive(Clone, Copy)]
+pub enum Recovery {
+ Allowed,
+ Forbidden,
+}
+
#[derive(Clone)]
pub struct Parser<'a> {
pub sess: &'a ParseSess,
@@ -148,12 +158,15 @@ pub struct Parser<'a> {
/// This allows us to recover when the user forget to add braces around
/// multiple statements in the closure body.
pub current_closure: Option<ClosureSpans>,
+ /// Whether the parser is allowed to do recovery.
+ /// This is disabled when parsing macro arguments, see #103534
+ pub recovery: Recovery,
}
-// This type is used a lot, e.g. it's cloned when matching many declarative macro rules. Make sure
+// This type is used a lot, e.g. it's cloned when matching many declarative macro rules with nonterminals. Make sure
// it doesn't unintentionally get bigger.
#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
-rustc_data_structures::static_assert_size!(Parser<'_>, 328);
+rustc_data_structures::static_assert_size!(Parser<'_>, 336);
/// Stores span information about a closure.
#[derive(Clone)]
@@ -171,7 +184,7 @@ pub struct ClosureSpans {
/// attribute, we parse a nested AST node that has `#[cfg]` or `#[cfg_attr]`
/// In this case, we use a `ReplaceRange` to replace the entire inner AST node
/// with `FlatToken::AttrTarget`, allowing us to perform eager cfg-expansion
-/// on an `AttrAnnotatedTokenStream`
+/// on an `AttrTokenStream`.
///
/// 2. When we parse an inner attribute while collecting tokens. We
/// remove inner attributes from the token stream entirely, and
@@ -184,7 +197,7 @@ pub type ReplaceRange = (Range<u32>, Vec<(FlatToken, Spacing)>);
/// Controls how we capture tokens. Capturing can be expensive,
/// so we try to avoid performing capturing in cases where
-/// we will never need an `AttrAnnotatedTokenStream`
+/// we will never need an `AttrTokenStream`.
#[derive(Copy, Clone)]
pub enum Capturing {
/// We aren't performing any capturing - this is the default mode.
@@ -238,7 +251,7 @@ struct TokenCursor {
// the trailing `>>` token. The `break_last_token`
// field is used to track this token - it gets
// appended to the captured stream when
- // we evaluate a `LazyTokenStream`
+ // we evaluate a `LazyAttrTokenStream`.
break_last_token: bool,
}
@@ -281,7 +294,7 @@ impl TokenCursor {
if delim != Delimiter::Invisible {
return (Token::new(token::OpenDelim(delim), sp.open), Spacing::Alone);
}
- // No open delimeter to return; continue on to the next iteration.
+ // No open delimiter to return; continue on to the next iteration.
}
};
} else if let Some(frame) = self.stack.pop() {
@@ -299,7 +312,10 @@ impl TokenCursor {
fn desugar(&mut self, attr_style: AttrStyle, data: Symbol, span: Span) -> (Token, Spacing) {
// Searches for the occurrences of `"#*` and returns the minimum number of `#`s
- // required to wrap the text.
+ // required to wrap the text. E.g.
+ // - `abc d` is wrapped as `r"abc d"` (num_of_hashes = 0)
+ // - `abc "d"` is wrapped as `r#"abc "d""#` (num_of_hashes = 1)
+ // - `abc "##d##"` is wrapped as `r###"abc "##d##""###` (num_of_hashes = 3)
let mut num_of_hashes = 0;
let mut count = 0;
for ch in data.as_str().chars() {
@@ -311,6 +327,7 @@ impl TokenCursor {
num_of_hashes = cmp::max(num_of_hashes, count);
}
+ // `/// foo` becomes `doc = r"foo"`.
let delim_span = DelimSpan::from_single(span);
let body = TokenTree::Delimited(
delim_span,
@@ -407,24 +424,39 @@ pub enum FollowedByType {
No,
}
-fn token_descr_opt(token: &Token) -> Option<&'static str> {
- Some(match token.kind {
- _ if token.is_special_ident() => "reserved identifier",
- _ if token.is_used_keyword() => "keyword",
- _ if token.is_unused_keyword() => "reserved keyword",
- token::DocComment(..) => "doc comment",
- _ => return None,
- })
+#[derive(Clone, Copy, PartialEq, Eq)]
+pub enum TokenDescription {
+ ReservedIdentifier,
+ Keyword,
+ ReservedKeyword,
+ DocComment,
}
-pub(super) fn token_descr(token: &Token) -> String {
- let token_str = pprust::token_to_string(token);
- match token_descr_opt(token) {
- Some(prefix) => format!("{} `{}`", prefix, token_str),
- _ => format!("`{}`", token_str),
+impl TokenDescription {
+ pub fn from_token(token: &Token) -> Option<Self> {
+ match token.kind {
+ _ if token.is_special_ident() => Some(TokenDescription::ReservedIdentifier),
+ _ if token.is_used_keyword() => Some(TokenDescription::Keyword),
+ _ if token.is_unused_keyword() => Some(TokenDescription::ReservedKeyword),
+ token::DocComment(..) => Some(TokenDescription::DocComment),
+ _ => None,
+ }
}
}
+pub(super) fn token_descr(token: &Token) -> String {
+ let name = pprust::token_to_string(token).to_string();
+
+ let kind = TokenDescription::from_token(token).map(|kind| match kind {
+ TokenDescription::ReservedIdentifier => "reserved identifier",
+ TokenDescription::Keyword => "keyword",
+ TokenDescription::ReservedKeyword => "reserved keyword",
+ TokenDescription::DocComment => "doc comment",
+ });
+
+ if let Some(kind) = kind { format!("{} `{}`", kind, name) } else { format!("`{}`", name) }
+}
+
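For the refactored description logic above, a short usage sketch showing how the new enum feeds the final string (the values follow directly from the match arms above):

    // For a `fn` token: classification and rendered description.
    let kind = TokenDescription::from_token(&token);   // Some(TokenDescription::Keyword)
    let descr = token_descr(&token);                   // "keyword `fn`"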
impl<'a> Parser<'a> {
pub fn new(
sess: &'a ParseSess,
@@ -460,6 +492,7 @@ impl<'a> Parser<'a> {
inner_attr_ranges: Default::default(),
},
current_closure: None,
+ recovery: Recovery::Allowed,
};
// Make parser point to the first token.
@@ -468,6 +501,22 @@ impl<'a> Parser<'a> {
parser
}
+ pub fn forbid_recovery(mut self) -> Self {
+ self.recovery = Recovery::Forbidden;
+ self
+ }
+
+ /// Whether the parser is allowed to recover from broken code.
+ ///
+ /// If this returns false, recovering broken code into valid code (especially if this recovery does lookahead)
+ /// is not allowed. All recovery done by the parser must be gated behind this check.
+ ///
+ /// Technically, this only needs to restrict eager recovery by doing lookahead at more tokens.
+ /// But making the distinction is very subtle, and simply forbidding all recovery is a lot simpler to uphold.
+ fn may_recover(&self) -> bool {
+ matches!(self.recovery, Recovery::Allowed)
+ }
+
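The doc comment above requires every recovery path to be gated behind `may_recover`; a hedged sketch of that call pattern (the recovery body itself is assumed, not taken from this diff):

    // An eager, lookahead-based recovery checks the flag before doing anything.
    if self.may_recover() && self.token.is_keyword(kw::Impl) {
        // snapshot, attempt the speculative parse, restore the snapshot on failure
    }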
pub fn unexpected<T>(&mut self) -> PResult<'a, T> {
match self.expect_one_of(&[], &[]) {
Err(e) => Err(e),
@@ -519,9 +568,11 @@ impl<'a> Parser<'a> {
fn ident_or_err(&mut self) -> PResult<'a, (Ident, /* is_raw */ bool)> {
self.token.ident().ok_or_else(|| match self.prev_token.kind {
- TokenKind::DocComment(..) => {
- self.span_err(self.prev_token.span, Error::UselessDocComment)
+ TokenKind::DocComment(..) => DocCommentDoesNotDocumentAnything {
+ span: self.prev_token.span,
+ missing_comma: None,
}
+ .into_diagnostic(&self.sess.span_diagnostic),
_ => self.expected_ident_found(),
})
}
@@ -1116,10 +1167,14 @@ impl<'a> Parser<'a> {
let (attrs, blk) = self.parse_inner_attrs_and_block()?;
let anon_const = AnonConst {
id: DUMMY_NODE_ID,
- value: self.mk_expr(blk.span, ExprKind::Block(blk, None), AttrVec::new()),
+ value: self.mk_expr(blk.span, ExprKind::Block(blk, None)),
};
let blk_span = anon_const.value.span;
- Ok(self.mk_expr(span.to(blk_span), ExprKind::ConstBlock(anon_const), AttrVec::from(attrs)))
+ Ok(self.mk_expr_with_attrs(
+ span.to(blk_span),
+ ExprKind::ConstBlock(anon_const),
+ AttrVec::from(attrs),
+ ))
}
/// Parses mutability (`mut` or nothing).
@@ -1141,7 +1196,9 @@ impl<'a> Parser<'a> {
fn parse_field_name(&mut self) -> PResult<'a, Ident> {
if let token::Literal(token::Lit { kind: token::Integer, symbol, suffix }) = self.token.kind
{
- self.expect_no_suffix(self.token.span, "a tuple index", suffix);
+ if let Some(suffix) = suffix {
+ self.expect_no_tuple_index_suffix(self.token.span, suffix);
+ }
self.bump();
Ok(Ident::new(symbol, self.prev_token.span))
} else {
@@ -1295,7 +1352,11 @@ impl<'a> Parser<'a> {
self.bump(); // `in`
let path = self.parse_path(PathStyle::Mod)?; // `path`
self.expect(&token::CloseDelim(Delimiter::Parenthesis))?; // `)`
- let vis = VisibilityKind::Restricted { path: P(path), id: ast::DUMMY_NODE_ID };
+ let vis = VisibilityKind::Restricted {
+ path: P(path),
+ id: ast::DUMMY_NODE_ID,
+ shorthand: false,
+ };
return Ok(Visibility {
span: lo.to(self.prev_token.span),
kind: vis,
@@ -1308,7 +1369,11 @@ impl<'a> Parser<'a> {
self.bump(); // `(`
let path = self.parse_path(PathStyle::Mod)?; // `crate`/`super`/`self`
self.expect(&token::CloseDelim(Delimiter::Parenthesis))?; // `)`
- let vis = VisibilityKind::Restricted { path: P(path), id: ast::DUMMY_NODE_ID };
+ let vis = VisibilityKind::Restricted {
+ path: P(path),
+ id: ast::DUMMY_NODE_ID,
+ shorthand: true,
+ };
return Ok(Visibility {
span: lo.to(self.prev_token.span),
kind: vis,
@@ -1331,23 +1396,8 @@ impl<'a> Parser<'a> {
let path = self.parse_path(PathStyle::Mod)?;
self.expect(&token::CloseDelim(Delimiter::Parenthesis))?; // `)`
- let msg = "incorrect visibility restriction";
- let suggestion = r##"some possible visibility restrictions are:
-`pub(crate)`: visible only on the current crate
-`pub(super)`: visible only in the current module's parent
-`pub(in path::to::module)`: visible only on the specified path"##;
-
let path_str = pprust::path_to_string(&path);
-
- struct_span_err!(self.sess.span_diagnostic, path.span, E0704, "{}", msg)
- .help(suggestion)
- .span_suggestion(
- path.span,
- &format!("make this visible only to module `{}` with `in`", path_str),
- format!("in {}", path_str),
- Applicability::MachineApplicable,
- )
- .emit();
+ self.sess.emit_err(IncorrectVisibilityRestriction { span: path.span, inner_str: path_str });
Ok(())
}
@@ -1371,16 +1421,9 @@ impl<'a> Parser<'a> {
match self.parse_str_lit() {
Ok(str_lit) => Some(str_lit),
Err(Some(lit)) => match lit.kind {
- ast::LitKind::Err(_) => None,
+ ast::LitKind::Err => None,
_ => {
- self.struct_span_err(lit.span, "non-string ABI literal")
- .span_suggestion(
- lit.span,
- "specify the ABI with a string literal",
- "\"C\"",
- Applicability::MaybeIncorrect,
- )
- .emit();
+ self.sess.emit_err(NonStringAbiLiteral { span: lit.span });
None
}
},
@@ -1421,25 +1464,18 @@ pub(crate) fn make_unclosed_delims_error(
// `None` here means an `Eof` was found. We already emit those errors elsewhere, we add them to
// `unmatched_braces` only for error recovery in the `Parser`.
let found_delim = unmatched.found_delim?;
- let span: MultiSpan = if let Some(sp) = unmatched.unclosed_span {
- vec![unmatched.found_span, sp].into()
- } else {
- unmatched.found_span.into()
- };
- let mut err = sess.span_diagnostic.struct_span_err(
- span,
- &format!(
- "mismatched closing delimiter: `{}`",
- pprust::token_kind_to_string(&token::CloseDelim(found_delim)),
- ),
- );
- err.span_label(unmatched.found_span, "mismatched closing delimiter");
- if let Some(sp) = unmatched.candidate_span {
- err.span_label(sp, "closing delimiter possibly meant for this");
- }
+ let mut spans = vec![unmatched.found_span];
if let Some(sp) = unmatched.unclosed_span {
- err.span_label(sp, "unclosed delimiter");
- }
+ spans.push(sp);
+ };
+ let err = MismatchedClosingDelimiter {
+ spans,
+ delimiter: pprust::token_kind_to_string(&token::CloseDelim(found_delim)).to_string(),
+ unmatched: unmatched.found_span,
+ opening_candidate: unmatched.candidate_span,
+ unclosed: unmatched.unclosed_span,
+ }
+ .into_diagnostic(&sess.span_diagnostic);
Some(err)
}
@@ -1453,11 +1489,11 @@ pub fn emit_unclosed_delims(unclosed_delims: &mut Vec<UnmatchedBrace>, sess: &Pa
}
}
-/// A helper struct used when building an `AttrAnnotatedTokenStream` from
-/// a `LazyTokenStream`. Both delimiter and non-delimited tokens
+/// A helper struct used when building an `AttrTokenStream` from
+/// a `LazyAttrTokenStream`. Both delimiter and non-delimited tokens
/// are stored as `FlatToken::Token`. A vector of `FlatToken`s
-/// is then 'parsed' to build up an `AttrAnnotatedTokenStream` with nested
-/// `AttrAnnotatedTokenTree::Delimited` tokens
+/// is then 'parsed' to build up an `AttrTokenStream` with nested
+/// `AttrTokenTree::Delimited` tokens.
#[derive(Debug, Clone)]
pub enum FlatToken {
/// A token - this holds both delimiter (e.g. '{' and '}')
@@ -1465,11 +1501,11 @@ pub enum FlatToken {
Token(Token),
/// Holds the `AttributesData` for an AST node. The
/// `AttributesData` is inserted directly into the
- /// constructed `AttrAnnotatedTokenStream` as
- /// an `AttrAnnotatedTokenTree::Attributes`
+ /// constructed `AttrTokenStream` as
+ /// an `AttrTokenTree::Attributes`.
AttrTarget(AttributesData),
/// A special 'empty' token that is ignored during the conversion
- /// to an `AttrAnnotatedTokenStream`. This is used to simplify the
+ /// to an `AttrTokenStream`. This is used to simplify the
/// handling of replace ranges.
Empty,
}
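
Several hunks in this file replace hand-built `struct_span_err` chains with typed structs (`IncorrectVisibilityRestriction`, `NonStringAbiLiteral`, `MismatchedClosingDelimiter`) that carry their spans and strings as fields and are handed to `emit_err` / `into_diagnostic`. A rough stand-alone sketch of that shape, using plain byte ranges instead of real spans and a hand-rolled `render` instead of rustc's diagnostic derive and Fluent messages (both of those are assumptions of this sketch, not shown in the diff):

    // Hypothetical, simplified stand-in for the typed-diagnostic pattern above.
    struct MismatchedClosingDelimiter {
        delimiter: String,
        unmatched: (usize, usize),
        opening_candidate: Option<(usize, usize)>,
        unclosed: Option<(usize, usize)>,
    }

    impl MismatchedClosingDelimiter {
        fn render(&self) -> String {
            let mut msg = format!("mismatched closing delimiter: `{}`", self.delimiter);
            msg.push_str(&format!(
                "\n  bytes {}..{}: mismatched closing delimiter",
                self.unmatched.0, self.unmatched.1
            ));
            if let Some((lo, hi)) = self.opening_candidate {
                msg.push_str(&format!("\n  bytes {lo}..{hi}: closing delimiter possibly meant for this"));
            }
            if let Some((lo, hi)) = self.unclosed {
                msg.push_str(&format!("\n  bytes {lo}..{hi}: unclosed delimiter"));
            }
            msg
        }
    }

    fn main() {
        let err = MismatchedClosingDelimiter {
            delimiter: ")".to_string(),
            unmatched: (10, 11),
            opening_candidate: Some((3, 4)),
            unclosed: None,
        };
        println!("{}", err.render());
    }
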
diff --git a/compiler/rustc_parse/src/parser/nonterminal.rs b/compiler/rustc_parse/src/parser/nonterminal.rs
index e215b6872..103dd8012 100644
--- a/compiler/rustc_parse/src/parser/nonterminal.rs
+++ b/compiler/rustc_parse/src/parser/nonterminal.rs
@@ -66,18 +66,18 @@ impl<'a> Parser<'a> {
},
NonterminalKind::PatParam { .. } | NonterminalKind::PatWithOr { .. } => {
match token.kind {
- token::Ident(..) | // box, ref, mut, and other identifiers (can stricten)
- token::OpenDelim(Delimiter::Parenthesis) | // tuple pattern
- token::OpenDelim(Delimiter::Bracket) | // slice pattern
- token::BinOp(token::And) | // reference
- token::BinOp(token::Minus) | // negative literal
- token::AndAnd | // double reference
- token::Literal(..) | // literal
- token::DotDot | // range pattern (future compat)
- token::DotDotDot | // range pattern (future compat)
- token::ModSep | // path
- token::Lt | // path (UFCS constant)
- token::BinOp(token::Shl) => true, // path (double UFCS)
+ token::Ident(..) | // box, ref, mut, and other identifiers (can stricten)
+ token::OpenDelim(Delimiter::Parenthesis) | // tuple pattern
+ token::OpenDelim(Delimiter::Bracket) | // slice pattern
+ token::BinOp(token::And) | // reference
+ token::BinOp(token::Minus) | // negative literal
+ token::AndAnd | // double reference
+ token::Literal(..) | // literal
+ token::DotDot | // range pattern (future compat)
+ token::DotDotDot | // range pattern (future compat)
+ token::ModSep | // path
+ token::Lt | // path (UFCS constant)
+ token::BinOp(token::Shl) => true, // path (double UFCS)
// leading vert `|` or-pattern
token::BinOp(token::Or) => matches!(kind, NonterminalKind::PatWithOr {..}),
token::Interpolated(ref nt) => may_be_ident(nt),
diff --git a/compiler/rustc_parse/src/parser/pat.rs b/compiler/rustc_parse/src/parser/pat.rs
index ba77a3958..52c11b4e3 100644
--- a/compiler/rustc_parse/src/parser/pat.rs
+++ b/compiler/rustc_parse/src/parser/pat.rs
@@ -1,14 +1,16 @@
use super::{ForceCollect, Parser, PathStyle, TrailingToken};
+use crate::errors::RemoveLet;
use crate::{maybe_recover_from_interpolated_ty_qpath, maybe_whole};
use rustc_ast::mut_visit::{noop_visit_pat, MutVisitor};
use rustc_ast::ptr::P;
use rustc_ast::token::{self, Delimiter};
use rustc_ast::{
- self as ast, AttrVec, Attribute, BindingMode, Expr, ExprKind, MacCall, Mutability, Pat,
+ self as ast, AttrVec, BindingAnnotation, ByRef, Expr, ExprKind, MacCall, Mutability, Pat,
PatField, PatKind, Path, QSelf, RangeEnd, RangeSyntax,
};
use rustc_ast_pretty::pprust;
use rustc_errors::{struct_span_err, Applicability, DiagnosticBuilder, ErrorGuaranteed, PResult};
+use rustc_session::errors::ExprParenthesesNeeded;
use rustc_span::source_map::{respan, Span, Spanned};
use rustc_span::symbol::{kw, sym, Ident};
@@ -320,7 +322,13 @@ impl<'a> Parser<'a> {
maybe_recover_from_interpolated_ty_qpath!(self, true);
maybe_whole!(self, NtPat, |x| x);
- let lo = self.token.span;
+ let mut lo = self.token.span;
+
+ if self.token.is_keyword(kw::Let) && self.look_ahead(1, |tok| tok.can_begin_pattern()) {
+ self.bump();
+ self.sess.emit_err(RemoveLet { span: lo });
+ lo = self.token.span;
+ }
let pat = if self.check(&token::BinOp(token::And)) || self.token.kind == token::AndAnd {
self.parse_pat_deref(expected)?
@@ -353,7 +361,7 @@ impl<'a> Parser<'a> {
} else if self.eat_keyword(kw::Ref) {
// Parse ref ident @ pat / ref mut ident @ pat
let mutbl = self.parse_mutability();
- self.parse_pat_ident(BindingMode::ByRef(mutbl))?
+ self.parse_pat_ident(BindingAnnotation(ByRef::Yes, mutbl))?
} else if self.eat_keyword(kw::Box) {
self.parse_pat_box()?
} else if self.check_inline_const(0) {
@@ -369,7 +377,7 @@ impl<'a> Parser<'a> {
// Parse `ident @ pat`
// This can give false positives and parse nullary enums,
// they are dealt with later in resolve.
- self.parse_pat_ident(BindingMode::ByValue(Mutability::Not))?
+ self.parse_pat_ident(BindingAnnotation::NONE)?
} else if self.is_start_of_pat_with_path() {
// Parse pattern starting with a path
let (qself, path) = if self.eat_lt() {
@@ -385,7 +393,7 @@ impl<'a> Parser<'a> {
if qself.is_none() && self.check(&token::Not) {
self.parse_pat_mac_invoc(path)?
} else if let Some(form) = self.parse_range_end() {
- let begin = self.mk_expr(span, ExprKind::Path(qself, path), AttrVec::new());
+ let begin = self.mk_expr(span, ExprKind::Path(qself, path));
self.parse_pat_range_begin_with(begin, form)?
} else if self.check(&token::OpenDelim(Delimiter::Brace)) {
self.parse_pat_struct(qself, path)?
@@ -394,6 +402,25 @@ impl<'a> Parser<'a> {
} else {
PatKind::Path(qself, path)
}
+ } else if matches!(self.token.kind, token::Lifetime(_))
+ // In pattern position, we're totally fine with using "next token isn't colon"
+ // as a heuristic. We could probably just always try to recover if it's a lifetime,
+ // because we never have `'a: label {}` in a pattern position anyways, but it does
+ // keep us from suggesting something like `let 'a: Ty = ..` => `let 'a': Ty = ..`
+ && !self.look_ahead(1, |token| matches!(token.kind, token::Colon))
+ {
+ // Recover a `'a` as a `'a'` literal
+ let lt = self.expect_lifetime();
+ let lit = self.recover_unclosed_char(lt.ident, |self_| {
+ let expected = expected.unwrap_or("pattern");
+ let msg =
+ format!("expected {}, found {}", expected, super::token_descr(&self_.token));
+
+ let mut err = self_.struct_span_err(self_.token.span, &msg);
+ err.span_label(self_.token.span, format!("expected {}", expected));
+ err
+ });
+ PatKind::Lit(self.mk_expr(lo, ExprKind::Lit(lit)))
} else {
// Try to parse everything else as literal with optional minus
match self.parse_literal_maybe_minus() {
@@ -578,7 +605,8 @@ impl<'a> Parser<'a> {
let mut pat = self.parse_pat_no_top_alt(Some("identifier"))?;
// If we don't have `mut $ident (@ pat)?`, error.
- if let PatKind::Ident(BindingMode::ByValue(m @ Mutability::Not), ..) = &mut pat.kind {
+ if let PatKind::Ident(BindingAnnotation(ByRef::No, m @ Mutability::Not), ..) = &mut pat.kind
+ {
// Don't recurse into the subpattern.
// `mut` on the outer binding doesn't affect the inner bindings.
*m = Mutability::Mut;
@@ -604,7 +632,7 @@ impl<'a> Parser<'a> {
)
.emit();
- self.parse_pat_ident(BindingMode::ByRef(Mutability::Mut))
+ self.parse_pat_ident(BindingAnnotation::REF_MUT)
}
/// Turn all by-value immutable bindings in a pattern into mutable bindings.
@@ -613,7 +641,8 @@ impl<'a> Parser<'a> {
struct AddMut(bool);
impl MutVisitor for AddMut {
fn visit_pat(&mut self, pat: &mut P<Pat>) {
- if let PatKind::Ident(BindingMode::ByValue(m @ Mutability::Not), ..) = &mut pat.kind
+ if let PatKind::Ident(BindingAnnotation(ByRef::No, m @ Mutability::Not), ..) =
+ &mut pat.kind
{
self.0 = true;
*m = Mutability::Mut;
@@ -665,7 +694,7 @@ impl<'a> Parser<'a> {
fn parse_pat_mac_invoc(&mut self, path: Path) -> PResult<'a, PatKind> {
self.bump();
let args = self.parse_mac_args()?;
- let mac = MacCall { path, args, prior_type_ascription: self.last_type_ascription };
+ let mac = P(MacCall { path, args, prior_type_ascription: self.last_type_ascription });
Ok(PatKind::MacCall(mac))
}
@@ -684,7 +713,7 @@ impl<'a> Parser<'a> {
let sp = self.sess.source_map().start_point(self.token.span);
if let Some(sp) = self.sess.ambiguous_block_expr_parse.borrow().get(&sp) {
- self.sess.expr_parentheses_needed(&mut err, *sp);
+ err.subdiagnostic(ExprParenthesesNeeded::surrounding(*sp));
}
Err(err)
@@ -767,7 +796,6 @@ impl<'a> Parser<'a> {
/// expression syntax `...expr` for splatting in expressions.
fn parse_pat_range_to(&mut self, mut re: Spanned<RangeEnd>) -> PResult<'a, PatKind> {
let end = self.parse_pat_range_end()?;
- self.sess.gated_spans.gate(sym::half_open_range_patterns, re.span.to(self.prev_token.span));
if let RangeEnd::Included(ref mut syn @ RangeSyntax::DotDotDot) = &mut re.node {
*syn = RangeSyntax::DotDotEq;
self.struct_span_err(re.span, "range-to patterns with `...` are not allowed")
@@ -790,6 +818,7 @@ impl<'a> Parser<'a> {
|| t.kind == token::Dot // e.g. `.5` for recovery;
|| t.can_begin_literal_maybe_minus() // e.g. `42`.
|| t.is_whole_expr()
+ || t.is_lifetime() // recover `'a` instead of `'a'`
})
}
@@ -807,7 +836,7 @@ impl<'a> Parser<'a> {
(None, self.parse_path(PathStyle::Expr)?)
};
let hi = self.prev_token.span;
- Ok(self.mk_expr(lo.to(hi), ExprKind::Path(qself, path), AttrVec::new()))
+ Ok(self.mk_expr(lo.to(hi), ExprKind::Path(qself, path)))
} else {
self.parse_literal_maybe_minus()
}
@@ -838,7 +867,7 @@ impl<'a> Parser<'a> {
/// Parses `ident` or `ident @ pat`.
/// Used by the copy foo and ref foo patterns to give a good
/// error message when parsing mistakes like `ref foo(a, b)`.
- fn parse_pat_ident(&mut self, binding_mode: BindingMode) -> PResult<'a, PatKind> {
+ fn parse_pat_ident(&mut self, binding_annotation: BindingAnnotation) -> PResult<'a, PatKind> {
let ident = self.parse_ident()?;
let sub = if self.eat(&token::At) {
Some(self.parse_pat_no_top_alt(Some("binding pattern"))?)
@@ -856,7 +885,7 @@ impl<'a> Parser<'a> {
.struct_span_err(self.prev_token.span, "expected identifier, found enum pattern"));
}
- Ok(PatKind::Ident(binding_mode, ident, sub))
+ Ok(PatKind::Ident(binding_annotation, ident, sub))
}
/// Parse a struct ("record") pattern (e.g. `Foo { ... }` or `Foo::Bar { ... }`).
@@ -936,11 +965,7 @@ impl<'a> Parser<'a> {
None
};
- Ok(PatKind::Ident(
- BindingMode::ByValue(Mutability::Not),
- Ident::new(kw::Box, box_span),
- sub,
- ))
+ Ok(PatKind::Ident(BindingAnnotation::NONE, Ident::new(kw::Box, box_span), sub))
} else {
let pat = self.parse_pat_with_range_pat(false, None)?;
self.sess.gated_spans.gate(sym::box_patterns, box_span.to(self.prev_token.span));
@@ -1093,7 +1118,7 @@ impl<'a> Parser<'a> {
.emit();
}
- fn parse_pat_field(&mut self, lo: Span, attrs: Vec<Attribute>) -> PResult<'a, PatField> {
+ fn parse_pat_field(&mut self, lo: Span, attrs: AttrVec) -> PResult<'a, PatField> {
// Check if a colon exists one ahead. This means we're parsing a fieldname.
let hi;
let (subpat, fieldname, is_shorthand) = if self.look_ahead(1, |t| t == &token::Colon) {
@@ -1117,14 +1142,12 @@ impl<'a> Parser<'a> {
let fieldname = self.parse_field_name()?;
hi = self.prev_token.span;
- let bind_type = match (is_ref, is_mut) {
- (true, true) => BindingMode::ByRef(Mutability::Mut),
- (true, false) => BindingMode::ByRef(Mutability::Not),
- (false, true) => BindingMode::ByValue(Mutability::Mut),
- (false, false) => BindingMode::ByValue(Mutability::Not),
+ let mutability = match is_mut {
+ false => Mutability::Not,
+ true => Mutability::Mut,
};
-
- let fieldpat = self.mk_pat_ident(boxed_span.to(hi), bind_type, fieldname);
+ let ann = BindingAnnotation(ByRef::from(is_ref), mutability);
+ let fieldpat = self.mk_pat_ident(boxed_span.to(hi), ann, fieldname);
let subpat =
if is_box { self.mk_pat(lo.to(hi), PatKind::Box(fieldpat)) } else { fieldpat };
(subpat, fieldname, true)
@@ -1134,15 +1157,15 @@ impl<'a> Parser<'a> {
ident: fieldname,
pat: subpat,
is_shorthand,
- attrs: attrs.into(),
+ attrs,
id: ast::DUMMY_NODE_ID,
span: lo.to(hi),
is_placeholder: false,
})
}
- pub(super) fn mk_pat_ident(&self, span: Span, bm: BindingMode, ident: Ident) -> P<Pat> {
- self.mk_pat(span, PatKind::Ident(bm, ident, None))
+ pub(super) fn mk_pat_ident(&self, span: Span, ann: BindingAnnotation, ident: Ident) -> P<Pat> {
+ self.mk_pat(span, PatKind::Ident(ann, ident, None))
}
pub(super) fn mk_pat(&self, span: Span, kind: PatKind) -> P<Pat> {
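
The pat.rs hunks above swap `BindingMode::ByRef` / `ByValue` for a `BindingAnnotation(ByRef, Mutability)` pair with constants such as `NONE` and `REF_MUT`, and build the field-pattern case via `ByRef::from(is_ref)`. A compact sketch of that shape with assumed definitions (the real ones live in `rustc_ast` and are not part of this diff):

    // Assumed, simplified definitions mirroring how the hunks above use them.
    #[derive(Clone, Copy, Debug, PartialEq)]
    enum ByRef { Yes, No }

    impl From<bool> for ByRef {
        fn from(is_ref: bool) -> Self {
            if is_ref { ByRef::Yes } else { ByRef::No }
        }
    }

    #[derive(Clone, Copy, Debug, PartialEq)]
    enum Mutability { Not, Mut }

    #[derive(Clone, Copy, Debug, PartialEq)]
    struct BindingAnnotation(ByRef, Mutability);

    impl BindingAnnotation {
        const NONE: Self = Self(ByRef::No, Mutability::Not);
        const REF_MUT: Self = Self(ByRef::Yes, Mutability::Mut);
    }

    fn main() {
        // `ref mut x` (the old `BindingMode::ByRef(Mutability::Mut)`) maps here:
        assert_eq!(BindingAnnotation(ByRef::from(true), Mutability::Mut), BindingAnnotation::REF_MUT);
        // A plain by-value, immutable binding:
        assert_eq!(BindingAnnotation(ByRef::from(false), Mutability::Not), BindingAnnotation::NONE);
    }
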
diff --git a/compiler/rustc_parse/src/parser/path.rs b/compiler/rustc_parse/src/parser/path.rs
index 5cf1758c3..fdc1af27f 100644
--- a/compiler/rustc_parse/src/parser/path.rs
+++ b/compiler/rustc_parse/src/parser/path.rs
@@ -13,7 +13,6 @@ use rustc_span::source_map::{BytePos, Span};
use rustc_span::symbol::{kw, sym, Ident};
use std::mem;
-use tracing::debug;
/// Specifies how to parse a path.
#[derive(Copy, Clone, PartialEq)]
@@ -527,7 +526,7 @@ impl<'a> Parser<'a> {
Ok(ident_gen_args) => ident_gen_args,
Err(()) => return Ok(Some(AngleBracketedArg::Arg(arg))),
};
- if binder.is_some() {
+ if binder {
// FIXME(compiler-errors): this could be improved by suggesting lifting
// this up to the trait, at least before this becomes real syntax.
// e.g. `Trait<for<'a> Assoc = Ty>` -> `for<'a> Trait<Assoc = Ty>`
@@ -652,12 +651,7 @@ impl<'a> Parser<'a> {
pub(super) fn parse_const_arg(&mut self) -> PResult<'a, AnonConst> {
// Parse const argument.
let value = if let token::OpenDelim(Delimiter::Brace) = self.token.kind {
- self.parse_block_expr(
- None,
- self.token.span,
- BlockCheckMode::Default,
- ast::AttrVec::new(),
- )?
+ self.parse_block_expr(None, self.token.span, BlockCheckMode::Default)?
} else {
self.handle_unambiguous_unbraced_const_arg()?
};
@@ -725,28 +719,24 @@ impl<'a> Parser<'a> {
/// Given an arg inside of generics, we try to destructure it as if it were the LHS in
/// `LHS = ...`, i.e. an associated type binding.
- /// This returns (optionally, if they are present) any `for<'a, 'b>` binder args, the
+ /// This returns a bool indicating if there are any `for<'a, 'b>` binder args, the
/// identifier, and any GAT arguments.
fn get_ident_from_generic_arg(
&self,
gen_arg: &GenericArg,
- ) -> Result<(Option<Vec<ast::GenericParam>>, Ident, Option<GenericArgs>), ()> {
+ ) -> Result<(bool, Ident, Option<GenericArgs>), ()> {
if let GenericArg::Type(ty) = gen_arg {
if let ast::TyKind::Path(qself, path) = &ty.kind
&& qself.is_none()
&& let [seg] = path.segments.as_slice()
{
- return Ok((None, seg.ident, seg.args.as_deref().cloned()));
+ return Ok((false, seg.ident, seg.args.as_deref().cloned()));
} else if let ast::TyKind::TraitObject(bounds, ast::TraitObjectSyntax::None) = &ty.kind
&& let [ast::GenericBound::Trait(trait_ref, ast::TraitBoundModifier::None)] =
bounds.as_slice()
&& let [seg] = trait_ref.trait_ref.path.segments.as_slice()
{
- return Ok((
- Some(trait_ref.bound_generic_params.clone()),
- seg.ident,
- seg.args.as_deref().cloned(),
- ));
+ return Ok((true, seg.ident, seg.args.as_deref().cloned()));
}
}
Err(())
diff --git a/compiler/rustc_parse/src/parser/stmt.rs b/compiler/rustc_parse/src/parser/stmt.rs
index 51bd9d2d3..12753c678 100644
--- a/compiler/rustc_parse/src/parser/stmt.rs
+++ b/compiler/rustc_parse/src/parser/stmt.rs
@@ -1,5 +1,5 @@
-use super::attr::DEFAULT_INNER_ATTR_FORBIDDEN;
-use super::diagnostics::{AttemptLocalParseRecovery, Error};
+use super::attr::InnerAttrForbiddenReason;
+use super::diagnostics::AttemptLocalParseRecovery;
use super::expr::LhsExpr;
use super::pat::RecoverComma;
use super::path::PathStyle;
@@ -7,6 +7,12 @@ use super::TrailingToken;
use super::{
AttrWrapper, BlockMode, FnParseMode, ForceCollect, Parser, Restrictions, SemiColonMode,
};
+use crate::errors::{
+ AssignmentElseNotAllowed, CompoundAssignmentExpressionInLet, ConstLetMutuallyExclusive,
+ DocCommentDoesNotDocumentAnything, ExpectedStatementAfterOuterAttr, InvalidCurlyInLetElse,
+ InvalidExpressionInLetElse, InvalidVariableDeclaration, InvalidVariableDeclarationSub,
+ WrapExpressionInParentheses,
+};
use crate::maybe_whole;
use rustc_ast as ast;
@@ -34,7 +40,7 @@ impl<'a> Parser<'a> {
}))
}
- /// If `force_capture` is true, forces collection of tokens regardless of whether
+ /// If `force_collect` is [`ForceCollect::Yes`], forces collection of tokens regardless of whether
/// or not we have attributes
pub(crate) fn parse_stmt_without_recovery(
&mut self,
@@ -55,18 +61,25 @@ impl<'a> Parser<'a> {
return Ok(Some(stmt.into_inner()));
}
+ if self.token.is_keyword(kw::Mut) && self.is_keyword_ahead(1, &[kw::Let]) {
+ self.bump();
+ let mut_let_span = lo.to(self.token.span);
+ self.sess.emit_err(InvalidVariableDeclaration {
+ span: mut_let_span,
+ sub: InvalidVariableDeclarationSub::SwitchMutLetOrder(mut_let_span),
+ });
+ }
+
Ok(Some(if self.token.is_keyword(kw::Let) {
self.parse_local_mk(lo, attrs, capture_semi, force_collect)?
} else if self.is_kw_followed_by_ident(kw::Mut) {
- self.recover_stmt_local(lo, attrs, "missing keyword", "let mut")?
+ self.recover_stmt_local(lo, attrs, InvalidVariableDeclarationSub::MissingLet)?
} else if self.is_kw_followed_by_ident(kw::Auto) {
self.bump(); // `auto`
- let msg = "write `let` instead of `auto` to introduce a new variable";
- self.recover_stmt_local(lo, attrs, msg, "let")?
+ self.recover_stmt_local(lo, attrs, InvalidVariableDeclarationSub::UseLetNotAuto)?
} else if self.is_kw_followed_by_ident(sym::var) {
self.bump(); // `var`
- let msg = "write `let` instead of `var` to introduce a new variable";
- self.recover_stmt_local(lo, attrs, msg, "let")?
+ self.recover_stmt_local(lo, attrs, InvalidVariableDeclarationSub::UseLetNotVar)?
} else if self.check_path() && !self.token.is_qpath_start() && !self.is_path_start_item() {
// We have avoided contextual keywords like `union`, items with `crate` visibility,
// or `auto trait` items. We aim to parse an arbitrary path `a::b` but not something
@@ -103,11 +116,7 @@ impl<'a> Parser<'a> {
let bl = self.parse_block()?;
// Destructuring assignment ... else.
// This is not allowed, but point it out in a nice way.
- let mut err = self.struct_span_err(
- e.span.to(bl.span),
- "<assignment> ... else { ... } is not allowed",
- );
- err.emit();
+ self.sess.emit_err(AssignmentElseNotAllowed { span: e.span.to(bl.span) });
}
self.mk_stmt(lo.to(e.span), StmtKind::Expr(e))
} else {
@@ -121,7 +130,7 @@ impl<'a> Parser<'a> {
let path = this.parse_path(PathStyle::Expr)?;
if this.eat(&token::Not) {
- let stmt_mac = this.parse_stmt_mac(lo, attrs.into(), path)?;
+ let stmt_mac = this.parse_stmt_mac(lo, attrs, path)?;
if this.token == token::Semi {
return Ok((stmt_mac, TrailingToken::Semi));
} else {
@@ -130,10 +139,10 @@ impl<'a> Parser<'a> {
}
let expr = if this.eat(&token::OpenDelim(Delimiter::Brace)) {
- this.parse_struct_expr(None, path, AttrVec::new(), true)?
+ this.parse_struct_expr(None, path, true)?
} else {
let hi = this.prev_token.span;
- this.mk_expr(lo.to(hi), ExprKind::Path(None, path), AttrVec::new())
+ this.mk_expr(lo.to(hi), ExprKind::Path(None, path))
};
let expr = this.with_res(Restrictions::STMT_EXPR, |this| {
@@ -168,7 +177,7 @@ impl<'a> Parser<'a> {
None => unreachable!(),
};
- let mac = MacCall { path, args, prior_type_ascription: self.last_type_ascription };
+ let mac = P(MacCall { path, args, prior_type_ascription: self.last_type_ascription });
let kind = if (style == MacStmtStyle::Braces
&& self.token != token::Dot
@@ -179,9 +188,9 @@ impl<'a> Parser<'a> {
StmtKind::MacCall(P(MacCallStmt { mac, style, attrs, tokens: None }))
} else {
// Since none of the above applied, this is an expression statement macro.
- let e = self.mk_expr(lo.to(hi), ExprKind::MacCall(mac), AttrVec::new());
+ let e = self.mk_expr(lo.to(hi), ExprKind::MacCall(mac));
let e = self.maybe_recover_from_bad_qpath(e)?;
- let e = self.parse_dot_or_call_expr_with(e, lo, attrs.into())?;
+ let e = self.parse_dot_or_call_expr_with(e, lo, attrs)?;
let e = self.parse_assoc_expr_with(0, LhsExpr::AlreadyParsed(e))?;
StmtKind::Expr(e)
};
@@ -193,9 +202,12 @@ impl<'a> Parser<'a> {
fn error_outer_attrs(&self, attrs: &[Attribute]) {
if let [.., last] = attrs {
if last.is_doc_comment() {
- self.span_err(last.span, Error::UselessDocComment).emit();
+ self.sess.emit_err(DocCommentDoesNotDocumentAnything {
+ span: last.span,
+ missing_comma: None,
+ });
} else if attrs.iter().any(|a| a.style == AttrStyle::Outer) {
- self.struct_span_err(last.span, "expected statement after outer attribute").emit();
+ self.sess.emit_err(ExpectedStatementAfterOuterAttr { span: last.span });
}
}
}
@@ -204,13 +216,10 @@ impl<'a> Parser<'a> {
&mut self,
lo: Span,
attrs: AttrWrapper,
- msg: &str,
- sugg: &str,
+ subdiagnostic: fn(Span) -> InvalidVariableDeclarationSub,
) -> PResult<'a, Stmt> {
let stmt = self.recover_local_after_let(lo, attrs)?;
- self.struct_span_err(lo, "invalid variable declaration")
- .span_suggestion(lo, msg, sugg, Applicability::MachineApplicable)
- .emit();
+ self.sess.emit_err(InvalidVariableDeclaration { span: lo, sub: subdiagnostic(lo) });
Ok(stmt)
}
@@ -223,7 +232,7 @@ impl<'a> Parser<'a> {
) -> PResult<'a, Stmt> {
self.collect_tokens_trailing_token(attrs, force_collect, |this, attrs| {
this.expect_keyword(kw::Let)?;
- let local = this.parse_local(attrs.into())?;
+ let local = this.parse_local(attrs)?;
let trailing = if capture_semi && this.token.kind == token::Semi {
TrailingToken::Semi
} else {
@@ -235,7 +244,7 @@ impl<'a> Parser<'a> {
fn recover_local_after_let(&mut self, lo: Span, attrs: AttrWrapper) -> PResult<'a, Stmt> {
self.collect_tokens_trailing_token(attrs, ForceCollect::No, |this, attrs| {
- let local = this.parse_local(attrs.into())?;
+ let local = this.parse_local(attrs)?;
// FIXME - maybe capture semicolon in recovery?
Ok((
this.mk_stmt(lo.to(this.prev_token.span), StmtKind::Local(local)),
@@ -247,6 +256,12 @@ impl<'a> Parser<'a> {
/// Parses a local variable declaration.
fn parse_local(&mut self, attrs: AttrVec) -> PResult<'a, P<Local>> {
let lo = self.prev_token.span;
+
+ if self.token.is_keyword(kw::Const) && self.look_ahead(1, |t| t.is_ident()) {
+ self.sess.emit_err(ConstLetMutuallyExclusive { span: lo.to(self.token.span) });
+ self.bump();
+ }
+
let (pat, colon) = self.parse_pat_before_ty(None, RecoverComma::Yes, "`let` bindings")?;
let (err, ty) = if colon {
@@ -341,44 +356,27 @@ impl<'a> Parser<'a> {
fn check_let_else_init_bool_expr(&self, init: &ast::Expr) {
if let ast::ExprKind::Binary(op, ..) = init.kind {
if op.node.lazy() {
- let suggs = vec![
- (init.span.shrink_to_lo(), "(".to_string()),
- (init.span.shrink_to_hi(), ")".to_string()),
- ];
- self.struct_span_err(
- init.span,
- &format!(
- "a `{}` expression cannot be directly assigned in `let...else`",
- op.node.to_string()
- ),
- )
- .multipart_suggestion(
- "wrap the expression in parentheses",
- suggs,
- Applicability::MachineApplicable,
- )
- .emit();
+ self.sess.emit_err(InvalidExpressionInLetElse {
+ span: init.span,
+ operator: op.node.to_string(),
+ sugg: WrapExpressionInParentheses {
+ left: init.span.shrink_to_lo(),
+ right: init.span.shrink_to_hi(),
+ },
+ });
}
}
}
fn check_let_else_init_trailing_brace(&self, init: &ast::Expr) {
if let Some(trailing) = classify::expr_trailing_brace(init) {
- let err_span = trailing.span.with_lo(trailing.span.hi() - BytePos(1));
- let suggs = vec![
- (trailing.span.shrink_to_lo(), "(".to_string()),
- (trailing.span.shrink_to_hi(), ")".to_string()),
- ];
- self.struct_span_err(
- err_span,
- "right curly brace `}` before `else` in a `let...else` statement not allowed",
- )
- .multipart_suggestion(
- "try wrapping the expression in parentheses",
- suggs,
- Applicability::MachineApplicable,
- )
- .emit();
+ self.sess.emit_err(InvalidCurlyInLetElse {
+ span: trailing.span.with_lo(trailing.span.hi() - BytePos(1)),
+ sugg: WrapExpressionInParentheses {
+ left: trailing.span.shrink_to_lo(),
+ right: trailing.span.shrink_to_hi(),
+ },
+ });
}
}
@@ -387,18 +385,7 @@ impl<'a> Parser<'a> {
let eq_consumed = match self.token.kind {
token::BinOpEq(..) => {
// Recover `let x <op>= 1` as `let x = 1`
- self.struct_span_err(
- self.token.span,
- "can't reassign to an uninitialized variable",
- )
- .span_suggestion_short(
- self.token.span,
- "initialize the variable",
- "=",
- Applicability::MaybeIncorrect,
- )
- .help("if you meant to overwrite, remove the `let` binding")
- .emit();
+ self.sess.emit_err(CompoundAssignmentExpressionInLet { span: self.token.span });
self.bump();
true
}
@@ -412,7 +399,12 @@ impl<'a> Parser<'a> {
pub(super) fn parse_block(&mut self) -> PResult<'a, P<Block>> {
let (attrs, block) = self.parse_inner_attrs_and_block()?;
if let [.., last] = &*attrs {
- self.error_on_forbidden_inner_attr(last.span, DEFAULT_INNER_ATTR_FORBIDDEN);
+ self.error_on_forbidden_inner_attr(
+ last.span,
+ super::attr::InnerAttrPolicy::Forbidden(Some(
+ InnerAttrForbiddenReason::InCodeBlock,
+ )),
+ );
}
Ok(block)
}
@@ -487,9 +479,7 @@ impl<'a> Parser<'a> {
}
/// Parses a block. Inner attributes are allowed.
- pub(super) fn parse_inner_attrs_and_block(
- &mut self,
- ) -> PResult<'a, (Vec<Attribute>, P<Block>)> {
+ pub(super) fn parse_inner_attrs_and_block(&mut self) -> PResult<'a, (AttrVec, P<Block>)> {
self.parse_block_common(self.token.span, BlockCheckMode::Default)
}
@@ -498,8 +488,8 @@ impl<'a> Parser<'a> {
&mut self,
lo: Span,
blk_mode: BlockCheckMode,
- ) -> PResult<'a, (Vec<Attribute>, P<Block>)> {
- maybe_whole!(self, NtBlock, |x| (Vec::new(), x));
+ ) -> PResult<'a, (AttrVec, P<Block>)> {
+ maybe_whole!(self, NtBlock, |x| (AttrVec::new(), x));
self.maybe_recover_unexpected_block_label();
if !self.eat(&token::OpenDelim(Delimiter::Brace)) {
@@ -563,39 +553,46 @@ impl<'a> Parser<'a> {
match stmt.kind {
// Expression without semicolon.
StmtKind::Expr(ref mut expr)
- if self.token != token::Eof && classify::expr_requires_semi_to_be_stmt(expr) =>
- {
+ if self.token != token::Eof && classify::expr_requires_semi_to_be_stmt(expr) => {
// Just check for errors and recover; do not eat semicolon yet.
- if let Err(mut e) =
- self.expect_one_of(&[], &[token::Semi, token::CloseDelim(Delimiter::Brace)])
- {
- if let TokenKind::DocComment(..) = self.token.kind {
- if let Ok(snippet) = self.span_to_snippet(self.token.span) {
- let sp = self.token.span;
- let marker = &snippet[..3];
- let (comment_marker, doc_comment_marker) = marker.split_at(2);
-
- e.span_suggestion(
- sp.with_hi(sp.lo() + BytePos(marker.len() as u32)),
- &format!(
- "add a space before `{}` to use a regular comment",
- doc_comment_marker,
- ),
- format!("{} {}", comment_marker, doc_comment_marker),
- Applicability::MaybeIncorrect,
- );
+ // `expect_one_of` returns PResult<'a, bool /* recovered */>
+ let replace_with_err =
+ match self.expect_one_of(&[], &[token::Semi, token::CloseDelim(Delimiter::Brace)]) {
+ // Recover from parser, skip type error to avoid extra errors.
+ Ok(true) => true,
+ Err(mut e) => {
+ if let TokenKind::DocComment(..) = self.token.kind &&
+ let Ok(snippet) = self.span_to_snippet(self.token.span) {
+ let sp = self.token.span;
+ let marker = &snippet[..3];
+ let (comment_marker, doc_comment_marker) = marker.split_at(2);
+
+ e.span_suggestion(
+ sp.with_hi(sp.lo() + BytePos(marker.len() as u32)),
+ &format!(
+ "add a space before `{}` to use a regular comment",
+ doc_comment_marker,
+ ),
+ format!("{} {}", comment_marker, doc_comment_marker),
+ Applicability::MaybeIncorrect,
+ );
}
- }
- if let Err(mut e) =
- self.check_mistyped_turbofish_with_multiple_type_params(e, expr)
- {
- if recover.no() {
- return Err(e);
+
+ if let Err(mut e) =
+ self.check_mistyped_turbofish_with_multiple_type_params(e, expr)
+ {
+ if recover.no() {
+ return Err(e);
+ }
+ e.emit();
+ self.recover_stmt();
}
- e.emit();
- self.recover_stmt();
+ true
}
- // Don't complain about type errors in body tail after parse error (#57383).
+ _ => false
+ };
+ if replace_with_err {
+ // We already emitted an error, so don't emit another type error
let sp = expr.span.to(self.prev_token.span);
*expr = self.mk_expr_err(sp);
}
diff --git a/compiler/rustc_parse/src/parser/ty.rs b/compiler/rustc_parse/src/parser/ty.rs
index 31b40a83e..2a8512acf 100644
--- a/compiler/rustc_parse/src/parser/ty.rs
+++ b/compiler/rustc_parse/src/parser/ty.rs
@@ -397,10 +397,13 @@ impl<'a> Parser<'a> {
fn parse_ty_ptr(&mut self) -> PResult<'a, TyKind> {
let mutbl = self.parse_const_or_mut().unwrap_or_else(|| {
let span = self.prev_token.span;
- let msg = "expected mut or const in raw pointer type";
- self.struct_span_err(span, msg)
- .span_label(span, msg)
- .help("use `*mut T` or `*const T` as appropriate")
+ self.struct_span_err(span, "expected `mut` or `const` keyword in raw pointer type")
+ .span_suggestions(
+ span.shrink_to_hi(),
+ "add `mut` or `const` here",
+ ["mut ".to_string(), "const ".to_string()].into_iter(),
+ Applicability::HasPlaceholders,
+ )
.emit();
Mutability::Not
});
@@ -567,7 +570,8 @@ impl<'a> Parser<'a> {
self.check_keyword(kw::Dyn)
&& (!self.token.uninterpolated_span().rust_2015()
|| self.look_ahead(1, |t| {
- t.can_begin_bound() && !can_continue_type_after_non_fn_ident(t)
+ (t.can_begin_bound() || t.kind == TokenKind::BinOp(token::Star))
+ && !can_continue_type_after_non_fn_ident(t)
}))
}
@@ -576,10 +580,18 @@ impl<'a> Parser<'a> {
/// Note that this does *not* parse bare trait objects.
fn parse_dyn_ty(&mut self, impl_dyn_multi: &mut bool) -> PResult<'a, TyKind> {
self.bump(); // `dyn`
+
+ // parse dyn* types
+ let syntax = if self.eat(&TokenKind::BinOp(token::Star)) {
+ TraitObjectSyntax::DynStar
+ } else {
+ TraitObjectSyntax::Dyn
+ };
+
// Always parse bounds greedily for better error recovery.
let bounds = self.parse_generic_bounds(None)?;
*impl_dyn_multi = bounds.len() > 1 || self.prev_token.kind == TokenKind::BinOp(token::Plus);
- Ok(TyKind::TraitObject(bounds, TraitObjectSyntax::Dyn))
+ Ok(TyKind::TraitObject(bounds, syntax))
}
/// Parses a type starting with a path.
@@ -598,11 +610,11 @@ impl<'a> Parser<'a> {
let path = self.parse_path_inner(PathStyle::Type, ty_generics)?;
if self.eat(&token::Not) {
// Macro invocation in type position
- Ok(TyKind::MacCall(MacCall {
+ Ok(TyKind::MacCall(P(MacCall {
path,
args: self.parse_mac_args()?,
prior_type_ascription: self.last_type_ascription,
- }))
+ })))
} else if allow_plus == AllowPlus::Yes && self.check_plus() {
// `Trait1 + Trait2 + 'a`
self.parse_remaining_bounds_path(Vec::new(), path, lo, true)
@@ -640,7 +652,13 @@ impl<'a> Parser<'a> {
let mut bounds = Vec::new();
let mut negative_bounds = Vec::new();
- while self.can_begin_bound() || self.token.is_keyword(kw::Dyn) {
+ while self.can_begin_bound()
+ // Continue even if we find a keyword.
+ // This is necessary for error recovery on, for example, `impl fn()`.
+ //
+ // The only keyword that can go after generic bounds is `where`, so stop if we see it.
+ || (self.token.is_reserved_ident() && !self.token.is_keyword(kw::Where))
+ {
if self.token.is_keyword(kw::Dyn) {
// Account for `&dyn Trait + dyn Other`.
self.struct_span_err(self.token.span, "invalid `dyn` keyword")
@@ -804,6 +822,20 @@ impl<'a> Parser<'a> {
let span = tilde.to(self.prev_token.span);
self.sess.gated_spans.gate(sym::const_trait_impl, span);
Some(span)
+ } else if self.eat_keyword(kw::Const) {
+ let span = self.prev_token.span;
+ self.sess.gated_spans.gate(sym::const_trait_impl, span);
+
+ self.struct_span_err(span, "const bounds must start with `~`")
+ .span_suggestion(
+ span.shrink_to_lo(),
+ "add `~`",
+ "~",
+ Applicability::MachineApplicable,
+ )
+ .emit();
+
+ Some(span)
} else {
None
};
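
The `parse_dyn_ty` hunk above accepts an optional `*` immediately after `dyn` and records `TraitObjectSyntax::DynStar` instead of `Dyn` before parsing bounds as usual. A stand-alone sketch of that prefix handling over a toy token stream (the token representation here is an assumption of the sketch):

    #[derive(Debug, PartialEq)]
    enum TraitObjectSyntax { Dyn, DynStar }

    fn parse_dyn_prefix<'a, I>(tokens: &mut std::iter::Peekable<I>) -> TraitObjectSyntax
    where
        I: Iterator<Item = &'a str>,
    {
        // The caller has already consumed the `dyn` keyword (mirrors `self.bump()`).
        if tokens.peek() == Some(&"*") {
            tokens.next(); // eat `*`, as in `self.eat(&TokenKind::BinOp(token::Star))`
            TraitObjectSyntax::DynStar
        } else {
            TraitObjectSyntax::Dyn
        }
    }

    fn main() {
        let mut it = ["*", "Trait"].into_iter().peekable();
        assert_eq!(parse_dyn_prefix(&mut it), TraitObjectSyntax::DynStar);

        let mut it = ["Trait"].into_iter().peekable();
        assert_eq!(parse_dyn_prefix(&mut it), TraitObjectSyntax::Dyn);
    }
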
diff --git a/compiler/rustc_parse_format/src/lib.rs b/compiler/rustc_parse_format/src/lib.rs
index 4890fade5..1394993ab 100644
--- a/compiler/rustc_parse_format/src/lib.rs
+++ b/compiler/rustc_parse_format/src/lib.rs
@@ -9,6 +9,8 @@
html_playground_url = "https://play.rust-lang.org/",
test(attr(deny(warnings)))
)]
+#![deny(rustc::untranslatable_diagnostic)]
+#![deny(rustc::diagnostic_outside_of_impl)]
// We want to be able to build this crate with a stable compiler, so no
// `#![feature]` attributes should be added.
@@ -165,6 +167,8 @@ pub enum Count<'a> {
CountIsName(&'a str, InnerSpan),
/// The count is specified by the argument at the given index.
CountIsParam(usize),
+ /// The count is specified by a star (like in `{:.*}`) that refers to the argument at the given index.
+ CountIsStar(usize),
/// The count is implied and cannot be explicitly specified.
CountImplied,
}
@@ -220,7 +224,7 @@ impl<'a> Iterator for Parser<'a> {
'{' => {
let curr_last_brace = self.last_opening_brace;
let byte_pos = self.to_span_index(pos);
- let lbrace_end = InnerOffset(byte_pos.0 + 1);
+ let lbrace_end = self.to_span_index(pos + 1);
self.last_opening_brace = Some(byte_pos.to(lbrace_end));
self.cur.next();
if self.consume('{') {
@@ -262,9 +266,7 @@ impl<'a> Iterator for Parser<'a> {
}
} else {
if self.is_literal {
- let start = self.to_span_index(self.cur_line_start);
- let end = self.to_span_index(self.input.len());
- let span = start.to(end);
+ let span = self.span(self.cur_line_start, self.input.len());
if self.line_spans.last() != Some(&span) {
self.line_spans.push(span);
}
@@ -382,6 +384,12 @@ impl<'a> Parser<'a> {
InnerOffset(raw + pos + 1)
}
+ fn span(&self, start_pos: usize, end_pos: usize) -> InnerSpan {
+ let start = self.to_span_index(start_pos);
+ let end = self.to_span_index(end_pos);
+ start.to(end)
+ }
+
/// Forces consumption of the specified character. If the character is not
/// found, an error is emitted.
fn must_consume(&mut self, c: char) -> Option<usize> {
@@ -470,9 +478,7 @@ impl<'a> Parser<'a> {
return &self.input[start..pos];
}
'\n' if self.is_literal => {
- let start = self.to_span_index(self.cur_line_start);
- let end = self.to_span_index(pos);
- self.line_spans.push(start.to(end));
+ self.line_spans.push(self.span(self.cur_line_start, pos));
self.cur_line_start = pos + 1;
self.cur.next();
}
@@ -535,6 +541,10 @@ impl<'a> Parser<'a> {
}
}
+ fn current_pos(&mut self) -> usize {
+ if let Some(&(pos, _)) = self.cur.peek() { pos } else { self.input.len() }
+ }
+
/// Parses a format specifier at the current position, returning all of the
/// relevant information in the `FormatSpec` struct.
fn format(&mut self) -> FormatSpec<'a> {
@@ -588,39 +598,37 @@ impl<'a> Parser<'a> {
// no '0' flag and '0$' as the width instead.
if let Some(end) = self.consume_pos('$') {
spec.width = CountIsParam(0);
-
- if let Some((pos, _)) = self.cur.peek().cloned() {
- spec.width_span = Some(self.to_span_index(pos - 2).to(self.to_span_index(pos)));
- }
+ spec.width_span = Some(self.span(end - 1, end + 1));
havewidth = true;
- spec.width_span = Some(self.to_span_index(end - 1).to(self.to_span_index(end + 1)));
} else {
spec.flags |= 1 << (FlagSignAwareZeroPad as u32);
}
}
+
if !havewidth {
- let width_span_start = if let Some((pos, _)) = self.cur.peek() { *pos } else { 0 };
- let (w, sp) = self.count(width_span_start);
- spec.width = w;
- spec.width_span = sp;
+ let start = self.current_pos();
+ spec.width = self.count(start);
+ if spec.width != CountImplied {
+ let end = self.current_pos();
+ spec.width_span = Some(self.span(start, end));
+ }
}
if let Some(start) = self.consume_pos('.') {
- if let Some(end) = self.consume_pos('*') {
+ if self.consume('*') {
// Resolve `CountIsNextParam`.
// We can do this immediately as `position` is resolved later.
let i = self.curarg;
self.curarg += 1;
- spec.precision = CountIsParam(i);
- spec.precision_span =
- Some(self.to_span_index(start).to(self.to_span_index(end + 1)));
+ spec.precision = CountIsStar(i);
} else {
- let (p, sp) = self.count(start);
- spec.precision = p;
- spec.precision_span = sp;
+ spec.precision = self.count(start + 1);
}
+ let end = self.current_pos();
+ spec.precision_span = Some(self.span(start, end));
}
- let ty_span_start = self.cur.peek().map(|(pos, _)| *pos);
+
+ let ty_span_start = self.current_pos();
// Optional radix followed by the actual format specifier
if self.consume('x') {
if self.consume('?') {
@@ -640,11 +648,9 @@ impl<'a> Parser<'a> {
spec.ty = "?";
} else {
spec.ty = self.word();
- let ty_span_end = self.cur.peek().map(|(pos, _)| *pos);
if !spec.ty.is_empty() {
- spec.ty_span = ty_span_start
- .and_then(|s| ty_span_end.map(|e| (s, e)))
- .map(|(start, end)| self.to_span_index(start).to(self.to_span_index(end)));
+ let ty_span_end = self.current_pos();
+ spec.ty_span = Some(self.span(ty_span_start, ty_span_end));
}
}
spec
@@ -668,13 +674,11 @@ impl<'a> Parser<'a> {
return spec;
}
- let ty_span_start = self.cur.peek().map(|(pos, _)| *pos);
+ let ty_span_start = self.current_pos();
spec.ty = self.word();
- let ty_span_end = self.cur.peek().map(|(pos, _)| *pos);
if !spec.ty.is_empty() {
- spec.ty_span = ty_span_start
- .and_then(|s| ty_span_end.map(|e| (s, e)))
- .map(|(start, end)| self.to_span_index(start).to(self.to_span_index(end)));
+ let ty_span_end = self.current_pos();
+ spec.ty_span = Some(self.span(ty_span_start, ty_span_end));
}
spec
@@ -683,26 +687,21 @@ impl<'a> Parser<'a> {
/// Parses a `Count` parameter at the current position. This does not check
/// for 'CountIsNextParam' because that is only used in precision, not
/// width.
- fn count(&mut self, start: usize) -> (Count<'a>, Option<InnerSpan>) {
+ fn count(&mut self, start: usize) -> Count<'a> {
if let Some(i) = self.integer() {
- if let Some(end) = self.consume_pos('$') {
- let span = self.to_span_index(start).to(self.to_span_index(end + 1));
- (CountIsParam(i), Some(span))
- } else {
- (CountIs(i), None)
- }
+ if self.consume('$') { CountIsParam(i) } else { CountIs(i) }
} else {
let tmp = self.cur.clone();
let word = self.word();
if word.is_empty() {
self.cur = tmp;
- (CountImplied, None)
+ CountImplied
} else if let Some(end) = self.consume_pos('$') {
- let span = self.to_span_index(start + 1).to(self.to_span_index(end));
- (CountIsName(word, span), None)
+ let name_span = self.span(start, end);
+ CountIsName(word, name_span)
} else {
self.cur = tmp;
- (CountImplied, None)
+ CountImplied
}
}
}
@@ -735,26 +734,46 @@ impl<'a> Parser<'a> {
"invalid argument name `_`",
"invalid argument name",
"argument name cannot be a single underscore",
- self.to_span_index(start).to(self.to_span_index(end)),
+ self.span(start, end),
);
}
word
}
- /// Optionally parses an integer at the current position. This doesn't deal
- /// with overflow at all, it's just accumulating digits.
fn integer(&mut self) -> Option<usize> {
- let mut cur = 0;
+ let mut cur: usize = 0;
let mut found = false;
+ let mut overflow = false;
+ let start = self.current_pos();
while let Some(&(_, c)) = self.cur.peek() {
if let Some(i) = c.to_digit(10) {
- cur = cur * 10 + i as usize;
+ let (tmp, mul_overflow) = cur.overflowing_mul(10);
+ let (tmp, add_overflow) = tmp.overflowing_add(i as usize);
+ if mul_overflow || add_overflow {
+ overflow = true;
+ }
+ cur = tmp;
found = true;
self.cur.next();
} else {
break;
}
}
+
+ if overflow {
+ let end = self.current_pos();
+ let overflowed_int = &self.input[start..end];
+ self.err(
+ format!(
+ "integer `{}` does not fit into the type `usize` whose range is `0..={}`",
+ overflowed_int,
+ usize::MAX
+ ),
+ "integer out of range for `usize`",
+ self.span(start, end),
+ );
+ }
+
if found { Some(cur) } else { None }
}
diff --git a/compiler/rustc_parse_format/src/tests.rs b/compiler/rustc_parse_format/src/tests.rs
index 578530696..3f9cb149b 100644
--- a/compiler/rustc_parse_format/src/tests.rs
+++ b/compiler/rustc_parse_format/src/tests.rs
@@ -1,5 +1,6 @@
use super::*;
+#[track_caller]
fn same(fmt: &'static str, p: &[Piece<'static>]) {
let parser = Parser::new(fmt, None, None, false, ParseMode::Format);
assert_eq!(parser.collect::<Vec<Piece<'static>>>(), p);
@@ -57,6 +58,21 @@ fn invalid06() {
}
#[test]
+fn invalid_position() {
+ musterr("{18446744073709551616}");
+}
+
+#[test]
+fn invalid_width() {
+ musterr("{:18446744073709551616}");
+}
+
+#[test]
+fn invalid_precision() {
+ musterr("{:.18446744073709551616}");
+}
+
+#[test]
fn format_nothing() {
same(
"{}",
@@ -190,9 +206,9 @@ fn format_counts() {
align: AlignUnknown,
flags: 0,
precision: CountImplied,
- width: CountIs(10),
precision_span: None,
- width_span: None,
+ width: CountIs(10),
+ width_span: Some(InnerSpan { start: 3, end: 5 }),
ty: "x",
ty_span: None,
},
@@ -208,9 +224,9 @@ fn format_counts() {
align: AlignUnknown,
flags: 0,
precision: CountIs(10),
+ precision_span: Some(InnerSpan { start: 6, end: 9 }),
width: CountIsParam(10),
- precision_span: None,
- width_span: Some(InnerSpan::new(3, 6)),
+ width_span: Some(InnerSpan { start: 3, end: 6 }),
ty: "x",
ty_span: None,
},
@@ -226,9 +242,9 @@ fn format_counts() {
align: AlignUnknown,
flags: 0,
precision: CountIs(10),
+ precision_span: Some(InnerSpan { start: 6, end: 9 }),
width: CountIsParam(0),
- precision_span: None,
- width_span: Some(InnerSpan::new(4, 6)),
+ width_span: Some(InnerSpan { start: 4, end: 6 }),
ty: "x",
ty_span: None,
},
@@ -243,9 +259,9 @@ fn format_counts() {
fill: None,
align: AlignUnknown,
flags: 0,
- precision: CountIsParam(0),
+ precision: CountIsStar(0),
+ precision_span: Some(InnerSpan { start: 3, end: 5 }),
width: CountImplied,
- precision_span: Some(InnerSpan::new(3, 5)),
width_span: None,
ty: "x",
ty_span: None,
@@ -279,15 +295,33 @@ fn format_counts() {
fill: None,
align: AlignUnknown,
flags: 0,
- precision: CountIsName("b", InnerSpan::new(6, 7)),
- width: CountIsName("a", InnerSpan::new(4, 4)),
- precision_span: None,
- width_span: None,
+ precision: CountIsName("b", InnerSpan { start: 6, end: 7 }),
+ precision_span: Some(InnerSpan { start: 5, end: 8 }),
+ width: CountIsName("a", InnerSpan { start: 3, end: 4 }),
+ width_span: Some(InnerSpan { start: 3, end: 5 }),
ty: "?",
ty_span: None,
},
})],
);
+ same(
+ "{:.4}",
+ &[NextArgument(Argument {
+ position: ArgumentImplicitlyIs(0),
+ position_span: InnerSpan { start: 2, end: 2 },
+ format: FormatSpec {
+ fill: None,
+ align: AlignUnknown,
+ flags: 0,
+ precision: CountIs(4),
+ precision_span: Some(InnerSpan { start: 3, end: 5 }),
+ width: CountImplied,
+ width_span: None,
+ ty: "",
+ ty_span: None,
+ },
+ })],
+ )
}
#[test]
fn format_flags() {
diff --git a/compiler/rustc_passes/src/check_attr.rs b/compiler/rustc_passes/src/check_attr.rs
index a2ac329f2..27a57adf9 100644
--- a/compiler/rustc_passes/src/check_attr.rs
+++ b/compiler/rustc_passes/src/check_attr.rs
@@ -4,18 +4,24 @@
//! conflicts between multiple such attributes attached to the same
//! item.
-use crate::errors;
+use crate::errors::{
+ self, AttrApplication, DebugVisualizerUnreadable, InvalidAttrAtCrateLevel, ObjectLifetimeErr,
+ OnlyHasEffectOn, TransparentIncompatible, UnrecognizedReprHint,
+};
use rustc_ast::{ast, AttrStyle, Attribute, Lit, LitKind, MetaItemKind, NestedMetaItem};
use rustc_data_structures::fx::FxHashMap;
-use rustc_errors::{fluent, struct_span_err, Applicability, MultiSpan};
+use rustc_errors::{fluent, Applicability, MultiSpan};
use rustc_expand::base::resolve_path;
use rustc_feature::{AttributeDuplicates, AttributeType, BuiltinAttribute, BUILTIN_ATTRIBUTE_MAP};
use rustc_hir as hir;
-use rustc_hir::def_id::{LocalDefId, CRATE_DEF_ID};
+use rustc_hir::def_id::LocalDefId;
use rustc_hir::intravisit::{self, Visitor};
-use rustc_hir::{self, FnSig, ForeignItem, HirId, Item, ItemKind, TraitItem, CRATE_HIR_ID};
+use rustc_hir::{
+ self, FnSig, ForeignItem, HirId, Item, ItemKind, TraitItem, CRATE_HIR_ID, CRATE_OWNER_ID,
+};
use rustc_hir::{MethodKind, Target};
use rustc_middle::hir::nested_filter;
+use rustc_middle::middle::resolve_lifetime::ObjectLifetimeDefault;
use rustc_middle::ty::query::Providers;
use rustc_middle::ty::TyCtxt;
use rustc_session::lint::builtin::{
@@ -34,8 +40,8 @@ pub(crate) fn target_from_impl_item<'tcx>(
match impl_item.kind {
hir::ImplItemKind::Const(..) => Target::AssocConst,
hir::ImplItemKind::Fn(..) => {
- let parent_hir_id = tcx.hir().get_parent_item(impl_item.hir_id());
- let containing_item = tcx.hir().expect_item(parent_hir_id);
+ let parent_def_id = tcx.hir().get_parent_item(impl_item.hir_id()).def_id;
+ let containing_item = tcx.hir().expect_item(parent_def_id);
let containing_impl_is_for_trait = match &containing_item.kind {
hir::ItemKind::Impl(impl_) => impl_.of_trait.is_some(),
_ => bug!("parent of an ImplItem must be an Impl"),
@@ -46,7 +52,7 @@ pub(crate) fn target_from_impl_item<'tcx>(
Target::Method(MethodKind::Inherent)
}
}
- hir::ImplItemKind::TyAlias(..) => Target::AssocTy,
+ hir::ImplItemKind::Type(..) => Target::AssocTy,
}
}
@@ -130,6 +136,7 @@ impl CheckAttrVisitor<'_> {
| sym::rustc_if_this_changed
| sym::rustc_then_this_would_need => self.check_rustc_dirty_clean(&attr),
sym::cmse_nonsecure_entry => self.check_cmse_nonsecure_entry(attr, span, target),
+ sym::collapse_debuginfo => self.check_collapse_debuginfo(attr, span, target),
sym::const_trait => self.check_const_trait(attr, span, target),
sym::must_not_suspend => self.check_must_not_suspend(&attr, span, target),
sym::must_use => self.check_must_use(hir_id, &attr, span, target),
@@ -146,6 +153,7 @@ impl CheckAttrVisitor<'_> {
| sym::stable
| sym::rustc_allowed_through_unstable_modules
| sym::rustc_promotable => self.check_stability_promotable(&attr, span, target),
+ sym::link_ordinal => self.check_link_ordinal(&attr, span, target),
_ => true,
};
is_valid &= attr_is_valid;
@@ -159,18 +167,19 @@ impl CheckAttrVisitor<'_> {
sym::no_mangle => self.check_no_mangle(hir_id, attr, span, target),
sym::deprecated => self.check_deprecated(hir_id, attr, span, target),
sym::macro_use | sym::macro_escape => self.check_macro_use(hir_id, attr, target),
- sym::path => self.check_generic_attr(hir_id, attr, target, &[Target::Mod]),
+ sym::path => self.check_generic_attr(hir_id, attr, target, Target::Mod),
sym::plugin_registrar => self.check_plugin_registrar(hir_id, attr, target),
sym::macro_export => self.check_macro_export(hir_id, attr, target),
sym::ignore | sym::should_panic | sym::proc_macro_derive => {
- self.check_generic_attr(hir_id, attr, target, &[Target::Fn])
+ self.check_generic_attr(hir_id, attr, target, Target::Fn)
}
sym::automatically_derived => {
- self.check_generic_attr(hir_id, attr, target, &[Target::Impl])
+ self.check_generic_attr(hir_id, attr, target, Target::Impl)
}
sym::no_implicit_prelude => {
- self.check_generic_attr(hir_id, attr, target, &[Target::Mod])
+ self.check_generic_attr(hir_id, attr, target, Target::Mod)
}
+ sym::rustc_object_lifetime_default => self.check_object_lifetime_default(hir_id),
_ => {}
}
@@ -209,7 +218,14 @@ impl CheckAttrVisitor<'_> {
}
// FIXME(@lcnr): this doesn't belong here.
- if matches!(target, Target::Closure | Target::Fn | Target::Method(_) | Target::ForeignFn) {
+ if matches!(
+ target,
+ Target::Closure
+ | Target::Fn
+ | Target::Method(_)
+ | Target::ForeignFn
+ | Target::ForeignStatic
+ ) {
self.tcx.ensure().codegen_fn_attrs(self.tcx.hir().local_def_id(hir_id));
}
@@ -252,7 +268,7 @@ impl CheckAttrVisitor<'_> {
}
// FIXME(#65833): We permit associated consts to have an `#[inline]` attribute with
// just a lint, because we previously erroneously allowed it and some crates used it
- // accidentally, to to be compatible with crates depending on them, we can't throw an
+ // accidentally, to be compatible with crates depending on them, we can't throw an
// error here.
Target::AssocConst => {
self.tcx.emit_spanned_lint(
@@ -338,29 +354,18 @@ impl CheckAttrVisitor<'_> {
hir_id: HirId,
attr: &Attribute,
target: Target,
- allowed_targets: &[Target],
+ allowed_target: Target,
) {
- if !allowed_targets.iter().any(|t| t == &target) {
- let name = attr.name_or_empty();
- let mut i = allowed_targets.iter();
- // Pluralize
- let b = i.next().map_or_else(String::new, |t| t.to_string() + "s");
- let supported_names = i.enumerate().fold(b, |mut b, (i, allowed_target)| {
- if allowed_targets.len() > 2 && i == allowed_targets.len() - 2 {
- b.push_str(", and ");
- } else if allowed_targets.len() == 2 && i == allowed_targets.len() - 2 {
- b.push_str(" and ");
- } else {
- b.push_str(", ");
- }
- // Pluralize
- b.push_str(&(allowed_target.to_string() + "s"));
- b
- });
- self.tcx.struct_span_lint_hir(UNUSED_ATTRIBUTES, hir_id, attr.span, |lint| {
- lint.build(&format!("`#[{name}]` only has an effect on {}", supported_names))
- .emit();
- });
+ if target != allowed_target {
+ self.tcx.emit_spanned_lint(
+ UNUSED_ATTRIBUTES,
+ hir_id,
+ attr.span,
+ OnlyHasEffectOn {
+ attr_name: attr.name_or_empty(),
+ target_name: allowed_target.name().replace(" ", "_"),
+ },
+ );
}
}
@@ -371,7 +376,7 @@ impl CheckAttrVisitor<'_> {
| Target::Method(MethodKind::Trait { body: true } | MethodKind::Inherent) => true,
// FIXME(#80564): We permit struct fields, match arms and macro defs to have an
// `#[allow_internal_unstable]` attribute with just a lint, because we previously
- // erroneously allowed it and some crates used it accidentally, to to be compatible
+ // erroneously allowed it and some crates used it accidentally, to be compatible
// with crates depending on them, we can't throw an error here.
Target::Field | Target::Arm | Target::MacroDef => {
self.inline_attr_str_error_with_macro_def(hir_id, attr, "naked");
@@ -402,6 +407,38 @@ impl CheckAttrVisitor<'_> {
}
}
+ /// Debugging aid for `object_lifetime_default` query.
+ fn check_object_lifetime_default(&self, hir_id: HirId) {
+ let tcx = self.tcx;
+ if let Some(generics) = tcx.hir().get_generics(tcx.hir().local_def_id(hir_id)) {
+ for p in generics.params {
+ let hir::GenericParamKind::Type { .. } = p.kind else { continue };
+ let param_id = tcx.hir().local_def_id(p.hir_id);
+ let default = tcx.object_lifetime_default(param_id);
+ let repr = match default {
+ ObjectLifetimeDefault::Empty => "BaseDefault".to_owned(),
+ ObjectLifetimeDefault::Static => "'static".to_owned(),
+ ObjectLifetimeDefault::Param(def_id) => tcx.item_name(def_id).to_string(),
+ ObjectLifetimeDefault::Ambiguous => "Ambiguous".to_owned(),
+ };
+ tcx.sess.emit_err(ObjectLifetimeErr { span: p.span, repr });
+ }
+ }
+ }
+
+ /// Checks if `#[collapse_debuginfo]` is applied to a macro.
+ fn check_collapse_debuginfo(&self, attr: &Attribute, span: Span, target: Target) -> bool {
+ match target {
+ Target::MacroDef => true,
+ _ => {
+ self.tcx
+ .sess
+ .emit_err(errors::CollapseDebuginfo { attr_span: attr.span, defn_span: span });
+ false
+ }
+ }
+ }
+
/// Checks if a `#[track_caller]` is applied to a non-naked function. Returns `true` if valid.
fn check_track_caller(
&self,
@@ -419,7 +456,7 @@ impl CheckAttrVisitor<'_> {
Target::Fn | Target::Method(..) | Target::ForeignFn | Target::Closure => true,
// FIXME(#80564): We permit struct fields, match arms and macro defs to have an
// `#[track_caller]` attribute with just a lint, because we previously
- // erroneously allowed it and some crates used it accidentally, to to be compatible
+ // erroneously allowed it and some crates used it accidentally, to be compatible
// with crates depending on them, we can't throw an error here.
Target::Field | Target::Arm | Target::MacroDef => {
for attr in attrs {
@@ -448,7 +485,7 @@ impl CheckAttrVisitor<'_> {
Target::Struct | Target::Enum | Target::Variant => true,
// FIXME(#80564): We permit struct fields, match arms and macro defs to have an
// `#[non_exhaustive]` attribute with just a lint, because we previously
- // erroneously allowed it and some crates used it accidentally, to to be compatible
+ // erroneously allowed it and some crates used it accidentally, to be compatible
// with crates depending on them, we can't throw an error here.
Target::Field | Target::Arm | Target::MacroDef => {
self.inline_attr_str_error_with_macro_def(hir_id, attr, "non_exhaustive");
@@ -470,7 +507,7 @@ impl CheckAttrVisitor<'_> {
Target::Trait => true,
// FIXME(#80564): We permit struct fields, match arms and macro defs to have an
// `#[marker]` attribute with just a lint, because we previously
- // erroneously allowed it and some crates used it accidentally, to to be compatible
+ // erroneously allowed it and some crates used it accidentally, to be compatible
// with crates depending on them, we can't throw an error here.
Target::Field | Target::Arm | Target::MacroDef => {
self.inline_attr_str_error_with_macro_def(hir_id, attr, "marker");
@@ -529,7 +566,7 @@ impl CheckAttrVisitor<'_> {
}
// FIXME(#80564): We permit struct fields, match arms and macro defs to have an
// `#[target_feature]` attribute with just a lint, because we previously
- // erroneously allowed it and some crates used it accidentally, to to be compatible
+ // erroneously allowed it and some crates used it accidentally, to be compatible
// with crates depending on them, we can't throw an error here.
Target::Field | Target::Arm | Target::MacroDef => {
self.inline_attr_str_error_with_macro_def(hir_id, attr, "target_feature");
@@ -597,8 +634,8 @@ impl CheckAttrVisitor<'_> {
let span = meta.span();
if let Some(location) = match target {
Target::AssocTy => {
- let parent_hir_id = self.tcx.hir().get_parent_item(hir_id);
- let containing_item = self.tcx.hir().expect_item(parent_hir_id);
+ let parent_def_id = self.tcx.hir().get_parent_item(hir_id).def_id;
+ let containing_item = self.tcx.hir().expect_item(parent_def_id);
if Target::from_item(containing_item) == Target::Impl {
Some("type alias in implementation block")
} else {
@@ -606,8 +643,8 @@ impl CheckAttrVisitor<'_> {
}
}
Target::AssocConst => {
- let parent_hir_id = self.tcx.hir().get_parent_item(hir_id);
- let containing_item = self.tcx.hir().expect_item(parent_hir_id);
+ let parent_def_id = self.tcx.hir().get_parent_item(hir_id).def_id;
+ let containing_item = self.tcx.hir().expect_item(parent_def_id);
// We can't link to trait impl's consts.
let err = "associated constant in trait implementation block";
match containing_item.kind {
@@ -632,6 +669,7 @@ impl CheckAttrVisitor<'_> {
| Target::GlobalAsm
| Target::TyAlias
| Target::OpaqueTy
+ | Target::ImplTraitPlaceholder
| Target::Enum
| Target::Variant
| Target::Struct
@@ -644,7 +682,9 @@ impl CheckAttrVisitor<'_> {
| Target::ForeignStatic
| Target::ForeignTy
| Target::GenericParam(..)
- | Target::MacroDef => None,
+ | Target::MacroDef
+ | Target::PatField
+ | Target::ExprField => None,
} {
tcx.sess.emit_err(errors::DocAliasBadLocation { span, attr_str, location });
return false;
@@ -782,8 +822,8 @@ impl CheckAttrVisitor<'_> {
if let Some((prev_inline, prev_span)) = *specified_inline {
if do_inline != prev_inline {
let mut spans = MultiSpan::from_spans(vec![prev_span, meta.span()]);
- spans.push_span_label(prev_span, fluent::passes::doc_inline_conflict_first);
- spans.push_span_label(meta.span(), fluent::passes::doc_inline_conflict_second);
+ spans.push_span_label(prev_span, fluent::passes_doc_inline_conflict_first);
+ spans.push_span_label(meta.span(), fluent::passes_doc_inline_conflict_second);
self.tcx.sess.emit_err(errors::DocKeywordConflict { spans });
return false;
}
@@ -829,25 +869,31 @@ impl CheckAttrVisitor<'_> {
hir_id: HirId,
) -> bool {
if hir_id != CRATE_HIR_ID {
- self.tcx.struct_span_lint_hir(INVALID_DOC_ATTRIBUTES, hir_id, meta.span(), |lint| {
- let mut err = lint.build(fluent::passes::attr_crate_level);
- if attr.style == AttrStyle::Outer
- && self.tcx.hir().get_parent_item(hir_id) == CRATE_DEF_ID
- {
- if let Ok(mut src) = self.tcx.sess.source_map().span_to_snippet(attr.span) {
- src.insert(1, '!');
- err.span_suggestion_verbose(
- attr.span,
- fluent::passes::suggestion,
- src,
- Applicability::MaybeIncorrect,
- );
- } else {
- err.span_help(attr.span, fluent::passes::help);
+ self.tcx.struct_span_lint_hir(
+ INVALID_DOC_ATTRIBUTES,
+ hir_id,
+ meta.span(),
+ fluent::passes_attr_crate_level,
+ |err| {
+ if attr.style == AttrStyle::Outer
+ && self.tcx.hir().get_parent_item(hir_id) == CRATE_OWNER_ID
+ {
+ if let Ok(mut src) = self.tcx.sess.source_map().span_to_snippet(attr.span) {
+ src.insert(1, '!');
+ err.span_suggestion_verbose(
+ attr.span,
+ fluent::suggestion,
+ src,
+ Applicability::MaybeIncorrect,
+ );
+ } else {
+ err.span_help(attr.span, fluent::help);
+ }
}
- }
- err.note(fluent::passes::note).emit();
- });
+ err.note(fluent::note);
+ err
+ },
+ );
return false;
}
true
@@ -888,6 +934,22 @@ impl CheckAttrVisitor<'_> {
is_valid
}
+ /// Check that the `#![doc(cfg_hide(...))]` attribute only contains a list of attributes.
+ /// Returns `true` if valid.
+ fn check_doc_cfg_hide(&self, meta: &NestedMetaItem, hir_id: HirId) -> bool {
+ if meta.meta_item_list().is_some() {
+ true
+ } else {
+ self.tcx.emit_spanned_lint(
+ INVALID_DOC_ATTRIBUTES,
+ hir_id,
+ meta.span(),
+ errors::DocCfgHideTakesList,
+ );
+ false
+ }
+ }
+
/// Runs various checks on `#[doc]` attributes. Returns `true` if valid.
///
/// `specified_inline` should be initialized to `None` and kept for the scope
@@ -941,6 +1003,13 @@ impl CheckAttrVisitor<'_> {
is_valid = false;
}
+ sym::cfg_hide
+ if !self.check_attr_crate_level(attr, meta, hir_id)
+ || !self.check_doc_cfg_hide(meta, hir_id) =>
+ {
+ is_valid = false;
+ }
+
sym::inline | sym::no_inline
if !self.check_doc_inline(
attr,
@@ -1159,7 +1228,7 @@ impl CheckAttrVisitor<'_> {
Target::Fn | Target::Method(..) | Target::ForeignFn | Target::Closure => {}
// FIXME(#80564): We permit struct fields, match arms and macro defs to have an
// `#[cold]` attribute with just a lint, because we previously
- // erroneously allowed it and some crates used it accidentally, to to be compatible
+ // erroneously allowed it and some crates used it accidentally, to be compatible
// with crates depending on them, we can't throw an error here.
Target::Field | Target::Arm | Target::MacroDef => {
self.inline_attr_str_error_with_macro_def(hir_id, attr, "cold");
@@ -1201,7 +1270,7 @@ impl CheckAttrVisitor<'_> {
Target::ForeignFn | Target::ForeignStatic => {}
// FIXME(#80564): We permit struct fields, match arms and macro defs to have an
// `#[link_name]` attribute with just a lint, because we previously
- // erroneously allowed it and some crates used it accidentally, to to be compatible
+ // erroneously allowed it and some crates used it accidentally, to be compatible
// with crates depending on them, we can't throw an error here.
Target::Field | Target::Arm | Target::MacroDef => {
self.inline_attr_str_error_with_macro_def(hir_id, attr, "link_name");
@@ -1235,7 +1304,7 @@ impl CheckAttrVisitor<'_> {
Target::ExternCrate => true,
// FIXME(#80564): We permit struct fields, match arms and macro defs to have an
// `#[no_link]` attribute with just a lint, because we previously
- // erroneously allowed it and some crates used it accidentally, to to be compatible
+ // erroneously allowed it and some crates used it accidentally, to be compatible
// with crates depending on them, we can't throw an error here.
Target::Field | Target::Arm | Target::MacroDef => {
self.inline_attr_str_error_with_macro_def(hir_id, attr, "no_link");
@@ -1265,7 +1334,7 @@ impl CheckAttrVisitor<'_> {
Target::Method(..) if self.is_impl_item(hir_id) => true,
// FIXME(#80564): We permit struct fields, match arms and macro defs to have an
// `#[export_name]` attribute with just a lint, because we previously
- // erroneously allowed it and some crates used it accidentally, to to be compatible
+ // erroneously allowed it and some crates used it accidentally, to be compatible
// with crates depending on them, we can't throw an error here.
Target::Field | Target::Arm | Target::MacroDef => {
self.inline_attr_str_error_with_macro_def(hir_id, attr, "export_name");
@@ -1457,7 +1526,7 @@ impl CheckAttrVisitor<'_> {
Target::Static | Target::Fn | Target::Method(..) => {}
// FIXME(#80564): We permit struct fields, match arms and macro defs to have an
// `#[link_section]` attribute with just a lint, because we previously
- // erroneously allowed it and some crates used it accidentally, to to be compatible
+ // erroneously allowed it and some crates used it accidentally, to be compatible
// with crates depending on them, we can't throw an error here.
Target::Field | Target::Arm | Target::MacroDef => {
self.inline_attr_str_error_with_macro_def(hir_id, attr, "link_section");
@@ -1482,7 +1551,7 @@ impl CheckAttrVisitor<'_> {
Target::Method(..) if self.is_impl_item(hir_id) => {}
// FIXME(#80564): We permit struct fields, match arms and macro defs to have an
// `#[no_mangle]` attribute with just a lint, because we previously
- // erroneously allowed it and some crates used it accidentally, to to be compatible
+ // erroneously allowed it and some crates used it accidentally, to be compatible
// with crates depending on them, we can't throw an error here.
Target::Field | Target::Arm | Target::MacroDef => {
self.inline_attr_str_error_with_macro_def(hir_id, attr, "no_mangle");
@@ -1548,12 +1617,17 @@ impl CheckAttrVisitor<'_> {
continue;
}
- let (article, allowed_targets) = match hint.name_or_empty() {
+ match hint.name_or_empty() {
sym::C => {
is_c = true;
match target {
Target::Struct | Target::Union | Target::Enum => continue,
- _ => ("a", "struct, enum, or union"),
+ _ => {
+ self.tcx.sess.emit_err(AttrApplication::StructEnumUnion {
+ hint_span: hint.span(),
+ span,
+ });
+ }
}
}
sym::align => {
@@ -1569,12 +1643,20 @@ impl CheckAttrVisitor<'_> {
match target {
Target::Struct | Target::Union | Target::Enum | Target::Fn => continue,
- _ => ("a", "struct, enum, function, or union"),
+ _ => {
+ self.tcx.sess.emit_err(AttrApplication::StructEnumFunctionUnion {
+ hint_span: hint.span(),
+ span,
+ });
+ }
}
}
sym::packed => {
if target != Target::Struct && target != Target::Union {
- ("a", "struct or union")
+ self.tcx.sess.emit_err(AttrApplication::StructUnion {
+ hint_span: hint.span(),
+ span,
+ });
} else {
continue;
}
@@ -1582,7 +1664,9 @@ impl CheckAttrVisitor<'_> {
sym::simd => {
is_simd = true;
if target != Target::Struct {
- ("a", "struct")
+ self.tcx
+ .sess
+ .emit_err(AttrApplication::Struct { hint_span: hint.span(), span });
} else {
continue;
}
@@ -1591,7 +1675,12 @@ impl CheckAttrVisitor<'_> {
is_transparent = true;
match target {
Target::Struct | Target::Union | Target::Enum => continue,
- _ => ("a", "struct, enum, or union"),
+ _ => {
+ self.tcx.sess.emit_err(AttrApplication::StructEnumUnion {
+ hint_span: hint.span(),
+ span,
+ });
+ }
}
}
sym::i8
@@ -1608,33 +1697,18 @@ impl CheckAttrVisitor<'_> {
| sym::usize => {
int_reprs += 1;
if target != Target::Enum {
- ("an", "enum")
+ self.tcx
+ .sess
+ .emit_err(AttrApplication::Enum { hint_span: hint.span(), span });
} else {
continue;
}
}
_ => {
- struct_span_err!(
- self.tcx.sess,
- hint.span(),
- E0552,
- "unrecognized representation hint"
- )
- .emit();
-
+ self.tcx.sess.emit_err(UnrecognizedReprHint { span: hint.span() });
continue;
}
};
-
- struct_span_err!(
- self.tcx.sess,
- hint.span(),
- E0517,
- "{}",
- &format!("attribute should be applied to {article} {allowed_targets}")
- )
- .span_label(span, &format!("not {article} {allowed_targets}"))
- .emit();
}
// Just point at all repr hints if there are any incompatibilities.
@@ -1644,14 +1718,9 @@ impl CheckAttrVisitor<'_> {
// Error on repr(transparent, <anything else>).
if is_transparent && hints.len() > 1 {
let hint_spans: Vec<_> = hint_spans.clone().collect();
- struct_span_err!(
- self.tcx.sess,
- hint_spans,
- E0692,
- "transparent {} cannot have other repr hints",
- target
- )
- .emit();
+ self.tcx
+ .sess
+ .emit_err(TransparentIncompatible { hint_spans, target: target.to_string() });
}
// Warn on repr(u8, u16), repr(C, simd), and c-like-enum-repr(C, u8)
if (int_reprs > 1)
@@ -1694,7 +1763,7 @@ impl CheckAttrVisitor<'_> {
}
}
Some(_) => {
- // This error case is handled in rustc_typeck::collect.
+ // This error case is handled in rustc_hir_analysis::collect.
}
None => {
// Default case (compiler) when arg isn't defined.
@@ -1736,7 +1805,7 @@ impl CheckAttrVisitor<'_> {
Target::MacroDef => true,
// FIXME(#80564): We permit struct fields and match arms to have an
// `#[allow_internal_unstable]` attribute with just a lint, because we previously
- // erroneously allowed it and some crates used it accidentally, to to be compatible
+ // erroneously allowed it and some crates used it accidentally, to be compatible
// with crates depending on them, we can't throw an error here.
Target::Field | Target::Arm => {
self.inline_attr_str_error_without_macro_def(
@@ -1803,14 +1872,12 @@ impl CheckAttrVisitor<'_> {
match std::fs::File::open(&file) {
Ok(_) => true,
- Err(err) => {
- self.tcx
- .sess
- .struct_span_err(
- meta_item.span,
- &format!("couldn't read {}: {}", file.display(), err),
- )
- .emit();
+ Err(error) => {
+ self.tcx.sess.emit_err(DebugVisualizerUnreadable {
+ span: meta_item.span,
+ file: &file,
+ error,
+ });
false
}
}
@@ -1833,7 +1900,7 @@ impl CheckAttrVisitor<'_> {
}
// FIXME(#80564): We permit struct fields and match arms to have an
// `#[allow_internal_unstable]` attribute with just a lint, because we previously
- // erroneously allowed it and some crates used it accidentally, to to be compatible
+ // erroneously allowed it and some crates used it accidentally, to be compatible
// with crates depending on them, we can't throw an error here.
Target::Field | Target::Arm | Target::MacroDef => {
self.inline_attr_str_error_with_macro_def(hir_id, attr, "allow_internal_unstable");
@@ -1886,6 +1953,16 @@ impl CheckAttrVisitor<'_> {
}
}
+ fn check_link_ordinal(&self, attr: &Attribute, _span: Span, target: Target) -> bool {
+ match target {
+ Target::ForeignFn | Target::ForeignStatic => true,
+ _ => {
+ self.tcx.sess.emit_err(errors::LinkOrdinal { attr_span: attr.span });
+ false
+ }
+ }
+ }
+
fn check_deprecated(&self, hir_id: HirId, attr: &Attribute, _span: Span, target: Target) {
match target {
Target::Closure | Target::Expression | Target::Statement | Target::Arm => {
@@ -1985,7 +2062,7 @@ impl<'tcx> Visitor<'tcx> for CheckAttrVisitor<'tcx> {
// so this lets us continue to run them while maintaining backwards compatibility.
// In the long run, the checks should be harmonized.
if let ItemKind::Macro(ref macro_def, _) = item.kind {
- let def_id = item.def_id.to_def_id();
+ let def_id = item.owner_id.to_def_id();
if macro_def.macro_rules && !self.tcx.has_attr(def_id, sym::macro_export) {
check_non_exported_macro_for_invalid_attrs(self.tcx, item);
}
@@ -2048,14 +2125,14 @@ impl<'tcx> Visitor<'tcx> for CheckAttrVisitor<'tcx> {
intravisit::walk_expr(self, expr)
}
- fn visit_variant(
- &mut self,
- variant: &'tcx hir::Variant<'tcx>,
- generics: &'tcx hir::Generics<'tcx>,
- item_id: HirId,
- ) {
+ fn visit_expr_field(&mut self, field: &'tcx hir::ExprField<'tcx>) {
+ self.check_attributes(field.hir_id, field.span, Target::ExprField, None);
+ intravisit::walk_expr_field(self, field)
+ }
+
+ fn visit_variant(&mut self, variant: &'tcx hir::Variant<'tcx>) {
self.check_attributes(variant.id, variant.span, Target::Variant, None);
- intravisit::walk_variant(self, variant, generics, item_id)
+ intravisit::walk_variant(self, variant)
}
fn visit_param(&mut self, param: &'tcx hir::Param<'tcx>) {
@@ -2063,6 +2140,11 @@ impl<'tcx> Visitor<'tcx> for CheckAttrVisitor<'tcx> {
intravisit::walk_param(self, param);
}
+
+ fn visit_pat_field(&mut self, field: &'tcx hir::PatField<'tcx>) {
+ self.check_attributes(field.hir_id, field.span, Target::PatField, None);
+ intravisit::walk_pat_field(self, field);
+ }
}
fn is_c_like_enum(item: &Item<'_>) -> bool {
@@ -2092,6 +2174,7 @@ fn check_invalid_crate_level_attr(tcx: TyCtxt<'_>, attrs: &[Attribute]) {
sym::automatically_derived,
sym::start,
sym::rustc_main,
+ sym::unix_sigpipe,
sym::derive,
sym::test,
sym::test_case,
@@ -2105,25 +2188,11 @@ fn check_invalid_crate_level_attr(tcx: TyCtxt<'_>, attrs: &[Attribute]) {
if attr.style == AttrStyle::Inner {
for attr_to_check in ATTRS_TO_CHECK {
if attr.has_name(*attr_to_check) {
- let mut err = tcx.sess.struct_span_err(
- attr.span,
- &format!(
- "`{}` attribute cannot be used at crate level",
- attr_to_check.to_ident_string()
- ),
- );
- // Only emit an error with a suggestion if we can create a
- // string out of the attribute span
- if let Ok(src) = tcx.sess.source_map().span_to_snippet(attr.span) {
- let replacement = src.replace("#!", "#");
- err.span_suggestion_verbose(
- attr.span,
- "perhaps you meant to use an outer attribute",
- replacement,
- rustc_errors::Applicability::MachineApplicable,
- );
- }
- err.emit();
+ tcx.sess.emit_err(InvalidAttrAtCrateLevel {
+ span: attr.span,
+ snippet: tcx.sess.source_map().span_to_snippet(attr.span).ok(),
+ name: *attr_to_check,
+ });
}
}
}
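
A recurring change in the check_attr.rs hunks above is the new shape of `struct_span_lint_hir`: the primary message (now a fluent slug) is passed as an argument, and the closure only decorates and returns the diagnostic builder instead of calling `build(..).emit()` itself. A minimal sketch of the new call shape, with a hypothetical lint name and slugs standing in for the real ones:

    // Hypothetical call site; SOME_LINT, hir_id, span and the fluent slugs are placeholders.
    tcx.struct_span_lint_hir(
        SOME_LINT,
        hir_id,
        span,
        fluent::passes_some_message, // primary message is now passed up front
        |diag| {
            diag.note(fluent::note); // the closure only decorates the builder...
            diag                     // ...and returns it; emission happens in the caller
        },
    );
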
diff --git a/compiler/rustc_passes/src/check_const.rs b/compiler/rustc_passes/src/check_const.rs
index 70518284c..aa726d6cd 100644
--- a/compiler/rustc_passes/src/check_const.rs
+++ b/compiler/rustc_passes/src/check_const.rs
@@ -8,7 +8,6 @@
//! through, but errors for structured control flow in a `const` should be emitted here.
use rustc_attr as attr;
-use rustc_errors::struct_span_err;
use rustc_hir as hir;
use rustc_hir::def_id::LocalDefId;
use rustc_hir::intravisit::{self, Visitor};
@@ -18,6 +17,8 @@ use rustc_middle::ty::TyCtxt;
use rustc_session::parse::feature_err;
use rustc_span::{sym, Span, Symbol};
+use crate::errors::ExprNotAllowedInContext;
+
/// An expression that is not *always* legal in a const context.
#[derive(Clone, Copy)]
enum NonConstExpr {
@@ -133,18 +134,22 @@ impl<'tcx> CheckConstVisitor<'tcx> {
let const_kind =
const_kind.expect("`const_check_violated` may only be called inside a const context");
- let msg = format!("{} is not allowed in a `{}`", expr.name(), const_kind.keyword_name());
-
let required_gates = required_gates.unwrap_or(&[]);
let missing_gates: Vec<_> =
required_gates.iter().copied().filter(|&g| !features.enabled(g)).collect();
match missing_gates.as_slice() {
[] => {
- struct_span_err!(tcx.sess, span, E0744, "{}", msg).emit();
+ tcx.sess.emit_err(ExprNotAllowedInContext {
+ span,
+ expr: expr.name(),
+ context: const_kind.keyword_name(),
+ });
}
[missing_primary, ref missing_secondary @ ..] => {
+ let msg =
+ format!("{} is not allowed in a `{}`", expr.name(), const_kind.keyword_name());
let mut err = feature_err(&tcx.sess.parse_sess, *missing_primary, span, &msg);
// If multiple feature gates would be required to enable this expression, include
@@ -191,10 +196,6 @@ impl<'tcx> Visitor<'tcx> for CheckConstVisitor<'tcx> {
self.tcx.hir()
}
- fn visit_item(&mut self, item: &'tcx hir::Item<'tcx>) {
- intravisit::walk_item(self, item);
- }
-
fn visit_anon_const(&mut self, anon: &'tcx hir::AnonConst) {
let kind = Some(hir::ConstContext::Const);
self.recurse_into(kind, None, |this| intravisit::walk_anon_const(this, anon));
diff --git a/compiler/rustc_passes/src/dead.rs b/compiler/rustc_passes/src/dead.rs
index 1e2fbeb38..753d01f46 100644
--- a/compiler/rustc_passes/src/dead.rs
+++ b/compiler/rustc_passes/src/dead.rs
@@ -11,13 +11,15 @@ use rustc_hir::def_id::{DefId, LocalDefId};
use rustc_hir::intravisit::{self, Visitor};
use rustc_hir::{Node, PatKind, TyKind};
use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrFlags;
-use rustc_middle::middle::privacy;
+use rustc_middle::middle::privacy::Level;
use rustc_middle::ty::query::Providers;
use rustc_middle::ty::{self, DefIdTree, TyCtxt};
use rustc_session::lint;
use rustc_span::symbol::{sym, Symbol};
use std::mem;
+use crate::errors::UselessAssignment;
+
// Any local node that may call something in its body block should be
// explored. For example, if it's a live Node::Item that is a
// function, then we should explore its block to check for codes that
@@ -102,14 +104,8 @@ impl<'tcx> MarkSymbolVisitor<'tcx> {
}
}
Res::Def(_, def_id) => self.check_def_id(def_id),
- Res::SelfTy { trait_: t, alias_to: i } => {
- if let Some(t) = t {
- self.check_def_id(t);
- }
- if let Some((i, _)) = i {
- self.check_def_id(i);
- }
- }
+ Res::SelfTyParam { trait_: t } => self.check_def_id(t),
+ Res::SelfTyAlias { alias_to: i, .. } => self.check_def_id(i),
Res::ToolMod | Res::NonMacroAttr(..) | Res::Err => {}
}
}
@@ -186,18 +182,11 @@ impl<'tcx> MarkSymbolVisitor<'tcx> {
&& !assign.span.from_expansion()
{
let is_field_assign = matches!(lhs.kind, hir::ExprKind::Field(..));
- self.tcx.struct_span_lint_hir(
+ self.tcx.emit_spanned_lint(
lint::builtin::DEAD_CODE,
assign.hir_id,
assign.span,
- |lint| {
- lint.build(&format!(
- "useless assignment of {} of type `{}` to itself",
- if is_field_assign { "field" } else { "variable" },
- self.typeck_results().expr_ty(lhs),
- ))
- .emit();
- },
+ UselessAssignment { is_field_assign, ty: self.typeck_results().expr_ty(lhs) }
)
}
}
@@ -226,19 +215,16 @@ impl<'tcx> MarkSymbolVisitor<'tcx> {
lhs: &hir::Pat<'_>,
res: Res,
pats: &[hir::Pat<'_>],
- dotdot: Option<usize>,
+ dotdot: hir::DotDotPos,
) {
let variant = match self.typeck_results().node_type(lhs.hir_id).kind() {
ty::Adt(adt, _) => adt.variant_of_res(res),
_ => span_bug!(lhs.span, "non-ADT in tuple struct pattern"),
};
- let first_n = pats.iter().enumerate().take(dotdot.unwrap_or(pats.len()));
+ let dotdot = dotdot.as_opt_usize().unwrap_or(pats.len());
+ let first_n = pats.iter().enumerate().take(dotdot);
let missing = variant.fields.len() - pats.len();
- let last_n = pats
- .iter()
- .enumerate()
- .skip(dotdot.unwrap_or(pats.len()))
- .map(|(idx, pat)| (idx + missing, pat));
+ let last_n = pats.iter().enumerate().skip(dotdot).map(|(idx, pat)| (idx + missing, pat));
for (idx, pat) in first_n.chain(last_n) {
if let PatKind::Wild = pat.kind {
continue;
@@ -294,8 +280,8 @@ impl<'tcx> MarkSymbolVisitor<'tcx> {
}
fn visit_node(&mut self, node: Node<'tcx>) {
- if let Node::ImplItem(hir::ImplItem { def_id, .. }) = node
- && self.should_ignore_item(def_id.to_def_id())
+ if let Node::ImplItem(hir::ImplItem { owner_id, .. }) = node
+ && self.should_ignore_item(owner_id.to_def_id())
{
return;
}
@@ -307,7 +293,7 @@ impl<'tcx> MarkSymbolVisitor<'tcx> {
match node {
Node::Item(item) => match item.kind {
hir::ItemKind::Struct(..) | hir::ItemKind::Union(..) => {
- let def = self.tcx.adt_def(item.def_id);
+ let def = self.tcx.adt_def(item.owner_id);
self.repr_has_repr_c = def.repr().c();
self.repr_has_repr_simd = def.repr().simd();
@@ -320,7 +306,7 @@ impl<'tcx> MarkSymbolVisitor<'tcx> {
intravisit::walk_trait_item(self, trait_item);
}
Node::ImplItem(impl_item) => {
- let item = self.tcx.local_parent(impl_item.def_id);
+ let item = self.tcx.local_parent(impl_item.owner_id.def_id);
if self.tcx.impl_trait_ref(item).is_none() {
//// If it's a type whose items are live, then it's live, too.
//// This is done to handle the case where, for example, the static
@@ -368,14 +354,7 @@ impl<'tcx> Visitor<'tcx> for MarkSymbolVisitor<'tcx> {
self.maybe_typeck_results = old_maybe_typeck_results;
}
- fn visit_variant_data(
- &mut self,
- def: &'tcx hir::VariantData<'tcx>,
- _: Symbol,
- _: &hir::Generics<'_>,
- _: hir::HirId,
- _: rustc_span::Span,
- ) {
+ fn visit_variant_data(&mut self, def: &'tcx hir::VariantData<'tcx>) {
let tcx = self.tcx;
let has_repr_c = self.repr_has_repr_c;
let has_repr_simd = self.repr_has_repr_simd;
@@ -384,7 +363,7 @@ impl<'tcx> Visitor<'tcx> for MarkSymbolVisitor<'tcx> {
if has_repr_c || (f.is_positional() && has_repr_simd) {
return Some(def_id);
}
- if !tcx.visibility(f.hir_id.owner).is_public() {
+ if !tcx.visibility(f.hir_id.owner.def_id).is_public() {
return None;
}
if tcx.visibility(def_id).is_public() { Some(def_id) } else { None }
@@ -457,7 +436,7 @@ impl<'tcx> Visitor<'tcx> for MarkSymbolVisitor<'tcx> {
}
fn visit_ty(&mut self, ty: &'tcx hir::Ty<'tcx>) {
- if let TyKind::OpaqueDef(item_id, _) = ty.kind {
+ if let TyKind::OpaqueDef(item_id, _, _) = ty.kind {
let item = self.tcx.hir().item(item_id);
intravisit::walk_item(self, item);
}
@@ -538,10 +517,10 @@ fn check_item<'tcx>(
) {
let allow_dead_code = has_allow_dead_code_or_lang_attr(tcx, id.hir_id());
if allow_dead_code {
- worklist.push(id.def_id);
+ worklist.push(id.owner_id.def_id);
}
- match tcx.def_kind(id.def_id) {
+ match tcx.def_kind(id.owner_id) {
DefKind::Enum => {
let item = tcx.hir().item(id);
if let hir::ItemKind::Enum(ref enum_def, _) = item.kind {
@@ -561,15 +540,15 @@ fn check_item<'tcx>(
}
}
DefKind::Impl => {
- let of_trait = tcx.impl_trait_ref(id.def_id);
+ let of_trait = tcx.impl_trait_ref(id.owner_id);
if of_trait.is_some() {
- worklist.push(id.def_id);
+ worklist.push(id.owner_id.def_id);
}
// get DefIds from another query
let local_def_ids = tcx
- .associated_item_def_ids(id.def_id)
+ .associated_item_def_ids(id.owner_id)
.iter()
.filter_map(|def_id| def_id.as_local());
@@ -587,12 +566,12 @@ fn check_item<'tcx>(
if let hir::ItemKind::Struct(ref variant_data, _) = item.kind
&& let Some(ctor_hir_id) = variant_data.ctor_hir_id()
{
- struct_constructors.insert(tcx.hir().local_def_id(ctor_hir_id), item.def_id);
+ struct_constructors.insert(tcx.hir().local_def_id(ctor_hir_id), item.owner_id.def_id);
}
}
DefKind::GlobalAsm => {
// global_asm! is always live.
- worklist.push(id.def_id);
+ worklist.push(id.owner_id.def_id);
}
_ => {}
}
@@ -600,12 +579,12 @@ fn check_item<'tcx>(
fn check_trait_item<'tcx>(tcx: TyCtxt<'tcx>, worklist: &mut Vec<LocalDefId>, id: hir::TraitItemId) {
use hir::TraitItemKind::{Const, Fn};
- if matches!(tcx.def_kind(id.def_id), DefKind::AssocConst | DefKind::AssocFn) {
+ if matches!(tcx.def_kind(id.owner_id), DefKind::AssocConst | DefKind::AssocFn) {
let trait_item = tcx.hir().trait_item(id);
if matches!(trait_item.kind, Const(_, Some(_)) | Fn(_, hir::TraitFn::Provided(_)))
&& has_allow_dead_code_or_lang_attr(tcx, trait_item.hir_id())
{
- worklist.push(trait_item.def_id);
+ worklist.push(trait_item.owner_id.def_id);
}
}
}
@@ -615,27 +594,24 @@ fn check_foreign_item<'tcx>(
worklist: &mut Vec<LocalDefId>,
id: hir::ForeignItemId,
) {
- if matches!(tcx.def_kind(id.def_id), DefKind::Static(_) | DefKind::Fn)
+ if matches!(tcx.def_kind(id.owner_id), DefKind::Static(_) | DefKind::Fn)
&& has_allow_dead_code_or_lang_attr(tcx, id.hir_id())
{
- worklist.push(id.def_id);
+ worklist.push(id.owner_id.def_id);
}
}
fn create_and_seed_worklist<'tcx>(
tcx: TyCtxt<'tcx>,
) -> (Vec<LocalDefId>, FxHashMap<LocalDefId, LocalDefId>) {
- let access_levels = &tcx.privacy_access_levels(());
+ let effective_visibilities = &tcx.effective_visibilities(());
// see `MarkSymbolVisitor::struct_constructors`
let mut struct_constructors = Default::default();
- let mut worklist = access_levels
- .map
+ let mut worklist = effective_visibilities
.iter()
- .filter_map(
- |(&id, &level)| {
- if level >= privacy::AccessLevel::Reachable { Some(id) } else { None }
- },
- )
+ .filter_map(|(&id, effective_vis)| {
+ effective_vis.is_public_at_level(Level::Reachable).then_some(id)
+ })
// Seed entry point
.chain(tcx.entry_fn(()).and_then(|(def_id, _)| def_id.as_local()))
.collect::<Vec<_>>();
@@ -736,6 +712,26 @@ impl<'tcx> DeadVisitor<'tcx> {
})
.collect();
+ let descr = tcx.def_kind(first_id).descr(first_id.to_def_id());
+ let span_len = dead_codes.len();
+ let names = match &names[..] {
+ _ if span_len > 6 => String::new(),
+ [name] => format!("`{name}` "),
+ [names @ .., last] => {
+ format!(
+ "{} and `{last}` ",
+ names.iter().map(|name| format!("`{name}`")).join(", ")
+ )
+ }
+ [] => unreachable!(),
+ };
+ let msg = format!(
+ "{these}{descr}{s} {names}{are} never {participle}",
+ these = if span_len > 6 { "multiple " } else { "" },
+ s = pluralize!(span_len),
+ are = pluralize!("is", span_len),
+ );
+
tcx.struct_span_lint_hir(
if is_positional {
lint::builtin::UNUSED_TUPLE_STRUCT_FIELDS
@@ -744,27 +740,8 @@ impl<'tcx> DeadVisitor<'tcx> {
},
tcx.hir().local_def_id_to_hir_id(first_id),
MultiSpan::from_spans(spans.clone()),
- |lint| {
- let descr = tcx.def_kind(first_id).descr(first_id.to_def_id());
- let span_len = dead_codes.len();
- let names = match &names[..] {
- _ if span_len > 6 => String::new(),
- [name] => format!("`{name}` "),
- [names @ .., last] => {
- format!(
- "{} and `{last}` ",
- names.iter().map(|name| format!("`{name}`")).join(", ")
- )
- }
- [] => unreachable!(),
- };
- let mut err = lint.build(&format!(
- "{these}{descr}{s} {names}{are} never {participle}",
- these = if span_len > 6 { "multiple " } else { "" },
- s = pluralize!(span_len),
- are = pluralize!("is", span_len),
- ));
-
+ msg,
+ |err| {
if is_positional {
err.multipart_suggestion(
&format!(
@@ -810,7 +787,7 @@ impl<'tcx> DeadVisitor<'tcx> {
);
err.note(&msg);
}
- err.emit();
+ err
},
);
}
@@ -884,19 +861,19 @@ fn check_mod_deathness(tcx: TyCtxt<'_>, module: LocalDefId) {
let module_items = tcx.hir_module_items(module);
for item in module_items.items() {
- if !live_symbols.contains(&item.def_id) {
- let parent = tcx.local_parent(item.def_id);
+ if !live_symbols.contains(&item.owner_id.def_id) {
+ let parent = tcx.local_parent(item.owner_id.def_id);
if parent != module && !live_symbols.contains(&parent) {
// We already have diagnosed something.
continue;
}
- visitor.check_definition(item.def_id);
+ visitor.check_definition(item.owner_id.def_id);
continue;
}
- let def_kind = tcx.def_kind(item.def_id);
+ let def_kind = tcx.def_kind(item.owner_id);
if let DefKind::Struct | DefKind::Union | DefKind::Enum = def_kind {
- let adt = tcx.adt_def(item.def_id);
+ let adt = tcx.adt_def(item.owner_id);
let mut dead_variants = Vec::new();
for variant in adt.variants() {
@@ -939,16 +916,21 @@ fn check_mod_deathness(tcx: TyCtxt<'_>, module: LocalDefId) {
visitor.warn_dead_fields_and_variants(def_id, "read", dead_fields, is_positional)
}
- visitor.warn_dead_fields_and_variants(item.def_id, "constructed", dead_variants, false);
+ visitor.warn_dead_fields_and_variants(
+ item.owner_id.def_id,
+ "constructed",
+ dead_variants,
+ false,
+ );
}
}
for impl_item in module_items.impl_items() {
- visitor.check_definition(impl_item.def_id);
+ visitor.check_definition(impl_item.owner_id.def_id);
}
for foreign_item in module_items.foreign_items() {
- visitor.check_definition(foreign_item.def_id);
+ visitor.check_definition(foreign_item.owner_id.def_id);
}
// We do not warn trait items.
diff --git a/compiler/rustc_passes/src/debugger_visualizer.rs b/compiler/rustc_passes/src/debugger_visualizer.rs
index e08683fe2..253b0a88e 100644
--- a/compiler/rustc_passes/src/debugger_visualizer.rs
+++ b/compiler/rustc_passes/src/debugger_visualizer.rs
@@ -13,6 +13,8 @@ use rustc_span::{sym, DebuggerVisualizerFile, DebuggerVisualizerType};
use std::sync::Arc;
+use crate::errors::DebugVisualizerUnreadable;
+
fn check_for_debugger_visualizer<'tcx>(
tcx: TyCtxt<'tcx>,
hir_id: HirId,
@@ -54,13 +56,12 @@ fn check_for_debugger_visualizer<'tcx>(
debugger_visualizers
.insert(DebuggerVisualizerFile::new(Arc::from(contents), visualizer_type));
}
- Err(err) => {
- tcx.sess
- .struct_span_err(
- meta_item.span,
- &format!("couldn't read {}: {}", file.display(), err),
- )
- .emit();
+ Err(error) => {
+ tcx.sess.emit_err(DebugVisualizerUnreadable {
+ span: meta_item.span,
+ file: &file,
+ error,
+ });
}
}
}
diff --git a/compiler/rustc_passes/src/diagnostic_items.rs b/compiler/rustc_passes/src/diagnostic_items.rs
index e6b69d898..a72056e00 100644
--- a/compiler/rustc_passes/src/diagnostic_items.rs
+++ b/compiler/rustc_passes/src/diagnostic_items.rs
@@ -14,7 +14,9 @@ use rustc_hir::diagnostic_items::DiagnosticItems;
use rustc_middle::ty::query::Providers;
use rustc_middle::ty::TyCtxt;
use rustc_span::def_id::{CrateNum, DefId, LocalDefId, LOCAL_CRATE};
-use rustc_span::symbol::{sym, Symbol};
+use rustc_span::symbol::{kw::Empty, sym, Symbol};
+
+use crate::errors::{DuplicateDiagnosticItem, DuplicateDiagnosticItemInCrate};
fn observe_item<'tcx>(
tcx: TyCtxt<'tcx>,
@@ -33,25 +35,22 @@ fn collect_item(tcx: TyCtxt<'_>, items: &mut DiagnosticItems, name: Symbol, item
items.id_to_name.insert(item_def_id, name);
if let Some(original_def_id) = items.name_to_id.insert(name, item_def_id) {
if original_def_id != item_def_id {
- let mut err = match tcx.hir().span_if_local(item_def_id) {
- Some(span) => tcx
- .sess
- .struct_span_err(span, &format!("duplicate diagnostic item found: `{name}`.")),
- None => tcx.sess.struct_err(&format!(
- "duplicate diagnostic item in crate `{}`: `{}`.",
- tcx.crate_name(item_def_id.krate),
- name
- )),
- };
- if let Some(span) = tcx.hir().span_if_local(original_def_id) {
- err.span_note(span, "the diagnostic item is first defined here");
+ let orig_span = tcx.hir().span_if_local(original_def_id);
+ let orig_crate_name = if orig_span.is_some() {
+ None
} else {
- err.note(&format!(
- "the diagnostic item is first defined in crate `{}`.",
- tcx.crate_name(original_def_id.krate)
- ));
- }
- err.emit();
+ Some(tcx.crate_name(original_def_id.krate))
+ };
+ match tcx.hir().span_if_local(item_def_id) {
+ Some(span) => tcx.sess.emit_err(DuplicateDiagnosticItem { span, name }),
+ None => tcx.sess.emit_err(DuplicateDiagnosticItemInCrate {
+ span: orig_span,
+ orig_crate_name: orig_crate_name.unwrap_or(Empty),
+ have_orig_crate_name: orig_crate_name.map(|_| ()),
+ crate_name: tcx.crate_name(item_def_id.krate),
+ name,
+ }),
+ };
}
}
}
@@ -74,19 +73,19 @@ fn diagnostic_items<'tcx>(tcx: TyCtxt<'tcx>, cnum: CrateNum) -> DiagnosticItems
let crate_items = tcx.hir_crate_items(());
for id in crate_items.items() {
- observe_item(tcx, &mut diagnostic_items, id.def_id);
+ observe_item(tcx, &mut diagnostic_items, id.owner_id.def_id);
}
for id in crate_items.trait_items() {
- observe_item(tcx, &mut diagnostic_items, id.def_id);
+ observe_item(tcx, &mut diagnostic_items, id.owner_id.def_id);
}
for id in crate_items.impl_items() {
- observe_item(tcx, &mut diagnostic_items, id.def_id);
+ observe_item(tcx, &mut diagnostic_items, id.owner_id.def_id);
}
for id in crate_items.foreign_items() {
- observe_item(tcx, &mut diagnostic_items, id.def_id);
+ observe_item(tcx, &mut diagnostic_items, id.owner_id.def_id);
}
diagnostic_items
diff --git a/compiler/rustc_passes/src/entry.rs b/compiler/rustc_passes/src/entry.rs
index 7381019a6..5885f45ae 100644
--- a/compiler/rustc_passes/src/entry.rs
+++ b/compiler/rustc_passes/src/entry.rs
@@ -1,14 +1,19 @@
-use rustc_ast::{entry::EntryPointType, Attribute};
-use rustc_errors::struct_span_err;
+use rustc_ast::entry::EntryPointType;
+use rustc_errors::error_code;
use rustc_hir::def::DefKind;
use rustc_hir::def_id::{DefId, LocalDefId, CRATE_DEF_ID, LOCAL_CRATE};
use rustc_hir::{ItemId, Node, CRATE_HIR_ID};
use rustc_middle::ty::query::Providers;
use rustc_middle::ty::{DefIdTree, TyCtxt};
-use rustc_session::config::{CrateType, EntryFnType};
+use rustc_session::config::{sigpipe, CrateType, EntryFnType};
use rustc_session::parse::feature_err;
use rustc_span::symbol::sym;
-use rustc_span::{Span, Symbol, DUMMY_SP};
+use rustc_span::{Span, Symbol};
+
+use crate::errors::{
+ AttrOnlyInFunctions, AttrOnlyOnMain, AttrOnlyOnRootMain, ExternMain, MultipleRustcMain,
+ MultipleStartFunctions, NoMainErr, UnixSigpipeValues,
+};
struct EntryContext<'tcx> {
tcx: TyCtxt<'tcx>,
@@ -57,7 +62,7 @@ fn entry_point_type(ctxt: &EntryContext<'_>, id: ItemId, at_root: bool) -> Entry
} else if ctxt.tcx.sess.contains_name(attrs, sym::rustc_main) {
EntryPointType::RustcMainAttr
} else {
- if let Some(name) = ctxt.tcx.opt_item_name(id.def_id.to_def_id())
+ if let Some(name) = ctxt.tcx.opt_item_name(id.owner_id.to_def_id())
&& name == sym::main {
if at_root {
// This is a top-level function so can be `main`.
@@ -71,63 +76,57 @@ fn entry_point_type(ctxt: &EntryContext<'_>, id: ItemId, at_root: bool) -> Entry
}
}
-fn err_if_attr_found(ctxt: &EntryContext<'_>, attrs: &[Attribute], sym: Symbol) {
- if let Some(attr) = ctxt.tcx.sess.find_by_name(attrs, sym) {
- ctxt.tcx
- .sess
- .struct_span_err(
- attr.span,
- &format!("`{}` attribute can only be used on functions", sym),
- )
- .emit();
- }
+fn attr_span_by_symbol(ctxt: &EntryContext<'_>, id: ItemId, sym: Symbol) -> Option<Span> {
+ let attrs = ctxt.tcx.hir().attrs(id.hir_id());
+ ctxt.tcx.sess.find_by_name(attrs, sym).map(|attr| attr.span)
}
fn find_item(id: ItemId, ctxt: &mut EntryContext<'_>) {
- let at_root = ctxt.tcx.opt_local_parent(id.def_id) == Some(CRATE_DEF_ID);
+ let at_root = ctxt.tcx.opt_local_parent(id.owner_id.def_id) == Some(CRATE_DEF_ID);
match entry_point_type(ctxt, id, at_root) {
- EntryPointType::None => (),
- _ if !matches!(ctxt.tcx.def_kind(id.def_id), DefKind::Fn) => {
- let attrs = ctxt.tcx.hir().attrs(id.hir_id());
- err_if_attr_found(ctxt, attrs, sym::start);
- err_if_attr_found(ctxt, attrs, sym::rustc_main);
+ EntryPointType::None => {
+ if let Some(span) = attr_span_by_symbol(ctxt, id, sym::unix_sigpipe) {
+ ctxt.tcx.sess.emit_err(AttrOnlyOnMain { span, attr: sym::unix_sigpipe });
+ }
+ }
+ _ if !matches!(ctxt.tcx.def_kind(id.owner_id), DefKind::Fn) => {
+ for attr in [sym::start, sym::rustc_main] {
+ if let Some(span) = attr_span_by_symbol(ctxt, id, attr) {
+ ctxt.tcx.sess.emit_err(AttrOnlyInFunctions { span, attr });
+ }
+ }
}
EntryPointType::MainNamed => (),
EntryPointType::OtherMain => {
- ctxt.non_main_fns.push(ctxt.tcx.def_span(id.def_id));
+ if let Some(span) = attr_span_by_symbol(ctxt, id, sym::unix_sigpipe) {
+ ctxt.tcx.sess.emit_err(AttrOnlyOnRootMain { span, attr: sym::unix_sigpipe });
+ }
+ ctxt.non_main_fns.push(ctxt.tcx.def_span(id.owner_id));
}
EntryPointType::RustcMainAttr => {
if ctxt.attr_main_fn.is_none() {
- ctxt.attr_main_fn = Some((id.def_id, ctxt.tcx.def_span(id.def_id.to_def_id())));
+ ctxt.attr_main_fn = Some((id.owner_id.def_id, ctxt.tcx.def_span(id.owner_id)));
} else {
- struct_span_err!(
- ctxt.tcx.sess,
- ctxt.tcx.def_span(id.def_id.to_def_id()),
- E0137,
- "multiple functions with a `#[rustc_main]` attribute"
- )
- .span_label(
- ctxt.tcx.def_span(id.def_id.to_def_id()),
- "additional `#[rustc_main]` function",
- )
- .span_label(ctxt.attr_main_fn.unwrap().1, "first `#[rustc_main]` function")
- .emit();
+ ctxt.tcx.sess.emit_err(MultipleRustcMain {
+ span: ctxt.tcx.def_span(id.owner_id.to_def_id()),
+ first: ctxt.attr_main_fn.unwrap().1,
+ additional: ctxt.tcx.def_span(id.owner_id.to_def_id()),
+ });
}
}
EntryPointType::Start => {
+ if let Some(span) = attr_span_by_symbol(ctxt, id, sym::unix_sigpipe) {
+ ctxt.tcx.sess.emit_err(AttrOnlyOnMain { span, attr: sym::unix_sigpipe });
+ }
if ctxt.start_fn.is_none() {
- ctxt.start_fn = Some((id.def_id, ctxt.tcx.def_span(id.def_id.to_def_id())));
+ ctxt.start_fn = Some((id.owner_id.def_id, ctxt.tcx.def_span(id.owner_id)));
} else {
- struct_span_err!(
- ctxt.tcx.sess,
- ctxt.tcx.def_span(id.def_id.to_def_id()),
- E0138,
- "multiple `start` functions"
- )
- .span_label(ctxt.start_fn.unwrap().1, "previous `#[start]` function here")
- .span_label(ctxt.tcx.def_span(id.def_id.to_def_id()), "multiple `start` functions")
- .emit();
+ ctxt.tcx.sess.emit_err(MultipleStartFunctions {
+ span: ctxt.tcx.def_span(id.owner_id),
+ labeled: ctxt.tcx.def_span(id.owner_id.to_def_id()),
+ previous: ctxt.start_fn.unwrap().1,
+ });
}
}
}
@@ -136,18 +135,14 @@ fn find_item(id: ItemId, ctxt: &mut EntryContext<'_>) {
fn configure_main(tcx: TyCtxt<'_>, visitor: &EntryContext<'_>) -> Option<(DefId, EntryFnType)> {
if let Some((def_id, _)) = visitor.start_fn {
Some((def_id.to_def_id(), EntryFnType::Start))
- } else if let Some((def_id, _)) = visitor.attr_main_fn {
- Some((def_id.to_def_id(), EntryFnType::Main))
+ } else if let Some((local_def_id, _)) = visitor.attr_main_fn {
+ let def_id = local_def_id.to_def_id();
+ Some((def_id, EntryFnType::Main { sigpipe: sigpipe(tcx, def_id) }))
} else {
if let Some(main_def) = tcx.resolutions(()).main_def && let Some(def_id) = main_def.opt_fn_def_id() {
// non-local main imports are handled below
if let Some(def_id) = def_id.as_local() && matches!(tcx.hir().find_by_def_id(def_id), Some(Node::ForeignItem(_))) {
- tcx.sess
- .struct_span_err(
- tcx.def_span(def_id),
- "the `main` function cannot be declared in an `extern` block",
- )
- .emit();
+ tcx.sess.emit_err(ExternMain { span: tcx.def_span(def_id) });
return None;
}
@@ -161,13 +156,34 @@ fn configure_main(tcx: TyCtxt<'_>, visitor: &EntryContext<'_>) -> Option<(DefId,
)
.emit();
}
- return Some((def_id, EntryFnType::Main));
+ return Some((def_id, EntryFnType::Main { sigpipe: sigpipe(tcx, def_id) }));
}
no_main_err(tcx, visitor);
None
}
}
+fn sigpipe(tcx: TyCtxt<'_>, def_id: DefId) -> u8 {
+ if let Some(attr) = tcx.get_attr(def_id, sym::unix_sigpipe) {
+ match (attr.value_str(), attr.meta_item_list()) {
+ (Some(sym::inherit), None) => sigpipe::INHERIT,
+ (Some(sym::sig_ign), None) => sigpipe::SIG_IGN,
+ (Some(sym::sig_dfl), None) => sigpipe::SIG_DFL,
+ (_, Some(_)) => {
+ // Keep going so that `fn emit_malformed_attribute()` can print
+ // an excellent error message
+ sigpipe::DEFAULT
+ }
+ _ => {
+ tcx.sess.emit_err(UnixSigpipeValues { span: attr.span });
+ sigpipe::DEFAULT
+ }
+ }
+ } else {
+ sigpipe::DEFAULT
+ }
+}
+
fn no_main_err(tcx: TyCtxt<'_>, visitor: &EntryContext<'_>) {
let sp = tcx.def_span(CRATE_DEF_ID);
if *tcx.sess.parse_sess.reached_eof.borrow() {
@@ -178,52 +194,29 @@ fn no_main_err(tcx: TyCtxt<'_>, visitor: &EntryContext<'_>) {
}
// There is no main function.
- let mut err = struct_span_err!(
- tcx.sess,
- DUMMY_SP,
- E0601,
- "`main` function not found in crate `{}`",
- tcx.crate_name(LOCAL_CRATE)
- );
- let filename = &tcx.sess.local_crate_source_file;
- let note = if !visitor.non_main_fns.is_empty() {
- for &span in &visitor.non_main_fns {
- err.span_note(span, "here is a function named `main`");
- }
- err.note("you have one or more functions named `main` not defined at the crate level");
- err.help("consider moving the `main` function definitions");
- // There were some functions named `main` though. Try to give the user a hint.
- format!(
- "the main function must be defined at the crate level{}",
- filename.as_ref().map(|f| format!(" (in `{}`)", f.display())).unwrap_or_default()
- )
- } else if let Some(filename) = filename {
- format!("consider adding a `main` function to `{}`", filename.display())
- } else {
- String::from("consider adding a `main` function at the crate level")
- };
+ let mut has_filename = true;
+ let filename = tcx.sess.local_crate_source_file.clone().unwrap_or_else(|| {
+ has_filename = false;
+ Default::default()
+ });
+ let main_def_opt = tcx.resolutions(()).main_def;
+ let diagnostic_id = error_code!(E0601);
+ let add_teach_note = tcx.sess.teach(&diagnostic_id);
// The file may be empty, which leads to the diagnostic machinery not emitting this
// note. This is a relatively simple way to detect that case and emit a span-less
// note instead.
- if tcx.sess.source_map().lookup_line(sp.hi()).is_ok() {
- err.set_span(sp.shrink_to_hi());
- err.span_label(sp.shrink_to_hi(), &note);
- } else {
- err.note(&note);
- }
-
- if let Some(main_def) = tcx.resolutions(()).main_def && main_def.opt_fn_def_id().is_none(){
- // There is something at `crate::main`, but it is not a function definition.
- err.span_label(main_def.span, "non-function item at `crate::main` is found");
- }
-
- if tcx.sess.teach(&err.get_code().unwrap()) {
- err.note(
- "If you don't know the basics of Rust, you can go look to the Rust Book \
- to get started: https://doc.rust-lang.org/book/",
- );
- }
- err.emit();
+ let file_empty = !tcx.sess.source_map().lookup_line(sp.hi()).is_ok();
+
+ tcx.sess.emit_err(NoMainErr {
+ sp,
+ crate_name: tcx.crate_name(LOCAL_CRATE),
+ has_filename,
+ filename,
+ file_empty,
+ non_main_fns: visitor.non_main_fns.clone(),
+ main_def_opt,
+ add_teach_note,
+ });
}
pub fn provide(providers: &mut Providers) {
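
The errors.rs diff below is the other half of this migration: the free-form `struct_span_err!`/`struct_span_err` calls removed throughout the files above are replaced by typed diagnostic structs that derive `Diagnostic` (formerly `SessionDiagnostic`) and are emitted with `tcx.sess.emit_err`. As an illustrative sketch only — the struct name, fluent slug, error code, and fields here are hypothetical, not taken from this patch:

    use rustc_macros::Diagnostic;
    use rustc_span::Span;

    // Hypothetical diagnostic; the real ones live in compiler/rustc_passes/src/errors.rs.
    #[derive(Diagnostic)]
    #[diag(passes_example_not_allowed, code = "E0000")]
    pub struct ExampleNotAllowed {
        #[primary_span]
        #[label]
        pub span: Span,
        pub name: String,
    }

    // At the emission site, instead of building the message by hand:
    //     struct_span_err!(tcx.sess, span, E0000, "`{name}` is not allowed").emit();
    // the typed struct is handed to the session:
    //     tcx.sess.emit_err(ExampleNotAllowed { span, name });
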
diff --git a/compiler/rustc_passes/src/errors.rs b/compiler/rustc_passes/src/errors.rs
index 5feb0e295..adaaf5392 100644
--- a/compiler/rustc_passes/src/errors.rs
+++ b/compiler/rustc_passes/src/errors.rs
@@ -1,39 +1,49 @@
-use rustc_errors::{Applicability, MultiSpan};
-use rustc_macros::{LintDiagnostic, SessionDiagnostic, SessionSubdiagnostic};
-use rustc_span::{Span, Symbol};
+use std::{
+ io::Error,
+ path::{Path, PathBuf},
+};
+
+use rustc_ast::Label;
+use rustc_errors::{error_code, Applicability, ErrorGuaranteed, IntoDiagnostic, MultiSpan};
+use rustc_hir::{self as hir, ExprKind, Target};
+use rustc_macros::{Diagnostic, LintDiagnostic, Subdiagnostic};
+use rustc_middle::ty::{MainDefinition, Ty};
+use rustc_span::{Span, Symbol, DUMMY_SP};
+
+use crate::lang_items::Duplicate;
#[derive(LintDiagnostic)]
-#[lint(passes::outer_crate_level_attr)]
+#[diag(passes_outer_crate_level_attr)]
pub struct OuterCrateLevelAttr;
#[derive(LintDiagnostic)]
-#[lint(passes::inner_crate_level_attr)]
+#[diag(passes_inner_crate_level_attr)]
pub struct InnerCrateLevelAttr;
#[derive(LintDiagnostic)]
-#[lint(passes::ignored_attr_with_macro)]
+#[diag(passes_ignored_attr_with_macro)]
pub struct IgnoredAttrWithMacro<'a> {
pub sym: &'a str,
}
#[derive(LintDiagnostic)]
-#[lint(passes::ignored_attr)]
+#[diag(passes_ignored_attr)]
pub struct IgnoredAttr<'a> {
pub sym: &'a str,
}
#[derive(LintDiagnostic)]
-#[lint(passes::inline_ignored_function_prototype)]
+#[diag(passes_inline_ignored_function_prototype)]
pub struct IgnoredInlineAttrFnProto;
#[derive(LintDiagnostic)]
-#[lint(passes::inline_ignored_constants)]
-#[warn_]
+#[diag(passes_inline_ignored_constants)]
+#[warning]
#[note]
pub struct IgnoredInlineAttrConstants;
-#[derive(SessionDiagnostic)]
-#[error(passes::inline_not_fn_or_closure, code = "E0518")]
+#[derive(Diagnostic)]
+#[diag(passes_inline_not_fn_or_closure, code = "E0518")]
pub struct InlineNotFnOrClosure {
#[primary_span]
pub attr_span: Span,
@@ -42,19 +52,19 @@ pub struct InlineNotFnOrClosure {
}
#[derive(LintDiagnostic)]
-#[lint(passes::no_coverage_ignored_function_prototype)]
+#[diag(passes_no_coverage_ignored_function_prototype)]
pub struct IgnoredNoCoverageFnProto;
#[derive(LintDiagnostic)]
-#[lint(passes::no_coverage_propagate)]
+#[diag(passes_no_coverage_propagate)]
pub struct IgnoredNoCoveragePropagate;
#[derive(LintDiagnostic)]
-#[lint(passes::no_coverage_fn_defn)]
+#[diag(passes_no_coverage_fn_defn)]
pub struct IgnoredNoCoverageFnDefn;
-#[derive(SessionDiagnostic)]
-#[error(passes::no_coverage_not_coverable, code = "E0788")]
+#[derive(Diagnostic)]
+#[diag(passes_no_coverage_not_coverable, code = "E0788")]
pub struct IgnoredNoCoverageNotCoverable {
#[primary_span]
pub attr_span: Span,
@@ -62,8 +72,8 @@ pub struct IgnoredNoCoverageNotCoverable {
pub defn_span: Span,
}
-#[derive(SessionDiagnostic)]
-#[error(passes::should_be_applied_to_fn)]
+#[derive(Diagnostic)]
+#[diag(passes_should_be_applied_to_fn)]
pub struct AttrShouldBeAppliedToFn {
#[primary_span]
pub attr_span: Span,
@@ -71,15 +81,15 @@ pub struct AttrShouldBeAppliedToFn {
pub defn_span: Span,
}
-#[derive(SessionDiagnostic)]
-#[error(passes::naked_tracked_caller, code = "E0736")]
+#[derive(Diagnostic)]
+#[diag(passes_naked_tracked_caller, code = "E0736")]
pub struct NakedTrackedCaller {
#[primary_span]
pub attr_span: Span,
}
-#[derive(SessionDiagnostic)]
-#[error(passes::should_be_applied_to_fn, code = "E0739")]
+#[derive(Diagnostic)]
+#[diag(passes_should_be_applied_to_fn, code = "E0739")]
pub struct TrackedCallerWrongLocation {
#[primary_span]
pub attr_span: Span,
@@ -87,8 +97,8 @@ pub struct TrackedCallerWrongLocation {
pub defn_span: Span,
}
-#[derive(SessionDiagnostic)]
-#[error(passes::should_be_applied_to_struct_enum, code = "E0701")]
+#[derive(Diagnostic)]
+#[diag(passes_should_be_applied_to_struct_enum, code = "E0701")]
pub struct NonExhaustiveWrongLocation {
#[primary_span]
pub attr_span: Span,
@@ -96,8 +106,8 @@ pub struct NonExhaustiveWrongLocation {
pub defn_span: Span,
}
-#[derive(SessionDiagnostic)]
-#[error(passes::should_be_applied_to_trait)]
+#[derive(Diagnostic)]
+#[diag(passes_should_be_applied_to_trait)]
pub struct AttrShouldBeAppliedToTrait {
#[primary_span]
pub attr_span: Span,
@@ -106,11 +116,11 @@ pub struct AttrShouldBeAppliedToTrait {
}
#[derive(LintDiagnostic)]
-#[lint(passes::target_feature_on_statement)]
+#[diag(passes_target_feature_on_statement)]
pub struct TargetFeatureOnStatement;
-#[derive(SessionDiagnostic)]
-#[error(passes::should_be_applied_to_static)]
+#[derive(Diagnostic)]
+#[diag(passes_should_be_applied_to_static)]
pub struct AttrShouldBeAppliedToStatic {
#[primary_span]
pub attr_span: Span,
@@ -118,24 +128,24 @@ pub struct AttrShouldBeAppliedToStatic {
pub defn_span: Span,
}
-#[derive(SessionDiagnostic)]
-#[error(passes::doc_expect_str)]
+#[derive(Diagnostic)]
+#[diag(passes_doc_expect_str)]
pub struct DocExpectStr<'a> {
#[primary_span]
pub attr_span: Span,
pub attr_name: &'a str,
}
-#[derive(SessionDiagnostic)]
-#[error(passes::doc_alias_empty)]
+#[derive(Diagnostic)]
+#[diag(passes_doc_alias_empty)]
pub struct DocAliasEmpty<'a> {
#[primary_span]
pub span: Span,
pub attr_str: &'a str,
}
-#[derive(SessionDiagnostic)]
-#[error(passes::doc_alias_bad_char)]
+#[derive(Diagnostic)]
+#[diag(passes_doc_alias_bad_char)]
pub struct DocAliasBadChar<'a> {
#[primary_span]
pub span: Span,
@@ -143,16 +153,16 @@ pub struct DocAliasBadChar<'a> {
pub char_: char,
}
-#[derive(SessionDiagnostic)]
-#[error(passes::doc_alias_start_end)]
+#[derive(Diagnostic)]
+#[diag(passes_doc_alias_start_end)]
pub struct DocAliasStartEnd<'a> {
#[primary_span]
pub span: Span,
pub attr_str: &'a str,
}
-#[derive(SessionDiagnostic)]
-#[error(passes::doc_alias_bad_location)]
+#[derive(Diagnostic)]
+#[diag(passes_doc_alias_bad_location)]
pub struct DocAliasBadLocation<'a> {
#[primary_span]
pub span: Span,
@@ -160,8 +170,8 @@ pub struct DocAliasBadLocation<'a> {
pub location: &'a str,
}
-#[derive(SessionDiagnostic)]
-#[error(passes::doc_alias_not_an_alias)]
+#[derive(Diagnostic)]
+#[diag(passes_doc_alias_not_an_alias)]
pub struct DocAliasNotAnAlias<'a> {
#[primary_span]
pub span: Span,
@@ -169,64 +179,64 @@ pub struct DocAliasNotAnAlias<'a> {
}
#[derive(LintDiagnostic)]
-#[lint(passes::doc_alias_duplicated)]
+#[diag(passes_doc_alias_duplicated)]
pub struct DocAliasDuplicated {
#[label]
pub first_defn: Span,
}
-#[derive(SessionDiagnostic)]
-#[error(passes::doc_alias_not_string_literal)]
+#[derive(Diagnostic)]
+#[diag(passes_doc_alias_not_string_literal)]
pub struct DocAliasNotStringLiteral {
#[primary_span]
pub span: Span,
}
-#[derive(SessionDiagnostic)]
-#[error(passes::doc_alias_malformed)]
+#[derive(Diagnostic)]
+#[diag(passes_doc_alias_malformed)]
pub struct DocAliasMalformed {
#[primary_span]
pub span: Span,
}
-#[derive(SessionDiagnostic)]
-#[error(passes::doc_keyword_empty_mod)]
+#[derive(Diagnostic)]
+#[diag(passes_doc_keyword_empty_mod)]
pub struct DocKeywordEmptyMod {
#[primary_span]
pub span: Span,
}
-#[derive(SessionDiagnostic)]
-#[error(passes::doc_keyword_not_mod)]
+#[derive(Diagnostic)]
+#[diag(passes_doc_keyword_not_mod)]
pub struct DocKeywordNotMod {
#[primary_span]
pub span: Span,
}
-#[derive(SessionDiagnostic)]
-#[error(passes::doc_keyword_invalid_ident)]
+#[derive(Diagnostic)]
+#[diag(passes_doc_keyword_invalid_ident)]
pub struct DocKeywordInvalidIdent {
#[primary_span]
pub span: Span,
pub doc_keyword: Symbol,
}
-#[derive(SessionDiagnostic)]
-#[error(passes::doc_fake_variadic_not_valid)]
+#[derive(Diagnostic)]
+#[diag(passes_doc_fake_variadic_not_valid)]
pub struct DocFakeVariadicNotValid {
#[primary_span]
pub span: Span,
}
-#[derive(SessionDiagnostic)]
-#[error(passes::doc_keyword_only_impl)]
+#[derive(Diagnostic)]
+#[diag(passes_doc_keyword_only_impl)]
pub struct DocKeywordOnlyImpl {
#[primary_span]
pub span: Span,
}
-#[derive(SessionDiagnostic)]
-#[error(passes::doc_inline_conflict)]
+#[derive(Diagnostic)]
+#[diag(passes_doc_inline_conflict)]
#[help]
pub struct DocKeywordConflict {
#[primary_span]
@@ -234,17 +244,17 @@ pub struct DocKeywordConflict {
}
#[derive(LintDiagnostic)]
-#[lint(passes::doc_inline_only_use)]
+#[diag(passes_doc_inline_only_use)]
#[note]
pub struct DocInlineOnlyUse {
#[label]
pub attr_span: Span,
- #[label(passes::not_a_use_item_label)]
+ #[label(not_a_use_item_label)]
pub item_span: Option<Span>,
}
-#[derive(SessionDiagnostic)]
-#[error(passes::doc_attr_not_crate_level)]
+#[derive(Diagnostic)]
+#[diag(passes_doc_attr_not_crate_level)]
pub struct DocAttrNotCrateLevel<'a> {
#[primary_span]
pub span: Span,
@@ -252,29 +262,33 @@ pub struct DocAttrNotCrateLevel<'a> {
}
#[derive(LintDiagnostic)]
-#[lint(passes::doc_test_unknown)]
+#[diag(passes_doc_test_unknown)]
pub struct DocTestUnknown {
pub path: String,
}
#[derive(LintDiagnostic)]
-#[lint(passes::doc_test_takes_list)]
+#[diag(passes_doc_test_takes_list)]
pub struct DocTestTakesList;
#[derive(LintDiagnostic)]
-#[lint(passes::doc_primitive)]
+#[diag(passes_doc_cfg_hide_takes_list)]
+pub struct DocCfgHideTakesList;
+
+#[derive(LintDiagnostic)]
+#[diag(passes_doc_primitive)]
pub struct DocPrimitive;
#[derive(LintDiagnostic)]
-#[lint(passes::doc_test_unknown_any)]
+#[diag(passes_doc_test_unknown_any)]
pub struct DocTestUnknownAny {
pub path: String,
}
#[derive(LintDiagnostic)]
-#[lint(passes::doc_test_unknown_spotlight)]
+#[diag(passes_doc_test_unknown_spotlight)]
#[note]
-#[note(passes::no_op_note)]
+#[note(no_op_note)]
pub struct DocTestUnknownSpotlight {
pub path: String,
#[suggestion_short(applicability = "machine-applicable", code = "notable_trait")]
@@ -282,7 +296,7 @@ pub struct DocTestUnknownSpotlight {
}
#[derive(LintDiagnostic)]
-#[lint(passes::doc_test_unknown_include)]
+#[diag(passes_doc_test_unknown_include)]
pub struct DocTestUnknownInclude {
pub path: String,
pub value: String,
@@ -292,11 +306,11 @@ pub struct DocTestUnknownInclude {
}
#[derive(LintDiagnostic)]
-#[lint(passes::doc_invalid)]
+#[diag(passes_doc_invalid)]
pub struct DocInvalid;
-#[derive(SessionDiagnostic)]
-#[error(passes::pass_by_value)]
+#[derive(Diagnostic)]
+#[diag(passes_pass_by_value)]
pub struct PassByValue {
#[primary_span]
pub attr_span: Span,
@@ -304,8 +318,8 @@ pub struct PassByValue {
pub span: Span,
}
-#[derive(SessionDiagnostic)]
-#[error(passes::allow_incoherent_impl)]
+#[derive(Diagnostic)]
+#[diag(passes_allow_incoherent_impl)]
pub struct AllowIncoherentImpl {
#[primary_span]
pub attr_span: Span,
@@ -313,8 +327,8 @@ pub struct AllowIncoherentImpl {
pub span: Span,
}
-#[derive(SessionDiagnostic)]
-#[error(passes::has_incoherent_inherent_impl)]
+#[derive(Diagnostic)]
+#[diag(passes_has_incoherent_inherent_impl)]
pub struct HasIncoherentInherentImpl {
#[primary_span]
pub attr_span: Span,
@@ -323,21 +337,21 @@ pub struct HasIncoherentInherentImpl {
}
#[derive(LintDiagnostic)]
-#[lint(passes::must_use_async)]
+#[diag(passes_must_use_async)]
pub struct MustUseAsync {
#[label]
pub span: Span,
}
#[derive(LintDiagnostic)]
-#[lint(passes::must_use_no_effect)]
+#[diag(passes_must_use_no_effect)]
pub struct MustUseNoEffect {
pub article: &'static str,
pub target: rustc_hir::Target,
}
-#[derive(SessionDiagnostic)]
-#[error(passes::must_not_suspend)]
+#[derive(Diagnostic)]
+#[diag(passes_must_not_suspend)]
pub struct MustNotSuspend {
#[primary_span]
pub attr_span: Span,
@@ -346,24 +360,24 @@ pub struct MustNotSuspend {
}
#[derive(LintDiagnostic)]
-#[lint(passes::cold)]
-#[warn_]
+#[diag(passes_cold)]
+#[warning]
pub struct Cold {
#[label]
pub span: Span,
}
#[derive(LintDiagnostic)]
-#[lint(passes::link)]
-#[warn_]
+#[diag(passes_link)]
+#[warning]
pub struct Link {
#[label]
pub span: Option<Span>,
}
#[derive(LintDiagnostic)]
-#[lint(passes::link_name)]
-#[warn_]
+#[diag(passes_link_name)]
+#[warning]
pub struct LinkName<'a> {
#[help]
pub attr_span: Option<Span>,
@@ -372,8 +386,8 @@ pub struct LinkName<'a> {
pub value: &'a str,
}
-#[derive(SessionDiagnostic)]
-#[error(passes::no_link)]
+#[derive(Diagnostic)]
+#[diag(passes_no_link)]
pub struct NoLink {
#[primary_span]
pub attr_span: Span,
@@ -381,8 +395,8 @@ pub struct NoLink {
pub span: Span,
}
-#[derive(SessionDiagnostic)]
-#[error(passes::export_name)]
+#[derive(Diagnostic)]
+#[diag(passes_export_name)]
pub struct ExportName {
#[primary_span]
pub attr_span: Span,
@@ -390,8 +404,8 @@ pub struct ExportName {
pub span: Span,
}
-#[derive(SessionDiagnostic)]
-#[error(passes::rustc_layout_scalar_valid_range_not_struct)]
+#[derive(Diagnostic)]
+#[diag(passes_rustc_layout_scalar_valid_range_not_struct)]
pub struct RustcLayoutScalarValidRangeNotStruct {
#[primary_span]
pub attr_span: Span,
@@ -399,15 +413,15 @@ pub struct RustcLayoutScalarValidRangeNotStruct {
pub span: Span,
}
-#[derive(SessionDiagnostic)]
-#[error(passes::rustc_layout_scalar_valid_range_arg)]
+#[derive(Diagnostic)]
+#[diag(passes_rustc_layout_scalar_valid_range_arg)]
pub struct RustcLayoutScalarValidRangeArg {
#[primary_span]
pub attr_span: Span,
}
-#[derive(SessionDiagnostic)]
-#[error(passes::rustc_legacy_const_generics_only)]
+#[derive(Diagnostic)]
+#[diag(passes_rustc_legacy_const_generics_only)]
pub struct RustcLegacyConstGenericsOnly {
#[primary_span]
pub attr_span: Span,
@@ -415,8 +429,8 @@ pub struct RustcLegacyConstGenericsOnly {
pub param_span: Span,
}
-#[derive(SessionDiagnostic)]
-#[error(passes::rustc_legacy_const_generics_index)]
+#[derive(Diagnostic)]
+#[diag(passes_rustc_legacy_const_generics_index)]
pub struct RustcLegacyConstGenericsIndex {
#[primary_span]
pub attr_span: Span,
@@ -424,8 +438,8 @@ pub struct RustcLegacyConstGenericsIndex {
pub generics_span: Span,
}
-#[derive(SessionDiagnostic)]
-#[error(passes::rustc_legacy_const_generics_index_exceed)]
+#[derive(Diagnostic)]
+#[diag(passes_rustc_legacy_const_generics_index_exceed)]
pub struct RustcLegacyConstGenericsIndexExceed {
#[primary_span]
#[label]
@@ -433,75 +447,75 @@ pub struct RustcLegacyConstGenericsIndexExceed {
pub arg_count: usize,
}
-#[derive(SessionDiagnostic)]
-#[error(passes::rustc_legacy_const_generics_index_negative)]
+#[derive(Diagnostic)]
+#[diag(passes_rustc_legacy_const_generics_index_negative)]
pub struct RustcLegacyConstGenericsIndexNegative {
#[primary_span]
pub invalid_args: Vec<Span>,
}
-#[derive(SessionDiagnostic)]
-#[error(passes::rustc_dirty_clean)]
+#[derive(Diagnostic)]
+#[diag(passes_rustc_dirty_clean)]
pub struct RustcDirtyClean {
#[primary_span]
pub span: Span,
}
#[derive(LintDiagnostic)]
-#[lint(passes::link_section)]
-#[warn_]
+#[diag(passes_link_section)]
+#[warning]
pub struct LinkSection {
#[label]
pub span: Span,
}
#[derive(LintDiagnostic)]
-#[lint(passes::no_mangle_foreign)]
-#[warn_]
+#[diag(passes_no_mangle_foreign)]
+#[warning]
#[note]
pub struct NoMangleForeign {
#[label]
pub span: Span,
- #[suggestion(applicability = "machine-applicable")]
+ #[suggestion(code = "", applicability = "machine-applicable")]
pub attr_span: Span,
pub foreign_item_kind: &'static str,
}
#[derive(LintDiagnostic)]
-#[lint(passes::no_mangle)]
-#[warn_]
+#[diag(passes_no_mangle)]
+#[warning]
pub struct NoMangle {
#[label]
pub span: Span,
}
-#[derive(SessionDiagnostic)]
-#[error(passes::repr_ident, code = "E0565")]
+#[derive(Diagnostic)]
+#[diag(passes_repr_ident, code = "E0565")]
pub struct ReprIdent {
#[primary_span]
pub span: Span,
}
#[derive(LintDiagnostic)]
-#[lint(passes::repr_conflicting, code = "E0566")]
+#[diag(passes_repr_conflicting, code = "E0566")]
pub struct ReprConflicting;
-#[derive(SessionDiagnostic)]
-#[error(passes::used_static)]
+#[derive(Diagnostic)]
+#[diag(passes_used_static)]
pub struct UsedStatic {
#[primary_span]
pub span: Span,
}
-#[derive(SessionDiagnostic)]
-#[error(passes::used_compiler_linker)]
+#[derive(Diagnostic)]
+#[diag(passes_used_compiler_linker)]
pub struct UsedCompilerLinker {
#[primary_span]
pub spans: Vec<Span>,
}
-#[derive(SessionDiagnostic)]
-#[error(passes::allow_internal_unstable)]
+#[derive(Diagnostic)]
+#[diag(passes_allow_internal_unstable)]
pub struct AllowInternalUnstable {
#[primary_span]
pub attr_span: Span,
@@ -509,25 +523,34 @@ pub struct AllowInternalUnstable {
pub span: Span,
}
-#[derive(SessionDiagnostic)]
-#[error(passes::debug_visualizer_placement)]
+#[derive(Diagnostic)]
+#[diag(passes_debug_visualizer_placement)]
pub struct DebugVisualizerPlacement {
#[primary_span]
pub span: Span,
}
-#[derive(SessionDiagnostic)]
-#[error(passes::debug_visualizer_invalid)]
-#[note(passes::note_1)]
-#[note(passes::note_2)]
-#[note(passes::note_3)]
+#[derive(Diagnostic)]
+#[diag(passes_debug_visualizer_invalid)]
+#[note(note_1)]
+#[note(note_2)]
+#[note(note_3)]
pub struct DebugVisualizerInvalid {
#[primary_span]
pub span: Span,
}
-#[derive(SessionDiagnostic)]
-#[error(passes::rustc_allow_const_fn_unstable)]
+#[derive(Diagnostic)]
+#[diag(passes_debug_visualizer_unreadable)]
+pub struct DebugVisualizerUnreadable<'a> {
+ #[primary_span]
+ pub span: Span,
+ pub file: &'a Path,
+ pub error: Error,
+}
+
+#[derive(Diagnostic)]
+#[diag(passes_rustc_allow_const_fn_unstable)]
pub struct RustcAllowConstFnUnstable {
#[primary_span]
pub attr_span: Span,
@@ -535,8 +558,8 @@ pub struct RustcAllowConstFnUnstable {
pub span: Span,
}
-#[derive(SessionDiagnostic)]
-#[error(passes::rustc_std_internal_symbol)]
+#[derive(Diagnostic)]
+#[diag(passes_rustc_std_internal_symbol)]
pub struct RustcStdInternalSymbol {
#[primary_span]
pub attr_span: Span,
@@ -544,59 +567,66 @@ pub struct RustcStdInternalSymbol {
pub span: Span,
}
-#[derive(SessionDiagnostic)]
-#[error(passes::const_trait)]
+#[derive(Diagnostic)]
+#[diag(passes_const_trait)]
pub struct ConstTrait {
#[primary_span]
pub attr_span: Span,
}
-#[derive(SessionDiagnostic)]
-#[error(passes::stability_promotable)]
+#[derive(Diagnostic)]
+#[diag(passes_link_ordinal)]
+pub struct LinkOrdinal {
+ #[primary_span]
+ pub attr_span: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(passes_stability_promotable)]
pub struct StabilityPromotable {
#[primary_span]
pub attr_span: Span,
}
#[derive(LintDiagnostic)]
-#[lint(passes::deprecated)]
+#[diag(passes_deprecated)]
pub struct Deprecated;
#[derive(LintDiagnostic)]
-#[lint(passes::macro_use)]
+#[diag(passes_macro_use)]
pub struct MacroUse {
pub name: Symbol,
}
#[derive(LintDiagnostic)]
-#[lint(passes::macro_export)]
+#[diag(passes_macro_export)]
pub struct MacroExport;
#[derive(LintDiagnostic)]
-#[lint(passes::plugin_registrar)]
+#[diag(passes_plugin_registrar)]
pub struct PluginRegistrar;
-#[derive(SessionSubdiagnostic)]
+#[derive(Subdiagnostic)]
pub enum UnusedNote {
- #[note(passes::unused_empty_lints_note)]
+ #[note(passes_unused_empty_lints_note)]
EmptyList { name: Symbol },
- #[note(passes::unused_no_lints_note)]
+ #[note(passes_unused_no_lints_note)]
NoLints { name: Symbol },
- #[note(passes::unused_default_method_body_const_note)]
+ #[note(passes_unused_default_method_body_const_note)]
DefaultMethodBodyConst,
}
#[derive(LintDiagnostic)]
-#[lint(passes::unused)]
+#[diag(passes_unused)]
pub struct Unused {
- #[suggestion(applicability = "machine-applicable")]
+ #[suggestion(code = "", applicability = "machine-applicable")]
pub attr_span: Span,
#[subdiagnostic]
pub note: UnusedNote,
}
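
The `LintDiagnostic` structs such as `Unused` are not emitted through `Session::emit_err`; they go through the lint machinery. A hedged sketch of a typical call site in the check_attr pass of this era, assuming `tcx`, `hir_id`, `attr`, and `name` are in scope (the exact call site is not part of this diff):

    use rustc_session::lint::builtin::UNUSED_ATTRIBUTES;

    // `emit_spanned_lint` accepts any `#[derive(LintDiagnostic)]` type as the decorator.
    tcx.emit_spanned_lint(
        UNUSED_ATTRIBUTES,
        hir_id,
        attr.span,
        Unused { attr_span: attr.span, note: UnusedNote::NoLints { name } },
    );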
-#[derive(SessionDiagnostic)]
-#[error(passes::non_exported_macro_invalid_attrs, code = "E0518")]
+#[derive(Diagnostic)]
+#[diag(passes_non_exported_macro_invalid_attrs, code = "E0518")]
pub struct NonExportedMacroInvalidAttrs {
#[primary_span]
#[label]
@@ -604,19 +634,18 @@ pub struct NonExportedMacroInvalidAttrs {
}
#[derive(LintDiagnostic)]
-#[lint(passes::unused_duplicate)]
+#[diag(passes_unused_duplicate)]
pub struct UnusedDuplicate {
- #[primary_span]
#[suggestion(code = "", applicability = "machine-applicable")]
pub this: Span,
#[note]
pub other: Span,
- #[warn_]
+ #[warning]
pub warning: Option<()>,
}
-#[derive(SessionDiagnostic)]
-#[error(passes::unused_multiple)]
+#[derive(Diagnostic)]
+#[diag(passes_unused_multiple)]
pub struct UnusedMultiple {
#[primary_span]
#[suggestion(code = "", applicability = "machine-applicable")]
@@ -626,8 +655,8 @@ pub struct UnusedMultiple {
pub name: Symbol,
}
-#[derive(SessionDiagnostic)]
-#[error(passes::rustc_lint_opt_ty)]
+#[derive(Diagnostic)]
+#[diag(passes_rustc_lint_opt_ty)]
pub struct RustcLintOptTy {
#[primary_span]
pub attr_span: Span,
@@ -635,11 +664,788 @@ pub struct RustcLintOptTy {
pub span: Span,
}
-#[derive(SessionDiagnostic)]
-#[error(passes::rustc_lint_opt_deny_field_access)]
+#[derive(Diagnostic)]
+#[diag(passes_rustc_lint_opt_deny_field_access)]
pub struct RustcLintOptDenyFieldAccess {
#[primary_span]
pub attr_span: Span,
#[label]
pub span: Span,
}
+
+#[derive(Diagnostic)]
+#[diag(passes_collapse_debuginfo)]
+pub struct CollapseDebuginfo {
+ #[primary_span]
+ pub attr_span: Span,
+ #[label]
+ pub defn_span: Span,
+}
+
+#[derive(LintDiagnostic)]
+#[diag(passes_deprecated_annotation_has_no_effect)]
+pub struct DeprecatedAnnotationHasNoEffect {
+ #[suggestion(applicability = "machine-applicable", code = "")]
+ pub span: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(passes_unknown_external_lang_item, code = "E0264")]
+pub struct UnknownExternLangItem {
+ #[primary_span]
+ pub span: Span,
+ pub lang_item: Symbol,
+}
+
+#[derive(Diagnostic)]
+#[diag(passes_missing_panic_handler)]
+pub struct MissingPanicHandler;
+
+#[derive(Diagnostic)]
+#[diag(passes_alloc_func_required)]
+pub struct AllocFuncRequired;
+
+#[derive(Diagnostic)]
+#[diag(passes_missing_alloc_error_handler)]
+pub struct MissingAllocErrorHandler;
+
+#[derive(Diagnostic)]
+#[diag(passes_missing_lang_item)]
+#[note]
+#[help]
+pub struct MissingLangItem {
+ pub name: Symbol,
+}
+
+#[derive(Diagnostic)]
+#[diag(passes_lang_item_on_incorrect_target, code = "E0718")]
+pub struct LangItemOnIncorrectTarget {
+ #[primary_span]
+ #[label]
+ pub span: Span,
+ pub name: Symbol,
+ pub expected_target: Target,
+ pub actual_target: Target,
+}
+
+#[derive(Diagnostic)]
+#[diag(passes_unknown_lang_item, code = "E0522")]
+pub struct UnknownLangItem {
+ #[primary_span]
+ #[label]
+ pub span: Span,
+ pub name: Symbol,
+}
+
+pub struct InvalidAttrAtCrateLevel {
+ pub span: Span,
+ pub snippet: Option<String>,
+ pub name: Symbol,
+}
+
+impl IntoDiagnostic<'_> for InvalidAttrAtCrateLevel {
+ fn into_diagnostic(
+ self,
+ handler: &'_ rustc_errors::Handler,
+ ) -> rustc_errors::DiagnosticBuilder<'_, ErrorGuaranteed> {
+ let mut diag = handler.struct_err(rustc_errors::fluent::passes_invalid_attr_at_crate_level);
+ diag.set_span(self.span);
+ diag.set_arg("name", self.name);
+ // Only emit an error with a suggestion if we can create a string out
+ // of the attribute span
+ if let Some(src) = self.snippet {
+ let replacement = src.replace("#!", "#");
+ diag.span_suggestion_verbose(
+ self.span,
+ rustc_errors::fluent::suggestion,
+ replacement,
+ rustc_errors::Applicability::MachineApplicable,
+ );
+ }
+ diag
+ }
+}
+
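
Like the hand-written `IntoDiagnostic` impl above, the `#[derive(Diagnostic)]` structs in this file are emitted by handing the value to the session. A minimal sketch of a call site, where the wrapper function and span are hypothetical and only `Session::emit_err` and the `UsedStatic` struct come from the compiler itself:

    use rustc_middle::ty::TyCtxt;
    use rustc_span::Span;

    // Hypothetical helper: `emit_err` accepts any type implementing `IntoDiagnostic`,
    // which `#[derive(Diagnostic)]` generates for `UsedStatic`.
    fn report_used_static(tcx: TyCtxt<'_>, span: Span) {
        tcx.sess.emit_err(UsedStatic { span });
    }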
+#[derive(Diagnostic)]
+#[diag(passes_duplicate_diagnostic_item)]
+pub struct DuplicateDiagnosticItem {
+ #[primary_span]
+ pub span: Span,
+ pub name: Symbol,
+}
+
+#[derive(Diagnostic)]
+#[diag(passes_duplicate_diagnostic_item_in_crate)]
+pub struct DuplicateDiagnosticItemInCrate {
+ #[note(passes_diagnostic_item_first_defined)]
+ pub span: Option<Span>,
+ pub orig_crate_name: Symbol,
+ #[note]
+ pub have_orig_crate_name: Option<()>,
+ pub crate_name: Symbol,
+ pub name: Symbol,
+}
+
+#[derive(Diagnostic)]
+#[diag(passes_abi)]
+pub struct Abi {
+ #[primary_span]
+ pub span: Span,
+ pub abi: String,
+}
+
+#[derive(Diagnostic)]
+#[diag(passes_align)]
+pub struct Align {
+ #[primary_span]
+ pub span: Span,
+ pub align: String,
+}
+
+#[derive(Diagnostic)]
+#[diag(passes_size)]
+pub struct Size {
+ #[primary_span]
+ pub span: Span,
+ pub size: String,
+}
+
+#[derive(Diagnostic)]
+#[diag(passes_homogeneous_aggregate)]
+pub struct HomogeneousAggregate {
+ #[primary_span]
+ pub span: Span,
+ pub homogeneous_aggregate: String,
+}
+
+#[derive(Diagnostic)]
+#[diag(passes_layout_of)]
+pub struct LayoutOf {
+ #[primary_span]
+ pub span: Span,
+ pub normalized_ty: String,
+ pub ty_layout: String,
+}
+
+#[derive(Diagnostic)]
+#[diag(passes_unrecognized_field)]
+pub struct UnrecognizedField {
+ #[primary_span]
+ pub span: Span,
+ pub name: Symbol,
+}
+
+#[derive(Diagnostic)]
+#[diag(passes_feature_stable_twice, code = "E0711")]
+pub struct FeatureStableTwice {
+ #[primary_span]
+ pub span: Span,
+ pub feature: Symbol,
+ pub since: Symbol,
+ pub prev_since: Symbol,
+}
+
+#[derive(Diagnostic)]
+#[diag(passes_feature_previously_declared, code = "E0711")]
+pub struct FeaturePreviouslyDeclared<'a, 'b> {
+ #[primary_span]
+ pub span: Span,
+ pub feature: Symbol,
+ pub declared: &'a str,
+ pub prev_declared: &'b str,
+}
+
+#[derive(Diagnostic)]
+#[diag(passes_expr_not_allowed_in_context, code = "E0744")]
+pub struct ExprNotAllowedInContext<'a> {
+ #[primary_span]
+ pub span: Span,
+ pub expr: String,
+ pub context: &'a str,
+}
+
+pub struct BreakNonLoop<'a> {
+ pub span: Span,
+ pub head: Option<Span>,
+ pub kind: &'a str,
+ pub suggestion: String,
+ pub loop_label: Option<Label>,
+ pub break_label: Option<Label>,
+ pub break_expr_kind: &'a ExprKind<'a>,
+ pub break_expr_span: Span,
+}
+
+impl<'a> IntoDiagnostic<'_> for BreakNonLoop<'a> {
+ fn into_diagnostic(
+ self,
+ handler: &rustc_errors::Handler,
+ ) -> rustc_errors::DiagnosticBuilder<'_, ErrorGuaranteed> {
+ let mut diag = handler.struct_span_err_with_code(
+ self.span,
+ rustc_errors::fluent::passes_break_non_loop,
+ error_code!(E0571),
+ );
+ diag.set_arg("kind", self.kind);
+ diag.span_label(self.span, rustc_errors::fluent::label);
+ if let Some(head) = self.head {
+ diag.span_label(head, rustc_errors::fluent::label2);
+ }
+ diag.span_suggestion(
+ self.span,
+ rustc_errors::fluent::suggestion,
+ self.suggestion,
+ Applicability::MaybeIncorrect,
+ );
+ if let (Some(label), None) = (self.loop_label, self.break_label) {
+ match self.break_expr_kind {
+ ExprKind::Path(hir::QPath::Resolved(
+ None,
+ hir::Path { segments: [segment], res: hir::def::Res::Err, .. },
+ )) if label.ident.to_string() == format!("'{}", segment.ident) => {
+ // This error is redundant, we will have already emitted a
+ // suggestion to use the label when `segment` wasn't found
+ // (hence the `Res::Err` check).
+ diag.delay_as_bug();
+ }
+ _ => {
+ diag.span_suggestion(
+ self.break_expr_span,
+ rustc_errors::fluent::break_expr_suggestion,
+ label.ident,
+ Applicability::MaybeIncorrect,
+ );
+ }
+ }
+ }
+ diag
+ }
+}
+
+#[derive(Diagnostic)]
+#[diag(passes_continue_labeled_block, code = "E0696")]
+pub struct ContinueLabeledBlock {
+ #[primary_span]
+ #[label]
+ pub span: Span,
+ #[label(block_label)]
+ pub block_span: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(passes_break_inside_closure, code = "E0267")]
+pub struct BreakInsideClosure<'a> {
+ #[primary_span]
+ #[label]
+ pub span: Span,
+ #[label(closure_label)]
+ pub closure_span: Span,
+ pub name: &'a str,
+}
+
+#[derive(Diagnostic)]
+#[diag(passes_break_inside_async_block, code = "E0267")]
+pub struct BreakInsideAsyncBlock<'a> {
+ #[primary_span]
+ #[label]
+ pub span: Span,
+ #[label(async_block_label)]
+ pub closure_span: Span,
+ pub name: &'a str,
+}
+
+#[derive(Diagnostic)]
+#[diag(passes_outside_loop, code = "E0268")]
+pub struct OutsideLoop<'a> {
+ #[primary_span]
+ #[label]
+ pub span: Span,
+ pub name: &'a str,
+}
+
+#[derive(Diagnostic)]
+#[diag(passes_unlabeled_in_labeled_block, code = "E0695")]
+pub struct UnlabeledInLabeledBlock<'a> {
+ #[primary_span]
+ #[label]
+ pub span: Span,
+ pub cf_type: &'a str,
+}
+
+#[derive(Diagnostic)]
+#[diag(passes_unlabeled_cf_in_while_condition, code = "E0590")]
+pub struct UnlabeledCfInWhileCondition<'a> {
+ #[primary_span]
+ #[label]
+ pub span: Span,
+ pub cf_type: &'a str,
+}
+
+#[derive(Diagnostic)]
+#[diag(passes_cannot_inline_naked_function)]
+pub struct CannotInlineNakedFunction {
+ #[primary_span]
+ pub span: Span,
+}
+
+#[derive(LintDiagnostic)]
+#[diag(passes_undefined_naked_function_abi)]
+pub struct UndefinedNakedFunctionAbi;
+
+#[derive(Diagnostic)]
+#[diag(passes_no_patterns)]
+pub struct NoPatterns {
+ #[primary_span]
+ pub span: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(passes_params_not_allowed)]
+#[help]
+pub struct ParamsNotAllowed {
+ #[primary_span]
+ pub span: Span,
+}
+
+pub struct NakedFunctionsAsmBlock {
+ pub span: Span,
+ pub multiple_asms: Vec<Span>,
+ pub non_asms: Vec<Span>,
+}
+
+impl IntoDiagnostic<'_> for NakedFunctionsAsmBlock {
+ fn into_diagnostic(
+ self,
+ handler: &rustc_errors::Handler,
+ ) -> rustc_errors::DiagnosticBuilder<'_, ErrorGuaranteed> {
+ let mut diag = handler.struct_span_err_with_code(
+ self.span,
+ rustc_errors::fluent::passes_naked_functions_asm_block,
+ error_code!(E0787),
+ );
+ for span in self.multiple_asms.iter() {
+ diag.span_label(*span, rustc_errors::fluent::label_multiple_asm);
+ }
+ for span in self.non_asms.iter() {
+ diag.span_label(*span, rustc_errors::fluent::label_non_asm);
+ }
+ diag
+ }
+}
+
+#[derive(Diagnostic)]
+#[diag(passes_naked_functions_operands, code = "E0787")]
+pub struct NakedFunctionsOperands {
+ #[primary_span]
+ pub unsupported_operands: Vec<Span>,
+}
+
+#[derive(Diagnostic)]
+#[diag(passes_naked_functions_asm_options, code = "E0787")]
+pub struct NakedFunctionsAsmOptions {
+ #[primary_span]
+ pub span: Span,
+ pub unsupported_options: String,
+}
+
+#[derive(Diagnostic)]
+#[diag(passes_naked_functions_must_use_noreturn, code = "E0787")]
+pub struct NakedFunctionsMustUseNoreturn {
+ #[primary_span]
+ pub span: Span,
+ #[suggestion(code = ", options(noreturn)", applicability = "machine-applicable")]
+ pub last_span: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(passes_attr_only_on_main)]
+pub struct AttrOnlyOnMain {
+ #[primary_span]
+ pub span: Span,
+ pub attr: Symbol,
+}
+
+#[derive(Diagnostic)]
+#[diag(passes_attr_only_on_root_main)]
+pub struct AttrOnlyOnRootMain {
+ #[primary_span]
+ pub span: Span,
+ pub attr: Symbol,
+}
+
+#[derive(Diagnostic)]
+#[diag(passes_attr_only_in_functions)]
+pub struct AttrOnlyInFunctions {
+ #[primary_span]
+ pub span: Span,
+ pub attr: Symbol,
+}
+
+#[derive(Diagnostic)]
+#[diag(passes_multiple_rustc_main, code = "E0137")]
+pub struct MultipleRustcMain {
+ #[primary_span]
+ pub span: Span,
+ #[label(first)]
+ pub first: Span,
+ #[label(additional)]
+ pub additional: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(passes_multiple_start_functions, code = "E0138")]
+pub struct MultipleStartFunctions {
+ #[primary_span]
+ pub span: Span,
+ #[label]
+ pub labeled: Span,
+ #[label(previous)]
+ pub previous: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(passes_extern_main)]
+pub struct ExternMain {
+ #[primary_span]
+ pub span: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(passes_unix_sigpipe_values)]
+pub struct UnixSigpipeValues {
+ #[primary_span]
+ pub span: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(passes_no_main_function, code = "E0601")]
+pub struct NoMainFunction {
+ #[primary_span]
+ pub span: Span,
+ pub crate_name: String,
+}
+
+pub struct NoMainErr {
+ pub sp: Span,
+ pub crate_name: Symbol,
+ pub has_filename: bool,
+ pub filename: PathBuf,
+ pub file_empty: bool,
+ pub non_main_fns: Vec<Span>,
+ pub main_def_opt: Option<MainDefinition>,
+ pub add_teach_note: bool,
+}
+
+impl<'a> IntoDiagnostic<'a> for NoMainErr {
+ fn into_diagnostic(
+ self,
+ handler: &'a rustc_errors::Handler,
+ ) -> rustc_errors::DiagnosticBuilder<'a, ErrorGuaranteed> {
+ let mut diag = handler.struct_span_err_with_code(
+ DUMMY_SP,
+ rustc_errors::fluent::passes_no_main_function,
+ error_code!(E0601),
+ );
+ diag.set_arg("crate_name", self.crate_name);
+ diag.set_arg("filename", self.filename);
+ diag.set_arg("has_filename", self.has_filename);
+ let note = if !self.non_main_fns.is_empty() {
+ for &span in &self.non_main_fns {
+ diag.span_note(span, rustc_errors::fluent::here_is_main);
+ }
+ diag.note(rustc_errors::fluent::one_or_more_possible_main);
+ diag.help(rustc_errors::fluent::consider_moving_main);
+ // There were some functions named `main` though. Try to give the user a hint.
+ rustc_errors::fluent::main_must_be_defined_at_crate
+ } else if self.has_filename {
+ rustc_errors::fluent::consider_adding_main_to_file
+ } else {
+ rustc_errors::fluent::consider_adding_main_at_crate
+ };
+ if self.file_empty {
+ diag.note(note);
+ } else {
+ diag.set_span(self.sp.shrink_to_hi());
+ diag.span_label(self.sp.shrink_to_hi(), note);
+ }
+
+ if let Some(main_def) = self.main_def_opt && main_def.opt_fn_def_id().is_none() {
+ // There is something at `crate::main`, but it is not a function definition.
+ diag.span_label(main_def.span, rustc_errors::fluent::non_function_main);
+ }
+
+ if self.add_teach_note {
+ diag.note(rustc_errors::fluent::teach_note);
+ }
+ diag
+ }
+}
+
+pub struct DuplicateLangItem {
+ pub local_span: Option<Span>,
+ pub lang_item_name: Symbol,
+ pub crate_name: Symbol,
+ pub dependency_of: Symbol,
+ pub is_local: bool,
+ pub path: String,
+ pub first_defined_span: Option<Span>,
+ pub orig_crate_name: Symbol,
+ pub orig_dependency_of: Symbol,
+ pub orig_is_local: bool,
+ pub orig_path: String,
+ pub(crate) duplicate: Duplicate,
+}
+
+impl IntoDiagnostic<'_> for DuplicateLangItem {
+ fn into_diagnostic(
+ self,
+ handler: &rustc_errors::Handler,
+ ) -> rustc_errors::DiagnosticBuilder<'_, ErrorGuaranteed> {
+ let mut diag = handler.struct_err_with_code(
+ match self.duplicate {
+ Duplicate::Plain => rustc_errors::fluent::passes_duplicate_lang_item,
+
+ Duplicate::Crate => rustc_errors::fluent::passes_duplicate_lang_item_crate,
+ Duplicate::CrateDepends => {
+ rustc_errors::fluent::passes_duplicate_lang_item_crate_depends
+ }
+ },
+ error_code!(E0152),
+ );
+ diag.set_arg("lang_item_name", self.lang_item_name);
+ diag.set_arg("crate_name", self.crate_name);
+ diag.set_arg("dependency_of", self.dependency_of);
+ diag.set_arg("path", self.path);
+ diag.set_arg("orig_crate_name", self.orig_crate_name);
+ diag.set_arg("orig_dependency_of", self.orig_dependency_of);
+ diag.set_arg("orig_path", self.orig_path);
+ if let Some(span) = self.local_span {
+ diag.set_span(span);
+ }
+ if let Some(span) = self.first_defined_span {
+ diag.span_note(span, rustc_errors::fluent::first_defined_span);
+ } else {
+ if self.orig_dependency_of.is_empty() {
+ diag.note(rustc_errors::fluent::first_defined_crate);
+ } else {
+ diag.note(rustc_errors::fluent::first_defined_crate_depends);
+ }
+
+ if self.orig_is_local {
+ diag.note(rustc_errors::fluent::first_definition_local);
+ } else {
+ diag.note(rustc_errors::fluent::first_definition_path);
+ }
+
+ if self.is_local {
+ diag.note(rustc_errors::fluent::second_definition_local);
+ } else {
+ diag.note(rustc_errors::fluent::second_definition_path);
+ }
+ }
+ diag
+ }
+}
+
+#[derive(Diagnostic)]
+#[diag(passes_incorrect_target, code = "E0718")]
+pub struct IncorrectTarget<'a> {
+ #[primary_span]
+ pub span: Span,
+ #[label]
+ pub generics_span: Span,
+ pub name: &'a str, // cannot be symbol because it renders e.g. `r#fn` instead of `fn`
+ pub kind: &'static str,
+ pub num: usize,
+ pub actual_num: usize,
+ pub at_least: bool,
+}
+
+#[derive(LintDiagnostic)]
+#[diag(passes_useless_assignment)]
+pub struct UselessAssignment<'a> {
+ pub is_field_assign: bool,
+ pub ty: Ty<'a>,
+}
+
+#[derive(LintDiagnostic)]
+#[diag(passes_only_has_effect_on)]
+pub struct OnlyHasEffectOn {
+ pub attr_name: Symbol,
+ pub target_name: String,
+}
+
+#[derive(Diagnostic)]
+#[diag(passes_object_lifetime_err)]
+pub struct ObjectLifetimeErr {
+ #[primary_span]
+ pub span: Span,
+ pub repr: String,
+}
+
+#[derive(Diagnostic)]
+#[diag(passes_unrecognized_repr_hint, code = "E0552")]
+#[help]
+pub struct UnrecognizedReprHint {
+ #[primary_span]
+ pub span: Span,
+}
+
+#[derive(Diagnostic)]
+pub enum AttrApplication {
+ #[diag(passes_attr_application_enum, code = "E0517")]
+ Enum {
+ #[primary_span]
+ hint_span: Span,
+ #[label]
+ span: Span,
+ },
+ #[diag(passes_attr_application_struct, code = "E0517")]
+ Struct {
+ #[primary_span]
+ hint_span: Span,
+ #[label]
+ span: Span,
+ },
+ #[diag(passes_attr_application_struct_union, code = "E0517")]
+ StructUnion {
+ #[primary_span]
+ hint_span: Span,
+ #[label]
+ span: Span,
+ },
+ #[diag(passes_attr_application_struct_enum_union, code = "E0517")]
+ StructEnumUnion {
+ #[primary_span]
+ hint_span: Span,
+ #[label]
+ span: Span,
+ },
+ #[diag(passes_attr_application_struct_enum_function_union, code = "E0517")]
+ StructEnumFunctionUnion {
+ #[primary_span]
+ hint_span: Span,
+ #[label]
+ span: Span,
+ },
+}
+
+#[derive(Diagnostic)]
+#[diag(passes_transparent_incompatible, code = "E0692")]
+pub struct TransparentIncompatible {
+ #[primary_span]
+ pub hint_spans: Vec<Span>,
+ pub target: String,
+}
+
+#[derive(Diagnostic)]
+#[diag(passes_deprecated_attribute, code = "E0549")]
+pub struct DeprecatedAttribute {
+ #[primary_span]
+ pub span: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(passes_useless_stability)]
+pub struct UselessStability {
+ #[primary_span]
+ #[label]
+ pub span: Span,
+ #[label(item)]
+ pub item_sp: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(passes_invalid_stability)]
+pub struct InvalidStability {
+ #[primary_span]
+ #[label]
+ pub span: Span,
+ #[label(item)]
+ pub item_sp: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(passes_cannot_stabilize_deprecated)]
+pub struct CannotStabilizeDeprecated {
+ #[primary_span]
+ #[label]
+ pub span: Span,
+ #[label(item)]
+ pub item_sp: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(passes_invalid_deprecation_version)]
+pub struct InvalidDeprecationVersion {
+ #[primary_span]
+ #[label]
+ pub span: Span,
+ #[label(item)]
+ pub item_sp: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(passes_missing_stability_attr)]
+pub struct MissingStabilityAttr<'a> {
+ #[primary_span]
+ pub span: Span,
+ pub descr: &'a str,
+}
+
+#[derive(Diagnostic)]
+#[diag(passes_missing_const_stab_attr)]
+pub struct MissingConstStabAttr<'a> {
+ #[primary_span]
+ pub span: Span,
+ pub descr: &'a str,
+}
+
+#[derive(Diagnostic)]
+#[diag(passes_trait_impl_const_stable)]
+#[note]
+pub struct TraitImplConstStable {
+ #[primary_span]
+ pub span: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(passes_feature_only_on_nightly, code = "E0554")]
+pub struct FeatureOnlyOnNightly {
+ #[primary_span]
+ pub span: Span,
+ pub release_channel: &'static str,
+}
+
+#[derive(Diagnostic)]
+#[diag(passes_unknown_feature, code = "E0635")]
+pub struct UnknownFeature {
+ #[primary_span]
+ pub span: Span,
+ pub feature: Symbol,
+}
+
+#[derive(Diagnostic)]
+#[diag(passes_implied_feature_not_exist)]
+pub struct ImpliedFeatureNotExist {
+ #[primary_span]
+ pub span: Span,
+ pub feature: Symbol,
+ pub implied_by: Symbol,
+}
+
+#[derive(Diagnostic)]
+#[diag(passes_duplicate_feature_err, code = "E0636")]
+pub struct DuplicateFeatureErr {
+ #[primary_span]
+ pub span: Span,
+ pub feature: Symbol,
+}
+#[derive(Diagnostic)]
+#[diag(passes_missing_const_err)]
+pub struct MissingConstErr {
+ #[primary_span]
+ #[help]
+ pub fn_sig_span: Span,
+ #[label]
+ pub const_span: Span,
+}
diff --git a/compiler/rustc_passes/src/hir_id_validator.rs b/compiler/rustc_passes/src/hir_id_validator.rs
index 212ea9e57..88bb39deb 100644
--- a/compiler/rustc_passes/src/hir_id_validator.rs
+++ b/compiler/rustc_passes/src/hir_id_validator.rs
@@ -1,6 +1,5 @@
use rustc_data_structures::sync::Lock;
use rustc_hir as hir;
-use rustc_hir::def_id::{LocalDefId, CRATE_DEF_ID};
use rustc_hir::intravisit;
use rustc_hir::{HirId, ItemLocalId};
use rustc_index::bit_set::GrowableBitSet;
@@ -42,7 +41,7 @@ pub fn check_crate(tcx: TyCtxt<'_>) {
struct HirIdValidator<'a, 'hir> {
hir_map: Map<'hir>,
- owner: Option<LocalDefId>,
+ owner: Option<hir::OwnerId>,
hir_ids_seen: GrowableBitSet<ItemLocalId>,
errors: &'a Lock<Vec<String>>,
}
@@ -63,12 +62,12 @@ impl<'a, 'hir> HirIdValidator<'a, 'hir> {
self.errors.lock().push(f());
}
- fn check<F: FnOnce(&mut HirIdValidator<'a, 'hir>)>(&mut self, owner: LocalDefId, walk: F) {
+ fn check<F: FnOnce(&mut HirIdValidator<'a, 'hir>)>(&mut self, owner: hir::OwnerId, walk: F) {
assert!(self.owner.is_none());
self.owner = Some(owner);
walk(self);
- if owner == CRATE_DEF_ID {
+ if owner == hir::CRATE_OWNER_ID {
return;
}
@@ -97,14 +96,14 @@ impl<'a, 'hir> HirIdValidator<'a, 'hir> {
missing_items.push(format!(
"[local_id: {}, owner: {}]",
local_id,
- self.hir_map.def_path(owner).to_string_no_crate_verbose()
+ self.hir_map.def_path(owner.def_id).to_string_no_crate_verbose()
));
}
self.error(|| {
format!(
"ItemLocalIds not assigned densely in {}. \
- Max ItemLocalId = {}, missing IDs = {:?}; seens IDs = {:?}",
- self.hir_map.def_path(owner).to_string_no_crate_verbose(),
+ Max ItemLocalId = {}, missing IDs = {:#?}; seen IDs = {:#?}",
+ self.hir_map.def_path(owner.def_id).to_string_no_crate_verbose(),
max,
missing_items,
self.hir_ids_seen
@@ -127,7 +126,7 @@ impl<'a, 'hir> intravisit::Visitor<'hir> for HirIdValidator<'a, 'hir> {
fn visit_item(&mut self, i: &'hir hir::Item<'hir>) {
let mut inner_visitor = self.new_visitor(self.hir_map);
- inner_visitor.check(i.def_id, |this| intravisit::walk_item(this, i));
+ inner_visitor.check(i.owner_id, |this| intravisit::walk_item(this, i));
}
fn visit_id(&mut self, hir_id: HirId) {
@@ -138,8 +137,8 @@ impl<'a, 'hir> intravisit::Visitor<'hir> for HirIdValidator<'a, 'hir> {
format!(
"HirIdValidator: The recorded owner of {} is {} instead of {}",
self.hir_map.node_to_string(hir_id),
- self.hir_map.def_path(hir_id.owner).to_string_no_crate_verbose(),
- self.hir_map.def_path(owner).to_string_no_crate_verbose()
+ self.hir_map.def_path(hir_id.owner.def_id).to_string_no_crate_verbose(),
+ self.hir_map.def_path(owner.def_id).to_string_no_crate_verbose()
)
});
}
@@ -149,16 +148,16 @@ impl<'a, 'hir> intravisit::Visitor<'hir> for HirIdValidator<'a, 'hir> {
fn visit_foreign_item(&mut self, i: &'hir hir::ForeignItem<'hir>) {
let mut inner_visitor = self.new_visitor(self.hir_map);
- inner_visitor.check(i.def_id, |this| intravisit::walk_foreign_item(this, i));
+ inner_visitor.check(i.owner_id, |this| intravisit::walk_foreign_item(this, i));
}
fn visit_trait_item(&mut self, i: &'hir hir::TraitItem<'hir>) {
let mut inner_visitor = self.new_visitor(self.hir_map);
- inner_visitor.check(i.def_id, |this| intravisit::walk_trait_item(this, i));
+ inner_visitor.check(i.owner_id, |this| intravisit::walk_trait_item(this, i));
}
fn visit_impl_item(&mut self, i: &'hir hir::ImplItem<'hir>) {
let mut inner_visitor = self.new_visitor(self.hir_map);
- inner_visitor.check(i.def_id, |this| intravisit::walk_impl_item(this, i));
+ inner_visitor.check(i.owner_id, |this| intravisit::walk_impl_item(this, i));
}
}
diff --git a/compiler/rustc_passes/src/hir_stats.rs b/compiler/rustc_passes/src/hir_stats.rs
index a3be827a7..33220fd2b 100644
--- a/compiler/rustc_passes/src/hir_stats.rs
+++ b/compiler/rustc_passes/src/hir_stats.rs
@@ -21,75 +21,169 @@ enum Id {
None,
}
-struct NodeData {
+struct NodeStats {
count: usize,
size: usize,
}
+impl NodeStats {
+ fn new() -> NodeStats {
+ NodeStats { count: 0, size: 0 }
+ }
+}
+
+struct Node {
+ stats: NodeStats,
+ subnodes: FxHashMap<&'static str, NodeStats>,
+}
+
+impl Node {
+ fn new() -> Node {
+ Node { stats: NodeStats::new(), subnodes: FxHashMap::default() }
+ }
+}
+
+/// This type measures the size of AST and HIR nodes, by implementing the AST
+/// and HIR `Visitor` traits. But we don't measure every visited type because
+/// that could cause double counting.
+///
+/// For example, `ast::Visitor` has `visit_ident`, but `Ident`s are always
+/// stored inline within other AST nodes, so we don't implement `visit_ident`
+/// here. In contrast, we do implement `visit_expr` because `ast::Expr` is
+/// always stored as `P<ast::Expr>`, and every such expression should be
+/// measured separately.
+///
+/// In general, a `visit_foo` method should be implemented here if the
+/// corresponding `Foo` type is always stored on its own, e.g.: `P<Foo>`,
+/// `Box<Foo>`, `Vec<Foo>`, `Box<[Foo]>`.
+///
+/// There are some types in the AST and HIR tree that the visitors do not have
+/// a `visit_*` method for, and so we cannot measure these, which is
+/// unfortunate.
struct StatCollector<'k> {
krate: Option<Map<'k>>,
- data: FxHashMap<&'static str, NodeData>,
+ nodes: FxHashMap<&'static str, Node>,
seen: FxHashSet<Id>,
}
pub fn print_hir_stats(tcx: TyCtxt<'_>) {
let mut collector = StatCollector {
krate: Some(tcx.hir()),
- data: FxHashMap::default(),
+ nodes: FxHashMap::default(),
seen: FxHashSet::default(),
};
tcx.hir().walk_toplevel_module(&mut collector);
tcx.hir().walk_attributes(&mut collector);
- collector.print("HIR STATS");
+ collector.print("HIR STATS", "hir-stats");
}
-pub fn print_ast_stats(krate: &ast::Crate, title: &str) {
+pub fn print_ast_stats(krate: &ast::Crate, title: &str, prefix: &str) {
+ use rustc_ast::visit::Visitor;
+
let mut collector =
- StatCollector { krate: None, data: FxHashMap::default(), seen: FxHashSet::default() };
- ast_visit::walk_crate(&mut collector, krate);
- collector.print(title);
+ StatCollector { krate: None, nodes: FxHashMap::default(), seen: FxHashSet::default() };
+ collector.visit_crate(krate);
+ collector.print(title, prefix);
}
impl<'k> StatCollector<'k> {
- fn record<T>(&mut self, label: &'static str, id: Id, node: &T) {
+ // Record a top-level node.
+ fn record<T>(&mut self, label: &'static str, id: Id, val: &T) {
+ self.record_inner(label, None, id, val);
+ }
+
+ // Record a two-level entry, with a top-level enum type and a variant.
+ fn record_variant<T>(&mut self, label1: &'static str, label2: &'static str, id: Id, val: &T) {
+ self.record_inner(label1, Some(label2), id, val);
+ }
+
+ fn record_inner<T>(
+ &mut self,
+ label1: &'static str,
+ label2: Option<&'static str>,
+ id: Id,
+ val: &T,
+ ) {
if id != Id::None && !self.seen.insert(id) {
return;
}
- let entry = self.data.entry(label).or_insert(NodeData { count: 0, size: 0 });
+ let node = self.nodes.entry(label1).or_insert(Node::new());
+ node.stats.count += 1;
+ node.stats.size = std::mem::size_of_val(val);
- entry.count += 1;
- entry.size = std::mem::size_of_val(node);
+ if let Some(label2) = label2 {
+ let subnode = node.subnodes.entry(label2).or_insert(NodeStats::new());
+ subnode.count += 1;
+ subnode.size = std::mem::size_of_val(val);
+ }
}
- fn print(&self, title: &str) {
- let mut stats: Vec<_> = self.data.iter().collect();
-
- stats.sort_by_key(|&(_, ref d)| d.count * d.size);
+ fn print(&self, title: &str, prefix: &str) {
+ let mut nodes: Vec<_> = self.nodes.iter().collect();
+ nodes.sort_by_key(|&(_, ref node)| node.stats.count * node.stats.size);
- let mut total_size = 0;
+ let total_size = nodes.iter().map(|(_, node)| node.stats.count * node.stats.size).sum();
- eprintln!("\n{}\n", title);
+ eprintln!("{} {}", prefix, title);
+ eprintln!(
+ "{} {:<18}{:>18}{:>14}{:>14}",
+ prefix, "Name", "Accumulated Size", "Count", "Item Size"
+ );
+ eprintln!("{} ----------------------------------------------------------------", prefix);
- eprintln!("{:<18}{:>18}{:>14}{:>14}", "Name", "Accumulated Size", "Count", "Item Size");
- eprintln!("----------------------------------------------------------------");
+ let percent = |m, n| (m * 100) as f64 / n as f64;
- for (label, data) in stats {
+ for (label, node) in nodes {
+ let size = node.stats.count * node.stats.size;
eprintln!(
- "{:<18}{:>18}{:>14}{:>14}",
+ "{} {:<18}{:>10} ({:4.1}%){:>14}{:>14}",
+ prefix,
label,
- to_readable_str(data.count * data.size),
- to_readable_str(data.count),
- to_readable_str(data.size)
+ to_readable_str(size),
+ percent(size, total_size),
+ to_readable_str(node.stats.count),
+ to_readable_str(node.stats.size)
);
-
- total_size += data.count * data.size;
+ if !node.subnodes.is_empty() {
+ let mut subnodes: Vec<_> = node.subnodes.iter().collect();
+ subnodes.sort_by_key(|&(_, ref subnode)| subnode.count * subnode.size);
+
+ for (label, subnode) in subnodes {
+ let size = subnode.count * subnode.size;
+ eprintln!(
+ "{} - {:<18}{:>10} ({:4.1}%){:>14}",
+ prefix,
+ label,
+ to_readable_str(size),
+ percent(size, total_size),
+ to_readable_str(subnode.count),
+ );
+ }
+ }
}
- eprintln!("----------------------------------------------------------------");
- eprintln!("{:<18}{:>18}\n", "Total", to_readable_str(total_size));
+ eprintln!("{} ----------------------------------------------------------------", prefix);
+ eprintln!("{} {:<18}{:>10}", prefix, "Total", to_readable_str(total_size));
+ eprintln!("{}", prefix);
}
}
+// Used to avoid boilerplate for types with many variants.
+macro_rules! record_variants {
+ (
+ ($self:ident, $val:expr, $kind:expr, $id:expr, $mod:ident, $ty:ty, $tykind:ident),
+ [$($variant:ident),*]
+ ) => {
+ match $kind {
+ $(
+ $mod::$tykind::$variant { .. } => {
+ $self.record_variant(stringify!($ty), stringify!($variant), $id, $val)
+ }
+ )*
+ }
+ };
+}
+
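
To make the macro concrete: a trimmed invocation such as `record_variants!((self, e, e.kind, Id::None, ast, Expr, ExprKind), [Lit, Call])` expands to roughly the match below (the real call sites list every variant of the enum, so the match stays exhaustive):

    match e.kind {
        ast::ExprKind::Lit { .. } => self.record_variant("Expr", "Lit", Id::None, e),
        ast::ExprKind::Call { .. } => self.record_variant("Expr", "Call", Id::None, e),
    }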
impl<'v> hir_visit::Visitor<'v> for StatCollector<'v> {
fn visit_param(&mut self, param: &'v hir::Param<'v>) {
self.record("Param", Id::Node(param.hir_id), param);
@@ -122,12 +216,46 @@ impl<'v> hir_visit::Visitor<'v> for StatCollector<'v> {
}
fn visit_item(&mut self, i: &'v hir::Item<'v>) {
- self.record("Item", Id::Node(i.hir_id()), i);
+ record_variants!(
+ (self, i, i.kind, Id::Node(i.hir_id()), hir, Item, ItemKind),
+ [
+ ExternCrate,
+ Use,
+ Static,
+ Const,
+ Fn,
+ Macro,
+ Mod,
+ ForeignMod,
+ GlobalAsm,
+ TyAlias,
+ OpaqueTy,
+ Enum,
+ Struct,
+ Union,
+ Trait,
+ TraitAlias,
+ Impl
+ ]
+ );
hir_visit::walk_item(self, i)
}
+ fn visit_body(&mut self, b: &'v hir::Body<'v>) {
+ self.record("Body", Id::None, b);
+ hir_visit::walk_body(self, b);
+ }
+
+ fn visit_mod(&mut self, m: &'v hir::Mod<'v>, _s: Span, n: HirId) {
+ self.record("Mod", Id::None, m);
+ hir_visit::walk_mod(self, m, n)
+ }
+
fn visit_foreign_item(&mut self, i: &'v hir::ForeignItem<'v>) {
- self.record("ForeignItem", Id::Node(i.hir_id()), i);
+ record_variants!(
+ (self, i, i.kind, Id::Node(i.hir_id()), hir, ForeignItem, ForeignItemKind),
+ [Fn, Static, Type]
+ );
hir_visit::walk_foreign_item(self, i)
}
@@ -142,7 +270,10 @@ impl<'v> hir_visit::Visitor<'v> for StatCollector<'v> {
}
fn visit_stmt(&mut self, s: &'v hir::Stmt<'v>) {
- self.record("Stmt", Id::Node(s.hir_id), s);
+ record_variants!(
+ (self, s, s.kind, Id::Node(s.hir_id), hir, Stmt, StmtKind),
+ [Local, Item, Expr, Semi]
+ );
hir_visit::walk_stmt(self, s)
}
@@ -152,50 +283,135 @@ impl<'v> hir_visit::Visitor<'v> for StatCollector<'v> {
}
fn visit_pat(&mut self, p: &'v hir::Pat<'v>) {
- self.record("Pat", Id::Node(p.hir_id), p);
+ record_variants!(
+ (self, p, p.kind, Id::Node(p.hir_id), hir, Pat, PatKind),
+ [Wild, Binding, Struct, TupleStruct, Or, Path, Tuple, Box, Ref, Lit, Range, Slice]
+ );
hir_visit::walk_pat(self, p)
}
- fn visit_expr(&mut self, ex: &'v hir::Expr<'v>) {
- self.record("Expr", Id::Node(ex.hir_id), ex);
- hir_visit::walk_expr(self, ex)
+ fn visit_pat_field(&mut self, f: &'v hir::PatField<'v>) {
+ self.record("PatField", Id::Node(f.hir_id), f);
+ hir_visit::walk_pat_field(self, f)
+ }
+
+ fn visit_expr(&mut self, e: &'v hir::Expr<'v>) {
+ record_variants!(
+ (self, e, e.kind, Id::Node(e.hir_id), hir, Expr, ExprKind),
+ [
+ Box, ConstBlock, Array, Call, MethodCall, Tup, Binary, Unary, Lit, Cast, Type,
+ DropTemps, Let, If, Loop, Match, Closure, Block, Assign, AssignOp, Field, Index,
+ Path, AddrOf, Break, Continue, Ret, InlineAsm, Struct, Repeat, Yield, Err
+ ]
+ );
+ hir_visit::walk_expr(self, e)
+ }
+
+ fn visit_let_expr(&mut self, lex: &'v hir::Let<'v>) {
+ self.record("Let", Id::Node(lex.hir_id), lex);
+ hir_visit::walk_let_expr(self, lex)
+ }
+
+ fn visit_expr_field(&mut self, f: &'v hir::ExprField<'v>) {
+ self.record("ExprField", Id::Node(f.hir_id), f);
+ hir_visit::walk_expr_field(self, f)
}
fn visit_ty(&mut self, t: &'v hir::Ty<'v>) {
- self.record("Ty", Id::Node(t.hir_id), t);
+ record_variants!(
+ (self, t, t.kind, Id::Node(t.hir_id), hir, Ty, TyKind),
+ [
+ Slice,
+ Array,
+ Ptr,
+ Rptr,
+ BareFn,
+ Never,
+ Tup,
+ Path,
+ OpaqueDef,
+ TraitObject,
+ Typeof,
+ Infer,
+ Err
+ ]
+ );
hir_visit::walk_ty(self, t)
}
+ fn visit_generic_param(&mut self, p: &'v hir::GenericParam<'v>) {
+ self.record("GenericParam", Id::Node(p.hir_id), p);
+ hir_visit::walk_generic_param(self, p)
+ }
+
+ fn visit_generics(&mut self, g: &'v hir::Generics<'v>) {
+ self.record("Generics", Id::None, g);
+ hir_visit::walk_generics(self, g)
+ }
+
+ fn visit_where_predicate(&mut self, p: &'v hir::WherePredicate<'v>) {
+ record_variants!(
+ (self, p, p, Id::None, hir, WherePredicate, WherePredicate),
+ [BoundPredicate, RegionPredicate, EqPredicate]
+ );
+ hir_visit::walk_where_predicate(self, p)
+ }
+
fn visit_fn(
&mut self,
fk: hir_visit::FnKind<'v>,
fd: &'v hir::FnDecl<'v>,
b: hir::BodyId,
- s: Span,
+ _: Span,
id: hir::HirId,
) {
self.record("FnDecl", Id::None, fd);
- hir_visit::walk_fn(self, fk, fd, b, s, id)
+ hir_visit::walk_fn(self, fk, fd, b, id)
}
- fn visit_where_predicate(&mut self, predicate: &'v hir::WherePredicate<'v>) {
- self.record("WherePredicate", Id::None, predicate);
- hir_visit::walk_where_predicate(self, predicate)
+ fn visit_use(&mut self, p: &'v hir::Path<'v>, hir_id: hir::HirId) {
+ // This is `visit_use`, but the type is `Path` so record it that way.
+ self.record("Path", Id::None, p);
+ hir_visit::walk_use(self, p, hir_id)
}
fn visit_trait_item(&mut self, ti: &'v hir::TraitItem<'v>) {
- self.record("TraitItem", Id::Node(ti.hir_id()), ti);
+ record_variants!(
+ (self, ti, ti.kind, Id::Node(ti.hir_id()), hir, TraitItem, TraitItemKind),
+ [Const, Fn, Type]
+ );
hir_visit::walk_trait_item(self, ti)
}
+ fn visit_trait_item_ref(&mut self, ti: &'v hir::TraitItemRef) {
+ self.record("TraitItemRef", Id::Node(ti.id.hir_id()), ti);
+ hir_visit::walk_trait_item_ref(self, ti)
+ }
+
fn visit_impl_item(&mut self, ii: &'v hir::ImplItem<'v>) {
- self.record("ImplItem", Id::Node(ii.hir_id()), ii);
+ record_variants!(
+ (self, ii, ii.kind, Id::Node(ii.hir_id()), hir, ImplItem, ImplItemKind),
+ [Const, Fn, Type]
+ );
hir_visit::walk_impl_item(self, ii)
}
- fn visit_param_bound(&mut self, bounds: &'v hir::GenericBound<'v>) {
- self.record("GenericBound", Id::None, bounds);
- hir_visit::walk_param_bound(self, bounds)
+ fn visit_foreign_item_ref(&mut self, fi: &'v hir::ForeignItemRef) {
+ self.record("ForeignItemRef", Id::Node(fi.id.hir_id()), fi);
+ hir_visit::walk_foreign_item_ref(self, fi)
+ }
+
+ fn visit_impl_item_ref(&mut self, ii: &'v hir::ImplItemRef) {
+ self.record("ImplItemRef", Id::Node(ii.id.hir_id()), ii);
+ hir_visit::walk_impl_item_ref(self, ii)
+ }
+
+ fn visit_param_bound(&mut self, b: &'v hir::GenericBound<'v>) {
+ record_variants!(
+ (self, b, b, Id::None, hir, GenericBound, GenericBound),
+ [Trait, LangItemTrait, Outlives]
+ );
+ hir_visit::walk_param_bound(self, b)
}
fn visit_field_def(&mut self, s: &'v hir::FieldDef<'v>) {
@@ -203,14 +419,22 @@ impl<'v> hir_visit::Visitor<'v> for StatCollector<'v> {
hir_visit::walk_field_def(self, s)
}
- fn visit_variant(
- &mut self,
- v: &'v hir::Variant<'v>,
- g: &'v hir::Generics<'v>,
- item_id: hir::HirId,
- ) {
+ fn visit_variant(&mut self, v: &'v hir::Variant<'v>) {
self.record("Variant", Id::None, v);
- hir_visit::walk_variant(self, v, g, item_id)
+ hir_visit::walk_variant(self, v)
+ }
+
+ fn visit_generic_arg(&mut self, ga: &'v hir::GenericArg<'v>) {
+ record_variants!(
+ (self, ga, ga, Id::Node(ga.hir_id()), hir, GenericArg, GenericArg),
+ [Lifetime, Type, Const, Infer]
+ );
+ match ga {
+ hir::GenericArg::Lifetime(lt) => self.visit_lifetime(lt),
+ hir::GenericArg::Type(ty) => self.visit_ty(ty),
+ hir::GenericArg::Const(ct) => self.visit_anon_const(&ct.value),
+ hir::GenericArg::Infer(inf) => self.visit_infer(inf),
+ }
}
fn visit_lifetime(&mut self, lifetime: &'v hir::Lifetime) {
@@ -218,19 +442,19 @@ impl<'v> hir_visit::Visitor<'v> for StatCollector<'v> {
hir_visit::walk_lifetime(self, lifetime)
}
- fn visit_qpath(&mut self, qpath: &'v hir::QPath<'v>, id: hir::HirId, span: Span) {
- self.record("QPath", Id::None, qpath);
- hir_visit::walk_qpath(self, qpath, id, span)
- }
-
fn visit_path(&mut self, path: &'v hir::Path<'v>, _id: hir::HirId) {
self.record("Path", Id::None, path);
hir_visit::walk_path(self, path)
}
- fn visit_path_segment(&mut self, path_span: Span, path_segment: &'v hir::PathSegment<'v>) {
+ fn visit_path_segment(&mut self, path_segment: &'v hir::PathSegment<'v>) {
self.record("PathSegment", Id::None, path_segment);
- hir_visit::walk_path_segment(self, path_span, path_segment)
+ hir_visit::walk_path_segment(self, path_segment)
+ }
+
+ fn visit_generic_args(&mut self, ga: &'v hir::GenericArgs<'v>) {
+ self.record("GenericArgs", Id::None, ga);
+ hir_visit::walk_generic_args(self, ga)
}
fn visit_assoc_type_binding(&mut self, type_binding: &'v hir::TypeBinding<'v>) {
@@ -241,16 +465,45 @@ impl<'v> hir_visit::Visitor<'v> for StatCollector<'v> {
fn visit_attribute(&mut self, attr: &'v ast::Attribute) {
self.record("Attribute", Id::Attr(attr.id), attr);
}
+
+ fn visit_inline_asm(&mut self, asm: &'v hir::InlineAsm<'v>, id: HirId) {
+ self.record("InlineAsm", Id::None, asm);
+ hir_visit::walk_inline_asm(self, asm, id);
+ }
}
impl<'v> ast_visit::Visitor<'v> for StatCollector<'v> {
fn visit_foreign_item(&mut self, i: &'v ast::ForeignItem) {
- self.record("ForeignItem", Id::None, i);
+ record_variants!(
+ (self, i, i.kind, Id::None, ast, ForeignItem, ForeignItemKind),
+ [Static, Fn, TyAlias, MacCall]
+ );
ast_visit::walk_foreign_item(self, i)
}
fn visit_item(&mut self, i: &'v ast::Item) {
- self.record("Item", Id::None, i);
+ record_variants!(
+ (self, i, i.kind, Id::None, ast, Item, ItemKind),
+ [
+ ExternCrate,
+ Use,
+ Static,
+ Const,
+ Fn,
+ Mod,
+ ForeignMod,
+ GlobalAsm,
+ TyAlias,
+ Enum,
+ Struct,
+ Union,
+ Trait,
+ TraitAlias,
+ Impl,
+ MacCall,
+ MacroDef
+ ]
+ );
ast_visit::walk_item(self, i)
}
@@ -265,47 +518,119 @@ impl<'v> ast_visit::Visitor<'v> for StatCollector<'v> {
}
fn visit_stmt(&mut self, s: &'v ast::Stmt) {
- self.record("Stmt", Id::None, s);
+ record_variants!(
+ (self, s, s.kind, Id::None, ast, Stmt, StmtKind),
+ [Local, Item, Expr, Semi, Empty, MacCall]
+ );
ast_visit::walk_stmt(self, s)
}
+ fn visit_param(&mut self, p: &'v ast::Param) {
+ self.record("Param", Id::None, p);
+ ast_visit::walk_param(self, p)
+ }
+
fn visit_arm(&mut self, a: &'v ast::Arm) {
self.record("Arm", Id::None, a);
ast_visit::walk_arm(self, a)
}
fn visit_pat(&mut self, p: &'v ast::Pat) {
- self.record("Pat", Id::None, p);
+ record_variants!(
+ (self, p, p.kind, Id::None, ast, Pat, PatKind),
+ [
+ Wild,
+ Ident,
+ Struct,
+ TupleStruct,
+ Or,
+ Path,
+ Tuple,
+ Box,
+ Ref,
+ Lit,
+ Range,
+ Slice,
+ Rest,
+ Paren,
+ MacCall
+ ]
+ );
ast_visit::walk_pat(self, p)
}
- fn visit_expr(&mut self, ex: &'v ast::Expr) {
- self.record("Expr", Id::None, ex);
- ast_visit::walk_expr(self, ex)
+ fn visit_expr(&mut self, e: &'v ast::Expr) {
+ record_variants!(
+ (self, e, e.kind, Id::None, ast, Expr, ExprKind),
+ [
+ Box, Array, ConstBlock, Call, MethodCall, Tup, Binary, Unary, Lit, Cast, Type, Let,
+ If, While, ForLoop, Loop, Match, Closure, Block, Async, Await, TryBlock, Assign,
+ AssignOp, Field, Index, Range, Underscore, Path, AddrOf, Break, Continue, Ret,
+ InlineAsm, MacCall, Struct, Repeat, Paren, Try, Yield, Yeet, Err
+ ]
+ );
+ ast_visit::walk_expr(self, e)
}
fn visit_ty(&mut self, t: &'v ast::Ty) {
- self.record("Ty", Id::None, t);
+ record_variants!(
+ (self, t, t.kind, Id::None, ast, Ty, TyKind),
+ [
+ Slice,
+ Array,
+ Ptr,
+ Rptr,
+ BareFn,
+ Never,
+ Tup,
+ Path,
+ TraitObject,
+ ImplTrait,
+ Paren,
+ Typeof,
+ Infer,
+ ImplicitSelf,
+ MacCall,
+ Err,
+ CVarArgs
+ ]
+ );
+
ast_visit::walk_ty(self, t)
}
- fn visit_fn(&mut self, fk: ast_visit::FnKind<'v>, s: Span, _: NodeId) {
+ fn visit_generic_param(&mut self, g: &'v ast::GenericParam) {
+ self.record("GenericParam", Id::None, g);
+ ast_visit::walk_generic_param(self, g)
+ }
+
+ fn visit_where_predicate(&mut self, p: &'v ast::WherePredicate) {
+ record_variants!(
+ (self, p, p, Id::None, ast, WherePredicate, WherePredicate),
+ [BoundPredicate, RegionPredicate, EqPredicate]
+ );
+ ast_visit::walk_where_predicate(self, p)
+ }
+
+ fn visit_fn(&mut self, fk: ast_visit::FnKind<'v>, _: Span, _: NodeId) {
self.record("FnDecl", Id::None, fk.decl());
- ast_visit::walk_fn(self, fk, s)
+ ast_visit::walk_fn(self, fk)
}
- fn visit_assoc_item(&mut self, item: &'v ast::AssocItem, ctxt: ast_visit::AssocCtxt) {
- let label = match ctxt {
- ast_visit::AssocCtxt::Trait => "TraitItem",
- ast_visit::AssocCtxt::Impl => "ImplItem",
- };
- self.record(label, Id::None, item);
- ast_visit::walk_assoc_item(self, item, ctxt);
+ fn visit_assoc_item(&mut self, i: &'v ast::AssocItem, ctxt: ast_visit::AssocCtxt) {
+ record_variants!(
+ (self, i, i.kind, Id::None, ast, AssocItem, AssocItemKind),
+ [Const, Fn, Type, MacCall]
+ );
+ ast_visit::walk_assoc_item(self, i, ctxt);
}
- fn visit_param_bound(&mut self, bounds: &'v ast::GenericBound, _ctxt: BoundKind) {
- self.record("GenericBound", Id::None, bounds);
- ast_visit::walk_param_bound(self, bounds)
+ fn visit_param_bound(&mut self, b: &'v ast::GenericBound, _ctxt: BoundKind) {
+ record_variants!(
+ (self, b, b, Id::None, ast, GenericBound, GenericBound),
+ [Trait, Outlives]
+ );
+ ast_visit::walk_param_bound(self, b)
}
fn visit_field_def(&mut self, s: &'v ast::FieldDef) {
@@ -318,27 +643,52 @@ impl<'v> ast_visit::Visitor<'v> for StatCollector<'v> {
ast_visit::walk_variant(self, v)
}
- fn visit_lifetime(&mut self, lifetime: &'v ast::Lifetime, _: ast_visit::LifetimeCtxt) {
- self.record("Lifetime", Id::None, lifetime);
- ast_visit::walk_lifetime(self, lifetime)
+ // `UseTree` has one inline use (in `ast::ItemKind::Use`) and one
+ // non-inline use (in `ast::UseTreeKind::Nested`). The former case is more
+ // common, so we don't implement `visit_use_tree` and tolerate the missed
+ // coverage in the latter case.
+
+ // `PathSegment` has one inline use (in `ast::ExprKind::MethodCall`) and
+ // one non-inline use (in `ast::Path::segments`). The latter case is more
+ // common than the former case, so we implement this visitor and tolerate
+ // the double counting in the former case.
+ fn visit_path_segment(&mut self, path_segment: &'v ast::PathSegment) {
+ self.record("PathSegment", Id::None, path_segment);
+ ast_visit::walk_path_segment(self, path_segment)
}
- fn visit_mac_call(&mut self, mac: &'v ast::MacCall) {
- self.record("MacCall", Id::None, mac);
- ast_visit::walk_mac(self, mac)
+ // `GenericArgs` has one inline use (in `ast::AssocConstraint::gen_args`) and one
+ // non-inline use (in `ast::PathSegment::args`). The latter case is more
+ // common, so we implement `visit_generic_args` and tolerate the double
+ // counting in the former case.
+ fn visit_generic_args(&mut self, g: &'v ast::GenericArgs) {
+ record_variants!(
+ (self, g, g, Id::None, ast, GenericArgs, GenericArgs),
+ [AngleBracketed, Parenthesized]
+ );
+ ast_visit::walk_generic_args(self, g)
}
- fn visit_path_segment(&mut self, path_span: Span, path_segment: &'v ast::PathSegment) {
- self.record("PathSegment", Id::None, path_segment);
- ast_visit::walk_path_segment(self, path_span, path_segment)
+ fn visit_attribute(&mut self, attr: &'v ast::Attribute) {
+ record_variants!(
+ (self, attr, attr.kind, Id::None, ast, Attribute, AttrKind),
+ [Normal, DocComment]
+ );
+ ast_visit::walk_attribute(self, attr)
}
- fn visit_assoc_constraint(&mut self, constraint: &'v ast::AssocConstraint) {
- self.record("AssocConstraint", Id::None, constraint);
- ast_visit::walk_assoc_constraint(self, constraint)
+ fn visit_expr_field(&mut self, f: &'v ast::ExprField) {
+ self.record("ExprField", Id::None, f);
+ ast_visit::walk_expr_field(self, f)
}
- fn visit_attribute(&mut self, attr: &'v ast::Attribute) {
- self.record("Attribute", Id::None, attr);
+ fn visit_crate(&mut self, krate: &'v ast::Crate) {
+ self.record("Crate", Id::None, krate);
+ ast_visit::walk_crate(self, krate)
+ }
+
+ fn visit_inline_asm(&mut self, asm: &'v ast::InlineAsm) {
+ self.record("InlineAsm", Id::None, asm);
+ ast_visit::walk_inline_asm(self, asm)
}
}
diff --git a/compiler/rustc_passes/src/lang_items.rs b/compiler/rustc_passes/src/lang_items.rs
index 79900a90a..df811be2a 100644
--- a/compiler/rustc_passes/src/lang_items.rs
+++ b/compiler/rustc_passes/src/lang_items.rs
@@ -8,9 +8,11 @@
//! * Functions called by the compiler itself.
use crate::check_attr::target_from_impl_item;
+use crate::errors::{
+ DuplicateLangItem, IncorrectTarget, LangItemOnIncorrectTarget, UnknownLangItem,
+};
use crate::weak_lang_items;
-use rustc_errors::{pluralize, struct_span_err};
use rustc_hir as hir;
use rustc_hir::def::DefKind;
use rustc_hir::def_id::DefId;
@@ -18,10 +20,16 @@ use rustc_hir::lang_items::{extract, GenericRequirement, ITEM_REFS};
use rustc_hir::{HirId, LangItem, LanguageItems, Target};
use rustc_middle::ty::TyCtxt;
use rustc_session::cstore::ExternCrate;
-use rustc_span::Span;
+use rustc_span::{symbol::kw::Empty, Span};
use rustc_middle::ty::query::Providers;
+pub(crate) enum Duplicate {
+ Plain,
+ Crate,
+ CrateDepends,
+}
+
struct LanguageItemCollector<'tcx> {
items: LanguageItems,
tcx: TyCtxt<'tcx>,
@@ -34,42 +42,24 @@ impl<'tcx> LanguageItemCollector<'tcx> {
fn check_for_lang(&mut self, actual_target: Target, hir_id: HirId) {
let attrs = self.tcx.hir().attrs(hir_id);
- if let Some((value, span)) = extract(&attrs) {
- match ITEM_REFS.get(&value).cloned() {
+ if let Some((name, span)) = extract(&attrs) {
+ match ITEM_REFS.get(&name).cloned() {
// Known lang item with attribute on correct target.
Some((item_index, expected_target)) if actual_target == expected_target => {
self.collect_item_extended(item_index, hir_id, span);
}
// Known lang item with attribute on incorrect target.
Some((_, expected_target)) => {
- struct_span_err!(
- self.tcx.sess,
+ self.tcx.sess.emit_err(LangItemOnIncorrectTarget {
span,
- E0718,
- "`{}` language item must be applied to a {}",
- value,
+ name,
expected_target,
- )
- .span_label(
- span,
- format!(
- "attribute should be applied to a {}, not a {}",
- expected_target, actual_target,
- ),
- )
- .emit();
+ actual_target,
+ });
}
// Unknown lang item.
_ => {
- struct_span_err!(
- self.tcx.sess,
- span,
- E0522,
- "definition of an unknown language item: `{}`",
- value
- )
- .span_label(span, format!("definition of unknown language item `{}`", value))
- .emit();
+ self.tcx.sess.emit_err(UnknownLangItem { span, name });
}
}
}
@@ -79,74 +69,72 @@ impl<'tcx> LanguageItemCollector<'tcx> {
// Check for duplicates.
if let Some(original_def_id) = self.items.items[item_index] {
if original_def_id != item_def_id {
- let lang_item = LangItem::from_u32(item_index as u32).unwrap();
- let name = lang_item.name();
- let mut err = match self.tcx.hir().span_if_local(item_def_id) {
- Some(span) => struct_span_err!(
- self.tcx.sess,
- span,
- E0152,
- "found duplicate lang item `{}`",
- name
- ),
- None => match self.tcx.extern_crate(item_def_id) {
- Some(ExternCrate { dependency_of, .. }) => {
- self.tcx.sess.struct_err(&format!(
- "duplicate lang item in crate `{}` (which `{}` depends on): `{}`.",
- self.tcx.crate_name(item_def_id.krate),
- self.tcx.crate_name(*dependency_of),
- name
- ))
- }
- _ => self.tcx.sess.struct_err(&format!(
- "duplicate lang item in crate `{}`: `{}`.",
- self.tcx.crate_name(item_def_id.krate),
- name
- )),
- },
+ let local_span = self.tcx.hir().span_if_local(item_def_id);
+ let lang_item_name = LangItem::from_u32(item_index as u32).unwrap().name();
+ let crate_name = self.tcx.crate_name(item_def_id.krate);
+ let mut dependency_of = Empty;
+ let is_local = item_def_id.is_local();
+ let path = if is_local {
+ String::new()
+ } else {
+ self.tcx
+ .crate_extern_paths(item_def_id.krate)
+ .iter()
+ .map(|p| p.display().to_string())
+ .collect::<Vec<_>>()
+ .join(", ")
+ .into()
};
- if let Some(span) = self.tcx.hir().span_if_local(original_def_id) {
- err.span_note(span, "the lang item is first defined here");
+ let first_defined_span = self.tcx.hir().span_if_local(original_def_id);
+ let mut orig_crate_name = Empty;
+ let mut orig_dependency_of = Empty;
+ let orig_is_local = original_def_id.is_local();
+ let orig_path = if orig_is_local {
+ String::new()
} else {
- match self.tcx.extern_crate(original_def_id) {
- Some(ExternCrate { dependency_of, .. }) => {
- err.note(&format!(
- "the lang item is first defined in crate `{}` (which `{}` depends on)",
- self.tcx.crate_name(original_def_id.krate),
- self.tcx.crate_name(*dependency_of)
- ));
- }
- _ => {
- err.note(&format!(
- "the lang item is first defined in crate `{}`.",
- self.tcx.crate_name(original_def_id.krate)
- ));
- }
+ self.tcx
+ .crate_extern_paths(original_def_id.krate)
+ .iter()
+ .map(|p| p.display().to_string())
+ .collect::<Vec<_>>()
+ .join(", ")
+ .into()
+ };
+ if first_defined_span.is_none() {
+ orig_crate_name = self.tcx.crate_name(original_def_id.krate);
+ if let Some(ExternCrate { dependency_of: inner_dependency_of, .. }) =
+ self.tcx.extern_crate(original_def_id)
+ {
+ orig_dependency_of = self.tcx.crate_name(*inner_dependency_of);
}
- let mut note_def = |which, def_id: DefId| {
- let crate_name = self.tcx.crate_name(def_id.krate);
- let note = if def_id.is_local() {
- format!("{} definition in the local crate (`{}`)", which, crate_name)
- } else {
- let paths: Vec<_> = self
- .tcx
- .crate_extern_paths(def_id.krate)
- .iter()
- .map(|p| p.display().to_string())
- .collect();
- format!(
- "{} definition in `{}` loaded from {}",
- which,
- crate_name,
- paths.join(", ")
- )
- };
- err.note(&note);
- };
- note_def("first", original_def_id);
- note_def("second", item_def_id);
}
- err.emit();
+
+ let duplicate = if local_span.is_some() {
+ Duplicate::Plain
+ } else {
+ match self.tcx.extern_crate(item_def_id) {
+ Some(ExternCrate { dependency_of: inner_dependency_of, .. }) => {
+ dependency_of = self.tcx.crate_name(*inner_dependency_of);
+ Duplicate::CrateDepends
+ }
+ _ => Duplicate::Crate,
+ }
+ };
+
+ self.tcx.sess.emit_err(DuplicateLangItem {
+ local_span,
+ lang_item_name,
+ crate_name,
+ dependency_of,
+ is_local,
+ path,
+ first_defined_span,
+ orig_crate_name,
+ orig_dependency_of,
+ orig_is_local,
+ orig_path,
+ duplicate,
+ });
}
}
@@ -179,41 +167,30 @@ impl<'tcx> LanguageItemCollector<'tcx> {
None => (0, *item_span),
};
+ let mut at_least = false;
let required = match lang_item.required_generics() {
- GenericRequirement::Exact(num) if num != actual_num => {
- Some((format!("{}", num), pluralize!(num)))
- }
+ GenericRequirement::Exact(num) if num != actual_num => Some(num),
GenericRequirement::Minimum(num) if actual_num < num => {
- Some((format!("at least {}", num), pluralize!(num)))
- }
+ at_least = true;
+ Some(num)
+ }
// If the number matches, or there is no requirement, handle it normally
_ => None,
};
- if let Some((range_str, pluralized)) = required {
+ if let Some(num) = required {
// We are issuing E0718 "incorrect target" here, because while the
// item kind of the target is correct, the target is still wrong
// because of the wrong number of generic arguments.
- struct_span_err!(
- self.tcx.sess,
+ self.tcx.sess.emit_err(IncorrectTarget {
span,
- E0718,
- "`{}` language item must be applied to a {} with {} generic argument{}",
- name,
- kind.descr(),
- range_str,
- pluralized,
- )
- .span_label(
generics_span,
- format!(
- "this {} has {} generic argument{}",
- kind.descr(),
- actual_num,
- pluralize!(actual_num),
- ),
- )
- .emit();
+ name: name.as_str(),
+ kind: kind.descr(),
+ num,
+ actual_num,
+ at_least,
+ });
// return early to not collect the lang item
return;
@@ -240,9 +217,9 @@ fn get_lang_items(tcx: TyCtxt<'_>, (): ()) -> LanguageItems {
let crate_items = tcx.hir_crate_items(());
for id in crate_items.items() {
- collector.check_for_lang(Target::from_def_kind(tcx.def_kind(id.def_id)), id.hir_id());
+ collector.check_for_lang(Target::from_def_kind(tcx.def_kind(id.owner_id)), id.hir_id());
- if matches!(tcx.def_kind(id.def_id), DefKind::Enum) {
+ if matches!(tcx.def_kind(id.owner_id), DefKind::Enum) {
let item = tcx.hir().item(id);
if let hir::ItemKind::Enum(def, ..) = &item.kind {
for variant in def.variants {
diff --git a/compiler/rustc_passes/src/layout_test.rs b/compiler/rustc_passes/src/layout_test.rs
index fd03f6571..5322baee7 100644
--- a/compiler/rustc_passes/src/layout_test.rs
+++ b/compiler/rustc_passes/src/layout_test.rs
@@ -3,20 +3,23 @@ use rustc_hir::def::DefKind;
use rustc_hir::def_id::LocalDefId;
use rustc_middle::ty::layout::{HasParamEnv, HasTyCtxt, LayoutError, LayoutOfHelpers, TyAndLayout};
use rustc_middle::ty::{ParamEnv, Ty, TyCtxt};
+use rustc_span::source_map::Spanned;
use rustc_span::symbol::sym;
use rustc_span::Span;
use rustc_target::abi::{HasDataLayout, TargetDataLayout};
+use crate::errors::{Abi, Align, HomogeneousAggregate, LayoutOf, Size, UnrecognizedField};
+
pub fn test_layout(tcx: TyCtxt<'_>) {
if tcx.features().rustc_attrs {
// if the `rustc_attrs` feature is not enabled, don't bother testing layout
for id in tcx.hir().items() {
if matches!(
- tcx.def_kind(id.def_id),
+ tcx.def_kind(id.owner_id),
DefKind::TyAlias | DefKind::Enum | DefKind::Struct | DefKind::Union
) {
- for attr in tcx.get_attrs(id.def_id.to_def_id(), sym::rustc_layout) {
- dump_layout_of(tcx, id.def_id, attr);
+ for attr in tcx.get_attrs(id.owner_id.to_def_id(), sym::rustc_layout) {
+ dump_layout_of(tcx, id.owner_id.def_id, attr);
}
}
}
@@ -35,62 +38,64 @@ fn dump_layout_of<'tcx>(tcx: TyCtxt<'tcx>, item_def_id: LocalDefId, attr: &Attri
for meta_item in meta_items {
match meta_item.name_or_empty() {
sym::abi => {
- tcx.sess.span_err(
- tcx.def_span(item_def_id.to_def_id()),
- &format!("abi: {:?}", ty_layout.abi),
- );
+ tcx.sess.emit_err(Abi {
+ span: tcx.def_span(item_def_id.to_def_id()),
+ abi: format!("{:?}", ty_layout.abi),
+ });
}
sym::align => {
- tcx.sess.span_err(
- tcx.def_span(item_def_id.to_def_id()),
- &format!("align: {:?}", ty_layout.align),
- );
+ tcx.sess.emit_err(Align {
+ span: tcx.def_span(item_def_id.to_def_id()),
+ align: format!("{:?}", ty_layout.align),
+ });
}
sym::size => {
- tcx.sess.span_err(
- tcx.def_span(item_def_id.to_def_id()),
- &format!("size: {:?}", ty_layout.size),
- );
+ tcx.sess.emit_err(Size {
+ span: tcx.def_span(item_def_id.to_def_id()),
+ size: format!("{:?}", ty_layout.size),
+ });
}
sym::homogeneous_aggregate => {
- tcx.sess.span_err(
- tcx.def_span(item_def_id.to_def_id()),
- &format!(
- "homogeneous_aggregate: {:?}",
- ty_layout.homogeneous_aggregate(&UnwrapLayoutCx { tcx, param_env }),
+ tcx.sess.emit_err(HomogeneousAggregate {
+ span: tcx.def_span(item_def_id.to_def_id()),
+ homogeneous_aggregate: format!(
+ "{:?}",
+ ty_layout.homogeneous_aggregate(&UnwrapLayoutCx { tcx, param_env })
),
- );
+ });
}
sym::debug => {
- let normalized_ty = tcx.normalize_erasing_regions(
- param_env.with_reveal_all_normalized(tcx),
- ty,
- );
- tcx.sess.span_err(
- tcx.def_span(item_def_id.to_def_id()),
- &format!("layout_of({:?}) = {:#?}", normalized_ty, *ty_layout),
+ let normalized_ty = format!(
+ "{:?}",
+ tcx.normalize_erasing_regions(
+ param_env.with_reveal_all_normalized(tcx),
+ ty,
+ )
);
+ let ty_layout = format!("{:#?}", *ty_layout);
+ tcx.sess.emit_err(LayoutOf {
+ span: tcx.def_span(item_def_id.to_def_id()),
+ normalized_ty,
+ ty_layout,
+ });
}
name => {
- tcx.sess.span_err(
- meta_item.span(),
- &format!("unrecognized field name `{}`", name),
- );
+ tcx.sess.emit_err(UnrecognizedField { span: meta_item.span(), name });
}
}
}
}
Err(layout_error) => {
- tcx.sess.span_err(
- tcx.def_span(item_def_id.to_def_id()),
- &format!("layout error: {:?}", layout_error),
- );
+ tcx.sess.emit_fatal(Spanned {
+ node: layout_error,
+ span: tcx.def_span(item_def_id.to_def_id()),
+ });
}
}
}
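
Note: the hunks above only change how results are reported (structured `emit_err` structs from `crate::errors` instead of ad-hoc `span_err` strings); the attribute driving the pass is untouched. A minimal, nightly-only sketch of how it is exercised — the item names are made up, while the attribute and its `abi`/`align`/`size`/`homogeneous_aggregate`/`debug` arguments come straight from the match above:

    #![feature(rustc_attrs)]

    // Each requested property is deliberately reported back as a compiler
    // error; UI tests compare those errors against expected output.
    #[rustc_layout(size, align, abi)]
    struct Pair(u32, u16);

    #[rustc_layout(debug)]
    enum Either { A(u64), B(bool) }

    fn main() {}
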
diff --git a/compiler/rustc_passes/src/lib.rs b/compiler/rustc_passes/src/lib.rs
index 7b2f83958..15f60f626 100644
--- a/compiler/rustc_passes/src/lib.rs
+++ b/compiler/rustc_passes/src/lib.rs
@@ -5,10 +5,11 @@
//! This API is completely unstable and subject to change.
#![allow(rustc::potential_query_instability)]
+#![deny(rustc::untranslatable_diagnostic)]
+#![deny(rustc::diagnostic_outside_of_impl)]
#![doc(html_root_url = "https://doc.rust-lang.org/nightly/nightly-rustc/")]
#![feature(iter_intersperse)]
#![feature(let_chains)]
-#![feature(let_else)]
#![feature(map_try_insert)]
#![feature(min_specialization)]
#![feature(try_blocks)]
diff --git a/compiler/rustc_passes/src/lib_features.rs b/compiler/rustc_passes/src/lib_features.rs
index e05994f13..b5843c0ae 100644
--- a/compiler/rustc_passes/src/lib_features.rs
+++ b/compiler/rustc_passes/src/lib_features.rs
@@ -5,7 +5,7 @@
//! collect them instead.
use rustc_ast::{Attribute, MetaItemKind};
-use rustc_errors::struct_span_err;
+use rustc_attr::{rust_version_symbol, VERSION_PLACEHOLDER};
use rustc_hir::intravisit::Visitor;
use rustc_middle::hir::nested_filter;
use rustc_middle::middle::lib_features::LibFeatures;
@@ -14,6 +14,8 @@ use rustc_middle::ty::TyCtxt;
use rustc_span::symbol::Symbol;
use rustc_span::{sym, Span};
+use crate::errors::{FeaturePreviouslyDeclared, FeatureStableTwice};
+
fn new_lib_features() -> LibFeatures {
LibFeatures { stable: Default::default(), unstable: Default::default() }
}
@@ -29,11 +31,16 @@ impl<'tcx> LibFeatureCollector<'tcx> {
}
fn extract(&self, attr: &Attribute) -> Option<(Symbol, Option<Symbol>, Span)> {
- let stab_attrs =
- [sym::stable, sym::unstable, sym::rustc_const_stable, sym::rustc_const_unstable];
+ let stab_attrs = [
+ sym::stable,
+ sym::unstable,
+ sym::rustc_const_stable,
+ sym::rustc_const_unstable,
+ sym::rustc_default_body_unstable,
+ ];
// Find a stability attribute: one of #[stable(…)], #[unstable(…)],
- // #[rustc_const_stable(…)], or #[rustc_const_unstable(…)].
+ // #[rustc_const_stable(…)], #[rustc_const_unstable(…)] or #[rustc_default_body_unstable].
if let Some(stab_attr) = stab_attrs.iter().find(|stab_attr| attr.has_name(**stab_attr)) {
let meta_kind = attr.meta_kind();
if let Some(MetaItemKind::List(ref metas)) = meta_kind {
@@ -49,12 +56,21 @@ impl<'tcx> LibFeatureCollector<'tcx> {
}
}
}
+
+ if let Some(s) = since && s.as_str() == VERSION_PLACEHOLDER {
+ since = Some(rust_version_symbol());
+ }
+
if let Some(feature) = feature {
// This additional check for stability is to make sure we
// don't emit additional, irrelevant errors for malformed
// attributes.
- let is_unstable =
- matches!(*stab_attr, sym::unstable | sym::rustc_const_unstable);
+ let is_unstable = matches!(
+ *stab_attr,
+ sym::unstable
+ | sym::rustc_const_unstable
+ | sym::rustc_default_body_unstable
+ );
if since.is_some() || is_unstable {
return Some((feature, since, attr.span));
}
@@ -77,14 +93,12 @@ impl<'tcx> LibFeatureCollector<'tcx> {
(Some(since), _, false) => {
if let Some((prev_since, _)) = self.lib_features.stable.get(&feature) {
if *prev_since != since {
- self.span_feature_error(
+ self.tcx.sess.emit_err(FeatureStableTwice {
span,
- &format!(
- "feature `{}` is declared stable since {}, \
- but was previously declared stable since {}",
- feature, since, prev_since,
- ),
- );
+ feature,
+ since,
+ prev_since: *prev_since,
+ });
return;
}
}
@@ -95,22 +109,17 @@ impl<'tcx> LibFeatureCollector<'tcx> {
self.lib_features.unstable.insert(feature, span);
}
(Some(_), _, true) | (None, true, _) => {
- self.span_feature_error(
+ let declared = if since.is_some() { "stable" } else { "unstable" };
+ let prev_declared = if since.is_none() { "stable" } else { "unstable" };
+ self.tcx.sess.emit_err(FeaturePreviouslyDeclared {
span,
- &format!(
- "feature `{}` is declared {}, but was previously declared {}",
- feature,
- if since.is_some() { "stable" } else { "unstable" },
- if since.is_none() { "stable" } else { "unstable" },
- ),
- );
+ feature,
+ declared,
+ prev_declared,
+ });
}
}
}
-
- fn span_feature_error(&self, span: Span, msg: &str) {
- struct_span_err!(self.tcx.sess, span, E0711, "{}", &msg).emit();
- }
}
impl<'tcx> Visitor<'tcx> for LibFeatureCollector<'tcx> {
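
Note: for context on the placeholder branch added above, library code writes a placeholder in `since` until a release number is assigned, and the collector swaps in the running compiler's version via `rust_version_symbol()`. Roughly this substitution in isolation — a standalone sketch, with `"CURRENT_RUSTC_VERSION"` assumed to be the value of `VERSION_PLACEHOLDER`:

    // Sketch only; not the actual rustc code.
    fn resolve_since(since: Option<&str>, compiler_version: &str) -> Option<String> {
        match since {
            // The placeholder becomes the version of the compiler currently
            // being built, mirroring `rust_version_symbol()`.
            Some("CURRENT_RUSTC_VERSION") => Some(compiler_version.to_string()),
            other => other.map(str::to_string),
        }
    }
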
diff --git a/compiler/rustc_passes/src/liveness.rs b/compiler/rustc_passes/src/liveness.rs
index 461dd52b9..c6fe40f72 100644
--- a/compiler/rustc_passes/src/liveness.rs
+++ b/compiler/rustc_passes/src/liveness.rs
@@ -89,16 +89,15 @@ use rustc_data_structures::fx::FxIndexMap;
use rustc_errors::Applicability;
use rustc_hir as hir;
use rustc_hir::def::*;
-use rustc_hir::def_id::LocalDefId;
+use rustc_hir::def_id::{DefId, LocalDefId};
use rustc_hir::intravisit::{self, Visitor};
use rustc_hir::{Expr, HirId, HirIdMap, HirIdSet};
use rustc_index::vec::IndexVec;
-use rustc_middle::hir::nested_filter;
use rustc_middle::ty::query::Providers;
use rustc_middle::ty::{self, DefIdTree, RootVariableMinCaptureList, Ty, TyCtxt};
use rustc_session::lint;
use rustc_span::symbol::{kw, sym, Symbol};
-use rustc_span::Span;
+use rustc_span::{BytePos, Span};
use std::collections::VecDeque;
use std::io;
@@ -139,12 +138,54 @@ fn live_node_kind_to_string(lnk: LiveNodeKind, tcx: TyCtxt<'_>) -> String {
}
}
-fn check_mod_liveness(tcx: TyCtxt<'_>, module_def_id: LocalDefId) {
- tcx.hir().visit_item_likes_in_module(module_def_id, &mut IrMaps::new(tcx));
+fn check_liveness(tcx: TyCtxt<'_>, def_id: DefId) {
+ let local_def_id = match def_id.as_local() {
+ None => return,
+ Some(def_id) => def_id,
+ };
+
+ // Don't run unused pass for #[derive()]
+ let parent = tcx.local_parent(local_def_id);
+ if let DefKind::Impl = tcx.def_kind(parent)
+ && tcx.has_attr(parent.to_def_id(), sym::automatically_derived)
+ {
+ return;
+ }
+
+ // Don't run unused pass for #[naked]
+ if tcx.has_attr(def_id, sym::naked) {
+ return;
+ }
+
+ let mut maps = IrMaps::new(tcx);
+ let body_id = tcx.hir().body_owned_by(local_def_id);
+ let hir_id = tcx.hir().body_owner(body_id);
+ let body = tcx.hir().body(body_id);
+
+ if let Some(upvars) = tcx.upvars_mentioned(def_id) {
+ for &var_hir_id in upvars.keys() {
+ let var_name = tcx.hir().name(var_hir_id);
+ maps.add_variable(Upvar(var_hir_id, var_name));
+ }
+ }
+
+ // gather up the various local variables, significant expressions,
+ // and so forth:
+ maps.visit_body(body);
+
+ // compute liveness
+ let mut lsets = Liveness::new(&mut maps, local_def_id);
+ let entry_ln = lsets.compute(&body, hir_id);
+ lsets.log_liveness(entry_ln, body_id.hir_id);
+
+ // check for various error conditions
+ lsets.visit_body(body);
+ lsets.warn_about_unused_upvars(entry_ln);
+ lsets.warn_about_unused_args(body, entry_ln);
}
pub fn provide(providers: &mut Providers) {
- *providers = Providers { check_mod_liveness, ..*providers };
+ *providers = Providers { check_liveness, ..*providers };
}
// ______________________________________________________________________
@@ -188,6 +229,19 @@ enum VarKind {
Upvar(HirId, Symbol),
}
+struct CollectLitsVisitor<'tcx> {
+ lit_exprs: Vec<&'tcx hir::Expr<'tcx>>,
+}
+
+impl<'tcx> Visitor<'tcx> for CollectLitsVisitor<'tcx> {
+ fn visit_expr(&mut self, expr: &'tcx Expr<'tcx>) {
+ if let hir::ExprKind::Lit(_) = expr.kind {
+ self.lit_exprs.push(expr);
+ }
+ intravisit::walk_expr(self, expr);
+ }
+}
+
struct IrMaps<'tcx> {
tcx: TyCtxt<'tcx>,
live_node_map: HirIdMap<LiveNode>,
@@ -316,56 +370,6 @@ impl<'tcx> IrMaps<'tcx> {
}
impl<'tcx> Visitor<'tcx> for IrMaps<'tcx> {
- type NestedFilter = nested_filter::OnlyBodies;
-
- fn nested_visit_map(&mut self) -> Self::Map {
- self.tcx.hir()
- }
-
- fn visit_body(&mut self, body: &'tcx hir::Body<'tcx>) {
- debug!("visit_body {:?}", body.id());
-
- // swap in a new set of IR maps for this body
- let mut maps = IrMaps::new(self.tcx);
- let hir_id = maps.tcx.hir().body_owner(body.id());
- let local_def_id = maps.tcx.hir().local_def_id(hir_id);
- let def_id = local_def_id.to_def_id();
-
- // Don't run unused pass for #[derive()]
- let parent = self.tcx.local_parent(local_def_id);
- if let DefKind::Impl = self.tcx.def_kind(parent)
- && self.tcx.has_attr(parent.to_def_id(), sym::automatically_derived)
- {
- return;
- }
-
- // Don't run unused pass for #[naked]
- if self.tcx.has_attr(def_id, sym::naked) {
- return;
- }
-
- if let Some(upvars) = maps.tcx.upvars_mentioned(def_id) {
- for &var_hir_id in upvars.keys() {
- let var_name = maps.tcx.hir().name(var_hir_id);
- maps.add_variable(Upvar(var_hir_id, var_name));
- }
- }
-
- // gather up the various local variables, significant expressions,
- // and so forth:
- intravisit::walk_body(&mut maps, body);
-
- // compute liveness
- let mut lsets = Liveness::new(&mut maps, local_def_id);
- let entry_ln = lsets.compute(&body, hir_id);
- lsets.log_liveness(entry_ln, body.id().hir_id);
-
- // check for various error conditions
- lsets.visit_body(body);
- lsets.warn_about_unused_upvars(entry_ln);
- lsets.warn_about_unused_args(body, entry_ln);
- }
-
fn visit_local(&mut self, local: &'tcx hir::Local<'tcx>) {
self.add_from_pat(&local.pat);
if local.els.is_some() {
@@ -1035,9 +1039,10 @@ impl<'a, 'tcx> Liveness<'a, 'tcx> {
self.propagate_through_expr(&f, succ)
}
- hir::ExprKind::MethodCall(.., ref args, _) => {
+ hir::ExprKind::MethodCall(.., receiver, ref args, _) => {
let succ = self.check_is_ty_uninhabited(expr, succ);
- self.propagate_through_exprs(args, succ)
+ let succ = self.propagate_through_exprs(args, succ);
+ self.propagate_through_expr(receiver, succ)
}
hir::ExprKind::Tup(ref exprs) => self.propagate_through_exprs(exprs, succ),
@@ -1314,14 +1319,14 @@ impl<'a, 'tcx> Liveness<'a, 'tcx> {
// that we do not emit the same warning twice if the uninhabited type
// is indeed `!`.
+ let msg = format!("unreachable {}", descr);
self.ir.tcx.struct_span_lint_hir(
lint::builtin::UNREACHABLE_CODE,
expr_id,
expr_span,
- |lint| {
- let msg = format!("unreachable {}", descr);
- lint.build(&msg)
- .span_label(expr_span, &msg)
+ &msg,
+ |diag| {
+ diag.span_label(expr_span, &msg)
.span_label(orig_span, "any code following this expression is unreachable")
.span_note(
orig_span,
@@ -1330,7 +1335,6 @@ impl<'a, 'tcx> Liveness<'a, 'tcx> {
orig_ty
),
)
- .emit();
},
);
}
@@ -1342,7 +1346,7 @@ impl<'a, 'tcx> Liveness<'a, 'tcx> {
impl<'a, 'tcx> Visitor<'tcx> for Liveness<'a, 'tcx> {
fn visit_local(&mut self, local: &'tcx hir::Local<'tcx>) {
- self.check_unused_vars_in_pat(&local.pat, None, |spans, hir_id, ln, var| {
+ self.check_unused_vars_in_pat(&local.pat, None, None, |spans, hir_id, ln, var| {
if local.init.is_some() {
self.warn_about_dead_assign(spans, hir_id, ln, var);
}
@@ -1357,7 +1361,7 @@ impl<'a, 'tcx> Visitor<'tcx> for Liveness<'a, 'tcx> {
}
fn visit_arm(&mut self, arm: &'tcx hir::Arm<'tcx>) {
- self.check_unused_vars_in_pat(&arm.pat, None, |_, _, _, _| {});
+ self.check_unused_vars_in_pat(&arm.pat, None, None, |_, _, _, _| {});
intravisit::walk_arm(self, arm);
}
}
@@ -1396,7 +1400,7 @@ fn check_expr<'tcx>(this: &mut Liveness<'_, 'tcx>, expr: &'tcx Expr<'tcx>) {
}
hir::ExprKind::Let(let_expr) => {
- this.check_unused_vars_in_pat(let_expr.pat, None, |_, _, _, _| {});
+ this.check_unused_vars_in_pat(let_expr.pat, None, None, |_, _, _, _| {});
}
// no correctness conditions related to liveness
@@ -1486,14 +1490,8 @@ impl<'tcx> Liveness<'_, 'tcx> {
lint::builtin::UNUSED_ASSIGNMENTS,
var_hir_id,
vec![span],
- |lint| {
- lint.build(&format!(
- "value captured by `{}` is never read",
- name
- ))
- .help("did you mean to capture by reference instead?")
- .emit();
- },
+ format!("value captured by `{}` is never read", name),
+ |lint| lint.help("did you mean to capture by reference instead?"),
);
}
}
@@ -1503,11 +1501,8 @@ impl<'tcx> Liveness<'_, 'tcx> {
lint::builtin::UNUSED_VARIABLES,
var_hir_id,
vec![span],
- |lint| {
- lint.build(&format!("unused variable: `{}`", name))
- .help("did you mean to capture by reference instead?")
- .emit();
- },
+ format!("unused variable: `{}`", name),
+ |lint| lint.help("did you mean to capture by reference instead?"),
);
}
}
@@ -1517,13 +1512,18 @@ impl<'tcx> Liveness<'_, 'tcx> {
fn warn_about_unused_args(&self, body: &hir::Body<'_>, entry_ln: LiveNode) {
for p in body.params {
- self.check_unused_vars_in_pat(&p.pat, Some(entry_ln), |spans, hir_id, ln, var| {
- if !self.live_on_entry(ln, var) {
- self.report_unused_assign(hir_id, spans, var, |name| {
- format!("value passed to `{}` is never read", name)
- });
- }
- });
+ self.check_unused_vars_in_pat(
+ &p.pat,
+ Some(entry_ln),
+ Some(body),
+ |spans, hir_id, ln, var| {
+ if !self.live_on_entry(ln, var) {
+ self.report_unused_assign(hir_id, spans, var, |name| {
+ format!("value passed to `{}` is never read", name)
+ });
+ }
+ },
+ );
}
}
@@ -1531,6 +1531,7 @@ impl<'tcx> Liveness<'_, 'tcx> {
&self,
pat: &hir::Pat<'_>,
entry_ln: Option<LiveNode>,
+ opt_body: Option<&hir::Body<'_>>,
on_used_on_entry: impl Fn(Vec<Span>, HirId, LiveNode, Variable),
) {
// In an or-pattern, only consider the variable; any later patterns must have the same
@@ -1549,6 +1550,8 @@ impl<'tcx> Liveness<'_, 'tcx> {
.or_insert_with(|| (ln, var, vec![id_and_sp]));
});
+ let can_remove = matches!(&pat.kind, hir::PatKind::Struct(_, _, true));
+
for (_, (ln, var, hir_ids_and_spans)) in vars {
if self.used_on_entry(ln, var) {
let id = hir_ids_and_spans[0].0;
@@ -1556,16 +1559,20 @@ impl<'tcx> Liveness<'_, 'tcx> {
hir_ids_and_spans.into_iter().map(|(_, _, ident_span)| ident_span).collect();
on_used_on_entry(spans, id, ln, var);
} else {
- self.report_unused(hir_ids_and_spans, ln, var);
+ self.report_unused(hir_ids_and_spans, ln, var, can_remove, pat, opt_body);
}
}
}
+ #[instrument(skip(self), level = "INFO")]
fn report_unused(
&self,
hir_ids_and_spans: Vec<(HirId, Span, Span)>,
ln: LiveNode,
var: Variable,
+ can_remove: bool,
+ pat: &hir::Pat<'_>,
+ opt_body: Option<&hir::Body<'_>>,
) {
let first_hir_id = hir_ids_and_spans[0].0;
@@ -1584,12 +1591,34 @@ impl<'tcx> Liveness<'_, 'tcx> {
.into_iter()
.map(|(_, _, ident_span)| ident_span)
.collect::<Vec<_>>(),
+ format!("variable `{}` is assigned to, but never used", name),
+ |lint| lint.note(&format!("consider using `_{}` instead", name)),
+ )
+ } else if can_remove {
+ self.ir.tcx.struct_span_lint_hir(
+ lint::builtin::UNUSED_VARIABLES,
+ first_hir_id,
+ hir_ids_and_spans.iter().map(|(_, pat_span, _)| *pat_span).collect::<Vec<_>>(),
+ format!("unused variable: `{}`", name),
|lint| {
- lint.build(&format!("variable `{}` is assigned to, but never used", name))
- .note(&format!("consider using `_{}` instead", name))
- .emit();
+ lint.multipart_suggestion(
+ "try removing the field",
+ hir_ids_and_spans
+ .iter()
+ .map(|(_, pat_span, _)| {
+ let span = self
+ .ir
+ .tcx
+ .sess
+ .source_map()
+ .span_extend_to_next_char(*pat_span, ',', true);
+ (span.with_hi(BytePos(span.hi().0 + 1)), String::new())
+ })
+ .collect(),
+ Applicability::MachineApplicable,
+ )
},
- )
+ );
} else {
let (shorthands, non_shorthands): (Vec<_>, Vec<_>) =
hir_ids_and_spans.iter().copied().partition(|(hir_id, _, ident_span)| {
@@ -1618,14 +1647,13 @@ impl<'tcx> Liveness<'_, 'tcx> {
.iter()
.map(|(_, pat_span, _)| *pat_span)
.collect::<Vec<_>>(),
+ format!("unused variable: `{}`", name),
|lint| {
- let mut err = lint.build(&format!("unused variable: `{}`", name));
- err.multipart_suggestion(
+ lint.multipart_suggestion(
"try ignoring the field",
shorthands,
Applicability::MachineApplicable,
- );
- err.emit();
+ )
},
);
} else {
@@ -1641,14 +1669,16 @@ impl<'tcx> Liveness<'_, 'tcx> {
.iter()
.map(|(_, _, ident_span)| *ident_span)
.collect::<Vec<_>>(),
+ format!("unused variable: `{}`", name),
|lint| {
- let mut err = lint.build(&format!("unused variable: `{}`", name));
- err.multipart_suggestion(
+ if self.has_added_lit_match_name_span(&name, opt_body, lint) {
+ lint.span_label(pat.span, "unused variable");
+ }
+ lint.multipart_suggestion(
"if this is intentional, prefix it with an underscore",
non_shorthands,
Applicability::MachineApplicable,
- );
- err.emit();
+ )
},
);
}
@@ -1656,6 +1686,42 @@ impl<'tcx> Liveness<'_, 'tcx> {
}
}
+ fn has_added_lit_match_name_span(
+ &self,
+ name: &str,
+ opt_body: Option<&hir::Body<'_>>,
+ err: &mut rustc_errors::DiagnosticBuilder<'_, ()>,
+ ) -> bool {
+ let mut has_litstring = false;
+ let Some(opt_body) = opt_body else {return false;};
+ let mut visitor = CollectLitsVisitor { lit_exprs: vec![] };
+ intravisit::walk_body(&mut visitor, opt_body);
+ for lit_expr in visitor.lit_exprs {
+ let hir::ExprKind::Lit(litx) = &lit_expr.kind else { continue };
+ let rustc_ast::LitKind::Str(syb, _) = litx.node else{ continue; };
+ let name_str: &str = syb.as_str();
+ let mut name_pa = String::from("{");
+ name_pa.push_str(&name);
+ name_pa.push('}');
+ if name_str.contains(&name_pa) {
+ err.span_label(
+ lit_expr.span,
+ "you might have meant to use string interpolation in this string literal",
+ );
+ err.multipart_suggestion(
+ "string interpolation only works in `format!` invocations",
+ vec![
+ (lit_expr.span.shrink_to_lo(), "format!(".to_string()),
+ (lit_expr.span.shrink_to_hi(), ")".to_string()),
+ ],
+ Applicability::MachineApplicable,
+ );
+ has_litstring = true;
+ }
+ }
+ has_litstring
+ }
+
fn warn_about_dead_assign(&self, spans: Vec<Span>, hir_id: HirId, ln: LiveNode, var: Variable) {
if !self.live_on_exit(ln, var) {
self.report_unused_assign(hir_id, spans, var, |name| {
@@ -1676,11 +1742,8 @@ impl<'tcx> Liveness<'_, 'tcx> {
lint::builtin::UNUSED_ASSIGNMENTS,
hir_id,
spans,
- |lint| {
- lint.build(&message(&name))
- .help("maybe it is overwritten before being read?")
- .emit();
- },
+ message(&name),
+ |lint| lint.help("maybe it is overwritten before being read?"),
)
}
}
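
Note: the `CollectLitsVisitor`/`has_added_lit_match_name_span` additions above target unused bindings whose name appears inside a plain string literal, where the author likely expected interpolation. Roughly the case it fires on — an unused function parameter, since `opt_body` is only `Some` when called from `warn_about_unused_args`:

    fn greet(name: &str) -> String {
        // `name` is reported as unused: plain string literals do not
        // interpolate. The new diagnostic labels the literal and suggests
        // wrapping it in `format!("hello {name}")`.
        "hello {name}".to_string()
    }
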
diff --git a/compiler/rustc_passes/src/loops.rs b/compiler/rustc_passes/src/loops.rs
index cdda0e388..077194ec6 100644
--- a/compiler/rustc_passes/src/loops.rs
+++ b/compiler/rustc_passes/src/loops.rs
@@ -1,6 +1,5 @@
use Context::*;
-use rustc_errors::{struct_span_err, Applicability};
use rustc_hir as hir;
use rustc_hir::def_id::LocalDefId;
use rustc_hir::intravisit::{self, Visitor};
@@ -13,6 +12,11 @@ use rustc_session::Session;
use rustc_span::hygiene::DesugaringKind;
use rustc_span::Span;
+use crate::errors::{
+ BreakInsideAsyncBlock, BreakInsideClosure, BreakNonLoop, ContinueLabeledBlock, OutsideLoop,
+ UnlabeledCfInWhileCondition, UnlabeledInLabeledBlock,
+};
+
#[derive(Clone, Copy, Debug, PartialEq)]
enum Context {
Normal,
@@ -90,7 +94,10 @@ impl<'a, 'hir> Visitor<'hir> for CheckLoopVisitor<'a, 'hir> {
Ok(loop_id) => Some(loop_id),
Err(hir::LoopIdError::OutsideLoopScope) => None,
Err(hir::LoopIdError::UnlabeledCfInWhileCondition) => {
- self.emit_unlabled_cf_in_while_condition(e.span, "break");
+ self.sess.emit_err(UnlabeledCfInWhileCondition {
+ span: e.span,
+ cf_type: "break",
+ });
None
}
Err(hir::LoopIdError::UnresolvedLabel) => None,
@@ -116,69 +123,22 @@ impl<'a, 'hir> Visitor<'hir> for CheckLoopVisitor<'a, 'hir> {
match loop_kind {
None | Some(hir::LoopSource::Loop) => (),
Some(kind) => {
- let mut err = struct_span_err!(
- self.sess,
- e.span,
- E0571,
- "`break` with value from a `{}` loop",
- kind.name()
- );
- err.span_label(
- e.span,
- "can only break with a value inside `loop` or breakable block",
+ let suggestion = format!(
+ "break{}",
+ break_label
+ .label
+ .map_or_else(String::new, |l| format!(" {}", l.ident))
);
- if let Some(head) = head {
- err.span_label(
- head,
- &format!(
- "you can't `break` with a value in a `{}` loop",
- kind.name()
- ),
- );
- }
- err.span_suggestion(
- e.span,
- &format!(
- "use `break` on its own without a value inside this `{}` loop",
- kind.name(),
- ),
- format!(
- "break{}",
- break_label
- .label
- .map_or_else(String::new, |l| format!(" {}", l.ident))
- ),
- Applicability::MaybeIncorrect,
- );
- if let (Some(label), None) = (loop_label, break_label.label) {
- match break_expr.kind {
- hir::ExprKind::Path(hir::QPath::Resolved(
- None,
- hir::Path {
- segments: [segment],
- res: hir::def::Res::Err,
- ..
- },
- )) if label.ident.to_string()
- == format!("'{}", segment.ident) =>
- {
- // This error is redundant, we will have already emitted a
- // suggestion to use the label when `segment` wasn't found
- // (hence the `Res::Err` check).
- err.delay_as_bug();
- }
- _ => {
- err.span_suggestion(
- break_expr.span,
- "alternatively, you might have meant to use the \
- available loop label",
- label.ident,
- Applicability::MaybeIncorrect,
- );
- }
- }
- }
- err.emit();
+ self.sess.emit_err(BreakNonLoop {
+ span: e.span,
+ head,
+ kind: kind.name(),
+ suggestion,
+ loop_label,
+ break_label: break_label.label,
+ break_expr_kind: &break_expr.kind,
+ break_expr_span: break_expr.span,
+ });
}
}
}
@@ -191,19 +151,17 @@ impl<'a, 'hir> Visitor<'hir> for CheckLoopVisitor<'a, 'hir> {
match destination.target_id {
Ok(loop_id) => {
if let Node::Block(block) = self.hir_map.find(loop_id).unwrap() {
- struct_span_err!(
- self.sess,
- e.span,
- E0696,
- "`continue` pointing to a labeled block"
- )
- .span_label(e.span, "labeled blocks cannot be `continue`'d")
- .span_label(block.span, "labeled block the `continue` points to")
- .emit();
+ self.sess.emit_err(ContinueLabeledBlock {
+ span: e.span,
+ block_span: block.span,
+ });
}
}
Err(hir::LoopIdError::UnlabeledCfInWhileCondition) => {
- self.emit_unlabled_cf_in_while_condition(e.span, "continue");
+ self.sess.emit_err(UnlabeledCfInWhileCondition {
+ span: e.span,
+ cf_type: "continue",
+ });
}
Err(_) => {}
}
@@ -226,21 +184,16 @@ impl<'a, 'hir> CheckLoopVisitor<'a, 'hir> {
}
fn require_break_cx(&self, name: &str, span: Span) {
- let err_inside_of = |article, ty, closure_span| {
- struct_span_err!(self.sess, span, E0267, "`{}` inside of {} {}", name, article, ty)
- .span_label(span, format!("cannot `{}` inside of {} {}", name, article, ty))
- .span_label(closure_span, &format!("enclosing {}", ty))
- .emit();
- };
-
match self.cx {
LabeledBlock | Loop(_) => {}
- Closure(closure_span) => err_inside_of("a", "closure", closure_span),
- AsyncClosure(closure_span) => err_inside_of("an", "`async` block", closure_span),
+ Closure(closure_span) => {
+ self.sess.emit_err(BreakInsideClosure { span, closure_span, name });
+ }
+ AsyncClosure(closure_span) => {
+ self.sess.emit_err(BreakInsideAsyncBlock { span, closure_span, name });
+ }
Normal | AnonConst => {
- struct_span_err!(self.sess, span, E0268, "`{}` outside of a loop", name)
- .span_label(span, format!("cannot `{}` outside of a loop", name))
- .emit();
+ self.sess.emit_err(OutsideLoop { span, name });
}
}
}
@@ -251,37 +204,13 @@ impl<'a, 'hir> CheckLoopVisitor<'a, 'hir> {
label: &Destination,
cf_type: &str,
) -> bool {
- if !span.is_desugaring(DesugaringKind::QuestionMark) && self.cx == LabeledBlock {
- if label.label.is_none() {
- struct_span_err!(
- self.sess,
- span,
- E0695,
- "unlabeled `{}` inside of a labeled block",
- cf_type
- )
- .span_label(
- span,
- format!(
- "`{}` statements that would diverge to or through \
- a labeled block need to bear a label",
- cf_type
- ),
- )
- .emit();
- return true;
- }
+ if !span.is_desugaring(DesugaringKind::QuestionMark)
+ && self.cx == LabeledBlock
+ && label.label.is_none()
+ {
+ self.sess.emit_err(UnlabeledInLabeledBlock { span, cf_type });
+ return true;
}
false
}
- fn emit_unlabled_cf_in_while_condition(&mut self, span: Span, cf_type: &str) {
- struct_span_err!(
- self.sess,
- span,
- E0590,
- "`break` or `continue` with no label in the condition of a `while` loop"
- )
- .span_label(span, format!("unlabeled `{}` in the condition of a `while` loop", cf_type))
- .emit();
- }
}
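
Note: these hunks move the E0571/E0696/E0267/E0268/E0695/E0590 diagnostics into structs in `crate::errors` without changing what they fire on. For example, `BreakNonLoop` still covers `break`-with-value outside `loop`; this snippet is deliberately rejected:

    fn first_even(xs: &[i32]) -> Option<i32> {
        for &x in xs {
            if x % 2 == 0 {
                // error[E0571]: `break` with value from a `for` loop; the
                // `suggestion` field built above becomes `break`, optionally
                // followed by the loop label.
                break Some(x);
            }
        }
        None
    }
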
diff --git a/compiler/rustc_passes/src/naked_functions.rs b/compiler/rustc_passes/src/naked_functions.rs
index 20765abf3..acc54e7e1 100644
--- a/compiler/rustc_passes/src/naked_functions.rs
+++ b/compiler/rustc_passes/src/naked_functions.rs
@@ -1,11 +1,11 @@
//! Checks validity of naked functions.
-use rustc_ast::{Attribute, InlineAsmOptions};
-use rustc_errors::{struct_span_err, Applicability};
+use rustc_ast::InlineAsmOptions;
use rustc_hir as hir;
+use rustc_hir::def::DefKind;
use rustc_hir::def_id::LocalDefId;
-use rustc_hir::intravisit::{FnKind, Visitor};
-use rustc_hir::{ExprKind, HirId, InlineAsmOperand, StmtKind};
+use rustc_hir::intravisit::Visitor;
+use rustc_hir::{ExprKind, InlineAsmOperand, StmtKind};
use rustc_middle::ty::query::Providers;
use rustc_middle::ty::TyCtxt;
use rustc_session::lint::builtin::UNDEFINED_NAKED_FUNCTION_ABI;
@@ -13,73 +13,69 @@ use rustc_span::symbol::sym;
use rustc_span::Span;
use rustc_target::spec::abi::Abi;
-fn check_mod_naked_functions(tcx: TyCtxt<'_>, module_def_id: LocalDefId) {
- tcx.hir().visit_item_likes_in_module(module_def_id, &mut CheckNakedFunctions { tcx });
-}
+use crate::errors::{
+ CannotInlineNakedFunction, NakedFunctionsAsmBlock, NakedFunctionsAsmOptions,
+ NakedFunctionsMustUseNoreturn, NakedFunctionsOperands, NoPatterns, ParamsNotAllowed,
+ UndefinedNakedFunctionAbi,
+};
pub(crate) fn provide(providers: &mut Providers) {
*providers = Providers { check_mod_naked_functions, ..*providers };
}
-struct CheckNakedFunctions<'tcx> {
- tcx: TyCtxt<'tcx>,
-}
-
-impl<'tcx> Visitor<'tcx> for CheckNakedFunctions<'tcx> {
- fn visit_fn(
- &mut self,
- fk: FnKind<'_>,
- _fd: &'tcx hir::FnDecl<'tcx>,
- body_id: hir::BodyId,
- span: Span,
- hir_id: HirId,
- ) {
- let ident_span;
- let fn_header;
-
- match fk {
- FnKind::Closure => {
- // Closures with a naked attribute are rejected during attribute
- // check. Don't validate them any further.
- return;
- }
- FnKind::ItemFn(ident, _, ref header, ..) => {
- ident_span = ident.span;
- fn_header = header;
- }
-
- FnKind::Method(ident, ref sig, ..) => {
- ident_span = ident.span;
- fn_header = &sig.header;
- }
+fn check_mod_naked_functions(tcx: TyCtxt<'_>, module_def_id: LocalDefId) {
+ let items = tcx.hir_module_items(module_def_id);
+ for def_id in items.definitions() {
+ if !matches!(tcx.def_kind(def_id), DefKind::Fn | DefKind::AssocFn) {
+ continue;
}
- let attrs = self.tcx.hir().attrs(hir_id);
- let naked = attrs.iter().any(|attr| attr.has_name(sym::naked));
- if naked {
- let body = self.tcx.hir().body(body_id);
- check_abi(self.tcx, hir_id, fn_header.abi, ident_span);
- check_no_patterns(self.tcx, body.params);
- check_no_parameters_use(self.tcx, body);
- check_asm(self.tcx, body, span);
- check_inline(self.tcx, attrs);
+ let naked = tcx.has_attr(def_id.to_def_id(), sym::naked);
+ if !naked {
+ continue;
}
+
+ let (fn_header, body_id) = match tcx.hir().get_by_def_id(def_id) {
+ hir::Node::Item(hir::Item { kind: hir::ItemKind::Fn(sig, _, body_id), .. })
+ | hir::Node::TraitItem(hir::TraitItem {
+ kind: hir::TraitItemKind::Fn(sig, hir::TraitFn::Provided(body_id)),
+ ..
+ })
+ | hir::Node::ImplItem(hir::ImplItem {
+ kind: hir::ImplItemKind::Fn(sig, body_id),
+ ..
+ }) => (sig.header, *body_id),
+ _ => continue,
+ };
+
+ let body = tcx.hir().body(body_id);
+ check_abi(tcx, def_id, fn_header.abi);
+ check_no_patterns(tcx, body.params);
+ check_no_parameters_use(tcx, body);
+ check_asm(tcx, def_id, body);
+ check_inline(tcx, def_id);
}
}
/// Check that the function isn't inlined.
-fn check_inline(tcx: TyCtxt<'_>, attrs: &[Attribute]) {
- for attr in attrs.iter().filter(|attr| attr.has_name(sym::inline)) {
- tcx.sess.struct_span_err(attr.span, "naked functions cannot be inlined").emit();
+fn check_inline(tcx: TyCtxt<'_>, def_id: LocalDefId) {
+ let attrs = tcx.get_attrs(def_id.to_def_id(), sym::inline);
+ for attr in attrs {
+ tcx.sess.emit_err(CannotInlineNakedFunction { span: attr.span });
}
}
/// Checks that function uses non-Rust ABI.
-fn check_abi(tcx: TyCtxt<'_>, hir_id: HirId, abi: Abi, fn_ident_span: Span) {
+fn check_abi(tcx: TyCtxt<'_>, def_id: LocalDefId, abi: Abi) {
if abi == Abi::Rust {
- tcx.struct_span_lint_hir(UNDEFINED_NAKED_FUNCTION_ABI, hir_id, fn_ident_span, |lint| {
- lint.build("Rust ABI is unsupported in naked functions").emit();
- });
+ let hir_id = tcx.hir().local_def_id_to_hir_id(def_id);
+ let span = tcx.def_span(def_id);
+ tcx.emit_spanned_lint(
+ UNDEFINED_NAKED_FUNCTION_ABI,
+ hir_id,
+ span,
+ UndefinedNakedFunctionAbi,
+ );
}
}
@@ -88,14 +84,9 @@ fn check_no_patterns(tcx: TyCtxt<'_>, params: &[hir::Param<'_>]) {
for param in params {
match param.pat.kind {
hir::PatKind::Wild
- | hir::PatKind::Binding(hir::BindingAnnotation::Unannotated, _, _, None) => {}
+ | hir::PatKind::Binding(hir::BindingAnnotation::NONE, _, _, None) => {}
_ => {
- tcx.sess
- .struct_span_err(
- param.pat.span,
- "patterns not allowed in naked function parameters",
- )
- .emit();
+ tcx.sess.emit_err(NoPatterns { span: param.pat.span });
}
}
}
@@ -125,14 +116,7 @@ impl<'tcx> Visitor<'tcx> for CheckParameters<'tcx> {
)) = expr.kind
{
if self.params.contains(var_hir_id) {
- self.tcx
- .sess
- .struct_span_err(
- expr.span,
- "referencing function parameters is not allowed in naked functions",
- )
- .help("follow the calling convention in asm block to use parameters")
- .emit();
+ self.tcx.sess.emit_err(ParamsNotAllowed { span: expr.span });
return;
}
}
@@ -141,32 +125,27 @@ impl<'tcx> Visitor<'tcx> for CheckParameters<'tcx> {
}
/// Checks that function body contains a single inline assembly block.
-fn check_asm<'tcx>(tcx: TyCtxt<'tcx>, body: &'tcx hir::Body<'tcx>, fn_span: Span) {
+fn check_asm<'tcx>(tcx: TyCtxt<'tcx>, def_id: LocalDefId, body: &'tcx hir::Body<'tcx>) {
let mut this = CheckInlineAssembly { tcx, items: Vec::new() };
this.visit_body(body);
if let [(ItemKind::Asm | ItemKind::Err, _)] = this.items[..] {
// Ok.
} else {
- let mut diag = struct_span_err!(
- tcx.sess,
- fn_span,
- E0787,
- "naked functions must contain a single asm block"
- );
-
let mut must_show_error = false;
let mut has_asm = false;
let mut has_err = false;
+ let mut multiple_asms = vec![];
+ let mut non_asms = vec![];
for &(kind, span) in &this.items {
match kind {
ItemKind::Asm if has_asm => {
must_show_error = true;
- diag.span_label(span, "multiple asm blocks are unsupported in naked functions");
+ multiple_asms.push(span);
}
ItemKind::Asm => has_asm = true,
ItemKind::NonAsm => {
must_show_error = true;
- diag.span_label(span, "non-asm is unsupported in naked functions");
+ non_asms.push(span);
}
ItemKind::Err => has_err = true,
}
@@ -176,9 +155,11 @@ fn check_asm<'tcx>(tcx: TyCtxt<'tcx>, body: &'tcx hir::Body<'tcx>, fn_span: Span
// errors, then don't show an additional error. This allows for appending/prepending
// `compile_error!("...")` statements and reduces error noise.
if must_show_error || !has_err {
- diag.emit();
- } else {
- diag.cancel();
+ tcx.sess.emit_err(NakedFunctionsAsmBlock {
+ span: tcx.def_span(def_id),
+ multiple_asms,
+ non_asms,
+ });
}
}
}
@@ -259,13 +240,7 @@ impl<'tcx> CheckInlineAssembly<'tcx> {
})
.collect();
if !unsupported_operands.is_empty() {
- struct_span_err!(
- self.tcx.sess,
- unsupported_operands,
- E0787,
- "only `const` and `sym` operands are supported in naked functions",
- )
- .emit();
+ self.tcx.sess.emit_err(NakedFunctionsOperands { unsupported_operands });
}
let unsupported_options: Vec<&'static str> = [
@@ -281,14 +256,10 @@ impl<'tcx> CheckInlineAssembly<'tcx> {
.collect();
if !unsupported_options.is_empty() {
- struct_span_err!(
- self.tcx.sess,
+ self.tcx.sess.emit_err(NakedFunctionsAsmOptions {
span,
- E0787,
- "asm options unsupported in naked functions: {}",
- unsupported_options.join(", ")
- )
- .emit();
+ unsupported_options: unsupported_options.join(", "),
+ });
}
if !asm.options.contains(InlineAsmOptions::NORETURN) {
@@ -298,20 +269,7 @@ impl<'tcx> CheckInlineAssembly<'tcx> {
.map_or_else(|| asm.template_strs.last().unwrap().2, |op| op.1)
.shrink_to_hi();
- struct_span_err!(
- self.tcx.sess,
- span,
- E0787,
- "asm in naked functions must use `noreturn` option"
- )
- .span_suggestion(
- last_span,
- "consider specifying that the asm block is responsible \
- for returning from the function",
- ", options(noreturn)",
- Applicability::MachineApplicable,
- )
- .emit();
+ self.tcx.sess.emit_err(NakedFunctionsMustUseNoreturn { span, last_span });
}
}
}
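
Note: the rewrite above walks module items by `DefId` instead of using an HIR visitor, but the rules it enforces are unchanged. For reference, a naked function that passes every check — x86-64 and the nightly `naked_functions` feature assumed, register choices illustrative:

    #![feature(naked_functions)]
    use std::arch::asm;

    // Non-Rust ABI, no patterns in the parameter list, parameters never
    // touched from Rust code, no `#[inline]`, and a single asm block with
    // `options(noreturn)`.
    #[naked]
    pub unsafe extern "C" fn add_one(_: u64) -> u64 {
        asm!("lea rax, [rdi + 1]", "ret", options(noreturn));
    }
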
diff --git a/compiler/rustc_passes/src/reachable.rs b/compiler/rustc_passes/src/reachable.rs
index f7e3fac6b..50070869a 100644
--- a/compiler/rustc_passes/src/reachable.rs
+++ b/compiler/rustc_passes/src/reachable.rs
@@ -12,7 +12,7 @@ use rustc_hir::def_id::{DefId, LocalDefId};
use rustc_hir::intravisit::{self, Visitor};
use rustc_hir::Node;
use rustc_middle::middle::codegen_fn_attrs::{CodegenFnAttrFlags, CodegenFnAttrs};
-use rustc_middle::middle::privacy;
+use rustc_middle::middle::privacy::{self, Level};
use rustc_middle::ty::query::Providers;
use rustc_middle::ty::{self, DefIdTree, TyCtxt};
use rustc_session::config::CrateType;
@@ -29,7 +29,7 @@ fn item_might_be_inlined(tcx: TyCtxt<'_>, item: &hir::Item<'_>, attrs: &CodegenF
match item.kind {
hir::ItemKind::Fn(ref sig, ..) if sig.header.is_const() => true,
hir::ItemKind::Impl { .. } | hir::ItemKind::Fn(..) => {
- let generics = tcx.generics_of(item.def_id);
+ let generics = tcx.generics_of(item.owner_id);
generics.requires_monomorphization(tcx)
}
_ => false,
@@ -42,7 +42,7 @@ fn method_might_be_inlined(
impl_src: LocalDefId,
) -> bool {
let codegen_fn_attrs = tcx.codegen_fn_attrs(impl_item.hir_id().owner.to_def_id());
- let generics = tcx.generics_of(impl_item.def_id);
+ let generics = tcx.generics_of(impl_item.owner_id);
if codegen_fn_attrs.requests_inline() || generics.requires_monomorphization(tcx) {
return true;
}
@@ -116,6 +116,17 @@ impl<'tcx> Visitor<'tcx> for ReachableContext<'tcx> {
intravisit::walk_expr(self, expr)
}
+
+ fn visit_inline_asm(&mut self, asm: &'tcx hir::InlineAsm<'tcx>, id: hir::HirId) {
+ for (op, _) in asm.operands {
+ if let hir::InlineAsmOperand::SymStatic { def_id, .. } = op {
+ if let Some(def_id) = def_id.as_local() {
+ self.reachable_symbols.insert(def_id);
+ }
+ }
+ }
+ intravisit::walk_inline_asm(self, asm, id);
+ }
}
impl<'tcx> ReachableContext<'tcx> {
@@ -153,9 +164,9 @@ impl<'tcx> ReachableContext<'tcx> {
hir::ImplItemKind::Fn(..) => {
let hir_id = self.tcx.hir().local_def_id_to_hir_id(def_id);
let impl_did = self.tcx.hir().get_parent_item(hir_id);
- method_might_be_inlined(self.tcx, impl_item, impl_did)
+ method_might_be_inlined(self.tcx, impl_item, impl_did.def_id)
}
- hir::ImplItemKind::TyAlias(_) => false,
+ hir::ImplItemKind::Type(_) => false,
},
Some(_) => false,
None => false, // This will happen for default methods.
@@ -216,7 +227,7 @@ impl<'tcx> ReachableContext<'tcx> {
if item_might_be_inlined(
self.tcx,
&item,
- self.tcx.codegen_fn_attrs(item.def_id),
+ self.tcx.codegen_fn_attrs(item.owner_id),
) {
self.visit_nested_body(body);
}
@@ -271,7 +282,7 @@ impl<'tcx> ReachableContext<'tcx> {
self.visit_nested_body(body)
}
}
- hir::ImplItemKind::TyAlias(_) => {}
+ hir::ImplItemKind::Type(_) => {}
},
Node::Expr(&hir::Expr {
kind: hir::ExprKind::Closure(&hir::Closure { body, .. }),
@@ -303,13 +314,13 @@ fn check_item<'tcx>(
tcx: TyCtxt<'tcx>,
id: hir::ItemId,
worklist: &mut Vec<LocalDefId>,
- access_levels: &privacy::AccessLevels,
+ effective_visibilities: &privacy::EffectiveVisibilities,
) {
- if has_custom_linkage(tcx, id.def_id) {
- worklist.push(id.def_id);
+ if has_custom_linkage(tcx, id.owner_id.def_id) {
+ worklist.push(id.owner_id.def_id);
}
- if !matches!(tcx.def_kind(id.def_id), DefKind::Impl) {
+ if !matches!(tcx.def_kind(id.owner_id), DefKind::Impl) {
return;
}
@@ -318,8 +329,8 @@ fn check_item<'tcx>(
if let hir::ItemKind::Impl(hir::Impl { of_trait: Some(ref trait_ref), ref items, .. }) =
item.kind
{
- if !access_levels.is_reachable(item.def_id) {
- worklist.extend(items.iter().map(|ii_ref| ii_ref.id.def_id));
+ if !effective_visibilities.is_reachable(item.owner_id.def_id) {
+ worklist.extend(items.iter().map(|ii_ref| ii_ref.id.owner_id.def_id));
let Res::Def(DefKind::Trait, trait_def_id) = trait_ref.path.res else {
unreachable!();
@@ -354,7 +365,7 @@ fn has_custom_linkage<'tcx>(tcx: TyCtxt<'tcx>, def_id: LocalDefId) -> bool {
}
fn reachable_set<'tcx>(tcx: TyCtxt<'tcx>, (): ()) -> FxHashSet<LocalDefId> {
- let access_levels = &tcx.privacy_access_levels(());
+ let effective_visibilities = &tcx.effective_visibilities(());
let any_library =
tcx.sess.crate_types().iter().any(|ty| {
@@ -373,7 +384,13 @@ fn reachable_set<'tcx>(tcx: TyCtxt<'tcx>, (): ()) -> FxHashSet<LocalDefId> {
// If other crates link to us, they're going to expect to be able to
// use the lang items, so we need to be sure to mark them as
// exported.
- reachable_context.worklist.extend(access_levels.map.keys());
+ reachable_context.worklist = effective_visibilities
+ .iter()
+ .filter_map(|(&id, effective_vis)| {
+ effective_vis.is_public_at_level(Level::ReachableThroughImplTrait).then_some(id)
+ })
+ .collect::<Vec<_>>();
+
for item in tcx.lang_items().items().iter() {
if let Some(def_id) = *item {
if let Some(def_id) = def_id.as_local() {
@@ -393,12 +410,12 @@ fn reachable_set<'tcx>(tcx: TyCtxt<'tcx>, (): ()) -> FxHashSet<LocalDefId> {
let crate_items = tcx.hir_crate_items(());
for id in crate_items.items() {
- check_item(tcx, id, &mut reachable_context.worklist, access_levels);
+ check_item(tcx, id, &mut reachable_context.worklist, effective_visibilities);
}
for id in crate_items.impl_items() {
- if has_custom_linkage(tcx, id.def_id) {
- reachable_context.worklist.push(id.def_id);
+ if has_custom_linkage(tcx, id.owner_id.def_id) {
+ reachable_context.worklist.push(id.owner_id.def_id);
}
}
}
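
Note: besides the `owner_id`/`EffectiveVisibilities` renames, the new `visit_inline_asm` above keeps items referenced only through asm `sym` operands in the reachable set. A sketch of the shape it is there for — the comment-only template is just a way to mention the symbol without writing real assembly:

    use std::arch::asm;

    static TABLE: [u8; 4] = [1, 2, 3, 4];

    pub fn keep_table_alive() {
        // `TABLE` is never read from Rust code, but codegen still needs the
        // symbol, so reachability must record the `SymStatic` operand.
        unsafe { asm!("/* {0} */", sym TABLE) };
    }
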
diff --git a/compiler/rustc_passes/src/stability.rs b/compiler/rustc_passes/src/stability.rs
index ca6a2ac3d..78afa2f25 100644
--- a/compiler/rustc_passes/src/stability.rs
+++ b/compiler/rustc_passes/src/stability.rs
@@ -1,23 +1,30 @@
//! A pass that annotates every item and method with its stability level,
//! propagating default levels lexically from parent to children ast nodes.
-use attr::StabilityLevel;
-use rustc_attr::{self as attr, ConstStability, Stability, Unstable, UnstableReason};
+use crate::errors::{
+ self, CannotStabilizeDeprecated, DeprecatedAttribute, DuplicateFeatureErr,
+ FeatureOnlyOnNightly, ImpliedFeatureNotExist, InvalidDeprecationVersion, InvalidStability,
+ MissingConstErr, MissingConstStabAttr, MissingStabilityAttr, TraitImplConstStable,
+ UnknownFeature, UselessStability,
+};
+use rustc_attr::{
+ self as attr, rust_version_symbol, ConstStability, Stability, StabilityLevel, Unstable,
+ UnstableReason, VERSION_PLACEHOLDER,
+};
use rustc_data_structures::fx::{FxHashMap, FxHashSet, FxIndexMap};
-use rustc_errors::{struct_span_err, Applicability};
+use rustc_errors::Applicability;
use rustc_hir as hir;
use rustc_hir::def::{DefKind, Res};
use rustc_hir::def_id::{LocalDefId, CRATE_DEF_ID};
use rustc_hir::hir_id::CRATE_HIR_ID;
use rustc_hir::intravisit::{self, Visitor};
-use rustc_hir::{FieldDef, Generics, HirId, Item, ItemKind, TraitRef, Ty, TyKind, Variant};
+use rustc_hir::{FieldDef, Item, ItemKind, TraitRef, Ty, TyKind, Variant};
use rustc_middle::hir::nested_filter;
-use rustc_middle::middle::privacy::AccessLevels;
+use rustc_middle::middle::privacy::EffectiveVisibilities;
use rustc_middle::middle::stability::{AllowUnstable, DeprecationEntry, Index};
use rustc_middle::ty::{query::Providers, TyCtxt};
use rustc_session::lint;
use rustc_session::lint::builtin::{INEFFECTIVE_UNSTABLE_TRAIT_IMPL, USELESS_DEPRECATED};
-use rustc_session::Session;
use rustc_span::symbol::{sym, Symbol};
use rustc_span::Span;
use rustc_target::spec::abi::Abi;
@@ -120,16 +127,12 @@ impl<'a, 'tcx> Annotator<'a, 'tcx> {
if kind == AnnotationKind::Prohibited || kind == AnnotationKind::DeprecationProhibited {
let hir_id = self.tcx.hir().local_def_id_to_hir_id(def_id);
- self.tcx.struct_span_lint_hir(USELESS_DEPRECATED, hir_id, *span, |lint| {
- lint.build("this `#[deprecated]` annotation has no effect")
- .span_suggestion_short(
- *span,
- "remove the unnecessary deprecation attribute",
- "",
- rustc_errors::Applicability::MachineApplicable,
- )
- .emit();
- });
+ self.tcx.emit_spanned_lint(
+ USELESS_DEPRECATED,
+ hir_id,
+ *span,
+ errors::DeprecatedAnnotationHasNoEffect { span: *span },
+ );
}
// `Deprecation` is just two pointers, no need to intern it
@@ -161,7 +164,7 @@ impl<'a, 'tcx> Annotator<'a, 'tcx> {
return;
}
- let (stab, const_stab) = attr::find_stability(&self.tcx.sess, attrs, item_sp);
+ let (stab, const_stab, body_stab) = attr::find_stability(&self.tcx.sess, attrs, item_sp);
let mut const_span = None;
let const_stab = const_stab.map(|(const_stab, const_span_node)| {
@@ -180,7 +183,9 @@ impl<'a, 'tcx> Annotator<'a, 'tcx> {
if !self.in_trait_impl
|| (self.in_trait_impl && !self.tcx.is_const_fn_raw(def_id.to_def_id()))
{
- missing_const_err(&self.tcx.sess, fn_sig.span, const_span);
+ self.tcx
+ .sess
+ .emit_err(MissingConstErr { fn_sig_span: fn_sig.span, const_span });
}
}
}
@@ -198,26 +203,23 @@ impl<'a, 'tcx> Annotator<'a, 'tcx> {
if let Some((rustc_attr::Deprecation { is_since_rustc_version: true, .. }, span)) = &depr {
if stab.is_none() {
- struct_span_err!(
- self.tcx.sess,
- *span,
- E0549,
- "deprecated attribute must be paired with \
- either stable or unstable attribute"
- )
- .emit();
+ self.tcx.sess.emit_err(DeprecatedAttribute { span: *span });
}
}
+ if let Some((body_stab, _span)) = body_stab {
+ // FIXME: check that this item can have body stability
+
+ self.index.default_body_stab_map.insert(def_id, body_stab);
+ debug!(?self.index.default_body_stab_map);
+ }
+
let stab = stab.map(|(stab, span)| {
// Error if prohibited, or can't inherit anything from a container.
if kind == AnnotationKind::Prohibited
|| (kind == AnnotationKind::Container && stab.level.is_stable() && is_deprecated)
{
- self.tcx.sess.struct_span_err(span,"this stability annotation is useless")
- .span_label(span, "useless stability annotation")
- .span_label(item_sp, "the stability attribute annotates this item")
- .emit();
+ self.tcx.sess.emit_err(UselessStability { span, item_sp });
}
debug!("annotate: found {:?}", stab);
@@ -233,19 +235,15 @@ impl<'a, 'tcx> Annotator<'a, 'tcx> {
{
match stab_v.parse::<u64>() {
Err(_) => {
- self.tcx.sess.struct_span_err(span, "invalid stability version found")
- .span_label(span, "invalid stability version")
- .span_label(item_sp, "the stability attribute annotates this item")
- .emit();
+ self.tcx.sess.emit_err(InvalidStability { span, item_sp });
break;
}
Ok(stab_vp) => match dep_v.parse::<u64>() {
Ok(dep_vp) => match dep_vp.cmp(&stab_vp) {
Ordering::Less => {
- self.tcx.sess.struct_span_err(span, "an API can't be stabilized after it is deprecated")
- .span_label(span, "invalid version")
- .span_label(item_sp, "the stability attribute annotates this item")
- .emit();
+ self.tcx
+ .sess
+ .emit_err(CannotStabilizeDeprecated { span, item_sp });
break;
}
Ordering::Equal => continue,
@@ -253,10 +251,9 @@ impl<'a, 'tcx> Annotator<'a, 'tcx> {
},
Err(_) => {
if dep_v != "TBD" {
- self.tcx.sess.struct_span_err(span, "invalid deprecation version found")
- .span_label(span, "invalid deprecation version")
- .span_label(item_sp, "the stability attribute annotates this item")
- .emit();
+ self.tcx
+ .sess
+ .emit_err(InvalidDeprecationVersion { span, item_sp });
}
break;
}
@@ -265,7 +262,9 @@ impl<'a, 'tcx> Annotator<'a, 'tcx> {
}
}
- if let Stability { level: Unstable { implied_by: Some(implied_by), .. }, feature } = stab {
+ if let Stability { level: Unstable { implied_by: Some(implied_by), .. }, feature } =
+ stab
+ {
self.index.implications.insert(implied_by, feature);
}
@@ -379,7 +378,7 @@ impl<'a, 'tcx> Visitor<'tcx> for Annotator<'a, 'tcx> {
}
self.annotate(
- i.def_id,
+ i.owner_id.def_id,
i.span,
fn_sig,
kind,
@@ -398,7 +397,7 @@ impl<'a, 'tcx> Visitor<'tcx> for Annotator<'a, 'tcx> {
};
self.annotate(
- ti.def_id,
+ ti.owner_id.def_id,
ti.span,
fn_sig,
AnnotationKind::Required,
@@ -421,7 +420,7 @@ impl<'a, 'tcx> Visitor<'tcx> for Annotator<'a, 'tcx> {
};
self.annotate(
- ii.def_id,
+ ii.owner_id.def_id,
ii.span,
fn_sig,
kind,
@@ -434,7 +433,7 @@ impl<'a, 'tcx> Visitor<'tcx> for Annotator<'a, 'tcx> {
);
}
- fn visit_variant(&mut self, var: &'tcx Variant<'tcx>, g: &'tcx Generics<'tcx>, item_id: HirId) {
+ fn visit_variant(&mut self, var: &'tcx Variant<'tcx>) {
self.annotate(
self.tcx.hir().local_def_id(var.id),
var.span,
@@ -452,12 +451,12 @@ impl<'a, 'tcx> Visitor<'tcx> for Annotator<'a, 'tcx> {
AnnotationKind::Required,
InheritDeprecation::Yes,
InheritConstStability::No,
- InheritStability::No,
+ InheritStability::Yes,
|_| {},
);
}
- intravisit::walk_variant(v, var, g, item_id)
+ intravisit::walk_variant(v, var)
},
)
}
@@ -479,7 +478,7 @@ impl<'a, 'tcx> Visitor<'tcx> for Annotator<'a, 'tcx> {
fn visit_foreign_item(&mut self, i: &'tcx hir::ForeignItem<'tcx>) {
self.annotate(
- i.def_id,
+ i.owner_id.def_id,
i.span,
None,
AnnotationKind::Required,
@@ -517,15 +516,18 @@ impl<'a, 'tcx> Visitor<'tcx> for Annotator<'a, 'tcx> {
struct MissingStabilityAnnotations<'tcx> {
tcx: TyCtxt<'tcx>,
- access_levels: &'tcx AccessLevels,
+ effective_visibilities: &'tcx EffectiveVisibilities,
}
impl<'tcx> MissingStabilityAnnotations<'tcx> {
fn check_missing_stability(&self, def_id: LocalDefId, span: Span) {
let stab = self.tcx.stability().local_stability(def_id);
- if !self.tcx.sess.opts.test && stab.is_none() && self.access_levels.is_reachable(def_id) {
+ if !self.tcx.sess.opts.test
+ && stab.is_none()
+ && self.effective_visibilities.is_reachable(def_id)
+ {
let descr = self.tcx.def_kind(def_id).descr(def_id.to_def_id());
- self.tcx.sess.span_err(span, &format!("{} has missing stability attribute", descr));
+ self.tcx.sess.emit_err(MissingStabilityAttr { span, descr });
}
}
@@ -541,11 +543,11 @@ impl<'tcx> MissingStabilityAnnotations<'tcx> {
.lookup_stability(def_id)
.map_or(false, |stability| stability.level.is_stable());
let missing_const_stability_attribute = self.tcx.lookup_const_stability(def_id).is_none();
- let is_reachable = self.access_levels.is_reachable(def_id);
+ let is_reachable = self.effective_visibilities.is_reachable(def_id);
if is_const && is_stable && missing_const_stability_attribute && is_reachable {
let descr = self.tcx.def_kind(def_id).descr(def_id.to_def_id());
- self.tcx.sess.span_err(span, &format!("{descr} has missing const stability attribute"));
+ self.tcx.sess.emit_err(MissingConstStabAttr { span, descr });
}
}
}
@@ -567,32 +569,35 @@ impl<'tcx> Visitor<'tcx> for MissingStabilityAnnotations<'tcx> {
hir::ItemKind::Impl(hir::Impl { of_trait: None, .. })
| hir::ItemKind::ForeignMod { .. }
) {
- self.check_missing_stability(i.def_id, i.span);
+ self.check_missing_stability(i.owner_id.def_id, i.span);
}
// Ensure stable `const fn` have a const stability attribute.
- self.check_missing_const_stability(i.def_id, i.span);
+ self.check_missing_const_stability(i.owner_id.def_id, i.span);
intravisit::walk_item(self, i)
}
fn visit_trait_item(&mut self, ti: &'tcx hir::TraitItem<'tcx>) {
- self.check_missing_stability(ti.def_id, ti.span);
+ self.check_missing_stability(ti.owner_id.def_id, ti.span);
intravisit::walk_trait_item(self, ti);
}
fn visit_impl_item(&mut self, ii: &'tcx hir::ImplItem<'tcx>) {
let impl_def_id = self.tcx.hir().get_parent_item(ii.hir_id());
if self.tcx.impl_trait_ref(impl_def_id).is_none() {
- self.check_missing_stability(ii.def_id, ii.span);
- self.check_missing_const_stability(ii.def_id, ii.span);
+ self.check_missing_stability(ii.owner_id.def_id, ii.span);
+ self.check_missing_const_stability(ii.owner_id.def_id, ii.span);
}
intravisit::walk_impl_item(self, ii);
}
- fn visit_variant(&mut self, var: &'tcx Variant<'tcx>, g: &'tcx Generics<'tcx>, item_id: HirId) {
+ fn visit_variant(&mut self, var: &'tcx Variant<'tcx>) {
self.check_missing_stability(self.tcx.hir().local_def_id(var.id), var.span);
- intravisit::walk_variant(self, var, g, item_id);
+ if let Some(ctor_hir_id) = var.data.ctor_hir_id() {
+ self.check_missing_stability(self.tcx.hir().local_def_id(ctor_hir_id), var.span);
+ }
+ intravisit::walk_variant(self, var);
}
fn visit_field_def(&mut self, s: &'tcx FieldDef<'tcx>) {
@@ -601,7 +606,7 @@ impl<'tcx> Visitor<'tcx> for MissingStabilityAnnotations<'tcx> {
}
fn visit_foreign_item(&mut self, i: &'tcx hir::ForeignItem<'tcx>) {
- self.check_missing_stability(i.def_id, i.span);
+ self.check_missing_stability(i.owner_id.def_id, i.span);
intravisit::walk_foreign_item(self, i);
}
// Note that we don't need to `check_missing_stability` for default generic parameters,
@@ -613,6 +618,7 @@ fn stability_index(tcx: TyCtxt<'_>, (): ()) -> Index {
let mut index = Index {
stab_map: Default::default(),
const_stab_map: Default::default(),
+ default_body_stab_map: Default::default(),
depr_map: Default::default(),
implications: Default::default(),
};
@@ -673,6 +679,9 @@ pub(crate) fn provide(providers: &mut Providers) {
stability_implications: |tcx, _| tcx.stability().implications.clone(),
lookup_stability: |tcx, id| tcx.stability().local_stability(id.expect_local()),
lookup_const_stability: |tcx, id| tcx.stability().local_const_stability(id.expect_local()),
+ lookup_default_body_stability: |tcx, id| {
+ tcx.stability().local_default_body_stability(id.expect_local())
+ },
lookup_deprecation_entry: |tcx, id| {
tcx.stability().local_deprecation_entry(id.expect_local())
},
@@ -703,7 +712,7 @@ impl<'tcx> Visitor<'tcx> for Checker<'tcx> {
return;
}
- let Some(cnum) = self.tcx.extern_mod_stmt_cnum(item.def_id) else {
+ let Some(cnum) = self.tcx.extern_mod_stmt_cnum(item.owner_id.def_id) else {
return;
};
let def_id = cnum.as_def_id();
@@ -723,7 +732,8 @@ impl<'tcx> Visitor<'tcx> for Checker<'tcx> {
let features = self.tcx.features();
if features.staged_api {
let attrs = self.tcx.hir().attrs(item.hir_id());
- let (stab, const_stab) = attr::find_stability(&self.tcx.sess, attrs, item.span);
+ let (stab, const_stab, _) =
+ attr::find_stability(&self.tcx.sess, attrs, item.span);
// If this impl block has an #[unstable] attribute, give an
// error if all involved types and traits are stable, because
@@ -738,10 +748,8 @@ impl<'tcx> Visitor<'tcx> for Checker<'tcx> {
INEFFECTIVE_UNSTABLE_TRAIT_IMPL,
item.hir_id(),
span,
- |lint| {lint
- .build("an `#[unstable]` annotation here has no effect")
- .note("see issue #55436 <https://github.com/rust-lang/rust/issues/55436> for more information")
- .emit();}
+ "an `#[unstable]` annotation here has no effect",
+ |lint| lint.note("see issue #55436 <https://github.com/rust-lang/rust/issues/55436> for more information")
);
}
}
@@ -752,16 +760,12 @@ impl<'tcx> Visitor<'tcx> for Checker<'tcx> {
&& *constness == hir::Constness::Const
&& const_stab.map_or(false, |(stab, _)| stab.is_const_stable())
{
- self.tcx
- .sess
- .struct_span_err(item.span, "trait implementations cannot be const stable yet")
- .note("see issue #67792 <https://github.com/rust-lang/rust/issues/67792> for more information")
- .emit();
+ self.tcx.sess.emit_err(TraitImplConstStable { span: item.span });
}
}
for impl_item_ref in *items {
- let impl_item = self.tcx.associated_item(impl_item_ref.id.def_id);
+ let impl_item = self.tcx.associated_item(impl_item_ref.id.owner_id);
if let Some(def_id) = impl_item.trait_item_def_id {
// Pass `None` to skip deprecation warnings.
@@ -816,7 +820,7 @@ impl<'tcx> Visitor<'tcx> for Checker<'tcx> {
// added, such as `core::intrinsics::transmute`
let parents = path.segments.iter().rev().skip(1);
for path_segment in parents {
- if let Some(def_id) = path_segment.res.as_ref().and_then(Res::opt_def_id) {
+ if let Some(def_id) = path_segment.res.opt_def_id() {
// use `None` for id to prevent deprecation check
self.tcx.check_stability_allow_unstable(
def_id,
@@ -855,7 +859,7 @@ fn is_unstable_reexport<'tcx>(tcx: TyCtxt<'tcx>, id: hir::HirId) -> bool {
}
// If this is a path that isn't a use, we don't need to do anything special
- if !matches!(tcx.hir().item(hir::ItemId { def_id }).kind, ItemKind::Use(..)) {
+ if !matches!(tcx.hir().expect_item(def_id).kind, ItemKind::Use(..)) {
return false;
}
@@ -890,8 +894,25 @@ impl<'tcx> Visitor<'tcx> for CheckTraitImplStable<'tcx> {
if let TyKind::Never = t.kind {
self.fully_stable = false;
}
+ if let TyKind::BareFn(f) = t.kind {
+ if rustc_target::spec::abi::is_stable(f.abi.name()).is_err() {
+ self.fully_stable = false;
+ }
+ }
intravisit::walk_ty(self, t)
}
+
+ fn visit_fn_decl(&mut self, fd: &'tcx hir::FnDecl<'tcx>) {
+ for ty in fd.inputs {
+ self.visit_ty(ty)
+ }
+ if let hir::FnRetTy::Return(output_ty) = fd.output {
+ match output_ty.kind {
+ TyKind::Never => {} // `-> !` is stable
+ _ => self.visit_ty(output_ty),
+ }
+ }
+ }
}
/// Given the list of enabled features that were not language features (i.e., that
@@ -901,8 +922,8 @@ pub fn check_unused_or_stable_features(tcx: TyCtxt<'_>) {
let is_staged_api =
tcx.sess.opts.unstable_opts.force_unstable_if_unmarked || tcx.features().staged_api;
if is_staged_api {
- let access_levels = &tcx.privacy_access_levels(());
- let mut missing = MissingStabilityAnnotations { tcx, access_levels };
+ let effective_visibilities = &tcx.effective_visibilities(());
+ let mut missing = MissingStabilityAnnotations { tcx, effective_visibilities };
missing.check_missing_stability(CRATE_DEF_ID, tcx.hir().span(CRATE_HIR_ID));
tcx.hir().walk_toplevel_module(&mut missing);
tcx.hir().visit_all_item_likes_in_crate(&mut missing);
@@ -917,7 +938,7 @@ pub fn check_unused_or_stable_features(tcx: TyCtxt<'_>) {
}
if !lang_features.insert(feature) {
// Warn if the user enables a lang feature multiple times.
- duplicate_feature_err(tcx.sess, span, feature);
+ tcx.sess.emit_err(DuplicateFeatureErr { span, feature });
}
}
@@ -925,18 +946,14 @@ pub fn check_unused_or_stable_features(tcx: TyCtxt<'_>) {
let mut remaining_lib_features = FxIndexMap::default();
for (feature, span) in declared_lib_features {
if !tcx.sess.opts.unstable_features.is_nightly_build() {
- struct_span_err!(
- tcx.sess,
- *span,
- E0554,
- "`#![feature]` may not be used on the {} release channel",
- env!("CFG_RELEASE_CHANNEL")
- )
- .emit();
+ tcx.sess.emit_err(FeatureOnlyOnNightly {
+ span: *span,
+ release_channel: env!("CFG_RELEASE_CHANNEL"),
+ });
}
if remaining_lib_features.contains_key(&feature) {
// Warn if the user enables a lib feature multiple times.
- duplicate_feature_err(tcx.sess, *span, *feature);
+ tcx.sess.emit_err(DuplicateFeatureErr { span: *span, feature: *feature });
}
remaining_lib_features.insert(feature, *span);
}
@@ -949,56 +966,106 @@ pub fn check_unused_or_stable_features(tcx: TyCtxt<'_>) {
remaining_lib_features.remove(&sym::libc);
remaining_lib_features.remove(&sym::test);
- // We always collect the lib features declared in the current crate, even if there are
- // no unknown features, because the collection also does feature attribute validation.
- let local_defined_features = tcx.lib_features(());
- let mut all_lib_features: FxHashMap<_, _> =
- local_defined_features.to_vec().iter().map(|el| *el).collect();
- let mut implications = tcx.stability_implications(rustc_hir::def_id::LOCAL_CRATE).clone();
- for &cnum in tcx.crates(()) {
- implications.extend(tcx.stability_implications(cnum));
- all_lib_features.extend(tcx.defined_lib_features(cnum).iter().map(|el| *el));
- }
-
- // Check that every feature referenced by an `implied_by` exists (for features defined in the
- // local crate).
- for (implied_by, feature) in tcx.stability_implications(rustc_hir::def_id::LOCAL_CRATE) {
- // Only `implied_by` needs to be checked, `feature` is guaranteed to exist.
- if !all_lib_features.contains_key(implied_by) {
- let span = local_defined_features
- .stable
- .get(feature)
- .map(|(_, span)| span)
- .or_else(|| local_defined_features.unstable.get(feature))
- .expect("feature that implied another does not exist");
- tcx.sess
- .struct_span_err(
- *span,
- format!("feature `{implied_by}` implying `{feature}` does not exist"),
- )
- .emit();
- }
- }
-
- if !remaining_lib_features.is_empty() {
- for (feature, since) in all_lib_features.iter() {
+ /// For each feature in `defined_features`..
+ ///
+ /// - If it is in `remaining_lib_features` (those features with `#![feature(..)]` attributes in
+ /// the current crate), check if it is stable (or partially stable) and thus an unnecessary
+ /// attribute.
+ /// - If it is in `remaining_implications` (a feature that is referenced by an `implied_by`
+ /// from the current crate), then remove it from the remaining implications.
+ ///
+ /// Once this function has been invoked for every feature (local crate and all extern crates),
+ /// then..
+ ///
+ /// - If features remain in `remaining_lib_features`, then the user has enabled a feature that
+ /// does not exist.
+ /// - If features remain in `remaining_implications`, the `implied_by` refers to a feature that
+ /// does not exist.
+ ///
+ /// By checking the features defined by each crate one at a time, less metadata is loaded and
+ /// compiler performance improves.
+ fn check_features<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ remaining_lib_features: &mut FxIndexMap<&Symbol, Span>,
+ remaining_implications: &mut FxHashMap<Symbol, Symbol>,
+ defined_features: &[(Symbol, Option<Symbol>)],
+ all_implications: &FxHashMap<Symbol, Symbol>,
+ ) {
+ for (feature, since) in defined_features {
if let Some(since) = since && let Some(span) = remaining_lib_features.get(&feature) {
// Warn if the user has enabled an already-stable lib feature.
- if let Some(implies) = implications.get(&feature) {
+ if let Some(implies) = all_implications.get(&feature) {
unnecessary_partially_stable_feature_lint(tcx, *span, *feature, *implies, *since);
} else {
unnecessary_stable_feature_lint(tcx, *span, *feature, *since);
}
+
}
- remaining_lib_features.remove(&feature);
- if remaining_lib_features.is_empty() {
+ remaining_lib_features.remove(feature);
+
+ // `feature` is the feature doing the implying, but `implied_by` is the feature with
+ // the attribute that establishes this relationship. `implied_by` is guaranteed to be a
+ // feature defined in the local crate because `remaining_implications` is only the
+ // implications from this crate.
+ remaining_implications.remove(feature);
+
+ if remaining_lib_features.is_empty() && remaining_implications.is_empty() {
break;
}
}
}
+ // Every local-crate implication needs its implying feature (`implied_by`) confirmed to exist.
+ let mut remaining_implications =
+ tcx.stability_implications(rustc_hir::def_id::LOCAL_CRATE).clone();
+
+ // We always collect the lib features declared in the current crate, even if there are
+ // no unknown features, because the collection also does feature attribute validation.
+ let local_defined_features = tcx.lib_features(()).to_vec();
+ if !remaining_lib_features.is_empty() || !remaining_implications.is_empty() {
+ // Loading the implications of all crates is needed to emit the partial
+ // stabilization diagnostic, but it can be skipped when there are no
+ // `remaining_lib_features`.
+ let mut all_implications = remaining_implications.clone();
+ for &cnum in tcx.crates(()) {
+ all_implications.extend(tcx.stability_implications(cnum));
+ }
+
+ check_features(
+ tcx,
+ &mut remaining_lib_features,
+ &mut remaining_implications,
+ local_defined_features.as_slice(),
+ &all_implications,
+ );
+
+ for &cnum in tcx.crates(()) {
+ if remaining_lib_features.is_empty() && remaining_implications.is_empty() {
+ break;
+ }
+ check_features(
+ tcx,
+ &mut remaining_lib_features,
+ &mut remaining_implications,
+ tcx.defined_lib_features(cnum).to_vec().as_slice(),
+ &all_implications,
+ );
+ }
+ }
+
for (feature, span) in remaining_lib_features {
- struct_span_err!(tcx.sess, span, E0635, "unknown feature `{}`", feature).emit();
+ tcx.sess.emit_err(UnknownFeature { span, feature: *feature });
+ }
+
+ for (implied_by, feature) in remaining_implications {
+ let local_defined_features = tcx.lib_features(());
+ let span = *local_defined_features
+ .stable
+ .get(&feature)
+ .map(|(_, span)| span)
+ .or_else(|| local_defined_features.unstable.get(&feature))
+ .expect("feature that implied another does not exist");
+ tcx.sess.emit_err(ImpliedFeatureNotExist { span, feature, implied_by });
}
// FIXME(#44232): the `used_features` table no longer exists, so we
@@ -1012,52 +1079,43 @@ fn unnecessary_partially_stable_feature_lint(
implies: Symbol,
since: Symbol,
) {
- tcx.struct_span_lint_hir(lint::builtin::STABLE_FEATURES, hir::CRATE_HIR_ID, span, |lint| {
- lint.build(&format!(
+ tcx.struct_span_lint_hir(
+ lint::builtin::STABLE_FEATURES,
+ hir::CRATE_HIR_ID,
+ span,
+ format!(
"the feature `{feature}` has been partially stabilized since {since} and is succeeded \
by the feature `{implies}`"
- ))
- .span_suggestion(
- span,
- &format!(
+ ),
+ |lint| {
+ lint.span_suggestion(
+ span,
+ &format!(
"if you are using features which are still unstable, change to using `{implies}`"
),
- implies,
- Applicability::MaybeIncorrect,
- )
- .span_suggestion(
- tcx.sess.source_map().span_extend_to_line(span),
- "if you are using features which are now stable, remove this line",
- "",
- Applicability::MaybeIncorrect,
- )
- .emit();
- });
+ implies,
+ Applicability::MaybeIncorrect,
+ )
+ .span_suggestion(
+ tcx.sess.source_map().span_extend_to_line(span),
+ "if you are using features which are now stable, remove this line",
+ "",
+ Applicability::MaybeIncorrect,
+ )
+ },
+ );
}
-fn unnecessary_stable_feature_lint(tcx: TyCtxt<'_>, span: Span, feature: Symbol, since: Symbol) {
- tcx.struct_span_lint_hir(lint::builtin::STABLE_FEATURES, hir::CRATE_HIR_ID, span, |lint| {
- lint.build(&format!(
- "the feature `{feature}` has been stable since {since} and no longer requires an \
- attribute to enable",
- ))
- .emit();
+fn unnecessary_stable_feature_lint(
+ tcx: TyCtxt<'_>,
+ span: Span,
+ feature: Symbol,
+ mut since: Symbol,
+) {
+ if since.as_str() == VERSION_PLACEHOLDER {
+ since = rust_version_symbol();
+ }
+ tcx.struct_span_lint_hir(
+ lint::builtin::STABLE_FEATURES,
+ hir::CRATE_HIR_ID,
+ span,
+ format!(
+ "the feature `{feature}` has been stable since {since} and no longer requires an \
+ attribute to enable"
+ ),
+ |lint| {
+ lint
});
}
-
-fn duplicate_feature_err(sess: &Session, span: Span, feature: Symbol) {
- struct_span_err!(sess, span, E0636, "the feature `{}` has already been declared", feature)
- .emit();
-}
-
-fn missing_const_err(session: &Session, fn_sig_span: Span, const_span: Span) {
- const ERROR_MSG: &'static str = "attributes `#[rustc_const_unstable]` \
- and `#[rustc_const_stable]` require \
- the function or method to be `const`";
-
- session
- .struct_span_err(fn_sig_span, ERROR_MSG)
- .span_help(fn_sig_span, "make the function or method const")
- .span_label(const_span, "attribute specified here")
- .emit();
-}
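
The restructured `check_features` above drains two worklists, one crate at a time, and stops as soon as both are empty. A minimal stand-alone sketch of that pattern, using plain `std` collections and made-up feature data instead of the compiler's interned `Symbol`s and queries:

use std::collections::{HashMap, HashSet};

// Remove every declared feature and pending implication that one crate defines.
// Mirrors the shape of `check_features`: call it once per crate, stopping as
// soon as both worklists are empty.
fn drain_defined(
    remaining_lib_features: &mut HashMap<String, usize>, // feature -> declaration site
    remaining_implications: &mut HashSet<String>,        // features named by some `implied_by`
    defined_features: &[(String, Option<String>)],       // (feature, stabilized-since)
) {
    for (feature, since) in defined_features {
        if since.is_some() && remaining_lib_features.contains_key(feature) {
            // The real pass emits the "already stable" lint here.
            eprintln!("feature `{feature}` is already stable");
        }
        remaining_lib_features.remove(feature);
        remaining_implications.remove(feature);
        if remaining_lib_features.is_empty() && remaining_implications.is_empty() {
            break;
        }
    }
}

fn main() {
    let mut remaining: HashMap<String, usize> = HashMap::from([("foo".to_string(), 0)]);
    let mut implications: HashSet<String> = HashSet::from(["bar".to_string()]);
    // Local crate first, then each extern crate, but only while work remains.
    let crates = [
        vec![("foo".to_string(), Some("1.0.0".to_string()))],
        vec![("bar".to_string(), None)],
    ];
    for crate_features in &crates {
        if remaining.is_empty() && implications.is_empty() {
            break;
        }
        drain_defined(&mut remaining, &mut implications, crate_features);
    }
    // Anything left over maps to the `UnknownFeature` / `ImpliedFeatureNotExist` errors.
    assert!(remaining.is_empty() && implications.is_empty());
}
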
diff --git a/compiler/rustc_passes/src/weak_lang_items.rs b/compiler/rustc_passes/src/weak_lang_items.rs
index c48b4ecf8..959ee600c 100644
--- a/compiler/rustc_passes/src/weak_lang_items.rs
+++ b/compiler/rustc_passes/src/weak_lang_items.rs
@@ -1,13 +1,17 @@
//! Validity checking for weak lang items
use rustc_data_structures::fx::FxHashSet;
-use rustc_errors::struct_span_err;
use rustc_hir::lang_items::{self, LangItem};
use rustc_hir::weak_lang_items::WEAK_ITEMS_REFS;
use rustc_middle::middle::lang_items::required;
use rustc_middle::ty::TyCtxt;
use rustc_session::config::CrateType;
+use crate::errors::{
+ AllocFuncRequired, MissingAllocErrorHandler, MissingLangItem, MissingPanicHandler,
+ UnknownExternLangItem,
+};
+
/// Checks the crate for usage of weak lang items, returning a vector of all the
/// language items required by this crate, but not defined yet.
pub fn check_crate<'tcx>(tcx: TyCtxt<'tcx>, items: &mut lang_items::LanguageItems) {
@@ -30,15 +34,8 @@ pub fn check_crate<'tcx>(tcx: TyCtxt<'tcx>, items: &mut lang_items::LanguageItem
items.missing.push(item);
}
} else {
- let span = tcx.def_span(id.def_id);
- struct_span_err!(
- tcx.sess,
- span,
- E0264,
- "unknown external lang item: `{}`",
- lang_item
- )
- .emit();
+ let span = tcx.def_span(id.owner_id);
+ tcx.sess.emit_err(UnknownExternLangItem { span, lang_item });
}
}
}
@@ -71,20 +68,14 @@ fn verify<'tcx>(tcx: TyCtxt<'tcx>, items: &lang_items::LanguageItems) {
for (name, &item) in WEAK_ITEMS_REFS.iter() {
if missing.contains(&item) && required(tcx, item) && items.require(item).is_err() {
if item == LangItem::PanicImpl {
- tcx.sess.err("`#[panic_handler]` function required, but not found");
+ tcx.sess.emit_err(MissingPanicHandler);
} else if item == LangItem::Oom {
if !tcx.features().default_alloc_error_handler {
- tcx.sess.err("`#[alloc_error_handler]` function required, but not found");
- tcx.sess.note_without_error("use `#![feature(default_alloc_error_handler)]` for a default error handler");
+ tcx.sess.emit_err(AllocFuncRequired);
+ tcx.sess.emit_note(MissingAllocErrorHandler);
}
} else {
- tcx
- .sess
- .diagnostic()
- .struct_err(&format!("language item required, but not found: `{}`", name))
- .note(&format!("this can occur when a binary crate with `#![no_std]` is compiled for a target where `{}` is defined in the standard library", name))
- .help(&format!("you may be able to compile for a target that doesn't need `{}`, specify a target with `--target` or in `.cargo/config`", name))
- .emit();
+ tcx.sess.emit_err(MissingLangItem { name: *name });
}
}
}
diff --git a/compiler/rustc_plugin_impl/Cargo.toml b/compiler/rustc_plugin_impl/Cargo.toml
index b6ea533c8..fa27bfc61 100644
--- a/compiler/rustc_plugin_impl/Cargo.toml
+++ b/compiler/rustc_plugin_impl/Cargo.toml
@@ -5,12 +5,12 @@ build = false
edition = "2021"
[lib]
-doctest = false
[dependencies]
libloading = "0.7.1"
rustc_errors = { path = "../rustc_errors" }
rustc_lint = { path = "../rustc_lint" }
+rustc_macros = { path = "../rustc_macros" }
rustc_metadata = { path = "../rustc_metadata" }
rustc_ast = { path = "../rustc_ast" }
rustc_session = { path = "../rustc_session" }
diff --git a/compiler/rustc_plugin_impl/src/errors.rs b/compiler/rustc_plugin_impl/src/errors.rs
new file mode 100644
index 000000000..e6a7fc86b
--- /dev/null
+++ b/compiler/rustc_plugin_impl/src/errors.rs
@@ -0,0 +1,20 @@
+//! Errors emitted by plugin_impl
+
+use rustc_macros::Diagnostic;
+use rustc_span::Span;
+
+#[derive(Diagnostic)]
+#[diag(plugin_impl_load_plugin_error)]
+pub struct LoadPluginError {
+ #[primary_span]
+ pub span: Span,
+ pub msg: String,
+}
+
+#[derive(Diagnostic)]
+#[diag(plugin_impl_malformed_plugin_attribute, code = "E0498")]
+pub struct MalformedPluginAttribute {
+ #[primary_span]
+ #[label]
+ pub span: Span,
+}
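
The new `errors.rs` follows the same migration pattern used throughout this diff: one struct per diagnostic, a `#[diag(...)]` slug backed by a Fluent message, and emission through the session instead of `struct_span_err!`. A hedged sketch of that shape, with a hypothetical diagnostic and slug that are not part of this patch (this only compiles inside the compiler tree, where `rustc_macros` and `rustc_span` are available):

use rustc_macros::Diagnostic;
use rustc_span::Span;

// Hypothetical diagnostic; `example_not_supported` would need a matching entry
// in the crate's Fluent resource, e.g.:
//     example_not_supported = the plugin `{$name}` is not supported
#[derive(Diagnostic)]
#[diag(example_not_supported)]
pub struct NotSupported {
    #[primary_span]
    #[label]
    pub span: Span,
    pub name: String,
}

// At a call site with a `Session` in scope, the struct replaces the old
// `struct_span_err!(sess, span, E0xxx, "...").emit()` pattern:
//
//     sess.emit_err(NotSupported { span, name: name.to_string() });
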
diff --git a/compiler/rustc_plugin_impl/src/lib.rs b/compiler/rustc_plugin_impl/src/lib.rs
index 1195045bd..9ac27c65d 100644
--- a/compiler/rustc_plugin_impl/src/lib.rs
+++ b/compiler/rustc_plugin_impl/src/lib.rs
@@ -8,9 +8,12 @@
#![doc(html_root_url = "https://doc.rust-lang.org/nightly/nightly-rustc/")]
#![recursion_limit = "256"]
+#![deny(rustc::untranslatable_diagnostic)]
+#![deny(rustc::diagnostic_outside_of_impl)]
use rustc_lint::LintStore;
+mod errors;
pub mod load;
/// Structure used to register plugins.
diff --git a/compiler/rustc_plugin_impl/src/load.rs b/compiler/rustc_plugin_impl/src/load.rs
index 618682da4..8e75e969a 100644
--- a/compiler/rustc_plugin_impl/src/load.rs
+++ b/compiler/rustc_plugin_impl/src/load.rs
@@ -1,16 +1,14 @@
//! Used by `rustc` when loading a plugin.
+use crate::errors::{LoadPluginError, MalformedPluginAttribute};
use crate::Registry;
use libloading::Library;
use rustc_ast::Crate;
-use rustc_errors::struct_span_err;
use rustc_metadata::locator;
use rustc_session::cstore::MetadataLoader;
use rustc_session::Session;
use rustc_span::symbol::{sym, Ident};
-use rustc_span::Span;
-use std::borrow::ToOwned;
use std::env;
use std::mem;
use std::path::PathBuf;
@@ -18,12 +16,6 @@ use std::path::PathBuf;
/// Pointer to a registrar function.
type PluginRegistrarFn = fn(&mut Registry<'_>);
-fn call_malformed_plugin_attribute(sess: &Session, span: Span) {
- struct_span_err!(sess, span, E0498, "malformed `plugin` attribute")
- .span_label(span, "malformed attribute")
- .emit();
-}
-
/// Read plugin metadata and dynamically load registrar functions.
pub fn load_plugins(
sess: &Session,
@@ -42,7 +34,9 @@ pub fn load_plugins(
Some(ident) if plugin.is_word() => {
load_plugin(&mut plugins, sess, metadata_loader, ident)
}
- _ => call_malformed_plugin_attribute(sess, plugin.span()),
+ _ => {
+ sess.emit_err(MalformedPluginAttribute { span: plugin.span() });
+ }
}
}
}
@@ -60,7 +54,7 @@ fn load_plugin(
let fun = dylink_registrar(lib).unwrap_or_else(|err| {
// This is fatal: there are almost certainly macros we need inside this crate, so
// continuing would spew "macro undefined" errors.
- sess.span_fatal(ident.span, &err.to_string());
+ sess.emit_fatal(LoadPluginError { span: ident.span, msg: err.to_string() });
});
plugins.push(fun);
}
diff --git a/compiler/rustc_privacy/Cargo.toml b/compiler/rustc_privacy/Cargo.toml
index 5785921fb..832fdc9f0 100644
--- a/compiler/rustc_privacy/Cargo.toml
+++ b/compiler/rustc_privacy/Cargo.toml
@@ -14,5 +14,5 @@ rustc_middle = { path = "../rustc_middle" }
rustc_session = { path = "../rustc_session" }
rustc_span = { path = "../rustc_span" }
rustc_trait_selection = { path = "../rustc_trait_selection" }
-rustc_typeck = { path = "../rustc_typeck" }
+rustc_hir_analysis = { path = "../rustc_hir_analysis" }
tracing = "0.1"
diff --git a/compiler/rustc_privacy/src/errors.rs b/compiler/rustc_privacy/src/errors.rs
index aca7d770f..a6c95f1a8 100644
--- a/compiler/rustc_privacy/src/errors.rs
+++ b/compiler/rustc_privacy/src/errors.rs
@@ -1,9 +1,9 @@
use rustc_errors::DiagnosticArgFromDisplay;
-use rustc_macros::{LintDiagnostic, SessionDiagnostic, SessionSubdiagnostic};
+use rustc_macros::{Diagnostic, LintDiagnostic, Subdiagnostic};
use rustc_span::{Span, Symbol};
-#[derive(SessionDiagnostic)]
-#[error(privacy::field_is_private, code = "E0451")]
+#[derive(Diagnostic)]
+#[diag(privacy_field_is_private, code = "E0451")]
pub struct FieldIsPrivate {
#[primary_span]
pub span: Span,
@@ -14,23 +14,23 @@ pub struct FieldIsPrivate {
pub label: FieldIsPrivateLabel,
}
-#[derive(SessionSubdiagnostic)]
+#[derive(Subdiagnostic)]
pub enum FieldIsPrivateLabel {
- #[label(privacy::field_is_private_is_update_syntax_label)]
+ #[label(privacy_field_is_private_is_update_syntax_label)]
IsUpdateSyntax {
#[primary_span]
span: Span,
field_name: Symbol,
},
- #[label(privacy::field_is_private_label)]
+ #[label(privacy_field_is_private_label)]
Other {
#[primary_span]
span: Span,
},
}
-#[derive(SessionDiagnostic)]
-#[error(privacy::item_is_private)]
+#[derive(Diagnostic)]
+#[diag(privacy_item_is_private)]
pub struct ItemIsPrivate<'a> {
#[primary_span]
#[label]
@@ -39,8 +39,8 @@ pub struct ItemIsPrivate<'a> {
pub descr: DiagnosticArgFromDisplay<'a>,
}
-#[derive(SessionDiagnostic)]
-#[error(privacy::unnamed_item_is_private)]
+#[derive(Diagnostic)]
+#[diag(privacy_unnamed_item_is_private)]
pub struct UnnamedItemIsPrivate {
#[primary_span]
pub span: Span,
@@ -48,8 +48,8 @@ pub struct UnnamedItemIsPrivate {
}
// Duplicate of `InPublicInterface` but with a different error code, shares the same slug.
-#[derive(SessionDiagnostic)]
-#[error(privacy::in_public_interface, code = "E0445")]
+#[derive(Diagnostic)]
+#[diag(privacy_in_public_interface, code = "E0445")]
pub struct InPublicInterfaceTraits<'a> {
#[primary_span]
#[label]
@@ -57,13 +57,13 @@ pub struct InPublicInterfaceTraits<'a> {
pub vis_descr: &'static str,
pub kind: &'a str,
pub descr: DiagnosticArgFromDisplay<'a>,
- #[label(privacy::visibility_label)]
+ #[label(visibility_label)]
pub vis_span: Span,
}
// Duplicate of `InPublicInterfaceTraits` but with a different error code, shares the same slug.
-#[derive(SessionDiagnostic)]
-#[error(privacy::in_public_interface, code = "E0446")]
+#[derive(Diagnostic)]
+#[diag(privacy_in_public_interface, code = "E0446")]
pub struct InPublicInterface<'a> {
#[primary_span]
#[label]
@@ -71,12 +71,20 @@ pub struct InPublicInterface<'a> {
pub vis_descr: &'static str,
pub kind: &'a str,
pub descr: DiagnosticArgFromDisplay<'a>,
- #[label(privacy::visibility_label)]
+ #[label(visibility_label)]
pub vis_span: Span,
}
+#[derive(Diagnostic)]
+#[diag(privacy_report_effective_visibility)]
+pub struct ReportEffectiveVisibility {
+ #[primary_span]
+ pub span: Span,
+ pub descr: String,
+}
+
#[derive(LintDiagnostic)]
-#[lint(privacy::from_private_dep_in_public_interface)]
+#[diag(privacy_from_private_dep_in_public_interface)]
pub struct FromPrivateDependencyInPublicInterface<'a> {
pub kind: &'a str,
pub descr: DiagnosticArgFromDisplay<'a>,
@@ -84,7 +92,7 @@ pub struct FromPrivateDependencyInPublicInterface<'a> {
}
#[derive(LintDiagnostic)]
-#[lint(privacy::private_in_public_lint)]
+#[diag(privacy_private_in_public_lint)]
pub struct PrivateInPublicLint<'a> {
pub vis_descr: &'static str,
pub kind: &'a str,
diff --git a/compiler/rustc_privacy/src/lib.rs b/compiler/rustc_privacy/src/lib.rs
index c28d0569d..865d6306b 100644
--- a/compiler/rustc_privacy/src/lib.rs
+++ b/compiler/rustc_privacy/src/lib.rs
@@ -4,9 +4,11 @@
#![feature(rustc_private)]
#![feature(try_blocks)]
#![recursion_limit = "256"]
-#![allow(rustc::potential_query_instability)]
-#![cfg_attr(not(bootstrap), deny(rustc::untranslatable_diagnostic))]
-#![cfg_attr(not(bootstrap), deny(rustc::diagnostic_outside_of_impl))]
+#![deny(rustc::untranslatable_diagnostic)]
+#![deny(rustc::diagnostic_outside_of_impl)]
+
+#[macro_use]
+extern crate tracing;
mod errors;
@@ -21,7 +23,7 @@ use rustc_hir::intravisit::{self, Visitor};
use rustc_hir::{AssocItemKind, HirIdSet, ItemId, Node, PatKind};
use rustc_middle::bug;
use rustc_middle::hir::nested_filter;
-use rustc_middle::middle::privacy::{AccessLevel, AccessLevels};
+use rustc_middle::middle::privacy::{EffectiveVisibilities, Level};
use rustc_middle::span_bug;
use rustc_middle::ty::abstract_const::{walk_abstract_const, AbstractConst, Node as ACNode};
use rustc_middle::ty::query::Providers;
@@ -30,7 +32,7 @@ use rustc_middle::ty::{self, Const, DefIdTree, GenericParamDefKind};
use rustc_middle::ty::{TraitRef, Ty, TyCtxt, TypeSuperVisitable, TypeVisitable, TypeVisitor};
use rustc_session::lint;
use rustc_span::hygiene::Transparency;
-use rustc_span::symbol::{kw, Ident};
+use rustc_span::symbol::{kw, sym, Ident};
use rustc_span::Span;
use std::marker::PhantomData;
@@ -39,7 +41,8 @@ use std::{cmp, fmt, mem};
use errors::{
FieldIsPrivate, FieldIsPrivateLabel, FromPrivateDependencyInPublicInterface, InPublicInterface,
- InPublicInterfaceTraits, ItemIsPrivate, PrivateInPublicLint, UnnamedItemIsPrivate,
+ InPublicInterfaceTraits, ItemIsPrivate, PrivateInPublicLint, ReportEffectiveVisibility,
+ UnnamedItemIsPrivate,
};
////////////////////////////////////////////////////////////////////////////////
@@ -119,8 +122,20 @@ where
&mut self,
projection: ty::ProjectionTy<'tcx>,
) -> ControlFlow<V::BreakTy> {
- let (trait_ref, assoc_substs) =
- projection.trait_ref_and_own_substs(self.def_id_visitor.tcx());
+ let tcx = self.def_id_visitor.tcx();
+ let (trait_ref, assoc_substs) = if tcx.def_kind(projection.item_def_id)
+ != DefKind::ImplTraitPlaceholder
+ {
+ projection.trait_ref_and_own_substs(tcx)
+ } else {
+ // HACK(RPITIT): Remove this when RPITITs are lowered to regular assoc tys
+ let def_id = tcx.impl_trait_in_trait_parent(projection.item_def_id);
+ let trait_generics = tcx.generics_of(def_id);
+ (
+ ty::TraitRef { def_id, substs: projection.substs.truncate_to(tcx, trait_generics) },
+ &projection.substs[trait_generics.count()..],
+ )
+ };
self.visit_trait(trait_ref)?;
if self.def_id_visitor.shallow() {
ControlFlow::CONTINUE
@@ -144,34 +159,12 @@ where
ty.visit_with(self)
}
ty::PredicateKind::RegionOutlives(..) => ControlFlow::CONTINUE,
- ty::PredicateKind::ConstEvaluatable(uv)
- if self.def_id_visitor.tcx().features().generic_const_exprs =>
- {
- let tcx = self.def_id_visitor.tcx();
- if let Ok(Some(ct)) = AbstractConst::new(tcx, uv) {
- self.visit_abstract_const_expr(tcx, ct)?;
- }
- ControlFlow::CONTINUE
- }
+ ty::PredicateKind::ConstEvaluatable(ct) => ct.visit_with(self),
ty::PredicateKind::WellFormed(arg) => arg.visit_with(self),
_ => bug!("unexpected predicate: {:?}", predicate),
}
}
- fn visit_abstract_const_expr(
- &mut self,
- tcx: TyCtxt<'tcx>,
- ct: AbstractConst<'tcx>,
- ) -> ControlFlow<V::BreakTy> {
- walk_abstract_const(tcx, ct, |node| match node.root(tcx) {
- ACNode::Leaf(leaf) => self.visit_const(leaf),
- ACNode::Cast(_, _, ty) => self.visit_ty(ty),
- ACNode::Binop(..) | ACNode::UnaryOp(..) | ACNode::FunctionCall(_, _) => {
- ControlFlow::CONTINUE
- }
- })
- }
-
fn visit_predicates(
&mut self,
predicates: ty::GenericPredicates<'tcx>,
@@ -294,9 +287,16 @@ where
self.visit_ty(c.ty())?;
let tcx = self.def_id_visitor.tcx();
if let Ok(Some(ct)) = AbstractConst::from_const(tcx, c) {
- self.visit_abstract_const_expr(tcx, ct)?;
+ walk_abstract_const(tcx, ct, |node| match node.root(tcx) {
+ ACNode::Leaf(leaf) => self.visit_const(leaf),
+ ACNode::Cast(_, _, ty) => self.visit_ty(ty),
+ ACNode::Binop(..) | ACNode::UnaryOp(..) | ACNode::FunctionCall(_, _) => {
+ ControlFlow::CONTINUE
+ }
+ })
+ } else {
+ ControlFlow::CONTINUE
}
- ControlFlow::CONTINUE
}
}
@@ -310,7 +310,7 @@ fn min(vis1: ty::Visibility, vis2: ty::Visibility, tcx: TyCtxt<'_>) -> ty::Visib
struct FindMin<'a, 'tcx, VL: VisibilityLike> {
tcx: TyCtxt<'tcx>,
- access_levels: &'a AccessLevels,
+ effective_visibilities: &'a EffectiveVisibilities,
min: VL,
}
@@ -330,7 +330,9 @@ impl<'a, 'tcx, VL: VisibilityLike> DefIdVisitor<'tcx> for FindMin<'a, 'tcx, VL>
_kind: &str,
_descr: &dyn fmt::Display,
) -> ControlFlow<Self::BreakTy> {
- self.min = VL::new_min(self, def_id);
+ if let Some(def_id) = def_id.as_local() {
+ self.min = VL::new_min(self, def_id);
+ }
ControlFlow::CONTINUE
}
}
@@ -338,12 +340,16 @@ impl<'a, 'tcx, VL: VisibilityLike> DefIdVisitor<'tcx> for FindMin<'a, 'tcx, VL>
trait VisibilityLike: Sized {
const MAX: Self;
const SHALLOW: bool = false;
- fn new_min(find: &FindMin<'_, '_, Self>, def_id: DefId) -> Self;
+ fn new_min(find: &FindMin<'_, '_, Self>, def_id: LocalDefId) -> Self;
// Returns an over-approximation (`skip_assoc_tys` = true) of visibility due to
// associated types for which we can't determine visibility precisely.
- fn of_impl(def_id: LocalDefId, tcx: TyCtxt<'_>, access_levels: &AccessLevels) -> Self {
- let mut find = FindMin { tcx, access_levels, min: Self::MAX };
+ fn of_impl(
+ def_id: LocalDefId,
+ tcx: TyCtxt<'_>,
+ effective_visibilities: &EffectiveVisibilities,
+ ) -> Self {
+ let mut find = FindMin { tcx, effective_visibilities, min: Self::MAX };
find.visit(tcx.type_of(def_id));
if let Some(trait_ref) = tcx.impl_trait_ref(def_id) {
find.visit_trait(trait_ref);
@@ -353,12 +359,12 @@ trait VisibilityLike: Sized {
}
impl VisibilityLike for ty::Visibility {
const MAX: Self = ty::Visibility::Public;
- fn new_min(find: &FindMin<'_, '_, Self>, def_id: DefId) -> Self {
- min(find.tcx.visibility(def_id), find.min, find.tcx)
+ fn new_min(find: &FindMin<'_, '_, Self>, def_id: LocalDefId) -> Self {
+ min(find.tcx.local_visibility(def_id), find.min, find.tcx)
}
}
-impl VisibilityLike for Option<AccessLevel> {
- const MAX: Self = Some(AccessLevel::Public);
+impl VisibilityLike for Option<Level> {
+ const MAX: Self = Some(Level::Direct);
// Type inference is very smart sometimes.
// It can make an impl reachable even if some components of its type or trait are unreachable.
// E.g. methods of `impl ReachableTrait<UnreachableTy> for ReachableTy<UnreachableTy> { ... }`
@@ -369,15 +375,8 @@ impl VisibilityLike for Option<AccessLevel> {
// both "shallow" version of its self type and "shallow" version of its trait if it exists
// (which require reaching the `DefId`s in them).
const SHALLOW: bool = true;
- fn new_min(find: &FindMin<'_, '_, Self>, def_id: DefId) -> Self {
- cmp::min(
- if let Some(def_id) = def_id.as_local() {
- find.access_levels.map.get(&def_id).copied()
- } else {
- Self::MAX
- },
- find.min,
- )
+ fn new_min(find: &FindMin<'_, '_, Self>, def_id: LocalDefId) -> Self {
+ cmp::min(find.effective_visibilities.public_at_level(def_id), find.min)
}
}
@@ -388,8 +387,8 @@ impl VisibilityLike for Option<AccessLevel> {
struct EmbargoVisitor<'tcx> {
tcx: TyCtxt<'tcx>,
- /// Accessibility levels for reachable nodes.
- access_levels: AccessLevels,
+ /// Effective visibilities for reachable nodes.
+ effective_visibilities: EffectiveVisibilities,
/// A set of pairs corresponding to modules, where the first module is
/// reachable via a macro that's defined in the second module. This cannot
/// be represented as reachable because it can't handle the following case:
@@ -403,38 +402,38 @@ struct EmbargoVisitor<'tcx> {
/// n::p::f()
/// }
macro_reachable: FxHashSet<(LocalDefId, LocalDefId)>,
- /// Previous accessibility level; `None` means unreachable.
- prev_level: Option<AccessLevel>,
+ /// Previous visibility level; `None` means unreachable.
+ prev_level: Option<Level>,
/// Has something changed in the level map?
changed: bool,
}
struct ReachEverythingInTheInterfaceVisitor<'a, 'tcx> {
- access_level: Option<AccessLevel>,
+ level: Option<Level>,
item_def_id: LocalDefId,
ev: &'a mut EmbargoVisitor<'tcx>,
}
impl<'tcx> EmbargoVisitor<'tcx> {
- fn get(&self, def_id: LocalDefId) -> Option<AccessLevel> {
- self.access_levels.map.get(&def_id).copied()
+ fn get(&self, def_id: LocalDefId) -> Option<Level> {
+ self.effective_visibilities.public_at_level(def_id)
}
- fn update_with_hir_id(
- &mut self,
- hir_id: hir::HirId,
- level: Option<AccessLevel>,
- ) -> Option<AccessLevel> {
+ fn update_with_hir_id(&mut self, hir_id: hir::HirId, level: Option<Level>) -> Option<Level> {
let def_id = self.tcx.hir().local_def_id(hir_id);
self.update(def_id, level)
}
/// Updates node level and returns the updated level.
- fn update(&mut self, def_id: LocalDefId, level: Option<AccessLevel>) -> Option<AccessLevel> {
+ fn update(&mut self, def_id: LocalDefId, level: Option<Level>) -> Option<Level> {
let old_level = self.get(def_id);
- // Accessibility levels can only grow.
+ // Visibility levels can only grow.
if level > old_level {
- self.access_levels.map.insert(def_id, level.unwrap());
+ self.effective_visibilities.set_public_at_level(
+ def_id,
+ || ty::Visibility::Restricted(self.tcx.parent_module_from_def_id(def_id)),
+ level.unwrap(),
+ );
self.changed = true;
level
} else {
@@ -445,10 +444,10 @@ impl<'tcx> EmbargoVisitor<'tcx> {
fn reach(
&mut self,
def_id: LocalDefId,
- access_level: Option<AccessLevel>,
+ level: Option<Level>,
) -> ReachEverythingInTheInterfaceVisitor<'_, 'tcx> {
ReachEverythingInTheInterfaceVisitor {
- access_level: cmp::min(access_level, Some(AccessLevel::Reachable)),
+ level: cmp::min(level, Some(Level::Reachable)),
item_def_id: def_id,
ev: self,
}
@@ -506,16 +505,16 @@ impl<'tcx> EmbargoVisitor<'tcx> {
fn update_macro_reachable_mod(&mut self, module_def_id: LocalDefId, defining_mod: LocalDefId) {
let module = self.tcx.hir().get_module(module_def_id).0;
for item_id in module.item_ids {
- let def_kind = self.tcx.def_kind(item_id.def_id);
- let vis = self.tcx.visibility(item_id.def_id);
- self.update_macro_reachable_def(item_id.def_id, def_kind, vis, defining_mod);
+ let def_kind = self.tcx.def_kind(item_id.owner_id);
+ let vis = self.tcx.local_visibility(item_id.owner_id.def_id);
+ self.update_macro_reachable_def(item_id.owner_id.def_id, def_kind, vis, defining_mod);
}
if let Some(exports) = self.tcx.module_reexports(module_def_id) {
for export in exports {
- if export.vis.is_accessible_from(defining_mod.to_def_id(), self.tcx) {
+ if export.vis.is_accessible_from(defining_mod, self.tcx) {
if let Res::Def(def_kind, def_id) = export.res {
if let Some(def_id) = def_id.as_local() {
- let vis = self.tcx.visibility(def_id.to_def_id());
+ let vis = self.tcx.local_visibility(def_id);
self.update_macro_reachable_def(def_id, def_kind, vis, defining_mod);
}
}
@@ -531,14 +530,14 @@ impl<'tcx> EmbargoVisitor<'tcx> {
vis: ty::Visibility,
module: LocalDefId,
) {
- let level = Some(AccessLevel::Reachable);
+ let level = Some(Level::Reachable);
if vis.is_public() {
self.update(def_id, level);
}
match def_kind {
// No type privacy, so can be directly marked as reachable.
DefKind::Const | DefKind::Static(_) | DefKind::TraitAlias | DefKind::TyAlias => {
- if vis.is_accessible_from(module.to_def_id(), self.tcx) {
+ if vis.is_accessible_from(module, self.tcx) {
self.update(def_id, level);
}
}
@@ -550,7 +549,7 @@ impl<'tcx> EmbargoVisitor<'tcx> {
DefKind::Macro(_) => {
let item = self.tcx.hir().expect_item(def_id);
if let hir::ItemKind::Macro(MacroDef { macro_rules: false, .. }, _) = item.kind {
- if vis.is_accessible_from(module.to_def_id(), self.tcx) {
+ if vis.is_accessible_from(module, self.tcx) {
self.update(def_id, level);
}
}
@@ -561,7 +560,7 @@ impl<'tcx> EmbargoVisitor<'tcx> {
// hygiene these don't need to be marked reachable. The contents of
// the module, however may be reachable.
DefKind::Mod => {
- if vis.is_accessible_from(module.to_def_id(), self.tcx) {
+ if vis.is_accessible_from(module, self.tcx) {
self.update_macro_reachable(def_id, module);
}
}
@@ -575,8 +574,8 @@ impl<'tcx> EmbargoVisitor<'tcx> {
{
for field in struct_def.fields() {
let def_id = self.tcx.hir().local_def_id(field.hir_id);
- let field_vis = self.tcx.visibility(def_id);
- if field_vis.is_accessible_from(module.to_def_id(), self.tcx) {
+ let field_vis = self.tcx.local_visibility(def_id);
+ if field_vis.is_accessible_from(module, self.tcx) {
self.reach(def_id, level).ty();
}
}
@@ -596,6 +595,7 @@ impl<'tcx> EmbargoVisitor<'tcx> {
| DefKind::ForeignTy
| DefKind::Fn
| DefKind::OpaqueTy
+ | DefKind::ImplTraitPlaceholder
| DefKind::AssocFn
| DefKind::Trait
| DefKind::TyParam
@@ -627,11 +627,14 @@ impl<'tcx> Visitor<'tcx> for EmbargoVisitor<'tcx> {
fn visit_item(&mut self, item: &'tcx hir::Item<'tcx>) {
let item_level = match item.kind {
hir::ItemKind::Impl { .. } => {
- let impl_level =
- Option::<AccessLevel>::of_impl(item.def_id, self.tcx, &self.access_levels);
- self.update(item.def_id, impl_level)
+ let impl_level = Option::<Level>::of_impl(
+ item.owner_id.def_id,
+ self.tcx,
+ &self.effective_visibilities,
+ );
+ self.update(item.owner_id.def_id, impl_level)
}
- _ => self.get(item.def_id),
+ _ => self.get(item.owner_id.def_id),
};
// Update levels of nested things.
@@ -650,15 +653,15 @@ impl<'tcx> Visitor<'tcx> for EmbargoVisitor<'tcx> {
hir::ItemKind::Impl(ref impl_) => {
for impl_item_ref in impl_.items {
if impl_.of_trait.is_some()
- || self.tcx.visibility(impl_item_ref.id.def_id) == ty::Visibility::Public
+ || self.tcx.visibility(impl_item_ref.id.owner_id).is_public()
{
- self.update(impl_item_ref.id.def_id, item_level);
+ self.update(impl_item_ref.id.owner_id.def_id, item_level);
}
}
}
hir::ItemKind::Trait(.., trait_item_refs) => {
for trait_item_ref in trait_item_refs {
- self.update(trait_item_ref.id.def_id, item_level);
+ self.update(trait_item_ref.id.owner_id.def_id, item_level);
}
}
hir::ItemKind::Struct(ref def, _) | hir::ItemKind::Union(ref def, _) => {
@@ -674,12 +677,12 @@ impl<'tcx> Visitor<'tcx> for EmbargoVisitor<'tcx> {
}
}
hir::ItemKind::Macro(ref macro_def, _) => {
- self.update_reachability_from_macro(item.def_id, macro_def);
+ self.update_reachability_from_macro(item.owner_id.def_id, macro_def);
}
hir::ItemKind::ForeignMod { items, .. } => {
for foreign_item in items {
- if self.tcx.visibility(foreign_item.id.def_id) == ty::Visibility::Public {
- self.update(foreign_item.id.def_id, item_level);
+ if self.tcx.visibility(foreign_item.id.owner_id).is_public() {
+ self.update(foreign_item.id.owner_id.def_id, item_level);
}
}
}
@@ -702,22 +705,21 @@ impl<'tcx> Visitor<'tcx> for EmbargoVisitor<'tcx> {
hir::ItemKind::Macro(..) | hir::ItemKind::ExternCrate(..) => {}
// All nested items are checked by `visit_item`.
hir::ItemKind::Mod(..) => {}
- // Handled in the access level of in rustc_resolve
+ // Handled in `rustc_resolve`.
hir::ItemKind::Use(..) => {}
// The interface is empty.
hir::ItemKind::GlobalAsm(..) => {}
- hir::ItemKind::OpaqueTy(..) => {
+ hir::ItemKind::OpaqueTy(ref opaque) => {
// HACK(jynelson): trying to infer the type of `impl trait` breaks `async-std` (and `pub async fn` in general)
// Since rustdoc never needs to do codegen and doesn't care about link-time reachability,
// mark this as unreachable.
// See https://github.com/rust-lang/rust/issues/75100
- if !self.tcx.sess.opts.actually_rustdoc {
+ if !opaque.in_trait && !self.tcx.sess.opts.actually_rustdoc {
// FIXME: This is some serious pessimization intended to workaround deficiencies
// in the reachability pass (`middle/reachable.rs`). Types are marked as link-time
// reachable if they are returned via `impl Trait`, even from private functions.
- let exist_level =
- cmp::max(item_level, Some(AccessLevel::ReachableFromImplTrait));
- self.reach(item.def_id, exist_level).generics().predicates().ty();
+ let exist_level = cmp::max(item_level, Some(Level::ReachableThroughImplTrait));
+ self.reach(item.owner_id.def_id, exist_level).generics().predicates().ty();
}
}
// Visit everything.
@@ -726,20 +728,20 @@ impl<'tcx> Visitor<'tcx> for EmbargoVisitor<'tcx> {
| hir::ItemKind::Fn(..)
| hir::ItemKind::TyAlias(..) => {
if item_level.is_some() {
- self.reach(item.def_id, item_level).generics().predicates().ty();
+ self.reach(item.owner_id.def_id, item_level).generics().predicates().ty();
}
}
hir::ItemKind::Trait(.., trait_item_refs) => {
if item_level.is_some() {
- self.reach(item.def_id, item_level).generics().predicates();
+ self.reach(item.owner_id.def_id, item_level).generics().predicates();
for trait_item_ref in trait_item_refs {
let tcx = self.tcx;
- let mut reach = self.reach(trait_item_ref.id.def_id, item_level);
+ let mut reach = self.reach(trait_item_ref.id.owner_id.def_id, item_level);
reach.generics().predicates();
if trait_item_ref.kind == AssocItemKind::Type
- && !tcx.impl_defaultness(trait_item_ref.id.def_id).has_value()
+ && !tcx.impl_defaultness(trait_item_ref.id.owner_id).has_value()
{
// No type to visit.
} else {
@@ -750,18 +752,22 @@ impl<'tcx> Visitor<'tcx> for EmbargoVisitor<'tcx> {
}
hir::ItemKind::TraitAlias(..) => {
if item_level.is_some() {
- self.reach(item.def_id, item_level).generics().predicates();
+ self.reach(item.owner_id.def_id, item_level).generics().predicates();
}
}
// Visit everything except for private impl items.
hir::ItemKind::Impl(ref impl_) => {
if item_level.is_some() {
- self.reach(item.def_id, item_level).generics().predicates().ty().trait_ref();
+ self.reach(item.owner_id.def_id, item_level)
+ .generics()
+ .predicates()
+ .ty()
+ .trait_ref();
for impl_item_ref in impl_.items {
- let impl_item_level = self.get(impl_item_ref.id.def_id);
+ let impl_item_level = self.get(impl_item_ref.id.owner_id.def_id);
if impl_item_level.is_some() {
- self.reach(impl_item_ref.id.def_id, impl_item_level)
+ self.reach(impl_item_ref.id.owner_id.def_id, impl_item_level)
.generics()
.predicates()
.ty();
@@ -773,7 +779,7 @@ impl<'tcx> Visitor<'tcx> for EmbargoVisitor<'tcx> {
// Visit everything, but enum variants have their own levels.
hir::ItemKind::Enum(ref def, _) => {
if item_level.is_some() {
- self.reach(item.def_id, item_level).generics().predicates();
+ self.reach(item.owner_id.def_id, item_level).generics().predicates();
}
for variant in def.variants {
let variant_level = self.get(self.tcx.hir().local_def_id(variant.id));
@@ -784,13 +790,13 @@ impl<'tcx> Visitor<'tcx> for EmbargoVisitor<'tcx> {
}
// Corner case: if the variant is reachable, but its
// enum is not, make the enum reachable as well.
- self.reach(item.def_id, variant_level).ty();
+ self.reach(item.owner_id.def_id, variant_level).ty();
}
if let Some(hir_id) = variant.data.ctor_hir_id() {
let ctor_def_id = self.tcx.hir().local_def_id(hir_id);
let ctor_level = self.get(ctor_def_id);
if ctor_level.is_some() {
- self.reach(item.def_id, ctor_level).ty();
+ self.reach(item.owner_id.def_id, ctor_level).ty();
}
}
}
@@ -798,9 +804,9 @@ impl<'tcx> Visitor<'tcx> for EmbargoVisitor<'tcx> {
// Visit everything, but foreign items have their own levels.
hir::ItemKind::ForeignMod { items, .. } => {
for foreign_item in items {
- let foreign_item_level = self.get(foreign_item.id.def_id);
+ let foreign_item_level = self.get(foreign_item.id.owner_id.def_id);
if foreign_item_level.is_some() {
- self.reach(foreign_item.id.def_id, foreign_item_level)
+ self.reach(foreign_item.id.owner_id.def_id, foreign_item_level)
.generics()
.predicates()
.ty();
@@ -810,7 +816,7 @@ impl<'tcx> Visitor<'tcx> for EmbargoVisitor<'tcx> {
// Visit everything except for private fields.
hir::ItemKind::Struct(ref struct_def, _) | hir::ItemKind::Union(ref struct_def, _) => {
if item_level.is_some() {
- self.reach(item.def_id, item_level).generics().predicates();
+ self.reach(item.owner_id.def_id, item_level).generics().predicates();
for field in struct_def.fields() {
let def_id = self.tcx.hir().local_def_id(field.hir_id);
let field_level = self.get(def_id);
@@ -823,7 +829,7 @@ impl<'tcx> Visitor<'tcx> for EmbargoVisitor<'tcx> {
let ctor_def_id = self.tcx.hir().local_def_id(hir_id);
let ctor_level = self.get(ctor_def_id);
if ctor_level.is_some() {
- self.reach(item.def_id, ctor_level).ty();
+ self.reach(item.owner_id.def_id, ctor_level).ty();
}
}
}
@@ -894,16 +900,92 @@ impl<'tcx> DefIdVisitor<'tcx> for ReachEverythingInTheInterfaceVisitor<'_, 'tcx>
_descr: &dyn fmt::Display,
) -> ControlFlow<Self::BreakTy> {
if let Some(def_id) = def_id.as_local() {
- if let (ty::Visibility::Public, _) | (_, Some(AccessLevel::ReachableFromImplTrait)) =
- (self.tcx().visibility(def_id.to_def_id()), self.access_level)
+ if let (ty::Visibility::Public, _) | (_, Some(Level::ReachableThroughImplTrait)) =
+ (self.tcx().visibility(def_id.to_def_id()), self.level)
{
- self.ev.update(def_id, self.access_level);
+ self.ev.update(def_id, self.level);
}
}
ControlFlow::CONTINUE
}
}
+////////////////////////////////////////////////////////////////////////////////
+/// Visitor used for checking the `EffectiveVisibilities` table.
+////////////////////////////////////////////////////////////////////////////////
+pub struct TestReachabilityVisitor<'tcx, 'a> {
+ tcx: TyCtxt<'tcx>,
+ effective_visibilities: &'a EffectiveVisibilities,
+}
+
+impl<'tcx, 'a> TestReachabilityVisitor<'tcx, 'a> {
+ fn effective_visibility_diagnostic(&mut self, def_id: LocalDefId) {
+ if self.tcx.has_attr(def_id.to_def_id(), sym::rustc_effective_visibility) {
+ let mut error_msg = String::new();
+ let span = self.tcx.def_span(def_id.to_def_id());
+ if let Some(effective_vis) = self.effective_visibilities.effective_vis(def_id) {
+ for level in Level::all_levels() {
+ let vis_str = match effective_vis.at_level(level) {
+ ty::Visibility::Restricted(restricted_id) => {
+ if restricted_id.is_top_level_module() {
+ "pub(crate)".to_string()
+ } else if *restricted_id == self.tcx.parent_module_from_def_id(def_id) {
+ "pub(self)".to_string()
+ } else {
+ format!("pub({})", self.tcx.item_name(restricted_id.to_def_id()))
+ }
+ }
+ ty::Visibility::Public => "pub".to_string(),
+ };
+ if level != Level::Direct {
+ error_msg.push_str(", ");
+ }
+ error_msg.push_str(&format!("{:?}: {}", level, vis_str));
+ }
+ } else {
+ error_msg.push_str("not in the table");
+ }
+ self.tcx.sess.emit_err(ReportEffectiveVisibility { span, descr: error_msg });
+ }
+ }
+}
+
+impl<'tcx, 'a> Visitor<'tcx> for TestReachabilityVisitor<'tcx, 'a> {
+ fn visit_item(&mut self, item: &'tcx hir::Item<'tcx>) {
+ self.effective_visibility_diagnostic(item.owner_id.def_id);
+
+ match item.kind {
+ hir::ItemKind::Enum(ref def, _) => {
+ for variant in def.variants.iter() {
+ let variant_id = self.tcx.hir().local_def_id(variant.id);
+ self.effective_visibility_diagnostic(variant_id);
+ for field in variant.data.fields() {
+ let def_id = self.tcx.hir().local_def_id(field.hir_id);
+ self.effective_visibility_diagnostic(def_id);
+ }
+ }
+ }
+ hir::ItemKind::Struct(ref def, _) | hir::ItemKind::Union(ref def, _) => {
+ for field in def.fields() {
+ let def_id = self.tcx.hir().local_def_id(field.hir_id);
+ self.effective_visibility_diagnostic(def_id);
+ }
+ }
+ _ => {}
+ }
+ }
+
+ fn visit_trait_item(&mut self, item: &'tcx hir::TraitItem<'tcx>) {
+ self.effective_visibility_diagnostic(item.owner_id.def_id);
+ }
+ fn visit_impl_item(&mut self, item: &'tcx hir::ImplItem<'tcx>) {
+ self.effective_visibility_diagnostic(item.owner_id.def_id);
+ }
+ fn visit_foreign_item(&mut self, item: &'tcx hir::ForeignItem<'tcx>) {
+ self.effective_visibility_diagnostic(item.owner_id.def_id);
+ }
+}
+
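
`TestReachabilityVisitor` only fires for items carrying the internal `rustc_effective_visibility` attribute, so in practice it is a test harness: each annotated item produces one `ReportEffectiveVisibility` error via `effective_visibility_diagnostic` above. A hedged sketch of how a UI test might exercise it (the item names are illustrative, not taken from this patch):

// Hypothetical UI-test source, compiled with a nightly rustc: each annotated
// item is expected to produce one error listing `<Level>: <visibility>` pairs
// for every level tracked by `EffectiveVisibilities`.
#![feature(rustc_attrs)]

#[rustc_effective_visibility]
mod m {
    #[rustc_effective_visibility]
    pub fn f() {}
}

fn main() {}
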
//////////////////////////////////////////////////////////////////////////////////////
/// Name privacy visitor, checks privacy and reports violations.
/// Most of name privacy checks are performed during the main resolution phase,
@@ -971,7 +1053,7 @@ impl<'tcx> Visitor<'tcx> for NamePrivacyVisitor<'tcx> {
fn visit_mod(&mut self, _m: &'tcx hir::Mod<'tcx>, _s: Span, _n: hir::HirId) {
// Don't visit nested modules, since we run a separate visitor walk
- // for each module in `privacy_access_levels`
+ // for each module in `effective_visibilities`
}
fn visit_nested_body(&mut self, body: hir::BodyId) {
@@ -983,7 +1065,7 @@ impl<'tcx> Visitor<'tcx> for NamePrivacyVisitor<'tcx> {
}
fn visit_item(&mut self, item: &'tcx hir::Item<'tcx>) {
- let orig_current_item = mem::replace(&mut self.current_item, item.def_id);
+ let orig_current_item = mem::replace(&mut self.current_item, item.owner_id.def_id);
intravisit::walk_item(self, item);
self.current_item = orig_current_item;
}
@@ -1059,7 +1141,7 @@ impl<'tcx> TypePrivacyVisitor<'tcx> {
}
fn item_is_accessible(&self, did: DefId) -> bool {
- self.tcx.visibility(did).is_accessible_from(self.current_item.to_def_id(), self.tcx)
+ self.tcx.visibility(did).is_accessible_from(self.current_item, self.tcx)
}
// Take node-id of an expression or pattern and check its type for privacy.
@@ -1096,7 +1178,7 @@ impl<'tcx> Visitor<'tcx> for TypePrivacyVisitor<'tcx> {
fn visit_mod(&mut self, _m: &'tcx hir::Mod<'tcx>, _s: Span, _n: hir::HirId) {
// Don't visit nested modules, since we run a separate visitor walk
- // for each module in `privacy_access_levels`
+ // for each module in `effective_visibilities`
}
fn visit_nested_body(&mut self, body: hir::BodyId) {
@@ -1126,7 +1208,7 @@ impl<'tcx> Visitor<'tcx> for TypePrivacyVisitor<'tcx> {
// Types in signatures.
// FIXME: This is very ineffective. Ideally each HIR type should be converted
// into a semantic type only once and the result should be cached somehow.
- if self.visit(rustc_typeck::hir_ty_to_ty(self.tcx, hir_ty)).is_break() {
+ if self.visit(rustc_hir_analysis::hir_ty_to_ty(self.tcx, hir_ty)).is_break() {
return;
}
}
@@ -1155,7 +1237,7 @@ impl<'tcx> Visitor<'tcx> for TypePrivacyVisitor<'tcx> {
if self.maybe_typeck_results.is_none() {
// Avoid calling `hir_trait_to_predicates` in bodies, it will ICE.
// The traits' privacy in bodies is already checked as a part of trait object types.
- let bounds = rustc_typeck::hir_trait_to_predicates(
+ let bounds = rustc_hir_analysis::hir_trait_to_predicates(
self.tcx,
trait_ref,
// NOTE: This isn't really right, but the actual type doesn't matter here. It's
@@ -1260,7 +1342,7 @@ impl<'tcx> Visitor<'tcx> for TypePrivacyVisitor<'tcx> {
}
}
- intravisit::walk_qpath(self, qpath, id, span);
+ intravisit::walk_qpath(self, qpath, id);
}
// Check types of patterns.
@@ -1286,7 +1368,7 @@ impl<'tcx> Visitor<'tcx> for TypePrivacyVisitor<'tcx> {
// Check types in item interfaces.
fn visit_item(&mut self, item: &'tcx hir::Item<'tcx>) {
- let orig_current_item = mem::replace(&mut self.current_item, item.def_id);
+ let orig_current_item = mem::replace(&mut self.current_item, item.owner_id.def_id);
let old_maybe_typeck_results = self.maybe_typeck_results.take();
intravisit::walk_item(self, item);
self.maybe_typeck_results = old_maybe_typeck_results;
@@ -1321,7 +1403,7 @@ impl<'tcx> DefIdVisitor<'tcx> for TypePrivacyVisitor<'tcx> {
struct ObsoleteVisiblePrivateTypesVisitor<'a, 'tcx> {
tcx: TyCtxt<'tcx>,
- access_levels: &'a AccessLevels,
+ effective_visibilities: &'a EffectiveVisibilities,
in_variant: bool,
// Set of errors produced by this obsolete visitor.
old_error_set: HirIdSet,
@@ -1341,7 +1423,9 @@ struct ObsoleteCheckTypeForPrivatenessVisitor<'a, 'b, 'tcx> {
impl<'a, 'tcx> ObsoleteVisiblePrivateTypesVisitor<'a, 'tcx> {
fn path_is_private_type(&self, path: &hir::Path<'_>) -> bool {
let did = match path.res {
- Res::PrimTy(..) | Res::SelfTy { .. } | Res::Err => return false,
+ Res::PrimTy(..) | Res::SelfTyParam { .. } | Res::SelfTyAlias { .. } | Res::Err => {
+ return false;
+ }
res => res.def_id(),
};
@@ -1362,7 +1446,7 @@ impl<'a, 'tcx> ObsoleteVisiblePrivateTypesVisitor<'a, 'tcx> {
fn trait_is_public(&self, trait_id: LocalDefId) -> bool {
// FIXME: this would preferably be using `exported_items`, but all
// traits are exported currently (see `EmbargoVisitor.exported_trait`).
- self.access_levels.is_public(trait_id)
+ self.effective_visibilities.is_directly_public(trait_id)
}
fn check_generic_bound(&mut self, bound: &hir::GenericBound<'_>) {
@@ -1374,7 +1458,7 @@ impl<'a, 'tcx> ObsoleteVisiblePrivateTypesVisitor<'a, 'tcx> {
}
fn item_is_public(&self, def_id: LocalDefId) -> bool {
- self.access_levels.is_reachable(def_id) || self.tcx.visibility(def_id).is_public()
+ self.effective_visibilities.is_reachable(def_id) || self.tcx.visibility(def_id).is_public()
}
}
@@ -1428,7 +1512,7 @@ impl<'a, 'tcx> Visitor<'tcx> for ObsoleteVisiblePrivateTypesVisitor<'a, 'tcx> {
hir::ItemKind::ForeignMod { .. } => {}
hir::ItemKind::Trait(.., bounds, _) => {
- if !self.trait_is_public(item.def_id) {
+ if !self.trait_is_public(item.owner_id.def_id) {
return;
}
@@ -1488,10 +1572,10 @@ impl<'a, 'tcx> Visitor<'tcx> for ObsoleteVisiblePrivateTypesVisitor<'a, 'tcx> {
|| impl_.items.iter().any(|impl_item_ref| {
let impl_item = self.tcx.hir().impl_item(impl_item_ref.id);
match impl_item.kind {
- hir::ImplItemKind::Const(..) | hir::ImplItemKind::Fn(..) => {
- self.access_levels.is_reachable(impl_item_ref.id.def_id)
- }
- hir::ImplItemKind::TyAlias(_) => false,
+ hir::ImplItemKind::Const(..) | hir::ImplItemKind::Fn(..) => self
+ .effective_visibilities
+ .is_reachable(impl_item_ref.id.owner_id.def_id),
+ hir::ImplItemKind::Type(_) => false,
}
});
@@ -1509,11 +1593,11 @@ impl<'a, 'tcx> Visitor<'tcx> for ObsoleteVisiblePrivateTypesVisitor<'a, 'tcx> {
let impl_item = self.tcx.hir().impl_item(impl_item_ref.id);
match impl_item.kind {
hir::ImplItemKind::Const(..) | hir::ImplItemKind::Fn(..)
- if self.item_is_public(impl_item.def_id) =>
+ if self.item_is_public(impl_item.owner_id.def_id) =>
{
intravisit::walk_impl_item(self, impl_item)
}
- hir::ImplItemKind::TyAlias(..) => {
+ hir::ImplItemKind::Type(..) => {
intravisit::walk_impl_item(self, impl_item)
}
_ => {}
@@ -1539,7 +1623,7 @@ impl<'a, 'tcx> Visitor<'tcx> for ObsoleteVisiblePrivateTypesVisitor<'a, 'tcx> {
// Those in 3. are warned with this call.
for impl_item_ref in impl_.items {
let impl_item = self.tcx.hir().impl_item(impl_item_ref.id);
- if let hir::ImplItemKind::TyAlias(ty) = impl_item.kind {
+ if let hir::ImplItemKind::Type(ty) = impl_item.kind {
self.visit_ty(ty);
}
}
@@ -1550,9 +1634,10 @@ impl<'a, 'tcx> Visitor<'tcx> for ObsoleteVisiblePrivateTypesVisitor<'a, 'tcx> {
// methods will be visible as `Public::foo`.
let mut found_pub_static = false;
for impl_item_ref in impl_.items {
- if self.access_levels.is_reachable(impl_item_ref.id.def_id)
- || self.tcx.visibility(impl_item_ref.id.def_id)
- == ty::Visibility::Public
+ if self
+ .effective_visibilities
+ .is_reachable(impl_item_ref.id.owner_id.def_id)
+ || self.tcx.visibility(impl_item_ref.id.owner_id).is_public()
{
let impl_item = self.tcx.hir().impl_item(impl_item_ref.id);
match impl_item_ref.kind {
@@ -1580,7 +1665,7 @@ impl<'a, 'tcx> Visitor<'tcx> for ObsoleteVisiblePrivateTypesVisitor<'a, 'tcx> {
hir::ItemKind::TyAlias(..) => return,
// Not at all public, so we don't care.
- _ if !self.item_is_public(item.def_id) => {
+ _ if !self.item_is_public(item.owner_id.def_id) => {
return;
}
@@ -1611,7 +1696,7 @@ impl<'a, 'tcx> Visitor<'tcx> for ObsoleteVisiblePrivateTypesVisitor<'a, 'tcx> {
}
fn visit_foreign_item(&mut self, item: &'tcx hir::ForeignItem<'tcx>) {
- if self.access_levels.is_reachable(item.def_id) {
+ if self.effective_visibilities.is_reachable(item.owner_id.def_id) {
intravisit::walk_foreign_item(self, item)
}
}
@@ -1625,15 +1710,10 @@ impl<'a, 'tcx> Visitor<'tcx> for ObsoleteVisiblePrivateTypesVisitor<'a, 'tcx> {
intravisit::walk_ty(self, t)
}
- fn visit_variant(
- &mut self,
- v: &'tcx hir::Variant<'tcx>,
- g: &'tcx hir::Generics<'tcx>,
- item_id: hir::HirId,
- ) {
- if self.access_levels.is_reachable(self.tcx.hir().local_def_id(v.id)) {
+ fn visit_variant(&mut self, v: &'tcx hir::Variant<'tcx>) {
+ if self.effective_visibilities.is_reachable(self.tcx.hir().local_def_id(v.id)) {
self.in_variant = true;
- intravisit::walk_variant(self, v, g, item_id);
+ intravisit::walk_variant(self, v);
self.in_variant = false;
}
}
@@ -1727,18 +1807,17 @@ impl SearchInterfaceForPrivateItemsVisitor<'_> {
);
}
- let hir_id = match def_id.as_local() {
- Some(def_id) => self.tcx.hir().local_def_id_to_hir_id(def_id),
- None => return false,
+ let Some(local_def_id) = def_id.as_local() else {
+ return false;
};
- let vis = self.tcx.visibility(def_id);
+ let vis = self.tcx.local_visibility(local_def_id);
if !vis.is_at_least(self.required_visibility, self.tcx) {
+ let hir_id = self.tcx.hir().local_def_id_to_hir_id(local_def_id);
let vis_descr = match vis {
ty::Visibility::Public => "public",
- ty::Visibility::Invisible => "private",
ty::Visibility::Restricted(vis_def_id) => {
- if vis_def_id == self.tcx.parent_module(hir_id).to_def_id() {
+ if vis_def_id == self.tcx.parent_module(hir_id) {
"private"
} else if vis_def_id.is_top_level_module() {
"crate-private"
@@ -1790,7 +1869,7 @@ impl SearchInterfaceForPrivateItemsVisitor<'_> {
fn leaks_private_dep(&self, item_id: DefId) -> bool {
let ret = self.required_visibility.is_public() && self.tcx.is_private_dep(item_id.krate);
- tracing::debug!("leaks_private_dep(item_id={:?})={}", item_id, ret);
+ debug!("leaks_private_dep(item_id={:?})={}", item_id, ret);
ret
}
}
@@ -1854,43 +1933,44 @@ impl<'tcx> PrivateItemsInPublicInterfacesChecker<'tcx> {
pub fn check_item(&mut self, id: ItemId) {
let tcx = self.tcx;
- let item_visibility = tcx.visibility(id.def_id);
- let def_kind = tcx.def_kind(id.def_id);
+ let def_id = id.owner_id.def_id;
+ let item_visibility = tcx.local_visibility(def_id);
+ let def_kind = tcx.def_kind(def_id);
match def_kind {
DefKind::Const | DefKind::Static(_) | DefKind::Fn | DefKind::TyAlias => {
- self.check(id.def_id, item_visibility).generics().predicates().ty();
+ self.check(def_id, item_visibility).generics().predicates().ty();
}
DefKind::OpaqueTy => {
// `ty()` for opaque types is the underlying type,
// it's not a part of interface, so we skip it.
- self.check(id.def_id, item_visibility).generics().bounds();
+ self.check(def_id, item_visibility).generics().bounds();
}
DefKind::Trait => {
let item = tcx.hir().item(id);
if let hir::ItemKind::Trait(.., trait_item_refs) = item.kind {
- self.check(item.def_id, item_visibility).generics().predicates();
+ self.check(item.owner_id.def_id, item_visibility).generics().predicates();
for trait_item_ref in trait_item_refs {
self.check_assoc_item(
- trait_item_ref.id.def_id,
+ trait_item_ref.id.owner_id.def_id,
trait_item_ref.kind,
item_visibility,
);
if let AssocItemKind::Type = trait_item_ref.kind {
- self.check(trait_item_ref.id.def_id, item_visibility).bounds();
+ self.check(trait_item_ref.id.owner_id.def_id, item_visibility).bounds();
}
}
}
}
DefKind::TraitAlias => {
- self.check(id.def_id, item_visibility).generics().predicates();
+ self.check(def_id, item_visibility).generics().predicates();
}
DefKind::Enum => {
let item = tcx.hir().item(id);
if let hir::ItemKind::Enum(ref def, _) = item.kind {
- self.check(item.def_id, item_visibility).generics().predicates();
+ self.check(item.owner_id.def_id, item_visibility).generics().predicates();
for variant in def.variants {
for field in variant.data.fields() {
@@ -1905,8 +1985,11 @@ impl<'tcx> PrivateItemsInPublicInterfacesChecker<'tcx> {
let item = tcx.hir().item(id);
if let hir::ItemKind::ForeignMod { items, .. } = item.kind {
for foreign_item in items {
- let vis = tcx.visibility(foreign_item.id.def_id);
- self.check(foreign_item.id.def_id, vis).generics().predicates().ty();
+ let vis = tcx.local_visibility(foreign_item.id.owner_id.def_id);
+ self.check(foreign_item.id.owner_id.def_id, vis)
+ .generics()
+ .predicates()
+ .ty();
}
}
}
@@ -1916,11 +1999,11 @@ impl<'tcx> PrivateItemsInPublicInterfacesChecker<'tcx> {
if let hir::ItemKind::Struct(ref struct_def, _)
| hir::ItemKind::Union(ref struct_def, _) = item.kind
{
- self.check(item.def_id, item_visibility).generics().predicates();
+ self.check(item.owner_id.def_id, item_visibility).generics().predicates();
for field in struct_def.fields() {
let def_id = tcx.hir().local_def_id(field.hir_id);
- let field_visibility = tcx.visibility(def_id);
+ let field_visibility = tcx.local_visibility(def_id);
self.check(def_id, min(item_visibility, field_visibility, tcx)).ty();
}
}
@@ -1932,20 +2015,25 @@ impl<'tcx> PrivateItemsInPublicInterfacesChecker<'tcx> {
DefKind::Impl => {
let item = tcx.hir().item(id);
if let hir::ItemKind::Impl(ref impl_) = item.kind {
- let impl_vis = ty::Visibility::of_impl(item.def_id, tcx, &Default::default());
+ let impl_vis =
+ ty::Visibility::of_impl(item.owner_id.def_id, tcx, &Default::default());
// check that private components do not appear in the generics or predicates of inherent impls
// this check is intentionally NOT performed for impls of traits, per #90586
if impl_.of_trait.is_none() {
- self.check(item.def_id, impl_vis).generics().predicates();
+ self.check(item.owner_id.def_id, impl_vis).generics().predicates();
}
for impl_item_ref in impl_.items {
let impl_item_vis = if impl_.of_trait.is_none() {
- min(tcx.visibility(impl_item_ref.id.def_id), impl_vis, tcx)
+ min(
+ tcx.local_visibility(impl_item_ref.id.owner_id.def_id),
+ impl_vis,
+ tcx,
+ )
} else {
impl_vis
};
self.check_assoc_item(
- impl_item_ref.id.def_id,
+ impl_item_ref.id.owner_id.def_id,
impl_item_ref.kind,
impl_item_vis,
);
@@ -1960,15 +2048,18 @@ impl<'tcx> PrivateItemsInPublicInterfacesChecker<'tcx> {
pub fn provide(providers: &mut Providers) {
*providers = Providers {
visibility,
- privacy_access_levels,
+ effective_visibilities,
check_private_in_public,
check_mod_privacy,
..*providers
};
}
-fn visibility(tcx: TyCtxt<'_>, def_id: DefId) -> ty::Visibility {
- let def_id = def_id.expect_local();
+fn visibility(tcx: TyCtxt<'_>, def_id: DefId) -> ty::Visibility<DefId> {
+ local_visibility(tcx, def_id.expect_local()).to_def_id()
+}
+
+fn local_visibility(tcx: TyCtxt<'_>, def_id: LocalDefId) -> ty::Visibility {
match tcx.resolutions(()).visibilities.get(&def_id) {
Some(vis) => *vis,
None => {
@@ -1983,13 +2074,14 @@ fn visibility(tcx: TyCtxt<'_>, def_id: DefId) -> ty::Visibility {
// Visibility on them should have no effect, but to avoid the visibility
// query failing on some items, we provide it for opaque types as well.
| Node::Item(hir::Item {
- kind: hir::ItemKind::Use(_, hir::UseKind::ListStem) | hir::ItemKind::OpaqueTy(..),
+ kind: hir::ItemKind::Use(_, hir::UseKind::ListStem)
+ | hir::ItemKind::OpaqueTy(..),
..
- }) => ty::Visibility::Restricted(tcx.parent_module(hir_id).to_def_id()),
+ }) => ty::Visibility::Restricted(tcx.parent_module(hir_id)),
// Visibilities of trait impl items are inherited from their traits
// and are not filled in resolve.
Node::ImplItem(impl_item) => {
- match tcx.hir().get_by_def_id(tcx.hir().get_parent_item(hir_id)) {
+ match tcx.hir().get_by_def_id(tcx.hir().get_parent_item(hir_id).def_id) {
Node::Item(hir::Item {
kind: hir::ItemKind::Impl(hir::Impl { of_trait: Some(tr), .. }),
..
@@ -1998,7 +2090,7 @@ fn visibility(tcx: TyCtxt<'_>, def_id: DefId) -> ty::Visibility {
tcx.sess.delay_span_bug(tr.path.span, "trait without a def-id");
ty::Visibility::Public
},
- |def_id| tcx.visibility(def_id),
+ |def_id| tcx.visibility(def_id).expect_local(),
),
_ => span_bug!(impl_item.span, "the parent is not a trait impl"),
}
@@ -2028,14 +2120,14 @@ fn check_mod_privacy(tcx: TyCtxt<'_>, module_def_id: LocalDefId) {
intravisit::walk_mod(&mut visitor, module, hir_id);
}
-fn privacy_access_levels(tcx: TyCtxt<'_>, (): ()) -> &AccessLevels {
+fn effective_visibilities(tcx: TyCtxt<'_>, (): ()) -> &EffectiveVisibilities {
// Build up a set of all exported items in the AST. This is a set of all
// items which are reachable from external crates based on visibility.
let mut visitor = EmbargoVisitor {
tcx,
- access_levels: tcx.resolutions(()).access_levels.clone(),
+ effective_visibilities: tcx.resolutions(()).effective_visibilities.clone(),
macro_reachable: Default::default(),
- prev_level: Some(AccessLevel::Public),
+ prev_level: Some(Level::Direct),
changed: false,
};
@@ -2048,15 +2140,19 @@ fn privacy_access_levels(tcx: TyCtxt<'_>, (): ()) -> &AccessLevels {
}
}
- tcx.arena.alloc(visitor.access_levels)
+ let mut check_visitor =
+ TestReachabilityVisitor { tcx, effective_visibilities: &visitor.effective_visibilities };
+ tcx.hir().visit_all_item_likes_in_crate(&mut check_visitor);
+
+ tcx.arena.alloc(visitor.effective_visibilities)
}
fn check_private_in_public(tcx: TyCtxt<'_>, (): ()) {
- let access_levels = tcx.privacy_access_levels(());
+ let effective_visibilities = tcx.effective_visibilities(());
let mut visitor = ObsoleteVisiblePrivateTypesVisitor {
tcx,
- access_levels,
+ effective_visibilities,
in_variant: false,
old_error_set: Default::default(),
};
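The privacy hunks above replace the old binary access-level notion and the `privacy_access_levels` query with `effective_visibilities` keyed by `Level` (e.g. `Level::Direct`), still driven to a fixed point via the visitor's `changed` flag. The sketch below is a self-contained toy of that fixed-point update; the `Level` variants, their ordering, and the integer def-ids are assumptions for illustration, not the rustc_middle definitions.

```rust
// Illustrative model only: a simplified effective-visibility table raised
// to a fixed point, not the real rustc_middle types.
use std::collections::HashMap;

// Assumed, simplified level lattice; declared in ascending order so the
// derived `Ord` makes `Direct` the strongest level.
#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord)]
enum Level {
    Reachable,  // reachable from other crates, but not nameable
    Reexported, // reachable through a public re-export
    Direct,     // nameable directly from other crates
}

#[derive(Default)]
struct EffectiveVisibilities {
    map: HashMap<u32 /* toy def-id */, Level>,
}

impl EffectiveVisibilities {
    /// Raise the stored level if the new one is stronger; report whether
    /// anything changed, mirroring the visitor's `changed` flag.
    fn update(&mut self, def_id: u32, level: Level) -> bool {
        match self.map.get(&def_id) {
            Some(old) if *old >= level => false,
            _ => {
                self.map.insert(def_id, level);
                true
            }
        }
    }
}

fn main() {
    let mut ev = EffectiveVisibilities::default();
    // Iterate until no update raises a level, i.e. a fixed point.
    let mut changed = true;
    while changed {
        changed = false;
        changed |= ev.update(1, Level::Direct);
        changed |= ev.update(2, Level::Reachable);
        changed |= ev.update(3, Level::Reexported);
    }
    assert_eq!(ev.map.get(&1), Some(&Level::Direct));
    println!("{} items have an effective visibility", ev.map.len());
}
```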
diff --git a/compiler/rustc_query_impl/Cargo.toml b/compiler/rustc_query_impl/Cargo.toml
index 5673bb83b..e7f12caaf 100644
--- a/compiler/rustc_query_impl/Cargo.toml
+++ b/compiler/rustc_query_impl/Cargo.toml
@@ -8,7 +8,6 @@ doctest = false
[dependencies]
measureme = "10.0.0"
-rustc-rayon-core = { version = "0.4.0", optional = true }
rustc_ast = { path = "../rustc_ast" }
rustc_data_structures = { path = "../rustc_data_structures" }
rustc_errors = { path = "../rustc_errors" }
@@ -17,9 +16,12 @@ rustc_index = { path = "../rustc_index" }
rustc_macros = { path = "../rustc_macros" }
rustc_middle = { path = "../rustc_middle" }
rustc_query_system = { path = "../rustc_query_system" }
+rustc-rayon-core = { version = "0.4.0", optional = true }
rustc_serialize = { path = "../rustc_serialize" }
rustc_session = { path = "../rustc_session" }
rustc_span = { path = "../rustc_span" }
+rustc_target = { path = "../rustc_target" }
+thin-vec = "0.2.8"
tracing = "0.1"
[features]
diff --git a/compiler/rustc_query_impl/src/keys.rs b/compiler/rustc_query_impl/src/keys.rs
index 49175e97f..8be2e2be8 100644
--- a/compiler/rustc_query_impl/src/keys.rs
+++ b/compiler/rustc_query_impl/src/keys.rs
@@ -1,6 +1,7 @@
//! Defines the set of legal keys that can be used in queries.
use rustc_hir::def_id::{CrateNum, DefId, LocalDefId, LOCAL_CRATE};
+use rustc_hir::hir_id::{HirId, OwnerId};
use rustc_middle::infer::canonical::Canonical;
use rustc_middle::mir;
use rustc_middle::traits;
@@ -26,6 +27,10 @@ pub trait Key {
fn key_as_def_id(&self) -> Option<DefId> {
None
}
+
+ fn ty_adt_id(&self) -> Option<DefId> {
+ None
+ }
}
impl Key for () {
@@ -104,6 +109,19 @@ impl Key for CrateNum {
}
}
+impl Key for OwnerId {
+ #[inline(always)]
+ fn query_crate_is_local(&self) -> bool {
+ true
+ }
+ fn default_span(&self, tcx: TyCtxt<'_>) -> Span {
+ self.to_def_id().default_span(tcx)
+ }
+ fn key_as_def_id(&self) -> Option<DefId> {
+ Some(self.to_def_id())
+ }
+}
+
impl Key for LocalDefId {
#[inline(always)]
fn query_crate_is_local(&self) -> bool {
@@ -275,7 +293,7 @@ impl<'tcx> Key for (DefId, SubstsRef<'tcx>) {
}
}
-impl<'tcx> Key for (ty::Unevaluated<'tcx, ()>, ty::Unevaluated<'tcx, ()>) {
+impl<'tcx> Key for (ty::UnevaluatedConst<'tcx>, ty::UnevaluatedConst<'tcx>) {
#[inline(always)]
fn query_crate_is_local(&self) -> bool {
(self.0).def.did.krate == LOCAL_CRATE
@@ -393,6 +411,12 @@ impl<'tcx> Key for Ty<'tcx> {
fn default_span(&self, _: TyCtxt<'_>) -> Span {
DUMMY_SP
}
+ fn ty_adt_id(&self) -> Option<DefId> {
+ match self.kind() {
+ ty::Adt(adt, _) => Some(adt.did()),
+ _ => None,
+ }
+ }
}
impl<'tcx> Key for TyAndLayout<'tcx> {
@@ -543,3 +567,19 @@ impl<'tcx> Key for (Ty<'tcx>, ty::ValTree<'tcx>) {
DUMMY_SP
}
}
+
+impl Key for HirId {
+ #[inline(always)]
+ fn query_crate_is_local(&self) -> bool {
+ true
+ }
+
+ fn default_span(&self, tcx: TyCtxt<'_>) -> Span {
+ tcx.hir().span(*self)
+ }
+
+ #[inline(always)]
+ fn key_as_def_id(&self) -> Option<DefId> {
+ None
+ }
+}
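The `Key` additions above (the defaulted `ty_adt_id` hook plus the new `OwnerId` and `HirId` impls) follow a common pattern: a trait of metadata accessors with defaults, overridden only by key types that can actually supply the information. Below is a self-contained toy version of that pattern with made-up stand-in types, not the real query keys.

```rust
// Illustrative sketch of a `Key`-style trait: defaulted metadata accessors
// that individual key types override. Standalone toy types only.

#[derive(Copy, Clone, Debug)]
struct DefId(u32);

trait Key {
    fn query_crate_is_local(&self) -> bool {
        true
    }
    fn key_as_def_id(&self) -> Option<DefId> {
        None
    }
    // Newly added hook: lets ADT-typed keys expose their defining item.
    fn ty_adt_id(&self) -> Option<DefId> {
        None
    }
}

struct UnitKey;
impl Key for UnitKey {}

struct AdtTyKey {
    adt_def: DefId,
}
impl Key for AdtTyKey {
    fn ty_adt_id(&self) -> Option<DefId> {
        Some(self.adt_def)
    }
}

fn describe<K: Key>(k: &K) -> String {
    let locality = if k.query_crate_is_local() { "local" } else { "foreign" };
    match (k.key_as_def_id(), k.ty_adt_id()) {
        (_, Some(DefId(idx))) => format!("{locality} key for ADT #{idx}"),
        (Some(DefId(idx)), None) => format!("{locality} key for def #{idx}"),
        (None, None) => format!("{locality} key with no def-id"),
    }
}

fn main() {
    println!("{}", describe(&UnitKey));
    println!("{}", describe(&AdtTyKey { adt_def: DefId(7) }));
}
```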
diff --git a/compiler/rustc_query_impl/src/lib.rs b/compiler/rustc_query_impl/src/lib.rs
index eda61df77..11d4c97e7 100644
--- a/compiler/rustc_query_impl/src/lib.rs
+++ b/compiler/rustc_query_impl/src/lib.rs
@@ -1,26 +1,28 @@
//! Support for serializing the dep-graph and reloading it.
#![doc(html_root_url = "https://doc.rust-lang.org/nightly/nightly-rustc/")]
+// This shouldn't be necessary, but the check for `&mut _` is too naive and rejects returning a function pointer that takes a mutable reference
+#![feature(const_mut_refs)]
#![feature(min_specialization)]
#![feature(never_type)]
#![feature(once_cell)]
#![feature(rustc_attrs)]
#![recursion_limit = "256"]
#![allow(rustc::potential_query_instability)]
+#![deny(rustc::untranslatable_diagnostic)]
+#![deny(rustc::diagnostic_outside_of_impl)]
#[macro_use]
extern crate rustc_macros;
#[macro_use]
extern crate rustc_middle;
-use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
use rustc_data_structures::sync::AtomicU64;
use rustc_middle::arena::Arena;
-use rustc_middle::dep_graph::{self, DepKindStruct, SerializedDepNodeIndex};
+use rustc_middle::dep_graph::{self, DepKindStruct};
use rustc_middle::ty::query::{query_keys, query_storage, query_stored, query_values};
use rustc_middle::ty::query::{ExternProviders, Providers, QueryEngine};
-use rustc_middle::ty::{self, TyCtxt};
-use rustc_span::def_id::{LocalDefId, LOCAL_CRATE};
+use rustc_middle::ty::TyCtxt;
use rustc_span::Span;
#[macro_use]
@@ -33,9 +35,6 @@ pub use rustc_query_system::query::{deadlock, QueryContext};
mod keys;
use keys::Key;
-mod values;
-use self::values::Value;
-
pub use rustc_query_system::query::QueryConfig;
pub(crate) use rustc_query_system::query::{QueryDescription, QueryVTable};
@@ -45,15 +44,7 @@ pub use on_disk_cache::OnDiskCache;
mod profiling_support;
pub use self::profiling_support::alloc_self_profile_query_strings;
-fn describe_as_module(def_id: LocalDefId, tcx: TyCtxt<'_>) -> String {
- if def_id.is_top_level_module() {
- "top-level module".to_string()
- } else {
- format!("module `{}`", tcx.def_path_str(def_id.to_def_id()))
- }
-}
-
-rustc_query_append! { [define_queries!][<'tcx>] }
+rustc_query_append! { define_queries! }
impl<'tcx> Queries<'tcx> {
// Force codegen in the dyn-trait transformation in this crate.
diff --git a/compiler/rustc_query_impl/src/on_disk_cache.rs b/compiler/rustc_query_impl/src/on_disk_cache.rs
index 56fd90c98..9000f81d9 100644
--- a/compiler/rustc_query_impl/src/on_disk_cache.rs
+++ b/compiler/rustc_query_impl/src/on_disk_cache.rs
@@ -1,8 +1,9 @@
use crate::QueryCtxt;
-use rustc_data_structures::fx::{FxHashMap, FxHashSet, FxIndexSet};
+use rustc_data_structures::fx::{FxHashMap, FxIndexSet};
use rustc_data_structures::memmap::Mmap;
use rustc_data_structures::sync::{HashMapExt, Lock, Lrc, RwLock};
use rustc_data_structures::unhash::UnhashMap;
+use rustc_data_structures::unord::UnordSet;
use rustc_hir::def_id::{CrateNum, DefId, DefIndex, LocalDefId, StableCrateId, LOCAL_CRATE};
use rustc_hir::definitions::DefPathHash;
use rustc_index::vec::{Idx, IndexVec};
@@ -22,8 +23,9 @@ use rustc_span::hygiene::{
ExpnId, HygieneDecodeContext, HygieneEncodeContext, SyntaxContext, SyntaxContextData,
};
use rustc_span::source_map::{SourceMap, StableSourceFileId};
-use rustc_span::CachingSourceMapView;
use rustc_span::{BytePos, ExpnData, ExpnHash, Pos, SourceFile, Span};
+use rustc_span::{CachingSourceMapView, Symbol};
+use std::collections::hash_map::Entry;
use std::io;
use std::mem;
@@ -38,6 +40,11 @@ const TAG_RELATIVE_SPAN: u8 = 2;
const TAG_SYNTAX_CONTEXT: u8 = 0;
const TAG_EXPN_DATA: u8 = 1;
+// Tags for encoding Symbols
+const SYMBOL_STR: u8 = 0;
+const SYMBOL_OFFSET: u8 = 1;
+const SYMBOL_PREINTERNED: u8 = 2;
+
/// Provides an interface to incremental compilation data cached from the
/// previous compilation session. This data will eventually include the results
/// of a few selected queries (like `typeck` and `mir_optimized`) and
@@ -112,12 +119,11 @@ pub type EncodedDepNodeIndex = Vec<(SerializedDepNodeIndex, AbsoluteBytePos)>;
struct SourceFileIndex(u32);
#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq, Encodable, Decodable)]
-pub struct AbsoluteBytePos(u32);
+pub struct AbsoluteBytePos(u64);
impl AbsoluteBytePos {
fn new(pos: usize) -> AbsoluteBytePos {
- debug_assert!(pos <= u32::MAX as usize);
- AbsoluteBytePos(pos as u32)
+ AbsoluteBytePos(pos.try_into().expect("Incremental cache file size overflowed u64."))
}
fn to_usize(self) -> usize {
@@ -254,6 +260,7 @@ impl<'sess> rustc_middle::ty::OnDiskCache<'sess> for OnDiskCache<'sess> {
source_map: CachingSourceMapView::new(tcx.sess.source_map()),
file_to_file_index,
hygiene_context: &hygiene_encode_context,
+ symbol_table: Default::default(),
};
// Encode query results.
@@ -714,6 +721,40 @@ impl<'a, 'tcx> Decodable<CacheDecoder<'a, 'tcx>> for Span {
}
}
+// copy&paste impl from rustc_metadata
+impl<'a, 'tcx> Decodable<CacheDecoder<'a, 'tcx>> for Symbol {
+ fn decode(d: &mut CacheDecoder<'a, 'tcx>) -> Self {
+ let tag = d.read_u8();
+
+ match tag {
+ SYMBOL_STR => {
+ let s = d.read_str();
+ Symbol::intern(s)
+ }
+ SYMBOL_OFFSET => {
+ // read str offset
+ let pos = d.read_usize();
+ let old_pos = d.opaque.position();
+
+ // move to the str offset and read
+ d.opaque.set_position(pos);
+ let s = d.read_str();
+ let sym = Symbol::intern(s);
+
+ // restore position
+ d.opaque.set_position(old_pos);
+
+ sym
+ }
+ SYMBOL_PREINTERNED => {
+ let symbol_index = d.read_u32();
+ Symbol::new_from_decoded(symbol_index)
+ }
+ _ => unreachable!(),
+ }
+ }
+}
+
impl<'a, 'tcx> Decodable<CacheDecoder<'a, 'tcx>> for CrateNum {
fn decode(d: &mut CacheDecoder<'a, 'tcx>) -> Self {
let stable_id = StableCrateId::decode(d);
@@ -751,7 +792,13 @@ impl<'a, 'tcx> Decodable<CacheDecoder<'a, 'tcx>> for DefId {
}
}
-impl<'a, 'tcx> Decodable<CacheDecoder<'a, 'tcx>> for &'tcx FxHashSet<LocalDefId> {
+impl<'a, 'tcx> Decodable<CacheDecoder<'a, 'tcx>> for &'tcx UnordSet<LocalDefId> {
+ fn decode(d: &mut CacheDecoder<'a, 'tcx>) -> Self {
+ RefDecodable::decode(d)
+ }
+}
+
+impl<'a, 'tcx> Decodable<CacheDecoder<'a, 'tcx>> for &'tcx FxHashMap<DefId, Ty<'tcx>> {
fn decode(d: &mut CacheDecoder<'a, 'tcx>) -> Self {
RefDecodable::decode(d)
}
@@ -801,6 +848,7 @@ impl_ref_decoder! {<'tcx>
rustc_span::def_id::DefId,
rustc_span::def_id::LocalDefId,
(rustc_middle::middle::exported_symbols::ExportedSymbol<'tcx>, rustc_middle::middle::exported_symbols::SymbolExportInfo),
+ ty::DeducedParamAttrs,
}
//- ENCODING -------------------------------------------------------------------
@@ -815,6 +863,7 @@ pub struct CacheEncoder<'a, 'tcx> {
source_map: CachingSourceMapView<'tcx>,
file_to_file_index: FxHashMap<*const SourceFile, SourceFileIndex>,
hygiene_context: &'a HygieneEncodeContext,
+ symbol_table: FxHashMap<Symbol, usize>,
}
impl<'a, 'tcx> CacheEncoder<'a, 'tcx> {
@@ -899,6 +948,32 @@ impl<'a, 'tcx> Encodable<CacheEncoder<'a, 'tcx>> for Span {
}
}
+// copy&paste impl from rustc_metadata
+impl<'a, 'tcx> Encodable<CacheEncoder<'a, 'tcx>> for Symbol {
+ fn encode(&self, s: &mut CacheEncoder<'a, 'tcx>) {
+ // if the symbol is preinterned, emit the tag and symbol index
+ if self.is_preinterned() {
+ s.encoder.emit_u8(SYMBOL_PREINTERNED);
+ s.encoder.emit_u32(self.as_u32());
+ } else {
+ // otherwise write it as a string or as an offset to it
+ match s.symbol_table.entry(*self) {
+ Entry::Vacant(o) => {
+ s.encoder.emit_u8(SYMBOL_STR);
+ let pos = s.encoder.position();
+ o.insert(pos);
+ s.emit_str(self.as_str());
+ }
+ Entry::Occupied(o) => {
+ let x = o.get().clone();
+ s.emit_u8(SYMBOL_OFFSET);
+ s.emit_usize(x);
+ }
+ }
+ }
+ }
+}
+
impl<'a, 'tcx> TyEncoder for CacheEncoder<'a, 'tcx> {
type I = TyCtxt<'tcx>;
const CLEAR_CROSS_CRATE: bool = false;
@@ -993,7 +1068,7 @@ pub fn encode_query_results<'a, 'tcx, CTX, Q>(
let _timer = tcx
.dep_context()
.profiler()
- .extra_verbose_generic_activity("encode_query_results_for", std::any::type_name::<Q>());
+ .verbose_generic_activity_with_arg("encode_query_results_for", std::any::type_name::<Q>());
assert!(Q::query_state(tcx).all_inactive());
let cache = Q::query_cache(tcx);
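The new `Symbol` support in the incremental cache above uses a three-way tag: preinterned symbols are stored by index, the first occurrence of any other symbol is written inline, and later occurrences point back at that inline string by byte offset. The sketch below is a self-contained toy of that scheme over a plain byte buffer; the wire format (little-endian `u32` lengths and offsets) and the `preinterned_index` argument are assumptions for illustration, not the actual `CacheEncoder` layout.

```rust
// Toy model of the tag-based symbol encoding (SYMBOL_STR / SYMBOL_OFFSET /
// SYMBOL_PREINTERNED) with a simplified, assumed wire format.
use std::collections::HashMap;

const SYM_STR: u8 = 0;
const SYM_OFFSET: u8 = 1;
const SYM_PREINTERNED: u8 = 2;

#[derive(Default)]
struct Encoder {
    buf: Vec<u8>,
    seen: HashMap<String, u32>, // symbol -> position of its inline string
}

impl Encoder {
    fn encode_symbol(&mut self, s: &str, preinterned_index: Option<u32>) {
        if let Some(idx) = preinterned_index {
            // Well-known symbol: emit only its fixed index.
            self.buf.push(SYM_PREINTERNED);
            self.buf.extend_from_slice(&idx.to_le_bytes());
        } else if let Some(&pos) = self.seen.get(s) {
            // Already written once: emit an offset back to the string.
            self.buf.push(SYM_OFFSET);
            self.buf.extend_from_slice(&pos.to_le_bytes());
        } else {
            // First occurrence: emit the string inline and remember where.
            self.buf.push(SYM_STR);
            let pos = self.buf.len() as u32;
            self.seen.insert(s.to_string(), pos);
            self.buf.extend_from_slice(&(s.len() as u32).to_le_bytes());
            self.buf.extend_from_slice(s.as_bytes());
        }
    }
}

fn read_str(buf: &[u8], at: usize) -> String {
    let len = u32::from_le_bytes(buf[at..at + 4].try_into().unwrap()) as usize;
    String::from_utf8(buf[at + 4..at + 4 + len].to_vec()).unwrap()
}

fn decode_symbol(buf: &[u8], pos: &mut usize) -> String {
    let tag = buf[*pos];
    *pos += 1;
    match tag {
        SYM_PREINTERNED => {
            let idx = u32::from_le_bytes(buf[*pos..*pos + 4].try_into().unwrap());
            *pos += 4;
            format!("<preinterned #{idx}>")
        }
        SYM_OFFSET => {
            let off = u32::from_le_bytes(buf[*pos..*pos + 4].try_into().unwrap()) as usize;
            *pos += 4;
            read_str(buf, off)
        }
        SYM_STR => {
            let s = read_str(buf, *pos);
            *pos += 4 + s.len();
            s
        }
        _ => unreachable!("unknown symbol tag"),
    }
}

fn main() {
    let mut enc = Encoder::default();
    enc.encode_symbol("layout_of", None);
    enc.encode_symbol("layout_of", None); // second use becomes an offset
    enc.encode_symbol("u32", Some(42)); // pretend "u32" is preinterned

    let mut pos = 0;
    for _ in 0..3 {
        println!("{}", decode_symbol(&enc.buf, &mut pos));
    }
}
```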
diff --git a/compiler/rustc_query_impl/src/plumbing.rs b/compiler/rustc_query_impl/src/plumbing.rs
index eda4401c8..1d17f4221 100644
--- a/compiler/rustc_query_impl/src/plumbing.rs
+++ b/compiler/rustc_query_impl/src/plumbing.rs
@@ -2,19 +2,31 @@
//! generate the actual methods on tcx which find and execute the provider,
//! manage the caches, and so forth.
+use crate::keys::Key;
+use crate::on_disk_cache::{CacheDecoder, CacheEncoder, EncodedDepNodeIndex};
+use crate::profiling_support::QueryKeyStringCache;
use crate::{on_disk_cache, Queries};
-use rustc_middle::dep_graph::{DepNodeIndex, SerializedDepNodeIndex};
-use rustc_middle::ty::tls::{self, ImplicitCtxt};
-use rustc_middle::ty::TyCtxt;
-use rustc_query_system::dep_graph::HasDepContext;
-use rustc_query_system::query::{QueryContext, QueryJobId, QueryMap, QuerySideEffects};
-
-use rustc_data_structures::sync::Lock;
-use rustc_data_structures::thin_vec::ThinVec;
+use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
+use rustc_data_structures::sync::{AtomicU64, Lock};
use rustc_errors::{Diagnostic, Handler};
-
+use rustc_middle::dep_graph::{
+ self, DepKind, DepKindStruct, DepNode, DepNodeIndex, SerializedDepNodeIndex,
+};
+use rustc_middle::ty::tls::{self, ImplicitCtxt};
+use rustc_middle::ty::{self, TyCtxt};
+use rustc_query_system::dep_graph::{DepNodeParams, HasDepContext};
+use rustc_query_system::ich::StableHashingContext;
+use rustc_query_system::query::{
+ force_query, QueryConfig, QueryContext, QueryDescription, QueryJobId, QueryMap,
+ QuerySideEffects, QueryStackFrame,
+};
+use rustc_query_system::{LayoutOfDepth, QueryOverflow, Value};
+use rustc_serialize::Decodable;
+use rustc_session::Limit;
+use rustc_span::def_id::LOCAL_CRATE;
use std::any::Any;
use std::num::NonZeroU64;
+use thin_vec::ThinVec;
#[derive(Copy, Clone)]
pub struct QueryCtxt<'tcx> {
@@ -91,6 +103,7 @@ impl QueryContext for QueryCtxt<'_> {
fn start_query<R>(
&self,
token: QueryJobId,
+ depth_limit: bool,
diagnostics: Option<&Lock<ThinVec<Diagnostic>>>,
compute: impl FnOnce() -> R,
) -> R {
@@ -98,12 +111,16 @@ impl QueryContext for QueryCtxt<'_> {
// as `self`, so we use `with_related_context` to relate the 'tcx lifetimes
// when accessing the `ImplicitCtxt`.
tls::with_related_context(**self, move |current_icx| {
+ if depth_limit && !self.recursion_limit().value_within_limit(current_icx.query_depth) {
+ self.depth_limit_error(token);
+ }
+
// Update the `ImplicitCtxt` to point to our new query job.
let new_icx = ImplicitCtxt {
tcx: **self,
query: Some(token),
diagnostics,
- layout_depth: current_icx.layout_depth,
+ query_depth: current_icx.query_depth + depth_limit as usize,
task_deps: current_icx.task_deps,
};
@@ -113,6 +130,29 @@ impl QueryContext for QueryCtxt<'_> {
})
})
}
+
+ fn depth_limit_error(&self, job: QueryJobId) {
+ let mut span = None;
+ let mut layout_of_depth = None;
+ if let Some(map) = self.try_collect_active_jobs() {
+ if let Some((info, depth)) = job.try_find_layout_root(map) {
+ span = Some(info.job.span);
+ layout_of_depth = Some(LayoutOfDepth { desc: info.query.description, depth });
+ }
+ }
+
+ let suggested_limit = match self.recursion_limit() {
+ Limit(0) => Limit(2),
+ limit => limit * 2,
+ };
+
+ self.sess.emit_fatal(QueryOverflow {
+ span,
+ layout_of_depth,
+ suggested_limit,
+ crate_name: self.crate_name(LOCAL_CRATE),
+ });
+ }
}
impl<'tcx> QueryCtxt<'tcx> {
@@ -134,22 +174,14 @@ impl<'tcx> QueryCtxt<'tcx> {
pub(super) fn encode_query_results(
self,
- encoder: &mut on_disk_cache::CacheEncoder<'_, 'tcx>,
- query_result_index: &mut on_disk_cache::EncodedDepNodeIndex,
+ encoder: &mut CacheEncoder<'_, 'tcx>,
+ query_result_index: &mut EncodedDepNodeIndex,
) {
- macro_rules! encode_queries {
- ($($query:ident,)*) => {
- $(
- on_disk_cache::encode_query_results::<_, super::queries::$query<'_>>(
- self,
- encoder,
- query_result_index
- );
- )*
+ for query in &self.queries.query_structs {
+ if let Some(encode) = query.encode_query_results {
+ encode(self, encoder, query_result_index);
}
}
-
- rustc_cached_queries!(encode_queries!);
}
pub fn try_print_query_stack(
@@ -162,22 +194,26 @@ impl<'tcx> QueryCtxt<'tcx> {
}
}
+#[derive(Clone, Copy)]
+pub(crate) struct QueryStruct<'tcx> {
+ pub try_collect_active_jobs: fn(QueryCtxt<'tcx>, &mut QueryMap) -> Option<()>,
+ pub alloc_self_profile_query_strings: fn(TyCtxt<'tcx>, &mut QueryKeyStringCache),
+ pub encode_query_results:
+ Option<fn(QueryCtxt<'tcx>, &mut CacheEncoder<'_, 'tcx>, &mut EncodedDepNodeIndex)>,
+}
+
macro_rules! handle_cycle_error {
- ([][$tcx: expr, $error:expr]) => {{
- $error.emit();
- Value::from_cycle_error($tcx)
+ ([]) => {{
+ rustc_query_system::HandleCycleError::Error
}};
- ([(fatal_cycle) $($rest:tt)*][$tcx:expr, $error:expr]) => {{
- $error.emit();
- $tcx.sess.abort_if_errors();
- unreachable!()
+ ([(fatal_cycle) $($rest:tt)*]) => {{
+ rustc_query_system::HandleCycleError::Fatal
}};
- ([(cycle_delay_bug) $($rest:tt)*][$tcx:expr, $error:expr]) => {{
- $error.delay_as_bug();
- Value::from_cycle_error($tcx)
+ ([(cycle_delay_bug) $($rest:tt)*]) => {{
+ rustc_query_system::HandleCycleError::DelayBug
}};
- ([$other:tt $($modifiers:tt)*][$($args:tt)*]) => {
- handle_cycle_error!([$($modifiers)*][$($args)*])
+ ([$other:tt $($modifiers:tt)*]) => {
+ handle_cycle_error!([$($modifiers)*])
};
}
@@ -205,6 +241,18 @@ macro_rules! is_eval_always {
};
}
+macro_rules! depth_limit {
+ ([]) => {{
+ false
+ }};
+ ([(depth_limit) $($rest:tt)*]) => {{
+ true
+ }};
+ ([$other:tt $($modifiers:tt)*]) => {
+ depth_limit!([$($modifiers)*])
+ };
+}
+
macro_rules! hash_result {
([]) => {{
Some(dep_graph::hash_result)
@@ -233,106 +281,207 @@ macro_rules! get_provider {
};
}
-macro_rules! opt_remap_env_constness {
- ([][$name:ident]) => {};
- ([(remap_env_constness) $($rest:tt)*][$name:ident]) => {
- let $name = $name.without_const();
+macro_rules! should_ever_cache_on_disk {
+ ([]) => {{
+ None
+ }};
+ ([(cache) $($rest:tt)*]) => {{
+ Some($crate::plumbing::try_load_from_disk::<Self::Value>)
+ }};
+ ([$other:tt $($modifiers:tt)*]) => {
+ should_ever_cache_on_disk!([$($modifiers)*])
+ };
+}
+
+pub(crate) fn create_query_frame<
+ 'tcx,
+ K: Copy + Key + for<'a> HashStable<StableHashingContext<'a>>,
+>(
+ tcx: QueryCtxt<'tcx>,
+ do_describe: fn(TyCtxt<'tcx>, K) -> String,
+ key: K,
+ kind: DepKind,
+ name: &'static str,
+) -> QueryStackFrame {
+ // Disable printing of visible paths for performance reasons.
+ // Showing the visible path instead of any path is not that important in production.
+ let description = ty::print::with_no_visible_paths!(
+ // Force filename-line mode to avoid invoking `type_of` query.
+ ty::print::with_forced_impl_filename_line!(do_describe(tcx.tcx, key))
+ );
+ let description =
+ if tcx.sess.verbose() { format!("{} [{}]", description, name) } else { description };
+ let span = if kind == dep_graph::DepKind::def_span {
+ // The `def_span` query is used to calculate `default_span`,
+ // so exit to avoid infinite recursion.
+ None
+ } else {
+ Some(key.default_span(*tcx))
};
- ([$other:tt $($modifiers:tt)*][$name:ident]) => {
- opt_remap_env_constness!([$($modifiers)*][$name])
+ let def_id = key.key_as_def_id();
+ let def_kind = if kind == dep_graph::DepKind::opt_def_kind {
+ // Try to avoid infinite recursion.
+ None
+ } else {
+ def_id.and_then(|def_id| def_id.as_local()).and_then(|def_id| tcx.opt_def_kind(def_id))
+ };
+ let hash = || {
+ tcx.with_stable_hashing_context(|mut hcx| {
+ let mut hasher = StableHasher::new();
+ std::mem::discriminant(&kind).hash_stable(&mut hcx, &mut hasher);
+ key.hash_stable(&mut hcx, &mut hasher);
+ hasher.finish::<u64>()
+ })
+ };
+ let ty_adt_id = key.ty_adt_id();
+
+ QueryStackFrame::new(name, description, span, def_id, def_kind, ty_adt_id, hash)
+}
+
+fn try_load_from_on_disk_cache<'tcx, Q>(tcx: TyCtxt<'tcx>, dep_node: DepNode)
+where
+ Q: QueryDescription<QueryCtxt<'tcx>>,
+ Q::Key: DepNodeParams<TyCtxt<'tcx>>,
+{
+ debug_assert!(tcx.dep_graph.is_green(&dep_node));
+
+ let key = Q::Key::recover(tcx, &dep_node).unwrap_or_else(|| {
+ panic!("Failed to recover key for {:?} with hash {}", dep_node, dep_node.hash)
+ });
+ if Q::cache_on_disk(tcx, &key) {
+ let _ = Q::execute_query(tcx, key);
+ }
+}
+
+pub(crate) fn try_load_from_disk<'tcx, V>(
+ tcx: QueryCtxt<'tcx>,
+ id: SerializedDepNodeIndex,
+) -> Option<V>
+where
+ V: for<'a> Decodable<CacheDecoder<'a, 'tcx>>,
+{
+ tcx.on_disk_cache().as_ref()?.try_load_query_result(*tcx, id)
+}
+
+fn force_from_dep_node<'tcx, Q>(tcx: TyCtxt<'tcx>, dep_node: DepNode) -> bool
+where
+ Q: QueryDescription<QueryCtxt<'tcx>>,
+ Q::Key: DepNodeParams<TyCtxt<'tcx>>,
+ Q::Value: Value<TyCtxt<'tcx>>,
+{
+ // We must avoid ever having to call `force_from_dep_node()` for a
+ // `DepNode::codegen_unit`:
+ // Since we cannot reconstruct the query key of a `DepNode::codegen_unit`, we
+ // would always end up having to evaluate the first caller of the
+ // `codegen_unit` query that *is* reconstructible. This might very well be
+ // the `compile_codegen_unit` query, thus re-codegenning the whole CGU just
+ // to re-trigger calling the `codegen_unit` query with the right key. At
+ // that point we would already have re-done all the work we are trying to
+ // avoid doing in the first place.
+ // The solution is simple: Just explicitly call the `codegen_unit` query for
+ // each CGU, right after partitioning. This way `try_mark_green` will always
+ // hit the cache instead of having to go through `force_from_dep_node`.
+ // This assertion makes sure we actually keep applying the solution above.
+ debug_assert!(
+ dep_node.kind != DepKind::codegen_unit,
+ "calling force_from_dep_node() on DepKind::codegen_unit"
+ );
+
+ if let Some(key) = Q::Key::recover(tcx, &dep_node) {
+ #[cfg(debug_assertions)]
+ let _guard = tracing::span!(tracing::Level::TRACE, stringify!($name), ?key).entered();
+ let tcx = QueryCtxt::from_tcx(tcx);
+ force_query::<Q, _>(tcx, key, dep_node);
+ true
+ } else {
+ false
+ }
+}
+
+pub(crate) fn query_callback<'tcx, Q: QueryConfig>(
+ is_anon: bool,
+ is_eval_always: bool,
+) -> DepKindStruct<'tcx>
+where
+ Q: QueryDescription<QueryCtxt<'tcx>>,
+ Q::Key: DepNodeParams<TyCtxt<'tcx>>,
+{
+ let fingerprint_style = Q::Key::fingerprint_style();
+
+ if is_anon || !fingerprint_style.reconstructible() {
+ return DepKindStruct {
+ is_anon,
+ is_eval_always,
+ fingerprint_style,
+ force_from_dep_node: None,
+ try_load_from_on_disk_cache: None,
+ };
+ }
+
+ DepKindStruct {
+ is_anon,
+ is_eval_always,
+ fingerprint_style,
+ force_from_dep_node: Some(force_from_dep_node::<Q>),
+ try_load_from_on_disk_cache: Some(try_load_from_on_disk_cache::<Q>),
+ }
+}
+
+macro_rules! expand_if_cached {
+ ([], $tokens:expr) => {{
+ None
+ }};
+ ([(cache) $($rest:tt)*], $tokens:expr) => {{
+ Some($tokens)
+ }};
+ ([$other:tt $($modifiers:tt)*], $tokens:expr) => {
+ expand_if_cached!([$($modifiers)*], $tokens)
};
}
+// NOTE: `$V` isn't used here, but we still need to match on it so it can be passed to other macros
+// invoked by `rustc_query_append`.
macro_rules! define_queries {
- (<$tcx:tt>
+ (
$($(#[$attr:meta])*
[$($modifiers:tt)*] fn $name:ident($($K:tt)*) -> $V:ty,)*) => {
-
define_queries_struct! {
- tcx: $tcx,
input: ($(([$($modifiers)*] [$($attr)*] [$name]))*)
}
- mod make_query {
- use super::*;
-
- // Create an eponymous constructor for each query.
- $(#[allow(nonstandard_style)] $(#[$attr])*
- pub fn $name<$tcx>(tcx: QueryCtxt<$tcx>, key: query_keys::$name<$tcx>) -> QueryStackFrame {
- opt_remap_env_constness!([$($modifiers)*][key]);
- let kind = dep_graph::DepKind::$name;
- let name = stringify!($name);
- // Disable visible paths printing for performance reasons.
- // Showing visible path instead of any path is not that important in production.
- let description = ty::print::with_no_visible_paths!(
- // Force filename-line mode to avoid invoking `type_of` query.
- ty::print::with_forced_impl_filename_line!(
- queries::$name::describe(tcx, key)
- )
- );
- let description = if tcx.sess.verbose() {
- format!("{} [{}]", description, name)
- } else {
- description
- };
- let span = if kind == dep_graph::DepKind::def_span {
- // The `def_span` query is used to calculate `default_span`,
- // so exit to avoid infinite recursion.
- None
- } else {
- Some(key.default_span(*tcx))
- };
- let def_kind = if kind == dep_graph::DepKind::opt_def_kind {
- // Try to avoid infinite recursion.
- None
- } else {
- key.key_as_def_id()
- .and_then(|def_id| def_id.as_local())
- .and_then(|def_id| tcx.opt_def_kind(def_id))
- };
- let hash = || {
- tcx.with_stable_hashing_context(|mut hcx|{
- let mut hasher = StableHasher::new();
- std::mem::discriminant(&kind).hash_stable(&mut hcx, &mut hasher);
- key.hash_stable(&mut hcx, &mut hasher);
- hasher.finish::<u64>()
- })
- };
-
- QueryStackFrame::new(name, description, span, def_kind, hash)
- })*
- }
-
#[allow(nonstandard_style)]
mod queries {
use std::marker::PhantomData;
- $(pub struct $name<$tcx> {
- data: PhantomData<&$tcx ()>
+ $(pub struct $name<'tcx> {
+ data: PhantomData<&'tcx ()>
})*
}
- $(impl<$tcx> QueryConfig for queries::$name<$tcx> {
- type Key = query_keys::$name<$tcx>;
- type Value = query_values::$name<$tcx>;
- type Stored = query_stored::$name<$tcx>;
+ $(impl<'tcx> QueryConfig for queries::$name<'tcx> {
+ type Key = query_keys::$name<'tcx>;
+ type Value = query_values::$name<'tcx>;
+ type Stored = query_stored::$name<'tcx>;
const NAME: &'static str = stringify!($name);
}
- impl<$tcx> QueryDescription<QueryCtxt<$tcx>> for queries::$name<$tcx> {
- rustc_query_description! { $name<$tcx> }
+ impl<'tcx> QueryDescription<QueryCtxt<'tcx>> for queries::$name<'tcx> {
+ #[inline]
+ fn cache_on_disk(tcx: TyCtxt<'tcx>, key: &Self::Key) -> bool {
+ ::rustc_middle::query::cached::$name(tcx, key)
+ }
- type Cache = query_storage::$name<$tcx>;
+ type Cache = query_storage::$name<'tcx>;
#[inline(always)]
- fn query_state<'a>(tcx: QueryCtxt<$tcx>) -> &'a QueryState<Self::Key>
- where QueryCtxt<$tcx>: 'a
+ fn query_state<'a>(tcx: QueryCtxt<'tcx>) -> &'a QueryState<Self::Key>
+ where QueryCtxt<'tcx>: 'a
{
&tcx.queries.$name
}
#[inline(always)]
- fn query_cache<'a>(tcx: QueryCtxt<$tcx>) -> &'a Self::Cache
+ fn query_cache<'a>(tcx: QueryCtxt<'tcx>) -> &'a Self::Cache
where 'tcx:'a
{
&tcx.query_caches.$name
@@ -340,34 +489,34 @@ macro_rules! define_queries {
#[inline]
fn make_vtable(tcx: QueryCtxt<'tcx>, key: &Self::Key) ->
- QueryVTable<QueryCtxt<$tcx>, Self::Key, Self::Value>
+ QueryVTable<QueryCtxt<'tcx>, Self::Key, Self::Value>
{
let compute = get_provider!([$($modifiers)*][tcx, $name, key]);
let cache_on_disk = Self::cache_on_disk(tcx.tcx, key);
QueryVTable {
anon: is_anon!([$($modifiers)*]),
eval_always: is_eval_always!([$($modifiers)*]),
+ depth_limit: depth_limit!([$($modifiers)*]),
dep_kind: dep_graph::DepKind::$name,
hash_result: hash_result!([$($modifiers)*]),
- handle_cycle_error: |tcx, mut error| handle_cycle_error!([$($modifiers)*][tcx, error]),
+ handle_cycle_error: handle_cycle_error!([$($modifiers)*]),
compute,
- cache_on_disk,
- try_load_from_disk: Self::TRY_LOAD_FROM_DISK,
+ try_load_from_disk: if cache_on_disk { should_ever_cache_on_disk!([$($modifiers)*]) } else { None },
}
}
+
+ fn execute_query(tcx: TyCtxt<'tcx>, k: Self::Key) -> Self::Stored {
+ tcx.$name(k)
+ }
})*
#[allow(nonstandard_style)]
mod query_callbacks {
use super::*;
- use rustc_middle::dep_graph::DepNode;
- use rustc_middle::ty::query::query_keys;
- use rustc_query_system::dep_graph::DepNodeParams;
- use rustc_query_system::query::{force_query, QueryDescription};
use rustc_query_system::dep_graph::FingerprintStyle;
// We use this for most things when incr. comp. is turned off.
- pub fn Null() -> DepKindStruct {
+ pub fn Null<'tcx>() -> DepKindStruct<'tcx> {
DepKindStruct {
is_anon: false,
is_eval_always: false,
@@ -378,7 +527,7 @@ macro_rules! define_queries {
}
// We use this for the forever-red node.
- pub fn Red() -> DepKindStruct {
+ pub fn Red<'tcx>() -> DepKindStruct<'tcx> {
DepKindStruct {
is_anon: false,
is_eval_always: false,
@@ -388,7 +537,7 @@ macro_rules! define_queries {
}
}
- pub fn TraitSelect() -> DepKindStruct {
+ pub fn TraitSelect<'tcx>() -> DepKindStruct<'tcx> {
DepKindStruct {
is_anon: true,
is_eval_always: false,
@@ -398,7 +547,7 @@ macro_rules! define_queries {
}
}
- pub fn CompileCodegenUnit() -> DepKindStruct {
+ pub fn CompileCodegenUnit<'tcx>() -> DepKindStruct<'tcx> {
DepKindStruct {
is_anon: false,
is_eval_always: false,
@@ -408,7 +557,7 @@ macro_rules! define_queries {
}
}
- pub fn CompileMonoItem() -> DepKindStruct {
+ pub fn CompileMonoItem<'tcx>() -> DepKindStruct<'tcx> {
DepKindStruct {
is_anon: false,
is_eval_always: false,
@@ -418,111 +567,120 @@ macro_rules! define_queries {
}
}
- $(pub(crate) fn $name()-> DepKindStruct {
- let is_anon = is_anon!([$($modifiers)*]);
- let is_eval_always = is_eval_always!([$($modifiers)*]);
-
- let fingerprint_style =
- <query_keys::$name<'_> as DepNodeParams<TyCtxt<'_>>>::fingerprint_style();
-
- if is_anon || !fingerprint_style.reconstructible() {
- return DepKindStruct {
- is_anon,
- is_eval_always,
- fingerprint_style,
- force_from_dep_node: None,
- try_load_from_on_disk_cache: None,
- }
- }
+ $(pub(crate) fn $name<'tcx>()-> DepKindStruct<'tcx> {
+ $crate::plumbing::query_callback::<queries::$name<'tcx>>(
+ is_anon!([$($modifiers)*]),
+ is_eval_always!([$($modifiers)*]),
+ )
+ })*
+ }
- #[inline(always)]
- fn recover<'tcx>(tcx: TyCtxt<'tcx>, dep_node: DepNode) -> Option<query_keys::$name<'tcx>> {
- <query_keys::$name<'_> as DepNodeParams<TyCtxt<'_>>>::recover(tcx, &dep_node)
- }
+ mod query_structs {
+ use rustc_middle::ty::TyCtxt;
+ use $crate::plumbing::{QueryStruct, QueryCtxt};
+ use $crate::profiling_support::QueryKeyStringCache;
+ use rustc_query_system::query::QueryMap;
- fn force_from_dep_node(tcx: TyCtxt<'_>, dep_node: DepNode) -> bool {
- if let Some(key) = recover(tcx, dep_node) {
- #[cfg(debug_assertions)]
- let _guard = tracing::span!(tracing::Level::TRACE, stringify!($name), ?key).entered();
- let tcx = QueryCtxt::from_tcx(tcx);
- force_query::<queries::$name<'_>, _>(tcx, key, dep_node);
- true
- } else {
- false
- }
+ pub(super) const fn dummy_query_struct<'tcx>() -> QueryStruct<'tcx> {
+ fn noop_try_collect_active_jobs(_: QueryCtxt<'_>, _: &mut QueryMap) -> Option<()> {
+ None
}
+ fn noop_alloc_self_profile_query_strings(_: TyCtxt<'_>, _: &mut QueryKeyStringCache) {}
- fn try_load_from_on_disk_cache(tcx: TyCtxt<'_>, dep_node: DepNode) {
- debug_assert!(tcx.dep_graph.is_green(&dep_node));
-
- let key = recover(tcx, dep_node).unwrap_or_else(|| panic!("Failed to recover key for {:?} with hash {}", dep_node, dep_node.hash));
- if queries::$name::cache_on_disk(tcx, &key) {
- let _ = tcx.$name(key);
- }
+ QueryStruct {
+ try_collect_active_jobs: noop_try_collect_active_jobs,
+ alloc_self_profile_query_strings: noop_alloc_self_profile_query_strings,
+ encode_query_results: None,
}
+ }
- DepKindStruct {
- is_anon,
- is_eval_always,
- fingerprint_style,
- force_from_dep_node: Some(force_from_dep_node),
- try_load_from_on_disk_cache: Some(try_load_from_on_disk_cache),
- }
- })*
+ pub(super) use dummy_query_struct as Null;
+ pub(super) use dummy_query_struct as Red;
+ pub(super) use dummy_query_struct as TraitSelect;
+ pub(super) use dummy_query_struct as CompileCodegenUnit;
+ pub(super) use dummy_query_struct as CompileMonoItem;
+
+ $(
+ pub(super) const fn $name<'tcx>() -> QueryStruct<'tcx> { QueryStruct {
+ try_collect_active_jobs: |tcx, qmap| {
+ let make_query = |tcx, key| {
+ let kind = rustc_middle::dep_graph::DepKind::$name;
+ let name = stringify!($name);
+ $crate::plumbing::create_query_frame(tcx, rustc_middle::query::descs::$name, key, kind, name)
+ };
+ tcx.queries.$name.try_collect_active_jobs(
+ tcx,
+ make_query,
+ qmap,
+ )
+ },
+ alloc_self_profile_query_strings: |tcx, string_cache| {
+ $crate::profiling_support::alloc_self_profile_query_strings_for_query_cache(
+ tcx,
+ stringify!($name),
+ &tcx.query_caches.$name,
+ string_cache,
+ )
+ },
+ encode_query_results: expand_if_cached!([$($modifiers)*], |tcx, encoder, query_result_index|
+ $crate::on_disk_cache::encode_query_results::<_, super::queries::$name<'_>>(tcx, encoder, query_result_index)
+ ),
+ }})*
}
- pub fn query_callbacks<'tcx>(arena: &'tcx Arena<'tcx>) -> &'tcx [DepKindStruct] {
+ pub fn query_callbacks<'tcx>(arena: &'tcx Arena<'tcx>) -> &'tcx [DepKindStruct<'tcx>] {
arena.alloc_from_iter(make_dep_kind_array!(query_callbacks))
}
}
}
-// FIXME(eddyb) this macro (and others?) use `$tcx` and `'tcx` interchangeably.
-// We should either not take `$tcx` at all and use `'tcx` everywhere, or use
-// `$tcx` everywhere (even if that isn't necessary due to lack of hygiene).
+use crate::{ExternProviders, OnDiskCache, Providers};
+
+impl<'tcx> Queries<'tcx> {
+ pub fn new(
+ local_providers: Providers,
+ extern_providers: ExternProviders,
+ on_disk_cache: Option<OnDiskCache<'tcx>>,
+ ) -> Self {
+ use crate::query_structs;
+ Queries {
+ local_providers: Box::new(local_providers),
+ extern_providers: Box::new(extern_providers),
+ query_structs: make_dep_kind_array!(query_structs).to_vec(),
+ on_disk_cache,
+ jobs: AtomicU64::new(1),
+ ..Queries::default()
+ }
+ }
+}
+
macro_rules! define_queries_struct {
- (tcx: $tcx:tt,
+ (
input: ($(([$($modifiers:tt)*] [$($attr:tt)*] [$name:ident]))*)) => {
- pub struct Queries<$tcx> {
+ #[derive(Default)]
+ pub struct Queries<'tcx> {
local_providers: Box<Providers>,
extern_providers: Box<ExternProviders>,
+ query_structs: Vec<$crate::plumbing::QueryStruct<'tcx>>,
- pub on_disk_cache: Option<OnDiskCache<$tcx>>,
+ pub on_disk_cache: Option<OnDiskCache<'tcx>>,
jobs: AtomicU64,
- $($(#[$attr])* $name: QueryState<query_keys::$name<$tcx>>,)*
+ $($(#[$attr])* $name: QueryState<<queries::$name<'tcx> as QueryConfig>::Key>,)*
}
- impl<$tcx> Queries<$tcx> {
- pub fn new(
- local_providers: Providers,
- extern_providers: ExternProviders,
- on_disk_cache: Option<OnDiskCache<$tcx>>,
- ) -> Self {
- Queries {
- local_providers: Box::new(local_providers),
- extern_providers: Box::new(extern_providers),
- on_disk_cache,
- jobs: AtomicU64::new(1),
- $($name: Default::default()),*
- }
- }
-
+ impl<'tcx> Queries<'tcx> {
pub(crate) fn try_collect_active_jobs(
- &$tcx self,
- tcx: TyCtxt<$tcx>,
+ &'tcx self,
+ tcx: TyCtxt<'tcx>,
) -> Option<QueryMap> {
let tcx = QueryCtxt { tcx, queries: self };
let mut jobs = QueryMap::default();
- $(
- self.$name.try_collect_active_jobs(
- tcx,
- make_query::$name,
- &mut jobs,
- )?;
- )*
+ for query in &self.query_structs {
+ (query.try_collect_active_jobs)(tcx, &mut jobs);
+ }
Some(jobs)
}
@@ -541,17 +699,16 @@ macro_rules! define_queries_struct {
$($(#[$attr])*
#[inline(always)]
- #[tracing::instrument(level = "trace", skip(self, tcx))]
+ #[tracing::instrument(level = "trace", skip(self, tcx), ret)]
fn $name(
&'tcx self,
- tcx: TyCtxt<$tcx>,
+ tcx: TyCtxt<'tcx>,
span: Span,
- key: query_keys::$name<$tcx>,
+ key: <queries::$name<'tcx> as QueryConfig>::Key,
mode: QueryMode,
- ) -> Option<query_stored::$name<$tcx>> {
- opt_remap_env_constness!([$($modifiers)*][key]);
+ ) -> Option<query_stored::$name<'tcx>> {
let qcx = QueryCtxt { tcx, queries: self };
- get_query::<queries::$name<$tcx>, _>(qcx, span, key, mode)
+ get_query::<queries::$name<'tcx>, _>(qcx, span, key, mode)
})*
}
};
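Among the plumbing changes above, `start_query` now takes a `depth_limit` flag: depth-limited queries bump `query_depth`, and exceeding the recursion limit aborts with a `QueryOverflow` diagnostic whose suggested limit is the current one doubled. The standalone sketch below mirrors only that control flow with toy types; it panics instead of emitting a diagnostic and is not the rustc `QueryContext` API.

```rust
// Toy model of the depth-limit check added to `start_query`; all types and
// the panic-based error path are simplifications for illustration.

#[derive(Copy, Clone)]
struct Limit(usize);

impl Limit {
    fn value_within_limit(self, v: usize) -> bool {
        v <= self.0
    }
}

struct QueryCtxt {
    recursion_limit: Limit,
    query_depth: usize,
}

impl QueryCtxt {
    fn start_query<R>(&mut self, depth_limit: bool, compute: impl FnOnce(&mut Self) -> R) -> R {
        if depth_limit && !self.recursion_limit.value_within_limit(self.query_depth) {
            self.depth_limit_error();
        }
        // Only depth-limited queries contribute to the depth, mirroring
        // `query_depth + depth_limit as usize` in the diff above.
        self.query_depth += depth_limit as usize;
        let r = compute(&mut *self);
        self.query_depth -= depth_limit as usize;
        r
    }

    fn depth_limit_error(&self) -> ! {
        let suggested = match self.recursion_limit {
            Limit(0) => Limit(2),
            Limit(n) => Limit(n * 2),
        };
        panic!(
            "query overflow: depth {} exceeds limit {}; consider raising it to {}",
            self.query_depth, self.recursion_limit.0, suggested.0
        );
    }
}

fn main() {
    let mut qcx = QueryCtxt { recursion_limit: Limit(4), query_depth: 0 };
    // Three nested depth-limited "queries" stay within the limit of 4.
    qcx.start_query(true, |q| {
        q.start_query(true, |q| {
            q.start_query(true, |_| println!("nested queries ran fine"));
        });
    });
}
```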
diff --git a/compiler/rustc_query_impl/src/profiling_support.rs b/compiler/rustc_query_impl/src/profiling_support.rs
index 551f09420..2cc311d48 100644
--- a/compiler/rustc_query_impl/src/profiling_support.rs
+++ b/compiler/rustc_query_impl/src/profiling_support.rs
@@ -1,3 +1,4 @@
+use crate::QueryCtxt;
use measureme::{StringComponent, StringId};
use rustc_data_structures::fx::FxHashMap;
use rustc_data_structures::profiling::SelfProfiler;
@@ -8,7 +9,7 @@ use rustc_query_system::query::QueryCache;
use std::fmt::Debug;
use std::io::Write;
-struct QueryKeyStringCache {
+pub(crate) struct QueryKeyStringCache {
def_id_cache: FxHashMap<DefId, StringId>,
}
@@ -226,7 +227,7 @@ where
/// Allocate the self-profiling query strings for a single query cache. This
/// method is called from `alloc_self_profile_query_strings` which knows all
/// the queries via macro magic.
-fn alloc_self_profile_query_strings_for_query_cache<'tcx, C>(
+pub(crate) fn alloc_self_profile_query_strings_for_query_cache<'tcx, C>(
tcx: TyCtxt<'tcx>,
query_name: &'static str,
query_cache: &C,
@@ -298,27 +299,15 @@ fn alloc_self_profile_query_strings_for_query_cache<'tcx, C>(
/// If we are recording only summary data, the ids will point to
/// just the query names. If we are recording query keys too, we
/// allocate the corresponding strings here.
-pub fn alloc_self_profile_query_strings(tcx: TyCtxt<'_>) {
+pub fn alloc_self_profile_query_strings<'tcx>(tcx: TyCtxt<'tcx>) {
if !tcx.prof.enabled() {
return;
}
let mut string_cache = QueryKeyStringCache::new();
+ let queries = QueryCtxt::from_tcx(tcx);
- macro_rules! alloc_once {
- (<$tcx:tt>
- $($(#[$attr:meta])* [$($modifiers:tt)*] fn $name:ident($K:ty) -> $V:ty,)*
- ) => {
- $({
- alloc_self_profile_query_strings_for_query_cache(
- tcx,
- stringify!($name),
- &tcx.query_caches.$name,
- &mut string_cache,
- );
- })*
- }
+ for query in &queries.queries.query_structs {
+ (query.alloc_self_profile_query_strings)(tcx, &mut string_cache);
}
-
- rustc_query_append! { [alloc_once!][<'tcx>] }
}
diff --git a/compiler/rustc_query_impl/src/values.rs b/compiler/rustc_query_impl/src/values.rs
deleted file mode 100644
index 718a2971c..000000000
--- a/compiler/rustc_query_impl/src/values.rs
+++ /dev/null
@@ -1,45 +0,0 @@
-use super::QueryCtxt;
-use rustc_middle::ty::{self, AdtSizedConstraint, Ty};
-
-pub(super) trait Value<'tcx>: Sized {
- fn from_cycle_error(tcx: QueryCtxt<'tcx>) -> Self;
-}
-
-impl<'tcx, T> Value<'tcx> for T {
- default fn from_cycle_error(tcx: QueryCtxt<'tcx>) -> T {
- tcx.sess.abort_if_errors();
- bug!("Value::from_cycle_error called without errors");
- }
-}
-
-impl<'tcx> Value<'tcx> for Ty<'_> {
- fn from_cycle_error(tcx: QueryCtxt<'tcx>) -> Self {
- // SAFETY: This is never called when `Self` is not `Ty<'tcx>`.
- // FIXME: Represent the above fact in the trait system somehow.
- unsafe { std::mem::transmute::<Ty<'tcx>, Ty<'_>>(tcx.ty_error()) }
- }
-}
-
-impl<'tcx> Value<'tcx> for ty::SymbolName<'_> {
- fn from_cycle_error(tcx: QueryCtxt<'tcx>) -> Self {
- // SAFETY: This is never called when `Self` is not `SymbolName<'tcx>`.
- // FIXME: Represent the above fact in the trait system somehow.
- unsafe {
- std::mem::transmute::<ty::SymbolName<'tcx>, ty::SymbolName<'_>>(ty::SymbolName::new(
- *tcx, "<error>",
- ))
- }
- }
-}
-
-impl<'tcx> Value<'tcx> for AdtSizedConstraint<'_> {
- fn from_cycle_error(tcx: QueryCtxt<'tcx>) -> Self {
- // SAFETY: This is never called when `Self` is not `AdtSizedConstraint<'tcx>`.
- // FIXME: Represent the above fact in the trait system somehow.
- unsafe {
- std::mem::transmute::<AdtSizedConstraint<'tcx>, AdtSizedConstraint<'_>>(
- AdtSizedConstraint(tcx.intern_type_list(&[tcx.ty_error()])),
- )
- }
- }
-}
diff --git a/compiler/rustc_query_system/Cargo.toml b/compiler/rustc_query_system/Cargo.toml
index b7787aeb8..faddad741 100644
--- a/compiler/rustc_query_system/Cargo.toml
+++ b/compiler/rustc_query_system/Cargo.toml
@@ -4,12 +4,10 @@ version = "0.0.0"
edition = "2021"
[lib]
-doctest = false
[dependencies]
+parking_lot = "0.11"
rustc_arena = { path = "../rustc_arena" }
-tracing = "0.1"
-rustc-rayon-core = { version = "0.4.0", optional = true }
rustc_ast = { path = "../rustc_ast" }
rustc_data_structures = { path = "../rustc_data_structures" }
rustc_errors = { path = "../rustc_errors" }
@@ -17,12 +15,15 @@ rustc_feature = { path = "../rustc_feature" }
rustc_hir = { path = "../rustc_hir" }
rustc_index = { path = "../rustc_index" }
rustc_macros = { path = "../rustc_macros" }
+rustc-rayon-core = { version = "0.4.0", optional = true }
rustc_serialize = { path = "../rustc_serialize" }
rustc_session = { path = "../rustc_session" }
rustc_span = { path = "../rustc_span" }
rustc_target = { path = "../rustc_target" }
-parking_lot = "0.11"
+rustc_type_ir = { path = "../rustc_type_ir" }
smallvec = { version = "1.8.1", features = ["union", "may_dangle"] }
+thin-vec = "0.2.8"
+tracing = "0.1"
[features]
rustc_use_parallel_compiler = ["rustc-rayon-core"]
diff --git a/compiler/rustc_query_system/src/dep_graph/dep_node.rs b/compiler/rustc_query_system/src/dep_graph/dep_node.rs
index 162c274d8..5c6ce0556 100644
--- a/compiler/rustc_query_system/src/dep_graph/dep_node.rs
+++ b/compiler/rustc_query_system/src/dep_graph/dep_node.rs
@@ -47,6 +47,7 @@ use crate::ich::StableHashingContext;
use rustc_data_structures::fingerprint::{Fingerprint, PackedFingerprint};
use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
+use rustc_hir::definitions::DefPathHash;
use std::fmt;
use std::hash::Hash;
@@ -88,6 +89,17 @@ impl<K: DepKind> DepNode<K> {
dep_node
}
+
+ /// Construct a DepNode from the given DepKind and DefPathHash. This
+ /// method will assert that the given DepKind actually requires a
+ /// single DefId/DefPathHash parameter.
+ pub fn from_def_path_hash<Ctxt>(tcx: Ctxt, def_path_hash: DefPathHash, kind: K) -> Self
+ where
+ Ctxt: super::DepContext<DepKind = K>,
+ {
+ debug_assert!(tcx.fingerprint_style(kind) == FingerprintStyle::DefPathHash);
+ DepNode { kind, hash: def_path_hash.0.into() }
+ }
}
impl<K: DepKind> fmt::Debug for DepNode<K> {
@@ -149,6 +161,67 @@ where
}
}
+/// This struct stores metadata about each DepKind.
+///
+/// Information is retrieved by indexing the `DEP_KINDS` array using the integer value
+/// of the `DepKind`. Overall, this allows implementing `DepContext` using this manual
+/// jump table instead of large matches.
+pub struct DepKindStruct<CTX: DepContext> {
+ /// Anonymous queries cannot be replayed from one compiler invocation to the next.
+ /// When their result is needed, it is recomputed. They are useful for fine-grained
+ /// dependency tracking, and caching within one compiler invocation.
+ pub is_anon: bool,
+
+ /// Eval-always queries do not track their dependencies, and are always recomputed, even if
+ /// their inputs have not changed since the last compiler invocation. The result is still
+ /// cached within one compiler invocation.
+ pub is_eval_always: bool,
+
+ /// Whether the query key can be recovered from the hashed fingerprint.
+ /// See [DepNodeParams] trait for the behaviour of each key type.
+ pub fingerprint_style: FingerprintStyle,
+
+ /// The red/green evaluation system will try to mark a specific DepNode in the
+ /// dependency graph as green by recursively trying to mark the dependencies of
+ /// that `DepNode` as green. While doing so, it will sometimes encounter a `DepNode`
+ /// where we don't know if it is red or green and we therefore actually have
+ /// to recompute its value in order to find out. Since the only piece of
+ /// information that we have at that point is the `DepNode` we are trying to
+ /// re-evaluate, we need some way to re-run a query from just that. This is what
+ /// `force_from_dep_node()` implements.
+ ///
+ /// In the general case, a `DepNode` consists of a `DepKind` and an opaque
+ /// GUID/fingerprint that will uniquely identify the node. This GUID/fingerprint
+ /// is usually constructed by computing a stable hash of the query-key that the
+ /// `DepNode` corresponds to. Consequently, it is not in general possible to go
+ /// back from hash to query-key (since hash functions are not reversible). For
+ /// this reason `force_from_dep_node()` is expected to fail from time to time
+ /// because we just cannot find out, from the `DepNode` alone, what the
+ /// corresponding query-key is and therefore cannot re-run the query.
+ ///
+ /// The system deals with this case by letting `try_mark_green` fail, which forces
+ /// the root query to be re-evaluated.
+ ///
+ /// Now, if `force_from_dep_node()` would always fail, it would be pretty useless.
+ /// Fortunately, we can use some contextual information that will allow us to
+ /// reconstruct query-keys for certain kinds of `DepNode`s. In particular, we
+ /// enforce by construction that the GUID/fingerprint of certain `DepNode`s is a
+ /// valid `DefPathHash`. Since we also always build a huge table that maps every
+ /// `DefPathHash` in the current codebase to the corresponding `DefId`, we have
+ /// everything we need to re-run the query.
+ ///
+ /// Take the `mir_promoted` query as an example. Like many other queries, it
+ /// just has a single parameter: the `DefId` of the item it will compute the
+ /// validated MIR for. Now, when we call `force_from_dep_node()` on a `DepNode`
+ /// with kind `MirValidated`, we know that the GUID/fingerprint of the `DepNode`
+ /// is actually a `DefPathHash`, and can therefore just look up the corresponding
+ /// `DefId` in `tcx.def_path_hash_to_def_id`.
+ pub force_from_dep_node: Option<fn(tcx: CTX, dep_node: DepNode<CTX::DepKind>) -> bool>,
+
+ /// Invoke a query to put the on-disk cached value in memory.
+ pub try_load_from_on_disk_cache: Option<fn(CTX, DepNode<CTX::DepKind>)>,
+}
+
/// A "work product" corresponds to a `.o` (or other) file that we
/// save in between runs. These IDs do not have a `DefId` but rather
/// some independent path or string that persists between runs without
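The `DepKindStruct` moved above is essentially a per-`DepKind` jump table: flags plus optional function pointers, where `force_from_dep_node` is `None` exactly when the query key cannot be reconstructed from the fingerprint. Below is a minimal standalone sketch of that dispatch shape; the kinds, the table contents, and `force_type_of` are made up for illustration and are not the generated `DEP_KINDS` table.

```rust
// Toy model of the per-DepKind jump table described in the doc comment above.

#[derive(Copy, Clone, Debug, PartialEq, Eq)]
enum DepKind {
    TypeOf,
    CodegenUnit,
}

#[derive(Copy, Clone, Debug)]
struct DepNode {
    kind: DepKind,
    hash: u64,
}

struct DepKindStruct {
    is_anon: bool,
    // `None` when the query key cannot be reconstructed from the fingerprint.
    force_from_dep_node: Option<fn(DepNode) -> bool>,
}

fn force_type_of(node: DepNode) -> bool {
    println!("re-running type_of for node hash {:#x}", node.hash);
    true
}

// One entry per DepKind, indexed by the kind's integer value.
const DEP_KINDS: &[DepKindStruct] = &[
    DepKindStruct { is_anon: false, force_from_dep_node: Some(force_type_of) },
    DepKindStruct { is_anon: false, force_from_dep_node: None },
];

fn try_force_from_dep_node(node: DepNode) -> bool {
    let info = &DEP_KINDS[node.kind as usize];
    if info.is_anon {
        // Anonymous nodes are never reconstructible.
        return false;
    }
    match info.force_from_dep_node {
        Some(force) => force(node),
        // Not reconstructible: try_mark_green fails and the root query re-runs.
        None => false,
    }
}

fn main() {
    assert!(try_force_from_dep_node(DepNode { kind: DepKind::TypeOf, hash: 0xabcd }));
    assert!(!try_force_from_dep_node(DepNode { kind: DepKind::CodegenUnit, hash: 0x1234 }));
}
```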
diff --git a/compiler/rustc_query_system/src/dep_graph/mod.rs b/compiler/rustc_query_system/src/dep_graph/mod.rs
index 342d95ca4..da2075fd5 100644
--- a/compiler/rustc_query_system/src/dep_graph/mod.rs
+++ b/compiler/rustc_query_system/src/dep_graph/mod.rs
@@ -4,7 +4,7 @@ mod graph;
mod query;
mod serialized;
-pub use dep_node::{DepNode, DepNodeParams, WorkProductId};
+pub use dep_node::{DepKindStruct, DepNode, DepNodeParams, WorkProductId};
pub use graph::{
hash_result, DepGraph, DepNodeColor, DepNodeIndex, TaskDeps, TaskDepsRef, WorkProduct,
};
@@ -34,16 +34,43 @@ pub trait DepContext: Copy {
/// Access the compiler session.
fn sess(&self) -> &Session;
- /// Return whether this kind always require evaluation.
- fn is_eval_always(&self, kind: Self::DepKind) -> bool;
+ fn dep_kind_info(&self, dep_node: Self::DepKind) -> &DepKindStruct<Self>;
- fn fingerprint_style(&self, kind: Self::DepKind) -> FingerprintStyle;
+ #[inline(always)]
+ fn fingerprint_style(&self, kind: Self::DepKind) -> FingerprintStyle {
+ let data = self.dep_kind_info(kind);
+ if data.is_anon {
+ return FingerprintStyle::Opaque;
+ }
+ data.fingerprint_style
+ }
+
+ #[inline(always)]
+ /// Return whether this kind always requires evaluation.
+ fn is_eval_always(&self, kind: Self::DepKind) -> bool {
+ self.dep_kind_info(kind).is_eval_always
+ }
/// Try to force a dep node to execute and see if it's green.
- fn try_force_from_dep_node(&self, dep_node: DepNode<Self::DepKind>) -> bool;
+ fn try_force_from_dep_node(self, dep_node: DepNode<Self::DepKind>) -> bool {
+ debug!("try_force_from_dep_node({:?}) --- trying to force", dep_node);
+
+ let cb = self.dep_kind_info(dep_node.kind);
+ if let Some(f) = cb.force_from_dep_node {
+ f(self, dep_node);
+ true
+ } else {
+ false
+ }
+ }
/// Load data from the on-disk cache.
- fn try_load_from_on_disk_cache(&self, dep_node: DepNode<Self::DepKind>);
+ fn try_load_from_on_disk_cache(self, dep_node: DepNode<Self::DepKind>) {
+ let cb = self.dep_kind_info(dep_node.kind);
+ if let Some(f) = cb.try_load_from_on_disk_cache {
+ f(self, dep_node)
+ }
+ }
}
pub trait HasDepContext: Copy {
@@ -67,6 +94,8 @@ impl<T: DepContext> HasDepContext for T {
pub enum FingerprintStyle {
/// The fingerprint is actually a DefPathHash.
DefPathHash,
+ /// The fingerprint is actually a HirId.
+ HirId,
/// Query key was `()` or equivalent, so fingerprint is just zero.
Unit,
/// Some opaque hash.
@@ -77,7 +106,9 @@ impl FingerprintStyle {
#[inline]
pub fn reconstructible(self) -> bool {
match self {
- FingerprintStyle::DefPathHash | FingerprintStyle::Unit => true,
+ FingerprintStyle::DefPathHash | FingerprintStyle::Unit | FingerprintStyle::HirId => {
+ true
+ }
FingerprintStyle::Opaque => false,
}
}
diff --git a/compiler/rustc_query_system/src/error.rs b/compiler/rustc_query_system/src/error.rs
new file mode 100644
index 000000000..7a20eaceb
--- /dev/null
+++ b/compiler/rustc_query_system/src/error.rs
@@ -0,0 +1,93 @@
+use rustc_macros::{Diagnostic, Subdiagnostic};
+use rustc_session::Limit;
+use rustc_span::{Span, Symbol};
+
+#[derive(Subdiagnostic)]
+#[note(query_system_cycle_stack_middle)]
+pub struct CycleStack {
+ #[primary_span]
+ pub span: Span,
+ pub desc: String,
+}
+
+#[derive(Copy, Clone)]
+pub enum HandleCycleError {
+ Error,
+ Fatal,
+ DelayBug,
+}
+
+#[derive(Subdiagnostic)]
+pub enum StackCount {
+ #[note(query_system_cycle_stack_single)]
+ Single,
+ #[note(query_system_cycle_stack_multiple)]
+ Multiple,
+}
+
+#[derive(Subdiagnostic)]
+pub enum Alias {
+ #[note(query_system_cycle_recursive_ty_alias)]
+ #[help(query_system_cycle_recursive_ty_alias_help1)]
+ #[help(query_system_cycle_recursive_ty_alias_help2)]
+ Ty,
+ #[note(query_system_cycle_recursive_trait_alias)]
+ Trait,
+}
+
+#[derive(Subdiagnostic)]
+#[note(query_system_cycle_usage)]
+pub struct CycleUsage {
+ #[primary_span]
+ pub span: Span,
+ pub usage: String,
+}
+
+#[derive(Diagnostic)]
+#[diag(query_system_cycle, code = "E0391")]
+pub struct Cycle {
+ #[primary_span]
+ pub span: Span,
+ pub stack_bottom: String,
+ #[subdiagnostic(eager)]
+ pub cycle_stack: Vec<CycleStack>,
+ #[subdiagnostic]
+ pub stack_count: StackCount,
+ #[subdiagnostic]
+ pub alias: Option<Alias>,
+ #[subdiagnostic]
+ pub cycle_usage: Option<CycleUsage>,
+}
+
+#[derive(Diagnostic)]
+#[diag(query_system_reentrant)]
+pub struct Reentrant;
+
+#[derive(Diagnostic)]
+#[diag(query_system_increment_compilation)]
+#[help]
+#[note(query_system_increment_compilation_note1)]
+#[note(query_system_increment_compilation_note2)]
+pub struct IncrementCompilation {
+ pub run_cmd: String,
+ pub dep_node: String,
+}
+
+#[derive(Diagnostic)]
+#[help]
+#[diag(query_system_query_overflow)]
+pub struct QueryOverflow {
+ #[primary_span]
+ pub span: Option<Span>,
+ #[subdiagnostic]
+ pub layout_of_depth: Option<LayoutOfDepth>,
+ pub suggested_limit: Limit,
+ pub crate_name: Symbol,
+}
+
+#[derive(Subdiagnostic)]
+#[note(query_system_layout_of_depth)]
+pub struct LayoutOfDepth {
+ pub desc: String,
+ pub depth: usize,
+}
diff --git a/compiler/rustc_query_system/src/ich/hcx.rs b/compiler/rustc_query_system/src/ich/hcx.rs
index 217fac341..148eabb38 100644
--- a/compiler/rustc_query_system/src/ich/hcx.rs
+++ b/compiler/rustc_query_system/src/ich/hcx.rs
@@ -12,7 +12,7 @@ use rustc_session::cstore::CrateStore;
use rustc_session::Session;
use rustc_span::source_map::SourceMap;
use rustc_span::symbol::Symbol;
-use rustc_span::{BytePos, CachingSourceMapView, SourceFile, Span, SpanData};
+use rustc_span::{BytePos, CachingSourceMapView, SourceFile, Span, SpanData, DUMMY_SP};
/// This is the context state available during incr. comp. hashing. It contains
/// enough information to transform `DefId`s and `HirId`s into stable `DefPath`s (i.e.,
@@ -40,9 +40,9 @@ pub struct StableHashingContext<'a> {
#[derive(Clone, Copy)]
pub(super) enum BodyResolver<'tcx> {
Forbidden,
+ Ignore,
Traverse {
- hash_bodies: bool,
- owner: LocalDefId,
+ owner: hir::OwnerId,
bodies: &'tcx SortedMap<hir::ItemLocalId, &'tcx hir::Body<'tcx>>,
},
}
@@ -98,32 +98,20 @@ impl<'a> StableHashingContext<'a> {
Self::new_with_or_without_spans(sess, definitions, cstore, source_span, always_ignore_spans)
}
- /// Allow hashing
#[inline]
- pub fn while_hashing_hir_bodies(&mut self, hb: bool, f: impl FnOnce(&mut Self)) {
- let prev = match &mut self.body_resolver {
- BodyResolver::Forbidden => panic!("Hashing HIR bodies is forbidden."),
- BodyResolver::Traverse { ref mut hash_bodies, .. } => {
- std::mem::replace(hash_bodies, hb)
- }
- };
- f(self);
- match &mut self.body_resolver {
- BodyResolver::Forbidden => unreachable!(),
- BodyResolver::Traverse { ref mut hash_bodies, .. } => *hash_bodies = prev,
- }
+ pub fn without_hir_bodies(&mut self, f: impl FnOnce(&mut StableHashingContext<'_>)) {
+ f(&mut StableHashingContext { body_resolver: BodyResolver::Ignore, ..self.clone() });
}
#[inline]
pub fn with_hir_bodies(
&mut self,
- hash_bodies: bool,
- owner: LocalDefId,
+ owner: hir::OwnerId,
bodies: &SortedMap<hir::ItemLocalId, &hir::Body<'_>>,
f: impl FnOnce(&mut StableHashingContext<'_>),
) {
f(&mut StableHashingContext {
- body_resolver: BodyResolver::Traverse { hash_bodies, owner, bodies },
+ body_resolver: BodyResolver::Traverse { owner, bodies },
..self.clone()
});
}
@@ -197,7 +185,7 @@ impl<'a> rustc_span::HashStableContext for StableHashingContext<'a> {
#[inline]
fn def_span(&self, def_id: LocalDefId) -> Span {
- self.source_span[def_id]
+ *self.source_span.get(def_id).unwrap_or(&DUMMY_SP)
}
#[inline]
diff --git a/compiler/rustc_query_system/src/ich/impls_hir.rs b/compiler/rustc_query_system/src/ich/impls_hir.rs
index 3390ed9eb..aa008d404 100644
--- a/compiler/rustc_query_system/src/ich/impls_hir.rs
+++ b/compiler/rustc_query_system/src/ich/impls_hir.rs
@@ -12,31 +12,11 @@ impl<'ctx> rustc_hir::HashStableContext for StableHashingContext<'ctx> {
let hcx = self;
match hcx.body_resolver {
BodyResolver::Forbidden => panic!("Hashing HIR bodies is forbidden."),
- BodyResolver::Traverse { hash_bodies: false, .. } => {}
- BodyResolver::Traverse { hash_bodies: true, owner, bodies } => {
+ BodyResolver::Ignore => {}
+ BodyResolver::Traverse { owner, bodies } => {
assert_eq!(id.hir_id.owner, owner);
bodies[&id.hir_id.local_id].hash_stable(hcx, hasher);
}
}
}
-
- fn hash_hir_expr(&mut self, expr: &hir::Expr<'_>, hasher: &mut StableHasher) {
- self.while_hashing_hir_bodies(true, |hcx| {
- let hir::Expr { hir_id, ref span, ref kind } = *expr;
-
- hir_id.hash_stable(hcx, hasher);
- span.hash_stable(hcx, hasher);
- kind.hash_stable(hcx, hasher);
- })
- }
-
- fn hash_hir_ty(&mut self, ty: &hir::Ty<'_>, hasher: &mut StableHasher) {
- self.while_hashing_hir_bodies(true, |hcx| {
- let hir::Ty { hir_id, ref kind, ref span } = *ty;
-
- hir_id.hash_stable(hcx, hasher);
- kind.hash_stable(hcx, hasher);
- span.hash_stable(hcx, hasher);
- })
- }
}
diff --git a/compiler/rustc_query_system/src/ich/impls_syntax.rs b/compiler/rustc_query_system/src/ich/impls_syntax.rs
index 1fa085926..0bc811eb0 100644
--- a/compiler/rustc_query_system/src/ich/impls_syntax.rs
+++ b/compiler/rustc_query_system/src/ich/impls_syntax.rs
@@ -42,12 +42,12 @@ impl<'ctx> rustc_ast::HashStableContext for StableHashingContext<'ctx> {
debug_assert!(!attr.is_doc_comment());
let ast::Attribute { kind, id: _, style, span } = attr;
- if let ast::AttrKind::Normal(item, tokens) = kind {
- item.hash_stable(self, hasher);
+ if let ast::AttrKind::Normal(normal) = kind {
+ normal.item.hash_stable(self, hasher);
style.hash_stable(self, hasher);
span.hash_stable(self, hasher);
assert_matches!(
- tokens.as_ref(),
+ normal.tokens.as_ref(),
None,
"Tokens should have been removed during lowering!"
);
@@ -148,3 +148,5 @@ impl<'tcx> HashStable<StableHashingContext<'tcx>> for rustc_feature::Features {
});
}
}
+
+impl<'ctx> rustc_type_ir::HashStableContext for StableHashingContext<'ctx> {}
diff --git a/compiler/rustc_query_system/src/lib.rs b/compiler/rustc_query_system/src/lib.rs
index 68284dcaa..f47760e9a 100644
--- a/compiler/rustc_query_system/src/lib.rs
+++ b/compiler/rustc_query_system/src/lib.rs
@@ -1,10 +1,11 @@
#![feature(assert_matches)]
#![feature(core_intrinsics)]
#![feature(hash_raw_entry)]
-#![feature(let_else)]
#![feature(min_specialization)]
#![feature(extern_types)]
#![allow(rustc::potential_query_instability)]
+#![deny(rustc::untranslatable_diagnostic)]
+#![deny(rustc::diagnostic_outside_of_impl)]
#[macro_use]
extern crate tracing;
@@ -15,5 +16,12 @@ extern crate rustc_macros;
pub mod cache;
pub mod dep_graph;
+mod error;
pub mod ich;
pub mod query;
+mod values;
+
+pub use error::HandleCycleError;
+pub use error::LayoutOfDepth;
+pub use error::QueryOverflow;
+pub use values::Value;
diff --git a/compiler/rustc_query_system/src/query/config.rs b/compiler/rustc_query_system/src/query/config.rs
index 964914a13..0a1cffa3b 100644
--- a/compiler/rustc_query_system/src/query/config.rs
+++ b/compiler/rustc_query_system/src/query/config.rs
@@ -2,12 +2,12 @@
use crate::dep_graph::DepNode;
use crate::dep_graph::SerializedDepNodeIndex;
+use crate::error::HandleCycleError;
use crate::ich::StableHashingContext;
use crate::query::caches::QueryCache;
use crate::query::{QueryContext, QueryState};
use rustc_data_structures::fingerprint::Fingerprint;
-use rustc_errors::{DiagnosticBuilder, ErrorGuaranteed};
use std::fmt::Debug;
use std::hash::Hash;
@@ -19,15 +19,17 @@ pub trait QueryConfig {
type Stored: Clone;
}
+#[derive(Copy, Clone)]
pub struct QueryVTable<CTX: QueryContext, K, V> {
pub anon: bool,
pub dep_kind: CTX::DepKind,
pub eval_always: bool,
- pub cache_on_disk: bool,
+ pub depth_limit: bool,
pub compute: fn(CTX::DepContext, K) -> V,
pub hash_result: Option<fn(&mut StableHashingContext<'_>, &V) -> Fingerprint>,
- pub handle_cycle_error: fn(CTX, DiagnosticBuilder<'_, ErrorGuaranteed>) -> V,
+ pub handle_cycle_error: HandleCycleError,
+ // NOTE: this is also `None` if `cache_on_disk()` returns false, not just if it's unsupported by the query
pub try_load_from_disk: Option<fn(CTX, SerializedDepNodeIndex) -> Option<V>>,
}
@@ -42,22 +44,11 @@ impl<CTX: QueryContext, K, V> QueryVTable<CTX, K, V> {
pub(crate) fn compute(&self, tcx: CTX::DepContext, key: K) -> V {
(self.compute)(tcx, key)
}
-
- pub(crate) fn try_load_from_disk(&self, tcx: CTX, index: SerializedDepNodeIndex) -> Option<V> {
- self.try_load_from_disk
- .expect("QueryDescription::load_from_disk() called for an unsupported query.")(
- tcx, index,
- )
- }
}
pub trait QueryDescription<CTX: QueryContext>: QueryConfig {
- const TRY_LOAD_FROM_DISK: Option<fn(CTX, SerializedDepNodeIndex) -> Option<Self::Value>>;
-
type Cache: QueryCache<Key = Self::Key, Stored = Self::Stored, Value = Self::Value>;
- fn describe(tcx: CTX, key: Self::Key) -> String;
-
// Don't use this method to access query results, instead use the methods on TyCtxt
fn query_state<'a>(tcx: CTX) -> &'a QueryState<Self::Key>
where
@@ -72,4 +63,7 @@ pub trait QueryDescription<CTX: QueryContext>: QueryConfig {
fn make_vtable(tcx: CTX, key: &Self::Key) -> QueryVTable<CTX, Self::Key, Self::Value>;
fn cache_on_disk(tcx: CTX::DepContext, key: &Self::Key) -> bool;
+
+ // Don't use this method to compute query results, instead use the methods on TyCtxt
+ fn execute_query(tcx: CTX::DepContext, k: Self::Key) -> Self::Stored;
}
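
Two things change shape in `QueryVTable` here: cycle recovery becomes plain data (a `HandleCycleError` value) instead of a per-query function pointer, and the old `cache_on_disk: bool` is folded into `try_load_from_disk: Option<fn(...)>`, so "no loader" and "not disk-cached" are the same state. A rough, self-contained sketch of that layout; the key/value types and the loader signature are invented for illustration.

#[derive(Copy, Clone, Debug)]
enum HandleCycleError {
    Error,
    Fatal,
    DelayBug,
}

// Pared-down stand-in for the vtable: strategy data instead of a per-query
// function pointer for cycle recovery, and an Option that doubles as the
// "is this query cached on disk?" flag.
struct QueryVTable<K, V> {
    depth_limit: bool,
    compute: fn(K) -> V,
    handle_cycle_error: HandleCycleError,
    try_load_from_disk: Option<fn(u32) -> Option<V>>,
}

impl<K, V> QueryVTable<K, V> {
    fn compute(&self, key: K) -> V {
        (self.compute)(key)
    }

    fn cache_on_disk(&self) -> bool {
        // The separate boolean is gone: a query is disk-cached exactly when
        // it has a loader.
        self.try_load_from_disk.is_some()
    }
}

fn double(key: u32) -> u64 {
    u64::from(key) * 2
}

fn main() {
    let vtable = QueryVTable::<u32, u64> {
        depth_limit: false,
        compute: double,
        handle_cycle_error: HandleCycleError::Error,
        try_load_from_disk: None,
    };
    assert_eq!(vtable.compute(21), 42);
    assert!(!vtable.cache_on_disk());
    println!("cycle strategy: {:?}, depth limit: {}", vtable.handle_cycle_error, vtable.depth_limit);
}
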
diff --git a/compiler/rustc_query_system/src/query/job.rs b/compiler/rustc_query_system/src/query/job.rs
index 6d2aff381..ed65393f5 100644
--- a/compiler/rustc_query_system/src/query/job.rs
+++ b/compiler/rustc_query_system/src/query/job.rs
@@ -1,11 +1,12 @@
+use crate::error::CycleStack;
use crate::query::plumbing::CycleError;
use crate::query::{QueryContext, QueryStackFrame};
-use rustc_hir::def::DefKind;
use rustc_data_structures::fx::FxHashMap;
use rustc_errors::{
- struct_span_err, Diagnostic, DiagnosticBuilder, ErrorGuaranteed, Handler, Level,
+ Diagnostic, DiagnosticBuilder, ErrorGuaranteed, Handler, IntoDiagnostic, Level,
};
+use rustc_hir::def::DefKind;
use rustc_session::Session;
use rustc_span::Span;
@@ -60,6 +61,7 @@ impl QueryJobId {
}
}
+#[derive(Clone)]
pub struct QueryJobInfo {
pub query: QueryStackFrame,
pub job: QueryJob,
@@ -117,10 +119,10 @@ impl QueryJob {
}
}
-#[cfg(not(parallel_compiler))]
impl QueryJobId {
#[cold]
#[inline(never)]
+ #[cfg(not(parallel_compiler))]
pub(super) fn find_cycle_in_stack(
&self,
query_map: QueryMap,
@@ -157,6 +159,24 @@ impl QueryJobId {
panic!("did not find a cycle")
}
+
+ #[cold]
+ #[inline(never)]
+ pub fn try_find_layout_root(&self, query_map: QueryMap) -> Option<(QueryJobInfo, usize)> {
+ let mut last_layout = None;
+ let mut current_id = Some(*self);
+ let mut depth = 0;
+
+ while let Some(id) = current_id {
+ let info = query_map.get(&id).unwrap();
+ if info.query.name == "layout_of" {
+ depth += 1;
+ last_layout = Some((info.clone(), depth));
+ }
+ current_id = info.job.parent;
+ }
+ last_layout
+ }
}
#[cfg(parallel_compiler)]
@@ -531,51 +551,49 @@ pub fn deadlock(query_map: QueryMap, registry: &rayon_core::Registry) {
#[cold]
pub(crate) fn report_cycle<'a>(
sess: &'a Session,
- CycleError { usage, cycle: stack }: CycleError,
+ CycleError { usage, cycle: stack }: &CycleError,
) -> DiagnosticBuilder<'a, ErrorGuaranteed> {
assert!(!stack.is_empty());
let span = stack[0].query.default_span(stack[1 % stack.len()].span);
- let mut err =
- struct_span_err!(sess, span, E0391, "cycle detected when {}", stack[0].query.description);
+
+ let mut cycle_stack = Vec::new();
+
+ use crate::error::StackCount;
+ let stack_count = if stack.len() == 1 { StackCount::Single } else { StackCount::Multiple };
for i in 1..stack.len() {
let query = &stack[i].query;
let span = query.default_span(stack[(i + 1) % stack.len()].span);
- err.span_note(span, &format!("...which requires {}...", query.description));
+ cycle_stack.push(CycleStack { span, desc: query.description.to_owned() });
}
- if stack.len() == 1 {
- err.note(&format!("...which immediately requires {} again", stack[0].query.description));
- } else {
- err.note(&format!(
- "...which again requires {}, completing the cycle",
- stack[0].query.description
- ));
+ let mut cycle_usage = None;
+ if let Some((span, ref query)) = *usage {
+ cycle_usage = Some(crate::error::CycleUsage {
+ span: query.default_span(span),
+ usage: query.description.to_string(),
+ });
}
- if stack.iter().all(|entry| {
- entry
- .query
- .def_kind
- .map_or(false, |def_kind| matches!(def_kind, DefKind::TyAlias | DefKind::TraitAlias))
- }) {
- if stack.iter().all(|entry| {
- entry.query.def_kind.map_or(false, |def_kind| matches!(def_kind, DefKind::TyAlias))
- }) {
- err.note("type aliases cannot be recursive");
- err.help("consider using a struct, enum, or union instead to break the cycle");
- err.help("see <https://doc.rust-lang.org/reference/types.html#recursive-types> for more information");
- } else {
- err.note("trait aliases cannot be recursive");
- }
- }
-
- if let Some((span, query)) = usage {
- err.span_note(query.default_span(span), &format!("cycle used when {}", query.description));
- }
-
- err
+ let alias = if stack.iter().all(|entry| entry.query.def_kind == Some(DefKind::TyAlias)) {
+ Some(crate::error::Alias::Ty)
+ } else if stack.iter().all(|entry| entry.query.def_kind == Some(DefKind::TraitAlias)) {
+ Some(crate::error::Alias::Trait)
+ } else {
+ None
+ };
+
+ let cycle_diag = crate::error::Cycle {
+ span,
+ cycle_stack,
+ stack_bottom: stack[0].query.description.to_owned(),
+ alias,
+ cycle_usage: cycle_usage,
+ stack_count,
+ };
+
+ cycle_diag.into_diagnostic(&sess.parse_sess.span_diagnostic)
}
pub fn print_query_stack<CTX: QueryContext>(
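
The new `try_find_layout_root` walks the parent chain of the current job, counting `layout_of` frames and remembering the last one, while `report_cycle` now assembles a structured `Cycle` diagnostic instead of formatting strings inline. A self-contained approximation of the parent-chain walk, with a plain `HashMap` and simplified job/info types standing in for the real query map:

use std::collections::HashMap;

#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
struct QueryJobId(u32);

#[derive(Clone, Debug)]
struct QueryJobInfo {
    name: &'static str,
    parent: Option<QueryJobId>,
}

// Walk from `start` towards the root, counting `layout_of` frames and keeping
// the last one seen, mirroring the shape of try_find_layout_root.
fn try_find_layout_root(
    start: QueryJobId,
    query_map: &HashMap<QueryJobId, QueryJobInfo>,
) -> Option<(QueryJobInfo, usize)> {
    let mut last_layout = None;
    let mut current_id = Some(start);
    let mut depth = 0;

    while let Some(id) = current_id {
        let info = query_map.get(&id)?;
        if info.name == "layout_of" {
            depth += 1;
            last_layout = Some((info.clone(), depth));
        }
        current_id = info.parent;
    }
    last_layout
}

fn main() {
    let mut map = HashMap::new();
    map.insert(QueryJobId(0), QueryJobInfo { name: "typeck", parent: None });
    map.insert(QueryJobId(1), QueryJobInfo { name: "layout_of", parent: Some(QueryJobId(0)) });
    map.insert(QueryJobId(2), QueryJobInfo { name: "layout_of", parent: Some(QueryJobId(1)) });

    let (info, depth) = try_find_layout_root(QueryJobId(2), &map).unwrap();
    assert_eq!((info.name, depth), ("layout_of", 2));
}
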
diff --git a/compiler/rustc_query_system/src/query/mod.rs b/compiler/rustc_query_system/src/query/mod.rs
index fb2258434..118703fc0 100644
--- a/compiler/rustc_query_system/src/query/mod.rs
+++ b/compiler/rustc_query_system/src/query/mod.rs
@@ -15,12 +15,12 @@ mod config;
pub use self::config::{QueryConfig, QueryDescription, QueryVTable};
use crate::dep_graph::{DepNodeIndex, HasDepContext, SerializedDepNodeIndex};
-
use rustc_data_structures::sync::Lock;
-use rustc_data_structures::thin_vec::ThinVec;
use rustc_errors::Diagnostic;
use rustc_hir::def::DefKind;
+use rustc_span::def_id::DefId;
use rustc_span::Span;
+use thin_vec::ThinVec;
/// Description of a frame in the query stack.
///
@@ -30,7 +30,9 @@ pub struct QueryStackFrame {
pub name: &'static str,
pub description: String,
span: Option<Span>,
- def_kind: Option<DefKind>,
+ pub def_id: Option<DefId>,
+ pub def_kind: Option<DefKind>,
+ pub ty_adt_id: Option<DefId>,
/// This hash is used to deterministically pick
/// a query to remove cycles in the parallel compiler.
#[cfg(parallel_compiler)]
@@ -43,14 +45,18 @@ impl QueryStackFrame {
name: &'static str,
description: String,
span: Option<Span>,
+ def_id: Option<DefId>,
def_kind: Option<DefKind>,
+ ty_adt_id: Option<DefId>,
_hash: impl FnOnce() -> u64,
) -> Self {
Self {
name,
description,
span,
+ def_id,
def_kind,
+ ty_adt_id,
#[cfg(parallel_compiler)]
hash: _hash(),
}
@@ -119,7 +125,10 @@ pub trait QueryContext: HasDepContext {
fn start_query<R>(
&self,
token: QueryJobId,
+ depth_limit: bool,
diagnostics: Option<&Lock<ThinVec<Diagnostic>>>,
compute: impl FnOnce() -> R,
) -> R;
+
+ fn depth_limit_error(&self, job: QueryJobId);
}
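
`start_query` now takes a `depth_limit: bool` and the context grows a `depth_limit_error` hook, so queries can opt into a recursion-depth check enforced by the engine rather than by each provider. A toy sketch of that opt-in accounting; the limit constant, the engine type, and the failure handling here are invented (the real limit comes from session settings and the real hook reports a diagnostic):

use std::cell::Cell;

// A hypothetical limit; the real compiler derives its limit from session settings.
const DEPTH_LIMIT: usize = 4;

struct QueryEngine {
    depth: Cell<usize>,
}

impl QueryEngine {
    // Returns None when the depth limit is exceeded, standing in for
    // `depth_limit_error`.
    fn start_query<R>(&self, depth_limit: bool, compute: impl FnOnce() -> R) -> Option<R> {
        if depth_limit {
            let d = self.depth.get() + 1;
            if d > DEPTH_LIMIT {
                eprintln!("query depth limit ({DEPTH_LIMIT}) exceeded");
                return None;
            }
            self.depth.set(d);
        }
        let result = compute();
        if depth_limit {
            self.depth.set(self.depth.get() - 1);
        }
        Some(result)
    }
}

fn recurse(engine: &QueryEngine, n: usize) -> Option<usize> {
    // Outer Option: did the engine refuse? Inner Option: did a nested call fail?
    engine
        .start_query(true, || {
            if n == 0 { Some(0) } else { recurse(engine, n - 1).map(|v| v + 1) }
        })
        .flatten()
}

fn main() {
    let engine = QueryEngine { depth: Cell::new(0) };
    assert_eq!(recurse(&engine, 3), Some(3));
    assert_eq!(recurse(&engine, 10), None);
}
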
diff --git a/compiler/rustc_query_system/src/query/plumbing.rs b/compiler/rustc_query_system/src/query/plumbing.rs
index 5e8ea07d0..15b89daa6 100644
--- a/compiler/rustc_query_system/src/query/plumbing.rs
+++ b/compiler/rustc_query_system/src/query/plumbing.rs
@@ -7,6 +7,8 @@ use crate::query::caches::QueryCache;
use crate::query::config::{QueryDescription, QueryVTable};
use crate::query::job::{report_cycle, QueryInfo, QueryJob, QueryJobId, QueryJobInfo};
use crate::query::{QueryContext, QueryMap, QuerySideEffects, QueryStackFrame};
+use crate::values::Value;
+use crate::HandleCycleError;
use rustc_data_structures::fingerprint::Fingerprint;
use rustc_data_structures::fx::FxHashMap;
#[cfg(parallel_compiler)]
@@ -14,7 +16,6 @@ use rustc_data_structures::profiling::TimingGuard;
#[cfg(parallel_compiler)]
use rustc_data_structures::sharded::Sharded;
use rustc_data_structures::sync::Lock;
-use rustc_data_structures::thin_vec::ThinVec;
use rustc_errors::{DiagnosticBuilder, ErrorGuaranteed, FatalError};
use rustc_session::Session;
use rustc_span::{Span, DUMMY_SP};
@@ -24,6 +25,7 @@ use std::fmt::Debug;
use std::hash::Hash;
use std::mem;
use std::ptr;
+use thin_vec::ThinVec;
pub struct QueryState<K> {
#[cfg(parallel_compiler)]
@@ -117,20 +119,48 @@ where
#[inline(never)]
fn mk_cycle<CTX, V, R>(
tcx: CTX,
- error: CycleError,
- handle_cycle_error: fn(CTX, DiagnosticBuilder<'_, ErrorGuaranteed>) -> V,
+ cycle_error: CycleError,
+ handler: HandleCycleError,
cache: &dyn crate::query::QueryStorage<Value = V, Stored = R>,
) -> R
where
CTX: QueryContext,
- V: std::fmt::Debug,
+ V: std::fmt::Debug + Value<CTX::DepContext>,
R: Clone,
{
- let error = report_cycle(tcx.dep_context().sess(), error);
- let value = handle_cycle_error(tcx, error);
+ let error = report_cycle(tcx.dep_context().sess(), &cycle_error);
+ let value = handle_cycle_error(*tcx.dep_context(), &cycle_error, error, handler);
cache.store_nocache(value)
}
+fn handle_cycle_error<CTX, V>(
+ tcx: CTX,
+ cycle_error: &CycleError,
+ mut error: DiagnosticBuilder<'_, ErrorGuaranteed>,
+ handler: HandleCycleError,
+) -> V
+where
+ CTX: DepContext,
+ V: Value<CTX>,
+{
+ use HandleCycleError::*;
+ match handler {
+ Error => {
+ error.emit();
+ Value::from_cycle_error(tcx, &cycle_error.cycle)
+ }
+ Fatal => {
+ error.emit();
+ tcx.sess().abort_if_errors();
+ unreachable!()
+ }
+ DelayBug => {
+ error.delay_as_bug();
+ Value::from_cycle_error(tcx, &cycle_error.cycle)
+ }
+ }
+}
+
impl<'tcx, K> JobOwner<'tcx, K>
where
K: Eq + Hash + Clone,
@@ -336,6 +366,7 @@ fn try_execute_query<CTX, C>(
where
C: QueryCache,
C::Key: Clone + DepNodeParams<CTX::DepContext>,
+ C::Value: Value<CTX::DepContext>,
CTX: QueryContext,
{
match JobOwner::<'_, C::Key>::try_start(&tcx, state, span, key.clone()) {
@@ -381,7 +412,9 @@ where
// Fast path for when incr. comp. is off.
if !dep_graph.is_fully_enabled() {
let prof_timer = tcx.dep_context().profiler().query_provider();
- let result = tcx.start_query(job_id, None, || query.compute(*tcx.dep_context(), key));
+ let result = tcx.start_query(job_id, query.depth_limit, None, || {
+ query.compute(*tcx.dep_context(), key)
+ });
let dep_node_index = dep_graph.next_virtual_depnode_index();
prof_timer.finish_with_query_invocation_id(dep_node_index.into());
return (result, dep_node_index);
@@ -394,7 +427,7 @@ where
// The diagnostics for this query will be promoted to the current session during
// `try_mark_green()`, so we can ignore them here.
- if let Some(ret) = tcx.start_query(job_id, None, || {
+ if let Some(ret) = tcx.start_query(job_id, false, None, || {
try_load_from_disk_and_cache_in_memory(tcx, &key, &dep_node, query)
}) {
return ret;
@@ -404,18 +437,20 @@ where
let prof_timer = tcx.dep_context().profiler().query_provider();
let diagnostics = Lock::new(ThinVec::new());
- let (result, dep_node_index) = tcx.start_query(job_id, Some(&diagnostics), || {
- if query.anon {
- return dep_graph.with_anon_task(*tcx.dep_context(), query.dep_kind, || {
- query.compute(*tcx.dep_context(), key)
- });
- }
+ let (result, dep_node_index) =
+ tcx.start_query(job_id, query.depth_limit, Some(&diagnostics), || {
+ if query.anon {
+ return dep_graph.with_anon_task(*tcx.dep_context(), query.dep_kind, || {
+ query.compute(*tcx.dep_context(), key)
+ });
+ }
- // `to_dep_node` is expensive for some `DepKind`s.
- let dep_node = dep_node_opt.unwrap_or_else(|| query.to_dep_node(*tcx.dep_context(), &key));
+ // `to_dep_node` is expensive for some `DepKind`s.
+ let dep_node =
+ dep_node_opt.unwrap_or_else(|| query.to_dep_node(*tcx.dep_context(), &key));
- dep_graph.with_task(dep_node, *tcx.dep_context(), key, query.compute, query.hash_result)
- });
+ dep_graph.with_task(dep_node, *tcx.dep_context(), key, query.compute, query.hash_result)
+ });
prof_timer.finish_with_query_invocation_id(dep_node_index.into());
@@ -454,14 +489,14 @@ where
// First we try to load the result from the on-disk cache.
// Some things are never cached on disk.
- if query.cache_on_disk {
+ if let Some(try_load_from_disk) = query.try_load_from_disk {
let prof_timer = tcx.dep_context().profiler().incr_cache_loading();
// The call to `with_query_deserialization` enforces that no new `DepNodes`
// are created during deserialization. See the docs of that method for more
// details.
- let result = dep_graph
- .with_query_deserialization(|| query.try_load_from_disk(tcx, prev_dep_node_index));
+ let result =
+ dep_graph.with_query_deserialization(|| try_load_from_disk(tcx, prev_dep_node_index));
prof_timer.finish_with_query_invocation_id(dep_node_index.into());
@@ -614,16 +649,12 @@ fn incremental_verify_ich_cold(sess: &Session, dep_node: DebugArg<'_>, result: D
let old_in_panic = INSIDE_VERIFY_PANIC.with(|in_panic| in_panic.replace(true));
if old_in_panic {
- sess.struct_err(
- "internal compiler error: re-entrant incremental verify failure, suppressing message",
- )
- .emit();
+ sess.emit_err(crate::error::Reentrant);
} else {
- sess.struct_err(&format!("internal compiler error: encountered incremental compilation error with {:?}", dep_node))
- .help(&format!("This is a known issue with the compiler. Run {} to allow your project to compile", run_cmd))
- .note("Please follow the instructions below to create a bug report with the provided information")
- .note("See <https://github.com/rust-lang/rust/issues/84970> for more information")
- .emit();
+ sess.emit_err(crate::error::IncrementCompilation {
+ run_cmd,
+ dep_node: format!("{:?}", dep_node),
+ });
panic!("Found unstable fingerprints for {:?}: {:?}", dep_node, result);
}
@@ -686,6 +717,7 @@ pub fn get_query<Q, CTX>(tcx: CTX, span: Span, key: Q::Key, mode: QueryMode) ->
where
Q: QueryDescription<CTX>,
Q::Key: DepNodeParams<CTX::DepContext>,
+ Q::Value: Value<CTX::DepContext>,
CTX: QueryContext,
{
let query = Q::make_vtable(tcx, &key);
@@ -718,6 +750,7 @@ pub fn force_query<Q, CTX>(tcx: CTX, key: Q::Key, dep_node: DepNode<CTX::DepKind
where
Q: QueryDescription<CTX>,
Q::Key: DepNodeParams<CTX::DepContext>,
+ Q::Value: Value<CTX::DepContext>,
CTX: QueryContext,
{
// We may be concurrently trying both execute and force a query.
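
`mk_cycle` no longer receives a per-query closure; it defers to a shared `handle_cycle_error` that matches on the strategy and either emits and recovers, emits and aborts, or delays the error as a bug. A standalone sketch of that dispatch, with a toy `Diag` type in place of `DiagnosticBuilder` and a plain function in place of `Value::from_cycle_error`:

#[derive(Clone, Copy)]
enum HandleCycleError {
    Error,
    Fatal,
    DelayBug,
}

// Toy stand-in for the diagnostic machinery.
struct Diag(String);

impl Diag {
    fn emit(self) {
        eprintln!("error: {}", self.0);
    }
    fn delay_as_bug(self) {
        eprintln!("delayed bug: {}", self.0);
    }
}

// Recovery value used when a cycle is reported but compilation continues,
// in the spirit of `Value::from_cycle_error`.
fn recovery_value() -> i32 {
    0
}

fn handle_cycle_error(error: Diag, handler: HandleCycleError) -> i32 {
    use HandleCycleError::*;
    match handler {
        Error => {
            // Report now and keep going with a dummy value.
            error.emit();
            recovery_value()
        }
        Fatal => {
            // Report and stop; the real code calls `sess.abort_if_errors()`.
            error.emit();
            panic!("fatal cycle error");
        }
        DelayBug => {
            // Defer: only becomes an ICE if no other error is emitted.
            error.delay_as_bug();
            recovery_value()
        }
    }
}

fn main() {
    let v = handle_cycle_error(Diag("cycle detected".into()), HandleCycleError::Error);
    assert_eq!(v, 0);
}
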
diff --git a/compiler/rustc_query_system/src/values.rs b/compiler/rustc_query_system/src/values.rs
new file mode 100644
index 000000000..67fbf14e6
--- /dev/null
+++ b/compiler/rustc_query_system/src/values.rs
@@ -0,0 +1,15 @@
+use crate::dep_graph::DepContext;
+use crate::query::QueryInfo;
+
+pub trait Value<CTX: DepContext>: Sized {
+ fn from_cycle_error(tcx: CTX, cycle: &[QueryInfo]) -> Self;
+}
+
+impl<CTX: DepContext, T> Value<CTX> for T {
+ default fn from_cycle_error(tcx: CTX, _: &[QueryInfo]) -> T {
+ tcx.sess().abort_if_errors();
+ // Ideally we would use `bug!` here. But bug! is only defined in rustc_middle, and it's
+ // non-trivial to define it earlier.
+ panic!("Value::from_cycle_error called without errors");
+ }
+}
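
The new `values.rs` gives every query value a `Value` impl whose body (a `default fn`, relying on `min_specialization`) just panics, so only types with a more specific impl can actually be synthesized after a cycle. The same idea can be sketched on stable Rust with a defaulted trait method; the types and the override below are illustrative only:

// Stable-Rust approximation: a trait with a panicking default method plays
// the role of the `default fn` blanket impl, and types that can be recovered
// from a reported cycle override it.
trait Value: Sized {
    fn from_cycle_error(cycle: &[&'static str]) -> Self {
        panic!("Value::from_cycle_error called without errors: {cycle:?}");
    }
}

// A type with no sensible recovery keeps the panicking default.
struct LayoutResult;
impl Value for LayoutResult {}

// A type that can be conjured after a reported cycle overrides it.
#[derive(Debug, PartialEq)]
struct TypeOfResult(&'static str);
impl Value for TypeOfResult {
    fn from_cycle_error(_cycle: &[&'static str]) -> Self {
        TypeOfResult("{type error}")
    }
}

fn main() {
    let recovered = TypeOfResult::from_cycle_error(&["type_of", "layout_of"]);
    assert_eq!(recovered, TypeOfResult("{type error}"));
    // LayoutResult::from_cycle_error(&[]) would panic, as in the blanket impl.
}
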
diff --git a/compiler/rustc_resolve/Cargo.toml b/compiler/rustc_resolve/Cargo.toml
index 5d2b606b4..d66db1d7a 100644
--- a/compiler/rustc_resolve/Cargo.toml
+++ b/compiler/rustc_resolve/Cargo.toml
@@ -4,7 +4,6 @@ version = "0.0.0"
edition = "2021"
[lib]
-doctest = false
[dependencies]
bitflags = "1.2.1"
diff --git a/compiler/rustc_resolve/src/access_levels.rs b/compiler/rustc_resolve/src/access_levels.rs
deleted file mode 100644
index 3fba923d9..000000000
--- a/compiler/rustc_resolve/src/access_levels.rs
+++ /dev/null
@@ -1,237 +0,0 @@
-use rustc_ast::ast;
-use rustc_ast::visit;
-use rustc_ast::visit::Visitor;
-use rustc_ast::Crate;
-use rustc_ast::EnumDef;
-use rustc_ast::ForeignMod;
-use rustc_ast::NodeId;
-use rustc_hir::def_id::LocalDefId;
-use rustc_hir::def_id::CRATE_DEF_ID;
-use rustc_middle::middle::privacy::AccessLevel;
-use rustc_middle::ty::Visibility;
-use rustc_span::sym;
-
-use crate::imports::ImportKind;
-use crate::BindingKey;
-use crate::NameBinding;
-use crate::NameBindingKind;
-use crate::Resolver;
-
-pub struct AccessLevelsVisitor<'r, 'a> {
- r: &'r mut Resolver<'a>,
- prev_level: Option<AccessLevel>,
- changed: bool,
-}
-
-impl<'r, 'a> AccessLevelsVisitor<'r, 'a> {
- /// Fills the `Resolver::access_levels` table with public & exported items
- /// For now, this doesn't resolve macros (FIXME) and cannot resolve Impl, as we
- /// need access to a TyCtxt for that.
- pub fn compute_access_levels<'c>(r: &'r mut Resolver<'a>, krate: &'c Crate) {
- let mut visitor =
- AccessLevelsVisitor { r, changed: false, prev_level: Some(AccessLevel::Public) };
-
- visitor.set_access_level_def_id(CRATE_DEF_ID, Some(AccessLevel::Public));
- visitor.set_exports_access_level(CRATE_DEF_ID);
-
- while visitor.changed {
- visitor.reset();
- visit::walk_crate(&mut visitor, krate);
- }
-
- tracing::info!("resolve::access_levels: {:#?}", r.access_levels);
- }
-
- fn reset(&mut self) {
- self.changed = false;
- self.prev_level = Some(AccessLevel::Public);
- }
-
- /// Update the access level of the exports of the given module accordingly. The module access
- /// level has to be Exported or Public.
- /// This will also follow `use` chains (see PrivacyVisitor::set_import_binding_access_level).
- fn set_exports_access_level(&mut self, module_id: LocalDefId) {
- assert!(self.r.module_map.contains_key(&&module_id.to_def_id()));
-
- // Set the given binding access level to `AccessLevel::Public` and
- // sets the rest of the `use` chain to `AccessLevel::Exported` until
- // we hit the actual exported item.
- let set_import_binding_access_level =
- |this: &mut Self, mut binding: &NameBinding<'a>, mut access_level| {
- while let NameBindingKind::Import { binding: nested_binding, import, .. } =
- binding.kind
- {
- this.set_access_level(import.id, access_level);
- if let ImportKind::Single { additional_ids, .. } = import.kind {
- this.set_access_level(additional_ids.0, access_level);
- this.set_access_level(additional_ids.1, access_level);
- }
-
- access_level = Some(AccessLevel::Exported);
- binding = nested_binding;
- }
- };
-
- let module_level = self.r.access_levels.map.get(&module_id).copied();
- assert!(module_level >= Some(AccessLevel::Exported));
-
- if let Some(exports) = self.r.reexport_map.get(&module_id) {
- let pub_exports = exports
- .iter()
- .filter(|ex| ex.vis == Visibility::Public)
- .cloned()
- .collect::<Vec<_>>();
-
- let module = self.r.get_module(module_id.to_def_id()).unwrap();
- for export in pub_exports.into_iter() {
- if let Some(export_def_id) = export.res.opt_def_id().and_then(|id| id.as_local()) {
- self.set_access_level_def_id(export_def_id, Some(AccessLevel::Exported));
- }
-
- if let Some(ns) = export.res.ns() {
- let key = BindingKey { ident: export.ident, ns, disambiguator: 0 };
- let name_res = self.r.resolution(module, key);
- if let Some(binding) = name_res.borrow().binding() {
- set_import_binding_access_level(self, binding, module_level)
- }
- }
- }
- }
- }
-
- /// Sets the access level of the `LocalDefId` corresponding to the given `NodeId`.
- /// This function will panic if the `NodeId` does not have a `LocalDefId`
- fn set_access_level(
- &mut self,
- node_id: NodeId,
- access_level: Option<AccessLevel>,
- ) -> Option<AccessLevel> {
- self.set_access_level_def_id(self.r.local_def_id(node_id), access_level)
- }
-
- fn set_access_level_def_id(
- &mut self,
- def_id: LocalDefId,
- access_level: Option<AccessLevel>,
- ) -> Option<AccessLevel> {
- let old_level = self.r.access_levels.map.get(&def_id).copied();
- if old_level < access_level {
- self.r.access_levels.map.insert(def_id, access_level.unwrap());
- self.changed = true;
- access_level
- } else {
- old_level
- }
- }
-}
-
-impl<'r, 'ast> Visitor<'ast> for AccessLevelsVisitor<'ast, 'r> {
- fn visit_item(&mut self, item: &'ast ast::Item) {
- let inherited_item_level = match item.kind {
- // Resolved in rustc_privacy when types are available
- ast::ItemKind::Impl(..) => return,
-
- // Only exported `macro_rules!` items are public, but they always are
- ast::ItemKind::MacroDef(ref macro_def) if macro_def.macro_rules => {
- let is_macro_export =
- item.attrs.iter().any(|attr| attr.has_name(sym::macro_export));
- if is_macro_export { Some(AccessLevel::Public) } else { None }
- }
-
- // Foreign modules inherit level from parents.
- ast::ItemKind::ForeignMod(..) => self.prev_level,
-
- // Other `pub` items inherit levels from parents.
- ast::ItemKind::ExternCrate(..)
- | ast::ItemKind::Use(..)
- | ast::ItemKind::Static(..)
- | ast::ItemKind::Const(..)
- | ast::ItemKind::Fn(..)
- | ast::ItemKind::Mod(..)
- | ast::ItemKind::GlobalAsm(..)
- | ast::ItemKind::TyAlias(..)
- | ast::ItemKind::Enum(..)
- | ast::ItemKind::Struct(..)
- | ast::ItemKind::Union(..)
- | ast::ItemKind::Trait(..)
- | ast::ItemKind::TraitAlias(..)
- | ast::ItemKind::MacroDef(..) => {
- if item.vis.kind.is_pub() {
- self.prev_level
- } else {
- None
- }
- }
-
- // Should be unreachable at this stage
- ast::ItemKind::MacCall(..) => panic!(
- "ast::ItemKind::MacCall encountered, this should not anymore appear at this stage"
- ),
- };
-
- let access_level = self.set_access_level(item.id, inherited_item_level);
-
- // Set access level of nested items.
- // If it's a mod, also make the visitor walk all of its items
- match item.kind {
- ast::ItemKind::Mod(..) => {
- if access_level.is_some() {
- self.set_exports_access_level(self.r.local_def_id(item.id));
- }
-
- let orig_level = std::mem::replace(&mut self.prev_level, access_level);
- visit::walk_item(self, item);
- self.prev_level = orig_level;
- }
-
- ast::ItemKind::ForeignMod(ForeignMod { ref items, .. }) => {
- for nested in items {
- if nested.vis.kind.is_pub() {
- self.set_access_level(nested.id, access_level);
- }
- }
- }
- ast::ItemKind::Enum(EnumDef { ref variants }, _) => {
- for variant in variants {
- let variant_level = self.set_access_level(variant.id, access_level);
- if let Some(ctor_id) = variant.data.ctor_id() {
- self.set_access_level(ctor_id, access_level);
- }
-
- for field in variant.data.fields() {
- self.set_access_level(field.id, variant_level);
- }
- }
- }
- ast::ItemKind::Struct(ref def, _) | ast::ItemKind::Union(ref def, _) => {
- if let Some(ctor_id) = def.ctor_id() {
- self.set_access_level(ctor_id, access_level);
- }
-
- for field in def.fields() {
- if field.vis.kind.is_pub() {
- self.set_access_level(field.id, access_level);
- }
- }
- }
- ast::ItemKind::Trait(ref trait_kind) => {
- for nested in trait_kind.items.iter() {
- self.set_access_level(nested.id, access_level);
- }
- }
-
- ast::ItemKind::ExternCrate(..)
- | ast::ItemKind::Use(..)
- | ast::ItemKind::Static(..)
- | ast::ItemKind::Const(..)
- | ast::ItemKind::GlobalAsm(..)
- | ast::ItemKind::TyAlias(..)
- | ast::ItemKind::TraitAlias(..)
- | ast::ItemKind::MacroDef(..)
- | ast::ItemKind::Fn(..) => return,
-
- // Unreachable kinds
- ast::ItemKind::Impl(..) | ast::ItemKind::MacCall(..) => unreachable!(),
- }
- }
-}
diff --git a/compiler/rustc_resolve/src/build_reduced_graph.rs b/compiler/rustc_resolve/src/build_reduced_graph.rs
index e955a1798..a17793ecd 100644
--- a/compiler/rustc_resolve/src/build_reduced_graph.rs
+++ b/compiler/rustc_resolve/src/build_reduced_graph.rs
@@ -36,28 +36,29 @@ use rustc_span::Span;
use std::cell::Cell;
use std::ptr;
-use tracing::debug;
type Res = def::Res<NodeId>;
-impl<'a> ToNameBinding<'a> for (Module<'a>, ty::Visibility, Span, LocalExpnId) {
+impl<'a, Id: Into<DefId>> ToNameBinding<'a>
+ for (Module<'a>, ty::Visibility<Id>, Span, LocalExpnId)
+{
fn to_name_binding(self, arenas: &'a ResolverArenas<'a>) -> &'a NameBinding<'a> {
arenas.alloc_name_binding(NameBinding {
kind: NameBindingKind::Module(self.0),
ambiguity: None,
- vis: self.1,
+ vis: self.1.to_def_id(),
span: self.2,
expansion: self.3,
})
}
}
-impl<'a> ToNameBinding<'a> for (Res, ty::Visibility, Span, LocalExpnId) {
+impl<'a, Id: Into<DefId>> ToNameBinding<'a> for (Res, ty::Visibility<Id>, Span, LocalExpnId) {
fn to_name_binding(self, arenas: &'a ResolverArenas<'a>) -> &'a NameBinding<'a> {
arenas.alloc_name_binding(NameBinding {
kind: NameBindingKind::Res(self.0, false),
ambiguity: None,
- vis: self.1,
+ vis: self.1.to_def_id(),
span: self.2,
expansion: self.3,
})
@@ -71,7 +72,7 @@ impl<'a> ToNameBinding<'a> for (Res, ty::Visibility, Span, LocalExpnId, IsMacroE
arenas.alloc_name_binding(NameBinding {
kind: NameBindingKind::Res(self.0, true),
ambiguity: None,
- vis: self.1,
+ vis: self.1.to_def_id(),
span: self.2,
expansion: self.3,
})
@@ -261,7 +262,9 @@ impl<'a, 'b> BuildReducedGraphVisitor<'a, 'b> {
self.r.visibilities[&def_id.expect_local()]
}
// Otherwise, the visibility is restricted to the nearest parent `mod` item.
- _ => ty::Visibility::Restricted(self.parent_scope.module.nearest_parent_mod()),
+ _ => ty::Visibility::Restricted(
+ self.parent_scope.module.nearest_parent_mod().expect_local(),
+ ),
})
}
ast::VisibilityKind::Restricted { ref path, id, .. } => {
@@ -312,7 +315,7 @@ impl<'a, 'b> BuildReducedGraphVisitor<'a, 'b> {
} else {
let vis = ty::Visibility::Restricted(res.def_id());
if self.r.is_accessible_from(vis, parent_scope.module) {
- Ok(vis)
+ Ok(vis.expect_local())
} else {
Err(VisResolutionError::AncestorOnly(path.span))
}
@@ -323,7 +326,7 @@ impl<'a, 'b> BuildReducedGraphVisitor<'a, 'b> {
}
PathResult::Module(..) => Err(VisResolutionError::ModuleOnly(path.span)),
PathResult::NonModule(partial_res) => {
- expected_found_error(partial_res.base_res())
+ expected_found_error(partial_res.expect_full_res())
}
PathResult::Failed { span, label, suggestion, .. } => {
Err(VisResolutionError::FailedToResolve(span, label, suggestion))
@@ -380,7 +383,7 @@ impl<'a, 'b> BuildReducedGraphVisitor<'a, 'b> {
has_attributes: !item.attrs.is_empty(),
root_span,
root_id,
- vis: Cell::new(vis),
+ vis: Cell::new(Some(vis)),
used: Cell::new(false),
});
@@ -588,7 +591,7 @@ impl<'a, 'b> BuildReducedGraphVisitor<'a, 'b> {
ast::UseTreeKind::Glob => {
let kind = ImportKind::Glob {
is_prelude: self.r.session.contains_name(&item.attrs, sym::prelude_import),
- max_vis: Cell::new(ty::Visibility::Invisible),
+ max_vis: Cell::new(None),
};
self.r.visibilities.insert(self.r.local_def_id(id), vis);
self.add_import(prefix, kind, use_tree.span, id, item, root_span, item.id, vis);
@@ -650,7 +653,9 @@ impl<'a, 'b> BuildReducedGraphVisitor<'a, 'b> {
true,
// The whole `use` item
item,
- ty::Visibility::Invisible,
+ ty::Visibility::Restricted(
+ self.parent_scope.module.nearest_parent_mod().expect_local(),
+ ),
root_span,
);
}
@@ -766,10 +771,10 @@ impl<'a, 'b> BuildReducedGraphVisitor<'a, 'b> {
if let Some(ctor_node_id) = vdata.ctor_id() {
// If the structure is marked as non_exhaustive then lower the visibility
// to within the crate.
- let mut ctor_vis = if vis == ty::Visibility::Public
+ let mut ctor_vis = if vis.is_public()
&& self.r.session.contains_name(&item.attrs, sym::non_exhaustive)
{
- ty::Visibility::Restricted(CRATE_DEF_ID.to_def_id())
+ ty::Visibility::Restricted(CRATE_DEF_ID)
} else {
vis
};
@@ -786,7 +791,7 @@ impl<'a, 'b> BuildReducedGraphVisitor<'a, 'b> {
if ctor_vis.is_at_least(field_vis, &*self.r) {
ctor_vis = field_vis;
}
- ret_fields.push(field_vis);
+ ret_fields.push(field_vis.to_def_id());
}
let ctor_def_id = self.r.local_def_id(ctor_node_id);
let ctor_res = Res::Def(
@@ -796,7 +801,9 @@ impl<'a, 'b> BuildReducedGraphVisitor<'a, 'b> {
self.r.define(parent, ident, ValueNS, (ctor_res, ctor_vis, sp, expansion));
self.r.visibilities.insert(ctor_def_id, ctor_vis);
- self.r.struct_constructors.insert(def_id, (ctor_res, ctor_vis, ret_fields));
+ self.r
+ .struct_constructors
+ .insert(def_id, (ctor_res, ctor_vis.to_def_id(), ret_fields));
}
}
@@ -868,8 +875,8 @@ impl<'a, 'b> BuildReducedGraphVisitor<'a, 'b> {
}
.map(|module| {
let used = self.process_macro_use_imports(item, module);
- let binding =
- (module, ty::Visibility::Public, sp, expansion).to_name_binding(self.r.arenas);
+ let vis = ty::Visibility::<LocalDefId>::Public;
+ let binding = (module, vis, sp, expansion).to_name_binding(self.r.arenas);
(used, Some(ModuleOrUniformRoot::Module(module)), binding)
})
.unwrap_or((true, None, self.r.dummy_binding));
@@ -885,7 +892,7 @@ impl<'a, 'b> BuildReducedGraphVisitor<'a, 'b> {
root_span: item.span,
span: item.span,
module_path: Vec::new(),
- vis: Cell::new(vis),
+ vis: Cell::new(Some(vis)),
used: Cell::new(used),
});
self.r.potentially_unused_imports.push(import);
@@ -965,6 +972,7 @@ impl<'a, 'b> BuildReducedGraphVisitor<'a, 'b> {
| DefKind::TyAlias
| DefKind::ForeignTy
| DefKind::OpaqueTy
+ | DefKind::ImplTraitPlaceholder
| DefKind::TraitAlias
| DefKind::AssocTy,
_,
@@ -1002,7 +1010,8 @@ impl<'a, 'b> BuildReducedGraphVisitor<'a, 'b> {
_,
)
| Res::Local(..)
- | Res::SelfTy { .. }
+ | Res::SelfTyParam { .. }
+ | Res::SelfTyAlias { .. }
| Res::SelfCtor(..)
| Res::Err => bug!("unexpected resolution: {:?}", res),
}
@@ -1030,7 +1039,7 @@ impl<'a, 'b> BuildReducedGraphVisitor<'a, 'b> {
self.insert_field_names(def_id, field_names);
}
Res::Def(DefKind::AssocFn, def_id) => {
- if cstore.fn_has_self_parameter_untracked(def_id) {
+ if cstore.fn_has_self_parameter_untracked(def_id, self.r.session) {
self.r.has_self.insert(def_id);
}
}
@@ -1118,7 +1127,7 @@ impl<'a, 'b> BuildReducedGraphVisitor<'a, 'b> {
root_span: span,
span,
module_path: Vec::new(),
- vis: Cell::new(ty::Visibility::Restricted(CRATE_DEF_ID.to_def_id())),
+ vis: Cell::new(Some(ty::Visibility::Restricted(CRATE_DEF_ID))),
used: Cell::new(false),
})
};
@@ -1264,7 +1273,7 @@ impl<'a, 'b> BuildReducedGraphVisitor<'a, 'b> {
let vis = if is_macro_export {
ty::Visibility::Public
} else {
- ty::Visibility::Restricted(CRATE_DEF_ID.to_def_id())
+ ty::Visibility::Restricted(CRATE_DEF_ID)
};
let binding = (res, vis, span, expansion).to_name_binding(self.r.arenas);
self.r.set_binding_parent_module(binding, parent_scope.module);
@@ -1295,7 +1304,7 @@ impl<'a, 'b> BuildReducedGraphVisitor<'a, 'b> {
}
_ => self.resolve_visibility(&item.vis),
};
- if vis != ty::Visibility::Public {
+ if !vis.is_public() {
self.insert_unused_macro(ident, def_id, item.id, &rule_spans);
}
self.r.define(module, ident, MacroNS, (res, vis, span, expansion));
@@ -1416,7 +1425,7 @@ impl<'a, 'b> Visitor<'b> for BuildReducedGraphVisitor<'a, 'b> {
}
(DefKind::AssocFn, ValueNS)
}
- AssocItemKind::TyAlias(..) => (DefKind::AssocTy, TypeNS),
+ AssocItemKind::Type(..) => (DefKind::AssocTy, TypeNS),
AssocItemKind::MacCall(_) => bug!(), // handled above
};
@@ -1508,10 +1517,10 @@ impl<'a, 'b> Visitor<'b> for BuildReducedGraphVisitor<'a, 'b> {
self.r.visibilities.insert(def_id, vis);
// If the variant is marked as non_exhaustive then lower the visibility to within the crate.
- let ctor_vis = if vis == ty::Visibility::Public
+ let ctor_vis = if vis.is_public()
&& self.r.session.contains_name(&variant.attrs, sym::non_exhaustive)
{
- ty::Visibility::Restricted(CRATE_DEF_ID.to_def_id())
+ ty::Visibility::Restricted(CRATE_DEF_ID)
} else {
vis
};
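
Throughout this file the resolver now handles `ty::Visibility<Id>` generically over the id type, converting with `to_def_id()`/`expect_local()` at the boundaries, and the old `Visibility::Invisible` state gives way to `Option` and restricted-to-parent forms. A compact sketch of such an id-generic visibility type, with invented `LocalDefId`/`DefId` newtypes:

// Invented id newtypes standing in for rustc's LocalDefId / DefId.
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
struct LocalDefId(u32);

#[derive(Clone, Copy, PartialEq, Eq, Debug)]
struct DefId(u32);

impl From<LocalDefId> for DefId {
    fn from(local: LocalDefId) -> DefId {
        DefId(local.0)
    }
}

// Visibility generic over the id it restricts to, as in `ty::Visibility<Id>`.
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
enum Visibility<Id = DefId> {
    Public,
    Restricted(Id),
}

impl<Id: Into<DefId> + Copy> Visibility<Id> {
    fn is_public(&self) -> bool {
        matches!(self, Visibility::Public)
    }

    // Widen a crate-local visibility into the cross-crate form.
    fn to_def_id(self) -> Visibility<DefId> {
        match self {
            Visibility::Public => Visibility::Public,
            Visibility::Restricted(id) => Visibility::Restricted(id.into()),
        }
    }
}

fn main() {
    let vis: Visibility<LocalDefId> = Visibility::Restricted(LocalDefId(7));
    assert!(!vis.is_public());
    assert_eq!(vis.to_def_id(), Visibility::Restricted(DefId(7)));
}
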
diff --git a/compiler/rustc_resolve/src/check_unused.rs b/compiler/rustc_resolve/src/check_unused.rs
index f2f6f1d89..01c3801f2 100644
--- a/compiler/rustc_resolve/src/check_unused.rs
+++ b/compiler/rustc_resolve/src/check_unused.rs
@@ -6,7 +6,7 @@
// `use` items.
//
// Unused trait imports can't be checked until the method resolution. We save
-// candidates here, and do the actual check in librustc_typeck/check_unused.rs.
+// candidates here, and do the actual check in rustc_hir_analysis/check_unused.rs.
//
// Checking for unused imports is split into three steps:
//
@@ -227,7 +227,7 @@ impl Resolver<'_> {
for import in self.potentially_unused_imports.iter() {
match import.kind {
_ if import.used.get()
- || import.vis.get().is_public()
+ || import.expect_vis().is_public()
|| import.span.is_dummy() =>
{
if let ImportKind::MacroUse = import.kind {
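
`import.vis.get()` becomes `import.expect_vis()` because, as the build_reduced_graph changes show, an import's visibility is now stored as a `Cell<Option<…>>` and may still be unresolved. A tiny sketch of such an accessor; only the `vis` field and the `expect_vis` name follow the diff, the rest is invented:

use std::cell::Cell;

#[derive(Clone, Copy, PartialEq, Eq, Debug)]
enum Visibility {
    Public,
    Restricted(u32),
}

impl Visibility {
    fn is_public(self) -> bool {
        self == Visibility::Public
    }
}

struct Import {
    // `None` until the import's visibility has been resolved.
    vis: Cell<Option<Visibility>>,
    used: Cell<bool>,
}

impl Import {
    // Panics if called before resolution, mirroring `expect_vis`.
    fn expect_vis(&self) -> Visibility {
        self.vis.get().expect("encountered unresolved import visibility")
    }
}

fn main() {
    let import = Import { vis: Cell::new(Some(Visibility::Public)), used: Cell::new(false) };
    assert!(import.expect_vis().is_public());
    assert!(!import.used.get());
}
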
diff --git a/compiler/rustc_resolve/src/def_collector.rs b/compiler/rustc_resolve/src/def_collector.rs
index 66641fb2c..d36e0f61d 100644
--- a/compiler/rustc_resolve/src/def_collector.rs
+++ b/compiler/rustc_resolve/src/def_collector.rs
@@ -1,6 +1,5 @@
use crate::{ImplTraitContext, Resolver};
use rustc_ast::visit::{self, FnKind};
-use rustc_ast::walk_list;
use rustc_ast::*;
use rustc_expand::expand::AstFragment;
use rustc_hir::def_id::LocalDefId;
@@ -8,7 +7,6 @@ use rustc_hir::definitions::*;
use rustc_span::hygiene::LocalExpnId;
use rustc_span::symbol::sym;
use rustc_span::Span;
-use tracing::debug;
pub(crate) fn collect_definitions(
resolver: &mut Resolver<'_>,
@@ -149,13 +147,18 @@ impl<'a, 'b> visit::Visitor<'a> for DefCollector<'a, 'b> {
self.with_parent(return_impl_trait_id, |this| {
this.visit_fn_ret_ty(&sig.decl.output)
});
- let closure_def = self.create_def(closure_id, DefPathData::ClosureExpr, span);
- self.with_parent(closure_def, |this| walk_list!(this, visit_block, body));
+ // If this async fn has no body (i.e. it's an async fn signature in a trait)
+ // then the closure_def will never be used, and we should avoid generating a
+ // def-id for it.
+ if let Some(body) = body {
+ let closure_def = self.create_def(closure_id, DefPathData::ClosureExpr, span);
+ self.with_parent(closure_def, |this| this.visit_block(body));
+ }
return;
}
}
- visit::walk_fn(self, fn_kind, span);
+ visit::walk_fn(self, fn_kind);
}
fn visit_use_tree(&mut self, use_tree: &'a UseTree, id: NodeId, _nested: bool) {
@@ -236,7 +239,7 @@ impl<'a, 'b> visit::Visitor<'a> for DefCollector<'a, 'b> {
fn visit_assoc_item(&mut self, i: &'a AssocItem, ctxt: visit::AssocCtxt) {
let def_data = match &i.kind {
AssocItemKind::Fn(..) | AssocItemKind::Const(..) => DefPathData::ValueNs(i.ident.name),
- AssocItemKind::TyAlias(..) => DefPathData::TypeNs(i.ident.name),
+ AssocItemKind::Type(..) => DefPathData::TypeNs(i.ident.name),
AssocItemKind::MacCall(..) => return self.visit_macro_invoc(i.id),
};
@@ -282,21 +285,6 @@ impl<'a, 'b> visit::Visitor<'a> for DefCollector<'a, 'b> {
fn visit_ty(&mut self, ty: &'a Ty) {
match ty.kind {
TyKind::MacCall(..) => self.visit_macro_invoc(ty.id),
- TyKind::ImplTrait(node_id, _) => {
- let parent_def = match self.impl_trait_context {
- ImplTraitContext::Universal(item_def) => self.resolver.create_def(
- item_def,
- node_id,
- DefPathData::ImplTrait,
- self.expansion.to_expn_id(),
- ty.span,
- ),
- ImplTraitContext::Existential => {
- self.create_def(node_id, DefPathData::ImplTrait, ty.span)
- }
- };
- self.with_parent(parent_def, |this| visit::walk_ty(this, ty))
- }
_ => visit::walk_ty(self, ty),
}
}
diff --git a/compiler/rustc_resolve/src/diagnostics.rs b/compiler/rustc_resolve/src/diagnostics.rs
index 8839fb1a1..5d868ebec 100644
--- a/compiler/rustc_resolve/src/diagnostics.rs
+++ b/compiler/rustc_resolve/src/diagnostics.rs
@@ -24,8 +24,7 @@ use rustc_span::hygiene::MacroKind;
use rustc_span::lev_distance::find_best_match_for_name;
use rustc_span::source_map::SourceMap;
use rustc_span::symbol::{kw, sym, Ident, Symbol};
-use rustc_span::{BytePos, Span};
-use tracing::debug;
+use rustc_span::{BytePos, Span, SyntaxContext};
use crate::imports::{Import, ImportKind, ImportResolver};
use crate::late::{PatternSource, Rib};
@@ -48,6 +47,7 @@ pub(crate) type Suggestion = (Vec<(Span, String)>, String, Applicability);
/// similarly named label and whether or not it is reachable.
pub(crate) type LabelSuggestion = (Ident, bool);
+#[derive(Debug)]
pub(crate) enum SuggestionTarget {
/// The target has a similar name as the name used by the programmer (probably a typo)
SimilarlyNamed,
@@ -55,6 +55,7 @@ pub(crate) enum SuggestionTarget {
SingleItem,
}
+#[derive(Debug)]
pub(crate) struct TypoSuggestion {
pub candidate: Symbol,
pub res: Res,
@@ -71,6 +72,7 @@ impl TypoSuggestion {
}
/// A free importable items suggested in case of resolution failure.
+#[derive(Debug, Clone)]
pub(crate) struct ImportSuggestion {
pub did: Option<DefId>,
pub descr: &'static str,
@@ -121,7 +123,7 @@ impl<'a> Resolver<'a> {
}
fn report_with_use_injections(&mut self, krate: &Crate) {
- for UseError { mut err, candidates, def_id, instead, suggestion, path } in
+ for UseError { mut err, candidates, def_id, instead, suggestion, path, is_call } in
self.use_injections.drain(..)
{
let (span, found_use) = if let Some(def_id) = def_id.as_local() {
@@ -129,6 +131,7 @@ impl<'a> Resolver<'a> {
} else {
(None, FoundUse::No)
};
+
if !candidates.is_empty() {
show_candidates(
&self.session,
@@ -138,13 +141,18 @@ impl<'a> Resolver<'a> {
&candidates,
if instead { Instead::Yes } else { Instead::No },
found_use,
- IsPattern::No,
+ DiagnosticMode::Normal,
path,
);
+ err.emit();
} else if let Some((span, msg, sugg, appl)) = suggestion {
err.span_suggestion(span, msg, sugg, appl);
+ err.emit();
+ } else if let [segment] = path.as_slice() && is_call {
+ err.stash(segment.ident.span, rustc_errors::StashKey::CallIntoMethod);
+ } else {
+ err.emit();
}
- err.emit();
}
}
@@ -476,11 +484,12 @@ impl<'a> Resolver<'a> {
module: Module<'a>,
names: &mut Vec<TypoSuggestion>,
filter_fn: &impl Fn(Res) -> bool,
+ ctxt: Option<SyntaxContext>,
) {
for (key, resolution) in self.resolutions(module).borrow().iter() {
if let Some(binding) = resolution.borrow().binding {
let res = binding.res();
- if filter_fn(res) {
+ if filter_fn(res) && ctxt.map_or(true, |ctxt| ctxt == key.ident.span.ctxt()) {
names.push(TypoSuggestion::typo_from_res(key.ident.name, res));
}
}
@@ -511,65 +520,57 @@ impl<'a> Resolver<'a> {
err.span_label(span, "use of generic parameter from outer function");
let sm = self.session.source_map();
- match outer_res {
- Res::SelfTy { trait_: maybe_trait_defid, alias_to: maybe_impl_defid } => {
- if let Some(impl_span) =
- maybe_impl_defid.and_then(|(def_id, _)| self.opt_span(def_id))
- {
+ let def_id = match outer_res {
+ Res::SelfTyParam { .. } => {
+ err.span_label(span, "can't use `Self` here");
+ return err;
+ }
+ Res::SelfTyAlias { alias_to: def_id, .. } => {
+ if let Some(impl_span) = self.opt_span(def_id) {
err.span_label(
reduce_impl_span_to_impl_keyword(sm, impl_span),
"`Self` type implicitly declared here, by this `impl`",
);
}
- match (maybe_trait_defid, maybe_impl_defid) {
- (Some(_), None) => {
- err.span_label(span, "can't use `Self` here");
- }
- (_, Some(_)) => {
- err.span_label(span, "use a type here instead");
- }
- (None, None) => bug!("`impl` without trait nor type?"),
- }
+ err.span_label(span, "use a type here instead");
return err;
}
Res::Def(DefKind::TyParam, def_id) => {
if let Some(span) = self.opt_span(def_id) {
err.span_label(span, "type parameter from outer function");
}
+ def_id
}
Res::Def(DefKind::ConstParam, def_id) => {
if let Some(span) = self.opt_span(def_id) {
err.span_label(span, "const parameter from outer function");
}
+ def_id
}
_ => {
bug!(
- "GenericParamsFromOuterFunction should only be used with Res::SelfTy, \
- DefKind::TyParam or DefKind::ConstParam"
+ "GenericParamsFromOuterFunction should only be used with \
+ Res::SelfTyParam, Res::SelfTyAlias, DefKind::TyParam or \
+ DefKind::ConstParam"
);
}
- }
+ };
- if has_generic_params == HasGenericParams::Yes {
+ if let HasGenericParams::Yes(span) = has_generic_params {
// Try to retrieve the span of the function signature and generate a new
// message with a local type or const parameter.
let sugg_msg = "try using a local generic parameter instead";
- if let Some((sugg_span, snippet)) = sm.generate_local_type_param_snippet(span) {
- // Suggest the modification to the user
- err.span_suggestion(
- sugg_span,
- sugg_msg,
- snippet,
- Applicability::MachineApplicable,
- );
- } else if let Some(sp) = sm.generate_fn_name_span(span) {
- err.span_label(
- sp,
- "try adding a local generic parameter in this method instead",
- );
+ let name = self.opt_name(def_id).unwrap_or(sym::T);
+ let (span, snippet) = if span.is_empty() {
+ let snippet = format!("<{}>", name);
+ (span, snippet)
} else {
- err.help("try using a local generic parameter instead");
- }
+ let span = sm.span_through_char(span, '<').shrink_to_hi();
+ let snippet = format!("{}, ", name);
+ (span, snippet)
+ };
+ // Suggest the modification to the user
+ err.span_suggestion(span, sugg_msg, snippet, Applicability::MaybeIncorrect);
}
err
@@ -700,7 +701,7 @@ impl<'a> Resolver<'a> {
&import_suggestions,
Instead::No,
FoundUse::Yes,
- IsPattern::Yes,
+ DiagnosticMode::Pattern,
vec![],
);
}
@@ -1050,6 +1051,19 @@ impl<'a> Resolver<'a> {
err.span_label(trait_item_span, "item in trait");
err
}
+ ResolutionError::TraitImplDuplicate { name, trait_item_span, old_span } => {
+ let mut err = struct_span_err!(
+ self.session,
+ span,
+ E0201,
+ "duplicate definitions with name `{}`:",
+ name,
+ );
+ err.span_label(old_span, "previous definition here");
+ err.span_label(trait_item_span, "item in trait");
+ err.span_label(span, "duplicate definition");
+ err
+ }
ResolutionError::InvalidAsmSym => {
let mut err = self.session.struct_span_err(span, "invalid `sym` operand");
err.span_label(span, "is a local variable");
@@ -1170,20 +1184,10 @@ impl<'a> Resolver<'a> {
Scope::CrateRoot => {
let root_ident = Ident::new(kw::PathRoot, ident.span);
let root_module = this.resolve_crate_root(root_ident);
- this.add_module_candidates(root_module, &mut suggestions, filter_fn);
+ this.add_module_candidates(root_module, &mut suggestions, filter_fn, None);
}
Scope::Module(module, _) => {
- this.add_module_candidates(module, &mut suggestions, filter_fn);
- }
- Scope::RegisteredAttrs => {
- let res = Res::NonMacroAttr(NonMacroAttrKind::Registered);
- if filter_fn(res) {
- suggestions.extend(
- this.registered_attrs
- .iter()
- .map(|ident| TypoSuggestion::typo_from_res(ident.name, res)),
- );
- }
+ this.add_module_candidates(module, &mut suggestions, filter_fn, None);
}
Scope::MacroUsePrelude => {
suggestions.extend(this.macro_use_prelude.iter().filter_map(
@@ -1220,7 +1224,7 @@ impl<'a> Resolver<'a> {
Scope::StdLibPrelude => {
if let Some(prelude) = this.prelude {
let mut tmp_suggestions = Vec::new();
- this.add_module_candidates(prelude, &mut tmp_suggestions, filter_fn);
+ this.add_module_candidates(prelude, &mut tmp_suggestions, filter_fn, None);
suggestions.extend(
tmp_suggestions
.into_iter()
@@ -1407,7 +1411,7 @@ impl<'a> Resolver<'a> {
// If only some candidates are accessible, take just them
if !candidates.iter().all(|v: &ImportSuggestion| !v.accessible) {
- candidates = candidates.into_iter().filter(|x| x.accessible).collect();
+ candidates.retain(|x| x.accessible)
}
candidates
@@ -1493,7 +1497,7 @@ impl<'a> Resolver<'a> {
&import_suggestions,
Instead::No,
FoundUse::Yes,
- IsPattern::No,
+ DiagnosticMode::Normal,
vec![],
);
@@ -2454,12 +2458,34 @@ enum FoundUse {
No,
}
-/// Whether a binding is part of a pattern or an expression. Used for diagnostics.
-enum IsPattern {
+/// Whether a binding is part of a pattern or a use statement. Used for diagnostics.
+enum DiagnosticMode {
+ Normal,
/// The binding is part of a pattern
- Yes,
- /// The binding is part of an expression
- No,
+ Pattern,
+ /// The binding is part of a use statement
+ Import,
+}
+
+pub(crate) fn import_candidates(
+ session: &Session,
+ source_span: &IndexVec<LocalDefId, Span>,
+ err: &mut Diagnostic,
+ // This is `None` if all placement locations are inside expansions
+ use_placement_span: Option<Span>,
+ candidates: &[ImportSuggestion],
+) {
+ show_candidates(
+ session,
+ source_span,
+ err,
+ use_placement_span,
+ candidates,
+ Instead::Yes,
+ FoundUse::Yes,
+ DiagnosticMode::Import,
+ vec![],
+ );
}
/// When an entity with a given name is not available in scope, we search for
@@ -2474,7 +2500,7 @@ fn show_candidates(
candidates: &[ImportSuggestion],
instead: Instead,
found_use: FoundUse,
- is_pattern: IsPattern,
+ mode: DiagnosticMode,
path: Vec<Segment>,
) {
if candidates.is_empty() {
@@ -2509,7 +2535,7 @@ fn show_candidates(
};
let instead = if let Instead::Yes = instead { " instead" } else { "" };
- let mut msg = if let IsPattern::Yes = is_pattern {
+ let mut msg = if let DiagnosticMode::Pattern = mode {
format!(
"if you meant to match on {}{}{}, use the full path in the pattern",
kind, instead, name
@@ -2522,19 +2548,25 @@ fn show_candidates(
err.note(note);
}
- if let (IsPattern::Yes, Some(span)) = (is_pattern, use_placement_span) {
- err.span_suggestions(
- span,
- &msg,
- accessible_path_strings.into_iter().map(|a| a.0),
- Applicability::MaybeIncorrect,
- );
- } else if let Some(span) = use_placement_span {
+ if let Some(span) = use_placement_span {
+ let add_use = match mode {
+ DiagnosticMode::Pattern => {
+ err.span_suggestions(
+ span,
+ &msg,
+ accessible_path_strings.into_iter().map(|a| a.0),
+ Applicability::MaybeIncorrect,
+ );
+ return;
+ }
+ DiagnosticMode::Import => "",
+ DiagnosticMode::Normal => "use ",
+ };
for candidate in &mut accessible_path_strings {
// produce an additional newline to separate the new use statement
// from the directly following item.
let additional_newline = if let FoundUse::Yes = found_use { "" } else { "\n" };
- candidate.0 = format!("use {};\n{}", &candidate.0, additional_newline);
+ candidate.0 = format!("{}{};\n{}", add_use, &candidate.0, additional_newline);
}
err.span_suggestions(
@@ -2544,12 +2576,15 @@ fn show_candidates(
Applicability::MaybeIncorrect,
);
if let [first, .., last] = &path[..] {
- err.span_suggestion_verbose(
- first.ident.span.until(last.ident.span),
- &format!("if you import `{}`, refer to it directly", last.ident),
- "",
- Applicability::Unspecified,
- );
+ let sp = first.ident.span.until(last.ident.span);
+ if sp.can_be_used_for_suggestions() {
+ err.span_suggestion_verbose(
+ sp,
+ &format!("if you import `{}`, refer to it directly", last.ident),
+ "",
+ Applicability::Unspecified,
+ );
+ }
}
} else {
msg.push(':');
@@ -2561,11 +2596,14 @@ fn show_candidates(
err.note(&msg);
}
- } else {
+ } else if !matches!(mode, DiagnosticMode::Import) {
assert!(!inaccessible_path_strings.is_empty());
- let prefix =
- if let IsPattern::Yes = is_pattern { "you might have meant to match on " } else { "" };
+ let prefix = if let DiagnosticMode::Pattern = mode {
+ "you might have meant to match on "
+ } else {
+ ""
+ };
if inaccessible_path_strings.len() == 1 {
let (name, descr, def_id, note) = &inaccessible_path_strings[0];
let msg = format!(
@@ -2573,7 +2611,7 @@ fn show_candidates(
prefix,
descr,
name,
- if let IsPattern::Yes = is_pattern { ", which" } else { "" }
+ if let DiagnosticMode::Pattern = mode { ", which" } else { "" }
);
if let Some(local_def_id) = def_id.and_then(|did| did.as_local()) {
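
`show_candidates` trades the two-state `IsPattern` flag for a three-way `DiagnosticMode`, and whether a suggestion is prefixed with `use ` now depends on whether it targets a normal path, a pattern, or an existing `use` statement (the new `import_candidates` entry point). A small sketch of that mode-dependent snippet construction; the helper and its return values are simplified, not the real diagnostic API:

// Mirrors the shape of DiagnosticMode in diagnostics.rs.
enum DiagnosticMode {
    Normal,
    Pattern,
    Import,
}

// Build the replacement snippet for one import candidate. In Pattern mode the
// real code suggests the full path in place; here we only show how the
// `use ` prefix is chosen.
fn candidate_snippet(mode: &DiagnosticMode, path: &str, found_use: bool) -> String {
    let add_use = match mode {
        DiagnosticMode::Pattern => return format!("{path}::"),
        DiagnosticMode::Import => "",
        DiagnosticMode::Normal => "use ",
    };
    let extra_newline = if found_use { "" } else { "\n" };
    format!("{add_use}{path};\n{extra_newline}")
}

fn main() {
    assert_eq!(
        candidate_snippet(&DiagnosticMode::Normal, "std::collections::HashMap", true),
        "use std::collections::HashMap;\n"
    );
    assert_eq!(
        candidate_snippet(&DiagnosticMode::Import, "std::collections::HashMap", false),
        "std::collections::HashMap;\n\n"
    );
}
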
diff --git a/compiler/rustc_resolve/src/effective_visibilities.rs b/compiler/rustc_resolve/src/effective_visibilities.rs
new file mode 100644
index 000000000..c40669ac9
--- /dev/null
+++ b/compiler/rustc_resolve/src/effective_visibilities.rs
@@ -0,0 +1,188 @@
+use crate::{ImportKind, NameBindingKind, Resolver};
+use rustc_ast::ast;
+use rustc_ast::visit;
+use rustc_ast::visit::Visitor;
+use rustc_ast::Crate;
+use rustc_ast::EnumDef;
+use rustc_hir::def_id::LocalDefId;
+use rustc_hir::def_id::CRATE_DEF_ID;
+use rustc_middle::middle::privacy::Level;
+use rustc_middle::ty::{DefIdTree, Visibility};
+
+pub struct EffectiveVisibilitiesVisitor<'r, 'a> {
+ r: &'r mut Resolver<'a>,
+ changed: bool,
+}
+
+impl<'r, 'a> EffectiveVisibilitiesVisitor<'r, 'a> {
+ /// Fills the `Resolver::effective_visibilities` table with public & exported items
+ /// For now, this doesn't resolve macros (FIXME) and cannot resolve Impl, as we
+ /// need access to a TyCtxt for that.
+ pub fn compute_effective_visibilities<'c>(r: &'r mut Resolver<'a>, krate: &'c Crate) {
+ let mut visitor = EffectiveVisibilitiesVisitor { r, changed: false };
+
+ visitor.update(CRATE_DEF_ID, Visibility::Public, CRATE_DEF_ID, Level::Direct);
+ visitor.set_bindings_effective_visibilities(CRATE_DEF_ID);
+
+ while visitor.changed {
+ visitor.reset();
+ visit::walk_crate(&mut visitor, krate);
+ }
+
+ info!("resolve::effective_visibilities: {:#?}", r.effective_visibilities);
+ }
+
+ fn reset(&mut self) {
+ self.changed = false;
+ }
+
+ /// Update effective visibilities of bindings in the given module,
+ /// including their whole reexport chains.
+ fn set_bindings_effective_visibilities(&mut self, module_id: LocalDefId) {
+ assert!(self.r.module_map.contains_key(&&module_id.to_def_id()));
+ let module = self.r.get_module(module_id.to_def_id()).unwrap();
+ let resolutions = self.r.resolutions(module);
+
+ for (_, name_resolution) in resolutions.borrow().iter() {
+ if let Some(mut binding) = name_resolution.borrow().binding() && !binding.is_ambiguity() {
+ // Set the given effective visibility level to `Level::Direct` and
+ // sets the rest of the `use` chain to `Level::Reexported` until
+ // we hit the actual exported item.
+
+ // FIXME: tag and is_public() condition should be removed, but assertions occur.
+ let tag = if binding.is_import() { Level::Reexported } else { Level::Direct };
+ if binding.vis.is_public() {
+ let mut prev_parent_id = module_id;
+ let mut level = Level::Direct;
+ while let NameBindingKind::Import { binding: nested_binding, import, .. } =
+ binding.kind
+ {
+ let mut update = |node_id| self.update(
+ self.r.local_def_id(node_id),
+ binding.vis.expect_local(),
+ prev_parent_id,
+ level,
+ );
+ // In theory all the import IDs have individual visibilities and effective
+                        // visibilities, but in practice these IDs go straight to HIR where all
+ // their few uses assume that their (effective) visibility applies to the
+ // whole syntactic `use` item. So we update them all to the maximum value
+ // among the potential individual effective visibilities. Maybe HIR for
+ // imports shouldn't use three IDs at all.
+ update(import.id);
+ if let ImportKind::Single { additional_ids, .. } = import.kind {
+ update(additional_ids.0);
+ update(additional_ids.1);
+ }
+
+ level = Level::Reexported;
+ prev_parent_id = self.r.local_def_id(import.id);
+ binding = nested_binding;
+ }
+ }
+
+ if let Some(def_id) = binding.res().opt_def_id().and_then(|id| id.as_local()) {
+ self.update(def_id, binding.vis.expect_local(), module_id, tag);
+ }
+ }
+ }
+ }
+
+ fn update(
+ &mut self,
+ def_id: LocalDefId,
+ nominal_vis: Visibility,
+ parent_id: LocalDefId,
+ tag: Level,
+ ) {
+ let module_id = self
+ .r
+ .get_nearest_non_block_module(def_id.to_def_id())
+ .nearest_parent_mod()
+ .expect_local();
+ if nominal_vis == Visibility::Restricted(module_id)
+ || self.r.visibilities[&parent_id] == Visibility::Restricted(module_id)
+ {
+ return;
+ }
+ let mut effective_visibilities = std::mem::take(&mut self.r.effective_visibilities);
+ self.changed |= effective_visibilities.update(
+ def_id,
+ nominal_vis,
+ || Visibility::Restricted(module_id),
+ parent_id,
+ tag,
+ &*self.r,
+ );
+ self.r.effective_visibilities = effective_visibilities;
+ }
+}
+
+impl<'r, 'ast> Visitor<'ast> for EffectiveVisibilitiesVisitor<'ast, 'r> {
+ fn visit_item(&mut self, item: &'ast ast::Item) {
+ let def_id = self.r.local_def_id(item.id);
+ // Update effective visibilities of nested items.
+ // If it's a mod, also make the visitor walk all of its items
+ match item.kind {
+ // Resolved in rustc_privacy when types are available
+ ast::ItemKind::Impl(..) => return,
+
+ // Should be unreachable at this stage
+ ast::ItemKind::MacCall(..) => panic!(
+                "ast::ItemKind::MacCall encountered, this should no longer appear at this stage"
+ ),
+
+ // Foreign modules inherit level from parents.
+ ast::ItemKind::ForeignMod(..) => {
+ let parent_id = self.r.local_parent(def_id);
+ self.update(def_id, Visibility::Public, parent_id, Level::Direct);
+ }
+
+ // Only exported `macro_rules!` items are public, but they always are
+ ast::ItemKind::MacroDef(ref macro_def) if macro_def.macro_rules => {
+ let parent_id = self.r.local_parent(def_id);
+ let vis = self.r.visibilities[&def_id];
+ self.update(def_id, vis, parent_id, Level::Direct);
+ }
+
+ ast::ItemKind::Mod(..) => {
+ self.set_bindings_effective_visibilities(def_id);
+ visit::walk_item(self, item);
+ }
+
+ ast::ItemKind::Enum(EnumDef { ref variants }, _) => {
+ self.set_bindings_effective_visibilities(def_id);
+ for variant in variants {
+ let variant_def_id = self.r.local_def_id(variant.id);
+ for field in variant.data.fields() {
+ let field_def_id = self.r.local_def_id(field.id);
+ let vis = self.r.visibilities[&field_def_id];
+ self.update(field_def_id, vis, variant_def_id, Level::Direct);
+ }
+ }
+ }
+
+ ast::ItemKind::Struct(ref def, _) | ast::ItemKind::Union(ref def, _) => {
+ for field in def.fields() {
+ let field_def_id = self.r.local_def_id(field.id);
+ let vis = self.r.visibilities[&field_def_id];
+ self.update(field_def_id, vis, def_id, Level::Direct);
+ }
+ }
+
+ ast::ItemKind::Trait(..) => {
+ self.set_bindings_effective_visibilities(def_id);
+ }
+
+ ast::ItemKind::ExternCrate(..)
+ | ast::ItemKind::Use(..)
+ | ast::ItemKind::Static(..)
+ | ast::ItemKind::Const(..)
+ | ast::ItemKind::GlobalAsm(..)
+ | ast::ItemKind::TyAlias(..)
+ | ast::ItemKind::TraitAlias(..)
+ | ast::ItemKind::MacroDef(..)
+ | ast::ItemKind::Fn(..) => return,
+ }
+ }
+}
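
The deleted `AccessLevelsVisitor` and this replacement both run the same fixed-point loop: walk the crate, raise levels through `update`, and repeat while a pass still changes something. A self-contained model of that iteration over a toy item graph; the `Level` lattice is reduced to two values and the reachability rule is simplified:

use std::collections::HashMap;

#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Debug)]
enum Level {
    Reexported,
    Direct,
}

struct Item {
    id: u32,
    public: bool,
    children: Vec<u32>,
}

// Raise `id` to at least `level`; report whether anything changed, which is
// what drives the `while visitor.changed` loop in the resolver.
fn update(levels: &mut HashMap<u32, Level>, id: u32, level: Level) -> bool {
    match levels.get(&id) {
        Some(old) if *old >= level => false,
        _ => {
            levels.insert(id, level);
            true
        }
    }
}

fn compute(items: &HashMap<u32, Item>, root: u32) -> HashMap<u32, Level> {
    let mut levels = HashMap::new();
    levels.insert(root, Level::Direct);
    let mut changed = true;
    while changed {
        changed = false;
        for item in items.values() {
            // Only items that are already reachable propagate to children.
            if !levels.contains_key(&item.id) {
                continue;
            }
            for &child in &item.children {
                let public = items.get(&child).map_or(false, |c| c.public);
                if public {
                    changed |= update(&mut levels, child, Level::Direct);
                }
            }
        }
    }
    levels
}

fn main() {
    let mut items = HashMap::new();
    items.insert(0, Item { id: 0, public: true, children: vec![1, 2] });
    items.insert(1, Item { id: 1, public: true, children: vec![3] });
    items.insert(2, Item { id: 2, public: false, children: vec![] });
    items.insert(3, Item { id: 3, public: true, children: vec![] });

    let levels = compute(&items, 0);
    assert_eq!(levels.get(&3), Some(&Level::Direct));
    assert_eq!(levels.get(&2), None);
}
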
diff --git a/compiler/rustc_resolve/src/ident.rs b/compiler/rustc_resolve/src/ident.rs
index 6e6782881..e0542d547 100644
--- a/compiler/rustc_resolve/src/ident.rs
+++ b/compiler/rustc_resolve/src/ident.rs
@@ -6,6 +6,7 @@ use rustc_middle::bug;
use rustc_middle::ty;
use rustc_session::lint::builtin::PROC_MACRO_DERIVE_RESOLUTION_FALLBACK;
use rustc_session::lint::BuiltinLintDiagnostics;
+use rustc_span::def_id::LocalDefId;
use rustc_span::edition::Edition;
use rustc_span::hygiene::{ExpnId, ExpnKind, LocalExpnId, MacroKind, SyntaxContext};
use rustc_span::symbol::{kw, Ident};
@@ -13,7 +14,9 @@ use rustc_span::{Span, DUMMY_SP};
use std::ptr;
-use crate::late::{ConstantItemKind, HasGenericParams, PathSource, Rib, RibKind};
+use crate::late::{
+ ConstantHasGenerics, ConstantItemKind, HasGenericParams, PathSource, Rib, RibKind,
+};
use crate::macros::{sub_namespace_match, MacroRulesScope};
use crate::{AmbiguityError, AmbiguityErrorMisc, AmbiguityKind, Determinacy, Finalize};
use crate::{ImportKind, LexicalScopeBinding, Module, ModuleKind, ModuleOrUniformRoot};
@@ -24,6 +27,8 @@ use Determinacy::*;
use Namespace::*;
use RibKind::*;
+type Visibility = ty::Visibility<LocalDefId>;
+
impl<'a> Resolver<'a> {
/// A generic scope visitor.
/// Visits scopes in order to resolve some identifier in them or perform other actions.
@@ -125,7 +130,6 @@ impl<'a> Resolver<'a> {
}
Scope::CrateRoot => true,
Scope::Module(..) => true,
- Scope::RegisteredAttrs => use_prelude,
Scope::MacroUsePrelude => use_prelude || rust_2015,
Scope::BuiltinAttrs => true,
Scope::ExternPrelude => use_prelude || is_absolute_path,
@@ -185,12 +189,11 @@ impl<'a> Resolver<'a> {
match ns {
TypeNS => Scope::ExternPrelude,
ValueNS => Scope::StdLibPrelude,
- MacroNS => Scope::RegisteredAttrs,
+ MacroNS => Scope::MacroUsePrelude,
}
}
}
}
- Scope::RegisteredAttrs => Scope::MacroUsePrelude,
Scope::MacroUsePrelude => Scope::StdLibPrelude,
Scope::BuiltinAttrs => break, // nowhere else to search
Scope::ExternPrelude if is_absolute_path => break,
@@ -273,7 +276,7 @@ impl<'a> Resolver<'a> {
///
/// Invariant: This must only be called during main resolution, not during
/// import resolution.
- #[tracing::instrument(level = "debug", skip(self, ribs))]
+ #[instrument(level = "debug", skip(self, ribs))]
pub(crate) fn resolve_ident_in_lexical_scope(
&mut self,
mut ident: Ident,
@@ -367,7 +370,7 @@ impl<'a> Resolver<'a> {
/// expansion and import resolution (perhaps they can be merged in the future).
/// The function is used for resolving initial segments of macro paths (e.g., `foo` in
/// `foo::bar!(); or `foo!();`) and also for import paths on 2018 edition.
- #[tracing::instrument(level = "debug", skip(self, scope_set))]
+ #[instrument(level = "debug", skip(self, scope_set))]
pub(crate) fn early_resolve_ident_in_lexical_scope(
&mut self,
orig_ident: Ident,
@@ -424,8 +427,7 @@ impl<'a> Resolver<'a> {
let ident = Ident::new(orig_ident.name, orig_ident.span.with_ctxt(ctxt));
let ok = |res, span, arenas| {
Ok((
- (res, ty::Visibility::Public, span, LocalExpnId::ROOT)
- .to_name_binding(arenas),
+ (res, Visibility::Public, span, LocalExpnId::ROOT).to_name_binding(arenas),
Flags::empty(),
))
};
@@ -438,7 +440,7 @@ impl<'a> Resolver<'a> {
{
let binding = (
Res::NonMacroAttr(NonMacroAttrKind::DeriveHelper),
- ty::Visibility::Public,
+ Visibility::Public,
attr.span,
expn_id,
)
@@ -554,14 +556,6 @@ impl<'a> Resolver<'a> {
Err((Determinacy::Determined, _)) => Err(Determinacy::Determined),
}
}
- Scope::RegisteredAttrs => match this.registered_attrs.get(&ident).cloned() {
- Some(ident) => ok(
- Res::NonMacroAttr(NonMacroAttrKind::Registered),
- ident.span,
- this.arenas,
- ),
- None => Err(Determinacy::Determined),
- },
Scope::MacroUsePrelude => {
match this.macro_use_prelude.get(&ident.name).cloned() {
Some(binding) => Ok((binding, Flags::MISC_FROM_PRELUDE)),
@@ -716,7 +710,7 @@ impl<'a> Resolver<'a> {
Err(Determinacy::determined(determinacy == Determinacy::Determined || force))
}
- #[tracing::instrument(level = "debug", skip(self))]
+ #[instrument(level = "debug", skip(self))]
pub(crate) fn maybe_resolve_ident_in_module(
&mut self,
module: ModuleOrUniformRoot<'a>,
@@ -728,7 +722,7 @@ impl<'a> Resolver<'a> {
.map_err(|(determinacy, _)| determinacy)
}
- #[tracing::instrument(level = "debug", skip(self))]
+ #[instrument(level = "debug", skip(self))]
pub(crate) fn resolve_ident_in_module(
&mut self,
module: ModuleOrUniformRoot<'a>,
@@ -742,7 +736,7 @@ impl<'a> Resolver<'a> {
.map_err(|(determinacy, _)| determinacy)
}
- #[tracing::instrument(level = "debug", skip(self))]
+ #[instrument(level = "debug", skip(self))]
fn resolve_ident_in_module_ext(
&mut self,
module: ModuleOrUniformRoot<'a>,
@@ -780,7 +774,7 @@ impl<'a> Resolver<'a> {
)
}
- #[tracing::instrument(level = "debug", skip(self))]
+ #[instrument(level = "debug", skip(self))]
fn resolve_ident_in_module_unadjusted(
&mut self,
module: ModuleOrUniformRoot<'a>,
@@ -804,7 +798,7 @@ impl<'a> Resolver<'a> {
/// Attempts to resolve `ident` in namespaces `ns` of `module`.
/// Invariant: if `finalize` is `Some`, expansion and import resolution must be complete.
- #[tracing::instrument(level = "debug", skip(self))]
+ #[instrument(level = "debug", skip(self))]
fn resolve_ident_in_module_unadjusted_ext(
&mut self,
module: ModuleOrUniformRoot<'a>,
@@ -849,9 +843,8 @@ impl<'a> Resolver<'a> {
if ns == TypeNS {
if ident.name == kw::Crate || ident.name == kw::DollarCrate {
let module = self.resolve_crate_root(ident);
- let binding =
- (module, ty::Visibility::Public, module.span, LocalExpnId::ROOT)
- .to_name_binding(self.arenas);
+ let binding = (module, Visibility::Public, module.span, LocalExpnId::ROOT)
+ .to_name_binding(self.arenas);
return Ok(binding);
} else if ident.name == kw::Super || ident.name == kw::SelfLower {
// FIXME: Implement these with renaming requirements so that e.g.
@@ -951,7 +944,10 @@ impl<'a> Resolver<'a> {
// Check if one of the single imports can still define the name;
// if it can, then our result is not determined and can be invalidated.
for single_import in &resolution.single_imports {
- if !self.is_accessible_from(single_import.vis.get(), parent_scope.module) {
+ let Some(import_vis) = single_import.vis.get() else {
+ continue;
+ };
+ if !self.is_accessible_from(import_vis, parent_scope.module) {
continue;
}
let Some(module) = single_import.imported_module.get() else {
@@ -1016,7 +1012,10 @@ impl<'a> Resolver<'a> {
// Check if one of the glob imports can still define the name;
// if it can, then our "no resolution" result is not determined and can be invalidated.
for glob_import in module.globs.borrow().iter() {
- if !self.is_accessible_from(glob_import.vis.get(), parent_scope.module) {
+ let Some(import_vis) = glob_import.vis.get() else {
+ continue;
+ };
+ if !self.is_accessible_from(import_vis, parent_scope.module) {
continue;
}
let module = match glob_import.imported_module.get() {
@@ -1061,7 +1060,7 @@ impl<'a> Resolver<'a> {
}
/// Validate a local resolution (from ribs).
- #[tracing::instrument(level = "debug", skip(self, all_ribs))]
+ #[instrument(level = "debug", skip(self, all_ribs))]
fn validate_res_from_ribs(
&mut self,
rib_index: usize,
@@ -1103,7 +1102,7 @@ impl<'a> Resolver<'a> {
| ForwardGenericParamBanRibKind => {
// Nothing to do. Continue.
}
- ItemRibKind(_) | FnItemRibKind | AssocItemRibKind => {
+ ItemRibKind(_) | AssocItemRibKind => {
// This was an attempt to access an upvar inside a
// named function item. This is not allowed, so we
// report an error.
@@ -1163,15 +1162,15 @@ impl<'a> Resolver<'a> {
return Res::Err;
}
}
- Res::Def(DefKind::TyParam, _) | Res::SelfTy { .. } => {
+ Res::Def(DefKind::TyParam, _) | Res::SelfTyParam { .. } | Res::SelfTyAlias { .. } => {
for rib in ribs {
let has_generic_params: HasGenericParams = match rib.kind {
NormalRibKind
| ClosureOrAsyncRibKind
- | AssocItemRibKind
| ModuleRibKind(..)
| MacroDefinition(..)
| InlineAsmSymRibKind
+ | AssocItemRibKind
| ForwardGenericParamBanRibKind => {
// Nothing to do. Continue.
continue;
@@ -1180,12 +1179,24 @@ impl<'a> Resolver<'a> {
ConstantItemRibKind(trivial, _) => {
let features = self.session.features_untracked();
// HACK(min_const_generics): We currently only allow `N` or `{ N }`.
- if !(trivial == HasGenericParams::Yes || features.generic_const_exprs) {
- // HACK(min_const_generics): If we encounter `Self` in an anonymous constant
- // we can't easily tell if it's generic at this stage, so we instead remember
- // this and then enforce the self type to be concrete later on.
- if let Res::SelfTy { trait_, alias_to: Some((def, _)) } = res {
- res = Res::SelfTy { trait_, alias_to: Some((def, true)) }
+ if !(trivial == ConstantHasGenerics::Yes
+ || features.generic_const_exprs)
+ {
+ // HACK(min_const_generics): If we encounter `Self` in an anonymous
+ // constant we can't easily tell if it's generic at this stage, so
+ // we instead remember this and then enforce the self type to be
+ // concrete later on.
+ if let Res::SelfTyAlias {
+ alias_to: def,
+ forbid_generic: _,
+ is_trait_impl,
+ } = res
+ {
+ res = Res::SelfTyAlias {
+ alias_to: def,
+ forbid_generic: true,
+ is_trait_impl,
+ }
} else {
if let Some(span) = finalize {
self.report_error(
@@ -1207,7 +1218,6 @@ impl<'a> Resolver<'a> {
// This was an attempt to use a type parameter outside its scope.
ItemRibKind(has_generic_params) => has_generic_params,
- FnItemRibKind => HasGenericParams::Yes,
ConstParamTyRibKind => {
if let Some(span) = finalize {
self.report_error(
@@ -1232,28 +1242,22 @@ impl<'a> Resolver<'a> {
}
}
Res::Def(DefKind::ConstParam, _) => {
- let mut ribs = ribs.iter().peekable();
- if let Some(Rib { kind: FnItemRibKind, .. }) = ribs.peek() {
- // When declaring const parameters inside function signatures, the first rib
- // is always a `FnItemRibKind`. In this case, we can skip it, to avoid it
- // (spuriously) conflicting with the const param.
- ribs.next();
- }
-
for rib in ribs {
let has_generic_params = match rib.kind {
NormalRibKind
| ClosureOrAsyncRibKind
- | AssocItemRibKind
| ModuleRibKind(..)
| MacroDefinition(..)
| InlineAsmSymRibKind
+ | AssocItemRibKind
| ForwardGenericParamBanRibKind => continue,
ConstantItemRibKind(trivial, _) => {
let features = self.session.features_untracked();
// HACK(min_const_generics): We currently only allow `N` or `{ N }`.
- if !(trivial == HasGenericParams::Yes || features.generic_const_exprs) {
+ if !(trivial == ConstantHasGenerics::Yes
+ || features.generic_const_exprs)
+ {
if let Some(span) = finalize {
self.report_error(
span,
@@ -1272,7 +1276,6 @@ impl<'a> Resolver<'a> {
}
ItemRibKind(has_generic_params) => has_generic_params,
- FnItemRibKind => HasGenericParams::Yes,
ConstParamTyRibKind => {
if let Some(span) = finalize {
self.report_error(
@@ -1302,7 +1305,7 @@ impl<'a> Resolver<'a> {
res
}
- #[tracing::instrument(level = "debug", skip(self))]
+ #[instrument(level = "debug", skip(self))]
pub(crate) fn maybe_resolve_path(
&mut self,
path: &[Segment],
@@ -1312,7 +1315,7 @@ impl<'a> Resolver<'a> {
self.resolve_path_with_ribs(path, opt_ns, parent_scope, None, None, None)
}
- #[tracing::instrument(level = "debug", skip(self))]
+ #[instrument(level = "debug", skip(self))]
pub(crate) fn resolve_path(
&mut self,
path: &[Segment],
diff --git a/compiler/rustc_resolve/src/imports.rs b/compiler/rustc_resolve/src/imports.rs
index b89273990..f2cc50c19 100644
--- a/compiler/rustc_resolve/src/imports.rs
+++ b/compiler/rustc_resolve/src/imports.rs
@@ -1,9 +1,9 @@
//! A bunch of methods and structures more or less related to resolving imports.
-use crate::diagnostics::Suggestion;
+use crate::diagnostics::{import_candidates, Suggestion};
use crate::Determinacy::{self, *};
-use crate::Namespace::{MacroNS, TypeNS};
-use crate::{module_to_string, names_to_string};
+use crate::Namespace::*;
+use crate::{module_to_string, names_to_string, ImportSuggestion};
use crate::{AmbiguityKind, BindingKey, ModuleKind, ResolutionError, Resolver, Segment};
use crate::{Finalize, Module, ModuleOrUniformRoot, ParentScope, PerNS, ScopeSet};
use crate::{NameBinding, NameBindingKind, PathResult};
@@ -23,15 +23,13 @@ use rustc_span::lev_distance::find_best_match_for_name;
use rustc_span::symbol::{kw, Ident, Symbol};
use rustc_span::Span;
-use tracing::*;
-
use std::cell::Cell;
use std::{mem, ptr};
type Res = def::Res<NodeId>;
/// Contains data for specific kinds of imports.
-#[derive(Clone, Debug)]
+#[derive(Clone)]
pub enum ImportKind<'a> {
Single {
/// `source` in `use prefix::source as target`.
@@ -52,8 +50,8 @@ pub enum ImportKind<'a> {
},
Glob {
is_prelude: bool,
- max_vis: Cell<ty::Visibility>, // The visibility of the greatest re-export.
- // n.b. `max_vis` is only used in `finalize_import` to check for re-export errors.
+ max_vis: Cell<Option<ty::Visibility>>, // The visibility of the greatest re-export.
+ // n.b. `max_vis` is only used in `finalize_import` to check for re-export errors.
},
ExternCrate {
source: Option<Symbol>,
@@ -62,6 +60,44 @@ pub enum ImportKind<'a> {
MacroUse,
}
+/// Manually implement `Debug` for `ImportKind` because the `source/target_bindings`
+/// contain `Cell`s which can introduce infinite loops while printing.
+impl<'a> std::fmt::Debug for ImportKind<'a> {
+ fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+ use ImportKind::*;
+ match self {
+ Single {
+ ref source,
+ ref target,
+ ref type_ns_only,
+ ref nested,
+ ref additional_ids,
+ // Ignore the following to avoid an infinite loop while printing.
+ source_bindings: _,
+ target_bindings: _,
+ } => f
+ .debug_struct("Single")
+ .field("source", source)
+ .field("target", target)
+ .field("type_ns_only", type_ns_only)
+ .field("nested", nested)
+ .field("additional_ids", additional_ids)
+ .finish_non_exhaustive(),
+ Glob { ref is_prelude, ref max_vis } => f
+ .debug_struct("Glob")
+ .field("is_prelude", is_prelude)
+ .field("max_vis", max_vis)
+ .finish(),
+ ExternCrate { ref source, ref target } => f
+ .debug_struct("ExternCrate")
+ .field("source", source)
+ .field("target", target)
+ .finish(),
+ MacroUse => f.debug_struct("MacroUse").finish(),
+ }
+ }
+}
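The hand-written `Debug` impl above exists because the skipped binding fields are `Cell`s that can point back into the import graph, so a derived `Debug` could recurse forever. A rough standalone sketch of the same pattern, using a hypothetical `Node` type rather than the resolver's own structures:

use std::cell::Cell;
use std::fmt;

struct Node<'a> {
    name: String,
    // A back-reference that may (indirectly) reach `self` again;
    // printing it naively could loop forever.
    parent: Cell<Option<&'a Node<'a>>>,
}

impl<'a> fmt::Debug for Node<'a> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("Node")
            .field("name", &self.name)
            // Deliberately omit `parent`; `finish_non_exhaustive` prints `..`
            // so readers can see that fields were skipped.
            .finish_non_exhaustive()
    }
}

fn main() {
    let root = Node { name: "root".into(), parent: Cell::new(None) };
    let child = Node { name: "child".into(), parent: Cell::new(Some(&root)) };
    assert!(child.parent.get().is_some());
    println!("{:?}", child); // Node { name: "child", .. }
}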
+
/// One import.
#[derive(Debug, Clone)]
pub(crate) struct Import<'a> {
@@ -106,7 +142,7 @@ pub(crate) struct Import<'a> {
pub module_path: Vec<Segment>,
/// The resolution of `module_path`.
pub imported_module: Cell<Option<ModuleOrUniformRoot<'a>>>,
- pub vis: Cell<ty::Visibility>,
+ pub vis: Cell<Option<ty::Visibility>>,
pub used: Cell<bool>,
}
@@ -121,6 +157,10 @@ impl<'a> Import<'a> {
_ => false,
}
}
+
+ pub(crate) fn expect_vis(&self) -> ty::Visibility {
+ self.vis.get().expect("encountered cleared import visibility")
+ }
}
/// Records information about the resolution of a name in a namespace of a module.
@@ -161,7 +201,7 @@ fn pub_use_of_private_extern_crate_hack(import: &Import<'_>, binding: &NameBindi
import: Import { kind: ImportKind::ExternCrate { .. }, .. },
..
},
- ) => import.vis.get().is_public(),
+ ) => import.expect_vis().is_public(),
_ => false,
}
}
@@ -174,17 +214,20 @@ impl<'a> Resolver<'a> {
binding: &'a NameBinding<'a>,
import: &'a Import<'a>,
) -> &'a NameBinding<'a> {
- let vis = if binding.vis.is_at_least(import.vis.get(), self)
+ let import_vis = import.expect_vis().to_def_id();
+ let vis = if binding.vis.is_at_least(import_vis, self)
|| pub_use_of_private_extern_crate_hack(import, binding)
{
- import.vis.get()
+ import_vis
} else {
binding.vis
};
if let ImportKind::Glob { ref max_vis, .. } = import.kind {
- if vis == import.vis.get() || vis.is_at_least(max_vis.get(), self) {
- max_vis.set(vis)
+ if vis == import_vis
+ || max_vis.get().map_or(true, |max_vis| vis.is_at_least(max_vis, self))
+ {
+ max_vis.set(Some(vis.expect_local()))
}
}
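With `max_vis` now a `Cell<Option<_>>`, the `map_or(true, ..)` above means: an empty tracker accepts any candidate, otherwise only one at least as visible as the current maximum. A minimal sketch of that running-maximum idiom, with plain integers standing in for visibilities:

use std::cell::Cell;

/// Tracks the largest value seen so far; stays `None` until the first update.
struct MaxTracker {
    max: Cell<Option<u32>>,
}

impl MaxTracker {
    fn update(&self, value: u32) {
        // An empty tracker accepts anything; otherwise only a strictly larger value.
        if self.max.get().map_or(true, |current| value > current) {
            self.max.set(Some(value));
        }
    }
}

fn main() {
    let tracker = MaxTracker { max: Cell::new(None) };
    assert_eq!(tracker.max.get(), None); // nothing recorded yet
    tracker.update(3);
    tracker.update(1);
    tracker.update(7);
    assert_eq!(tracker.max.get(), Some(7));
}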
@@ -209,7 +252,7 @@ impl<'a> Resolver<'a> {
self.set_binding_parent_module(binding, module);
self.update_resolution(module, key, |this, resolution| {
if let Some(old_binding) = resolution.binding {
- if res == Res::Err {
+ if res == Res::Err && old_binding.res() != Res::Err {
// Do not override real bindings with `Res::Err`s from error recovery.
return Ok(());
}
@@ -338,6 +381,7 @@ struct UnresolvedImportError {
label: Option<String>,
note: Option<String>,
suggestion: Option<Suggestion>,
+ candidate: Option<Vec<ImportSuggestion>>,
}
pub struct ImportResolver<'a, 'b> {
@@ -429,6 +473,7 @@ impl<'a, 'b> ImportResolver<'a, 'b> {
label: None,
note: None,
suggestion: None,
+ candidate: None,
};
if path.contains("::") {
errors.push((path, err))
@@ -479,6 +524,16 @@ impl<'a, 'b> ImportResolver<'a, 'b> {
}
diag.multipart_suggestion(&msg, suggestions, applicability);
}
+
+ if let Some(candidate) = &err.candidate {
+ import_candidates(
+ self.r.session,
+ &self.r.source_span,
+ &mut diag,
+ Some(err.span),
+ &candidate,
+ )
+ }
}
diag.emit();
@@ -498,7 +553,7 @@ impl<'a, 'b> ImportResolver<'a, 'b> {
} else {
// For better failure detection, pretend that the import will
// not define any names while resolving its module path.
- let orig_vis = import.vis.replace(ty::Visibility::Invisible);
+ let orig_vis = import.vis.take();
let path_res =
self.r.maybe_resolve_path(&import.module_path, None, &import.parent_scope);
import.vis.set(orig_vis);
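The `replace(ty::Visibility::Invisible)` calls become `take()` here: with the visibility stored as `Cell<Option<_>>`, clearing it to `None` is how an import "pretends" not to define any names while its own path is resolved, and `set(orig_vis)` restores it afterwards (with `expect_vis` asserting it is present everywhere else). A small sketch of that take-then-restore pattern, using a plain integer in place of a visibility:

use std::cell::Cell;

struct Import {
    vis: Cell<Option<u8>>, // stand-in for `Cell<Option<Visibility>>`
}

/// Runs `speculate` while `import.vis` is cleared, then restores it.
fn with_cleared_vis<R>(import: &Import, speculate: impl FnOnce() -> R) -> R {
    let orig_vis = import.vis.take(); // leaves `None` behind
    let result = speculate();
    import.vis.set(orig_vis); // put back whatever was there before
    result
}

fn main() {
    let import = Import { vis: Cell::new(Some(3)) };
    let seen_during = with_cleared_vis(&import, || import.vis.get());
    assert_eq!(seen_during, None); // invisible while speculating
    assert_eq!(import.vis.get(), Some(3)); // restored afterwards
}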
@@ -533,7 +588,7 @@ impl<'a, 'b> ImportResolver<'a, 'b> {
if let Err(Undetermined) = source_bindings[ns].get() {
// For better failure detection, pretend that the import will
// not define any names while resolving its module path.
- let orig_vis = import.vis.replace(ty::Visibility::Invisible);
+ let orig_vis = import.vis.take();
let binding = this.resolve_ident_in_module(
module,
source,
@@ -582,7 +637,7 @@ impl<'a, 'b> ImportResolver<'a, 'b> {
/// Optionally returns an unresolved import error. This error is buffered and used to
/// consolidate multiple unresolved import errors into a single diagnostic.
fn finalize_import(&mut self, import: &'b Import<'b>) -> Option<UnresolvedImportError> {
- let orig_vis = import.vis.replace(ty::Visibility::Invisible);
+ let orig_vis = import.vis.take();
let ignore_binding = match &import.kind {
ImportKind::Single { target_bindings, .. } => target_bindings[TypeNS].get(),
_ => None,
@@ -596,6 +651,7 @@ impl<'a, 'b> ImportResolver<'a, 'b> {
Some(finalize),
ignore_binding,
);
+
let no_ambiguity = self.r.ambiguity_errors.len() == prev_ambiguity_errors_len;
import.vis.set(orig_vis);
let module = match path_res {
@@ -638,12 +694,14 @@ impl<'a, 'b> ImportResolver<'a, 'b> {
String::from("a similar path exists"),
Applicability::MaybeIncorrect,
)),
+ candidate: None,
},
None => UnresolvedImportError {
span,
label: Some(label),
note: None,
suggestion,
+ candidate: None,
},
};
return Some(err);
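`UnresolvedImportError` gains an optional `candidate` list here so that suggested alternative paths can be attached when the buffered per-import failures are later rendered as one consolidated diagnostic. A hedged sketch of that "buffer now, report once" shape, with invented names rather than the resolver's diagnostics API:

/// One buffered failure, collected while resolving imports.
struct UnresolvedImport {
    path: String,
    label: Option<String>,
    note: Option<String>,
    /// Alternative paths that might have been meant, if any were found.
    candidates: Option<Vec<String>>,
}

/// Renders all buffered failures as a single consolidated report.
fn render_report(errors: &[UnresolvedImport]) -> String {
    let mut out = format!("unresolved imports: {}\n", errors.len());
    for err in errors {
        out.push_str(&format!("error: cannot resolve `{}`\n", err.path));
        if let Some(label) = &err.label {
            out.push_str(&format!("  label: {label}\n"));
        }
        if let Some(note) = &err.note {
            out.push_str(&format!("  note: {note}\n"));
        }
        if let Some(candidates) = &err.candidates {
            out.push_str(&format!("  help: consider importing {}\n", candidates.join(" or ")));
        }
    }
    out
}

fn main() {
    let errors = vec![UnresolvedImport {
        path: "crate::foo::Bar".into(),
        label: Some("no `Bar` in `foo`".into()),
        note: None,
        candidates: Some(vec!["crate::baz::Bar".into()]),
    }];
    print!("{}", render_report(&errors));
}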
@@ -686,12 +744,13 @@ impl<'a, 'b> ImportResolver<'a, 'b> {
label: Some(String::from("cannot glob-import a module into itself")),
note: None,
suggestion: None,
+ candidate: None,
});
}
}
- if !is_prelude &&
- max_vis.get() != ty::Visibility::Invisible && // Allow empty globs.
- !max_vis.get().is_at_least(import.vis.get(), &*self.r)
+ if !is_prelude
+ && let Some(max_vis) = max_vis.get()
+ && !max_vis.is_at_least(import.expect_vis(), &*self.r)
{
let msg = "glob import doesn't reexport anything because no candidate is public enough";
self.r.lint_buffer.buffer_lint(UNUSED_IMPORTS, import.id, import.span, msg);
@@ -704,7 +763,7 @@ impl<'a, 'b> ImportResolver<'a, 'b> {
let mut all_ns_err = true;
self.r.per_ns(|this, ns| {
if !type_ns_only || ns == TypeNS {
- let orig_vis = import.vis.replace(ty::Visibility::Invisible);
+ let orig_vis = import.vis.take();
let binding = this.resolve_ident_in_module(
module,
ident,
@@ -851,11 +910,19 @@ impl<'a, 'b> ImportResolver<'a, 'b> {
}
};
+ let parent_suggestion =
+ self.r.lookup_import_candidates(ident, TypeNS, &import.parent_scope, |_| true);
+
Some(UnresolvedImportError {
span: import.span,
label: Some(label),
note,
suggestion,
+ candidate: if !parent_suggestion.is_empty() {
+ Some(parent_suggestion)
+ } else {
+ None
+ },
})
} else {
// `resolve_ident_in_module` reported a privacy error.
@@ -868,8 +935,7 @@ impl<'a, 'b> ImportResolver<'a, 'b> {
let mut crate_private_reexport = false;
self.r.per_ns(|this, ns| {
if let Ok(binding) = source_bindings[ns].get() {
- let vis = import.vis.get();
- if !binding.vis.is_at_least(vis, &*this) {
+ if !binding.vis.is_at_least(import.expect_vis(), &*this) {
reexport_error = Some((ns, binding));
if let ty::Visibility::Restricted(binding_def_id) = binding.vis {
if binding_def_id.is_top_level_module() {
@@ -1091,24 +1157,15 @@ impl<'a, 'b> ImportResolver<'a, 'b> {
if let Some(def_id) = module.opt_def_id() {
let mut reexports = Vec::new();
- module.for_each_child(self.r, |_, ident, _, binding| {
- // FIXME: Consider changing the binding inserted by `#[macro_export] macro_rules`
- // into the crate root to actual `NameBindingKind::Import`.
- if binding.is_import()
- || matches!(binding.kind, NameBindingKind::Res(_, _is_macro_export @ true))
- {
- let res = binding.res().expect_non_local();
- // Ambiguous imports are treated as errors at this point and are
- // not exposed to other crates (see #36837 for more details).
- if res != def::Res::Err && !binding.is_ambiguity() {
- reexports.push(ModChild {
- ident,
- res,
- vis: binding.vis,
- span: binding.span,
- macro_rules: false,
- });
- }
+ module.for_each_child(self.r, |this, ident, _, binding| {
+ if let Some(res) = this.is_reexport(binding) {
+ reexports.push(ModChild {
+ ident,
+ res,
+ vis: binding.vis,
+ span: binding.span,
+ macro_rules: false,
+ });
}
});
diff --git a/compiler/rustc_resolve/src/late.rs b/compiler/rustc_resolve/src/late.rs
index dea3eaecd..00eb768ad 100644
--- a/compiler/rustc_resolve/src/late.rs
+++ b/compiler/rustc_resolve/src/late.rs
@@ -19,8 +19,8 @@ use rustc_data_structures::fx::{FxHashMap, FxHashSet, FxIndexMap};
use rustc_errors::DiagnosticId;
use rustc_hir::def::Namespace::{self, *};
use rustc_hir::def::{self, CtorKind, DefKind, LifetimeRes, PartialRes, PerNS};
-use rustc_hir::def_id::{DefId, LocalDefId, CRATE_DEF_ID};
-use rustc_hir::{PrimTy, TraitCandidate};
+use rustc_hir::def_id::{DefId, LocalDefId, CRATE_DEF_ID, LOCAL_CRATE};
+use rustc_hir::{BindingAnnotation, PrimTy, TraitCandidate};
use rustc_middle::middle::resolve_lifetime::Set1;
use rustc_middle::ty::DefIdTree;
use rustc_middle::{bug, span_bug};
@@ -30,12 +30,11 @@ use rustc_span::{BytePos, Span};
use smallvec::{smallvec, SmallVec};
use rustc_span::source_map::{respan, Spanned};
+use std::assert_matches::debug_assert_matches;
use std::collections::{hash_map::Entry, BTreeSet};
use std::mem::{replace, take};
-use tracing::debug;
mod diagnostics;
-pub(crate) mod lifetimes;
type Res = def::Res<NodeId>;
@@ -51,7 +50,7 @@ use diagnostics::{
#[derive(Copy, Clone, Debug)]
struct BindingInfo {
span: Span,
- binding_mode: BindingMode,
+ annotation: BindingAnnotation,
}
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
@@ -91,13 +90,20 @@ enum PatBoundCtx {
}
/// Does this item (from the item rib scope) allow generic parameters?
-#[derive(Copy, Clone, Debug, Eq, PartialEq)]
+#[derive(Copy, Clone, Debug)]
pub(crate) enum HasGenericParams {
+ Yes(Span),
+ No,
+}
+
+/// May this constant have generics?
+#[derive(Copy, Clone, Debug, Eq, PartialEq)]
+pub(crate) enum ConstantHasGenerics {
Yes,
No,
}
-impl HasGenericParams {
+impl ConstantHasGenerics {
fn force_yes_if(self, b: bool) -> Self {
if b { Self::Yes } else { self }
}
@@ -125,10 +131,6 @@ pub(crate) enum RibKind<'a> {
/// We passed through a closure. Disallow labels.
ClosureOrAsyncRibKind,
- /// We passed through a function definition. Disallow upvars.
- /// Permit only those const parameters that are specified in the function's generics.
- FnItemRibKind,
-
/// We passed through an item scope. Disallow upvars.
ItemRibKind(HasGenericParams),
@@ -136,7 +138,7 @@ pub(crate) enum RibKind<'a> {
///
/// The item may reference generic parameters in trivial constant expressions.
/// All other constants aren't allowed to use generic params at all.
- ConstantItemRibKind(HasGenericParams, Option<(Ident, ConstantItemKind)>),
+ ConstantItemRibKind(ConstantHasGenerics, Option<(Ident, ConstantItemKind)>),
/// We passed through a module.
ModuleRibKind(Module<'a>),
@@ -165,7 +167,6 @@ impl RibKind<'_> {
match self {
NormalRibKind
| ClosureOrAsyncRibKind
- | FnItemRibKind
| ConstantItemRibKind(..)
| ModuleRibKind(_)
| MacroDefinition(_)
@@ -182,7 +183,6 @@ impl RibKind<'_> {
AssocItemRibKind
| ClosureOrAsyncRibKind
- | FnItemRibKind
| ItemRibKind(..)
| ConstantItemRibKind(..)
| ModuleRibKind(..)
@@ -225,22 +225,14 @@ enum LifetimeUseSet {
#[derive(Copy, Clone, Debug)]
enum LifetimeRibKind {
- /// This rib acts as a barrier to forbid reference to lifetimes of a parent item.
- Item,
-
+ // -- Ribs introducing named lifetimes
+ //
/// This rib declares generic parameters.
+ /// Only for this kind can the `LifetimeRib::bindings` field be non-empty.
Generics { binder: NodeId, span: Span, kind: LifetimeBinderKind },
- /// FIXME(const_generics): This patches over an ICE caused by non-'static lifetimes in const
- /// generics. We are disallowing this until we can decide on how we want to handle non-'static
- /// lifetimes in const generics. See issue #74052 for discussion.
- ConstGeneric,
-
- /// Non-static lifetimes are prohibited in anonymous constants under `min_const_generics`.
- /// This function will emit an error if `generic_const_exprs` is not enabled, the body identified by
- /// `body_id` is an anonymous constant and `lifetime_ref` is non-static.
- AnonConst,
-
+ // -- Ribs introducing unnamed lifetimes
+ //
/// Create a new anonymous lifetime parameter and reference it.
///
/// If `report_in_path`, report an error when encountering lifetime elision in a path:
@@ -257,16 +249,31 @@ enum LifetimeRibKind {
/// ```
AnonymousCreateParameter { binder: NodeId, report_in_path: bool },
+ /// Replace all anonymous lifetimes by provided lifetime.
+ Elided(LifetimeRes),
+
+ // -- Barrier ribs that stop lifetime lookup, or continue it but produce an error later.
+ //
/// Give a hard error when either `&` or `'_` is written. Used to
/// rule out things like `where T: Foo<'_>`. Does not imply an
/// error on default object bounds (e.g., `Box<dyn Foo>`).
AnonymousReportError,
- /// Replace all anonymous lifetimes by provided lifetime.
- Elided(LifetimeRes),
-
/// Signal we cannot find which should be the anonymous lifetime.
ElisionFailure,
+
+ /// FIXME(const_generics): This patches over an ICE caused by non-'static lifetimes in const
+ /// generics. We are disallowing this until we can decide on how we want to handle non-'static
+ /// lifetimes in const generics. See issue #74052 for discussion.
+ ConstGeneric,
+
+ /// Non-static lifetimes are prohibited in anonymous constants under `min_const_generics`.
+ /// This function will emit an error if `generic_const_exprs` is not enabled, the body
+ /// identified by `body_id` is an anonymous constant and `lifetime_ref` is non-static.
+ AnonConst,
+
+ /// This rib acts as a barrier to forbid reference to lifetimes of a parent item.
+ Item,
}
#[derive(Copy, Clone, Debug)]
@@ -414,7 +421,8 @@ impl<'a> PathSource<'a> {
| DefKind::ForeignTy,
_,
) | Res::PrimTy(..)
- | Res::SelfTy { .. }
+ | Res::SelfTyParam { .. }
+ | Res::SelfTyAlias { .. }
),
PathSource::Trait(AliasPossibility::No) => matches!(res, Res::Def(DefKind::Trait, _)),
PathSource::Trait(AliasPossibility::Maybe) => {
@@ -448,7 +456,8 @@ impl<'a> PathSource<'a> {
| DefKind::TyAlias
| DefKind::AssocTy,
_,
- ) | Res::SelfTy { .. }
+ ) | Res::SelfTyParam { .. }
+ | Res::SelfTyAlias { .. }
),
PathSource::TraitItem(ns) => match res {
Res::Def(DefKind::AssocConst | DefKind::AssocFn, _) if ns == ValueNS => true,
@@ -516,6 +525,9 @@ struct DiagnosticMetadata<'ast> {
/// Used to detect possible `if let` written without `let` and to provide structured suggestion.
in_if_condition: Option<&'ast Expr>,
+ /// Used to detect a possible new binding written without `let` and to provide a structured suggestion.
+ in_assignment: Option<&'ast Expr>,
+
/// If we are currently in a trait object definition. Used to point at the bounds when
/// encountering a struct or enum.
current_trait_object: Option<&'ast [ast::GenericBound]>,
@@ -557,7 +569,7 @@ struct LateResolutionVisitor<'a, 'b, 'ast> {
/// They will be used to determine the correct lifetime for the fn return type.
/// The `LifetimeElisionCandidate` is used for diagnostics, to suggest introducing named
/// lifetimes.
- lifetime_elision_candidates: Option<FxIndexMap<LifetimeRes, LifetimeElisionCandidate>>,
+ lifetime_elision_candidates: Option<Vec<(LifetimeRes, LifetimeElisionCandidate)>>,
/// The trait that the current context can refer to.
current_trait_ref: Option<(Module<'a>, TraitRef)>,
@@ -629,7 +641,7 @@ impl<'a: 'ast, 'ast> Visitor<'ast> for LateResolutionVisitor<'a, '_, 'ast> {
// Elided lifetime in reference: we resolve as if there was some lifetime `'_` with
// NodeId `ty.id`.
// This span will be used in case of elision failure.
- let span = self.r.session.source_map().next_point(ty.span.shrink_to_lo());
+ let span = self.r.session.source_map().start_point(ty.span);
self.resolve_elided_lifetime(ty.id, span);
visit::walk_ty(self, ty);
}
@@ -640,8 +652,7 @@ impl<'a: 'ast, 'ast> Visitor<'ast> for LateResolutionVisitor<'a, '_, 'ast> {
// Check whether we should interpret this as a bare trait object.
if qself.is_none()
&& let Some(partial_res) = self.r.partial_res_map.get(&ty.id)
- && partial_res.unresolved_segments() == 0
- && let Res::Def(DefKind::Trait | DefKind::TraitAlias, _) = partial_res.base_res()
+ && let Some(Res::Def(DefKind::Trait | DefKind::TraitAlias, _)) = partial_res.full_res()
{
// This path is actually a bare trait object. In case of a bare `Fn`-trait
// object with anonymous lifetimes, we need this rib to correctly place the
@@ -723,7 +734,7 @@ impl<'a: 'ast, 'ast> Visitor<'ast> for LateResolutionVisitor<'a, '_, 'ast> {
self.diagnostic_metadata.current_trait_object = prev;
self.diagnostic_metadata.current_type_path = prev_ty;
}
- fn visit_poly_trait_ref(&mut self, tref: &'ast PolyTraitRef, _: &'ast TraitBoundModifier) {
+ fn visit_poly_trait_ref(&mut self, tref: &'ast PolyTraitRef) {
let span = tref.span.shrink_to_lo().to(tref.trait_ref.path.span.shrink_to_lo());
self.with_generic_param_rib(
&tref.bound_generic_params,
@@ -748,35 +759,31 @@ impl<'a: 'ast, 'ast> Visitor<'ast> for LateResolutionVisitor<'a, '_, 'ast> {
fn visit_foreign_item(&mut self, foreign_item: &'ast ForeignItem) {
match foreign_item.kind {
ForeignItemKind::TyAlias(box TyAlias { ref generics, .. }) => {
- self.with_lifetime_rib(LifetimeRibKind::Item, |this| {
- this.with_generic_param_rib(
- &generics.params,
- ItemRibKind(HasGenericParams::Yes),
- LifetimeRibKind::Generics {
- binder: foreign_item.id,
- kind: LifetimeBinderKind::Item,
- span: generics.span,
- },
- |this| visit::walk_foreign_item(this, foreign_item),
- )
- });
+ self.with_generic_param_rib(
+ &generics.params,
+ ItemRibKind(HasGenericParams::Yes(generics.span)),
+ LifetimeRibKind::Generics {
+ binder: foreign_item.id,
+ kind: LifetimeBinderKind::Item,
+ span: generics.span,
+ },
+ |this| visit::walk_foreign_item(this, foreign_item),
+ );
}
ForeignItemKind::Fn(box Fn { ref generics, .. }) => {
- self.with_lifetime_rib(LifetimeRibKind::Item, |this| {
- this.with_generic_param_rib(
- &generics.params,
- ItemRibKind(HasGenericParams::Yes),
- LifetimeRibKind::Generics {
- binder: foreign_item.id,
- kind: LifetimeBinderKind::Function,
- span: generics.span,
- },
- |this| visit::walk_foreign_item(this, foreign_item),
- )
- });
+ self.with_generic_param_rib(
+ &generics.params,
+ ItemRibKind(HasGenericParams::Yes(generics.span)),
+ LifetimeRibKind::Generics {
+ binder: foreign_item.id,
+ kind: LifetimeBinderKind::Function,
+ span: generics.span,
+ },
+ |this| visit::walk_foreign_item(this, foreign_item),
+ );
}
ForeignItemKind::Static(..) => {
- self.with_item_rib(|this| {
+ self.with_static_rib(|this| {
visit::walk_foreign_item(this, foreign_item);
});
}
@@ -786,7 +793,8 @@ impl<'a: 'ast, 'ast> Visitor<'ast> for LateResolutionVisitor<'a, '_, 'ast> {
}
}
fn visit_fn(&mut self, fn_kind: FnKind<'ast>, sp: Span, fn_id: NodeId) {
- let rib_kind = match fn_kind {
+ let previous_value = self.diagnostic_metadata.current_function;
+ match fn_kind {
// Bail if the function is foreign, and thus cannot validly have
// a body, or if there's no body for some other reason.
FnKind::Fn(FnCtxt::Foreign, _, sig, _, generics, _)
@@ -804,25 +812,28 @@ impl<'a: 'ast, 'ast> Visitor<'ast> for LateResolutionVisitor<'a, '_, 'ast> {
sig.decl.has_self(),
sig.decl.inputs.iter().map(|Param { ty, .. }| (None, &**ty)),
&sig.decl.output,
- )
+ );
+
+ this.record_lifetime_params_for_async(
+ fn_id,
+ sig.header.asyncness.opt_return_id(),
+ );
},
);
return;
}
- FnKind::Fn(FnCtxt::Free, ..) => FnItemRibKind,
- FnKind::Fn(FnCtxt::Assoc(_), ..) => NormalRibKind,
- FnKind::Closure(..) => ClosureOrAsyncRibKind,
+ FnKind::Fn(..) => {
+ self.diagnostic_metadata.current_function = Some((fn_kind, sp));
+ }
+ // Do not update `current_function` for closures: it suggests `self` parameters.
+ FnKind::Closure(..) => {}
};
- let previous_value = self.diagnostic_metadata.current_function;
- if matches!(fn_kind, FnKind::Fn(..)) {
- self.diagnostic_metadata.current_function = Some((fn_kind, sp));
- }
debug!("(resolving function) entering function");
// Create a value rib for the function.
- self.with_rib(ValueNS, rib_kind, |this| {
+ self.with_rib(ValueNS, ClosureOrAsyncRibKind, |this| {
// Create a label rib for the function.
- this.with_label_rib(FnItemRibKind, |this| {
+ this.with_label_rib(ClosureOrAsyncRibKind, |this| {
match fn_kind {
FnKind::Fn(_, _, sig, _, generics, body) => {
this.visit_generics(generics);
@@ -848,41 +859,7 @@ impl<'a: 'ast, 'ast> Visitor<'ast> for LateResolutionVisitor<'a, '_, 'ast> {
},
);
- // Construct the list of in-scope lifetime parameters for async lowering.
- // We include all lifetime parameters, either named or "Fresh".
- // The order of those parameters does not matter, as long as it is
- // deterministic.
- if let Some(async_node_id) = async_node_id {
- let mut extra_lifetime_params = this
- .r
- .extra_lifetime_params_map
- .get(&fn_id)
- .cloned()
- .unwrap_or_default();
- for rib in this.lifetime_ribs.iter().rev() {
- extra_lifetime_params.extend(
- rib.bindings
- .iter()
- .map(|(&ident, &(node_id, res))| (ident, node_id, res)),
- );
- match rib.kind {
- LifetimeRibKind::Item => break,
- LifetimeRibKind::AnonymousCreateParameter {
- binder, ..
- } => {
- if let Some(earlier_fresh) =
- this.r.extra_lifetime_params_map.get(&binder)
- {
- extra_lifetime_params.extend(earlier_fresh);
- }
- }
- _ => {}
- }
- }
- this.r
- .extra_lifetime_params_map
- .insert(async_node_id, extra_lifetime_params);
- }
+ this.record_lifetime_params_for_async(fn_id, async_node_id);
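The block removed above, now behind the single `record_lifetime_params_for_async` call, collects every lifetime parameter in scope for async lowering by walking the lifetime ribs outwards until an `Item` barrier, in a deterministic order. A detached sketch of that kind of rib walk, using plain strings instead of the resolver's rib types:

fn main() {
    // Outermost rib first, innermost last; each rib lists the lifetimes it introduces.
    let ribs: Vec<(&str, Vec<&str>)> = vec![
        ("item", vec![]),               // barrier: lifetimes of parent items stay out of scope
        ("generics", vec!["'a", "'b"]),
        ("anon", vec!["'fresh"]),
    ];

    // Walk from the innermost rib outwards, collecting every lifetime seen,
    // and stop at the first `item` barrier.
    let mut in_scope = Vec::new();
    for (kind, bindings) in ribs.iter().rev() {
        in_scope.extend(bindings.iter().copied());
        if *kind == "item" {
            break;
        }
    }

    // The order is deterministic: innermost rib first, declaration order within a rib.
    assert_eq!(in_scope, vec!["'fresh", "'a", "'b"]);
}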
if let Some(body) = body {
// Ignore errors in function bodies if this is rustdoc
@@ -995,7 +972,7 @@ impl<'a: 'ast, 'ast> Visitor<'ast> for LateResolutionVisitor<'a, '_, 'ast> {
// non-trivial constants this doesn't matter.
self.with_constant_rib(
IsRepeatExpr::No,
- HasGenericParams::Yes,
+ ConstantHasGenerics::Yes,
None,
|this| {
this.smart_resolve_path(
@@ -1031,7 +1008,7 @@ impl<'a: 'ast, 'ast> Visitor<'ast> for LateResolutionVisitor<'a, '_, 'ast> {
if let Some(ref gen_args) = constraint.gen_args {
// Forbid anonymous lifetimes in GAT parameters until proper semantics are decided.
self.with_lifetime_rib(LifetimeRibKind::AnonymousReportError, |this| {
- this.visit_generic_args(gen_args.span(), gen_args)
+ this.visit_generic_args(gen_args)
});
}
match constraint.kind {
@@ -1045,10 +1022,10 @@ impl<'a: 'ast, 'ast> Visitor<'ast> for LateResolutionVisitor<'a, '_, 'ast> {
}
}
- fn visit_path_segment(&mut self, path_span: Span, path_segment: &'ast PathSegment) {
+ fn visit_path_segment(&mut self, path_segment: &'ast PathSegment) {
if let Some(ref args) = path_segment.args {
match &**args {
- GenericArgs::AngleBracketed(..) => visit::walk_generic_args(self, path_span, args),
+ GenericArgs::AngleBracketed(..) => visit::walk_generic_args(self, args),
GenericArgs::Parenthesized(p_args) => {
// Probe the lifetime ribs to know how to behave.
for rib in self.lifetime_ribs.iter().rev() {
@@ -1079,7 +1056,7 @@ impl<'a: 'ast, 'ast> Visitor<'ast> for LateResolutionVisitor<'a, '_, 'ast> {
// We have nowhere to introduce generics. Code is malformed,
// so use regular lifetime resolution to avoid spurious errors.
LifetimeRibKind::Item | LifetimeRibKind::Generics { .. } => {
- visit::walk_generic_args(self, path_span, args);
+ visit::walk_generic_args(self, args);
break;
}
LifetimeRibKind::AnonymousCreateParameter { .. }
@@ -1390,7 +1367,7 @@ impl<'a: 'ast, 'b, 'ast> LateResolutionVisitor<'a, 'b, 'ast> {
})
}
- #[tracing::instrument(level = "debug", skip(self, work))]
+ #[instrument(level = "debug", skip(self, work))]
fn with_lifetime_rib<T>(
&mut self,
kind: LifetimeRibKind,
@@ -1404,7 +1381,7 @@ impl<'a: 'ast, 'b, 'ast> LateResolutionVisitor<'a, 'b, 'ast> {
ret
}
- #[tracing::instrument(level = "debug", skip(self))]
+ #[instrument(level = "debug", skip(self))]
fn resolve_lifetime(&mut self, lifetime: &'ast Lifetime, use_ctxt: visit::LifetimeCtxt) {
let ident = lifetime.ident;
@@ -1421,9 +1398,8 @@ impl<'a: 'ast, 'b, 'ast> LateResolutionVisitor<'a, 'b, 'ast> {
return self.resolve_anonymous_lifetime(lifetime, false);
}
- let mut indices = (0..self.lifetime_ribs.len()).rev();
- for i in &mut indices {
- let rib = &self.lifetime_ribs[i];
+ let mut lifetime_rib_iter = self.lifetime_ribs.iter().rev();
+ while let Some(rib) = lifetime_rib_iter.next() {
let normalized_ident = ident.normalize_to_macros_2_0();
if let Some(&(_, res)) = rib.bindings.get(&normalized_ident) {
self.record_lifetime_res(lifetime.id, res, LifetimeElisionCandidate::Named);
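The index-based loop above becomes a single reversed iterator consumed in two phases: the `while let` phase searches innermost-out until it finds a binding or hits a barrier, and whatever remains in `lifetime_rib_iter` is then scanned by the follow-up loop that looks for an outer binding to mention in diagnostics. A toy example of resuming one iterator across two loops:

fn main() {
    // Innermost scope last, as in a rib stack.
    let scopes = vec!["crate", "module", "function", "closure"];

    let mut iter = scopes.iter().rev();

    // Phase 1: search inward-out until a "barrier" scope stops the lookup.
    let mut found_before_barrier = None;
    while let Some(scope) = iter.next() {
        if *scope == "module" {
            break; // barrier: the primary search ends here
        }
        if *scope == "function" {
            found_before_barrier = Some(*scope);
        }
    }

    // Phase 2: the same iterator resumes past the barrier, e.g. to look for
    // an outer occurrence worth mentioning in a diagnostic.
    let outer: Vec<&str> = iter.copied().collect();

    assert_eq!(found_before_barrier, Some("function"));
    assert_eq!(outer, vec!["crate"]);
}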
@@ -1453,9 +1429,10 @@ impl<'a: 'ast, 'b, 'ast> LateResolutionVisitor<'a, 'b, 'ast> {
} else {
LifetimeUseSet::Many
}),
- LifetimeRibKind::Generics { .. }
- | LifetimeRibKind::ConstGeneric
- | LifetimeRibKind::AnonConst => None,
+ LifetimeRibKind::Generics { .. } => None,
+ LifetimeRibKind::ConstGeneric | LifetimeRibKind::AnonConst => {
+ span_bug!(ident.span, "unexpected rib kind: {:?}", rib.kind)
+ }
})
.unwrap_or(LifetimeUseSet::Many);
debug!(?use_ctxt, ?use_set);
@@ -1490,13 +1467,16 @@ impl<'a: 'ast, 'b, 'ast> LateResolutionVisitor<'a, 'b, 'ast> {
);
return;
}
- _ => {}
+ LifetimeRibKind::AnonymousCreateParameter { .. }
+ | LifetimeRibKind::Elided(_)
+ | LifetimeRibKind::Generics { .. }
+ | LifetimeRibKind::ElisionFailure
+ | LifetimeRibKind::AnonymousReportError => {}
}
}
let mut outer_res = None;
- for i in indices {
- let rib = &self.lifetime_ribs[i];
+ for rib in lifetime_rib_iter {
let normalized_ident = ident.normalize_to_macros_2_0();
if let Some((&outer, _)) = rib.bindings.get_key_value(&normalized_ident) {
outer_res = Some(outer);
@@ -1508,7 +1488,7 @@ impl<'a: 'ast, 'b, 'ast> LateResolutionVisitor<'a, 'b, 'ast> {
self.record_lifetime_res(lifetime.id, LifetimeRes::Error, LifetimeElisionCandidate::Named);
}
- #[tracing::instrument(level = "debug", skip(self))]
+ #[instrument(level = "debug", skip(self))]
fn resolve_anonymous_lifetime(&mut self, lifetime: &Lifetime, elided: bool) {
debug_assert_eq!(lifetime.ident.name, kw::UnderscoreLifetime);
@@ -1523,8 +1503,7 @@ impl<'a: 'ast, 'b, 'ast> LateResolutionVisitor<'a, 'b, 'ast> {
count: 1,
};
let elision_candidate = LifetimeElisionCandidate::Missing(missing_lifetime);
- for i in (0..self.lifetime_ribs.len()).rev() {
- let rib = &mut self.lifetime_ribs[i];
+ for rib in self.lifetime_ribs.iter().rev() {
debug!(?rib.kind);
match rib.kind {
LifetimeRibKind::AnonymousCreateParameter { binder, .. } => {
@@ -1564,16 +1543,18 @@ impl<'a: 'ast, 'b, 'ast> LateResolutionVisitor<'a, 'b, 'ast> {
return;
}
LifetimeRibKind::Item => break,
- LifetimeRibKind::Generics { .. }
- | LifetimeRibKind::ConstGeneric
- | LifetimeRibKind::AnonConst => {}
+ LifetimeRibKind::Generics { .. } | LifetimeRibKind::ConstGeneric => {}
+ LifetimeRibKind::AnonConst => {
+ // There is always an `Elided(LifetimeRes::Static)` inside an `AnonConst`.
+ span_bug!(lifetime.ident.span, "unexpected rib kind: {:?}", rib.kind)
+ }
}
}
self.record_lifetime_res(lifetime.id, LifetimeRes::Error, elision_candidate);
self.report_missing_lifetime_specifiers(vec![missing_lifetime], None);
}
- #[tracing::instrument(level = "debug", skip(self))]
+ #[instrument(level = "debug", skip(self))]
fn resolve_elided_lifetime(&mut self, anchor_id: NodeId, span: Span) {
let id = self.r.next_node_id();
let lt = Lifetime { id, ident: Ident::new(kw::UnderscoreLifetime, span) };
@@ -1586,7 +1567,7 @@ impl<'a: 'ast, 'b, 'ast> LateResolutionVisitor<'a, 'b, 'ast> {
self.resolve_anonymous_lifetime(&lt, true);
}
- #[tracing::instrument(level = "debug", skip(self))]
+ #[instrument(level = "debug", skip(self))]
fn create_fresh_lifetime(&mut self, id: NodeId, ident: Ident, binder: NodeId) -> LifetimeRes {
debug_assert_eq!(ident.name, kw::UnderscoreLifetime);
debug!(?ident.span);
@@ -1604,7 +1585,7 @@ impl<'a: 'ast, 'b, 'ast> LateResolutionVisitor<'a, 'b, 'ast> {
res
}
- #[tracing::instrument(level = "debug", skip(self))]
+ #[instrument(level = "debug", skip(self))]
fn resolve_elided_lifetimes_in_path(
&mut self,
path_id: NodeId,
@@ -1781,9 +1762,11 @@ impl<'a: 'ast, 'b, 'ast> LateResolutionVisitor<'a, 'b, 'ast> {
self.report_missing_lifetime_specifiers(vec![missing_lifetime], None);
break;
}
- LifetimeRibKind::Generics { .. }
- | LifetimeRibKind::ConstGeneric
- | LifetimeRibKind::AnonConst => {}
+ LifetimeRibKind::Generics { .. } | LifetimeRibKind::ConstGeneric => {}
+ LifetimeRibKind::AnonConst => {
+ // There is always an `Elided(LifetimeRes::Static)` inside an `AnonConst`.
+ span_bug!(elided_lifetime_span, "unexpected rib kind: {:?}", rib.kind)
+ }
}
}
@@ -1804,7 +1787,7 @@ impl<'a: 'ast, 'b, 'ast> LateResolutionVisitor<'a, 'b, 'ast> {
}
}
- #[tracing::instrument(level = "debug", skip(self))]
+ #[instrument(level = "debug", skip(self))]
fn record_lifetime_res(
&mut self,
id: NodeId,
@@ -1820,14 +1803,14 @@ impl<'a: 'ast, 'b, 'ast> LateResolutionVisitor<'a, 'b, 'ast> {
match res {
LifetimeRes::Param { .. } | LifetimeRes::Fresh { .. } | LifetimeRes::Static => {
if let Some(ref mut candidates) = self.lifetime_elision_candidates {
- candidates.insert(res, candidate);
+ candidates.push((res, candidate));
}
}
LifetimeRes::Infer | LifetimeRes::Error | LifetimeRes::ElidedAnchor { .. } => {}
}
}
- #[tracing::instrument(level = "debug", skip(self))]
+ #[instrument(level = "debug", skip(self))]
fn record_lifetime_param(&mut self, id: NodeId, res: LifetimeRes) {
if let Some(prev_res) = self.r.lifetimes_res_map.insert(id, res) {
panic!(
@@ -1838,7 +1821,7 @@ impl<'a: 'ast, 'b, 'ast> LateResolutionVisitor<'a, 'b, 'ast> {
}
/// Perform resolution of a function signature, accounting for lifetime elision.
- #[tracing::instrument(level = "debug", skip(self, inputs))]
+ #[instrument(level = "debug", skip(self, inputs))]
fn resolve_fn_signature(
&mut self,
fn_id: NodeId,
@@ -1873,25 +1856,45 @@ impl<'a: 'ast, 'b, 'ast> LateResolutionVisitor<'a, 'b, 'ast> {
has_self: bool,
inputs: impl Iterator<Item = (Option<&'ast Pat>, &'ast Ty)>,
) -> Result<LifetimeRes, (Vec<MissingLifetime>, Vec<ElisionFnParameter>)> {
- let outer_candidates =
- replace(&mut self.lifetime_elision_candidates, Some(Default::default()));
+ enum Elision {
+ /// We have not found any candidate.
+ None,
+ /// We have a candidate bound to `self`.
+ Self_(LifetimeRes),
+ /// We have a candidate bound to a parameter.
+ Param(LifetimeRes),
+ /// We failed elision.
+ Err,
+ }
+
+ // Save elision state to reinstate it later.
+ let outer_candidates = self.lifetime_elision_candidates.take();
- let mut elision_lifetime = None;
- let mut lifetime_count = 0;
+ // Result of elision.
+ let mut elision_lifetime = Elision::None;
+ // Information for diagnostics.
let mut parameter_info = Vec::new();
+ let mut all_candidates = Vec::new();
let mut bindings = smallvec![(PatBoundCtx::Product, Default::default())];
for (index, (pat, ty)) in inputs.enumerate() {
debug!(?pat, ?ty);
- if let Some(pat) = pat {
- self.resolve_pattern(pat, PatternSource::FnParam, &mut bindings);
- }
+ self.with_lifetime_rib(LifetimeRibKind::Elided(LifetimeRes::Infer), |this| {
+ if let Some(pat) = pat {
+ this.resolve_pattern(pat, PatternSource::FnParam, &mut bindings);
+ }
+ });
+
+ // Record elision candidates only for this parameter.
+ debug_assert_matches!(self.lifetime_elision_candidates, None);
+ self.lifetime_elision_candidates = Some(Default::default());
self.visit_ty(ty);
+ let local_candidates = self.lifetime_elision_candidates.take();
- if let Some(ref candidates) = self.lifetime_elision_candidates {
- let new_count = candidates.len();
- let local_count = new_count - lifetime_count;
- if local_count != 0 {
+ if let Some(candidates) = local_candidates {
+ let distinct: FxHashSet<_> = candidates.iter().map(|(res, _)| *res).collect();
+ let lifetime_count = distinct.len();
+ if lifetime_count != 0 {
parameter_info.push(ElisionFnParameter {
index,
ident: if let Some(pat) = pat && let PatKind::Ident(_, ident, _) = pat.kind {
@@ -1899,48 +1902,64 @@ impl<'a: 'ast, 'b, 'ast> LateResolutionVisitor<'a, 'b, 'ast> {
} else {
None
},
- lifetime_count: local_count,
+ lifetime_count,
span: ty.span,
});
+ all_candidates.extend(candidates.into_iter().filter_map(|(_, candidate)| {
+ match candidate {
+ LifetimeElisionCandidate::Ignore | LifetimeElisionCandidate::Named => {
+ None
+ }
+ LifetimeElisionCandidate::Missing(missing) => Some(missing),
+ }
+ }));
+ }
+ let mut distinct_iter = distinct.into_iter();
+ if let Some(res) = distinct_iter.next() {
+ match elision_lifetime {
+ // We are the first parameter to bind lifetimes.
+ Elision::None => {
+ if distinct_iter.next().is_none() {
+ // We have a single lifetime => success.
+ elision_lifetime = Elision::Param(res)
+ } else {
+ // We have multiple lifetimes => error.
+ elision_lifetime = Elision::Err;
+ }
+ }
+ // We have 2 parameters that bind lifetimes => error.
+ Elision::Param(_) => elision_lifetime = Elision::Err,
+ // `self` elision takes precedence over everything else.
+ Elision::Self_(_) | Elision::Err => {}
+ }
}
- lifetime_count = new_count;
}
// Handle `self` specially.
if index == 0 && has_self {
let self_lifetime = self.find_lifetime_for_self(ty);
if let Set1::One(lifetime) = self_lifetime {
- elision_lifetime = Some(lifetime);
- self.lifetime_elision_candidates = None;
+ // We found `self` elision.
+ elision_lifetime = Elision::Self_(lifetime);
} else {
- self.lifetime_elision_candidates = Some(Default::default());
- lifetime_count = 0;
+ // We do not have `self` elision: disregard the `Elision::Param` that we may
+ // have found.
+ elision_lifetime = Elision::None;
}
}
debug!("(resolving function / closure) recorded parameter");
}
- let all_candidates = replace(&mut self.lifetime_elision_candidates, outer_candidates);
- debug!(?all_candidates);
+ // Reinstate elision state.
+ debug_assert_matches!(self.lifetime_elision_candidates, None);
+ self.lifetime_elision_candidates = outer_candidates;
- if let Some(res) = elision_lifetime {
+ if let Elision::Param(res) | Elision::Self_(res) = elision_lifetime {
return Ok(res);
}
- // We do not have a `self` candidate, look at the full list.
- let all_candidates = all_candidates.unwrap();
- if all_candidates.len() == 1 {
- Ok(*all_candidates.first().unwrap().0)
- } else {
- let all_candidates = all_candidates
- .into_iter()
- .filter_map(|(_, candidate)| match candidate {
- LifetimeElisionCandidate::Ignore | LifetimeElisionCandidate::Named => None,
- LifetimeElisionCandidate::Missing(missing) => Some(missing),
- })
- .collect();
- Err((all_candidates, parameter_info))
- }
+ // We do not have a candidate.
+ Err((all_candidates, parameter_info))
}
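The rewritten signature-elision logic above is a small state machine: each parameter contributes its set of distinct lifetimes, a single lifetime found on exactly one parameter wins, a second contributing parameter (or several lifetimes in one parameter) fails elision, and a usable `&self` lifetime overrides everything. A toy model of just that decision logic, detached from the real resolver types:

#[derive(Debug, PartialEq, Clone, Copy)]
enum Elision {
    None,              // no candidate yet
    SelfLifetime(u32), // lifetime taken from `&self`
    Param(u32),        // lifetime taken from a single parameter
    Err,               // ambiguous: elision fails
}

/// `params[i]` holds the distinct lifetimes mentioned by parameter `i`;
/// `self_lifetime` is the unique lifetime of `&self`, if one was found.
fn elide(params: &[Vec<u32>], has_self: bool, self_lifetime: Option<u32>) -> Elision {
    let mut state = Elision::None;
    for (index, lifetimes) in params.iter().enumerate() {
        if let Some((&first, rest)) = lifetimes.split_first() {
            state = match state {
                Elision::None if rest.is_empty() => Elision::Param(first),
                Elision::None => Elision::Err,     // several lifetimes in one parameter
                Elision::Param(_) => Elision::Err, // a second parameter also binds lifetimes
                other @ (Elision::SelfLifetime(_) | Elision::Err) => other,
            };
        }
        // `self` elision on the first parameter takes precedence over everything else.
        if index == 0 && has_self {
            state = match self_lifetime {
                Some(lt) => Elision::SelfLifetime(lt),
                // `self` without a usable lifetime: drop any `Param` found so far.
                None => Elision::None,
            };
        }
    }
    state
}

fn main() {
    assert_eq!(elide(&[vec![1]], false, None), Elision::Param(1)); // fn f(x: &'a T) -> &T
    assert_eq!(elide(&[vec![1], vec![2]], false, None), Elision::Err); // two candidate parameters
    assert_eq!(elide(&[vec![1], vec![2]], true, Some(1)), Elision::SelfLifetime(1)); // `&self` wins
}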
/// List all the lifetimes that appear in the provided type.
@@ -1958,11 +1977,11 @@ impl<'a: 'ast, 'b, 'ast> LateResolutionVisitor<'a, 'b, 'ast> {
match ty.kind {
TyKind::ImplicitSelf => true,
TyKind::Path(None, _) => {
- let path_res = self.r.partial_res_map[&ty.id].base_res();
- if let Res::SelfTy { .. } = path_res {
+ let path_res = self.r.partial_res_map[&ty.id].full_res();
+ if let Some(Res::SelfTyParam { .. } | Res::SelfTyAlias { .. }) = path_res {
return true;
}
- Some(path_res) == self.impl_self
+ self.impl_self.is_some() && path_res == self.impl_self
}
_ => false,
}
@@ -1999,7 +2018,7 @@ impl<'a: 'ast, 'b, 'ast> LateResolutionVisitor<'a, 'b, 'ast> {
None
}
})
- .map(|res| res.base_res())
+ .and_then(|res| res.full_res())
.filter(|res| {
// Permit the types that unambiguously always
// result in the same type constructor being used
@@ -2071,7 +2090,7 @@ impl<'a: 'ast, 'b, 'ast> LateResolutionVisitor<'a, 'b, 'ast> {
self.with_current_self_item(item, |this| {
this.with_generic_param_rib(
&generics.params,
- ItemRibKind(HasGenericParams::Yes),
+ ItemRibKind(HasGenericParams::Yes(generics.span)),
LifetimeRibKind::Generics {
binder: item.id,
kind: LifetimeBinderKind::Item,
@@ -2080,7 +2099,11 @@ impl<'a: 'ast, 'b, 'ast> LateResolutionVisitor<'a, 'b, 'ast> {
|this| {
let item_def_id = this.r.local_def_id(item.id).to_def_id();
this.with_self_rib(
- Res::SelfTy { trait_: None, alias_to: Some((item_def_id, false)) },
+ Res::SelfTyAlias {
+ alias_to: item_def_id,
+ forbid_generic: false,
+ is_trait_impl: false,
+ },
|this| {
visit::walk_item(this, item);
},
@@ -2141,7 +2164,7 @@ impl<'a: 'ast, 'b, 'ast> LateResolutionVisitor<'a, 'b, 'ast> {
ItemKind::TyAlias(box TyAlias { ref generics, .. }) => {
self.with_generic_param_rib(
&generics.params,
- ItemRibKind(HasGenericParams::Yes),
+ ItemRibKind(HasGenericParams::Yes(generics.span)),
LifetimeRibKind::Generics {
binder: item.id,
kind: LifetimeBinderKind::Item,
@@ -2154,7 +2177,7 @@ impl<'a: 'ast, 'b, 'ast> LateResolutionVisitor<'a, 'b, 'ast> {
ItemKind::Fn(box Fn { ref generics, .. }) => {
self.with_generic_param_rib(
&generics.params,
- ItemRibKind(HasGenericParams::Yes),
+ ItemRibKind(HasGenericParams::Yes(generics.span)),
LifetimeRibKind::Generics {
binder: item.id,
kind: LifetimeBinderKind::Function,
@@ -2186,7 +2209,7 @@ impl<'a: 'ast, 'b, 'ast> LateResolutionVisitor<'a, 'b, 'ast> {
// Create a new rib for the trait-wide type parameters.
self.with_generic_param_rib(
&generics.params,
- ItemRibKind(HasGenericParams::Yes),
+ ItemRibKind(HasGenericParams::Yes(generics.span)),
LifetimeRibKind::Generics {
binder: item.id,
kind: LifetimeBinderKind::Item,
@@ -2194,14 +2217,11 @@ impl<'a: 'ast, 'b, 'ast> LateResolutionVisitor<'a, 'b, 'ast> {
},
|this| {
let local_def_id = this.r.local_def_id(item.id).to_def_id();
- this.with_self_rib(
- Res::SelfTy { trait_: Some(local_def_id), alias_to: None },
- |this| {
- this.visit_generics(generics);
- walk_list!(this, visit_param_bound, bounds, BoundKind::SuperTraits);
- this.resolve_trait_items(items);
- },
- );
+ this.with_self_rib(Res::SelfTyParam { trait_: local_def_id }, |this| {
+ this.visit_generics(generics);
+ walk_list!(this, visit_param_bound, bounds, BoundKind::SuperTraits);
+ this.resolve_trait_items(items);
+ });
},
);
}
@@ -2210,7 +2230,7 @@ impl<'a: 'ast, 'b, 'ast> LateResolutionVisitor<'a, 'b, 'ast> {
// Create a new rib for the trait-wide type parameters.
self.with_generic_param_rib(
&generics.params,
- ItemRibKind(HasGenericParams::Yes),
+ ItemRibKind(HasGenericParams::Yes(generics.span)),
LifetimeRibKind::Generics {
binder: item.id,
kind: LifetimeBinderKind::Item,
@@ -2218,13 +2238,10 @@ impl<'a: 'ast, 'b, 'ast> LateResolutionVisitor<'a, 'b, 'ast> {
},
|this| {
let local_def_id = this.r.local_def_id(item.id).to_def_id();
- this.with_self_rib(
- Res::SelfTy { trait_: Some(local_def_id), alias_to: None },
- |this| {
- this.visit_generics(generics);
- walk_list!(this, visit_param_bound, bounds, BoundKind::Bound);
- },
- );
+ this.with_self_rib(Res::SelfTyParam { trait_: local_def_id }, |this| {
+ this.visit_generics(generics);
+ walk_list!(this, visit_param_bound, bounds, BoundKind::Bound);
+ });
},
);
}
@@ -2236,7 +2253,7 @@ impl<'a: 'ast, 'b, 'ast> LateResolutionVisitor<'a, 'b, 'ast> {
}
ItemKind::Static(ref ty, _, ref expr) | ItemKind::Const(_, ref ty, ref expr) => {
- self.with_item_rib(|this| {
+ self.with_static_rib(|this| {
this.with_lifetime_rib(LifetimeRibKind::Elided(LifetimeRes::Static), |this| {
this.visit_ty(ty);
});
@@ -2251,7 +2268,7 @@ impl<'a: 'ast, 'b, 'ast> LateResolutionVisitor<'a, 'b, 'ast> {
// so it doesn't matter whether this is a trivial constant.
this.with_constant_rib(
IsRepeatExpr::No,
- HasGenericParams::Yes,
+ ConstantHasGenerics::Yes,
Some((item.ident, constant_item_kind)),
|this| this.visit_expr(expr),
);
@@ -2412,7 +2429,7 @@ impl<'a: 'ast, 'b, 'ast> LateResolutionVisitor<'a, 'b, 'ast> {
// Do not account for the parameters we just bound for function lifetime elision.
if let Some(ref mut candidates) = self.lifetime_elision_candidates {
for (_, res) in function_lifetime_rib.bindings.values() {
- candidates.remove(res);
+ candidates.retain(|(r, _)| r != res);
}
}
@@ -2431,11 +2448,9 @@ impl<'a: 'ast, 'b, 'ast> LateResolutionVisitor<'a, 'b, 'ast> {
self.label_ribs.pop();
}
- fn with_item_rib(&mut self, f: impl FnOnce(&mut Self)) {
+ fn with_static_rib(&mut self, f: impl FnOnce(&mut Self)) {
let kind = ItemRibKind(HasGenericParams::No);
- self.with_lifetime_rib(LifetimeRibKind::Item, |this| {
- this.with_rib(ValueNS, kind, |this| this.with_rib(TypeNS, kind, f))
- })
+ self.with_rib(ValueNS, kind, |this| this.with_rib(TypeNS, kind, f))
}
// HACK(min_const_generics,const_evaluatable_unchecked): We
@@ -2450,7 +2465,7 @@ impl<'a: 'ast, 'b, 'ast> LateResolutionVisitor<'a, 'b, 'ast> {
fn with_constant_rib(
&mut self,
is_repeat: IsRepeatExpr,
- may_use_generics: HasGenericParams,
+ may_use_generics: ConstantHasGenerics,
item: Option<(Ident, ConstantItemKind)>,
f: impl FnOnce(&mut Self),
) {
@@ -2517,7 +2532,7 @@ impl<'a: 'ast, 'b, 'ast> LateResolutionVisitor<'a, 'b, 'ast> {
|this| {
this.with_constant_rib(
IsRepeatExpr::No,
- HasGenericParams::Yes,
+ ConstantHasGenerics::Yes,
None,
|this| this.visit_expr(expr),
)
@@ -2528,7 +2543,7 @@ impl<'a: 'ast, 'b, 'ast> LateResolutionVisitor<'a, 'b, 'ast> {
AssocItemKind::Fn(box Fn { generics, .. }) => {
walk_assoc_item(self, generics, LifetimeBinderKind::Function, item);
}
- AssocItemKind::TyAlias(box TyAlias { generics, .. }) => self
+ AssocItemKind::Type(box TyAlias { generics, .. }) => self
.with_lifetime_rib(LifetimeRibKind::AnonymousReportError, |this| {
walk_assoc_item(this, generics, LifetimeBinderKind::Item, item)
}),
@@ -2561,7 +2576,7 @@ impl<'a: 'ast, 'b, 'ast> LateResolutionVisitor<'a, 'b, 'ast> {
Finalize::new(trait_ref.ref_id, trait_ref.path.span),
);
self.diagnostic_metadata.currently_processing_impl_trait = None;
- if let Some(def_id) = res.base_res().opt_def_id() {
+ if let Some(def_id) = res.expect_full_res().opt_def_id() {
new_id = Some(def_id);
new_val = Some((self.r.expect_module(def_id), trait_ref.clone()));
}
@@ -2598,7 +2613,7 @@ impl<'a: 'ast, 'b, 'ast> LateResolutionVisitor<'a, 'b, 'ast> {
// If applicable, create a rib for the type parameters.
self.with_generic_param_rib(
&generics.params,
- ItemRibKind(HasGenericParams::Yes),
+ ItemRibKind(HasGenericParams::Yes(generics.span)),
LifetimeRibKind::Generics {
span: generics.span,
binder: item_id,
@@ -2606,7 +2621,7 @@ impl<'a: 'ast, 'b, 'ast> LateResolutionVisitor<'a, 'b, 'ast> {
},
|this| {
// Dummy self type for better errors if `Self` is used in the trait path.
- this.with_self_rib(Res::SelfTy { trait_: None, alias_to: None }, |this| {
+ this.with_self_rib(Res::SelfTyParam { trait_: LOCAL_CRATE.as_def_id() }, |this| {
this.with_lifetime_rib(
LifetimeRibKind::AnonymousCreateParameter {
binder: item_id,
@@ -2630,9 +2645,10 @@ impl<'a: 'ast, 'b, 'ast> LateResolutionVisitor<'a, 'b, 'ast> {
}
let item_def_id = item_def_id.to_def_id();
- let res = Res::SelfTy {
- trait_: trait_id,
- alias_to: Some((item_def_id, false)),
+ let res = Res::SelfTyAlias {
+ alias_to: item_def_id,
+ forbid_generic: false,
+ is_trait_impl: trait_id.is_some()
};
this.with_self_rib(res, |this| {
if let Some(trait_ref) = opt_trait_reference.as_ref() {
@@ -2648,8 +2664,9 @@ impl<'a: 'ast, 'b, 'ast> LateResolutionVisitor<'a, 'b, 'ast> {
this.with_current_self_type(self_type, |this| {
this.with_self_rib_ns(ValueNS, Res::SelfCtor(item_def_id), |this| {
debug!("resolve_implementation with_self_rib_ns(ValueNS, ...)");
+ let mut seen_trait_items = Default::default();
for item in impl_items {
- this.resolve_impl_item(&**item);
+ this.resolve_impl_item(&**item, &mut seen_trait_items);
}
});
});
@@ -2663,7 +2680,11 @@ impl<'a: 'ast, 'b, 'ast> LateResolutionVisitor<'a, 'b, 'ast> {
);
}
- fn resolve_impl_item(&mut self, item: &'ast AssocItem) {
+ fn resolve_impl_item(
+ &mut self,
+ item: &'ast AssocItem,
+ seen_trait_items: &mut FxHashMap<DefId, Span>,
+ ) {
use crate::ResolutionError::*;
match &item.kind {
AssocItemKind::Const(_, ty, default) => {
@@ -2676,6 +2697,7 @@ impl<'a: 'ast, 'b, 'ast> LateResolutionVisitor<'a, 'b, 'ast> {
&item.kind,
ValueNS,
item.span,
+ seen_trait_items,
|i, s, c| ConstNotMemberOfTrait(i, s, c),
);
@@ -2689,7 +2711,7 @@ impl<'a: 'ast, 'b, 'ast> LateResolutionVisitor<'a, 'b, 'ast> {
self.with_lifetime_rib(LifetimeRibKind::Elided(LifetimeRes::Infer), |this| {
this.with_constant_rib(
IsRepeatExpr::No,
- HasGenericParams::Yes,
+ ConstantHasGenerics::Yes,
None,
|this| this.visit_expr(expr),
)
@@ -2716,6 +2738,7 @@ impl<'a: 'ast, 'b, 'ast> LateResolutionVisitor<'a, 'b, 'ast> {
&item.kind,
ValueNS,
item.span,
+ seen_trait_items,
|i, s, c| MethodNotMemberOfTrait(i, s, c),
);
@@ -2723,8 +2746,8 @@ impl<'a: 'ast, 'b, 'ast> LateResolutionVisitor<'a, 'b, 'ast> {
},
);
}
- AssocItemKind::TyAlias(box TyAlias { generics, .. }) => {
- debug!("resolve_implementation AssocItemKind::TyAlias");
+ AssocItemKind::Type(box TyAlias { generics, .. }) => {
+ debug!("resolve_implementation AssocItemKind::Type");
// We also need a new scope for the impl item type parameters.
self.with_generic_param_rib(
&generics.params,
@@ -2744,6 +2767,7 @@ impl<'a: 'ast, 'b, 'ast> LateResolutionVisitor<'a, 'b, 'ast> {
&item.kind,
TypeNS,
item.span,
+ seen_trait_items,
|i, s, c| TypeNotMemberOfTrait(i, s, c),
);
@@ -2765,6 +2789,7 @@ impl<'a: 'ast, 'b, 'ast> LateResolutionVisitor<'a, 'b, 'ast> {
kind: &AssocItemKind,
ns: Namespace,
span: Span,
+ seen_trait_items: &mut FxHashMap<DefId, Span>,
err: F,
) where
F: FnOnce(Ident, String, Option<Symbol>) -> ResolutionError<'a>,
@@ -2797,9 +2822,27 @@ impl<'a: 'ast, 'b, 'ast> LateResolutionVisitor<'a, 'b, 'ast> {
};
let res = binding.res();
- let Res::Def(def_kind, _) = res else { bug!() };
+ let Res::Def(def_kind, id_in_trait) = res else { bug!() };
+
+ match seen_trait_items.entry(id_in_trait) {
+ Entry::Occupied(entry) => {
+ self.report_error(
+ span,
+ ResolutionError::TraitImplDuplicate {
+ name: ident.name,
+ old_span: *entry.get(),
+ trait_item_span: binding.span,
+ },
+ );
+ return;
+ }
+ Entry::Vacant(entry) => {
+ entry.insert(span);
+ }
+ };
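The `seen_trait_items` map above uses the `HashMap` entry API so that a second impl item resolving to the same trait item can be reported alongside the span of the first one. A generic sketch of that duplicate check, with string keys standing in for `DefId`s and positions for spans:

use std::collections::hash_map::{Entry, HashMap};

#[derive(Debug, PartialEq)]
enum Outcome {
    First,
    DuplicateOf(usize), // position of the earlier occurrence
}

/// Records `key` at position `pos`, or reports where it was first seen.
fn check_duplicate(seen: &mut HashMap<String, usize>, key: &str, pos: usize) -> Outcome {
    match seen.entry(key.to_owned()) {
        Entry::Occupied(entry) => Outcome::DuplicateOf(*entry.get()),
        Entry::Vacant(entry) => {
            entry.insert(pos);
            Outcome::First
        }
    }
}

fn main() {
    let mut seen = HashMap::new();
    assert_eq!(check_duplicate(&mut seen, "type Assoc", 10), Outcome::First);
    assert_eq!(check_duplicate(&mut seen, "fn method", 20), Outcome::First);
    // The second `type Assoc` points back at the position of the first one.
    assert_eq!(check_duplicate(&mut seen, "type Assoc", 30), Outcome::DuplicateOf(10));
}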
+
match (def_kind, kind) {
- (DefKind::AssocTy, AssocItemKind::TyAlias(..))
+ (DefKind::AssocTy, AssocItemKind::Type(..))
| (DefKind::AssocFn, AssocItemKind::Fn(..))
| (DefKind::AssocConst, AssocItemKind::Const(..)) => {
self.r.record_partial_res(id, PartialRes::new(res));
@@ -2813,7 +2856,7 @@ impl<'a: 'ast, 'b, 'ast> LateResolutionVisitor<'a, 'b, 'ast> {
let (code, kind) = match kind {
AssocItemKind::Const(..) => (rustc_errors::error_code!(E0323), "const"),
AssocItemKind::Fn(..) => (rustc_errors::error_code!(E0324), "method"),
- AssocItemKind::TyAlias(..) => (rustc_errors::error_code!(E0325), "type"),
+ AssocItemKind::Type(..) => (rustc_errors::error_code!(E0325), "type"),
AssocItemKind::MacCall(..) => span_bug!(span, "unexpanded macro"),
};
let trait_path = path_names_to_string(path);
@@ -2831,10 +2874,13 @@ impl<'a: 'ast, 'b, 'ast> LateResolutionVisitor<'a, 'b, 'ast> {
fn resolve_params(&mut self, params: &'ast [Param]) {
let mut bindings = smallvec![(PatBoundCtx::Product, Default::default())];
- for Param { pat, ty, .. } in params {
- self.resolve_pattern(pat, PatternSource::FnParam, &mut bindings);
+ self.with_lifetime_rib(LifetimeRibKind::Elided(LifetimeRes::Infer), |this| {
+ for Param { pat, .. } in params {
+ this.resolve_pattern(pat, PatternSource::FnParam, &mut bindings);
+ }
+ });
+ for Param { ty, .. } in params {
self.visit_ty(ty);
- debug!("(resolving function / closure) recorded parameter");
}
}
@@ -2866,10 +2912,10 @@ impl<'a: 'ast, 'b, 'ast> LateResolutionVisitor<'a, 'b, 'ast> {
pat.walk(&mut |pat| {
match pat.kind {
- PatKind::Ident(binding_mode, ident, ref sub_pat)
+ PatKind::Ident(annotation, ident, ref sub_pat)
if sub_pat.is_some() || self.is_base_res_local(pat.id) =>
{
- binding_map.insert(ident, BindingInfo { span: ident.span, binding_mode });
+ binding_map.insert(ident, BindingInfo { span: ident.span, annotation });
}
PatKind::Or(ref ps) => {
// Check the consistency of this or-pattern and
@@ -2889,7 +2935,10 @@ impl<'a: 'ast, 'b, 'ast> LateResolutionVisitor<'a, 'b, 'ast> {
}
fn is_base_res_local(&self, nid: NodeId) -> bool {
- matches!(self.r.partial_res_map.get(&nid).map(|res| res.base_res()), Some(Res::Local(..)))
+ matches!(
+ self.r.partial_res_map.get(&nid).map(|res| res.expect_full_res()),
+ Some(Res::Local(..))
+ )
}
/// Checks that all of the arms in an or-pattern have exactly the
@@ -2926,7 +2975,7 @@ impl<'a: 'ast, 'b, 'ast> LateResolutionVisitor<'a, 'b, 'ast> {
binding_error.target.insert(pat_outer.span);
}
Some(binding_outer) => {
- if binding_outer.binding_mode != binding_inner.binding_mode {
+ if binding_outer.annotation != binding_inner.annotation {
// The binding modes in the outer and inner bindings differ.
inconsistent_vars
.entry(name)
@@ -3147,14 +3196,14 @@ impl<'a: 'ast, 'b, 'ast> LateResolutionVisitor<'a, 'b, 'ast> {
fn try_resolve_as_non_binding(
&mut self,
pat_src: PatternSource,
- bm: BindingMode,
+ ann: BindingAnnotation,
ident: Ident,
has_sub: bool,
) -> Option<Res> {
// An immutable (no `mut`) by-value (no `ref`) binding pattern without
// a sub pattern (no `@ $pat`) is syntactically ambiguous as it could
// also be interpreted as a path to e.g. a constant, variant, etc.
- let is_syntactic_ambiguity = !has_sub && bm == BindingMode::ByValue(Mutability::Not);
+ let is_syntactic_ambiguity = !has_sub && ann == BindingAnnotation::NONE;
let ls_binding = self.maybe_resolve_ident_in_lexical_scope(ident, ValueNS)?;
let (res, binding) = match ls_binding {
@@ -3268,11 +3317,9 @@ impl<'a: 'ast, 'b, 'ast> LateResolutionVisitor<'a, 'b, 'ast> {
source: PathSource<'ast>,
finalize: Finalize,
) -> PartialRes {
- tracing::debug!(
+ debug!(
"smart_resolve_path_fragment(qself={:?}, path={:?}, finalize={:?})",
- qself,
- path,
- finalize,
+ qself, path, finalize,
);
let ns = source.namespace();
@@ -3294,6 +3341,7 @@ impl<'a: 'ast, 'b, 'ast> LateResolutionVisitor<'a, 'b, 'ast> {
instead,
suggestion,
path: path.into(),
+ is_call: source.is_call(),
});
}
@@ -3358,6 +3406,7 @@ impl<'a: 'ast, 'b, 'ast> LateResolutionVisitor<'a, 'b, 'ast> {
instead: false,
suggestion: None,
path: path.into(),
+ is_call: source.is_call(),
});
} else {
err.cancel();
@@ -3376,12 +3425,11 @@ impl<'a: 'ast, 'b, 'ast> LateResolutionVisitor<'a, 'b, 'ast> {
source.defer_to_typeck(),
finalize,
) {
- Ok(Some(partial_res)) if partial_res.unresolved_segments() == 0 => {
- if source.is_expected(partial_res.base_res()) || partial_res.base_res() == Res::Err
- {
+ Ok(Some(partial_res)) if let Some(res) = partial_res.full_res() => {
+ if source.is_expected(res) || res == Res::Err {
partial_res
} else {
- report_errors(self, Some(partial_res.base_res()))
+ report_errors(self, Some(res))
}
}
@@ -3589,20 +3637,21 @@ impl<'a: 'ast, 'b, 'ast> LateResolutionVisitor<'a, 'b, 'ast> {
};
if path.len() > 1
- && result.base_res() != Res::Err
+ && let Some(res) = result.full_res()
+ && res != Res::Err
&& path[0].ident.name != kw::PathRoot
&& path[0].ident.name != kw::DollarCrate
{
let unqualified_result = {
match self.resolve_path(&[*path.last().unwrap()], Some(ns), None) {
- PathResult::NonModule(path_res) => path_res.base_res(),
+ PathResult::NonModule(path_res) => path_res.expect_full_res(),
PathResult::Module(ModuleOrUniformRoot::Module(module)) => {
module.res().unwrap()
}
_ => return Ok(Some(result)),
}
};
- if result.base_res() == unqualified_result {
+ if res == unqualified_result {
let lint = lint::builtin::UNUSED_QUALIFICATIONS;
self.r.lint_buffer.buffer_lint(
lint,
@@ -3696,9 +3745,9 @@ impl<'a: 'ast, 'b, 'ast> LateResolutionVisitor<'a, 'b, 'ast> {
self.with_constant_rib(
is_repeat,
if constant.value.is_potential_trivial_const_param() {
- HasGenericParams::Yes
+ ConstantHasGenerics::Yes
} else {
- HasGenericParams::No
+ ConstantHasGenerics::No
},
None,
|this| visit::walk_anon_const(this, constant),
@@ -3707,8 +3756,8 @@ impl<'a: 'ast, 'b, 'ast> LateResolutionVisitor<'a, 'b, 'ast> {
fn resolve_inline_const(&mut self, constant: &'ast AnonConst) {
debug!("resolve_anon_const {constant:?}");
- self.with_constant_rib(IsRepeatExpr::No, HasGenericParams::Yes, None, |this| {
- visit::walk_anon_const(this, constant);
+ self.with_constant_rib(IsRepeatExpr::No, ConstantHasGenerics::Yes, None, |this| {
+ visit::walk_anon_const(this, constant)
});
}
@@ -3796,13 +3845,12 @@ impl<'a: 'ast, 'b, 'ast> LateResolutionVisitor<'a, 'b, 'ast> {
ExprKind::Field(ref subexpression, _) => {
self.resolve_expr(subexpression, Some(expr));
}
- ExprKind::MethodCall(ref segment, ref arguments, _) => {
- let mut arguments = arguments.iter();
- self.resolve_expr(arguments.next().unwrap(), Some(expr));
+ ExprKind::MethodCall(ref segment, ref receiver, ref arguments, _) => {
+ self.resolve_expr(receiver, Some(expr));
for argument in arguments {
self.resolve_expr(argument, None);
}
- self.visit_path_segment(expr.span, segment);
+ self.visit_path_segment(segment);
}
ExprKind::Call(ref callee, ref arguments) => {
@@ -3815,9 +3863,9 @@ impl<'a: 'ast, 'b, 'ast> LateResolutionVisitor<'a, 'b, 'ast> {
self.with_constant_rib(
IsRepeatExpr::No,
if argument.is_potential_trivial_const_param() {
- HasGenericParams::Yes
+ ConstantHasGenerics::Yes
} else {
- HasGenericParams::No
+ ConstantHasGenerics::No
},
None,
|this| {
@@ -3895,6 +3943,11 @@ impl<'a: 'ast, 'b, 'ast> LateResolutionVisitor<'a, 'b, 'ast> {
self.resolve_expr(elem, Some(expr));
self.visit_expr(idx);
}
+ ExprKind::Assign(..) => {
+ let old = self.diagnostic_metadata.in_assignment.replace(expr);
+ visit::walk_expr(self, expr);
+ self.diagnostic_metadata.in_assignment = old;
+ }
_ => {
visit::walk_expr(self, expr);
}
@@ -3930,6 +3983,41 @@ impl<'a: 'ast, 'b, 'ast> LateResolutionVisitor<'a, 'b, 'ast> {
Some((ident.name, ns)),
)
}
+
+ /// Construct the list of in-scope lifetime parameters for async lowering.
+ /// We include all lifetime parameters, either named or "Fresh".
+ /// The order of those parameters does not matter, as long as it is
+ /// deterministic.
+ fn record_lifetime_params_for_async(
+ &mut self,
+ fn_id: NodeId,
+ async_node_id: Option<(NodeId, Span)>,
+ ) {
+ if let Some((async_node_id, span)) = async_node_id {
+ let mut extra_lifetime_params =
+ self.r.extra_lifetime_params_map.get(&fn_id).cloned().unwrap_or_default();
+ for rib in self.lifetime_ribs.iter().rev() {
+ extra_lifetime_params.extend(
+ rib.bindings.iter().map(|(&ident, &(node_id, res))| (ident, node_id, res)),
+ );
+ match rib.kind {
+ LifetimeRibKind::Item => break,
+ LifetimeRibKind::AnonymousCreateParameter { binder, .. } => {
+ if let Some(earlier_fresh) = self.r.extra_lifetime_params_map.get(&binder) {
+ extra_lifetime_params.extend(earlier_fresh);
+ }
+ }
+ LifetimeRibKind::Generics { .. } => {}
+ _ => {
+ // We are in a function definition. We should only find `Generics`
+ // and `AnonymousCreateParameter` inside the innermost `Item`.
+ span_bug!(span, "unexpected rib kind: {:?}", rib.kind)
+ }
+ }
+ }
+ self.r.extra_lifetime_params_map.insert(async_node_id, extra_lifetime_params);
+ }
+ }
}
struct LifetimeCountVisitor<'a, 'b> {
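To make the intent of `record_lifetime_params_for_async` concrete, here is a hedged sketch (function, parameter, and lifetime names are illustrative): for an `async fn`, every in-scope lifetime, whether written out or freshly created for an elided reference, is recorded under the async body's `NodeId` so lowering can re-introduce it as a generic parameter on the generated future.

```rust
// Illustrative only. While resolving this item, the rib walk collects the named
// lifetime `'a` from the `Generics` rib plus the fresh lifetime created for
// `&u32` via `AnonymousCreateParameter`, and stores both in
// `extra_lifetime_params_map` keyed by the async body's NodeId.
async fn f<'a>(x: &'a str, y: &u32) -> usize {
    x.len() + *y as usize
}
```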
diff --git a/compiler/rustc_resolve/src/late/diagnostics.rs b/compiler/rustc_resolve/src/late/diagnostics.rs
index 2b1f2b88e..850f023b1 100644
--- a/compiler/rustc_resolve/src/late/diagnostics.rs
+++ b/compiler/rustc_resolve/src/late/diagnostics.rs
@@ -33,15 +33,13 @@ use rustc_span::{BytePos, Span};
use std::iter;
use std::ops::Deref;
-use tracing::debug;
-
type Res = def::Res<ast::NodeId>;
/// A field or associated item from self type suggested in case of resolution failure.
enum AssocSuggestion {
Field,
- MethodWithSelf,
- AssocFn,
+ MethodWithSelf { called: bool },
+ AssocFn { called: bool },
AssocType,
AssocConst,
}
@@ -50,8 +48,14 @@ impl AssocSuggestion {
fn action(&self) -> &'static str {
match self {
AssocSuggestion::Field => "use the available field",
- AssocSuggestion::MethodWithSelf => "call the method with the fully-qualified path",
- AssocSuggestion::AssocFn => "call the associated function",
+ AssocSuggestion::MethodWithSelf { called: true } => {
+ "call the method with the fully-qualified path"
+ }
+ AssocSuggestion::MethodWithSelf { called: false } => {
+ "refer to the method with the fully-qualified path"
+ }
+ AssocSuggestion::AssocFn { called: true } => "call the associated function",
+ AssocSuggestion::AssocFn { called: false } => "refer to the associated function",
AssocSuggestion::AssocConst => "use the associated `const`",
AssocSuggestion::AssocType => "use the associated type",
}
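A rough sketch (struct and method names invented) of why the new `called` flag matters for the wording: the same unresolved path gets the "call …" phrasing when it appears in call position and the "refer to …" phrasing when it is merely named.

```rust
// Illustrative only. Both uses of `assoc` fail to resolve as a value, but the
// first is in call position (`called == true`) and the second only names the
// function (`called == false`), so the help text switches between
// "call the associated function" and "refer to the associated function".
struct S;

impl S {
    fn assoc() {}

    fn f(&self) {
        assoc();        // call position
        let _g = assoc; // reference position
    }
}
```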
@@ -132,6 +136,33 @@ pub(super) enum LifetimeElisionCandidate {
Missing(MissingLifetime),
}
+/// Only used for diagnostics.
+#[derive(Debug)]
+struct BaseError {
+ msg: String,
+ fallback_label: String,
+ span: Span,
+ span_label: Option<(Span, &'static str)>,
+ could_be_expr: bool,
+ suggestion: Option<(Span, &'static str, String)>,
+}
+
+#[derive(Debug)]
+enum TypoCandidate {
+ Typo(TypoSuggestion),
+ Shadowed(Res),
+ None,
+}
+
+impl TypoCandidate {
+ fn to_opt_suggestion(self) -> Option<TypoSuggestion> {
+ match self {
+ TypoCandidate::Typo(sugg) => Some(sugg),
+ TypoCandidate::Shadowed(_) | TypoCandidate::None => None,
+ }
+ }
+}
+
impl<'a: 'ast, 'ast> LateResolutionVisitor<'a, '_, 'ast> {
fn def_span(&self, def_id: DefId) -> Option<Span> {
match def_id.krate {
@@ -140,38 +171,28 @@ impl<'a: 'ast, 'ast> LateResolutionVisitor<'a, '_, 'ast> {
}
}
- /// Handles error reporting for `smart_resolve_path_fragment` function.
- /// Creates base error and amends it with one short label and possibly some longer helps/notes.
- pub(crate) fn smart_resolve_report_errors(
+ fn make_base_error(
&mut self,
path: &[Segment],
span: Span,
source: PathSource<'_>,
res: Option<Res>,
- ) -> (DiagnosticBuilder<'a, ErrorGuaranteed>, Vec<ImportSuggestion>) {
- let ident_span = path.last().map_or(span, |ident| ident.ident.span);
- let ns = source.namespace();
- let is_expected = &|res| source.is_expected(res);
- let is_enum_variant = &|res| matches!(res, Res::Def(DefKind::Variant, _));
-
- debug!(?res, ?source);
-
+ ) -> BaseError {
// Make the base error.
- struct BaseError<'a> {
- msg: String,
- fallback_label: String,
- span: Span,
- could_be_expr: bool,
- suggestion: Option<(Span, &'a str, String)>,
- }
let mut expected = source.descr_expected();
let path_str = Segment::names_to_string(path);
let item_str = path.last().unwrap().ident;
- let base_error = if let Some(res) = res {
+ if let Some(res) = res {
BaseError {
msg: format!("expected {}, found {} `{}`", expected, res.descr(), path_str),
fallback_label: format!("not a {expected}"),
span,
+ span_label: match res {
+ Res::Def(kind, def_id) if kind == DefKind::TyParam => {
+ self.def_span(def_id).map(|span| (span, "found this type parameter"))
+ }
+ _ => None,
+ },
could_be_expr: match res {
Res::Def(DefKind::Fn, _) => {
// Verify whether this is a fn call or an Fn used as a type.
@@ -243,60 +264,132 @@ impl<'a: 'ast, 'ast> LateResolutionVisitor<'a, '_, 'ast> {
.map_or_else(String::new, |res| format!("{} ", res.descr()));
(mod_prefix, format!("`{}`", Segment::names_to_string(mod_path)), None)
};
+
+ let (fallback_label, suggestion) = if path_str == "async"
+ && expected.starts_with("struct")
+ {
+ ("`async` blocks are only allowed in Rust 2018 or later".to_string(), suggestion)
+ } else {
+ // check if we are in situation of typo like `True` instead of `true`.
+ let override_suggestion =
+ if ["true", "false"].contains(&item_str.to_string().to_lowercase().as_str()) {
+ let item_typo = item_str.to_string().to_lowercase();
+ Some((
+ item_span,
+ "you may want to use a bool value instead",
+ format!("{}", item_typo),
+ ))
+ } else {
+ suggestion
+ };
+ (format!("not found in {mod_str}"), override_suggestion)
+ };
+
BaseError {
msg: format!("cannot find {expected} `{item_str}` in {mod_prefix}{mod_str}"),
- fallback_label: if path_str == "async" && expected.starts_with("struct") {
- "`async` blocks are only allowed in Rust 2018 or later".to_string()
- } else {
- format!("not found in {mod_str}")
- },
+ fallback_label,
span: item_span,
+ span_label: None,
could_be_expr: false,
suggestion,
}
- };
+ }
+ }
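A tiny reproduction (variable name invented) of the new special case in `make_base_error`: when the unresolved identifier lowercases to `true` or `false`, the fallback suggestion is overridden with the corresponding bool value.

```rust
// Illustrative only. `True` does not resolve and its lowercase form is "true",
// so the error now carries "you may want to use a bool value instead" with the
// suggested replacement `true`.
fn main() {
    let flag = True; // error[E0425]: cannot find value `True` in this scope
    let _ = flag;
}
```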
+ /// Handles error reporting for `smart_resolve_path_fragment` function.
+ /// Creates base error and amends it with one short label and possibly some longer helps/notes.
+ pub(crate) fn smart_resolve_report_errors(
+ &mut self,
+ path: &[Segment],
+ span: Span,
+ source: PathSource<'_>,
+ res: Option<Res>,
+ ) -> (DiagnosticBuilder<'a, ErrorGuaranteed>, Vec<ImportSuggestion>) {
+ debug!(?res, ?source);
+ let base_error = self.make_base_error(path, span, source, res);
let code = source.error_code(res.is_some());
let mut err =
self.r.session.struct_span_err_with_code(base_error.span, &base_error.msg, code);
self.suggest_swapping_misplaced_self_ty_and_trait(&mut err, source, res, base_error.span);
- if let Some(sugg) = base_error.suggestion {
- err.span_suggestion_verbose(sugg.0, sugg.1, sugg.2, Applicability::MaybeIncorrect);
+ if let Some((span, label)) = base_error.span_label {
+ err.span_label(span, label);
}
- if let Some(span) = self.diagnostic_metadata.current_block_could_be_bare_struct_literal {
- err.multipart_suggestion(
- "you might have meant to write a `struct` literal",
- vec![
- (span.shrink_to_lo(), "{ SomeStruct ".to_string()),
- (span.shrink_to_hi(), "}".to_string()),
- ],
- Applicability::HasPlaceholders,
- );
+ if let Some(ref sugg) = base_error.suggestion {
+ err.span_suggestion_verbose(sugg.0, sugg.1, &sugg.2, Applicability::MaybeIncorrect);
}
- match (source, self.diagnostic_metadata.in_if_condition) {
- (
- PathSource::Expr(_),
- Some(Expr { span: expr_span, kind: ExprKind::Assign(lhs, _, _), .. }),
- ) => {
- // Icky heuristic so we don't suggest:
- // `if (i + 2) = 2` => `if let (i + 2) = 2` (approximately pattern)
- // `if 2 = i` => `if let 2 = i` (lhs needs to contain error span)
- if lhs.is_approximately_pattern() && lhs.span.contains(span) {
- err.span_suggestion_verbose(
- expr_span.shrink_to_lo(),
- "you might have meant to use pattern matching",
- "let ",
- Applicability::MaybeIncorrect,
- );
+
+ self.suggest_bare_struct_literal(&mut err);
+ self.suggest_pattern_match_with_let(&mut err, source, span);
+
+ self.suggest_self_or_self_ref(&mut err, path, span);
+ self.detect_assoct_type_constraint_meant_as_path(&mut err, &base_error);
+ if self.suggest_self_ty(&mut err, source, path, span)
+ || self.suggest_self_value(&mut err, source, path, span)
+ {
+ return (err, Vec::new());
+ }
+
+ let (found, candidates) =
+ self.try_lookup_name_relaxed(&mut err, source, path, span, res, &base_error);
+ if found {
+ return (err, candidates);
+ }
+
+ if !self.type_ascription_suggestion(&mut err, base_error.span) {
+ let mut fallback =
+ self.suggest_trait_and_bounds(&mut err, source, res, span, &base_error);
+ fallback |= self.suggest_typo(&mut err, source, path, span, &base_error);
+ if fallback {
+ // Fallback label.
+ err.span_label(base_error.span, &base_error.fallback_label);
+ }
+ }
+ self.err_code_special_cases(&mut err, source, path, span);
+
+ (err, candidates)
+ }
+
+ fn detect_assoct_type_constraint_meant_as_path(
+ &self,
+ err: &mut Diagnostic,
+ base_error: &BaseError,
+ ) {
+ let Some(ty) = self.diagnostic_metadata.current_type_path else { return; };
+ let TyKind::Path(_, path) = &ty.kind else { return; };
+ for segment in &path.segments {
+ let Some(params) = &segment.args else { continue; };
+ let ast::GenericArgs::AngleBracketed(ref params) = params.deref() else { continue; };
+ for param in &params.args {
+ let ast::AngleBracketedArg::Constraint(constraint) = param else { continue; };
+ let ast::AssocConstraintKind::Bound { bounds } = &constraint.kind else {
+ continue;
+ };
+ for bound in bounds {
+ let ast::GenericBound::Trait(trait_ref, ast::TraitBoundModifier::None)
+ = bound else
+ {
+ continue;
+ };
+ if base_error.span == trait_ref.span {
+ err.span_suggestion_verbose(
+ constraint.ident.span.between(trait_ref.span),
+ "you might have meant to write a path instead of an associated type bound",
+ "::",
+ Applicability::MachineApplicable,
+ );
+ }
}
}
- _ => {}
}
+ }
+ fn suggest_self_or_self_ref(&mut self, err: &mut Diagnostic, path: &[Segment], span: Span) {
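A hedged sketch of the situation `detect_assoct_type_constraint_meant_as_path` targets (the names `Wrap`, `HasAssoc`, `A`, and `Assoc` are invented): a generic argument written as an associated type bound, `A: Assoc`, where the bound path is the thing that failed to resolve and the user most likely meant the path `A::Assoc`, so the `: ` between ident and bound is replaced with `::`.

```rust
// Illustrative only, under the stated assumptions: `Assoc` is not a trait in
// scope, the failing span is exactly the bound, and the enclosing type path is
// recorded in `diagnostic_metadata.current_type_path`, so the resolver suggests
// rewriting `Wrap<A: Assoc>` as `Wrap<A::Assoc>`.
struct Wrap<T>(T);

trait HasAssoc {
    type Assoc;
}

fn f<A: HasAssoc>(_: Wrap<A: Assoc>) {}
```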
let is_assoc_fn = self.self_type_is_available();
+ let Some(path_last_segment) = path.last() else { return };
+ let item_str = path_last_segment.ident;
// Emit help message for fake-self from other languages (e.g., `this` in Javascript).
if ["this", "my"].contains(&item_str.as_str()) && is_assoc_fn {
err.span_suggestion_short(
@@ -331,96 +424,25 @@ impl<'a: 'ast, 'ast> LateResolutionVisitor<'a, '_, 'ast> {
}
}
}
+ }
- self.detect_assoct_type_constraint_meant_as_path(base_error.span, &mut err);
-
- // Emit special messages for unresolved `Self` and `self`.
- if is_self_type(path, ns) {
- err.code(rustc_errors::error_code!(E0411));
- err.span_label(
- span,
- "`Self` is only available in impls, traits, and type definitions".to_string(),
- );
- if let Some(item_kind) = self.diagnostic_metadata.current_item {
- err.span_label(
- item_kind.ident.span,
- format!(
- "`Self` not allowed in {} {}",
- item_kind.kind.article(),
- item_kind.kind.descr()
- ),
- );
- }
- return (err, Vec::new());
- }
- if is_self_value(path, ns) {
- debug!("smart_resolve_path_fragment: E0424, source={:?}", source);
-
- err.code(rustc_errors::error_code!(E0424));
- err.span_label(span, match source {
- PathSource::Pat => "`self` value is a keyword and may not be bound to variables or shadowed",
- _ => "`self` value is a keyword only available in methods with a `self` parameter",
- });
- if let Some((fn_kind, span)) = &self.diagnostic_metadata.current_function {
- // The current function has a `self' parameter, but we were unable to resolve
- // a reference to `self`. This can only happen if the `self` identifier we
- // are resolving came from a different hygiene context.
- if fn_kind.decl().inputs.get(0).map_or(false, |p| p.is_self()) {
- err.span_label(*span, "this function has a `self` parameter, but a macro invocation can only access identifiers it receives from parameters");
- } else {
- let doesnt = if is_assoc_fn {
- let (span, sugg) = fn_kind
- .decl()
- .inputs
- .get(0)
- .map(|p| (p.span.shrink_to_lo(), "&self, "))
- .unwrap_or_else(|| {
- // Try to look for the "(" after the function name, if possible.
- // This avoids placing the suggestion into the visibility specifier.
- let span = fn_kind
- .ident()
- .map_or(*span, |ident| span.with_lo(ident.span.hi()));
- (
- self.r
- .session
- .source_map()
- .span_through_char(span, '(')
- .shrink_to_hi(),
- "&self",
- )
- });
- err.span_suggestion_verbose(
- span,
- "add a `self` receiver parameter to make the associated `fn` a method",
- sugg,
- Applicability::MaybeIncorrect,
- );
- "doesn't"
- } else {
- "can't"
- };
- if let Some(ident) = fn_kind.ident() {
- err.span_label(
- ident.span,
- &format!("this function {} have a `self` parameter", doesnt),
- );
- }
- }
- } else if let Some(item_kind) = self.diagnostic_metadata.current_item {
- err.span_label(
- item_kind.ident.span,
- format!(
- "`self` not allowed in {} {}",
- item_kind.kind.article(),
- item_kind.kind.descr()
- ),
- );
- }
- return (err, Vec::new());
- }
-
+ fn try_lookup_name_relaxed(
+ &mut self,
+ err: &mut DiagnosticBuilder<'_, ErrorGuaranteed>,
+ source: PathSource<'_>,
+ path: &[Segment],
+ span: Span,
+ res: Option<Res>,
+ base_error: &BaseError,
+ ) -> (bool, Vec<ImportSuggestion>) {
// Try to lookup name in more relaxed fashion for better error reporting.
let ident = path.last().unwrap().ident;
+ let is_expected = &|res| source.is_expected(res);
+ let ns = source.namespace();
+ let is_enum_variant = &|res| matches!(res, Res::Def(DefKind::Variant, _));
+ let path_str = Segment::names_to_string(path);
+ let ident_span = path.last().map_or(span, |ident| ident.ident.span);
+
let mut candidates = self
.r
.lookup_import_candidates(ident, ns, &self.parent_scope, is_expected)
@@ -467,7 +489,7 @@ impl<'a: 'ast, 'ast> LateResolutionVisitor<'a, '_, 'ast> {
{
// Already reported this issue on the lhs of the type ascription.
err.delay_as_bug();
- return (err, candidates);
+ return (true, candidates);
}
}
@@ -495,10 +517,14 @@ impl<'a: 'ast, 'ast> LateResolutionVisitor<'a, '_, 'ast> {
);
}
}
+
// Try Levenshtein algorithm.
- let typo_sugg = self.lookup_typo_candidate(path, ns, is_expected);
+ let typo_sugg =
+ self.lookup_typo_candidate(path, source.namespace(), is_expected).to_opt_suggestion();
if path.len() == 1 && self.self_type_is_available() {
- if let Some(candidate) = self.lookup_assoc_candidate(ident, ns, is_expected) {
+ if let Some(candidate) =
+ self.lookup_assoc_candidate(ident, ns, is_expected, source.is_call())
+ {
let self_is_available = self.self_value_is_available(path[0].ident.span);
match candidate {
AssocSuggestion::Field => {
@@ -513,16 +539,21 @@ impl<'a: 'ast, 'ast> LateResolutionVisitor<'a, '_, 'ast> {
err.span_label(span, "a field by this name exists in `Self`");
}
}
- AssocSuggestion::MethodWithSelf if self_is_available => {
+ AssocSuggestion::MethodWithSelf { called } if self_is_available => {
+ let msg = if called {
+ "you might have meant to call the method"
+ } else {
+ "you might have meant to refer to the method"
+ };
err.span_suggestion(
span,
- "you might have meant to call the method",
+ msg,
format!("self.{path_str}"),
Applicability::MachineApplicable,
);
}
- AssocSuggestion::MethodWithSelf
- | AssocSuggestion::AssocFn
+ AssocSuggestion::MethodWithSelf { .. }
+ | AssocSuggestion::AssocFn { .. }
| AssocSuggestion::AssocConst
| AssocSuggestion::AssocType => {
err.span_suggestion(
@@ -533,8 +564,8 @@ impl<'a: 'ast, 'ast> LateResolutionVisitor<'a, '_, 'ast> {
);
}
}
- self.r.add_typo_suggestion(&mut err, typo_sugg, ident_span);
- return (err, candidates);
+ self.r.add_typo_suggestion(err, typo_sugg, ident_span);
+ return (true, candidates);
}
// If the first argument in call is `self` suggest calling a method.
@@ -552,14 +583,14 @@ impl<'a: 'ast, 'ast> LateResolutionVisitor<'a, '_, 'ast> {
format!("self.{path_str}({args_snippet})"),
Applicability::MachineApplicable,
);
- return (err, candidates);
+ return (true, candidates);
}
}
// Try context-dependent help if relaxed lookup didn't work.
if let Some(res) = res {
if self.smart_resolve_context_dependent_help(
- &mut err,
+ err,
span,
source,
res,
@@ -567,106 +598,148 @@ impl<'a: 'ast, 'ast> LateResolutionVisitor<'a, '_, 'ast> {
&base_error.fallback_label,
) {
// We do this to avoid losing a secondary span when we override the main error span.
- self.r.add_typo_suggestion(&mut err, typo_sugg, ident_span);
- return (err, candidates);
+ self.r.add_typo_suggestion(err, typo_sugg, ident_span);
+ return (true, candidates);
}
}
+ return (false, candidates);
+ }
+ fn suggest_trait_and_bounds(
+ &mut self,
+ err: &mut DiagnosticBuilder<'_, ErrorGuaranteed>,
+ source: PathSource<'_>,
+ res: Option<Res>,
+ span: Span,
+ base_error: &BaseError,
+ ) -> bool {
let is_macro =
base_error.span.from_expansion() && base_error.span.desugaring_kind().is_none();
- if !self.type_ascription_suggestion(&mut err, base_error.span) {
- let mut fallback = false;
- if let (
- PathSource::Trait(AliasPossibility::Maybe),
- Some(Res::Def(DefKind::Struct | DefKind::Enum | DefKind::Union, _)),
- false,
- ) = (source, res, is_macro)
- {
- if let Some(bounds @ [_, .., _]) = self.diagnostic_metadata.current_trait_object {
- fallback = true;
- let spans: Vec<Span> = bounds
- .iter()
- .map(|bound| bound.span())
- .filter(|&sp| sp != base_error.span)
- .collect();
+ let mut fallback = false;
- let start_span = bounds.iter().map(|bound| bound.span()).next().unwrap();
- // `end_span` is the end of the poly trait ref (Foo + 'baz + Bar><)
- let end_span = bounds.iter().map(|bound| bound.span()).last().unwrap();
- // `last_bound_span` is the last bound of the poly trait ref (Foo + >'baz< + Bar)
- let last_bound_span = spans.last().cloned().unwrap();
- let mut multi_span: MultiSpan = spans.clone().into();
- for sp in spans {
- let msg = if sp == last_bound_span {
- format!(
- "...because of {these} bound{s}",
- these = pluralize!("this", bounds.len() - 1),
- s = pluralize!(bounds.len() - 1),
- )
- } else {
- String::new()
- };
- multi_span.push_span_label(sp, msg);
- }
- multi_span
- .push_span_label(base_error.span, "expected this type to be a trait...");
- err.span_help(
- multi_span,
- "`+` is used to constrain a \"trait object\" type with lifetimes or \
- auto-traits; structs and enums can't be bound in that way",
- );
- if bounds.iter().all(|bound| match bound {
- ast::GenericBound::Outlives(_) => true,
- ast::GenericBound::Trait(tr, _) => tr.span == base_error.span,
- }) {
- let mut sugg = vec![];
- if base_error.span != start_span {
- sugg.push((start_span.until(base_error.span), String::new()));
- }
- if base_error.span != end_span {
- sugg.push((base_error.span.shrink_to_hi().to(end_span), String::new()));
- }
+ if let (
+ PathSource::Trait(AliasPossibility::Maybe),
+ Some(Res::Def(DefKind::Struct | DefKind::Enum | DefKind::Union, _)),
+ false,
+ ) = (source, res, is_macro)
+ {
+ if let Some(bounds @ [_, .., _]) = self.diagnostic_metadata.current_trait_object {
+ fallback = true;
+ let spans: Vec<Span> = bounds
+ .iter()
+ .map(|bound| bound.span())
+ .filter(|&sp| sp != base_error.span)
+ .collect();
- err.multipart_suggestion(
- "if you meant to use a type and not a trait here, remove the bounds",
- sugg,
- Applicability::MaybeIncorrect,
- );
+ let start_span = bounds[0].span();
+ // `end_span` is the end of the poly trait ref (Foo + 'baz + Bar><)
+ let end_span = bounds.last().unwrap().span();
+ // `last_bound_span` is the last bound of the poly trait ref (Foo + >'baz< + Bar)
+ let last_bound_span = spans.last().cloned().unwrap();
+ let mut multi_span: MultiSpan = spans.clone().into();
+ for sp in spans {
+ let msg = if sp == last_bound_span {
+ format!(
+ "...because of {these} bound{s}",
+ these = pluralize!("this", bounds.len() - 1),
+ s = pluralize!(bounds.len() - 1),
+ )
+ } else {
+ String::new()
+ };
+ multi_span.push_span_label(sp, msg);
+ }
+ multi_span.push_span_label(base_error.span, "expected this type to be a trait...");
+ err.span_help(
+ multi_span,
+ "`+` is used to constrain a \"trait object\" type with lifetimes or \
+ auto-traits; structs and enums can't be bound in that way",
+ );
+ if bounds.iter().all(|bound| match bound {
+ ast::GenericBound::Outlives(_) => true,
+ ast::GenericBound::Trait(tr, _) => tr.span == base_error.span,
+ }) {
+ let mut sugg = vec![];
+ if base_error.span != start_span {
+ sugg.push((start_span.until(base_error.span), String::new()));
+ }
+ if base_error.span != end_span {
+ sugg.push((base_error.span.shrink_to_hi().to(end_span), String::new()));
}
+
+ err.multipart_suggestion(
+ "if you meant to use a type and not a trait here, remove the bounds",
+ sugg,
+ Applicability::MaybeIncorrect,
+ );
}
}
+ }
- fallback |= self.restrict_assoc_type_in_where_clause(span, &mut err);
+ fallback |= self.restrict_assoc_type_in_where_clause(span, err);
+ fallback
+ }
- if !self.r.add_typo_suggestion(&mut err, typo_sugg, ident_span) {
- fallback = true;
- match self.diagnostic_metadata.current_let_binding {
- Some((pat_sp, Some(ty_sp), None))
- if ty_sp.contains(base_error.span) && base_error.could_be_expr =>
- {
- err.span_suggestion_short(
- pat_sp.between(ty_sp),
- "use `=` if you meant to assign",
- " = ",
- Applicability::MaybeIncorrect,
- );
- }
- _ => {}
+ fn suggest_typo(
+ &mut self,
+ err: &mut DiagnosticBuilder<'_, ErrorGuaranteed>,
+ source: PathSource<'_>,
+ path: &[Segment],
+ span: Span,
+ base_error: &BaseError,
+ ) -> bool {
+ let is_expected = &|res| source.is_expected(res);
+ let ident_span = path.last().map_or(span, |ident| ident.ident.span);
+ let typo_sugg = self.lookup_typo_candidate(path, source.namespace(), is_expected);
+ if let TypoCandidate::Shadowed(res) = typo_sugg
+ && let Some(id) = res.opt_def_id()
+ && let Some(sugg_span) = self.r.opt_span(id)
+ {
+ err.span_label(
+ sugg_span,
+ format!("you might have meant to refer to this {}", res.descr()),
+ );
+ return true;
+ }
+ let mut fallback = false;
+ let typo_sugg = typo_sugg.to_opt_suggestion();
+ if !self.r.add_typo_suggestion(err, typo_sugg, ident_span) {
+ fallback = true;
+ match self.diagnostic_metadata.current_let_binding {
+ Some((pat_sp, Some(ty_sp), None))
+ if ty_sp.contains(base_error.span) && base_error.could_be_expr =>
+ {
+ err.span_suggestion_short(
+ pat_sp.between(ty_sp),
+ "use `=` if you meant to assign",
+ " = ",
+ Applicability::MaybeIncorrect,
+ );
}
-
- // If the trait has a single item (which wasn't matched by Levenshtein), suggest it
- let suggestion = self.get_single_associated_item(&path, &source, is_expected);
- self.r.add_typo_suggestion(&mut err, suggestion, ident_span);
+ _ => {}
}
- if fallback {
- // Fallback label.
- err.span_label(base_error.span, base_error.fallback_label);
+
+ // If the trait has a single item (which wasn't matched by Levenshtein), suggest it
+ let suggestion = self.get_single_associated_item(&path, &source, is_expected);
+ if !self.r.add_typo_suggestion(err, suggestion, ident_span) {
+ fallback = !self.let_binding_suggestion(err, ident_span);
}
}
+ fallback
+ }
+
+ fn err_code_special_cases(
+ &mut self,
+ err: &mut DiagnosticBuilder<'_, ErrorGuaranteed>,
+ source: PathSource<'_>,
+ path: &[Segment],
+ span: Span,
+ ) {
if let Some(err_code) = &err.code {
if err_code == &rustc_errors::error_code!(E0425) {
for label_rib in &self.label_ribs {
for (label_ident, node_id) in &label_rib.bindings {
+ let ident = path.last().unwrap().ident;
if format!("'{}", ident) == label_ident.to_string() {
err.span_label(label_ident.span, "a label with a similar name exists");
if let PathSource::Expr(Some(Expr {
@@ -697,38 +770,116 @@ impl<'a: 'ast, 'ast> LateResolutionVisitor<'a, '_, 'ast> {
}
}
}
+ }
- (err, candidates)
+ /// Emit special messages for unresolved `Self` and `self`.
+ fn suggest_self_ty(
+ &mut self,
+ err: &mut Diagnostic,
+ source: PathSource<'_>,
+ path: &[Segment],
+ span: Span,
+ ) -> bool {
+ if !is_self_type(path, source.namespace()) {
+ return false;
+ }
+ err.code(rustc_errors::error_code!(E0411));
+ err.span_label(
+ span,
+ "`Self` is only available in impls, traits, and type definitions".to_string(),
+ );
+ if let Some(item_kind) = self.diagnostic_metadata.current_item {
+ err.span_label(
+ item_kind.ident.span,
+ format!(
+ "`Self` not allowed in {} {}",
+ item_kind.kind.article(),
+ item_kind.kind.descr()
+ ),
+ );
+ }
+ true
}
- fn detect_assoct_type_constraint_meant_as_path(&self, base_span: Span, err: &mut Diagnostic) {
- let Some(ty) = self.diagnostic_metadata.current_type_path else { return; };
- let TyKind::Path(_, path) = &ty.kind else { return; };
- for segment in &path.segments {
- let Some(params) = &segment.args else { continue; };
- let ast::GenericArgs::AngleBracketed(ref params) = params.deref() else { continue; };
- for param in &params.args {
- let ast::AngleBracketedArg::Constraint(constraint) = param else { continue; };
- let ast::AssocConstraintKind::Bound { bounds } = &constraint.kind else {
- continue;
+ fn suggest_self_value(
+ &mut self,
+ err: &mut Diagnostic,
+ source: PathSource<'_>,
+ path: &[Segment],
+ span: Span,
+ ) -> bool {
+ if !is_self_value(path, source.namespace()) {
+ return false;
+ }
+
+ debug!("smart_resolve_path_fragment: E0424, source={:?}", source);
+ err.code(rustc_errors::error_code!(E0424));
+ err.span_label(
+ span,
+ match source {
+ PathSource::Pat => {
+ "`self` value is a keyword and may not be bound to variables or shadowed"
+ }
+ _ => "`self` value is a keyword only available in methods with a `self` parameter",
+ },
+ );
+ let is_assoc_fn = self.self_type_is_available();
+ if let Some((fn_kind, span)) = &self.diagnostic_metadata.current_function {
+ // The current function has a `self' parameter, but we were unable to resolve
+ // a reference to `self`. This can only happen if the `self` identifier we
+ // are resolving came from a different hygiene context.
+ if fn_kind.decl().inputs.get(0).map_or(false, |p| p.is_self()) {
+ err.span_label(*span, "this function has a `self` parameter, but a macro invocation can only access identifiers it receives from parameters");
+ } else {
+ let doesnt = if is_assoc_fn {
+ let (span, sugg) = fn_kind
+ .decl()
+ .inputs
+ .get(0)
+ .map(|p| (p.span.shrink_to_lo(), "&self, "))
+ .unwrap_or_else(|| {
+ // Try to look for the "(" after the function name, if possible.
+ // This avoids placing the suggestion into the visibility specifier.
+ let span = fn_kind
+ .ident()
+ .map_or(*span, |ident| span.with_lo(ident.span.hi()));
+ (
+ self.r
+ .session
+ .source_map()
+ .span_through_char(span, '(')
+ .shrink_to_hi(),
+ "&self",
+ )
+ });
+ err.span_suggestion_verbose(
+ span,
+ "add a `self` receiver parameter to make the associated `fn` a method",
+ sugg,
+ Applicability::MaybeIncorrect,
+ );
+ "doesn't"
+ } else {
+ "can't"
};
- for bound in bounds {
- let ast::GenericBound::Trait(trait_ref, ast::TraitBoundModifier::None)
- = bound else
- {
- continue;
- };
- if base_span == trait_ref.span {
- err.span_suggestion_verbose(
- constraint.ident.span.between(trait_ref.span),
- "you might have meant to write a path instead of an associated type bound",
- "::",
- Applicability::MachineApplicable,
- );
- }
+ if let Some(ident) = fn_kind.ident() {
+ err.span_label(
+ ident.span,
+ &format!("this function {} have a `self` parameter", doesnt),
+ );
}
}
+ } else if let Some(item_kind) = self.diagnostic_metadata.current_item {
+ err.span_label(
+ item_kind.ident.span,
+ format!(
+ "`self` not allowed in {} {}",
+ item_kind.kind.article(),
+ item_kind.kind.descr()
+ ),
+ );
}
+ true
}
fn suggest_swapping_misplaced_self_ty_and_trait(
@@ -760,6 +911,45 @@ impl<'a: 'ast, 'ast> LateResolutionVisitor<'a, '_, 'ast> {
}
}
+ fn suggest_bare_struct_literal(&mut self, err: &mut Diagnostic) {
+ if let Some(span) = self.diagnostic_metadata.current_block_could_be_bare_struct_literal {
+ err.multipart_suggestion(
+ "you might have meant to write a `struct` literal",
+ vec![
+ (span.shrink_to_lo(), "{ SomeStruct ".to_string()),
+ (span.shrink_to_hi(), "}".to_string()),
+ ],
+ Applicability::HasPlaceholders,
+ );
+ }
+ }
+
+ fn suggest_pattern_match_with_let(
+ &mut self,
+ err: &mut Diagnostic,
+ source: PathSource<'_>,
+ span: Span,
+ ) {
+ if let PathSource::Expr(_) = source &&
+ let Some(Expr {
+ span: expr_span,
+ kind: ExprKind::Assign(lhs, _, _),
+ ..
+ }) = self.diagnostic_metadata.in_if_condition {
+ // Icky heuristic so we don't suggest:
+ // `if (i + 2) = 2` => `if let (i + 2) = 2` (approximately pattern)
+ // `if 2 = i` => `if let 2 = i` (lhs needs to contain error span)
+ if lhs.is_approximately_pattern() && lhs.span.contains(span) {
+ err.span_suggestion_verbose(
+ expr_span.shrink_to_lo(),
+ "you might have meant to use pattern matching",
+ "let ",
+ Applicability::MaybeIncorrect,
+ );
+ }
+ }
+ }
+
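A short reproduction (names invented) of what `suggest_pattern_match_with_let` handles: an `if` condition that is an assignment whose left-hand side looks like a pattern and contains the unresolved name, which most likely means a missing `let`.

```rust
// Illustrative only. `x` is unresolved, the condition is the assignment
// `Some(x) = opt`, the LHS is approximately a pattern and contains the error
// span, so the resolver suggests inserting `let ` to form `if let Some(x) = opt`.
fn main() {
    let opt = Some(1);
    if Some(x) = opt {} // error[E0425]: cannot find value `x` in this scope
}
```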
fn get_single_associated_item(
&mut self,
path: &[Segment],
@@ -822,11 +1012,10 @@ impl<'a: 'ast, 'ast> LateResolutionVisitor<'a, '_, 'ast> {
let Some(partial_res) = self.r.partial_res_map.get(&bounded_ty.id) else {
return false;
};
- if !(matches!(
- partial_res.base_res(),
- hir::def::Res::Def(hir::def::DefKind::AssocTy, _)
- ) && partial_res.unresolved_segments() == 0)
- {
+ if !matches!(
+ partial_res.full_res(),
+ Some(hir::def::Res::Def(hir::def::DefKind::AssocTy, _))
+ ) {
return false;
}
(ty, position, path)
@@ -840,11 +1029,10 @@ impl<'a: 'ast, 'ast> LateResolutionVisitor<'a, '_, 'ast> {
let Some(partial_res) = self.r.partial_res_map.get(&peeled_ty.id) else {
return false;
};
- if !(matches!(
- partial_res.base_res(),
- hir::def::Res::Def(hir::def::DefKind::TyParam, _)
- ) && partial_res.unresolved_segments() == 0)
- {
+ if !matches!(
+ partial_res.full_res(),
+ Some(hir::def::Res::Def(hir::def::DefKind::TyParam, _))
+ ) {
return false;
}
if let (
@@ -932,41 +1120,14 @@ impl<'a: 'ast, 'ast> LateResolutionVisitor<'a, '_, 'ast> {
// where a brace being opened means a block is being started. Look
// ahead for the next text to see if `span` is followed by a `{`.
let sm = self.r.session.source_map();
- let mut sp = span;
- loop {
- sp = sm.next_point(sp);
- match sm.span_to_snippet(sp) {
- Ok(ref snippet) => {
- if snippet.chars().any(|c| !c.is_whitespace()) {
- break;
- }
- }
- _ => break,
- }
- }
+ let sp = sm.span_look_ahead(span, None, Some(50));
let followed_by_brace = matches!(sm.span_to_snippet(sp), Ok(ref snippet) if snippet == "{");
// In case this could be a struct literal that needs to be surrounded
// by parentheses, find the appropriate span.
- let mut i = 0;
- let mut closing_brace = None;
- loop {
- sp = sm.next_point(sp);
- match sm.span_to_snippet(sp) {
- Ok(ref snippet) => {
- if snippet == "}" {
- closing_brace = Some(span.to(sp));
- break;
- }
- }
- _ => break,
- }
- i += 1;
- // The bigger the span, the more likely we're incorrect --
- // bound it to 100 chars long.
- if i > 100 {
- break;
- }
- }
+ let closing_span = sm.span_look_ahead(span, Some("}"), Some(50));
+ let closing_brace: Option<Span> = sm
+ .span_to_snippet(closing_span)
+ .map_or(None, |s| if s == "}" { Some(span.to(closing_span)) } else { None });
(followed_by_brace, closing_brace)
}
@@ -985,27 +1146,45 @@ impl<'a: 'ast, 'ast> LateResolutionVisitor<'a, '_, 'ast> {
let ns = source.namespace();
let is_expected = &|res| source.is_expected(res);
- let path_sep = |err: &mut Diagnostic, expr: &Expr| match expr.kind {
- ExprKind::Field(_, ident) => {
+ let path_sep = |err: &mut Diagnostic, expr: &Expr, kind: DefKind| {
+ const MESSAGE: &str = "use the path separator to refer to an item";
+
+ let (lhs_span, rhs_span) = match &expr.kind {
+ ExprKind::Field(base, ident) => (base.span, ident.span),
+ ExprKind::MethodCall(_, receiver, _, span) => (receiver.span, *span),
+ _ => return false,
+ };
+
+ if lhs_span.eq_ctxt(rhs_span) {
err.span_suggestion(
- expr.span,
- "use the path separator to refer to an item",
- format!("{}::{}", path_str, ident),
+ lhs_span.between(rhs_span),
+ MESSAGE,
+ "::",
Applicability::MaybeIncorrect,
);
true
- }
- ExprKind::MethodCall(ref segment, ..) => {
- let span = expr.span.with_hi(segment.ident.span.hi());
- err.span_suggestion(
- span,
- "use the path separator to refer to an item",
- format!("{}::{}", path_str, segment.ident),
+ } else if kind == DefKind::Struct
+ && let Some(lhs_source_span) = lhs_span.find_ancestor_inside(expr.span)
+ && let Ok(snippet) = self.r.session.source_map().span_to_snippet(lhs_source_span)
+ {
+ // The LHS is a type that originates from a macro call.
+ // We have to add angle brackets around it.
+
+ err.span_suggestion_verbose(
+ lhs_source_span.until(rhs_span),
+ MESSAGE,
+ format!("<{snippet}>::"),
Applicability::MaybeIncorrect,
);
true
+ } else {
+ // Either we were unable to obtain the source span / the snippet or
+ // the LHS originates from a macro call and it is not a type and thus
+ // there is no way to replace `.` with `::` and still somehow suggest
+ // valid Rust code.
+
+ false
}
- _ => false,
};
let find_span = |source: &PathSource<'_>, err: &mut Diagnostic| {
@@ -1027,7 +1206,7 @@ impl<'a: 'ast, 'ast> LateResolutionVisitor<'a, '_, 'ast> {
match source {
PathSource::Expr(Some(
parent @ Expr { kind: ExprKind::Field(..) | ExprKind::MethodCall(..), .. },
- )) if path_sep(err, &parent) => {}
+ )) if path_sep(err, &parent, DefKind::Struct) => {}
PathSource::Expr(
None
| Some(Expr {
@@ -1143,8 +1322,11 @@ impl<'a: 'ast, 'ast> LateResolutionVisitor<'a, '_, 'ast> {
}
}
}
- (Res::Def(DefKind::Mod, _), PathSource::Expr(Some(parent))) => {
- if !path_sep(err, &parent) {
+ (
+ Res::Def(kind @ (DefKind::Mod | DefKind::Trait), _),
+ PathSource::Expr(Some(parent)),
+ ) => {
+ if !path_sep(err, &parent, kind) {
return false;
}
}
@@ -1282,7 +1464,7 @@ impl<'a: 'ast, 'ast> LateResolutionVisitor<'a, '_, 'ast> {
Applicability::HasPlaceholders,
);
}
- (Res::SelfTy { .. }, _) if ns == ValueNS => {
+ (Res::SelfTyParam { .. } | Res::SelfTyAlias { .. }, _) if ns == ValueNS => {
err.span_label(span, fallback_label);
err.note("can't use `Self` as a constructor, you must use the implemented struct");
}
@@ -1315,7 +1497,7 @@ impl<'a: 'ast, 'ast> LateResolutionVisitor<'a, '_, 'ast> {
.filter(|(_, res)| match (kind, res) {
(AssocItemKind::Const(..), Res::Def(DefKind::AssocConst, _)) => true,
(AssocItemKind::Fn(_), Res::Def(DefKind::AssocFn, _)) => true,
- (AssocItemKind::TyAlias(..), Res::Def(DefKind::AssocTy, _)) => true,
+ (AssocItemKind::Type(..), Res::Def(DefKind::AssocTy, _)) => true,
_ => false,
})
.map(|(key, _)| key.ident.name)
@@ -1329,6 +1511,7 @@ impl<'a: 'ast, 'ast> LateResolutionVisitor<'a, '_, 'ast> {
ident: Ident,
ns: Namespace,
filter_fn: FilterFn,
+ called: bool,
) -> Option<AssocSuggestion>
where
FilterFn: Fn(Res) -> bool,
@@ -1351,20 +1534,14 @@ impl<'a: 'ast, 'ast> LateResolutionVisitor<'a, '_, 'ast> {
{
// Look for a field with the same name in the current self_type.
if let Some(resolution) = self.r.partial_res_map.get(&node_id) {
- match resolution.base_res() {
- Res::Def(DefKind::Struct | DefKind::Union, did)
- if resolution.unresolved_segments() == 0 =>
- {
- if let Some(field_names) = self.r.field_names.get(&did) {
- if field_names
- .iter()
- .any(|&field_name| ident.name == field_name.node)
- {
- return Some(AssocSuggestion::Field);
- }
+ if let Some(Res::Def(DefKind::Struct | DefKind::Union, did)) =
+ resolution.full_res()
+ {
+ if let Some(field_names) = self.r.field_names.get(&did) {
+ if field_names.iter().any(|&field_name| ident.name == field_name.node) {
+ return Some(AssocSuggestion::Field);
}
}
- _ => {}
}
}
}
@@ -1376,10 +1553,10 @@ impl<'a: 'ast, 'ast> LateResolutionVisitor<'a, '_, 'ast> {
return Some(match &assoc_item.kind {
ast::AssocItemKind::Const(..) => AssocSuggestion::AssocConst,
ast::AssocItemKind::Fn(box ast::Fn { sig, .. }) if sig.decl.has_self() => {
- AssocSuggestion::MethodWithSelf
+ AssocSuggestion::MethodWithSelf { called }
}
- ast::AssocItemKind::Fn(..) => AssocSuggestion::AssocFn,
- ast::AssocItemKind::TyAlias(..) => AssocSuggestion::AssocType,
+ ast::AssocItemKind::Fn(..) => AssocSuggestion::AssocFn { called },
+ ast::AssocItemKind::Type(..) => AssocSuggestion::AssocType,
ast::AssocItemKind::MacCall(_) => continue,
});
}
@@ -1397,10 +1574,12 @@ impl<'a: 'ast, 'ast> LateResolutionVisitor<'a, '_, 'ast> {
let res = binding.res();
if filter_fn(res) {
if self.r.has_self.contains(&res.def_id()) {
- return Some(AssocSuggestion::MethodWithSelf);
+ return Some(AssocSuggestion::MethodWithSelf { called });
} else {
match res {
- Res::Def(DefKind::AssocFn, _) => return Some(AssocSuggestion::AssocFn),
+ Res::Def(DefKind::AssocFn, _) => {
+ return Some(AssocSuggestion::AssocFn { called });
+ }
Res::Def(DefKind::AssocConst, _) => {
return Some(AssocSuggestion::AssocConst);
}
@@ -1422,22 +1601,38 @@ impl<'a: 'ast, 'ast> LateResolutionVisitor<'a, '_, 'ast> {
path: &[Segment],
ns: Namespace,
filter_fn: &impl Fn(Res) -> bool,
- ) -> Option<TypoSuggestion> {
+ ) -> TypoCandidate {
let mut names = Vec::new();
if path.len() == 1 {
+ let mut ctxt = path.last().unwrap().ident.span.ctxt();
+
// Search in lexical scope.
// Walk backwards up the ribs in scope and collect candidates.
for rib in self.ribs[ns].iter().rev() {
+ let rib_ctxt = if rib.kind.contains_params() {
+ ctxt.normalize_to_macros_2_0()
+ } else {
+ ctxt.normalize_to_macro_rules()
+ };
+
// Locals and type parameters
for (ident, &res) in &rib.bindings {
- if filter_fn(res) {
+ if filter_fn(res) && ident.span.ctxt() == rib_ctxt {
names.push(TypoSuggestion::typo_from_res(ident.name, res));
}
}
+
+ if let RibKind::MacroDefinition(def) = rib.kind && def == self.r.macro_def(ctxt) {
+ // If an invocation of this macro created `ident`, give up on `ident`
+ // and switch to `ident`'s source from the macro definition.
+ ctxt.remove_mark();
+ continue;
+ }
+
// Items in scope
if let RibKind::ModuleRibKind(module) = rib.kind {
// Items from this module
- self.r.add_module_candidates(module, &mut names, &filter_fn);
+ self.r.add_module_candidates(module, &mut names, &filter_fn, Some(ctxt));
if let ModuleKind::Block = module.kind {
// We can see through blocks
@@ -1463,7 +1658,7 @@ impl<'a: 'ast, 'ast> LateResolutionVisitor<'a, '_, 'ast> {
}));
if let Some(prelude) = self.r.prelude {
- self.r.add_module_candidates(prelude, &mut names, &filter_fn);
+ self.r.add_module_candidates(prelude, &mut names, &filter_fn, None);
}
}
break;
@@ -1482,7 +1677,7 @@ impl<'a: 'ast, 'ast> LateResolutionVisitor<'a, '_, 'ast> {
if let PathResult::Module(ModuleOrUniformRoot::Module(module)) =
self.resolve_path(mod_path, Some(TypeNS), None)
{
- self.r.add_module_candidates(module, &mut names, &filter_fn);
+ self.r.add_module_candidates(module, &mut names, &filter_fn, None);
}
}
@@ -1495,10 +1690,17 @@ impl<'a: 'ast, 'ast> LateResolutionVisitor<'a, '_, 'ast> {
name,
None,
) {
- Some(found) if found != name => {
- names.into_iter().find(|suggestion| suggestion.candidate == found)
+ Some(found) => {
+ let Some(sugg) = names.into_iter().find(|suggestion| suggestion.candidate == found) else {
+ return TypoCandidate::None;
+ };
+ if found == name {
+ TypoCandidate::Shadowed(sugg.res)
+ } else {
+ TypoCandidate::Typo(sugg)
+ }
}
- _ => None,
+ _ => TypoCandidate::None,
}
}
@@ -1568,26 +1770,16 @@ impl<'a: 'ast, 'ast> LateResolutionVisitor<'a, '_, 'ast> {
}
}
if let Ok(base_snippet) = base_snippet {
- let mut sp = after_colon_sp;
- for _ in 0..100 {
- // Try to find an assignment
- sp = sm.next_point(sp);
- let snippet = sm.span_to_snippet(sp.to(sm.next_point(sp)));
- match snippet {
- Ok(ref x) if x.as_str() == "=" => {
- err.span_suggestion(
- base_span,
- "maybe you meant to write an assignment here",
- format!("let {}", base_snippet),
- Applicability::MaybeIncorrect,
- );
- show_label = false;
- break;
- }
- Ok(ref x) if x.as_str() == "\n" => break,
- Err(_) => break,
- Ok(_) => {}
- }
+ // Try to find an assignment
+ let eq_span = sm.span_look_ahead(after_colon_sp, Some("="), Some(50));
+ if let Ok(ref snippet) = sm.span_to_snippet(eq_span) && snippet == "=" {
+ err.span_suggestion(
+ base_span,
+ "maybe you meant to write an assignment here",
+ format!("let {}", base_snippet),
+ Applicability::MaybeIncorrect,
+ );
+ show_label = false;
}
}
}
@@ -1604,6 +1796,31 @@ impl<'a: 'ast, 'ast> LateResolutionVisitor<'a, '_, 'ast> {
false
}
+ fn let_binding_suggestion(&self, err: &mut Diagnostic, ident_span: Span) -> bool {
+ // try to give a suggestion for this pattern: `name = 1`, which is common in other languages
+ let mut added_suggestion = false;
+ if let Some(Expr { kind: ExprKind::Assign(lhs, _rhs, _), .. }) = self.diagnostic_metadata.in_assignment &&
+ let ast::ExprKind::Path(None, _) = lhs.kind {
+ let sm = self.r.session.source_map();
+ let line_span = sm.span_extend_to_line(ident_span);
+ let ident_name = sm.span_to_snippet(ident_span).unwrap();
+ // HACK(chenyukang): make sure ident_name is at the starting of the line to protect against macros
+ if sm
+ .span_to_snippet(line_span)
+ .map_or(false, |s| s.trim().starts_with(&ident_name))
+ {
+ err.span_suggestion_verbose(
+ ident_span.shrink_to_lo(),
+ "you might have meant to introduce a new binding",
+ "let ".to_string(),
+ Applicability::MaybeIncorrect,
+ );
+ added_suggestion = true;
+ }
+ }
+ added_suggestion
+ }
+
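A minimal reproduction (variable name invented) of the pattern `let_binding_suggestion` targets, `name = 1` written the way many other languages allow: the failed path is the left-hand side of the assignment recorded in `diagnostic_metadata.in_assignment`, and since the identifier starts its line, the resolver suggests introducing a binding.

```rust
// Illustrative only. `value` is unresolved; the enclosing expression is an
// assignment whose LHS is a bare path and `value` begins the line, so the error
// carries "you might have meant to introduce a new binding": `let value = 42;`.
fn main() {
    value = 42; // error[E0425]: cannot find value `value` in this scope
}
```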
fn find_module(&mut self, def_id: DefId) -> Option<(Module<'a>, ImportSuggestion)> {
let mut result = None;
let mut seen_modules = FxHashSet::default();
@@ -1742,7 +1959,7 @@ impl<'a: 'ast, 'ast> LateResolutionVisitor<'a, '_, 'ast> {
}
};
- let mut suggestable_variants = variants
+ let suggestable_variants = variants
.iter()
.filter(|(_, def_id, kind)| !needs_placeholder(*def_id, *kind))
.map(|(variant, _, kind)| (path_names_to_string(variant), kind))
@@ -1752,8 +1969,9 @@ impl<'a: 'ast, 'ast> LateResolutionVisitor<'a, '_, 'ast> {
CtorKind::Fictive => format!("({} {{}})", variant),
})
.collect::<Vec<_>>();
+ let no_suggestable_variant = suggestable_variants.is_empty();
- if !suggestable_variants.is_empty() {
+ if !no_suggestable_variant {
let msg = if suggestable_variants.len() == 1 {
"you might have meant to use the following enum variant"
} else {
@@ -1763,7 +1981,7 @@ impl<'a: 'ast, 'ast> LateResolutionVisitor<'a, '_, 'ast> {
err.span_suggestions(
span,
msg,
- suggestable_variants.drain(..),
+ suggestable_variants.into_iter(),
Applicability::MaybeIncorrect,
);
}
@@ -1780,15 +1998,15 @@ impl<'a: 'ast, 'ast> LateResolutionVisitor<'a, '_, 'ast> {
.collect::<Vec<_>>();
if !suggestable_variants_with_placeholders.is_empty() {
- let msg = match (
- suggestable_variants.is_empty(),
- suggestable_variants_with_placeholders.len(),
- ) {
- (true, 1) => "the following enum variant is available",
- (true, _) => "the following enum variants are available",
- (false, 1) => "alternatively, the following enum variant is available",
- (false, _) => "alternatively, the following enum variants are also available",
- };
+ let msg =
+ match (no_suggestable_variant, suggestable_variants_with_placeholders.len()) {
+ (true, 1) => "the following enum variant is available",
+ (true, _) => "the following enum variants are available",
+ (false, 1) => "alternatively, the following enum variant is available",
+ (false, _) => {
+ "alternatively, the following enum variants are also available"
+ }
+ };
err.span_suggestions(
span,
@@ -2021,9 +2239,9 @@ impl<'a: 'ast, 'ast> LateResolutionVisitor<'a, '_, 'ast> {
fn suggest_introducing_lifetime(
&self,
- err: &mut DiagnosticBuilder<'_, ErrorGuaranteed>,
+ err: &mut Diagnostic,
name: Option<&str>,
- suggest: impl Fn(&mut DiagnosticBuilder<'_, ErrorGuaranteed>, bool, Span, &str, String) -> bool,
+ suggest: impl Fn(&mut Diagnostic, bool, Span, &str, String) -> bool,
) {
let mut suggest_note = true;
for rib in self.lifetime_ribs.iter().rev() {
@@ -2147,9 +2365,9 @@ impl<'a: 'ast, 'ast> LateResolutionVisitor<'a, '_, 'ast> {
err.emit()
}
- pub(crate) fn add_missing_lifetime_specifiers_label(
+ fn add_missing_lifetime_specifiers_label(
&mut self,
- err: &mut DiagnosticBuilder<'_, ErrorGuaranteed>,
+ err: &mut Diagnostic,
lifetime_refs: Vec<MissingLifetime>,
function_param_lifetimes: Option<(Vec<MissingLifetime>, Vec<ElisionFnParameter>)>,
) {
@@ -2300,7 +2518,7 @@ impl<'a: 'ast, 'ast> LateResolutionVisitor<'a, '_, 'ast> {
err.multipart_suggestion_verbose(
message,
std::iter::once((span, intro_sugg))
- .chain(spans_suggs.clone())
+ .chain(spans_suggs.iter().cloned())
.collect(),
Applicability::MaybeIncorrect,
);
diff --git a/compiler/rustc_resolve/src/late/lifetimes.rs b/compiler/rustc_resolve/src/late/lifetimes.rs
deleted file mode 100644
index 94460e33d..000000000
--- a/compiler/rustc_resolve/src/late/lifetimes.rs
+++ /dev/null
@@ -1,2144 +0,0 @@
-//! Resolution of early vs late bound lifetimes.
-//!
-//! Name resolution for lifetimes is performed on the AST and embedded into HIR. From this
-//! information, typechecking needs to transform the lifetime parameters into bound lifetimes.
-//! Lifetimes can be early-bound or late-bound. Construction of typechecking terms needs to visit
-//! the types in HIR to identify late-bound lifetimes and assign their Debruijn indices. This file
-//! is also responsible for assigning their semantics to implicit lifetimes in trait objects.
-
-use rustc_ast::walk_list;
-use rustc_data_structures::fx::{FxHashSet, FxIndexMap, FxIndexSet};
-use rustc_errors::struct_span_err;
-use rustc_hir as hir;
-use rustc_hir::def::{DefKind, Res};
-use rustc_hir::def_id::{DefIdMap, LocalDefId};
-use rustc_hir::intravisit::{self, Visitor};
-use rustc_hir::{GenericArg, GenericParam, GenericParamKind, HirIdMap, LifetimeName, Node};
-use rustc_middle::bug;
-use rustc_middle::hir::map::Map;
-use rustc_middle::hir::nested_filter;
-use rustc_middle::middle::resolve_lifetime::*;
-use rustc_middle::ty::{self, GenericParamDefKind, TyCtxt};
-use rustc_span::def_id::DefId;
-use rustc_span::symbol::{sym, Ident};
-use rustc_span::Span;
-use std::borrow::Cow;
-use std::fmt;
-use std::mem::take;
-
-trait RegionExt {
- fn early(hir_map: Map<'_>, index: &mut u32, param: &GenericParam<'_>) -> (LocalDefId, Region);
-
- fn late(index: u32, hir_map: Map<'_>, param: &GenericParam<'_>) -> (LocalDefId, Region);
-
- fn id(&self) -> Option<DefId>;
-
- fn shifted(self, amount: u32) -> Region;
-
- fn shifted_out_to_binder(self, binder: ty::DebruijnIndex) -> Region;
-
- fn subst<'a, L>(self, params: L, map: &NamedRegionMap) -> Option<Region>
- where
- L: Iterator<Item = &'a hir::Lifetime>;
-}
-
-impl RegionExt for Region {
- fn early(hir_map: Map<'_>, index: &mut u32, param: &GenericParam<'_>) -> (LocalDefId, Region) {
- let i = *index;
- *index += 1;
- let def_id = hir_map.local_def_id(param.hir_id);
- debug!("Region::early: index={} def_id={:?}", i, def_id);
- (def_id, Region::EarlyBound(i, def_id.to_def_id()))
- }
-
- fn late(idx: u32, hir_map: Map<'_>, param: &GenericParam<'_>) -> (LocalDefId, Region) {
- let depth = ty::INNERMOST;
- let def_id = hir_map.local_def_id(param.hir_id);
- debug!(
- "Region::late: idx={:?}, param={:?} depth={:?} def_id={:?}",
- idx, param, depth, def_id,
- );
- (def_id, Region::LateBound(depth, idx, def_id.to_def_id()))
- }
-
- fn id(&self) -> Option<DefId> {
- match *self {
- Region::Static => None,
-
- Region::EarlyBound(_, id) | Region::LateBound(_, _, id) | Region::Free(_, id) => {
- Some(id)
- }
- }
- }
-
- fn shifted(self, amount: u32) -> Region {
- match self {
- Region::LateBound(debruijn, idx, id) => {
- Region::LateBound(debruijn.shifted_in(amount), idx, id)
- }
- _ => self,
- }
- }
-
- fn shifted_out_to_binder(self, binder: ty::DebruijnIndex) -> Region {
- match self {
- Region::LateBound(debruijn, index, id) => {
- Region::LateBound(debruijn.shifted_out_to_binder(binder), index, id)
- }
- _ => self,
- }
- }
-
- fn subst<'a, L>(self, mut params: L, map: &NamedRegionMap) -> Option<Region>
- where
- L: Iterator<Item = &'a hir::Lifetime>,
- {
- if let Region::EarlyBound(index, _) = self {
- params.nth(index as usize).and_then(|lifetime| map.defs.get(&lifetime.hir_id).cloned())
- } else {
- Some(self)
- }
- }
-}
-
-/// Maps the id of each lifetime reference to the lifetime decl
-/// that it corresponds to.
-///
-/// FIXME. This struct gets converted to a `ResolveLifetimes` for
-/// actual use. It has the same data, but indexed by `LocalDefId`. This
-/// is silly.
-#[derive(Debug, Default)]
-struct NamedRegionMap {
- // maps from every use of a named (not anonymous) lifetime to a
- // `Region` describing how that region is bound
- defs: HirIdMap<Region>,
-
- // Maps relevant hir items to the bound vars on them. These include:
- // - function defs
- // - function pointers
- // - closures
- // - trait refs
- // - bound types (like `T` in `for<'a> T<'a>: Foo`)
- late_bound_vars: HirIdMap<Vec<ty::BoundVariableKind>>,
-}
-
-pub(crate) struct LifetimeContext<'a, 'tcx> {
- pub(crate) tcx: TyCtxt<'tcx>,
- map: &'a mut NamedRegionMap,
- scope: ScopeRef<'a>,
-
- /// Indicates that we only care about the definition of a trait. This should
- /// be false if the `Item` we are resolving lifetimes for is not a trait or
- /// we eventually need lifetimes resolve for trait items.
- trait_definition_only: bool,
-
- /// Cache for cross-crate per-definition object lifetime defaults.
- xcrate_object_lifetime_defaults: DefIdMap<Vec<ObjectLifetimeDefault>>,
-}
-
-#[derive(Debug)]
-enum Scope<'a> {
- /// Declares lifetimes, and each can be early-bound or late-bound.
- /// The `DebruijnIndex` of late-bound lifetimes starts at `1` and
- /// it should be shifted by the number of `Binder`s in between the
- /// declaration `Binder` and the location it's referenced from.
- Binder {
- /// We use an IndexMap here because we want these lifetimes in order
- /// for diagnostics.
- lifetimes: FxIndexMap<LocalDefId, Region>,
-
- /// if we extend this scope with another scope, what is the next index
- /// we should use for an early-bound region?
- next_early_index: u32,
-
- /// Whether or not this binder would serve as the parent
- /// binder for opaque types introduced within. For example:
- ///
- /// ```text
- /// fn foo<'a>() -> impl for<'b> Trait<Item = impl Trait2<'a>>
- /// ```
- ///
- /// Here, the opaque types we create for the `impl Trait`
- /// and `impl Trait2` references will both have the `foo` item
- /// as their parent. When we get to `impl Trait2`, we find
- /// that it is nested within the `for<>` binder -- this flag
- /// allows us to skip that when looking for the parent binder
- /// of the resulting opaque type.
- opaque_type_parent: bool,
-
- scope_type: BinderScopeType,
-
- /// The late bound vars for a given item are stored by `HirId` to be
- /// queried later. However, if we enter an elision scope, we have to
- /// later append the elided bound vars to the list and need to know what
- /// to append to.
- hir_id: hir::HirId,
-
- s: ScopeRef<'a>,
-
- /// If this binder comes from a where clause, specify how it was created.
- /// This is used to diagnose inaccessible lifetimes in APIT:
- /// ```ignore (illustrative)
- /// fn foo(x: impl for<'a> Trait<'a, Assoc = impl Copy + 'a>) {}
- /// ```
- where_bound_origin: Option<hir::PredicateOrigin>,
- },
-
- /// Lifetimes introduced by a fn are scoped to the call-site for that fn,
- /// if this is a fn body, otherwise the original definitions are used.
- /// Unspecified lifetimes are inferred, unless an elision scope is nested,
- /// e.g., `(&T, fn(&T) -> &T);` becomes `(&'_ T, for<'a> fn(&'a T) -> &'a T)`.
- Body {
- id: hir::BodyId,
- s: ScopeRef<'a>,
- },
-
- /// A scope which either determines unspecified lifetimes or errors
- /// on them (e.g., due to ambiguity).
- Elision {
- s: ScopeRef<'a>,
- },
-
- /// Use a specific lifetime (if `Some`) or leave it unset (to be
- /// inferred in a function body or potentially error outside one),
- /// for the default choice of lifetime in a trait object type.
- ObjectLifetimeDefault {
- lifetime: Option<Region>,
- s: ScopeRef<'a>,
- },
-
- /// When we have nested trait refs, the late bound vars of inner trait refs
- /// extend those of the outer ones. But we also need to include any HRTB
- /// lifetimes encountered when identifying the trait that an associated type
- /// is declared on.
- Supertrait {
- lifetimes: Vec<ty::BoundVariableKind>,
- s: ScopeRef<'a>,
- },
-
- TraitRefBoundary {
- s: ScopeRef<'a>,
- },
-
- Root,
-}
-
-#[derive(Copy, Clone, Debug)]
-enum BinderScopeType {
- /// Any non-concatenating binder scopes.
- Normal,
- /// Within a syntactic trait ref, there may be multiple poly trait refs that
- /// are nested (under the `associated_type_bounds` feature). The binders of
- /// the inner poly trait refs are extended from the outer poly trait refs
- /// and don't increase the late bound depth. If you had
- /// `T: for<'a> Foo<Bar: for<'b> Baz<'a, 'b>>`, then the `for<'b>` scope
- /// would be `Concatenating`. This is also used for trait refs in where clauses,
- /// where we have two binders `for<> T: for<> Foo` (the lifetimes are intentionally
- /// left out because they aren't needed to show the two scopes).
- /// The inner `for<>` has a scope of `Concatenating`.
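- ///
- /// An illustrative sketch: for the nested example above, the late-bound vars
- /// recorded for the inner `for<'b>` poly trait ref end up as
- /// ```ignore (illustrative)
- /// ['a /* from the outer binder */, 'b /* its own */]
- /// ```
- /// so references under the inner binder do not need an extra `DebruijnIndex` shift.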
- Concatenating,
-}
-
-// A helper struct for debugging scopes without printing parent scopes
-struct TruncatedScopeDebug<'a>(&'a Scope<'a>);
-
-impl<'a> fmt::Debug for TruncatedScopeDebug<'a> {
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- match self.0 {
- Scope::Binder {
- lifetimes,
- next_early_index,
- opaque_type_parent,
- scope_type,
- hir_id,
- where_bound_origin,
- s: _,
- } => f
- .debug_struct("Binder")
- .field("lifetimes", lifetimes)
- .field("next_early_index", next_early_index)
- .field("opaque_type_parent", opaque_type_parent)
- .field("scope_type", scope_type)
- .field("hir_id", hir_id)
- .field("where_bound_origin", where_bound_origin)
- .field("s", &"..")
- .finish(),
- Scope::Body { id, s: _ } => {
- f.debug_struct("Body").field("id", id).field("s", &"..").finish()
- }
- Scope::Elision { s: _ } => f.debug_struct("Elision").field("s", &"..").finish(),
- Scope::ObjectLifetimeDefault { lifetime, s: _ } => f
- .debug_struct("ObjectLifetimeDefault")
- .field("lifetime", lifetime)
- .field("s", &"..")
- .finish(),
- Scope::Supertrait { lifetimes, s: _ } => f
- .debug_struct("Supertrait")
- .field("lifetimes", lifetimes)
- .field("s", &"..")
- .finish(),
- Scope::TraitRefBoundary { s: _ } => f.debug_struct("TraitRefBoundary").finish(),
- Scope::Root => f.debug_struct("Root").finish(),
- }
- }
-}
-
-type ScopeRef<'a> = &'a Scope<'a>;
-
-const ROOT_SCOPE: ScopeRef<'static> = &Scope::Root;
-
-pub fn provide(providers: &mut ty::query::Providers) {
- *providers = ty::query::Providers {
- resolve_lifetimes_trait_definition,
- resolve_lifetimes,
-
- named_region_map: |tcx, id| resolve_lifetimes_for(tcx, id).defs.get(&id),
- is_late_bound_map,
- object_lifetime_defaults: |tcx, id| match tcx.hir().find_by_def_id(id) {
- Some(Node::Item(item)) => compute_object_lifetime_defaults(tcx, item),
- _ => None,
- },
- late_bound_vars_map: |tcx, id| resolve_lifetimes_for(tcx, id).late_bound_vars.get(&id),
-
- ..*providers
- };
-}
-
-/// Like `resolve_lifetimes`, but does not resolve lifetimes for trait items.
-/// Also does not generate any diagnostics.
-///
-/// This is ultimately a subset of the `resolve_lifetimes` work. It effectively
-/// resolves lifetimes only within the trait "header" -- that is, the trait
-/// and supertrait list. In contrast, `resolve_lifetimes` resolves all the
-/// lifetimes within the trait and its items. There is room to refactor this,
-/// for example to resolve lifetimes for each trait item in separate queries,
-/// but it's convenient to do the entire trait at once because the lifetimes
-/// from the trait definition are in scope within the trait items as well.
-///
-/// The reason for this separate call is to resolve what would otherwise
-/// be a cycle. Consider this example:
-///
-/// ```ignore UNSOLVED (maybe @jackh726 knows what lifetime parameter to give Sub)
-/// trait Base<'a> {
-/// type BaseItem;
-/// }
-/// trait Sub<'b>: for<'a> Base<'a> {
-/// type SubItem: Sub<BaseItem = &'b u32>;
-/// }
-/// ```
-///
-/// When we resolve `Sub` and all its items, we also have to resolve `Sub<BaseItem = &'b u32>`.
-/// To figure out the index of `'b`, we have to know about the supertraits
-/// of `Sub` so that we can determine that the `for<'a>` will be in scope.
-/// (This is because we -- currently at least -- flatten all the late-bound
-/// lifetimes into a single binder.) This requires us to resolve the
-/// *trait definition* of `Sub`; basically just enough lifetime information
-/// to look at the supertraits.
-#[tracing::instrument(level = "debug", skip(tcx))]
-fn resolve_lifetimes_trait_definition(
- tcx: TyCtxt<'_>,
- local_def_id: LocalDefId,
-) -> ResolveLifetimes {
- convert_named_region_map(do_resolve(tcx, local_def_id, true))
-}
-
-/// Computes the `ResolveLifetimes` map that contains data for an entire `Item`.
-/// You should not read the result of this query directly, but rather use
-/// `named_region_map`, `is_late_bound_map`, etc.
-#[tracing::instrument(level = "debug", skip(tcx))]
-fn resolve_lifetimes(tcx: TyCtxt<'_>, local_def_id: LocalDefId) -> ResolveLifetimes {
- convert_named_region_map(do_resolve(tcx, local_def_id, false))
-}
-
-fn do_resolve(
- tcx: TyCtxt<'_>,
- local_def_id: LocalDefId,
- trait_definition_only: bool,
-) -> NamedRegionMap {
- let item = tcx.hir().expect_item(local_def_id);
- let mut named_region_map =
- NamedRegionMap { defs: Default::default(), late_bound_vars: Default::default() };
- let mut visitor = LifetimeContext {
- tcx,
- map: &mut named_region_map,
- scope: ROOT_SCOPE,
- trait_definition_only,
- xcrate_object_lifetime_defaults: Default::default(),
- };
- visitor.visit_item(item);
-
- named_region_map
-}
-
-fn convert_named_region_map(named_region_map: NamedRegionMap) -> ResolveLifetimes {
- let mut rl = ResolveLifetimes::default();
-
- for (hir_id, v) in named_region_map.defs {
- let map = rl.defs.entry(hir_id.owner).or_default();
- map.insert(hir_id.local_id, v);
- }
- for (hir_id, v) in named_region_map.late_bound_vars {
- let map = rl.late_bound_vars.entry(hir_id.owner).or_default();
- map.insert(hir_id.local_id, v);
- }
-
- debug!(?rl.defs);
- rl
-}
-
-/// Performs lifetime resolution for any owner (structs, traits, trait methods, etc.).
-/// There are two important things this does.
-/// First, we have to resolve lifetimes for the entire *`Item`* that contains this owner,
-/// because that's the largest "scope" where we can have relevant lifetimes.
-/// Second, if we are asking for lifetimes in a trait *definition*, we use
-/// `resolve_lifetimes_trait_definition` instead of `resolve_lifetimes`; the former does not
-/// descend into the trait items and does not emit diagnostics. This allows us to avoid cycles.
-/// Importantly, if we ask for lifetimes whose owner is something other than the trait itself
-/// (like the trait methods or associated types), then we just use the regular `resolve_lifetimes`.
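-///
-/// An illustrative sketch of which query ends up being used:
-/// ```ignore (illustrative)
-/// trait Tr<'a> {                // lifetimes for `Tr` itself come from
-///     fn m(&self) -> &'a u32;   // `resolve_lifetimes_trait_definition`;
-/// }                             // lifetimes for `m` come from `resolve_lifetimes`
-/// ```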
-fn resolve_lifetimes_for<'tcx>(tcx: TyCtxt<'tcx>, def_id: LocalDefId) -> &'tcx ResolveLifetimes {
- let item_id = item_for(tcx, def_id);
- if item_id == def_id {
- let item = tcx.hir().item(hir::ItemId { def_id: item_id });
- match item.kind {
- hir::ItemKind::Trait(..) => tcx.resolve_lifetimes_trait_definition(item_id),
- _ => tcx.resolve_lifetimes(item_id),
- }
- } else {
- tcx.resolve_lifetimes(item_id)
- }
-}
-
-/// Finds the `Item` that contains the given `LocalDefId`
-fn item_for(tcx: TyCtxt<'_>, local_def_id: LocalDefId) -> LocalDefId {
- match tcx.hir().find_by_def_id(local_def_id) {
- Some(Node::Item(item)) => {
- return item.def_id;
- }
- _ => {}
- }
- let item = {
- let hir_id = tcx.hir().local_def_id_to_hir_id(local_def_id);
- let mut parent_iter = tcx.hir().parent_iter(hir_id);
- loop {
- let node = parent_iter.next().map(|n| n.1);
- match node {
- Some(hir::Node::Item(item)) => break item.def_id,
- Some(hir::Node::Crate(_)) | None => bug!("{:?} has no enclosing Item", local_def_id),
- _ => {}
- }
- }
- };
- item
-}
-
-/// In traits, there is an implicit `Self` type parameter which comes before the generics.
-/// We have to account for this when computing the index of the other generic parameters.
-/// This function returns whether there is such an implicit parameter defined on the given item.
-fn sub_items_have_self_param(node: &hir::ItemKind<'_>) -> bool {
- matches!(*node, hir::ItemKind::Trait(..) | hir::ItemKind::TraitAlias(..))
-}
-
-fn late_region_as_bound_region<'tcx>(tcx: TyCtxt<'tcx>, region: &Region) -> ty::BoundVariableKind {
- match region {
- Region::LateBound(_, _, def_id) => {
- let name = tcx.hir().name(tcx.hir().local_def_id_to_hir_id(def_id.expect_local()));
- ty::BoundVariableKind::Region(ty::BrNamed(*def_id, name))
- }
- _ => bug!("{:?} is not a late region", region),
- }
-}
-
-impl<'a, 'tcx> LifetimeContext<'a, 'tcx> {
- /// Returns the binders in scope and the type of `Binder` that should be created for a poly trait ref.
- fn poly_trait_ref_binder_info(&mut self) -> (Vec<ty::BoundVariableKind>, BinderScopeType) {
- let mut scope = self.scope;
- let mut supertrait_lifetimes = vec![];
- loop {
- match scope {
- Scope::Body { .. } | Scope::Root => {
- break (vec![], BinderScopeType::Normal);
- }
-
- Scope::Elision { s, .. } | Scope::ObjectLifetimeDefault { s, .. } => {
- scope = s;
- }
-
- Scope::Supertrait { s, lifetimes } => {
- supertrait_lifetimes = lifetimes.clone();
- scope = s;
- }
-
- Scope::TraitRefBoundary { .. } => {
- // We should only see super trait lifetimes if there is a `Binder` above
- assert!(supertrait_lifetimes.is_empty());
- break (vec![], BinderScopeType::Normal);
- }
-
- Scope::Binder { hir_id, .. } => {
- // Nested poly trait refs have the binders concatenated
- let mut full_binders =
- self.map.late_bound_vars.entry(*hir_id).or_default().clone();
- full_binders.extend(supertrait_lifetimes.into_iter());
- break (full_binders, BinderScopeType::Concatenating);
- }
- }
- }
- }
-}
-impl<'a, 'tcx> Visitor<'tcx> for LifetimeContext<'a, 'tcx> {
- type NestedFilter = nested_filter::All;
-
- fn nested_visit_map(&mut self) -> Self::Map {
- self.tcx.hir()
- }
-
- // We want to nest trait/impl items in their parent, but nothing else.
- fn visit_nested_item(&mut self, _: hir::ItemId) {}
-
- fn visit_trait_item_ref(&mut self, ii: &'tcx hir::TraitItemRef) {
- if !self.trait_definition_only {
- intravisit::walk_trait_item_ref(self, ii)
- }
- }
-
- fn visit_nested_body(&mut self, body: hir::BodyId) {
- let body = self.tcx.hir().body(body);
- self.with(Scope::Body { id: body.id(), s: self.scope }, |this| {
- this.visit_body(body);
- });
- }
-
- fn visit_expr(&mut self, e: &'tcx hir::Expr<'tcx>) {
- if let hir::ExprKind::Closure(hir::Closure {
- binder, bound_generic_params, fn_decl, ..
- }) = e.kind
- {
- if let &hir::ClosureBinder::For { span: for_sp, .. } = binder {
- fn span_of_infer(ty: &hir::Ty<'_>) -> Option<Span> {
- struct V(Option<Span>);
-
- impl<'v> Visitor<'v> for V {
- fn visit_ty(&mut self, t: &'v hir::Ty<'v>) {
- match t.kind {
- _ if self.0.is_some() => (),
- hir::TyKind::Infer => {
- self.0 = Some(t.span);
- }
- _ => intravisit::walk_ty(self, t),
- }
- }
- }
-
- let mut v = V(None);
- v.visit_ty(ty);
- v.0
- }
-
- let infer_in_rt_sp = match fn_decl.output {
- hir::FnRetTy::DefaultReturn(sp) => Some(sp),
- hir::FnRetTy::Return(ty) => span_of_infer(ty),
- };
-
- let infer_spans = fn_decl
- .inputs
- .into_iter()
- .filter_map(span_of_infer)
- .chain(infer_in_rt_sp)
- .collect::<Vec<_>>();
-
- if !infer_spans.is_empty() {
- self.tcx.sess
- .struct_span_err(
- infer_spans,
- "implicit types in closure signatures are forbidden when `for<...>` is present",
- )
- .span_label(for_sp, "`for<...>` is here")
- .emit();
- }
- }
-
- let next_early_index = self.next_early_index();
- let (lifetimes, binders): (FxIndexMap<LocalDefId, Region>, Vec<_>) =
- bound_generic_params
- .iter()
- .filter(|param| matches!(param.kind, GenericParamKind::Lifetime { .. }))
- .enumerate()
- .map(|(late_bound_idx, param)| {
- let pair = Region::late(late_bound_idx as u32, self.tcx.hir(), param);
- let r = late_region_as_bound_region(self.tcx, &pair.1);
- (pair, r)
- })
- .unzip();
-
- self.map.late_bound_vars.insert(e.hir_id, binders);
- let scope = Scope::Binder {
- hir_id: e.hir_id,
- lifetimes,
- s: self.scope,
- next_early_index,
- opaque_type_parent: false,
- scope_type: BinderScopeType::Normal,
- where_bound_origin: None,
- };
-
- self.with(scope, |this| {
- // a closure has no bounds, so everything
- // contained within is scoped within its binder.
- intravisit::walk_expr(this, e)
- });
- } else {
- intravisit::walk_expr(self, e)
- }
- }
-
- fn visit_item(&mut self, item: &'tcx hir::Item<'tcx>) {
- match &item.kind {
- hir::ItemKind::Impl(hir::Impl { of_trait, .. }) => {
- if let Some(of_trait) = of_trait {
- self.map.late_bound_vars.insert(of_trait.hir_ref_id, Vec::default());
- }
- }
- _ => {}
- }
- match item.kind {
- hir::ItemKind::Fn(_, ref generics, _) => {
- self.visit_early_late(None, item.hir_id(), generics, |this| {
- intravisit::walk_item(this, item);
- });
- }
-
- hir::ItemKind::ExternCrate(_)
- | hir::ItemKind::Use(..)
- | hir::ItemKind::Macro(..)
- | hir::ItemKind::Mod(..)
- | hir::ItemKind::ForeignMod { .. }
- | hir::ItemKind::GlobalAsm(..) => {
- // These sorts of items have no lifetime parameters at all.
- intravisit::walk_item(self, item);
- }
- hir::ItemKind::Static(..) | hir::ItemKind::Const(..) => {
- // No lifetime parameters, but implied 'static.
- self.with(Scope::Elision { s: self.scope }, |this| {
- intravisit::walk_item(this, item)
- });
- }
- hir::ItemKind::OpaqueTy(hir::OpaqueTy { .. }) => {
- // Opaque types are visited when we visit the
- // `TyKind::OpaqueDef`, so that they have the lifetimes from
- // their parent opaque_ty in scope.
- //
- // The core idea here is that since OpaqueTys are generated with the impl Trait as
- // their owner, we can keep going until we find the Item that owns that. We then
- // conservatively add all resolved lifetimes. Otherwise we run into problems in
- // cases like `type Foo<'a> = impl Bar<As = impl Baz + 'a>`.
- for (_hir_id, node) in
- self.tcx.hir().parent_iter(self.tcx.hir().local_def_id_to_hir_id(item.def_id))
- {
- match node {
- hir::Node::Item(parent_item) => {
- let resolved_lifetimes: &ResolveLifetimes =
- self.tcx.resolve_lifetimes(item_for(self.tcx, parent_item.def_id));
- // We need to add *all* deps, since opaque tys may want them from *us*
- for (&owner, defs) in resolved_lifetimes.defs.iter() {
- defs.iter().for_each(|(&local_id, region)| {
- self.map.defs.insert(hir::HirId { owner, local_id }, *region);
- });
- }
- for (&owner, late_bound_vars) in
- resolved_lifetimes.late_bound_vars.iter()
- {
- late_bound_vars.iter().for_each(|(&local_id, late_bound_vars)| {
- self.map.late_bound_vars.insert(
- hir::HirId { owner, local_id },
- late_bound_vars.clone(),
- );
- });
- }
- break;
- }
- hir::Node::Crate(_) => bug!("No Item about an OpaqueTy"),
- _ => {}
- }
- }
- }
- hir::ItemKind::TyAlias(_, ref generics)
- | hir::ItemKind::Enum(_, ref generics)
- | hir::ItemKind::Struct(_, ref generics)
- | hir::ItemKind::Union(_, ref generics)
- | hir::ItemKind::Trait(_, _, ref generics, ..)
- | hir::ItemKind::TraitAlias(ref generics, ..)
- | hir::ItemKind::Impl(hir::Impl { ref generics, .. }) => {
- // These kinds of items have only early-bound lifetime parameters.
- let mut index = if sub_items_have_self_param(&item.kind) {
- 1 // Self comes before lifetimes
- } else {
- 0
- };
- let mut non_lifetime_count = 0;
- let lifetimes = generics
- .params
- .iter()
- .filter_map(|param| match param.kind {
- GenericParamKind::Lifetime { .. } => {
- Some(Region::early(self.tcx.hir(), &mut index, param))
- }
- GenericParamKind::Type { .. } | GenericParamKind::Const { .. } => {
- non_lifetime_count += 1;
- None
- }
- })
- .collect();
- self.map.late_bound_vars.insert(item.hir_id(), vec![]);
- let scope = Scope::Binder {
- hir_id: item.hir_id(),
- lifetimes,
- next_early_index: index + non_lifetime_count,
- opaque_type_parent: true,
- scope_type: BinderScopeType::Normal,
- s: ROOT_SCOPE,
- where_bound_origin: None,
- };
- self.with(scope, |this| {
- let scope = Scope::TraitRefBoundary { s: this.scope };
- this.with(scope, |this| {
- intravisit::walk_item(this, item);
- });
- });
- }
- }
- }
-
- fn visit_foreign_item(&mut self, item: &'tcx hir::ForeignItem<'tcx>) {
- match item.kind {
- hir::ForeignItemKind::Fn(_, _, ref generics) => {
- self.visit_early_late(None, item.hir_id(), generics, |this| {
- intravisit::walk_foreign_item(this, item);
- })
- }
- hir::ForeignItemKind::Static(..) => {
- intravisit::walk_foreign_item(self, item);
- }
- hir::ForeignItemKind::Type => {
- intravisit::walk_foreign_item(self, item);
- }
- }
- }
-
- #[tracing::instrument(level = "debug", skip(self))]
- fn visit_ty(&mut self, ty: &'tcx hir::Ty<'tcx>) {
- match ty.kind {
- hir::TyKind::BareFn(ref c) => {
- let next_early_index = self.next_early_index();
- let (lifetimes, binders): (FxIndexMap<LocalDefId, Region>, Vec<_>) = c
- .generic_params
- .iter()
- .filter(|param| matches!(param.kind, GenericParamKind::Lifetime { .. }))
- .enumerate()
- .map(|(late_bound_idx, param)| {
- let pair = Region::late(late_bound_idx as u32, self.tcx.hir(), param);
- let r = late_region_as_bound_region(self.tcx, &pair.1);
- (pair, r)
- })
- .unzip();
- self.map.late_bound_vars.insert(ty.hir_id, binders);
- let scope = Scope::Binder {
- hir_id: ty.hir_id,
- lifetimes,
- s: self.scope,
- next_early_index,
- opaque_type_parent: false,
- scope_type: BinderScopeType::Normal,
- where_bound_origin: None,
- };
- self.with(scope, |this| {
- // a bare fn has no bounds, so everything
- // contained within is scoped within its binder.
- intravisit::walk_ty(this, ty);
- });
- }
- hir::TyKind::TraitObject(bounds, ref lifetime, _) => {
- debug!(?bounds, ?lifetime, "TraitObject");
- let scope = Scope::TraitRefBoundary { s: self.scope };
- self.with(scope, |this| {
- for bound in bounds {
- this.visit_poly_trait_ref(bound, hir::TraitBoundModifier::None);
- }
- });
- match lifetime.name {
- LifetimeName::ImplicitObjectLifetimeDefault => {
- // If the user does not write *anything*, we
- // use the object lifetime defaulting
- // rules. So e.g., `Box<dyn Debug>` becomes
- // `Box<dyn Debug + 'static>`.
- self.resolve_object_lifetime_default(lifetime)
- }
- LifetimeName::Infer => {
- // If the user writes `'_`, we use the *ordinary* elision
- // rules. So the `'_` in e.g., `Box<dyn Debug + '_>` will be
- // resolved the same as the `'_` in `&'_ Foo`.
- //
- // cc #48468
- }
- LifetimeName::Param(..) | LifetimeName::Static => {
- // If the user wrote an explicit name, use that.
- self.visit_lifetime(lifetime);
- }
- LifetimeName::Error => {}
- }
- }
- hir::TyKind::Rptr(ref lifetime_ref, ref mt) => {
- self.visit_lifetime(lifetime_ref);
- let scope = Scope::ObjectLifetimeDefault {
- lifetime: self.map.defs.get(&lifetime_ref.hir_id).cloned(),
- s: self.scope,
- };
- self.with(scope, |this| this.visit_ty(&mt.ty));
- }
- hir::TyKind::OpaqueDef(item_id, lifetimes) => {
- // Resolve the lifetimes in the bounds to the lifetime defs in the generics.
- // `fn foo<'a>() -> impl MyTrait<'a> { ... }` desugars to
- // `type MyAnonTy<'b> = impl MyTrait<'b>;`
- // ^ ^ this gets resolved in the scope of
- // the opaque_ty generics
- let opaque_ty = self.tcx.hir().item(item_id);
- let (generics, bounds) = match opaque_ty.kind {
- hir::ItemKind::OpaqueTy(hir::OpaqueTy {
- origin: hir::OpaqueTyOrigin::TyAlias,
- ..
- }) => {
- intravisit::walk_ty(self, ty);
-
- // Elided lifetimes are not allowed in non-return
- // position impl Trait
- let scope = Scope::TraitRefBoundary { s: self.scope };
- self.with(scope, |this| {
- let scope = Scope::Elision { s: this.scope };
- this.with(scope, |this| {
- intravisit::walk_item(this, opaque_ty);
- })
- });
-
- return;
- }
- hir::ItemKind::OpaqueTy(hir::OpaqueTy {
- origin: hir::OpaqueTyOrigin::FnReturn(..) | hir::OpaqueTyOrigin::AsyncFn(..),
- ref generics,
- bounds,
- ..
- }) => (generics, bounds),
- ref i => bug!("`impl Trait` pointed to non-opaque type?? {:#?}", i),
- };
-
- // Resolve the lifetimes that are applied to the opaque type.
- // These are resolved in the current scope.
- // `fn foo<'a>() -> impl MyTrait<'a> { ... }` desugars to
- // `fn foo<'a>() -> MyAnonTy<'a> { ... }`
- // ^ ^this gets resolved in the current scope
- for lifetime in lifetimes {
- let hir::GenericArg::Lifetime(lifetime) = lifetime else {
- continue
- };
- self.visit_lifetime(lifetime);
-
- // Check for predicates like `impl for<'a> Trait<impl OtherTrait<'a>>`
- // and ban them. Type variables instantiated inside binders aren't
- // well-supported at the moment, so this doesn't work.
- // In the future, this should be fixed and this error should be removed.
- let def = self.map.defs.get(&lifetime.hir_id).cloned();
- let Some(Region::LateBound(_, _, def_id)) = def else {
- continue
- };
- let Some(def_id) = def_id.as_local() else {
- continue
- };
- let hir_id = self.tcx.hir().local_def_id_to_hir_id(def_id);
- // Ensure that the parent of the def is an item, not HRTB
- let parent_id = self.tcx.hir().get_parent_node(hir_id);
- if !parent_id.is_owner() {
- if !self.trait_definition_only {
- struct_span_err!(
- self.tcx.sess,
- lifetime.span,
- E0657,
- "`impl Trait` can only capture lifetimes \
- bound at the fn or impl level"
- )
- .emit();
- }
- self.uninsert_lifetime_on_error(lifetime, def.unwrap());
- }
- if let hir::Node::Item(hir::Item {
- kind: hir::ItemKind::OpaqueTy { .. }, ..
- }) = self.tcx.hir().get(parent_id)
- {
- if !self.trait_definition_only {
- let mut err = self.tcx.sess.struct_span_err(
- lifetime.span,
- "higher kinded lifetime bounds on nested opaque types are not supported yet",
- );
- err.span_note(self.tcx.def_span(def_id), "lifetime declared here");
- err.emit();
- }
- self.uninsert_lifetime_on_error(lifetime, def.unwrap());
- }
- }
-
- // We want to start our early-bound indices at the end of the parent scope,
- // not including any parent `impl Trait`s.
- let mut index = self.next_early_index_for_opaque_type();
- debug!(?index);
-
- let mut lifetimes = FxIndexMap::default();
- let mut non_lifetime_count = 0;
- debug!(?generics.params);
- for param in generics.params {
- match param.kind {
- GenericParamKind::Lifetime { .. } => {
- let (def_id, reg) = Region::early(self.tcx.hir(), &mut index, &param);
- lifetimes.insert(def_id, reg);
- }
- GenericParamKind::Type { .. } | GenericParamKind::Const { .. } => {
- non_lifetime_count += 1;
- }
- }
- }
- let next_early_index = index + non_lifetime_count;
- self.map.late_bound_vars.insert(ty.hir_id, vec![]);
-
- let scope = Scope::Binder {
- hir_id: ty.hir_id,
- lifetimes,
- next_early_index,
- s: self.scope,
- opaque_type_parent: false,
- scope_type: BinderScopeType::Normal,
- where_bound_origin: None,
- };
- self.with(scope, |this| {
- let scope = Scope::TraitRefBoundary { s: this.scope };
- this.with(scope, |this| {
- this.visit_generics(generics);
- for bound in bounds {
- this.visit_param_bound(bound);
- }
- })
- });
- }
- _ => intravisit::walk_ty(self, ty),
- }
- }
-
- fn visit_trait_item(&mut self, trait_item: &'tcx hir::TraitItem<'tcx>) {
- use self::hir::TraitItemKind::*;
- match trait_item.kind {
- Fn(_, _) => {
- let tcx = self.tcx;
- self.visit_early_late(
- Some(tcx.hir().get_parent_item(trait_item.hir_id())),
- trait_item.hir_id(),
- &trait_item.generics,
- |this| intravisit::walk_trait_item(this, trait_item),
- );
- }
- Type(bounds, ref ty) => {
- let generics = &trait_item.generics;
- let mut index = self.next_early_index();
- debug!("visit_ty: index = {}", index);
- let mut non_lifetime_count = 0;
- let lifetimes = generics
- .params
- .iter()
- .filter_map(|param| match param.kind {
- GenericParamKind::Lifetime { .. } => {
- Some(Region::early(self.tcx.hir(), &mut index, param))
- }
- GenericParamKind::Type { .. } | GenericParamKind::Const { .. } => {
- non_lifetime_count += 1;
- None
- }
- })
- .collect();
- self.map.late_bound_vars.insert(trait_item.hir_id(), vec![]);
- let scope = Scope::Binder {
- hir_id: trait_item.hir_id(),
- lifetimes,
- next_early_index: index + non_lifetime_count,
- s: self.scope,
- opaque_type_parent: true,
- scope_type: BinderScopeType::Normal,
- where_bound_origin: None,
- };
- self.with(scope, |this| {
- let scope = Scope::TraitRefBoundary { s: this.scope };
- this.with(scope, |this| {
- this.visit_generics(generics);
- for bound in bounds {
- this.visit_param_bound(bound);
- }
- if let Some(ty) = ty {
- this.visit_ty(ty);
- }
- })
- });
- }
- Const(_, _) => {
- // Only methods and types support generics.
- assert!(trait_item.generics.params.is_empty());
- intravisit::walk_trait_item(self, trait_item);
- }
- }
- }
-
- fn visit_impl_item(&mut self, impl_item: &'tcx hir::ImplItem<'tcx>) {
- use self::hir::ImplItemKind::*;
- match impl_item.kind {
- Fn(..) => {
- let tcx = self.tcx;
- self.visit_early_late(
- Some(tcx.hir().get_parent_item(impl_item.hir_id())),
- impl_item.hir_id(),
- &impl_item.generics,
- |this| intravisit::walk_impl_item(this, impl_item),
- );
- }
- TyAlias(ref ty) => {
- let generics = &impl_item.generics;
- let mut index = self.next_early_index();
- let mut non_lifetime_count = 0;
- debug!("visit_ty: index = {}", index);
- let lifetimes: FxIndexMap<LocalDefId, Region> = generics
- .params
- .iter()
- .filter_map(|param| match param.kind {
- GenericParamKind::Lifetime { .. } => {
- Some(Region::early(self.tcx.hir(), &mut index, param))
- }
- GenericParamKind::Const { .. } | GenericParamKind::Type { .. } => {
- non_lifetime_count += 1;
- None
- }
- })
- .collect();
- self.map.late_bound_vars.insert(ty.hir_id, vec![]);
- let scope = Scope::Binder {
- hir_id: ty.hir_id,
- lifetimes,
- next_early_index: index + non_lifetime_count,
- s: self.scope,
- opaque_type_parent: true,
- scope_type: BinderScopeType::Normal,
- where_bound_origin: None,
- };
- self.with(scope, |this| {
- let scope = Scope::TraitRefBoundary { s: this.scope };
- this.with(scope, |this| {
- this.visit_generics(generics);
- this.visit_ty(ty);
- })
- });
- }
- Const(_, _) => {
- // Only methods and types support generics.
- assert!(impl_item.generics.params.is_empty());
- intravisit::walk_impl_item(self, impl_item);
- }
- }
- }
-
- #[tracing::instrument(level = "debug", skip(self))]
- fn visit_lifetime(&mut self, lifetime_ref: &'tcx hir::Lifetime) {
- match lifetime_ref.name {
- hir::LifetimeName::Static => self.insert_lifetime(lifetime_ref, Region::Static),
- hir::LifetimeName::Param(param_def_id, _) => {
- self.resolve_lifetime_ref(param_def_id, lifetime_ref)
- }
- // If we've already reported an error, just ignore `lifetime_ref`.
- hir::LifetimeName::Error => {}
- // Those will be resolved by typechecking.
- hir::LifetimeName::ImplicitObjectLifetimeDefault | hir::LifetimeName::Infer => {}
- }
- }
-
- fn visit_path(&mut self, path: &'tcx hir::Path<'tcx>, _: hir::HirId) {
- for (i, segment) in path.segments.iter().enumerate() {
- let depth = path.segments.len() - i - 1;
- if let Some(ref args) = segment.args {
- self.visit_segment_args(path.res, depth, args);
- }
- }
- }
-
- fn visit_fn(
- &mut self,
- fk: intravisit::FnKind<'tcx>,
- fd: &'tcx hir::FnDecl<'tcx>,
- body_id: hir::BodyId,
- _: Span,
- _: hir::HirId,
- ) {
- let output = match fd.output {
- hir::FnRetTy::DefaultReturn(_) => None,
- hir::FnRetTy::Return(ref ty) => Some(&**ty),
- };
- self.visit_fn_like_elision(&fd.inputs, output, matches!(fk, intravisit::FnKind::Closure));
- intravisit::walk_fn_kind(self, fk);
- self.visit_nested_body(body_id)
- }
-
- fn visit_generics(&mut self, generics: &'tcx hir::Generics<'tcx>) {
- let scope = Scope::TraitRefBoundary { s: self.scope };
- self.with(scope, |this| {
- for param in generics.params {
- match param.kind {
- GenericParamKind::Lifetime { .. } => {}
- GenericParamKind::Type { ref default, .. } => {
- if let Some(ref ty) = default {
- this.visit_ty(&ty);
- }
- }
- GenericParamKind::Const { ref ty, default } => {
- this.visit_ty(&ty);
- if let Some(default) = default {
- this.visit_body(this.tcx.hir().body(default.body));
- }
- }
- }
- }
- for predicate in generics.predicates {
- match predicate {
- &hir::WherePredicate::BoundPredicate(hir::WhereBoundPredicate {
- ref bounded_ty,
- bounds,
- ref bound_generic_params,
- origin,
- ..
- }) => {
- let (lifetimes, binders): (FxIndexMap<LocalDefId, Region>, Vec<_>) =
- bound_generic_params
- .iter()
- .filter(|param| {
- matches!(param.kind, GenericParamKind::Lifetime { .. })
- })
- .enumerate()
- .map(|(late_bound_idx, param)| {
- let pair =
- Region::late(late_bound_idx as u32, this.tcx.hir(), param);
- let r = late_region_as_bound_region(this.tcx, &pair.1);
- (pair, r)
- })
- .unzip();
- this.map.late_bound_vars.insert(bounded_ty.hir_id, binders.clone());
- let next_early_index = this.next_early_index();
- // Even if there are no lifetimes defined here, we still wrap it in a binder
- // scope. If there happens to be a nested poly trait ref (an error), that
- // will be `Concatenating` anyways, so we don't have to worry about the depth
- // being wrong.
- let scope = Scope::Binder {
- hir_id: bounded_ty.hir_id,
- lifetimes,
- s: this.scope,
- next_early_index,
- opaque_type_parent: false,
- scope_type: BinderScopeType::Normal,
- where_bound_origin: Some(origin),
- };
- this.with(scope, |this| {
- this.visit_ty(&bounded_ty);
- walk_list!(this, visit_param_bound, bounds);
- })
- }
- &hir::WherePredicate::RegionPredicate(hir::WhereRegionPredicate {
- ref lifetime,
- bounds,
- ..
- }) => {
- this.visit_lifetime(lifetime);
- walk_list!(this, visit_param_bound, bounds);
-
- if lifetime.name != hir::LifetimeName::Static {
- for bound in bounds {
- let hir::GenericBound::Outlives(ref lt) = bound else {
- continue;
- };
- if lt.name != hir::LifetimeName::Static {
- continue;
- }
- this.insert_lifetime(lt, Region::Static);
- this.tcx
- .sess
- .struct_span_warn(
- lifetime.span,
- &format!(
- "unnecessary lifetime parameter `{}`",
- lifetime.name.ident(),
- ),
- )
- .help(&format!(
- "you can use the `'static` lifetime directly, in place of `{}`",
- lifetime.name.ident(),
- ))
- .emit();
- }
- }
- }
- &hir::WherePredicate::EqPredicate(hir::WhereEqPredicate {
- ref lhs_ty,
- ref rhs_ty,
- ..
- }) => {
- this.visit_ty(lhs_ty);
- this.visit_ty(rhs_ty);
- }
- }
- }
- })
- }
-
- fn visit_param_bound(&mut self, bound: &'tcx hir::GenericBound<'tcx>) {
- match bound {
- hir::GenericBound::LangItemTrait(_, _, hir_id, _) => {
- // FIXME(jackh726): This is pretty weird. `LangItemTrait` doesn't go
- // through the regular poly trait ref code, so we don't get another
- // chance to introduce a binder. For now, I'm keeping the existing logic
- // of "if there isn't a Binder scope above us, add one", but I
- // imagine there's a better way to go about this.
- let (binders, scope_type) = self.poly_trait_ref_binder_info();
-
- self.map.late_bound_vars.insert(*hir_id, binders);
- let scope = Scope::Binder {
- hir_id: *hir_id,
- lifetimes: FxIndexMap::default(),
- s: self.scope,
- next_early_index: self.next_early_index(),
- opaque_type_parent: false,
- scope_type,
- where_bound_origin: None,
- };
- self.with(scope, |this| {
- intravisit::walk_param_bound(this, bound);
- });
- }
- _ => intravisit::walk_param_bound(self, bound),
- }
- }
-
- fn visit_poly_trait_ref(
- &mut self,
- trait_ref: &'tcx hir::PolyTraitRef<'tcx>,
- _modifier: hir::TraitBoundModifier,
- ) {
- debug!("visit_poly_trait_ref(trait_ref={:?})", trait_ref);
-
- let next_early_index = self.next_early_index();
- let (mut binders, scope_type) = self.poly_trait_ref_binder_info();
-
- let initial_bound_vars = binders.len() as u32;
- let mut lifetimes: FxIndexMap<LocalDefId, Region> = FxIndexMap::default();
- let binders_iter = trait_ref
- .bound_generic_params
- .iter()
- .filter(|param| matches!(param.kind, GenericParamKind::Lifetime { .. }))
- .enumerate()
- .map(|(late_bound_idx, param)| {
- let pair =
- Region::late(initial_bound_vars + late_bound_idx as u32, self.tcx.hir(), param);
- let r = late_region_as_bound_region(self.tcx, &pair.1);
- lifetimes.insert(pair.0, pair.1);
- r
- });
- binders.extend(binders_iter);
-
- debug!(?binders);
- self.map.late_bound_vars.insert(trait_ref.trait_ref.hir_ref_id, binders);
-
- // Always introduce a scope here, even if this is in a where clause and
- // we introduced the binders around the bounded Ty. In that case, we
- // just reuse the concatenation functionality also present in nested trait
- // refs.
- let scope = Scope::Binder {
- hir_id: trait_ref.trait_ref.hir_ref_id,
- lifetimes,
- s: self.scope,
- next_early_index,
- opaque_type_parent: false,
- scope_type,
- where_bound_origin: None,
- };
- self.with(scope, |this| {
- walk_list!(this, visit_generic_param, trait_ref.bound_generic_params);
- this.visit_trait_ref(&trait_ref.trait_ref);
- });
- }
-}
-
-fn compute_object_lifetime_defaults<'tcx>(
- tcx: TyCtxt<'tcx>,
- item: &hir::Item<'_>,
-) -> Option<&'tcx [ObjectLifetimeDefault]> {
- match item.kind {
- hir::ItemKind::Struct(_, ref generics)
- | hir::ItemKind::Union(_, ref generics)
- | hir::ItemKind::Enum(_, ref generics)
- | hir::ItemKind::OpaqueTy(hir::OpaqueTy {
- ref generics,
- origin: hir::OpaqueTyOrigin::TyAlias,
- ..
- })
- | hir::ItemKind::TyAlias(_, ref generics)
- | hir::ItemKind::Trait(_, _, ref generics, ..) => {
- let result = object_lifetime_defaults_for_item(tcx, generics);
-
- // Debugging aid.
- let attrs = tcx.hir().attrs(item.hir_id());
- if tcx.sess.contains_name(attrs, sym::rustc_object_lifetime_default) {
- let object_lifetime_default_reprs: String = result
- .iter()
- .map(|set| match *set {
- Set1::Empty => "BaseDefault".into(),
- Set1::One(Region::Static) => "'static".into(),
- Set1::One(Region::EarlyBound(mut i, _)) => generics
- .params
- .iter()
- .find_map(|param| match param.kind {
- GenericParamKind::Lifetime { .. } => {
- if i == 0 {
- return Some(param.name.ident().to_string().into());
- }
- i -= 1;
- None
- }
- _ => None,
- })
- .unwrap(),
- Set1::One(_) => bug!(),
- Set1::Many => "Ambiguous".into(),
- })
- .collect::<Vec<Cow<'static, str>>>()
- .join(",");
- tcx.sess.span_err(item.span, &object_lifetime_default_reprs);
- }
-
- Some(result)
- }
- _ => None,
- }
-}
-
-/// Scan the bounds and where-clauses on parameters to extract bounds
-/// of the form `T:'a` so as to determine the `ObjectLifetimeDefault`
-/// for each type parameter.
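-///
-/// An illustrative sketch (mirroring the `Foo` example used further down in
-/// `visit_segment_args`):
-/// ```ignore (illustrative)
-/// struct Foo<'a, T: 'a, U> { }
-/// // one default per non-lifetime parameter: [Set1::One('a), Set1::Empty]
-/// // i.e. `dyn Trait` in `T`'s position defaults to `+ 'a`, while `U`'s position
-/// // falls back to the base default.
-/// ```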
-fn object_lifetime_defaults_for_item<'tcx>(
- tcx: TyCtxt<'tcx>,
- generics: &hir::Generics<'_>,
-) -> &'tcx [ObjectLifetimeDefault] {
- fn add_bounds(set: &mut Set1<hir::LifetimeName>, bounds: &[hir::GenericBound<'_>]) {
- for bound in bounds {
- if let hir::GenericBound::Outlives(ref lifetime) = *bound {
- set.insert(lifetime.name.normalize_to_macros_2_0());
- }
- }
- }
-
- let process_param = |param: &hir::GenericParam<'_>| match param.kind {
- GenericParamKind::Lifetime { .. } => None,
- GenericParamKind::Type { .. } => {
- let mut set = Set1::Empty;
-
- let param_def_id = tcx.hir().local_def_id(param.hir_id);
- for predicate in generics.predicates {
- // Look for `type: ...` where clauses.
- let hir::WherePredicate::BoundPredicate(ref data) = *predicate else { continue };
-
- // Ignore `for<'a> type: ...` as they can change what
- // lifetimes mean (although we could "just" handle it).
- if !data.bound_generic_params.is_empty() {
- continue;
- }
-
- let res = match data.bounded_ty.kind {
- hir::TyKind::Path(hir::QPath::Resolved(None, ref path)) => path.res,
- _ => continue,
- };
-
- if res == Res::Def(DefKind::TyParam, param_def_id.to_def_id()) {
- add_bounds(&mut set, &data.bounds);
- }
- }
-
- Some(match set {
- Set1::Empty => Set1::Empty,
- Set1::One(name) => {
- if name == hir::LifetimeName::Static {
- Set1::One(Region::Static)
- } else {
- generics
- .params
- .iter()
- .filter_map(|param| match param.kind {
- GenericParamKind::Lifetime { .. } => {
- let param_def_id = tcx.hir().local_def_id(param.hir_id);
- Some((
- param_def_id,
- hir::LifetimeName::Param(param_def_id, param.name),
- ))
- }
- _ => None,
- })
- .enumerate()
- .find(|&(_, (_, lt_name))| lt_name == name)
- .map_or(Set1::Many, |(i, (def_id, _))| {
- Set1::One(Region::EarlyBound(i as u32, def_id.to_def_id()))
- })
- }
- }
- Set1::Many => Set1::Many,
- })
- }
- GenericParamKind::Const { .. } => {
- // Generic consts don't impose any constraints.
- //
- // We still store a dummy value here to allow generic parameters
- // in an arbitrary order.
- Some(Set1::Empty)
- }
- };
-
- tcx.arena.alloc_from_iter(generics.params.iter().filter_map(process_param))
-}
-
-impl<'a, 'tcx> LifetimeContext<'a, 'tcx> {
- fn with<F>(&mut self, wrap_scope: Scope<'_>, f: F)
- where
- F: for<'b> FnOnce(&mut LifetimeContext<'b, 'tcx>),
- {
- let LifetimeContext { tcx, map, .. } = self;
- let xcrate_object_lifetime_defaults = take(&mut self.xcrate_object_lifetime_defaults);
- let mut this = LifetimeContext {
- tcx: *tcx,
- map,
- scope: &wrap_scope,
- trait_definition_only: self.trait_definition_only,
- xcrate_object_lifetime_defaults,
- };
- let span = tracing::debug_span!("scope", scope = ?TruncatedScopeDebug(&this.scope));
- {
- let _enter = span.enter();
- f(&mut this);
- }
- self.xcrate_object_lifetime_defaults = this.xcrate_object_lifetime_defaults;
- }
-
- /// Visits self by adding a scope and handling recursive walk over the contents with `walk`.
- ///
- /// Handles visiting fns and methods. These are a bit complicated because we must distinguish
- /// early- vs late-bound lifetime parameters. We do this by checking which lifetimes appear
- /// within type bounds; those are early bound lifetimes, and the rest are late bound.
- ///
- /// For example:
- ///
- /// fn foo<'a,'b,'c,T:Trait<'b>>(...)
- ///
- /// Here `'a` and `'c` are late bound but `'b` is early bound. Note that early- and late-bound
- /// lifetimes may be interspersed together.
- ///
- /// If early bound lifetimes are present, we separate them into their own list (and likewise
- /// for late bound). They will be numbered sequentially, starting from the lowest index that is
- /// already in scope (for a fn item, that will be 0, but for a method it might not be). Late
- /// bound lifetimes are resolved by name and associated with a binder ID (`binder_id`), so the
- /// ordering is not important there.
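- ///
- /// An illustrative reading of the example above (a free-standing fn, so the
- /// early-bound numbering starts at 0):
- /// ```ignore (illustrative)
- /// fn foo<'a, 'b, 'c, T: Trait<'b>>(...)
- /// // 'b: early bound (it appears in the `Trait<'b>` bound), index 0
- /// // 'a, 'c: late bound, resolved by name under `foo`'s binder
- /// ```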
- fn visit_early_late<F>(
- &mut self,
- parent_id: Option<LocalDefId>,
- hir_id: hir::HirId,
- generics: &'tcx hir::Generics<'tcx>,
- walk: F,
- ) where
- F: for<'b, 'c> FnOnce(&'b mut LifetimeContext<'c, 'tcx>),
- {
- // Find the start of nested early scopes, e.g., in methods.
- let mut next_early_index = 0;
- if let Some(parent_id) = parent_id {
- let parent = self.tcx.hir().expect_item(parent_id);
- if sub_items_have_self_param(&parent.kind) {
- next_early_index += 1; // Self comes before lifetimes
- }
- match parent.kind {
- hir::ItemKind::Trait(_, _, ref generics, ..)
- | hir::ItemKind::Impl(hir::Impl { ref generics, .. }) => {
- next_early_index += generics.params.len() as u32;
- }
- _ => {}
- }
- }
-
- let mut non_lifetime_count = 0;
- let mut named_late_bound_vars = 0;
- let lifetimes: FxIndexMap<LocalDefId, Region> = generics
- .params
- .iter()
- .filter_map(|param| match param.kind {
- GenericParamKind::Lifetime { .. } => {
- if self.tcx.is_late_bound(param.hir_id) {
- let late_bound_idx = named_late_bound_vars;
- named_late_bound_vars += 1;
- Some(Region::late(late_bound_idx, self.tcx.hir(), param))
- } else {
- Some(Region::early(self.tcx.hir(), &mut next_early_index, param))
- }
- }
- GenericParamKind::Type { .. } | GenericParamKind::Const { .. } => {
- non_lifetime_count += 1;
- None
- }
- })
- .collect();
- let next_early_index = next_early_index + non_lifetime_count;
-
- let binders: Vec<_> = generics
- .params
- .iter()
- .filter(|param| {
- matches!(param.kind, GenericParamKind::Lifetime { .. })
- && self.tcx.is_late_bound(param.hir_id)
- })
- .enumerate()
- .map(|(late_bound_idx, param)| {
- let pair = Region::late(late_bound_idx as u32, self.tcx.hir(), param);
- late_region_as_bound_region(self.tcx, &pair.1)
- })
- .collect();
- self.map.late_bound_vars.insert(hir_id, binders);
- let scope = Scope::Binder {
- hir_id,
- lifetimes,
- next_early_index,
- s: self.scope,
- opaque_type_parent: true,
- scope_type: BinderScopeType::Normal,
- where_bound_origin: None,
- };
- self.with(scope, walk);
- }
-
- fn next_early_index_helper(&self, only_opaque_type_parent: bool) -> u32 {
- let mut scope = self.scope;
- loop {
- match *scope {
- Scope::Root => return 0,
-
- Scope::Binder { next_early_index, opaque_type_parent, .. }
- if (!only_opaque_type_parent || opaque_type_parent) =>
- {
- return next_early_index;
- }
-
- Scope::Binder { s, .. }
- | Scope::Body { s, .. }
- | Scope::Elision { s, .. }
- | Scope::ObjectLifetimeDefault { s, .. }
- | Scope::Supertrait { s, .. }
- | Scope::TraitRefBoundary { s, .. } => scope = s,
- }
- }
- }
-
- /// Returns the next index one would use for an early-bound-region
- /// if extending the current scope.
- fn next_early_index(&self) -> u32 {
- self.next_early_index_helper(true)
- }
-
- /// Returns the next index one would use for an `impl Trait` that
- /// is being converted into an opaque type alias `impl Trait`. This will be the
- /// next early index from the enclosing item, for the most
- /// part. See the `opaque_type_parent` field for more info.
- fn next_early_index_for_opaque_type(&self) -> u32 {
- self.next_early_index_helper(false)
- }
-
- #[tracing::instrument(level = "debug", skip(self))]
- fn resolve_lifetime_ref(
- &mut self,
- region_def_id: LocalDefId,
- lifetime_ref: &'tcx hir::Lifetime,
- ) {
- // Walk up the scope chain, tracking the number of fn scopes
- // that we pass through, until we find a lifetime with the
- // given name or we run out of scopes.
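- //
- // For example (illustrative): when resolving the use of `'x` in
- // `for<'x> fn(for<'y> fn(&'x u8))`, we cross the inner `Normal` binder before
- // finding `'x`, so the result is `def.shifted(1)`.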
- let mut late_depth = 0;
- let mut scope = self.scope;
- let mut outermost_body = None;
- let result = loop {
- match *scope {
- Scope::Body { id, s } => {
- outermost_body = Some(id);
- scope = s;
- }
-
- Scope::Root => {
- break None;
- }
-
- Scope::Binder { ref lifetimes, scope_type, s, where_bound_origin, .. } => {
- if let Some(&def) = lifetimes.get(&region_def_id) {
- break Some(def.shifted(late_depth));
- }
- match scope_type {
- BinderScopeType::Normal => late_depth += 1,
- BinderScopeType::Concatenating => {}
- }
- // Fresh lifetimes in APIT used to be allowed in async fns and forbidden in
- // regular fns.
- if let Some(hir::PredicateOrigin::ImplTrait) = where_bound_origin
- && let hir::LifetimeName::Param(_, hir::ParamName::Fresh) = lifetime_ref.name
- && let hir::IsAsync::NotAsync = self.tcx.asyncness(lifetime_ref.hir_id.owner)
- && !self.tcx.features().anonymous_lifetime_in_impl_trait
- {
- rustc_session::parse::feature_err(
- &self.tcx.sess.parse_sess,
- sym::anonymous_lifetime_in_impl_trait,
- lifetime_ref.span,
- "anonymous lifetimes in `impl Trait` are unstable",
- ).emit();
- return;
- }
- scope = s;
- }
-
- Scope::Elision { s, .. }
- | Scope::ObjectLifetimeDefault { s, .. }
- | Scope::Supertrait { s, .. }
- | Scope::TraitRefBoundary { s, .. } => {
- scope = s;
- }
- }
- };
-
- if let Some(mut def) = result {
- if let Region::EarlyBound(..) = def {
- // Do not free early-bound regions, only late-bound ones.
- } else if let Some(body_id) = outermost_body {
- let fn_id = self.tcx.hir().body_owner(body_id);
- match self.tcx.hir().get(fn_id) {
- Node::Item(&hir::Item { kind: hir::ItemKind::Fn(..), .. })
- | Node::TraitItem(&hir::TraitItem {
- kind: hir::TraitItemKind::Fn(..), ..
- })
- | Node::ImplItem(&hir::ImplItem { kind: hir::ImplItemKind::Fn(..), .. }) => {
- let scope = self.tcx.hir().local_def_id(fn_id);
- def = Region::Free(scope.to_def_id(), def.id().unwrap());
- }
- _ => {}
- }
- }
-
- self.insert_lifetime(lifetime_ref, def);
- return;
- }
-
- // We may fail to resolve higher-ranked lifetimes that are mentioned by APIT.
- // AST-based resolution does not care about impl-trait desugaring, which is the
- // responsibility of lowering. This may create a mismatch between what the AST-based
- // resolution found (`region_def_id`), which points at an HRTB binder, and what HIR allows.
- // ```
- // fn foo(x: impl for<'a> Trait<'a, Assoc = impl Copy + 'a>) {}
- // ```
- //
- // In such case, walk back the binders to diagnose it properly.
- let mut scope = self.scope;
- loop {
- match *scope {
- Scope::Binder {
- where_bound_origin: Some(hir::PredicateOrigin::ImplTrait), ..
- } => {
- let mut err = self.tcx.sess.struct_span_err(
- lifetime_ref.span,
- "`impl Trait` can only mention lifetimes bound at the fn or impl level",
- );
- err.span_note(self.tcx.def_span(region_def_id), "lifetime declared here");
- err.emit();
- return;
- }
- Scope::Root => break,
- Scope::Binder { s, .. }
- | Scope::Body { s, .. }
- | Scope::Elision { s, .. }
- | Scope::ObjectLifetimeDefault { s, .. }
- | Scope::Supertrait { s, .. }
- | Scope::TraitRefBoundary { s, .. } => {
- scope = s;
- }
- }
- }
-
- self.tcx.sess.delay_span_bug(
- lifetime_ref.span,
- &format!("Could not resolve {:?} in scope {:#?}", lifetime_ref, self.scope,),
- );
- }
-
- fn visit_segment_args(
- &mut self,
- res: Res,
- depth: usize,
- generic_args: &'tcx hir::GenericArgs<'tcx>,
- ) {
- debug!(
- "visit_segment_args(res={:?}, depth={:?}, generic_args={:?})",
- res, depth, generic_args,
- );
-
- if generic_args.parenthesized {
- self.visit_fn_like_elision(
- generic_args.inputs(),
- Some(generic_args.bindings[0].ty()),
- false,
- );
- return;
- }
-
- for arg in generic_args.args {
- if let hir::GenericArg::Lifetime(lt) = arg {
- self.visit_lifetime(lt);
- }
- }
-
- // Figure out if this is a type/trait segment,
- // which requires object lifetime defaults.
- let parent_def_id = |this: &mut Self, def_id: DefId| {
- let def_key = this.tcx.def_key(def_id);
- DefId { krate: def_id.krate, index: def_key.parent.expect("missing parent") }
- };
- let type_def_id = match res {
- Res::Def(DefKind::AssocTy, def_id) if depth == 1 => Some(parent_def_id(self, def_id)),
- Res::Def(DefKind::Variant, def_id) if depth == 0 => Some(parent_def_id(self, def_id)),
- Res::Def(
- DefKind::Struct
- | DefKind::Union
- | DefKind::Enum
- | DefKind::TyAlias
- | DefKind::Trait,
- def_id,
- ) if depth == 0 => Some(def_id),
- _ => None,
- };
-
- debug!("visit_segment_args: type_def_id={:?}", type_def_id);
-
- // Compute a vector of defaults, one for each type parameter,
- // per the rules given in RFCs 599 and 1156. Example:
- //
- // ```rust
- // struct Foo<'a, T: 'a, U> { }
- // ```
- //
- // If you have `Foo<'x, dyn Bar, dyn Baz>`, we want to default
- // `dyn Bar` to `dyn Bar + 'x` (because of the `T: 'a` bound)
- // and `dyn Baz` to `dyn Baz + 'static` (because there is no
- // such bound).
- //
- // Therefore, we would compute `object_lifetime_defaults` to a
- // vector like `['x, 'static]`. Note that the vector only
- // includes type parameters.
- let object_lifetime_defaults = type_def_id.map_or_else(Vec::new, |def_id| {
- let in_body = {
- let mut scope = self.scope;
- loop {
- match *scope {
- Scope::Root => break false,
-
- Scope::Body { .. } => break true,
-
- Scope::Binder { s, .. }
- | Scope::Elision { s, .. }
- | Scope::ObjectLifetimeDefault { s, .. }
- | Scope::Supertrait { s, .. }
- | Scope::TraitRefBoundary { s, .. } => {
- scope = s;
- }
- }
- }
- };
-
- let map = &self.map;
- let set_to_region = |set: &ObjectLifetimeDefault| match *set {
- Set1::Empty => {
- if in_body {
- None
- } else {
- Some(Region::Static)
- }
- }
- Set1::One(r) => {
- let lifetimes = generic_args.args.iter().filter_map(|arg| match arg {
- GenericArg::Lifetime(lt) => Some(lt),
- _ => None,
- });
- r.subst(lifetimes, map)
- }
- Set1::Many => None,
- };
- if let Some(def_id) = def_id.as_local() {
- let id = self.tcx.hir().local_def_id_to_hir_id(def_id);
- self.tcx
- .object_lifetime_defaults(id.owner)
- .unwrap()
- .iter()
- .map(set_to_region)
- .collect()
- } else {
- let tcx = self.tcx;
- self.xcrate_object_lifetime_defaults
- .entry(def_id)
- .or_insert_with(|| {
- tcx.generics_of(def_id)
- .params
- .iter()
- .filter_map(|param| match param.kind {
- GenericParamDefKind::Type { object_lifetime_default, .. } => {
- Some(object_lifetime_default)
- }
- GenericParamDefKind::Const { .. } => Some(Set1::Empty),
- GenericParamDefKind::Lifetime => None,
- })
- .collect()
- })
- .iter()
- .map(set_to_region)
- .collect()
- }
- });
-
- debug!("visit_segment_args: object_lifetime_defaults={:?}", object_lifetime_defaults);
-
- let mut i = 0;
- for arg in generic_args.args {
- match arg {
- GenericArg::Lifetime(_) => {}
- GenericArg::Type(ty) => {
- if let Some(&lt) = object_lifetime_defaults.get(i) {
- let scope = Scope::ObjectLifetimeDefault { lifetime: lt, s: self.scope };
- self.with(scope, |this| this.visit_ty(ty));
- } else {
- self.visit_ty(ty);
- }
- i += 1;
- }
- GenericArg::Const(ct) => {
- self.visit_anon_const(&ct.value);
- i += 1;
- }
- GenericArg::Infer(inf) => {
- self.visit_id(inf.hir_id);
- i += 1;
- }
- }
- }
-
- // Hack: when resolving the type `XX` in a binding like `dyn
- // Foo<'b, Item = XX>`, the current object-lifetime default
- // would be to examine the trait `Foo` to check whether it has
- // a lifetime bound declared on `Item`. e.g., if `Foo` is
- // declared like so, then the default object lifetime bound in
- // `XX` should be `'b`:
- //
- // ```rust
- // trait Foo<'a> {
- // type Item: 'a;
- // }
- // ```
- //
- // but if we just have `type Item;`, then it would be
- // `'static`. However, we don't get all of this logic correct.
- //
- // Instead, we do something hacky: if there are no lifetime parameters
- // to the trait, then we simply use a default object lifetime
- // bound of `'static`, because there is no other possibility. On the other hand,
- // if there ARE lifetime parameters, then we require the user to give an
- // explicit bound for now.
- //
- // This is intended to leave room for us to implement the
- // correct behavior in the future.
- let has_lifetime_parameter =
- generic_args.args.iter().any(|arg| matches!(arg, GenericArg::Lifetime(_)));
-
- // Resolve lifetimes found in the bindings, so either in the type `XX` in `Item = XX` or
- // in the trait ref `YY<...>` in `Item: YY<...>`.
- for binding in generic_args.bindings {
- let scope = Scope::ObjectLifetimeDefault {
- lifetime: if has_lifetime_parameter { None } else { Some(Region::Static) },
- s: self.scope,
- };
- if let Some(type_def_id) = type_def_id {
- let lifetimes = LifetimeContext::supertrait_hrtb_lifetimes(
- self.tcx,
- type_def_id,
- binding.ident,
- );
- self.with(scope, |this| {
- let scope = Scope::Supertrait {
- lifetimes: lifetimes.unwrap_or_default(),
- s: this.scope,
- };
- this.with(scope, |this| this.visit_assoc_type_binding(binding));
- });
- } else {
- self.with(scope, |this| this.visit_assoc_type_binding(binding));
- }
- }
- }
-
- /// Returns all the late-bound vars that come into scope from supertrait HRTBs, based on the
- /// associated type name and starting trait.
- /// For example, imagine we have
- /// ```ignore (illustrative)
- /// trait Foo<'a, 'b> {
- /// type As;
- /// }
- /// trait Bar<'b>: for<'a> Foo<'a, 'b> {}
- /// trait Baz: for<'b> Bar<'b> {}
- /// ```
- /// In this case, if we wanted to find the supertrait HRTB lifetimes for `As` on
- /// the starting trait `Baz`, we would return `Some(['b, 'a])`.
- fn supertrait_hrtb_lifetimes(
- tcx: TyCtxt<'tcx>,
- def_id: DefId,
- assoc_name: Ident,
- ) -> Option<Vec<ty::BoundVariableKind>> {
- let trait_defines_associated_type_named = |trait_def_id: DefId| {
- tcx.associated_items(trait_def_id)
- .find_by_name_and_kind(tcx, assoc_name, ty::AssocKind::Type, trait_def_id)
- .is_some()
- };
-
- use smallvec::{smallvec, SmallVec};
- let mut stack: SmallVec<[(DefId, SmallVec<[ty::BoundVariableKind; 8]>); 8]> =
- smallvec![(def_id, smallvec![])];
- let mut visited: FxHashSet<DefId> = FxHashSet::default();
- loop {
- let Some((def_id, bound_vars)) = stack.pop() else {
- break None;
- };
- // See issue #83753. If someone writes an associated type on a non-trait, just treat it as
- // there being no supertrait HRTBs.
- match tcx.def_kind(def_id) {
- DefKind::Trait | DefKind::TraitAlias | DefKind::Impl => {}
- _ => break None,
- }
-
- if trait_defines_associated_type_named(def_id) {
- break Some(bound_vars.into_iter().collect());
- }
- let predicates =
- tcx.super_predicates_that_define_assoc_type((def_id, Some(assoc_name)));
- let obligations = predicates.predicates.iter().filter_map(|&(pred, _)| {
- let bound_predicate = pred.kind();
- match bound_predicate.skip_binder() {
- ty::PredicateKind::Trait(data) => {
- // The order here needs to match what we would get from `subst_supertrait`
- let pred_bound_vars = bound_predicate.bound_vars();
- let mut all_bound_vars = bound_vars.clone();
- all_bound_vars.extend(pred_bound_vars.iter());
- let super_def_id = data.trait_ref.def_id;
- Some((super_def_id, all_bound_vars))
- }
- _ => None,
- }
- });
-
- let obligations = obligations.filter(|o| visited.insert(o.0));
- stack.extend(obligations);
- }
- }
-
- #[tracing::instrument(level = "debug", skip(self))]
- fn visit_fn_like_elision(
- &mut self,
- inputs: &'tcx [hir::Ty<'tcx>],
- output: Option<&'tcx hir::Ty<'tcx>>,
- in_closure: bool,
- ) {
- self.with(Scope::Elision { s: self.scope }, |this| {
- for input in inputs {
- this.visit_ty(input);
- }
- if !in_closure && let Some(output) = output {
- this.visit_ty(output);
- }
- });
- if in_closure && let Some(output) = output {
- self.visit_ty(output);
- }
- }
-
- fn resolve_object_lifetime_default(&mut self, lifetime_ref: &'tcx hir::Lifetime) {
- debug!("resolve_object_lifetime_default(lifetime_ref={:?})", lifetime_ref);
- let mut late_depth = 0;
- let mut scope = self.scope;
- let lifetime = loop {
- match *scope {
- Scope::Binder { s, scope_type, .. } => {
- match scope_type {
- BinderScopeType::Normal => late_depth += 1,
- BinderScopeType::Concatenating => {}
- }
- scope = s;
- }
-
- Scope::Root | Scope::Elision { .. } => break Region::Static,
-
- Scope::Body { .. } | Scope::ObjectLifetimeDefault { lifetime: None, .. } => return,
-
- Scope::ObjectLifetimeDefault { lifetime: Some(l), .. } => break l,
-
- Scope::Supertrait { s, .. } | Scope::TraitRefBoundary { s, .. } => {
- scope = s;
- }
- }
- };
- self.insert_lifetime(lifetime_ref, lifetime.shifted(late_depth));
- }
-
- #[tracing::instrument(level = "debug", skip(self))]
- fn insert_lifetime(&mut self, lifetime_ref: &'tcx hir::Lifetime, def: Region) {
- debug!(
- node = ?self.tcx.hir().node_to_string(lifetime_ref.hir_id),
- span = ?self.tcx.sess.source_map().span_to_diagnostic_string(lifetime_ref.span)
- );
- self.map.defs.insert(lifetime_ref.hir_id, def);
- }
-
- /// Sometimes we resolve a lifetime, but later find that it is an
- /// error (esp. around impl trait). In that case, we remove the
- /// entry into `map.defs` so as not to confuse later code.
- fn uninsert_lifetime_on_error(&mut self, lifetime_ref: &'tcx hir::Lifetime, bad_def: Region) {
- let old_value = self.map.defs.remove(&lifetime_ref.hir_id);
- assert_eq!(old_value, Some(bad_def));
- }
-}
-
-/// Detects late-bound lifetimes and inserts them into
-/// `late_bound`.
-///
-/// A region declared on a fn is **late-bound** if:
-/// - it is constrained by an argument type;
-/// - it does not appear in a where-clause.
-///
-/// "Constrained" basically means that it appears in any type but
-/// not amongst the inputs to a projection. In other words, `<&'a
- /// T as Trait<'b>>::Foo` does not constrain `'a` or `'b`.
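-///
-/// An illustrative sketch:
-/// ```ignore (illustrative)
-/// fn f<'a, 'b>(x: &'a u32) -> &'b u32 { loop {} }
-/// // 'a: constrained by an input and absent from where-clauses => late-bound
-/// // 'b: appears only in the output => early-bound
-/// ```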
-fn is_late_bound_map(tcx: TyCtxt<'_>, def_id: LocalDefId) -> Option<&FxIndexSet<LocalDefId>> {
- let hir_id = tcx.hir().local_def_id_to_hir_id(def_id);
- let decl = tcx.hir().fn_decl_by_hir_id(hir_id)?;
- let generics = tcx.hir().get_generics(def_id)?;
-
- let mut late_bound = FxIndexSet::default();
-
- let mut constrained_by_input = ConstrainedCollector::default();
- for arg_ty in decl.inputs {
- constrained_by_input.visit_ty(arg_ty);
- }
-
- let mut appears_in_output = AllCollector::default();
- intravisit::walk_fn_ret_ty(&mut appears_in_output, &decl.output);
-
- debug!(?constrained_by_input.regions);
-
- // Walk the lifetimes that appear in where clauses.
- //
- // Subtle point: because we disallow nested bindings, we can just
- // ignore binders here and scrape up all names we see.
- let mut appears_in_where_clause = AllCollector::default();
- appears_in_where_clause.visit_generics(generics);
- debug!(?appears_in_where_clause.regions);
-
- // Late bound regions are those that:
- // - appear in the inputs
- // - do not appear in the where-clauses
- // - are not implicitly captured by `impl Trait`
- for param in generics.params {
- match param.kind {
- hir::GenericParamKind::Lifetime { .. } => { /* fall through */ }
-
- // Neither types nor consts are late-bound.
- hir::GenericParamKind::Type { .. } | hir::GenericParamKind::Const { .. } => continue,
- }
-
- let param_def_id = tcx.hir().local_def_id(param.hir_id);
-
- // appears in the where clauses? early-bound.
- if appears_in_where_clause.regions.contains(&param_def_id) {
- continue;
- }
-
- // does not appear in the inputs, but appears in the return type? early-bound.
- if !constrained_by_input.regions.contains(&param_def_id)
- && appears_in_output.regions.contains(&param_def_id)
- {
- continue;
- }
-
- debug!("lifetime {:?} with id {:?} is late-bound", param.name.ident(), param.hir_id);
-
- let inserted = late_bound.insert(param_def_id);
- assert!(inserted, "visited lifetime {:?} twice", param.hir_id);
- }
-
- debug!(?late_bound);
- return Some(tcx.arena.alloc(late_bound));
-
- #[derive(Default)]
- struct ConstrainedCollector {
- regions: FxHashSet<LocalDefId>,
- }
-
- impl<'v> Visitor<'v> for ConstrainedCollector {
- fn visit_ty(&mut self, ty: &'v hir::Ty<'v>) {
- match ty.kind {
- hir::TyKind::Path(
- hir::QPath::Resolved(Some(_), _) | hir::QPath::TypeRelative(..),
- ) => {
- // ignore lifetimes appearing in associated type
- // projections, as they are not *constrained*
- // (defined above)
- }
-
- hir::TyKind::Path(hir::QPath::Resolved(None, ref path)) => {
- // consider only the lifetimes on the final
- // segment; I am not sure it's even currently
- // valid to have them elsewhere, but even if it
- // is, those would be potentially inputs to
- // projections
- if let Some(last_segment) = path.segments.last() {
- self.visit_path_segment(path.span, last_segment);
- }
- }
-
- _ => {
- intravisit::walk_ty(self, ty);
- }
- }
- }
-
- fn visit_lifetime(&mut self, lifetime_ref: &'v hir::Lifetime) {
- if let hir::LifetimeName::Param(def_id, _) = lifetime_ref.name {
- self.regions.insert(def_id);
- }
- }
- }
-
- #[derive(Default)]
- struct AllCollector {
- regions: FxHashSet<LocalDefId>,
- }
-
- impl<'v> Visitor<'v> for AllCollector {
- fn visit_lifetime(&mut self, lifetime_ref: &'v hir::Lifetime) {
- if let hir::LifetimeName::Param(def_id, _) = lifetime_ref.name {
- self.regions.insert(def_id);
- }
- }
- }
-}
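For context on the doc comment in the removed `is_late_bound_map` code above, here is a self-contained sketch (not part of this diff; names are illustrative) of the late- vs early-bound distinction it computes:

```rust
// Illustrative only; not part of this diff.

// `'a` is late-bound: it is constrained by the argument type `&'a u32`
// and does not appear in a where-clause, so it is bound at each call site.
fn late<'a>(x: &'a u32) -> &'a u32 {
    x
}

// `'b` is early-bound: it appears in a where-clause, so it must be fixed
// when the function item is referenced rather than when it is called.
fn early<'b>(x: &'b u32) -> &'b u32
where
    'b: 'static,
{
    x
}

fn main() {
    // The late-bound lifetime lets one higher-ranked fn pointer cover all lifetimes.
    let f: for<'x> fn(&'x u32) -> &'x u32 = late;
    let v = 1;
    assert_eq!(*f(&v), 1);

    // `early` is only usable with `'static` data because of its where-clause bound.
    static S: u32 = 2;
    assert_eq!(*early(&S), 2);
}
```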
diff --git a/compiler/rustc_resolve/src/lib.rs b/compiler/rustc_resolve/src/lib.rs
index 62843c651..8aebb7da1 100644
--- a/compiler/rustc_resolve/src/lib.rs
+++ b/compiler/rustc_resolve/src/lib.rs
@@ -4,15 +4,15 @@
//! Paths in macros, imports, expressions, types, patterns are resolved here.
//! Label and lifetime names are resolved here as well.
//!
-//! Type-relative name resolution (methods, fields, associated items) happens in `rustc_typeck`.
+//! Type-relative name resolution (methods, fields, associated items) happens in `rustc_hir_analysis`.
#![doc(html_root_url = "https://doc.rust-lang.org/nightly/nightly-rustc/")]
+#![feature(assert_matches)]
#![feature(box_patterns)]
#![feature(drain_filter)]
#![feature(if_let_guard)]
#![feature(iter_intersperse)]
#![feature(let_chains)]
-#![feature(let_else)]
#![feature(never_type)]
#![recursion_limit = "256"]
#![allow(rustdoc::private_intra_doc_links)]
@@ -41,12 +41,12 @@ use rustc_hir::TraitCandidate;
use rustc_index::vec::IndexVec;
use rustc_metadata::creader::{CStore, CrateLoader};
use rustc_middle::metadata::ModChild;
-use rustc_middle::middle::privacy::AccessLevels;
+use rustc_middle::middle::privacy::EffectiveVisibilities;
use rustc_middle::span_bug;
-use rustc_middle::ty::query::Providers;
-use rustc_middle::ty::{self, DefIdTree, MainDefinition, RegisteredTools, ResolverOutputs};
+use rustc_middle::ty::{self, DefIdTree, MainDefinition, RegisteredTools};
+use rustc_middle::ty::{ResolverGlobalCtxt, ResolverOutputs};
use rustc_query_system::ich::StableHashingContext;
-use rustc_session::cstore::{CrateStore, CrateStoreDyn, MetadataLoaderDyn};
+use rustc_session::cstore::{CrateStore, MetadataLoaderDyn};
use rustc_session::lint::LintBuffer;
use rustc_session::Session;
use rustc_span::hygiene::{ExpnId, LocalExpnId, MacroKind, SyntaxContext, Transparency};
@@ -57,23 +57,22 @@ use rustc_span::{Span, DUMMY_SP};
use smallvec::{smallvec, SmallVec};
use std::cell::{Cell, RefCell};
use std::collections::BTreeSet;
-use std::{cmp, fmt, ptr};
-use tracing::debug;
+use std::{fmt, ptr};
use diagnostics::{ImportSuggestion, LabelSuggestion, Suggestion};
use imports::{Import, ImportKind, ImportResolver, NameResolution};
use late::{HasGenericParams, PathSource, PatternSource};
use macros::{MacroRulesBinding, MacroRulesScope, MacroRulesScopeRef};
-use crate::access_levels::AccessLevelsVisitor;
+use crate::effective_visibilities::EffectiveVisibilitiesVisitor;
type Res = def::Res<NodeId>;
-mod access_levels;
mod build_reduced_graph;
mod check_unused;
mod def_collector;
mod diagnostics;
+mod effective_visibilities;
mod ident;
mod imports;
mod late;
@@ -108,7 +107,6 @@ enum Scope<'a> {
// The node ID is for reporting the `PROC_MACRO_DERIVE_RESOLUTION_FALLBACK`
// lint if it should be reported.
Module(Module<'a>, Option<NodeId>),
- RegisteredAttrs,
MacroUsePrelude,
BuiltinAttrs,
ExternPrelude,
@@ -165,7 +163,6 @@ enum ImplTraitContext {
Universal(LocalDefId),
}
-#[derive(Eq)]
struct BindingError {
name: Symbol,
origin: BTreeSet<Span>,
@@ -173,24 +170,6 @@ struct BindingError {
could_be_path: bool,
}
-impl PartialOrd for BindingError {
- fn partial_cmp(&self, other: &BindingError) -> Option<cmp::Ordering> {
- Some(self.cmp(other))
- }
-}
-
-impl PartialEq for BindingError {
- fn eq(&self, other: &BindingError) -> bool {
- self.name == other.name
- }
-}
-
-impl Ord for BindingError {
- fn cmp(&self, other: &BindingError) -> cmp::Ordering {
- self.name.cmp(&other.name)
- }
-}
-
enum ResolutionError<'a> {
/// Error E0401: can't use type or const parameters from outer function.
GenericParamsFromOuterFunction(Res, HasGenericParams),
@@ -258,6 +237,8 @@ enum ResolutionError<'a> {
trait_item_span: Span,
code: rustc_errors::DiagnosticId,
},
+ /// Error E0201: multiple impl items for the same trait item.
+ TraitImplDuplicate { name: Symbol, trait_item_span: Span, old_span: Span },
/// Inline asm `sym` operand must refer to a `fn` or `static`.
InvalidAsmSym,
}
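For context, the new `TraitImplDuplicate` variant covers programs of the following shape (deliberately rejected; item names are made up):

```rust
// Intentionally does not compile: two impl items for the same trait item.
trait Greet {
    fn hello(&self);
}

struct S;

impl Greet for S {
    fn hello(&self) {}
    fn hello(&self) {} // error[E0201]: duplicate definitions with name `hello`
}

fn main() {}
```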
@@ -650,7 +631,7 @@ pub struct NameBinding<'a> {
ambiguity: Option<(&'a NameBinding<'a>, AmbiguityKind)>,
expansion: LocalExpnId,
span: Span,
- vis: ty::Visibility,
+ vis: ty::Visibility<DefId>,
}
pub trait ToNameBinding<'a> {
@@ -696,6 +677,8 @@ struct UseError<'a> {
/// Path `Segment`s at the place of use that failed. Used for accurate suggestion after telling
/// the user to import the item directly.
path: Vec<Segment>,
+ /// Whether the expected source is a call
+ is_call: bool,
}
#[derive(Clone, Copy, PartialEq, Debug)]
@@ -847,7 +830,7 @@ impl<'a> NameBinding<'a> {
}
}
-#[derive(Debug, Default, Clone)]
+#[derive(Default, Clone)]
pub struct ExternPreludeEntry<'a> {
extern_crate_item: Option<&'a NameBinding<'a>>,
pub introduced_by_item: bool,
@@ -913,11 +896,6 @@ pub struct Resolver<'a> {
label_res_map: NodeMap<NodeId>,
/// Resolutions for lifetimes.
lifetimes_res_map: NodeMap<LifetimeRes>,
- /// Mapping from generics `def_id`s to TAIT generics `def_id`s.
- /// For each captured lifetime (e.g., 'a), we create a new lifetime parameter that is a generic
- /// defined on the TAIT, so we have type Foo<'a1> = ... and we establish a mapping in this
- /// field from the original parameter 'a to the new parameter 'a1.
- generics_def_id_map: Vec<FxHashMap<LocalDefId, LocalDefId>>,
/// Lifetime parameters that lowering will have to introduce.
extra_lifetime_params_map: NodeMap<Vec<(Ident, NodeId, LifetimeRes)>>,
@@ -976,7 +954,6 @@ pub struct Resolver<'a> {
/// A small map keeping true kinds of built-in macros that appear to be fn-like on
/// the surface (`macro` items in libcore), but are actually attributes or derives.
builtin_macro_kinds: FxHashMap<LocalDefId, MacroKind>,
- registered_attrs: FxHashSet<Ident>,
registered_tools: RegisteredTools,
macro_use_prelude: FxHashMap<Symbol, &'a NameBinding<'a>>,
macro_map: FxHashMap<DefId, MacroData>,
@@ -1020,7 +997,7 @@ pub struct Resolver<'a> {
/// Table for mapping struct IDs into struct constructor IDs,
/// it's not used during normal resolution, only for better error reporting.
/// Also includes a list of each field's visibility
- struct_constructors: DefIdMap<(Res, ty::Visibility, Vec<ty::Visibility>)>,
+ struct_constructors: DefIdMap<(Res, ty::Visibility<DefId>, Vec<ty::Visibility<DefId>>)>,
/// Features enabled for this crate.
active_features: FxHashSet<Symbol>,
@@ -1054,7 +1031,7 @@ pub struct Resolver<'a> {
proc_macros: Vec<NodeId>,
confused_type_with_std_module: FxHashMap<Span, Span>,
- access_levels: AccessLevels,
+ effective_visibilities: EffectiveVisibilities,
}
/// Nothing really interesting here; it just provides memory for the rest of the crate.
@@ -1253,8 +1230,7 @@ impl<'a> Resolver<'a> {
}
}
- let (registered_attrs, registered_tools) =
- macros::registered_attrs_and_tools(session, &krate.attrs);
+ let registered_tools = macros::registered_tools(session, &krate.attrs);
let features = session.features_untracked();
@@ -1282,7 +1258,6 @@ impl<'a> Resolver<'a> {
import_res_map: Default::default(),
label_res_map: Default::default(),
lifetimes_res_map: Default::default(),
- generics_def_id_map: Vec::new(),
extra_lifetime_params_map: Default::default(),
extern_crate_map: Default::default(),
reexport_map: FxHashMap::default(),
@@ -1319,7 +1294,6 @@ impl<'a> Resolver<'a> {
macro_names: FxHashSet::default(),
builtin_macros: Default::default(),
builtin_macro_kinds: Default::default(),
- registered_attrs,
registered_tools,
macro_use_prelude: FxHashMap::default(),
macro_map: FxHashMap::default(),
@@ -1361,7 +1335,7 @@ impl<'a> Resolver<'a> {
trait_impls: Default::default(),
proc_macros: Default::default(),
confused_type_with_std_module: Default::default(),
- access_levels: Default::default(),
+ effective_visibilities: Default::default(),
};
let root_parent_scope = ParentScope::module(graph_root, &resolver);
@@ -1404,9 +1378,7 @@ impl<'a> Resolver<'a> {
Default::default()
}
- pub fn into_outputs(
- self,
- ) -> (Definitions, Box<CrateStoreDyn>, ResolverOutputs, ty::ResolverAstLowering) {
+ pub fn into_outputs(self) -> ResolverOutputs {
let proc_macros = self.proc_macros.iter().map(|id| self.local_def_id(*id)).collect();
let definitions = self.definitions;
let cstore = Box::new(self.crate_loader.into_cstore());
@@ -1421,13 +1393,14 @@ impl<'a> Resolver<'a> {
let glob_map = self.glob_map;
let main_def = self.main_def;
let confused_type_with_std_module = self.confused_type_with_std_module;
- let access_levels = self.access_levels;
- let resolutions = ResolverOutputs {
+ let effective_visibilities = self.effective_visibilities;
+ let global_ctxt = ResolverGlobalCtxt {
+ cstore,
source_span,
expn_that_defined,
visibilities,
has_pub_restricted,
- access_levels,
+ effective_visibilities,
extern_crate_map,
reexport_map,
glob_map,
@@ -1444,13 +1417,12 @@ impl<'a> Resolver<'a> {
confused_type_with_std_module,
registered_tools: self.registered_tools,
};
- let resolutions_lowering = ty::ResolverAstLowering {
+ let ast_lowering = ty::ResolverAstLowering {
legacy_const_generic_args: self.legacy_const_generic_args,
partial_res_map: self.partial_res_map,
import_res_map: self.import_res_map,
label_res_map: self.label_res_map,
lifetimes_res_map: self.lifetimes_res_map,
- generics_def_id_map: self.generics_def_id_map,
extra_lifetime_params_map: self.extra_lifetime_params_map,
next_node_id: self.next_node_id,
node_id_to_def_id: self.node_id_to_def_id,
@@ -1458,16 +1430,15 @@ impl<'a> Resolver<'a> {
trait_map: self.trait_map,
builtin_macro_kinds: self.builtin_macro_kinds,
};
- (definitions, cstore, resolutions, resolutions_lowering)
+ ResolverOutputs { definitions, global_ctxt, ast_lowering }
}
- pub fn clone_outputs(
- &self,
- ) -> (Definitions, Box<CrateStoreDyn>, ResolverOutputs, ty::ResolverAstLowering) {
+ pub fn clone_outputs(&self) -> ResolverOutputs {
let proc_macros = self.proc_macros.iter().map(|id| self.local_def_id(*id)).collect();
let definitions = self.definitions.clone();
let cstore = Box::new(self.cstore().clone());
- let resolutions = ResolverOutputs {
+ let global_ctxt = ResolverGlobalCtxt {
+ cstore,
source_span: self.source_span.clone(),
expn_that_defined: self.expn_that_defined.clone(),
visibilities: self.visibilities.clone(),
@@ -1487,15 +1458,14 @@ impl<'a> Resolver<'a> {
proc_macros,
confused_type_with_std_module: self.confused_type_with_std_module.clone(),
registered_tools: self.registered_tools.clone(),
- access_levels: self.access_levels.clone(),
+ effective_visibilities: self.effective_visibilities.clone(),
};
- let resolutions_lowering = ty::ResolverAstLowering {
+ let ast_lowering = ty::ResolverAstLowering {
legacy_const_generic_args: self.legacy_const_generic_args.clone(),
partial_res_map: self.partial_res_map.clone(),
import_res_map: self.import_res_map.clone(),
label_res_map: self.label_res_map.clone(),
lifetimes_res_map: self.lifetimes_res_map.clone(),
- generics_def_id_map: self.generics_def_id_map.clone(),
extra_lifetime_params_map: self.extra_lifetime_params_map.clone(),
next_node_id: self.next_node_id.clone(),
node_id_to_def_id: self.node_id_to_def_id.clone(),
@@ -1503,7 +1473,7 @@ impl<'a> Resolver<'a> {
trait_map: self.trait_map.clone(),
builtin_macro_kinds: self.builtin_macro_kinds.clone(),
};
- (definitions, cstore, resolutions, resolutions_lowering)
+ ResolverOutputs { definitions, global_ctxt, ast_lowering }
}
fn create_stable_hashing_context(&self) -> StableHashingContext<'_> {
@@ -1551,8 +1521,8 @@ impl<'a> Resolver<'a> {
pub fn resolve_crate(&mut self, krate: &Crate) {
self.session.time("resolve_crate", || {
self.session.time("finalize_imports", || ImportResolver { r: self }.finalize_imports());
- self.session.time("resolve_access_levels", || {
- AccessLevelsVisitor::compute_access_levels(self, krate)
+ self.session.time("compute_effective_visibilities", || {
+ EffectiveVisibilitiesVisitor::compute_effective_visibilities(self, krate)
});
self.session.time("finalize_macro_resolutions", || self.finalize_macro_resolutions());
self.session.time("late_resolve_crate", || self.late_resolve_crate(krate));
@@ -1821,7 +1791,11 @@ impl<'a> Resolver<'a> {
self.pat_span_map.insert(node, span);
}
- fn is_accessible_from(&self, vis: ty::Visibility, module: Module<'a>) -> bool {
+ fn is_accessible_from(
+ &self,
+ vis: ty::Visibility<impl Into<DefId>>,
+ module: Module<'a>,
+ ) -> bool {
vis.is_accessible_from(module.nearest_parent_mod(), self)
}
@@ -1875,10 +1849,8 @@ impl<'a> Resolver<'a> {
self.crate_loader.maybe_process_path_extern(ident.name)?
};
let crate_root = self.expect_module(crate_id.as_def_id());
- Some(
- (crate_root, ty::Visibility::Public, DUMMY_SP, LocalExpnId::ROOT)
- .to_name_binding(self.arenas),
- )
+ let vis = ty::Visibility::<LocalDefId>::Public;
+ Some((crate_root, vis, DUMMY_SP, LocalExpnId::ROOT).to_name_binding(self.arenas))
}
})
}
@@ -1911,12 +1883,10 @@ impl<'a> Resolver<'a> {
match self.maybe_resolve_path(&segments, Some(ns), &parent_scope) {
PathResult::Module(ModuleOrUniformRoot::Module(module)) => Some(module.res().unwrap()),
- PathResult::NonModule(path_res) if path_res.unresolved_segments() == 0 => {
- Some(path_res.base_res())
+ PathResult::NonModule(path_res) => path_res.full_res(),
+ PathResult::Module(ModuleOrUniformRoot::ExternPrelude) | PathResult::Failed { .. } => {
+ None
}
- PathResult::Module(ModuleOrUniformRoot::ExternPrelude)
- | PathResult::NonModule(..)
- | PathResult::Failed { .. } => None,
PathResult::Module(..) | PathResult::Indeterminate => unreachable!(),
}
}
@@ -1940,12 +1910,27 @@ impl<'a> Resolver<'a> {
}
}
+ /// For rustdoc.
+ pub fn get_partial_res(&self, node_id: NodeId) -> Option<PartialRes> {
+ self.partial_res_map.get(&node_id).copied()
+ }
+
/// Retrieves the span of the given `DefId` if `DefId` is in the local crate.
#[inline]
pub fn opt_span(&self, def_id: DefId) -> Option<Span> {
def_id.as_local().map(|def_id| self.source_span[def_id])
}
+ /// Retrieves the name of the given `DefId`.
+ #[inline]
+ pub fn opt_name(&self, def_id: DefId) -> Option<Symbol> {
+ let def_key = match def_id.as_local() {
+ Some(def_id) => self.definitions.def_key(def_id),
+ None => self.cstore().def_key(def_id),
+ };
+ def_key.get_opt_name()
+ }
+
/// Checks if an expression refers to a function marked with
/// `#[rustc_legacy_const_generics]` and returns the argument index list
/// from the attribute.
@@ -1957,12 +1942,8 @@ impl<'a> Resolver<'a> {
return None;
}
- let partial_res = self.partial_res_map.get(&expr.id)?;
- if partial_res.unresolved_segments() != 0 {
- return None;
- }
-
- if let Res::Def(def::DefKind::Fn, def_id) = partial_res.base_res() {
+ let res = self.partial_res_map.get(&expr.id)?.full_res()?;
+ if let Res::Def(def::DefKind::Fn, def_id) = res {
// We only support cross-crate argument rewriting. Uses
// within the same crate should be updated to use the new
// const generics style.
@@ -1985,7 +1966,7 @@ impl<'a> Resolver<'a> {
_ => panic!("invalid arg index"),
}
}
- // Cache the lookup to avoid parsing attributes for an iterm multiple times.
+ // Cache the lookup to avoid parsing attributes for an item multiple times.
self.legacy_const_generic_args.insert(def_id, Some(ret.clone()));
return Some(ret);
}
@@ -2015,6 +1996,24 @@ impl<'a> Resolver<'a> {
}
self.main_def = Some(MainDefinition { res, is_import, span });
}
+
+ // Items that go into the reexport table are encoded to metadata and are visible through it to other crates.
+ fn is_reexport(&self, binding: &NameBinding<'a>) -> Option<def::Res<!>> {
+ // FIXME: Consider changing the binding inserted by `#[macro_export] macro_rules`
+ // into the crate root to actual `NameBindingKind::Import`.
+ if binding.is_import()
+ || matches!(binding.kind, NameBindingKind::Res(_, _is_macro_export @ true))
+ {
+ let res = binding.res().expect_non_local();
+ // Ambiguous imports are treated as errors at this point and are
+ // not exposed to other crates (see #36837 for more details).
+ if res != def::Res::Err && !binding.is_ambiguity() {
+ return Some(res);
+ }
+ }
+
+ return None;
+ }
}
fn names_to_string(names: &[Symbol]) -> String {
@@ -2066,7 +2065,7 @@ struct Finalize {
/// Span of the whole path or some of its characteristic fragment.
/// E.g. span of `b` in `foo::{a, b, c}`, or full span for regular paths.
path_span: Span,
- /// Span of the path start, suitable for prepending something to to it.
+ /// Span of the path start, suitable for prepending something to it.
/// E.g. span of `foo` in `foo::{a, b, c}`, or full span for regular paths.
root_span: Span,
/// Whether to report privacy errors or silently return "no resolution" for them,
@@ -2083,7 +2082,3 @@ impl Finalize {
Finalize { node_id, path_span, root_span, report_private: true }
}
}
-
-pub fn provide(providers: &mut Providers) {
- late::lifetimes::provide(providers);
-}
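As background for the `AccessLevels` → `EffectiveVisibilities` rename above: effective visibility is about what is actually reachable from outside the crate, which can differ from the declared visibility. A standalone sketch (module and item names are illustrative, not from this diff):

```rust
mod inner {
    // Declared `pub`, but only nominally: the enclosing module is private,
    // so the effective visibility is crate-local unless something re-exports it.
    pub fn helper() {}
}

// The re-export raises the effective visibility of `inner::helper`
// to that of the crate root, making it reachable from other crates.
pub use inner::helper;

pub mod api {
    // Declared and effective visibility agree here.
    pub fn call() {
        super::inner::helper();
    }
}

fn main() {
    helper();
    api::call();
}
```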
diff --git a/compiler/rustc_resolve/src/macros.rs b/compiler/rustc_resolve/src/macros.rs
index 070fb9c72..9526296f9 100644
--- a/compiler/rustc_resolve/src/macros.rs
+++ b/compiler/rustc_resolve/src/macros.rs
@@ -12,7 +12,7 @@ use rustc_attr::StabilityLevel;
use rustc_data_structures::fx::FxHashSet;
use rustc_data_structures::intern::Interned;
use rustc_data_structures::sync::Lrc;
-use rustc_errors::struct_span_err;
+use rustc_errors::{struct_span_err, Applicability};
use rustc_expand::base::{Annotatable, DeriveResolutions, Indeterminate, ResolverExpand};
use rustc_expand::base::{SyntaxExtension, SyntaxExtensionKind};
use rustc_expand::compile_declarative_macro;
@@ -112,47 +112,32 @@ fn fast_print_path(path: &ast::Path) -> Symbol {
}
}
-/// The code common between processing `#![register_tool]` and `#![register_attr]`.
-fn registered_idents(
- sess: &Session,
- attrs: &[ast::Attribute],
- attr_name: Symbol,
- descr: &str,
-) -> FxHashSet<Ident> {
- let mut registered = FxHashSet::default();
- for attr in sess.filter_by_name(attrs, attr_name) {
+pub(crate) fn registered_tools(sess: &Session, attrs: &[ast::Attribute]) -> FxHashSet<Ident> {
+ let mut registered_tools = FxHashSet::default();
+ for attr in sess.filter_by_name(attrs, sym::register_tool) {
for nested_meta in attr.meta_item_list().unwrap_or_default() {
match nested_meta.ident() {
Some(ident) => {
- if let Some(old_ident) = registered.replace(ident) {
- let msg = format!("{} `{}` was already registered", descr, ident);
+ if let Some(old_ident) = registered_tools.replace(ident) {
+ let msg = format!("{} `{}` was already registered", "tool", ident);
sess.struct_span_err(ident.span, &msg)
.span_label(old_ident.span, "already registered here")
.emit();
}
}
None => {
- let msg = format!("`{}` only accepts identifiers", attr_name);
+ let msg = format!("`{}` only accepts identifiers", sym::register_tool);
let span = nested_meta.span();
sess.struct_span_err(span, &msg).span_label(span, "not an identifier").emit();
}
}
}
}
- registered
-}
-
-pub(crate) fn registered_attrs_and_tools(
- sess: &Session,
- attrs: &[ast::Attribute],
-) -> (FxHashSet<Ident>, FxHashSet<Ident>) {
- let registered_attrs = registered_idents(sess, attrs, sym::register_attr, "attribute");
- let mut registered_tools = registered_idents(sess, attrs, sym::register_tool, "tool");
// We implicitly add `rustfmt` and `clippy` to known tools,
// but it's not an error to register them explicitly.
let predefined_tools = [sym::clippy, sym::rustfmt];
registered_tools.extend(predefined_tools.iter().cloned().map(Ident::with_dummy_span));
- (registered_attrs, registered_tools)
+ registered_tools
}
// Some feature gates for inner attributes are reported as lints for backward compatibility.
@@ -456,7 +441,7 @@ impl<'a> ResolverExpand for Resolver<'a> {
}
PathResult::Indeterminate => indeterminate = true,
// We can only be sure that a path doesn't exist after having tested all the
- // posibilities, only at that time we can return false.
+ // possibilities, only at that time we can return false.
PathResult::Failed { .. } => {}
PathResult::Module(_) => panic!("unexpected path resolution"),
}
@@ -605,9 +590,7 @@ impl<'a> Resolver<'a> {
let res = if path.len() > 1 {
let res = match self.maybe_resolve_path(&path, Some(MacroNS), parent_scope) {
- PathResult::NonModule(path_res) if path_res.unresolved_segments() == 0 => {
- Ok(path_res.base_res())
- }
+ PathResult::NonModule(path_res) if let Some(res) = path_res.full_res() => Ok(res),
PathResult::Indeterminate if !force => return Err(Determinacy::Undetermined),
PathResult::NonModule(..)
| PathResult::Indeterminate
@@ -707,12 +690,23 @@ impl<'a> Resolver<'a> {
Some(Finalize::new(ast::CRATE_NODE_ID, path_span)),
None,
) {
- PathResult::NonModule(path_res) if path_res.unresolved_segments() == 0 => {
- let res = path_res.base_res();
- check_consistency(self, &path, path_span, kind, initial_res, res);
+ PathResult::NonModule(path_res) if let Some(res) = path_res.full_res() => {
+ check_consistency(self, &path, path_span, kind, initial_res, res)
}
path_res @ PathResult::NonModule(..) | path_res @ PathResult::Failed { .. } => {
+ let mut suggestion = None;
let (span, label) = if let PathResult::Failed { span, label, .. } = path_res {
+ // try to suggest if it's not a macro, maybe a function
+ if let PathResult::NonModule(partial_res) = self.maybe_resolve_path(&path, Some(ValueNS), &parent_scope)
+ && partial_res.unresolved_segments() == 0 {
+ let sm = self.session.source_map();
+ let exclamation_span = sm.next_point(span);
+ suggestion = Some((
+ vec![(exclamation_span, "".to_string())],
+ format!("{} is not a macro, but a {}, try to remove `!`", Segment::names_to_string(&path), partial_res.base_res().descr()),
+ Applicability::MaybeIncorrect
+ ));
+ }
(span, label)
} else {
(
@@ -726,7 +720,7 @@ impl<'a> Resolver<'a> {
};
self.report_error(
span,
- ResolutionError::FailedToResolve { label, suggestion: None },
+ ResolutionError::FailedToResolve { label, suggestion },
);
}
PathResult::Module(..) | PathResult::Indeterminate => unreachable!(),
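To illustrate the suggestion wired up above: a deliberately broken snippet where a path used as a macro actually resolves to a function in the value namespace, so the resolver can now suggest dropping the `!` (names are made up):

```rust
fn print_hello() {
    println!("hello");
}

fn main() {
    // error: cannot find macro `print_hello` in this scope
    // With this change the error also carries a suggestion along the lines of
    // "`print_hello` is not a macro, but a function, try to remove `!`".
    print_hello!();
}
```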
diff --git a/compiler/rustc_save_analysis/Cargo.toml b/compiler/rustc_save_analysis/Cargo.toml
index 15a89d82f..181e27f33 100644
--- a/compiler/rustc_save_analysis/Cargo.toml
+++ b/compiler/rustc_save_analysis/Cargo.toml
@@ -9,9 +9,11 @@ rustc_middle = { path = "../rustc_middle" }
rustc_ast = { path = "../rustc_ast" }
rustc_ast_pretty = { path = "../rustc_ast_pretty" }
rustc_data_structures = { path = "../rustc_data_structures" }
+rustc_errors = { path = "../rustc_errors" }
rustc_hir = { path = "../rustc_hir" }
rustc_hir_pretty = { path = "../rustc_hir_pretty" }
rustc_lexer = { path = "../rustc_lexer" }
+rustc_macros = { path = "../rustc_macros" }
serde_json = "1"
rustc_session = { path = "../rustc_session" }
rustc_span = { path = "../rustc_span" }
diff --git a/compiler/rustc_save_analysis/src/dump_visitor.rs b/compiler/rustc_save_analysis/src/dump_visitor.rs
index e2e0e1f5b..df5d992f6 100644
--- a/compiler/rustc_save_analysis/src/dump_visitor.rs
+++ b/compiler/rustc_save_analysis/src/dump_visitor.rs
@@ -44,8 +44,6 @@ use rls_data::{
RefKind, Relation, RelationKind, SpanData,
};
-use tracing::{debug, error};
-
#[rustfmt::skip] // https://github.com/rust-lang/rustfmt/issues/5213
macro_rules! down_cast_data {
($id:ident, $kind:ident, $sp:expr) => {
@@ -59,7 +57,7 @@ macro_rules! access_from {
($save_ctxt:expr, $id:expr) => {
Access {
public: $save_ctxt.tcx.visibility($id).is_public(),
- reachable: $save_ctxt.access_levels.is_reachable($id),
+ reachable: $save_ctxt.effective_visibilities.is_reachable($id),
}
};
}
@@ -347,14 +345,14 @@ impl<'tcx> DumpVisitor<'tcx> {
body: hir::BodyId,
) {
let map = self.tcx.hir();
- self.nest_typeck_results(item.def_id, |v| {
+ self.nest_typeck_results(item.owner_id.def_id, |v| {
let body = map.body(body);
if let Some(fn_data) = v.save_ctxt.get_item_data(item) {
down_cast_data!(fn_data, DefData, item.span);
v.process_formals(body.params, &fn_data.qualname);
v.process_generic_params(ty_params, &fn_data.qualname, item.hir_id());
- v.dumper.dump_def(&access_from!(v.save_ctxt, item.def_id), fn_data);
+ v.dumper.dump_def(&access_from!(v.save_ctxt, item.owner_id.def_id), fn_data);
}
for arg in decl.inputs {
@@ -375,10 +373,10 @@ impl<'tcx> DumpVisitor<'tcx> {
typ: &'tcx hir::Ty<'tcx>,
expr: &'tcx hir::Expr<'tcx>,
) {
- self.nest_typeck_results(item.def_id, |v| {
+ self.nest_typeck_results(item.owner_id.def_id, |v| {
if let Some(var_data) = v.save_ctxt.get_item_data(item) {
down_cast_data!(var_data, DefData, item.span);
- v.dumper.dump_def(&access_from!(v.save_ctxt, item.def_id), var_data);
+ v.dumper.dump_def(&access_from!(v.save_ctxt, item.owner_id.def_id), var_data);
}
v.visit_ty(&typ);
v.visit_expr(expr);
@@ -438,7 +436,7 @@ impl<'tcx> DumpVisitor<'tcx> {
) {
debug!("process_struct {:?} {:?}", item, item.span);
let name = item.ident.to_string();
- let qualname = format!("::{}", self.tcx.def_path_str(item.def_id.to_def_id()));
+ let qualname = format!("::{}", self.tcx.def_path_str(item.owner_id.to_def_id()));
let kind = match item.kind {
hir::ItemKind::Struct(_, _) => DefKind::Struct,
@@ -475,10 +473,10 @@ impl<'tcx> DumpVisitor<'tcx> {
let span = self.span_from_span(item.ident.span);
let attrs = self.tcx.hir().attrs(item.hir_id());
self.dumper.dump_def(
- &access_from!(self.save_ctxt, item.def_id),
+ &access_from!(self.save_ctxt, item.owner_id.def_id),
Def {
kind,
- id: id_from_def_id(item.def_id.to_def_id()),
+ id: id_from_def_id(item.owner_id.to_def_id()),
span,
name,
qualname: qualname.clone(),
@@ -493,7 +491,7 @@ impl<'tcx> DumpVisitor<'tcx> {
);
}
- self.nest_typeck_results(item.def_id, |v| {
+ self.nest_typeck_results(item.owner_id.def_id, |v| {
for field in def.fields() {
v.process_struct_field_def(field, item.hir_id());
v.visit_ty(&field.ty);
@@ -515,7 +513,7 @@ impl<'tcx> DumpVisitor<'tcx> {
};
down_cast_data!(enum_data, DefData, item.span);
- let access = access_from!(self.save_ctxt, item.def_id);
+ let access = access_from!(self.save_ctxt, item.owner_id.def_id);
for variant in enum_definition.variants {
let name = variant.ident.name.to_string();
@@ -530,7 +528,7 @@ impl<'tcx> DumpVisitor<'tcx> {
if !self.span.filter_generated(name_span) {
let span = self.span_from_span(name_span);
let id = id_from_hir_id(variant.id, &self.save_ctxt);
- let parent = Some(id_from_def_id(item.def_id.to_def_id()));
+ let parent = Some(id_from_def_id(item.owner_id.to_def_id()));
let attrs = self.tcx.hir().attrs(variant.id);
self.dumper.dump_def(
@@ -568,7 +566,7 @@ impl<'tcx> DumpVisitor<'tcx> {
if !self.span.filter_generated(name_span) {
let span = self.span_from_span(name_span);
let id = id_from_hir_id(variant.id, &self.save_ctxt);
- let parent = Some(id_from_def_id(item.def_id.to_def_id()));
+ let parent = Some(id_from_def_id(item.owner_id.to_def_id()));
let attrs = self.tcx.hir().attrs(variant.id);
self.dumper.dump_def(
@@ -614,14 +612,14 @@ impl<'tcx> DumpVisitor<'tcx> {
}
let map = self.tcx.hir();
- self.nest_typeck_results(item.def_id, |v| {
+ self.nest_typeck_results(item.owner_id.def_id, |v| {
v.visit_ty(&impl_.self_ty);
if let Some(trait_ref) = &impl_.of_trait {
v.process_path(trait_ref.hir_ref_id, &hir::QPath::Resolved(None, &trait_ref.path));
}
v.process_generic_params(&impl_.generics, "", item.hir_id());
for impl_item in impl_.items {
- v.process_impl_item(map.impl_item(impl_item.id), item.def_id.to_def_id());
+ v.process_impl_item(map.impl_item(impl_item.id), item.owner_id.to_def_id());
}
});
}
@@ -634,7 +632,7 @@ impl<'tcx> DumpVisitor<'tcx> {
methods: &'tcx [hir::TraitItemRef],
) {
let name = item.ident.to_string();
- let qualname = format!("::{}", self.tcx.def_path_str(item.def_id.to_def_id()));
+ let qualname = format!("::{}", self.tcx.def_path_str(item.owner_id.to_def_id()));
let mut val = name.clone();
if !generics.params.is_empty() {
val.push_str(&generic_params_to_string(generics.params));
@@ -644,13 +642,13 @@ impl<'tcx> DumpVisitor<'tcx> {
val.push_str(&bounds_to_string(trait_refs));
}
if !self.span.filter_generated(item.ident.span) {
- let id = id_from_def_id(item.def_id.to_def_id());
+ let id = id_from_def_id(item.owner_id.to_def_id());
let span = self.span_from_span(item.ident.span);
let children =
- methods.iter().map(|i| id_from_def_id(i.id.def_id.to_def_id())).collect();
+ methods.iter().map(|i| id_from_def_id(i.id.owner_id.to_def_id())).collect();
let attrs = self.tcx.hir().attrs(item.hir_id());
self.dumper.dump_def(
- &access_from!(self.save_ctxt, item.def_id),
+ &access_from!(self.save_ctxt, item.owner_id.def_id),
Def {
kind: DefKind::Trait,
id,
@@ -694,7 +692,7 @@ impl<'tcx> DumpVisitor<'tcx> {
kind: RelationKind::SuperTrait,
span,
from: id_from_def_id(id),
- to: id_from_def_id(item.def_id.to_def_id()),
+ to: id_from_def_id(item.owner_id.to_def_id()),
});
}
}
@@ -704,7 +702,7 @@ impl<'tcx> DumpVisitor<'tcx> {
self.process_generic_params(generics, &qualname, item.hir_id());
for method in methods {
let map = self.tcx.hir();
- self.process_trait_item(map.trait_item(method.id), item.def_id.to_def_id())
+ self.process_trait_item(map.trait_item(method.id), item.owner_id.to_def_id())
}
}
@@ -712,7 +710,7 @@ impl<'tcx> DumpVisitor<'tcx> {
fn process_mod(&mut self, item: &'tcx hir::Item<'tcx>) {
if let Some(mod_data) = self.save_ctxt.get_item_data(item) {
down_cast_data!(mod_data, DefData, item.span);
- self.dumper.dump_def(&access_from!(self.save_ctxt, item.def_id), mod_data);
+ self.dumper.dump_def(&access_from!(self.save_ctxt, item.owner_id.def_id), mod_data);
}
}
@@ -805,6 +803,7 @@ impl<'tcx> DumpVisitor<'tcx> {
&mut self,
ex: &'tcx hir::Expr<'tcx>,
seg: &'tcx hir::PathSegment<'tcx>,
+ receiver: &'tcx hir::Expr<'tcx>,
args: &'tcx [hir::Expr<'tcx>],
) {
debug!("process_method_call {:?} {:?}", ex, ex.span);
@@ -825,6 +824,7 @@ impl<'tcx> DumpVisitor<'tcx> {
}
// walk receiver and args
+ self.visit_expr(receiver);
walk_list!(self, visit_expr, args);
}
@@ -913,8 +913,12 @@ impl<'tcx> DumpVisitor<'tcx> {
| HirDefKind::AssocTy,
_,
)
- | Res::SelfTy { .. } => {
- self.dump_path_segment_ref(id, &hir::PathSegment::from_ident(ident));
+ | Res::SelfTyParam { .. }
+ | Res::SelfTyAlias { .. } => {
+ self.dump_path_segment_ref(
+ id,
+ &hir::PathSegment::new(ident, hir::HirId::INVALID, Res::Err),
+ );
}
def => {
error!("unexpected definition kind when processing collected idents: {:?}", def)
@@ -974,10 +978,10 @@ impl<'tcx> DumpVisitor<'tcx> {
self.process_macro_use(trait_item.span);
match trait_item.kind {
hir::TraitItemKind::Const(ref ty, body) => {
- let body = body.map(|b| &self.tcx.hir().body(b).value);
+ let body = body.map(|b| self.tcx.hir().body(b).value);
let attrs = self.tcx.hir().attrs(trait_item.hir_id());
self.process_assoc_const(
- trait_item.def_id,
+ trait_item.owner_id.def_id,
trait_item.ident,
&ty,
body,
@@ -991,7 +995,7 @@ impl<'tcx> DumpVisitor<'tcx> {
self.process_method(
sig,
body,
- trait_item.def_id,
+ trait_item.owner_id.def_id,
trait_item.ident,
&trait_item.generics,
trait_item.span,
@@ -1001,11 +1005,11 @@ impl<'tcx> DumpVisitor<'tcx> {
// FIXME do something with _bounds (for type refs)
let name = trait_item.ident.name.to_string();
let qualname =
- format!("::{}", self.tcx.def_path_str(trait_item.def_id.to_def_id()));
+ format!("::{}", self.tcx.def_path_str(trait_item.owner_id.to_def_id()));
if !self.span.filter_generated(trait_item.ident.span) {
let span = self.span_from_span(trait_item.ident.span);
- let id = id_from_def_id(trait_item.def_id.to_def_id());
+ let id = id_from_def_id(trait_item.owner_id.to_def_id());
let attrs = self.tcx.hir().attrs(trait_item.hir_id());
self.dumper.dump_def(
@@ -1047,7 +1051,7 @@ impl<'tcx> DumpVisitor<'tcx> {
let body = self.tcx.hir().body(body);
let attrs = self.tcx.hir().attrs(impl_item.hir_id());
self.process_assoc_const(
- impl_item.def_id,
+ impl_item.owner_id.def_id,
impl_item.ident,
&ty,
Some(&body.value),
@@ -1059,13 +1063,13 @@ impl<'tcx> DumpVisitor<'tcx> {
self.process_method(
sig,
Some(body),
- impl_item.def_id,
+ impl_item.owner_id.def_id,
impl_item.ident,
&impl_item.generics,
impl_item.span,
);
}
- hir::ImplItemKind::TyAlias(ref ty) => {
+ hir::ImplItemKind::Type(ref ty) => {
// FIXME: uses of the assoc type should ideally point to this
// 'def' and the name here should be a ref to the def in the
// trait.
@@ -1084,7 +1088,7 @@ impl<'tcx> DumpVisitor<'tcx> {
let filename = sm.span_to_filename(krate_mod.spans.inner_span);
let data_id = id_from_hir_id(id, &self.save_ctxt);
let children =
- krate_mod.item_ids.iter().map(|i| id_from_def_id(i.def_id.to_def_id())).collect();
+ krate_mod.item_ids.iter().map(|i| id_from_def_id(i.owner_id.to_def_id())).collect();
let span = self.span_from_span(krate_mod.spans.inner_span);
let attrs = self.tcx.hir().attrs(id);
@@ -1133,10 +1137,10 @@ impl<'tcx> Visitor<'tcx> for DumpVisitor<'tcx> {
hir::ItemKind::Use(path, hir::UseKind::Single) => {
let sub_span = path.segments.last().unwrap().ident.span;
if !self.span.filter_generated(sub_span) {
- let access = access_from!(self.save_ctxt, item.def_id);
+ let access = access_from!(self.save_ctxt, item.owner_id.def_id);
let ref_id = self.lookup_def_id(item.hir_id()).map(id_from_def_id);
let span = self.span_from_span(sub_span);
- let parent = self.save_ctxt.tcx.local_parent(item.def_id);
+ let parent = self.save_ctxt.tcx.local_parent(item.owner_id.def_id);
self.dumper.import(
&access,
Import {
@@ -1154,16 +1158,16 @@ impl<'tcx> Visitor<'tcx> for DumpVisitor<'tcx> {
}
hir::ItemKind::Use(path, hir::UseKind::Glob) => {
// Make a comma-separated list of names of imported modules.
- let names = self.tcx.names_imported_by_glob_use(item.def_id);
+ let names = self.tcx.names_imported_by_glob_use(item.owner_id.def_id);
let names: Vec<_> = names.iter().map(|n| n.to_string()).collect();
// Otherwise it's a span with wrong macro expansion info, which
// we don't want to track anyway, since it's probably macro-internal `use`
if let Some(sub_span) = self.span.sub_span_of_star(item.span) {
if !self.span.filter_generated(item.span) {
- let access = access_from!(self.save_ctxt, item.def_id);
+ let access = access_from!(self.save_ctxt, item.owner_id.def_id);
let span = self.span_from_span(sub_span);
- let parent = self.save_ctxt.tcx.local_parent(item.def_id);
+ let parent = self.save_ctxt.tcx.local_parent(item.owner_id.def_id);
self.dumper.import(
&access,
Import {
@@ -1184,7 +1188,7 @@ impl<'tcx> Visitor<'tcx> for DumpVisitor<'tcx> {
let name_span = item.ident.span;
if !self.span.filter_generated(name_span) {
let span = self.span_from_span(name_span);
- let parent = self.save_ctxt.tcx.local_parent(item.def_id);
+ let parent = self.save_ctxt.tcx.local_parent(item.owner_id.def_id);
self.dumper.import(
&Access { public: false, reachable: false },
Import {
@@ -1224,15 +1228,15 @@ impl<'tcx> Visitor<'tcx> for DumpVisitor<'tcx> {
intravisit::walk_mod(self, m, item.hir_id());
}
hir::ItemKind::TyAlias(ty, ref generics) => {
- let qualname = format!("::{}", self.tcx.def_path_str(item.def_id.to_def_id()));
+ let qualname = format!("::{}", self.tcx.def_path_str(item.owner_id.to_def_id()));
let value = ty_to_string(&ty);
if !self.span.filter_generated(item.ident.span) {
let span = self.span_from_span(item.ident.span);
- let id = id_from_def_id(item.def_id.to_def_id());
+ let id = id_from_def_id(item.owner_id.to_def_id());
let attrs = self.tcx.hir().attrs(item.hir_id());
self.dumper.dump_def(
- &access_from!(self.save_ctxt, item.def_id),
+ &access_from!(self.save_ctxt, item.owner_id.def_id),
Def {
kind: DefKind::Type,
id,
@@ -1303,7 +1307,7 @@ impl<'tcx> Visitor<'tcx> for DumpVisitor<'tcx> {
if let hir::QPath::Resolved(_, path) = path {
self.write_sub_paths_truncated(path);
}
- intravisit::walk_qpath(self, path, t.hir_id, t.span);
+ intravisit::walk_qpath(self, path, t.hir_id);
}
hir::TyKind::Array(ref ty, ref length) => {
self.visit_ty(ty);
@@ -1318,9 +1322,9 @@ impl<'tcx> Visitor<'tcx> for DumpVisitor<'tcx> {
}),
}
}
- hir::TyKind::OpaqueDef(item_id, _) => {
+ hir::TyKind::OpaqueDef(item_id, _, _) => {
let item = self.tcx.hir().item(item_id);
- self.nest_typeck_results(item_id.def_id, |v| v.visit_item(item));
+ self.nest_typeck_results(item_id.owner_id.def_id, |v| v.visit_item(item));
}
_ => intravisit::walk_ty(self, t),
}
@@ -1342,7 +1346,9 @@ impl<'tcx> Visitor<'tcx> for DumpVisitor<'tcx> {
let res = self.save_ctxt.get_path_res(hir_expr.hir_id);
self.process_struct_lit(ex, path, fields, adt.variant_of_res(res), *rest)
}
- hir::ExprKind::MethodCall(ref seg, args, _) => self.process_method_call(ex, seg, args),
+ hir::ExprKind::MethodCall(ref seg, receiver, args, _) => {
+ self.process_method_call(ex, seg, receiver, args)
+ }
hir::ExprKind::Field(ref sub_ex, _) => {
self.visit_expr(&sub_ex);
@@ -1425,7 +1431,7 @@ impl<'tcx> Visitor<'tcx> for DumpVisitor<'tcx> {
}
fn visit_foreign_item(&mut self, item: &'tcx hir::ForeignItem<'tcx>) {
- let access = access_from!(self.save_ctxt, item.def_id);
+ let access = access_from!(self.save_ctxt, item.owner_id.def_id);
match item.kind {
hir::ForeignItemKind::Fn(decl, _, ref generics) => {
diff --git a/compiler/rustc_save_analysis/src/errors.rs b/compiler/rustc_save_analysis/src/errors.rs
new file mode 100644
index 000000000..585aac8c1
--- /dev/null
+++ b/compiler/rustc_save_analysis/src/errors.rs
@@ -0,0 +1,10 @@
+use rustc_macros::Diagnostic;
+
+use std::path::Path;
+
+#[derive(Diagnostic)]
+#[diag(save_analysis_could_not_open)]
+pub(crate) struct CouldNotOpen<'a> {
+ pub file_name: &'a Path,
+ pub err: std::io::Error,
+}
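For context, a struct deriving `Diagnostic` such as `CouldNotOpen` is emitted through the session rather than formatted by hand; the call-site change later in this diff does exactly that. A minimal sketch of the emission side, assuming a `Session` reference `sess`, a `file_name: PathBuf`, and an `std::io::Error` value `err` are in scope:

```rust
// Sketch only: mirrors the updated call site in rustc_save_analysis/src/lib.rs.
// `emit_fatal` renders the Fluent message keyed `save_analysis_could_not_open`,
// filling in the `file_name` and `err` fields as diagnostic arguments.
sess.emit_fatal(errors::CouldNotOpen { file_name: file_name.as_path(), err });
```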
diff --git a/compiler/rustc_save_analysis/src/lib.rs b/compiler/rustc_save_analysis/src/lib.rs
index a1a2040bb..d0155c908 100644
--- a/compiler/rustc_save_analysis/src/lib.rs
+++ b/compiler/rustc_save_analysis/src/lib.rs
@@ -1,13 +1,19 @@
#![doc(html_root_url = "https://doc.rust-lang.org/nightly/nightly-rustc/")]
#![feature(if_let_guard)]
-#![feature(let_else)]
#![recursion_limit = "256"]
#![allow(rustc::potential_query_instability)]
+#![feature(never_type)]
+#![deny(rustc::untranslatable_diagnostic)]
+#![deny(rustc::diagnostic_outside_of_impl)]
+
+#[macro_use]
+extern crate tracing;
mod dump_visitor;
mod dumper;
#[macro_use]
mod span_utils;
+mod errors;
mod sig;
use rustc_ast as ast;
@@ -20,7 +26,7 @@ use rustc_hir::intravisit::{self, Visitor};
use rustc_hir::Node;
use rustc_hir_pretty::{enum_def_to_string, fn_to_string, ty_to_string};
use rustc_middle::hir::nested_filter;
-use rustc_middle::middle::privacy::AccessLevels;
+use rustc_middle::middle::privacy::EffectiveVisibilities;
use rustc_middle::ty::{self, print::with_no_trimmed_paths, DefIdTree, TyCtxt};
use rustc_middle::{bug, span_bug};
use rustc_session::config::{CrateType, Input, OutputType};
@@ -45,12 +51,10 @@ use rls_data::{
RefKind, Relation, RelationKind, SpanData,
};
-use tracing::{debug, error, info};
-
pub struct SaveContext<'tcx> {
tcx: TyCtxt<'tcx>,
maybe_typeck_results: Option<&'tcx ty::TypeckResults<'tcx>>,
- access_levels: &'tcx AccessLevels,
+ effective_visibilities: &'tcx EffectiveVisibilities,
span_utils: SpanUtils<'tcx>,
config: Config,
impl_counter: Cell<u32>,
@@ -137,7 +141,7 @@ impl<'tcx> SaveContext<'tcx> {
}
pub fn get_extern_item_data(&self, item: &hir::ForeignItem<'_>) -> Option<Data> {
- let def_id = item.def_id.to_def_id();
+ let def_id = item.owner_id.to_def_id();
let qualname = format!("::{}", self.tcx.def_path_str(def_id));
let attrs = self.tcx.hir().attrs(item.hir_id());
match item.kind {
@@ -201,7 +205,7 @@ impl<'tcx> SaveContext<'tcx> {
}
pub fn get_item_data(&self, item: &hir::Item<'_>) -> Option<Data> {
- let def_id = item.def_id.to_def_id();
+ let def_id = item.owner_id.to_def_id();
let attrs = self.tcx.hir().attrs(item.hir_id());
match item.kind {
hir::ItemKind::Fn(ref sig, ref generics, _) => {
@@ -293,7 +297,7 @@ impl<'tcx> SaveContext<'tcx> {
children: m
.item_ids
.iter()
- .map(|i| id_from_def_id(i.def_id.to_def_id()))
+ .map(|i| id_from_def_id(i.owner_id.to_def_id()))
.collect(),
decl_id: None,
docs: self.docs_for_attrs(attrs),
@@ -359,7 +363,7 @@ impl<'tcx> SaveContext<'tcx> {
parent: None,
children: items
.iter()
- .map(|i| id_from_def_id(i.id.def_id.to_def_id()))
+ .map(|i| id_from_def_id(i.id.owner_id.to_def_id()))
.collect(),
docs: String::new(),
sig: None,
@@ -591,13 +595,14 @@ impl<'tcx> SaveContext<'tcx> {
Node::TraitRef(tr) => tr.path.res,
Node::Item(&hir::Item { kind: hir::ItemKind::Use(path, _), .. }) => path.res,
- Node::PathSegment(seg) => match seg.res {
- Some(res) if res != Res::Err => res,
- _ => {
+ Node::PathSegment(seg) => {
+ if seg.res != Res::Err {
+ seg.res
+ } else {
let parent_node = self.tcx.hir().get_parent_node(hir_id);
self.get_path_res(parent_node)
}
- },
+ }
Node::Expr(&hir::Expr { kind: hir::ExprKind::Struct(ref qpath, ..), .. }) => {
self.typeck_results().qpath_res(qpath, hir_id)
@@ -616,7 +621,7 @@ impl<'tcx> SaveContext<'tcx> {
hir::QPath::TypeRelative(..) | hir::QPath::LangItem(..) => {
// #75962: `self.typeck_results` may be different from the `hir_id`'s result.
if self.tcx.has_typeck_results(hir_id.owner.to_def_id()) {
- self.tcx.typeck(hir_id.owner).qpath_res(qpath, hir_id)
+ self.tcx.typeck(hir_id.owner.def_id).qpath_res(qpath, hir_id)
} else {
Res::Err
}
@@ -643,7 +648,7 @@ impl<'tcx> SaveContext<'tcx> {
}
pub fn get_path_segment_data(&self, path_seg: &hir::PathSegment<'_>) -> Option<Ref> {
- self.get_path_segment_data_with_id(path_seg, path_seg.hir_id?)
+ self.get_path_segment_data_with_id(path_seg, path_seg.hir_id)
}
pub fn get_path_segment_data_with_id(
@@ -679,6 +684,7 @@ impl<'tcx> SaveContext<'tcx> {
| HirDefKind::AssocTy
| HirDefKind::Trait
| HirDefKind::OpaqueTy
+ | HirDefKind::ImplTraitPlaceholder
| HirDefKind::TyParam,
def_id,
) => Some(Ref { kind: RefKind::Type, span, ref_id: id_from_def_id(def_id) }),
@@ -734,7 +740,8 @@ impl<'tcx> SaveContext<'tcx> {
_,
)
| Res::PrimTy(..)
- | Res::SelfTy { .. }
+ | Res::SelfTyParam { .. }
+ | Res::SelfTyAlias { .. }
| Res::ToolMod
| Res::NonMacroAttr(..)
| Res::SelfCtor(..)
@@ -799,7 +806,7 @@ impl<'tcx> SaveContext<'tcx> {
fn lookup_def_id(&self, ref_id: hir::HirId) -> Option<DefId> {
match self.get_path_res(ref_id) {
- Res::PrimTy(_) | Res::SelfTy { .. } | Res::Err => None,
+ Res::PrimTy(_) | Res::SelfTyParam { .. } | Res::SelfTyAlias { .. } | Res::Err => None,
def => def.opt_def_id(),
}
}
@@ -860,23 +867,12 @@ impl<'l> Visitor<'l> for PathCollector<'l> {
hir::PatKind::TupleStruct(ref path, ..) | hir::PatKind::Path(ref path) => {
self.collected_paths.push((p.hir_id, path));
}
- hir::PatKind::Binding(bm, _, ident, _) => {
+ hir::PatKind::Binding(hir::BindingAnnotation(_, mutbl), _, ident, _) => {
debug!(
"PathCollector, visit ident in pat {}: {:?} {:?}",
ident, p.span, ident.span
);
- let immut = match bm {
- // Even if the ref is mut, you can't change the ref, only
- // the data pointed at, so showing the initialising expression
- // is still worthwhile.
- hir::BindingAnnotation::Unannotated | hir::BindingAnnotation::Ref => {
- hir::Mutability::Not
- }
- hir::BindingAnnotation::Mutable | hir::BindingAnnotation::RefMut => {
- hir::Mutability::Mut
- }
- };
- self.collected_idents.push((p.hir_id, ident, immut));
+ self.collected_idents.push((p.hir_id, ident, mutbl));
}
_ => {}
}
@@ -928,7 +924,7 @@ impl<'a> DumpHandler<'a> {
info!("Writing output to {}", file_name.display());
let output_file = BufWriter::new(File::create(&file_name).unwrap_or_else(|e| {
- sess.fatal(&format!("Could not open {}: {}", file_name.display(), e))
+ sess.emit_fatal(errors::CouldNotOpen { file_name: file_name.as_path(), err: e })
}));
(output_file, file_name)
@@ -972,16 +968,16 @@ pub fn process_crate<'l, 'tcx, H: SaveHandler>(
info!("Dumping crate {}", cratename);
// Privacy checking must be done outside of type inference; use a
- // fallback in case the access levels couldn't have been correctly computed.
- let access_levels = match tcx.sess.compile_status() {
- Ok(..) => tcx.privacy_access_levels(()),
- Err(..) => tcx.arena.alloc(AccessLevels::default()),
+ // fallback in case effective visibilities couldn't have been correctly computed.
+ let effective_visibilities = match tcx.sess.compile_status() {
+ Ok(..) => tcx.effective_visibilities(()),
+ Err(..) => tcx.arena.alloc(EffectiveVisibilities::default()),
};
let save_ctxt = SaveContext {
tcx,
maybe_typeck_results: None,
- access_levels: &access_levels,
+ effective_visibilities: &effective_visibilities,
span_utils: SpanUtils::new(&tcx.sess),
config: find_config(config),
impl_counter: Cell::new(0),
@@ -1045,7 +1041,7 @@ fn id_from_hir_id(id: hir::HirId, scx: &SaveContext<'_>) -> rls_data::Id {
// crate (very unlikely to actually happen).
rls_data::Id {
krate: LOCAL_CRATE.as_u32(),
- index: id.owner.local_def_index.as_u32() | id.local_id.as_u32().reverse_bits(),
+ index: id.owner.def_id.local_def_index.as_u32() | id.local_id.as_u32().reverse_bits(),
}
})
}
diff --git a/compiler/rustc_save_analysis/src/sig.rs b/compiler/rustc_save_analysis/src/sig.rs
index d1286c9b8..83c51d213 100644
--- a/compiler/rustc_save_analysis/src/sig.rs
+++ b/compiler/rustc_save_analysis/src/sig.rs
@@ -316,7 +316,7 @@ impl<'hir> Sig for hir::Ty<'hir> {
let text = format!("[{}; {}]", nested_ty.text, expr);
Ok(replace_text(nested_ty, text))
}
- hir::TyKind::OpaqueDef(item_id, _) => {
+ hir::TyKind::OpaqueDef(item_id, _, _) => {
let item = scx.tcx.hir().item(item_id);
item.make(offset, Some(item_id.hir_id()), scx)
}
@@ -337,7 +337,7 @@ impl<'hir> Sig for hir::Item<'hir> {
}
let name = self.ident.to_string();
let defs = vec![SigElement {
- id: id_from_def_id(self.def_id.to_def_id()),
+ id: id_from_def_id(self.owner_id.to_def_id()),
start: offset + text.len(),
end: offset + text.len() + name.len(),
}];
@@ -359,7 +359,7 @@ impl<'hir> Sig for hir::Item<'hir> {
let mut text = "const ".to_owned();
let name = self.ident.to_string();
let defs = vec![SigElement {
- id: id_from_def_id(self.def_id.to_def_id()),
+ id: id_from_def_id(self.owner_id.to_def_id()),
start: offset + text.len(),
end: offset + text.len() + name.len(),
}];
@@ -428,7 +428,7 @@ impl<'hir> Sig for hir::Item<'hir> {
let mut text = "mod ".to_owned();
let name = self.ident.to_string();
let defs = vec![SigElement {
- id: id_from_def_id(self.def_id.to_def_id()),
+ id: id_from_def_id(self.owner_id.to_def_id()),
start: offset + text.len(),
end: offset + text.len() + name.len(),
}];
@@ -561,7 +561,13 @@ impl<'hir> Sig for hir::Item<'hir> {
hir::ItemKind::ForeignMod { .. } => Err("extern mod"),
hir::ItemKind::GlobalAsm(_) => Err("global asm"),
hir::ItemKind::ExternCrate(_) => Err("extern crate"),
- hir::ItemKind::OpaqueTy(..) => Err("opaque type"),
+ hir::ItemKind::OpaqueTy(ref opaque) => {
+ if opaque.in_trait {
+ Err("opaque type in trait")
+ } else {
+ Err("opaque type")
+ }
+ }
// FIXME should implement this (e.g., pub use).
hir::ItemKind::Use(..) => Err("import"),
}
@@ -573,7 +579,7 @@ impl<'hir> Sig for hir::Path<'hir> {
let res = scx.get_path_res(id.ok_or("Missing id for Path")?);
let (name, start, end) = match res {
- Res::PrimTy(..) | Res::SelfTy { .. } | Res::Err => {
+ Res::PrimTy(..) | Res::SelfTyParam { .. } | Res::SelfTyAlias { .. } | Res::Err => {
return Ok(Signature { text: path_to_string(self), defs: vec![], refs: vec![] });
}
Res::Def(DefKind::AssocConst | DefKind::Variant | DefKind::Ctor(..), _) => {
@@ -758,7 +764,7 @@ impl<'hir> Sig for hir::ForeignItem<'hir> {
}
let name = self.ident.to_string();
let defs = vec![SigElement {
- id: id_from_def_id(self.def_id.to_def_id()),
+ id: id_from_def_id(self.owner_id.to_def_id()),
start: offset + text.len(),
end: offset + text.len() + name.len(),
}];
@@ -774,7 +780,7 @@ impl<'hir> Sig for hir::ForeignItem<'hir> {
let mut text = "type ".to_owned();
let name = self.ident.to_string();
let defs = vec![SigElement {
- id: id_from_def_id(self.def_id.to_def_id()),
+ id: id_from_def_id(self.owner_id.to_def_id()),
start: offset + text.len(),
end: offset + text.len() + name.len(),
}];
diff --git a/compiler/rustc_serialize/Cargo.toml b/compiler/rustc_serialize/Cargo.toml
index dbc5c1519..3b0b3144f 100644
--- a/compiler/rustc_serialize/Cargo.toml
+++ b/compiler/rustc_serialize/Cargo.toml
@@ -6,6 +6,7 @@ edition = "2021"
[dependencies]
indexmap = "1.9.1"
smallvec = { version = "1.8.1", features = ["union", "may_dangle"] }
+thin-vec = "0.2.8"
[dev-dependencies]
rustc_macros = { path = "../rustc_macros" }
diff --git a/compiler/rustc_serialize/src/collection_impls.rs b/compiler/rustc_serialize/src/collection_impls.rs
index 5e53f0b10..8f8c50411 100644
--- a/compiler/rustc_serialize/src/collection_impls.rs
+++ b/compiler/rustc_serialize/src/collection_impls.rs
@@ -1,13 +1,12 @@
//! Implementations of serialization for structures found in liballoc
-use std::hash::{BuildHasher, Hash};
-
use crate::{Decodable, Decoder, Encodable, Encoder};
+use smallvec::{Array, SmallVec};
use std::collections::{BTreeMap, BTreeSet, HashMap, HashSet, LinkedList, VecDeque};
+use std::hash::{BuildHasher, Hash};
use std::rc::Rc;
use std::sync::Arc;
-
-use smallvec::{Array, SmallVec};
+use thin_vec::ThinVec;
impl<S: Encoder, A: Array<Item: Encodable<S>>> Encodable<S> for SmallVec<A> {
fn encode(&self, s: &mut S) {
@@ -23,6 +22,19 @@ impl<D: Decoder, A: Array<Item: Decodable<D>>> Decodable<D> for SmallVec<A> {
}
}
+impl<S: Encoder, T: Encodable<S>> Encodable<S> for ThinVec<T> {
+ fn encode(&self, s: &mut S) {
+ self.as_slice().encode(s);
+ }
+}
+
+impl<D: Decoder, T: Decodable<D>> Decodable<D> for ThinVec<T> {
+ fn decode(d: &mut D) -> ThinVec<T> {
+ let len = d.read_usize();
+ (0..len).map(|_| Decodable::decode(d)).collect()
+ }
+}
+
impl<S: Encoder, T: Encodable<S>> Encodable<S> for LinkedList<T> {
fn encode(&self, s: &mut S) {
s.emit_usize(self.len());
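The new `ThinVec` impls follow the crate's usual length-prefixed scheme: encode the length, then each element; decoding reads the length back and collects that many elements. A standalone sketch of the same pattern over a plain byte buffer (helper names are made up and the `Encoder`/`Decoder` traits themselves are not used):

```rust
use std::convert::TryInto;

// Encode: length prefix, then each element, little-endian.
fn encode_u32s(values: &[u32], out: &mut Vec<u8>) {
    out.extend_from_slice(&(values.len() as u64).to_le_bytes());
    for v in values {
        out.extend_from_slice(&v.to_le_bytes());
    }
}

// Decode: read the length, then collect exactly that many elements,
// just as the `ThinVec` Decodable impl collects `len` decoded items.
fn decode_u32s(buf: &[u8]) -> Vec<u32> {
    let len = u64::from_le_bytes(buf[..8].try_into().unwrap()) as usize;
    buf[8..]
        .chunks_exact(4)
        .take(len)
        .map(|c| u32::from_le_bytes(c.try_into().unwrap()))
        .collect()
}

fn main() {
    let mut buf = Vec::new();
    encode_u32s(&[1, 2, 3], &mut buf);
    assert_eq!(decode_u32s(&buf), vec![1, 2, 3]);
}
```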
diff --git a/compiler/rustc_serialize/src/lib.rs b/compiler/rustc_serialize/src/lib.rs
index e606f4273..1f8d2336c 100644
--- a/compiler/rustc_serialize/src/lib.rs
+++ b/compiler/rustc_serialize/src/lib.rs
@@ -14,10 +14,12 @@ Core encoding and decoding interfaces.
#![feature(min_specialization)]
#![feature(core_intrinsics)]
#![feature(maybe_uninit_slice)]
-#![feature(let_else)]
#![feature(new_uninit)]
+#![feature(allocator_api)]
#![cfg_attr(test, feature(test))]
#![allow(rustc::internal)]
+#![deny(rustc::untranslatable_diagnostic)]
+#![deny(rustc::diagnostic_outside_of_impl)]
pub use self::serialize::{Decodable, Decoder, Encodable, Encoder};
diff --git a/compiler/rustc_serialize/src/opaque.rs b/compiler/rustc_serialize/src/opaque.rs
index 5c17ef6ac..f35cc08f4 100644
--- a/compiler/rustc_serialize/src/opaque.rs
+++ b/compiler/rustc_serialize/src/opaque.rs
@@ -193,7 +193,9 @@ impl FileEncoder {
// shaves an instruction off those code paths (on x86 at least).
assert!(capacity <= usize::MAX - max_leb128_len());
- let file = File::create(path)?;
+ // Create the file for reading and writing, because some encoders do both
+ // (e.g. the metadata encoder when -Zmeta-stats is enabled)
+ let file = File::options().read(true).write(true).create(true).truncate(true).open(path)?;
Ok(FileEncoder {
buf: Box::new_uninit_slice(capacity),
diff --git a/compiler/rustc_serialize/src/serialize.rs b/compiler/rustc_serialize/src/serialize.rs
index 36585b8d7..751b209f1 100644
--- a/compiler/rustc_serialize/src/serialize.rs
+++ b/compiler/rustc_serialize/src/serialize.rs
@@ -4,6 +4,7 @@
Core encoding and decoding interfaces.
*/
+use std::alloc::Allocator;
use std::borrow::Cow;
use std::cell::{Cell, RefCell};
use std::marker::PhantomData;
@@ -229,9 +230,9 @@ impl<D: Decoder, T> Decodable<D> for PhantomData<T> {
}
}
-impl<D: Decoder, T: Decodable<D>> Decodable<D> for Box<[T]> {
- fn decode(d: &mut D) -> Box<[T]> {
- let v: Vec<T> = Decodable::decode(d);
+impl<D: Decoder, A: Allocator + Default, T: Decodable<D>> Decodable<D> for Box<[T], A> {
+ fn decode(d: &mut D) -> Box<[T], A> {
+ let v: Vec<T, A> = Decodable::decode(d);
v.into_boxed_slice()
}
}
@@ -264,16 +265,17 @@ impl<S: Encoder, T: Encodable<S>> Encodable<S> for Vec<T> {
}
}
-impl<D: Decoder, T: Decodable<D>> Decodable<D> for Vec<T> {
- default fn decode(d: &mut D) -> Vec<T> {
+impl<D: Decoder, T: Decodable<D>, A: Allocator + Default> Decodable<D> for Vec<T, A> {
+ default fn decode(d: &mut D) -> Vec<T, A> {
let len = d.read_usize();
+ let allocator = A::default();
// SAFETY: we set the capacity in advance, only write elements, and
// only set the length at the end once the writing has succeeded.
- let mut vec = Vec::with_capacity(len);
+ let mut vec = Vec::with_capacity_in(len, allocator);
unsafe {
let ptr: *mut T = vec.as_mut_ptr();
for i in 0..len {
- std::ptr::write(ptr.offset(i as isize), Decodable::decode(d));
+ std::ptr::write(ptr.add(i), Decodable::decode(d));
}
vec.set_len(len);
}
@@ -457,13 +459,15 @@ impl<D: Decoder, T: Decodable<D>> Decodable<D> for Arc<T> {
}
}
-impl<S: Encoder, T: ?Sized + Encodable<S>> Encodable<S> for Box<T> {
+impl<S: Encoder, T: ?Sized + Encodable<S>, A: Allocator + Default> Encodable<S> for Box<T, A> {
fn encode(&self, s: &mut S) {
- (**self).encode(s);
+ (**self).encode(s)
}
}
-impl<D: Decoder, T: Decodable<D>> Decodable<D> for Box<T> {
- fn decode(d: &mut D) -> Box<T> {
- Box::new(Decodable::decode(d))
+
+impl<D: Decoder, A: Allocator + Default, T: Decodable<D>> Decodable<D> for Box<T, A> {
+ fn decode(d: &mut D) -> Box<T, A> {
+ let allocator = A::default();
+ Box::new_in(Decodable::decode(d), allocator)
}
}
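The allocator-generic `Vec` decode above keeps the existing strategy: reserve the full capacity, write each decoded element into place through a raw pointer, and only then set the length. A standalone sketch of that pattern with a closure standing in for the `Decoder` (names are illustrative):

```rust
// Pre-allocate, write in place, then claim the length -- mirroring the pattern
// used by the `Decodable` impl for `Vec<T, A>` above.
fn collect_exact<T>(len: usize, mut next: impl FnMut() -> T) -> Vec<T> {
    let mut vec = Vec::with_capacity(len);
    unsafe {
        let ptr: *mut T = vec.as_mut_ptr();
        for i in 0..len {
            // Write each element directly into its reserved, uninitialized slot.
            std::ptr::write(ptr.add(i), next());
        }
        // Only set the length once every slot has been initialized.
        vec.set_len(len);
    }
    vec
}

fn main() {
    let mut n = 0u32;
    let v = collect_exact(4, || {
        n += 1;
        n
    });
    assert_eq!(v, vec![1, 2, 3, 4]);
}
```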
diff --git a/compiler/rustc_session/Cargo.toml b/compiler/rustc_session/Cargo.toml
index 37cfc4a0d..6b1eaa4d3 100644
--- a/compiler/rustc_session/Cargo.toml
+++ b/compiler/rustc_session/Cargo.toml
@@ -15,6 +15,5 @@ rustc_serialize = { path = "../rustc_serialize" }
rustc_data_structures = { path = "../rustc_data_structures" }
rustc_span = { path = "../rustc_span" }
rustc_fs_util = { path = "../rustc_fs_util" }
-num_cpus = "1.0"
rustc_ast = { path = "../rustc_ast" }
rustc_lint_defs = { path = "../rustc_lint_defs" }
diff --git a/compiler/rustc_session/src/cgu_reuse_tracker.rs b/compiler/rustc_session/src/cgu_reuse_tracker.rs
index dd64e8ab7..2336d9936 100644
--- a/compiler/rustc_session/src/cgu_reuse_tracker.rs
+++ b/compiler/rustc_session/src/cgu_reuse_tracker.rs
@@ -2,10 +2,14 @@
//! compilation. This is used for incremental compilation tests and debug
//! output.
+use crate::errors::{CguNotRecorded, IncorrectCguReuseType};
+use crate::Session;
use rustc_data_structures::fx::FxHashMap;
+use rustc_errors::{DiagnosticArgValue, IntoDiagnosticArg};
use rustc_span::{Span, Symbol};
+use std::borrow::Cow;
+use std::fmt::{self};
use std::sync::{Arc, Mutex};
-use tracing::debug;
#[derive(Copy, Clone, Debug, PartialEq, PartialOrd)]
pub enum CguReuse {
@@ -14,6 +18,22 @@ pub enum CguReuse {
PostLto,
}
+impl fmt::Display for CguReuse {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match *self {
+ CguReuse::No => write!(f, "No"),
+ CguReuse::PreLto => write!(f, "PreLto "),
+ CguReuse::PostLto => write!(f, "PostLto "),
+ }
+ }
+}
+
+impl IntoDiagnosticArg for CguReuse {
+ fn into_diagnostic_arg(self) -> DiagnosticArgValue<'static> {
+ DiagnosticArgValue::Str(Cow::Owned(self.to_string()))
+ }
+}
+
#[derive(Copy, Clone, Debug, PartialEq)]
pub enum ComparisonKind {
Exact,
@@ -84,7 +104,7 @@ impl CguReuseTracker {
}
}
- pub fn check_expected_reuse(&self, diag: &rustc_errors::Handler) {
+ pub fn check_expected_reuse(&self, sess: &Session) {
if let Some(ref data) = self.data {
let data = data.lock().unwrap();
@@ -98,19 +118,17 @@ impl CguReuseTracker {
};
if error {
- let at_least = if at_least { "at least " } else { "" };
- let msg = format!(
- "CGU-reuse for `{cgu_user_name}` is `{actual_reuse:?}` but \
- should be {at_least}`{expected_reuse:?}`"
- );
- diag.span_err(error_span.0, &msg);
+ let at_least = if at_least { 1 } else { 0 };
+                sess.emit_err(IncorrectCguReuseType {
+                    span: error_span.0,
+                    cgu_user_name: &cgu_user_name,
+                    actual_reuse,
+                    expected_reuse,
+                    at_least,
+                });
}
} else {
- let msg = format!(
- "CGU-reuse for `{cgu_user_name}` (mangled: `{cgu_name}`) was \
- not recorded"
- );
- diag.span_fatal(error_span.0, &msg)
+ sess.emit_fatal(CguNotRecorded { cgu_user_name, cgu_name });
}
}
}
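
The `IntoDiagnosticArg` impl above turns a `CguReuse` value into an owned string that the translated diagnostic message can interpolate, and it leans on the `Display` impl directly before it. A rough standalone sketch of that conversion pattern, with the rustc_errors argument type replaced by a plain `Cow<'static, str>` alias (illustrative only):

// --- illustrative sketch, not part of the diff ---
use std::borrow::Cow;
use std::fmt;

type DiagnosticArgValue = Cow<'static, str>;

#[derive(Copy, Clone)]
enum CguReuse {
    No,
    PreLto,
    PostLto,
}

impl fmt::Display for CguReuse {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match *self {
            CguReuse::No => write!(f, "No"),
            CguReuse::PreLto => write!(f, "PreLto"),
            CguReuse::PostLto => write!(f, "PostLto"),
        }
    }
}

/// Anything that implements `Display` can become an owned diagnostic argument.
fn into_diagnostic_arg(value: impl fmt::Display) -> DiagnosticArgValue {
    Cow::Owned(value.to_string())
}

fn main() {
    for reuse in [CguReuse::No, CguReuse::PreLto, CguReuse::PostLto] {
        println!("{}", into_diagnostic_arg(reuse));
    }
}
// --- end sketch ---
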
diff --git a/compiler/rustc_session/src/config.rs b/compiler/rustc_session/src/config.rs
index 6a8298605..f2ee52262 100644
--- a/compiler/rustc_session/src/config.rs
+++ b/compiler/rustc_session/src/config.rs
@@ -12,8 +12,8 @@ use rustc_data_structures::fx::{FxHashMap, FxHashSet};
use rustc_data_structures::stable_hasher::ToStableHashKey;
use rustc_target::abi::{Align, TargetDataLayout};
-use rustc_target::spec::{LinkerFlavor, SplitDebuginfo, Target, TargetTriple, TargetWarnings};
-use rustc_target::spec::{PanicStrategy, SanitizerSet, TARGETS};
+use rustc_target::spec::{PanicStrategy, SanitizerSet, SplitDebuginfo};
+use rustc_target::spec::{Target, TargetTriple, TargetWarnings, TARGETS};
use crate::parse::{CrateCheckConfig, CrateConfig};
use rustc_feature::UnstableFeatures;
@@ -36,6 +36,8 @@ use std::iter::{self, FromIterator};
use std::path::{Path, PathBuf};
use std::str::{self, FromStr};
+pub mod sigpipe;
+
/// The different settings that the `-C strip` flag can have.
#[derive(Clone, Copy, PartialEq, Hash, Debug)]
pub enum Strip {
@@ -535,6 +537,7 @@ pub enum PrintRequest {
TargetLibdir,
CrateName,
Cfg,
+ CallingConventions,
TargetList,
TargetCPUs,
TargetFeatures,
@@ -798,7 +801,15 @@ impl UnstableOptions {
// The type of entry function, so users can have their own entry functions
#[derive(Copy, Clone, PartialEq, Hash, Debug, HashStable_Generic)]
pub enum EntryFnType {
- Main,
+ Main {
+ /// Specifies what to do with `SIGPIPE` before calling `fn main()`.
+ ///
+        /// Which values are valid and what they mean must be kept in sync
+        /// between rustc and libstd, but we don't want the constants to be
+        /// public in libstd, so we take a slightly unusual approach with
+        /// simple constants and an `include!()`.
+ sigpipe: u8,
+ },
Start,
}
@@ -888,10 +899,10 @@ fn default_configuration(sess: &Session) -> CrateConfig {
let max_atomic_width = sess.target.max_atomic_width();
let atomic_cas = sess.target.atomic_cas;
let layout = TargetDataLayout::parse(&sess.target).unwrap_or_else(|err| {
- sess.fatal(&err);
+ sess.emit_fatal(err);
});
- let mut ret = FxHashSet::default();
+ let mut ret = CrateConfig::default();
ret.reserve(7); // the minimum number of insertions
// Target bindings.
ret.insert((sym::target_os, Some(Symbol::intern(os))));
@@ -949,7 +960,7 @@ fn default_configuration(sess: &Session) -> CrateConfig {
ret.insert((sym::debug_assertions, None));
}
// JUSTIFICATION: before wrapper fn is available
- #[cfg_attr(not(bootstrap), allow(rustc::bad_opt_access))]
+ #[allow(rustc::bad_opt_access)]
if sess.opts.crate_types.contains(&CrateType::ProcMacro) {
ret.insert((sym::proc_macro, None));
}
@@ -1343,8 +1354,8 @@ pub fn rustc_short_optgroups() -> Vec<RustcOptGroup> {
"",
"print",
"Compiler information to print on stdout",
- "[crate-name|file-names|sysroot|target-libdir|cfg|target-list|\
- target-cpus|target-features|relocation-models|code-models|\
+ "[crate-name|file-names|sysroot|target-libdir|cfg|calling-conventions|\
+ target-list|target-cpus|target-features|relocation-models|code-models|\
tls-models|target-spec-json|native-static-libs|stack-protector-strategies|\
link-args]",
),
@@ -1783,6 +1794,7 @@ fn collect_print_requests(
"sysroot" => PrintRequest::Sysroot,
"target-libdir" => PrintRequest::TargetLibdir,
"cfg" => PrintRequest::Cfg,
+ "calling-conventions" => PrintRequest::CallingConventions,
"target-list" => PrintRequest::TargetList,
"target-cpus" => PrintRequest::TargetCPUs,
"target-features" => PrintRequest::TargetFeatures,
@@ -2198,7 +2210,7 @@ fn parse_remap_path_prefix(
}
// JUSTIFICATION: before wrapper fn is available
-#[cfg_attr(not(bootstrap), allow(rustc::bad_opt_access))]
+#[allow(rustc::bad_opt_access)]
pub fn build_session_options(matches: &getopts::Matches) -> Options {
let color = parse_color(matches);
@@ -2379,16 +2391,6 @@ pub fn build_session_options(matches: &getopts::Matches) -> Options {
}
}
- if cg.linker_flavor == Some(LinkerFlavor::L4Bender)
- && !nightly_options::is_unstable_enabled(matches)
- {
- early_error(
- error_format,
- "`l4-bender` linker flavor is unstable, `-Z unstable-options` \
- flag must also be passed to explicitly use it",
- );
- }
-
let prints = collect_print_requests(&mut cg, &mut unstable_opts, matches, error_format);
let cg = cg;
@@ -2423,13 +2425,6 @@ pub fn build_session_options(matches: &getopts::Matches) -> Options {
let pretty = parse_pretty(&unstable_opts, error_format);
- if !unstable_opts.unstable_options
- && !target_triple.triple().contains("apple")
- && cg.split_debuginfo.is_some()
- {
- early_error(error_format, "`-Csplit-debuginfo` is unstable on this platform");
- }
-
// Try to find a directory containing the Rust `src`, for more details see
// the doc comment on the `real_rust_source_base_dir` field.
let tmp_buf;
@@ -2537,7 +2532,7 @@ fn parse_pretty(unstable_opts: &UnstableOptions, efmt: ErrorOutputType) -> Optio
),
),
};
- tracing::debug!("got unpretty option: {first:?}");
+ debug!("got unpretty option: {first:?}");
Some(first)
}
diff --git a/compiler/rustc_session/src/config/sigpipe.rs b/compiler/rustc_session/src/config/sigpipe.rs
new file mode 100644
index 000000000..53692ad7c
--- /dev/null
+++ b/compiler/rustc_session/src/config/sigpipe.rs
@@ -0,0 +1,25 @@
+//! NOTE: Keep these constants in sync with `library/std/src/sys/unix/mod.rs`!
+
+/// The default value if `#[unix_sigpipe]` is not specified. This resolves
+/// to `SIG_IGN` in `library/std/src/sys/unix/mod.rs`.
+///
+/// Note that `SIG_IGN` has been the Rust default since 2014. See
+/// <https://github.com/rust-lang/rust/issues/62569>.
+#[allow(dead_code)]
+pub const DEFAULT: u8 = 0;
+
+/// Do not touch `SIGPIPE`. Use whatever the parent process uses.
+#[allow(dead_code)]
+pub const INHERIT: u8 = 1;
+
+/// Change `SIGPIPE` to `SIG_IGN` so that failed writes result in `EPIPE`
+/// errors that are eventually converted to `ErrorKind::BrokenPipe`.
+#[allow(dead_code)]
+pub const SIG_IGN: u8 = 2;
+
+/// Change `SIGPIPE` to `SIG_DFL` so that the process is killed when trying
+/// to write to a closed pipe. This is usually the desired behavior for CLI
+/// apps that produce textual output that you want to pipe to other programs
+/// such as `head -n 1`.
+#[allow(dead_code)]
+pub const SIG_DFL: u8 = 3;
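
These constants are plain `u8` values precisely so that rustc and the standard library can share them textually (via `include!()`) and dispatch on them before `fn main()` runs. A self-contained sketch of what such a dispatch could look like; this is not the libstd implementation, and the signal-handling bodies are left as comments:

// --- illustrative sketch, not part of the diff ---
const DEFAULT: u8 = 0;
const INHERIT: u8 = 1;
const SIG_IGN: u8 = 2;
const SIG_DFL: u8 = 3;

/// Decide what to do with SIGPIPE based on the value rustc baked in.
fn init_sigpipe(sigpipe: u8) {
    match sigpipe {
        // The historical Rust default: ignore SIGPIPE so writes fail with EPIPE.
        DEFAULT | SIG_IGN => { /* install SIG_IGN for SIGPIPE */ }
        // Restore the OS default: the process dies on a broken pipe.
        SIG_DFL => { /* install SIG_DFL for SIGPIPE */ }
        // Leave whatever the parent process configured.
        INHERIT => {}
        _ => unreachable!("rustc and libstd disagree on sigpipe values"),
    }
}

fn main() {
    init_sigpipe(DEFAULT);
}
// --- end sketch ---
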
diff --git a/compiler/rustc_session/src/cstore.rs b/compiler/rustc_session/src/cstore.rs
index c1fd3c7c6..7d4a1e212 100644
--- a/compiler/rustc_session/src/cstore.rs
+++ b/compiler/rustc_session/src/cstore.rs
@@ -68,6 +68,8 @@ pub enum LinkagePreference {
pub struct NativeLib {
pub kind: NativeLibKind,
pub name: Option<Symbol>,
+    /// If `packed_bundled_libs` is enabled, the actual filename of the library is stored.
+ pub filename: Option<Symbol>,
pub cfg: Option<ast::MetaItem>,
pub foreign_module: Option<DefId>,
pub wasm_import_module: Option<Symbol>,
@@ -81,10 +83,29 @@ impl NativeLib {
}
}
+/// Different ways that the PE Format can decorate a symbol name.
+/// From <https://docs.microsoft.com/en-us/windows/win32/debug/pe-format#import-name-type>
+#[derive(Copy, Clone, Debug, Encodable, Decodable, HashStable_Generic, PartialEq, Eq)]
+pub enum PeImportNameType {
+ /// IMPORT_ORDINAL
+ /// Uses the ordinal (i.e., a number) rather than the name.
+ Ordinal(u16),
+ /// Same as IMPORT_NAME
+ /// Name is decorated with all prefixes and suffixes.
+ Decorated,
+ /// Same as IMPORT_NAME_NOPREFIX
+ /// Prefix (e.g., the leading `_` or `@`) is skipped, but suffix is kept.
+ NoPrefix,
+ /// Same as IMPORT_NAME_UNDECORATE
+ /// Prefix (e.g., the leading `_` or `@`) and suffix (the first `@` and all
+ /// trailing characters) are skipped.
+ Undecorated,
+}
+
#[derive(Clone, Debug, Encodable, Decodable, HashStable_Generic)]
pub struct DllImport {
pub name: Symbol,
- pub ordinal: Option<u16>,
+ pub import_name_type: Option<PeImportNameType>,
/// Calling convention for the function.
///
/// On x86_64, this is always `DllCallingConvention::C`; on i686, it can be any
@@ -92,6 +113,18 @@ pub struct DllImport {
pub calling_convention: DllCallingConvention,
/// Span of import's "extern" declaration; used for diagnostics.
pub span: Span,
+    /// Whether this import is for a function (rather than a static variable).
+ pub is_fn: bool,
+}
+
+impl DllImport {
+ pub fn ordinal(&self) -> Option<u16> {
+ if let Some(PeImportNameType::Ordinal(ordinal)) = self.import_name_type {
+ Some(ordinal)
+ } else {
+ None
+ }
+ }
}
/// Calling convention for a function defined in an external library.
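
The `ordinal()` accessor above only yields a value for by-ordinal imports; every other `PeImportNameType` variant means the symbol is resolved by a (possibly decorated) name. The same accessor pattern as a standalone sketch, outside rustc:

// --- illustrative sketch, not part of the diff ---
#[allow(dead_code)]
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
enum PeImportNameType {
    Ordinal(u16),
    Decorated,
    NoPrefix,
    Undecorated,
}

struct DllImport {
    name: &'static str,
    import_name_type: Option<PeImportNameType>,
}

impl DllImport {
    /// The import ordinal, if the symbol is imported by ordinal at all.
    fn ordinal(&self) -> Option<u16> {
        if let Some(PeImportNameType::Ordinal(ordinal)) = self.import_name_type {
            Some(ordinal)
        } else {
            None
        }
    }
}

fn main() {
    let by_ordinal = DllImport { name: "SomeFn", import_name_type: Some(PeImportNameType::Ordinal(42)) };
    let by_name = DllImport { name: "SomeFn", import_name_type: Some(PeImportNameType::NoPrefix) };
    println!("{}: ordinal = {:?}", by_ordinal.name, by_ordinal.ordinal());
    println!("{}: ordinal = {:?}", by_name.name, by_name.ordinal());
}
// --- end sketch ---
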
diff --git a/compiler/rustc_session/src/errors.rs b/compiler/rustc_session/src/errors.rs
new file mode 100644
index 000000000..bf542faec
--- /dev/null
+++ b/compiler/rustc_session/src/errors.rs
@@ -0,0 +1,193 @@
+use std::num::NonZeroU32;
+
+use crate::cgu_reuse_tracker::CguReuse;
+use rustc_errors::MultiSpan;
+use rustc_macros::{Diagnostic, Subdiagnostic};
+use rustc_span::{Span, Symbol};
+use rustc_target::spec::{SplitDebuginfo, StackProtector, TargetTriple};
+
+#[derive(Diagnostic)]
+#[diag(session_incorrect_cgu_reuse_type)]
+pub struct IncorrectCguReuseType<'a> {
+ #[primary_span]
+ pub span: Span,
+ pub cgu_user_name: &'a str,
+ pub actual_reuse: CguReuse,
+ pub expected_reuse: CguReuse,
+ pub at_least: u8,
+}
+
+#[derive(Diagnostic)]
+#[diag(session_cgu_not_recorded)]
+pub struct CguNotRecorded<'a> {
+ pub cgu_user_name: &'a str,
+ pub cgu_name: &'a str,
+}
+
+#[derive(Diagnostic)]
+#[diag(session_feature_gate_error, code = "E0658")]
+pub struct FeatureGateError<'a> {
+ #[primary_span]
+ pub span: MultiSpan,
+ pub explain: &'a str,
+}
+
+#[derive(Subdiagnostic)]
+#[note(session_feature_diagnostic_for_issue)]
+pub struct FeatureDiagnosticForIssue {
+ pub n: NonZeroU32,
+}
+
+#[derive(Subdiagnostic)]
+#[help(session_feature_diagnostic_help)]
+pub struct FeatureDiagnosticHelp {
+ pub feature: Symbol,
+}
+
+#[derive(Diagnostic)]
+#[diag(session_not_circumvent_feature)]
+pub struct NotCircumventFeature;
+
+#[derive(Diagnostic)]
+#[diag(session_linker_plugin_lto_windows_not_supported)]
+pub struct LinkerPluginToWindowsNotSupported;
+
+#[derive(Diagnostic)]
+#[diag(session_profile_use_file_does_not_exist)]
+pub struct ProfileUseFileDoesNotExist<'a> {
+ pub path: &'a std::path::Path,
+}
+
+#[derive(Diagnostic)]
+#[diag(session_profile_sample_use_file_does_not_exist)]
+pub struct ProfileSampleUseFileDoesNotExist<'a> {
+ pub path: &'a std::path::Path,
+}
+
+#[derive(Diagnostic)]
+#[diag(session_target_requires_unwind_tables)]
+pub struct TargetRequiresUnwindTables;
+
+#[derive(Diagnostic)]
+#[diag(session_sanitizer_not_supported)]
+pub struct SanitizerNotSupported {
+ pub us: String,
+}
+
+#[derive(Diagnostic)]
+#[diag(session_sanitizers_not_supported)]
+pub struct SanitizersNotSupported {
+ pub us: String,
+}
+
+#[derive(Diagnostic)]
+#[diag(session_cannot_mix_and_match_sanitizers)]
+pub struct CannotMixAndMatchSanitizers {
+ pub first: String,
+ pub second: String,
+}
+
+#[derive(Diagnostic)]
+#[diag(session_cannot_enable_crt_static_linux)]
+pub struct CannotEnableCrtStaticLinux;
+
+#[derive(Diagnostic)]
+#[diag(session_sanitizer_cfi_enabled)]
+pub struct SanitizerCfiEnabled;
+
+#[derive(Diagnostic)]
+#[diag(session_unstable_virtual_function_elimination)]
+pub struct UnstableVirtualFunctionElimination;
+
+#[derive(Diagnostic)]
+#[diag(session_unsupported_dwarf_version)]
+pub struct UnsupportedDwarfVersion {
+ pub dwarf_version: u32,
+}
+
+#[derive(Diagnostic)]
+#[diag(session_target_stack_protector_not_supported)]
+pub struct StackProtectorNotSupportedForTarget<'a> {
+ pub stack_protector: StackProtector,
+ pub target_triple: &'a TargetTriple,
+}
+
+#[derive(Diagnostic)]
+#[diag(session_split_debuginfo_unstable_platform)]
+pub struct SplitDebugInfoUnstablePlatform {
+ pub debuginfo: SplitDebuginfo,
+}
+
+#[derive(Diagnostic)]
+#[diag(session_file_is_not_writeable)]
+pub struct FileIsNotWriteable<'a> {
+ pub file: &'a std::path::Path,
+}
+
+#[derive(Diagnostic)]
+#[diag(session_crate_name_does_not_match)]
+pub struct CrateNameDoesNotMatch<'a> {
+ #[primary_span]
+ pub span: Span,
+ pub s: &'a str,
+ pub name: Symbol,
+}
+
+#[derive(Diagnostic)]
+#[diag(session_crate_name_invalid)]
+pub struct CrateNameInvalid<'a> {
+ pub s: &'a str,
+}
+
+#[derive(Diagnostic)]
+#[diag(session_crate_name_empty)]
+pub struct CrateNameEmpty {
+ #[primary_span]
+ pub span: Option<Span>,
+}
+
+#[derive(Diagnostic)]
+#[diag(session_invalid_character_in_create_name)]
+pub struct InvalidCharacterInCrateName<'a> {
+ #[primary_span]
+ pub span: Option<Span>,
+ pub character: char,
+ pub crate_name: &'a str,
+}
+
+#[derive(Subdiagnostic)]
+#[multipart_suggestion(session_expr_parentheses_needed, applicability = "machine-applicable")]
+pub struct ExprParenthesesNeeded {
+ #[suggestion_part(code = "(")]
+ pub left: Span,
+ #[suggestion_part(code = ")")]
+ pub right: Span,
+}
+
+impl ExprParenthesesNeeded {
+ pub fn surrounding(s: Span) -> Self {
+ ExprParenthesesNeeded { left: s.shrink_to_lo(), right: s.shrink_to_hi() }
+ }
+}
+
+#[derive(Diagnostic)]
+#[diag(session_skipping_const_checks)]
+pub struct SkippingConstChecks {
+ #[subdiagnostic(eager)]
+ pub unleashed_features: Vec<UnleashedFeatureHelp>,
+}
+
+#[derive(Subdiagnostic)]
+pub enum UnleashedFeatureHelp {
+ #[help(session_unleashed_feature_help_named)]
+ Named {
+ #[primary_span]
+ span: Span,
+ gate: Symbol,
+ },
+ #[help(session_unleashed_feature_help_unnamed)]
+ Unnamed {
+ #[primary_span]
+ span: Span,
+ },
+}
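
`ExprParenthesesNeeded::surrounding` builds two zero-width spans, one at each end of the expression, which the machine-applicable multipart suggestion then fills with `(` and `)`. A toy illustration of that calculation with a simplified `Span` type (not rustc_span):

// --- illustrative sketch, not part of the diff ---
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
struct Span {
    lo: usize,
    hi: usize,
}

impl Span {
    fn shrink_to_lo(self) -> Span { Span { lo: self.lo, hi: self.lo } }
    fn shrink_to_hi(self) -> Span { Span { lo: self.hi, hi: self.hi } }
}

struct ExprParenthesesNeeded {
    left: Span,  // receives the suggested "("
    right: Span, // receives the suggested ")"
}

impl ExprParenthesesNeeded {
    fn surrounding(s: Span) -> Self {
        ExprParenthesesNeeded { left: s.shrink_to_lo(), right: s.shrink_to_hi() }
    }
}

fn main() {
    let expr = Span { lo: 10, hi: 24 };
    let fix = ExprParenthesesNeeded::surrounding(expr);
    assert_eq!(fix.left, Span { lo: 10, hi: 10 });
    assert_eq!(fix.right, Span { lo: 24, hi: 24 });
}
// --- end sketch ---
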
diff --git a/compiler/rustc_session/src/filesearch.rs b/compiler/rustc_session/src/filesearch.rs
index c973e3140..e8edb38f5 100644
--- a/compiler/rustc_session/src/filesearch.rs
+++ b/compiler/rustc_session/src/filesearch.rs
@@ -7,7 +7,6 @@ use std::path::{Path, PathBuf};
use crate::search_paths::{PathKind, SearchPath};
use rustc_fs_util::fix_windows_verbatim_for_gcc;
-use tracing::debug;
#[derive(Copy, Clone)]
pub enum FileMatch {
diff --git a/compiler/rustc_session/src/lib.rs b/compiler/rustc_session/src/lib.rs
index 7353c1ca0..39e871f53 100644
--- a/compiler/rustc_session/src/lib.rs
+++ b/compiler/rustc_session/src/lib.rs
@@ -1,6 +1,5 @@
#![feature(if_let_guard)]
#![feature(let_chains)]
-#![feature(let_else)]
#![feature(min_specialization)]
#![feature(never_type)]
#![feature(once_cell)]
@@ -9,9 +8,15 @@
#![feature(map_many_mut)]
#![recursion_limit = "256"]
#![allow(rustc::potential_query_instability)]
+#![deny(rustc::untranslatable_diagnostic)]
+#![deny(rustc::diagnostic_outside_of_impl)]
#[macro_use]
extern crate rustc_macros;
+pub mod errors;
+
+#[macro_use]
+extern crate tracing;
pub mod cgu_reuse_tracker;
pub mod utils;
diff --git a/compiler/rustc_session/src/options.rs b/compiler/rustc_session/src/options.rs
index 1827f1c20..3f234a47a 100644
--- a/compiler/rustc_session/src/options.rs
+++ b/compiler/rustc_session/src/options.rs
@@ -5,7 +5,7 @@ use crate::lint;
use crate::search_paths::SearchPath;
use crate::utils::NativeLib;
use rustc_errors::LanguageIdentifier;
-use rustc_target::spec::{CodeModel, LinkerFlavor, MergeFunctions, PanicStrategy, SanitizerSet};
+use rustc_target::spec::{CodeModel, LinkerFlavorCli, MergeFunctions, PanicStrategy, SanitizerSet};
use rustc_target::spec::{
RelocModel, RelroLevel, SplitDebuginfo, StackProtector, TargetTriple, TlsModel,
};
@@ -127,11 +127,11 @@ top_level_options!(
/// `CodegenOptions`, think about how it influences incremental compilation. If in
/// doubt, specify `[TRACKED]`, which is always "correct" but might lead to
/// unnecessary re-compilation.
- #[cfg_attr(not(bootstrap), rustc_lint_opt_ty)]
+ #[rustc_lint_opt_ty]
pub struct Options {
/// The crate config requested for the session, which may be combined
/// with additional crate configurations during the compile process.
- #[cfg_attr(not(bootstrap), rustc_lint_opt_deny_field_access("use `Session::crate_types` instead of this field"))]
+ #[rustc_lint_opt_deny_field_access("use `Session::crate_types` instead of this field")]
crate_types: Vec<CrateType> [TRACKED],
optimize: OptLevel [TRACKED],
/// Include the `debug_assertions` flag in dependency tracking, since it
@@ -178,15 +178,15 @@ top_level_options!(
/// what rustc was invoked with, but massaged a bit to agree with
/// commands like `--emit llvm-ir` which they're often incompatible with
/// if we otherwise use the defaults of rustc.
- #[cfg_attr(not(bootstrap), rustc_lint_opt_deny_field_access("use `Session::codegen_units` instead of this field"))]
+ #[rustc_lint_opt_deny_field_access("use `Session::codegen_units` instead of this field")]
cli_forced_codegen_units: Option<usize> [UNTRACKED],
- #[cfg_attr(not(bootstrap), rustc_lint_opt_deny_field_access("use `Session::lto` instead of this field"))]
+ #[rustc_lint_opt_deny_field_access("use `Session::lto` instead of this field")]
cli_forced_thinlto_off: bool [UNTRACKED],
/// Remap source path prefixes in all output (messages, object files, debug, etc.).
remap_path_prefix: Vec<(PathBuf, PathBuf)> [TRACKED_NO_CRATE_HASH],
/// Base directory containing the `src/` for the Rust standard library, and
- /// potentially `rustc` as well, if we can can find it. Right now it's always
+ /// potentially `rustc` as well, if we can find it. Right now it's always
/// `$sysroot/lib/rustlib/src/rust` (i.e. the `rustup` `rust-src` component).
///
/// This directory is what the virtual `/rustc/$hash` is translated back to,
@@ -231,7 +231,7 @@ macro_rules! options {
),* ,) =>
(
#[derive(Clone)]
- #[cfg_attr(not(bootstrap), rustc_lint_opt_ty)]
+ #[rustc_lint_opt_ty]
pub struct $struct_name { $( $( #[$attr] )* pub $opt: $t),* }
impl Default for $struct_name {
@@ -280,17 +280,9 @@ macro_rules! options {
) }
-impl Options {
- // JUSTIFICATION: defn of the suggested wrapper fn
- #[cfg_attr(not(bootstrap), allow(rustc::bad_opt_access))]
- pub fn time_passes(&self) -> bool {
- self.unstable_opts.time_passes || self.unstable_opts.time
- }
-}
-
impl CodegenOptions {
// JUSTIFICATION: defn of the suggested wrapper fn
- #[cfg_attr(not(bootstrap), allow(rustc::bad_opt_access))]
+ #[allow(rustc::bad_opt_access)]
pub fn instrument_coverage(&self) -> InstrumentCoverage {
self.instrument_coverage.unwrap_or(InstrumentCoverage::Off)
}
@@ -382,7 +374,7 @@ mod desc {
"either a boolean (`yes`, `no`, `on`, `off`, etc), `checks`, or `nochecks`";
pub const parse_cfprotection: &str = "`none`|`no`|`n` (default), `branch`, `return`, or `full`|`yes`|`y` (equivalent to `branch` and `return`)";
pub const parse_strip: &str = "either `none`, `debuginfo`, or `symbols`";
- pub const parse_linker_flavor: &str = ::rustc_target::spec::LinkerFlavor::one_of();
+ pub const parse_linker_flavor: &str = ::rustc_target::spec::LinkerFlavorCli::one_of();
pub const parse_optimization_fuel: &str = "crate=integer";
pub const parse_mir_spanview: &str = "`statement` (default), `terminator`, or `block`";
pub const parse_instrument_coverage: &str =
@@ -582,7 +574,7 @@ mod parse {
pub(crate) fn parse_threads(slot: &mut usize, v: Option<&str>) -> bool {
match v.and_then(|s| s.parse().ok()) {
Some(0) => {
- *slot = ::num_cpus::get();
+ *slot = std::thread::available_parallelism().map_or(1, std::num::NonZeroUsize::get);
true
}
Some(i) => {
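
The hunk above replaces the external `num_cpus` crate with `std::thread::available_parallelism()` (stabilized in Rust 1.59); because the call can fail on exotic platforms, the code falls back to a single thread. A runnable sketch of the same fallback logic:

// --- illustrative sketch, not part of the diff ---
use std::num::NonZeroUsize;

/// `-Zthreads=0` means "use all available CPUs"; any other value is taken as-is.
fn effective_threads(requested: usize) -> usize {
    if requested == 0 {
        std::thread::available_parallelism().map_or(1, NonZeroUsize::get)
    } else {
        requested
    }
}

fn main() {
    println!("-Zthreads=0 resolves to {} thread(s)", effective_threads(0));
    println!("-Zthreads=4 resolves to {} thread(s)", effective_threads(4));
}
// --- end sketch ---
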
@@ -763,8 +755,8 @@ mod parse {
true
}
- pub(crate) fn parse_linker_flavor(slot: &mut Option<LinkerFlavor>, v: Option<&str>) -> bool {
- match v.and_then(LinkerFlavor::from_str) {
+ pub(crate) fn parse_linker_flavor(slot: &mut Option<LinkerFlavorCli>, v: Option<&str>) -> bool {
+ match v.and_then(LinkerFlavorCli::from_str) {
Some(lf) => *slot = Some(lf),
_ => return false,
}
@@ -1083,15 +1075,14 @@ mod parse {
options! {
CodegenOptions, CG_OPTIONS, cgopts, "C", "codegen",
- // This list is in alphabetical order.
- //
// If you add a new option, please update:
// - compiler/rustc_interface/src/tests.rs
// - src/doc/rustc/src/codegen-options/index.md
+ // tidy-alphabetical-start
ar: String = (String::new(), parse_string, [UNTRACKED],
"this option is deprecated and does nothing"),
- #[cfg_attr(not(bootstrap), rustc_lint_opt_deny_field_access("use `Session::code_model` instead of this field"))]
+ #[rustc_lint_opt_deny_field_access("use `Session::code_model` instead of this field")]
code_model: Option<CodeModel> = (None, parse_code_model, [TRACKED],
"choose the code model to use (`rustc --print code-models` for details)"),
codegen_units: Option<usize> = (None, parse_opt_number, [UNTRACKED],
@@ -1111,14 +1102,14 @@ options! {
"extra data to put in each output filename"),
force_frame_pointers: Option<bool> = (None, parse_opt_bool, [TRACKED],
"force use of the frame pointers"),
- #[cfg_attr(not(bootstrap), rustc_lint_opt_deny_field_access("use `Session::must_emit_unwind_tables` instead of this field"))]
+ #[rustc_lint_opt_deny_field_access("use `Session::must_emit_unwind_tables` instead of this field")]
force_unwind_tables: Option<bool> = (None, parse_opt_bool, [TRACKED],
"force use of unwind tables"),
incremental: Option<String> = (None, parse_opt_string, [UNTRACKED],
"enable incremental compilation"),
inline_threshold: Option<u32> = (None, parse_opt_number, [TRACKED],
"set the threshold for inlining a function"),
- #[cfg_attr(not(bootstrap), rustc_lint_opt_deny_field_access("use `Session::instrument_coverage` instead of this field"))]
+ #[rustc_lint_opt_deny_field_access("use `Session::instrument_coverage` instead of this field")]
instrument_coverage: Option<InstrumentCoverage> = (None, parse_instrument_coverage, [TRACKED],
"instrument the generated code to support LLVM source-based code coverage \
reports (note, the compiler build config must include `profiler = true`); \
@@ -1131,7 +1122,7 @@ options! {
"a single extra argument to append to the linker invocation (can be used several times)"),
link_args: Vec<String> = (Vec::new(), parse_list, [UNTRACKED],
"extra arguments to append to the linker invocation (space separated)"),
- #[cfg_attr(not(bootstrap), rustc_lint_opt_deny_field_access("use `Session::link_dead_code` instead of this field"))]
+ #[rustc_lint_opt_deny_field_access("use `Session::link_dead_code` instead of this field")]
link_dead_code: Option<bool> = (None, parse_opt_bool, [TRACKED],
"keep dead code at link time (useful for code coverage) (default: no)"),
link_self_contained: Option<bool> = (None, parse_opt_bool, [UNTRACKED],
@@ -1139,14 +1130,14 @@ options! {
on C toolchain installed in the system"),
linker: Option<PathBuf> = (None, parse_opt_pathbuf, [UNTRACKED],
"system linker to link outputs with"),
- linker_flavor: Option<LinkerFlavor> = (None, parse_linker_flavor, [UNTRACKED],
+ linker_flavor: Option<LinkerFlavorCli> = (None, parse_linker_flavor, [UNTRACKED],
"linker flavor"),
linker_plugin_lto: LinkerPluginLto = (LinkerPluginLto::Disabled,
parse_linker_plugin_lto, [TRACKED],
"generate build artifacts that are compatible with linker-based LTO"),
llvm_args: Vec<String> = (Vec::new(), parse_list, [TRACKED],
"a list of arguments to pass to LLVM (space separated)"),
- #[cfg_attr(not(bootstrap), rustc_lint_opt_deny_field_access("use `Session::lto` instead of this field"))]
+ #[rustc_lint_opt_deny_field_access("use `Session::lto` instead of this field")]
lto: LtoCli = (LtoCli::Unspecified, parse_lto, [TRACKED],
"perform LLVM link-time optimizations"),
metadata: Vec<String> = (Vec::new(), parse_list, [TRACKED],
@@ -1163,10 +1154,10 @@ options! {
"disable LLVM's SLP vectorization pass"),
opt_level: String = ("0".to_string(), parse_string, [TRACKED],
"optimization level (0-3, s, or z; default: 0)"),
- #[cfg_attr(not(bootstrap), rustc_lint_opt_deny_field_access("use `Session::overflow_checks` instead of this field"))]
+ #[rustc_lint_opt_deny_field_access("use `Session::overflow_checks` instead of this field")]
overflow_checks: Option<bool> = (None, parse_opt_bool, [TRACKED],
"use overflow checks for integer arithmetic"),
- #[cfg_attr(not(bootstrap), rustc_lint_opt_deny_field_access("use `Session::panic_strategy` instead of this field"))]
+ #[rustc_lint_opt_deny_field_access("use `Session::panic_strategy` instead of this field")]
panic: Option<PanicStrategy> = (None, parse_opt_panic_strategy, [TRACKED],
"panic strategy to compile crate with"),
passes: Vec<String> = (Vec::new(), parse_list, [TRACKED],
@@ -1178,7 +1169,7 @@ options! {
"compile the program with profiling instrumentation"),
profile_use: Option<PathBuf> = (None, parse_opt_pathbuf, [TRACKED],
"use the given `.profdata` file for profile-guided optimization"),
- #[cfg_attr(not(bootstrap), rustc_lint_opt_deny_field_access("use `Session::relocation_model` instead of this field"))]
+ #[rustc_lint_opt_deny_field_access("use `Session::relocation_model` instead of this field")]
relocation_model: Option<RelocModel> = (None, parse_relocation_model, [TRACKED],
"control generation of position-independent code (PIC) \
(`rustc --print relocation-models` for details)"),
@@ -1190,7 +1181,7 @@ options! {
"save all temporary output files during compilation (default: no)"),
soft_float: bool = (false, parse_bool, [TRACKED],
"use soft float ABI (*eabihf targets only) (default: no)"),
- #[cfg_attr(not(bootstrap), rustc_lint_opt_deny_field_access("use `Session::split_debuginfo` instead of this field"))]
+ #[rustc_lint_opt_deny_field_access("use `Session::split_debuginfo` instead of this field")]
split_debuginfo: Option<SplitDebuginfo> = (None, parse_split_debuginfo, [TRACKED],
"how to handle split-debuginfo, a platform-specific option"),
strip: Strip = (Strip::None, parse_strip, [UNTRACKED],
@@ -1203,9 +1194,8 @@ options! {
target_feature: String = (String::new(), parse_target_feature, [TRACKED],
"target specific attributes. (`rustc --print target-features` for details). \
This feature is unsafe."),
+ // tidy-alphabetical-end
- // This list is in alphabetical order.
- //
// If you add a new option, please update:
// - compiler/rustc_interface/src/tests.rs
// - src/doc/rustc/src/codegen-options/index.md
@@ -1214,25 +1204,24 @@ options! {
options! {
UnstableOptions, Z_OPTIONS, dbopts, "Z", "unstable",
- // This list is in alphabetical order.
- //
// If you add a new option, please update:
// - compiler/rustc_interface/src/tests.rs
// - src/doc/unstable-book/src/compiler-flags
+ // tidy-alphabetical-start
allow_features: Option<Vec<String>> = (None, parse_opt_comma_list, [TRACKED],
"only allow the listed language features to be enabled in code (space separated)"),
always_encode_mir: bool = (false, parse_bool, [TRACKED],
"encode MIR of all functions into the crate metadata (default: no)"),
- assume_incomplete_release: bool = (false, parse_bool, [TRACKED],
- "make cfg(version) treat the current version as incomplete (default: no)"),
- #[cfg_attr(not(bootstrap), rustc_lint_opt_deny_field_access("use `Session::asm_comments` instead of this field"))]
+ #[rustc_lint_opt_deny_field_access("use `Session::asm_comments` instead of this field")]
asm_comments: bool = (false, parse_bool, [TRACKED],
"generate comments into the assembly (may change behavior) (default: no)"),
assert_incr_state: Option<String> = (None, parse_opt_string, [UNTRACKED],
"assert that the incremental cache is in given state: \
either `loaded` or `not-loaded`."),
- #[cfg_attr(not(bootstrap), rustc_lint_opt_deny_field_access("use `Session::binary_dep_depinfo` instead of this field"))]
+ assume_incomplete_release: bool = (false, parse_bool, [TRACKED],
+ "make cfg(version) treat the current version as incomplete (default: no)"),
+ #[rustc_lint_opt_deny_field_access("use `Session::binary_dep_depinfo` instead of this field")]
binary_dep_depinfo: bool = (false, parse_bool, [TRACKED],
"include artifacts (sysroot, crate dependencies) used during compilation in dep-info \
(default: no)"),
@@ -1264,6 +1253,8 @@ options! {
dep_tasks: bool = (false, parse_bool, [UNTRACKED],
"print tasks that execute and the color their dep node gets (requires debug build) \
(default: no)"),
+ diagnostic_width: Option<usize> = (None, parse_opt_number, [UNTRACKED],
+ "set the current output width for diagnostic truncation"),
dlltool: Option<PathBuf> = (None, parse_opt_pathbuf, [UNTRACKED],
"import library generation tool (windows-gnu only)"),
dont_buffer_diagnostics: bool = (false, parse_bool, [UNTRACKED],
@@ -1304,13 +1295,17 @@ options! {
an additional `.html` file showing the computed coverage spans."),
dwarf_version: Option<u32> = (None, parse_opt_number, [TRACKED],
"version of DWARF debug information to emit (default: 2 or 4, depending on platform)"),
+ dylib_lto: bool = (false, parse_bool, [UNTRACKED],
+ "enables LTO for dylib crate type"),
emit_stack_sizes: bool = (false, parse_bool, [UNTRACKED],
"emit a section containing stack size metadata (default: no)"),
emit_thin_lto: bool = (true, parse_bool, [TRACKED],
"emit the bc module with thin LTO info (default: yes)"),
export_executable_symbols: bool = (false, parse_bool, [TRACKED],
"export symbols from executables, as if they were dynamic libraries"),
- #[cfg_attr(not(bootstrap), rustc_lint_opt_deny_field_access("use `Session::fewer_names` instead of this field"))]
+ extra_const_ub_checks: bool = (false, parse_bool, [TRACKED],
+ "turns on more checks to detect const UB, which can be slow (default: no)"),
+ #[rustc_lint_opt_deny_field_access("use `Session::fewer_names` instead of this field")]
fewer_names: Option<bool> = (None, parse_opt_bool, [TRACKED],
"reduce memory use by retaining fewer names within compilation artifacts (LLVM-IR) \
(default: no)"),
@@ -1343,17 +1338,19 @@ options! {
"hash spans relative to their parent item for incr. comp. (default: no)"),
incremental_verify_ich: bool = (false, parse_bool, [UNTRACKED],
"verify incr. comp. hashes of green query instances (default: no)"),
+ inline_in_all_cgus: Option<bool> = (None, parse_opt_bool, [TRACKED],
+ "control whether `#[inline]` functions are in all CGUs"),
+ inline_llvm: bool = (true, parse_bool, [TRACKED],
+ "enable LLVM inlining (default: yes)"),
inline_mir: Option<bool> = (None, parse_opt_bool, [TRACKED],
"enable MIR inlining (default: no)"),
- inline_mir_threshold: Option<usize> = (None, parse_opt_number, [TRACKED],
- "a default MIR inlining threshold (default: 50)"),
inline_mir_hint_threshold: Option<usize> = (None, parse_opt_number, [TRACKED],
"inlining threshold for functions with inline hint (default: 100)"),
- inline_in_all_cgus: Option<bool> = (None, parse_opt_bool, [TRACKED],
- "control whether `#[inline]` functions are in all CGUs"),
+ inline_mir_threshold: Option<usize> = (None, parse_opt_number, [TRACKED],
+ "a default MIR inlining threshold (default: 50)"),
input_stats: bool = (false, parse_bool, [UNTRACKED],
"gather statistics about the input (default: no)"),
- #[cfg_attr(not(bootstrap), rustc_lint_opt_deny_field_access("use `Session::instrument_coverage` instead of this field"))]
+ #[rustc_lint_opt_deny_field_access("use `Session::instrument_coverage` instead of this field")]
instrument_coverage: Option<InstrumentCoverage> = (None, parse_instrument_coverage, [TRACKED],
"instrument the generated code to support LLVM source-based code coverage \
reports (note, the compiler build config must include `profiler = true`); \
@@ -1362,11 +1359,13 @@ options! {
`=except-unused-generics`
`=except-unused-functions`
`=off` (default)"),
- #[cfg_attr(not(bootstrap), rustc_lint_opt_deny_field_access("use `Session::instrument_mcount` instead of this field"))]
+ #[rustc_lint_opt_deny_field_access("use `Session::instrument_mcount` instead of this field")]
instrument_mcount: bool = (false, parse_bool, [TRACKED],
"insert function instrument code for mcount-based tracing (default: no)"),
keep_hygiene_data: bool = (false, parse_bool, [UNTRACKED],
"keep hygiene data after analysis (default: no)"),
+ layout_seed: Option<u64> = (None, parse_opt_number, [TRACKED],
+ "seed layout randomization"),
link_native_libraries: bool = (true, parse_bool, [UNTRACKED],
"link native libraries in the linker invocation (default: yes)"),
link_only: bool = (false, parse_bool, [TRACKED],
@@ -1386,7 +1385,7 @@ options! {
merge_functions: Option<MergeFunctions> = (None, parse_merge_functions, [TRACKED],
"control the operation of the MergeFunctions LLVM pass, taking \
the same values as the target option of the same name"),
- #[cfg_attr(not(bootstrap), rustc_lint_opt_deny_field_access("use `Session::meta_stats` instead of this field"))]
+ #[rustc_lint_opt_deny_field_access("use `Session::meta_stats` instead of this field")]
meta_stats: bool = (false, parse_bool, [UNTRACKED],
"gather metadata statistics (default: no)"),
mir_emit_retag: bool = (false, parse_bool, [TRACKED],
@@ -1396,17 +1395,15 @@ options! {
"use like `-Zmir-enable-passes=+DestProp,-InstCombine`. Forces the specified passes to be \
enabled, overriding all other checks. Passes that are not specified are enabled or \
disabled by other flags as usual."),
- mir_pretty_relative_line_numbers: bool = (false, parse_bool, [UNTRACKED],
- "use line numbers relative to the function in mir pretty printing"),
- #[cfg_attr(not(bootstrap), rustc_lint_opt_deny_field_access("use `Session::mir_opt_level` instead of this field"))]
+ #[rustc_lint_opt_deny_field_access("use `Session::mir_opt_level` instead of this field")]
mir_opt_level: Option<usize> = (None, parse_opt_number, [TRACKED],
"MIR optimization level (0-4; default: 1 in non optimized builds and 2 in optimized builds)"),
+ mir_pretty_relative_line_numbers: bool = (false, parse_bool, [UNTRACKED],
+ "use line numbers relative to the function in mir pretty printing"),
move_size_limit: Option<usize> = (None, parse_opt_number, [TRACKED],
"the size at which the `large_assignments` lint starts to be emitted"),
mutable_noalias: Option<bool> = (None, parse_opt_bool, [TRACKED],
"emit noalias metadata for mutable references (default: yes)"),
- new_llvm_pass_manager: Option<bool> = (None, parse_opt_bool, [TRACKED],
- "use new LLVM pass manager (default: no)"),
nll_facts: bool = (false, parse_bool, [UNTRACKED],
"dump facts from NLL analysis into side files (default: no)"),
nll_facts_dir: String = ("nll-facts".to_string(), parse_string, [UNTRACKED],
@@ -1425,18 +1422,18 @@ options! {
"compile without linking"),
no_parallel_llvm: bool = (false, parse_no_flag, [UNTRACKED],
"run LLVM in non-parallel mode (while keeping codegen-units and ThinLTO)"),
- no_unique_section_names: bool = (false, parse_bool, [TRACKED],
- "do not use unique names for text and data sections when -Z function-sections is used"),
no_profiler_runtime: bool = (false, parse_no_flag, [TRACKED],
"prevent automatic injection of the profiler_builtins crate"),
+ no_unique_section_names: bool = (false, parse_bool, [TRACKED],
+ "do not use unique names for text and data sections when -Z function-sections is used"),
normalize_docs: bool = (false, parse_bool, [TRACKED],
"normalize associated items in rustdoc when generating documentation"),
oom: OomStrategy = (OomStrategy::Abort, parse_oom_strategy, [TRACKED],
"panic strategy for out-of-memory handling"),
osx_rpath_install_name: bool = (false, parse_bool, [TRACKED],
"pass `-install_name @rpath/...` to the macOS linker (default: no)"),
- diagnostic_width: Option<usize> = (None, parse_opt_number, [UNTRACKED],
- "set the current output width for diagnostic truncation"),
+ packed_bundled_libs: bool = (false, parse_bool, [TRACKED],
+ "change rlib format to store native libraries as archives"),
panic_abort_tests: bool = (false, parse_bool, [TRACKED],
"support compiling tests with panic=abort (default: no)"),
panic_in_drop: PanicStrategy = (PanicStrategy::Unwind, parse_panic_strategy, [TRACKED],
@@ -1465,7 +1462,7 @@ options! {
See #77382 and #74551."),
print_fuel: Option<String> = (None, parse_opt_string, [TRACKED],
"make rustc print the total optimization fuel used by a crate"),
- #[cfg_attr(not(bootstrap), rustc_lint_opt_deny_field_access("use `Session::print_llvm_passes` instead of this field"))]
+ #[rustc_lint_opt_deny_field_access("use `Session::print_llvm_passes` instead of this field")]
print_llvm_passes: bool = (false, parse_bool, [UNTRACKED],
"print the LLVM optimization passes being run (default: no)"),
print_mono_items: Option<String> = (None, parse_opt_string, [UNTRACKED],
@@ -1484,25 +1481,20 @@ options! {
profile_emit: Option<PathBuf> = (None, parse_opt_pathbuf, [TRACKED],
"file path to emit profiling data at runtime when using 'profile' \
(default based on relative source path)"),
- profiler_runtime: String = (String::from("profiler_builtins"), parse_string, [TRACKED],
- "name of the profiler runtime crate to automatically inject (default: `profiler_builtins`)"),
profile_sample_use: Option<PathBuf> = (None, parse_opt_pathbuf, [TRACKED],
"use the given `.prof` file for sampled profile-guided optimization (also known as AutoFDO)"),
+ profiler_runtime: String = (String::from("profiler_builtins"), parse_string, [TRACKED],
+ "name of the profiler runtime crate to automatically inject (default: `profiler_builtins`)"),
query_dep_graph: bool = (false, parse_bool, [UNTRACKED],
"enable queries of the dependency graph for regression testing (default: no)"),
randomize_layout: bool = (false, parse_bool, [TRACKED],
"randomize the layout of types (default: no)"),
- layout_seed: Option<u64> = (None, parse_opt_number, [TRACKED],
- "seed layout randomization"),
relax_elf_relocations: Option<bool> = (None, parse_opt_bool, [TRACKED],
"whether ELF relocations can be relaxed"),
relro_level: Option<RelroLevel> = (None, parse_relro_level, [TRACKED],
"choose which RELRO level to use"),
remap_cwd_prefix: Option<PathBuf> = (None, parse_opt_pathbuf, [TRACKED],
"remap paths under the current working directory to this path prefix"),
- simulate_remapped_rust_src_base: Option<PathBuf> = (None, parse_opt_pathbuf, [TRACKED],
- "simulate the effect of remap-debuginfo = true at bootstrapping by remapping path \
- to rust's source base directory. only meant for testing purposes"),
report_delayed_bugs: bool = (false, parse_bool, [TRACKED],
"immediately print bugs registered with `delay_span_bug` (default: no)"),
sanitizer: SanitizerSet = (SanitizerSet::empty(), parse_sanitizers, [TRACKED],
@@ -1520,36 +1512,33 @@ options! {
self_profile: SwitchWithOptPath = (SwitchWithOptPath::Disabled,
parse_switch_with_opt_path, [UNTRACKED],
"run the self profiler and output the raw event data"),
- /// keep this in sync with the event filter names in librustc_data_structures/profiling.rs
- self_profile_events: Option<Vec<String>> = (None, parse_opt_comma_list, [UNTRACKED],
- "specify the events recorded by the self profiler;
- for example: `-Z self-profile-events=default,query-keys`
- all options: none, all, default, generic-activity, query-provider, query-cache-hit
- query-blocked, incr-cache-load, incr-result-hashing, query-keys, function-args, args, llvm, artifact-sizes"),
self_profile_counter: String = ("wall-time".to_string(), parse_string, [UNTRACKED],
"counter used by the self profiler (default: `wall-time`), one of:
`wall-time` (monotonic clock, i.e. `std::time::Instant`)
`instructions:u` (retired instructions, userspace-only)
`instructions-minus-irqs:u` (subtracting hardware interrupt counts for extra accuracy)"
),
+ /// keep this in sync with the event filter names in librustc_data_structures/profiling.rs
+ self_profile_events: Option<Vec<String>> = (None, parse_opt_comma_list, [UNTRACKED],
+ "specify the events recorded by the self profiler;
+ for example: `-Z self-profile-events=default,query-keys`
+ all options: none, all, default, generic-activity, query-provider, query-cache-hit
+ query-blocked, incr-cache-load, incr-result-hashing, query-keys, function-args, args, llvm, artifact-sizes"),
share_generics: Option<bool> = (None, parse_opt_bool, [TRACKED],
"make the current crate share its generic instantiations"),
show_span: Option<String> = (None, parse_opt_string, [TRACKED],
"show spans for compiler debugging (expr|pat|ty)"),
+ simulate_remapped_rust_src_base: Option<PathBuf> = (None, parse_opt_pathbuf, [TRACKED],
+ "simulate the effect of remap-debuginfo = true at bootstrapping by remapping path \
+ to rust's source base directory. only meant for testing purposes"),
span_debug: bool = (false, parse_bool, [UNTRACKED],
"forward proc_macro::Span's `Debug` impl to `Span`"),
/// o/w tests have closure@path
span_free_formats: bool = (false, parse_bool, [UNTRACKED],
"exclude spans when debug-printing compiler state (default: no)"),
- src_hash_algorithm: Option<SourceFileHashAlgorithm> = (None, parse_src_file_hash, [TRACKED],
- "hash algorithm of source files in debug info (`md5`, `sha1`, or `sha256`)"),
- #[cfg_attr(not(bootstrap), rustc_lint_opt_deny_field_access("use `Session::stack_protector` instead of this field"))]
- stack_protector: StackProtector = (StackProtector::None, parse_stack_protector, [TRACKED],
- "control stack smash protection strategy (`rustc --print stack-protector-strategies` for details)"),
- strict_init_checks: bool = (false, parse_bool, [TRACKED],
- "control if mem::uninitialized and mem::zeroed panic on more UB"),
- strip: Strip = (Strip::None, parse_strip, [UNTRACKED],
- "tell the linker which information to strip (`none` (default), `debuginfo` or `symbols`)"),
+ split_dwarf_inlining: bool = (true, parse_bool, [TRACKED],
+ "provide minimal debug info in the object/executable to facilitate online \
+ symbolication/stack traces in the absence of .dwo/.dwp files when using Split DWARF"),
split_dwarf_kind: SplitDwarfKind = (SplitDwarfKind::Split, parse_split_dwarf_kind, [TRACKED],
"split dwarf variant (only if -Csplit-debuginfo is enabled and on relevant platform)
(default: `split`)
@@ -1558,29 +1547,24 @@ options! {
file which is ignored by the linker
`single`: sections which do not require relocation are written into object file but ignored
by the linker"),
- split_dwarf_inlining: bool = (true, parse_bool, [TRACKED],
- "provide minimal debug info in the object/executable to facilitate online \
- symbolication/stack traces in the absence of .dwo/.dwp files when using Split DWARF"),
+ src_hash_algorithm: Option<SourceFileHashAlgorithm> = (None, parse_src_file_hash, [TRACKED],
+ "hash algorithm of source files in debug info (`md5`, `sha1`, or `sha256`)"),
+ #[rustc_lint_opt_deny_field_access("use `Session::stack_protector` instead of this field")]
+ stack_protector: StackProtector = (StackProtector::None, parse_stack_protector, [TRACKED],
+ "control stack smash protection strategy (`rustc --print stack-protector-strategies` for details)"),
+ strict_init_checks: bool = (false, parse_bool, [TRACKED],
+ "control if mem::uninitialized and mem::zeroed panic on more UB"),
+ strip: Strip = (Strip::None, parse_strip, [UNTRACKED],
+ "tell the linker which information to strip (`none` (default), `debuginfo` or `symbols`)"),
symbol_mangling_version: Option<SymbolManglingVersion> = (None,
parse_symbol_mangling_version, [TRACKED],
"which mangling version to use for symbol names ('legacy' (default) or 'v0')"),
- #[cfg_attr(not(bootstrap), rustc_lint_opt_deny_field_access("use `Session::teach` instead of this field"))]
+ #[rustc_lint_opt_deny_field_access("use `Session::teach` instead of this field")]
teach: bool = (false, parse_bool, [TRACKED],
"show extended diagnostic help (default: no)"),
temps_dir: Option<String> = (None, parse_opt_string, [UNTRACKED],
"the directory the intermediate files are written to"),
- // Diagnostics are considered side-effects of a query (see `QuerySideEffects`) and are saved
- // alongside query results and changes to translation options can affect diagnostics - so
- // translation options should be tracked.
- translate_lang: Option<LanguageIdentifier> = (None, parse_opt_langid, [TRACKED],
- "language identifier for diagnostic output"),
- translate_additional_ftl: Option<PathBuf> = (None, parse_opt_pathbuf, [TRACKED],
- "additional fluent translation to preferentially use (for testing translation)"),
- translate_directionality_markers: bool = (false, parse_bool, [TRACKED],
- "emit directionality isolation markers in translated diagnostics"),
- tune_cpu: Option<String> = (None, parse_opt_string, [TRACKED],
- "select processor to schedule for (`rustc --print target-cpus` for details)"),
- #[cfg_attr(not(bootstrap), rustc_lint_opt_deny_field_access("use `Session::lto` instead of this field"))]
+ #[rustc_lint_opt_deny_field_access("use `Session::lto` instead of this field")]
thinlto: Option<bool> = (None, parse_opt_bool, [TRACKED],
"enable ThinLTO when possible"),
thir_unsafeck: bool = (false, parse_bool, [TRACKED],
@@ -1589,23 +1573,29 @@ options! {
/// a sequential compiler for now. This'll likely be adjusted
/// in the future. Note that -Zthreads=0 is the way to get
/// the num_cpus behavior.
- #[cfg_attr(not(bootstrap), rustc_lint_opt_deny_field_access("use `Session::threads` instead of this field"))]
+ #[rustc_lint_opt_deny_field_access("use `Session::threads` instead of this field")]
threads: usize = (1, parse_threads, [UNTRACKED],
"use a thread pool with N threads"),
- #[cfg_attr(not(bootstrap), rustc_lint_opt_deny_field_access("use `Session::time_passes` instead of this field"))]
- time: bool = (false, parse_bool, [UNTRACKED],
- "measure time of rustc processes (default: no)"),
- #[cfg_attr(not(bootstrap), rustc_lint_opt_deny_field_access("use `Session::time_llvm_passes` instead of this field"))]
+ #[rustc_lint_opt_deny_field_access("use `Session::time_llvm_passes` instead of this field")]
time_llvm_passes: bool = (false, parse_bool, [UNTRACKED],
"measure time of each LLVM pass (default: no)"),
- #[cfg_attr(not(bootstrap), rustc_lint_opt_deny_field_access("use `Session::time_passes` instead of this field"))]
+ #[rustc_lint_opt_deny_field_access("use `Session::time_passes` instead of this field")]
time_passes: bool = (false, parse_bool, [UNTRACKED],
"measure time of each rustc pass (default: no)"),
- #[cfg_attr(not(bootstrap), rustc_lint_opt_deny_field_access("use `Session::tls_model` instead of this field"))]
+ #[rustc_lint_opt_deny_field_access("use `Session::tls_model` instead of this field")]
tls_model: Option<TlsModel> = (None, parse_tls_model, [TRACKED],
"choose the TLS model to use (`rustc --print tls-models` for details)"),
trace_macros: bool = (false, parse_bool, [UNTRACKED],
"for every macro invocation, print its name and arguments (default: no)"),
+ // Diagnostics are considered side-effects of a query (see `QuerySideEffects`) and are saved
+ // alongside query results and changes to translation options can affect diagnostics - so
+ // translation options should be tracked.
+ translate_additional_ftl: Option<PathBuf> = (None, parse_opt_pathbuf, [TRACKED],
+ "additional fluent translation to preferentially use (for testing translation)"),
+ translate_directionality_markers: bool = (false, parse_bool, [TRACKED],
+ "emit directionality isolation markers in translated diagnostics"),
+ translate_lang: Option<LanguageIdentifier> = (None, parse_opt_langid, [TRACKED],
+ "language identifier for diagnostic output"),
translate_remapped_path_to_local_path: bool = (true, parse_bool, [TRACKED],
"translate remapped paths into local paths when possible (default: yes)"),
trap_unreachable: Option<bool> = (None, parse_opt_bool, [TRACKED],
@@ -1614,6 +1604,8 @@ options! {
"treat error number `val` that occurs as bug"),
trim_diagnostic_paths: bool = (true, parse_bool, [UNTRACKED],
"in diagnostics, use heuristics to shorten paths referring to items"),
+ tune_cpu: Option<String> = (None, parse_opt_string, [TRACKED],
+ "select processor to schedule for (`rustc --print target-cpus` for details)"),
ui_testing: bool = (false, parse_bool, [UNTRACKED],
"emit compiler diagnostics in a form suitable for UI testing (default: no)"),
uninit_const_chunk_threshold: usize = (16, parse_number, [TRACKED],
@@ -1636,17 +1628,17 @@ options! {
"enable unsound and buggy MIR optimizations (default: no)"),
/// This name is kind of confusing: Most unstable options enable something themselves, while
/// this just allows "normal" options to be feature-gated.
- #[cfg_attr(not(bootstrap), rustc_lint_opt_deny_field_access("use `Session::unstable_options` instead of this field"))]
+ #[rustc_lint_opt_deny_field_access("use `Session::unstable_options` instead of this field")]
unstable_options: bool = (false, parse_bool, [UNTRACKED],
"adds unstable command line options to rustc interface (default: no)"),
use_ctors_section: Option<bool> = (None, parse_opt_bool, [TRACKED],
"use legacy .ctors section for initializers rather than .init_array"),
validate_mir: bool = (false, parse_bool, [UNTRACKED],
"validate MIR after each transformation"),
- #[cfg_attr(not(bootstrap), rustc_lint_opt_deny_field_access("use `Session::verbose` instead of this field"))]
+ #[rustc_lint_opt_deny_field_access("use `Session::verbose` instead of this field")]
verbose: bool = (false, parse_bool, [UNTRACKED],
"in general, enable more debug printouts (default: no)"),
- #[cfg_attr(not(bootstrap), rustc_lint_opt_deny_field_access("use `Session::verify_llvm_ir` instead of this field"))]
+ #[rustc_lint_opt_deny_field_access("use `Session::verify_llvm_ir` instead of this field")]
verify_llvm_ir: bool = (false, parse_bool, [TRACKED],
"verify LLVM IR (default: no)"),
virtual_function_elimination: bool = (false, parse_bool, [TRACKED],
@@ -1654,9 +1646,8 @@ options! {
Requires `-Clto[=[fat,yes]]`"),
wasi_exec_model: Option<WasiExecModel> = (None, parse_wasi_exec_model, [TRACKED],
"whether to build a wasi command or reactor"),
+ // tidy-alphabetical-end
- // This list is in alphabetical order.
- //
// If you add a new option, please update:
// - compiler/rustc_interface/src/tests.rs
}
diff --git a/compiler/rustc_session/src/output.rs b/compiler/rustc_session/src/output.rs
index e5e6579d7..2511bee46 100644
--- a/compiler/rustc_session/src/output.rs
+++ b/compiler/rustc_session/src/output.rs
@@ -1,5 +1,9 @@
//! Related to out filenames of compilation (e.g. save analysis, binaries).
use crate::config::{CrateType, Input, OutputFilenames, OutputType};
+use crate::errors::{
+ CrateNameDoesNotMatch, CrateNameEmpty, CrateNameInvalid, FileIsNotWriteable,
+ InvalidCharacterInCrateName,
+};
use crate::Session;
use rustc_ast as ast;
use rustc_span::symbol::sym;
@@ -30,11 +34,7 @@ pub fn out_filename(
/// read-only file. We should be consistent.
pub fn check_file_is_writeable(file: &Path, sess: &Session) {
if !is_writeable(file) {
- sess.fatal(&format!(
- "output file {} is not writeable -- check its \
- permissions",
- file.display()
- ));
+ sess.emit_fatal(FileIsNotWriteable { file });
}
}
@@ -61,11 +61,7 @@ pub fn find_crate_name(sess: &Session, attrs: &[ast::Attribute], input: &Input)
if let Some(ref s) = sess.opts.crate_name {
if let Some((attr, name)) = attr_crate_name {
if name.as_str() != s {
- let msg = format!(
- "`--crate-name` and `#[crate_name]` are \
- required to match, but `{s}` != `{name}`"
- );
- sess.span_err(attr.span, &msg);
+ sess.emit_err(CrateNameDoesNotMatch { span: attr.span, s, name });
}
}
return validate(s.clone(), None);
@@ -77,11 +73,7 @@ pub fn find_crate_name(sess: &Session, attrs: &[ast::Attribute], input: &Input)
if let Input::File(ref path) = *input {
if let Some(s) = path.file_stem().and_then(|s| s.to_str()) {
if s.starts_with('-') {
- let msg = format!(
- "crate names cannot start with a `-`, but \
- `{s}` has a leading hyphen"
- );
- sess.err(&msg);
+ sess.emit_err(CrateNameInvalid { s });
} else {
return validate(s.replace('-', "_"), None);
}
@@ -94,15 +86,9 @@ pub fn find_crate_name(sess: &Session, attrs: &[ast::Attribute], input: &Input)
pub fn validate_crate_name(sess: &Session, s: &str, sp: Option<Span>) {
let mut err_count = 0;
{
- let mut say = |s: &str| {
- match sp {
- Some(sp) => sess.span_err(sp, s),
- None => sess.err(s),
- };
- err_count += 1;
- };
if s.is_empty() {
- say("crate name must not be empty");
+ err_count += 1;
+ sess.emit_err(CrateNameEmpty { span: sp });
}
for c in s.chars() {
if c.is_alphanumeric() {
@@ -111,7 +97,8 @@ pub fn validate_crate_name(sess: &Session, s: &str, sp: Option<Span>) {
if c == '_' {
continue;
}
- say(&format!("invalid character `{c}` in crate name: `{s}`"));
+ err_count += 1;
+ sess.emit_err(InvalidCharacterInCrateName { span: sp, character: c, crate_name: s });
}
}
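
`find_crate_name` and `validate_crate_name` above together enforce a small set of rules: a name inferred from a file stem must not start with `-`, hyphens are mapped to `_`, and the final name must be non-empty and consist only of alphanumeric characters or `_`. A standalone sketch of those rules (not the rustc implementation, which reports each problem through the session diagnostics shown above):

// --- illustrative sketch, not part of the diff ---
fn crate_name_from_stem(stem: &str) -> Result<String, String> {
    if stem.starts_with('-') {
        return Err(format!("crate names cannot start with a `-`, but `{stem}` has a leading hyphen"));
    }
    let name = stem.replace('-', "_");
    if name.is_empty() {
        return Err("crate name must not be empty".to_string());
    }
    if let Some(c) = name.chars().find(|c| !c.is_alphanumeric() && *c != '_') {
        return Err(format!("invalid character `{c}` in crate name: `{name}`"));
    }
    Ok(name)
}

fn main() {
    assert_eq!(crate_name_from_stem("my-crate").as_deref(), Ok("my_crate"));
    assert!(crate_name_from_stem("-leading").is_err());
    assert!(crate_name_from_stem("bad name").is_err());
}
// --- end sketch ---
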
diff --git a/compiler/rustc_session/src/parse.rs b/compiler/rustc_session/src/parse.rs
index f31d52147..a199947eb 100644
--- a/compiler/rustc_session/src/parse.rs
+++ b/compiler/rustc_session/src/parse.rs
@@ -2,15 +2,17 @@
//! It also serves as an input to the parser itself.
use crate::config::CheckCfg;
-use crate::lint::{BufferedEarlyLint, BuiltinLintDiagnostics, Lint, LintId};
-use crate::SessionDiagnostic;
+use crate::errors::{FeatureDiagnosticForIssue, FeatureDiagnosticHelp, FeatureGateError};
+use crate::lint::{
+ builtin::UNSTABLE_SYNTAX_PRE_EXPANSION, BufferedEarlyLint, BuiltinLintDiagnostics, Lint, LintId,
+};
use rustc_ast::node_id::NodeId;
-use rustc_data_structures::fx::{FxHashMap, FxHashSet};
+use rustc_data_structures::fx::{FxHashMap, FxHashSet, FxIndexSet};
use rustc_data_structures::sync::{Lock, Lrc};
use rustc_errors::{emitter::SilentEmitter, ColorConfig, Handler};
use rustc_errors::{
- error_code, fallback_fluent_bundle, Applicability, Diagnostic, DiagnosticBuilder,
- DiagnosticMessage, ErrorGuaranteed, MultiSpan,
+ fallback_fluent_bundle, Diagnostic, DiagnosticBuilder, DiagnosticId, DiagnosticMessage,
+ EmissionGuarantee, ErrorGuaranteed, IntoDiagnostic, MultiSpan, Noted, StashKey,
};
use rustc_feature::{find_feature_issue, GateIssue, UnstableFeatures};
use rustc_span::edition::Edition;
@@ -18,11 +20,12 @@ use rustc_span::hygiene::ExpnId;
use rustc_span::source_map::{FilePathMapping, SourceMap};
use rustc_span::{Span, Symbol};
+use rustc_ast::attr::AttrIdGenerator;
use std::str;
/// The set of keys (and, optionally, values) that define the compilation
/// environment of the crate, used to drive conditional compilation.
-pub type CrateConfig = FxHashSet<(Symbol, Option<Symbol>)>;
+pub type CrateConfig = FxIndexSet<(Symbol, Option<Symbol>)>;
pub type CrateCheckConfig = CheckCfg<Symbol>;
/// Collected spans during parsing for places where a certain feature was
@@ -101,11 +104,60 @@ pub fn feature_err_issue<'a>(
issue: GateIssue,
explain: &str,
) -> DiagnosticBuilder<'a, ErrorGuaranteed> {
- let mut err = sess.span_diagnostic.struct_span_err_with_code(span, explain, error_code!(E0658));
+ let span = span.into();
+
+ // Cancel an earlier warning for this same error, if it exists.
+ if let Some(span) = span.primary_span() {
+ sess.span_diagnostic
+ .steal_diagnostic(span, StashKey::EarlySyntaxWarning)
+ .map(|err| err.cancel());
+ }
+
+ let mut err = sess.create_err(FeatureGateError { span, explain });
add_feature_diagnostics_for_issue(&mut err, sess, feature, issue);
err
}
+/// Construct a future incompatibility diagnostic for a feature gate.
+///
+/// This diagnostic is only a warning and *does not cause compilation to fail*.
+pub fn feature_warn<'a>(sess: &'a ParseSess, feature: Symbol, span: Span, explain: &str) {
+ feature_warn_issue(sess, feature, span, GateIssue::Language, explain);
+}
+
+/// Construct a future incompatibility diagnostic for a feature gate.
+///
+/// This diagnostic is only a warning and *does not cause compilation to fail*.
+///
+/// This variant allows you to control whether it is a library or language feature.
+/// Almost always, you want to use this for a language feature. If so, prefer `feature_warn`.
+#[allow(rustc::diagnostic_outside_of_impl)]
+#[allow(rustc::untranslatable_diagnostic)]
+pub fn feature_warn_issue<'a>(
+ sess: &'a ParseSess,
+ feature: Symbol,
+ span: Span,
+ issue: GateIssue,
+ explain: &str,
+) {
+ let mut err = sess.span_diagnostic.struct_span_warn(span, explain);
+ add_feature_diagnostics_for_issue(&mut err, sess, feature, issue);
+
+ // Decorate this as a future-incompatibility lint as in rustc_middle::lint::struct_lint_level
+ let lint = UNSTABLE_SYNTAX_PRE_EXPANSION;
+ let future_incompatible = lint.future_incompatible.as_ref().unwrap();
+ err.code(DiagnosticId::Lint {
+ name: lint.name_lower(),
+ has_future_breakage: false,
+ is_force_warn: false,
+ });
+ err.warn(lint.desc);
+ err.note(format!("for more information, see {}", future_incompatible.reference));
+
+ // A later feature_err call can steal and cancel this warning.
+ err.stash(span, StashKey::EarlySyntaxWarning);
+}
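The warning produced by `feature_warn_issue` is stashed under `StashKey::EarlySyntaxWarning`, and `feature_err_issue` above steals and cancels it before building the hard error, so a span never ends up with both a warning and an error for the same gate. A minimal sketch of a caller, assuming compiler-internal code that already holds a `ParseSess`, the feature `Symbol`, and the offending `Span` (the helper function below is illustrative, not part of this change):

```rust
// Sketch only: uses `feature_err` (defined earlier in this file) and the new
// `feature_warn`; the surrounding function is hypothetical.
use rustc_session::parse::{feature_err, feature_warn, ParseSess};
use rustc_span::{Span, Symbol};

fn gate_pre_expansion_syntax(sess: &ParseSess, feature: Symbol, span: Span, hard_error: bool) {
    if hard_error {
        // Steals and cancels any warning previously stashed for `span` under
        // `StashKey::EarlySyntaxWarning`, then emits the E0658 error.
        feature_err(sess, feature, span, "this syntax is unstable").emit();
    } else {
        // Only warns; the diagnostic is stashed so a later `feature_err` for
        // the same span can replace it instead of reporting twice.
        feature_warn(sess, feature, span, "this syntax is unstable");
    }
}
```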
+
/// Adds the diagnostics for a feature to an existing error.
pub fn add_feature_diagnostics<'a>(err: &mut Diagnostic, sess: &'a ParseSess, feature: Symbol) {
add_feature_diagnostics_for_issue(err, sess, feature, GateIssue::Language);
@@ -123,14 +175,12 @@ pub fn add_feature_diagnostics_for_issue<'a>(
issue: GateIssue,
) {
if let Some(n) = find_feature_issue(feature, issue) {
- err.note(&format!(
- "see issue #{n} <https://github.com/rust-lang/rust/issues/{n}> for more information"
- ));
+ err.subdiagnostic(FeatureDiagnosticForIssue { n });
}
// #23973: do not suggest `#![feature(...)]` if we are in beta/stable
if sess.unstable_features.is_nightly_build() {
- err.help(&format!("add `#![feature({feature})]` to the crate attributes to enable"));
+ err.subdiagnostic(FeatureDiagnosticHelp { feature });
}
}
@@ -169,6 +219,8 @@ pub struct ParseSess {
/// Spans passed to `proc_macro::quote_span`. Each span has a numerical
/// identifier represented by its position in the vector.
pub proc_macro_quoted_spans: Lock<Vec<Span>>,
+ /// Used to generate new `AttrId`s. Every `AttrId` is unique.
+ pub attr_id_generator: AttrIdGenerator,
}
impl ParseSess {
@@ -191,7 +243,7 @@ impl ParseSess {
Self {
span_diagnostic: handler,
unstable_features: UnstableFeatures::from_environment(None),
- config: FxHashSet::default(),
+ config: FxIndexSet::default(),
check_config: CrateCheckConfig::default(),
edition: ExpnId::root().expn_data().edition,
raw_identifier_spans: Lock::new(Vec::new()),
@@ -207,6 +259,7 @@ impl ParseSess {
type_ascription_path_suggestions: Default::default(),
assume_incomplete_release: false,
proc_macro_quoted_spans: Default::default(),
+ attr_id_generator: AttrIdGenerator::new(),
}
}
@@ -269,16 +322,6 @@ impl ParseSess {
});
}
- /// Extend an error with a suggestion to wrap an expression with parentheses to allow the
- /// parser to continue parsing the following operation as part of the same expression.
- pub fn expr_parentheses_needed(&self, err: &mut Diagnostic, span: Span) {
- err.multipart_suggestion(
- "parentheses are required to parse this as an expression",
- vec![(span.shrink_to_lo(), "(".to_string()), (span.shrink_to_hi(), ")".to_string())],
- Applicability::MachineApplicable,
- );
- }
-
pub fn save_proc_macro_span(&self, span: Span) -> usize {
let mut spans = self.proc_macro_quoted_spans.lock();
spans.push(span);
@@ -291,26 +334,48 @@ impl ParseSess {
pub fn create_err<'a>(
&'a self,
- err: impl SessionDiagnostic<'a>,
+ err: impl IntoDiagnostic<'a>,
) -> DiagnosticBuilder<'a, ErrorGuaranteed> {
- err.into_diagnostic(self)
+ err.into_diagnostic(&self.span_diagnostic)
}
- pub fn emit_err<'a>(&'a self, err: impl SessionDiagnostic<'a>) -> ErrorGuaranteed {
+ pub fn emit_err<'a>(&'a self, err: impl IntoDiagnostic<'a>) -> ErrorGuaranteed {
self.create_err(err).emit()
}
pub fn create_warning<'a>(
&'a self,
- warning: impl SessionDiagnostic<'a, ()>,
+ warning: impl IntoDiagnostic<'a, ()>,
) -> DiagnosticBuilder<'a, ()> {
- warning.into_diagnostic(self)
+ warning.into_diagnostic(&self.span_diagnostic)
}
- pub fn emit_warning<'a>(&'a self, warning: impl SessionDiagnostic<'a, ()>) {
+ pub fn emit_warning<'a>(&'a self, warning: impl IntoDiagnostic<'a, ()>) {
self.create_warning(warning).emit()
}
+ pub fn create_note<'a>(
+ &'a self,
+ note: impl IntoDiagnostic<'a, Noted>,
+ ) -> DiagnosticBuilder<'a, Noted> {
+ note.into_diagnostic(&self.span_diagnostic)
+ }
+
+ pub fn emit_note<'a>(&'a self, note: impl IntoDiagnostic<'a, Noted>) -> Noted {
+ self.create_note(note).emit()
+ }
+
+ pub fn create_fatal<'a>(
+ &'a self,
+ fatal: impl IntoDiagnostic<'a, !>,
+ ) -> DiagnosticBuilder<'a, !> {
+ fatal.into_diagnostic(&self.span_diagnostic)
+ }
+
+ pub fn emit_fatal<'a>(&'a self, fatal: impl IntoDiagnostic<'a, !>) -> ! {
+ self.create_fatal(fatal).emit()
+ }
+
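These wrappers now accept any `IntoDiagnostic` (the renamed `SessionDiagnostic`; see the trait removal in `session.rs` below) and hand it the underlying `Handler` rather than the `ParseSess`. A hedged sketch of what a hand-written impl of the new trait shape looks like; the struct and message are invented, and real compiler diagnostics should prefer the derive plus a Fluent slug:

```rust
// Sketch only: `MismatchedDelimiter` is hypothetical; note that the trait now
// receives `&Handler`, not `&ParseSess`.
use rustc_errors::{DiagnosticBuilder, ErrorGuaranteed, Handler, IntoDiagnostic};
use rustc_span::Span;

struct MismatchedDelimiter {
    span: Span,
}

impl<'a> IntoDiagnostic<'a> for MismatchedDelimiter {
    fn into_diagnostic(self, handler: &'a Handler) -> DiagnosticBuilder<'a, ErrorGuaranteed> {
        let mut diag = handler.struct_err("mismatched delimiter");
        diag.set_span(self.span);
        diag
    }
}

// With `sess: &ParseSess` in scope:
// sess.emit_err(MismatchedDelimiter { span });
```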
#[rustc_lint_diagnostics]
pub fn struct_err(
&self,
@@ -323,4 +388,17 @@ impl ParseSess {
pub fn struct_warn(&self, msg: impl Into<DiagnosticMessage>) -> DiagnosticBuilder<'_, ()> {
self.span_diagnostic.struct_warn(msg)
}
+
+ #[rustc_lint_diagnostics]
+ pub fn struct_fatal(&self, msg: impl Into<DiagnosticMessage>) -> DiagnosticBuilder<'_, !> {
+ self.span_diagnostic.struct_fatal(msg)
+ }
+
+ #[rustc_lint_diagnostics]
+ pub fn struct_diagnostic<G: EmissionGuarantee>(
+ &self,
+ msg: impl Into<DiagnosticMessage>,
+ ) -> DiagnosticBuilder<'_, G> {
+ self.span_diagnostic.struct_diagnostic(msg)
+ }
}
diff --git a/compiler/rustc_session/src/session.rs b/compiler/rustc_session/src/session.rs
index 9669287b3..100c66f63 100644
--- a/compiler/rustc_session/src/session.rs
+++ b/compiler/rustc_session/src/session.rs
@@ -2,6 +2,14 @@ use crate::cgu_reuse_tracker::CguReuseTracker;
use crate::code_stats::CodeStats;
pub use crate::code_stats::{DataTypeKind, FieldInfo, SizeKind, VariantInfo};
use crate::config::{self, CrateType, InstrumentCoverage, OptLevel, OutputType, SwitchWithOptPath};
+use crate::errors::{
+ CannotEnableCrtStaticLinux, CannotMixAndMatchSanitizers, LinkerPluginToWindowsNotSupported,
+ NotCircumventFeature, ProfileSampleUseFileDoesNotExist, ProfileUseFileDoesNotExist,
+ SanitizerCfiEnabled, SanitizerNotSupported, SanitizersNotSupported, SkippingConstChecks,
+ SplitDebugInfoUnstablePlatform, StackProtectorNotSupportedForTarget,
+ TargetRequiresUnwindTables, UnleashedFeatureHelp, UnstableVirtualFunctionElimination,
+ UnsupportedDwarfVersion,
+};
use crate::parse::{add_feature_diagnostics, ParseSess};
use crate::search_paths::{PathKind, SearchPath};
use crate::{filesearch, lint};
@@ -20,8 +28,8 @@ use rustc_errors::emitter::{Emitter, EmitterWriter, HumanReadableErrorType};
use rustc_errors::json::JsonEmitter;
use rustc_errors::registry::Registry;
use rustc_errors::{
- fallback_fluent_bundle, DiagnosticBuilder, DiagnosticId, DiagnosticMessage, EmissionGuarantee,
- ErrorGuaranteed, FluentBundle, LazyFallbackBundle, MultiSpan,
+ error_code, fallback_fluent_bundle, DiagnosticBuilder, DiagnosticId, DiagnosticMessage,
+ ErrorGuaranteed, FluentBundle, IntoDiagnostic, LazyFallbackBundle, MultiSpan, Noted,
};
use rustc_macros::HashStable_Generic;
pub use rustc_span::def_id::StableCrateId;
@@ -31,13 +39,12 @@ use rustc_span::{sym, SourceFileHashAlgorithm, Symbol};
use rustc_target::asm::InlineAsmArch;
use rustc_target::spec::{CodeModel, PanicStrategy, RelocModel, RelroLevel};
use rustc_target::spec::{
- SanitizerSet, SplitDebuginfo, StackProtector, Target, TargetTriple, TlsModel,
+ DebuginfoKind, SanitizerSet, SplitDebuginfo, StackProtector, Target, TargetTriple, TlsModel,
};
use std::cell::{self, RefCell};
use std::env;
use std::fmt;
-use std::io::Write;
use std::ops::{Div, Mul};
use std::path::{Path, PathBuf};
use std::str::FromStr;
@@ -110,6 +117,12 @@ impl Mul<usize> for Limit {
}
}
+impl rustc_errors::IntoDiagnosticArg for Limit {
+ fn into_diagnostic_arg(self) -> rustc_errors::DiagnosticArgValue<'static> {
+ self.to_string().into_diagnostic_arg()
+ }
+}
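`IntoDiagnosticArg` is what lets a `Limit` be interpolated directly into a Fluent message as a `{$limit}`-style argument; the impl simply delegates to the `String` conversion. The same pattern works for any displayable newtype, sketched here with an invented type:

```rust
// Sketch only: `Depth` is an invented newtype; anything that can render itself
// as a string can delegate to `String`'s existing `IntoDiagnosticArg` impl.
use rustc_errors::{DiagnosticArgValue, IntoDiagnosticArg};

struct Depth(usize);

impl IntoDiagnosticArg for Depth {
    fn into_diagnostic_arg(self) -> DiagnosticArgValue<'static> {
        self.0.to_string().into_diagnostic_arg()
    }
}
```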
+
#[derive(Clone, Copy, Debug, HashStable_Generic)]
pub struct Limits {
/// The maximum recursion limit for potentially infinitely recursive
@@ -210,15 +223,6 @@ pub struct PerfStats {
pub normalize_projection_ty: AtomicUsize,
}
-/// Trait implemented by error types. This should not be implemented manually. Instead, use
-/// `#[derive(SessionDiagnostic)]` -- see [rustc_macros::SessionDiagnostic].
-#[rustc_diagnostic_item = "SessionDiagnostic"]
-pub trait SessionDiagnostic<'a, T: EmissionGuarantee = ErrorGuaranteed> {
- /// Write out as a diagnostic out of `sess`.
- #[must_use]
- fn into_diagnostic(self, sess: &'a ParseSess) -> DiagnosticBuilder<'a, T>;
-}
-
impl Session {
pub fn miri_unleashed_feature(&self, span: Span, feature_gate: Option<Symbol>) {
self.miri_unleashed_features.lock().push((span, feature_gate));
@@ -229,25 +233,23 @@ impl Session {
if !unleashed_features.is_empty() {
let mut must_err = false;
// Create a diagnostic pointing at where things got unleashed.
- let mut diag = self.struct_warn("skipping const checks");
- for &(span, feature_gate) in unleashed_features.iter() {
- // FIXME: `span_label` doesn't do anything, so we use "help" as a hack.
- if let Some(gate) = feature_gate {
- diag.span_help(span, &format!("skipping check for `{gate}` feature"));
- // The unleash flag must *not* be used to just "hack around" feature gates.
- must_err = true;
- } else {
- diag.span_help(span, "skipping check that does not even have a feature gate");
- }
- }
- diag.emit();
+ self.emit_warning(SkippingConstChecks {
+ unleashed_features: unleashed_features
+ .iter()
+ .map(|(span, gate)| {
+ gate.map(|gate| {
+ must_err = true;
+ UnleashedFeatureHelp::Named { span: *span, gate }
+ })
+ .unwrap_or(UnleashedFeatureHelp::Unnamed { span: *span })
+ })
+ .collect(),
+ });
+
// If we should err, make sure we did.
if must_err && self.has_errors().is_none() {
// We have skipped a feature gate, and not run into other errors... reject.
- self.err(
- "`-Zunleash-the-miri-inside-of-you` may not be used to circumvent feature \
- gates, except when testing error paths in the CTFE engine",
- );
+ self.emit_err(NotCircumventFeature);
}
}
}
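The hand-rolled warning loop is replaced by one structured `SkippingConstChecks` diagnostic whose per-span helps are carried by an `UnleashedFeatureHelp` subdiagnostic enum. Their definitions live in `rustc_session::errors` (not shown in this hunk); reconstructed from the usage above, they would look roughly like the following, where the Fluent slugs and attribute details are guesses:

```rust
// Rough reconstruction, not the verbatim definitions: slugs and subdiagnostic
// attributes are assumptions based on the call site above.
use rustc_macros::{Diagnostic, Subdiagnostic};
use rustc_span::{Span, Symbol};

#[derive(Diagnostic)]
#[diag(session_skipping_const_checks)]
pub struct SkippingConstChecks {
    #[subdiagnostic]
    pub unleashed_features: Vec<UnleashedFeatureHelp>,
}

#[derive(Subdiagnostic)]
pub enum UnleashedFeatureHelp {
    #[help(session_unleashed_feature_help_named)]
    Named {
        #[primary_span]
        span: Span,
        gate: Symbol,
    },
    #[help(session_unleashed_feature_help_unnamed)]
    Unnamed {
        #[primary_span]
        span: Span,
    },
}
```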
@@ -457,31 +459,52 @@ impl Session {
}
pub fn create_err<'a>(
&'a self,
- err: impl SessionDiagnostic<'a>,
+ err: impl IntoDiagnostic<'a>,
) -> DiagnosticBuilder<'a, ErrorGuaranteed> {
self.parse_sess.create_err(err)
}
pub fn create_feature_err<'a>(
&'a self,
- err: impl SessionDiagnostic<'a>,
+ err: impl IntoDiagnostic<'a>,
feature: Symbol,
) -> DiagnosticBuilder<'a, ErrorGuaranteed> {
let mut err = self.parse_sess.create_err(err);
+ if err.code.is_none() {
+ err.code = std::option::Option::Some(error_code!(E0658));
+ }
add_feature_diagnostics(&mut err, &self.parse_sess, feature);
err
}
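`create_feature_err` now backfills error code `E0658` whenever the passed diagnostic did not set a code of its own, so feature-gate errors keep their conventional code without each struct opting in. A hedged sketch of a caller; the struct and its slug are invented, while `sym::dyn_star` is one of the feature symbols added later in this diff:

```rust
// Sketch only: `GatedSyntax` and its Fluent slug are hypothetical.
use rustc_macros::Diagnostic;
use rustc_span::{sym, Span};

#[derive(Diagnostic)]
#[diag(ast_passes_gated_syntax)] // no explicit `code = ...`, so E0658 is backfilled
struct GatedSyntax {
    #[primary_span]
    span: Span,
}

// With `sess: &Session` and `span: Span` in scope:
// sess.create_feature_err(GatedSyntax { span }, sym::dyn_star).emit();
```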
- pub fn emit_err<'a>(&'a self, err: impl SessionDiagnostic<'a>) -> ErrorGuaranteed {
+ pub fn emit_err<'a>(&'a self, err: impl IntoDiagnostic<'a>) -> ErrorGuaranteed {
self.parse_sess.emit_err(err)
}
pub fn create_warning<'a>(
&'a self,
- err: impl SessionDiagnostic<'a, ()>,
+ err: impl IntoDiagnostic<'a, ()>,
) -> DiagnosticBuilder<'a, ()> {
self.parse_sess.create_warning(err)
}
- pub fn emit_warning<'a>(&'a self, warning: impl SessionDiagnostic<'a, ()>) {
+ pub fn emit_warning<'a>(&'a self, warning: impl IntoDiagnostic<'a, ()>) {
self.parse_sess.emit_warning(warning)
}
+ pub fn create_note<'a>(
+ &'a self,
+ note: impl IntoDiagnostic<'a, Noted>,
+ ) -> DiagnosticBuilder<'a, Noted> {
+ self.parse_sess.create_note(note)
+ }
+ pub fn emit_note<'a>(&'a self, note: impl IntoDiagnostic<'a, Noted>) -> Noted {
+ self.parse_sess.emit_note(note)
+ }
+ pub fn create_fatal<'a>(
+ &'a self,
+ fatal: impl IntoDiagnostic<'a, !>,
+ ) -> DiagnosticBuilder<'a, !> {
+ self.parse_sess.create_fatal(fatal)
+ }
+ pub fn emit_fatal<'a>(&'a self, fatal: impl IntoDiagnostic<'a, !>) -> ! {
+ self.parse_sess.emit_fatal(fatal)
+ }
#[inline]
pub fn err_count(&self) -> usize {
self.diagnostic().err_count()
@@ -516,9 +539,13 @@ impl Session {
Err(ErrorGuaranteed::unchecked_claim_error_was_emitted())
}
}
+ #[allow(rustc::untranslatable_diagnostic)]
+ #[allow(rustc::diagnostic_outside_of_impl)]
pub fn span_warn<S: Into<MultiSpan>>(&self, sp: S, msg: impl Into<DiagnosticMessage>) {
self.diagnostic().span_warn(sp, msg)
}
+ #[allow(rustc::untranslatable_diagnostic)]
+ #[allow(rustc::diagnostic_outside_of_impl)]
pub fn span_warn_with_code<S: Into<MultiSpan>>(
&self,
sp: S,
@@ -567,6 +594,8 @@ impl Session {
) {
self.diagnostic().span_note_without_error(sp, msg)
}
+ #[allow(rustc::untranslatable_diagnostic)]
+ #[allow(rustc::diagnostic_outside_of_impl)]
pub fn struct_note_without_error(
&self,
msg: impl Into<DiagnosticMessage>,
@@ -584,10 +613,6 @@ impl Session {
self.parse_sess.source_map()
}
- pub fn time_passes(&self) -> bool {
- self.opts.time_passes()
- }
-
/// Returns `true` if internal lints should be added to the lint store - i.e. if
/// `-Zunstable-options` is provided and this isn't rustdoc (internal lints can trigger errors
/// to be emitted under rustdoc).
@@ -638,7 +663,7 @@ impl Session {
let found_positive = requested_features.clone().any(|r| r == "+crt-static");
// JUSTIFICATION: necessary use of crate_types directly (see FIXME below)
- #[cfg_attr(not(bootstrap), allow(rustc::bad_opt_access))]
+ #[allow(rustc::bad_opt_access)]
if found_positive || found_negative {
found_positive
} else if crate_type == Some(CrateType::ProcMacro)
@@ -661,8 +686,9 @@ impl Session {
)
}
+ /// Returns `true` if the target can use the current split debuginfo configuration.
pub fn target_can_use_split_dwarf(&self) -> bool {
- !self.target.is_like_windows && !self.target.is_like_osx
+ self.target.debuginfo_kind == DebuginfoKind::Dwarf
}
pub fn generate_proc_macro_decls_symbol(&self, stable_crate_id: StableCrateId) -> String {
@@ -894,7 +920,7 @@ impl Session {
}
// JUSTIFICATION: defn of the suggested wrapper fns
-#[cfg_attr(not(bootstrap), allow(rustc::bad_opt_access))]
+#[allow(rustc::bad_opt_access)]
impl Session {
pub fn verbose(&self) -> bool {
self.opts.unstable_opts.verbose
@@ -904,6 +930,10 @@ impl Session {
self.opts.unstable_opts.instrument_mcount
}
+ pub fn time_passes(&self) -> bool {
+ self.opts.unstable_opts.time_passes
+ }
+
pub fn time_llvm_passes(&self) -> bool {
self.opts.unstable_opts.time_llvm_passes
}
@@ -1174,18 +1204,17 @@ impl Session {
}
// JUSTIFICATION: part of session construction
-#[cfg_attr(not(bootstrap), allow(rustc::bad_opt_access))]
+#[allow(rustc::bad_opt_access)]
fn default_emitter(
sopts: &config::Options,
registry: rustc_errors::registry::Registry,
source_map: Lrc<SourceMap>,
bundle: Option<Lrc<FluentBundle>>,
fallback_bundle: LazyFallbackBundle,
- emitter_dest: Option<Box<dyn Write + Send>>,
) -> Box<dyn Emitter + sync::Send> {
let macro_backtrace = sopts.unstable_opts.macro_backtrace;
- match (sopts.error_format, emitter_dest) {
- (config::ErrorOutputType::HumanReadable(kind), dst) => {
+ match sopts.error_format {
+ config::ErrorOutputType::HumanReadable(kind) => {
let (short, color_config) = kind.unzip();
if let HumanReadableErrorType::AnnotateSnippet(_) = kind {
@@ -1198,33 +1227,20 @@ fn default_emitter(
);
Box::new(emitter.ui_testing(sopts.unstable_opts.ui_testing))
} else {
- let emitter = match dst {
- None => EmitterWriter::stderr(
- color_config,
- Some(source_map),
- bundle,
- fallback_bundle,
- short,
- sopts.unstable_opts.teach,
- sopts.diagnostic_width,
- macro_backtrace,
- ),
- Some(dst) => EmitterWriter::new(
- dst,
- Some(source_map),
- bundle,
- fallback_bundle,
- short,
- false, // no teach messages when writing to a buffer
- false, // no colors when writing to a buffer
- None, // no diagnostic width
- macro_backtrace,
- ),
- };
+ let emitter = EmitterWriter::stderr(
+ color_config,
+ Some(source_map),
+ bundle,
+ fallback_bundle,
+ short,
+ sopts.unstable_opts.teach,
+ sopts.diagnostic_width,
+ macro_backtrace,
+ );
Box::new(emitter.ui_testing(sopts.unstable_opts.ui_testing))
}
}
- (config::ErrorOutputType::Json { pretty, json_rendered }, None) => Box::new(
+ config::ErrorOutputType::Json { pretty, json_rendered } => Box::new(
JsonEmitter::stderr(
Some(registry),
source_map,
@@ -1237,36 +1253,16 @@ fn default_emitter(
)
.ui_testing(sopts.unstable_opts.ui_testing),
),
- (config::ErrorOutputType::Json { pretty, json_rendered }, Some(dst)) => Box::new(
- JsonEmitter::new(
- dst,
- Some(registry),
- source_map,
- bundle,
- fallback_bundle,
- pretty,
- json_rendered,
- sopts.diagnostic_width,
- macro_backtrace,
- )
- .ui_testing(sopts.unstable_opts.ui_testing),
- ),
}
}
-pub enum DiagnosticOutput {
- Default,
- Raw(Box<dyn Write + Send>),
-}
-
// JUSTIFICATION: literally session construction
-#[cfg_attr(not(bootstrap), allow(rustc::bad_opt_access))]
+#[allow(rustc::bad_opt_access)]
pub fn build_session(
sopts: config::Options,
local_crate_source_file: Option<PathBuf>,
bundle: Option<Lrc<rustc_errors::FluentBundle>>,
registry: rustc_errors::registry::Registry,
- diagnostics_output: DiagnosticOutput,
driver_lint_caps: FxHashMap<lint::LintId, lint::Level>,
file_loader: Option<Box<dyn FileLoader + Send + Sync + 'static>>,
target_override: Option<Target>,
@@ -1277,18 +1273,11 @@ pub fn build_session(
let warnings_allow = sopts
.lint_opts
.iter()
- .filter(|&&(ref key, _)| *key == "warnings")
- .map(|&(_, ref level)| *level == lint::Allow)
- .last()
- .unwrap_or(false);
+ .rfind(|&&(ref key, _)| *key == "warnings")
+ .map_or(false, |&(_, level)| level == lint::Allow);
let cap_lints_allow = sopts.lint_cap.map_or(false, |cap| cap == lint::Allow);
let can_emit_warnings = !(warnings_allow || cap_lints_allow);
- let write_dest = match diagnostics_output {
- DiagnosticOutput::Default => None,
- DiagnosticOutput::Raw(write) => Some(write),
- };
-
let sysroot = match &sopts.maybe_sysroot {
Some(sysroot) => sysroot.clone(),
None => filesearch::get_or_default_sysroot(),
@@ -1321,8 +1310,7 @@ pub fn build_session(
rustc_errors::DEFAULT_LOCALE_RESOURCES,
sopts.unstable_opts.translate_directionality_markers,
);
- let emitter =
- default_emitter(&sopts, registry, source_map.clone(), bundle, fallback_bundle, write_dest);
+ let emitter = default_emitter(&sopts, registry, source_map.clone(), bundle, fallback_bundle);
let span_diagnostic = rustc_errors::Handler::with_emitter_and_flags(
emitter,
@@ -1382,8 +1370,7 @@ pub fn build_session(
CguReuseTracker::new_disabled()
};
- let prof =
- SelfProfilerRef::new(self_profiler, sopts.time_passes(), sopts.unstable_opts.time_passes);
+ let prof = SelfProfilerRef::new(self_profiler, sopts.unstable_opts.time_passes);
let ctfe_backtrace = Lock::new(match env::var("RUSTC_CTFE_BACKTRACE") {
Ok(ref val) if val == "immediate" => CtfeBacktrace::Immediate,
@@ -1437,7 +1424,7 @@ pub fn build_session(
/// If it is useful to have a Session available already for validating a commandline argument, you
/// can do so here.
// JUSTIFICATION: needs to access args to validate them
-#[cfg_attr(not(bootstrap), allow(rustc::bad_opt_access))]
+#[allow(rustc::bad_opt_access)]
fn validate_commandline_args_with_session_available(sess: &Session) {
// Since we don't know if code in an rlib will be linked to statically or
// dynamically downstream, rustc generates `__imp_` symbols that help linkers
@@ -1450,40 +1437,28 @@ fn validate_commandline_args_with_session_available(sess: &Session) {
&& sess.opts.cg.prefer_dynamic
&& sess.target.is_like_windows
{
- sess.err(
- "Linker plugin based LTO is not supported together with \
- `-C prefer-dynamic` when targeting Windows-like targets",
- );
+ sess.emit_err(LinkerPluginToWindowsNotSupported);
}
// Make sure that any given profiling data actually exists so LLVM can't
// decide to silently skip PGO.
if let Some(ref path) = sess.opts.cg.profile_use {
if !path.exists() {
- sess.err(&format!(
- "File `{}` passed to `-C profile-use` does not exist.",
- path.display()
- ));
+ sess.emit_err(ProfileUseFileDoesNotExist { path });
}
}
// Do the same for sample profile data.
if let Some(ref path) = sess.opts.unstable_opts.profile_sample_use {
if !path.exists() {
- sess.err(&format!(
- "File `{}` passed to `-C profile-sample-use` does not exist.",
- path.display()
- ));
+ sess.emit_err(ProfileSampleUseFileDoesNotExist { path });
}
}
// Unwind tables cannot be disabled if the target requires them.
if let Some(include_uwtables) = sess.opts.cg.force_unwind_tables {
if sess.target.requires_uwtable && !include_uwtables {
- sess.err(
- "target requires unwind tables, they cannot be disabled with \
- `-C force-unwind-tables=no`.",
- );
+ sess.emit_err(TargetRequiresUnwindTables);
}
}
@@ -1493,56 +1468,56 @@ fn validate_commandline_args_with_session_available(sess: &Session) {
match unsupported_sanitizers.into_iter().count() {
0 => {}
1 => {
- sess.err(&format!(
- "{} sanitizer is not supported for this target",
- unsupported_sanitizers
- ));
+ sess.emit_err(SanitizerNotSupported { us: unsupported_sanitizers.to_string() });
}
_ => {
- sess.err(&format!(
- "{} sanitizers are not supported for this target",
- unsupported_sanitizers
- ));
+ sess.emit_err(SanitizersNotSupported { us: unsupported_sanitizers.to_string() });
}
}
// Cannot mix and match sanitizers.
let mut sanitizer_iter = sess.opts.unstable_opts.sanitizer.into_iter();
if let (Some(first), Some(second)) = (sanitizer_iter.next(), sanitizer_iter.next()) {
- sess.err(&format!("`-Zsanitizer={first}` is incompatible with `-Zsanitizer={second}`"));
+ sess.emit_err(CannotMixAndMatchSanitizers {
+ first: first.to_string(),
+ second: second.to_string(),
+ });
}
// Cannot enable crt-static with sanitizers on Linux
if sess.crt_static(None) && !sess.opts.unstable_opts.sanitizer.is_empty() {
- sess.err(
- "sanitizer is incompatible with statically linked libc, \
- disable it using `-C target-feature=-crt-static`",
- );
+ sess.emit_err(CannotEnableCrtStaticLinux);
}
// LLVM CFI and VFE both require LTO.
if sess.lto() != config::Lto::Fat {
if sess.is_sanitizer_cfi_enabled() {
- sess.err("`-Zsanitizer=cfi` requires `-Clto`");
+ sess.emit_err(SanitizerCfiEnabled);
}
if sess.opts.unstable_opts.virtual_function_elimination {
- sess.err("`-Zvirtual-function-elimination` requires `-Clto`");
+ sess.emit_err(UnstableVirtualFunctionElimination);
}
}
if sess.opts.unstable_opts.stack_protector != StackProtector::None {
if !sess.target.options.supports_stack_protector {
- sess.warn(&format!(
- "`-Z stack-protector={}` is not supported for target {} and will be ignored",
- sess.opts.unstable_opts.stack_protector, sess.opts.target_triple
- ))
+ sess.emit_warning(StackProtectorNotSupportedForTarget {
+ stack_protector: sess.opts.unstable_opts.stack_protector,
+ target_triple: &sess.opts.target_triple,
+ });
}
}
if let Some(dwarf_version) = sess.opts.unstable_opts.dwarf_version {
if dwarf_version > 5 {
- sess.err(&format!("requested DWARF version {} is greater than 5", dwarf_version));
+ sess.emit_err(UnsupportedDwarfVersion { dwarf_version });
}
}
+
+ if !sess.target.options.supported_split_debuginfo.contains(&sess.split_debuginfo())
+ && !sess.opts.unstable_opts.unstable_options
+ {
+ sess.emit_err(SplitDebugInfoUnstablePlatform { debuginfo: sess.split_debuginfo() });
+ }
}
/// Holds data on the current incremental compilation session, if there is one.
@@ -1586,14 +1561,20 @@ fn early_error_handler(output: config::ErrorOutputType) -> rustc_errors::Handler
rustc_errors::Handler::with_emitter(true, None, emitter)
}
+#[allow(rustc::untranslatable_diagnostic)]
+#[allow(rustc::diagnostic_outside_of_impl)]
pub fn early_error_no_abort(output: config::ErrorOutputType, msg: &str) -> ErrorGuaranteed {
early_error_handler(output).struct_err(msg).emit()
}
+#[allow(rustc::untranslatable_diagnostic)]
+#[allow(rustc::diagnostic_outside_of_impl)]
pub fn early_error(output: config::ErrorOutputType, msg: &str) -> ! {
early_error_handler(output).struct_fatal(msg).emit()
}
+#[allow(rustc::untranslatable_diagnostic)]
+#[allow(rustc::diagnostic_outside_of_impl)]
pub fn early_warn(output: config::ErrorOutputType, msg: &str) {
early_error_handler(output).struct_warn(msg).emit()
}
diff --git a/compiler/rustc_session/src/utils.rs b/compiler/rustc_session/src/utils.rs
index 9a4f6f9f9..e65b6891e 100644
--- a/compiler/rustc_session/src/utils.rs
+++ b/compiler/rustc_session/src/utils.rs
@@ -53,6 +53,17 @@ impl NativeLibKind {
NativeLibKind::RawDylib | NativeLibKind::Unspecified | NativeLibKind::LinkArg => false,
}
}
+
+ pub fn is_statically_included(&self) -> bool {
+ matches!(self, NativeLibKind::Static { .. })
+ }
+
+ pub fn is_dllimport(&self) -> bool {
+ matches!(
+ self,
+ NativeLibKind::Dylib { .. } | NativeLibKind::RawDylib | NativeLibKind::Unspecified
+ )
+ }
}
#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Encodable, Decodable)]
diff --git a/compiler/rustc_smir/src/lib.rs b/compiler/rustc_smir/src/lib.rs
index 5c7aaf35b..3e93c6bba 100644
--- a/compiler/rustc_smir/src/lib.rs
+++ b/compiler/rustc_smir/src/lib.rs
@@ -11,6 +11,8 @@
test(attr(allow(unused_variables), deny(warnings)))
)]
#![cfg_attr(not(feature = "default"), feature(rustc_private))]
+#![deny(rustc::untranslatable_diagnostic)]
+#![deny(rustc::diagnostic_outside_of_impl)]
pub mod mir;
diff --git a/compiler/rustc_smir/src/mir.rs b/compiler/rustc_smir/src/mir.rs
index 855605b1a..887e65729 100644
--- a/compiler/rustc_smir/src/mir.rs
+++ b/compiler/rustc_smir/src/mir.rs
@@ -1,10 +1,10 @@
+pub use crate::very_unstable::hir::ImplicitSelfKind;
pub use crate::very_unstable::middle::mir::{
visit::MutVisitor, AggregateKind, AssertKind, BasicBlock, BasicBlockData, BinOp, BindingForm,
BlockTailInfo, Body, BorrowKind, CastKind, ClearCrossCrate, Constant, ConstantKind,
- CopyNonOverlapping, Coverage, FakeReadCause, Field, GeneratorInfo, ImplicitSelfKind,
- InlineAsmOperand, Local, LocalDecl, LocalInfo, LocalKind, Location, MirPhase, MirSource,
- NullOp, Operand, Place, PlaceRef, ProjectionElem, ProjectionKind, Promoted, RetagKind, Rvalue,
- Safety, SourceInfo, SourceScope, SourceScopeData, SourceScopeLocalData, Statement,
- StatementKind, UnOp, UserTypeProjection, UserTypeProjections, VarBindingForm, VarDebugInfo,
- VarDebugInfoContents,
+ CopyNonOverlapping, Coverage, FakeReadCause, Field, GeneratorInfo, InlineAsmOperand, Local,
+ LocalDecl, LocalInfo, LocalKind, Location, MirPhase, MirSource, NullOp, Operand, Place,
+ PlaceRef, ProjectionElem, ProjectionKind, Promoted, RetagKind, Rvalue, Safety, SourceInfo,
+ SourceScope, SourceScopeData, SourceScopeLocalData, Statement, StatementKind, UnOp,
+ UserTypeProjection, UserTypeProjections, VarBindingForm, VarDebugInfo, VarDebugInfoContents,
};
diff --git a/compiler/rustc_span/Cargo.toml b/compiler/rustc_span/Cargo.toml
index 7227b193f..48a2ab0f9 100644
--- a/compiler/rustc_span/Cargo.toml
+++ b/compiler/rustc_span/Cargo.toml
@@ -4,7 +4,6 @@ version = "0.0.0"
edition = "2021"
[lib]
-doctest = false
[dependencies]
rustc_serialize = { path = "../rustc_serialize" }
diff --git a/compiler/rustc_span/src/def_id.rs b/compiler/rustc_span/src/def_id.rs
index a1533fe46..bbeabdb55 100644
--- a/compiler/rustc_span/src/def_id.rs
+++ b/compiler/rustc_span/src/def_id.rs
@@ -218,7 +218,9 @@ impl<D: Decoder> Decodable<D> for DefIndex {
/// index and a def index.
///
/// You can create a `DefId` from a `LocalDefId` using `local_def_id.to_def_id()`.
-#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Copy)]
+#[derive(Clone, PartialEq, Eq, Copy)]
+// Don't derive order on 64-bit big-endian, so we can be consistent regardless of field order.
+#[cfg_attr(not(all(target_pointer_width = "64", target_endian = "big")), derive(PartialOrd, Ord))]
// On below-64 bit systems we can simply use the derived `Hash` impl
#[cfg_attr(not(target_pointer_width = "64"), derive(Hash))]
#[repr(C)]
@@ -260,6 +262,22 @@ impl Hash for DefId {
}
}
+// Implement the same comparison as derived with the other field order.
+#[cfg(all(target_pointer_width = "64", target_endian = "big"))]
+impl Ord for DefId {
+ #[inline]
+ fn cmp(&self, other: &DefId) -> std::cmp::Ordering {
+ Ord::cmp(&(self.index, self.krate), &(other.index, other.krate))
+ }
+}
+#[cfg(all(target_pointer_width = "64", target_endian = "big"))]
+impl PartialOrd for DefId {
+ #[inline]
+ fn partial_cmp(&self, other: &DefId) -> Option<std::cmp::Ordering> {
+ Some(Ord::cmp(self, other))
+ }
+}
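The manual impls exist because `DefId`'s fields are laid out in a different order on 64-bit big-endian targets, and a derived `Ord` compares fields in declaration order; comparing the `(index, krate)` tuple keeps the ordering identical on every target. A small standalone illustration of how derived ordering depends on field order (the types below are stand-ins, not the real `DefId`):

```rust
// Derived `Ord` is lexicographic in field declaration order, so the same data
// laid out with the fields swapped can compare in the opposite direction.
#[derive(PartialEq, Eq, PartialOrd, Ord)]
struct IndexFirst { index: u32, krate: u32 }

#[derive(PartialEq, Eq, PartialOrd, Ord)]
struct KrateFirst { krate: u32, index: u32 }

fn main() {
    let (a, b) = (IndexFirst { index: 1, krate: 9 }, IndexFirst { index: 2, krate: 0 });
    let (c, d) = (KrateFirst { krate: 9, index: 1 }, KrateFirst { krate: 0, index: 2 });
    assert!(a < b); // `index` decides first
    assert!(c > d); // `krate` decides first: the opposite answer for the same data
}
```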
+
impl DefId {
/// Makes a local `DefId` from the given `DefIndex`.
#[inline]
@@ -305,6 +323,12 @@ impl DefId {
}
}
+impl From<LocalDefId> for DefId {
+ fn from(local: LocalDefId) -> DefId {
+ local.to_def_id()
+ }
+}
+
impl<E: Encoder> Encodable<E> for DefId {
default fn encode(&self, s: &mut E) {
self.krate.encode(s);
@@ -331,7 +355,7 @@ impl fmt::Debug for DefId {
}
}
-rustc_data_structures::define_id_collections!(DefIdMap, DefIdSet, DefId);
+rustc_data_structures::define_id_collections!(DefIdMap, DefIdSet, DefIdMapEntry, DefId);
/// A `LocalDefId` is equivalent to a `DefId` with `krate == LOCAL_CRATE`. Since
/// we encode this information in the type, we can ensure at compile time that
@@ -393,7 +417,12 @@ impl<D: Decoder> Decodable<D> for LocalDefId {
}
}
-rustc_data_structures::define_id_collections!(LocalDefIdMap, LocalDefIdSet, LocalDefId);
+rustc_data_structures::define_id_collections!(
+ LocalDefIdMap,
+ LocalDefIdSet,
+ LocalDefIdMapEntry,
+ LocalDefId
+);
impl<CTX: HashStableContext> HashStable<CTX> for DefId {
#[inline]
diff --git a/compiler/rustc_span/src/hygiene.rs b/compiler/rustc_span/src/hygiene.rs
index e169d3c7c..191186af6 100644
--- a/compiler/rustc_span/src/hygiene.rs
+++ b/compiler/rustc_span/src/hygiene.rs
@@ -41,7 +41,6 @@ use rustc_macros::HashStable_Generic;
use rustc_serialize::{Decodable, Decoder, Encodable, Encoder};
use std::fmt;
use std::hash::Hash;
-use tracing::*;
/// A `SyntaxContext` represents a chain of pairs `(ExpnId, Transparency)` named "marks".
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
@@ -945,12 +944,6 @@ pub struct ExpnData {
/// internally without forcing the whole crate to opt-in
/// to them.
pub allow_internal_unstable: Option<Lrc<[Symbol]>>,
- /// Whether the macro is allowed to use `unsafe` internally
- /// even if the user crate has `#![forbid(unsafe_code)]`.
- pub allow_internal_unsafe: bool,
- /// Enables the macro helper hack (`ident!(...)` -> `$crate::ident!(...)`)
- /// for a given macro.
- pub local_inner_macros: bool,
/// Edition of the crate in which the macro is defined.
pub edition: Edition,
/// The `DefId` of the macro being invoked,
@@ -958,6 +951,13 @@ pub struct ExpnData {
pub macro_def_id: Option<DefId>,
/// The normal module (`mod`) in which the expanded macro was defined.
pub parent_module: Option<DefId>,
+ /// Suppresses the `unsafe_code` lint for code produced by this macro.
+ pub allow_internal_unsafe: bool,
+ /// Enables the macro helper hack (`ident!(...)` -> `$crate::ident!(...)`) for this macro.
+ pub local_inner_macros: bool,
+ /// Should debuginfo for the macro be collapsed to the outermost expansion site (in other
+ /// words, was the macro definition annotated with `#[collapse_debuginfo]`)?
+ pub collapse_debuginfo: bool,
}
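The new `collapse_debuginfo` flag records whether the macro definition carried the (still unstable) `#[collapse_debuginfo]` attribute, so later codegen can attribute the expansion's debug locations to the call site. A hedged sketch of an annotated macro, assuming a nightly compiler and the `collapse_debuginfo` feature gate named in the `symbols!` additions later in this diff:

```rust
// Sketch only: requires nightly with the feature enabled.
#![feature(collapse_debuginfo)]

// When stepping through this in a debugger, code expanded from the macro is
// attributed to the call site rather than to lines inside the macro body.
#[collapse_debuginfo]
macro_rules! zero_check {
    ($e:expr) => {
        if $e == 0 {
            panic!("zero operand");
        }
    };
}

fn main() {
    zero_check!(1);
}
```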
impl !PartialEq for ExpnData {}
@@ -970,11 +970,12 @@ impl ExpnData {
call_site: Span,
def_site: Span,
allow_internal_unstable: Option<Lrc<[Symbol]>>,
- allow_internal_unsafe: bool,
- local_inner_macros: bool,
edition: Edition,
macro_def_id: Option<DefId>,
parent_module: Option<DefId>,
+ allow_internal_unsafe: bool,
+ local_inner_macros: bool,
+ collapse_debuginfo: bool,
) -> ExpnData {
ExpnData {
kind,
@@ -982,12 +983,13 @@ impl ExpnData {
call_site,
def_site,
allow_internal_unstable,
- allow_internal_unsafe,
- local_inner_macros,
edition,
macro_def_id,
parent_module,
disambiguator: 0,
+ allow_internal_unsafe,
+ local_inner_macros,
+ collapse_debuginfo,
}
}
@@ -1005,12 +1007,13 @@ impl ExpnData {
call_site,
def_site: DUMMY_SP,
allow_internal_unstable: None,
- allow_internal_unsafe: false,
- local_inner_macros: false,
edition,
macro_def_id,
parent_module,
disambiguator: 0,
+ allow_internal_unsafe: false,
+ local_inner_macros: false,
+ collapse_debuginfo: false,
}
}
diff --git a/compiler/rustc_span/src/lib.rs b/compiler/rustc_span/src/lib.rs
index cf3069281..322c7104b 100644
--- a/compiler/rustc_span/src/lib.rs
+++ b/compiler/rustc_span/src/lib.rs
@@ -15,11 +15,12 @@
#![doc(html_root_url = "https://doc.rust-lang.org/nightly/nightly-rustc/")]
#![feature(array_windows)]
-#![feature(let_else)]
#![feature(if_let_guard)]
#![feature(negative_impls)]
#![feature(min_specialization)]
#![feature(rustc_attrs)]
+#![deny(rustc::untranslatable_diagnostic)]
+#![deny(rustc::diagnostic_outside_of_impl)]
#[macro_use]
extern crate rustc_macros;
@@ -74,8 +75,6 @@ use md5::Md5;
use sha1::Sha1;
use sha2::Sha256;
-use tracing::debug;
-
#[cfg(test)]
mod tests;
@@ -299,7 +298,11 @@ impl From<PathBuf> for FileName {
#[derive(Clone, Copy, Eq, PartialEq, Hash, Debug)]
pub enum FileNameDisplayPreference {
+ /// Display the path after the application of rewrite rules provided via `--remap-path-prefix`.
+ /// This is appropriate for paths that get embedded into files produced by the compiler.
Remapped,
+ /// Display the path before the application of rewrite rules provided via `--remap-path-prefix`.
+ /// This is appropriate for use in user-facing output (such as diagnostics).
Local,
}
@@ -533,9 +536,6 @@ impl Span {
self.data().with_hi(hi)
}
#[inline]
- pub fn ctxt(self) -> SyntaxContext {
- self.data_untracked().ctxt
- }
pub fn eq_ctxt(self, other: Span) -> bool {
self.data_untracked().ctxt == other.data_untracked().ctxt
}
@@ -558,12 +558,25 @@ impl Span {
self.data_untracked().is_dummy()
}
- /// Returns `true` if this span comes from a macro or desugaring.
+ /// Returns `true` if this span comes from any kind of macro, desugaring or inlining.
#[inline]
pub fn from_expansion(self) -> bool {
self.ctxt() != SyntaxContext::root()
}
+ /// Returns `true` if `span` originates in a macro's expansion where debuginfo should be
+ /// collapsed.
+ pub fn in_macro_expansion_with_collapse_debuginfo(self) -> bool {
+ let outer_expn = self.ctxt().outer_expn_data();
+ matches!(outer_expn.kind, ExpnKind::Macro(..)) && outer_expn.collapse_debuginfo
+ }
+
+ /// Returns `true` if this span comes from MIR inlining.
+ pub fn is_inlined(self) -> bool {
+ let outer_expn = self.ctxt().outer_expn_data();
+ matches!(outer_expn.kind, ExpnKind::Inlined)
+ }
+
/// Returns `true` if `span` originates in a derive-macro's expansion.
pub fn in_derive_expansion(self) -> bool {
matches!(self.ctxt().outer_expn_data().kind, ExpnKind::Macro(MacroKind::Derive, _))
@@ -662,6 +675,16 @@ impl Span {
Some(self)
}
+ /// Like `find_ancestor_inside`, but specifically for when spans might not
+    /// overlap. Take care when using this, and prefer `find_ancestor_inside`
+ /// when you know that the spans are nested (modulo macro expansion).
+ pub fn find_ancestor_in_same_ctxt(mut self, other: Span) -> Option<Span> {
+ while !Span::eq_ctxt(self, other) {
+ self = self.parent_callsite()?;
+ }
+ Some(self)
+ }
+
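A hedged sketch of how compiler code might use the new helper: walk a span up through its macro call sites until it shares a syntax context with another span, typically so a suggestion lands in code the user actually wrote. The function and names below are illustrative:

```rust
// Sketch only: `expr_span` and `stmt_span` are whatever spans a diagnostic has
// in hand; no nesting relationship is required, unlike `find_ancestor_inside`.
use rustc_span::Span;

fn span_for_suggestion(expr_span: Span, stmt_span: Span) -> Span {
    expr_span.find_ancestor_in_same_ctxt(stmt_span).unwrap_or(expr_span)
}
```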
/// Edition of the crate from which this span came.
pub fn edition(self) -> edition::Edition {
self.ctxt().edition()
@@ -1094,10 +1117,8 @@ pub enum ExternalSource {
Unneeded,
Foreign {
kind: ExternalSourceKind,
- /// This SourceFile's byte-offset within the source_map of its original crate.
- original_start_pos: BytePos,
- /// The end of this SourceFile within the source_map of its original crate.
- original_end_pos: BytePos,
+ /// Index of the file inside metadata.
+ metadata_index: u32,
},
}
@@ -1616,11 +1637,7 @@ impl SourceFile {
/// number. If the source_file is empty or the position is located before the
/// first line, `None` is returned.
pub fn lookup_line(&self, pos: BytePos) -> Option<usize> {
- self.lines(|lines| match lines.binary_search(&pos) {
- Ok(idx) => Some(idx),
- Err(0) => None,
- Err(idx) => Some(idx - 1),
- })
+ self.lines(|lines| lines.partition_point(|x| x <= &pos).checked_sub(1))
}
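`partition_point` counts how many line-start offsets are `<= pos`, and `checked_sub(1)` steps back to the containing line, returning `None` only when `pos` lies before the first line, exactly like the old `binary_search` match. A standalone, runnable illustration on plain numbers:

```rust
// Equivalent lookup on a sorted list of line-start byte offsets.
fn lookup_line(lines: &[u32], pos: u32) -> Option<usize> {
    lines.partition_point(|&x| x <= pos).checked_sub(1)
}

fn main() {
    let lines = [5u32, 10, 25]; // lines start at bytes 5, 10 and 25
    assert_eq!(lookup_line(&lines, 3), None); // before the first line
    assert_eq!(lookup_line(&lines, 5), Some(0));
    assert_eq!(lookup_line(&lines, 9), Some(0));
    assert_eq!(lookup_line(&lines, 10), Some(1));
    assert_eq!(lookup_line(&lines, 100), Some(2));
}
```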
pub fn line_bounds(&self, line_index: usize) -> Range<BytePos> {
diff --git a/compiler/rustc_span/src/source_map.rs b/compiler/rustc_span/src/source_map.rs
index 28381157d..f9566eeee 100644
--- a/compiler/rustc_span/src/source_map.rs
+++ b/compiler/rustc_span/src/source_map.rs
@@ -23,7 +23,6 @@ use std::{convert::TryFrom, unreachable};
use std::fs;
use std::io;
-use tracing::debug;
#[cfg(test)]
mod tests;
@@ -336,7 +335,7 @@ impl SourceMap {
mut file_local_non_narrow_chars: Vec<NonNarrowChar>,
mut file_local_normalized_pos: Vec<NormalizedPos>,
original_start_pos: BytePos,
- original_end_pos: BytePos,
+ metadata_index: u32,
) -> Lrc<SourceFile> {
let start_pos = self
.allocate_address_space(source_len)
@@ -381,8 +380,7 @@ impl SourceMap {
src_hash,
external_src: Lock::new(ExternalSource::Foreign {
kind: ExternalSourceKind::AbsentOk,
- original_start_pos,
- original_end_pos,
+ metadata_index,
}),
start_pos,
end_pos,
@@ -473,7 +471,7 @@ impl SourceMap {
let hi = self.lookup_char_pos(sp.hi());
let offset = self.lookup_char_pos(relative_to.lo());
- if lo.file.name != offset.file.name {
+ if lo.file.name != offset.file.name || !relative_to.contains(sp) {
return self.span_to_embeddable_string(sp);
}
@@ -722,7 +720,7 @@ impl SourceMap {
})
}
- /// Extends the given `Span` to just after the next occurrence of `c`.
+ /// Extends the given `Span` to just before the next occurrence of `c`.
pub fn span_extend_to_next_char(&self, sp: Span, c: char, accept_newlines: bool) -> Span {
if let Ok(next_source) = self.span_to_next_source(sp) {
let next_source = next_source.split(c).next().unwrap_or("");
@@ -855,28 +853,56 @@ impl SourceMap {
}
/// Returns a new span representing the next character after the end-point of this span.
+ /// Special cases:
+    /// - if the span is a dummy one, the same span is returned
+    /// - if `next_point` reaches the end of the source, a span with `lo == hi` is returned
+    /// - multi-byte characters are respected
pub fn next_point(&self, sp: Span) -> Span {
if sp.is_dummy() {
return sp;
}
let start_of_next_point = sp.hi().0;
- let width = self.find_width_of_character_at_span(sp.shrink_to_hi(), true);
- // If the width is 1, then the next span should point to the same `lo` and `hi`. However,
- // in the case of a multibyte character, where the width != 1, the next span should
+ let width = self.find_width_of_character_at_span(sp, true);
+ if width == 0 {
+ return Span::new(sp.hi(), sp.hi(), sp.ctxt(), None);
+ }
+        // If the width is 1, the next span contains only the single character just after the current end.
+ // However, in the case of a multibyte character, where the width != 1, the next span should
// span multiple bytes to include the whole character.
let end_of_next_point =
- start_of_next_point.checked_add(width - 1).unwrap_or(start_of_next_point);
+ start_of_next_point.checked_add(width).unwrap_or(start_of_next_point);
- let end_of_next_point = BytePos(cmp::max(sp.lo().0 + 1, end_of_next_point));
+ let end_of_next_point = BytePos(cmp::max(start_of_next_point + 1, end_of_next_point));
Span::new(BytePos(start_of_next_point), end_of_next_point, sp.ctxt(), None)
}
+    /// Returns a new span for looking ahead to the next non-whitespace character, or to a
+    /// specified expected character.
+    /// If `expect` is `None`, the span of the first non-whitespace character is returned.
+    /// If `expect` is present, the span of the first occurrence of `expect` is returned.
+    /// Otherwise, the span where the lookahead limit was reached is returned.
+ pub fn span_look_ahead(&self, span: Span, expect: Option<&str>, limit: Option<usize>) -> Span {
+ let mut sp = span;
+        for _ in 0..limit.unwrap_or(100) {
+ sp = self.next_point(sp);
+ if let Ok(ref snippet) = self.span_to_snippet(sp) {
+ if expect.map_or(false, |es| snippet == es) {
+ break;
+ }
+ if expect.is_none() && snippet.chars().any(|c| !c.is_whitespace()) {
+ break;
+ }
+ }
+ }
+ sp
+ }
+
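A hedged sketch of how a diagnostic might use the new helper, for example to check whether a `;` follows an expression before making a suggestion; if the expected text is not found within the limit, the returned span is simply where the scan stopped, so the snippet check below also covers that case. The function is illustrative:

```rust
// Sketch only: assumes a `&SourceMap` and the span of an expression.
use rustc_span::source_map::SourceMap;
use rustc_span::Span;

fn is_followed_by_semicolon(sm: &SourceMap, expr_span: Span) -> bool {
    // Look ahead (at most 32 `next_point` steps) for a `;`.
    let ahead = sm.span_look_ahead(expr_span, Some(";"), Some(32));
    sm.span_to_snippet(ahead).map_or(false, |s| s == ";")
}
```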
/// Finds the width of the character, either before or after the end of provided span,
/// depending on the `forwards` parameter.
fn find_width_of_character_at_span(&self, sp: Span, forwards: bool) -> u32 {
let sp = sp.data();
- if sp.lo == sp.hi {
+
+ if sp.lo == sp.hi && !forwards {
debug!("find_width_of_character_at_span: early return empty span");
return 1;
}
@@ -910,9 +936,9 @@ impl SourceMap {
let source_len = (local_begin.sf.end_pos - local_begin.sf.start_pos).to_usize();
debug!("find_width_of_character_at_span: source_len=`{:?}`", source_len);
// Ensure indexes are also not malformed.
- if start_index > end_index || end_index > source_len {
+ if start_index > end_index || end_index > source_len - 1 {
debug!("find_width_of_character_at_span: source indexes are malformed");
- return 1;
+ return 0;
}
let src = local_begin.sf.external_src.borrow();
@@ -983,93 +1009,6 @@ impl SourceMap {
self.files().iter().fold(0, |a, f| a + f.count_lines())
}
- pub fn generate_fn_name_span(&self, span: Span) -> Option<Span> {
- let prev_span = self.span_extend_to_prev_str(span, "fn", true, true)?;
- if let Ok(snippet) = self.span_to_snippet(prev_span) {
- debug!(
- "generate_fn_name_span: span={:?}, prev_span={:?}, snippet={:?}",
- span, prev_span, snippet
- );
-
- if snippet.is_empty() {
- return None;
- };
-
- let len = snippet
- .find(|c: char| !c.is_alphanumeric() && c != '_')
- .expect("no label after fn");
- Some(prev_span.with_hi(BytePos(prev_span.lo().0 + len as u32)))
- } else {
- None
- }
- }
-
- /// Takes the span of a type parameter in a function signature and try to generate a span for
- /// the function name (with generics) and a new snippet for this span with the pointed type
- /// parameter as a new local type parameter.
- ///
- /// For instance:
- /// ```rust,ignore (pseudo-Rust)
- /// // Given span
- /// fn my_function(param: T)
- /// // ^ Original span
- ///
- /// // Result
- /// fn my_function(param: T)
- /// // ^^^^^^^^^^^ Generated span with snippet `my_function<T>`
- /// ```
- ///
- /// Attention: The method used is very fragile since it essentially duplicates the work of the
- /// parser. If you need to use this function or something similar, please consider updating the
- /// `SourceMap` functions and this function to something more robust.
- pub fn generate_local_type_param_snippet(&self, span: Span) -> Option<(Span, String)> {
- // Try to extend the span to the previous "fn" keyword to retrieve the function
- // signature.
- if let Some(sugg_span) = self.span_extend_to_prev_str(span, "fn", false, true) {
- if let Ok(snippet) = self.span_to_snippet(sugg_span) {
- // Consume the function name.
- let mut offset = snippet
- .find(|c: char| !c.is_alphanumeric() && c != '_')
- .expect("no label after fn");
-
- // Consume the generics part of the function signature.
- let mut bracket_counter = 0;
- let mut last_char = None;
- for c in snippet[offset..].chars() {
- match c {
- '<' => bracket_counter += 1,
- '>' => bracket_counter -= 1,
- '(' => {
- if bracket_counter == 0 {
- break;
- }
- }
- _ => {}
- }
- offset += c.len_utf8();
- last_char = Some(c);
- }
-
- // Adjust the suggestion span to encompass the function name with its generics.
- let sugg_span = sugg_span.with_hi(BytePos(sugg_span.lo().0 + offset as u32));
-
- // Prepare the new suggested snippet to append the type parameter that triggered
- // the error in the generics of the function signature.
- let mut new_snippet = if last_char == Some('>') {
- format!("{}, ", &snippet[..(offset - '>'.len_utf8())])
- } else {
- format!("{}<", &snippet[..offset])
- };
- new_snippet
- .push_str(&self.span_to_snippet(span).unwrap_or_else(|_| "T".to_string()));
- new_snippet.push('>');
-
- return Some((sugg_span, new_snippet));
- }
- }
-
- None
- }
pub fn ensure_source_file_source_present(&self, source_file: Lrc<SourceFile>) -> bool {
source_file.add_external_src(|| {
match source_file.name {
@@ -1148,13 +1087,13 @@ impl FilePathMapping {
return remap_path_prefix(&self.mapping, path);
- #[instrument(level = "debug", skip(mapping))]
+ #[instrument(level = "debug", skip(mapping), ret)]
fn remap_path_prefix(mapping: &[(PathBuf, PathBuf)], path: PathBuf) -> (PathBuf, bool) {
// NOTE: We are iterating over the mapping entries from last to first
// because entries specified later on the command line should
// take precedence.
for &(ref from, ref to) in mapping.iter().rev() {
- debug!("Trying to apply {:?} => {:?}", from, to);
+ debug!("Trying to apply {from:?} => {to:?}");
if let Ok(rest) = path.strip_prefix(from) {
let remapped = if rest.as_os_str().is_empty() {
@@ -1168,15 +1107,15 @@ impl FilePathMapping {
} else {
to.join(rest)
};
- debug!("Match - remapped {:?} => {:?}", path, remapped);
+ debug!("Match - remapped");
return (remapped, true);
} else {
- debug!("No match - prefix {:?} does not match {:?}", from, path);
+ debug!("No match - prefix {from:?} does not match");
}
}
- debug!("Path {:?} was not remapped", path);
+ debug!("not remapped");
(path, false)
}
}
diff --git a/compiler/rustc_span/src/source_map/tests.rs b/compiler/rustc_span/src/source_map/tests.rs
index be827cea8..1fd81018f 100644
--- a/compiler/rustc_span/src/source_map/tests.rs
+++ b/compiler/rustc_span/src/source_map/tests.rs
@@ -251,7 +251,7 @@ fn t10() {
non_narrow_chars,
normalized_pos,
start_pos,
- end_pos,
+ 0,
);
assert!(
@@ -479,3 +479,48 @@ fn path_prefix_remapping_expand_to_absolute() {
RealFileName::Remapped { local_path: None, virtual_name: path("XYZ/src/main.rs") }
);
}
+
+#[test]
+fn test_next_point() {
+ let sm = SourceMap::new(FilePathMapping::empty());
+ sm.new_source_file(PathBuf::from("example.rs").into(), "a…b".to_string());
+
+ // Dummy spans don't advance.
+ let span = DUMMY_SP;
+ let span = sm.next_point(span);
+ assert_eq!(span.lo().0, 0);
+ assert_eq!(span.hi().0, 0);
+
+    // Advancing the span respects multi-byte characters.
+ let span = Span::with_root_ctxt(BytePos(0), BytePos(1));
+ assert_eq!(sm.span_to_snippet(span), Ok("a".to_string()));
+ let span = sm.next_point(span);
+ assert_eq!(sm.span_to_snippet(span), Ok("…".to_string()));
+ assert_eq!(span.lo().0, 1);
+ assert_eq!(span.hi().0, 4);
+
+ // An empty span pointing just before a multi-byte character should
+ // advance to contain the multi-byte character.
+ let span = Span::with_root_ctxt(BytePos(1), BytePos(1));
+ let span = sm.next_point(span);
+ assert_eq!(span.lo().0, 1);
+ assert_eq!(span.hi().0, 4);
+
+ let span = Span::with_root_ctxt(BytePos(1), BytePos(4));
+ let span = sm.next_point(span);
+ assert_eq!(span.lo().0, 4);
+ assert_eq!(span.hi().0, 5);
+
+ // A non-empty span at the last byte should advance to create an empty
+ // span pointing at the end of the file.
+ let span = Span::with_root_ctxt(BytePos(4), BytePos(5));
+ let span = sm.next_point(span);
+ assert_eq!(span.lo().0, 5);
+ assert_eq!(span.hi().0, 5);
+
+ // Empty span pointing just past the last byte.
+ let span = Span::with_root_ctxt(BytePos(5), BytePos(5));
+ let span = sm.next_point(span);
+ assert_eq!(span.lo().0, 5);
+ assert_eq!(span.hi().0, 5);
+}
diff --git a/compiler/rustc_span/src/span_encoding.rs b/compiler/rustc_span/src/span_encoding.rs
index 3ee329e97..b3de67415 100644
--- a/compiler/rustc_span/src/span_encoding.rs
+++ b/compiler/rustc_span/src/span_encoding.rs
@@ -28,12 +28,17 @@ use rustc_data_structures::fx::FxIndexSet;
/// Inline (compressed) format:
/// - `span.base_or_index == span_data.lo`
/// - `span.len_or_tag == len == span_data.hi - span_data.lo` (must be `<= MAX_LEN`)
-/// - `span.ctxt == span_data.ctxt` (must be `<= MAX_CTXT`)
+/// - `span.ctxt_or_tag == span_data.ctxt` (must be `<= MAX_CTXT`)
+///
+/// Interned format with inline `SyntaxContext`:
+/// - `span.base_or_index == index` (indexes into the interner table)
+/// - `span.len_or_tag == LEN_TAG` (high bit set, all other bits are zero)
+/// - `span.ctxt_or_tag == span_data.ctxt` (must be `<= MAX_CTXT`)
///
/// Interned format:
/// - `span.base_or_index == index` (indexes into the interner table)
/// - `span.len_or_tag == LEN_TAG` (high bit set, all other bits are zero)
-/// - `span.ctxt == 0`
+/// - `span.ctxt_or_tag == CTXT_TAG`
///
/// The inline form uses 0 for the tag value (rather than 1) so that we don't
/// need to mask out the tag bit when getting the length, and so that the
@@ -50,10 +55,10 @@ use rustc_data_structures::fx::FxIndexSet;
/// at 3 or 4, and then it drops off quickly from 8 onwards. 15 bits is enough
/// for 99.99%+ of cases, but larger values (sometimes 20+ bits) might occur
/// dozens of times in a typical crate.
-/// - `ctxt` is 16 bits in `Span` and 32 bits in `SpanData`, which means that
+/// - `ctxt_or_tag` is 16 bits in `Span` and 32 bits in `SpanData`, which means that
/// large `ctxt` values will cause interning. The number of bits needed for
/// `ctxt` values depend partly on the crate size and partly on the form of
-/// the code. No crates in `rustc-perf` need more than 15 bits for `ctxt`,
+/// the code. No crates in `rustc-perf` need more than 15 bits for `ctxt_or_tag`,
/// but larger crates might need more than 16 bits.
///
/// In order to reliably use parented spans in incremental compilation,
@@ -65,15 +70,16 @@ use rustc_data_structures::fx::FxIndexSet;
pub struct Span {
base_or_index: u32,
len_or_tag: u16,
- ctxt_or_zero: u16,
+ ctxt_or_tag: u16,
}
const LEN_TAG: u16 = 0b1000_0000_0000_0000;
const MAX_LEN: u32 = 0b0111_1111_1111_1111;
-const MAX_CTXT: u32 = 0b1111_1111_1111_1111;
+const CTXT_TAG: u32 = 0b1111_1111_1111_1111;
+const MAX_CTXT: u32 = CTXT_TAG - 1;
/// Dummy span, both position and length are zero, syntax context is zero as well.
-pub const DUMMY_SP: Span = Span { base_or_index: 0, len_or_tag: 0, ctxt_or_zero: 0 };
+pub const DUMMY_SP: Span = Span { base_or_index: 0, len_or_tag: 0, ctxt_or_tag: 0 };
impl Span {
#[inline]
@@ -91,12 +97,13 @@ impl Span {
if len <= MAX_LEN && ctxt2 <= MAX_CTXT && parent.is_none() {
// Inline format.
- Span { base_or_index: base, len_or_tag: len as u16, ctxt_or_zero: ctxt2 as u16 }
+ Span { base_or_index: base, len_or_tag: len as u16, ctxt_or_tag: ctxt2 as u16 }
} else {
// Interned format.
let index =
with_span_interner(|interner| interner.intern(&SpanData { lo, hi, ctxt, parent }));
- Span { base_or_index: index, len_or_tag: LEN_TAG, ctxt_or_zero: 0 }
+ let ctxt_or_tag = if ctxt2 <= MAX_CTXT { ctxt2 } else { CTXT_TAG } as u16;
+ Span { base_or_index: index, len_or_tag: LEN_TAG, ctxt_or_tag }
}
}
@@ -119,16 +126,29 @@ impl Span {
SpanData {
lo: BytePos(self.base_or_index),
hi: BytePos(self.base_or_index + self.len_or_tag as u32),
- ctxt: SyntaxContext::from_u32(self.ctxt_or_zero as u32),
+ ctxt: SyntaxContext::from_u32(self.ctxt_or_tag as u32),
parent: None,
}
} else {
// Interned format.
- debug_assert!(self.ctxt_or_zero == 0);
let index = self.base_or_index;
with_span_interner(|interner| interner.spans[index as usize])
}
}
+
+ /// This function is used as a fast path when decoding the full `SpanData` is not necessary.
+ #[inline]
+ pub fn ctxt(self) -> SyntaxContext {
+ let ctxt_or_tag = self.ctxt_or_tag as u32;
+ if ctxt_or_tag <= MAX_CTXT {
+ // Inline format or interned format with inline ctxt.
+ SyntaxContext::from_u32(ctxt_or_tag)
+ } else {
+ // Interned format.
+ let index = self.base_or_index;
+ with_span_interner(|interner| interner.spans[index as usize].ctxt)
+ }
+ }
}
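With `ctxt_or_tag`, an interned span whose context still fits in 16 bits can answer `ctxt()` without touching the interner; only contexts above `MAX_CTXT` pay for the lookup. A standalone sketch of the tag arithmetic with plain integers (constants copied from above, field packing omitted):

```rust
// Illustration of the inline-vs-interned decision and the ctxt tag; the real
// `Span` packs these into u32/u16 fields.
const MAX_LEN: u32 = 0b0111_1111_1111_1111;
const CTXT_TAG: u32 = 0b1111_1111_1111_1111;
const MAX_CTXT: u32 = CTXT_TAG - 1;

fn encode(len: u32, ctxt: u32, has_parent: bool) -> (&'static str, u32) {
    if len <= MAX_LEN && ctxt <= MAX_CTXT && !has_parent {
        ("inline", ctxt)
    } else if ctxt <= MAX_CTXT {
        // Interned payload, but `ctxt()` can still be read without the interner.
        ("interned + inline ctxt", ctxt)
    } else {
        ("interned", CTXT_TAG)
    }
}

fn main() {
    assert_eq!(encode(10, 3, false), ("inline", 3));
    assert_eq!(encode(40_000, 3, false), ("interned + inline ctxt", 3));
    assert_eq!(encode(10, 70_000, false), ("interned", CTXT_TAG));
}
```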
#[derive(Default)]
diff --git a/compiler/rustc_span/src/symbol.rs b/compiler/rustc_span/src/symbol.rs
index 791160ff6..7f16da52b 100644
--- a/compiler/rustc_span/src/symbol.rs
+++ b/compiler/rustc_span/src/symbol.rs
@@ -125,7 +125,7 @@ symbols! {
Symbols {
AcqRel,
Acquire,
- AddSubdiagnostic,
+ AddToDiagnostic,
Alignment,
Any,
Arc,
@@ -157,6 +157,7 @@ symbols! {
BTreeSet,
BinaryHeap,
Borrow,
+ BorrowMut,
Break,
C,
CStr,
@@ -209,10 +210,12 @@ symbols! {
Implied,
Input,
Into,
+ IntoDiagnostic,
IntoFuture,
IntoIterator,
IoRead,
IoWrite,
+ IpAddr,
IrTyKind,
Is,
ItemContext,
@@ -221,7 +224,9 @@ symbols! {
Left,
LinkedList,
LintPass,
+ LocalKey,
Mutex,
+ MutexGuard,
N,
NonZeroI128,
NonZeroI16,
@@ -262,6 +267,7 @@ symbols! {
Rc,
Ready,
Receiver,
+ RefCell,
Relaxed,
Release,
Result,
@@ -270,9 +276,11 @@ symbols! {
Rust,
RustcDecodable,
RustcEncodable,
+ RwLock,
+ RwLockReadGuard,
+ RwLockWriteGuard,
Send,
SeqCst,
- SessionDiagnostic,
SliceIndex,
Some,
String,
@@ -280,6 +288,7 @@ symbols! {
StructuralPartialEq,
SubdiagnosticMessage,
Sync,
+ T,
Target,
ToOwned,
ToString,
@@ -334,6 +343,7 @@ symbols! {
alias,
align,
align_offset,
+ alignment,
alignstack,
all,
alloc,
@@ -389,6 +399,7 @@ symbols! {
assume_init,
async_await,
async_closure,
+ async_fn_in_trait,
atomic,
atomic_mod,
atomics,
@@ -440,6 +451,7 @@ symbols! {
call_once,
caller_location,
capture_disjoint_fields,
+ cause,
cdylib,
ceilf32,
ceilf64,
@@ -481,6 +493,7 @@ symbols! {
cmse_nonsecure_entry,
coerce_unsized,
cold,
+ collapse_debuginfo,
column,
column_macro,
compare_and_swap,
@@ -504,7 +517,6 @@ symbols! {
const_deallocate,
const_eval_limit,
const_eval_select,
- const_eval_select_ct,
const_evaluatable_checked,
const_extern_fn,
const_fn,
@@ -644,6 +656,7 @@ symbols! {
dropck_parametricity,
dylib,
dyn_metadata,
+ dyn_star,
dyn_trait,
e,
edition_macro_pats,
@@ -656,7 +669,6 @@ symbols! {
emit_struct,
emit_struct_field,
enable,
- enclosing_scope,
encode,
end,
env,
@@ -759,7 +771,7 @@ symbols! {
gen_future,
gen_kill,
generator,
- generator_return,
+ generator_clone,
generator_state,
generators,
generic_arg_infer,
@@ -774,6 +786,7 @@ symbols! {
globs,
gt,
half_open_range_patterns,
+ half_open_range_patterns_in_slices,
hash,
hexagon_target_feature,
hidden,
@@ -802,6 +815,7 @@ symbols! {
impl_trait_in_bindings,
implied_by,
import,
+ import_name_type,
import_shadowing,
imported_main,
in_band_lifetimes,
@@ -817,6 +831,7 @@ symbols! {
infer_outlives_requirements,
infer_static_outlives_requirements,
inherent_associated_types,
+ inherit,
inlateout,
inline,
inline_const,
@@ -859,6 +874,7 @@ symbols! {
lib,
libc,
lifetime,
+ lifetimes,
likely,
line,
line_macro,
@@ -980,7 +996,18 @@ symbols! {
never_type,
never_type_fallback,
new,
+ new_binary,
+ new_debug,
+ new_display,
+ new_lower_exp,
+ new_lower_hex,
+ new_octal,
+ new_pointer,
new_unchecked,
+ new_upper_exp,
+ new_upper_hex,
+ new_v1,
+ new_v1_formatted,
next,
nll,
no,
@@ -1056,6 +1083,7 @@ symbols! {
panic_unwind,
panicking,
param_attrs,
+ parent_label,
partial_cmp,
partial_ord,
passes,
@@ -1110,14 +1138,15 @@ symbols! {
profiler_builtins,
profiler_runtime,
ptr,
- ptr_guaranteed_eq,
- ptr_guaranteed_ne,
+ ptr_guaranteed_cmp,
+ ptr_mask,
ptr_null,
ptr_null_mut,
ptr_offset_from,
ptr_offset_from_unsigned,
pub_macro_rules,
pub_restricted,
+ public,
pure,
pushpop_unsafe,
qreg,
@@ -1170,8 +1199,10 @@ symbols! {
repr_packed,
repr_simd,
repr_transparent,
+ require,
residual,
result,
+ return_position_impl_trait_in_trait,
rhs,
rintf32,
rintf64,
@@ -1201,7 +1232,6 @@ symbols! {
rust_oom,
rustc,
rustc_allocator,
- rustc_allocator_nounwind,
rustc_allocator_zeroed,
rustc_allow_const_fn_unstable,
rustc_allow_incoherent_impl,
@@ -1217,6 +1247,7 @@ symbols! {
rustc_conversion_suggestion,
rustc_deallocator,
rustc_def_path,
+ rustc_default_body_unstable,
rustc_diagnostic_item,
rustc_diagnostic_macros,
rustc_dirty,
@@ -1226,6 +1257,7 @@ symbols! {
rustc_dump_program_clauses,
rustc_dump_user_substs,
rustc_dump_vtable,
+ rustc_effective_visibility,
rustc_error,
rustc_evaluate_where_clauses,
rustc_expected_cgu_reuse,
@@ -1246,6 +1278,7 @@ symbols! {
rustc_mir,
rustc_must_implement_one_of,
rustc_nonnull_optimization_guaranteed,
+ rustc_nounwind,
rustc_object_lifetime_default,
rustc_on_unimplemented,
rustc_outlives,
@@ -1265,6 +1298,7 @@ symbols! {
rustc_reallocator,
rustc_regions,
rustc_reservation_impl,
+ rustc_safe_intrinsic,
rustc_serialize,
rustc_skip_array_during_method_dispatch,
rustc_specialization_trait,
@@ -1279,9 +1313,11 @@ symbols! {
rustc_variance,
rustdoc,
rustdoc_internals,
+ rustdoc_missing_doc_code_examples,
rustfmt,
rvalue_static_promotion,
s,
+ safety,
sanitize,
sanitizer_runtime,
saturating_add,
@@ -1295,6 +1331,8 @@ symbols! {
should_panic,
shr,
shr_assign,
+ sig_dfl,
+ sig_ign,
simd,
simd_add,
simd_and,
@@ -1302,9 +1340,11 @@ symbols! {
simd_as,
simd_bitmask,
simd_cast,
+ simd_cast_ptr,
simd_ceil,
simd_div,
simd_eq,
+ simd_expose_addr,
simd_extract,
simd_fabs,
simd_fcos,
@@ -1320,6 +1360,7 @@ symbols! {
simd_fmin,
simd_fpow,
simd_fpowi,
+ simd_from_exposed_addr,
simd_fsin,
simd_fsqrt,
simd_gather,
@@ -1464,6 +1505,7 @@ symbols! {
trait_alias,
trait_upcasting,
transmute,
+ transmute_opts,
transmute_trait,
transparent,
transparent_enums,
@@ -1480,6 +1522,7 @@ symbols! {
tuple,
tuple_from_req,
tuple_indexing,
+ tuple_trait,
two_phase,
ty,
type_alias_enum_variants,
@@ -1513,6 +1556,7 @@ symbols! {
unit,
universal_impl_trait,
unix,
+ unix_sigpipe,
unlikely,
unmarked_api,
unpin,
@@ -1558,6 +1602,7 @@ symbols! {
va_list,
va_start,
val,
+ validity,
values,
var,
variant_count,
@@ -1661,6 +1706,7 @@ impl Ident {
/// macro (e.g., `macro` or `macro_rules!` items) and stay different if they came from different
/// non-transparent macros.
/// Technically, this operation strips all transparent marks from ident's syntactic context.
+ #[inline]
pub fn normalize_to_macro_rules(self) -> Ident {
Ident::new(self.name, self.span.normalize_to_macro_rules())
}
@@ -1676,6 +1722,7 @@ impl Ident {
}
impl PartialEq for Ident {
+ #[inline]
fn eq(&self, rhs: &Self) -> bool {
self.name == rhs.name && self.span.eq_ctxt(rhs.span)
}
@@ -1801,6 +1848,11 @@ impl Symbol {
Symbol(SymbolIndex::from_u32(n))
}
+    /// For use by `Decoder`s only.
+ pub fn new_from_decoded(n: u32) -> Self {
+ Self::new(n)
+ }
+
/// Maps a string to its interned representation.
pub fn intern(string: &str) -> Self {
with_session_globals(|session_globals| session_globals.symbol_interner.intern(string))
@@ -1849,15 +1901,22 @@ impl fmt::Display for Symbol {
}
}
+// takes advantage of `str::to_string` specialization
+impl ToString for Symbol {
+ fn to_string(&self) -> String {
+ self.as_str().to_string()
+ }
+}
+
impl<S: Encoder> Encodable<S> for Symbol {
- fn encode(&self, s: &mut S) {
+ default fn encode(&self, s: &mut S) {
s.emit_str(self.as_str());
}
}
impl<D: Decoder> Decodable<D> for Symbol {
#[inline]
- fn decode(d: &mut D) -> Symbol {
+ default fn decode(d: &mut D) -> Symbol {
Symbol::intern(&d.read_str())
}
}
@@ -2025,6 +2084,11 @@ impl Symbol {
pub fn can_be_raw(self) -> bool {
self != kw::Empty && self != kw::Underscore && !self.is_path_segment_keyword()
}
+
+ /// Was this symbol interned in the compiler's `symbols!` macro?
+ pub fn is_preinterned(self) -> bool {
+ self.as_u32() < PREINTERNED_SYMBOLS_COUNT
+ }
}
impl Ident {
diff --git a/compiler/rustc_symbol_mangling/Cargo.toml b/compiler/rustc_symbol_mangling/Cargo.toml
index b104a40c2..2a29ad6a9 100644
--- a/compiler/rustc_symbol_mangling/Cargo.toml
+++ b/compiler/rustc_symbol_mangling/Cargo.toml
@@ -4,7 +4,6 @@ version = "0.0.0"
edition = "2021"
[lib]
-doctest = false
[dependencies]
bitflags = "1.2.1"
@@ -18,3 +17,5 @@ rustc_hir = { path = "../rustc_hir" }
rustc_target = { path = "../rustc_target" }
rustc_data_structures = { path = "../rustc_data_structures" }
rustc_session = { path = "../rustc_session" }
+rustc_macros = { path = "../rustc_macros" }
+rustc_errors = { path = "../rustc_errors" }
diff --git a/compiler/rustc_symbol_mangling/src/errors.rs b/compiler/rustc_symbol_mangling/src/errors.rs
new file mode 100644
index 000000000..f4d0751f7
--- /dev/null
+++ b/compiler/rustc_symbol_mangling/src/errors.rs
@@ -0,0 +1,34 @@
+//! Errors emitted by symbol_mangling.
+
+use rustc_errors::{DiagnosticArgValue, IntoDiagnosticArg};
+use rustc_macros::Diagnostic;
+use rustc_span::Span;
+
+#[derive(Diagnostic)]
+#[diag(symbol_mangling_test_output)]
+pub struct TestOutput {
+ #[primary_span]
+ pub span: Span,
+ pub kind: Kind,
+ pub content: String,
+}
+
+pub enum Kind {
+ SymbolName,
+ Demangling,
+ DemanglingAlt,
+ DefPath,
+}
+
+impl IntoDiagnosticArg for Kind {
+ fn into_diagnostic_arg(self) -> DiagnosticArgValue<'static> {
+ let kind = match self {
+ Kind::SymbolName => "symbol-name",
+ Kind::Demangling => "demangling",
+ Kind::DemanglingAlt => "demangling-alt",
+ Kind::DefPath => "def-path",
+ }
+ .into();
+ DiagnosticArgValue::Str(kind)
+ }
+}
diff --git a/compiler/rustc_symbol_mangling/src/legacy.rs b/compiler/rustc_symbol_mangling/src/legacy.rs
index 9241fd82c..46c5fe78f 100644
--- a/compiler/rustc_symbol_mangling/src/legacy.rs
+++ b/compiler/rustc_symbol_mangling/src/legacy.rs
@@ -6,8 +6,6 @@ use rustc_middle::ty::subst::{GenericArg, GenericArgKind};
use rustc_middle::ty::{self, Instance, Ty, TyCtxt, TypeVisitable};
use rustc_middle::util::common::record_time;
-use tracing::debug;
-
use std::fmt::{self, Write};
use std::mem::{self, discriminant};
diff --git a/compiler/rustc_symbol_mangling/src/lib.rs b/compiler/rustc_symbol_mangling/src/lib.rs
index 5fc992023..62f44a480 100644
--- a/compiler/rustc_symbol_mangling/src/lib.rs
+++ b/compiler/rustc_symbol_mangling/src/lib.rs
@@ -91,10 +91,15 @@
#![feature(never_type)]
#![recursion_limit = "256"]
#![allow(rustc::potential_query_instability)]
+#![deny(rustc::untranslatable_diagnostic)]
+#![deny(rustc::diagnostic_outside_of_impl)]
#[macro_use]
extern crate rustc_middle;
+#[macro_use]
+extern crate tracing;
+
use rustc_hir::def::DefKind;
use rustc_hir::def_id::{CrateNum, LOCAL_CRATE};
use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrFlags;
@@ -105,11 +110,10 @@ use rustc_middle::ty::subst::SubstsRef;
use rustc_middle::ty::{self, Instance, TyCtxt};
use rustc_session::config::SymbolManglingVersion;
-use tracing::debug;
-
mod legacy;
mod v0;
+pub mod errors;
pub mod test;
pub mod typeid;
diff --git a/compiler/rustc_symbol_mangling/src/test.rs b/compiler/rustc_symbol_mangling/src/test.rs
index 7249ce04c..150459ce0 100644
--- a/compiler/rustc_symbol_mangling/src/test.rs
+++ b/compiler/rustc_symbol_mangling/src/test.rs
@@ -4,6 +4,7 @@
//! def-path. This is used for unit testing the code that generates
//! paths etc in all kinds of annoying scenarios.
+use crate::errors::{Kind, TestOutput};
use rustc_hir::def_id::LocalDefId;
use rustc_middle::ty::print::with_no_trimmed_paths;
use rustc_middle::ty::{subst::InternalSubsts, Instance, TyCtxt};
@@ -25,19 +26,19 @@ pub fn report_symbol_names(tcx: TyCtxt<'_>) {
let crate_items = tcx.hir_crate_items(());
for id in crate_items.items() {
- symbol_names.process_attrs(id.def_id);
+ symbol_names.process_attrs(id.owner_id.def_id);
}
for id in crate_items.trait_items() {
- symbol_names.process_attrs(id.def_id);
+ symbol_names.process_attrs(id.owner_id.def_id);
}
for id in crate_items.impl_items() {
- symbol_names.process_attrs(id.def_id);
+ symbol_names.process_attrs(id.owner_id.def_id);
}
for id in crate_items.foreign_items() {
- symbol_names.process_attrs(id.def_id);
+ symbol_names.process_attrs(id.owner_id.def_id);
}
})
}
@@ -59,16 +60,31 @@ impl SymbolNamesTest<'_> {
tcx.erase_regions(InternalSubsts::identity_for_item(tcx, def_id)),
);
let mangled = tcx.symbol_name(instance);
- tcx.sess.span_err(attr.span, &format!("symbol-name({})", mangled));
+ tcx.sess.emit_err(TestOutput {
+ span: attr.span,
+ kind: Kind::SymbolName,
+ content: format!("{mangled}"),
+ });
if let Ok(demangling) = rustc_demangle::try_demangle(mangled.name) {
- tcx.sess.span_err(attr.span, &format!("demangling({})", demangling));
- tcx.sess.span_err(attr.span, &format!("demangling-alt({:#})", demangling));
+ tcx.sess.emit_err(TestOutput {
+ span: attr.span,
+ kind: Kind::Demangling,
+ content: format!("{demangling}"),
+ });
+ tcx.sess.emit_err(TestOutput {
+ span: attr.span,
+ kind: Kind::DemanglingAlt,
+ content: format!("{:#}", demangling),
+ });
}
}
for attr in tcx.get_attrs(def_id.to_def_id(), DEF_PATH) {
- let path = with_no_trimmed_paths!(tcx.def_path_str(def_id.to_def_id()));
- tcx.sess.span_err(attr.span, &format!("def-path({})", path));
+ tcx.sess.emit_err(TestOutput {
+ span: attr.span,
+ kind: Kind::DefPath,
+ content: with_no_trimmed_paths!(tcx.def_path_str(def_id.to_def_id())),
+ });
}
}
}
diff --git a/compiler/rustc_symbol_mangling/src/typeid/typeid_itanium_cxx_abi.rs b/compiler/rustc_symbol_mangling/src/typeid/typeid_itanium_cxx_abi.rs
index a09b52fbf..6aa031c83 100644
--- a/compiler/rustc_symbol_mangling/src/typeid/typeid_itanium_cxx_abi.rs
+++ b/compiler/rustc_symbol_mangling/src/typeid/typeid_itanium_cxx_abi.rs
@@ -13,7 +13,7 @@ use rustc_hir as hir;
use rustc_middle::ty::subst::{GenericArg, GenericArgKind, SubstsRef};
use rustc_middle::ty::{
self, Binder, Const, ExistentialPredicate, FloatTy, FnSig, IntTy, List, Region, RegionKind,
- Term, Ty, TyCtxt, UintTy,
+ TermKind, Ty, TyCtxt, UintTy,
};
use rustc_span::def_id::DefId;
use rustc_span::symbol::sym;
@@ -243,13 +243,9 @@ fn encode_predicate<'tcx>(
let name = encode_ty_name(tcx, projection.item_def_id);
let _ = write!(s, "u{}{}", name.len(), &name);
s.push_str(&encode_substs(tcx, projection.substs, dict, options));
- match projection.term {
- Term::Ty(ty) => {
- s.push_str(&encode_ty(tcx, ty, dict, options));
- }
- Term::Const(c) => {
- s.push_str(&encode_const(tcx, c, dict, options));
- }
+ match projection.term.unpack() {
+ TermKind::Ty(ty) => s.push_str(&encode_ty(tcx, ty, dict, options)),
+ TermKind::Const(c) => s.push_str(&encode_const(tcx, c, dict, options)),
}
}
ty::ExistentialPredicate::AutoTrait(def_id) => {
@@ -309,8 +305,7 @@ fn encode_region<'tcx>(
| RegionKind::ReFree(..)
| RegionKind::ReStatic
| RegionKind::ReVar(..)
- | RegionKind::RePlaceholder(..)
- | RegionKind::ReEmpty(..) => {
+ | RegionKind::RePlaceholder(..) => {
bug!("encode_region: unexpected `{:?}`", region.kind());
}
}
@@ -545,7 +540,7 @@ fn encode_ty<'tcx>(
let mut s = String::new();
let def_id = adt_def.0.did;
if options.contains(EncodeTyOptions::GENERALIZE_REPR_C) && adt_def.repr().c() {
- // For for cross-language CFI support, the encoding must be compatible at the FFI
+ // For cross-language CFI support, the encoding must be compatible at the FFI
// boundary. For instance:
//
// struct type1 {};
@@ -632,10 +627,13 @@ fn encode_ty<'tcx>(
}
// Trait types
- ty::Dynamic(predicates, region) => {
+ ty::Dynamic(predicates, region, kind) => {
// u3dynI<element-type1[..element-typeN]>E, where <element-type> is <predicate>, as
// vendor extended type.
- let mut s = String::from("u3dynI");
+ let mut s = String::from(match kind {
+ ty::Dyn => "u3dynI",
+ ty::DynStar => "u7dynstarI",
+ });
s.push_str(&encode_predicates(tcx, predicates, dict, options));
s.push_str(&encode_region(tcx, *region, dict, options));
s.push('E');
@@ -888,7 +886,7 @@ pub fn typeid_for_fnabi<'tcx>(
typeid.push('v');
}
} else {
- for n in 0..fn_abi.fixed_count {
+ for n in 0..fn_abi.fixed_count as usize {
let ty = transform_ty(tcx, fn_abi.args[n].layout.ty, transform_ty_options);
typeid.push_str(&encode_ty(tcx, ty, &mut dict, encode_ty_options));
}
diff --git a/compiler/rustc_symbol_mangling/src/v0.rs b/compiler/rustc_symbol_mangling/src/v0.rs
index 71fa5a448..ecfe6861e 100644
--- a/compiler/rustc_symbol_mangling/src/v0.rs
+++ b/compiler/rustc_symbol_mangling/src/v0.rs
@@ -7,10 +7,10 @@ use rustc_hir::def_id::{CrateNum, DefId};
use rustc_hir::definitions::{DefPathData, DisambiguatedDefPathData};
use rustc_middle::ty::layout::IntegerExt;
use rustc_middle::ty::print::{Print, Printer};
-use rustc_middle::ty::subst::{GenericArg, GenericArgKind, Subst};
use rustc_middle::ty::{
self, EarlyBinder, FloatTy, Instance, IntTy, Ty, TyCtxt, TypeVisitable, UintTy,
};
+use rustc_middle::ty::{GenericArg, GenericArgKind};
use rustc_span::symbol::kw;
use rustc_target::abi::Integer;
use rustc_target::spec::abi::Abi;
@@ -301,7 +301,7 @@ impl<'tcx> Printer<'tcx> for &mut SymbolMangler<'tcx> {
// Encode impl generic params if the substitutions contain parameters (implying
// polymorphization is enabled) and this isn't an inherent impl.
- if impl_trait_ref.is_some() && substs.iter().any(|a| a.has_param_types_or_consts()) {
+ if impl_trait_ref.is_some() && substs.iter().any(|a| a.has_non_region_param()) {
self = self.path_generic_args(
|this| {
this.path_append_ns(
@@ -479,8 +479,12 @@ impl<'tcx> Printer<'tcx> for &mut SymbolMangler<'tcx> {
})?;
}
- ty::Dynamic(predicates, r) => {
- self.push("D");
+ ty::Dynamic(predicates, r, kind) => {
+ self.push(match kind {
+ ty::Dyn => "D",
+ // FIXME(dyn-star): need to update v0 mangling docs
+ ty::DynStar => "D*",
+ });
self = self.print_dyn_existential(predicates)?;
self = r.print(self)?;
}
@@ -543,9 +547,9 @@ impl<'tcx> Printer<'tcx> for &mut SymbolMangler<'tcx> {
let name = cx.tcx.associated_item(projection.item_def_id).name;
cx.push("p");
cx.push_ident(name.as_str());
- cx = match projection.term {
- ty::Term::Ty(ty) => ty.print(cx),
- ty::Term::Const(c) => c.print(cx),
+ cx = match projection.term.unpack() {
+ ty::TermKind::Ty(ty) => ty.print(cx),
+ ty::TermKind::Const(c) => c.print(cx),
}?;
}
ty::ExistentialPredicate::AutoTrait(def_id) => {
diff --git a/compiler/rustc_target/Cargo.toml b/compiler/rustc_target/Cargo.toml
index 162376af4..fc37fdb1c 100644
--- a/compiler/rustc_target/Cargo.toml
+++ b/compiler/rustc_target/Cargo.toml
@@ -8,7 +8,8 @@ bitflags = "1.2.1"
tracing = "0.1"
serde_json = "1.0.59"
rustc_data_structures = { path = "../rustc_data_structures" }
+rustc_feature = { path = "../rustc_feature" }
+rustc_index = { path = "../rustc_index" }
rustc_macros = { path = "../rustc_macros" }
rustc_serialize = { path = "../rustc_serialize" }
rustc_span = { path = "../rustc_span" }
-rustc_index = { path = "../rustc_index" }
diff --git a/compiler/rustc_target/src/abi/call/aarch64.rs b/compiler/rustc_target/src/abi/call/aarch64.rs
index 4613a459c..a84988fa7 100644
--- a/compiler/rustc_target/src/abi/call/aarch64.rs
+++ b/compiler/rustc_target/src/abi/call/aarch64.rs
@@ -1,6 +1,27 @@
use crate::abi::call::{ArgAbi, FnAbi, Reg, RegKind, Uniform};
use crate::abi::{HasDataLayout, TyAbiInterface};
+/// Given an integer type M and a register width N (e.g. M=u16 and N=32 bits), the
+/// `ParamExtension` policy specifies how a uM value should be treated when
+/// passed via a register or stack slot of width N. See also rust-lang/rust#97463.
+#[derive(Copy, Clone, PartialEq)]
+pub enum ParamExtension {
+ /// Indicates that when passing an i8/i16, either as a function argument or
+ /// as a return value, it must be sign-extended to 32 bits, and likewise a
+ /// u8/u16 must be zero-extended to 32 bits. (This variant is here to
+ /// accommodate Apple's deviation from the usual AArch64 ABI as defined by
+ /// ARM.)
+ ///
+ /// See also: <https://developer.apple.com/documentation/xcode/writing-arm64-code-for-apple-platforms#Pass-Arguments-to-Functions-Correctly>
+ ExtendTo32Bits,
+
+ /// Indicates that neither sign- nor zero-extension is performed: if a value
+ /// of a type with bit width M is passed as a function argument or return
+ /// value, then its M bits are copied into the least significant M bits, and the
+ /// remaining bits of the register (or word of memory) are untouched.
+ NoExtension,
+}
+
fn is_homogeneous_aggregate<'a, Ty, C>(cx: &C, arg: &mut ArgAbi<'a, Ty>) -> Option<Uniform>
where
Ty: TyAbiInterface<'a, C> + Copy,
@@ -24,13 +45,16 @@ where
})
}
-fn classify_ret<'a, Ty, C>(cx: &C, ret: &mut ArgAbi<'a, Ty>)
+fn classify_ret<'a, Ty, C>(cx: &C, ret: &mut ArgAbi<'a, Ty>, param_policy: ParamExtension)
where
Ty: TyAbiInterface<'a, C> + Copy,
C: HasDataLayout,
{
if !ret.layout.is_aggregate() {
- ret.extend_integer_width_to(32);
+ match param_policy {
+ ParamExtension::ExtendTo32Bits => ret.extend_integer_width_to(32),
+ ParamExtension::NoExtension => {}
+ }
return;
}
if let Some(uniform) = is_homogeneous_aggregate(cx, ret) {
@@ -46,13 +70,16 @@ where
ret.make_indirect();
}
-fn classify_arg<'a, Ty, C>(cx: &C, arg: &mut ArgAbi<'a, Ty>)
+fn classify_arg<'a, Ty, C>(cx: &C, arg: &mut ArgAbi<'a, Ty>, param_policy: ParamExtension)
where
Ty: TyAbiInterface<'a, C> + Copy,
C: HasDataLayout,
{
if !arg.layout.is_aggregate() {
- arg.extend_integer_width_to(32);
+ match param_policy {
+ ParamExtension::ExtendTo32Bits => arg.extend_integer_width_to(32),
+ ParamExtension::NoExtension => {}
+ }
return;
}
if let Some(uniform) = is_homogeneous_aggregate(cx, arg) {
@@ -68,19 +95,19 @@ where
arg.make_indirect();
}
-pub fn compute_abi_info<'a, Ty, C>(cx: &C, fn_abi: &mut FnAbi<'a, Ty>)
+pub fn compute_abi_info<'a, Ty, C>(cx: &C, fn_abi: &mut FnAbi<'a, Ty>, param_policy: ParamExtension)
where
Ty: TyAbiInterface<'a, C> + Copy,
C: HasDataLayout,
{
if !fn_abi.ret.is_ignore() {
- classify_ret(cx, &mut fn_abi.ret);
+ classify_ret(cx, &mut fn_abi.ret, param_policy);
}
- for arg in &mut fn_abi.args {
+ for arg in fn_abi.args.iter_mut() {
if arg.is_ignore() {
continue;
}
- classify_arg(cx, arg);
+ classify_arg(cx, arg, param_policy);
}
}
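As a rough standalone sketch of the policy described above (the enum mirrors this patch; the `needs_extension` helper is hypothetical and not part of rustc), the decision to widen a small integer argument depends only on the chosen `ParamExtension`:

#[derive(Clone, Copy, PartialEq)]
enum ParamExtension {
    ExtendTo32Bits, // Apple's arm64 convention: widen i8/i16/u8/u16 to 32 bits
    NoExtension,    // standard AAPCS64: upper bits are left unspecified
}

// Hypothetical helper: does an integer of `bit_width` bits need widening?
fn needs_extension(bit_width: u64, policy: ParamExtension) -> bool {
    policy == ParamExtension::ExtendTo32Bits && bit_width < 32
}

fn main() {
    assert!(needs_extension(16, ParamExtension::ExtendTo32Bits));
    assert!(!needs_extension(16, ParamExtension::NoExtension));
}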
diff --git a/compiler/rustc_target/src/abi/call/amdgpu.rs b/compiler/rustc_target/src/abi/call/amdgpu.rs
index 9be97476c..e30dead63 100644
--- a/compiler/rustc_target/src/abi/call/amdgpu.rs
+++ b/compiler/rustc_target/src/abi/call/amdgpu.rs
@@ -26,7 +26,7 @@ where
classify_ret(cx, &mut fn_abi.ret);
}
- for arg in &mut fn_abi.args {
+ for arg in fn_abi.args.iter_mut() {
if arg.is_ignore() {
continue;
}
diff --git a/compiler/rustc_target/src/abi/call/arm.rs b/compiler/rustc_target/src/abi/call/arm.rs
index e66c2132b..1923ea588 100644
--- a/compiler/rustc_target/src/abi/call/arm.rs
+++ b/compiler/rustc_target/src/abi/call/arm.rs
@@ -88,7 +88,7 @@ where
classify_ret(cx, &mut fn_abi.ret, vfp);
}
- for arg in &mut fn_abi.args {
+ for arg in fn_abi.args.iter_mut() {
if arg.is_ignore() {
continue;
}
diff --git a/compiler/rustc_target/src/abi/call/avr.rs b/compiler/rustc_target/src/abi/call/avr.rs
index c1f7a1e3a..e20f01355 100644
--- a/compiler/rustc_target/src/abi/call/avr.rs
+++ b/compiler/rustc_target/src/abi/call/avr.rs
@@ -49,7 +49,7 @@ pub fn compute_abi_info<Ty>(fty: &mut FnAbi<'_, Ty>) {
classify_ret_ty(&mut fty.ret);
}
- for arg in &mut fty.args {
+ for arg in fty.args.iter_mut() {
if arg.is_ignore() {
continue;
}
diff --git a/compiler/rustc_target/src/abi/call/bpf.rs b/compiler/rustc_target/src/abi/call/bpf.rs
index 466c52553..780e7df43 100644
--- a/compiler/rustc_target/src/abi/call/bpf.rs
+++ b/compiler/rustc_target/src/abi/call/bpf.rs
@@ -22,7 +22,7 @@ pub fn compute_abi_info<Ty>(fn_abi: &mut FnAbi<'_, Ty>) {
classify_ret(&mut fn_abi.ret);
}
- for arg in &mut fn_abi.args {
+ for arg in fn_abi.args.iter_mut() {
if arg.is_ignore() {
continue;
}
diff --git a/compiler/rustc_target/src/abi/call/hexagon.rs b/compiler/rustc_target/src/abi/call/hexagon.rs
index 8028443b8..80a442048 100644
--- a/compiler/rustc_target/src/abi/call/hexagon.rs
+++ b/compiler/rustc_target/src/abi/call/hexagon.rs
@@ -21,7 +21,7 @@ pub fn compute_abi_info<Ty>(fn_abi: &mut FnAbi<'_, Ty>) {
classify_ret(&mut fn_abi.ret);
}
- for arg in &mut fn_abi.args {
+ for arg in fn_abi.args.iter_mut() {
if arg.is_ignore() {
continue;
}
diff --git a/compiler/rustc_target/src/abi/call/m68k.rs b/compiler/rustc_target/src/abi/call/m68k.rs
index 58fdc00b6..c1e0f54af 100644
--- a/compiler/rustc_target/src/abi/call/m68k.rs
+++ b/compiler/rustc_target/src/abi/call/m68k.rs
@@ -21,7 +21,7 @@ pub fn compute_abi_info<Ty>(fn_abi: &mut FnAbi<'_, Ty>) {
classify_ret(&mut fn_abi.ret);
}
- for arg in &mut fn_abi.args {
+ for arg in fn_abi.args.iter_mut() {
if arg.is_ignore() {
continue;
}
diff --git a/compiler/rustc_target/src/abi/call/mips.rs b/compiler/rustc_target/src/abi/call/mips.rs
index cc4431976..edcd1bab8 100644
--- a/compiler/rustc_target/src/abi/call/mips.rs
+++ b/compiler/rustc_target/src/abi/call/mips.rs
@@ -22,10 +22,8 @@ where
let align = arg.layout.align.max(dl.i32_align).min(dl.i64_align).abi;
if arg.layout.is_aggregate() {
- arg.cast_to(Uniform { unit: Reg::i32(), total: size });
- if !offset.is_aligned(align) {
- arg.pad_with(Reg::i32());
- }
+ let pad_i32 = !offset.is_aligned(align);
+ arg.cast_to_and_pad_i32(Uniform { unit: Reg::i32(), total: size }, pad_i32);
} else {
arg.extend_integer_width_to(32);
}
@@ -42,7 +40,7 @@ where
classify_ret(cx, &mut fn_abi.ret, &mut offset);
}
- for arg in &mut fn_abi.args {
+ for arg in fn_abi.args.iter_mut() {
if arg.is_ignore() {
continue;
}
diff --git a/compiler/rustc_target/src/abi/call/mips64.rs b/compiler/rustc_target/src/abi/call/mips64.rs
index cd54167aa..2700f67b2 100644
--- a/compiler/rustc_target/src/abi/call/mips64.rs
+++ b/compiler/rustc_target/src/abi/call/mips64.rs
@@ -158,7 +158,7 @@ where
classify_ret(cx, &mut fn_abi.ret);
}
- for arg in &mut fn_abi.args {
+ for arg in fn_abi.args.iter_mut() {
if arg.is_ignore() {
continue;
}
diff --git a/compiler/rustc_target/src/abi/call/mod.rs b/compiler/rustc_target/src/abi/call/mod.rs
index 577126a95..9e5f0e4d1 100644
--- a/compiler/rustc_target/src/abi/call/mod.rs
+++ b/compiler/rustc_target/src/abi/call/mod.rs
@@ -14,7 +14,6 @@ mod m68k;
mod mips;
mod mips64;
mod msp430;
-mod nvptx;
mod nvptx64;
mod powerpc;
mod powerpc64;
@@ -27,7 +26,7 @@ mod x86;
mod x86_64;
mod x86_win64;
-#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, HashStable_Generic)]
+#[derive(PartialEq, Eq, Hash, Debug, HashStable_Generic)]
pub enum PassMode {
/// Ignore the argument.
///
@@ -41,13 +40,14 @@ pub enum PassMode {
///
/// The argument has a layout abi of `ScalarPair`.
Pair(ArgAttributes, ArgAttributes),
- /// Pass the argument after casting it, to either
- /// a single uniform or a pair of registers.
- Cast(CastTarget),
+ /// Pass the argument after casting it, to either a single uniform or a
+ /// pair of registers. The bool indicates if a `Reg::i32()` dummy argument
+ /// is emitted before the real argument.
+ Cast(Box<CastTarget>, bool),
/// Pass the argument indirectly via a hidden pointer.
/// The `extra_attrs` value, if any, is for the extra data (vtable or length)
/// which indicates that it refers to an unsized rvalue.
- /// `on_stack` defines that the the value should be passed at a fixed
+ /// `on_stack` defines that the value should be passed at a fixed
/// stack offset in accordance to the ABI rather than passed using a
/// pointer. This corresponds to the `byval` LLVM argument attribute.
Indirect { attrs: ArgAttributes, extra_attrs: Option<ArgAttributes>, on_stack: bool },
@@ -464,10 +464,6 @@ impl<'a, Ty> TyAndLayout<'a, Ty> {
#[derive(PartialEq, Eq, Hash, Debug, HashStable_Generic)]
pub struct ArgAbi<'a, Ty> {
pub layout: TyAndLayout<'a, Ty>,
-
- /// Dummy argument, which is emitted before the real argument.
- pub pad: Option<Reg>,
-
pub mode: PassMode,
}
@@ -487,7 +483,7 @@ impl<'a, Ty> ArgAbi<'a, Ty> {
Abi::Vector { .. } => PassMode::Direct(ArgAttributes::new()),
Abi::Aggregate { .. } => PassMode::Direct(ArgAttributes::new()),
};
- ArgAbi { layout, pad: None, mode }
+ ArgAbi { layout, mode }
}
fn indirect_pass_mode(layout: &TyAndLayout<'a, Ty>) -> PassMode {
@@ -549,11 +545,11 @@ impl<'a, Ty> ArgAbi<'a, Ty> {
}
pub fn cast_to<T: Into<CastTarget>>(&mut self, target: T) {
- self.mode = PassMode::Cast(target.into());
+ self.mode = PassMode::Cast(Box::new(target.into()), false);
}
- pub fn pad_with(&mut self, reg: Reg) {
- self.pad = Some(reg);
+ pub fn cast_to_and_pad_i32<T: Into<CastTarget>>(&mut self, target: T, pad_i32: bool) {
+ self.mode = PassMode::Cast(Box::new(target.into()), pad_i32);
}
pub fn is_indirect(&self) -> bool {
@@ -615,7 +611,7 @@ pub enum Conv {
#[derive(PartialEq, Eq, Hash, Debug, HashStable_Generic)]
pub struct FnAbi<'a, Ty> {
/// The LLVM types of each argument.
- pub args: Vec<ArgAbi<'a, Ty>>,
+ pub args: Box<[ArgAbi<'a, Ty>]>,
/// LLVM return type.
pub ret: ArgAbi<'a, Ty>,
@@ -626,7 +622,7 @@ pub struct FnAbi<'a, Ty> {
///
/// Should only be different from args.len() when c_variadic is true.
/// This can be used to know whether an argument is variadic or not.
- pub fixed_count: usize,
+ pub fixed_count: u32,
pub conv: Conv,
@@ -689,7 +685,14 @@ impl<'a, Ty> FnAbi<'a, Ty> {
}
}
},
- "aarch64" => aarch64::compute_abi_info(cx, self),
+ "aarch64" => {
+ let param_policy = if cx.target_spec().is_like_osx {
+ aarch64::ParamExtension::ExtendTo32Bits
+ } else {
+ aarch64::ParamExtension::NoExtension
+ };
+ aarch64::compute_abi_info(cx, self, param_policy)
+ }
"amdgpu" => amdgpu::compute_abi_info(cx, self),
"arm" => arm::compute_abi_info(cx, self),
"avr" => avr::compute_abi_info(self),
@@ -702,7 +705,6 @@ impl<'a, Ty> FnAbi<'a, Ty> {
"msp430" => msp430::compute_abi_info(self),
"sparc" => sparc::compute_abi_info(cx, self),
"sparc64" => sparc64::compute_abi_info(cx, self),
- "nvptx" => nvptx::compute_abi_info(self),
"nvptx64" => {
if cx.target_spec().adjust_abi(abi) == spec::abi::Abi::PtxKernel {
nvptx64::compute_ptx_kernel_abi_info(cx, self)
@@ -732,3 +734,14 @@ impl<'a, Ty> FnAbi<'a, Ty> {
Ok(())
}
}
+
+// Some types are used a lot. Make sure they don't unintentionally get bigger.
+#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
+mod size_asserts {
+ use super::*;
+ use rustc_data_structures::static_assert_size;
+ // tidy-alphabetical-start
+ static_assert_size!(ArgAbi<'_, usize>, 56);
+ static_assert_size!(FnAbi<'_, usize>, 80);
+ // tidy-alphabetical-end
+}
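The size asserts above are the motivation for boxing `CastTarget` inside `PassMode::Cast`; a minimal standalone illustration (toy types, not the rustc ones) of why moving a large payload behind a `Box` shrinks the whole enum:

use std::mem::size_of;

// An enum is at least as large as its largest variant...
#[allow(dead_code)]
enum Inline {
    Small(u8),
    Big([u64; 6]),
}

// ...so boxing the big payload shrinks every value of the enum.
#[allow(dead_code)]
enum Boxed {
    Small(u8),
    Big(Box<[u64; 6]>),
}

fn main() {
    assert!(size_of::<Boxed>() < size_of::<Inline>());
}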
diff --git a/compiler/rustc_target/src/abi/call/msp430.rs b/compiler/rustc_target/src/abi/call/msp430.rs
index 0ba73657b..33ef47be0 100644
--- a/compiler/rustc_target/src/abi/call/msp430.rs
+++ b/compiler/rustc_target/src/abi/call/msp430.rs
@@ -30,7 +30,7 @@ pub fn compute_abi_info<Ty>(fn_abi: &mut FnAbi<'_, Ty>) {
classify_ret(&mut fn_abi.ret);
}
- for arg in &mut fn_abi.args {
+ for arg in fn_abi.args.iter_mut() {
if arg.is_ignore() {
continue;
}
diff --git a/compiler/rustc_target/src/abi/call/nvptx.rs b/compiler/rustc_target/src/abi/call/nvptx.rs
deleted file mode 100644
index 428dd95bb..000000000
--- a/compiler/rustc_target/src/abi/call/nvptx.rs
+++ /dev/null
@@ -1,33 +0,0 @@
-// Reference: PTX Writer's Guide to Interoperability
-// https://docs.nvidia.com/cuda/ptx-writers-guide-to-interoperability
-
-use crate::abi::call::{ArgAbi, FnAbi};
-
-fn classify_ret<Ty>(ret: &mut ArgAbi<'_, Ty>) {
- if ret.layout.is_aggregate() && ret.layout.size.bits() > 32 {
- ret.make_indirect();
- } else {
- ret.extend_integer_width_to(32);
- }
-}
-
-fn classify_arg<Ty>(arg: &mut ArgAbi<'_, Ty>) {
- if arg.layout.is_aggregate() && arg.layout.size.bits() > 32 {
- arg.make_indirect();
- } else {
- arg.extend_integer_width_to(32);
- }
-}
-
-pub fn compute_abi_info<Ty>(fn_abi: &mut FnAbi<'_, Ty>) {
- if !fn_abi.ret.is_ignore() {
- classify_ret(&mut fn_abi.ret);
- }
-
- for arg in &mut fn_abi.args {
- if arg.is_ignore() {
- continue;
- }
- classify_arg(arg);
- }
-}
diff --git a/compiler/rustc_target/src/abi/call/nvptx64.rs b/compiler/rustc_target/src/abi/call/nvptx64.rs
index fc16f1c97..4abe51cd6 100644
--- a/compiler/rustc_target/src/abi/call/nvptx64.rs
+++ b/compiler/rustc_target/src/abi/call/nvptx64.rs
@@ -38,7 +38,7 @@ pub fn compute_abi_info<Ty>(fn_abi: &mut FnAbi<'_, Ty>) {
classify_ret(&mut fn_abi.ret);
}
- for arg in &mut fn_abi.args {
+ for arg in fn_abi.args.iter_mut() {
if arg.is_ignore() {
continue;
}
@@ -55,7 +55,7 @@ where
panic!("Kernels should not return anything other than () or !");
}
- for arg in &mut fn_abi.args {
+ for arg in fn_abi.args.iter_mut() {
if arg.is_ignore() {
continue;
}
diff --git a/compiler/rustc_target/src/abi/call/powerpc.rs b/compiler/rustc_target/src/abi/call/powerpc.rs
index 27a5c6d2f..70c32db0a 100644
--- a/compiler/rustc_target/src/abi/call/powerpc.rs
+++ b/compiler/rustc_target/src/abi/call/powerpc.rs
@@ -21,7 +21,7 @@ pub fn compute_abi_info<Ty>(fn_abi: &mut FnAbi<'_, Ty>) {
classify_ret(&mut fn_abi.ret);
}
- for arg in &mut fn_abi.args {
+ for arg in fn_abi.args.iter_mut() {
if arg.is_ignore() {
continue;
}
diff --git a/compiler/rustc_target/src/abi/call/powerpc64.rs b/compiler/rustc_target/src/abi/call/powerpc64.rs
index c22ef9c8f..359bb8fc0 100644
--- a/compiler/rustc_target/src/abi/call/powerpc64.rs
+++ b/compiler/rustc_target/src/abi/call/powerpc64.rs
@@ -132,7 +132,7 @@ where
classify_ret(cx, &mut fn_abi.ret, abi);
}
- for arg in &mut fn_abi.args {
+ for arg in fn_abi.args.iter_mut() {
if arg.is_ignore() {
continue;
}
diff --git a/compiler/rustc_target/src/abi/call/riscv.rs b/compiler/rustc_target/src/abi/call/riscv.rs
index 752b44f64..1cb360f83 100644
--- a/compiler/rustc_target/src/abi/call/riscv.rs
+++ b/compiler/rustc_target/src/abi/call/riscv.rs
@@ -340,7 +340,7 @@ where
arg,
xlen,
flen,
- i >= fn_abi.fixed_count,
+ i >= fn_abi.fixed_count as usize,
&mut avail_gprs,
&mut avail_fprs,
);
diff --git a/compiler/rustc_target/src/abi/call/s390x.rs b/compiler/rustc_target/src/abi/call/s390x.rs
index 13706e8c2..ea2369281 100644
--- a/compiler/rustc_target/src/abi/call/s390x.rs
+++ b/compiler/rustc_target/src/abi/call/s390x.rs
@@ -48,7 +48,7 @@ where
classify_ret(&mut fn_abi.ret);
}
- for arg in &mut fn_abi.args {
+ for arg in fn_abi.args.iter_mut() {
if arg.is_ignore() {
continue;
}
diff --git a/compiler/rustc_target/src/abi/call/sparc.rs b/compiler/rustc_target/src/abi/call/sparc.rs
index cc4431976..edcd1bab8 100644
--- a/compiler/rustc_target/src/abi/call/sparc.rs
+++ b/compiler/rustc_target/src/abi/call/sparc.rs
@@ -22,10 +22,8 @@ where
let align = arg.layout.align.max(dl.i32_align).min(dl.i64_align).abi;
if arg.layout.is_aggregate() {
- arg.cast_to(Uniform { unit: Reg::i32(), total: size });
- if !offset.is_aligned(align) {
- arg.pad_with(Reg::i32());
- }
+ let pad_i32 = !offset.is_aligned(align);
+ arg.cast_to_and_pad_i32(Uniform { unit: Reg::i32(), total: size }, pad_i32);
} else {
arg.extend_integer_width_to(32);
}
@@ -42,7 +40,7 @@ where
classify_ret(cx, &mut fn_abi.ret, &mut offset);
}
- for arg in &mut fn_abi.args {
+ for arg in fn_abi.args.iter_mut() {
if arg.is_ignore() {
continue;
}
diff --git a/compiler/rustc_target/src/abi/call/sparc64.rs b/compiler/rustc_target/src/abi/call/sparc64.rs
index cc3a0a699..1b74959ad 100644
--- a/compiler/rustc_target/src/abi/call/sparc64.rs
+++ b/compiler/rustc_target/src/abi/call/sparc64.rs
@@ -217,7 +217,7 @@ where
classify_arg(cx, &mut fn_abi.ret, Size { raw: 32 });
}
- for arg in &mut fn_abi.args {
+ for arg in fn_abi.args.iter_mut() {
if arg.is_ignore() {
continue;
}
diff --git a/compiler/rustc_target/src/abi/call/wasm.rs b/compiler/rustc_target/src/abi/call/wasm.rs
index 3237cde10..44427ee53 100644
--- a/compiler/rustc_target/src/abi/call/wasm.rs
+++ b/compiler/rustc_target/src/abi/call/wasm.rs
@@ -50,7 +50,7 @@ where
classify_ret(cx, &mut fn_abi.ret);
}
- for arg in &mut fn_abi.args {
+ for arg in fn_abi.args.iter_mut() {
if arg.is_ignore() {
continue;
}
@@ -66,7 +66,7 @@ pub fn compute_wasm_abi_info<Ty>(fn_abi: &mut FnAbi<'_, Ty>) {
classify_ret(&mut fn_abi.ret);
}
- for arg in &mut fn_abi.args {
+ for arg in fn_abi.args.iter_mut() {
if arg.is_ignore() {
continue;
}
diff --git a/compiler/rustc_target/src/abi/call/x86.rs b/compiler/rustc_target/src/abi/call/x86.rs
index c7d59baf9..7c26335dc 100644
--- a/compiler/rustc_target/src/abi/call/x86.rs
+++ b/compiler/rustc_target/src/abi/call/x86.rs
@@ -49,7 +49,7 @@ where
}
}
- for arg in &mut fn_abi.args {
+ for arg in fn_abi.args.iter_mut() {
if arg.is_ignore() {
continue;
}
@@ -72,7 +72,7 @@ where
let mut free_regs = 2;
- for arg in &mut fn_abi.args {
+ for arg in fn_abi.args.iter_mut() {
let attrs = match arg.mode {
PassMode::Ignore
| PassMode::Indirect { attrs: _, extra_attrs: None, on_stack: _ } => {
@@ -81,7 +81,7 @@ where
PassMode::Direct(ref mut attrs) => attrs,
PassMode::Pair(..)
| PassMode::Indirect { attrs: _, extra_attrs: Some(_), on_stack: _ }
- | PassMode::Cast(_) => {
+ | PassMode::Cast(..) => {
unreachable!("x86 shouldn't be passing arguments by {:?}", arg.mode)
}
};
diff --git a/compiler/rustc_target/src/abi/call/x86_64.rs b/compiler/rustc_target/src/abi/call/x86_64.rs
index a52e01a49..c0c071a61 100644
--- a/compiler/rustc_target/src/abi/call/x86_64.rs
+++ b/compiler/rustc_target/src/abi/call/x86_64.rs
@@ -239,7 +239,7 @@ where
x86_64_arg_or_ret(&mut fn_abi.ret, false);
}
- for arg in &mut fn_abi.args {
+ for arg in fn_abi.args.iter_mut() {
if arg.is_ignore() {
continue;
}
diff --git a/compiler/rustc_target/src/abi/call/x86_win64.rs b/compiler/rustc_target/src/abi/call/x86_win64.rs
index 2aad641b1..1aaf0e511 100644
--- a/compiler/rustc_target/src/abi/call/x86_win64.rs
+++ b/compiler/rustc_target/src/abi/call/x86_win64.rs
@@ -31,7 +31,7 @@ pub fn compute_abi_info<Ty>(fn_abi: &mut FnAbi<'_, Ty>) {
if !fn_abi.ret.is_ignore() {
fixup(&mut fn_abi.ret);
}
- for arg in &mut fn_abi.args {
+ for arg in fn_abi.args.iter_mut() {
if arg.is_ignore() {
continue;
}
diff --git a/compiler/rustc_target/src/abi/mod.rs b/compiler/rustc_target/src/abi/mod.rs
index 92ce4d91d..7171ca7bf 100644
--- a/compiler/rustc_target/src/abi/mod.rs
+++ b/compiler/rustc_target/src/abi/mod.rs
@@ -7,7 +7,7 @@ use crate::spec::Target;
use std::convert::{TryFrom, TryInto};
use std::fmt;
use std::iter::Step;
-use std::num::NonZeroUsize;
+use std::num::{NonZeroUsize, ParseIntError};
use std::ops::{Add, AddAssign, Deref, Mul, RangeInclusive, Sub};
use std::str::FromStr;
@@ -69,34 +69,46 @@ impl Default for TargetDataLayout {
}
}
+pub enum TargetDataLayoutErrors<'a> {
+ InvalidAddressSpace { addr_space: &'a str, cause: &'a str, err: ParseIntError },
+ InvalidBits { kind: &'a str, bit: &'a str, cause: &'a str, err: ParseIntError },
+ MissingAlignment { cause: &'a str },
+ InvalidAlignment { cause: &'a str, err: String },
+ InconsistentTargetArchitecture { dl: &'a str, target: &'a str },
+ InconsistentTargetPointerWidth { pointer_size: u64, target: u32 },
+ InvalidBitsSize { err: String },
+}
+
impl TargetDataLayout {
- pub fn parse(target: &Target) -> Result<TargetDataLayout, String> {
+ pub fn parse<'a>(target: &'a Target) -> Result<TargetDataLayout, TargetDataLayoutErrors<'a>> {
// Parse an address space index from a string.
- let parse_address_space = |s: &str, cause: &str| {
+ let parse_address_space = |s: &'a str, cause: &'a str| {
s.parse::<u32>().map(AddressSpace).map_err(|err| {
- format!("invalid address space `{}` for `{}` in \"data-layout\": {}", s, cause, err)
+ TargetDataLayoutErrors::InvalidAddressSpace { addr_space: s, cause, err }
})
};
// Parse a bit count from a string.
- let parse_bits = |s: &str, kind: &str, cause: &str| {
- s.parse::<u64>().map_err(|err| {
- format!("invalid {} `{}` for `{}` in \"data-layout\": {}", kind, s, cause, err)
+ let parse_bits = |s: &'a str, kind: &'a str, cause: &'a str| {
+ s.parse::<u64>().map_err(|err| TargetDataLayoutErrors::InvalidBits {
+ kind,
+ bit: s,
+ cause,
+ err,
})
};
// Parse a size string.
- let size = |s: &str, cause: &str| parse_bits(s, "size", cause).map(Size::from_bits);
+ let size = |s: &'a str, cause: &'a str| parse_bits(s, "size", cause).map(Size::from_bits);
// Parse an alignment string.
- let align = |s: &[&str], cause: &str| {
+ let align = |s: &[&'a str], cause: &'a str| {
if s.is_empty() {
- return Err(format!("missing alignment for `{}` in \"data-layout\"", cause));
+ return Err(TargetDataLayoutErrors::MissingAlignment { cause });
}
let align_from_bits = |bits| {
- Align::from_bits(bits).map_err(|err| {
- format!("invalid alignment for `{}` in \"data-layout\": {}", cause, err)
- })
+ Align::from_bits(bits)
+ .map_err(|err| TargetDataLayoutErrors::InvalidAlignment { cause, err })
};
let abi = parse_bits(s[0], "alignment", cause)?;
let pref = s.get(1).map_or(Ok(abi), |pref| parse_bits(pref, "alignment", cause))?;
@@ -158,25 +170,24 @@ impl TargetDataLayout {
// Perform consistency checks against the Target information.
if dl.endian != target.endian {
- return Err(format!(
- "inconsistent target specification: \"data-layout\" claims \
- architecture is {}-endian, while \"target-endian\" is `{}`",
- dl.endian.as_str(),
- target.endian.as_str(),
- ));
+ return Err(TargetDataLayoutErrors::InconsistentTargetArchitecture {
+ dl: dl.endian.as_str(),
+ target: target.endian.as_str(),
+ });
}
let target_pointer_width: u64 = target.pointer_width.into();
if dl.pointer_size.bits() != target_pointer_width {
- return Err(format!(
- "inconsistent target specification: \"data-layout\" claims \
- pointers are {}-bit, while \"target-pointer-width\" is `{}`",
- dl.pointer_size.bits(),
- target.pointer_width
- ));
+ return Err(TargetDataLayoutErrors::InconsistentTargetPointerWidth {
+ pointer_size: dl.pointer_size.bits(),
+ target: target.pointer_width,
+ });
}
- dl.c_enum_min_size = Integer::from_size(Size::from_bits(target.c_enum_min_bits))?;
+ dl.c_enum_min_size = match Integer::from_size(Size::from_bits(target.c_enum_min_bits)) {
+ Ok(bits) => bits,
+ Err(err) => return Err(TargetDataLayoutErrors::InvalidBitsSize { err }),
+ };
Ok(dl)
}
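A small standalone sketch of the shape of this refactor (hypothetical `LayoutError`, trimmed from the real `TargetDataLayoutErrors`): string-formatted errors become a structured enum that a caller can later map onto translatable diagnostics.

use std::num::ParseIntError;

#[allow(dead_code)]
#[derive(Debug)]
enum LayoutError<'a> {
    InvalidBits { kind: &'a str, bit: &'a str, err: ParseIntError },
    MissingAlignment { cause: &'a str },
}

// Parse a bit count, reporting failures as structured data instead of a String.
fn parse_bits<'a>(s: &'a str, kind: &'a str) -> Result<u64, LayoutError<'a>> {
    s.parse::<u64>()
        .map_err(|err| LayoutError::InvalidBits { kind, bit: s, err })
}

fn main() {
    assert_eq!(parse_bits("64", "size").unwrap(), 64);
    assert!(matches!(parse_bits("x", "size"), Err(LayoutError::InvalidBits { .. })));
}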
@@ -1130,7 +1141,7 @@ pub enum TagEncoding {
/// Niche (values invalid for a type) encoding the discriminant:
/// Discriminant and variant index coincide.
- /// The variant `dataful_variant` contains a niche at an arbitrary
+ /// The variant `untagged_variant` contains a niche at an arbitrary
/// offset (field `tag_field` of the enum), which for a variant with
/// discriminant `d` is set to
/// `(d - niche_variants.start).wrapping_add(niche_start)`.
@@ -1139,7 +1150,7 @@ pub enum TagEncoding {
/// `None` has a null pointer for the second tuple field, and
/// `Some` is the identity function (with a non-null reference).
Niche {
- dataful_variant: VariantIdx,
+ untagged_variant: VariantIdx,
niche_variants: RangeInclusive<VariantIdx>,
niche_start: u128,
},
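A worked sketch of the niche formula quoted in the doc comment above (illustrative only, not rustc code): for a variant with discriminant `d`, the stored tag is `(d - niche_variants.start).wrapping_add(niche_start)`.

fn niche_tag(d: u128, niche_variants_start: u128, niche_start: u128) -> u128 {
    (d - niche_variants_start).wrapping_add(niche_start)
}

fn main() {
    // E.g. niche variants 1..=2 stored in otherwise-invalid tag values
    // starting at 254: discriminant 1 maps to tag 254, discriminant 2 to 255.
    assert_eq!(niche_tag(1, 1, 254), 254);
    assert_eq!(niche_tag(2, 1, 254), 255);
}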
@@ -1381,7 +1392,7 @@ pub struct PointeeInfo {
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum InitKind {
Zero,
- Uninit,
+ UninitMitigated0x01Fill,
}
/// Trait that needs to be implemented by the higher-level type representation
@@ -1487,72 +1498,4 @@ impl<'a, Ty> TyAndLayout<'a, Ty> {
Abi::Aggregate { sized } => sized && self.size.bytes() == 0,
}
}
-
- /// Determines if this type permits "raw" initialization by just transmuting some
- /// memory into an instance of `T`.
- ///
- /// `init_kind` indicates if the memory is zero-initialized or left uninitialized.
- ///
- /// This code is intentionally conservative, and will not detect
- /// * zero init of an enum whose 0 variant does not allow zero initialization
- /// * making uninitialized types who have a full valid range (ints, floats, raw pointers)
- /// * Any form of invalid value being made inside an array (unless the value is uninhabited)
- ///
- /// A strict form of these checks that uses const evaluation exists in
- /// `rustc_const_eval::might_permit_raw_init`, and a tracking issue for making these checks
- /// stricter is <https://github.com/rust-lang/rust/issues/66151>.
- ///
- /// FIXME: Once all the conservatism is removed from here, and the checks are ran by default,
- /// we can use the const evaluation checks always instead.
- pub fn might_permit_raw_init<C>(self, cx: &C, init_kind: InitKind) -> bool
- where
- Self: Copy,
- Ty: TyAbiInterface<'a, C>,
- C: HasDataLayout,
- {
- let scalar_allows_raw_init = move |s: Scalar| -> bool {
- match init_kind {
- InitKind::Zero => {
- // The range must contain 0.
- s.valid_range(cx).contains(0)
- }
- InitKind::Uninit => {
- // The range must include all values.
- s.is_always_valid(cx)
- }
- }
- };
-
- // Check the ABI.
- let valid = match self.abi {
- Abi::Uninhabited => false, // definitely UB
- Abi::Scalar(s) => scalar_allows_raw_init(s),
- Abi::ScalarPair(s1, s2) => scalar_allows_raw_init(s1) && scalar_allows_raw_init(s2),
- Abi::Vector { element: s, count } => count == 0 || scalar_allows_raw_init(s),
- Abi::Aggregate { .. } => true, // Fields are checked below.
- };
- if !valid {
- // This is definitely not okay.
- return false;
- }
-
- // If we have not found an error yet, we need to recursively descend into fields.
- match &self.fields {
- FieldsShape::Primitive | FieldsShape::Union { .. } => {}
- FieldsShape::Array { .. } => {
- // FIXME(#66151): For now, we are conservative and do not check arrays by default.
- }
- FieldsShape::Arbitrary { offsets, .. } => {
- for idx in 0..offsets.len() {
- if !self.field(cx, idx).might_permit_raw_init(cx, init_kind) {
- // We found a field that is unhappy with this kind of initialization.
- return false;
- }
- }
- }
- }
-
- // FIXME(#66151): For now, we are conservative and do not check `self.variants`.
- true
- }
}
diff --git a/compiler/rustc_target/src/lib.rs b/compiler/rustc_target/src/lib.rs
index 59dbea705..aaba0d7f0 100644
--- a/compiler/rustc_target/src/lib.rs
+++ b/compiler/rustc_target/src/lib.rs
@@ -11,11 +11,12 @@
#![feature(assert_matches)]
#![feature(associated_type_bounds)]
#![feature(exhaustive_patterns)]
-#![feature(let_else)]
#![feature(min_specialization)]
#![feature(never_type)]
#![feature(rustc_attrs)]
#![feature(step_trait)]
+#![deny(rustc::untranslatable_diagnostic)]
+#![deny(rustc::diagnostic_outside_of_impl)]
use std::iter::FromIterator;
use std::path::{Path, PathBuf};
diff --git a/compiler/rustc_target/src/spec/aarch64_apple_darwin.rs b/compiler/rustc_target/src/spec/aarch64_apple_darwin.rs
index 9d36e37d7..6d919a4c2 100644
--- a/compiler/rustc_target/src/spec/aarch64_apple_darwin.rs
+++ b/compiler/rustc_target/src/spec/aarch64_apple_darwin.rs
@@ -1,20 +1,20 @@
-use crate::spec::{FramePointer, LinkerFlavor, SanitizerSet, Target, TargetOptions};
+use crate::spec::{FramePointer, SanitizerSet, Target, TargetOptions};
pub fn target() -> Target {
- let mut base = super::apple_base::opts("macos");
+ let arch = "arm64";
+ let mut base = super::apple_base::opts("macos", arch, "");
base.cpu = "apple-a14".into();
base.max_atomic_width = Some(128);
// FIXME: The leak sanitizer currently fails the tests, see #88132.
base.supported_sanitizers = SanitizerSet::ADDRESS | SanitizerSet::CFI | SanitizerSet::THREAD;
- base.add_pre_link_args(LinkerFlavor::Gcc, &["-arch", "arm64"]);
base.link_env_remove.to_mut().extend(super::apple_base::macos_link_env_remove());
// Clang automatically chooses a more specific target based on
// MACOSX_DEPLOYMENT_TARGET. To enable cross-language LTO to work
// correctly, we do too.
- let llvm_target = super::apple_base::macos_llvm_target("arm64");
+ let llvm_target = super::apple_base::macos_llvm_target(arch);
Target {
llvm_target: llvm_target.into(),
diff --git a/compiler/rustc_target/src/spec/aarch64_apple_ios_macabi.rs b/compiler/rustc_target/src/spec/aarch64_apple_ios_macabi.rs
index 1dad07a9a..2d2671549 100644
--- a/compiler/rustc_target/src/spec/aarch64_apple_ios_macabi.rs
+++ b/compiler/rustc_target/src/spec/aarch64_apple_ios_macabi.rs
@@ -1,11 +1,11 @@
use super::apple_sdk_base::{opts, Arch};
-use crate::spec::{FramePointer, LinkerFlavor, Target, TargetOptions};
+use crate::spec::{Cc, FramePointer, LinkerFlavor, Lld, Target, TargetOptions};
pub fn target() -> Target {
let llvm_target = "arm64-apple-ios14.0-macabi";
let mut base = opts("ios", Arch::Arm64_macabi);
- base.add_pre_link_args(LinkerFlavor::Gcc, &["-target", llvm_target]);
+ base.add_pre_link_args(LinkerFlavor::Darwin(Cc::Yes, Lld::No), &["-target", llvm_target]);
Target {
llvm_target: llvm_target.into(),
diff --git a/compiler/rustc_target/src/spec/aarch64_nintendo_switch_freestanding.rs b/compiler/rustc_target/src/spec/aarch64_nintendo_switch_freestanding.rs
index 1b7161fbb..529e98d2c 100644
--- a/compiler/rustc_target/src/spec/aarch64_nintendo_switch_freestanding.rs
+++ b/compiler/rustc_target/src/spec/aarch64_nintendo_switch_freestanding.rs
@@ -1,4 +1,4 @@
-use super::{LinkerFlavor, LldFlavor, PanicStrategy, RelroLevel, Target, TargetOptions};
+use super::{Cc, LinkerFlavor, Lld, PanicStrategy, RelroLevel, Target, TargetOptions};
const LINKER_SCRIPT: &str = include_str!("./aarch64_nintendo_switch_freestanding_linker_script.ld");
@@ -10,7 +10,7 @@ pub fn target() -> Target {
data_layout: "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128".into(),
arch: "aarch64".into(),
options: TargetOptions {
- linker_flavor: LinkerFlavor::Lld(LldFlavor::Ld),
+ linker_flavor: LinkerFlavor::Gnu(Cc::No, Lld::Yes),
linker: Some("rust-lld".into()),
link_script: Some(LINKER_SCRIPT.into()),
os: "horizon".into(),
@@ -18,7 +18,6 @@ pub fn target() -> Target {
panic_strategy: PanicStrategy::Abort,
position_independent_executables: true,
dynamic_linking: true,
- executables: true,
relro_level: RelroLevel::Off,
..Default::default()
},
diff --git a/compiler/rustc_target/src/spec/aarch64_pc_windows_gnullvm.rs b/compiler/rustc_target/src/spec/aarch64_pc_windows_gnullvm.rs
index 59c6a95c2..98d3e79c8 100644
--- a/compiler/rustc_target/src/spec/aarch64_pc_windows_gnullvm.rs
+++ b/compiler/rustc_target/src/spec/aarch64_pc_windows_gnullvm.rs
@@ -2,7 +2,7 @@ use crate::spec::Target;
pub fn target() -> Target {
let mut base = super::windows_gnullvm_base::opts();
- base.max_atomic_width = Some(64);
+ base.max_atomic_width = Some(128);
base.features = "+neon,+fp-armv8".into();
base.linker = Some("aarch64-w64-mingw32-clang".into());
diff --git a/compiler/rustc_target/src/spec/aarch64_pc_windows_msvc.rs b/compiler/rustc_target/src/spec/aarch64_pc_windows_msvc.rs
index 856ec4fb0..7c4544b3f 100644
--- a/compiler/rustc_target/src/spec/aarch64_pc_windows_msvc.rs
+++ b/compiler/rustc_target/src/spec/aarch64_pc_windows_msvc.rs
@@ -2,7 +2,7 @@ use crate::spec::Target;
pub fn target() -> Target {
let mut base = super::windows_msvc_base::opts();
- base.max_atomic_width = Some(64);
+ base.max_atomic_width = Some(128);
base.features = "+neon,+fp-armv8".into();
Target {
diff --git a/compiler/rustc_target/src/spec/aarch64_unknown_none.rs b/compiler/rustc_target/src/spec/aarch64_unknown_none.rs
index d3fd7051a..4ae6d4120 100644
--- a/compiler/rustc_target/src/spec/aarch64_unknown_none.rs
+++ b/compiler/rustc_target/src/spec/aarch64_unknown_none.rs
@@ -6,11 +6,11 @@
//
// For example, `-C target-cpu=cortex-a53`.
-use super::{LinkerFlavor, LldFlavor, PanicStrategy, RelocModel, Target, TargetOptions};
+use super::{Cc, LinkerFlavor, Lld, PanicStrategy, RelocModel, Target, TargetOptions};
pub fn target() -> Target {
let opts = TargetOptions {
- linker_flavor: LinkerFlavor::Lld(LldFlavor::Ld),
+ linker_flavor: LinkerFlavor::Gnu(Cc::No, Lld::Yes),
linker: Some("rust-lld".into()),
features: "+strict-align,+neon,+fp-armv8".into(),
relocation_model: RelocModel::Static,
diff --git a/compiler/rustc_target/src/spec/aarch64_unknown_none_softfloat.rs b/compiler/rustc_target/src/spec/aarch64_unknown_none_softfloat.rs
index 6316abe1b..2385cb69a 100644
--- a/compiler/rustc_target/src/spec/aarch64_unknown_none_softfloat.rs
+++ b/compiler/rustc_target/src/spec/aarch64_unknown_none_softfloat.rs
@@ -6,12 +6,12 @@
//
// For example, `-C target-cpu=cortex-a53`.
-use super::{LinkerFlavor, LldFlavor, PanicStrategy, RelocModel, Target, TargetOptions};
+use super::{Cc, LinkerFlavor, Lld, PanicStrategy, RelocModel, Target, TargetOptions};
pub fn target() -> Target {
let opts = TargetOptions {
abi: "softfloat".into(),
- linker_flavor: LinkerFlavor::Lld(LldFlavor::Ld),
+ linker_flavor: LinkerFlavor::Gnu(Cc::No, Lld::Yes),
linker: Some("rust-lld".into()),
features: "+strict-align,-neon,-fp-armv8".into(),
relocation_model: RelocModel::Static,
diff --git a/compiler/rustc_target/src/spec/aarch64_unknown_uefi.rs b/compiler/rustc_target/src/spec/aarch64_unknown_uefi.rs
index 162b091b2..817ff2422 100644
--- a/compiler/rustc_target/src/spec/aarch64_unknown_uefi.rs
+++ b/compiler/rustc_target/src/spec/aarch64_unknown_uefi.rs
@@ -2,13 +2,13 @@
// uefi-base module for generic UEFI options.
use super::uefi_msvc_base;
-use crate::spec::{LinkerFlavor, Target};
+use crate::spec::{LinkerFlavor, Lld, Target};
pub fn target() -> Target {
let mut base = uefi_msvc_base::opts();
- base.max_atomic_width = Some(64);
- base.add_pre_link_args(LinkerFlavor::Msvc, &["/machine:arm64"]);
+ base.max_atomic_width = Some(128);
+ base.add_pre_link_args(LinkerFlavor::Msvc(Lld::No), &["/machine:arm64"]);
Target {
llvm_target: "aarch64-unknown-windows".into(),
diff --git a/compiler/rustc_target/src/spec/aarch64_uwp_windows_msvc.rs b/compiler/rustc_target/src/spec/aarch64_uwp_windows_msvc.rs
index 54247fd93..db4dbf817 100644
--- a/compiler/rustc_target/src/spec/aarch64_uwp_windows_msvc.rs
+++ b/compiler/rustc_target/src/spec/aarch64_uwp_windows_msvc.rs
@@ -2,7 +2,7 @@ use crate::spec::Target;
pub fn target() -> Target {
let mut base = super::windows_uwp_msvc_base::opts();
- base.max_atomic_width = Some(64);
+ base.max_atomic_width = Some(128);
Target {
llvm_target: "aarch64-pc-windows-msvc".into(),
diff --git a/compiler/rustc_target/src/spec/abi.rs b/compiler/rustc_target/src/spec/abi.rs
index 337554dc9..ce45fa139 100644
--- a/compiler/rustc_target/src/spec/abi.rs
+++ b/compiler/rustc_target/src/spec/abi.rs
@@ -1,6 +1,8 @@
use std::fmt;
use rustc_macros::HashStable_Generic;
+use rustc_span::symbol::sym;
+use rustc_span::{Span, Symbol};
#[cfg(test)]
mod tests;
@@ -94,6 +96,142 @@ pub fn all_names() -> Vec<&'static str> {
AbiDatas.iter().map(|d| d.name).collect()
}
+pub fn enabled_names(features: &rustc_feature::Features, span: Span) -> Vec<&'static str> {
+ AbiDatas
+ .iter()
+ .map(|d| d.name)
+ .filter(|name| is_enabled(features, span, name).is_ok())
+ .collect()
+}
+
+pub enum AbiDisabled {
+ Unstable { feature: Symbol, explain: &'static str },
+ Unrecognized,
+}
+
+pub fn is_enabled(
+ features: &rustc_feature::Features,
+ span: Span,
+ name: &str,
+) -> Result<(), AbiDisabled> {
+ let s = is_stable(name);
+ if let Err(AbiDisabled::Unstable { feature, .. }) = s {
+ if features.enabled(feature) || span.allows_unstable(feature) {
+ return Ok(());
+ }
+ }
+ s
+}
+
+pub fn is_stable(name: &str) -> Result<(), AbiDisabled> {
+ match name {
+ // Stable
+ "Rust" | "C" | "cdecl" | "stdcall" | "fastcall" | "aapcs" | "win64" | "sysv64"
+ | "system" => Ok(()),
+ "rust-intrinsic" => Err(AbiDisabled::Unstable {
+ feature: sym::intrinsics,
+ explain: "intrinsics are subject to change",
+ }),
+ "platform-intrinsic" => Err(AbiDisabled::Unstable {
+ feature: sym::platform_intrinsics,
+ explain: "platform intrinsics are experimental and possibly buggy",
+ }),
+ "vectorcall" => Err(AbiDisabled::Unstable {
+ feature: sym::abi_vectorcall,
+ explain: "vectorcall is experimental and subject to change",
+ }),
+ "thiscall" => Err(AbiDisabled::Unstable {
+ feature: sym::abi_thiscall,
+ explain: "thiscall is experimental and subject to change",
+ }),
+ "rust-call" => Err(AbiDisabled::Unstable {
+ feature: sym::unboxed_closures,
+ explain: "rust-call ABI is subject to change",
+ }),
+ "rust-cold" => Err(AbiDisabled::Unstable {
+ feature: sym::rust_cold_cc,
+ explain: "rust-cold is experimental and subject to change",
+ }),
+ "ptx-kernel" => Err(AbiDisabled::Unstable {
+ feature: sym::abi_ptx,
+ explain: "PTX ABIs are experimental and subject to change",
+ }),
+ "unadjusted" => Err(AbiDisabled::Unstable {
+ feature: sym::abi_unadjusted,
+ explain: "unadjusted ABI is an implementation detail and perma-unstable",
+ }),
+ "msp430-interrupt" => Err(AbiDisabled::Unstable {
+ feature: sym::abi_msp430_interrupt,
+ explain: "msp430-interrupt ABI is experimental and subject to change",
+ }),
+ "x86-interrupt" => Err(AbiDisabled::Unstable {
+ feature: sym::abi_x86_interrupt,
+ explain: "x86-interrupt ABI is experimental and subject to change",
+ }),
+ "amdgpu-kernel" => Err(AbiDisabled::Unstable {
+ feature: sym::abi_amdgpu_kernel,
+ explain: "amdgpu-kernel ABI is experimental and subject to change",
+ }),
+ "avr-interrupt" | "avr-non-blocking-interrupt" => Err(AbiDisabled::Unstable {
+ feature: sym::abi_avr_interrupt,
+ explain: "avr-interrupt and avr-non-blocking-interrupt ABIs are experimental and subject to change",
+ }),
+ "efiapi" => Err(AbiDisabled::Unstable {
+ feature: sym::abi_efiapi,
+ explain: "efiapi ABI is experimental and subject to change",
+ }),
+ "C-cmse-nonsecure-call" => Err(AbiDisabled::Unstable {
+ feature: sym::abi_c_cmse_nonsecure_call,
+ explain: "C-cmse-nonsecure-call ABI is experimental and subject to change",
+ }),
+ "C-unwind" => Err(AbiDisabled::Unstable {
+ feature: sym::c_unwind,
+ explain: "C-unwind ABI is experimental and subject to change",
+ }),
+ "stdcall-unwind" => Err(AbiDisabled::Unstable {
+ feature: sym::c_unwind,
+ explain: "stdcall-unwind ABI is experimental and subject to change",
+ }),
+ "system-unwind" => Err(AbiDisabled::Unstable {
+ feature: sym::c_unwind,
+ explain: "system-unwind ABI is experimental and subject to change",
+ }),
+ "thiscall-unwind" => Err(AbiDisabled::Unstable {
+ feature: sym::c_unwind,
+ explain: "thiscall-unwind ABI is experimental and subject to change",
+ }),
+ "cdecl-unwind" => Err(AbiDisabled::Unstable {
+ feature: sym::c_unwind,
+ explain: "cdecl-unwind ABI is experimental and subject to change",
+ }),
+ "fastcall-unwind" => Err(AbiDisabled::Unstable {
+ feature: sym::c_unwind,
+ explain: "fastcall-unwind ABI is experimental and subject to change",
+ }),
+ "vectorcall-unwind" => Err(AbiDisabled::Unstable {
+ feature: sym::c_unwind,
+ explain: "vectorcall-unwind ABI is experimental and subject to change",
+ }),
+ "aapcs-unwind" => Err(AbiDisabled::Unstable {
+ feature: sym::c_unwind,
+ explain: "aapcs-unwind ABI is experimental and subject to change",
+ }),
+ "win64-unwind" => Err(AbiDisabled::Unstable {
+ feature: sym::c_unwind,
+ explain: "win64-unwind ABI is experimental and subject to change",
+ }),
+ "sysv64-unwind" => Err(AbiDisabled::Unstable {
+ feature: sym::c_unwind,
+ explain: "sysv64-unwind ABI is experimental and subject to change",
+ }),
+ "wasm" => Err(AbiDisabled::Unstable {
+ feature: sym::wasm_abi,
+ explain: "wasm ABI is experimental and subject to change",
+ }),
+ _ => Err(AbiDisabled::Unrecognized),
+ }
+}
+
impl Abi {
/// Default ABI chosen for `extern fn` declarations without an explicit ABI.
pub const FALLBACK: Abi = Abi::C { unwind: false };
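A hedged standalone mirror of the gating logic added above (trimmed to two ABI names; the feature lookup is faked with a slice because `rustc_feature::Features` and `Span` are not available outside the compiler):

#[allow(dead_code)]
enum AbiDisabled {
    Unstable { feature: &'static str, explain: &'static str },
    Unrecognized,
}

fn is_stable(name: &str) -> Result<(), AbiDisabled> {
    match name {
        "Rust" | "C" => Ok(()),
        "rust-intrinsic" => Err(AbiDisabled::Unstable {
            feature: "intrinsics",
            explain: "intrinsics are subject to change",
        }),
        _ => Err(AbiDisabled::Unrecognized),
    }
}

// Unstable ABIs are accepted only when their feature gate is enabled.
fn is_enabled(enabled_features: &[&str], name: &str) -> Result<(), AbiDisabled> {
    let s = is_stable(name);
    if let Err(AbiDisabled::Unstable { feature, .. }) = &s {
        if enabled_features.contains(feature) {
            return Ok(());
        }
    }
    s
}

fn main() {
    assert!(is_enabled(&[], "C").is_ok());
    assert!(is_enabled(&[], "rust-intrinsic").is_err());
    assert!(is_enabled(&["intrinsics"], "rust-intrinsic").is_ok());
}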
diff --git a/compiler/rustc_target/src/spec/android_base.rs b/compiler/rustc_target/src/spec/android_base.rs
index dc06597db..9c1df1a06 100644
--- a/compiler/rustc_target/src/spec/android_base.rs
+++ b/compiler/rustc_target/src/spec/android_base.rs
@@ -1,15 +1,16 @@
-use crate::spec::TargetOptions;
+use crate::spec::{SanitizerSet, TargetOptions};
pub fn opts() -> TargetOptions {
let mut base = super::linux_base::opts();
base.os = "android".into();
+ base.is_like_android = true;
base.default_dwarf_version = 2;
- base.position_independent_executables = true;
base.has_thread_local = false;
+ base.supported_sanitizers = SanitizerSet::ADDRESS;
// This is for backward compatibility, see https://github.com/rust-lang/rust/issues/49867
// for context. (At that time, there was no `-C force-unwind-tables`, so the only solution
// was to always emit `uwtable`).
base.default_uwtable = true;
- base.crt_static_respected = false;
+ base.crt_static_respected = true;
base
}
diff --git a/compiler/rustc_target/src/spec/apple_base.rs b/compiler/rustc_target/src/spec/apple_base.rs
index 15e4fb9be..40bc59ca1 100644
--- a/compiler/rustc_target/src/spec/apple_base.rs
+++ b/compiler/rustc_target/src/spec/apple_base.rs
@@ -1,8 +1,41 @@
use std::{borrow::Cow, env};
-use crate::spec::{cvs, FramePointer, LldFlavor, SplitDebuginfo, TargetOptions};
+use crate::spec::{cvs, Cc, DebuginfoKind, FramePointer, LinkArgs};
+use crate::spec::{LinkerFlavor, Lld, SplitDebuginfo, StaticCow, TargetOptions};
+
+fn pre_link_args(os: &'static str, arch: &'static str, abi: &'static str) -> LinkArgs {
+ let platform_name: StaticCow<str> = match abi {
+ "sim" => format!("{}-simulator", os).into(),
+ "macabi" => "mac-catalyst".into(),
+ _ => os.into(),
+ };
+
+ let platform_version: StaticCow<str> = match os.as_ref() {
+ "ios" => ios_lld_platform_version(),
+ "tvos" => tvos_lld_platform_version(),
+ "watchos" => watchos_lld_platform_version(),
+ "macos" => macos_lld_platform_version(arch),
+ _ => unreachable!(),
+ }
+ .into();
+
+ let mut args = TargetOptions::link_args(
+ LinkerFlavor::Darwin(Cc::No, Lld::No),
+ &["-arch", arch, "-platform_version"],
+ );
+ super::add_link_args_iter(
+ &mut args,
+ LinkerFlavor::Darwin(Cc::No, Lld::No),
+ [platform_name, platform_version.clone(), platform_version].into_iter(),
+ );
+ if abi != "macabi" {
+ super::add_link_args(&mut args, LinkerFlavor::Darwin(Cc::Yes, Lld::No), &["-arch", arch]);
+ }
+
+ args
+}
-pub fn opts(os: &'static str) -> TargetOptions {
+pub fn opts(os: &'static str, arch: &'static str, abi: &'static str) -> TargetOptions {
// ELF TLS is only available in macOS 10.7+. If you try to compile for 10.6
// either the linker will complain if it is used or the binary will end up
// segfaulting at runtime when run on 10.6. Rust by default supports macOS
@@ -21,10 +54,11 @@ pub fn opts(os: &'static str) -> TargetOptions {
TargetOptions {
os: os.into(),
vendor: "apple".into(),
+ linker_flavor: LinkerFlavor::Darwin(Cc::Yes, Lld::No),
// macOS has -dead_strip, which doesn't rely on function_sections
function_sections: false,
dynamic_linking: true,
- linker_is_gnu: false,
+ pre_link_args: pre_link_args(os, arch, abi),
families: cvs!["unix"],
is_like_osx: true,
default_dwarf_version: 2,
@@ -36,11 +70,16 @@ pub fn opts(os: &'static str) -> TargetOptions {
abi_return_struct_as_int: true,
emit_debug_gdb_scripts: false,
eh_frame_header: false,
- lld_flavor: LldFlavor::Ld64,
+ debuginfo_kind: DebuginfoKind::DwarfDsym,
// The historical default for macOS targets is to run `dsymutil` which
// generates a packed version of debuginfo split from the main file.
split_debuginfo: SplitDebuginfo::Packed,
+ supported_split_debuginfo: Cow::Borrowed(&[
+ SplitDebuginfo::Packed,
+ SplitDebuginfo::Unpacked,
+ SplitDebuginfo::Off,
+ ]),
// This environment variable is pretty magical but is intended for
// producing deterministic builds. This was first discovered to be used
@@ -73,12 +112,17 @@ fn macos_deployment_target(arch: &str) -> (u32, u32) {
.unwrap_or_else(|| macos_default_deployment_target(arch))
}
+fn macos_lld_platform_version(arch: &str) -> String {
+ let (major, minor) = macos_deployment_target(arch);
+ format!("{}.{}", major, minor)
+}
+
pub fn macos_llvm_target(arch: &str) -> String {
let (major, minor) = macos_deployment_target(arch);
format!("{}-apple-macosx{}.{}.0", arch, major, minor)
}
-pub fn macos_link_env_remove() -> Vec<Cow<'static, str>> {
+pub fn macos_link_env_remove() -> Vec<StaticCow<str>> {
let mut env_remove = Vec::with_capacity(2);
// Remove the `SDKROOT` environment variable if it's clearly set for the wrong platform, which
// may occur when we're linking a custom build script while targeting iOS for example.
@@ -109,7 +153,7 @@ pub fn ios_llvm_target(arch: &str) -> String {
format!("{}-apple-ios{}.{}.0", arch, major, minor)
}
-pub fn ios_lld_platform_version() -> String {
+fn ios_lld_platform_version() -> String {
let (major, minor) = ios_deployment_target();
format!("{}.{}", major, minor)
}
@@ -123,7 +167,7 @@ fn tvos_deployment_target() -> (u32, u32) {
deployment_target("TVOS_DEPLOYMENT_TARGET").unwrap_or((7, 0))
}
-pub fn tvos_lld_platform_version() -> String {
+fn tvos_lld_platform_version() -> String {
let (major, minor) = tvos_deployment_target();
format!("{}.{}", major, minor)
}
@@ -132,7 +176,7 @@ fn watchos_deployment_target() -> (u32, u32) {
deployment_target("WATCHOS_DEPLOYMENT_TARGET").unwrap_or((5, 0))
}
-pub fn watchos_lld_platform_version() -> String {
+fn watchos_lld_platform_version() -> String {
let (major, minor) = watchos_deployment_target();
format!("{}.{}", major, minor)
}
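// Illustrative sketch (not part of the patch): the consolidated `pre_link_args`
// above ends up passing ld64 `-platform_version <platform> <min_version> <sdk_version>`,
// with the platform name derived from the target ABI the same way as in the
// `apple_sdk_base::pre_link_args` removed below. Helper names here are hypothetical.
fn platform_name(os: &str, abi: &str) -> String {
    match abi {
        "sim" => format!("{}-simulator", os),
        "macabi" => "mac-catalyst".to_string(),
        _ => os.to_string(),
    }
}

fn platform_version_args(os: &str, abi: &str, deployment_target: &str) -> Vec<String> {
    // The target specs pass the deployment target for both version slots.
    vec![
        "-platform_version".to_string(),
        platform_name(os, abi),
        deployment_target.to_string(),
        deployment_target.to_string(),
    ]
}

fn main() {
    assert_eq!(
        platform_version_args("ios", "sim", "10.0"),
        ["-platform_version", "ios-simulator", "10.0", "10.0"]
    );
}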
diff --git a/compiler/rustc_target/src/spec/apple_sdk_base.rs b/compiler/rustc_target/src/spec/apple_sdk_base.rs
index d77558f0f..49e302676 100644
--- a/compiler/rustc_target/src/spec/apple_sdk_base.rs
+++ b/compiler/rustc_target/src/spec/apple_sdk_base.rs
@@ -1,4 +1,4 @@
-use crate::spec::{cvs, LinkArgs, LinkerFlavor, LldFlavor, TargetOptions};
+use crate::spec::{cvs, TargetOptions};
use std::borrow::Cow;
use Arch::*;
@@ -61,53 +61,12 @@ fn link_env_remove(arch: Arch) -> Cow<'static, [Cow<'static, str>]> {
}
}
-fn pre_link_args(os: &'static str, arch: Arch) -> LinkArgs {
- let mut args = LinkArgs::new();
-
- let target_abi = target_abi(arch);
-
- let platform_name = match target_abi {
- "sim" => format!("{}-simulator", os),
- "macabi" => "mac-catalyst".to_string(),
- _ => os.to_string(),
- };
-
- let platform_version = match os.as_ref() {
- "ios" => super::apple_base::ios_lld_platform_version(),
- "tvos" => super::apple_base::tvos_lld_platform_version(),
- "watchos" => super::apple_base::watchos_lld_platform_version(),
- _ => unreachable!(),
- };
-
- let arch_str = target_arch_name(arch);
-
- if target_abi != "macabi" {
- args.insert(LinkerFlavor::Gcc, vec!["-arch".into(), arch_str.into()]);
- }
-
- args.insert(
- LinkerFlavor::Lld(LldFlavor::Ld64),
- vec![
- "-arch".into(),
- arch_str.into(),
- "-platform_version".into(),
- platform_name.into(),
- platform_version.clone().into(),
- platform_version.into(),
- ],
- );
-
- args
-}
-
pub fn opts(os: &'static str, arch: Arch) -> TargetOptions {
TargetOptions {
abi: target_abi(arch).into(),
cpu: target_cpu(arch).into(),
- dynamic_linking: false,
- pre_link_args: pre_link_args(os, arch),
link_env_remove: link_env_remove(arch),
has_thread_local: false,
- ..super::apple_base::opts(os)
+ ..super::apple_base::opts(os, target_arch_name(arch), target_abi(arch))
}
}
diff --git a/compiler/rustc_target/src/spec/arm64_32_apple_watchos.rs b/compiler/rustc_target/src/spec/arm64_32_apple_watchos.rs
index 7b23fe1c4..cb7f5f2a5 100644
--- a/compiler/rustc_target/src/spec/arm64_32_apple_watchos.rs
+++ b/compiler/rustc_target/src/spec/arm64_32_apple_watchos.rs
@@ -10,7 +10,7 @@ pub fn target() -> Target {
arch: "aarch64".into(),
options: TargetOptions {
features: "+neon,+fp-armv8,+apple-a7".into(),
- max_atomic_width: Some(64),
+ max_atomic_width: Some(128),
forces_embed_bitcode: true,
// These arguments are not actually invoked - they just have
// to look right to pass App Store validation.
diff --git a/compiler/rustc_target/src/spec/armeb_unknown_linux_gnueabi.rs b/compiler/rustc_target/src/spec/armeb_unknown_linux_gnueabi.rs
new file mode 100644
index 000000000..4836f3cf7
--- /dev/null
+++ b/compiler/rustc_target/src/spec/armeb_unknown_linux_gnueabi.rs
@@ -0,0 +1,19 @@
+use crate::abi::Endian;
+use crate::spec::{Target, TargetOptions};
+
+pub fn target() -> Target {
+ Target {
+ llvm_target: "armeb-unknown-linux-gnueabi".into(),
+ pointer_width: 32,
+ data_layout: "E-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64".into(),
+ arch: "arm".into(),
+ options: TargetOptions {
+ abi: "eabi".into(),
+ features: "+strict-align,+v8,+crc".into(),
+ endian: Endian::Big,
+ max_atomic_width: Some(64),
+ mcount: "\u{1}__gnu_mcount_nc".into(),
+ ..super::linux_gnu_base::opts()
+ },
+ }
+}
diff --git a/compiler/rustc_target/src/spec/armebv7r_none_eabi.rs b/compiler/rustc_target/src/spec/armebv7r_none_eabi.rs
index 511693abe..8c65d6afc 100644
--- a/compiler/rustc_target/src/spec/armebv7r_none_eabi.rs
+++ b/compiler/rustc_target/src/spec/armebv7r_none_eabi.rs
@@ -1,8 +1,7 @@
// Targets the Big endian Cortex-R4/R5 processor (ARMv7-R)
use crate::abi::Endian;
-use crate::spec::{LinkerFlavor, LldFlavor, PanicStrategy, RelocModel};
-use crate::spec::{Target, TargetOptions};
+use crate::spec::{Cc, LinkerFlavor, Lld, PanicStrategy, RelocModel, Target, TargetOptions};
pub fn target() -> Target {
Target {
@@ -13,7 +12,7 @@ pub fn target() -> Target {
options: TargetOptions {
abi: "eabi".into(),
endian: Endian::Big,
- linker_flavor: LinkerFlavor::Lld(LldFlavor::Ld),
+ linker_flavor: LinkerFlavor::Gnu(Cc::No, Lld::Yes),
linker: Some("rust-lld".into()),
relocation_model: RelocModel::Static,
panic_strategy: PanicStrategy::Abort,
diff --git a/compiler/rustc_target/src/spec/armebv7r_none_eabihf.rs b/compiler/rustc_target/src/spec/armebv7r_none_eabihf.rs
index 5df4a0a15..7013bc60d 100644
--- a/compiler/rustc_target/src/spec/armebv7r_none_eabihf.rs
+++ b/compiler/rustc_target/src/spec/armebv7r_none_eabihf.rs
@@ -1,8 +1,7 @@
// Targets the Cortex-R4F/R5F processor (ARMv7-R)
use crate::abi::Endian;
-use crate::spec::{LinkerFlavor, LldFlavor, PanicStrategy, RelocModel};
-use crate::spec::{Target, TargetOptions};
+use crate::spec::{Cc, LinkerFlavor, Lld, PanicStrategy, RelocModel, Target, TargetOptions};
pub fn target() -> Target {
Target {
@@ -13,7 +12,7 @@ pub fn target() -> Target {
options: TargetOptions {
abi: "eabihf".into(),
endian: Endian::Big,
- linker_flavor: LinkerFlavor::Lld(LldFlavor::Ld),
+ linker_flavor: LinkerFlavor::Gnu(Cc::No, Lld::Yes),
linker: Some("rust-lld".into()),
relocation_model: RelocModel::Static,
panic_strategy: PanicStrategy::Abort,
diff --git a/compiler/rustc_target/src/spec/armv4t_none_eabi.rs b/compiler/rustc_target/src/spec/armv4t_none_eabi.rs
new file mode 100644
index 000000000..7ac1aab3b
--- /dev/null
+++ b/compiler/rustc_target/src/spec/armv4t_none_eabi.rs
@@ -0,0 +1,56 @@
+//! Targets the ARMv4T, with code as `a32` code by default.
+//!
+//! Primarily of use for the GBA, but usable with other devices too.
+//!
+//! Please ping @Lokathor if changes are needed.
+//!
+//! This target profile assumes that you have the ARM binutils in your path
+//! (specifically the linker, `arm-none-eabi-ld`). They can be obtained for free
+//! for all major OSes from the ARM developer's website, and they may also be
+//! available in your system's package manager. Unfortunately, the standard
+//! linker that Rust uses (`lld`) only supports as far back as `ARMv5TE`, so we
+//! must use the GNU `ld` linker.
+//!
+//! **Important:** This target profile **does not** specify a linker script. You
+//! just get the default link script when you build a binary for this target.
+//! The default link script is very likely wrong, so you should use
+//! `-Clink-arg=-Tmy_script.ld` to override that with a correct linker script.
+
+use crate::spec::{cvs, Cc, LinkerFlavor, Lld, PanicStrategy, RelocModel, Target, TargetOptions};
+
+pub fn target() -> Target {
+ Target {
+ llvm_target: "armv4t-none-eabi".into(),
+ pointer_width: 32,
+ arch: "arm".into(),
+ /* Data layout args are '-' separated:
+ * little endian
+ * stack is 64-bit aligned (EABI)
+ * pointers are 32-bit
+ * i64 must be 64-bit aligned (EABI)
+ * mangle names with ELF style
+ * native integers are 32-bit
+ * All other elements are default
+ */
+ data_layout: "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64".into(),
+ options: TargetOptions {
+ abi: "eabi".into(),
+ linker_flavor: LinkerFlavor::Gnu(Cc::No, Lld::No),
+ linker: Some("arm-none-eabi-ld".into()),
+ asm_args: cvs!["-mthumb-interwork", "-march=armv4t", "-mlittle-endian",],
+ // Force-enable 32-bit atomics, which allows the use of atomic load/store only.
+ // The resulting atomics are ABI incompatible with atomics backed by libatomic.
+ features: "+soft-float,+strict-align,+atomics-32".into(),
+ main_needs_argc_argv: false,
+ atomic_cas: false,
+ has_thumb_interworking: true,
+ relocation_model: RelocModel::Static,
+ panic_strategy: PanicStrategy::Abort,
+ // from thumb_base, rust-lang/rust#44993.
+ emit_debug_gdb_scripts: false,
+ // from thumb_base, apparently gcc/clang give enums a minimum of 8 bits on no-os targets
+ c_enum_min_bits: 8,
+ ..Default::default()
+ },
+ }
+}
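// Illustrative sketch (not part of the patch): as the module docs above say, this
// target provides no linker script, so a project has to supply one itself, e.g.
// from a hypothetical build.rs; `my_script.ld` is just the placeholder name from
// the docs.
fn main() {
    println!("cargo:rerun-if-changed=my_script.ld");
    // Equivalent to passing `-Clink-arg=-Tmy_script.ld` by hand.
    println!("cargo:rustc-link-arg=-Tmy_script.ld");
}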
diff --git a/compiler/rustc_target/src/spec/armv5te_none_eabi.rs b/compiler/rustc_target/src/spec/armv5te_none_eabi.rs
new file mode 100644
index 000000000..dfd27b654
--- /dev/null
+++ b/compiler/rustc_target/src/spec/armv5te_none_eabi.rs
@@ -0,0 +1,41 @@
+//! Targets the ARMv5TE, with code as `a32` code by default.
+
+use crate::spec::{cvs, FramePointer, Target, TargetOptions};
+
+pub fn target() -> Target {
+ Target {
+ llvm_target: "armv5te-none-eabi".into(),
+ pointer_width: 32,
+ arch: "arm".into(),
+ /* Data layout args are '-' separated:
+ * little endian
+ * stack is 64-bit aligned (EABI)
+ * pointers are 32-bit
+ * i64 must be 64-bit aligned (EABI)
+ * mangle names with ELF style
+ * native integers are 32-bit
+ * All other elements are default
+ */
+ data_layout: "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64".into(),
+
+ options: TargetOptions {
+ abi: "eabi".into(),
+ // extra args passed to the external assembler (assuming `arm-none-eabi-as`):
+ // * activate t32/a32 interworking
+ // * use arch ARMv5TE
+ // * use little-endian
+ asm_args: cvs!["-mthumb-interwork", "-march=armv5te", "-mlittle-endian",],
+ // minimum extra features; these cannot be disabled via -C
+ // Also force-enable 32-bit atomics, which allows the use of atomic load/store only.
+ // The resulting atomics are ABI incompatible with atomics backed by libatomic.
+ features: "+soft-float,+strict-align,+atomics-32".into(),
+ frame_pointer: FramePointer::MayOmit,
+ main_needs_argc_argv: false,
+ // don't have atomic compare-and-swap
+ atomic_cas: false,
+ has_thumb_interworking: true,
+
+ ..super::thumb_base::opts()
+ },
+ }
+}
diff --git a/compiler/rustc_target/src/spec/armv6k_nintendo_3ds.rs b/compiler/rustc_target/src/spec/armv6k_nintendo_3ds.rs
index 1bba39393..40ec6f961 100644
--- a/compiler/rustc_target/src/spec/armv6k_nintendo_3ds.rs
+++ b/compiler/rustc_target/src/spec/armv6k_nintendo_3ds.rs
@@ -1,4 +1,4 @@
-use crate::spec::{cvs, LinkerFlavor, RelocModel, Target, TargetOptions};
+use crate::spec::{cvs, Cc, LinkerFlavor, Lld, RelocModel, Target, TargetOptions};
/// A base target for Nintendo 3DS devices using the devkitARM toolchain.
///
@@ -6,7 +6,7 @@ use crate::spec::{cvs, LinkerFlavor, RelocModel, Target, TargetOptions};
pub fn target() -> Target {
let pre_link_args = TargetOptions::link_args(
- LinkerFlavor::Gcc,
+ LinkerFlavor::Gnu(Cc::Yes, Lld::No),
&["-specs=3dsx.specs", "-mtune=mpcore", "-mfloat-abi=hard", "-mtp=soft"],
);
@@ -21,7 +21,6 @@ pub fn target() -> Target {
env: "newlib".into(),
vendor: "nintendo".into(),
abi: "eabihf".into(),
- linker_flavor: LinkerFlavor::Gcc,
cpu: "mpcore".into(),
families: cvs!["unix"],
linker: Some("arm-none-eabi-gcc".into()),
diff --git a/compiler/rustc_target/src/spec/armv7_linux_androideabi.rs b/compiler/rustc_target/src/spec/armv7_linux_androideabi.rs
index 38c117a49..402e0fd92 100644
--- a/compiler/rustc_target/src/spec/armv7_linux_androideabi.rs
+++ b/compiler/rustc_target/src/spec/armv7_linux_androideabi.rs
@@ -1,4 +1,4 @@
-use crate::spec::{LinkerFlavor, SanitizerSet, Target, TargetOptions};
+use crate::spec::{Cc, LinkerFlavor, Lld, SanitizerSet, Target, TargetOptions};
// This target is for the baseline of the Android v7a ABI
// in thumb mode. It's named armv7-* instead of thumbv7-*
@@ -10,7 +10,7 @@ use crate::spec::{LinkerFlavor, SanitizerSet, Target, TargetOptions};
pub fn target() -> Target {
let mut base = super::android_base::opts();
- base.add_pre_link_args(LinkerFlavor::Gcc, &["-march=armv7-a"]);
+ base.add_pre_link_args(LinkerFlavor::Gnu(Cc::Yes, Lld::No), &["-march=armv7-a"]);
Target {
llvm_target: "armv7-none-linux-android".into(),
pointer_width: 32,
diff --git a/compiler/rustc_target/src/spec/armv7a_none_eabi.rs b/compiler/rustc_target/src/spec/armv7a_none_eabi.rs
index cb5cbe158..4e20fb325 100644
--- a/compiler/rustc_target/src/spec/armv7a_none_eabi.rs
+++ b/compiler/rustc_target/src/spec/armv7a_none_eabi.rs
@@ -14,12 +14,12 @@
// - `relocation-model` set to `static`; also no PIE, no relro and no dynamic
// linking. rationale: matches `thumb` targets
-use super::{LinkerFlavor, LldFlavor, PanicStrategy, RelocModel, Target, TargetOptions};
+use super::{Cc, LinkerFlavor, Lld, PanicStrategy, RelocModel, Target, TargetOptions};
pub fn target() -> Target {
let opts = TargetOptions {
abi: "eabi".into(),
- linker_flavor: LinkerFlavor::Lld(LldFlavor::Ld),
+ linker_flavor: LinkerFlavor::Gnu(Cc::No, Lld::Yes),
linker: Some("rust-lld".into()),
features: "+v7,+thumb2,+soft-float,-neon,+strict-align".into(),
relocation_model: RelocModel::Static,
diff --git a/compiler/rustc_target/src/spec/armv7a_none_eabihf.rs b/compiler/rustc_target/src/spec/armv7a_none_eabihf.rs
index fb5dd2e75..ae70129ae 100644
--- a/compiler/rustc_target/src/spec/armv7a_none_eabihf.rs
+++ b/compiler/rustc_target/src/spec/armv7a_none_eabihf.rs
@@ -5,12 +5,12 @@
// changes (list in `armv7a_none_eabi.rs`) to bring it closer to the bare-metal
// `thumb` & `aarch64` targets.
-use super::{LinkerFlavor, LldFlavor, PanicStrategy, RelocModel, Target, TargetOptions};
+use super::{Cc, LinkerFlavor, Lld, PanicStrategy, RelocModel, Target, TargetOptions};
pub fn target() -> Target {
let opts = TargetOptions {
abi: "eabihf".into(),
- linker_flavor: LinkerFlavor::Lld(LldFlavor::Ld),
+ linker_flavor: LinkerFlavor::Gnu(Cc::No, Lld::Yes),
linker: Some("rust-lld".into()),
features: "+v7,+vfp3,-d32,+thumb2,-neon,+strict-align".into(),
relocation_model: RelocModel::Static,
diff --git a/compiler/rustc_target/src/spec/armv7r_none_eabi.rs b/compiler/rustc_target/src/spec/armv7r_none_eabi.rs
index 5f1da09b3..25f301ccc 100644
--- a/compiler/rustc_target/src/spec/armv7r_none_eabi.rs
+++ b/compiler/rustc_target/src/spec/armv7r_none_eabi.rs
@@ -1,7 +1,6 @@
// Targets the Little-endian Cortex-R4/R5 processor (ARMv7-R)
-use crate::spec::{LinkerFlavor, LldFlavor, PanicStrategy, RelocModel};
-use crate::spec::{Target, TargetOptions};
+use crate::spec::{Cc, LinkerFlavor, Lld, PanicStrategy, RelocModel, Target, TargetOptions};
pub fn target() -> Target {
Target {
@@ -12,7 +11,7 @@ pub fn target() -> Target {
options: TargetOptions {
abi: "eabi".into(),
- linker_flavor: LinkerFlavor::Lld(LldFlavor::Ld),
+ linker_flavor: LinkerFlavor::Gnu(Cc::No, Lld::Yes),
linker: Some("rust-lld".into()),
relocation_model: RelocModel::Static,
panic_strategy: PanicStrategy::Abort,
diff --git a/compiler/rustc_target/src/spec/armv7r_none_eabihf.rs b/compiler/rustc_target/src/spec/armv7r_none_eabihf.rs
index 0038ed0df..40449759d 100644
--- a/compiler/rustc_target/src/spec/armv7r_none_eabihf.rs
+++ b/compiler/rustc_target/src/spec/armv7r_none_eabihf.rs
@@ -1,7 +1,6 @@
// Targets the Little-endian Cortex-R4F/R5F processor (ARMv7-R)
-use crate::spec::{LinkerFlavor, LldFlavor, PanicStrategy, RelocModel};
-use crate::spec::{Target, TargetOptions};
+use crate::spec::{Cc, LinkerFlavor, Lld, PanicStrategy, RelocModel, Target, TargetOptions};
pub fn target() -> Target {
Target {
@@ -12,7 +11,7 @@ pub fn target() -> Target {
options: TargetOptions {
abi: "eabihf".into(),
- linker_flavor: LinkerFlavor::Lld(LldFlavor::Ld),
+ linker_flavor: LinkerFlavor::Gnu(Cc::No, Lld::Yes),
linker: Some("rust-lld".into()),
relocation_model: RelocModel::Static,
panic_strategy: PanicStrategy::Abort,
diff --git a/compiler/rustc_target/src/spec/asmjs_unknown_emscripten.rs b/compiler/rustc_target/src/spec/asmjs_unknown_emscripten.rs
index b4cf2c5ee..f492c3451 100644
--- a/compiler/rustc_target/src/spec/asmjs_unknown_emscripten.rs
+++ b/compiler/rustc_target/src/spec/asmjs_unknown_emscripten.rs
@@ -2,6 +2,6 @@ use super::{wasm32_unknown_emscripten, LinkerFlavor, Target};
pub fn target() -> Target {
let mut target = wasm32_unknown_emscripten::target();
- target.add_post_link_args(LinkerFlavor::Em, &["-sWASM=0", "--memory-init-file", "0"]);
+ target.add_post_link_args(LinkerFlavor::EmCc, &["-sWASM=0", "--memory-init-file", "0"]);
target
}
diff --git a/compiler/rustc_target/src/spec/avr_gnu_base.rs b/compiler/rustc_target/src/spec/avr_gnu_base.rs
index 1d441e558..9c3406b53 100644
--- a/compiler/rustc_target/src/spec/avr_gnu_base.rs
+++ b/compiler/rustc_target/src/spec/avr_gnu_base.rs
@@ -1,4 +1,4 @@
-use crate::spec::{LinkerFlavor, Target, TargetOptions};
+use crate::spec::{Cc, LinkerFlavor, Lld, RelocModel, Target, TargetOptions};
/// A base target for AVR devices using the GNU toolchain.
///
@@ -17,10 +17,14 @@ pub fn target(target_cpu: &'static str, mmcu: &'static str) -> Target {
linker: Some("avr-gcc".into()),
eh_frame_header: false,
- pre_link_args: TargetOptions::link_args(LinkerFlavor::Gcc, &[mmcu]),
- late_link_args: TargetOptions::link_args(LinkerFlavor::Gcc, &["-lgcc"]),
+ pre_link_args: TargetOptions::link_args(LinkerFlavor::Gnu(Cc::Yes, Lld::No), &[mmcu]),
+ late_link_args: TargetOptions::link_args(
+ LinkerFlavor::Gnu(Cc::Yes, Lld::No),
+ &["-lgcc"],
+ ),
max_atomic_width: Some(0),
atomic_cas: false,
+ relocation_model: RelocModel::Static,
..TargetOptions::default()
},
}
diff --git a/compiler/rustc_target/src/spec/bpf_base.rs b/compiler/rustc_target/src/spec/bpf_base.rs
index 3c4da6f88..baf365871 100644
--- a/compiler/rustc_target/src/spec/bpf_base.rs
+++ b/compiler/rustc_target/src/spec/bpf_base.rs
@@ -5,7 +5,7 @@ pub fn opts(endian: Endian) -> TargetOptions {
TargetOptions {
allow_asm: true,
endian,
- linker_flavor: LinkerFlavor::BpfLinker,
+ linker_flavor: LinkerFlavor::Bpf,
atomic_cas: false,
dynamic_linking: true,
no_builtins: true,
diff --git a/compiler/rustc_target/src/spec/crt_objects.rs b/compiler/rustc_target/src/spec/crt_objects.rs
index 52ac3622e..c126390f5 100644
--- a/compiler/rustc_target/src/spec/crt_objects.rs
+++ b/compiler/rustc_target/src/spec/crt_objects.rs
@@ -63,7 +63,7 @@ pub(super) fn all(obj: &'static str) -> CrtObjects {
])
}
-pub(super) fn pre_musl_fallback() -> CrtObjects {
+pub(super) fn pre_musl_self_contained() -> CrtObjects {
new(&[
(LinkOutputKind::DynamicNoPicExe, &["crt1.o", "crti.o", "crtbegin.o"]),
(LinkOutputKind::DynamicPicExe, &["Scrt1.o", "crti.o", "crtbeginS.o"]),
@@ -74,7 +74,7 @@ pub(super) fn pre_musl_fallback() -> CrtObjects {
])
}
-pub(super) fn post_musl_fallback() -> CrtObjects {
+pub(super) fn post_musl_self_contained() -> CrtObjects {
new(&[
(LinkOutputKind::DynamicNoPicExe, &["crtend.o", "crtn.o"]),
(LinkOutputKind::DynamicPicExe, &["crtendS.o", "crtn.o"]),
@@ -85,7 +85,7 @@ pub(super) fn post_musl_fallback() -> CrtObjects {
])
}
-pub(super) fn pre_mingw_fallback() -> CrtObjects {
+pub(super) fn pre_mingw_self_contained() -> CrtObjects {
new(&[
(LinkOutputKind::DynamicNoPicExe, &["crt2.o", "rsbegin.o"]),
(LinkOutputKind::DynamicPicExe, &["crt2.o", "rsbegin.o"]),
@@ -96,7 +96,7 @@ pub(super) fn pre_mingw_fallback() -> CrtObjects {
])
}
-pub(super) fn post_mingw_fallback() -> CrtObjects {
+pub(super) fn post_mingw_self_contained() -> CrtObjects {
all("rsend.o")
}
@@ -108,7 +108,7 @@ pub(super) fn post_mingw() -> CrtObjects {
all("rsend.o")
}
-pub(super) fn pre_wasi_fallback() -> CrtObjects {
+pub(super) fn pre_wasi_self_contained() -> CrtObjects {
// Use crt1-command.o instead of crt1.o to enable support for new-style
// commands. See https://reviews.llvm.org/D81689 for more info.
new(&[
@@ -120,37 +120,41 @@ pub(super) fn pre_wasi_fallback() -> CrtObjects {
])
}
-pub(super) fn post_wasi_fallback() -> CrtObjects {
+pub(super) fn post_wasi_self_contained() -> CrtObjects {
new(&[])
}
-/// Which logic to use to determine whether to fall back to the "self-contained" mode or not.
+/// Which logic to use to determine whether to use self-contained linking mode
+/// if `-Clink-self-contained` is not specified explicitly.
#[derive(Clone, Copy, PartialEq, Hash, Debug)]
-pub enum CrtObjectsFallback {
+pub enum LinkSelfContainedDefault {
+ False,
+ True,
Musl,
Mingw,
- Wasm,
}
-impl FromStr for CrtObjectsFallback {
+impl FromStr for LinkSelfContainedDefault {
type Err = ();
- fn from_str(s: &str) -> Result<CrtObjectsFallback, ()> {
+ fn from_str(s: &str) -> Result<LinkSelfContainedDefault, ()> {
Ok(match s {
- "musl" => CrtObjectsFallback::Musl,
- "mingw" => CrtObjectsFallback::Mingw,
- "wasm" => CrtObjectsFallback::Wasm,
+ "false" => LinkSelfContainedDefault::False,
+ "true" | "wasm" => LinkSelfContainedDefault::True,
+ "musl" => LinkSelfContainedDefault::Musl,
+ "mingw" => LinkSelfContainedDefault::Mingw,
_ => return Err(()),
})
}
}
-impl ToJson for CrtObjectsFallback {
+impl ToJson for LinkSelfContainedDefault {
fn to_json(&self) -> Json {
match *self {
- CrtObjectsFallback::Musl => "musl",
- CrtObjectsFallback::Mingw => "mingw",
- CrtObjectsFallback::Wasm => "wasm",
+ LinkSelfContainedDefault::False => "false",
+ LinkSelfContainedDefault::True => "true",
+ LinkSelfContainedDefault::Musl => "musl",
+ LinkSelfContainedDefault::Mingw => "mingw",
}
.to_json()
}
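// Illustrative sketch (not part of the patch): string-level behavior of the
// renamed enum, assuming the `FromStr` impl above is in scope (e.g. a unit test
// inside `rustc_target::spec::crt_objects`).
use std::str::FromStr;

fn demo() {
    // Legacy "wasm" values keep parsing and now map to the plain `True` default.
    assert_eq!(LinkSelfContainedDefault::from_str("wasm"), Ok(LinkSelfContainedDefault::True));
    assert_eq!(LinkSelfContainedDefault::from_str("musl"), Ok(LinkSelfContainedDefault::Musl));
    // Anything unrecognized is rejected as Err(()).
    assert!(LinkSelfContainedDefault::from_str("no-such-mode").is_err());
}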
diff --git a/compiler/rustc_target/src/spec/fuchsia_base.rs b/compiler/rustc_target/src/spec/fuchsia_base.rs
index df1e3275f..4c2775850 100644
--- a/compiler/rustc_target/src/spec/fuchsia_base.rs
+++ b/compiler/rustc_target/src/spec/fuchsia_base.rs
@@ -1,8 +1,13 @@
-use crate::spec::{crt_objects, cvs, LinkOutputKind, LinkerFlavor, LldFlavor, TargetOptions};
+use crate::spec::{crt_objects, cvs, Cc, LinkOutputKind, LinkerFlavor, Lld, TargetOptions};
pub fn opts() -> TargetOptions {
+ // This mirrors the linker options provided by clang. We presume lld for
+ // now. When using clang as the linker it will supply these options for us,
+ // so we only list them for ld/lld.
+ //
+ // https://github.com/llvm/llvm-project/blob/db9322b2066c55254e7691efeab863f43bfcc084/clang/lib/Driver/ToolChains/Fuchsia.cpp#L31
let pre_link_args = TargetOptions::link_args(
- LinkerFlavor::Ld,
+ LinkerFlavor::Gnu(Cc::No, Lld::No),
&[
"--build-id",
"--hash-style=gnu",
@@ -20,7 +25,7 @@ pub fn opts() -> TargetOptions {
TargetOptions {
os: "fuchsia".into(),
- linker_flavor: LinkerFlavor::Lld(LldFlavor::Ld),
+ linker_flavor: LinkerFlavor::Gnu(Cc::No, Lld::Yes),
linker: Some("rust-lld".into()),
dynamic_linking: true,
families: cvs!["unix"],
diff --git a/compiler/rustc_target/src/spec/hermit_base.rs b/compiler/rustc_target/src/spec/hermit_base.rs
index 562ccef7e..dd9991381 100644
--- a/compiler/rustc_target/src/spec/hermit_base.rs
+++ b/compiler/rustc_target/src/spec/hermit_base.rs
@@ -1,14 +1,14 @@
-use crate::spec::{LinkerFlavor, LldFlavor, PanicStrategy, TargetOptions, TlsModel};
+use crate::spec::{Cc, LinkerFlavor, Lld, PanicStrategy, TargetOptions, TlsModel};
pub fn opts() -> TargetOptions {
let pre_link_args = TargetOptions::link_args(
- LinkerFlavor::Ld,
+ LinkerFlavor::Gnu(Cc::No, Lld::No),
&["--build-id", "--hash-style=gnu", "--Bstatic"],
);
TargetOptions {
os: "hermit".into(),
- linker_flavor: LinkerFlavor::Lld(LldFlavor::Ld),
+ linker_flavor: LinkerFlavor::Gnu(Cc::No, Lld::Yes),
linker: Some("rust-lld".into()),
has_thread_local: true,
pre_link_args,
diff --git a/compiler/rustc_target/src/spec/hexagon_unknown_linux_musl.rs b/compiler/rustc_target/src/spec/hexagon_unknown_linux_musl.rs
index cc2c78c69..3aad05eb2 100644
--- a/compiler/rustc_target/src/spec/hexagon_unknown_linux_musl.rs
+++ b/compiler/rustc_target/src/spec/hexagon_unknown_linux_musl.rs
@@ -1,4 +1,4 @@
-use crate::spec::Target;
+use crate::spec::{Cc, LinkerFlavor, Target};
pub fn target() -> Target {
let mut base = super::linux_musl_base::opts();
@@ -9,8 +9,7 @@ pub fn target() -> Target {
base.crt_static_default = false;
base.has_rpath = true;
- base.linker_is_gnu = false;
- base.dynamic_linking = true;
+ base.linker_flavor = LinkerFlavor::Unix(Cc::Yes);
base.c_enum_min_bits = 8;
diff --git a/compiler/rustc_target/src/spec/i386_apple_ios.rs b/compiler/rustc_target/src/spec/i386_apple_ios.rs
index 8b6266c58..b85214a9c 100644
--- a/compiler/rustc_target/src/spec/i386_apple_ios.rs
+++ b/compiler/rustc_target/src/spec/i386_apple_ios.rs
@@ -14,8 +14,7 @@ pub fn target() -> Target {
arch: "x86".into(),
options: TargetOptions {
max_atomic_width: Some(64),
- // don't use probe-stack=inline-asm until rust#83139 and rust#84667 are resolved
- stack_probes: StackProbeType::Call,
+ stack_probes: StackProbeType::X86,
..base
},
}
diff --git a/compiler/rustc_target/src/spec/i686_apple_darwin.rs b/compiler/rustc_target/src/spec/i686_apple_darwin.rs
index 1718bd77b..15607c12e 100644
--- a/compiler/rustc_target/src/spec/i686_apple_darwin.rs
+++ b/compiler/rustc_target/src/spec/i686_apple_darwin.rs
@@ -1,13 +1,13 @@
-use crate::spec::{FramePointer, LinkerFlavor, StackProbeType, Target, TargetOptions};
+use crate::spec::{Cc, FramePointer, LinkerFlavor, Lld, StackProbeType, Target, TargetOptions};
pub fn target() -> Target {
- let mut base = super::apple_base::opts("macos");
+ // ld64 only understands i386 and not i686
+ let mut base = super::apple_base::opts("macos", "i386", "");
base.cpu = "yonah".into();
base.max_atomic_width = Some(64);
- base.add_pre_link_args(LinkerFlavor::Gcc, &["-m32"]);
+ base.add_pre_link_args(LinkerFlavor::Darwin(Cc::Yes, Lld::No), &["-m32"]);
base.link_env_remove.to_mut().extend(super::apple_base::macos_link_env_remove());
- // don't use probe-stack=inline-asm until rust#83139 and rust#84667 are resolved
- base.stack_probes = StackProbeType::Call;
+ base.stack_probes = StackProbeType::X86;
base.frame_pointer = FramePointer::Always;
// Clang automatically chooses a more specific target based on
diff --git a/compiler/rustc_target/src/spec/i686_linux_android.rs b/compiler/rustc_target/src/spec/i686_linux_android.rs
index bdaf5c990..c7c30c239 100644
--- a/compiler/rustc_target/src/spec/i686_linux_android.rs
+++ b/compiler/rustc_target/src/spec/i686_linux_android.rs
@@ -11,8 +11,7 @@ pub fn target() -> Target {
// https://developer.android.com/ndk/guides/abis.html#x86
base.cpu = "pentiumpro".into();
base.features = "+mmx,+sse,+sse2,+sse3,+ssse3".into();
- // don't use probe-stack=inline-asm until rust#83139 and rust#84667 are resolved
- base.stack_probes = StackProbeType::Call;
+ base.stack_probes = StackProbeType::X86;
Target {
llvm_target: "i686-linux-android".into(),
diff --git a/compiler/rustc_target/src/spec/i686_pc_windows_gnu.rs b/compiler/rustc_target/src/spec/i686_pc_windows_gnu.rs
index 631865439..7a1113875 100644
--- a/compiler/rustc_target/src/spec/i686_pc_windows_gnu.rs
+++ b/compiler/rustc_target/src/spec/i686_pc_windows_gnu.rs
@@ -1,4 +1,4 @@
-use crate::spec::{FramePointer, LinkerFlavor, Target};
+use crate::spec::{Cc, FramePointer, LinkerFlavor, Lld, Target};
pub fn target() -> Target {
let mut base = super::windows_gnu_base::opts();
@@ -9,8 +9,11 @@ pub fn target() -> Target {
// Mark all dynamic libraries and executables as compatible with the larger 4GiB address
// space available to x86 Windows binaries on x86_64.
- base.add_pre_link_args(LinkerFlavor::Ld, &["-m", "i386pe", "--large-address-aware"]);
- base.add_pre_link_args(LinkerFlavor::Gcc, &["-Wl,--large-address-aware"]);
+ base.add_pre_link_args(
+ LinkerFlavor::Gnu(Cc::No, Lld::No),
+ &["-m", "i386pe", "--large-address-aware"],
+ );
+ base.add_pre_link_args(LinkerFlavor::Gnu(Cc::Yes, Lld::No), &["-Wl,--large-address-aware"]);
Target {
llvm_target: "i686-pc-windows-gnu".into(),
diff --git a/compiler/rustc_target/src/spec/i686_pc_windows_msvc.rs b/compiler/rustc_target/src/spec/i686_pc_windows_msvc.rs
index f4ceaa1ca..db4c00dc6 100644
--- a/compiler/rustc_target/src/spec/i686_pc_windows_msvc.rs
+++ b/compiler/rustc_target/src/spec/i686_pc_windows_msvc.rs
@@ -1,4 +1,4 @@
-use crate::spec::{LinkerFlavor, Target};
+use crate::spec::{LinkerFlavor, Lld, Target};
pub fn target() -> Target {
let mut base = super::windows_msvc_base::opts();
@@ -6,7 +6,7 @@ pub fn target() -> Target {
base.max_atomic_width = Some(64);
base.add_pre_link_args(
- LinkerFlavor::Msvc,
+ LinkerFlavor::Msvc(Lld::No),
&[
// Mark all dynamic libraries and executables as compatible with the larger 4GiB address
// space available to x86 Windows binaries on x86_64.
diff --git a/compiler/rustc_target/src/spec/i686_unknown_freebsd.rs b/compiler/rustc_target/src/spec/i686_unknown_freebsd.rs
index aff284bf2..35ca78034 100644
--- a/compiler/rustc_target/src/spec/i686_unknown_freebsd.rs
+++ b/compiler/rustc_target/src/spec/i686_unknown_freebsd.rs
@@ -1,12 +1,11 @@
-use crate::spec::{LinkerFlavor, StackProbeType, Target};
+use crate::spec::{Cc, LinkerFlavor, Lld, StackProbeType, Target};
pub fn target() -> Target {
let mut base = super::freebsd_base::opts();
base.cpu = "pentium4".into();
base.max_atomic_width = Some(64);
- base.add_pre_link_args(LinkerFlavor::Gcc, &["-m32", "-Wl,-znotext"]);
- // don't use probe-stack=inline-asm until rust#83139 and rust#84667 are resolved
- base.stack_probes = StackProbeType::Call;
+ base.add_pre_link_args(LinkerFlavor::Gnu(Cc::Yes, Lld::No), &["-m32", "-Wl,-znotext"]);
+ base.stack_probes = StackProbeType::X86;
Target {
llvm_target: "i686-unknown-freebsd".into(),
diff --git a/compiler/rustc_target/src/spec/i686_unknown_haiku.rs b/compiler/rustc_target/src/spec/i686_unknown_haiku.rs
index 87aa74e40..e6b72336c 100644
--- a/compiler/rustc_target/src/spec/i686_unknown_haiku.rs
+++ b/compiler/rustc_target/src/spec/i686_unknown_haiku.rs
@@ -1,12 +1,11 @@
-use crate::spec::{LinkerFlavor, StackProbeType, Target};
+use crate::spec::{Cc, LinkerFlavor, Lld, StackProbeType, Target};
pub fn target() -> Target {
let mut base = super::haiku_base::opts();
base.cpu = "pentium4".into();
base.max_atomic_width = Some(64);
- base.add_pre_link_args(LinkerFlavor::Gcc, &["-m32"]);
- // don't use probe-stack=inline-asm until rust#83139 and rust#84667 are resolved
- base.stack_probes = StackProbeType::Call;
+ base.add_pre_link_args(LinkerFlavor::Gnu(Cc::Yes, Lld::No), &["-m32"]);
+ base.stack_probes = StackProbeType::X86;
Target {
llvm_target: "i686-unknown-haiku".into(),
diff --git a/compiler/rustc_target/src/spec/i686_unknown_linux_gnu.rs b/compiler/rustc_target/src/spec/i686_unknown_linux_gnu.rs
index 765803d16..73e536a7e 100644
--- a/compiler/rustc_target/src/spec/i686_unknown_linux_gnu.rs
+++ b/compiler/rustc_target/src/spec/i686_unknown_linux_gnu.rs
@@ -1,12 +1,12 @@
-use crate::spec::{LinkerFlavor, StackProbeType, Target};
+use crate::spec::{Cc, LinkerFlavor, Lld, SanitizerSet, StackProbeType, Target};
pub fn target() -> Target {
let mut base = super::linux_gnu_base::opts();
base.cpu = "pentium4".into();
base.max_atomic_width = Some(64);
- base.add_pre_link_args(LinkerFlavor::Gcc, &["-m32"]);
- // don't use probe-stack=inline-asm until rust#83139 and rust#84667 are resolved
- base.stack_probes = StackProbeType::Call;
+ base.supported_sanitizers = SanitizerSet::ADDRESS;
+ base.add_pre_link_args(LinkerFlavor::Gnu(Cc::Yes, Lld::No), &["-m32"]);
+ base.stack_probes = StackProbeType::X86;
Target {
llvm_target: "i686-unknown-linux-gnu".into(),
diff --git a/compiler/rustc_target/src/spec/i686_unknown_linux_musl.rs b/compiler/rustc_target/src/spec/i686_unknown_linux_musl.rs
index d94928043..3825082ba 100644
--- a/compiler/rustc_target/src/spec/i686_unknown_linux_musl.rs
+++ b/compiler/rustc_target/src/spec/i686_unknown_linux_musl.rs
@@ -1,12 +1,11 @@
-use crate::spec::{FramePointer, LinkerFlavor, StackProbeType, Target};
+use crate::spec::{Cc, FramePointer, LinkerFlavor, Lld, StackProbeType, Target};
pub fn target() -> Target {
let mut base = super::linux_musl_base::opts();
base.cpu = "pentium4".into();
base.max_atomic_width = Some(64);
- base.add_pre_link_args(LinkerFlavor::Gcc, &["-m32", "-Wl,-melf_i386"]);
- // don't use probe-stack=inline-asm until rust#83139 and rust#84667 are resolved
- base.stack_probes = StackProbeType::Call;
+ base.add_pre_link_args(LinkerFlavor::Gnu(Cc::Yes, Lld::No), &["-m32", "-Wl,-melf_i386"]);
+ base.stack_probes = StackProbeType::X86;
// The unwinder used by i686-unknown-linux-musl, the LLVM libunwind
// implementation, apparently relies on frame pointers existing... somehow.
diff --git a/compiler/rustc_target/src/spec/i686_unknown_netbsd.rs b/compiler/rustc_target/src/spec/i686_unknown_netbsd.rs
index 8de698b51..b191996c7 100644
--- a/compiler/rustc_target/src/spec/i686_unknown_netbsd.rs
+++ b/compiler/rustc_target/src/spec/i686_unknown_netbsd.rs
@@ -1,12 +1,11 @@
-use crate::spec::{LinkerFlavor, StackProbeType, Target, TargetOptions};
+use crate::spec::{Cc, LinkerFlavor, Lld, StackProbeType, Target, TargetOptions};
pub fn target() -> Target {
let mut base = super::netbsd_base::opts();
base.cpu = "pentium4".into();
base.max_atomic_width = Some(64);
- base.add_pre_link_args(LinkerFlavor::Gcc, &["-m32"]);
- // don't use probe-stack=inline-asm until rust#83139 and rust#84667 are resolved
- base.stack_probes = StackProbeType::Call;
+ base.add_pre_link_args(LinkerFlavor::Gnu(Cc::Yes, Lld::No), &["-m32"]);
+ base.stack_probes = StackProbeType::X86;
Target {
llvm_target: "i686-unknown-netbsdelf".into(),
diff --git a/compiler/rustc_target/src/spec/i686_unknown_openbsd.rs b/compiler/rustc_target/src/spec/i686_unknown_openbsd.rs
index 7f25a1a16..8babe5597 100644
--- a/compiler/rustc_target/src/spec/i686_unknown_openbsd.rs
+++ b/compiler/rustc_target/src/spec/i686_unknown_openbsd.rs
@@ -1,12 +1,11 @@
-use crate::spec::{LinkerFlavor, StackProbeType, Target};
+use crate::spec::{Cc, LinkerFlavor, Lld, StackProbeType, Target};
pub fn target() -> Target {
let mut base = super::openbsd_base::opts();
base.cpu = "pentium4".into();
base.max_atomic_width = Some(64);
- base.add_pre_link_args(LinkerFlavor::Gcc, &["-m32", "-fuse-ld=lld"]);
- // don't use probe-stack=inline-asm until rust#83139 and rust#84667 are resolved
- base.stack_probes = StackProbeType::Call;
+ base.add_pre_link_args(LinkerFlavor::Gnu(Cc::Yes, Lld::No), &["-m32", "-fuse-ld=lld"]);
+ base.stack_probes = StackProbeType::X86;
Target {
llvm_target: "i686-unknown-openbsd".into(),
diff --git a/compiler/rustc_target/src/spec/i686_uwp_windows_gnu.rs b/compiler/rustc_target/src/spec/i686_uwp_windows_gnu.rs
index d52810d2f..a3e325698 100644
--- a/compiler/rustc_target/src/spec/i686_uwp_windows_gnu.rs
+++ b/compiler/rustc_target/src/spec/i686_uwp_windows_gnu.rs
@@ -1,4 +1,4 @@
-use crate::spec::{FramePointer, LinkerFlavor, Target};
+use crate::spec::{Cc, FramePointer, LinkerFlavor, Lld, Target};
pub fn target() -> Target {
let mut base = super::windows_uwp_gnu_base::opts();
@@ -8,8 +8,11 @@ pub fn target() -> Target {
// Mark all dynamic libraries and executables as compatible with the larger 4GiB address
// space available to x86 Windows binaries on x86_64.
- base.add_pre_link_args(LinkerFlavor::Ld, &["-m", "i386pe", "--large-address-aware"]);
- base.add_pre_link_args(LinkerFlavor::Gcc, &["-Wl,--large-address-aware"]);
+ base.add_pre_link_args(
+ LinkerFlavor::Gnu(Cc::No, Lld::No),
+ &["-m", "i386pe", "--large-address-aware"],
+ );
+ base.add_pre_link_args(LinkerFlavor::Gnu(Cc::Yes, Lld::No), &["-Wl,--large-address-aware"]);
Target {
llvm_target: "i686-pc-windows-gnu".into(),
diff --git a/compiler/rustc_target/src/spec/i686_wrs_vxworks.rs b/compiler/rustc_target/src/spec/i686_wrs_vxworks.rs
index f62404e82..b5cfdfceb 100644
--- a/compiler/rustc_target/src/spec/i686_wrs_vxworks.rs
+++ b/compiler/rustc_target/src/spec/i686_wrs_vxworks.rs
@@ -1,12 +1,11 @@
-use crate::spec::{LinkerFlavor, StackProbeType, Target};
+use crate::spec::{Cc, LinkerFlavor, Lld, StackProbeType, Target};
pub fn target() -> Target {
let mut base = super::vxworks_base::opts();
base.cpu = "pentium4".into();
base.max_atomic_width = Some(64);
- base.add_pre_link_args(LinkerFlavor::Gcc, &["-m32"]);
- // don't use probe-stack=inline-asm until rust#83139 and rust#84667 are resolved
- base.stack_probes = StackProbeType::Call;
+ base.add_pre_link_args(LinkerFlavor::Gnu(Cc::Yes, Lld::No), &["-m32"]);
+ base.stack_probes = StackProbeType::X86;
Target {
llvm_target: "i686-unknown-linux-gnu".into(),
diff --git a/compiler/rustc_target/src/spec/illumos_base.rs b/compiler/rustc_target/src/spec/illumos_base.rs
index 77e000474..8ac351584 100644
--- a/compiler/rustc_target/src/spec/illumos_base.rs
+++ b/compiler/rustc_target/src/spec/illumos_base.rs
@@ -1,8 +1,8 @@
-use crate::spec::{cvs, FramePointer, LinkerFlavor, TargetOptions};
+use crate::spec::{cvs, Cc, FramePointer, LinkerFlavor, TargetOptions};
pub fn opts() -> TargetOptions {
let late_link_args = TargetOptions::link_args(
- LinkerFlavor::Gcc,
+ LinkerFlavor::Unix(Cc::Yes),
&[
// The illumos libc contains a stack unwinding implementation, as
// does libgcc_s. The latter implementation includes several
@@ -30,7 +30,7 @@ pub fn opts() -> TargetOptions {
has_rpath: true,
families: cvs!["unix"],
is_like_solaris: true,
- linker_is_gnu: false,
+ linker_flavor: LinkerFlavor::Unix(Cc::Yes),
limit_rdylib_exports: false, // Linker doesn't support this
frame_pointer: FramePointer::Always,
eh_frame_header: false,
diff --git a/compiler/rustc_target/src/spec/l4re_base.rs b/compiler/rustc_target/src/spec/l4re_base.rs
index a08756861..3a4d83fad 100644
--- a/compiler/rustc_target/src/spec/l4re_base.rs
+++ b/compiler/rustc_target/src/spec/l4re_base.rs
@@ -1,14 +1,14 @@
-use crate::spec::{cvs, LinkerFlavor, PanicStrategy, TargetOptions};
+use crate::spec::{cvs, Cc, LinkerFlavor, PanicStrategy, RelocModel, TargetOptions};
pub fn opts() -> TargetOptions {
TargetOptions {
os: "l4re".into(),
env: "uclibc".into(),
- linker_flavor: LinkerFlavor::L4Bender,
+ linker_flavor: LinkerFlavor::Unix(Cc::No),
panic_strategy: PanicStrategy::Abort,
linker: Some("l4-bender".into()),
- linker_is_gnu: false,
families: cvs!["unix"],
+ relocation_model: RelocModel::Static,
..Default::default()
}
}
diff --git a/compiler/rustc_target/src/spec/linux_base.rs b/compiler/rustc_target/src/spec/linux_base.rs
index f4fce3b40..df8e84812 100644
--- a/compiler/rustc_target/src/spec/linux_base.rs
+++ b/compiler/rustc_target/src/spec/linux_base.rs
@@ -1,4 +1,5 @@
-use crate::spec::{cvs, RelroLevel, TargetOptions};
+use crate::spec::{cvs, RelroLevel, SplitDebuginfo, TargetOptions};
+use std::borrow::Cow;
pub fn opts() -> TargetOptions {
TargetOptions {
@@ -10,6 +11,11 @@ pub fn opts() -> TargetOptions {
relro_level: RelroLevel::Full,
has_thread_local: true,
crt_static_respected: true,
+ supported_split_debuginfo: Cow::Borrowed(&[
+ SplitDebuginfo::Packed,
+ SplitDebuginfo::Unpacked,
+ SplitDebuginfo::Off,
+ ]),
..Default::default()
}
}
diff --git a/compiler/rustc_target/src/spec/linux_kernel_base.rs b/compiler/rustc_target/src/spec/linux_kernel_base.rs
index 0f5d85205..f41533a95 100644
--- a/compiler/rustc_target/src/spec/linux_kernel_base.rs
+++ b/compiler/rustc_target/src/spec/linux_kernel_base.rs
@@ -6,8 +6,7 @@ pub fn opts() -> TargetOptions {
env: "gnu".into(),
disable_redzone: true,
panic_strategy: PanicStrategy::Abort,
- // don't use probe-stack=inline-asm until rust#83139 and rust#84667 are resolved
- stack_probes: StackProbeType::Call,
+ stack_probes: StackProbeType::X86,
frame_pointer: FramePointer::Always,
position_independent_executables: true,
needs_plt: true,
diff --git a/compiler/rustc_target/src/spec/linux_musl_base.rs b/compiler/rustc_target/src/spec/linux_musl_base.rs
index 207a87ab0..61553e71b 100644
--- a/compiler/rustc_target/src/spec/linux_musl_base.rs
+++ b/compiler/rustc_target/src/spec/linux_musl_base.rs
@@ -1,13 +1,13 @@
-use crate::spec::crt_objects::{self, CrtObjectsFallback};
+use crate::spec::crt_objects::{self, LinkSelfContainedDefault};
use crate::spec::TargetOptions;
pub fn opts() -> TargetOptions {
let mut base = super::linux_base::opts();
base.env = "musl".into();
- base.pre_link_objects_fallback = crt_objects::pre_musl_fallback();
- base.post_link_objects_fallback = crt_objects::post_musl_fallback();
- base.crt_objects_fallback = Some(CrtObjectsFallback::Musl);
+ base.pre_link_objects_self_contained = crt_objects::pre_musl_self_contained();
+ base.post_link_objects_self_contained = crt_objects::post_musl_self_contained();
+ base.link_self_contained = LinkSelfContainedDefault::Musl;
// These targets statically link libc by default
base.crt_static_default = true;
diff --git a/compiler/rustc_target/src/spec/mipsel_sony_psp.rs b/compiler/rustc_target/src/spec/mipsel_sony_psp.rs
index cfc8ec21c..75beb91b1 100644
--- a/compiler/rustc_target/src/spec/mipsel_sony_psp.rs
+++ b/compiler/rustc_target/src/spec/mipsel_sony_psp.rs
@@ -1,11 +1,13 @@
-use crate::spec::{cvs, Target, TargetOptions};
-use crate::spec::{LinkerFlavor, LldFlavor, RelocModel};
+use crate::spec::{cvs, Cc, LinkerFlavor, Lld, RelocModel, Target, TargetOptions};
// The PSP has custom linker requirements.
const LINKER_SCRIPT: &str = include_str!("./mipsel_sony_psp_linker_script.ld");
pub fn target() -> Target {
- let pre_link_args = TargetOptions::link_args(LinkerFlavor::Ld, &["--emit-relocs", "--nmagic"]);
+ let pre_link_args = TargetOptions::link_args(
+ LinkerFlavor::Gnu(Cc::No, Lld::No),
+ &["--emit-relocs", "--nmagic"],
+ );
Target {
llvm_target: "mipsel-sony-psp".into(),
@@ -16,7 +18,7 @@ pub fn target() -> Target {
options: TargetOptions {
os: "psp".into(),
vendor: "sony".into(),
- linker_flavor: LinkerFlavor::Lld(LldFlavor::Ld),
+ linker_flavor: LinkerFlavor::Gnu(Cc::No, Lld::Yes),
cpu: "mips2".into(),
linker: Some("rust-lld".into()),
relocation_model: RelocModel::Static,
diff --git a/compiler/rustc_target/src/spec/mipsel_unknown_none.rs b/compiler/rustc_target/src/spec/mipsel_unknown_none.rs
index fe2aa2de8..43b01e7a0 100644
--- a/compiler/rustc_target/src/spec/mipsel_unknown_none.rs
+++ b/compiler/rustc_target/src/spec/mipsel_unknown_none.rs
@@ -2,8 +2,7 @@
//!
//! Can be used for MIPS M4K core (e.g. on PIC32MX devices)
-use crate::spec::{LinkerFlavor, LldFlavor, RelocModel};
-use crate::spec::{PanicStrategy, Target, TargetOptions};
+use crate::spec::{Cc, LinkerFlavor, Lld, PanicStrategy, RelocModel, Target, TargetOptions};
pub fn target() -> Target {
Target {
@@ -13,7 +12,7 @@ pub fn target() -> Target {
arch: "mips".into(),
options: TargetOptions {
- linker_flavor: LinkerFlavor::Lld(LldFlavor::Ld),
+ linker_flavor: LinkerFlavor::Gnu(Cc::No, Lld::Yes),
cpu: "mips32r2".into(),
features: "+mips32r2,+soft-float,+noabicalls".into(),
max_atomic_width: Some(32),
diff --git a/compiler/rustc_target/src/spec/mod.rs b/compiler/rustc_target/src/spec/mod.rs
index f7abeafd3..8909cf33a 100644
--- a/compiler/rustc_target/src/spec/mod.rs
+++ b/compiler/rustc_target/src/spec/mod.rs
@@ -37,7 +37,7 @@
use crate::abi::Endian;
use crate::json::{Json, ToJson};
use crate::spec::abi::{lookup as lookup_abi, Abi};
-use crate::spec::crt_objects::{CrtObjects, CrtObjectsFallback};
+use crate::spec::crt_objects::{CrtObjects, LinkSelfContainedDefault};
use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
use rustc_serialize::{Decodable, Decoder, Encodable, Encoder};
use rustc_span::symbol::{sym, Symbol};
@@ -90,16 +90,82 @@ mod windows_msvc_base;
mod windows_uwp_gnu_base;
mod windows_uwp_msvc_base;
+/// Linker is called through a C/C++ compiler.
+#[derive(Clone, Copy, Debug, Eq, Ord, PartialEq, PartialOrd)]
+pub enum Cc {
+ Yes,
+ No,
+}
+
+/// Linker is LLD.
+#[derive(Clone, Copy, Debug, Eq, Ord, PartialEq, PartialOrd)]
+pub enum Lld {
+ Yes,
+ No,
+}
+
+/// All linkers have some kind of command line interface and rustc needs to know which commands
+/// to use with each of them. So we cluster all such interfaces into a (somewhat arbitrary) number
+/// of classes that we call "linker flavors".
+///
+/// Technically, it's not even necessary; we can nearly always infer the flavor from the linker name
+/// and target properties like `is_like_windows`/`is_like_osx`/etc. However, the PRs originally
+/// introducing `-Clinker-flavor` (#40018 and friends) were aiming to reduce this kind of inference
+/// and provide something certain and explicitly specified instead, and that design goal is still
+/// relevant now.
+///
+/// The second goal is to keep the number of flavors to a minimum if possible.
+/// LLD somewhat forces our hand here because that linker is self-sufficient only if its executable
+/// (`argv[0]`) is named in a specific way; otherwise it doesn't work and requires a
+/// `-flavor LLD_FLAVOR` argument to choose which logic to use. Our shipped `rust-lld` in
+/// particular is not named in such a specific way, so it needs the flavor option; thus we make our
+/// linker flavors sufficiently fine-grained to satisfy LLD without inferring its flavor from other
+/// target properties, in accordance with the first design goal.
+///
+/// The first component of the flavor is tightly coupled with the compilation target,
+/// while the `Cc` and `Lld` flags can vary within the same target.
#[derive(Clone, Copy, Debug, Eq, Ord, PartialEq, PartialOrd)]
pub enum LinkerFlavor {
- Em,
+ /// Unix-like linker with GNU extensions (both naked and compiler-wrapped forms).
+ /// Besides similar "default" Linux/BSD linkers this also includes Windows/GNU linker,
+ /// which is somewhat different because it doesn't produce ELFs.
+ Gnu(Cc, Lld),
+ /// Unix-like linker for Apple targets (both naked and compiler-wrapped forms).
+ /// Extracted from the "umbrella" `Unix` flavor due to its corresponding LLD flavor.
+ Darwin(Cc, Lld),
+ /// Unix-like linker for Wasm targets (both naked and compiler-wrapped forms).
+ /// Extracted from the "umbrella" `Unix` flavor due to its corresponding LLD flavor.
+ /// Non-LLD version does not exist, so the lld flag is currently hardcoded here.
+ WasmLld(Cc),
+ /// Basic Unix-like linker for "any other Unix" targets (Solaris/illumos, L4Re, MSP430, etc),
+ /// possibly with non-GNU extensions (both naked and compiler-wrapped forms).
+ /// LLD doesn't support any of these.
+ Unix(Cc),
+ /// MSVC-style linker for Windows and UEFI, LLD supports it.
+ Msvc(Lld),
+ /// Emscripten Compiler Frontend, a wrapper around `WasmLld(Cc::Yes)` that has a different
+ /// interface and produces some additional JavaScript output.
+ EmCc,
+ // Below: other linker-like tools with unique interfaces for exotic targets.
+ /// Linker tool for BPF.
+ Bpf,
+ /// Linker tool for Nvidia PTX.
+ Ptx,
+}
+
+/// Linker flavors available externally through command line (`-Clinker-flavor`)
+/// or json target specifications.
+/// FIXME: This set has accumulated historically, bring it more in line with the internal
+/// linker flavors (`LinkerFlavor`).
+#[derive(Clone, Copy, Debug, Eq, Ord, PartialEq, PartialOrd)]
+pub enum LinkerFlavorCli {
Gcc,
- L4Bender,
Ld,
- Msvc,
Lld(LldFlavor),
- PtxLinker,
+ Msvc,
+ Em,
BpfLinker,
+ PtxLinker,
}
#[derive(Clone, Copy, Debug, Eq, Ord, PartialEq, PartialOrd)]
@@ -137,19 +203,85 @@ impl ToJson for LldFlavor {
}
}
-impl ToJson for LinkerFlavor {
- fn to_json(&self) -> Json {
- self.desc().to_json()
+impl LinkerFlavor {
+ pub fn from_cli(cli: LinkerFlavorCli, target: &TargetOptions) -> LinkerFlavor {
+ Self::from_cli_impl(cli, target.linker_flavor.lld_flavor(), target.linker_flavor.is_gnu())
+ }
+
+ /// The passed CLI flavor is preferred over other args coming from the default target spec,
+ /// so this function can produce a flavor that is incompatible with the current target.
+ /// FIXME: Produce errors when `-Clinker-flavor` is set to something incompatible
+ /// with the current target.
+ fn from_cli_impl(cli: LinkerFlavorCli, lld_flavor: LldFlavor, is_gnu: bool) -> LinkerFlavor {
+ match cli {
+ LinkerFlavorCli::Gcc => match lld_flavor {
+ LldFlavor::Ld if is_gnu => LinkerFlavor::Gnu(Cc::Yes, Lld::No),
+ LldFlavor::Ld64 => LinkerFlavor::Darwin(Cc::Yes, Lld::No),
+ LldFlavor::Wasm => LinkerFlavor::WasmLld(Cc::Yes),
+ LldFlavor::Ld | LldFlavor::Link => LinkerFlavor::Unix(Cc::Yes),
+ },
+ LinkerFlavorCli::Ld => match lld_flavor {
+ LldFlavor::Ld if is_gnu => LinkerFlavor::Gnu(Cc::No, Lld::No),
+ LldFlavor::Ld64 => LinkerFlavor::Darwin(Cc::No, Lld::No),
+ LldFlavor::Ld | LldFlavor::Wasm | LldFlavor::Link => LinkerFlavor::Unix(Cc::No),
+ },
+ LinkerFlavorCli::Lld(LldFlavor::Ld) => LinkerFlavor::Gnu(Cc::No, Lld::Yes),
+ LinkerFlavorCli::Lld(LldFlavor::Ld64) => LinkerFlavor::Darwin(Cc::No, Lld::Yes),
+ LinkerFlavorCli::Lld(LldFlavor::Wasm) => LinkerFlavor::WasmLld(Cc::No),
+ LinkerFlavorCli::Lld(LldFlavor::Link) => LinkerFlavor::Msvc(Lld::Yes),
+ LinkerFlavorCli::Msvc => LinkerFlavor::Msvc(Lld::No),
+ LinkerFlavorCli::Em => LinkerFlavor::EmCc,
+ LinkerFlavorCli::BpfLinker => LinkerFlavor::Bpf,
+ LinkerFlavorCli::PtxLinker => LinkerFlavor::Ptx,
+ }
+ }
+
+ fn to_cli(self) -> LinkerFlavorCli {
+ match self {
+ LinkerFlavor::Gnu(Cc::Yes, _)
+ | LinkerFlavor::Darwin(Cc::Yes, _)
+ | LinkerFlavor::WasmLld(Cc::Yes)
+ | LinkerFlavor::Unix(Cc::Yes) => LinkerFlavorCli::Gcc,
+ LinkerFlavor::Gnu(_, Lld::Yes) => LinkerFlavorCli::Lld(LldFlavor::Ld),
+ LinkerFlavor::Darwin(_, Lld::Yes) => LinkerFlavorCli::Lld(LldFlavor::Ld64),
+ LinkerFlavor::WasmLld(..) => LinkerFlavorCli::Lld(LldFlavor::Wasm),
+ LinkerFlavor::Gnu(..) | LinkerFlavor::Darwin(..) | LinkerFlavor::Unix(..) => {
+ LinkerFlavorCli::Ld
+ }
+ LinkerFlavor::Msvc(Lld::Yes) => LinkerFlavorCli::Lld(LldFlavor::Link),
+ LinkerFlavor::Msvc(..) => LinkerFlavorCli::Msvc,
+ LinkerFlavor::EmCc => LinkerFlavorCli::Em,
+ LinkerFlavor::Bpf => LinkerFlavorCli::BpfLinker,
+ LinkerFlavor::Ptx => LinkerFlavorCli::PtxLinker,
+ }
+ }
+
+ pub fn lld_flavor(self) -> LldFlavor {
+ match self {
+ LinkerFlavor::Gnu(..)
+ | LinkerFlavor::Unix(..)
+ | LinkerFlavor::EmCc
+ | LinkerFlavor::Bpf
+ | LinkerFlavor::Ptx => LldFlavor::Ld,
+ LinkerFlavor::Darwin(..) => LldFlavor::Ld64,
+ LinkerFlavor::WasmLld(..) => LldFlavor::Wasm,
+ LinkerFlavor::Msvc(..) => LldFlavor::Link,
+ }
+ }
+
+ pub fn is_gnu(self) -> bool {
+ matches!(self, LinkerFlavor::Gnu(..))
}
}
-macro_rules! flavor_mappings {
- ($((($($flavor:tt)*), $string:expr),)*) => (
- impl LinkerFlavor {
+
+macro_rules! linker_flavor_cli_impls {
+ ($(($($flavor:tt)*) $string:literal)*) => (
+ impl LinkerFlavorCli {
pub const fn one_of() -> &'static str {
concat!("one of: ", $($string, " ",)*)
}
- pub fn from_str(s: &str) -> Option<Self> {
+ pub fn from_str(s: &str) -> Option<LinkerFlavorCli> {
Some(match s {
$($string => $($flavor)*,)*
_ => return None,
@@ -165,18 +297,23 @@ macro_rules! flavor_mappings {
)
}
-flavor_mappings! {
- ((LinkerFlavor::Em), "em"),
- ((LinkerFlavor::Gcc), "gcc"),
- ((LinkerFlavor::L4Bender), "l4-bender"),
- ((LinkerFlavor::Ld), "ld"),
- ((LinkerFlavor::Msvc), "msvc"),
- ((LinkerFlavor::PtxLinker), "ptx-linker"),
- ((LinkerFlavor::BpfLinker), "bpf-linker"),
- ((LinkerFlavor::Lld(LldFlavor::Wasm)), "wasm-ld"),
- ((LinkerFlavor::Lld(LldFlavor::Ld64)), "ld64.lld"),
- ((LinkerFlavor::Lld(LldFlavor::Ld)), "ld.lld"),
- ((LinkerFlavor::Lld(LldFlavor::Link)), "lld-link"),
+linker_flavor_cli_impls! {
+ (LinkerFlavorCli::Gcc) "gcc"
+ (LinkerFlavorCli::Ld) "ld"
+ (LinkerFlavorCli::Lld(LldFlavor::Ld)) "ld.lld"
+ (LinkerFlavorCli::Lld(LldFlavor::Ld64)) "ld64.lld"
+ (LinkerFlavorCli::Lld(LldFlavor::Link)) "lld-link"
+ (LinkerFlavorCli::Lld(LldFlavor::Wasm)) "wasm-ld"
+ (LinkerFlavorCli::Msvc) "msvc"
+ (LinkerFlavorCli::Em) "em"
+ (LinkerFlavorCli::BpfLinker) "bpf-linker"
+ (LinkerFlavorCli::PtxLinker) "ptx-linker"
+}
+
+impl ToJson for LinkerFlavorCli {
+ fn to_json(&self) -> Json {
+ self.desc().to_json()
+ }
}
#[derive(Clone, Copy, Debug, PartialEq, Hash, Encodable, Decodable, HashStable_Generic)]
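// Illustrative sketch (not part of the patch): the CLI round-trip, written as if
// inside `rustc_target::spec` since `from_cli_impl` is private.
fn demo() {
    // `-Clinker-flavor=ld.lld` always selects the GNU flavor with LLD enabled,
    // independent of the lld-flavor/is-gnu hints taken from the target spec.
    let flavor = LinkerFlavor::from_cli_impl(
        LinkerFlavorCli::Lld(LldFlavor::Ld),
        LldFlavor::Ld,
        /* is_gnu */ true,
    );
    assert_eq!(flavor, LinkerFlavor::Gnu(Cc::No, Lld::Yes));
    // Converting back reproduces the user-facing spelling.
    assert_eq!(flavor.to_cli(), LinkerFlavorCli::Lld(LldFlavor::Ld));
}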
@@ -467,8 +604,59 @@ impl fmt::Display for LinkOutputKind {
}
pub type LinkArgs = BTreeMap<LinkerFlavor, Vec<StaticCow<str>>>;
+pub type LinkArgsCli = BTreeMap<LinkerFlavorCli, Vec<StaticCow<str>>>;
+
+/// Which kind of debuginfo does the target use?
+///
+/// Useful in determining whether a target supports Split DWARF (a target with
+/// `DebuginfoKind::Dwarf` and supporting `SplitDebuginfo::Unpacked` for example).
+#[derive(Clone, Copy, Debug, Default, Eq, Hash, PartialEq)]
+pub enum DebuginfoKind {
+ /// DWARF debuginfo (such as that used on `x86_64_unknown_linux_gnu`).
+ #[default]
+ Dwarf,
+ /// DWARF debuginfo in dSYM files (such as on Apple platforms).
+ DwarfDsym,
+ /// Program database files (such as on Windows).
+ Pdb,
+}
+
+impl DebuginfoKind {
+ fn as_str(&self) -> &'static str {
+ match self {
+ DebuginfoKind::Dwarf => "dwarf",
+ DebuginfoKind::DwarfDsym => "dwarf-dsym",
+ DebuginfoKind::Pdb => "pdb",
+ }
+ }
+}
+
+impl FromStr for DebuginfoKind {
+ type Err = ();
-#[derive(Clone, Copy, Hash, Debug, PartialEq, Eq)]
+ fn from_str(s: &str) -> Result<Self, ()> {
+ Ok(match s {
+ "dwarf" => DebuginfoKind::Dwarf,
+ "dwarf-dsym" => DebuginfoKind::DwarfDsym,
+ "pdb" => DebuginfoKind::Pdb,
+ _ => return Err(()),
+ })
+ }
+}
+
+impl ToJson for DebuginfoKind {
+ fn to_json(&self) -> Json {
+ self.as_str().to_json()
+ }
+}
+
+impl fmt::Display for DebuginfoKind {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.write_str(self.as_str())
+ }
+}
+
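// Illustrative sketch (not part of the patch): a hypothetical consumer check,
// using only the `debuginfo_kind` and `supported_split_debuginfo` fields
// introduced in this diff.
fn supports_split_dwarf(opts: &TargetOptions) -> bool {
    // Split DWARF needs plain DWARF debuginfo plus support for the `Unpacked` mode.
    opts.debuginfo_kind == DebuginfoKind::Dwarf
        && opts.supported_split_debuginfo.contains(&SplitDebuginfo::Unpacked)
}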
+#[derive(Clone, Copy, Debug, Default, Eq, Hash, PartialEq)]
pub enum SplitDebuginfo {
/// Split debug-information is disabled, meaning that on supported platforms
/// you can find all debug information in the executable itself. This is
@@ -476,7 +664,8 @@ pub enum SplitDebuginfo {
///
/// * Windows - not supported
/// * macOS - don't run `dsymutil`
- /// * ELF - `.dwarf_*` sections
+ /// * ELF - `.debug_*` sections
+ #[default]
Off,
/// Split debug-information can be found in a "packed" location separate
@@ -484,7 +673,7 @@ pub enum SplitDebuginfo {
///
/// * Windows - `*.pdb`
/// * macOS - `*.dSYM` (run `dsymutil`)
- /// * ELF - `*.dwp` (run `rust-llvm-dwp`)
+ /// * ELF - `*.dwp` (run `thorin`)
Packed,
/// Split debug-information can be found in individual object files on the
@@ -509,7 +698,7 @@ impl SplitDebuginfo {
impl FromStr for SplitDebuginfo {
type Err = ();
- fn from_str(s: &str) -> Result<SplitDebuginfo, ()> {
+ fn from_str(s: &str) -> Result<Self, ()> {
Ok(match s {
"off" => SplitDebuginfo::Off,
"unpacked" => SplitDebuginfo::Unpacked,
@@ -547,6 +736,10 @@ pub enum StackProbeType {
}
impl StackProbeType {
+ // LLVM X86 targets (ix86 and x86_64) can use inline-asm stack probes starting with LLVM 16.
+ // Notable past issues were rust#83139 (fixed in 14) and rust#84667 (fixed in 16).
+ const X86: Self = Self::InlineOrCall { min_llvm_version_for_inline: (16, 0, 0) };
+
fn from_json(json: &Json) -> Result<Self, String> {
let object = json.as_object().ok_or_else(|| "expected a JSON object")?;
let kind = object
@@ -786,15 +979,15 @@ impl fmt::Display for StackProtector {
}
macro_rules! supported_targets {
- ( $(($( $triple:literal, )+ $module:ident ),)+ ) => {
+ ( $(($triple:literal, $module:ident ),)+ ) => {
$(mod $module;)+
/// List of supported targets
- pub const TARGETS: &[&str] = &[$($($triple),+),+];
+ pub const TARGETS: &[&str] = &[$($triple),+];
fn load_builtin(target: &str) -> Option<Target> {
let mut t = match target {
- $( $($triple)|+ => $module::target(), )+
+ $( $triple => $module::target(), )+
_ => return None,
};
t.is_builtin = true;
@@ -810,7 +1003,7 @@ macro_rules! supported_targets {
$(
#[test] // `#[test]`
fn $module() {
- tests_impl::test_target(super::$module::target());
+ tests_impl::test_target(super::$module::target(), $triple);
}
)+
}
@@ -844,6 +1037,7 @@ supported_targets! {
("sparc64-unknown-linux-gnu", sparc64_unknown_linux_gnu),
("arm-unknown-linux-gnueabi", arm_unknown_linux_gnueabi),
("arm-unknown-linux-gnueabihf", arm_unknown_linux_gnueabihf),
+ ("armeb-unknown-linux-gnueabi", armeb_unknown_linux_gnueabi),
("arm-unknown-linux-musleabi", arm_unknown_linux_musleabi),
("arm-unknown-linux-musleabihf", arm_unknown_linux_musleabihf),
("armv4t-unknown-linux-gnueabi", armv4t_unknown_linux_gnueabi),
@@ -893,9 +1087,11 @@ supported_targets! {
("aarch64-unknown-openbsd", aarch64_unknown_openbsd),
("i686-unknown-openbsd", i686_unknown_openbsd),
+ ("powerpc-unknown-openbsd", powerpc_unknown_openbsd),
+ ("powerpc64-unknown-openbsd", powerpc64_unknown_openbsd),
+ ("riscv64gc-unknown-openbsd", riscv64gc_unknown_openbsd),
("sparc64-unknown-openbsd", sparc64_unknown_openbsd),
("x86_64-unknown-openbsd", x86_64_unknown_openbsd),
- ("powerpc-unknown-openbsd", powerpc_unknown_openbsd),
("aarch64-unknown-netbsd", aarch64_unknown_netbsd),
("armv6-unknown-netbsd-eabihf", armv6_unknown_netbsd_eabihf),
@@ -1028,6 +1224,9 @@ supported_targets! {
("mipsel-sony-psp", mipsel_sony_psp),
("mipsel-unknown-none", mipsel_unknown_none),
("thumbv4t-none-eabi", thumbv4t_none_eabi),
+ ("armv4t-none-eabi", armv4t_none_eabi),
+ ("thumbv5te-none-eabi", thumbv5te_none_eabi),
+ ("armv5te-none-eabi", armv5te_none_eabi),
("aarch64_be-unknown-linux-gnu", aarch64_be_unknown_linux_gnu),
("aarch64-unknown-linux-gnu_ilp32", aarch64_unknown_linux_gnu_ilp32),
@@ -1156,48 +1355,49 @@ pub struct TargetOptions {
pub abi: StaticCow<str>,
/// Vendor name to use for conditional compilation (`target_vendor`). Defaults to "unknown".
pub vendor: StaticCow<str>,
- /// Default linker flavor used if `-C linker-flavor` or `-C linker` are not passed
- /// on the command line. Defaults to `LinkerFlavor::Gcc`.
- pub linker_flavor: LinkerFlavor,
/// Linker to invoke
pub linker: Option<StaticCow<str>>,
+ /// Default linker flavor used if `-C linker-flavor` or `-C linker` are not passed
+ /// on the command line. Defaults to `LinkerFlavor::Gnu(Cc::Yes, Lld::No)`.
+ pub linker_flavor: LinkerFlavor,
+ linker_flavor_json: LinkerFlavorCli,
+ lld_flavor_json: LldFlavor,
+ linker_is_gnu_json: bool,
- /// LLD flavor used if `lld` (or `rust-lld`) is specified as a linker
- /// without clarifying its flavor in any way.
- pub lld_flavor: LldFlavor,
-
- /// Linker arguments that are passed *before* any user-defined libraries.
- pub pre_link_args: LinkArgs,
/// Objects to link before and after all other object code.
pub pre_link_objects: CrtObjects,
pub post_link_objects: CrtObjects,
- /// Same as `(pre|post)_link_objects`, but when we fail to pull the objects with help of the
- /// target's native gcc and fall back to the "self-contained" mode and pull them manually.
- /// See `crt_objects.rs` for some more detailed documentation.
- pub pre_link_objects_fallback: CrtObjects,
- pub post_link_objects_fallback: CrtObjects,
- /// Which logic to use to determine whether to fall back to the "self-contained" mode or not.
- pub crt_objects_fallback: Option<CrtObjectsFallback>,
+ /// Same as `(pre|post)_link_objects`, but when self-contained linking mode is enabled.
+ pub pre_link_objects_self_contained: CrtObjects,
+ pub post_link_objects_self_contained: CrtObjects,
+ pub link_self_contained: LinkSelfContainedDefault,
+ /// Linker arguments that are passed *before* any user-defined libraries.
+ pub pre_link_args: LinkArgs,
+ pre_link_args_json: LinkArgsCli,
/// Linker arguments that are unconditionally passed after any
/// user-defined but before post-link objects. Standard platform
/// libraries that should always be linked to, usually go here.
pub late_link_args: LinkArgs,
+ late_link_args_json: LinkArgsCli,
/// Linker arguments used in addition to `late_link_args` if at least one
/// Rust dependency is dynamically linked.
pub late_link_args_dynamic: LinkArgs,
+ late_link_args_dynamic_json: LinkArgsCli,
/// Linker arguments used in addition to `late_link_args` if all Rust
/// dependencies are statically linked.
pub late_link_args_static: LinkArgs,
+ late_link_args_static_json: LinkArgsCli,
/// Linker arguments that are unconditionally passed *after* any
/// user-defined libraries.
pub post_link_args: LinkArgs,
+ post_link_args_json: LinkArgsCli,
+
/// Optional link script applied to `dylib` and `executable` crate types.
/// This is a string containing the script, not a path. Can only be applied
- /// to linkers where `linker_is_gnu` is true.
+ /// to linkers where linker flavor matches `LinkerFlavor::Gnu(..)`.
pub link_script: Option<StaticCow<str>>,
-
/// Environment variables to be set for the linker invocation.
pub link_env: StaticCow<[(StaticCow<str>, StaticCow<str>)]>,
/// Environment variables to be removed for the linker invocation.
@@ -1254,6 +1454,8 @@ pub struct TargetOptions {
pub abi_return_struct_as_int: bool,
/// Whether the target toolchain is like macOS's. Only useful for compiling against iOS/macOS,
/// in particular running dsymutil and some other stuff like `-dead_strip`. Defaults to false.
+ /// Also indicates whether to use Apple-specific ABI changes, such as extending function
+ /// parameters to 32 bits.
pub is_like_osx: bool,
/// Whether the target toolchain is like Solaris's.
/// Only useful for compiling against Illumos/Solaris,
@@ -1279,11 +1481,11 @@ pub struct TargetOptions {
pub is_like_msvc: bool,
/// Whether a target toolchain is like WASM.
pub is_like_wasm: bool,
+ /// Whether a target toolchain is like Android, implying a Linux kernel and a Bionic libc.
+ pub is_like_android: bool,
/// Default supported version of DWARF on this platform.
/// Useful because some platforms (osx, bsd) only want up to DWARF2.
pub default_dwarf_version: u32,
- /// Whether the linker support GNU-like arguments such as -O. Defaults to true.
- pub linker_is_gnu: bool,
/// The MinGW toolchain has a known issue that prevents it from correctly
/// handling COFF object files with more than 2<sup>15</sup> sections. Since each weak
/// symbol needs its own COMDAT section, weak linkage implies a large
@@ -1438,9 +1640,13 @@ pub struct TargetOptions {
/// thumb and arm interworking.
pub has_thumb_interworking: bool,
+ /// Which kind of debuginfo is used by this target?
+ pub debuginfo_kind: DebuginfoKind,
/// How to handle split debug information, if at all. Specifying `None` has
/// target-specific meaning.
pub split_debuginfo: SplitDebuginfo,
+ /// Which kinds of split debuginfo are supported by the target?
+ pub supported_split_debuginfo: StaticCow<[SplitDebuginfo]>,
/// The sanitizers supported by this target
///
@@ -1465,26 +1671,38 @@ pub struct TargetOptions {
/// Add arguments for the given flavor and also for its "twin" flavors
/// that have a compatible command line interface.
-fn add_link_args(link_args: &mut LinkArgs, flavor: LinkerFlavor, args: &[&'static str]) {
- let mut insert = |flavor| {
- link_args.entry(flavor).or_default().extend(args.iter().copied().map(Cow::Borrowed))
- };
+fn add_link_args_iter(
+ link_args: &mut LinkArgs,
+ flavor: LinkerFlavor,
+ args: impl Iterator<Item = StaticCow<str>> + Clone,
+) {
+ let mut insert = |flavor| link_args.entry(flavor).or_default().extend(args.clone());
insert(flavor);
match flavor {
- LinkerFlavor::Ld => insert(LinkerFlavor::Lld(LldFlavor::Ld)),
- LinkerFlavor::Msvc => insert(LinkerFlavor::Lld(LldFlavor::Link)),
- LinkerFlavor::Lld(LldFlavor::Wasm) => {}
- LinkerFlavor::Lld(lld_flavor) => {
- panic!("add_link_args: use non-LLD flavor for {:?}", lld_flavor)
+ LinkerFlavor::Gnu(cc, lld) => {
+ assert_eq!(lld, Lld::No);
+ insert(LinkerFlavor::Gnu(cc, Lld::Yes));
+ }
+ LinkerFlavor::Darwin(cc, lld) => {
+ assert_eq!(lld, Lld::No);
+ insert(LinkerFlavor::Darwin(cc, Lld::Yes));
}
- LinkerFlavor::Gcc
- | LinkerFlavor::Em
- | LinkerFlavor::L4Bender
- | LinkerFlavor::BpfLinker
- | LinkerFlavor::PtxLinker => {}
+ LinkerFlavor::Msvc(lld) => {
+ assert_eq!(lld, Lld::No);
+ insert(LinkerFlavor::Msvc(Lld::Yes));
+ }
+ LinkerFlavor::WasmLld(..)
+ | LinkerFlavor::Unix(..)
+ | LinkerFlavor::EmCc
+ | LinkerFlavor::Bpf
+ | LinkerFlavor::Ptx => {}
}
}
+fn add_link_args(link_args: &mut LinkArgs, flavor: LinkerFlavor, args: &[&'static str]) {
+ add_link_args_iter(link_args, flavor, args.iter().copied().map(Cow::Borrowed))
+}
+
impl TargetOptions {
fn link_args(flavor: LinkerFlavor, args: &[&'static str]) -> LinkArgs {
let mut link_args = LinkArgs::new();
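The refactored helper above fans each set of arguments out from a base flavor to its LLD "twin" with the same command-line interface (`Gnu`, `Darwin`, `Msvc`), while flavors without a twin are left alone. A minimal standalone sketch of that behaviour, using simplified stand-ins for the `rustc_target` types (the real `LinkArgs` values are `Cow<'static, str>` and the enums carry more variants):

    use std::collections::BTreeMap;

    #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Debug)]
    enum Cc { Yes, No }
    #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Debug)]
    enum Lld { Yes, No }
    #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Debug)]
    enum Flavor { Gnu(Cc, Lld), Unix(Cc) }

    type LinkArgs = BTreeMap<Flavor, Vec<&'static str>>;

    // Mirrors the shape of `add_link_args_iter`: insert for the given flavor,
    // then for its `Lld::Yes` twin when one exists.
    fn add_link_args(map: &mut LinkArgs, flavor: Flavor, args: &[&'static str]) {
        let mut insert = |flavor| map.entry(flavor).or_default().extend_from_slice(args);
        insert(flavor);
        if let Flavor::Gnu(cc, Lld::No) = flavor {
            insert(Flavor::Gnu(cc, Lld::Yes));
        }
    }

    fn main() {
        let mut args = LinkArgs::new();
        add_link_args(&mut args, Flavor::Gnu(Cc::Yes, Lld::No), &["-m64"]);
        add_link_args(&mut args, Flavor::Unix(Cc::Yes), &["-lc"]);
        // The Gnu flavor is mirrored onto its lld twin; Unix has no twin.
        assert_eq!(args[&Flavor::Gnu(Cc::Yes, Lld::Yes)], ["-m64"]);
        assert_eq!(args.get(&Flavor::Unix(Cc::No)), None);
        assert_eq!(args.len(), 3);
    }

This mirroring is also why the consistency checks in tests_impl.rs further down require the lld and non-lld variants of a flavor to carry identical arguments.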
@@ -1499,6 +1717,57 @@ impl TargetOptions {
fn add_post_link_args(&mut self, flavor: LinkerFlavor, args: &[&'static str]) {
add_link_args(&mut self.post_link_args, flavor, args);
}
+
+ fn update_from_cli(&mut self) {
+ self.linker_flavor = LinkerFlavor::from_cli_impl(
+ self.linker_flavor_json,
+ self.lld_flavor_json,
+ self.linker_is_gnu_json,
+ );
+ for (args, args_json) in [
+ (&mut self.pre_link_args, &self.pre_link_args_json),
+ (&mut self.late_link_args, &self.late_link_args_json),
+ (&mut self.late_link_args_dynamic, &self.late_link_args_dynamic_json),
+ (&mut self.late_link_args_static, &self.late_link_args_static_json),
+ (&mut self.post_link_args, &self.post_link_args_json),
+ ] {
+ args.clear();
+ for (flavor, args_json) in args_json {
+ // Cannot use `from_cli` due to borrow checker.
+ let linker_flavor = LinkerFlavor::from_cli_impl(
+ *flavor,
+ self.lld_flavor_json,
+ self.linker_is_gnu_json,
+ );
+ // Normalize to no lld to avoid asserts.
+ let linker_flavor = match linker_flavor {
+ LinkerFlavor::Gnu(cc, _) => LinkerFlavor::Gnu(cc, Lld::No),
+ LinkerFlavor::Darwin(cc, _) => LinkerFlavor::Darwin(cc, Lld::No),
+ LinkerFlavor::Msvc(_) => LinkerFlavor::Msvc(Lld::No),
+ _ => linker_flavor,
+ };
+ if !args.contains_key(&linker_flavor) {
+ add_link_args_iter(args, linker_flavor, args_json.iter().cloned());
+ }
+ }
+ }
+ }
+
+ fn update_to_cli(&mut self) {
+ self.linker_flavor_json = self.linker_flavor.to_cli();
+ self.lld_flavor_json = self.linker_flavor.lld_flavor();
+ self.linker_is_gnu_json = self.linker_flavor.is_gnu();
+ for (args, args_json) in [
+ (&self.pre_link_args, &mut self.pre_link_args_json),
+ (&self.late_link_args, &mut self.late_link_args_json),
+ (&self.late_link_args_dynamic, &mut self.late_link_args_dynamic_json),
+ (&self.late_link_args_static, &mut self.late_link_args_static_json),
+ (&self.post_link_args, &mut self.post_link_args_json),
+ ] {
+ *args_json =
+ args.iter().map(|(flavor, args)| (flavor.to_cli(), args.clone())).collect();
+ }
+ }
}
impl Default for TargetOptions {
@@ -1513,11 +1782,11 @@ impl Default for TargetOptions {
env: "".into(),
abi: "".into(),
vendor: "unknown".into(),
- linker_flavor: LinkerFlavor::Gcc,
linker: option_env!("CFG_DEFAULT_LINKER").map(|s| s.into()),
- lld_flavor: LldFlavor::Ld,
- pre_link_args: LinkArgs::new(),
- post_link_args: LinkArgs::new(),
+ linker_flavor: LinkerFlavor::Gnu(Cc::Yes, Lld::No),
+ linker_flavor_json: LinkerFlavorCli::Gcc,
+ lld_flavor_json: LldFlavor::Ld,
+ linker_is_gnu_json: true,
link_script: None,
asm_args: cvs![],
cpu: "generic".into(),
@@ -1543,8 +1812,8 @@ impl Default for TargetOptions {
is_like_windows: false,
is_like_msvc: false,
is_like_wasm: false,
+ is_like_android: false,
default_dwarf_version: 4,
- linker_is_gnu: true,
allows_weak_linkage: true,
has_rpath: false,
no_default_libraries: true,
@@ -1554,12 +1823,19 @@ impl Default for TargetOptions {
relro_level: RelroLevel::None,
pre_link_objects: Default::default(),
post_link_objects: Default::default(),
- pre_link_objects_fallback: Default::default(),
- post_link_objects_fallback: Default::default(),
- crt_objects_fallback: None,
+ pre_link_objects_self_contained: Default::default(),
+ post_link_objects_self_contained: Default::default(),
+ link_self_contained: LinkSelfContainedDefault::False,
+ pre_link_args: LinkArgs::new(),
+ pre_link_args_json: LinkArgsCli::new(),
late_link_args: LinkArgs::new(),
+ late_link_args_json: LinkArgsCli::new(),
late_link_args_dynamic: LinkArgs::new(),
+ late_link_args_dynamic_json: LinkArgsCli::new(),
late_link_args_static: LinkArgs::new(),
+ late_link_args_static_json: LinkArgsCli::new(),
+ post_link_args: LinkArgs::new(),
+ post_link_args_json: LinkArgsCli::new(),
link_env: cvs![],
link_env_remove: cvs![],
archive_format: "gnu".into(),
@@ -1598,7 +1874,10 @@ impl Default for TargetOptions {
use_ctors_section: false,
eh_frame_header: true,
has_thumb_interworking: false,
- split_debuginfo: SplitDebuginfo::Off,
+ debuginfo_kind: Default::default(),
+ split_debuginfo: Default::default(),
+ // `Off` is supported by default, but targets can remove this manually, e.g. Windows.
+ supported_split_debuginfo: Cow::Borrowed(&[SplitDebuginfo::Off]),
supported_sanitizers: SanitizerSet::empty(),
default_adjusted_cabi: None,
c_enum_min_bits: 32,
@@ -1776,6 +2055,12 @@ impl Target {
base.$key_name = s;
}
} );
+ ($key_name:ident = $json_name:expr, bool) => ( {
+ let name = $json_name;
+ if let Some(s) = obj.remove(name).and_then(|b| b.as_bool()) {
+ base.$key_name = s;
+ }
+ } );
($key_name:ident, u64) => ( {
let name = (stringify!($key_name)).replace("_", "-");
if let Some(s) = obj.remove(&name).and_then(|j| Json::as_u64(&j)) {
@@ -1871,6 +2156,19 @@ impl Target {
Some(Ok(()))
})).unwrap_or(Ok(()))
} );
+ ($key_name:ident, DebuginfoKind) => ( {
+ let name = (stringify!($key_name)).replace("_", "-");
+ obj.remove(&name).and_then(|o| o.as_str().and_then(|s| {
+ match s.parse::<DebuginfoKind>() {
+ Ok(level) => base.$key_name = level,
+ _ => return Some(Err(
+ format!("'{s}' is not a valid value for debuginfo-kind. Use 'dwarf', \
+ 'dwarf-dsym' or 'pdb'.")
+ )),
+ }
+ Some(Ok(()))
+ })).unwrap_or(Ok(()))
+ } );
($key_name:ident, SplitDebuginfo) => ( {
let name = (stringify!($key_name)).replace("_", "-");
obj.remove(&name).and_then(|o| o.as_str().and_then(|s| {
@@ -1907,6 +2205,25 @@ impl Target {
}
}
} );
+ ($key_name:ident, falliable_list) => ( {
+ let name = (stringify!($key_name)).replace("_", "-");
+ obj.remove(&name).and_then(|j| {
+ if let Some(v) = j.as_array() {
+ match v.iter().map(|a| FromStr::from_str(a.as_str().unwrap())).collect() {
+ Ok(l) => { base.$key_name = l },
+ // FIXME: `falliable_list` can't re-use the `key!` macro for list
+ // elements and the error messages from that macro, so it has a bad
+ // generic message instead
+ Err(_) => return Some(Err(
+ format!("`{:?}` is not a valid value for `{}`", j, name)
+ )),
+ }
+ } else {
+ incorrect_type.push(name)
+ }
+ Some(Ok(()))
+ }).unwrap_or(Ok(()))
+ } );
($key_name:ident, optional) => ( {
let name = (stringify!($key_name)).replace("_", "-");
if let Some(o) = obj.remove(&name) {
@@ -1915,9 +2232,9 @@ impl Target {
.map(|s| s.to_string().into());
}
} );
- ($key_name:ident, LldFlavor) => ( {
- let name = (stringify!($key_name)).replace("_", "-");
- obj.remove(&name).and_then(|o| o.as_str().and_then(|s| {
+ ($key_name:ident = $json_name:expr, LldFlavor) => ( {
+ let name = $json_name;
+ obj.remove(name).and_then(|o| o.as_str().and_then(|s| {
if let Some(flavor) = LldFlavor::from_str(&s) {
base.$key_name = flavor;
} else {
@@ -1929,13 +2246,13 @@ impl Target {
Some(Ok(()))
})).unwrap_or(Ok(()))
} );
- ($key_name:ident, LinkerFlavor) => ( {
- let name = (stringify!($key_name)).replace("_", "-");
- obj.remove(&name).and_then(|o| o.as_str().and_then(|s| {
- match LinkerFlavor::from_str(s) {
+ ($key_name:ident = $json_name:expr, LinkerFlavor) => ( {
+ let name = $json_name;
+ obj.remove(name).and_then(|o| o.as_str().and_then(|s| {
+ match LinkerFlavorCli::from_str(s) {
Some(linker_flavor) => base.$key_name = linker_flavor,
_ => return Some(Err(format!("'{}' is not a valid value for linker-flavor. \
- Use {}", s, LinkerFlavor::one_of()))),
+ Use {}", s, LinkerFlavorCli::one_of()))),
}
Some(Ok(()))
})).unwrap_or(Ok(()))
@@ -1977,20 +2294,20 @@ impl Target {
Ok::<(), String>(())
} );
- ($key_name:ident, crt_objects_fallback) => ( {
- let name = (stringify!($key_name)).replace("_", "-");
- obj.remove(&name).and_then(|o| o.as_str().and_then(|s| {
- match s.parse::<CrtObjectsFallback>() {
- Ok(fallback) => base.$key_name = Some(fallback),
- _ => return Some(Err(format!("'{}' is not a valid CRT objects fallback. \
- Use 'musl', 'mingw' or 'wasm'", s))),
+ ($key_name:ident = $json_name:expr, link_self_contained) => ( {
+ let name = $json_name;
+ obj.remove(name).and_then(|o| o.as_str().and_then(|s| {
+ match s.parse::<LinkSelfContainedDefault>() {
+ Ok(lsc_default) => base.$key_name = lsc_default,
+ _ => return Some(Err(format!("'{}' is not a valid `-Clink-self-contained` default. \
+ Use 'false', 'true', 'musl' or 'mingw'", s))),
}
Some(Ok(()))
})).unwrap_or(Ok(()))
} );
- ($key_name:ident, link_objects) => ( {
- let name = (stringify!($key_name)).replace("_", "-");
- if let Some(val) = obj.remove(&name) {
+ ($key_name:ident = $json_name:expr, link_objects) => ( {
+ let name = $json_name;
+ if let Some(val) = obj.remove(name) {
let obj = val.as_object().ok_or_else(|| format!("{}: expected a \
JSON object with fields per CRT object kind.", name))?;
let mut args = CrtObjects::new();
@@ -2016,14 +2333,14 @@ impl Target {
base.$key_name = args;
}
} );
- ($key_name:ident, link_args) => ( {
- let name = (stringify!($key_name)).replace("_", "-");
- if let Some(val) = obj.remove(&name) {
+ ($key_name:ident = $json_name:expr, link_args) => ( {
+ let name = $json_name;
+ if let Some(val) = obj.remove(name) {
let obj = val.as_object().ok_or_else(|| format!("{}: expected a \
JSON object with fields per linker-flavor.", name))?;
- let mut args = LinkArgs::new();
+ let mut args = LinkArgsCli::new();
for (k, v) in obj {
- let flavor = LinkerFlavor::from_str(&k).ok_or_else(|| {
+ let flavor = LinkerFlavorCli::from_str(&k).ok_or_else(|| {
format!("{}: '{}' is not a valid value for linker-flavor. \
Use 'em', 'gcc', 'ld' or 'msvc'", name, k)
})?;
@@ -2109,19 +2426,20 @@ impl Target {
key!(env);
key!(abi);
key!(vendor);
- key!(linker_flavor, LinkerFlavor)?;
key!(linker, optional);
- key!(lld_flavor, LldFlavor)?;
- key!(pre_link_objects, link_objects);
- key!(post_link_objects, link_objects);
- key!(pre_link_objects_fallback, link_objects);
- key!(post_link_objects_fallback, link_objects);
- key!(crt_objects_fallback, crt_objects_fallback)?;
- key!(pre_link_args, link_args);
- key!(late_link_args, link_args);
- key!(late_link_args_dynamic, link_args);
- key!(late_link_args_static, link_args);
- key!(post_link_args, link_args);
+ key!(linker_flavor_json = "linker-flavor", LinkerFlavor)?;
+ key!(lld_flavor_json = "lld-flavor", LldFlavor)?;
+ key!(linker_is_gnu_json = "linker-is-gnu", bool);
+ key!(pre_link_objects = "pre-link-objects", link_objects);
+ key!(post_link_objects = "post-link-objects", link_objects);
+ key!(pre_link_objects_self_contained = "pre-link-objects-fallback", link_objects);
+ key!(post_link_objects_self_contained = "post-link-objects-fallback", link_objects);
+ key!(link_self_contained = "crt-objects-fallback", link_self_contained)?;
+ key!(pre_link_args_json = "pre-link-args", link_args);
+ key!(late_link_args_json = "late-link-args", link_args);
+ key!(late_link_args_dynamic_json = "late-link-args-dynamic", link_args);
+ key!(late_link_args_static_json = "late-link-args-static", link_args);
+ key!(post_link_args_json = "post-link-args", link_args);
key!(link_script, optional);
key!(link_env, env);
key!(link_env_remove, list);
@@ -2148,8 +2466,8 @@ impl Target {
key!(is_like_windows, bool);
key!(is_like_msvc, bool);
key!(is_like_wasm, bool);
+ key!(is_like_android, bool);
key!(default_dwarf_version, u32);
- key!(linker_is_gnu, bool);
key!(allows_weak_linkage, bool);
key!(has_rpath, bool);
key!(no_default_libraries, bool);
@@ -2193,7 +2511,9 @@ impl Target {
key!(use_ctors_section, bool);
key!(eh_frame_header, bool);
key!(has_thumb_interworking, bool);
+ key!(debuginfo_kind, DebuginfoKind)?;
key!(split_debuginfo, SplitDebuginfo)?;
+ key!(supported_split_debuginfo, falliable_list)?;
key!(supported_sanitizers, SanitizerSet)?;
key!(default_adjusted_cabi, Option<Abi>)?;
key!(c_enum_min_bits, u64);
@@ -2204,6 +2524,8 @@ impl Target {
// This can cause unfortunate ICEs later down the line.
return Err("may not set is_builtin for targets not built-in".into());
}
+ base.update_from_cli();
+
// Each field should have been read using `Json::remove` so any keys remaining are unused.
let remaining_keys = obj.keys();
Ok((
@@ -2219,7 +2541,7 @@ impl Target {
load_builtin(target_triple).expect("built-in target")
}
TargetTriple::TargetJson { .. } => {
- panic!("built-in targets doens't support target-paths")
+ panic!("built-in targets doesn't support target-paths")
}
}
}
@@ -2295,42 +2617,44 @@ impl ToJson for Target {
fn to_json(&self) -> Json {
let mut d = serde_json::Map::new();
let default: TargetOptions = Default::default();
+ let mut target = self.clone();
+ target.update_to_cli();
macro_rules! target_val {
($attr:ident) => {{
let name = (stringify!($attr)).replace("_", "-");
- d.insert(name, self.$attr.to_json());
+ d.insert(name, target.$attr.to_json());
}};
}
macro_rules! target_option_val {
($attr:ident) => {{
let name = (stringify!($attr)).replace("_", "-");
- if default.$attr != self.$attr {
- d.insert(name, self.$attr.to_json());
+ if default.$attr != target.$attr {
+ d.insert(name, target.$attr.to_json());
}
}};
- ($attr:ident, $key_name:expr) => {{
- let name = $key_name;
- if default.$attr != self.$attr {
- d.insert(name.into(), self.$attr.to_json());
+ ($attr:ident, $json_name:expr) => {{
+ let name = $json_name;
+ if default.$attr != target.$attr {
+ d.insert(name.into(), target.$attr.to_json());
}
}};
- (link_args - $attr:ident) => {{
- let name = (stringify!($attr)).replace("_", "-");
- if default.$attr != self.$attr {
- let obj = self
+ (link_args - $attr:ident, $json_name:expr) => {{
+ let name = $json_name;
+ if default.$attr != target.$attr {
+ let obj = target
.$attr
.iter()
.map(|(k, v)| (k.desc().to_string(), v.clone()))
.collect::<BTreeMap<_, _>>();
- d.insert(name, obj.to_json());
+ d.insert(name.to_string(), obj.to_json());
}
}};
(env - $attr:ident) => {{
let name = (stringify!($attr)).replace("_", "-");
- if default.$attr != self.$attr {
- let obj = self
+ if default.$attr != target.$attr {
+ let obj = target
.$attr
.iter()
.map(|&(ref k, ref v)| format!("{k}={v}"))
@@ -2352,19 +2676,20 @@ impl ToJson for Target {
target_option_val!(env);
target_option_val!(abi);
target_option_val!(vendor);
- target_option_val!(linker_flavor);
target_option_val!(linker);
- target_option_val!(lld_flavor);
+ target_option_val!(linker_flavor_json, "linker-flavor");
+ target_option_val!(lld_flavor_json, "lld-flavor");
+ target_option_val!(linker_is_gnu_json, "linker-is-gnu");
target_option_val!(pre_link_objects);
target_option_val!(post_link_objects);
- target_option_val!(pre_link_objects_fallback);
- target_option_val!(post_link_objects_fallback);
- target_option_val!(crt_objects_fallback);
- target_option_val!(link_args - pre_link_args);
- target_option_val!(link_args - late_link_args);
- target_option_val!(link_args - late_link_args_dynamic);
- target_option_val!(link_args - late_link_args_static);
- target_option_val!(link_args - post_link_args);
+ target_option_val!(pre_link_objects_self_contained, "pre-link-objects-fallback");
+ target_option_val!(post_link_objects_self_contained, "post-link-objects-fallback");
+ target_option_val!(link_self_contained, "crt-objects-fallback");
+ target_option_val!(link_args - pre_link_args_json, "pre-link-args");
+ target_option_val!(link_args - late_link_args_json, "late-link-args");
+ target_option_val!(link_args - late_link_args_dynamic_json, "late-link-args-dynamic");
+ target_option_val!(link_args - late_link_args_static_json, "late-link-args-static");
+ target_option_val!(link_args - post_link_args_json, "post-link-args");
target_option_val!(link_script);
target_option_val!(env - link_env);
target_option_val!(link_env_remove);
@@ -2392,8 +2717,8 @@ impl ToJson for Target {
target_option_val!(is_like_windows);
target_option_val!(is_like_msvc);
target_option_val!(is_like_wasm);
+ target_option_val!(is_like_android);
target_option_val!(default_dwarf_version);
- target_option_val!(linker_is_gnu);
target_option_val!(allows_weak_linkage);
target_option_val!(has_rpath);
target_option_val!(no_default_libraries);
@@ -2437,7 +2762,9 @@ impl ToJson for Target {
target_option_val!(use_ctors_section);
target_option_val!(eh_frame_header);
target_option_val!(has_thumb_interworking);
+ target_option_val!(debuginfo_kind);
target_option_val!(split_debuginfo);
+ target_option_val!(supported_split_debuginfo);
target_option_val!(supported_sanitizers);
target_option_val!(c_enum_min_bits);
target_option_val!(generate_arange_section);
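Two things tie this file's changes together: the `key!(field = "json-name", kind)` arms let a renamed Rust field (now usually suffixed `_json`) keep its legacy JSON key, and `base.update_from_cli()` runs only after every recognised key has been `remove`d, folding the legacy "linker-flavor"/"lld-flavor"/"linker-is-gnu" and `*-link-args` values into the unified `LinkerFlavor`-based options. A standalone sketch of what a single `bool` arm does (simplified; `serde_json` is already a dependency of this module, but the helper below is an assumption rather than the real macro):

    use serde_json::{json, Map, Value};

    // Simplified stand-in for `key!(linker_is_gnu_json = "linker-is-gnu", bool)`:
    // consume the key if present and only overwrite the default on a real bool.
    fn read_bool(obj: &mut Map<String, Value>, name: &str, slot: &mut bool) {
        if let Some(b) = obj.remove(name).and_then(|v| v.as_bool()) {
            *slot = b;
        }
    }

    fn main() {
        let mut linker_is_gnu_json = true; // default from `TargetOptions::default()`
        let mut obj = json!({ "linker-is-gnu": false }).as_object().unwrap().clone();
        read_bool(&mut obj, "linker-is-gnu", &mut linker_is_gnu_json);
        assert!(!linker_is_gnu_json);
        // Handled keys are removed, so whatever remains can be reported as unused.
        assert!(obj.is_empty());
    }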
diff --git a/compiler/rustc_target/src/spec/msp430_none_elf.rs b/compiler/rustc_target/src/spec/msp430_none_elf.rs
index 6b09386ae..251fd2a0a 100644
--- a/compiler/rustc_target/src/spec/msp430_none_elf.rs
+++ b/compiler/rustc_target/src/spec/msp430_none_elf.rs
@@ -1,4 +1,4 @@
-use crate::spec::{cvs, PanicStrategy, RelocModel, Target, TargetOptions};
+use crate::spec::{cvs, Cc, LinkerFlavor, PanicStrategy, RelocModel, Target, TargetOptions};
pub fn target() -> Target {
Target {
@@ -16,7 +16,7 @@ pub fn target() -> Target {
// dependency on this specific gcc.
asm_args: cvs!["-mcpu=msp430"],
linker: Some("msp430-elf-gcc".into()),
- linker_is_gnu: false,
+ linker_flavor: LinkerFlavor::Unix(Cc::Yes),
// There are no atomic CAS instructions available in the MSP430
// instruction set, and the LLVM backend doesn't currently support
diff --git a/compiler/rustc_target/src/spec/msvc_base.rs b/compiler/rustc_target/src/spec/msvc_base.rs
index edb30b72b..1dad9133e 100644
--- a/compiler/rustc_target/src/spec/msvc_base.rs
+++ b/compiler/rustc_target/src/spec/msvc_base.rs
@@ -1,16 +1,15 @@
-use crate::spec::{LinkerFlavor, LldFlavor, SplitDebuginfo, TargetOptions};
+use crate::spec::{DebuginfoKind, LinkerFlavor, Lld, SplitDebuginfo, TargetOptions};
+use std::borrow::Cow;
pub fn opts() -> TargetOptions {
// Suppress the verbose logo and authorship debugging output, which would needlessly
// clog any log files.
- let pre_link_args = TargetOptions::link_args(LinkerFlavor::Msvc, &["/NOLOGO"]);
+ let pre_link_args = TargetOptions::link_args(LinkerFlavor::Msvc(Lld::No), &["/NOLOGO"]);
TargetOptions {
- linker_flavor: LinkerFlavor::Msvc,
+ linker_flavor: LinkerFlavor::Msvc(Lld::No),
is_like_windows: true,
is_like_msvc: true,
- lld_flavor: LldFlavor::Link,
- linker_is_gnu: false,
pre_link_args,
abi_return_struct_as_int: true,
emit_debug_gdb_scripts: false,
@@ -18,6 +17,8 @@ pub fn opts() -> TargetOptions {
// Currently this is the only supported method of debuginfo on MSVC
// where `*.pdb` files show up next to the final artifact.
split_debuginfo: SplitDebuginfo::Packed,
+ supported_split_debuginfo: Cow::Borrowed(&[SplitDebuginfo::Packed]),
+ debuginfo_kind: DebuginfoKind::Pdb,
..Default::default()
}
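msvc_base.rs is the case hinted at by the default above: `Off` is dropped from the supported list, `Packed` becomes the only split-debuginfo mode, and the debuginfo kind switches to PDB. The consumers of `supported_split_debuginfo` are not in this excerpt, but the field is presumably used as an allow-list for the requested `-C split-debuginfo` value, roughly like this standalone sketch (the enum and check below are simplified assumptions, not the real compiler code):

    #[derive(Clone, Copy, PartialEq, Eq, Debug)]
    enum SplitDebuginfo { Off, Packed, Unpacked }

    fn check(requested: SplitDebuginfo, supported: &[SplitDebuginfo]) -> Result<(), String> {
        if supported.contains(&requested) {
            Ok(())
        } else {
            Err(format!("{requested:?} is not supported by this target"))
        }
    }

    fn main() {
        let msvc_supported = [SplitDebuginfo::Packed]; // as set in msvc_base above
        assert!(check(SplitDebuginfo::Packed, &msvc_supported).is_ok());
        assert!(check(SplitDebuginfo::Off, &msvc_supported).is_err());
        assert!(check(SplitDebuginfo::Unpacked, &msvc_supported).is_err());
    }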
diff --git a/compiler/rustc_target/src/spec/nvptx64_nvidia_cuda.rs b/compiler/rustc_target/src/spec/nvptx64_nvidia_cuda.rs
index 1c5b68001..b0582b235 100644
--- a/compiler/rustc_target/src/spec/nvptx64_nvidia_cuda.rs
+++ b/compiler/rustc_target/src/spec/nvptx64_nvidia_cuda.rs
@@ -10,10 +10,9 @@ pub fn target() -> Target {
options: TargetOptions {
os: "cuda".into(),
vendor: "nvidia".into(),
- linker_flavor: LinkerFlavor::PtxLinker,
+ linker_flavor: LinkerFlavor::Ptx,
// The linker can be installed from `crates.io`.
linker: Some("rust-ptx-linker".into()),
- linker_is_gnu: false,
// With `ptx-linker` approach, it can be later overridden via link flags.
cpu: "sm_30".into(),
diff --git a/compiler/rustc_target/src/spec/powerpc64_unknown_freebsd.rs b/compiler/rustc_target/src/spec/powerpc64_unknown_freebsd.rs
index 803453c4a..08b273207 100644
--- a/compiler/rustc_target/src/spec/powerpc64_unknown_freebsd.rs
+++ b/compiler/rustc_target/src/spec/powerpc64_unknown_freebsd.rs
@@ -1,11 +1,12 @@
use crate::abi::Endian;
-use crate::spec::{LinkerFlavor, Target, TargetOptions};
+use crate::spec::{Cc, LinkerFlavor, Lld, StackProbeType, Target, TargetOptions};
pub fn target() -> Target {
let mut base = super::freebsd_base::opts();
base.cpu = "ppc64".into();
- base.add_pre_link_args(LinkerFlavor::Gcc, &["-m64"]);
+ base.add_pre_link_args(LinkerFlavor::Gnu(Cc::Yes, Lld::No), &["-m64"]);
base.max_atomic_width = Some(64);
+ base.stack_probes = StackProbeType::Inline;
Target {
llvm_target: "powerpc64-unknown-freebsd".into(),
diff --git a/compiler/rustc_target/src/spec/powerpc64_unknown_linux_gnu.rs b/compiler/rustc_target/src/spec/powerpc64_unknown_linux_gnu.rs
index 5413c4f33..ce64de861 100644
--- a/compiler/rustc_target/src/spec/powerpc64_unknown_linux_gnu.rs
+++ b/compiler/rustc_target/src/spec/powerpc64_unknown_linux_gnu.rs
@@ -1,15 +1,12 @@
use crate::abi::Endian;
-use crate::spec::{LinkerFlavor, RelroLevel, Target, TargetOptions};
+use crate::spec::{Cc, LinkerFlavor, Lld, StackProbeType, Target, TargetOptions};
pub fn target() -> Target {
let mut base = super::linux_gnu_base::opts();
base.cpu = "ppc64".into();
- base.add_pre_link_args(LinkerFlavor::Gcc, &["-m64"]);
+ base.add_pre_link_args(LinkerFlavor::Gnu(Cc::Yes, Lld::No), &["-m64"]);
base.max_atomic_width = Some(64);
-
- // ld.so in at least RHEL6 on ppc64 has a bug related to BIND_NOW, so only enable partial RELRO
- // for now. https://github.com/rust-lang/rust/pull/43170#issuecomment-315411474
- base.relro_level = RelroLevel::Partial;
+ base.stack_probes = StackProbeType::Inline;
Target {
llvm_target: "powerpc64-unknown-linux-gnu".into(),
diff --git a/compiler/rustc_target/src/spec/powerpc64_unknown_linux_musl.rs b/compiler/rustc_target/src/spec/powerpc64_unknown_linux_musl.rs
index 159335eb6..81286a668 100644
--- a/compiler/rustc_target/src/spec/powerpc64_unknown_linux_musl.rs
+++ b/compiler/rustc_target/src/spec/powerpc64_unknown_linux_musl.rs
@@ -1,11 +1,12 @@
use crate::abi::Endian;
-use crate::spec::{LinkerFlavor, Target, TargetOptions};
+use crate::spec::{Cc, LinkerFlavor, Lld, StackProbeType, Target, TargetOptions};
pub fn target() -> Target {
let mut base = super::linux_musl_base::opts();
base.cpu = "ppc64".into();
- base.add_pre_link_args(LinkerFlavor::Gcc, &["-m64"]);
+ base.add_pre_link_args(LinkerFlavor::Gnu(Cc::Yes, Lld::No), &["-m64"]);
base.max_atomic_width = Some(64);
+ base.stack_probes = StackProbeType::Inline;
Target {
llvm_target: "powerpc64-unknown-linux-musl".into(),
diff --git a/compiler/rustc_target/src/spec/powerpc64_unknown_openbsd.rs b/compiler/rustc_target/src/spec/powerpc64_unknown_openbsd.rs
new file mode 100644
index 000000000..7232dce3e
--- /dev/null
+++ b/compiler/rustc_target/src/spec/powerpc64_unknown_openbsd.rs
@@ -0,0 +1,18 @@
+use crate::abi::Endian;
+use crate::spec::{Cc, LinkerFlavor, Lld, StackProbeType, Target, TargetOptions};
+
+pub fn target() -> Target {
+ let mut base = super::openbsd_base::opts();
+ base.cpu = "ppc64".into();
+ base.add_pre_link_args(LinkerFlavor::Gnu(Cc::Yes, Lld::No), &["-m64"]);
+ base.max_atomic_width = Some(64);
+ base.stack_probes = StackProbeType::Inline;
+
+ Target {
+ llvm_target: "powerpc64-unknown-openbsd".into(),
+ pointer_width: 64,
+ data_layout: "E-m:e-i64:64-n32:64".into(),
+ arch: "powerpc64".into(),
+ options: TargetOptions { endian: Endian::Big, mcount: "_mcount".into(), ..base },
+ }
+}
diff --git a/compiler/rustc_target/src/spec/powerpc64_wrs_vxworks.rs b/compiler/rustc_target/src/spec/powerpc64_wrs_vxworks.rs
index b7420d232..10da7872c 100644
--- a/compiler/rustc_target/src/spec/powerpc64_wrs_vxworks.rs
+++ b/compiler/rustc_target/src/spec/powerpc64_wrs_vxworks.rs
@@ -1,11 +1,12 @@
use crate::abi::Endian;
-use crate::spec::{LinkerFlavor, Target, TargetOptions};
+use crate::spec::{Cc, LinkerFlavor, Lld, StackProbeType, Target, TargetOptions};
pub fn target() -> Target {
let mut base = super::vxworks_base::opts();
base.cpu = "ppc64".into();
- base.add_pre_link_args(LinkerFlavor::Gcc, &["-m64"]);
+ base.add_pre_link_args(LinkerFlavor::Gnu(Cc::Yes, Lld::No), &["-m64"]);
base.max_atomic_width = Some(64);
+ base.stack_probes = StackProbeType::Inline;
Target {
llvm_target: "powerpc64-unknown-linux-gnu".into(),
diff --git a/compiler/rustc_target/src/spec/powerpc64le_unknown_freebsd.rs b/compiler/rustc_target/src/spec/powerpc64le_unknown_freebsd.rs
index a3d180043..8c941e106 100644
--- a/compiler/rustc_target/src/spec/powerpc64le_unknown_freebsd.rs
+++ b/compiler/rustc_target/src/spec/powerpc64le_unknown_freebsd.rs
@@ -1,10 +1,11 @@
-use crate::spec::{LinkerFlavor, Target, TargetOptions};
+use crate::spec::{Cc, LinkerFlavor, Lld, StackProbeType, Target, TargetOptions};
pub fn target() -> Target {
let mut base = super::freebsd_base::opts();
base.cpu = "ppc64le".into();
- base.add_pre_link_args(LinkerFlavor::Gcc, &["-m64"]);
+ base.add_pre_link_args(LinkerFlavor::Gnu(Cc::Yes, Lld::No), &["-m64"]);
base.max_atomic_width = Some(64);
+ base.stack_probes = StackProbeType::Inline;
Target {
llvm_target: "powerpc64le-unknown-freebsd".into(),
diff --git a/compiler/rustc_target/src/spec/powerpc64le_unknown_linux_gnu.rs b/compiler/rustc_target/src/spec/powerpc64le_unknown_linux_gnu.rs
index e18ff3be4..fd896e086 100644
--- a/compiler/rustc_target/src/spec/powerpc64le_unknown_linux_gnu.rs
+++ b/compiler/rustc_target/src/spec/powerpc64le_unknown_linux_gnu.rs
@@ -1,10 +1,11 @@
-use crate::spec::{LinkerFlavor, Target, TargetOptions};
+use crate::spec::{Cc, LinkerFlavor, Lld, StackProbeType, Target, TargetOptions};
pub fn target() -> Target {
let mut base = super::linux_gnu_base::opts();
base.cpu = "ppc64le".into();
- base.add_pre_link_args(LinkerFlavor::Gcc, &["-m64"]);
+ base.add_pre_link_args(LinkerFlavor::Gnu(Cc::Yes, Lld::No), &["-m64"]);
base.max_atomic_width = Some(64);
+ base.stack_probes = StackProbeType::Inline;
Target {
llvm_target: "powerpc64le-unknown-linux-gnu".into(),
diff --git a/compiler/rustc_target/src/spec/powerpc64le_unknown_linux_musl.rs b/compiler/rustc_target/src/spec/powerpc64le_unknown_linux_musl.rs
index b84943d23..3cffcf497 100644
--- a/compiler/rustc_target/src/spec/powerpc64le_unknown_linux_musl.rs
+++ b/compiler/rustc_target/src/spec/powerpc64le_unknown_linux_musl.rs
@@ -1,10 +1,11 @@
-use crate::spec::{LinkerFlavor, Target, TargetOptions};
+use crate::spec::{Cc, LinkerFlavor, Lld, StackProbeType, Target, TargetOptions};
pub fn target() -> Target {
let mut base = super::linux_musl_base::opts();
base.cpu = "ppc64le".into();
- base.add_pre_link_args(LinkerFlavor::Gcc, &["-m64"]);
+ base.add_pre_link_args(LinkerFlavor::Gnu(Cc::Yes, Lld::No), &["-m64"]);
base.max_atomic_width = Some(64);
+ base.stack_probes = StackProbeType::Inline;
Target {
llvm_target: "powerpc64le-unknown-linux-musl".into(),
diff --git a/compiler/rustc_target/src/spec/powerpc_unknown_freebsd.rs b/compiler/rustc_target/src/spec/powerpc_unknown_freebsd.rs
index 516b2de37..342f321bd 100644
--- a/compiler/rustc_target/src/spec/powerpc_unknown_freebsd.rs
+++ b/compiler/rustc_target/src/spec/powerpc_unknown_freebsd.rs
@@ -1,11 +1,15 @@
use crate::abi::Endian;
-use crate::spec::{LinkerFlavor, RelocModel, Target, TargetOptions};
+use crate::spec::{Cc, LinkerFlavor, Lld, StackProbeType, Target, TargetOptions};
pub fn target() -> Target {
let mut base = super::freebsd_base::opts();
// Extra hint to linker that we are generating secure-PLT code.
- base.add_pre_link_args(LinkerFlavor::Gcc, &["-m32", "--target=powerpc-unknown-freebsd13.0"]);
+ base.add_pre_link_args(
+ LinkerFlavor::Gnu(Cc::Yes, Lld::No),
+ &["-m32", "--target=powerpc-unknown-freebsd13.0"],
+ );
base.max_atomic_width = Some(32);
+ base.stack_probes = StackProbeType::Inline;
Target {
llvm_target: "powerpc-unknown-freebsd13.0".into(),
@@ -15,7 +19,6 @@ pub fn target() -> Target {
options: TargetOptions {
endian: Endian::Big,
features: "+secure-plt".into(),
- relocation_model: RelocModel::Pic,
mcount: "_mcount".into(),
..base
},
diff --git a/compiler/rustc_target/src/spec/powerpc_unknown_linux_gnu.rs b/compiler/rustc_target/src/spec/powerpc_unknown_linux_gnu.rs
index 6686a0bbf..c8c61dc46 100644
--- a/compiler/rustc_target/src/spec/powerpc_unknown_linux_gnu.rs
+++ b/compiler/rustc_target/src/spec/powerpc_unknown_linux_gnu.rs
@@ -1,10 +1,11 @@
use crate::abi::Endian;
-use crate::spec::{LinkerFlavor, Target, TargetOptions};
+use crate::spec::{Cc, LinkerFlavor, Lld, StackProbeType, Target, TargetOptions};
pub fn target() -> Target {
let mut base = super::linux_gnu_base::opts();
- base.add_pre_link_args(LinkerFlavor::Gcc, &["-m32"]);
+ base.add_pre_link_args(LinkerFlavor::Gnu(Cc::Yes, Lld::No), &["-m32"]);
base.max_atomic_width = Some(32);
+ base.stack_probes = StackProbeType::Inline;
Target {
llvm_target: "powerpc-unknown-linux-gnu".into(),
diff --git a/compiler/rustc_target/src/spec/powerpc_unknown_linux_gnuspe.rs b/compiler/rustc_target/src/spec/powerpc_unknown_linux_gnuspe.rs
index 6a250f4b5..5c51ec91f 100644
--- a/compiler/rustc_target/src/spec/powerpc_unknown_linux_gnuspe.rs
+++ b/compiler/rustc_target/src/spec/powerpc_unknown_linux_gnuspe.rs
@@ -1,10 +1,11 @@
use crate::abi::Endian;
-use crate::spec::{LinkerFlavor, Target, TargetOptions};
+use crate::spec::{Cc, LinkerFlavor, Lld, StackProbeType, Target, TargetOptions};
pub fn target() -> Target {
let mut base = super::linux_gnu_base::opts();
- base.add_pre_link_args(LinkerFlavor::Gcc, &["-mspe"]);
+ base.add_pre_link_args(LinkerFlavor::Gnu(Cc::Yes, Lld::No), &["-mspe"]);
base.max_atomic_width = Some(32);
+ base.stack_probes = StackProbeType::Inline;
Target {
llvm_target: "powerpc-unknown-linux-gnuspe".into(),
diff --git a/compiler/rustc_target/src/spec/powerpc_unknown_linux_musl.rs b/compiler/rustc_target/src/spec/powerpc_unknown_linux_musl.rs
index 34200c679..fc7d802cb 100644
--- a/compiler/rustc_target/src/spec/powerpc_unknown_linux_musl.rs
+++ b/compiler/rustc_target/src/spec/powerpc_unknown_linux_musl.rs
@@ -1,10 +1,11 @@
use crate::abi::Endian;
-use crate::spec::{LinkerFlavor, Target, TargetOptions};
+use crate::spec::{Cc, LinkerFlavor, Lld, StackProbeType, Target, TargetOptions};
pub fn target() -> Target {
let mut base = super::linux_musl_base::opts();
- base.add_pre_link_args(LinkerFlavor::Gcc, &["-m32"]);
+ base.add_pre_link_args(LinkerFlavor::Gnu(Cc::Yes, Lld::No), &["-m32"]);
base.max_atomic_width = Some(32);
+ base.stack_probes = StackProbeType::Inline;
Target {
llvm_target: "powerpc-unknown-linux-musl".into(),
diff --git a/compiler/rustc_target/src/spec/powerpc_unknown_netbsd.rs b/compiler/rustc_target/src/spec/powerpc_unknown_netbsd.rs
index 60661ef9b..912149c79 100644
--- a/compiler/rustc_target/src/spec/powerpc_unknown_netbsd.rs
+++ b/compiler/rustc_target/src/spec/powerpc_unknown_netbsd.rs
@@ -1,10 +1,11 @@
use crate::abi::Endian;
-use crate::spec::{LinkerFlavor, Target, TargetOptions};
+use crate::spec::{Cc, LinkerFlavor, Lld, StackProbeType, Target, TargetOptions};
pub fn target() -> Target {
let mut base = super::netbsd_base::opts();
- base.add_pre_link_args(LinkerFlavor::Gcc, &["-m32"]);
+ base.add_pre_link_args(LinkerFlavor::Gnu(Cc::Yes, Lld::No), &["-m32"]);
base.max_atomic_width = Some(32);
+ base.stack_probes = StackProbeType::Inline;
Target {
llvm_target: "powerpc-unknown-netbsd".into(),
diff --git a/compiler/rustc_target/src/spec/powerpc_unknown_openbsd.rs b/compiler/rustc_target/src/spec/powerpc_unknown_openbsd.rs
index ad2c3d40f..dec85f996 100644
--- a/compiler/rustc_target/src/spec/powerpc_unknown_openbsd.rs
+++ b/compiler/rustc_target/src/spec/powerpc_unknown_openbsd.rs
@@ -1,10 +1,11 @@
use crate::abi::Endian;
-use crate::spec::Target;
+use crate::spec::{StackProbeType, Target};
pub fn target() -> Target {
let mut base = super::openbsd_base::opts();
base.endian = Endian::Big;
base.max_atomic_width = Some(32);
+ base.stack_probes = StackProbeType::Inline;
Target {
llvm_target: "powerpc-unknown-openbsd".into(),
diff --git a/compiler/rustc_target/src/spec/powerpc_wrs_vxworks.rs b/compiler/rustc_target/src/spec/powerpc_wrs_vxworks.rs
index 3f24966e0..a8c1c2a61 100644
--- a/compiler/rustc_target/src/spec/powerpc_wrs_vxworks.rs
+++ b/compiler/rustc_target/src/spec/powerpc_wrs_vxworks.rs
@@ -1,10 +1,11 @@
use crate::abi::Endian;
-use crate::spec::{LinkerFlavor, Target, TargetOptions};
+use crate::spec::{Cc, LinkerFlavor, Lld, StackProbeType, Target, TargetOptions};
pub fn target() -> Target {
let mut base = super::vxworks_base::opts();
- base.add_pre_link_args(LinkerFlavor::Gcc, &["-m32", "--secure-plt"]);
+ base.add_pre_link_args(LinkerFlavor::Gnu(Cc::Yes, Lld::No), &["-m32", "--secure-plt"]);
base.max_atomic_width = Some(32);
+ base.stack_probes = StackProbeType::Inline;
Target {
llvm_target: "powerpc-unknown-linux-gnu".into(),
diff --git a/compiler/rustc_target/src/spec/powerpc_wrs_vxworks_spe.rs b/compiler/rustc_target/src/spec/powerpc_wrs_vxworks_spe.rs
index 0f04f41f9..abb8d13da 100644
--- a/compiler/rustc_target/src/spec/powerpc_wrs_vxworks_spe.rs
+++ b/compiler/rustc_target/src/spec/powerpc_wrs_vxworks_spe.rs
@@ -1,10 +1,11 @@
use crate::abi::Endian;
-use crate::spec::{LinkerFlavor, Target, TargetOptions};
+use crate::spec::{Cc, LinkerFlavor, Lld, StackProbeType, Target, TargetOptions};
pub fn target() -> Target {
let mut base = super::vxworks_base::opts();
- base.add_pre_link_args(LinkerFlavor::Gcc, &["-mspe", "--secure-plt"]);
+ base.add_pre_link_args(LinkerFlavor::Gnu(Cc::Yes, Lld::No), &["-mspe", "--secure-plt"]);
base.max_atomic_width = Some(32);
+ base.stack_probes = StackProbeType::Inline;
Target {
llvm_target: "powerpc-unknown-linux-gnuspe".into(),
diff --git a/compiler/rustc_target/src/spec/riscv32i_unknown_none_elf.rs b/compiler/rustc_target/src/spec/riscv32i_unknown_none_elf.rs
index 232139db6..75a65a268 100644
--- a/compiler/rustc_target/src/spec/riscv32i_unknown_none_elf.rs
+++ b/compiler/rustc_target/src/spec/riscv32i_unknown_none_elf.rs
@@ -1,5 +1,4 @@
-use crate::spec::{LinkerFlavor, LldFlavor, PanicStrategy, RelocModel};
-use crate::spec::{Target, TargetOptions};
+use crate::spec::{Cc, LinkerFlavor, Lld, PanicStrategy, RelocModel, Target, TargetOptions};
pub fn target() -> Target {
Target {
@@ -9,7 +8,7 @@ pub fn target() -> Target {
arch: "riscv32".into(),
options: TargetOptions {
- linker_flavor: LinkerFlavor::Lld(LldFlavor::Ld),
+ linker_flavor: LinkerFlavor::Gnu(Cc::No, Lld::Yes),
linker: Some("rust-lld".into()),
cpu: "generic-rv32".into(),
max_atomic_width: Some(0),
diff --git a/compiler/rustc_target/src/spec/riscv32im_unknown_none_elf.rs b/compiler/rustc_target/src/spec/riscv32im_unknown_none_elf.rs
index 3e5d2887f..f2242bbe0 100644
--- a/compiler/rustc_target/src/spec/riscv32im_unknown_none_elf.rs
+++ b/compiler/rustc_target/src/spec/riscv32im_unknown_none_elf.rs
@@ -1,5 +1,4 @@
-use crate::spec::{LinkerFlavor, LldFlavor, PanicStrategy, RelocModel};
-use crate::spec::{Target, TargetOptions};
+use crate::spec::{Cc, LinkerFlavor, Lld, PanicStrategy, RelocModel, Target, TargetOptions};
pub fn target() -> Target {
Target {
@@ -9,7 +8,7 @@ pub fn target() -> Target {
arch: "riscv32".into(),
options: TargetOptions {
- linker_flavor: LinkerFlavor::Lld(LldFlavor::Ld),
+ linker_flavor: LinkerFlavor::Gnu(Cc::No, Lld::Yes),
linker: Some("rust-lld".into()),
cpu: "generic-rv32".into(),
max_atomic_width: Some(0),
diff --git a/compiler/rustc_target/src/spec/riscv32imac_unknown_none_elf.rs b/compiler/rustc_target/src/spec/riscv32imac_unknown_none_elf.rs
index 99317b9f1..55c6e4d16 100644
--- a/compiler/rustc_target/src/spec/riscv32imac_unknown_none_elf.rs
+++ b/compiler/rustc_target/src/spec/riscv32imac_unknown_none_elf.rs
@@ -1,5 +1,4 @@
-use crate::spec::{LinkerFlavor, LldFlavor, PanicStrategy, RelocModel};
-use crate::spec::{Target, TargetOptions};
+use crate::spec::{Cc, LinkerFlavor, Lld, PanicStrategy, RelocModel, Target, TargetOptions};
pub fn target() -> Target {
Target {
@@ -9,7 +8,7 @@ pub fn target() -> Target {
arch: "riscv32".into(),
options: TargetOptions {
- linker_flavor: LinkerFlavor::Lld(LldFlavor::Ld),
+ linker_flavor: LinkerFlavor::Gnu(Cc::No, Lld::Yes),
linker: Some("rust-lld".into()),
cpu: "generic-rv32".into(),
max_atomic_width: Some(32),
diff --git a/compiler/rustc_target/src/spec/riscv32imac_unknown_xous_elf.rs b/compiler/rustc_target/src/spec/riscv32imac_unknown_xous_elf.rs
index a5de645c9..a263e5d5c 100644
--- a/compiler/rustc_target/src/spec/riscv32imac_unknown_xous_elf.rs
+++ b/compiler/rustc_target/src/spec/riscv32imac_unknown_xous_elf.rs
@@ -1,5 +1,4 @@
-use crate::spec::{LinkerFlavor, LldFlavor, PanicStrategy, RelocModel};
-use crate::spec::{Target, TargetOptions};
+use crate::spec::{Cc, LinkerFlavor, Lld, PanicStrategy, RelocModel, Target, TargetOptions};
pub fn target() -> Target {
Target {
@@ -10,7 +9,7 @@ pub fn target() -> Target {
options: TargetOptions {
os: "xous".into(),
- linker_flavor: LinkerFlavor::Lld(LldFlavor::Ld),
+ linker_flavor: LinkerFlavor::Gnu(Cc::No, Lld::Yes),
linker: Some("rust-lld".into()),
cpu: "generic-rv32".into(),
max_atomic_width: Some(32),
diff --git a/compiler/rustc_target/src/spec/riscv32imc_esp_espidf.rs b/compiler/rustc_target/src/spec/riscv32imc_esp_espidf.rs
index 03baef65c..25638a092 100644
--- a/compiler/rustc_target/src/spec/riscv32imc_esp_espidf.rs
+++ b/compiler/rustc_target/src/spec/riscv32imc_esp_espidf.rs
@@ -1,5 +1,4 @@
-use crate::spec::{cvs, Target, TargetOptions};
-use crate::spec::{LinkerFlavor, PanicStrategy, RelocModel};
+use crate::spec::{cvs, PanicStrategy, RelocModel, Target, TargetOptions};
pub fn target() -> Target {
Target {
@@ -13,7 +12,6 @@ pub fn target() -> Target {
os: "espidf".into(),
env: "newlib".into(),
vendor: "espressif".into(),
- linker_flavor: LinkerFlavor::Gcc,
linker: Some("riscv32-esp-elf-gcc".into()),
cpu: "generic-rv32".into(),
diff --git a/compiler/rustc_target/src/spec/riscv32imc_unknown_none_elf.rs b/compiler/rustc_target/src/spec/riscv32imc_unknown_none_elf.rs
index bf510d204..01e773fae 100644
--- a/compiler/rustc_target/src/spec/riscv32imc_unknown_none_elf.rs
+++ b/compiler/rustc_target/src/spec/riscv32imc_unknown_none_elf.rs
@@ -1,5 +1,4 @@
-use crate::spec::{LinkerFlavor, LldFlavor, PanicStrategy, RelocModel};
-use crate::spec::{Target, TargetOptions};
+use crate::spec::{Cc, LinkerFlavor, Lld, PanicStrategy, RelocModel, Target, TargetOptions};
pub fn target() -> Target {
Target {
@@ -9,7 +8,7 @@ pub fn target() -> Target {
arch: "riscv32".into(),
options: TargetOptions {
- linker_flavor: LinkerFlavor::Lld(LldFlavor::Ld),
+ linker_flavor: LinkerFlavor::Gnu(Cc::No, Lld::Yes),
linker: Some("rust-lld".into()),
cpu: "generic-rv32".into(),
max_atomic_width: Some(0),
diff --git a/compiler/rustc_target/src/spec/riscv64gc_unknown_none_elf.rs b/compiler/rustc_target/src/spec/riscv64gc_unknown_none_elf.rs
index 03b3cfd1e..67806d578 100644
--- a/compiler/rustc_target/src/spec/riscv64gc_unknown_none_elf.rs
+++ b/compiler/rustc_target/src/spec/riscv64gc_unknown_none_elf.rs
@@ -1,5 +1,5 @@
-use crate::spec::{CodeModel, LinkerFlavor, LldFlavor, PanicStrategy, RelocModel};
-use crate::spec::{Target, TargetOptions};
+use crate::spec::{Cc, CodeModel, LinkerFlavor, Lld, PanicStrategy};
+use crate::spec::{RelocModel, Target, TargetOptions};
pub fn target() -> Target {
Target {
@@ -9,7 +9,7 @@ pub fn target() -> Target {
arch: "riscv64".into(),
options: TargetOptions {
- linker_flavor: LinkerFlavor::Lld(LldFlavor::Ld),
+ linker_flavor: LinkerFlavor::Gnu(Cc::No, Lld::Yes),
linker: Some("rust-lld".into()),
llvm_abiname: "lp64d".into(),
cpu: "generic-rv64".into(),
diff --git a/compiler/rustc_target/src/spec/riscv64gc_unknown_openbsd.rs b/compiler/rustc_target/src/spec/riscv64gc_unknown_openbsd.rs
new file mode 100644
index 000000000..cd10f3afa
--- /dev/null
+++ b/compiler/rustc_target/src/spec/riscv64gc_unknown_openbsd.rs
@@ -0,0 +1,18 @@
+use crate::spec::{CodeModel, Target, TargetOptions};
+
+pub fn target() -> Target {
+ Target {
+ llvm_target: "riscv64-unknown-openbsd".into(),
+ pointer_width: 64,
+ data_layout: "e-m:e-p:64:64-i64:64-i128:128-n64-S128".into(),
+ arch: "riscv64".into(),
+ options: TargetOptions {
+ code_model: Some(CodeModel::Medium),
+ cpu: "generic-rv64".into(),
+ features: "+m,+a,+f,+d,+c".into(),
+ llvm_abiname: "lp64d".into(),
+ max_atomic_width: Some(64),
+ ..super::openbsd_base::opts()
+ },
+ }
+}
diff --git a/compiler/rustc_target/src/spec/riscv64imac_unknown_none_elf.rs b/compiler/rustc_target/src/spec/riscv64imac_unknown_none_elf.rs
index 2a94c9dd2..f371e09be 100644
--- a/compiler/rustc_target/src/spec/riscv64imac_unknown_none_elf.rs
+++ b/compiler/rustc_target/src/spec/riscv64imac_unknown_none_elf.rs
@@ -1,5 +1,5 @@
-use crate::spec::{CodeModel, Target, TargetOptions};
-use crate::spec::{LinkerFlavor, LldFlavor, PanicStrategy, RelocModel};
+use crate::spec::{Cc, CodeModel, LinkerFlavor, Lld, PanicStrategy};
+use crate::spec::{RelocModel, Target, TargetOptions};
pub fn target() -> Target {
Target {
@@ -9,7 +9,7 @@ pub fn target() -> Target {
arch: "riscv64".into(),
options: TargetOptions {
- linker_flavor: LinkerFlavor::Lld(LldFlavor::Ld),
+ linker_flavor: LinkerFlavor::Gnu(Cc::No, Lld::Yes),
linker: Some("rust-lld".into()),
cpu: "generic-rv64".into(),
max_atomic_width: Some(64),
diff --git a/compiler/rustc_target/src/spec/s390x_unknown_linux_gnu.rs b/compiler/rustc_target/src/spec/s390x_unknown_linux_gnu.rs
index 8757bbed8..cda88de0e 100644
--- a/compiler/rustc_target/src/spec/s390x_unknown_linux_gnu.rs
+++ b/compiler/rustc_target/src/spec/s390x_unknown_linux_gnu.rs
@@ -1,22 +1,23 @@
use crate::abi::Endian;
-use crate::spec::Target;
+use crate::spec::{StackProbeType, Target};
pub fn target() -> Target {
let mut base = super::linux_gnu_base::opts();
base.endian = Endian::Big;
// z10 is the oldest CPU supported by LLVM
base.cpu = "z10".into();
- // FIXME: The data_layout string below and the ABI implementation in
- // cabi_s390x.rs are for now hard-coded to assume the no-vector ABI.
- // Pass the -vector feature string to LLVM to respect this assumption.
+ // FIXME: The ABI implementation in cabi_s390x.rs is for now hard-coded to assume the no-vector
+ // ABI. Pass the -vector feature string to LLVM to respect this assumption. On LLVM < 16, we
+ // also strip v128 from the data_layout below to match the older LLVM's expectation.
base.features = "-vector".into();
base.max_atomic_width = Some(64);
base.min_global_align = Some(16);
+ base.stack_probes = StackProbeType::Inline;
Target {
llvm_target: "s390x-unknown-linux-gnu".into(),
pointer_width: 64,
- data_layout: "E-m:e-i1:8:16-i8:8:16-i64:64-f128:64-a:8:16-n32:64".into(),
+ data_layout: "E-m:e-i1:8:16-i8:8:16-i64:64-f128:64-v128:64-a:8:16-n32:64".into(),
arch: "s390x".into(),
options: base,
}
diff --git a/compiler/rustc_target/src/spec/s390x_unknown_linux_musl.rs b/compiler/rustc_target/src/spec/s390x_unknown_linux_musl.rs
index 4c855271a..91e63aee5 100644
--- a/compiler/rustc_target/src/spec/s390x_unknown_linux_musl.rs
+++ b/compiler/rustc_target/src/spec/s390x_unknown_linux_musl.rs
@@ -1,23 +1,24 @@
use crate::abi::Endian;
-use crate::spec::Target;
+use crate::spec::{StackProbeType, Target};
pub fn target() -> Target {
let mut base = super::linux_musl_base::opts();
base.endian = Endian::Big;
// z10 is the oldest CPU supported by LLVM
base.cpu = "z10".into();
- // FIXME: The data_layout string below and the ABI implementation in
- // cabi_s390x.rs are for now hard-coded to assume the no-vector ABI.
- // Pass the -vector feature string to LLVM to respect this assumption.
+ // FIXME: The ABI implementation in cabi_s390x.rs is for now hard-coded to assume the no-vector
+ // ABI. Pass the -vector feature string to LLVM to respect this assumption. On LLVM < 16, we
+ // also strip v128 from the data_layout below to match the older LLVM's expectation.
base.features = "-vector".into();
base.max_atomic_width = Some(64);
base.min_global_align = Some(16);
base.static_position_independent_executables = true;
+ base.stack_probes = StackProbeType::Inline;
Target {
llvm_target: "s390x-unknown-linux-musl".into(),
pointer_width: 64,
- data_layout: "E-m:e-i1:8:16-i8:8:16-i64:64-f128:64-a:8:16-n32:64".into(),
+ data_layout: "E-m:e-i1:8:16-i8:8:16-i64:64-f128:64-v128:64-a:8:16-n32:64".into(),
arch: "s390x".into(),
options: base,
}
diff --git a/compiler/rustc_target/src/spec/solaris_base.rs b/compiler/rustc_target/src/spec/solaris_base.rs
index b7e8e8cf7..f97cdb4fb 100644
--- a/compiler/rustc_target/src/spec/solaris_base.rs
+++ b/compiler/rustc_target/src/spec/solaris_base.rs
@@ -1,4 +1,4 @@
-use crate::spec::{cvs, TargetOptions};
+use crate::spec::{cvs, Cc, LinkerFlavor, TargetOptions};
pub fn opts() -> TargetOptions {
TargetOptions {
@@ -7,7 +7,7 @@ pub fn opts() -> TargetOptions {
has_rpath: true,
families: cvs!["unix"],
is_like_solaris: true,
- linker_is_gnu: false,
+ linker_flavor: LinkerFlavor::Unix(Cc::Yes),
limit_rdylib_exports: false, // Linker doesn't support this
eh_frame_header: false,
diff --git a/compiler/rustc_target/src/spec/sparc64_unknown_netbsd.rs b/compiler/rustc_target/src/spec/sparc64_unknown_netbsd.rs
index 836ab0e37..38ab066b0 100644
--- a/compiler/rustc_target/src/spec/sparc64_unknown_netbsd.rs
+++ b/compiler/rustc_target/src/spec/sparc64_unknown_netbsd.rs
@@ -1,10 +1,10 @@
use crate::abi::Endian;
-use crate::spec::{LinkerFlavor, Target, TargetOptions};
+use crate::spec::{Cc, LinkerFlavor, Lld, Target, TargetOptions};
pub fn target() -> Target {
let mut base = super::netbsd_base::opts();
base.cpu = "v9".into();
- base.add_pre_link_args(LinkerFlavor::Gcc, &["-m64"]);
+ base.add_pre_link_args(LinkerFlavor::Gnu(Cc::Yes, Lld::No), &["-m64"]);
base.max_atomic_width = Some(64);
Target {
diff --git a/compiler/rustc_target/src/spec/sparc64_unknown_openbsd.rs b/compiler/rustc_target/src/spec/sparc64_unknown_openbsd.rs
index 4a192df39..06a5f782a 100644
--- a/compiler/rustc_target/src/spec/sparc64_unknown_openbsd.rs
+++ b/compiler/rustc_target/src/spec/sparc64_unknown_openbsd.rs
@@ -1,11 +1,11 @@
use crate::abi::Endian;
-use crate::spec::{LinkerFlavor, Target};
+use crate::spec::{Cc, LinkerFlavor, Lld, Target};
pub fn target() -> Target {
let mut base = super::openbsd_base::opts();
base.endian = Endian::Big;
base.cpu = "v9".into();
- base.add_pre_link_args(LinkerFlavor::Gcc, &["-m64"]);
+ base.add_pre_link_args(LinkerFlavor::Gnu(Cc::Yes, Lld::No), &["-m64"]);
base.max_atomic_width = Some(64);
Target {
diff --git a/compiler/rustc_target/src/spec/sparc_unknown_linux_gnu.rs b/compiler/rustc_target/src/spec/sparc_unknown_linux_gnu.rs
index ea4fafa4b..12968abda 100644
--- a/compiler/rustc_target/src/spec/sparc_unknown_linux_gnu.rs
+++ b/compiler/rustc_target/src/spec/sparc_unknown_linux_gnu.rs
@@ -1,12 +1,12 @@
use crate::abi::Endian;
-use crate::spec::{LinkerFlavor, Target};
+use crate::spec::{Cc, LinkerFlavor, Lld, Target};
pub fn target() -> Target {
let mut base = super::linux_gnu_base::opts();
base.endian = Endian::Big;
base.cpu = "v9".into();
base.max_atomic_width = Some(64);
- base.add_pre_link_args(LinkerFlavor::Gcc, &["-mv8plus"]);
+ base.add_pre_link_args(LinkerFlavor::Gnu(Cc::Yes, Lld::No), &["-mv8plus"]);
Target {
llvm_target: "sparc-unknown-linux-gnu".into(),
diff --git a/compiler/rustc_target/src/spec/sparcv9_sun_solaris.rs b/compiler/rustc_target/src/spec/sparcv9_sun_solaris.rs
index aac09181a..440194ef2 100644
--- a/compiler/rustc_target/src/spec/sparcv9_sun_solaris.rs
+++ b/compiler/rustc_target/src/spec/sparcv9_sun_solaris.rs
@@ -1,10 +1,10 @@
use crate::abi::Endian;
-use crate::spec::{LinkerFlavor, Target};
+use crate::spec::{Cc, LinkerFlavor, Target};
pub fn target() -> Target {
let mut base = super::solaris_base::opts();
base.endian = Endian::Big;
- base.add_pre_link_args(LinkerFlavor::Gcc, &["-m64"]);
+ base.add_pre_link_args(LinkerFlavor::Unix(Cc::Yes), &["-m64"]);
// llvm calls this "v9"
base.cpu = "v9".into();
base.vendor = "sun".into();
diff --git a/compiler/rustc_target/src/spec/tests/tests_impl.rs b/compiler/rustc_target/src/spec/tests/tests_impl.rs
index 1db6db78b..172da0ed5 100644
--- a/compiler/rustc_target/src/spec/tests/tests_impl.rs
+++ b/compiler/rustc_target/src/spec/tests/tests_impl.rs
@@ -2,28 +2,33 @@ use super::super::*;
use std::assert_matches::assert_matches;
// Test target self-consistency and JSON encoding/decoding roundtrip.
-pub(super) fn test_target(target: Target) {
- target.check_consistency();
- assert_eq!(Target::from_json(target.to_json()).map(|(j, _)| j), Ok(target));
+pub(super) fn test_target(mut target: Target, triple: &str) {
+ let recycled_target = Target::from_json(target.to_json()).map(|(j, _)| j);
+ target.update_to_cli();
+ target.check_consistency(triple);
+ assert_eq!(recycled_target, Ok(target));
}
impl Target {
- fn check_consistency(&self) {
+ fn check_consistency(&self, triple: &str) {
assert_eq!(self.is_like_osx, self.vendor == "apple");
assert_eq!(self.is_like_solaris, self.os == "solaris" || self.os == "illumos");
assert_eq!(self.is_like_windows, self.os == "windows" || self.os == "uefi");
assert_eq!(self.is_like_wasm, self.arch == "wasm32" || self.arch == "wasm64");
- assert!(self.is_like_windows || !self.is_like_msvc);
+ if self.is_like_msvc {
+ assert!(self.is_like_windows);
+ }
- // Check that default linker flavor and lld flavor are compatible
- // with some other key properties.
- assert_eq!(self.is_like_osx, matches!(self.lld_flavor, LldFlavor::Ld64));
- assert_eq!(self.is_like_msvc, matches!(self.lld_flavor, LldFlavor::Link));
- assert_eq!(self.is_like_wasm, matches!(self.lld_flavor, LldFlavor::Wasm));
- assert_eq!(self.os == "l4re", matches!(self.linker_flavor, LinkerFlavor::L4Bender));
- assert_eq!(self.os == "emscripten", matches!(self.linker_flavor, LinkerFlavor::Em));
- assert_eq!(self.arch == "bpf", matches!(self.linker_flavor, LinkerFlavor::BpfLinker));
- assert_eq!(self.arch == "nvptx64", matches!(self.linker_flavor, LinkerFlavor::PtxLinker));
+ // Check that default linker flavor is compatible with some other key properties.
+ assert_eq!(self.is_like_osx, matches!(self.linker_flavor, LinkerFlavor::Darwin(..)));
+ assert_eq!(self.is_like_msvc, matches!(self.linker_flavor, LinkerFlavor::Msvc(..)));
+ assert_eq!(
+ self.is_like_wasm && self.os != "emscripten",
+ matches!(self.linker_flavor, LinkerFlavor::WasmLld(..))
+ );
+ assert_eq!(self.os == "emscripten", matches!(self.linker_flavor, LinkerFlavor::EmCc));
+ assert_eq!(self.arch == "bpf", matches!(self.linker_flavor, LinkerFlavor::Bpf));
+ assert_eq!(self.arch == "nvptx64", matches!(self.linker_flavor, LinkerFlavor::Ptx));
for args in [
&self.pre_link_args,
@@ -35,47 +40,25 @@ impl Target {
for (&flavor, flavor_args) in args {
assert!(!flavor_args.is_empty());
// Check that flavors mentioned in link args are compatible with the default flavor.
- match (self.linker_flavor, self.lld_flavor) {
- (
- LinkerFlavor::Ld | LinkerFlavor::Lld(LldFlavor::Ld) | LinkerFlavor::Gcc,
- LldFlavor::Ld,
- ) => {
- assert_matches!(
- flavor,
- LinkerFlavor::Ld | LinkerFlavor::Lld(LldFlavor::Ld) | LinkerFlavor::Gcc
- )
- }
- (LinkerFlavor::Gcc, LldFlavor::Ld64) => {
- assert_matches!(
- flavor,
- LinkerFlavor::Lld(LldFlavor::Ld64) | LinkerFlavor::Gcc
- )
- }
- (LinkerFlavor::Msvc | LinkerFlavor::Lld(LldFlavor::Link), LldFlavor::Link) => {
- assert_matches!(
- flavor,
- LinkerFlavor::Msvc | LinkerFlavor::Lld(LldFlavor::Link)
- )
+ match self.linker_flavor {
+ LinkerFlavor::Gnu(..) => {
+ assert_matches!(flavor, LinkerFlavor::Gnu(..));
}
- (LinkerFlavor::Lld(LldFlavor::Wasm) | LinkerFlavor::Gcc, LldFlavor::Wasm) => {
- assert_matches!(
- flavor,
- LinkerFlavor::Lld(LldFlavor::Wasm) | LinkerFlavor::Gcc
- )
+ LinkerFlavor::Darwin(..) => {
+ assert_matches!(flavor, LinkerFlavor::Darwin(..))
}
- (LinkerFlavor::L4Bender, LldFlavor::Ld) => {
- assert_matches!(flavor, LinkerFlavor::L4Bender)
+ LinkerFlavor::WasmLld(..) => {
+ assert_matches!(flavor, LinkerFlavor::WasmLld(..))
}
- (LinkerFlavor::Em, LldFlavor::Wasm) => {
- assert_matches!(flavor, LinkerFlavor::Em)
+ LinkerFlavor::Unix(..) => {
+ assert_matches!(flavor, LinkerFlavor::Unix(..));
}
- (LinkerFlavor::BpfLinker, LldFlavor::Ld) => {
- assert_matches!(flavor, LinkerFlavor::BpfLinker)
+ LinkerFlavor::Msvc(..) => {
+ assert_matches!(flavor, LinkerFlavor::Msvc(..))
}
- (LinkerFlavor::PtxLinker, LldFlavor::Ld) => {
- assert_matches!(flavor, LinkerFlavor::PtxLinker)
+ LinkerFlavor::EmCc | LinkerFlavor::Bpf | LinkerFlavor::Ptx => {
+ assert_eq!(flavor, self.linker_flavor)
}
- flavors => unreachable!("unexpected flavor combination: {:?}", flavors),
}
// Check that link args for cc and non-cc versions of flavors are consistent.
@@ -88,41 +71,83 @@ impl Target {
}
}
};
+
match self.linker_flavor {
- LinkerFlavor::Gcc => match self.lld_flavor {
- LldFlavor::Ld => {
- check_noncc(LinkerFlavor::Ld);
- check_noncc(LinkerFlavor::Lld(LldFlavor::Ld));
- }
- LldFlavor::Wasm => check_noncc(LinkerFlavor::Lld(LldFlavor::Wasm)),
- LldFlavor::Ld64 | LldFlavor::Link => {}
- },
+ LinkerFlavor::Gnu(Cc::Yes, lld) => check_noncc(LinkerFlavor::Gnu(Cc::No, lld)),
+ LinkerFlavor::WasmLld(Cc::Yes) => check_noncc(LinkerFlavor::WasmLld(Cc::No)),
+ LinkerFlavor::Unix(Cc::Yes) => check_noncc(LinkerFlavor::Unix(Cc::No)),
_ => {}
}
}
// Check that link args for lld and non-lld versions of flavors are consistent.
- assert_eq!(args.get(&LinkerFlavor::Ld), args.get(&LinkerFlavor::Lld(LldFlavor::Ld)));
+ for cc in [Cc::No, Cc::Yes] {
+ assert_eq!(
+ args.get(&LinkerFlavor::Gnu(cc, Lld::No)),
+ args.get(&LinkerFlavor::Gnu(cc, Lld::Yes)),
+ );
+ assert_eq!(
+ args.get(&LinkerFlavor::Darwin(cc, Lld::No)),
+ args.get(&LinkerFlavor::Darwin(cc, Lld::Yes)),
+ );
+ }
assert_eq!(
- args.get(&LinkerFlavor::Msvc),
- args.get(&LinkerFlavor::Lld(LldFlavor::Link)),
+ args.get(&LinkerFlavor::Msvc(Lld::No)),
+ args.get(&LinkerFlavor::Msvc(Lld::Yes)),
);
}
- assert!(
- (self.pre_link_objects_fallback.is_empty()
- && self.post_link_objects_fallback.is_empty())
- || self.crt_objects_fallback.is_some()
- );
+ if self.link_self_contained == LinkSelfContainedDefault::False {
+ assert!(
+ self.pre_link_objects_self_contained.is_empty()
+ && self.post_link_objects_self_contained.is_empty()
+ );
+ }
// If your target really needs to deviate from the rules below,
// except it and document the reasons.
// Keep the default "unknown" vendor instead.
assert_ne!(self.vendor, "");
+ assert_ne!(self.os, "");
if !self.can_use_os_unknown() {
// Keep the default "none" for bare metal targets instead.
assert_ne!(self.os, "unknown");
}
+
+ // Check dynamic linking stuff
+ // BPF: user-space VMs (like rbpf) can load dynamic libraries.
+ if self.os == "none" && self.arch != "bpf" {
+ assert!(!self.dynamic_linking);
+ }
+ if self.only_cdylib
+ || self.crt_static_allows_dylibs
+ || !self.late_link_args_dynamic.is_empty()
+ {
+ assert!(self.dynamic_linking);
+ }
+ // Apparently PIC was slow on wasm at some point, see comments in wasm_base.rs
+ if self.dynamic_linking && !(self.is_like_wasm && self.os != "emscripten") {
+ assert_eq!(self.relocation_model, RelocModel::Pic);
+ }
+ // PIEs are supported but not enabled by default with linuxkernel target.
+ if self.position_independent_executables && !triple.ends_with("-linuxkernel") {
+ assert_eq!(self.relocation_model, RelocModel::Pic);
+ }
+ // The UEFI targets do not support dynamic linking but still require PIC (#101377).
+ if self.relocation_model == RelocModel::Pic && self.os != "uefi" {
+ assert!(self.dynamic_linking || self.position_independent_executables);
+ }
+ if self.static_position_independent_executables {
+ assert!(self.position_independent_executables);
+ }
+ if self.position_independent_executables {
+ assert!(self.executables);
+ }
+
+ // Check crt static stuff
+ if self.crt_static_default || self.crt_static_allows_dylibs {
+ assert!(self.crt_static_respected);
+ }
}
// Add your target to the whitelist if it has `std` library
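
For orientation, a minimal sketch of the linker-flavor shape the updated test assumes. These are simplified stand-in types inferred from the hunks in this diff, not the actual rustc_target definitions, and the per-variant comments are my reading of what each flavor covers: every flavor now carries the Cc/Lld information that previously lived in a separate `lld_flavor` field.

    #[derive(Clone, Copy, PartialEq, Eq, Debug)]
    enum Cc { Yes, No }
    #[derive(Clone, Copy, PartialEq, Eq, Debug)]
    enum Lld { Yes, No }
    #[derive(Clone, Copy, PartialEq, Eq, Debug)]
    enum LinkerFlavor {
        Gnu(Cc, Lld),     // binutils-style linker, optionally driven through a C compiler
        Darwin(Cc, Lld),  // ld64-style linker
        WasmLld(Cc),      // wasm-ld, optionally driven through clang
        Unix(Cc),         // traditional non-GNU Unix linker (Solaris/illumos in this diff)
        Msvc(Lld),        // link.exe-style linker
        EmCc,             // emcc driver
        Bpf,
        Ptx,
    }

    // With that shape, "flavors mentioned in link args must be compatible with the
    // default flavor" reduces to the single match the new test body uses.
    fn compatible(default: LinkerFlavor, flavor: LinkerFlavor) -> bool {
        use LinkerFlavor::*;
        match default {
            Gnu(..) => matches!(flavor, Gnu(..)),
            Darwin(..) => matches!(flavor, Darwin(..)),
            WasmLld(..) => matches!(flavor, WasmLld(..)),
            Unix(..) => matches!(flavor, Unix(..)),
            Msvc(..) => matches!(flavor, Msvc(..)),
            EmCc | Bpf | Ptx => flavor == default,
        }
    }

    fn main() {
        assert!(compatible(LinkerFlavor::Gnu(Cc::Yes, Lld::No), LinkerFlavor::Gnu(Cc::No, Lld::Yes)));
        assert!(!compatible(LinkerFlavor::Msvc(Lld::No), LinkerFlavor::Gnu(Cc::Yes, Lld::No)));
    }
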
diff --git a/compiler/rustc_target/src/spec/thumb_base.rs b/compiler/rustc_target/src/spec/thumb_base.rs
index 049142b89..000766c57 100644
--- a/compiler/rustc_target/src/spec/thumb_base.rs
+++ b/compiler/rustc_target/src/spec/thumb_base.rs
@@ -27,13 +27,12 @@
// differentiate these targets from our other `arm(v7)-*-*-gnueabi(hf)` targets in the context of
// build scripts / gcc flags.
-use crate::spec::TargetOptions;
-use crate::spec::{FramePointer, LinkerFlavor, LldFlavor, PanicStrategy, RelocModel};
+use crate::spec::{Cc, FramePointer, LinkerFlavor, Lld, PanicStrategy, RelocModel, TargetOptions};
pub fn opts() -> TargetOptions {
// See rust-lang/rfcs#1645 for a discussion about these defaults
TargetOptions {
- linker_flavor: LinkerFlavor::Lld(LldFlavor::Ld),
+ linker_flavor: LinkerFlavor::Gnu(Cc::No, Lld::Yes),
// In most cases, LLD is good enough
linker: Some("rust-lld".into()),
// Because these devices have very few resources, having an unwinder is too onerous, so we
diff --git a/compiler/rustc_target/src/spec/thumbv4t_none_eabi.rs b/compiler/rustc_target/src/spec/thumbv4t_none_eabi.rs
index 7125d141a..5a3e4c88d 100644
--- a/compiler/rustc_target/src/spec/thumbv4t_none_eabi.rs
+++ b/compiler/rustc_target/src/spec/thumbv4t_none_eabi.rs
@@ -16,9 +16,8 @@
//! The default link script is very likely wrong, so you should use
//! `-Clink-arg=-Tmy_script.ld` to override that with a correct linker script.
-use crate::spec::{
- cvs, FramePointer, LinkerFlavor, PanicStrategy, RelocModel, Target, TargetOptions,
-};
+use crate::spec::{cvs, Cc, FramePointer, LinkerFlavor, Lld};
+use crate::spec::{PanicStrategy, RelocModel, Target, TargetOptions};
pub fn target() -> Target {
Target {
@@ -37,7 +36,7 @@ pub fn target() -> Target {
data_layout: "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64".into(),
options: TargetOptions {
abi: "eabi".into(),
- linker_flavor: LinkerFlavor::Ld,
+ linker_flavor: LinkerFlavor::Gnu(Cc::No, Lld::No),
linker: Some("arm-none-eabi-ld".into()),
// extra args passed to the external assembler (assuming `arm-none-eabi-as`):
@@ -47,7 +46,9 @@ pub fn target() -> Target {
asm_args: cvs!["-mthumb-interwork", "-march=armv4t", "-mlittle-endian",],
// minimum extra features, these cannot be disabled via -C
- features: "+soft-float,+strict-align".into(),
+ // Also force-enable 32-bit atomics, which allows the use of atomic load/store only.
+ // The resulting atomics are ABI incompatible with atomics backed by libatomic.
+ features: "+soft-float,+strict-align,+atomics-32".into(),
panic_strategy: PanicStrategy::Abort,
relocation_model: RelocModel::Static,
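
The `+atomics-32` feature added above (and in the new thumbv5te and the thumbv6m targets below) enables 32-bit atomic load/store only, while `atomic_cas: false` keeps compare-and-swap unavailable. A hypothetical user-code sketch of what that looks like from the application side, assuming the usual cfg semantics where `target_has_atomic = "32"` is only set when real CAS exists; none of this is part of the diff itself:

    use core::sync::atomic::{AtomicU32, Ordering};

    static READY: AtomicU32 = AtomicU32::new(0);

    fn publish() {
        // Plain store/load is all `+atomics-32` provides on these targets; no locking
        // or libatomic call is involved (hence the ABI note in the comment above).
        READY.store(1, Ordering::Release);
    }

    fn is_ready() -> bool {
        READY.load(Ordering::Acquire) == 1
    }

    // Anything needing real compare-and-swap has to be gated out, since these targets
    // report `atomic_cas: false`.
    #[cfg(target_has_atomic = "32")]
    fn try_claim() -> bool {
        READY.compare_exchange(0, 1, Ordering::AcqRel, Ordering::Acquire).is_ok()
    }

    fn main() {
        publish();
        assert!(is_ready());
    }
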
diff --git a/compiler/rustc_target/src/spec/thumbv5te_none_eabi.rs b/compiler/rustc_target/src/spec/thumbv5te_none_eabi.rs
new file mode 100644
index 000000000..021b0e0eb
--- /dev/null
+++ b/compiler/rustc_target/src/spec/thumbv5te_none_eabi.rs
@@ -0,0 +1,41 @@
+//! Targets the ARMv5TE, generating `t32` code by default.
+
+use crate::spec::{cvs, FramePointer, Target, TargetOptions};
+
+pub fn target() -> Target {
+ Target {
+ llvm_target: "thumbv5te-none-eabi".into(),
+ pointer_width: 32,
+ arch: "arm".into(),
+ /* Data layout args are '-' separated:
+ * little endian
+ * stack is 64-bit aligned (EABI)
+ * pointers are 32-bit
+ * i64 must be 64-bit aligned (EABI)
+ * mangle names with ELF style
+ * native integers are 32-bit
+ * All other elements are default
+ */
+ data_layout: "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64".into(),
+
+ options: TargetOptions {
+ abi: "eabi".into(),
+ // extra args passed to the external assembler (assuming `arm-none-eabi-as`):
+ // * activate t32/a32 interworking
+ // * use arch ARMv5TE
+ // * use little-endian
+ asm_args: cvs!["-mthumb-interwork", "-march=armv5te", "-mlittle-endian",],
+ // minimum extra features, these cannot be disabled via -C
+ // Also force-enable 32-bit atomics, which allows the use of atomic load/store only.
+ // The resulting atomics are ABI incompatible with atomics backed by libatomic.
+ features: "+soft-float,+strict-align,+atomics-32".into(),
+ frame_pointer: FramePointer::MayOmit,
+ main_needs_argc_argv: false,
+ // don't have atomic compare-and-swap
+ atomic_cas: false,
+ has_thumb_interworking: true,
+
+ ..super::thumb_base::opts()
+ },
+ }
+}
diff --git a/compiler/rustc_target/src/spec/thumbv6m_none_eabi.rs b/compiler/rustc_target/src/spec/thumbv6m_none_eabi.rs
index 2546ab9b7..c9bb0112f 100644
--- a/compiler/rustc_target/src/spec/thumbv6m_none_eabi.rs
+++ b/compiler/rustc_target/src/spec/thumbv6m_none_eabi.rs
@@ -13,7 +13,9 @@ pub fn target() -> Target {
abi: "eabi".into(),
// The ARMv6-M architecture doesn't support unaligned loads/stores so we disable them
// with +strict-align.
- features: "+strict-align".into(),
+ // Also force-enable 32-bit atomics, which allows the use of atomic load/store only.
+ // The resulting atomics are ABI incompatible with atomics backed by libatomic.
+ features: "+strict-align,+atomics-32".into(),
// There are no atomic CAS instructions available in the instruction set of the ARMv6-M
// architecture
atomic_cas: false,
diff --git a/compiler/rustc_target/src/spec/thumbv7a_pc_windows_msvc.rs b/compiler/rustc_target/src/spec/thumbv7a_pc_windows_msvc.rs
index 4d09d3a4d..f1be274f0 100644
--- a/compiler/rustc_target/src/spec/thumbv7a_pc_windows_msvc.rs
+++ b/compiler/rustc_target/src/spec/thumbv7a_pc_windows_msvc.rs
@@ -1,4 +1,4 @@
-use crate::spec::{LinkerFlavor, PanicStrategy, Target, TargetOptions};
+use crate::spec::{LinkerFlavor, Lld, PanicStrategy, Target, TargetOptions};
pub fn target() -> Target {
let mut base = super::windows_msvc_base::opts();
@@ -9,7 +9,7 @@ pub fn target() -> Target {
// should be smart enough to insert branch islands only
// where necessary, but this is not the observed behavior.
// Disabling the LBR optimization works around the issue.
- base.add_pre_link_args(LinkerFlavor::Msvc, &["/OPT:NOLBR"]);
+ base.add_pre_link_args(LinkerFlavor::Msvc(Lld::No), &["/OPT:NOLBR"]);
Target {
llvm_target: "thumbv7a-pc-windows-msvc".into(),
diff --git a/compiler/rustc_target/src/spec/thumbv7neon_linux_androideabi.rs b/compiler/rustc_target/src/spec/thumbv7neon_linux_androideabi.rs
index 4cad9e183..8d80fcd5f 100644
--- a/compiler/rustc_target/src/spec/thumbv7neon_linux_androideabi.rs
+++ b/compiler/rustc_target/src/spec/thumbv7neon_linux_androideabi.rs
@@ -1,4 +1,4 @@
-use crate::spec::{LinkerFlavor, Target, TargetOptions};
+use crate::spec::{Cc, LinkerFlavor, Lld, Target, TargetOptions};
// This target is for the Android v7a ABI in thumb mode with
// NEON unconditionally enabled and, therefore, with 32 FPU registers
@@ -10,7 +10,7 @@ use crate::spec::{LinkerFlavor, Target, TargetOptions};
pub fn target() -> Target {
let mut base = super::android_base::opts();
- base.add_pre_link_args(LinkerFlavor::Gcc, &["-march=armv7-a"]);
+ base.add_pre_link_args(LinkerFlavor::Gnu(Cc::Yes, Lld::No), &["-march=armv7-a"]);
Target {
llvm_target: "armv7-none-linux-android".into(),
pointer_width: 32,
diff --git a/compiler/rustc_target/src/spec/uefi_msvc_base.rs b/compiler/rustc_target/src/spec/uefi_msvc_base.rs
index aee8eb2e3..8968d3c8f 100644
--- a/compiler/rustc_target/src/spec/uefi_msvc_base.rs
+++ b/compiler/rustc_target/src/spec/uefi_msvc_base.rs
@@ -9,13 +9,13 @@
// the timer-interrupt. Device-drivers are required to use polling-based models. Furthermore, all
// code runs in the same environment, no process separation is supported.
-use crate::spec::{LinkerFlavor, LldFlavor, PanicStrategy, StackProbeType, TargetOptions};
+use crate::spec::{LinkerFlavor, Lld, PanicStrategy, StackProbeType, TargetOptions};
pub fn opts() -> TargetOptions {
let mut base = super::msvc_base::opts();
base.add_pre_link_args(
- LinkerFlavor::Msvc,
+ LinkerFlavor::Msvc(Lld::No),
&[
// Non-standard subsystems have no default entry-point in PE+ files. We have to define
// one. "efi_main" seems to be a common choice amongst other implementations and the
@@ -36,7 +36,7 @@ pub fn opts() -> TargetOptions {
TargetOptions {
os: "uefi".into(),
- linker_flavor: LinkerFlavor::Lld(LldFlavor::Link),
+ linker_flavor: LinkerFlavor::Msvc(Lld::Yes),
disable_redzone: true,
exe_suffix: ".efi".into(),
allows_weak_linkage: false,
diff --git a/compiler/rustc_target/src/spec/wasm32_unknown_emscripten.rs b/compiler/rustc_target/src/spec/wasm32_unknown_emscripten.rs
index c7e7d2210..6f77ef98c 100644
--- a/compiler/rustc_target/src/spec/wasm32_unknown_emscripten.rs
+++ b/compiler/rustc_target/src/spec/wasm32_unknown_emscripten.rs
@@ -5,13 +5,13 @@ pub fn target() -> Target {
// Reset flags for non-Em flavors back to empty to satisfy sanity checking tests.
let pre_link_args = LinkArgs::new();
let post_link_args = TargetOptions::link_args(
- LinkerFlavor::Em,
+ LinkerFlavor::EmCc,
&["-sABORTING_MALLOC=0", "-Wl,--fatal-warnings"],
);
let opts = TargetOptions {
os: "emscripten".into(),
- linker_flavor: LinkerFlavor::Em,
+ linker_flavor: LinkerFlavor::EmCc,
// emcc emits two files - a .js file to instantiate the wasm and supply platform
// functionality, and a .wasm file.
exe_suffix: ".js".into(),
diff --git a/compiler/rustc_target/src/spec/wasm32_unknown_unknown.rs b/compiler/rustc_target/src/spec/wasm32_unknown_unknown.rs
index 4e2927dd9..8dad941b5 100644
--- a/compiler/rustc_target/src/spec/wasm32_unknown_unknown.rs
+++ b/compiler/rustc_target/src/spec/wasm32_unknown_unknown.rs
@@ -10,14 +10,12 @@
//! This target is more or less managed by the Rust and WebAssembly Working
//! Group nowadays at <https://github.com/rustwasm>.
-use super::wasm_base;
-use super::{LinkerFlavor, LldFlavor, Target};
+use super::{wasm_base, Cc, LinkerFlavor, Target};
use crate::spec::abi::Abi;
pub fn target() -> Target {
let mut options = wasm_base::options();
options.os = "unknown".into();
- options.linker_flavor = LinkerFlavor::Lld(LldFlavor::Wasm);
// This is a default for backwards-compatibility with the original
// definition of this target oh-so-long-ago. Once the "wasm" ABI is
@@ -30,7 +28,7 @@ pub fn target() -> Target {
options.default_adjusted_cabi = Some(Abi::Wasm);
options.add_pre_link_args(
- LinkerFlavor::Lld(LldFlavor::Wasm),
+ LinkerFlavor::WasmLld(Cc::No),
&[
// For now this target just never has an entry symbol no matter the output
// type, so unconditionally pass this.
@@ -44,7 +42,7 @@ pub fn target() -> Target {
],
);
options.add_pre_link_args(
- LinkerFlavor::Gcc,
+ LinkerFlavor::WasmLld(Cc::Yes),
&[
// Make sure clang uses LLD as its linker and is configured appropriately
// otherwise
diff --git a/compiler/rustc_target/src/spec/wasm32_wasi.rs b/compiler/rustc_target/src/spec/wasm32_wasi.rs
index 280457d68..93a956403 100644
--- a/compiler/rustc_target/src/spec/wasm32_wasi.rs
+++ b/compiler/rustc_target/src/spec/wasm32_wasi.rs
@@ -72,18 +72,16 @@
//! best we can with this target. Don't start relying on too much here unless
//! you know what you're getting in to!
-use super::wasm_base;
-use super::{crt_objects, LinkerFlavor, LldFlavor, Target};
+use super::{crt_objects, wasm_base, Cc, LinkerFlavor, Target};
pub fn target() -> Target {
let mut options = wasm_base::options();
options.os = "wasi".into();
- options.linker_flavor = LinkerFlavor::Lld(LldFlavor::Wasm);
- options.add_pre_link_args(LinkerFlavor::Gcc, &["--target=wasm32-wasi"]);
+ options.add_pre_link_args(LinkerFlavor::WasmLld(Cc::Yes), &["--target=wasm32-wasi"]);
- options.pre_link_objects_fallback = crt_objects::pre_wasi_fallback();
- options.post_link_objects_fallback = crt_objects::post_wasi_fallback();
+ options.pre_link_objects_self_contained = crt_objects::pre_wasi_self_contained();
+ options.post_link_objects_self_contained = crt_objects::post_wasi_self_contained();
// Right now this is a bit of a workaround but we're currently saying that
// the target by default has a static crt which we're taking as a signal
diff --git a/compiler/rustc_target/src/spec/wasm64_unknown_unknown.rs b/compiler/rustc_target/src/spec/wasm64_unknown_unknown.rs
index 5211f7707..3fda398d2 100644
--- a/compiler/rustc_target/src/spec/wasm64_unknown_unknown.rs
+++ b/compiler/rustc_target/src/spec/wasm64_unknown_unknown.rs
@@ -7,16 +7,14 @@
//! the standard library is available, most of it returns an error immediately
//! (e.g. trying to create a TCP stream or something like that).
-use super::wasm_base;
-use super::{LinkerFlavor, LldFlavor, Target};
+use super::{wasm_base, Cc, LinkerFlavor, Target};
pub fn target() -> Target {
let mut options = wasm_base::options();
options.os = "unknown".into();
- options.linker_flavor = LinkerFlavor::Lld(LldFlavor::Wasm);
options.add_pre_link_args(
- LinkerFlavor::Lld(LldFlavor::Wasm),
+ LinkerFlavor::WasmLld(Cc::No),
&[
// For now this target just never has an entry symbol no matter the output
// type, so unconditionally pass this.
@@ -25,7 +23,7 @@ pub fn target() -> Target {
],
);
options.add_pre_link_args(
- LinkerFlavor::Gcc,
+ LinkerFlavor::WasmLld(Cc::Yes),
&[
// Make sure clang uses LLD as its linker and is configured appropriately
// otherwise
diff --git a/compiler/rustc_target/src/spec/wasm_base.rs b/compiler/rustc_target/src/spec/wasm_base.rs
index 9216d3e7b..528a84a8b 100644
--- a/compiler/rustc_target/src/spec/wasm_base.rs
+++ b/compiler/rustc_target/src/spec/wasm_base.rs
@@ -1,5 +1,5 @@
-use super::crt_objects::CrtObjectsFallback;
-use super::{cvs, LinkerFlavor, LldFlavor, PanicStrategy, RelocModel, TargetOptions, TlsModel};
+use super::crt_objects::LinkSelfContainedDefault;
+use super::{cvs, Cc, LinkerFlavor, PanicStrategy, RelocModel, TargetOptions, TlsModel};
pub fn options() -> TargetOptions {
macro_rules! args {
@@ -49,8 +49,8 @@ pub fn options() -> TargetOptions {
};
}
- let mut pre_link_args = TargetOptions::link_args(LinkerFlavor::Lld(LldFlavor::Wasm), args!(""));
- super::add_link_args(&mut pre_link_args, LinkerFlavor::Gcc, args!("-Wl,"));
+ let mut pre_link_args = TargetOptions::link_args(LinkerFlavor::WasmLld(Cc::No), args!(""));
+ super::add_link_args(&mut pre_link_args, LinkerFlavor::WasmLld(Cc::Yes), args!("-Wl,"));
TargetOptions {
is_like_wasm: true,
@@ -91,12 +91,12 @@ pub fn options() -> TargetOptions {
// we use the LLD shipped with the Rust toolchain by default
linker: Some("rust-lld".into()),
- lld_flavor: LldFlavor::Wasm,
- linker_is_gnu: false,
+ linker_flavor: LinkerFlavor::WasmLld(Cc::No),
pre_link_args,
- crt_objects_fallback: Some(CrtObjectsFallback::Wasm),
+ // FIXME: Figure out cases in which WASM needs to link with a native toolchain.
+ link_self_contained: LinkSelfContainedDefault::True,
// This has no effect in LLVM 8 or prior, but in LLVM 9 and later when
// PIC code is implemented this has quite a drastic effect if it stays
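
The wasm hunks above register every argument list twice: once under the bare flavor (`WasmLld(Cc::No)`, prefix `""`) and once under the cc-driver flavor (`WasmLld(Cc::Yes)`, prefix `"-Wl,"`). A toy helper illustrating that convention; the flag names in `main` are placeholders, not the actual wasm_base arguments (those are elided in the hunk):

    fn with_wl_prefix(args: &[&str]) -> Vec<String> {
        // `-Wl,<arg>` asks a cc-style driver (clang/emcc/gcc) to forward <arg> to the
        // underlying linker, which is why the Cc::Yes tables mirror the Cc::No tables
        // with this prefix.
        args.iter().map(|arg| format!("-Wl,{arg}")).collect()
    }

    fn main() {
        let raw = ["--no-demangle", "--gc-sections"]; // placeholder flags
        assert_eq!(with_wl_prefix(&raw), ["-Wl,--no-demangle", "-Wl,--gc-sections"]);
    }
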
diff --git a/compiler/rustc_target/src/spec/windows_gnu_base.rs b/compiler/rustc_target/src/spec/windows_gnu_base.rs
index 90e0af3e3..a32ca469b 100644
--- a/compiler/rustc_target/src/spec/windows_gnu_base.rs
+++ b/compiler/rustc_target/src/spec/windows_gnu_base.rs
@@ -1,9 +1,10 @@
-use crate::spec::crt_objects::{self, CrtObjectsFallback};
-use crate::spec::{cvs, LinkerFlavor, TargetOptions};
+use crate::spec::crt_objects::{self, LinkSelfContainedDefault};
+use crate::spec::{cvs, Cc, DebuginfoKind, LinkerFlavor, Lld, SplitDebuginfo, TargetOptions};
+use std::borrow::Cow;
pub fn opts() -> TargetOptions {
let mut pre_link_args = TargetOptions::link_args(
- LinkerFlavor::Ld,
+ LinkerFlavor::Gnu(Cc::No, Lld::No),
&[
// Enable ASLR
"--dynamicbase",
@@ -13,7 +14,7 @@ pub fn opts() -> TargetOptions {
);
super::add_link_args(
&mut pre_link_args,
- LinkerFlavor::Gcc,
+ LinkerFlavor::Gnu(Cc::Yes, Lld::No),
&[
// Tell GCC to avoid linker plugins, because we are not bundling
// them with Windows installer, and Rust does its own LTO anyways.
@@ -41,23 +42,33 @@ pub fn opts() -> TargetOptions {
"-luser32",
"-lkernel32",
];
- let mut late_link_args = TargetOptions::link_args(LinkerFlavor::Ld, mingw_libs);
- super::add_link_args(&mut late_link_args, LinkerFlavor::Gcc, mingw_libs);
+ let mut late_link_args =
+ TargetOptions::link_args(LinkerFlavor::Gnu(Cc::No, Lld::No), mingw_libs);
+ super::add_link_args(&mut late_link_args, LinkerFlavor::Gnu(Cc::Yes, Lld::No), mingw_libs);
// If any of our crates are dynamically linked then we need to use
// the shared libgcc_s-dw2-1.dll. This is required to support
// unwinding across DLL boundaries.
let dynamic_unwind_libs = &["-lgcc_s"];
let mut late_link_args_dynamic =
- TargetOptions::link_args(LinkerFlavor::Ld, dynamic_unwind_libs);
- super::add_link_args(&mut late_link_args_dynamic, LinkerFlavor::Gcc, dynamic_unwind_libs);
+ TargetOptions::link_args(LinkerFlavor::Gnu(Cc::No, Lld::No), dynamic_unwind_libs);
+ super::add_link_args(
+ &mut late_link_args_dynamic,
+ LinkerFlavor::Gnu(Cc::Yes, Lld::No),
+ dynamic_unwind_libs,
+ );
// If all of our crates are statically linked then we can get away
// with statically linking the libgcc unwinding code. This allows
// binaries to be redistributed without the libgcc_s-dw2-1.dll
// dependency, but unfortunately breaks unwinding across DLL
// boundaries when unwinding across FFI boundaries.
let static_unwind_libs = &["-lgcc_eh", "-l:libpthread.a"];
- let mut late_link_args_static = TargetOptions::link_args(LinkerFlavor::Ld, static_unwind_libs);
- super::add_link_args(&mut late_link_args_static, LinkerFlavor::Gcc, static_unwind_libs);
+ let mut late_link_args_static =
+ TargetOptions::link_args(LinkerFlavor::Gnu(Cc::No, Lld::No), static_unwind_libs);
+ super::add_link_args(
+ &mut late_link_args_static,
+ LinkerFlavor::Gnu(Cc::Yes, Lld::No),
+ static_unwind_libs,
+ );
TargetOptions {
os: "windows".into(),
@@ -76,9 +87,9 @@ pub fn opts() -> TargetOptions {
pre_link_args,
pre_link_objects: crt_objects::pre_mingw(),
post_link_objects: crt_objects::post_mingw(),
- pre_link_objects_fallback: crt_objects::pre_mingw_fallback(),
- post_link_objects_fallback: crt_objects::post_mingw_fallback(),
- crt_objects_fallback: Some(CrtObjectsFallback::Mingw),
+ pre_link_objects_self_contained: crt_objects::pre_mingw_self_contained(),
+ post_link_objects_self_contained: crt_objects::post_mingw_self_contained(),
+ link_self_contained: LinkSelfContainedDefault::Mingw,
late_link_args,
late_link_args_dynamic,
late_link_args_static,
@@ -86,6 +97,10 @@ pub fn opts() -> TargetOptions {
emit_debug_gdb_scripts: false,
requires_uwtable: true,
eh_frame_header: false,
+ // FIXME(davidtwco): Support Split DWARF on Windows GNU - may require LLVM changes to
+ // output DWO, despite using DWARF, doesn't use ELF..
+ debuginfo_kind: DebuginfoKind::Pdb,
+ supported_split_debuginfo: Cow::Borrowed(&[SplitDebuginfo::Off]),
..Default::default()
}
}
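
This file also shows the `*_fallback` to `*_self_contained` rename: the self-contained CRT object lists are now paired with a `LinkSelfContainedDefault` value, and the test hunk earlier requires that a target declaring `LinkSelfContainedDefault::False` carries no such objects. A toy restatement of that invariant with my own types; the variant names are taken from the hunks in this diff and the real enum may have more:

    #[derive(PartialEq)]
    enum LinkSelfContainedDefault { False, True, Mingw }

    struct CrtObjects {
        pre_self_contained: Vec<String>,
        post_self_contained: Vec<String>,
        link_self_contained: LinkSelfContainedDefault,
    }

    fn check(t: &CrtObjects) {
        // A target that never links self-contained must not ship self-contained CRT objects.
        if t.link_self_contained == LinkSelfContainedDefault::False {
            assert!(t.pre_self_contained.is_empty() && t.post_self_contained.is_empty());
        }
    }

    fn main() {
        let mingw = CrtObjects {
            pre_self_contained: vec!["crt2.o".to_string()], // illustrative object name
            post_self_contained: vec![],
            link_self_contained: LinkSelfContainedDefault::Mingw,
        };
        check(&mingw);
    }
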
diff --git a/compiler/rustc_target/src/spec/windows_gnullvm_base.rs b/compiler/rustc_target/src/spec/windows_gnullvm_base.rs
index bae007dc9..58210c75a 100644
--- a/compiler/rustc_target/src/spec/windows_gnullvm_base.rs
+++ b/compiler/rustc_target/src/spec/windows_gnullvm_base.rs
@@ -1,15 +1,17 @@
-use crate::spec::{cvs, LinkerFlavor, TargetOptions};
+use crate::spec::{cvs, Cc, LinkerFlavor, Lld, TargetOptions};
pub fn opts() -> TargetOptions {
// We cannot use `-nodefaultlibs` because compiler-rt has to be passed
// as a path since it's not added to linker search path by the default.
- // There were attemts to make it behave like libgcc (so one can just use -l<name>)
+ // There were attempts to make it behave like libgcc (so one can just use -l<name>)
// but LLVM maintainers rejected it: https://reviews.llvm.org/D51440
- let pre_link_args =
- TargetOptions::link_args(LinkerFlavor::Gcc, &["-nolibc", "--unwindlib=none"]);
+ let pre_link_args = TargetOptions::link_args(
+ LinkerFlavor::Gnu(Cc::Yes, Lld::No),
+ &["-nolibc", "--unwindlib=none"],
+ );
// Order of `late_link_args*` does not matter with LLD.
let late_link_args = TargetOptions::link_args(
- LinkerFlavor::Gcc,
+ LinkerFlavor::Gnu(Cc::Yes, Lld::No),
&["-lmingw32", "-lmingwex", "-lmsvcrt", "-lkernel32", "-luser32"],
);
diff --git a/compiler/rustc_target/src/spec/windows_uwp_gnu_base.rs b/compiler/rustc_target/src/spec/windows_uwp_gnu_base.rs
index fa69b919c..f30c33d99 100644
--- a/compiler/rustc_target/src/spec/windows_uwp_gnu_base.rs
+++ b/compiler/rustc_target/src/spec/windows_uwp_gnu_base.rs
@@ -1,4 +1,4 @@
-use crate::spec::{LinkArgs, LinkerFlavor, TargetOptions};
+use crate::spec::{Cc, LinkArgs, LinkerFlavor, Lld, TargetOptions};
pub fn opts() -> TargetOptions {
let base = super::windows_gnu_base::opts();
@@ -15,8 +15,9 @@ pub fn opts() -> TargetOptions {
"-lmingwex",
"-lmingw32",
];
- let mut late_link_args = TargetOptions::link_args(LinkerFlavor::Ld, mingw_libs);
- super::add_link_args(&mut late_link_args, LinkerFlavor::Gcc, mingw_libs);
+ let mut late_link_args =
+ TargetOptions::link_args(LinkerFlavor::Gnu(Cc::No, Lld::No), mingw_libs);
+ super::add_link_args(&mut late_link_args, LinkerFlavor::Gnu(Cc::Yes, Lld::No), mingw_libs);
// Reset the flags back to empty until the FIXME above is addressed.
let late_link_args_dynamic = LinkArgs::new();
let late_link_args_static = LinkArgs::new();
diff --git a/compiler/rustc_target/src/spec/windows_uwp_msvc_base.rs b/compiler/rustc_target/src/spec/windows_uwp_msvc_base.rs
index f2573fc2d..8c942c59d 100644
--- a/compiler/rustc_target/src/spec/windows_uwp_msvc_base.rs
+++ b/compiler/rustc_target/src/spec/windows_uwp_msvc_base.rs
@@ -1,11 +1,11 @@
-use crate::spec::{LinkerFlavor, TargetOptions};
+use crate::spec::{LinkerFlavor, Lld, TargetOptions};
pub fn opts() -> TargetOptions {
let mut opts = super::windows_msvc_base::opts();
opts.abi = "uwp".into();
opts.vendor = "uwp".into();
- opts.add_pre_link_args(LinkerFlavor::Msvc, &["/APPCONTAINER", "mincore.lib"]);
+ opts.add_pre_link_args(LinkerFlavor::Msvc(Lld::No), &["/APPCONTAINER", "mincore.lib"]);
opts
}
diff --git a/compiler/rustc_target/src/spec/x86_64_apple_darwin.rs b/compiler/rustc_target/src/spec/x86_64_apple_darwin.rs
index dbd26899c..087be1b95 100644
--- a/compiler/rustc_target/src/spec/x86_64_apple_darwin.rs
+++ b/compiler/rustc_target/src/spec/x86_64_apple_darwin.rs
@@ -1,22 +1,21 @@
-use crate::spec::TargetOptions;
-use crate::spec::{FramePointer, LinkerFlavor, SanitizerSet, StackProbeType, Target};
+use crate::spec::{Cc, FramePointer, LinkerFlavor, Lld, SanitizerSet};
+use crate::spec::{StackProbeType, Target, TargetOptions};
pub fn target() -> Target {
- let mut base = super::apple_base::opts("macos");
+ let arch = "x86_64";
+ let mut base = super::apple_base::opts("macos", arch, "");
base.cpu = "core2".into();
base.max_atomic_width = Some(128); // core2 support cmpxchg16b
base.frame_pointer = FramePointer::Always;
- base.add_pre_link_args(LinkerFlavor::Gcc, &["-m64", "-arch", "x86_64"]);
+ base.add_pre_link_args(LinkerFlavor::Darwin(Cc::Yes, Lld::No), &["-m64"]);
base.link_env_remove.to_mut().extend(super::apple_base::macos_link_env_remove());
- // don't use probe-stack=inline-asm until rust#83139 and rust#84667 are resolved
- base.stack_probes = StackProbeType::Call;
+ base.stack_probes = StackProbeType::X86;
base.supported_sanitizers =
SanitizerSet::ADDRESS | SanitizerSet::CFI | SanitizerSet::LEAK | SanitizerSet::THREAD;
// Clang automatically chooses a more specific target based on
// MACOSX_DEPLOYMENT_TARGET. To enable cross-language LTO to work
// correctly, we do too.
- let arch = "x86_64";
let llvm_target = super::apple_base::macos_llvm_target(&arch);
Target {
diff --git a/compiler/rustc_target/src/spec/x86_64_apple_ios.rs b/compiler/rustc_target/src/spec/x86_64_apple_ios.rs
index 5e64ed0cf..e6143025d 100644
--- a/compiler/rustc_target/src/spec/x86_64_apple_ios.rs
+++ b/compiler/rustc_target/src/spec/x86_64_apple_ios.rs
@@ -13,8 +13,7 @@ pub fn target() -> Target {
arch: "x86_64".into(),
options: TargetOptions {
max_atomic_width: Some(64),
- // don't use probe-stack=inline-asm until rust#83139 and rust#84667 are resolved
- stack_probes: StackProbeType::Call,
+ stack_probes: StackProbeType::X86,
..base
},
}
diff --git a/compiler/rustc_target/src/spec/x86_64_apple_ios_macabi.rs b/compiler/rustc_target/src/spec/x86_64_apple_ios_macabi.rs
index 2122bcd37..13259205a 100644
--- a/compiler/rustc_target/src/spec/x86_64_apple_ios_macabi.rs
+++ b/compiler/rustc_target/src/spec/x86_64_apple_ios_macabi.rs
@@ -1,11 +1,11 @@
use super::apple_sdk_base::{opts, Arch};
-use crate::spec::{LinkerFlavor, StackProbeType, Target, TargetOptions};
+use crate::spec::{Cc, LinkerFlavor, Lld, StackProbeType, Target, TargetOptions};
pub fn target() -> Target {
let llvm_target = "x86_64-apple-ios13.0-macabi";
let mut base = opts("ios", Arch::X86_64_macabi);
- base.add_pre_link_args(LinkerFlavor::Gcc, &["-target", llvm_target]);
+ base.add_pre_link_args(LinkerFlavor::Darwin(Cc::Yes, Lld::No), &["-target", llvm_target]);
Target {
llvm_target: llvm_target.into(),
@@ -15,8 +15,7 @@ pub fn target() -> Target {
arch: "x86_64".into(),
options: TargetOptions {
max_atomic_width: Some(64),
- // don't use probe-stack=inline-asm until rust#83139 and rust#84667 are resolved
- stack_probes: StackProbeType::Call,
+ stack_probes: StackProbeType::X86,
..base
},
}
diff --git a/compiler/rustc_target/src/spec/x86_64_apple_tvos.rs b/compiler/rustc_target/src/spec/x86_64_apple_tvos.rs
index a848c5a0a..3d54da086 100644
--- a/compiler/rustc_target/src/spec/x86_64_apple_tvos.rs
+++ b/compiler/rustc_target/src/spec/x86_64_apple_tvos.rs
@@ -10,8 +10,7 @@ pub fn target() -> Target {
arch: "x86_64".into(),
options: TargetOptions {
max_atomic_width: Some(64),
- // don't use probe-stack=inline-asm until rust#83139 and rust#84667 are resolved
- stack_probes: StackProbeType::Call,
+ stack_probes: StackProbeType::X86,
..base
},
}
diff --git a/compiler/rustc_target/src/spec/x86_64_apple_watchos_sim.rs b/compiler/rustc_target/src/spec/x86_64_apple_watchos_sim.rs
index 4dff3c2f2..e499b1985 100644
--- a/compiler/rustc_target/src/spec/x86_64_apple_watchos_sim.rs
+++ b/compiler/rustc_target/src/spec/x86_64_apple_watchos_sim.rs
@@ -15,8 +15,7 @@ pub fn target() -> Target {
arch: "x86_64".into(),
options: TargetOptions {
max_atomic_width: Some(64),
- // don't use probe-stack=inline-asm until rust#83139 and rust#84667 are resolved
- stack_probes: StackProbeType::Call,
+ stack_probes: StackProbeType::X86,
forces_embed_bitcode: true,
// Taken from a clang build on Xcode 11.4.1.
// These arguments are not actually invoked - they just have
diff --git a/compiler/rustc_target/src/spec/x86_64_fortanix_unknown_sgx.rs b/compiler/rustc_target/src/spec/x86_64_fortanix_unknown_sgx.rs
index 9d597ea2e..cba6fda19 100644
--- a/compiler/rustc_target/src/spec/x86_64_fortanix_unknown_sgx.rs
+++ b/compiler/rustc_target/src/spec/x86_64_fortanix_unknown_sgx.rs
@@ -1,12 +1,10 @@
use std::borrow::Cow;
-use crate::spec::cvs;
-
-use super::{LinkerFlavor, LldFlavor, Target, TargetOptions};
+use super::{cvs, Cc, LinkerFlavor, Lld, Target, TargetOptions};
pub fn target() -> Target {
let pre_link_args = TargetOptions::link_args(
- LinkerFlavor::Ld,
+ LinkerFlavor::Gnu(Cc::No, Lld::No),
&[
"-e",
"elf_entry",
@@ -61,7 +59,7 @@ pub fn target() -> Target {
env: "sgx".into(),
vendor: "fortanix".into(),
abi: "fortanix".into(),
- linker_flavor: LinkerFlavor::Lld(LldFlavor::Ld),
+ linker_flavor: LinkerFlavor::Gnu(Cc::No, Lld::Yes),
linker: Some("rust-lld".into()),
max_atomic_width: Some(64),
cpu: "x86-64".into(),
diff --git a/compiler/rustc_target/src/spec/x86_64_fuchsia.rs b/compiler/rustc_target/src/spec/x86_64_fuchsia.rs
index 4f88fc350..532dd6d07 100644
--- a/compiler/rustc_target/src/spec/x86_64_fuchsia.rs
+++ b/compiler/rustc_target/src/spec/x86_64_fuchsia.rs
@@ -4,8 +4,7 @@ pub fn target() -> Target {
let mut base = super::fuchsia_base::opts();
base.cpu = "x86-64".into();
base.max_atomic_width = Some(64);
- // don't use probe-stack=inline-asm until rust#83139 and rust#84667 are resolved
- base.stack_probes = StackProbeType::Call;
+ base.stack_probes = StackProbeType::X86;
base.supported_sanitizers = SanitizerSet::ADDRESS | SanitizerSet::CFI;
Target {
diff --git a/compiler/rustc_target/src/spec/x86_64_linux_android.rs b/compiler/rustc_target/src/spec/x86_64_linux_android.rs
index 6d19cf265..9c9137848 100644
--- a/compiler/rustc_target/src/spec/x86_64_linux_android.rs
+++ b/compiler/rustc_target/src/spec/x86_64_linux_android.rs
@@ -1,4 +1,4 @@
-use crate::spec::{LinkerFlavor, SanitizerSet, StackProbeType, Target, TargetOptions};
+use crate::spec::{Cc, LinkerFlavor, Lld, SanitizerSet, StackProbeType, Target, TargetOptions};
pub fn target() -> Target {
let mut base = super::android_base::opts();
@@ -6,9 +6,8 @@ pub fn target() -> Target {
// https://developer.android.com/ndk/guides/abis.html#86-64
base.features = "+mmx,+sse,+sse2,+sse3,+ssse3,+sse4.1,+sse4.2,+popcnt".into();
base.max_atomic_width = Some(64);
- base.add_pre_link_args(LinkerFlavor::Gcc, &["-m64"]);
- // don't use probe-stack=inline-asm until rust#83139 and rust#84667 are resolved
- base.stack_probes = StackProbeType::Call;
+ base.add_pre_link_args(LinkerFlavor::Gnu(Cc::Yes, Lld::No), &["-m64"]);
+ base.stack_probes = StackProbeType::X86;
Target {
llvm_target: "x86_64-linux-android".into(),
diff --git a/compiler/rustc_target/src/spec/x86_64_pc_solaris.rs b/compiler/rustc_target/src/spec/x86_64_pc_solaris.rs
index 0550b221f..cb62a8173 100644
--- a/compiler/rustc_target/src/spec/x86_64_pc_solaris.rs
+++ b/compiler/rustc_target/src/spec/x86_64_pc_solaris.rs
@@ -1,13 +1,12 @@
-use crate::spec::{LinkerFlavor, SanitizerSet, StackProbeType, Target};
+use crate::spec::{Cc, LinkerFlavor, SanitizerSet, StackProbeType, Target};
pub fn target() -> Target {
let mut base = super::solaris_base::opts();
- base.add_pre_link_args(LinkerFlavor::Gcc, &["-m64"]);
+ base.add_pre_link_args(LinkerFlavor::Unix(Cc::Yes), &["-m64"]);
base.cpu = "x86-64".into();
base.vendor = "pc".into();
base.max_atomic_width = Some(64);
- // don't use probe-stack=inline-asm until rust#83139 and rust#84667 are resolved
- base.stack_probes = StackProbeType::Call;
+ base.stack_probes = StackProbeType::X86;
base.supported_sanitizers = SanitizerSet::ADDRESS | SanitizerSet::CFI;
Target {
diff --git a/compiler/rustc_target/src/spec/x86_64_pc_windows_gnu.rs b/compiler/rustc_target/src/spec/x86_64_pc_windows_gnu.rs
index 59a8cffca..37feaa9db 100644
--- a/compiler/rustc_target/src/spec/x86_64_pc_windows_gnu.rs
+++ b/compiler/rustc_target/src/spec/x86_64_pc_windows_gnu.rs
@@ -1,11 +1,14 @@
-use crate::spec::{LinkerFlavor, Target};
+use crate::spec::{Cc, LinkerFlavor, Lld, Target};
pub fn target() -> Target {
let mut base = super::windows_gnu_base::opts();
base.cpu = "x86-64".into();
// Use high-entropy 64 bit address space for ASLR
- base.add_pre_link_args(LinkerFlavor::Ld, &["-m", "i386pep", "--high-entropy-va"]);
- base.add_pre_link_args(LinkerFlavor::Gcc, &["-m64", "-Wl,--high-entropy-va"]);
+ base.add_pre_link_args(
+ LinkerFlavor::Gnu(Cc::No, Lld::No),
+ &["-m", "i386pep", "--high-entropy-va"],
+ );
+ base.add_pre_link_args(LinkerFlavor::Gnu(Cc::Yes, Lld::No), &["-m64", "-Wl,--high-entropy-va"]);
base.max_atomic_width = Some(64);
base.linker = Some("x86_64-w64-mingw32-gcc".into());
diff --git a/compiler/rustc_target/src/spec/x86_64_pc_windows_gnullvm.rs b/compiler/rustc_target/src/spec/x86_64_pc_windows_gnullvm.rs
index d3909b389..039bc2bd2 100644
--- a/compiler/rustc_target/src/spec/x86_64_pc_windows_gnullvm.rs
+++ b/compiler/rustc_target/src/spec/x86_64_pc_windows_gnullvm.rs
@@ -1,9 +1,9 @@
-use crate::spec::{LinkerFlavor, Target};
+use crate::spec::{Cc, LinkerFlavor, Lld, Target};
pub fn target() -> Target {
let mut base = super::windows_gnullvm_base::opts();
base.cpu = "x86-64".into();
- base.add_pre_link_args(LinkerFlavor::Gcc, &["-m64"]);
+ base.add_pre_link_args(LinkerFlavor::Gnu(Cc::Yes, Lld::No), &["-m64"]);
base.max_atomic_width = Some(64);
base.linker = Some("x86_64-w64-mingw32-clang".into());
diff --git a/compiler/rustc_target/src/spec/x86_64_sun_solaris.rs b/compiler/rustc_target/src/spec/x86_64_sun_solaris.rs
index cbe87589a..0f31ea86b 100644
--- a/compiler/rustc_target/src/spec/x86_64_sun_solaris.rs
+++ b/compiler/rustc_target/src/spec/x86_64_sun_solaris.rs
@@ -1,13 +1,12 @@
-use crate::spec::{LinkerFlavor, StackProbeType, Target};
+use crate::spec::{Cc, LinkerFlavor, StackProbeType, Target};
pub fn target() -> Target {
let mut base = super::solaris_base::opts();
- base.add_pre_link_args(LinkerFlavor::Gcc, &["-m64"]);
+ base.add_pre_link_args(LinkerFlavor::Unix(Cc::Yes), &["-m64"]);
base.cpu = "x86-64".into();
base.vendor = "sun".into();
base.max_atomic_width = Some(64);
- // don't use probe-stack=inline-asm until rust#83139 and rust#84667 are resolved
- base.stack_probes = StackProbeType::Call;
+ base.stack_probes = StackProbeType::X86;
Target {
llvm_target: "x86_64-pc-solaris".into(),
diff --git a/compiler/rustc_target/src/spec/x86_64_unknown_dragonfly.rs b/compiler/rustc_target/src/spec/x86_64_unknown_dragonfly.rs
index 746f64781..67ce3768d 100644
--- a/compiler/rustc_target/src/spec/x86_64_unknown_dragonfly.rs
+++ b/compiler/rustc_target/src/spec/x86_64_unknown_dragonfly.rs
@@ -1,12 +1,11 @@
-use crate::spec::{LinkerFlavor, StackProbeType, Target};
+use crate::spec::{Cc, LinkerFlavor, Lld, StackProbeType, Target};
pub fn target() -> Target {
let mut base = super::dragonfly_base::opts();
base.cpu = "x86-64".into();
base.max_atomic_width = Some(64);
- base.add_pre_link_args(LinkerFlavor::Gcc, &["-m64"]);
- // don't use probe-stack=inline-asm until rust#83139 and rust#84667 are resolved
- base.stack_probes = StackProbeType::Call;
+ base.add_pre_link_args(LinkerFlavor::Gnu(Cc::Yes, Lld::No), &["-m64"]);
+ base.stack_probes = StackProbeType::X86;
Target {
llvm_target: "x86_64-unknown-dragonfly".into(),
diff --git a/compiler/rustc_target/src/spec/x86_64_unknown_freebsd.rs b/compiler/rustc_target/src/spec/x86_64_unknown_freebsd.rs
index b30784ed6..98988ab35 100644
--- a/compiler/rustc_target/src/spec/x86_64_unknown_freebsd.rs
+++ b/compiler/rustc_target/src/spec/x86_64_unknown_freebsd.rs
@@ -1,12 +1,11 @@
-use crate::spec::{LinkerFlavor, SanitizerSet, StackProbeType, Target};
+use crate::spec::{Cc, LinkerFlavor, Lld, SanitizerSet, StackProbeType, Target};
pub fn target() -> Target {
let mut base = super::freebsd_base::opts();
base.cpu = "x86-64".into();
base.max_atomic_width = Some(64);
- base.add_pre_link_args(LinkerFlavor::Gcc, &["-m64"]);
- // don't use probe-stack=inline-asm until rust#83139 and rust#84667 are resolved
- base.stack_probes = StackProbeType::Call;
+ base.add_pre_link_args(LinkerFlavor::Gnu(Cc::Yes, Lld::No), &["-m64"]);
+ base.stack_probes = StackProbeType::X86;
base.supported_sanitizers =
SanitizerSet::ADDRESS | SanitizerSet::CFI | SanitizerSet::MEMORY | SanitizerSet::THREAD;
diff --git a/compiler/rustc_target/src/spec/x86_64_unknown_haiku.rs b/compiler/rustc_target/src/spec/x86_64_unknown_haiku.rs
index d6d033629..9a7a3b501 100644
--- a/compiler/rustc_target/src/spec/x86_64_unknown_haiku.rs
+++ b/compiler/rustc_target/src/spec/x86_64_unknown_haiku.rs
@@ -1,12 +1,11 @@
-use crate::spec::{LinkerFlavor, StackProbeType, Target};
+use crate::spec::{Cc, LinkerFlavor, Lld, StackProbeType, Target};
pub fn target() -> Target {
let mut base = super::haiku_base::opts();
base.cpu = "x86-64".into();
base.max_atomic_width = Some(64);
- base.add_pre_link_args(LinkerFlavor::Gcc, &["-m64"]);
- // don't use probe-stack=inline-asm until rust#83139 and rust#84667 are resolved
- base.stack_probes = StackProbeType::Call;
+ base.add_pre_link_args(LinkerFlavor::Gnu(Cc::Yes, Lld::No), &["-m64"]);
+ base.stack_probes = StackProbeType::X86;
// This option is required to build executables on Haiku x86_64
base.position_independent_executables = true;
diff --git a/compiler/rustc_target/src/spec/x86_64_unknown_hermit.rs b/compiler/rustc_target/src/spec/x86_64_unknown_hermit.rs
index d31530161..fb1af33f8 100644
--- a/compiler/rustc_target/src/spec/x86_64_unknown_hermit.rs
+++ b/compiler/rustc_target/src/spec/x86_64_unknown_hermit.rs
@@ -5,8 +5,7 @@ pub fn target() -> Target {
base.cpu = "x86-64".into();
base.max_atomic_width = Some(64);
base.features = "+rdrnd,+rdseed".into();
- // don't use probe-stack=inline-asm until rust#83139 and rust#84667 are resolved
- base.stack_probes = StackProbeType::Call;
+ base.stack_probes = StackProbeType::X86;
Target {
llvm_target: "x86_64-unknown-hermit".into(),
diff --git a/compiler/rustc_target/src/spec/x86_64_unknown_illumos.rs b/compiler/rustc_target/src/spec/x86_64_unknown_illumos.rs
index 9f19c3a2b..04a12a7bf 100644
--- a/compiler/rustc_target/src/spec/x86_64_unknown_illumos.rs
+++ b/compiler/rustc_target/src/spec/x86_64_unknown_illumos.rs
@@ -1,8 +1,8 @@
-use crate::spec::{LinkerFlavor, SanitizerSet, Target};
+use crate::spec::{Cc, LinkerFlavor, SanitizerSet, Target};
pub fn target() -> Target {
let mut base = super::illumos_base::opts();
- base.add_pre_link_args(LinkerFlavor::Gcc, &["-m64", "-std=c99"]);
+ base.add_pre_link_args(LinkerFlavor::Unix(Cc::Yes), &["-m64", "-std=c99"]);
base.cpu = "x86-64".into();
base.max_atomic_width = Some(64);
base.supported_sanitizers = SanitizerSet::ADDRESS | SanitizerSet::CFI;
diff --git a/compiler/rustc_target/src/spec/x86_64_unknown_l4re_uclibc.rs b/compiler/rustc_target/src/spec/x86_64_unknown_l4re_uclibc.rs
index 78189a0c0..26da7e800 100644
--- a/compiler/rustc_target/src/spec/x86_64_unknown_l4re_uclibc.rs
+++ b/compiler/rustc_target/src/spec/x86_64_unknown_l4re_uclibc.rs
@@ -4,8 +4,6 @@ pub fn target() -> Target {
let mut base = super::l4re_base::opts();
base.cpu = "x86-64".into();
base.max_atomic_width = Some(64);
- base.crt_static_allows_dylibs = false;
- base.dynamic_linking = false;
base.panic_strategy = PanicStrategy::Abort;
Target {
diff --git a/compiler/rustc_target/src/spec/x86_64_unknown_linux_gnu.rs b/compiler/rustc_target/src/spec/x86_64_unknown_linux_gnu.rs
index 956be0353..a91ab365b 100644
--- a/compiler/rustc_target/src/spec/x86_64_unknown_linux_gnu.rs
+++ b/compiler/rustc_target/src/spec/x86_64_unknown_linux_gnu.rs
@@ -1,12 +1,11 @@
-use crate::spec::{LinkerFlavor, SanitizerSet, StackProbeType, Target};
+use crate::spec::{Cc, LinkerFlavor, Lld, SanitizerSet, StackProbeType, Target};
pub fn target() -> Target {
let mut base = super::linux_gnu_base::opts();
base.cpu = "x86-64".into();
base.max_atomic_width = Some(64);
- base.add_pre_link_args(LinkerFlavor::Gcc, &["-m64"]);
- // don't use probe-stack=inline-asm until rust#83139 and rust#84667 are resolved
- base.stack_probes = StackProbeType::Call;
+ base.add_pre_link_args(LinkerFlavor::Gnu(Cc::Yes, Lld::No), &["-m64"]);
+ base.stack_probes = StackProbeType::X86;
base.static_position_independent_executables = true;
base.supported_sanitizers = SanitizerSet::ADDRESS
| SanitizerSet::CFI
diff --git a/compiler/rustc_target/src/spec/x86_64_unknown_linux_gnux32.rs b/compiler/rustc_target/src/spec/x86_64_unknown_linux_gnux32.rs
index 140882747..626d5b480 100644
--- a/compiler/rustc_target/src/spec/x86_64_unknown_linux_gnux32.rs
+++ b/compiler/rustc_target/src/spec/x86_64_unknown_linux_gnux32.rs
@@ -1,13 +1,12 @@
-use crate::spec::{LinkerFlavor, StackProbeType, Target};
+use crate::spec::{Cc, LinkerFlavor, Lld, StackProbeType, Target};
pub fn target() -> Target {
let mut base = super::linux_gnu_base::opts();
base.cpu = "x86-64".into();
base.abi = "x32".into();
base.max_atomic_width = Some(64);
- base.add_pre_link_args(LinkerFlavor::Gcc, &["-mx32"]);
- // don't use probe-stack=inline-asm until rust#83139 and rust#84667 are resolved
- base.stack_probes = StackProbeType::Call;
+ base.add_pre_link_args(LinkerFlavor::Gnu(Cc::Yes, Lld::No), &["-mx32"]);
+ base.stack_probes = StackProbeType::X86;
base.has_thread_local = false;
// BUG(GabrielMajeri): disabling the PLT on x86_64 Linux with x32 ABI
// breaks code gen. See LLVM bug 36743
diff --git a/compiler/rustc_target/src/spec/x86_64_unknown_linux_musl.rs b/compiler/rustc_target/src/spec/x86_64_unknown_linux_musl.rs
index 87e7784d1..9087dc3df 100644
--- a/compiler/rustc_target/src/spec/x86_64_unknown_linux_musl.rs
+++ b/compiler/rustc_target/src/spec/x86_64_unknown_linux_musl.rs
@@ -1,12 +1,11 @@
-use crate::spec::{LinkerFlavor, SanitizerSet, StackProbeType, Target};
+use crate::spec::{Cc, LinkerFlavor, Lld, SanitizerSet, StackProbeType, Target};
pub fn target() -> Target {
let mut base = super::linux_musl_base::opts();
base.cpu = "x86-64".into();
base.max_atomic_width = Some(64);
- base.add_pre_link_args(LinkerFlavor::Gcc, &["-m64"]);
- // don't use probe-stack=inline-asm until rust#83139 and rust#84667 are resolved
- base.stack_probes = StackProbeType::Call;
+ base.add_pre_link_args(LinkerFlavor::Gnu(Cc::Yes, Lld::No), &["-m64"]);
+ base.stack_probes = StackProbeType::X86;
base.static_position_independent_executables = true;
base.supported_sanitizers = SanitizerSet::ADDRESS
| SanitizerSet::CFI
diff --git a/compiler/rustc_target/src/spec/x86_64_unknown_netbsd.rs b/compiler/rustc_target/src/spec/x86_64_unknown_netbsd.rs
index d3a67619a..64ae425d8 100644
--- a/compiler/rustc_target/src/spec/x86_64_unknown_netbsd.rs
+++ b/compiler/rustc_target/src/spec/x86_64_unknown_netbsd.rs
@@ -1,12 +1,11 @@
-use crate::spec::{LinkerFlavor, SanitizerSet, StackProbeType, Target, TargetOptions};
+use crate::spec::{Cc, LinkerFlavor, Lld, SanitizerSet, StackProbeType, Target, TargetOptions};
pub fn target() -> Target {
let mut base = super::netbsd_base::opts();
base.cpu = "x86-64".into();
base.max_atomic_width = Some(64);
- base.add_pre_link_args(LinkerFlavor::Gcc, &["-m64"]);
- // don't use probe-stack=inline-asm until rust#83139 and rust#84667 are resolved
- base.stack_probes = StackProbeType::Call;
+ base.add_pre_link_args(LinkerFlavor::Gnu(Cc::Yes, Lld::No), &["-m64"]);
+ base.stack_probes = StackProbeType::X86;
base.supported_sanitizers = SanitizerSet::ADDRESS
| SanitizerSet::CFI
| SanitizerSet::LEAK
diff --git a/compiler/rustc_target/src/spec/x86_64_unknown_none.rs b/compiler/rustc_target/src/spec/x86_64_unknown_none.rs
index 809fd642d..e4d33c2b8 100644
--- a/compiler/rustc_target/src/spec/x86_64_unknown_none.rs
+++ b/compiler/rustc_target/src/spec/x86_64_unknown_none.rs
@@ -4,22 +4,18 @@
// `target-cpu` compiler flags to opt-in more hardware-specific
// features.
-use super::{
- CodeModel, LinkerFlavor, LldFlavor, PanicStrategy, RelocModel, RelroLevel, StackProbeType,
- Target, TargetOptions,
-};
+use super::{Cc, CodeModel, LinkerFlavor, Lld, PanicStrategy};
+use super::{RelroLevel, StackProbeType, Target, TargetOptions};
pub fn target() -> Target {
let opts = TargetOptions {
cpu: "x86-64".into(),
max_atomic_width: Some(64),
- // don't use probe-stack=inline-asm until rust#83139 and rust#84667 are resolved
- stack_probes: StackProbeType::Call,
+ stack_probes: StackProbeType::X86,
position_independent_executables: true,
static_position_independent_executables: true,
relro_level: RelroLevel::Full,
- relocation_model: RelocModel::Pic,
- linker_flavor: LinkerFlavor::Lld(LldFlavor::Ld),
+ linker_flavor: LinkerFlavor::Gnu(Cc::No, Lld::Yes),
linker: Some("rust-lld".into()),
features:
"-mmx,-sse,-sse2,-sse3,-ssse3,-sse4.1,-sse4.2,-3dnow,-3dnowa,-avx,-avx2,+soft-float"
diff --git a/compiler/rustc_target/src/spec/x86_64_unknown_none_linuxkernel.rs b/compiler/rustc_target/src/spec/x86_64_unknown_none_linuxkernel.rs
index 593345a5f..ebd9636ff 100644
--- a/compiler/rustc_target/src/spec/x86_64_unknown_none_linuxkernel.rs
+++ b/compiler/rustc_target/src/spec/x86_64_unknown_none_linuxkernel.rs
@@ -1,7 +1,7 @@
// This defines the amd64 target for the Linux Kernel. See the linux-kernel-base module for
// generic Linux kernel options.
-use crate::spec::{CodeModel, LinkerFlavor, Target};
+use crate::spec::{Cc, CodeModel, LinkerFlavor, Lld, Target};
pub fn target() -> Target {
let mut base = super::linux_kernel_base::opts();
@@ -10,7 +10,7 @@ pub fn target() -> Target {
base.features =
"-mmx,-sse,-sse2,-sse3,-ssse3,-sse4.1,-sse4.2,-3dnow,-3dnowa,-avx,-avx2,+soft-float".into();
base.code_model = Some(CodeModel::Kernel);
- base.add_pre_link_args(LinkerFlavor::Gcc, &["-m64"]);
+ base.add_pre_link_args(LinkerFlavor::Gnu(Cc::Yes, Lld::No), &["-m64"]);
Target {
// FIXME: Some dispute, the linux-on-clang folks think this should use
diff --git a/compiler/rustc_target/src/spec/x86_64_unknown_openbsd.rs b/compiler/rustc_target/src/spec/x86_64_unknown_openbsd.rs
index f50c6bcee..66b8e2022 100644
--- a/compiler/rustc_target/src/spec/x86_64_unknown_openbsd.rs
+++ b/compiler/rustc_target/src/spec/x86_64_unknown_openbsd.rs
@@ -1,12 +1,11 @@
-use crate::spec::{LinkerFlavor, StackProbeType, Target};
+use crate::spec::{Cc, LinkerFlavor, Lld, StackProbeType, Target};
pub fn target() -> Target {
let mut base = super::openbsd_base::opts();
base.cpu = "x86-64".into();
base.max_atomic_width = Some(64);
- base.add_pre_link_args(LinkerFlavor::Gcc, &["-m64"]);
- // don't use probe-stack=inline-asm until rust#83139 and rust#84667 are resolved
- base.stack_probes = StackProbeType::Call;
+ base.add_pre_link_args(LinkerFlavor::Gnu(Cc::Yes, Lld::No), &["-m64"]);
+ base.stack_probes = StackProbeType::X86;
Target {
llvm_target: "x86_64-unknown-openbsd".into(),
diff --git a/compiler/rustc_target/src/spec/x86_64_unknown_redox.rs b/compiler/rustc_target/src/spec/x86_64_unknown_redox.rs
index 668ae9054..b47f15cf5 100644
--- a/compiler/rustc_target/src/spec/x86_64_unknown_redox.rs
+++ b/compiler/rustc_target/src/spec/x86_64_unknown_redox.rs
@@ -1,12 +1,11 @@
-use crate::spec::{LinkerFlavor, StackProbeType, Target};
+use crate::spec::{Cc, LinkerFlavor, Lld, StackProbeType, Target};
pub fn target() -> Target {
let mut base = super::redox_base::opts();
base.cpu = "x86-64".into();
base.max_atomic_width = Some(64);
- base.add_pre_link_args(LinkerFlavor::Gcc, &["-m64"]);
- // don't use probe-stack=inline-asm until rust#83139 and rust#84667 are resolved
- base.stack_probes = StackProbeType::Call;
+ base.add_pre_link_args(LinkerFlavor::Gnu(Cc::Yes, Lld::No), &["-m64"]);
+ base.stack_probes = StackProbeType::X86;
Target {
llvm_target: "x86_64-unknown-redox".into(),
diff --git a/compiler/rustc_target/src/spec/x86_64_uwp_windows_gnu.rs b/compiler/rustc_target/src/spec/x86_64_uwp_windows_gnu.rs
index 76d2013cf..c3eaa6939 100644
--- a/compiler/rustc_target/src/spec/x86_64_uwp_windows_gnu.rs
+++ b/compiler/rustc_target/src/spec/x86_64_uwp_windows_gnu.rs
@@ -1,11 +1,14 @@
-use crate::spec::{LinkerFlavor, Target};
+use crate::spec::{Cc, LinkerFlavor, Lld, Target};
pub fn target() -> Target {
let mut base = super::windows_uwp_gnu_base::opts();
base.cpu = "x86-64".into();
// Use high-entropy 64 bit address space for ASLR
- base.add_pre_link_args(LinkerFlavor::Ld, &["-m", "i386pep", "--high-entropy-va"]);
- base.add_pre_link_args(LinkerFlavor::Gcc, &["-m64", "-Wl,--high-entropy-va"]);
+ base.add_pre_link_args(
+ LinkerFlavor::Gnu(Cc::No, Lld::No),
+ &["-m", "i386pep", "--high-entropy-va"],
+ );
+ base.add_pre_link_args(LinkerFlavor::Gnu(Cc::Yes, Lld::No), &["-m64", "-Wl,--high-entropy-va"]);
base.max_atomic_width = Some(64);
Target {
diff --git a/compiler/rustc_target/src/spec/x86_64_wrs_vxworks.rs b/compiler/rustc_target/src/spec/x86_64_wrs_vxworks.rs
index 129897495..365ade6bc 100644
--- a/compiler/rustc_target/src/spec/x86_64_wrs_vxworks.rs
+++ b/compiler/rustc_target/src/spec/x86_64_wrs_vxworks.rs
@@ -1,12 +1,11 @@
-use crate::spec::{LinkerFlavor, StackProbeType, Target};
+use crate::spec::{Cc, LinkerFlavor, Lld, StackProbeType, Target};
pub fn target() -> Target {
let mut base = super::vxworks_base::opts();
base.cpu = "x86-64".into();
base.max_atomic_width = Some(64);
- base.add_pre_link_args(LinkerFlavor::Gcc, &["-m64"]);
- // don't use probe-stack=inline-asm until rust#83139 and rust#84667 are resolved
- base.stack_probes = StackProbeType::Call;
+ base.add_pre_link_args(LinkerFlavor::Gnu(Cc::Yes, Lld::No), &["-m64"]);
+ base.stack_probes = StackProbeType::X86;
base.disable_redzone = true;
Target {
diff --git a/compiler/rustc_trait_selection/Cargo.toml b/compiler/rustc_trait_selection/Cargo.toml
index 566f236f2..67613e1a4 100644
--- a/compiler/rustc_trait_selection/Cargo.toml
+++ b/compiler/rustc_trait_selection/Cargo.toml
@@ -4,7 +4,6 @@ version = "0.0.0"
edition = "2021"
[lib]
-doctest = false
[dependencies]
rustc_parse_format = { path = "../rustc_parse_format" }
diff --git a/compiler/rustc_trait_selection/src/autoderef.rs b/compiler/rustc_trait_selection/src/autoderef.rs
index 8b7e8984a..61cfeec4b 100644
--- a/compiler/rustc_trait_selection/src/autoderef.rs
+++ b/compiler/rustc_trait_selection/src/autoderef.rs
@@ -1,6 +1,6 @@
+use crate::errors::AutoDerefReachedRecursionLimit;
use crate::traits::query::evaluate_obligation::InferCtxtExt;
use crate::traits::{self, TraitEngine};
-use rustc_errors::struct_span_err;
use rustc_hir as hir;
use rustc_infer::infer::InferCtxt;
use rustc_middle::ty::{self, TraitRef, Ty, TyCtxt};
@@ -25,7 +25,7 @@ struct AutoderefSnapshot<'tcx> {
pub struct Autoderef<'a, 'tcx> {
// Meta infos:
- infcx: &'a InferCtxt<'a, 'tcx>,
+ infcx: &'a InferCtxt<'tcx>,
span: Span,
overloaded_span: Span,
body_id: hir::HirId,
@@ -94,7 +94,7 @@ impl<'a, 'tcx> Iterator for Autoderef<'a, 'tcx> {
impl<'a, 'tcx> Autoderef<'a, 'tcx> {
pub fn new(
- infcx: &'a InferCtxt<'a, 'tcx>,
+ infcx: &'a InferCtxt<'tcx>,
param_env: ty::ParamEnv<'tcx>,
body_id: hir::HirId,
span: Span,
@@ -222,19 +222,10 @@ pub fn report_autoderef_recursion_limit_error<'tcx>(tcx: TyCtxt<'tcx>, span: Spa
Limit(0) => Limit(2),
limit => limit * 2,
};
- struct_span_err!(
- tcx.sess,
+ tcx.sess.emit_err(AutoDerefReachedRecursionLimit {
span,
- E0055,
- "reached the recursion limit while auto-dereferencing `{:?}`",
- ty
- )
- .span_label(span, "deref recursion limit reached")
- .help(&format!(
- "consider increasing the recursion limit by adding a \
- `#![recursion_limit = \"{}\"]` attribute to your crate (`{}`)",
+ ty,
suggested_limit,
- tcx.crate_name(LOCAL_CRATE),
- ))
- .emit();
+ crate_name: tcx.crate_name(LOCAL_CRATE),
+ });
}
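
This hunk replaces an inline `struct_span_err!` invocation with a typed diagnostic struct emitted through `emit_err`. Not the rustc API, just the idea behind that migration: the error's data moves into a plain struct and the message rendering lives in one place next to it (in rustc, via the `#[derive(Diagnostic)]` and Fluent machinery shown in errors.rs below):

    use std::fmt;

    struct RecursionLimitError {
        ty: String,
        suggested_limit: usize,
        crate_name: String,
    }

    impl fmt::Display for RecursionLimitError {
        fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
            write!(
                f,
                "reached the recursion limit while auto-dereferencing `{}`; consider adding \
                 `#![recursion_limit = \"{}\"]` to `{}`",
                self.ty, self.suggested_limit, self.crate_name
            )
        }
    }

    fn main() {
        let err = RecursionLimitError {
            ty: "Foo".to_string(),
            suggested_limit: 256,
            crate_name: "my_crate".to_string(),
        };
        eprintln!("error[E0055]: {err}");
    }
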
diff --git a/compiler/rustc_trait_selection/src/errors.rs b/compiler/rustc_trait_selection/src/errors.rs
new file mode 100644
index 000000000..7f8705824
--- /dev/null
+++ b/compiler/rustc_trait_selection/src/errors.rs
@@ -0,0 +1,102 @@
+use rustc_errors::{fluent, ErrorGuaranteed, Handler, IntoDiagnostic};
+use rustc_macros::Diagnostic;
+use rustc_middle::ty::{self, PolyTraitRef, Ty};
+use rustc_session::Limit;
+use rustc_span::{Span, Symbol};
+
+#[derive(Diagnostic)]
+#[diag(trait_selection_dump_vtable_entries)]
+pub struct DumpVTableEntries<'a> {
+ #[primary_span]
+ pub span: Span,
+ pub trait_ref: PolyTraitRef<'a>,
+ pub entries: String,
+}
+
+#[derive(Diagnostic)]
+#[diag(trait_selection_unable_to_construct_constant_value)]
+pub struct UnableToConstructConstantValue<'a> {
+ #[primary_span]
+ pub span: Span,
+ pub unevaluated: ty::UnevaluatedConst<'a>,
+}
+
+#[derive(Diagnostic)]
+#[help]
+#[diag(trait_selection_auto_deref_reached_recursion_limit, code = "E0055")]
+pub struct AutoDerefReachedRecursionLimit<'a> {
+ #[primary_span]
+ #[label]
+ pub span: Span,
+ pub ty: Ty<'a>,
+ pub suggested_limit: Limit,
+ pub crate_name: Symbol,
+}
+
+#[derive(Diagnostic)]
+#[diag(trait_selection_empty_on_clause_in_rustc_on_unimplemented, code = "E0232")]
+pub struct EmptyOnClauseInOnUnimplemented {
+ #[primary_span]
+ #[label]
+ pub span: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(trait_selection_invalid_on_clause_in_rustc_on_unimplemented, code = "E0232")]
+pub struct InvalidOnClauseInOnUnimplemented {
+ #[primary_span]
+ #[label]
+ pub span: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(trait_selection_no_value_in_rustc_on_unimplemented, code = "E0232")]
+#[note]
+pub struct NoValueInOnUnimplemented {
+ #[primary_span]
+ #[label]
+ pub span: Span,
+}
+
+pub struct NegativePositiveConflict<'a> {
+ pub impl_span: Span,
+ pub trait_desc: &'a str,
+ pub self_desc: &'a Option<String>,
+ pub negative_impl_span: Result<Span, Symbol>,
+ pub positive_impl_span: Result<Span, Symbol>,
+}
+
+impl IntoDiagnostic<'_> for NegativePositiveConflict<'_> {
+ fn into_diagnostic(
+ self,
+ handler: &Handler,
+ ) -> rustc_errors::DiagnosticBuilder<'_, ErrorGuaranteed> {
+ let mut diag = handler.struct_err(fluent::trait_selection_negative_positive_conflict);
+ diag.set_arg("trait_desc", self.trait_desc);
+ diag.set_arg(
+ "self_desc",
+ self.self_desc.clone().map_or_else(|| String::from("none"), |ty| ty),
+ );
+ diag.set_span(self.impl_span);
+ diag.code(rustc_errors::error_code!(E0751));
+ match self.negative_impl_span {
+ Ok(span) => {
+ diag.span_label(span, fluent::negative_implementation_here);
+ }
+ Err(cname) => {
+ diag.note(fluent::negative_implementation_in_crate);
+ diag.set_arg("negative_impl_cname", cname.to_string());
+ }
+ }
+ match self.positive_impl_span {
+ Ok(span) => {
+ diag.span_label(span, fluent::positive_implementation_here);
+ }
+ Err(cname) => {
+ diag.note(fluent::positive_implementation_in_crate);
+ diag.set_arg("positive_impl_cname", cname.to_string());
+ }
+ }
+ diag
+ }
+}
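A rough, std-only analogy of the diagnostic-struct pattern this new file introduces: the data for an error lives in a plain struct and the message is rendered in one place instead of being formatted at every call site. The names and the Display rendering below are illustrative only, not the rustc_errors API.

    use std::fmt;

    // Hypothetical stand-in for a diagnostic struct: fields carry the data,
    // a single impl turns them into a message.
    struct RecursionLimitReached {
        ty: String,       // type being auto-dereferenced
        suggested: usize, // suggested new recursion limit
    }

    impl fmt::Display for RecursionLimitReached {
        fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
            write!(
                f,
                "reached the recursion limit while auto-dereferencing `{}`; \
                 consider `#![recursion_limit = \"{}\"]`",
                self.ty, self.suggested
            )
        }
    }

    fn main() {
        let err = RecursionLimitReached { ty: "Foo".into(), suggested: 256 };
        eprintln!("error[E0055]: {err}");
    }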
diff --git a/compiler/rustc_trait_selection/src/infer.rs b/compiler/rustc_trait_selection/src/infer.rs
index 9d30374f8..a335f8e06 100644
--- a/compiler/rustc_trait_selection/src/infer.rs
+++ b/compiler/rustc_trait_selection/src/infer.rs
@@ -24,6 +24,13 @@ pub trait InferCtxtExt<'tcx> {
span: Span,
) -> bool;
+ fn type_is_sized_modulo_regions(
+ &self,
+ param_env: ty::ParamEnv<'tcx>,
+ ty: Ty<'tcx>,
+ span: Span,
+ ) -> bool;
+
fn partially_normalize_associated_types_in<T>(
&self,
cause: ObligationCause<'tcx>,
@@ -52,7 +59,7 @@ pub trait InferCtxtExt<'tcx> {
param_env: ty::ParamEnv<'tcx>,
) -> traits::EvaluationResult;
}
-impl<'cx, 'tcx> InferCtxtExt<'tcx> for InferCtxt<'cx, 'tcx> {
+impl<'tcx> InferCtxtExt<'tcx> for InferCtxt<'tcx> {
fn type_is_copy_modulo_regions(
&self,
param_env: ty::ParamEnv<'tcx>,
@@ -62,7 +69,7 @@ impl<'cx, 'tcx> InferCtxtExt<'tcx> for InferCtxt<'cx, 'tcx> {
let ty = self.resolve_vars_if_possible(ty);
if !(param_env, ty).needs_infer() {
- return ty.is_copy_modulo_regions(self.tcx.at(span), param_env);
+ return ty.is_copy_modulo_regions(self.tcx, param_env);
}
let copy_def_id = self.tcx.require_lang_item(LangItem::Copy, None);
@@ -74,6 +81,16 @@ impl<'cx, 'tcx> InferCtxtExt<'tcx> for InferCtxt<'cx, 'tcx> {
traits::type_known_to_meet_bound_modulo_regions(self, param_env, ty, copy_def_id, span)
}
+ fn type_is_sized_modulo_regions(
+ &self,
+ param_env: ty::ParamEnv<'tcx>,
+ ty: Ty<'tcx>,
+ span: Span,
+ ) -> bool {
+ let lang_item = self.tcx.require_lang_item(LangItem::Sized, None);
+ traits::type_known_to_meet_bound_modulo_regions(self, param_env, ty, lang_item, span)
+ }
+
/// Normalizes associated types in `value`, potentially returning
/// new obligations that must further be processed.
fn partially_normalize_associated_types_in<T>(
@@ -125,7 +142,7 @@ pub trait InferCtxtBuilderExt<'tcx> {
fn enter_canonical_trait_query<K, R>(
&mut self,
canonical_key: &Canonical<'tcx, K>,
- operation: impl FnOnce(&InferCtxt<'_, 'tcx>, &mut dyn TraitEngine<'tcx>, K) -> Fallible<R>,
+ operation: impl FnOnce(&InferCtxt<'tcx>, &mut dyn TraitEngine<'tcx>, K) -> Fallible<R>,
) -> Fallible<CanonicalizedQueryResponse<'tcx, R>>
where
K: TypeFoldable<'tcx>,
@@ -153,25 +170,17 @@ impl<'tcx> InferCtxtBuilderExt<'tcx> for InferCtxtBuilder<'tcx> {
fn enter_canonical_trait_query<K, R>(
&mut self,
canonical_key: &Canonical<'tcx, K>,
- operation: impl FnOnce(&InferCtxt<'_, 'tcx>, &mut dyn TraitEngine<'tcx>, K) -> Fallible<R>,
+ operation: impl FnOnce(&InferCtxt<'tcx>, &mut dyn TraitEngine<'tcx>, K) -> Fallible<R>,
) -> Fallible<CanonicalizedQueryResponse<'tcx, R>>
where
K: TypeFoldable<'tcx>,
R: Debug + TypeFoldable<'tcx>,
Canonical<'tcx, QueryResponse<'tcx, R>>: ArenaAllocatable<'tcx>,
{
- self.enter_with_canonical(
- DUMMY_SP,
- canonical_key,
- |ref infcx, key, canonical_inference_vars| {
- let mut fulfill_cx = <dyn TraitEngine<'_>>::new(infcx.tcx);
- let value = operation(infcx, &mut *fulfill_cx, key)?;
- infcx.make_canonicalized_query_response(
- canonical_inference_vars,
- value,
- &mut *fulfill_cx,
- )
- },
- )
+ let (ref infcx, key, canonical_inference_vars) =
+ self.build_with_canonical(DUMMY_SP, canonical_key);
+ let mut fulfill_cx = <dyn TraitEngine<'_>>::new(infcx.tcx);
+ let value = operation(infcx, &mut *fulfill_cx, key)?;
+ infcx.make_canonicalized_query_response(canonical_inference_vars, value, &mut *fulfill_cx)
}
}
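Most hunks in this file (and in the files below) apply the same two mechanical changes: `InferCtxt<'a, 'tcx>` loses its first lifetime parameter, and the closure-scoped `infer_ctxt().enter(|infcx| ...)` becomes `infer_ctxt().build()`, which hands the context to the caller. A minimal std-only analogy of the enter-to-build change, using hypothetical types rather than rustc's:

    struct Ctxt { depth: usize }

    struct CtxtBuilder { depth: usize }

    impl CtxtBuilder {
        // Old shape: the context only exists inside the callback, so results
        // and early returns must be threaded out of the closure.
        fn enter<R>(self, f: impl FnOnce(&Ctxt) -> R) -> R {
            f(&Ctxt { depth: self.depth })
        }
        // New shape: the caller owns the context and control flow stays flat,
        // so `return` and `?` work directly.
        fn build(self) -> Ctxt {
            Ctxt { depth: self.depth }
        }
    }

    fn main() {
        let old = CtxtBuilder { depth: 0 }.enter(|ctxt| ctxt.depth);
        let ctxt = CtxtBuilder { depth: 0 }.build();
        assert_eq!(old, ctxt.depth);
    }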
diff --git a/compiler/rustc_trait_selection/src/lib.rs b/compiler/rustc_trait_selection/src/lib.rs
index 282ee632c..5d52aa075 100644
--- a/compiler/rustc_trait_selection/src/lib.rs
+++ b/compiler/rustc_trait_selection/src/lib.rs
@@ -16,11 +16,10 @@
#![feature(control_flow_enum)]
#![feature(drain_filter)]
#![feature(hash_drain_filter)]
-#![feature(label_break_value)]
#![feature(let_chains)]
-#![feature(let_else)]
#![feature(if_let_guard)]
#![feature(never_type)]
+#![feature(type_alias_impl_trait)]
#![recursion_limit = "512"] // For rustdoc
#[macro_use]
@@ -36,5 +35,6 @@ extern crate rustc_middle;
extern crate smallvec;
pub mod autoderef;
+pub mod errors;
pub mod infer;
pub mod traits;
diff --git a/compiler/rustc_trait_selection/src/traits/auto_trait.rs b/compiler/rustc_trait_selection/src/traits/auto_trait.rs
index 294c81d0b..ed34ab95a 100644
--- a/compiler/rustc_trait_selection/src/traits/auto_trait.rs
+++ b/compiler/rustc_trait_selection/src/traits/auto_trait.rs
@@ -3,13 +3,14 @@
use super::*;
+use crate::errors::UnableToConstructConstantValue;
use crate::infer::region_constraints::{Constraint, RegionConstraintData};
use crate::infer::InferCtxt;
use crate::traits::project::ProjectAndUnifyResult;
use rustc_middle::mir::interpret::ErrorHandled;
use rustc_middle::ty::fold::{TypeFolder, TypeSuperFoldable};
use rustc_middle::ty::visit::TypeVisitable;
-use rustc_middle::ty::{Region, RegionVid, Term};
+use rustc_middle::ty::{PolyTraitRef, Region, RegionVid};
use rustc_data_structures::fx::{FxHashMap, FxHashSet};
@@ -65,13 +66,13 @@ impl<'tcx> AutoTraitFinder<'tcx> {
/// struct Foo<T> { data: Box<T> }
/// ```
///
- /// then this might return that Foo<T>: Send if T: Send (encoded in the AutoTraitResult type).
- /// The analysis attempts to account for custom impls as well as other complex cases. This
- /// result is intended for use by rustdoc and other such consumers.
+ /// then this might return that `Foo<T>: Send` if `T: Send` (encoded in the AutoTraitResult
+ /// type). The analysis attempts to account for custom impls as well as other complex cases.
+ /// This result is intended for use by rustdoc and other such consumers.
///
/// (Note that due to the coinductive nature of Send, the full and correct result is actually
/// quite simple to generate. That is, when a type has no custom impl, it is Send iff its field
- /// types are all Send. So, in our example, we might have that Foo<T>: Send if Box<T>: Send.
+ /// types are all Send. So, in our example, we might have that `Foo<T>: Send` if `Box<T>: Send`.
/// But this is often not the best way to present to the user.)
///
/// Warning: The API should be considered highly unstable, and it may be refactored or removed
@@ -89,145 +90,105 @@ impl<'tcx> AutoTraitFinder<'tcx> {
let trait_pred = ty::Binder::dummy(trait_ref);
- let bail_out = tcx.infer_ctxt().enter(|infcx| {
- let mut selcx = SelectionContext::new(&infcx);
- let result = selcx.select(&Obligation::new(
- ObligationCause::dummy(),
- orig_env,
- trait_pred.to_poly_trait_predicate(),
- ));
-
- match result {
- Ok(Some(ImplSource::UserDefined(_))) => {
- debug!(
- "find_auto_trait_generics({:?}): \
- manual impl found, bailing out",
- trait_ref
- );
- return true;
- }
- _ => {}
- }
-
- let result = selcx.select(&Obligation::new(
- ObligationCause::dummy(),
- orig_env,
- trait_pred.to_poly_trait_predicate_negative_polarity(),
- ));
-
- match result {
- Ok(Some(ImplSource::UserDefined(_))) => {
- debug!(
- "find_auto_trait_generics({:?}): \
- manual impl found, bailing out",
- trait_ref
- );
- true
- }
- _ => false,
+ let infcx = tcx.infer_ctxt().build();
+ let mut selcx = SelectionContext::new(&infcx);
+ for f in [
+ PolyTraitRef::to_poly_trait_predicate,
+ PolyTraitRef::to_poly_trait_predicate_negative_polarity,
+ ] {
+ let result =
+ selcx.select(&Obligation::new(ObligationCause::dummy(), orig_env, f(&trait_pred)));
+ if let Ok(Some(ImplSource::UserDefined(_))) = result {
+ debug!(
+ "find_auto_trait_generics({:?}): \
+ manual impl found, bailing out",
+ trait_ref
+ );
+ // If an explicit impl exists, it always takes priority over an auto impl
+ return AutoTraitResult::ExplicitImpl;
}
- });
-
- // If an explicit impl exists, it always takes priority over an auto impl
- if bail_out {
- return AutoTraitResult::ExplicitImpl;
}
- tcx.infer_ctxt().enter(|infcx| {
- let mut fresh_preds = FxHashSet::default();
+ let infcx = tcx.infer_ctxt().build();
+ let mut fresh_preds = FxHashSet::default();
+
+ // Due to the way projections are handled by SelectionContext, we need to run
+ // evaluate_predicates twice: once on the original param env, and once on the result of
+ // the first evaluate_predicates call.
+ //
+ // The problem is this: most of rustc, including SelectionContext and traits::project,
+ // are designed to work with a concrete usage of a type (e.g., Vec<u8>
+ // fn<T>() { Vec<T> }. This information will generally never change - given
+ // the 'T' in fn<T>() { ... }, we'll never know anything else about 'T'.
+ // If we're unable to prove that 'T' implements a particular trait, we're done -
+ // there's nothing left to do but error out.
+ //
+ // However, synthesizing an auto trait impl works differently. Here, we start out with
+ // a set of initial conditions - the ParamEnv of the struct/enum/union we're dealing
+ // with - and progressively discover the conditions we need to fulfill for it to
+ // implement a certain auto trait. This ends up breaking two assumptions made by trait
+ // selection and projection:
+ //
+ // * We can always cache the result of a particular trait selection for the lifetime of
+ // an InfCtxt
+ // * Given a projection bound such as '<T as SomeTrait>::SomeItem = K', if 'T:
+ // SomeTrait' doesn't hold, then we don't need to care about the 'SomeItem = K'
+ //
+ // We fix the first assumption by manually clearing out all of the InferCtxt's caches
+ // in between calls to SelectionContext.select. This allows us to keep all of the
+ // intermediate types we create bound to the 'tcx lifetime, rather than needing to lift
+ // them between calls.
+ //
+ // We fix the second assumption by reprocessing the result of our first call to
+ // evaluate_predicates. Using the example of '<T as SomeTrait>::SomeItem = K', our first
+ // pass will pick up 'T: SomeTrait', but not 'SomeItem = K'. On our second pass,
+ // traits::project will see that 'T: SomeTrait' is in our ParamEnv, allowing
+ // SelectionContext to return it back to us.
+
+ let Some((new_env, user_env)) = self.evaluate_predicates(
+ &infcx,
+ trait_did,
+ ty,
+ orig_env,
+ orig_env,
+ &mut fresh_preds,
+ false,
+ ) else {
+ return AutoTraitResult::NegativeImpl;
+ };
+
+ let (full_env, full_user_env) = self
+ .evaluate_predicates(&infcx, trait_did, ty, new_env, user_env, &mut fresh_preds, true)
+ .unwrap_or_else(|| {
+ panic!("Failed to fully process: {:?} {:?} {:?}", ty, trait_did, orig_env)
+ });
- // Due to the way projections are handled by SelectionContext, we need to run
- // evaluate_predicates twice: once on the original param env, and once on the result of
- // the first evaluate_predicates call.
- //
- // The problem is this: most of rustc, including SelectionContext and traits::project,
- // are designed to work with a concrete usage of a type (e.g., Vec<u8>
- // fn<T>() { Vec<T> }. This information will generally never change - given
- // the 'T' in fn<T>() { ... }, we'll never know anything else about 'T'.
- // If we're unable to prove that 'T' implements a particular trait, we're done -
- // there's nothing left to do but error out.
- //
- // However, synthesizing an auto trait impl works differently. Here, we start out with
- // a set of initial conditions - the ParamEnv of the struct/enum/union we're dealing
- // with - and progressively discover the conditions we need to fulfill for it to
- // implement a certain auto trait. This ends up breaking two assumptions made by trait
- // selection and projection:
- //
- // * We can always cache the result of a particular trait selection for the lifetime of
- // an InfCtxt
- // * Given a projection bound such as '<T as SomeTrait>::SomeItem = K', if 'T:
- // SomeTrait' doesn't hold, then we don't need to care about the 'SomeItem = K'
- //
- // We fix the first assumption by manually clearing out all of the InferCtxt's caches
- // in between calls to SelectionContext.select. This allows us to keep all of the
- // intermediate types we create bound to the 'tcx lifetime, rather than needing to lift
- // them between calls.
- //
- // We fix the second assumption by reprocessing the result of our first call to
- // evaluate_predicates. Using the example of '<T as SomeTrait>::SomeItem = K', our first
- // pass will pick up 'T: SomeTrait', but not 'SomeItem = K'. On our second pass,
- // traits::project will see that 'T: SomeTrait' is in our ParamEnv, allowing
- // SelectionContext to return it back to us.
-
- let Some((new_env, user_env)) = self.evaluate_predicates(
- &infcx,
- trait_did,
- ty,
- orig_env,
- orig_env,
- &mut fresh_preds,
- false,
- ) else {
- return AutoTraitResult::NegativeImpl;
- };
-
- let (full_env, full_user_env) = self
- .evaluate_predicates(
- &infcx,
- trait_did,
- ty,
- new_env,
- user_env,
- &mut fresh_preds,
- true,
- )
- .unwrap_or_else(|| {
- panic!("Failed to fully process: {:?} {:?} {:?}", ty, trait_did, orig_env)
- });
-
- debug!(
- "find_auto_trait_generics({:?}): fulfilling \
- with {:?}",
- trait_ref, full_env
- );
- infcx.clear_caches();
-
- // At this point, we already have all of the bounds we need. FulfillmentContext is used
- // to store all of the necessary region/lifetime bounds in the InferContext, as well as
- // an additional sanity check.
- let mut fulfill = <dyn TraitEngine<'tcx>>::new(tcx);
- fulfill.register_bound(&infcx, full_env, ty, trait_did, ObligationCause::dummy());
- let errors = fulfill.select_all_or_error(&infcx);
-
- if !errors.is_empty() {
- panic!("Unable to fulfill trait {:?} for '{:?}': {:?}", trait_did, ty, errors);
- }
+ debug!(
+ "find_auto_trait_generics({:?}): fulfilling \
+ with {:?}",
+ trait_ref, full_env
+ );
+ infcx.clear_caches();
+
+ // At this point, we already have all of the bounds we need. FulfillmentContext is used
+ // to store all of the necessary region/lifetime bounds in the InferContext, as well as
+ // an additional sanity check.
+ let errors =
+ super::fully_solve_bound(&infcx, ObligationCause::dummy(), full_env, ty, trait_did);
+ if !errors.is_empty() {
+ panic!("Unable to fulfill trait {:?} for '{:?}': {:?}", trait_did, ty, errors);
+ }
- infcx.process_registered_region_obligations(&Default::default(), full_env);
+ infcx.process_registered_region_obligations(&Default::default(), full_env);
- let region_data = infcx
- .inner
- .borrow_mut()
- .unwrap_region_constraints()
- .region_constraint_data()
- .clone();
+ let region_data =
+ infcx.inner.borrow_mut().unwrap_region_constraints().region_constraint_data().clone();
- let vid_to_region = self.map_vid_to_region(&region_data);
+ let vid_to_region = self.map_vid_to_region(&region_data);
- let info = AutoTraitInfo { full_user_env, region_data, vid_to_region };
+ let info = AutoTraitInfo { full_user_env, region_data, vid_to_region };
- AutoTraitResult::PositiveImpl(auto_trait_callback(info))
- })
+ AutoTraitResult::PositiveImpl(auto_trait_callback(info))
}
}
@@ -273,7 +234,7 @@ impl<'tcx> AutoTraitFinder<'tcx> {
/// user.
fn evaluate_predicates(
&self,
- infcx: &InferCtxt<'_, 'tcx>,
+ infcx: &InferCtxt<'tcx>,
trait_did: DefId,
ty: Ty<'tcx>,
param_env: ty::ParamEnv<'tcx>,
@@ -343,7 +304,7 @@ impl<'tcx> AutoTraitFinder<'tcx> {
}
}
- let obligations = impl_source.clone().nested_obligations().into_iter();
+ let obligations = impl_source.borrow_nested_obligations().iter().cloned();
if !self.evaluate_nested_obligations(
ty,
@@ -613,7 +574,7 @@ impl<'tcx> AutoTraitFinder<'tcx> {
}
fn is_self_referential_projection(&self, p: ty::PolyProjectionPredicate<'_>) -> bool {
- if let Term::Ty(ty) = p.term().skip_binder() {
+ if let Some(ty) = p.term().skip_binder().ty() {
matches!(ty.kind(), ty::Projection(proj) if proj == &p.skip_binder().projection_ty)
} else {
false
@@ -832,8 +793,11 @@ impl<'tcx> AutoTraitFinder<'tcx> {
Ok(None) => {
let tcx = self.tcx;
let def_id = unevaluated.def.did;
- let reported = tcx.sess.struct_span_err(tcx.def_span(def_id), &format!("unable to construct a constant value for the unevaluated constant {:?}", unevaluated)).emit();
-
+ let reported =
+ tcx.sess.emit_err(UnableToConstructConstantValue {
+ span: tcx.def_span(def_id),
+ unevaluated: unevaluated,
+ });
Err(ErrorHandled::Reported(reported))
}
Err(err) => Err(err),
@@ -875,7 +839,7 @@ impl<'tcx> AutoTraitFinder<'tcx> {
pub fn clean_pred(
&self,
- infcx: &InferCtxt<'_, 'tcx>,
+ infcx: &InferCtxt<'tcx>,
p: ty::Predicate<'tcx>,
) -> ty::Predicate<'tcx> {
infcx.freshen(p)
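The rewritten bail-out in this file folds two nearly identical `selcx.select` calls into one loop over the positive- and negative-polarity predicate constructors, returning `AutoTraitResult::ExplicitImpl` as soon as a user-written impl is found. A small, self-contained sketch of that iterate-over-function-pointers shape, with illustrative functions standing in for the real `PolyTraitRef` methods:

    fn positive(x: i32) -> i32 { x }
    fn negative(x: i32) -> i32 { -x }

    fn main() {
        // Both variants run through the same selection logic; in the compiler
        // this is where an early `return ExplicitImpl` fires on a manual impl.
        for f in [positive as fn(i32) -> i32, negative] {
            let predicate = f(42);
            println!("selecting with predicate {predicate}");
        }
    }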
diff --git a/compiler/rustc_trait_selection/src/traits/chalk_fulfill.rs b/compiler/rustc_trait_selection/src/traits/chalk_fulfill.rs
index 9ef7ac9a8..81e1d6449 100644
--- a/compiler/rustc_trait_selection/src/traits/chalk_fulfill.rs
+++ b/compiler/rustc_trait_selection/src/traits/chalk_fulfill.rs
@@ -28,7 +28,7 @@ impl FulfillmentContext<'_> {
impl<'tcx> TraitEngine<'tcx> for FulfillmentContext<'tcx> {
fn normalize_projection_type(
&mut self,
- infcx: &InferCtxt<'_, 'tcx>,
+ infcx: &InferCtxt<'tcx>,
_param_env: ty::ParamEnv<'tcx>,
projection_ty: ty::ProjectionTy<'tcx>,
_cause: ObligationCause<'tcx>,
@@ -38,7 +38,7 @@ impl<'tcx> TraitEngine<'tcx> for FulfillmentContext<'tcx> {
fn register_predicate_obligation(
&mut self,
- infcx: &InferCtxt<'_, 'tcx>,
+ infcx: &InferCtxt<'tcx>,
obligation: PredicateObligation<'tcx>,
) {
assert!(!infcx.is_in_snapshot());
@@ -49,7 +49,7 @@ impl<'tcx> TraitEngine<'tcx> for FulfillmentContext<'tcx> {
self.obligations.insert(obligation);
}
- fn select_all_or_error(&mut self, infcx: &InferCtxt<'_, 'tcx>) -> Vec<FulfillmentError<'tcx>> {
+ fn select_all_or_error(&mut self, infcx: &InferCtxt<'tcx>) -> Vec<FulfillmentError<'tcx>> {
{
let errors = self.select_where_possible(infcx);
@@ -71,10 +71,7 @@ impl<'tcx> TraitEngine<'tcx> for FulfillmentContext<'tcx> {
.collect()
}
- fn select_where_possible(
- &mut self,
- infcx: &InferCtxt<'_, 'tcx>,
- ) -> Vec<FulfillmentError<'tcx>> {
+ fn select_where_possible(&mut self, infcx: &InferCtxt<'tcx>) -> Vec<FulfillmentError<'tcx>> {
assert!(!infcx.is_in_snapshot());
let mut errors = Vec::new();
diff --git a/compiler/rustc_trait_selection/src/traits/codegen.rs b/compiler/rustc_trait_selection/src/traits/codegen.rs
index c0700748c..8a62bf015 100644
--- a/compiler/rustc_trait_selection/src/traits/codegen.rs
+++ b/compiler/rustc_trait_selection/src/traits/codegen.rs
@@ -4,10 +4,12 @@
// general routines.
use crate::infer::{DefiningAnchor, TyCtxtInferExt};
+use crate::traits::error_reporting::TypeErrCtxtExt;
use crate::traits::{
ImplSource, Obligation, ObligationCause, SelectionContext, TraitEngine, TraitEngineExt,
Unimplemented,
};
+use rustc_infer::traits::FulfillmentErrorCode;
use rustc_middle::traits::CodegenObligationError;
use rustc_middle::ty::{self, TyCtxt};
@@ -18,8 +20,7 @@ use rustc_middle::ty::{self, TyCtxt};
/// obligations *could be* resolved if we wanted to.
///
/// This also expects that `trait_ref` is fully normalized.
-#[instrument(level = "debug", skip(tcx))]
-pub fn codegen_fulfill_obligation<'tcx>(
+pub fn codegen_select_candidate<'tcx>(
tcx: TyCtxt<'tcx>,
(param_env, trait_ref): (ty::ParamEnv<'tcx>, ty::PolyTraitRef<'tcx>),
) -> Result<&'tcx ImplSource<'tcx, ()>, CodegenObligationError> {
@@ -28,53 +29,61 @@ pub fn codegen_fulfill_obligation<'tcx>(
// Do the initial selection for the obligation. This yields the
// shallow result we are looking for -- that is, what specific impl.
- let mut infcx_builder =
- tcx.infer_ctxt().ignoring_regions().with_opaque_type_inference(DefiningAnchor::Bubble);
- infcx_builder.enter(|infcx| {
- //~^ HACK `Bubble` is required for
- // this test to pass: type-alias-impl-trait/assoc-projection-ice.rs
- let mut selcx = SelectionContext::new(&infcx);
+ let infcx = tcx
+ .infer_ctxt()
+ .ignoring_regions()
+ .with_opaque_type_inference(DefiningAnchor::Bubble)
+ .build();
+ //~^ HACK `Bubble` is required for
+ // this test to pass: type-alias-impl-trait/assoc-projection-ice.rs
+ let mut selcx = SelectionContext::new(&infcx);
- let obligation_cause = ObligationCause::dummy();
- let obligation =
- Obligation::new(obligation_cause, param_env, trait_ref.to_poly_trait_predicate());
+ let obligation_cause = ObligationCause::dummy();
+ let obligation =
+ Obligation::new(obligation_cause, param_env, trait_ref.to_poly_trait_predicate());
- let selection = match selcx.select(&obligation) {
- Ok(Some(selection)) => selection,
- Ok(None) => return Err(CodegenObligationError::Ambiguity),
- Err(Unimplemented) => return Err(CodegenObligationError::Unimplemented),
- Err(e) => {
- bug!("Encountered error `{:?}` selecting `{:?}` during codegen", e, trait_ref)
- }
- };
+ let selection = match selcx.select(&obligation) {
+ Ok(Some(selection)) => selection,
+ Ok(None) => return Err(CodegenObligationError::Ambiguity),
+ Err(Unimplemented) => return Err(CodegenObligationError::Unimplemented),
+ Err(e) => {
+ bug!("Encountered error `{:?}` selecting `{:?}` during codegen", e, trait_ref)
+ }
+ };
- debug!(?selection);
+ debug!(?selection);
- // Currently, we use a fulfillment context to completely resolve
- // all nested obligations. This is because they can inform the
- // inference of the impl's type parameters.
- let mut fulfill_cx = <dyn TraitEngine<'tcx>>::new(tcx);
- let impl_source = selection.map(|predicate| {
- fulfill_cx.register_predicate_obligation(&infcx, predicate);
- });
+ // Currently, we use a fulfillment context to completely resolve
+ // all nested obligations. This is because they can inform the
+ // inference of the impl's type parameters.
+ let mut fulfill_cx = <dyn TraitEngine<'tcx>>::new(tcx);
+ let impl_source = selection.map(|predicate| {
+ fulfill_cx.register_predicate_obligation(&infcx, predicate);
+ });
- // In principle, we only need to do this so long as `impl_source`
- // contains unbound type parameters. It could be a slight
- // optimization to stop iterating early.
- let errors = fulfill_cx.select_all_or_error(&infcx);
- if !errors.is_empty() {
- return Err(CodegenObligationError::FulfillmentError);
+ // In principle, we only need to do this so long as `impl_source`
+ // contains unbound type parameters. It could be a slight
+ // optimization to stop iterating early.
+ let errors = fulfill_cx.select_all_or_error(&infcx);
+ if !errors.is_empty() {
+ // `rustc_monomorphize::collector` assumes there are no type errors.
+ // Cycle errors are the only post-monomorphization errors possible; emit them now so
+ // `rustc_ty_utils::resolve_associated_item` doesn't return `None` post-monomorphization.
+ for err in errors {
+ if let FulfillmentErrorCode::CodeCycle(cycle) = err.code {
+ infcx.err_ctxt().report_overflow_error_cycle(&cycle);
+ }
}
+ return Err(CodegenObligationError::FulfillmentError);
+ }
- let impl_source = infcx.resolve_vars_if_possible(impl_source);
- let impl_source = infcx.tcx.erase_regions(impl_source);
+ let impl_source = infcx.resolve_vars_if_possible(impl_source);
+ let impl_source = infcx.tcx.erase_regions(impl_source);
- // Opaque types may have gotten their hidden types constrained, but we can ignore them safely
- // as they will get constrained elsewhere, too.
- // (ouz-a) This is required for `type-alias-impl-trait/assoc-projection-ice.rs` to pass
- let _ = infcx.inner.borrow_mut().opaque_type_storage.take_opaque_types();
+ // Opaque types may have gotten their hidden types constrained, but we can ignore them safely
+ // as they will get constrained elsewhere, too.
+ // (ouz-a) This is required for `type-alias-impl-trait/assoc-projection-ice.rs` to pass
+ let _ = infcx.inner.borrow_mut().opaque_type_storage.take_opaque_types();
- debug!("Cache miss: {trait_ref:?} => {impl_source:?}");
- Ok(&*tcx.arena.alloc(impl_source))
- })
+ Ok(&*tcx.arena.alloc(impl_source))
}
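The new error handling above filters the fulfillment errors for cycle errors and reports those eagerly before returning the generic `FulfillmentError` code, since cycles are the only post-monomorphization errors expected here. A std-only sketch of that filter-then-fail shape, with made-up error types:

    enum ErrKind { Cycle(Vec<&'static str>), Other }

    fn report_cycle(cycle: &[&'static str]) {
        eprintln!("cycle detected: {cycle:?}");
    }

    fn select(errors: Vec<ErrKind>) -> Result<(), &'static str> {
        if !errors.is_empty() {
            // Surface the one kind that must be reported eagerly...
            for err in &errors {
                if let ErrKind::Cycle(cycle) = err {
                    report_cycle(cycle);
                }
            }
            // ...then fall back to a generic failure code for the rest.
            return Err("FulfillmentError");
        }
        Ok(())
    }

    fn main() {
        let errs = vec![ErrKind::Cycle(vec!["A", "B", "A"]), ErrKind::Other];
        assert!(select(errs).is_err());
    }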
diff --git a/compiler/rustc_trait_selection/src/traits/coherence.rs b/compiler/rustc_trait_selection/src/traits/coherence.rs
index 1c8cdf4ca..8aab75490 100644
--- a/compiler/rustc_trait_selection/src/traits/coherence.rs
+++ b/compiler/rustc_trait_selection/src/traits/coherence.rs
@@ -6,21 +6,22 @@
use crate::infer::outlives::env::OutlivesEnvironment;
use crate::infer::{CombinedSnapshot, InferOk};
+use crate::traits::outlives_bounds::InferCtxtExt as _;
use crate::traits::select::IntercrateAmbiguityCause;
use crate::traits::util::impl_subject_and_oblig;
use crate::traits::SkipLeakCheck;
use crate::traits::{
- self, FulfillmentContext, Normalized, Obligation, ObligationCause, PredicateObligation,
- PredicateObligations, SelectionContext, TraitEngineExt,
+ self, Normalized, Obligation, ObligationCause, ObligationCtxt, PredicateObligation,
+ PredicateObligations, SelectionContext,
};
use rustc_data_structures::fx::FxIndexSet;
use rustc_errors::Diagnostic;
-use rustc_hir::def_id::{DefId, LOCAL_CRATE};
+use rustc_hir::def_id::{DefId, CRATE_DEF_ID, LOCAL_CRATE};
+use rustc_hir::CRATE_HIR_ID;
use rustc_infer::infer::{InferCtxt, TyCtxtInferExt};
-use rustc_infer::traits::{util, TraitEngine};
+use rustc_infer::traits::util;
use rustc_middle::traits::specialization_graph::OverlapMode;
use rustc_middle::ty::fast_reject::{DeepRejectCtxt, TreatParams};
-use rustc_middle::ty::subst::Subst;
use rustc_middle::ty::visit::TypeVisitable;
use rustc_middle::ty::{self, ImplSubject, Ty, TyCtxt, TypeVisitor};
use rustc_span::symbol::sym;
@@ -59,23 +60,17 @@ pub fn add_placeholder_note(err: &mut Diagnostic) {
);
}
-/// If there are types that satisfy both impls, invokes `on_overlap`
+/// If there are types that satisfy both impls, returns `Some`
/// with a suitably-freshened `ImplHeader` with those types
-/// substituted. Otherwise, invokes `no_overlap`.
-#[instrument(skip(tcx, skip_leak_check, on_overlap, no_overlap), level = "debug")]
-pub fn overlapping_impls<F1, F2, R>(
+/// substituted. Otherwise, returns `None`.
+#[instrument(skip(tcx, skip_leak_check), level = "debug")]
+pub fn overlapping_impls(
tcx: TyCtxt<'_>,
impl1_def_id: DefId,
impl2_def_id: DefId,
skip_leak_check: SkipLeakCheck,
overlap_mode: OverlapMode,
- on_overlap: F1,
- no_overlap: F2,
-) -> R
-where
- F1: FnOnce(OverlapResult<'_>) -> R,
- F2: FnOnce() -> R,
-{
+) -> Option<OverlapResult<'_>> {
// Before doing expensive operations like entering an inference context, do
// a quick check via fast_reject to tell if the impl headers could possibly
// unify.
@@ -96,28 +91,24 @@ where
if !may_overlap {
// Some types involved are definitely different, so the impls couldn't possibly overlap.
debug!("overlapping_impls: fast_reject early-exit");
- return no_overlap();
+ return None;
}
- let overlaps = tcx.infer_ctxt().enter(|infcx| {
- let selcx = &mut SelectionContext::intercrate(&infcx);
- overlap(selcx, skip_leak_check, impl1_def_id, impl2_def_id, overlap_mode).is_some()
- });
-
+ let infcx = tcx.infer_ctxt().build();
+ let selcx = &mut SelectionContext::intercrate(&infcx);
+ let overlaps =
+ overlap(selcx, skip_leak_check, impl1_def_id, impl2_def_id, overlap_mode).is_some();
if !overlaps {
- return no_overlap();
+ return None;
}
// In the case where we detect an error, run the check again, but
// this time tracking intercrate ambiguity causes for better
// diagnostics. (These take time and can lead to false errors.)
- tcx.infer_ctxt().enter(|infcx| {
- let selcx = &mut SelectionContext::intercrate(&infcx);
- selcx.enable_tracking_intercrate_ambiguity_causes();
- on_overlap(
- overlap(selcx, skip_leak_check, impl1_def_id, impl2_def_id, overlap_mode).unwrap(),
- )
- })
+ let infcx = tcx.infer_ctxt().build();
+ let selcx = &mut SelectionContext::intercrate(&infcx);
+ selcx.enable_tracking_intercrate_ambiguity_causes();
+ Some(overlap(selcx, skip_leak_check, impl1_def_id, impl2_def_id, overlap_mode).unwrap())
}
fn with_fresh_ty_vars<'cx, 'tcx>(
@@ -166,7 +157,7 @@ fn overlap_within_probe<'cx, 'tcx>(
impl1_def_id: DefId,
impl2_def_id: DefId,
overlap_mode: OverlapMode,
- snapshot: &CombinedSnapshot<'_, 'tcx>,
+ snapshot: &CombinedSnapshot<'tcx>,
) -> Option<OverlapResult<'tcx>> {
let infcx = selcx.infcx();
@@ -196,7 +187,7 @@ fn overlap_within_probe<'cx, 'tcx>(
}
}
- // We disable the leak when when creating the `snapshot` by using
+ // We disable the leak when creating the `snapshot` by using
// `infcx.probe_maybe_disable_leak_check`.
if infcx.leak_check(true, snapshot).is_err() {
debug!("overlap: leak check failed");
@@ -297,36 +288,41 @@ fn negative_impl<'cx, 'tcx>(
let tcx = selcx.infcx().tcx;
// Create an infcx, taking the predicates of impl1 as assumptions:
- tcx.infer_ctxt().enter(|infcx| {
- // create a parameter environment corresponding to a (placeholder) instantiation of impl1
- let impl_env = tcx.param_env(impl1_def_id);
- let subject1 = match traits::fully_normalize(
- &infcx,
- FulfillmentContext::new(),
- ObligationCause::dummy(),
- impl_env,
- tcx.impl_subject(impl1_def_id),
- ) {
- Ok(s) => s,
- Err(err) => bug!("failed to fully normalize {:?}: {:?}", impl1_def_id, err),
- };
+ let infcx = tcx.infer_ctxt().build();
+ // create a parameter environment corresponding to a (placeholder) instantiation of impl1
+ let impl_env = tcx.param_env(impl1_def_id);
+ let subject1 = match traits::fully_normalize(
+ &infcx,
+ ObligationCause::dummy(),
+ impl_env,
+ tcx.impl_subject(impl1_def_id),
+ ) {
+ Ok(s) => s,
+ Err(err) => {
+ tcx.sess.delay_span_bug(
+ tcx.def_span(impl1_def_id),
+ format!("failed to fully normalize {:?}: {:?}", impl1_def_id, err),
+ );
+ return false;
+ }
+ };
- // Attempt to prove that impl2 applies, given all of the above.
- let selcx = &mut SelectionContext::new(&infcx);
- let impl2_substs = infcx.fresh_substs_for_item(DUMMY_SP, impl2_def_id);
- let (subject2, obligations) =
- impl_subject_and_oblig(selcx, impl_env, impl2_def_id, impl2_substs);
+ // Attempt to prove that impl2 applies, given all of the above.
+ let selcx = &mut SelectionContext::new(&infcx);
+ let impl2_substs = infcx.fresh_substs_for_item(DUMMY_SP, impl2_def_id);
+ let (subject2, obligations) =
+ impl_subject_and_oblig(selcx, impl_env, impl2_def_id, impl2_substs);
- !equate(&infcx, impl_env, subject1, subject2, obligations)
- })
+ !equate(&infcx, impl_env, subject1, subject2, obligations, impl1_def_id)
}
-fn equate<'cx, 'tcx>(
- infcx: &InferCtxt<'cx, 'tcx>,
+fn equate<'tcx>(
+ infcx: &InferCtxt<'tcx>,
impl_env: ty::ParamEnv<'tcx>,
subject1: ImplSubject<'tcx>,
subject2: ImplSubject<'tcx>,
obligations: impl Iterator<Item = PredicateObligation<'tcx>>,
+ body_def_id: DefId,
) -> bool {
// do the impls unify? If not, not disjoint.
let Ok(InferOk { obligations: more_obligations, .. }) =
@@ -340,7 +336,7 @@ fn equate<'cx, 'tcx>(
let opt_failing_obligation = obligations
.into_iter()
.chain(more_obligations)
- .find(|o| negative_impl_exists(selcx, impl_env, o));
+ .find(|o| negative_impl_exists(selcx, o, body_def_id));
if let Some(failing_obligation) = opt_failing_obligation {
debug!("overlap: obligation unsatisfiable {:?}", failing_obligation);
@@ -354,18 +350,16 @@ fn equate<'cx, 'tcx>(
#[instrument(level = "debug", skip(selcx))]
fn negative_impl_exists<'cx, 'tcx>(
selcx: &SelectionContext<'cx, 'tcx>,
- param_env: ty::ParamEnv<'tcx>,
o: &PredicateObligation<'tcx>,
+ body_def_id: DefId,
) -> bool {
- let infcx = &selcx.infcx().fork();
-
- if resolve_negative_obligation(infcx, param_env, o) {
+ if resolve_negative_obligation(selcx.infcx().fork(), o, body_def_id) {
return true;
}
// Try to prove a negative obligation exists for super predicates
- for o in util::elaborate_predicates(infcx.tcx, iter::once(o.predicate)) {
- if resolve_negative_obligation(infcx, param_env, &o) {
+ for o in util::elaborate_predicates(selcx.tcx(), iter::once(o.predicate)) {
+ if resolve_negative_obligation(selcx.infcx().fork(), &o, body_def_id) {
return true;
}
}
@@ -374,10 +368,10 @@ fn negative_impl_exists<'cx, 'tcx>(
}
#[instrument(level = "debug", skip(infcx))]
-fn resolve_negative_obligation<'cx, 'tcx>(
- infcx: &InferCtxt<'cx, 'tcx>,
- param_env: ty::ParamEnv<'tcx>,
+fn resolve_negative_obligation<'tcx>(
+ infcx: InferCtxt<'tcx>,
o: &PredicateObligation<'tcx>,
+ body_def_id: DefId,
) -> bool {
let tcx = infcx.tcx;
@@ -385,17 +379,25 @@ fn resolve_negative_obligation<'cx, 'tcx>(
return false;
};
- let mut fulfillment_cx = <dyn TraitEngine<'tcx>>::new(infcx.tcx);
- fulfillment_cx.register_predicate_obligation(infcx, o);
-
- let errors = fulfillment_cx.select_all_or_error(infcx);
-
- if !errors.is_empty() {
+ let param_env = o.param_env;
+ if !super::fully_solve_obligation(&infcx, o).is_empty() {
return false;
}
- // FIXME -- also add "assumed to be well formed" types into the `outlives_env`
- let outlives_env = OutlivesEnvironment::new(param_env);
+ let (body_id, body_def_id) = if let Some(body_def_id) = body_def_id.as_local() {
+ (tcx.hir().local_def_id_to_hir_id(body_def_id), body_def_id)
+ } else {
+ (CRATE_HIR_ID, CRATE_DEF_ID)
+ };
+
+ let ocx = ObligationCtxt::new(&infcx);
+ let wf_tys = ocx.assumed_wf_types(param_env, DUMMY_SP, body_def_id);
+ let outlives_env = OutlivesEnvironment::with_bounds(
+ param_env,
+ Some(&infcx),
+ infcx.implied_bounds_tys(param_env, body_id, wf_tys),
+ );
+
infcx.process_registered_region_obligations(outlives_env.region_bound_pairs(), param_env);
infcx.resolve_regions(&outlives_env).is_empty()
@@ -404,12 +406,12 @@ fn resolve_negative_obligation<'cx, 'tcx>(
pub fn trait_ref_is_knowable<'tcx>(
tcx: TyCtxt<'tcx>,
trait_ref: ty::TraitRef<'tcx>,
-) -> Option<Conflict> {
+) -> Result<(), Conflict> {
debug!("trait_ref_is_knowable(trait_ref={:?})", trait_ref);
if orphan_check_trait_ref(tcx, trait_ref, InCrate::Remote).is_ok() {
// A downstream or cousin crate is allowed to implement some
// substitution of this trait-ref.
- return Some(Conflict::Downstream);
+ return Err(Conflict::Downstream);
}
if trait_ref_is_local_or_fundamental(tcx, trait_ref) {
@@ -418,7 +420,7 @@ pub fn trait_ref_is_knowable<'tcx>(
// allowed to implement a substitution of this trait ref, which
// means impls could only come from dependencies of this crate,
// which we already know about.
- return None;
+ return Ok(());
}
// This is a remote non-fundamental trait, so if another crate
@@ -431,10 +433,10 @@ pub fn trait_ref_is_knowable<'tcx>(
// we are an owner.
if orphan_check_trait_ref(tcx, trait_ref, InCrate::Local).is_ok() {
debug!("trait_ref_is_knowable: orphan check passed");
- None
+ Ok(())
} else {
debug!("trait_ref_is_knowable: nonlocal, nonfundamental, unowned");
- Some(Conflict::Upstream)
+ Err(Conflict::Upstream)
}
}
@@ -740,7 +742,21 @@ impl<'tcx> TypeVisitor<'tcx> for OrphanChecker<'tcx> {
result
}
- // FIXME: Constants should participate in orphan checking.
+ /// All possible values for a constant parameter already exist
+ /// in the crate defining the trait, so they are always non-local[^1].
+ ///
+ /// Because there's no way to have an impl where the first local
+ /// generic argument is a constant, we also don't have to fail
+ /// the orphan check when encountering a parameter or a generic constant.
+ ///
+ /// This means that we can completely ignore constants during the orphan check.
+ ///
+ /// See `src/test/ui/coherence/const-generics-orphan-check-ok.rs` for examples.
+ ///
+ /// [^1]: This might not hold for function pointers or trait objects in the future.
+ /// As these should be quite rare as const arguments and especially rare as impl
+ /// parameters, allowing uncovered const parameters in impls seems more useful
+ /// than allowing `impl<T> Trait<local_fn_ptr, T> for i32` to compile.
fn visit_const(&mut self, _c: ty::Const<'tcx>) -> ControlFlow<Self::BreakTy> {
ControlFlow::CONTINUE
}
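Two signature changes in this file replace callback- and `Option`-based control flow with plain return values: `overlapping_impls` now returns `Option<OverlapResult>` instead of taking `on_overlap`/`no_overlap` closures, and `trait_ref_is_knowable` returns `Result<(), Conflict>` so the conflicting case composes with `?`. A compact, std-only illustration with hypothetical checks:

    #[derive(Debug)]
    enum Conflict { Upstream, Downstream }

    fn knowable(local: bool, remote_may_impl: bool) -> Result<(), Conflict> {
        if remote_may_impl {
            return Err(Conflict::Downstream);
        }
        if local { Ok(()) } else { Err(Conflict::Upstream) }
    }

    fn check() -> Result<(), Conflict> {
        // With `Result`, callers chain checks with `?` instead of matching
        // `Some(conflict)` at every call site.
        knowable(true, false)?;
        Ok(())
    }

    fn main() {
        assert!(check().is_ok());
        assert!(matches!(knowable(false, true), Err(Conflict::Downstream)));
    }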
diff --git a/compiler/rustc_trait_selection/src/traits/const_evaluatable.rs b/compiler/rustc_trait_selection/src/traits/const_evaluatable.rs
index 254bc4ab6..84038625f 100644
--- a/compiler/rustc_trait_selection/src/traits/const_evaluatable.rs
+++ b/compiler/rustc_trait_selection/src/traits/const_evaluatable.rs
@@ -9,14 +9,12 @@
//! `thir_abstract_const` which can then be checked for structural equality with other
//! generic constants mentioned in the `caller_bounds` of the current environment.
use rustc_errors::ErrorGuaranteed;
-use rustc_hir::def::DefKind;
use rustc_infer::infer::InferCtxt;
use rustc_middle::mir::interpret::ErrorHandled;
use rustc_middle::ty::abstract_const::{
walk_abstract_const, AbstractConst, FailureKind, Node, NotConstEvaluatable,
};
use rustc_middle::ty::{self, TyCtxt, TypeVisitable};
-use rustc_session::lint;
use rustc_span::Span;
use std::iter;
@@ -101,7 +99,7 @@ impl<'tcx> ConstUnifyCtxt<'tcx> {
a_uv == b_uv
}
// FIXME(generic_const_exprs): We may want to either actually try
- // to evaluate `a_ct` and `b_ct` if they are are fully concrete or something like
+ // to evaluate `a_ct` and `b_ct` if they are fully concrete or something like
// this, for now we just return false here.
_ => false,
}
@@ -138,7 +136,7 @@ impl<'tcx> ConstUnifyCtxt<'tcx> {
#[instrument(skip(tcx), level = "debug")]
pub fn try_unify_abstract_consts<'tcx>(
tcx: TyCtxt<'tcx>,
- (a, b): (ty::Unevaluated<'tcx, ()>, ty::Unevaluated<'tcx, ()>),
+ (a, b): (ty::UnevaluatedConst<'tcx>, ty::UnevaluatedConst<'tcx>),
param_env: ty::ParamEnv<'tcx>,
) -> bool {
(|| {
@@ -159,13 +157,22 @@ pub fn try_unify_abstract_consts<'tcx>(
/// Check if a given constant can be evaluated.
#[instrument(skip(infcx), level = "debug")]
-pub fn is_const_evaluatable<'cx, 'tcx>(
- infcx: &InferCtxt<'cx, 'tcx>,
- uv: ty::Unevaluated<'tcx, ()>,
+pub fn is_const_evaluatable<'tcx>(
+ infcx: &InferCtxt<'tcx>,
+ ct: ty::Const<'tcx>,
param_env: ty::ParamEnv<'tcx>,
span: Span,
) -> Result<(), NotConstEvaluatable> {
let tcx = infcx.tcx;
+ let uv = match ct.kind() {
+ ty::ConstKind::Unevaluated(uv) => uv,
+ ty::ConstKind::Param(_)
+ | ty::ConstKind::Bound(_, _)
+ | ty::ConstKind::Placeholder(_)
+ | ty::ConstKind::Value(_)
+ | ty::ConstKind::Error(_) => return Ok(()),
+ ty::ConstKind::Infer(_) => return Err(NotConstEvaluatable::MentionsInfer),
+ };
if tcx.features().generic_const_exprs {
if let Some(ct) = AbstractConst::new(tcx, uv)? {
@@ -183,7 +190,7 @@ pub fn is_const_evaluatable<'cx, 'tcx>(
FailureKind::Concrete => {}
}
}
- let concrete = infcx.const_eval_resolve(param_env, uv.expand(), Some(span));
+ let concrete = infcx.const_eval_resolve(param_env, uv, Some(span));
match concrete {
Err(ErrorHandled::TooGeneric) => {
Err(NotConstEvaluatable::Error(infcx.tcx.sess.delay_span_bug(
@@ -210,7 +217,7 @@ pub fn is_const_evaluatable<'cx, 'tcx>(
// and hopefully soon change this to an error.
//
// See #74595 for more details about this.
- let concrete = infcx.const_eval_resolve(param_env, uv.expand(), Some(span));
+ let concrete = infcx.const_eval_resolve(param_env, uv, Some(span));
match concrete {
// If we're evaluating a foreign constant, under a nightly compiler without generic
@@ -235,39 +242,25 @@ pub fn is_const_evaluatable<'cx, 'tcx>(
.emit()
}
- Err(ErrorHandled::TooGeneric) => Err(if uv.has_infer_types_or_consts() {
- NotConstEvaluatable::MentionsInfer
- } else if uv.has_param_types_or_consts() {
- NotConstEvaluatable::MentionsParam
- } else {
- let guar = infcx.tcx.sess.delay_span_bug(span, format!("Missing value for constant, but no error reported?"));
- NotConstEvaluatable::Error(guar)
- }),
+ Err(ErrorHandled::TooGeneric) => {
+ let err = if uv.has_non_region_infer() {
+ NotConstEvaluatable::MentionsInfer
+ } else if uv.has_non_region_param() {
+ NotConstEvaluatable::MentionsParam
+ } else {
+ let guar = infcx.tcx.sess.delay_span_bug(span, format!("Missing value for constant, but no error reported?"));
+ NotConstEvaluatable::Error(guar)
+ };
+
+ Err(err)
+ },
Err(ErrorHandled::Linted) => {
let reported =
infcx.tcx.sess.delay_span_bug(span, "constant in type had error reported as lint");
Err(NotConstEvaluatable::Error(reported))
}
Err(ErrorHandled::Reported(e)) => Err(NotConstEvaluatable::Error(e)),
- Ok(_) => {
- if uv.substs.has_param_types_or_consts() {
- assert!(matches!(infcx.tcx.def_kind(uv.def.did), DefKind::AnonConst));
- let mir_body = infcx.tcx.mir_for_ctfe_opt_const_arg(uv.def);
-
- if mir_body.is_polymorphic {
- let Some(local_def_id) = uv.def.did.as_local() else { return Ok(()) };
- tcx.struct_span_lint_hir(
- lint::builtin::CONST_EVALUATABLE_UNCHECKED,
- tcx.hir().local_def_id_to_hir_id(local_def_id),
- span,
- |err| {
- err.build("cannot use constants which depend on generic parameters in types").emit();
- })
- }
- }
-
- Ok(())
- },
+ Ok(_) => Ok(()),
}
}
}
@@ -281,7 +274,7 @@ fn satisfied_from_param_env<'tcx>(
for pred in param_env.caller_bounds() {
match pred.kind().skip_binder() {
ty::PredicateKind::ConstEvaluatable(uv) => {
- if let Some(b_ct) = AbstractConst::new(tcx, uv)? {
+ if let Some(b_ct) = AbstractConst::from_const(tcx, uv)? {
let const_unify_ctxt = ConstUnifyCtxt { tcx, param_env };
// Try to unify with each subtree in the AbstractConst to allow for
diff --git a/compiler/rustc_trait_selection/src/traits/engine.rs b/compiler/rustc_trait_selection/src/traits/engine.rs
index 6c177f638..e0c8deec9 100644
--- a/compiler/rustc_trait_selection/src/traits/engine.rs
+++ b/compiler/rustc_trait_selection/src/traits/engine.rs
@@ -3,7 +3,8 @@ use std::cell::RefCell;
use super::TraitEngine;
use super::{ChalkFulfillmentContext, FulfillmentContext};
use crate::infer::InferCtxtExt;
-use rustc_hir::def_id::DefId;
+use rustc_data_structures::fx::FxHashSet;
+use rustc_hir::def_id::{DefId, LocalDefId};
use rustc_infer::infer::{InferCtxt, InferOk};
use rustc_infer::traits::{
FulfillmentError, Obligation, ObligationCause, PredicateObligation, TraitEngineExt as _,
@@ -12,9 +13,11 @@ use rustc_middle::ty::error::TypeError;
use rustc_middle::ty::ToPredicate;
use rustc_middle::ty::TypeFoldable;
use rustc_middle::ty::{self, Ty, TyCtxt};
+use rustc_span::Span;
pub trait TraitEngineExt<'tcx> {
fn new(tcx: TyCtxt<'tcx>) -> Box<Self>;
+ fn new_in_snapshot(tcx: TyCtxt<'tcx>) -> Box<Self>;
}
impl<'tcx> TraitEngineExt<'tcx> for dyn TraitEngine<'tcx> {
@@ -25,20 +28,32 @@ impl<'tcx> TraitEngineExt<'tcx> for dyn TraitEngine<'tcx> {
Box::new(FulfillmentContext::new())
}
}
+
+ fn new_in_snapshot(tcx: TyCtxt<'tcx>) -> Box<Self> {
+ if tcx.sess.opts.unstable_opts.chalk {
+ Box::new(ChalkFulfillmentContext::new())
+ } else {
+ Box::new(FulfillmentContext::new_in_snapshot())
+ }
+ }
}
/// Used if you want to have pleasant experience when dealing
/// with obligations outside of hir or mir typeck.
pub struct ObligationCtxt<'a, 'tcx> {
- pub infcx: &'a InferCtxt<'a, 'tcx>,
+ pub infcx: &'a InferCtxt<'tcx>,
engine: RefCell<Box<dyn TraitEngine<'tcx>>>,
}
impl<'a, 'tcx> ObligationCtxt<'a, 'tcx> {
- pub fn new(infcx: &'a InferCtxt<'a, 'tcx>) -> Self {
+ pub fn new(infcx: &'a InferCtxt<'tcx>) -> Self {
Self { infcx, engine: RefCell::new(<dyn TraitEngine<'_>>::new(infcx.tcx)) }
}
+ pub fn new_in_snapshot(infcx: &'a InferCtxt<'tcx>) -> Self {
+ Self { infcx, engine: RefCell::new(<dyn TraitEngine<'_>>::new_in_snapshot(infcx.tcx)) }
+ }
+
pub fn register_obligation(&self, obligation: PredicateObligation<'tcx>) {
self.engine.borrow_mut().register_predicate_obligation(self.infcx, obligation);
}
@@ -109,4 +124,34 @@ impl<'a, 'tcx> ObligationCtxt<'a, 'tcx> {
pub fn select_all_or_error(&self) -> Vec<FulfillmentError<'tcx>> {
self.engine.borrow_mut().select_all_or_error(self.infcx)
}
+
+ pub fn assumed_wf_types(
+ &self,
+ param_env: ty::ParamEnv<'tcx>,
+ span: Span,
+ def_id: LocalDefId,
+ ) -> FxHashSet<Ty<'tcx>> {
+ let tcx = self.infcx.tcx;
+ let assumed_wf_types = tcx.assumed_wf_types(def_id);
+ let mut implied_bounds = FxHashSet::default();
+ let hir_id = tcx.hir().local_def_id_to_hir_id(def_id);
+ let cause = ObligationCause::misc(span, hir_id);
+ for ty in assumed_wf_types {
+ // FIXME(@lcnr): rustc currently does not check wf for types
+ // pre-normalization, meaning that implied bounds are sometimes
+ // incorrect. See #100910 for more details.
+ //
+ // Not adding the unnormalized types here mostly fixes that, except
+ // that there are projections which are still ambiguous in the item definition
+ // but do normalize successfully when using the item, see #98543.
+ //
+ // Anyways, I will hopefully soon change implied bounds to make all of this
+ // sound and then uncomment this line again.
+
+ // implied_bounds.insert(ty);
+ let normalized = self.normalize(cause.clone(), param_env, ty);
+ implied_bounds.insert(normalized);
+ }
+ implied_bounds
+ }
}
diff --git a/compiler/rustc_trait_selection/src/traits/error_reporting/mod.rs b/compiler/rustc_trait_selection/src/traits/error_reporting/mod.rs
index e442c5c91..1217d264a 100644
--- a/compiler/rustc_trait_selection/src/traits/error_reporting/mod.rs
+++ b/compiler/rustc_trait_selection/src/traits/error_reporting/mod.rs
@@ -2,10 +2,10 @@ pub mod on_unimplemented;
pub mod suggestions;
use super::{
- EvaluationResult, FulfillmentContext, FulfillmentError, FulfillmentErrorCode,
- MismatchedProjectionTypes, Obligation, ObligationCause, ObligationCauseCode,
- OnUnimplementedDirective, OnUnimplementedNote, OutputTypeParameterMismatch, Overflow,
- PredicateObligation, SelectionContext, SelectionError, TraitNotObjectSafe,
+ FulfillmentContext, FulfillmentError, FulfillmentErrorCode, MismatchedProjectionTypes,
+ Obligation, ObligationCause, ObligationCauseCode, OnUnimplementedDirective,
+ OnUnimplementedNote, OutputTypeParameterMismatch, Overflow, PredicateObligation,
+ SelectionContext, SelectionError, TraitNotObjectSafe,
};
use crate::infer::error_reporting::{TyCategory, TypeAnnotationNeeded as ErrorCode};
@@ -22,6 +22,8 @@ use rustc_hir::intravisit::Visitor;
use rustc_hir::GenericParam;
use rustc_hir::Item;
use rustc_hir::Node;
+use rustc_infer::infer::error_reporting::TypeErrCtxt;
+use rustc_infer::infer::TypeTrace;
use rustc_infer::traits::TraitEngine;
use rustc_middle::traits::select::OverflowError;
use rustc_middle::ty::abstract_const::NotConstEvaluatable;
@@ -31,6 +33,8 @@ use rustc_middle::ty::{
self, SubtypePredicate, ToPolyTraitRef, ToPredicate, TraitRef, Ty, TyCtxt, TypeFoldable,
TypeVisitable,
};
+use rustc_session::Limit;
+use rustc_span::def_id::LOCAL_CRATE;
use rustc_span::symbol::{kw, sym};
use rustc_span::{ExpnKind, Span, DUMMY_SP};
use std::fmt;
@@ -40,8 +44,8 @@ use std::ops::ControlFlow;
use crate::traits::query::evaluate_obligation::InferCtxtExt as _;
use crate::traits::query::normalize::AtExt as _;
use crate::traits::specialize::to_pretty_impl_header;
-use on_unimplemented::InferCtxtExt as _;
-use suggestions::InferCtxtExt as _;
+use on_unimplemented::TypeErrCtxtExt as _;
+use suggestions::TypeErrCtxtExt as _;
pub use rustc_infer::traits::error_reporting::*;
@@ -62,6 +66,37 @@ pub struct ImplCandidate<'tcx> {
}
pub trait InferCtxtExt<'tcx> {
+ /// Given some node representing a fn-like thing in the HIR map,
+ /// returns a span and `ArgKind` information that describes the
+ /// arguments it expects. This can be supplied to
+ /// `report_arg_count_mismatch`.
+ fn get_fn_like_arguments(&self, node: Node<'_>) -> Option<(Span, Vec<ArgKind>)>;
+
+ /// Reports an error when the number of arguments needed by a
+ /// trait match doesn't match the number that the expression
+ /// provides.
+ fn report_arg_count_mismatch(
+ &self,
+ span: Span,
+ found_span: Option<Span>,
+ expected_args: Vec<ArgKind>,
+ found_args: Vec<ArgKind>,
+ is_closure: bool,
+ ) -> DiagnosticBuilder<'tcx, ErrorGuaranteed>;
+
+ /// Checks if the type implements one of `Fn`, `FnMut`, or `FnOnce`
+ /// in that order, and returns the generic type corresponding to the
+ /// argument of that trait (corresponding to the closure arguments).
+ fn type_implements_fn_trait(
+ &self,
+ param_env: ty::ParamEnv<'tcx>,
+ ty: ty::Binder<'tcx, Ty<'tcx>>,
+ constness: ty::BoundConstness,
+ polarity: ty::ImplPolarity,
+ ) -> Result<(ty::ClosureKind, ty::Binder<'tcx, Ty<'tcx>>), ()>;
+}
+
+pub trait TypeErrCtxtExt<'tcx> {
fn report_fulfillment_errors(
&self,
errors: &[FulfillmentError<'tcx>],
@@ -77,6 +112,8 @@ pub trait InferCtxtExt<'tcx> {
where
T: fmt::Display + TypeFoldable<'tcx>;
+ fn suggest_new_overflow_limit(&self, err: &mut Diagnostic);
+
fn report_overflow_error_cycle(&self, cycle: &[PredicateObligation<'tcx>]) -> !;
/// The `root_obligation` parameter should be the `root_obligation` field
@@ -89,12 +126,71 @@ pub trait InferCtxtExt<'tcx> {
error: &SelectionError<'tcx>,
fallback_has_occurred: bool,
);
+}
+impl<'tcx> InferCtxtExt<'tcx> for InferCtxt<'tcx> {
/// Given some node representing a fn-like thing in the HIR map,
/// returns a span and `ArgKind` information that describes the
/// arguments it expects. This can be supplied to
/// `report_arg_count_mismatch`.
- fn get_fn_like_arguments(&self, node: Node<'_>) -> Option<(Span, Vec<ArgKind>)>;
+ fn get_fn_like_arguments(&self, node: Node<'_>) -> Option<(Span, Vec<ArgKind>)> {
+ let sm = self.tcx.sess.source_map();
+ let hir = self.tcx.hir();
+ Some(match node {
+ Node::Expr(&hir::Expr {
+ kind: hir::ExprKind::Closure(&hir::Closure { body, fn_decl_span, .. }),
+ ..
+ }) => (
+ fn_decl_span,
+ hir.body(body)
+ .params
+ .iter()
+ .map(|arg| {
+ if let hir::Pat { kind: hir::PatKind::Tuple(ref args, _), span, .. } =
+ *arg.pat
+ {
+ Some(ArgKind::Tuple(
+ Some(span),
+ args.iter()
+ .map(|pat| {
+ sm.span_to_snippet(pat.span)
+ .ok()
+ .map(|snippet| (snippet, "_".to_owned()))
+ })
+ .collect::<Option<Vec<_>>>()?,
+ ))
+ } else {
+ let name = sm.span_to_snippet(arg.pat.span).ok()?;
+ Some(ArgKind::Arg(name, "_".to_owned()))
+ }
+ })
+ .collect::<Option<Vec<ArgKind>>>()?,
+ ),
+ Node::Item(&hir::Item { kind: hir::ItemKind::Fn(ref sig, ..), .. })
+ | Node::ImplItem(&hir::ImplItem { kind: hir::ImplItemKind::Fn(ref sig, _), .. })
+ | Node::TraitItem(&hir::TraitItem {
+ kind: hir::TraitItemKind::Fn(ref sig, _), ..
+ }) => (
+ sig.span,
+ sig.decl
+ .inputs
+ .iter()
+ .map(|arg| match arg.kind {
+ hir::TyKind::Tup(ref tys) => ArgKind::Tuple(
+ Some(arg.span),
+ vec![("_".to_owned(), "_".to_owned()); tys.len()],
+ ),
+ _ => ArgKind::empty(),
+ })
+ .collect::<Vec<ArgKind>>(),
+ ),
+ Node::Ctor(ref variant_data) => {
+ let span = variant_data.ctor_hir_id().map_or(DUMMY_SP, |id| hir.span(id));
+ (span, vec![ArgKind::empty(); variant_data.fields().len()])
+ }
+ _ => panic!("non-FnLike node found: {:?}", node),
+ })
+ }
/// Reports an error when the number of arguments needed by a
/// trait match doesn't match the number that the expression
@@ -106,21 +202,175 @@ pub trait InferCtxtExt<'tcx> {
expected_args: Vec<ArgKind>,
found_args: Vec<ArgKind>,
is_closure: bool,
- ) -> DiagnosticBuilder<'tcx, ErrorGuaranteed>;
+ ) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> {
+ let kind = if is_closure { "closure" } else { "function" };
+
+ let args_str = |arguments: &[ArgKind], other: &[ArgKind]| {
+ let arg_length = arguments.len();
+ let distinct = matches!(other, &[ArgKind::Tuple(..)]);
+ match (arg_length, arguments.get(0)) {
+ (1, Some(&ArgKind::Tuple(_, ref fields))) => {
+ format!("a single {}-tuple as argument", fields.len())
+ }
+ _ => format!(
+ "{} {}argument{}",
+ arg_length,
+ if distinct && arg_length > 1 { "distinct " } else { "" },
+ pluralize!(arg_length)
+ ),
+ }
+ };
+
+ let expected_str = args_str(&expected_args, &found_args);
+ let found_str = args_str(&found_args, &expected_args);
+
+ let mut err = struct_span_err!(
+ self.tcx.sess,
+ span,
+ E0593,
+ "{} is expected to take {}, but it takes {}",
+ kind,
+ expected_str,
+ found_str,
+ );
+
+ err.span_label(span, format!("expected {} that takes {}", kind, expected_str));
+
+ if let Some(found_span) = found_span {
+ err.span_label(found_span, format!("takes {}", found_str));
+
+ // move |_| { ... }
+ // ^^^^^^^^-- def_span
+ //
+ // move |_| { ... }
+ // ^^^^^-- prefix
+ let prefix_span = self.tcx.sess.source_map().span_until_non_whitespace(found_span);
+ // move |_| { ... }
+ // ^^^-- pipe_span
+ let pipe_span =
+ if let Some(span) = found_span.trim_start(prefix_span) { span } else { found_span };
+
+ // Suggest to take and ignore the arguments with expected_args_length `_`s if
+ // found arguments is empty (assume the user just wants to ignore args in this case).
+ // For example, if `expected_args_length` is 2, suggest `|_, _|`.
+ if found_args.is_empty() && is_closure {
+ let underscores = vec!["_"; expected_args.len()].join(", ");
+ err.span_suggestion_verbose(
+ pipe_span,
+ &format!(
+ "consider changing the closure to take and ignore the expected argument{}",
+ pluralize!(expected_args.len())
+ ),
+ format!("|{}|", underscores),
+ Applicability::MachineApplicable,
+ );
+ }
+
+ if let &[ArgKind::Tuple(_, ref fields)] = &found_args[..] {
+ if fields.len() == expected_args.len() {
+ let sugg = fields
+ .iter()
+ .map(|(name, _)| name.to_owned())
+ .collect::<Vec<String>>()
+ .join(", ");
+ err.span_suggestion_verbose(
+ found_span,
+ "change the closure to take multiple arguments instead of a single tuple",
+ format!("|{}|", sugg),
+ Applicability::MachineApplicable,
+ );
+ }
+ }
+ if let &[ArgKind::Tuple(_, ref fields)] = &expected_args[..]
+ && fields.len() == found_args.len()
+ && is_closure
+ {
+ let sugg = format!(
+ "|({}){}|",
+ found_args
+ .iter()
+ .map(|arg| match arg {
+ ArgKind::Arg(name, _) => name.to_owned(),
+ _ => "_".to_owned(),
+ })
+ .collect::<Vec<String>>()
+ .join(", "),
+ // add type annotations if available
+ if found_args.iter().any(|arg| match arg {
+ ArgKind::Arg(_, ty) => ty != "_",
+ _ => false,
+ }) {
+ format!(
+ ": ({})",
+ fields
+ .iter()
+ .map(|(_, ty)| ty.to_owned())
+ .collect::<Vec<String>>()
+ .join(", ")
+ )
+ } else {
+ String::new()
+ },
+ );
+ err.span_suggestion_verbose(
+ found_span,
+ "change the closure to accept a tuple instead of individual arguments",
+ sugg,
+ Applicability::MachineApplicable,
+ );
+ }
+ }
+
+ err
+ }
- /// Checks if the type implements one of `Fn`, `FnMut`, or `FnOnce`
- /// in that order, and returns the generic type corresponding to the
- /// argument of that trait (corresponding to the closure arguments).
fn type_implements_fn_trait(
&self,
param_env: ty::ParamEnv<'tcx>,
ty: ty::Binder<'tcx, Ty<'tcx>>,
constness: ty::BoundConstness,
polarity: ty::ImplPolarity,
- ) -> Result<(ty::ClosureKind, ty::Binder<'tcx, Ty<'tcx>>), ()>;
-}
+ ) -> Result<(ty::ClosureKind, ty::Binder<'tcx, Ty<'tcx>>), ()> {
+ self.commit_if_ok(|_| {
+ for trait_def_id in [
+ self.tcx.lang_items().fn_trait(),
+ self.tcx.lang_items().fn_mut_trait(),
+ self.tcx.lang_items().fn_once_trait(),
+ ] {
+ let Some(trait_def_id) = trait_def_id else { continue };
+ // Make a fresh inference variable so we can determine what the substitutions
+ // of the trait are.
+ let var = self.next_ty_var(TypeVariableOrigin {
+ span: DUMMY_SP,
+ kind: TypeVariableOriginKind::MiscVariable,
+ });
+ let substs = self.tcx.mk_substs_trait(ty.skip_binder(), &[var.into()]);
+ let obligation = Obligation::new(
+ ObligationCause::dummy(),
+ param_env,
+ ty.rebind(ty::TraitPredicate {
+ trait_ref: ty::TraitRef::new(trait_def_id, substs),
+ constness,
+ polarity,
+ })
+ .to_predicate(self.tcx),
+ );
+ let mut fulfill_cx = FulfillmentContext::new_in_snapshot();
+ fulfill_cx.register_predicate_obligation(self, obligation);
+ if fulfill_cx.select_all_or_error(self).is_empty() {
+ return Ok((
+ ty::ClosureKind::from_def_id(self.tcx, trait_def_id)
+ .expect("expected to map DefId to ClosureKind"),
+ ty.rebind(self.resolve_vars_if_possible(var)),
+ ));
+ }
+ }
-impl<'a, 'tcx> InferCtxtExt<'tcx> for InferCtxt<'a, 'tcx> {
+ Err(())
+ })
+ }
+}
+impl<'tcx> TypeErrCtxtExt<'tcx> for TypeErrCtxt<'_, 'tcx> {
fn report_fulfillment_errors(
&self,
errors: &[FulfillmentError<'tcx>],
@@ -250,6 +500,19 @@ impl<'a, 'tcx> InferCtxtExt<'tcx> for InferCtxt<'a, 'tcx> {
bug!();
}
+ fn suggest_new_overflow_limit(&self, err: &mut Diagnostic) {
+ let suggested_limit = match self.tcx.recursion_limit() {
+ Limit(0) => Limit(2),
+ limit => limit * 2,
+ };
+ err.help(&format!(
+ "consider increasing the recursion limit by adding a \
+ `#![recursion_limit = \"{}\"]` attribute to your crate (`{}`)",
+ suggested_limit,
+ self.tcx.crate_name(LOCAL_CRATE),
+ ));
+ }
+
/// Reports that a cycle was detected which led to overflow and halts
/// compilation. This is equivalent to `report_overflow_error` except
/// that we can give a more helpful error message (and, in particular,
@@ -348,7 +611,7 @@ impl<'a, 'tcx> InferCtxtExt<'tcx> for InferCtxt<'a, 'tcx> {
message,
label,
note,
- enclosing_scope,
+ parent_label,
append_const_msg,
} = self.on_unimplemented_note(trait_ref, &obligation);
let have_alt_message = message.is_some() || label.is_some();
@@ -449,12 +712,27 @@ impl<'a, 'tcx> InferCtxtExt<'tcx> for InferCtxt<'a, 'tcx> {
{
"consider using `()`, or a `Result`".to_owned()
} else {
- format!(
- "{}the trait `{}` is not implemented for `{}`",
- pre_message,
- trait_predicate.print_modifiers_and_trait_path(),
- trait_ref.skip_binder().self_ty(),
- )
+ let ty_desc = match trait_ref.skip_binder().self_ty().kind() {
+ ty::FnDef(_, _) => Some("fn item"),
+ ty::Closure(_, _) => Some("closure"),
+ _ => None,
+ };
+
+ match ty_desc {
+ Some(desc) => format!(
+ "{}the trait `{}` is not implemented for {} `{}`",
+ pre_message,
+ trait_predicate.print_modifiers_and_trait_path(),
+ desc,
+ trait_ref.skip_binder().self_ty(),
+ ),
+ None => format!(
+ "{}the trait `{}` is not implemented for `{}`",
+ pre_message,
+ trait_predicate.print_modifiers_and_trait_path(),
+ trait_ref.skip_binder().self_ty(),
+ ),
+ }
};
if self.suggest_add_reference_to_arg(
@@ -482,10 +760,11 @@ impl<'a, 'tcx> InferCtxtExt<'tcx> for InferCtxt<'a, 'tcx> {
}
if let ObligationCauseCode::ObjectCastObligation(concrete_ty, obj_ty) = obligation.cause.code().peel_derives() &&
- Some(trait_ref.def_id()) == self.tcx.lang_items().sized_trait() {
+ Some(trait_ref.def_id()) == self.tcx.lang_items().sized_trait() {
self.suggest_borrowing_for_object_cast(&mut err, &root_obligation, *concrete_ty, *obj_ty);
}
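+ // Tracks whether only the `~const` part of the bound is unsatisfied; if so,
+ // the similar-impls suggestion below is skipped.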
+ let mut unsatisfied_const = false;
if trait_predicate.is_const_if_const() && obligation.param_env.is_const() {
let non_const_predicate = trait_ref.without_const();
let non_const_obligation = Obligation {
@@ -495,6 +774,7 @@ impl<'a, 'tcx> InferCtxtExt<'tcx> for InferCtxt<'a, 'tcx> {
recursion_depth: obligation.recursion_depth,
};
if self.predicate_may_hold(&non_const_obligation) {
+ unsatisfied_const = true;
err.span_note(
span,
&format!(
@@ -514,7 +794,7 @@ impl<'a, 'tcx> InferCtxtExt<'tcx> for InferCtxt<'a, 'tcx> {
// If it has a custom `#[rustc_on_unimplemented]` note, let's display it
err.note(s.as_str());
}
- if let Some(ref s) = enclosing_scope {
+ if let Some(ref s) = parent_label {
let body = tcx
.hir()
.opt_local_def_id(obligation.cause.body_id)
@@ -523,11 +803,7 @@ impl<'a, 'tcx> InferCtxtExt<'tcx> for InferCtxt<'a, 'tcx> {
hir_id: obligation.cause.body_id,
})
});
-
- let enclosing_scope_span =
- tcx.hir().span_with_body(tcx.hir().local_def_id_to_hir_id(body));
-
- err.span_label(enclosing_scope_span, s);
+ err.span_label(tcx.def_span(body), s);
}
self.suggest_floating_point_literal(&obligation, &mut err, &trait_ref);
@@ -594,11 +870,11 @@ impl<'a, 'tcx> InferCtxtExt<'tcx> for InferCtxt<'a, 'tcx> {
// Try to report a help message
if is_fn_trait
&& let Ok((implemented_kind, params)) = self.type_implements_fn_trait(
- obligation.param_env,
- trait_ref.self_ty(),
- trait_predicate.skip_binder().constness,
- trait_predicate.skip_binder().polarity,
- )
+ obligation.param_env,
+ trait_ref.self_ty(),
+ trait_predicate.skip_binder().constness,
+ trait_predicate.skip_binder().polarity,
+ )
{
// If the type implements `Fn`, `FnMut`, or `FnOnce`, suppress the following
// suggestion to add trait bounds for the type, since we only typically implement
@@ -649,8 +925,8 @@ impl<'a, 'tcx> InferCtxtExt<'tcx> for InferCtxt<'a, 'tcx> {
);
}
}
- } else if !trait_ref.has_infer_types_or_consts()
- && self.predicate_can_apply(obligation.param_env, trait_ref)
+ } else if !trait_ref.has_non_region_infer()
+ && self.predicate_can_apply(obligation.param_env, trait_predicate)
{
// If a where-clause may be useful, remind the
// user that they can add it.
@@ -665,7 +941,7 @@ impl<'a, 'tcx> InferCtxtExt<'tcx> for InferCtxt<'a, 'tcx> {
None,
obligation.cause.body_id,
);
- } else if !suggested {
+ } else if !suggested && !unsatisfied_const {
// Can't show anything else useful, try to find similar impls.
let impl_candidates = self.find_similar_impl_candidates(trait_predicate);
if !self.report_similar_impl_candidates(
@@ -828,12 +1104,11 @@ impl<'a, 'tcx> InferCtxtExt<'tcx> for InferCtxt<'a, 'tcx> {
// Additional context information explaining why the closure only implements
// a particular trait.
- if let Some(typeck_results) = self.in_progress_typeck_results {
+ if let Some(typeck_results) = &self.typeck_results {
let hir_id = self
.tcx
.hir()
.local_def_id_to_hir_id(closure_def_id.expect_local());
- let typeck_results = typeck_results.borrow();
match (found_kind, typeck_results.closure_kind_origins().get(hir_id)) {
(ty::ClosureKind::FnOnce, Some((span, place))) => {
err.span_label(
@@ -859,8 +1134,7 @@ impl<'a, 'tcx> InferCtxtExt<'tcx> for InferCtxt<'a, 'tcx> {
}
}
- err.emit();
- return;
+ err
}
ty::PredicateKind::WellFormed(ty) => {
@@ -941,9 +1215,14 @@ impl<'a, 'tcx> InferCtxtExt<'tcx> for InferCtxt<'a, 'tcx> {
self.reported_closure_mismatch.borrow_mut().insert((span, found_span));
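+ // Set below if either side of the `Fn*` trait ref does not use a tuple for
+ // its arguments.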
+ let mut not_tupled = false;
+
let found = match found_trait_ref.skip_binder().substs.type_at(1).kind() {
ty::Tuple(ref tys) => vec![ArgKind::empty(); tys.len()],
- _ => vec![ArgKind::empty()],
+ _ => {
+ not_tupled = true;
+ vec![ArgKind::empty()]
+ }
};
let expected_ty = expected_trait_ref.skip_binder().substs.type_at(1);
@@ -951,15 +1230,34 @@ impl<'a, 'tcx> InferCtxtExt<'tcx> for InferCtxt<'a, 'tcx> {
ty::Tuple(ref tys) => {
tys.iter().map(|t| ArgKind::from_expected_ty(t, Some(span))).collect()
}
- _ => vec![ArgKind::Arg("_".to_owned(), expected_ty.to_string())],
+ _ => {
+ not_tupled = true;
+ vec![ArgKind::Arg("_".to_owned(), expected_ty.to_string())]
+ }
};
- if found.len() == expected.len() {
+ // If this is a `Fn` family trait and either the expected or found
+ // argument type is not tupled, then fall back to just a regular
+ // mismatch error. This shouldn't be common unless one of the traits
+ // is implemented manually, but don't make it more confusing when it
+ // does happen.
+ if Some(expected_trait_ref.def_id()) != tcx.lang_items().gen_trait() && not_tupled {
+ self.report_and_explain_type_error(
+ TypeTrace::poly_trait_refs(
+ &obligation.cause,
+ true,
+ expected_trait_ref,
+ found_trait_ref,
+ ),
+ ty::error::TypeError::Mismatch,
+ )
+ } else if found.len() == expected.len() {
self.report_closure_arg_mismatch(
span,
found_span,
found_trait_ref,
expected_trait_ref,
+ obligation.cause.code(),
)
} else {
let (closure_span, found) = found_did
@@ -1008,7 +1306,10 @@ impl<'a, 'tcx> InferCtxtExt<'tcx> for InferCtxt<'a, 'tcx> {
}
match obligation.predicate.kind().skip_binder() {
- ty::PredicateKind::ConstEvaluatable(uv) => {
+ ty::PredicateKind::ConstEvaluatable(ct) => {
+ let ty::ConstKind::Unevaluated(uv) = ct.kind() else {
+ bug!("const evaluatable failed for non-unevaluated const `{ct:?}`");
+ };
let mut err =
self.tcx.sess.struct_span_err(span, "unconstrained generic constant");
let const_span = self.tcx.def_span(uv.def.did);
@@ -1054,250 +1355,9 @@ impl<'a, 'tcx> InferCtxtExt<'tcx> for InferCtxt<'a, 'tcx> {
err.emit();
}
-
- /// Given some node representing a fn-like thing in the HIR map,
- /// returns a span and `ArgKind` information that describes the
- /// arguments it expects. This can be supplied to
- /// `report_arg_count_mismatch`.
- fn get_fn_like_arguments(&self, node: Node<'_>) -> Option<(Span, Vec<ArgKind>)> {
- let sm = self.tcx.sess.source_map();
- let hir = self.tcx.hir();
- Some(match node {
- Node::Expr(&hir::Expr {
- kind: hir::ExprKind::Closure(&hir::Closure { body, fn_decl_span, .. }),
- ..
- }) => (
- fn_decl_span,
- hir.body(body)
- .params
- .iter()
- .map(|arg| {
- if let hir::Pat { kind: hir::PatKind::Tuple(ref args, _), span, .. } =
- *arg.pat
- {
- Some(ArgKind::Tuple(
- Some(span),
- args.iter()
- .map(|pat| {
- sm.span_to_snippet(pat.span)
- .ok()
- .map(|snippet| (snippet, "_".to_owned()))
- })
- .collect::<Option<Vec<_>>>()?,
- ))
- } else {
- let name = sm.span_to_snippet(arg.pat.span).ok()?;
- Some(ArgKind::Arg(name, "_".to_owned()))
- }
- })
- .collect::<Option<Vec<ArgKind>>>()?,
- ),
- Node::Item(&hir::Item { kind: hir::ItemKind::Fn(ref sig, ..), .. })
- | Node::ImplItem(&hir::ImplItem { kind: hir::ImplItemKind::Fn(ref sig, _), .. })
- | Node::TraitItem(&hir::TraitItem {
- kind: hir::TraitItemKind::Fn(ref sig, _), ..
- }) => (
- sig.span,
- sig.decl
- .inputs
- .iter()
- .map(|arg| match arg.kind {
- hir::TyKind::Tup(ref tys) => ArgKind::Tuple(
- Some(arg.span),
- vec![("_".to_owned(), "_".to_owned()); tys.len()],
- ),
- _ => ArgKind::empty(),
- })
- .collect::<Vec<ArgKind>>(),
- ),
- Node::Ctor(ref variant_data) => {
- let span = variant_data.ctor_hir_id().map_or(DUMMY_SP, |id| hir.span(id));
- (span, vec![ArgKind::empty(); variant_data.fields().len()])
- }
- _ => panic!("non-FnLike node found: {:?}", node),
- })
- }
-
- /// Reports an error when the number of arguments needed by a
- /// trait match doesn't match the number that the expression
- /// provides.
- fn report_arg_count_mismatch(
- &self,
- span: Span,
- found_span: Option<Span>,
- expected_args: Vec<ArgKind>,
- found_args: Vec<ArgKind>,
- is_closure: bool,
- ) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> {
- let kind = if is_closure { "closure" } else { "function" };
-
- let args_str = |arguments: &[ArgKind], other: &[ArgKind]| {
- let arg_length = arguments.len();
- let distinct = matches!(other, &[ArgKind::Tuple(..)]);
- match (arg_length, arguments.get(0)) {
- (1, Some(&ArgKind::Tuple(_, ref fields))) => {
- format!("a single {}-tuple as argument", fields.len())
- }
- _ => format!(
- "{} {}argument{}",
- arg_length,
- if distinct && arg_length > 1 { "distinct " } else { "" },
- pluralize!(arg_length)
- ),
- }
- };
-
- let expected_str = args_str(&expected_args, &found_args);
- let found_str = args_str(&found_args, &expected_args);
-
- let mut err = struct_span_err!(
- self.tcx.sess,
- span,
- E0593,
- "{} is expected to take {}, but it takes {}",
- kind,
- expected_str,
- found_str,
- );
-
- err.span_label(span, format!("expected {} that takes {}", kind, expected_str));
-
- if let Some(found_span) = found_span {
- err.span_label(found_span, format!("takes {}", found_str));
-
- // move |_| { ... }
- // ^^^^^^^^-- def_span
- //
- // move |_| { ... }
- // ^^^^^-- prefix
- let prefix_span = self.tcx.sess.source_map().span_until_non_whitespace(found_span);
- // move |_| { ... }
- // ^^^-- pipe_span
- let pipe_span =
- if let Some(span) = found_span.trim_start(prefix_span) { span } else { found_span };
-
- // Suggest to take and ignore the arguments with expected_args_length `_`s if
- // found arguments is empty (assume the user just wants to ignore args in this case).
- // For example, if `expected_args_length` is 2, suggest `|_, _|`.
- if found_args.is_empty() && is_closure {
- let underscores = vec!["_"; expected_args.len()].join(", ");
- err.span_suggestion_verbose(
- pipe_span,
- &format!(
- "consider changing the closure to take and ignore the expected argument{}",
- pluralize!(expected_args.len())
- ),
- format!("|{}|", underscores),
- Applicability::MachineApplicable,
- );
- }
-
- if let &[ArgKind::Tuple(_, ref fields)] = &found_args[..] {
- if fields.len() == expected_args.len() {
- let sugg = fields
- .iter()
- .map(|(name, _)| name.to_owned())
- .collect::<Vec<String>>()
- .join(", ");
- err.span_suggestion_verbose(
- found_span,
- "change the closure to take multiple arguments instead of a single tuple",
- format!("|{}|", sugg),
- Applicability::MachineApplicable,
- );
- }
- }
- if let &[ArgKind::Tuple(_, ref fields)] = &expected_args[..]
- && fields.len() == found_args.len()
- && is_closure
- {
- let sugg = format!(
- "|({}){}|",
- found_args
- .iter()
- .map(|arg| match arg {
- ArgKind::Arg(name, _) => name.to_owned(),
- _ => "_".to_owned(),
- })
- .collect::<Vec<String>>()
- .join(", "),
- // add type annotations if available
- if found_args.iter().any(|arg| match arg {
- ArgKind::Arg(_, ty) => ty != "_",
- _ => false,
- }) {
- format!(
- ": ({})",
- fields
- .iter()
- .map(|(_, ty)| ty.to_owned())
- .collect::<Vec<String>>()
- .join(", ")
- )
- } else {
- String::new()
- },
- );
- err.span_suggestion_verbose(
- found_span,
- "change the closure to accept a tuple instead of individual arguments",
- sugg,
- Applicability::MachineApplicable,
- );
- }
- }
-
- err
- }
-
- fn type_implements_fn_trait(
- &self,
- param_env: ty::ParamEnv<'tcx>,
- ty: ty::Binder<'tcx, Ty<'tcx>>,
- constness: ty::BoundConstness,
- polarity: ty::ImplPolarity,
- ) -> Result<(ty::ClosureKind, ty::Binder<'tcx, Ty<'tcx>>), ()> {
- self.commit_if_ok(|_| {
- for trait_def_id in [
- self.tcx.lang_items().fn_trait(),
- self.tcx.lang_items().fn_mut_trait(),
- self.tcx.lang_items().fn_once_trait(),
- ] {
- let Some(trait_def_id) = trait_def_id else { continue };
- // Make a fresh inference variable so we can determine what the substitutions
- // of the trait are.
- let var = self.next_ty_var(TypeVariableOrigin {
- span: DUMMY_SP,
- kind: TypeVariableOriginKind::MiscVariable,
- });
- let substs = self.tcx.mk_substs_trait(ty.skip_binder(), &[var.into()]);
- let obligation = Obligation::new(
- ObligationCause::dummy(),
- param_env,
- ty.rebind(ty::TraitPredicate {
- trait_ref: ty::TraitRef::new(trait_def_id, substs),
- constness,
- polarity,
- })
- .to_predicate(self.tcx),
- );
- let mut fulfill_cx = FulfillmentContext::new_in_snapshot();
- fulfill_cx.register_predicate_obligation(self, obligation);
- if fulfill_cx.select_all_or_error(self).is_empty() {
- return Ok((
- ty::ClosureKind::from_def_id(self.tcx, trait_def_id)
- .expect("expected to map DefId to ClosureKind"),
- ty.rebind(self.resolve_vars_if_possible(var)),
- ));
- }
- }
-
- Err(())
- })
- }
}
-trait InferCtxtPrivExt<'hir, 'tcx> {
+trait InferCtxtPrivExt<'tcx> {
// Returns whether `cond` not occurring implies that `error` does not occur, i.e.,
// that `error` occurring implies that `cond` occurs.
fn error_implies(&self, cond: ty::Predicate<'tcx>, error: ty::Predicate<'tcx>) -> bool;
@@ -1315,6 +1375,13 @@ trait InferCtxtPrivExt<'hir, 'tcx> {
error: &MismatchedProjectionTypes<'tcx>,
);
+ fn maybe_detailed_projection_msg(
+ &self,
+ pred: ty::ProjectionPredicate<'tcx>,
+ normalized_ty: ty::Term<'tcx>,
+ expected_ty: ty::Term<'tcx>,
+ ) -> Option<String>;
+
fn fuzzy_match_tys(
&self,
a: Ty<'tcx>,
@@ -1371,7 +1438,7 @@ trait InferCtxtPrivExt<'hir, 'tcx> {
fn predicate_can_apply(
&self,
param_env: ty::ParamEnv<'tcx>,
- pred: ty::PolyTraitRef<'tcx>,
+ pred: ty::PolyTraitPredicate<'tcx>,
) -> bool;
fn note_obligation_cause(&self, err: &mut Diagnostic, obligation: &PredicateObligation<'tcx>);
@@ -1389,13 +1456,13 @@ trait InferCtxtPrivExt<'hir, 'tcx> {
predicate: ty::Predicate<'tcx>,
);
- fn maybe_suggest_unsized_generics(&self, err: &mut Diagnostic, span: Span, node: Node<'hir>);
+ fn maybe_suggest_unsized_generics(&self, err: &mut Diagnostic, span: Span, node: Node<'tcx>);
fn maybe_indirection_for_unsized(
&self,
err: &mut Diagnostic,
- item: &'hir Item<'hir>,
- param: &'hir GenericParam<'hir>,
+ item: &'tcx Item<'tcx>,
+ param: &'tcx GenericParam<'tcx>,
) -> bool;
fn is_recursive_obligation(
@@ -1405,7 +1472,7 @@ trait InferCtxtPrivExt<'hir, 'tcx> {
) -> bool;
}
-impl<'a, 'tcx> InferCtxtPrivExt<'a, 'tcx> for InferCtxt<'a, 'tcx> {
+impl<'tcx> InferCtxtPrivExt<'tcx> for TypeErrCtxt<'_, 'tcx> {
// Returns whether `cond` not occurring implies that `error` does not occur, i.e.,
// that `error` occurring implies that `cond` occurs.
fn error_implies(&self, cond: ty::Predicate<'tcx>, error: ty::Predicate<'tcx>) -> bool {
@@ -1476,13 +1543,31 @@ impl<'a, 'tcx> InferCtxtPrivExt<'a, 'tcx> for InferCtxt<'a, 'tcx> {
.emit();
}
FulfillmentErrorCode::CodeConstEquateError(ref expected_found, ref err) => {
- self.report_mismatched_consts(
+ let mut diag = self.report_mismatched_consts(
&error.obligation.cause,
expected_found.expected,
expected_found.found,
err.clone(),
- )
- .emit();
+ );
+ let code = error.obligation.cause.code().peel_derives().peel_match_impls();
+ if let ObligationCauseCode::BindingObligation(..)
+ | ObligationCauseCode::ItemObligation(..)
+ | ObligationCauseCode::ExprBindingObligation(..)
+ | ObligationCauseCode::ExprItemObligation(..) = code
+ {
+ self.note_obligation_cause_code(
+ &mut diag,
+ &error.obligation.predicate,
+ error.obligation.param_env,
+ code,
+ &mut vec![],
+ &mut Default::default(),
+ );
+ }
+ diag.emit();
+ }
+ FulfillmentErrorCode::CodeCycle(ref cycle) => {
+ self.report_overflow_error_cycle(cycle);
}
}
}
@@ -1500,8 +1585,7 @@ impl<'a, 'tcx> InferCtxtPrivExt<'a, 'tcx> for InferCtxt<'a, 'tcx> {
}
self.probe(|_| {
- let err_buf;
- let mut err = &error.err;
+ let mut err = error.err;
let mut values = None;
// try to find the mismatched types to report the error with.
@@ -1534,31 +1618,28 @@ impl<'a, 'tcx> InferCtxtPrivExt<'a, 'tcx> for InferCtxt<'a, 'tcx> {
obligation.cause.code().peel_derives(),
ObligationCauseCode::ItemObligation(_)
| ObligationCauseCode::BindingObligation(_, _)
+ | ObligationCauseCode::ExprItemObligation(..)
+ | ObligationCauseCode::ExprBindingObligation(..)
| ObligationCauseCode::ObjectCastObligation(..)
| ObligationCauseCode::OpaqueType
);
- if let Err(error) = self.at(&obligation.cause, obligation.param_env).eq_exp(
+ if let Err(new_err) = self.at(&obligation.cause, obligation.param_env).eq_exp(
is_normalized_ty_expected,
normalized_ty,
data.term,
) {
- values = Some(infer::ValuePairs::Terms(ExpectedFound::new(
- is_normalized_ty_expected,
- normalized_ty,
- data.term,
- )));
- err_buf = error;
- err = &err_buf;
+ values = Some((data, is_normalized_ty_expected, normalized_ty, data.term));
+ err = new_err;
}
}
- let mut diag = struct_span_err!(
- self.tcx.sess,
- obligation.cause.span,
- E0271,
- "type mismatch resolving `{}`",
- predicate
- );
+ let msg = values
+ .and_then(|(predicate, _, normalized_ty, expected_ty)| {
+ self.maybe_detailed_projection_msg(predicate, normalized_ty, expected_ty)
+ })
+ .unwrap_or_else(|| format!("type mismatch resolving `{}`", predicate));
+ let mut diag = struct_span_err!(self.tcx.sess, obligation.cause.span, E0271, "{msg}");
+
let secondary_span = match predicate.kind().skip_binder() {
ty::PredicateKind::Projection(proj) => self
.tcx
@@ -1584,7 +1665,7 @@ impl<'a, 'tcx> InferCtxtPrivExt<'a, 'tcx> for InferCtxt<'a, 'tcx> {
..
})
| hir::Node::ImplItem(hir::ImplItem {
- kind: hir::ImplItemKind::TyAlias(ty),
+ kind: hir::ImplItemKind::Type(ty),
..
}),
) => Some((ty.span, format!("type mismatch resolving `{}`", predicate))),
@@ -1596,7 +1677,13 @@ impl<'a, 'tcx> InferCtxtPrivExt<'a, 'tcx> for InferCtxt<'a, 'tcx> {
&mut diag,
&obligation.cause,
secondary_span,
- values,
+ values.map(|(_, is_normalized_ty_expected, normalized_ty, term)| {
+ infer::ValuePairs::Terms(ExpectedFound::new(
+ is_normalized_ty_expected,
+ normalized_ty,
+ term,
+ ))
+ }),
err,
true,
false,
@@ -1606,6 +1693,33 @@ impl<'a, 'tcx> InferCtxtPrivExt<'a, 'tcx> for InferCtxt<'a, 'tcx> {
});
}
+ fn maybe_detailed_projection_msg(
+ &self,
+ pred: ty::ProjectionPredicate<'tcx>,
+ normalized_ty: ty::Term<'tcx>,
+ expected_ty: ty::Term<'tcx>,
+ ) -> Option<String> {
+ let trait_def_id = pred.projection_ty.trait_def_id(self.tcx);
+ let self_ty = pred.projection_ty.self_ty();
+
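+ // Special-case `FnOnce::Output`, `Future::Output` and `Iterator::Item` so the
+ // message can talk about return types, resolved futures and yielded items.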
+ if Some(pred.projection_ty.item_def_id) == self.tcx.lang_items().fn_once_output() {
+ Some(format!(
+ "expected `{self_ty}` to be a {fn_kind} that returns `{expected_ty}`, but it returns `{normalized_ty}`",
+ fn_kind = self_ty.prefix_string(self.tcx)
+ ))
+ } else if Some(trait_def_id) == self.tcx.lang_items().future_trait() {
+ Some(format!(
+ "expected `{self_ty}` to be a future that resolves to `{expected_ty}`, but it resolves to `{normalized_ty}`"
+ ))
+ } else if Some(trait_def_id) == self.tcx.get_diagnostic_item(sym::Iterator) {
+ Some(format!(
+ "expected `{self_ty}` to be an iterator that yields `{expected_ty}`, but it yields `{normalized_ty}`"
+ ))
+ } else {
+ None
+ }
+ }
+
fn fuzzy_match_tys(
&self,
mut a: Ty<'tcx>,
@@ -1731,13 +1845,21 @@ impl<'a, 'tcx> InferCtxtPrivExt<'a, 'tcx> for InferCtxt<'a, 'tcx> {
return false;
}
if candidates.len() == 1 {
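+ // If the single candidate's self type is a fn pointer, say so in the help.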
+ let ty_desc = match candidates[0].self_ty().kind() {
+ ty::FnPtr(_) => Some("fn pointer"),
+ _ => None,
+ };
+ let the_desc = match ty_desc {
+ Some(desc) => format!(" implemented for {} `", desc),
+ None => " implemented for `".to_string(),
+ };
err.highlighted_help(vec![
(
format!("the trait `{}` ", candidates[0].print_only_trait_path()),
Style::NoStyle,
),
("is".to_string(), Style::Highlight),
- (" implemented for `".to_string(), Style::NoStyle),
+ (the_desc, Style::NoStyle),
(candidates[0].self_ty().to_string(), Style::Highlight),
("`".to_string(), Style::NoStyle),
]);
@@ -1804,7 +1926,7 @@ impl<'a, 'tcx> InferCtxtPrivExt<'a, 'tcx> for InferCtxt<'a, 'tcx> {
// types, too.
self.tcx
.visibility(def.did())
- .is_accessible_from(body_id.owner.to_def_id(), self.tcx)
+ .is_accessible_from(body_id.owner.def_id, self.tcx)
} else {
true
}
@@ -1814,16 +1936,11 @@ impl<'a, 'tcx> InferCtxtPrivExt<'a, 'tcx> for InferCtxt<'a, 'tcx> {
}
let normalize = |candidate| {
- self.tcx.infer_ctxt().enter(|ref infcx| {
- let normalized = infcx
- .at(&ObligationCause::dummy(), ty::ParamEnv::empty())
- .normalize(candidate)
- .ok();
- match normalized {
- Some(normalized) => normalized.value,
- None => candidate,
- }
- })
+ let infcx = self.tcx.infer_ctxt().build();
+ infcx
+ .at(&ObligationCause::dummy(), ty::ParamEnv::empty())
+ .normalize(candidate)
+ .map_or(candidate, |normalized| normalized.value)
};
// Sort impl candidates so that ordering is consistent for UI tests.
@@ -1940,7 +2057,7 @@ impl<'a, 'tcx> InferCtxtPrivExt<'a, 'tcx> for InferCtxt<'a, 'tcx> {
let predicate = self.resolve_vars_if_possible(obligation.predicate);
let span = obligation.cause.span;
- debug!(?predicate, obligation.cause.code = tracing::field::debug(&obligation.cause.code()));
+ debug!(?predicate, obligation.cause.code = ?obligation.cause.code());
// Ambiguity errors are often caused as fallout from earlier errors.
// We ignore them if this `infcx` is tainted in some cases below.
@@ -1997,7 +2114,7 @@ impl<'a, 'tcx> InferCtxtPrivExt<'a, 'tcx> for InferCtxt<'a, 'tcx> {
// Pick the first substitution that still contains inference variables as the one
// we're going to emit an error for. If there are none (see above), fall back to
// a more general error.
- let subst = data.trait_ref.substs.iter().find(|s| s.has_infer_types_or_consts());
+ let subst = data.trait_ref.substs.iter().find(|s| s.has_non_region_infer());
let mut err = if let Some(subst) = subst {
self.emit_inference_failure_err(body_id, span, subst, ErrorCode::E0283, true)
@@ -2033,13 +2150,11 @@ impl<'a, 'tcx> InferCtxtPrivExt<'a, 'tcx> for InferCtxt<'a, 'tcx> {
}
}
- if let ObligationCauseCode::ItemObligation(def_id) = *obligation.cause.code() {
+ if let ObligationCauseCode::ItemObligation(def_id) | ObligationCauseCode::ExprItemObligation(def_id, ..) = *obligation.cause.code() {
self.suggest_fully_qualified_path(&mut err, def_id, span, trait_ref.def_id());
- } else if let (
- Ok(ref snippet),
- &ObligationCauseCode::BindingObligation(def_id, _),
- ) =
- (self.tcx.sess.source_map().span_to_snippet(span), obligation.cause.code())
+ } else if let Ok(snippet) = &self.tcx.sess.source_map().span_to_snippet(span)
+ && let ObligationCauseCode::BindingObligation(def_id, _) | ObligationCauseCode::ExprBindingObligation(def_id, ..)
+ = *obligation.cause.code()
{
let generics = self.tcx.generics_of(def_id);
if generics.params.iter().any(|p| p.name != kw::SelfUpper)
@@ -2119,12 +2234,12 @@ impl<'a, 'tcx> InferCtxtPrivExt<'a, 'tcx> for InferCtxt<'a, 'tcx> {
&& let [
..,
trait_path_segment @ hir::PathSegment {
- res: Some(rustc_hir::def::Res::Def(rustc_hir::def::DefKind::Trait, trait_id)),
+ res: rustc_hir::def::Res::Def(rustc_hir::def::DefKind::Trait, trait_id),
..
},
hir::PathSegment {
ident: assoc_item_name,
- res: Some(rustc_hir::def::Res::Def(_, item_id)),
+ res: rustc_hir::def::Res::Def(_, item_id),
..
}
] = path.segments
@@ -2169,13 +2284,22 @@ impl<'a, 'tcx> InferCtxtPrivExt<'a, 'tcx> for InferCtxt<'a, 'tcx> {
trait_impls.non_blanket_impls().len()
)
};
-
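+ // Build a fully qualified `<Type as Trait>` path: insert `<Type as ` before the
+ // trait segment and close it with `>` after the generic args (if any).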
+ let mut suggestions = vec![(
+ trait_path_segment.ident.span.shrink_to_lo(),
+ format!("<{} as ", self.tcx.type_of(impl_def_id))
+ )];
+ if let Some(generic_arg) = trait_path_segment.args {
+ let between_span = trait_path_segment.ident.span.between(generic_arg.span_ext);
+ // get rid of :: between Trait and <type>
+ // must be '::' between them, otherwise the parser won't accept the code
+ suggestions.push((between_span, "".to_string(),));
+ suggestions.push((generic_arg.span_ext.shrink_to_hi(), format!(">")));
+ } else {
+ suggestions.push((trait_path_segment.ident.span.shrink_to_hi(), format!(">")));
+ }
err.multipart_suggestion(
message,
- vec![
- (trait_path_segment.ident.span.shrink_to_lo(), format!("<{} as ", self.tcx.def_path(impl_def_id).to_string_no_crate_verbose())),
- (trait_path_segment.ident.span.shrink_to_hi(), format!(">"))
- ],
+ suggestions,
Applicability::MaybeIncorrect
);
}
@@ -2220,7 +2344,7 @@ impl<'a, 'tcx> InferCtxtPrivExt<'a, 'tcx> for InferCtxt<'a, 'tcx> {
.substs
.iter()
.chain(Some(data.term.into_arg()))
- .find(|g| g.has_infer_types_or_consts());
+ .find(|g| g.has_non_region_infer());
if let Some(subst) = subst {
let mut err = self.emit_inference_failure_err(
body_id,
@@ -2249,7 +2373,7 @@ impl<'a, 'tcx> InferCtxtPrivExt<'a, 'tcx> for InferCtxt<'a, 'tcx> {
if predicate.references_error() || self.is_tainted_by_errors() {
return;
}
- let subst = data.substs.iter().find(|g| g.has_infer_types_or_consts());
+ let subst = data.walk().find(|g| g.is_non_region_infer());
if let Some(subst) = subst {
let err = self.emit_inference_failure_err(
body_id,
@@ -2389,10 +2513,10 @@ impl<'a, 'tcx> InferCtxtPrivExt<'a, 'tcx> for InferCtxt<'a, 'tcx> {
fn predicate_can_apply(
&self,
param_env: ty::ParamEnv<'tcx>,
- pred: ty::PolyTraitRef<'tcx>,
+ pred: ty::PolyTraitPredicate<'tcx>,
) -> bool {
struct ParamToVarFolder<'a, 'tcx> {
- infcx: &'a InferCtxt<'a, 'tcx>,
+ infcx: &'a InferCtxt<'tcx>,
var_map: FxHashMap<Ty<'tcx>, Ty<'tcx>>,
}
@@ -2433,7 +2557,7 @@ impl<'a, 'tcx> InferCtxtPrivExt<'a, 'tcx> for InferCtxt<'a, 'tcx> {
let obligation = Obligation::new(
ObligationCause::dummy(),
param_env,
- cleaned_pred.without_const().to_predicate(selcx.tcx()),
+ cleaned_pred.to_predicate(selcx.tcx()),
);
self.predicate_may_hold(&obligation)
@@ -2462,15 +2586,10 @@ impl<'a, 'tcx> InferCtxtPrivExt<'a, 'tcx> for InferCtxt<'a, 'tcx> {
err: &mut Diagnostic,
obligation: &PredicateObligation<'tcx>,
) {
- let (
- ty::PredicateKind::Trait(pred),
- &ObligationCauseCode::BindingObligation(item_def_id, span),
- ) = (
- obligation.predicate.kind().skip_binder(),
- obligation.cause.code().peel_derives(),
- ) else {
- return;
- };
+ let ty::PredicateKind::Trait(pred) = obligation.predicate.kind().skip_binder() else { return; };
+ let (ObligationCauseCode::BindingObligation(item_def_id, span)
+ | ObligationCauseCode::ExprBindingObligation(item_def_id, span, ..))
+ = *obligation.cause.code().peel_derives() else { return; };
debug!(?pred, ?item_def_id, ?span);
let (Some(node), true) = (
@@ -2483,12 +2602,7 @@ impl<'a, 'tcx> InferCtxtPrivExt<'a, 'tcx> for InferCtxt<'a, 'tcx> {
}
#[instrument(level = "debug", skip_all)]
- fn maybe_suggest_unsized_generics<'hir>(
- &self,
- err: &mut Diagnostic,
- span: Span,
- node: Node<'hir>,
- ) {
+ fn maybe_suggest_unsized_generics(&self, err: &mut Diagnostic, span: Span, node: Node<'tcx>) {
let Some(generics) = node.generics() else {
return;
};
@@ -2539,11 +2653,11 @@ impl<'a, 'tcx> InferCtxtPrivExt<'a, 'tcx> for InferCtxt<'a, 'tcx> {
);
}
- fn maybe_indirection_for_unsized<'hir>(
+ fn maybe_indirection_for_unsized(
&self,
err: &mut Diagnostic,
- item: &'hir Item<'hir>,
- param: &'hir GenericParam<'hir>,
+ item: &Item<'tcx>,
+ param: &GenericParam<'tcx>,
) -> bool {
// Suggesting `T: ?Sized` is only valid in an ADT if `T` is only used in a
// borrow. `struct S<'a, T: ?Sized>(&'a T);` is valid, `struct S<T: ?Sized>(T);`
@@ -2643,82 +2757,6 @@ impl<'v> Visitor<'v> for FindTypeParam {
}
}
-pub fn recursive_type_with_infinite_size_error<'tcx>(
- tcx: TyCtxt<'tcx>,
- type_def_id: DefId,
- spans: Vec<(Span, Option<hir::HirId>)>,
-) {
- assert!(type_def_id.is_local());
- let span = tcx.def_span(type_def_id);
- let path = tcx.def_path_str(type_def_id);
- let mut err =
- struct_span_err!(tcx.sess, span, E0072, "recursive type `{}` has infinite size", path);
- err.span_label(span, "recursive type has infinite size");
- for &(span, _) in &spans {
- err.span_label(span, "recursive without indirection");
- }
- let msg = format!(
- "insert some indirection (e.g., a `Box`, `Rc`, or `&`) to make `{}` representable",
- path,
- );
- if spans.len() <= 4 {
- // FIXME(compiler-errors): This suggestion might be erroneous if Box is shadowed
- err.multipart_suggestion(
- &msg,
- spans
- .into_iter()
- .flat_map(|(span, field_id)| {
- if let Some(generic_span) = get_option_generic_from_field_id(tcx, field_id) {
- // If we match an `Option` and can grab the span of the Option's generic, then
- // suggest boxing the generic arg for a non-null niche optimization.
- vec![
- (generic_span.shrink_to_lo(), "Box<".to_string()),
- (generic_span.shrink_to_hi(), ">".to_string()),
- ]
- } else {
- vec![
- (span.shrink_to_lo(), "Box<".to_string()),
- (span.shrink_to_hi(), ">".to_string()),
- ]
- }
- })
- .collect(),
- Applicability::HasPlaceholders,
- );
- } else {
- err.help(&msg);
- }
- err.emit();
-}
-
-/// Extract the span for the generic type `T` of `Option<T>` in a field definition
-fn get_option_generic_from_field_id(tcx: TyCtxt<'_>, field_id: Option<hir::HirId>) -> Option<Span> {
- let node = tcx.hir().find(field_id?);
-
- // Expect a field from our field_id
- let Some(hir::Node::Field(field_def)) = node
- else { bug!("Expected HirId corresponding to FieldDef, found: {:?}", node) };
-
- // Match a type that is a simple QPath with no Self
- let hir::TyKind::Path(hir::QPath::Resolved(None, path)) = &field_def.ty.kind
- else { return None };
-
- // Check if the path we're checking resolves to Option
- let hir::def::Res::Def(_, did) = path.res
- else { return None };
-
- // Bail if this path doesn't describe `::core::option::Option`
- if !tcx.is_diagnostic_item(sym::Option, did) {
- return None;
- }
-
- // Match a single generic arg in the 0th path segment
- let generic_arg = path.segments.last()?.args?.args.get(0)?;
-
- // Take the span out of the type, if it's a type
- if let hir::GenericArg::Type(generic_ty) = generic_arg { Some(generic_ty.span) } else { None }
-}
-
/// Summarizes information about an argument for use in diagnostics.
#[derive(Clone)]
pub enum ArgKind {
@@ -2763,3 +2801,8 @@ impl<'tcx> ty::TypeVisitor<'tcx> for HasNumericInferVisitor {
}
}
}
+
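+/// Either the `DefId` of a callable item, or a human-readable description (such
+/// as "function pointer" or "trait object") when no `DefId` is available.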
+pub enum DefIdOrName {
+ DefId(DefId),
+ Name(&'static str),
+}
diff --git a/compiler/rustc_trait_selection/src/traits/error_reporting/on_unimplemented.rs b/compiler/rustc_trait_selection/src/traits/error_reporting/on_unimplemented.rs
index e6907637c..5eef54c63 100644
--- a/compiler/rustc_trait_selection/src/traits/error_reporting/on_unimplemented.rs
+++ b/compiler/rustc_trait_selection/src/traits/error_reporting/on_unimplemented.rs
@@ -1,17 +1,17 @@
use super::{
ObligationCauseCode, OnUnimplementedDirective, OnUnimplementedNote, PredicateObligation,
};
-use crate::infer::InferCtxt;
+use crate::infer::error_reporting::TypeErrCtxt;
use rustc_hir as hir;
use rustc_hir::def_id::DefId;
-use rustc_middle::ty::subst::{Subst, SubstsRef};
+use rustc_middle::ty::SubstsRef;
use rustc_middle::ty::{self, GenericParamDefKind};
use rustc_span::symbol::sym;
use std::iter;
use super::InferCtxtPrivExt;
-pub trait InferCtxtExt<'tcx> {
+pub trait TypeErrCtxtExt<'tcx> {
/*private*/
fn impl_similar_to(
&self,
@@ -29,7 +29,7 @@ pub trait InferCtxtExt<'tcx> {
) -> OnUnimplementedNote;
}
-impl<'a, 'tcx> InferCtxtExt<'tcx> for InferCtxt<'a, 'tcx> {
+impl<'tcx> TypeErrCtxtExt<'tcx> for TypeErrCtxt<'_, 'tcx> {
fn impl_similar_to(
&self,
trait_ref: ty::PolyTraitRef<'tcx>,
@@ -143,7 +143,9 @@ impl<'a, 'tcx> InferCtxtExt<'tcx> for InferCtxt<'a, 'tcx> {
}
if let ObligationCauseCode::ItemObligation(item)
- | ObligationCauseCode::BindingObligation(item, _) = *obligation.cause.code()
+ | ObligationCauseCode::BindingObligation(item, _)
+ | ObligationCauseCode::ExprItemObligation(item, ..)
+ | ObligationCauseCode::ExprBindingObligation(item, ..) = *obligation.cause.code()
{
// FIXME: maybe also have some way of handling methods
// from other traits? That would require name resolution,
@@ -162,6 +164,10 @@ impl<'a, 'tcx> InferCtxtExt<'tcx> for InferCtxt<'a, 'tcx> {
flags.push((sym::from_desugaring, Some(format!("{:?}", k))));
}
+ if let ObligationCauseCode::MainFunctionType = obligation.cause.code() {
+ flags.push((sym::cause, Some("MainFunctionType".to_string())));
+ }
+
// Add all types without trimmed paths.
ty::print::with_no_trimmed_paths!({
let generics = self.tcx.generics_of(def_id);
@@ -254,7 +260,7 @@ impl<'a, 'tcx> InferCtxtExt<'tcx> for InferCtxt<'a, 'tcx> {
}
}
}
- if let ty::Dynamic(traits, _) = self_ty.kind() {
+ if let ty::Dynamic(traits, _, _) = self_ty.kind() {
for t in traits.iter() {
if let ty::ExistentialPredicate::Trait(trait_ref) = t.skip_binder() {
flags.push((sym::_Self, Some(self.tcx.def_path_str(trait_ref.def_id))))
diff --git a/compiler/rustc_trait_selection/src/traits/error_reporting/suggestions.rs b/compiler/rustc_trait_selection/src/traits/error_reporting/suggestions.rs
index 219413121..8c41d9d24 100644
--- a/compiler/rustc_trait_selection/src/traits/error_reporting/suggestions.rs
+++ b/compiler/rustc_trait_selection/src/traits/error_reporting/suggestions.rs
@@ -1,5 +1,5 @@
use super::{
- EvaluationResult, Obligation, ObligationCause, ObligationCauseCode, PredicateObligation,
+ DefIdOrName, Obligation, ObligationCause, ObligationCauseCode, PredicateObligation,
SelectionContext,
};
@@ -7,6 +7,7 @@ use crate::autoderef::Autoderef;
use crate::infer::InferCtxt;
use crate::traits::normalize_to;
+use hir::def::CtorOf;
use hir::HirId;
use rustc_data_structures::fx::FxHashSet;
use rustc_data_structures::stack::ensure_sufficient_stack;
@@ -20,18 +21,17 @@ use rustc_hir::def_id::DefId;
use rustc_hir::intravisit::Visitor;
use rustc_hir::lang_items::LangItem;
use rustc_hir::{AsyncGeneratorKind, GeneratorKind, Node};
-use rustc_infer::infer::TyCtxtInferExt;
+use rustc_infer::infer::error_reporting::TypeErrCtxt;
+use rustc_infer::infer::type_variable::{TypeVariableOrigin, TypeVariableOriginKind};
+use rustc_infer::infer::LateBoundRegionConversionTime;
use rustc_middle::hir::map;
use rustc_middle::ty::{
self, suggest_arbitrary_trait_bound, suggest_constraining_type_param, AdtKind, DefIdTree,
GeneratorDiagnosticData, GeneratorInteriorTypeCause, Infer, InferTy, IsSuggestable,
- ProjectionPredicate, ToPredicate, Ty, TyCtxt, TypeFoldable, TypeFolder, TypeSuperFoldable,
- TypeVisitable,
+ ToPredicate, Ty, TyCtxt, TypeFoldable, TypeFolder, TypeSuperFoldable, TypeVisitable,
};
use rustc_middle::ty::{TypeAndMut, TypeckResults};
-use rustc_session::Limit;
-use rustc_span::def_id::LOCAL_CRATE;
-use rustc_span::symbol::{kw, sym, Ident, Symbol};
+use rustc_span::symbol::{sym, Ident, Symbol};
use rustc_span::{BytePos, DesugaringKind, ExpnKind, Span, DUMMY_SP};
use rustc_target::spec::abi;
use std::fmt;
@@ -63,7 +63,7 @@ impl<'tcx, 'a> GeneratorData<'tcx, 'a> {
// meet an obligation
fn try_get_upvar_span<F>(
&self,
- infer_context: &InferCtxt<'a, 'tcx>,
+ infer_context: &InferCtxt<'tcx>,
generator_did: DefId,
ty_matches: F,
) -> Option<GeneratorInteriorOrUpvar>
@@ -169,12 +169,12 @@ impl<'tcx, 'a> GeneratorData<'tcx, 'a> {
}
// This trait is public to expose the diagnostics methods to clippy.
-pub trait InferCtxtExt<'tcx> {
+pub trait TypeErrCtxtExt<'tcx> {
fn suggest_restricting_param_bound(
&self,
err: &mut Diagnostic,
trait_pred: ty::PolyTraitPredicate<'tcx>,
- proj_pred: Option<ty::PolyProjectionPredicate<'tcx>>,
+ associated_item: Option<(&'static str, Ty<'tcx>)>,
body_id: hir::HirId,
);
@@ -256,8 +256,15 @@ pub trait InferCtxtExt<'tcx> {
found_span: Option<Span>,
found: ty::PolyTraitRef<'tcx>,
expected: ty::PolyTraitRef<'tcx>,
+ cause: &ObligationCauseCode<'tcx>,
) -> DiagnosticBuilder<'tcx, ErrorGuaranteed>;
+ fn note_conflicting_closure_bounds(
+ &self,
+ cause: &ObligationCauseCode<'tcx>,
+ err: &mut DiagnosticBuilder<'tcx, ErrorGuaranteed>,
+ );
+
fn suggest_fully_qualified_path(
&self,
err: &mut Diagnostic,
@@ -297,8 +304,6 @@ pub trait InferCtxtExt<'tcx> {
) where
T: fmt::Display;
- fn suggest_new_overflow_limit(&self, err: &mut Diagnostic);
-
/// Suggest to await before try: future? => future.await?
fn suggest_await_before_try(
&self,
@@ -462,12 +467,12 @@ fn suggest_restriction<'tcx>(
}
}
-impl<'a, 'tcx> InferCtxtExt<'tcx> for InferCtxt<'a, 'tcx> {
+impl<'tcx> TypeErrCtxtExt<'tcx> for TypeErrCtxt<'_, 'tcx> {
fn suggest_restricting_param_bound(
&self,
mut err: &mut Diagnostic,
trait_pred: ty::PolyTraitPredicate<'tcx>,
- proj_pred: Option<ty::PolyProjectionPredicate<'tcx>>,
+ associated_ty: Option<(&'static str, Ty<'tcx>)>,
body_id: hir::HirId,
) {
let trait_pred = self.resolve_numeric_literals_with_default(trait_pred);
@@ -604,21 +609,18 @@ impl<'a, 'tcx> InferCtxtExt<'tcx> for InferCtxt<'a, 'tcx> {
trait_pred.print_modifiers_and_trait_path().to_string()
);
- if let Some(proj_pred) = proj_pred {
- let ProjectionPredicate { projection_ty, term } = proj_pred.skip_binder();
- let item = self.tcx.associated_item(projection_ty.item_def_id);
-
+ if let Some((name, term)) = associated_ty {
// FIXME: this case overlaps with code in TyCtxt::note_and_explain_type_err.
// That should be extracted into a helper function.
if constraint.ends_with('>') {
constraint = format!(
- "{}, {}={}>",
+ "{}, {} = {}>",
&constraint[..constraint.len() - 1],
- item.name,
+ name,
term
);
} else {
- constraint.push_str(&format!("<{}={}>", item.name, term));
+ constraint.push_str(&format!("<{} = {}>", name, term));
}
}
@@ -648,7 +650,13 @@ impl<'a, 'tcx> InferCtxtExt<'tcx> for InferCtxt<'a, 'tcx> {
..
}) if !param_ty => {
// Missing generic type parameter bound.
- if suggest_arbitrary_trait_bound(self.tcx, generics, &mut err, trait_pred) {
+ if suggest_arbitrary_trait_bound(
+ self.tcx,
+ generics,
+ &mut err,
+ trait_pred,
+ associated_ty,
+ ) {
return;
}
}
@@ -657,7 +665,7 @@ impl<'a, 'tcx> InferCtxtExt<'tcx> for InferCtxt<'a, 'tcx> {
_ => {}
}
- hir_id = self.tcx.hir().local_def_id_to_hir_id(self.tcx.hir().get_parent_item(hir_id));
+ hir_id = self.tcx.hir().get_parent_item(hir_id).into();
}
}
@@ -671,11 +679,15 @@ impl<'a, 'tcx> InferCtxtExt<'tcx> for InferCtxt<'a, 'tcx> {
trait_pred: ty::PolyTraitPredicate<'tcx>,
) -> bool {
// It only makes sense when suggesting dereferences for arguments
- let ObligationCauseCode::FunctionArgumentObligation { .. } = obligation.cause.code() else {
- return false;
- };
- let param_env = obligation.param_env;
- let body_id = obligation.cause.body_id;
+ let ObligationCauseCode::FunctionArgumentObligation { arg_hir_id, .. } = obligation.cause.code()
+ else { return false; };
+ let Some(typeck_results) = &self.typeck_results
+ else { return false; };
+ let hir::Node::Expr(expr) = self.tcx.hir().get(*arg_hir_id)
+ else { return false; };
+ let Some(arg_ty) = typeck_results.expr_ty_adjusted_opt(expr)
+ else { return false; };
+
let span = obligation.cause.span;
let mut real_trait_pred = trait_pred;
let mut code = obligation.cause.code();
@@ -685,11 +697,25 @@ impl<'a, 'tcx> InferCtxtExt<'tcx> for InferCtxt<'a, 'tcx> {
real_trait_pred = parent_trait_pred;
}
- // Skipping binder here, remapping below
- let real_ty = real_trait_pred.self_ty().skip_binder();
+ let real_ty = real_trait_pred.self_ty();
+ // We `erase_late_bound_regions` here because `make_subregion` does not handle
+ // `ReLateBound`, and we don't particularly care about the regions.
+ if self
+ .can_eq(obligation.param_env, self.tcx.erase_late_bound_regions(real_ty), arg_ty)
+ .is_err()
+ {
+ continue;
+ }
- if let ty::Ref(region, base_ty, mutbl) = *real_ty.kind() {
- let mut autoderef = Autoderef::new(self, param_env, body_id, span, base_ty, span);
+ if let ty::Ref(region, base_ty, mutbl) = *real_ty.skip_binder().kind() {
+ let mut autoderef = Autoderef::new(
+ self,
+ obligation.param_env,
+ obligation.cause.body_id,
+ span,
+ base_ty,
+ span,
+ );
if let Some(steps) = autoderef.find_map(|(ty, steps)| {
// Re-add the `&`
let ty = self.tcx.mk_ref(region, TypeAndMut { ty, mutbl });
@@ -697,24 +723,29 @@ impl<'a, 'tcx> InferCtxtExt<'tcx> for InferCtxt<'a, 'tcx> {
// Remapping bound vars here
let real_trait_pred_and_ty =
real_trait_pred.map_bound(|inner_trait_pred| (inner_trait_pred, ty));
- let obligation = self
- .mk_trait_obligation_with_new_self_ty(param_env, real_trait_pred_and_ty);
+ let obligation = self.mk_trait_obligation_with_new_self_ty(
+ obligation.param_env,
+ real_trait_pred_and_ty,
+ );
Some(steps).filter(|_| self.predicate_may_hold(&obligation))
}) {
if steps > 0 {
- if let Ok(src) = self.tcx.sess.source_map().span_to_snippet(span) {
- // Don't care about `&mut` because `DerefMut` is used less
- // often and user will not expect autoderef happens.
- if src.starts_with('&') && !src.starts_with("&mut ") {
- let derefs = "*".repeat(steps);
- err.span_suggestion(
- span,
- "consider dereferencing here",
- format!("&{}{}", derefs, &src[1..]),
- Applicability::MachineApplicable,
- );
- return true;
- }
+ // Don't care about `&mut` because `DerefMut` is used less
+ // often and users will not expect autoderef to happen.
+ if let Some(hir::Node::Expr(hir::Expr {
+ kind:
+ hir::ExprKind::AddrOf(hir::BorrowKind::Ref, hir::Mutability::Not, expr),
+ ..
+ })) = self.tcx.hir().find(*arg_hir_id)
+ {
+ let derefs = "*".repeat(steps);
+ err.span_suggestion_verbose(
+ expr.span.shrink_to_lo(),
+ "consider dereferencing here",
+ derefs,
+ Applicability::MachineApplicable,
+ );
+ return true;
}
}
} else if real_trait_pred != trait_pred {
@@ -724,7 +755,7 @@ impl<'a, 'tcx> InferCtxtExt<'tcx> for InferCtxt<'a, 'tcx> {
let real_trait_pred_and_base_ty =
real_trait_pred.map_bound(|inner_trait_pred| (inner_trait_pred, base_ty));
let obligation = self.mk_trait_obligation_with_new_self_ty(
- param_env,
+ obligation.param_env,
real_trait_pred_and_base_ty,
);
if self.predicate_may_hold(&obligation) {
@@ -750,7 +781,7 @@ impl<'a, 'tcx> InferCtxtExt<'tcx> for InferCtxt<'a, 'tcx> {
// Get the local name of this closure. This can be inaccurate because
// of the possibility of reassignment, but this should be good enough.
match &kind {
- hir::PatKind::Binding(hir::BindingAnnotation::Unannotated, _, ident, None) => {
+ hir::PatKind::Binding(hir::BindingAnnotation::NONE, _, ident, None) => {
Some(ident.name)
}
_ => {
@@ -783,75 +814,138 @@ impl<'a, 'tcx> InferCtxtExt<'tcx> for InferCtxt<'a, 'tcx> {
err: &mut Diagnostic,
trait_pred: ty::PolyTraitPredicate<'tcx>,
) -> bool {
- // Skipping binder here, remapping below
- let self_ty = trait_pred.self_ty().skip_binder();
-
- let (def_id, output_ty, callable) = match *self_ty.kind() {
- ty::Closure(def_id, substs) => (def_id, substs.as_closure().sig().output(), "closure"),
- ty::FnDef(def_id, _) => (def_id, self_ty.fn_sig(self.tcx).output(), "function"),
- _ => return false,
- };
- let msg = format!("use parentheses to call the {}", callable);
+ if let ty::PredicateKind::Trait(trait_pred) = obligation.predicate.kind().skip_binder()
+ && Some(trait_pred.def_id()) == self.tcx.lang_items().sized_trait()
+ {
+ // Don't suggest calling to turn an unsized type into a sized type
+ return false;
+ }
- // "We should really create a single list of bound vars from the combined vars
- // from the predicate and function, but instead we just liberate the function bound vars"
- let output_ty = self.tcx.liberate_late_bound_regions(def_id, output_ty);
+ // This is duplicated from `extract_callable_info` in typeck, which
+ // relies on autoderef, so we can't use it here.
+ let found = trait_pred.self_ty().skip_binder().peel_refs();
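+ // Work out from the shape of the found type whether it is callable, and if so
+ // what its inputs and output are.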
+ let Some((def_id_or_name, output, inputs)) = (match *found.kind()
+ {
+ ty::FnPtr(fn_sig) => {
+ Some((DefIdOrName::Name("function pointer"), fn_sig.output(), fn_sig.inputs()))
+ }
+ ty::FnDef(def_id, _) => {
+ let fn_sig = found.fn_sig(self.tcx);
+ Some((DefIdOrName::DefId(def_id), fn_sig.output(), fn_sig.inputs()))
+ }
+ ty::Closure(def_id, substs) => {
+ let fn_sig = substs.as_closure().sig();
+ Some((
+ DefIdOrName::DefId(def_id),
+ fn_sig.output(),
+ fn_sig.inputs().map_bound(|inputs| &inputs[1..]),
+ ))
+ }
+ ty::Opaque(def_id, substs) => {
+ self.tcx.bound_item_bounds(def_id).subst(self.tcx, substs).iter().find_map(|pred| {
+ if let ty::PredicateKind::Projection(proj) = pred.kind().skip_binder()
+ && Some(proj.projection_ty.item_def_id) == self.tcx.lang_items().fn_once_output()
+ // args tuple will always be substs[1]
+ && let ty::Tuple(args) = proj.projection_ty.substs.type_at(1).kind()
+ {
+ Some((
+ DefIdOrName::DefId(def_id),
+ pred.kind().rebind(proj.term.ty().unwrap()),
+ pred.kind().rebind(args.as_slice()),
+ ))
+ } else {
+ None
+ }
+ })
+ }
+ ty::Dynamic(data, _, ty::Dyn) => {
+ data.iter().find_map(|pred| {
+ if let ty::ExistentialPredicate::Projection(proj) = pred.skip_binder()
+ && Some(proj.item_def_id) == self.tcx.lang_items().fn_once_output()
+ // for existential projection, substs are shifted over by 1
+ && let ty::Tuple(args) = proj.substs.type_at(0).kind()
+ {
+ Some((
+ DefIdOrName::Name("trait object"),
+ pred.rebind(proj.term.ty().unwrap()),
+ pred.rebind(args.as_slice()),
+ ))
+ } else {
+ None
+ }
+ })
+ }
+ ty::Param(_) => {
+ obligation.param_env.caller_bounds().iter().find_map(|pred| {
+ if let ty::PredicateKind::Projection(proj) = pred.kind().skip_binder()
+ && Some(proj.projection_ty.item_def_id) == self.tcx.lang_items().fn_once_output()
+ && proj.projection_ty.self_ty() == found
+ // args tuple will always be substs[1]
+ && let ty::Tuple(args) = proj.projection_ty.substs.type_at(1).kind()
+ {
+ Some((
+ DefIdOrName::Name("type parameter"),
+ pred.kind().rebind(proj.term.ty().unwrap()),
+ pred.kind().rebind(args.as_slice()),
+ ))
+ } else {
+ None
+ }
+ })
+ }
+ _ => None,
+ }) else { return false; };
+ let output = self.replace_bound_vars_with_fresh_vars(
+ obligation.cause.span,
+ LateBoundRegionConversionTime::FnCall,
+ output,
+ );
+ let inputs = inputs.skip_binder().iter().map(|ty| {
+ self.replace_bound_vars_with_fresh_vars(
+ obligation.cause.span,
+ LateBoundRegionConversionTime::FnCall,
+ inputs.rebind(*ty),
+ )
+ });
// Remapping bound vars here
- let trait_pred_and_self = trait_pred.map_bound(|trait_pred| (trait_pred, output_ty));
+ let trait_pred_and_self = trait_pred.map_bound(|trait_pred| (trait_pred, output));
let new_obligation =
self.mk_trait_obligation_with_new_self_ty(obligation.param_env, trait_pred_and_self);
-
- match self.evaluate_obligation(&new_obligation) {
- Ok(
- EvaluationResult::EvaluatedToOk
- | EvaluationResult::EvaluatedToOkModuloRegions
- | EvaluationResult::EvaluatedToOkModuloOpaqueTypes
- | EvaluationResult::EvaluatedToAmbig,
- ) => {}
- _ => return false,
+ if !self.predicate_must_hold_modulo_regions(&new_obligation) {
+ return false;
}
- let hir = self.tcx.hir();
+
// Get the name of the callable and the arguments to be used in the suggestion.
- let (snippet, sugg) = match hir.get_if_local(def_id) {
- Some(hir::Node::Expr(hir::Expr {
- kind: hir::ExprKind::Closure(hir::Closure { fn_decl, fn_decl_span, .. }),
- ..
- })) => {
- err.span_label(*fn_decl_span, "consider calling this closure");
- let Some(name) = self.get_closure_name(def_id, err, &msg) else {
- return false;
- };
- let args = fn_decl.inputs.iter().map(|_| "_").collect::<Vec<_>>().join(", ");
- let sugg = format!("({})", args);
- (format!("{}{}", name, sugg), sugg)
- }
- Some(hir::Node::Item(hir::Item {
- ident,
- kind: hir::ItemKind::Fn(.., body_id),
- ..
- })) => {
- err.span_label(ident.span, "consider calling this function");
- let body = hir.body(*body_id);
- let args = body
- .params
- .iter()
- .map(|arg| match &arg.pat.kind {
- hir::PatKind::Binding(_, _, ident, None)
- // FIXME: provide a better suggestion when encountering `SelfLower`, it
- // should suggest a method call.
- if ident.name != kw::SelfLower => ident.to_string(),
- _ => "_".to_string(),
- })
- .collect::<Vec<_>>()
- .join(", ");
- let sugg = format!("({})", args);
- (format!("{}{}", ident, sugg), sugg)
- }
- _ => return false,
+ let hir = self.tcx.hir();
+
+ let msg = match def_id_or_name {
+ DefIdOrName::DefId(def_id) => match self.tcx.def_kind(def_id) {
+ DefKind::Ctor(CtorOf::Struct, _) => {
+ "use parentheses to construct this tuple struct".to_string()
+ }
+ DefKind::Ctor(CtorOf::Variant, _) => {
+ "use parentheses to construct this tuple variant".to_string()
+ }
+ kind => format!("use parentheses to call this {}", kind.descr(def_id)),
+ },
+ DefIdOrName::Name(name) => format!("use parentheses to call this {name}"),
};
+
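+ // Render one `/* ty */` placeholder per expected argument for the suggestion.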
+ let args = inputs
+ .map(|ty| {
+ if ty.is_suggestable(self.tcx, false) {
+ format!("/* {ty} */")
+ } else {
+ "/* value */".to_string()
+ }
+ })
+ .collect::<Vec<_>>()
+ .join(", ");
+
if matches!(obligation.cause.code(), ObligationCauseCode::FunctionArgumentObligation { .. })
+ && obligation.cause.span.can_be_used_for_suggestions()
{
// When the obligation error has been ensured to have been caused by
// an argument, the `obligation.cause.span` points at the expression
@@ -860,11 +954,36 @@ impl<'a, 'tcx> InferCtxtExt<'tcx> for InferCtxt<'a, 'tcx> {
err.span_suggestion_verbose(
obligation.cause.span.shrink_to_hi(),
&msg,
- sugg,
+ format!("({args})"),
Applicability::HasPlaceholders,
);
- } else {
- err.help(&format!("{}: `{}`", msg, snippet));
+ } else if let DefIdOrName::DefId(def_id) = def_id_or_name {
+ let name = match hir.get_if_local(def_id) {
+ Some(hir::Node::Expr(hir::Expr {
+ kind: hir::ExprKind::Closure(hir::Closure { fn_decl_span, .. }),
+ ..
+ })) => {
+ err.span_label(*fn_decl_span, "consider calling this closure");
+ let Some(name) = self.get_closure_name(def_id, err, &msg) else {
+ return false;
+ };
+ name.to_string()
+ }
+ Some(hir::Node::Item(hir::Item { ident, kind: hir::ItemKind::Fn(..), .. })) => {
+ err.span_label(ident.span, "consider calling this function");
+ ident.to_string()
+ }
+ Some(hir::Node::Ctor(..)) => {
+ let name = self.tcx.def_path_str(def_id);
+ err.span_label(
+ self.tcx.def_span(def_id),
+ format!("consider calling the constructor for `{}`", name),
+ );
+ name
+ }
+ _ => return false,
+ };
+ err.help(&format!("{msg}: `{name}({args})`"));
}
true
}
@@ -882,6 +1001,10 @@ impl<'a, 'tcx> InferCtxtExt<'tcx> for InferCtxt<'a, 'tcx> {
obligation.cause.code()
{
&parent_code
+ } else if let ObligationCauseCode::ItemObligation(_)
+ | ObligationCauseCode::ExprItemObligation(..) = obligation.cause.code()
+ {
+ obligation.cause.code()
} else if let ExpnKind::Desugaring(DesugaringKind::ForLoop) =
span.ctxt().outer_expn_data().kind
{
@@ -906,102 +1029,121 @@ impl<'a, 'tcx> InferCtxtExt<'tcx> for InferCtxt<'a, 'tcx> {
let param_env = obligation.param_env;
// Try to apply the original trait binding obligation by borrowing.
- let mut try_borrowing =
- |old_pred: ty::PolyTraitPredicate<'tcx>, blacklist: &[DefId]| -> bool {
- if blacklist.contains(&old_pred.def_id()) {
- return false;
- }
- // We map bounds to `&T` and `&mut T`
- let trait_pred_and_imm_ref = old_pred.map_bound(|trait_pred| {
- (
- trait_pred,
- self.tcx.mk_imm_ref(self.tcx.lifetimes.re_static, trait_pred.self_ty()),
- )
- });
- let trait_pred_and_mut_ref = old_pred.map_bound(|trait_pred| {
+ let mut try_borrowing = |old_pred: ty::PolyTraitPredicate<'tcx>,
+ blacklist: &[DefId]|
+ -> bool {
+ if blacklist.contains(&old_pred.def_id()) {
+ return false;
+ }
+ // We map bounds to `&T` and `&mut T`
+ let trait_pred_and_imm_ref = old_pred.map_bound(|trait_pred| {
+ (
+ trait_pred,
+ self.tcx.mk_imm_ref(self.tcx.lifetimes.re_static, trait_pred.self_ty()),
+ )
+ });
+ let trait_pred_and_mut_ref = old_pred.map_bound(|trait_pred| {
+ (
+ trait_pred,
+ self.tcx.mk_mut_ref(self.tcx.lifetimes.re_static, trait_pred.self_ty()),
+ )
+ });
+
+ let mk_result = |trait_pred_and_new_ty| {
+ let obligation =
+ self.mk_trait_obligation_with_new_self_ty(param_env, trait_pred_and_new_ty);
+ self.predicate_must_hold_modulo_regions(&obligation)
+ };
+ let imm_ref_self_ty_satisfies_pred = mk_result(trait_pred_and_imm_ref);
+ let mut_ref_self_ty_satisfies_pred = mk_result(trait_pred_and_mut_ref);
+
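+ // If the self type is already a reference, also check whether the referent
+ // itself satisfies the predicate (and remember its mutability).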
+ let (ref_inner_ty_satisfies_pred, ref_inner_ty_mut) =
+ if let ObligationCauseCode::ItemObligation(_) | ObligationCauseCode::ExprItemObligation(..) = obligation.cause.code()
+ && let ty::Ref(_, ty, mutability) = old_pred.self_ty().skip_binder().kind()
+ {
(
- trait_pred,
- self.tcx.mk_mut_ref(self.tcx.lifetimes.re_static, trait_pred.self_ty()),
+ mk_result(old_pred.map_bound(|trait_pred| (trait_pred, *ty))),
+ matches!(mutability, hir::Mutability::Mut),
)
- });
-
- let mk_result = |trait_pred_and_new_ty| {
- let obligation =
- self.mk_trait_obligation_with_new_self_ty(param_env, trait_pred_and_new_ty);
- self.predicate_must_hold_modulo_regions(&obligation)
+ } else {
+ (false, false)
};
- let imm_result = mk_result(trait_pred_and_imm_ref);
- let mut_result = mk_result(trait_pred_and_mut_ref);
-
- if imm_result || mut_result {
- if let Ok(snippet) = self.tcx.sess.source_map().span_to_snippet(span) {
- // We have a very specific type of error, where just borrowing this argument
- // might solve the problem. In cases like this, the important part is the
- // original type obligation, not the last one that failed, which is arbitrary.
- // Because of this, we modify the error to refer to the original obligation and
- // return early in the caller.
-
- let msg = format!("the trait bound `{}` is not satisfied", old_pred);
- if has_custom_message {
- err.note(&msg);
- } else {
- err.message =
- vec![(rustc_errors::DiagnosticMessage::Str(msg), Style::NoStyle)];
- }
- if snippet.starts_with('&') {
- // This is already a literal borrow and the obligation is failing
- // somewhere else in the obligation chain. Do not suggest non-sense.
- return false;
- }
- err.span_label(
- span,
+
+ if imm_ref_self_ty_satisfies_pred
+ || mut_ref_self_ty_satisfies_pred
+ || ref_inner_ty_satisfies_pred
+ {
+ if let Ok(snippet) = self.tcx.sess.source_map().span_to_snippet(span) {
+ // We don't want a borrowing suggestion on the fields in structs,
+ // ```
+ // struct Foo {
+ // the_foos: Vec<Foo>
+ // }
+ // ```
+ if !matches!(
+ span.ctxt().outer_expn_data().kind,
+ ExpnKind::Root | ExpnKind::Desugaring(DesugaringKind::ForLoop)
+ ) {
+ return false;
+ }
+ if snippet.starts_with('&') {
+ // This is already a literal borrow and the obligation is failing
+ // somewhere else in the obligation chain. Do not suggest non-sense.
+ return false;
+ }
+ // We have a very specific type of error, where just borrowing this argument
+ // might solve the problem. In cases like this, the important part is the
+ // original type obligation, not the last one that failed, which is arbitrary.
+ // Because of this, we modify the error to refer to the original obligation and
+ // return early in the caller.
+
+ let msg = format!("the trait bound `{}` is not satisfied", old_pred);
+ if has_custom_message {
+ err.note(&msg);
+ } else {
+ err.message =
+ vec![(rustc_errors::DiagnosticMessage::Str(msg), Style::NoStyle)];
+ }
+ err.span_label(
+ span,
+ format!(
+ "the trait `{}` is not implemented for `{}`",
+ old_pred.print_modifiers_and_trait_path(),
+ old_pred.self_ty().skip_binder(),
+ ),
+ );
+
+ if imm_ref_self_ty_satisfies_pred && mut_ref_self_ty_satisfies_pred {
+ err.span_suggestions(
+ span.shrink_to_lo(),
+ "consider borrowing here",
+ ["&".to_string(), "&mut ".to_string()].into_iter(),
+ Applicability::MaybeIncorrect,
+ );
+ } else {
+ let is_mut = mut_ref_self_ty_satisfies_pred || ref_inner_ty_mut;
+ err.span_suggestion_verbose(
+ span.shrink_to_lo(),
&format!(
- "expected an implementor of trait `{}`",
- old_pred.print_modifiers_and_trait_path(),
+ "consider{} borrowing here",
+ if is_mut { " mutably" } else { "" }
),
+ format!("&{}", if is_mut { "mut " } else { "" }),
+ Applicability::MaybeIncorrect,
);
-
- // This if is to prevent a special edge-case
- if matches!(
- span.ctxt().outer_expn_data().kind,
- ExpnKind::Root | ExpnKind::Desugaring(DesugaringKind::ForLoop)
- ) {
- // We don't want a borrowing suggestion on the fields in structs,
- // ```
- // struct Foo {
- // the_foos: Vec<Foo>
- // }
- // ```
-
- if imm_result && mut_result {
- err.span_suggestions(
- span.shrink_to_lo(),
- "consider borrowing here",
- ["&".to_string(), "&mut ".to_string()].into_iter(),
- Applicability::MaybeIncorrect,
- );
- } else {
- err.span_suggestion_verbose(
- span.shrink_to_lo(),
- &format!(
- "consider{} borrowing here",
- if mut_result { " mutably" } else { "" }
- ),
- format!("&{}", if mut_result { "mut " } else { "" }),
- Applicability::MaybeIncorrect,
- );
- }
- }
- return true;
}
+ return true;
}
- return false;
- };
+ }
+ return false;
+ };
if let ObligationCauseCode::ImplDerivedObligation(cause) = &*code {
try_borrowing(cause.derived.parent_trait_pred, &[])
} else if let ObligationCauseCode::BindingObligation(_, _)
- | ObligationCauseCode::ItemObligation(_) = code
+ | ObligationCauseCode::ItemObligation(_)
+ | ObligationCauseCode::ExprItemObligation(..)
+ | ObligationCauseCode::ExprBindingObligation(..) = code
{
try_borrowing(poly_trait_pred, &never_suggest_borrow)
} else {
@@ -1017,7 +1159,7 @@ impl<'a, 'tcx> InferCtxtExt<'tcx> for InferCtxt<'a, 'tcx> {
self_ty: Ty<'tcx>,
object_ty: Ty<'tcx>,
) {
- let ty::Dynamic(predicates, _) = object_ty.kind() else { return; };
+ let ty::Dynamic(predicates, _, ty::Dyn) = object_ty.kind() else { return; };
let self_ref_ty = self.tcx.mk_imm_ref(self.tcx.lifetimes.re_erased, self_ty);
for predicate in predicates.iter() {
@@ -1110,8 +1252,8 @@ impl<'a, 'tcx> InferCtxtExt<'tcx> for InferCtxt<'a, 'tcx> {
// and if not maybe suggest doing something else? If we kept the expression around we
// could also check if it is an fn call (very likely) and suggest changing *that*, if
// it is from the local crate.
- err.span_suggestion_verbose(
- expr.span.shrink_to_hi().with_hi(span.hi()),
+ err.span_suggestion(
+ span,
"remove the `.await`",
"",
Applicability::MachineApplicable,
@@ -1126,8 +1268,7 @@ impl<'a, 'tcx> InferCtxtExt<'tcx> for InferCtxt<'a, 'tcx> {
&format!("this call returns `{}`", pred.self_ty()),
);
}
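A hypothetical reproduction of the case handled above: `.await` applied to a value that is not a future, which now gets the "remove the `.await`" suggestion plus the "this call returns" note.

async fn demo() -> u32 {
    fn produce() -> u32 {
        1
    }
    // error[E0277]: `u32` is not a future
    // help: remove the `.await`
    // note: this call returns `u32`
    produce().await
}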
- if let Some(typeck_results) =
- self.in_progress_typeck_results.map(|t| t.borrow())
+ if let Some(typeck_results) = &self.typeck_results
&& let ty = typeck_results.expr_ty_adjusted(base)
&& let ty::FnDef(def_id, _substs) = ty.kind()
&& let Some(hir::Node::Item(hir::Item { ident, span, vis_span, .. })) =
@@ -1181,7 +1322,7 @@ impl<'a, 'tcx> InferCtxtExt<'tcx> for InferCtxt<'a, 'tcx> {
return;
}
let trait_pred = self.resolve_vars_if_possible(trait_pred);
- if trait_pred.has_infer_types_or_consts() {
+ if trait_pred.has_non_region_infer() {
// Do not ICE while trying to find if a reborrow would succeed on a trait with
// unresolved bindings.
return;
@@ -1250,8 +1391,8 @@ impl<'a, 'tcx> InferCtxtExt<'tcx> for InferCtxt<'a, 'tcx> {
&& let Some(stmt) = blk.stmts.last()
&& let hir::StmtKind::Semi(expr) = stmt.kind
// Only suggest this if the expression behind the semicolon implements the predicate
- && let Some(typeck_results) = self.in_progress_typeck_results
- && let Some(ty) = typeck_results.borrow().expr_ty_opt(expr)
+ && let Some(typeck_results) = &self.typeck_results
+ && let Some(ty) = typeck_results.expr_ty_opt(expr)
&& self.predicate_may_hold(&self.mk_trait_obligation_with_new_self_ty(
obligation.param_env, trait_pred.map_bound(|trait_pred| (trait_pred, ty))
))
@@ -1315,7 +1456,7 @@ impl<'a, 'tcx> InferCtxtExt<'tcx> for InferCtxt<'a, 'tcx> {
let trait_pred = self.resolve_vars_if_possible(trait_pred);
let ty = trait_pred.skip_binder().self_ty();
let is_object_safe = match ty.kind() {
- ty::Dynamic(predicates, _) => {
+ ty::Dynamic(predicates, _, ty::Dyn) => {
// If the `dyn Trait` is not object safe, do not suggest `Box<dyn Trait>`.
predicates
.principal_def_id()
@@ -1340,7 +1481,7 @@ impl<'a, 'tcx> InferCtxtExt<'tcx> for InferCtxt<'a, 'tcx> {
let mut visitor = ReturnsVisitor::default();
visitor.visit_body(&body);
- let typeck_results = self.in_progress_typeck_results.map(|t| t.borrow()).unwrap();
+ let typeck_results = self.typeck_results.as_ref().unwrap();
let Some(liberated_sig) = typeck_results.liberated_fn_sigs().get(fn_hir_id).copied() else { return false; };
let ret_types = visitor
@@ -1375,7 +1516,7 @@ impl<'a, 'tcx> InferCtxtExt<'tcx> for InferCtxt<'a, 'tcx> {
let mut spans_and_needs_box = vec![];
match liberated_sig.output().kind() {
- ty::Dynamic(predicates, _) => {
+ ty::Dynamic(predicates, _, ty::Dyn) => {
let cause = ObligationCause::misc(ret_ty.span, fn_hir_id);
let param_env = ty::ParamEnv::empty();
@@ -1523,7 +1664,7 @@ impl<'a, 'tcx> InferCtxtExt<'tcx> for InferCtxt<'a, 'tcx> {
// Point at all the `return`s in the function as they have failed trait bounds.
let mut visitor = ReturnsVisitor::default();
visitor.visit_body(&body);
- let typeck_results = self.in_progress_typeck_results.map(|t| t.borrow()).unwrap();
+ let typeck_results = self.typeck_results.as_ref().unwrap();
for expr in &visitor.returns {
if let Some(returned_ty) = typeck_results.node_type_opt(expr.hir_id) {
let ty = self.resolve_vars_if_possible(returned_ty);
@@ -1539,34 +1680,41 @@ impl<'a, 'tcx> InferCtxtExt<'tcx> for InferCtxt<'a, 'tcx> {
found_span: Option<Span>,
found: ty::PolyTraitRef<'tcx>,
expected: ty::PolyTraitRef<'tcx>,
+ cause: &ObligationCauseCode<'tcx>,
) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> {
pub(crate) fn build_fn_sig_ty<'tcx>(
- tcx: TyCtxt<'tcx>,
+ infcx: &InferCtxt<'tcx>,
trait_ref: ty::PolyTraitRef<'tcx>,
) -> Ty<'tcx> {
let inputs = trait_ref.skip_binder().substs.type_at(1);
let sig = match inputs.kind() {
ty::Tuple(inputs)
- if tcx.fn_trait_kind_from_lang_item(trait_ref.def_id()).is_some() =>
+ if infcx.tcx.fn_trait_kind_from_lang_item(trait_ref.def_id()).is_some() =>
{
- tcx.mk_fn_sig(
+ infcx.tcx.mk_fn_sig(
inputs.iter(),
- tcx.mk_ty_infer(ty::TyVar(ty::TyVid::from_u32(0))),
+ infcx.next_ty_var(TypeVariableOrigin {
+ span: DUMMY_SP,
+ kind: TypeVariableOriginKind::MiscVariable,
+ }),
false,
hir::Unsafety::Normal,
abi::Abi::Rust,
)
}
- _ => tcx.mk_fn_sig(
+ _ => infcx.tcx.mk_fn_sig(
std::iter::once(inputs),
- tcx.mk_ty_infer(ty::TyVar(ty::TyVid::from_u32(0))),
+ infcx.next_ty_var(TypeVariableOrigin {
+ span: DUMMY_SP,
+ kind: TypeVariableOriginKind::MiscVariable,
+ }),
false,
hir::Unsafety::Normal,
abi::Abi::Rust,
),
};
- tcx.mk_fn_ptr(trait_ref.rebind(sig))
+ infcx.tcx.mk_fn_ptr(trait_ref.rebind(sig))
}
let argument_kind = match expected.skip_binder().self_ty().kind() {
@@ -1586,18 +1734,76 @@ impl<'a, 'tcx> InferCtxtExt<'tcx> for InferCtxt<'a, 'tcx> {
let found_span = found_span.unwrap_or(span);
err.span_label(found_span, "found signature defined here");
- let expected = build_fn_sig_ty(self.tcx, expected);
- let found = build_fn_sig_ty(self.tcx, found);
+ let expected = build_fn_sig_ty(self, expected);
+ let found = build_fn_sig_ty(self, found);
- let (expected_str, found_str) =
- self.tcx.infer_ctxt().enter(|infcx| infcx.cmp(expected, found));
+ let (expected_str, found_str) = self.cmp(expected, found);
let signature_kind = format!("{argument_kind} signature");
err.note_expected_found(&signature_kind, expected_str, &signature_kind, found_str);
+ self.note_conflicting_closure_bounds(cause, &mut err);
+
err
}
+ // Add a note if there are two `Fn`-family bounds that have conflicting argument
+ // requirements, which will always cause a closure to have a type error.
+ fn note_conflicting_closure_bounds(
+ &self,
+ cause: &ObligationCauseCode<'tcx>,
+ err: &mut DiagnosticBuilder<'tcx, ErrorGuaranteed>,
+ ) {
+ // First, look for an `ExprBindingObligation`, which means we can get
+ // the unsubstituted predicate list of the called function. And check
+ // that the predicate that we failed to satisfy is a `Fn`-like trait.
+ if let ObligationCauseCode::ExprBindingObligation(def_id, _, _, idx) = cause
+ && let predicates = self.tcx.predicates_of(def_id).instantiate_identity(self.tcx)
+ && let Some(pred) = predicates.predicates.get(*idx)
+ && let ty::PredicateKind::Trait(trait_pred) = pred.kind().skip_binder()
+ && ty::ClosureKind::from_def_id(self.tcx, trait_pred.def_id()).is_some()
+ {
+ let expected_self =
+ self.tcx.anonymize_late_bound_regions(pred.kind().rebind(trait_pred.self_ty()));
+ let expected_substs = self
+ .tcx
+ .anonymize_late_bound_regions(pred.kind().rebind(trait_pred.trait_ref.substs));
+
+ // Find another predicate whose self-type is equal to the expected self type,
+ // but whose substs don't match.
+ let other_pred = std::iter::zip(&predicates.predicates, &predicates.spans)
+ .enumerate()
+ .find(|(other_idx, (pred, _))| match pred.kind().skip_binder() {
+ ty::PredicateKind::Trait(trait_pred)
+ if ty::ClosureKind::from_def_id(self.tcx, trait_pred.def_id())
+ .is_some()
+ && other_idx != idx
+ // Make sure that the self type matches
+ // (i.e. constraining this closure)
+ && expected_self
+ == self.tcx.anonymize_late_bound_regions(
+ pred.kind().rebind(trait_pred.self_ty()),
+ )
+ // But the substs don't match (i.e. incompatible args)
+ && expected_substs
+ != self.tcx.anonymize_late_bound_regions(
+ pred.kind().rebind(trait_pred.trait_ref.substs),
+ ) =>
+ {
+ true
+ }
+ _ => false,
+ });
+ // If we found one, then it's very likely the cause of the error.
+ if let Some((_, (_, other_pred_span))) = other_pred {
+ err.span_note(
+ *other_pred_span,
+ "closure inferred to have a different signature due to this bound",
+ );
+ }
+ }
+ }
+
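A minimal sketch (hypothetical names) of the situation `note_conflicting_closure_bounds` reports: two `Fn`-family bounds on the same type parameter with incompatible argument types, so any closure passed in necessarily fails one of them.

fn call_both<F>(_f: F)
where
    F: Fn(u32),
    F: Fn(String), // a closure has a single signature, so this bound conflicts
{
}

fn main() {
    // The closure's signature is inferred from the first bound (`Fn(u32)`),
    // so `F: Fn(String)` cannot hold; the new note points at that second bound.
    call_both(|_x| ());
}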
fn suggest_fully_qualified_path(
&self,
err: &mut Diagnostic,
@@ -1786,12 +1992,11 @@ impl<'a, 'tcx> InferCtxtExt<'tcx> for InferCtxt<'a, 'tcx> {
let span = self.tcx.def_span(generator_did);
- let in_progress_typeck_results = self.in_progress_typeck_results.map(|t| t.borrow());
let generator_did_root = self.tcx.typeck_root_def_id(generator_did);
debug!(
?generator_did,
?generator_did_root,
- in_progress_typeck_results.hir_owner = ?in_progress_typeck_results.as_ref().map(|t| t.hir_owner),
+ typeck_results.hir_owner = ?self.typeck_results.as_ref().map(|t| t.hir_owner),
?span,
);
@@ -1846,7 +2051,7 @@ impl<'a, 'tcx> InferCtxtExt<'tcx> for InferCtxt<'a, 'tcx> {
// type-checking; otherwise, get them by performing a query. This is needed to avoid
// cycles. If we can't use resolved types because the generator comes from another crate,
// we still provide a targeted error but without all the relevant spans.
- let generator_data: Option<GeneratorData<'tcx, '_>> = match &in_progress_typeck_results {
+ let generator_data: Option<GeneratorData<'tcx, '_>> = match &self.typeck_results {
Some(t) if t.hir_owner.to_def_id() == generator_did_root => {
Some(GeneratorData::Local(&t))
}
@@ -2201,7 +2406,8 @@ impl<'a, 'tcx> InferCtxtExt<'tcx> for InferCtxt<'a, 'tcx> {
| ObligationCauseCode::QuestionMark
| ObligationCauseCode::CheckAssociatedTypeBounds { .. }
| ObligationCauseCode::LetElse
- | ObligationCauseCode::BinOp { .. } => {}
+ | ObligationCauseCode::BinOp { .. }
+ | ObligationCauseCode::AscribeUserTypeProvePredicate(..) => {}
ObligationCauseCode::SliceOrArrayElem => {
err.note("slice and array elements must have `Sized` type");
}
@@ -2223,11 +2429,13 @@ impl<'a, 'tcx> InferCtxtExt<'tcx> for InferCtxt<'a, 'tcx> {
region, object_ty,
));
}
- ObligationCauseCode::ItemObligation(_item_def_id) => {
+ ObligationCauseCode::ItemObligation(_)
+ | ObligationCauseCode::ExprItemObligation(..) => {
// We hold the `DefId` of the item introducing the obligation, but displaying it
// doesn't add user usable information. It always point at an associated item.
}
- ObligationCauseCode::BindingObligation(item_def_id, span) => {
+ ObligationCauseCode::BindingObligation(item_def_id, span)
+ | ObligationCauseCode::ExprBindingObligation(item_def_id, span, ..) => {
let item_name = tcx.def_path_str(item_def_id);
let mut multispan = MultiSpan::from(span);
if let Some(ident) = tcx.opt_item_ident(item_def_id) {
@@ -2537,9 +2745,9 @@ impl<'a, 'tcx> InferCtxtExt<'tcx> for InferCtxt<'a, 'tcx> {
parent_trait_pred.remap_constness_diag(param_env);
let parent_def_id = parent_trait_pred.def_id();
let msg = format!(
- "required because of the requirements on the impl of `{}` for `{}`",
- parent_trait_pred.print_modifiers_and_trait_path(),
- parent_trait_pred.skip_binder().self_ty()
+ "required for `{}` to implement `{}`",
+ parent_trait_pred.skip_binder().self_ty(),
+ parent_trait_pred.print_modifiers_and_trait_path()
);
let mut is_auto_trait = false;
match self.tcx.hir().get_if_local(data.impl_def_id) {
@@ -2608,9 +2816,9 @@ impl<'a, 'tcx> InferCtxtExt<'tcx> for InferCtxt<'a, 'tcx> {
pluralize!(count)
));
err.note(&format!(
- "required because of the requirements on the impl of `{}` for `{}`",
- parent_trait_pred.print_modifiers_and_trait_path(),
- parent_trait_pred.skip_binder().self_ty()
+ "required for `{}` to implement `{}`",
+ parent_trait_pred.skip_binder().self_ty(),
+ parent_trait_pred.print_modifiers_and_trait_path()
));
}
// #74711: avoid a stack overflow
@@ -2649,19 +2857,18 @@ impl<'a, 'tcx> InferCtxtExt<'tcx> for InferCtxt<'a, 'tcx> {
if let Some(Node::Expr(expr @ hir::Expr { kind: hir::ExprKind::Block(..), .. })) =
hir.find(arg_hir_id)
{
- let in_progress_typeck_results =
- self.in_progress_typeck_results.map(|t| t.borrow());
let parent_id = hir.get_parent_item(arg_hir_id);
- let typeck_results: &TypeckResults<'tcx> = match &in_progress_typeck_results {
+ let typeck_results: &TypeckResults<'tcx> = match &self.typeck_results {
Some(t) if t.hir_owner == parent_id => t,
- _ => self.tcx.typeck(parent_id),
+ _ => self.tcx.typeck(parent_id.def_id),
};
- let ty = typeck_results.expr_ty_adjusted(expr);
- let span = expr.peel_blocks().span;
+ let expr = expr.peel_blocks();
+ let ty = typeck_results.expr_ty_adjusted_opt(expr).unwrap_or(tcx.ty_error());
+ let span = expr.span;
if Some(span) != err.span.primary_span() {
err.span_label(
span,
- &if ty.references_error() {
+ if ty.references_error() {
String::new()
} else {
format!("this tail expression is of type `{:?}`", ty)
@@ -2738,19 +2945,6 @@ impl<'a, 'tcx> InferCtxtExt<'tcx> for InferCtxt<'a, 'tcx> {
}
}
- fn suggest_new_overflow_limit(&self, err: &mut Diagnostic) {
- let suggested_limit = match self.tcx.recursion_limit() {
- Limit(0) => Limit(2),
- limit => limit * 2,
- };
- err.help(&format!(
- "consider increasing the recursion limit by adding a \
- `#![recursion_limit = \"{}\"]` attribute to your crate (`{}`)",
- suggested_limit,
- self.tcx.crate_name(LOCAL_CRATE),
- ));
- }
-
#[instrument(
level = "debug", skip(self, err), fields(trait_pred.self_ty = ?trait_pred.self_ty())
)]
@@ -2832,19 +3026,15 @@ impl<'a, 'tcx> InferCtxtExt<'tcx> for InferCtxt<'a, 'tcx> {
ObligationCauseCode::BinOp { rhs_span: Some(span), is_lit, .. } if *is_lit => span,
_ => return,
};
- match (
- trait_ref.skip_binder().self_ty().kind(),
- trait_ref.skip_binder().substs.type_at(1).kind(),
- ) {
- (ty::Float(_), ty::Infer(InferTy::IntVar(_))) => {
- err.span_suggestion_verbose(
- rhs_span.shrink_to_hi(),
- "consider using a floating-point literal by writing it with `.0`",
- ".0",
- Applicability::MaybeIncorrect,
- );
- }
- _ => {}
+ if let ty::Float(_) = trait_ref.skip_binder().self_ty().kind()
+ && let ty::Infer(InferTy::IntVar(_)) = trait_ref.skip_binder().substs.type_at(1).kind()
+ {
+ err.span_suggestion_verbose(
+ rhs_span.shrink_to_hi(),
+ "consider using a floating-point literal by writing it with `.0`",
+ ".0",
+ Applicability::MaybeIncorrect,
+ );
}
}
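For reference, a hypothetical snippet that hits the simplified branch above: a float on the left of a binary operator with an integer literal on the right, where the `.0` suggestion applies.

fn main() {
    let x = 2.5;
    // error[E0277]: cannot add `{integer}` to `{float}`
    // help: consider using a floating-point literal by writing it with `.0`
    let _sum = x + 1;
}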
diff --git a/compiler/rustc_trait_selection/src/traits/fulfill.rs b/compiler/rustc_trait_selection/src/traits/fulfill.rs
index 556ef466c..a417e1440 100644
--- a/compiler/rustc_trait_selection/src/traits/fulfill.rs
+++ b/compiler/rustc_trait_selection/src/traits/fulfill.rs
@@ -25,10 +25,9 @@ use super::Unimplemented;
use super::{FulfillmentError, FulfillmentErrorCode};
use super::{ObligationCause, PredicateObligation};
-use crate::traits::error_reporting::InferCtxtExt as _;
use crate::traits::project::PolyProjectionObligation;
use crate::traits::project::ProjectionCacheKeyExt as _;
-use crate::traits::query::evaluate_obligation::InferCtxtExt as _;
+use crate::traits::query::evaluate_obligation::InferCtxtExt;
impl<'tcx> ForestObligation for PendingPredicateObligation<'tcx> {
/// Note that we include both the `ParamEnv` and the `Predicate`,
@@ -103,7 +102,7 @@ impl<'a, 'tcx> FulfillmentContext<'tcx> {
}
/// Attempts to select obligations using `selcx`.
- fn select(&mut self, selcx: &mut SelectionContext<'a, 'tcx>) -> Vec<FulfillmentError<'tcx>> {
+ fn select(&mut self, selcx: SelectionContext<'a, 'tcx>) -> Vec<FulfillmentError<'tcx>> {
let span = debug_span!("select", obligation_forest_size = ?self.predicates.len());
let _enter = span.enter();
@@ -135,10 +134,10 @@ impl<'tcx> TraitEngine<'tcx> for FulfillmentContext<'tcx> {
/// `SomeTrait` or a where-clause that lets us unify `$0` with
/// something concrete. If this fails, we'll unify `$0` with
/// `projection_ty` again.
- #[tracing::instrument(level = "debug", skip(self, infcx, param_env, cause))]
+ #[instrument(level = "debug", skip(self, infcx, param_env, cause))]
fn normalize_projection_type(
&mut self,
- infcx: &InferCtxt<'_, 'tcx>,
+ infcx: &InferCtxt<'tcx>,
param_env: ty::ParamEnv<'tcx>,
projection_ty: ty::ProjectionTy<'tcx>,
cause: ObligationCause<'tcx>,
@@ -166,7 +165,7 @@ impl<'tcx> TraitEngine<'tcx> for FulfillmentContext<'tcx> {
fn register_predicate_obligation(
&mut self,
- infcx: &InferCtxt<'_, 'tcx>,
+ infcx: &InferCtxt<'tcx>,
obligation: PredicateObligation<'tcx>,
) {
// this helps to reduce duplicate errors, as well as making
@@ -183,7 +182,7 @@ impl<'tcx> TraitEngine<'tcx> for FulfillmentContext<'tcx> {
.register_obligation(PendingPredicateObligation { obligation, stalled_on: vec![] });
}
- fn select_all_or_error(&mut self, infcx: &InferCtxt<'_, 'tcx>) -> Vec<FulfillmentError<'tcx>> {
+ fn select_all_or_error(&mut self, infcx: &InferCtxt<'tcx>) -> Vec<FulfillmentError<'tcx>> {
{
let errors = self.select_where_possible(infcx);
if !errors.is_empty() {
@@ -194,12 +193,9 @@ impl<'tcx> TraitEngine<'tcx> for FulfillmentContext<'tcx> {
self.predicates.to_errors(CodeAmbiguity).into_iter().map(to_fulfillment_error).collect()
}
- fn select_where_possible(
- &mut self,
- infcx: &InferCtxt<'_, 'tcx>,
- ) -> Vec<FulfillmentError<'tcx>> {
- let mut selcx = SelectionContext::new(infcx);
- self.select(&mut selcx)
+ fn select_where_possible(&mut self, infcx: &InferCtxt<'tcx>) -> Vec<FulfillmentError<'tcx>> {
+ let selcx = SelectionContext::new(infcx);
+ self.select(selcx)
}
fn pending_obligations(&self) -> Vec<PredicateObligation<'tcx>> {
@@ -211,8 +207,8 @@ impl<'tcx> TraitEngine<'tcx> for FulfillmentContext<'tcx> {
}
}
-struct FulfillProcessor<'a, 'b, 'tcx> {
- selcx: &'a mut SelectionContext<'b, 'tcx>,
+struct FulfillProcessor<'a, 'tcx> {
+ selcx: SelectionContext<'a, 'tcx>,
}
fn mk_pending(os: Vec<PredicateObligation<'_>>) -> Vec<PendingPredicateObligation<'_>> {
@@ -221,9 +217,10 @@ fn mk_pending(os: Vec<PredicateObligation<'_>>) -> Vec<PendingPredicateObligatio
.collect()
}
-impl<'a, 'b, 'tcx> ObligationProcessor for FulfillProcessor<'a, 'b, 'tcx> {
+impl<'a, 'tcx> ObligationProcessor for FulfillProcessor<'a, 'tcx> {
type Obligation = PendingPredicateObligation<'tcx>;
type Error = FulfillmentErrorCode<'tcx>;
+ type OUT = Outcome<Self::Obligation, Self::Error>;
/// Identifies whether a predicate obligation needs processing.
///
@@ -279,7 +276,7 @@ impl<'a, 'b, 'tcx> ObligationProcessor for FulfillProcessor<'a, 'b, 'tcx> {
debug!(?obligation, "pre-resolve");
- if obligation.predicate.has_infer_types_or_consts() {
+ if obligation.predicate.has_non_region_infer() {
obligation.predicate =
self.selcx.infcx().resolve_vars_if_possible(obligation.predicate);
}
@@ -291,7 +288,7 @@ impl<'a, 'b, 'tcx> ObligationProcessor for FulfillProcessor<'a, 'b, 'tcx> {
if obligation.predicate.has_projections() {
let mut obligations = Vec::new();
let predicate = crate::traits::project::try_normalize_with_depth_to(
- self.selcx,
+ &mut self.selcx,
obligation.param_env,
obligation.cause.clone(),
obligation.recursion_depth + 1,
@@ -358,7 +355,7 @@ impl<'a, 'b, 'tcx> ObligationProcessor for FulfillProcessor<'a, 'b, 'tcx> {
}
ty::PredicateKind::RegionOutlives(data) => {
- if infcx.considering_regions || data.has_placeholders() {
+ if infcx.considering_regions {
infcx.region_outlives_predicate(&obligation.cause, Binder::dummy(data));
}
@@ -427,16 +424,14 @@ impl<'a, 'b, 'tcx> ObligationProcessor for FulfillProcessor<'a, 'b, 'tcx> {
obligation.param_env,
Binder::dummy(subtype),
) {
- None => {
+ Err((a, b)) => {
// None means that both are unresolved.
- pending_obligation.stalled_on = vec![
- TyOrConstInferVar::maybe_from_ty(subtype.a).unwrap(),
- TyOrConstInferVar::maybe_from_ty(subtype.b).unwrap(),
- ];
+ pending_obligation.stalled_on =
+ vec![TyOrConstInferVar::Ty(a), TyOrConstInferVar::Ty(b)];
ProcessResult::Unchanged
}
- Some(Ok(ok)) => ProcessResult::Changed(mk_pending(ok.obligations)),
- Some(Err(err)) => {
+ Ok(Ok(ok)) => ProcessResult::Changed(mk_pending(ok.obligations)),
+ Ok(Err(err)) => {
let expected_found =
ExpectedFound::new(subtype.a_is_expected, subtype.a, subtype.b);
ProcessResult::Error(FulfillmentErrorCode::CodeSubtypeError(
@@ -453,16 +448,14 @@ impl<'a, 'b, 'tcx> ObligationProcessor for FulfillProcessor<'a, 'b, 'tcx> {
obligation.param_env,
Binder::dummy(coerce),
) {
- None => {
+ Err((a, b)) => {
// None means that both are unresolved.
- pending_obligation.stalled_on = vec![
- TyOrConstInferVar::maybe_from_ty(coerce.a).unwrap(),
- TyOrConstInferVar::maybe_from_ty(coerce.b).unwrap(),
- ];
+ pending_obligation.stalled_on =
+ vec![TyOrConstInferVar::Ty(a), TyOrConstInferVar::Ty(b)];
ProcessResult::Unchanged
}
- Some(Ok(ok)) => ProcessResult::Changed(mk_pending(ok.obligations)),
- Some(Err(err)) => {
+ Ok(Ok(ok)) => ProcessResult::Changed(mk_pending(ok.obligations)),
+ Ok(Err(err)) => {
let expected_found = ExpectedFound::new(false, coerce.a, coerce.b);
ProcessResult::Error(FulfillmentErrorCode::CodeSubtypeError(
expected_found,
@@ -483,9 +476,7 @@ impl<'a, 'b, 'tcx> ObligationProcessor for FulfillProcessor<'a, 'b, 'tcx> {
Err(NotConstEvaluatable::MentionsInfer) => {
pending_obligation.stalled_on.clear();
pending_obligation.stalled_on.extend(
- uv.substs
- .iter()
- .filter_map(TyOrConstInferVar::maybe_from_generic_arg),
+ uv.walk().filter_map(TyOrConstInferVar::maybe_from_generic_arg),
);
ProcessResult::Unchanged
}
@@ -499,23 +490,20 @@ impl<'a, 'b, 'tcx> ObligationProcessor for FulfillProcessor<'a, 'b, 'tcx> {
}
ty::PredicateKind::ConstEquate(c1, c2) => {
+ assert!(
+ self.selcx.tcx().features().generic_const_exprs,
+ "`ConstEquate` without a feature gate: {c1:?} {c2:?}",
+ );
debug!(?c1, ?c2, "equating consts");
- let tcx = self.selcx.tcx();
- if tcx.features().generic_const_exprs {
- // FIXME: we probably should only try to unify abstract constants
- // if the constants depend on generic parameters.
- //
- // Let's just see where this breaks :shrug:
- if let (ty::ConstKind::Unevaluated(a), ty::ConstKind::Unevaluated(b)) =
- (c1.kind(), c2.kind())
- {
- if infcx.try_unify_abstract_consts(
- a.shrink(),
- b.shrink(),
- obligation.param_env,
- ) {
- return ProcessResult::Changed(vec![]);
- }
+ // FIXME: we probably should only try to unify abstract constants
+ // if the constants depend on generic parameters.
+ //
+ // Let's just see where this breaks :shrug:
+ if let (ty::ConstKind::Unevaluated(a), ty::ConstKind::Unevaluated(b)) =
+ (c1.kind(), c2.kind())
+ {
+ if infcx.try_unify_abstract_consts(a, b, obligation.param_env) {
+ return ProcessResult::Changed(vec![]);
}
}
@@ -577,7 +565,7 @@ impl<'a, 'b, 'tcx> ObligationProcessor for FulfillProcessor<'a, 'b, 'tcx> {
)
}
(Err(ErrorHandled::TooGeneric), _) | (_, Err(ErrorHandled::TooGeneric)) => {
- if c1.has_infer_types_or_consts() || c2.has_infer_types_or_consts() {
+ if c1.has_non_region_infer() || c2.has_non_region_infer() {
ProcessResult::Unchanged
} else {
// Two different constants using generic parameters ~> error.
@@ -597,23 +585,26 @@ impl<'a, 'b, 'tcx> ObligationProcessor for FulfillProcessor<'a, 'b, 'tcx> {
}
}
+ #[inline(never)]
fn process_backedge<'c, I>(
&mut self,
cycle: I,
_marker: PhantomData<&'c PendingPredicateObligation<'tcx>>,
- ) where
+ ) -> Result<(), FulfillmentErrorCode<'tcx>>
+ where
I: Clone + Iterator<Item = &'c PendingPredicateObligation<'tcx>>,
{
if self.selcx.coinductive_match(cycle.clone().map(|s| s.obligation.predicate)) {
debug!("process_child_obligations: coinductive match");
+ Ok(())
} else {
let cycle: Vec<_> = cycle.map(|c| c.obligation.clone()).collect();
- self.selcx.infcx().report_overflow_error_cycle(&cycle);
+ Err(FulfillmentErrorCode::CodeCycle(cycle))
}
}
}
-impl<'a, 'b, 'tcx> FulfillProcessor<'a, 'b, 'tcx> {
+impl<'a, 'tcx> FulfillProcessor<'a, 'tcx> {
#[instrument(level = "debug", skip(self, obligation, stalled_on))]
fn process_trait_obligation(
&mut self,
@@ -648,7 +639,7 @@ impl<'a, 'b, 'tcx> FulfillProcessor<'a, 'b, 'tcx> {
// information about the types in the trait.
stalled_on.clear();
stalled_on.extend(substs_infer_vars(
- self.selcx,
+ &self.selcx,
trait_obligation.predicate.map_bound(|pred| pred.trait_ref.substs),
));
@@ -700,12 +691,12 @@ impl<'a, 'b, 'tcx> FulfillProcessor<'a, 'b, 'tcx> {
}
}
- match project::poly_project_and_unify_type(self.selcx, &project_obligation) {
+ match project::poly_project_and_unify_type(&mut self.selcx, &project_obligation) {
ProjectAndUnifyResult::Holds(os) => ProcessResult::Changed(mk_pending(os)),
ProjectAndUnifyResult::FailedNormalization => {
stalled_on.clear();
stalled_on.extend(substs_infer_vars(
- self.selcx,
+ &self.selcx,
project_obligation.predicate.map_bound(|pred| pred.projection_ty.substs),
));
ProcessResult::Unchanged
@@ -723,7 +714,7 @@ impl<'a, 'b, 'tcx> FulfillProcessor<'a, 'b, 'tcx> {
/// Returns the set of inference variables contained in `substs`.
fn substs_infer_vars<'a, 'tcx>(
- selcx: &mut SelectionContext<'a, 'tcx>,
+ selcx: &SelectionContext<'a, 'tcx>,
substs: ty::Binder<'tcx, SubstsRef<'tcx>>,
) -> impl Iterator<Item = TyOrConstInferVar<'tcx>> {
selcx
@@ -731,11 +722,11 @@ fn substs_infer_vars<'a, 'tcx>(
.resolve_vars_if_possible(substs)
.skip_binder() // ok because this check doesn't care about regions
.iter()
- .filter(|arg| arg.has_infer_types_or_consts())
+ .filter(|arg| arg.has_non_region_infer())
.flat_map(|arg| {
let mut walker = arg.walk();
while let Some(c) = walker.next() {
- if !c.has_infer_types_or_consts() {
+ if !c.has_non_region_infer() {
walker.visited.remove(&c);
walker.skip_current_subtree();
}
diff --git a/compiler/rustc_trait_selection/src/traits/misc.rs b/compiler/rustc_trait_selection/src/traits/misc.rs
index dd2769c71..be603c609 100644
--- a/compiler/rustc_trait_selection/src/traits/misc.rs
+++ b/compiler/rustc_trait_selection/src/traits/misc.rs
@@ -7,7 +7,7 @@ use rustc_hir as hir;
use rustc_infer::infer::TyCtxtInferExt;
use rustc_middle::ty::{self, Ty, TyCtxt, TypeVisitable};
-use crate::traits::error_reporting::InferCtxtExt;
+use crate::traits::error_reporting::TypeErrCtxtExt;
#[derive(Clone)]
pub enum CopyImplementationError<'tcx> {
@@ -23,66 +23,64 @@ pub fn can_type_implement_copy<'tcx>(
parent_cause: ObligationCause<'tcx>,
) -> Result<(), CopyImplementationError<'tcx>> {
// FIXME: (@jroesch) float this code up
- tcx.infer_ctxt().enter(|infcx| {
- let (adt, substs) = match self_type.kind() {
- // These types used to have a builtin impl.
- // Now libcore provides that impl.
- ty::Uint(_)
- | ty::Int(_)
- | ty::Bool
- | ty::Float(_)
- | ty::Char
- | ty::RawPtr(..)
- | ty::Never
- | ty::Ref(_, _, hir::Mutability::Not)
- | ty::Array(..) => return Ok(()),
+ let infcx = tcx.infer_ctxt().build();
+ let (adt, substs) = match self_type.kind() {
+ // These types used to have a builtin impl.
+ // Now libcore provides that impl.
+ ty::Uint(_)
+ | ty::Int(_)
+ | ty::Bool
+ | ty::Float(_)
+ | ty::Char
+ | ty::RawPtr(..)
+ | ty::Never
+ | ty::Ref(_, _, hir::Mutability::Not)
+ | ty::Array(..) => return Ok(()),
- ty::Adt(adt, substs) => (adt, substs),
+ ty::Adt(adt, substs) => (adt, substs),
- _ => return Err(CopyImplementationError::NotAnAdt),
- };
+ _ => return Err(CopyImplementationError::NotAnAdt),
+ };
- let mut infringing = Vec::new();
- for variant in adt.variants() {
- for field in &variant.fields {
- let ty = field.ty(tcx, substs);
- if ty.references_error() {
- continue;
- }
- let span = tcx.def_span(field.did);
- // FIXME(compiler-errors): This gives us better spans for bad
- // projection types like in issue-50480.
- // If the ADT has substs, point to the cause we are given.
- // If it does not, then this field probably doesn't normalize
- // to begin with, and point to the bad field's span instead.
- let cause = if field
- .ty(tcx, traits::InternalSubsts::identity_for_item(tcx, adt.did()))
- .has_param_types_or_consts()
- {
- parent_cause.clone()
- } else {
- ObligationCause::dummy_with_span(span)
- };
- let ctx = traits::FulfillmentContext::new();
- match traits::fully_normalize(&infcx, ctx, cause, param_env, ty) {
- Ok(ty) => {
- if !infcx.type_is_copy_modulo_regions(param_env, ty, span) {
- infringing.push((field, ty));
- }
- }
- Err(errors) => {
- infcx.report_fulfillment_errors(&errors, None, false);
- }
- };
+ let mut infringing = Vec::new();
+ for variant in adt.variants() {
+ for field in &variant.fields {
+ let ty = field.ty(tcx, substs);
+ if ty.references_error() {
+ continue;
}
+ let span = tcx.def_span(field.did);
+ // FIXME(compiler-errors): This gives us better spans for bad
+ // projection types like in issue-50480.
+ // If the ADT has substs, point to the cause we are given.
+ // If it does not, then this field probably doesn't normalize
+ // to begin with, and point to the bad field's span instead.
+ let cause = if field
+ .ty(tcx, traits::InternalSubsts::identity_for_item(tcx, adt.did()))
+ .has_non_region_param()
+ {
+ parent_cause.clone()
+ } else {
+ ObligationCause::dummy_with_span(span)
+ };
+ match traits::fully_normalize(&infcx, cause, param_env, ty) {
+ Ok(ty) => {
+ if !infcx.type_is_copy_modulo_regions(param_env, ty, span) {
+ infringing.push((field, ty));
+ }
+ }
+ Err(errors) => {
+ infcx.err_ctxt().report_fulfillment_errors(&errors, None, false);
+ }
+ };
}
- if !infringing.is_empty() {
- return Err(CopyImplementationError::InfrigingFields(infringing));
- }
- if adt.has_dtor(tcx) {
- return Err(CopyImplementationError::HasDestructor);
- }
+ }
+ if !infringing.is_empty() {
+ return Err(CopyImplementationError::InfrigingFields(infringing));
+ }
+ if adt.has_dtor(tcx) {
+ return Err(CopyImplementationError::HasDestructor);
+ }
- Ok(())
- })
+ Ok(())
}
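A hypothetical user-facing example of what `can_type_implement_copy` rejects (names illustrative): a `Copy` derive on a struct with a non-`Copy` field, reported via `CopyImplementationError::InfrigingFields`.

// error[E0204]: the trait `Copy` may not be implemented for this type
#[derive(Clone, Copy)]
struct Config {
    name: String, // this field does not implement `Copy`
}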
diff --git a/compiler/rustc_trait_selection/src/traits/mod.rs b/compiler/rustc_trait_selection/src/traits/mod.rs
index 9c6bb0731..0bf54c096 100644
--- a/compiler/rustc_trait_selection/src/traits/mod.rs
+++ b/compiler/rustc_trait_selection/src/traits/mod.rs
@@ -13,6 +13,7 @@ mod fulfill;
pub mod misc;
mod object_safety;
mod on_unimplemented;
+pub mod outlives_bounds;
mod project;
pub mod query;
pub(crate) mod relationships;
@@ -22,18 +23,22 @@ mod structural_match;
mod util;
pub mod wf;
+use crate::errors::DumpVTableEntries;
use crate::infer::outlives::env::OutlivesEnvironment;
use crate::infer::{InferCtxt, TyCtxtInferExt};
-use crate::traits::error_reporting::InferCtxtExt as _;
+use crate::traits::error_reporting::TypeErrCtxtExt as _;
use crate::traits::query::evaluate_obligation::InferCtxtExt as _;
use rustc_errors::ErrorGuaranteed;
use rustc_hir as hir;
use rustc_hir::def_id::DefId;
use rustc_hir::lang_items::LangItem;
+use rustc_infer::traits::TraitEngineExt as _;
use rustc_middle::ty::fold::TypeFoldable;
-use rustc_middle::ty::subst::{InternalSubsts, SubstsRef};
use rustc_middle::ty::visit::TypeVisitable;
-use rustc_middle::ty::{self, GenericParamDefKind, ToPredicate, Ty, TyCtxt, VtblEntry};
+use rustc_middle::ty::{
+ self, DefIdTree, GenericParamDefKind, ToPredicate, Ty, TyCtxt, TypeSuperVisitable, VtblEntry,
+};
+use rustc_middle::ty::{InternalSubsts, SubstsRef};
use rustc_span::{sym, Span};
use smallvec::SmallVec;
@@ -113,11 +118,21 @@ pub enum TraitQueryMode {
/// Creates predicate obligations from the generic bounds.
pub fn predicates_for_generics<'tcx>(
- cause: ObligationCause<'tcx>,
+ cause: impl Fn(usize, Span) -> ObligationCause<'tcx>,
param_env: ty::ParamEnv<'tcx>,
generic_bounds: ty::InstantiatedPredicates<'tcx>,
) -> impl Iterator<Item = PredicateObligation<'tcx>> {
- util::predicates_for_generics(cause, 0, param_env, generic_bounds)
+ let generic_bounds = generic_bounds;
+ debug!("predicates_for_generics(generic_bounds={:?})", generic_bounds);
+
+ std::iter::zip(generic_bounds.predicates, generic_bounds.spans).enumerate().map(
+ move |(idx, (predicate, span))| Obligation {
+ cause: cause(idx, span),
+ recursion_depth: 0,
+ param_env,
+ predicate,
+ },
+ )
}
/// Determines whether the type `ty` is known to meet `bound` and
@@ -125,8 +140,8 @@ pub fn predicates_for_generics<'tcx>(
/// `bound` or is not known to meet bound (note that this is
/// conservative towards *no impl*, which is the opposite of the
/// `evaluate` methods).
-pub fn type_known_to_meet_bound_modulo_regions<'a, 'tcx>(
- infcx: &InferCtxt<'a, 'tcx>,
+pub fn type_known_to_meet_bound_modulo_regions<'tcx>(
+ infcx: &InferCtxt<'tcx>,
param_env: ty::ParamEnv<'tcx>,
ty: Ty<'tcx>,
def_id: DefId,
@@ -155,28 +170,26 @@ pub fn type_known_to_meet_bound_modulo_regions<'a, 'tcx>(
result
);
- if result && ty.has_infer_types_or_consts() {
+ if result && ty.has_non_region_infer() {
// Because of inference "guessing", selection can sometimes claim
// to succeed while the success requires a guess. To ensure
// this function's result remains infallible, we must confirm
// that guess. While imperfect, I believe this is sound.
- // The handling of regions in this area of the code is terrible,
- // see issue #29149. We should be able to improve on this with
- // NLL.
- let mut fulfill_cx = <dyn TraitEngine<'tcx>>::new(infcx.tcx);
-
// We can use a dummy node-id here because we won't pay any mind
// to region obligations that arise (there shouldn't really be any
// anyhow).
let cause = ObligationCause::misc(span, hir::CRATE_HIR_ID);
- fulfill_cx.register_bound(infcx, param_env, ty, def_id, cause);
+ // The handling of regions in this area of the code is terrible,
+ // see issue #29149. We should be able to improve on this with
+ // NLL.
+ let errors = fully_solve_bound(infcx, cause, param_env, ty, def_id);
// Note: we only assume something is `Copy` if we can
// *definitively* show that it implements `Copy`. Otherwise,
// assume it is move; linear is always ok.
- match fulfill_cx.select_all_or_error(infcx).as_slice() {
+ match &errors[..] {
[] => {
debug!(
"type_known_to_meet_bound_modulo_regions: ty={:?} bound={} success",
@@ -221,56 +234,51 @@ fn do_normalize_predicates<'tcx>(
// by wfcheck anyway, so I'm not sure we have to check
// them here too, and we will remove this function when
// we move over to lazy normalization *anyway*.
- tcx.infer_ctxt().ignoring_regions().enter(|infcx| {
- let fulfill_cx = FulfillmentContext::new();
- let predicates =
- match fully_normalize(&infcx, fulfill_cx, cause, elaborated_env, predicates) {
- Ok(predicates) => predicates,
- Err(errors) => {
- let reported = infcx.report_fulfillment_errors(&errors, None, false);
- return Err(reported);
- }
- };
+ let infcx = tcx.infer_ctxt().ignoring_regions().build();
+ let predicates = match fully_normalize(&infcx, cause, elaborated_env, predicates) {
+ Ok(predicates) => predicates,
+ Err(errors) => {
+ let reported = infcx.err_ctxt().report_fulfillment_errors(&errors, None, false);
+ return Err(reported);
+ }
+ };
+
+ debug!("do_normalize_predictes: normalized predicates = {:?}", predicates);
- debug!("do_normalize_predictes: normalized predicates = {:?}", predicates);
+ // We can use the `elaborated_env` here; the region code only
+ // cares about declarations like `'a: 'b`.
+ let outlives_env = OutlivesEnvironment::new(elaborated_env);
- // We can use the `elaborated_env` here; the region code only
- // cares about declarations like `'a: 'b`.
- let outlives_env = OutlivesEnvironment::new(elaborated_env);
+ // FIXME: It's very weird that we ignore region obligations but apparently
+ // still need to use `resolve_regions` as we need the resolved regions in
+ // the normalized predicates.
+ let errors = infcx.resolve_regions(&outlives_env);
+ if !errors.is_empty() {
+ tcx.sess.delay_span_bug(
+ span,
+ format!("failed region resolution while normalizing {elaborated_env:?}: {errors:?}"),
+ );
+ }
- // FIXME: It's very weird that we ignore region obligations but apparently
- // still need to use `resolve_regions` as we need the resolved regions in
- // the normalized predicates.
- let errors = infcx.resolve_regions(&outlives_env);
- if !errors.is_empty() {
- tcx.sess.delay_span_bug(
+ match infcx.fully_resolve(predicates) {
+ Ok(predicates) => Ok(predicates),
+ Err(fixup_err) => {
+ // If we encounter a fixup error, it means that some type
+ // variable wound up unconstrained. I actually don't know
+ // if this can happen, and I certainly don't expect it to
+ // happen often, but if it did happen it probably
+ // represents a legitimate failure due to some kind of
+ // unconstrained variable.
+ //
+ // @lcnr: Let's still ICE here for now. I want a test case
+ // for that.
+ span_bug!(
span,
- format!(
- "failed region resolution while normalizing {elaborated_env:?}: {errors:?}"
- ),
+ "inference variables in normalized parameter environment: {}",
+ fixup_err
);
}
-
- match infcx.fully_resolve(predicates) {
- Ok(predicates) => Ok(predicates),
- Err(fixup_err) => {
- // If we encounter a fixup error, it means that some type
- // variable wound up unconstrained. I actually don't know
- // if this can happen, and I certainly don't expect it to
- // happen often, but if it did happen it probably
- // represents a legitimate failure due to some kind of
- // unconstrained variable.
- //
- // @lcnr: Let's still ICE here for now. I want a test case
- // for that.
- span_bug!(
- span,
- "inference variables in normalized parameter environment: {}",
- fixup_err
- );
- }
- }
- })
+ }
}
// FIXME: this is gonna need to be removed ...
@@ -381,9 +389,9 @@ pub fn normalize_param_env_or_error<'tcx>(
)
}
-pub fn fully_normalize<'a, 'tcx, T>(
- infcx: &InferCtxt<'a, 'tcx>,
- mut fulfill_cx: FulfillmentContext<'tcx>,
+/// Normalize a type and process all resulting obligations, returning any errors
+pub fn fully_normalize<'tcx, T>(
+ infcx: &InferCtxt<'tcx>,
cause: ObligationCause<'tcx>,
param_env: ty::ParamEnv<'tcx>,
value: T,
@@ -399,8 +407,10 @@ where
"fully_normalize: normalized_value={:?} obligations={:?}",
normalized_value, obligations
);
+
+ let mut fulfill_cx = FulfillmentContext::new();
for obligation in obligations {
- fulfill_cx.register_predicate_obligation(selcx.infcx(), obligation);
+ fulfill_cx.register_predicate_obligation(infcx, obligation);
}
debug!("fully_normalize: select_all_or_error start");
@@ -414,6 +424,43 @@ where
Ok(resolved_value)
}
+/// Process an obligation (and any nested obligations that come from it) to
+/// completion, returning any errors
+pub fn fully_solve_obligation<'tcx>(
+ infcx: &InferCtxt<'tcx>,
+ obligation: PredicateObligation<'tcx>,
+) -> Vec<FulfillmentError<'tcx>> {
+ let mut engine = <dyn TraitEngine<'tcx>>::new(infcx.tcx);
+ engine.register_predicate_obligation(infcx, obligation);
+ engine.select_all_or_error(infcx)
+}
+
+/// Process a set of obligations (and any nested obligations that come from them)
+/// to completion
+pub fn fully_solve_obligations<'tcx>(
+ infcx: &InferCtxt<'tcx>,
+ obligations: impl IntoIterator<Item = PredicateObligation<'tcx>>,
+) -> Vec<FulfillmentError<'tcx>> {
+ let mut engine = <dyn TraitEngine<'tcx>>::new(infcx.tcx);
+ engine.register_predicate_obligations(infcx, obligations);
+ engine.select_all_or_error(infcx)
+}
+
+/// Process a bound (and any nested obligations that come from it) to completion.
+/// This is a convenience function for traits that have no generic arguments, such
+/// as auto traits, and builtin traits like Copy or Sized.
+pub fn fully_solve_bound<'tcx>(
+ infcx: &InferCtxt<'tcx>,
+ cause: ObligationCause<'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ ty: Ty<'tcx>,
+ bound: DefId,
+) -> Vec<FulfillmentError<'tcx>> {
+ let mut engine = <dyn TraitEngine<'tcx>>::new(infcx.tcx);
+ engine.register_bound(infcx, param_env, ty, bound, cause);
+ engine.select_all_or_error(infcx)
+}
+
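A minimal sketch of how a caller inside the compiler might use the new `fully_solve_bound` helper instead of wiring up a `TraitEngine` by hand; the surrounding function, its parameters, and the imports are assumptions for illustration, not part of this patch.

use rustc_hir::lang_items::LangItem;
use rustc_infer::infer::TyCtxtInferExt;
use rustc_middle::ty::{self, Ty, TyCtxt};
use rustc_span::Span;
use rustc_trait_selection::traits::{fully_solve_bound, ObligationCause};

// Hypothetical helper: does `ty: Sized` hold in `param_env`?
fn ty_is_sized<'tcx>(
    tcx: TyCtxt<'tcx>,
    param_env: ty::ParamEnv<'tcx>,
    ty: Ty<'tcx>,
    span: Span,
) -> bool {
    let infcx = tcx.infer_ctxt().build();
    let cause = ObligationCause::dummy_with_span(span);
    let sized_def_id = tcx.require_lang_item(LangItem::Sized, Some(span));
    // Registers the bound and drives all nested obligations to completion,
    // returning any fulfillment errors.
    fully_solve_bound(&infcx, cause, param_env, ty, sized_def_id).is_empty()
}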
/// Normalizes the predicates and checks whether they hold in an empty environment. If this
/// returns true, then either normalize encountered an error or one of the predicates did not
/// hold. Used when creating vtables to check for unsatisfiable methods.
@@ -423,31 +470,20 @@ pub fn impossible_predicates<'tcx>(
) -> bool {
debug!("impossible_predicates(predicates={:?})", predicates);
- let result = tcx.infer_ctxt().enter(|infcx| {
- // HACK: Set tainted by errors to gracefully exit in case of overflow.
- infcx.set_tainted_by_errors();
-
- let param_env = ty::ParamEnv::reveal_all();
- let mut selcx = SelectionContext::new(&infcx);
- let mut fulfill_cx = FulfillmentContext::new();
- let cause = ObligationCause::dummy();
- let Normalized { value: predicates, obligations } =
- normalize(&mut selcx, param_env, cause.clone(), predicates);
- for obligation in obligations {
- fulfill_cx.register_predicate_obligation(&infcx, obligation);
- }
- for predicate in predicates {
- let obligation = Obligation::new(cause.clone(), param_env, predicate);
- fulfill_cx.register_predicate_obligation(&infcx, obligation);
- }
-
- let errors = fulfill_cx.select_all_or_error(&infcx);
+ let infcx = tcx.infer_ctxt().build();
+ let param_env = ty::ParamEnv::reveal_all();
+ let ocx = ObligationCtxt::new(&infcx);
+ let predicates = ocx.normalize(ObligationCause::dummy(), param_env, predicates);
+ for predicate in predicates {
+ let obligation = Obligation::new(ObligationCause::dummy(), param_env, predicate);
+ ocx.register_obligation(obligation);
+ }
+ let errors = ocx.select_all_or_error();
- // Clean up after ourselves
- let _ = infcx.inner.borrow_mut().opaque_type_storage.take_opaque_types();
+ // Clean up after ourselves
+ let _ = infcx.inner.borrow_mut().opaque_type_storage.take_opaque_types();
- !errors.is_empty()
- });
+ let result = !errors.is_empty();
debug!("impossible_predicates = {:?}", result);
result
}
@@ -474,6 +510,82 @@ fn subst_and_check_impossible_predicates<'tcx>(
result
}
+/// Checks whether a trait's method is impossible to call on a given impl.
+///
+/// This only considers predicates that reference the impl's generics, and not
+/// those that reference the method's generics.
+fn is_impossible_method<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ (impl_def_id, trait_item_def_id): (DefId, DefId),
+) -> bool {
+ struct ReferencesOnlyParentGenerics<'tcx> {
+ tcx: TyCtxt<'tcx>,
+ generics: &'tcx ty::Generics,
+ trait_item_def_id: DefId,
+ }
+ impl<'tcx> ty::TypeVisitor<'tcx> for ReferencesOnlyParentGenerics<'tcx> {
+ type BreakTy = ();
+ fn visit_ty(&mut self, t: Ty<'tcx>) -> ControlFlow<Self::BreakTy> {
+ // If this is a parameter from the trait item's own generics, then bail
+ if let ty::Param(param) = t.kind()
+ && let param_def_id = self.generics.type_param(param, self.tcx).def_id
+ && self.tcx.parent(param_def_id) == self.trait_item_def_id
+ {
+ return ControlFlow::BREAK;
+ }
+ t.super_visit_with(self)
+ }
+ fn visit_region(&mut self, r: ty::Region<'tcx>) -> ControlFlow<Self::BreakTy> {
+ if let ty::ReEarlyBound(param) = r.kind()
+ && let param_def_id = self.generics.region_param(&param, self.tcx).def_id
+ && self.tcx.parent(param_def_id) == self.trait_item_def_id
+ {
+ return ControlFlow::BREAK;
+ }
+ r.super_visit_with(self)
+ }
+ fn visit_const(&mut self, ct: ty::Const<'tcx>) -> ControlFlow<Self::BreakTy> {
+ if let ty::ConstKind::Param(param) = ct.kind()
+ && let param_def_id = self.generics.const_param(&param, self.tcx).def_id
+ && self.tcx.parent(param_def_id) == self.trait_item_def_id
+ {
+ return ControlFlow::BREAK;
+ }
+ ct.super_visit_with(self)
+ }
+ }
+
+ let generics = tcx.generics_of(trait_item_def_id);
+ let predicates = tcx.predicates_of(trait_item_def_id);
+ let impl_trait_ref =
+ tcx.impl_trait_ref(impl_def_id).expect("expected impl to correspond to trait");
+ let param_env = tcx.param_env(impl_def_id);
+
+ let mut visitor = ReferencesOnlyParentGenerics { tcx, generics, trait_item_def_id };
+ let predicates_for_trait = predicates.predicates.iter().filter_map(|(pred, span)| {
+ if pred.visit_with(&mut visitor).is_continue() {
+ Some(Obligation::new(
+ ObligationCause::dummy_with_span(*span),
+ param_env,
+ ty::EarlyBinder(*pred).subst(tcx, impl_trait_ref.substs),
+ ))
+ } else {
+ None
+ }
+ });
+
+ let infcx = tcx.infer_ctxt().ignoring_regions().build();
+ for obligation in predicates_for_trait {
+ // Ignore overflow error, to be conservative.
+ if let Ok(result) = infcx.evaluate_obligation(&obligation)
+ && !result.may_apply()
+ {
+ return true;
+ }
+ }
+ false
+}
+
#[derive(Clone, Debug)]
enum VtblSegment<'tcx> {
MetadataDSA,
@@ -645,16 +757,16 @@ fn dump_vtable_entries<'tcx>(
trait_ref: ty::PolyTraitRef<'tcx>,
entries: &[VtblEntry<'tcx>],
) {
- let msg = format!("vtable entries for `{}`: {:#?}", trait_ref, entries);
- tcx.sess.struct_span_err(sp, &msg).emit();
+ tcx.sess.emit_err(DumpVTableEntries {
+ span: sp,
+ trait_ref,
+ entries: format!("{:#?}", entries),
+ });
}
-fn own_existential_vtable_entries<'tcx>(
- tcx: TyCtxt<'tcx>,
- trait_ref: ty::PolyExistentialTraitRef<'tcx>,
-) -> &'tcx [DefId] {
+fn own_existential_vtable_entries<'tcx>(tcx: TyCtxt<'tcx>, trait_def_id: DefId) -> &'tcx [DefId] {
let trait_methods = tcx
- .associated_items(trait_ref.def_id())
+ .associated_items(trait_def_id)
.in_definition_order()
.filter(|item| item.kind == ty::AssocKind::Fn);
// Now list each method's DefId (for within its trait).
@@ -663,7 +775,7 @@ fn own_existential_vtable_entries<'tcx>(
let def_id = trait_method.def_id;
// Some methods cannot be called on an object; skip those.
- if !is_vtable_safe_method(tcx, trait_ref.def_id(), &trait_method) {
+ if !is_vtable_safe_method(tcx, trait_def_id, &trait_method) {
debug!("own_existential_vtable_entry: not vtable safe");
return None;
}
@@ -695,7 +807,7 @@ fn vtable_entries<'tcx>(
// Lookup the shape of vtable for the trait.
let own_existential_entries =
- tcx.own_existential_vtable_entries(existential_trait_ref);
+ tcx.own_existential_vtable_entries(existential_trait_ref.def_id());
let own_entries = own_existential_entries.iter().copied().map(|def_id| {
debug!("vtable_entries: trait_method={:?}", def_id);
@@ -831,10 +943,9 @@ pub fn vtable_trait_upcasting_coercion_new_vptr_slot<'tcx>(
}),
);
- let implsrc = tcx.infer_ctxt().enter(|infcx| {
- let mut selcx = SelectionContext::new(&infcx);
- selcx.select(&obligation).unwrap()
- });
+ let infcx = tcx.infer_ctxt().build();
+ let mut selcx = SelectionContext::new(&infcx);
+ let implsrc = selcx.select(&obligation).unwrap();
let Some(ImplSource::TraitUpcasting(implsrc_traitcasting)) = implsrc else {
bug!();
@@ -849,11 +960,12 @@ pub fn provide(providers: &mut ty::query::Providers) {
*providers = ty::query::Providers {
specialization_graph_of: specialize::specialization_graph_provider,
specializes: specialize::specializes,
- codegen_fulfill_obligation: codegen::codegen_fulfill_obligation,
+ codegen_select_candidate: codegen::codegen_select_candidate,
own_existential_vtable_entries,
vtable_entries,
vtable_trait_upcasting_coercion_new_vptr_slot,
subst_and_check_impossible_predicates,
+ is_impossible_method,
try_unify_abstract_consts: |tcx, param_env_and| {
let (param_env, (a, b)) = param_env_and.into_parts();
const_evaluatable::try_unify_abstract_consts(tcx, (a, b), param_env)
diff --git a/compiler/rustc_trait_selection/src/traits/object_safety.rs b/compiler/rustc_trait_selection/src/traits/object_safety.rs
index 612f51309..0bb25a74d 100644
--- a/compiler/rustc_trait_selection/src/traits/object_safety.rs
+++ b/compiler/rustc_trait_selection/src/traits/object_safety.rs
@@ -8,19 +8,20 @@
//! - not reference the erased type `Self` except for in this receiver;
//! - not have generic type parameters.
-use super::elaborate_predicates;
+use super::{elaborate_predicates, elaborate_trait_ref};
use crate::infer::TyCtxtInferExt;
use crate::traits::query::evaluate_obligation::InferCtxtExt;
use crate::traits::{self, Obligation, ObligationCause};
-use rustc_errors::{FatalError, MultiSpan};
+use hir::def::DefKind;
+use rustc_errors::{DelayDm, FatalError, MultiSpan};
use rustc_hir as hir;
use rustc_hir::def_id::DefId;
use rustc_middle::ty::abstract_const::{walk_abstract_const, AbstractConst};
-use rustc_middle::ty::subst::{GenericArg, InternalSubsts, Subst};
use rustc_middle::ty::{
self, EarlyBinder, Ty, TyCtxt, TypeSuperVisitable, TypeVisitable, TypeVisitor,
};
+use rustc_middle::ty::{GenericArg, InternalSubsts};
use rustc_middle::ty::{Predicate, ToPredicate};
use rustc_session::lint::builtin::WHERE_CLAUSES_OBJECT_SAFETY;
use rustc_span::symbol::Symbol;
@@ -163,37 +164,42 @@ fn lint_object_unsafe_trait(
) {
// Using `CRATE_NODE_ID` is wrong, but it's hard to get a more precise id.
// It's also hard to get a use site span, so we use the method definition span.
- tcx.struct_span_lint_hir(WHERE_CLAUSES_OBJECT_SAFETY, hir::CRATE_HIR_ID, span, |lint| {
- let mut err = lint.build(&format!(
- "the trait `{}` cannot be made into an object",
- tcx.def_path_str(trait_def_id)
- ));
- let node = tcx.hir().get_if_local(trait_def_id);
- let mut spans = MultiSpan::from_span(span);
- if let Some(hir::Node::Item(item)) = node {
- spans.push_span_label(item.ident.span, "this trait cannot be made into an object...");
- spans.push_span_label(span, format!("...because {}", violation.error_msg()));
- } else {
- spans.push_span_label(
- span,
- format!(
- "the trait cannot be made into an object because {}",
- violation.error_msg()
- ),
+ tcx.struct_span_lint_hir(
+ WHERE_CLAUSES_OBJECT_SAFETY,
+ hir::CRATE_HIR_ID,
+ span,
+ DelayDm(|| format!("the trait `{}` cannot be made into an object", tcx.def_path_str(trait_def_id))),
+ |err| {
+ let node = tcx.hir().get_if_local(trait_def_id);
+ let mut spans = MultiSpan::from_span(span);
+ if let Some(hir::Node::Item(item)) = node {
+ spans.push_span_label(
+ item.ident.span,
+ "this trait cannot be made into an object...",
+ );
+ spans.push_span_label(span, format!("...because {}", violation.error_msg()));
+ } else {
+ spans.push_span_label(
+ span,
+ format!(
+ "the trait cannot be made into an object because {}",
+ violation.error_msg()
+ ),
+ );
+ };
+ err.span_note(
+ spans,
+ "for a trait to be \"object safe\" it needs to allow building a vtable to allow the \
+ call to be resolvable dynamically; for more information visit \
+ <https://doc.rust-lang.org/reference/items/traits.html#object-safety>",
);
- };
- err.span_note(
- spans,
- "for a trait to be \"object safe\" it needs to allow building a vtable to allow the \
- call to be resolvable dynamically; for more information visit \
- <https://doc.rust-lang.org/reference/items/traits.html#object-safety>",
- );
- if node.is_some() {
- // Only provide the help if its a local trait, otherwise it's not
- violation.solution(&mut err);
- }
- err.emit();
- });
+ if node.is_some() {
+ // Only provide the help if it's a local trait, otherwise it's not
+ violation.solution(err);
+ }
+ err
+ },
+ );
}
fn sized_trait_bound_spans<'tcx>(
@@ -431,6 +437,9 @@ fn virtual_call_violation_for_method<'tcx>(
if contains_illegal_self_type_reference(tcx, trait_def_id, sig.output()) {
return Some(MethodViolationCode::ReferencesSelfOutput);
}
+ if contains_illegal_impl_trait_in_trait(tcx, sig.output()) {
+ return Some(MethodViolationCode::ReferencesImplTraitInTrait);
+ }
// We can't monomorphize things like `fn foo<A>(...)`.
let own_counts = tcx.generics_of(method.def_id).own_counts();
@@ -438,19 +447,6 @@ fn virtual_call_violation_for_method<'tcx>(
return Some(MethodViolationCode::Generic);
}
- if tcx
- .predicates_of(method.def_id)
- .predicates
- .iter()
- // A trait object can't claim to live more than the concrete type,
- // so outlives predicates will always hold.
- .cloned()
- .filter(|(p, _)| p.to_opt_type_outlives().is_none())
- .any(|pred| contains_illegal_self_type_reference(tcx, trait_def_id, pred))
- {
- return Some(MethodViolationCode::WhereClauseReferencesSelf);
- }
-
let receiver_ty = tcx.liberate_late_bound_regions(method.def_id, sig.input(0));
// Until `unsized_locals` is fully implemented, `self: Self` can't be dispatched on.
@@ -529,6 +525,21 @@ fn virtual_call_violation_for_method<'tcx>(
}
}
+ // NOTE: This check happens last, because it results in a lint, and not a
+ // hard error.
+ if tcx
+ .predicates_of(method.def_id)
+ .predicates
+ .iter()
+ // A trait object can't claim to live more than the concrete type,
+ // so outlives predicates will always hold.
+ .cloned()
+ .filter(|(p, _)| p.to_opt_type_outlives().is_none())
+ .any(|pred| contains_illegal_self_type_reference(tcx, trait_def_id, pred))
+ {
+ return Some(MethodViolationCode::WhereClauseReferencesSelf);
+ }
+
None
}
@@ -556,51 +567,44 @@ fn receiver_for_self_ty<'tcx>(
/// Creates the object type for the current trait. For example,
/// if the current trait is `Deref`, then this will be
/// `dyn Deref<Target = Self::Target> + 'static`.
+#[instrument(level = "trace", skip(tcx), ret)]
fn object_ty_for_trait<'tcx>(
tcx: TyCtxt<'tcx>,
trait_def_id: DefId,
lifetime: ty::Region<'tcx>,
) -> Ty<'tcx> {
- debug!("object_ty_for_trait: trait_def_id={:?}", trait_def_id);
-
let trait_ref = ty::TraitRef::identity(tcx, trait_def_id);
+ debug!(?trait_ref);
let trait_predicate = trait_ref.map_bound(|trait_ref| {
ty::ExistentialPredicate::Trait(ty::ExistentialTraitRef::erase_self_ty(tcx, trait_ref))
});
-
- let mut associated_types = traits::supertraits(tcx, trait_ref)
- .flat_map(|super_trait_ref| {
- tcx.associated_items(super_trait_ref.def_id())
- .in_definition_order()
- .map(move |item| (super_trait_ref, item))
- })
- .filter(|(_, item)| item.kind == ty::AssocKind::Type)
- .collect::<Vec<_>>();
-
- // existential predicates need to be in a specific order
- associated_types.sort_by_cached_key(|(_, item)| tcx.def_path_hash(item.def_id));
-
- let projection_predicates = associated_types.into_iter().map(|(super_trait_ref, item)| {
- // We *can* get bound lifetimes here in cases like
- // `trait MyTrait: for<'s> OtherTrait<&'s T, Output=bool>`.
- super_trait_ref.map_bound(|super_trait_ref| {
- ty::ExistentialPredicate::Projection(ty::ExistentialProjection {
- term: tcx.mk_projection(item.def_id, super_trait_ref.substs).into(),
- item_def_id: item.def_id,
- substs: super_trait_ref.substs,
- })
+ debug!(?trait_predicate);
+
+ let mut elaborated_predicates: Vec<_> = elaborate_trait_ref(tcx, trait_ref)
+ .filter_map(|obligation| {
+ debug!(?obligation);
+ let pred = obligation.predicate.to_opt_poly_projection_pred()?;
+ Some(pred.map_bound(|p| {
+ ty::ExistentialPredicate::Projection(ty::ExistentialProjection {
+ item_def_id: p.projection_ty.item_def_id,
+ substs: p.projection_ty.substs,
+ term: p.term,
+ })
+ }))
})
- });
+ .collect();
+ // NOTE: Since #37965, the existential predicates list has depended on the
+ // list of predicates to be sorted. This is mostly to enforce that the primary
+ // predicate comes first.
+ elaborated_predicates.sort_by(|a, b| a.skip_binder().stable_cmp(tcx, &b.skip_binder()));
+ elaborated_predicates.dedup();
let existential_predicates = tcx
- .mk_poly_existential_predicates(iter::once(trait_predicate).chain(projection_predicates));
+ .mk_poly_existential_predicates(iter::once(trait_predicate).chain(elaborated_predicates));
+ debug!(?existential_predicates);
- let object_ty = tcx.mk_dynamic(existential_predicates, lifetime);
-
- debug!("object_ty_for_trait: object_ty=`{}`", object_ty);
-
- object_ty
+ tcx.mk_dynamic(existential_predicates, lifetime, ty::Dyn)
}
/// Checks the method's receiver (the `self` argument) can be dispatched on when `Self` is a
@@ -725,10 +729,9 @@ fn receiver_is_dispatchable<'tcx>(
Obligation::new(ObligationCause::dummy(), param_env, predicate)
};
- tcx.infer_ctxt().enter(|ref infcx| {
- // the receiver is dispatchable iff the obligation holds
- infcx.predicate_must_hold_modulo_regions(&obligation)
- })
+ let infcx = tcx.infer_ctxt().build();
+ // the receiver is dispatchable iff the obligation holds
+ infcx.predicate_must_hold_modulo_regions(&obligation)
}
fn contains_illegal_self_type_reference<'tcx, T: TypeVisitable<'tcx>>(
@@ -793,6 +796,12 @@ fn contains_illegal_self_type_reference<'tcx, T: TypeVisitable<'tcx>>(
ControlFlow::CONTINUE
}
}
+ ty::Projection(ref data)
+ if self.tcx.def_kind(data.item_def_id) == DefKind::ImplTraitPlaceholder =>
+ {
+ // We'll deny these later in their own pass
+ ControlFlow::CONTINUE
+ }
ty::Projection(ref data) => {
// This is a projected type `<Foo as SomeTrait>::X`.
@@ -828,21 +837,14 @@ fn contains_illegal_self_type_reference<'tcx, T: TypeVisitable<'tcx>>(
}
}
- fn visit_unevaluated(&mut self, uv: ty::Unevaluated<'tcx>) -> ControlFlow<Self::BreakTy> {
+ fn visit_const(&mut self, ct: ty::Const<'tcx>) -> ControlFlow<Self::BreakTy> {
// Constants can only influence object safety if they reference `Self`.
// This is only possible for unevaluated constants, so we walk these here.
//
- // If `AbstractConst::new` returned an error we already failed compilation
+ // If `AbstractConst::from_const` returned an error we already failed compilation
// so we don't have to emit an additional error here.
- //
- // We currently recurse into abstract consts here but do not recurse in
- // `is_const_evaluatable`. This means that the object safety check is more
- // liberal than the const eval check.
- //
- // This shouldn't really matter though as we can't really use any
- // constants which are not considered const evaluatable.
use rustc_middle::ty::abstract_const::Node;
- if let Ok(Some(ct)) = AbstractConst::new(self.tcx, uv.shrink()) {
+ if let Ok(Some(ct)) = AbstractConst::from_const(self.tcx, ct) {
walk_abstract_const(self.tcx, ct, |node| match node.root(self.tcx) {
Node::Leaf(leaf) => self.visit_const(leaf),
Node::Cast(_, _, ty) => self.visit_ty(ty),
@@ -851,7 +853,7 @@ fn contains_illegal_self_type_reference<'tcx, T: TypeVisitable<'tcx>>(
}
})
} else {
- ControlFlow::CONTINUE
+ ct.super_visit_with(self)
}
}
}
@@ -861,6 +863,22 @@ fn contains_illegal_self_type_reference<'tcx, T: TypeVisitable<'tcx>>(
.is_break()
}
+pub fn contains_illegal_impl_trait_in_trait<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ ty: ty::Binder<'tcx, Ty<'tcx>>,
+) -> bool {
+ // FIXME(RPITIT): Perhaps we should use a visitor here?
+ ty.skip_binder().walk().any(|arg| {
+ if let ty::GenericArgKind::Type(ty) = arg.unpack()
+ && let ty::Projection(proj) = ty.kind()
+ {
+ tcx.def_kind(proj.item_def_id) == DefKind::ImplTraitPlaceholder
+ } else {
+ false
+ }
+ })
+}
+
pub fn provide(providers: &mut ty::query::Providers) {
*providers = ty::query::Providers { object_safety_violations, ..*providers };
}
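
The object-safety hunks above do two things: supertrait projection bounds are now gathered via `elaborate_trait_ref`, sorted, and deduplicated before the `dyn` type is built with an explicit `ty::Dyn` representation, and the new `contains_illegal_impl_trait_in_trait` helper flags any projection whose def-kind is `ImplTraitPlaceholder`, i.e. a return-position `impl Trait` in a trait (RPITIT). As a purely illustrative, user-level sketch (not part of this diff, and assuming the nightly feature gate of that era), this is the kind of trait the new check keeps out of `dyn`:

    #![feature(return_position_impl_trait_in_trait)]
    use std::fmt::Display;

    trait Speak {
        // The opaque return type is lowered to an `ImplTraitPlaceholder`
        // projection, which `contains_illegal_impl_trait_in_trait` reports.
        fn greet(&self) -> impl Display;
    }

    // Expected to be rejected: `Speak` cannot be made into an object.
    fn broken(s: &dyn Speak) {
        println!("{}", s.greet());
    }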
diff --git a/compiler/rustc_trait_selection/src/traits/on_unimplemented.rs b/compiler/rustc_trait_selection/src/traits/on_unimplemented.rs
index 9227bbf01..4a4f34b76 100644
--- a/compiler/rustc_trait_selection/src/traits/on_unimplemented.rs
+++ b/compiler/rustc_trait_selection/src/traits/on_unimplemented.rs
@@ -8,6 +8,10 @@ use rustc_parse_format::{ParseMode, Parser, Piece, Position};
use rustc_span::symbol::{kw, sym, Symbol};
use rustc_span::{Span, DUMMY_SP};
+use crate::errors::{
+ EmptyOnClauseInOnUnimplemented, InvalidOnClauseInOnUnimplemented, NoValueInOnUnimplemented,
+};
+
#[derive(Clone, Debug)]
pub struct OnUnimplementedFormatString(Symbol);
@@ -18,7 +22,7 @@ pub struct OnUnimplementedDirective {
pub message: Option<OnUnimplementedFormatString>,
pub label: Option<OnUnimplementedFormatString>,
pub note: Option<OnUnimplementedFormatString>,
- pub enclosing_scope: Option<OnUnimplementedFormatString>,
+ pub parent_label: Option<OnUnimplementedFormatString>,
pub append_const_msg: Option<Option<Symbol>>,
}
@@ -27,7 +31,7 @@ pub struct OnUnimplementedNote {
pub message: Option<String>,
pub label: Option<String>,
pub note: Option<String>,
- pub enclosing_scope: Option<String>,
+ pub parent_label: Option<String>,
/// Append a message for `~const Trait` errors. `None` means not requested and
/// should fallback to a generic message, `Some(None)` suggests using the default
/// appended message, `Some(Some(s))` suggests use the `s` message instead of the
@@ -35,21 +39,6 @@ pub struct OnUnimplementedNote {
pub append_const_msg: Option<Option<Symbol>>,
}
-fn parse_error(
- tcx: TyCtxt<'_>,
- span: Span,
- message: &str,
- label: &str,
- note: Option<&str>,
-) -> ErrorGuaranteed {
- let mut diag = struct_span_err!(tcx.sess, span, E0232, "{}", message);
- diag.span_label(span, label);
- if let Some(note) = note {
- diag.note(note);
- }
- diag.emit()
-}
-
impl<'tcx> OnUnimplementedDirective {
fn parse(
tcx: TyCtxt<'tcx>,
@@ -70,25 +59,9 @@ impl<'tcx> OnUnimplementedDirective {
} else {
let cond = item_iter
.next()
- .ok_or_else(|| {
- parse_error(
- tcx,
- span,
- "empty `on`-clause in `#[rustc_on_unimplemented]`",
- "empty on-clause here",
- None,
- )
- })?
+ .ok_or_else(|| tcx.sess.emit_err(EmptyOnClauseInOnUnimplemented { span }))?
.meta_item()
- .ok_or_else(|| {
- parse_error(
- tcx,
- span,
- "invalid `on`-clause in `#[rustc_on_unimplemented]`",
- "invalid on-clause here",
- None,
- )
- })?;
+ .ok_or_else(|| tcx.sess.emit_err(InvalidOnClauseInOnUnimplemented { span }))?;
attr::eval_condition(cond, &tcx.sess.parse_sess, Some(tcx.features()), &mut |cfg| {
if let Some(value) = cfg.value && let Err(guar) = parse_value(value) {
errored = Some(guar);
@@ -101,7 +74,7 @@ impl<'tcx> OnUnimplementedDirective {
let mut message = None;
let mut label = None;
let mut note = None;
- let mut enclosing_scope = None;
+ let mut parent_label = None;
let mut subcommands = vec![];
let mut append_const_msg = None;
@@ -121,9 +94,9 @@ impl<'tcx> OnUnimplementedDirective {
note = parse_value(note_)?;
continue;
}
- } else if item.has_name(sym::enclosing_scope) && enclosing_scope.is_none() {
- if let Some(enclosing_scope_) = item.value_str() {
- enclosing_scope = parse_value(enclosing_scope_)?;
+ } else if item.has_name(sym::parent_label) && parent_label.is_none() {
+ if let Some(parent_label_) = item.value_str() {
+ parent_label = parse_value(parent_label_)?;
continue;
}
} else if item.has_name(sym::on)
@@ -150,13 +123,7 @@ impl<'tcx> OnUnimplementedDirective {
}
// nothing found
- parse_error(
- tcx,
- item.span(),
- "this attribute must have a valid value",
- "expected value here",
- Some(r#"eg `#[rustc_on_unimplemented(message="foo")]`"#),
- );
+ tcx.sess.emit_err(NoValueInOnUnimplemented { span: item.span() });
}
if let Some(reported) = errored {
@@ -168,7 +135,7 @@ impl<'tcx> OnUnimplementedDirective {
message,
label,
note,
- enclosing_scope,
+ parent_label,
append_const_msg,
})
}
@@ -193,7 +160,7 @@ impl<'tcx> OnUnimplementedDirective {
attr.span,
)?),
note: None,
- enclosing_scope: None,
+ parent_label: None,
append_const_msg: None,
}))
} else {
@@ -214,7 +181,7 @@ impl<'tcx> OnUnimplementedDirective {
let mut message = None;
let mut label = None;
let mut note = None;
- let mut enclosing_scope = None;
+ let mut parent_label = None;
let mut append_const_msg = None;
info!("evaluate({:?}, trait_ref={:?}, options={:?})", self, trait_ref, options);
@@ -250,8 +217,8 @@ impl<'tcx> OnUnimplementedDirective {
note = Some(note_.clone());
}
- if let Some(ref enclosing_scope_) = command.enclosing_scope {
- enclosing_scope = Some(enclosing_scope_.clone());
+ if let Some(ref parent_label_) = command.parent_label {
+ parent_label = Some(parent_label_.clone());
}
append_const_msg = command.append_const_msg;
@@ -261,7 +228,7 @@ impl<'tcx> OnUnimplementedDirective {
label: label.map(|l| l.format(tcx, trait_ref, &options_map)),
message: message.map(|m| m.format(tcx, trait_ref, &options_map)),
note: note.map(|n| n.format(tcx, trait_ref, &options_map)),
- enclosing_scope: enclosing_scope.map(|e_s| e_s.format(tcx, trait_ref, &options_map)),
+ parent_label: parent_label.map(|e_s| e_s.format(tcx, trait_ref, &options_map)),
append_const_msg,
}
}
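
Two changes run through this file: the ad-hoc `parse_error` helper gives way to the structured diagnostics imported at the top (`EmptyOnClauseInOnUnimplemented`, `InvalidOnClauseInOnUnimplemented`, `NoValueInOnUnimplemented`), and the `enclosing_scope` option is renamed to `parent_label` end to end, from parsing through `OnUnimplementedNote`. For orientation only, a hedged sketch of how the internal attribute is written on a library trait; it is compiler-internal, requires `rustc_attrs`, and any wording or filter key not shown in this diff is illustrative:

    #![feature(rustc_attrs)]

    #[rustc_on_unimplemented(
        message = "`{Self}` cannot be frobbed",                  // hypothetical text
        label = "the trait `Frob` is not implemented for `{Self}`",
        note = "see the module docs for supported types",        // hypothetical text
        parent_label = "required by a bound introduced here",    // was `enclosing_scope`
        on(_Self = "&str", label = "convert the `&str` first"),  // illustrative filter
    )]
    trait Frob {}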
diff --git a/compiler/rustc_trait_selection/src/traits/outlives_bounds.rs b/compiler/rustc_trait_selection/src/traits/outlives_bounds.rs
new file mode 100644
index 000000000..108dae092
--- /dev/null
+++ b/compiler/rustc_trait_selection/src/traits/outlives_bounds.rs
@@ -0,0 +1,115 @@
+use crate::infer::InferCtxt;
+use crate::traits::query::type_op::{self, TypeOp, TypeOpOutput};
+use crate::traits::query::NoSolution;
+use crate::traits::{ObligationCause, TraitEngine, TraitEngineExt};
+use rustc_data_structures::fx::FxHashSet;
+use rustc_hir as hir;
+use rustc_hir::HirId;
+use rustc_middle::ty::{self, ParamEnv, Ty};
+
+pub use rustc_middle::traits::query::OutlivesBound;
+
+type Bounds<'a, 'tcx: 'a> = impl Iterator<Item = OutlivesBound<'tcx>> + 'a;
+pub trait InferCtxtExt<'a, 'tcx> {
+ fn implied_outlives_bounds(
+ &self,
+ param_env: ty::ParamEnv<'tcx>,
+ body_id: hir::HirId,
+ ty: Ty<'tcx>,
+ ) -> Vec<OutlivesBound<'tcx>>;
+
+ fn implied_bounds_tys(
+ &'a self,
+ param_env: ty::ParamEnv<'tcx>,
+ body_id: hir::HirId,
+ tys: FxHashSet<Ty<'tcx>>,
+ ) -> Bounds<'a, 'tcx>;
+}
+
+impl<'a, 'tcx: 'a> InferCtxtExt<'a, 'tcx> for InferCtxt<'tcx> {
+ /// Implied bounds are region relationships that we deduce
+ /// automatically. The idea is that (e.g.) a caller must check that a
+ /// function's argument types are well-formed immediately before
+ /// calling that fn, and hence the *callee* can assume that its
+ /// argument types are well-formed. This may imply certain relationships
+ /// between generic parameters. For example:
+ /// ```
+ /// fn foo<'a,T>(x: &'a T) {}
+ /// ```
+ /// can only be called with a `'a` and `T` such that `&'a T` is WF.
+ /// For `&'a T` to be WF, `T: 'a` must hold. So we can assume `T: 'a`.
+ ///
+ /// # Parameters
+ ///
+ /// - `param_env`, the where-clauses in scope
+ /// - `body_id`, the body-id to use when normalizing assoc types.
+ /// Note that this may cause outlives obligations to be injected
+ /// into the inference context with this body-id.
+ /// - `ty`, the type that we are supposed to assume is WF.
+ #[instrument(level = "debug", skip(self, param_env, body_id), ret)]
+ fn implied_outlives_bounds(
+ &self,
+ param_env: ty::ParamEnv<'tcx>,
+ body_id: hir::HirId,
+ ty: Ty<'tcx>,
+ ) -> Vec<OutlivesBound<'tcx>> {
+ let span = self.tcx.hir().span(body_id);
+ let result = param_env
+ .and(type_op::implied_outlives_bounds::ImpliedOutlivesBounds { ty })
+ .fully_perform(self);
+ let result = match result {
+ Ok(r) => r,
+ Err(NoSolution) => {
+ self.tcx.sess.delay_span_bug(
+ span,
+ "implied_outlives_bounds failed to solve all obligations",
+ );
+ return vec![];
+ }
+ };
+
+ let TypeOpOutput { output, constraints, .. } = result;
+
+ if let Some(constraints) = constraints {
+ debug!(?constraints);
+ // Instantiation may have produced new inference variables and constraints on those
+ // variables. Process these constraints.
+ let mut fulfill_cx = <dyn TraitEngine<'tcx>>::new(self.tcx);
+ let cause = ObligationCause::misc(span, body_id);
+ for &constraint in &constraints.outlives {
+ let obligation = self.query_outlives_constraint_to_obligation(
+ constraint,
+ cause.clone(),
+ param_env,
+ );
+ fulfill_cx.register_predicate_obligation(self, obligation);
+ }
+ if !constraints.member_constraints.is_empty() {
+ span_bug!(span, "{:#?}", constraints.member_constraints);
+ }
+ let errors = fulfill_cx.select_all_or_error(self);
+ if !errors.is_empty() {
+ self.tcx.sess.delay_span_bug(
+ span,
+ "implied_outlives_bounds failed to solve obligations from instantiation",
+ );
+ }
+ };
+
+ output
+ }
+
+ fn implied_bounds_tys(
+ &'a self,
+ param_env: ParamEnv<'tcx>,
+ body_id: HirId,
+ tys: FxHashSet<Ty<'tcx>>,
+ ) -> Bounds<'a, 'tcx> {
+ tys.into_iter()
+ .map(move |ty| {
+ let ty = self.resolve_vars_if_possible(ty);
+ self.implied_outlives_bounds(param_env, body_id, ty)
+ })
+ .flatten()
+ }
+}
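
The new module packages the existing `implied_outlives_bounds` type-op behind an `InferCtxtExt` trait and adds `implied_bounds_tys`, which simply resolves each type and flattens the per-type bounds. As a user-level illustration of what an implied bound buys (ordinary stable Rust, not code from this diff): because a caller must prove `&'a T` well-formed before the call, the callee and any types it builds may assume `T: 'a` without writing it.

    struct Holder<'a, T> {
        value: &'a T, // well-formedness of this field needs `T: 'a`
    }

    // No explicit `T: 'a` bound is written; it is implied by the
    // well-formedness of the `&'a T` argument type.
    fn hold<'a, T>(value: &'a T) -> Holder<'a, T> {
        Holder { value }
    }

    fn main() {
        let x = 7u32;
        let h = hold(&x);
        assert_eq!(*h.value, 7);
    }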
diff --git a/compiler/rustc_trait_selection/src/traits/project.rs b/compiler/rustc_trait_selection/src/traits/project.rs
index c4e80e1ba..e4284b9d3 100644
--- a/compiler/rustc_trait_selection/src/traits/project.rs
+++ b/compiler/rustc_trait_selection/src/traits/project.rs
@@ -18,7 +18,7 @@ use super::{Normalized, NormalizedTy, ProjectionCacheEntry, ProjectionCacheKey};
use crate::infer::type_variable::{TypeVariableOrigin, TypeVariableOriginKind};
use crate::infer::{InferCtxt, InferOk, LateBoundRegionConversionTime};
-use crate::traits::error_reporting::InferCtxtExt as _;
+use crate::traits::error_reporting::TypeErrCtxtExt as _;
use crate::traits::query::evaluate_obligation::InferCtxtExt as _;
use crate::traits::select::ProjectionMatchesProjection;
use rustc_data_structures::sso::SsoHashSet;
@@ -30,8 +30,8 @@ use rustc_hir::lang_items::LangItem;
use rustc_infer::infer::resolve::OpportunisticRegionResolver;
use rustc_middle::traits::select::OverflowError;
use rustc_middle::ty::fold::{TypeFoldable, TypeFolder, TypeSuperFoldable};
-use rustc_middle::ty::subst::Subst;
use rustc_middle::ty::visit::{MaxUniverse, TypeVisitable};
+use rustc_middle::ty::DefIdTree;
use rustc_middle::ty::{self, Term, ToPredicate, Ty, TyCtxt};
use rustc_span::symbol::sym;
@@ -62,7 +62,8 @@ enum ProjectionCandidate<'tcx> {
/// From a where-clause in the env or object type
ParamEnv(ty::PolyProjectionPredicate<'tcx>),
- /// From the definition of `Trait` when you have something like <<A as Trait>::B as Trait2>::C
+ /// From the definition of `Trait` when you have something like
+ /// `<<A as Trait>::B as Trait2>::C`.
TraitDef(ty::PolyProjectionPredicate<'tcx>),
/// Bounds specified on an object type
@@ -70,6 +71,16 @@ enum ProjectionCandidate<'tcx> {
/// From an "impl" (or a "pseudo-impl" returned by select)
Select(Selection<'tcx>),
+
+ ImplTraitInTrait(ImplTraitInTraitCandidate<'tcx>),
+}
+
+#[derive(PartialEq, Eq, Debug)]
+enum ImplTraitInTraitCandidate<'tcx> {
+ // The `impl Trait` from a trait function's default body
+ Trait,
+ // A concrete type provided from a trait's `impl Trait` from an impl
+ Impl(ImplSourceUserDefinedData<'tcx, PredicateObligation<'tcx>>),
}
enum ProjectionCandidateSet<'tcx> {
@@ -231,7 +242,7 @@ pub(super) fn poly_project_and_unify_type<'cx, 'tcx>(
/// If successful, this may result in additional obligations.
///
/// See [poly_project_and_unify_type] for an explanation of the return value.
-#[tracing::instrument(level = "debug", skip(selcx))]
+#[instrument(level = "debug", skip(selcx))]
fn project_and_unify_type<'cx, 'tcx>(
selcx: &mut SelectionContext<'cx, 'tcx>,
obligation: &ProjectionObligation<'tcx>,
@@ -253,7 +264,7 @@ fn project_and_unify_type<'cx, 'tcx>(
};
debug!(?normalized, ?obligations, "project_and_unify_type result");
let actual = obligation.predicate.term;
- // For an example where this is neccessary see src/test/ui/impl-trait/nested-return-type2.rs
+ // For an example where this is necessary see src/test/ui/impl-trait/nested-return-type2.rs
// This allows users to omit re-mentioning all bounds on an associated type and just use an
// `impl Trait` for the assoc type to add more bounds.
let InferOk { value: actual, obligations: new } =
@@ -511,7 +522,7 @@ impl<'a, 'b, 'tcx> TypeFolder<'tcx> for AssocTypeNormalizer<'a, 'b, 'tcx> {
self.param_env,
ty,
);
- self.selcx.infcx().report_overflow_error(&obligation, true);
+ self.selcx.infcx().err_ctxt().report_overflow_error(&obligation, true);
}
let substs = substs.fold_with(self);
@@ -552,7 +563,7 @@ impl<'a, 'b, 'tcx> TypeFolder<'tcx> for AssocTypeNormalizer<'a, 'b, 'tcx> {
)
.ok()
.flatten()
- .unwrap_or_else(|| ty::Term::Ty(ty.super_fold_with(self)))
+ .unwrap_or_else(|| ty.super_fold_with(self).into())
};
debug!(
?self.depth,
@@ -620,19 +631,33 @@ impl<'a, 'b, 'tcx> TypeFolder<'tcx> for AssocTypeNormalizer<'a, 'b, 'tcx> {
#[instrument(skip(self), level = "debug")]
fn fold_const(&mut self, constant: ty::Const<'tcx>) -> ty::Const<'tcx> {
- if self.selcx.tcx().lazy_normalization() || !self.eager_inference_replacement {
+ let tcx = self.selcx.tcx();
+ if tcx.lazy_normalization() {
constant
} else {
let constant = constant.super_fold_with(self);
- debug!(?constant);
- debug!("self.param_env: {:?}", self.param_env);
- constant.eval(self.selcx.tcx(), self.param_env)
+ debug!(?constant, ?self.param_env);
+ with_replaced_escaping_bound_vars(
+ self.selcx.infcx(),
+ &mut self.universes,
+ constant,
+ |constant| constant.eval(tcx, self.param_env),
+ )
+ }
+ }
+
+ #[inline]
+ fn fold_predicate(&mut self, p: ty::Predicate<'tcx>) -> ty::Predicate<'tcx> {
+ if p.allow_normalization() && needs_normalization(&p, self.param_env.reveal()) {
+ p.super_fold_with(self)
+ } else {
+ p
}
}
}
pub struct BoundVarReplacer<'me, 'tcx> {
- infcx: &'me InferCtxt<'me, 'tcx>,
+ infcx: &'me InferCtxt<'tcx>,
// These three maps track the bound variable that were replaced by placeholders. It might be
// nice to remove these since we already have the `kind` in the placeholder; we really just need
// the `var` (but we *could* bring that into scope if we were to track them as we pass them).
@@ -647,11 +672,46 @@ pub struct BoundVarReplacer<'me, 'tcx> {
universe_indices: &'me mut Vec<Option<ty::UniverseIndex>>,
}
+/// Executes `f` on `value` after replacing all escaping bound variables with placeholders
+/// and then replaces these placeholders with the original bound variables in the result.
+///
+/// In most places, bound variables should be replaced right when entering a binder, making
+/// this function unnecessary. However, normalization currently does not do that, so we have
+/// to do this lazily.
+///
+/// You should not add any additional uses of this function, at least not without first
+/// discussing it with t-types.
+///
+/// FIXME(@lcnr): We may even consider experimenting with eagerly replacing bound vars during
+/// normalization as well, at which point this function will be unnecessary and can be removed.
+pub fn with_replaced_escaping_bound_vars<'a, 'tcx, T: TypeFoldable<'tcx>, R: TypeFoldable<'tcx>>(
+ infcx: &'a InferCtxt<'tcx>,
+ universe_indices: &'a mut Vec<Option<ty::UniverseIndex>>,
+ value: T,
+ f: impl FnOnce(T) -> R,
+) -> R {
+ if value.has_escaping_bound_vars() {
+ let (value, mapped_regions, mapped_types, mapped_consts) =
+ BoundVarReplacer::replace_bound_vars(infcx, universe_indices, value);
+ let result = f(value);
+ PlaceholderReplacer::replace_placeholders(
+ infcx,
+ mapped_regions,
+ mapped_types,
+ mapped_consts,
+ universe_indices,
+ result,
+ )
+ } else {
+ f(value)
+ }
+}
+
impl<'me, 'tcx> BoundVarReplacer<'me, 'tcx> {
/// Returns `Some` if we *were* able to replace bound vars. If there are any bound vars that
/// use a binding level above `universe_indices.len()`, we fail.
pub fn replace_bound_vars<T: TypeFoldable<'tcx>>(
- infcx: &'me InferCtxt<'me, 'tcx>,
+ infcx: &'me InferCtxt<'tcx>,
universe_indices: &'me mut Vec<Option<ty::UniverseIndex>>,
value: T,
) -> (
@@ -771,7 +831,7 @@ impl<'tcx> TypeFolder<'tcx> for BoundVarReplacer<'_, 'tcx> {
// The inverse of `BoundVarReplacer`: replaces placeholders with the bound vars from which they came.
pub struct PlaceholderReplacer<'me, 'tcx> {
- infcx: &'me InferCtxt<'me, 'tcx>,
+ infcx: &'me InferCtxt<'tcx>,
mapped_regions: BTreeMap<ty::PlaceholderRegion, ty::BoundRegion>,
mapped_types: BTreeMap<ty::PlaceholderType, ty::BoundTy>,
mapped_consts: BTreeMap<ty::PlaceholderConst<'tcx>, ty::BoundVar>,
@@ -781,7 +841,7 @@ pub struct PlaceholderReplacer<'me, 'tcx> {
impl<'me, 'tcx> PlaceholderReplacer<'me, 'tcx> {
pub fn replace_placeholders<T: TypeFoldable<'tcx>>(
- infcx: &'me InferCtxt<'me, 'tcx>,
+ infcx: &'me InferCtxt<'tcx>,
mapped_regions: BTreeMap<ty::PlaceholderRegion, ty::BoundRegion>,
mapped_types: BTreeMap<ty::PlaceholderType, ty::BoundTy>,
mapped_consts: BTreeMap<ty::PlaceholderConst<'tcx>, ty::BoundVar>,
@@ -1182,7 +1242,7 @@ impl<'tcx> Progress<'tcx> {
///
/// IMPORTANT:
/// - `obligation` must be fully normalized
-#[tracing::instrument(level = "info", skip(selcx))]
+#[instrument(level = "info", skip(selcx))]
fn project<'cx, 'tcx>(
selcx: &mut SelectionContext<'cx, 'tcx>,
obligation: &ProjectionTyObligation<'tcx>,
@@ -1201,6 +1261,8 @@ fn project<'cx, 'tcx>(
let mut candidates = ProjectionCandidateSet::None;
+ assemble_candidate_for_impl_trait_in_trait(selcx, obligation, &mut candidates);
+
// Make sure that the following procedures are kept in order. ParamEnv
// needs to be first because it has highest priority, and Select checks
// the return value of push_candidate which assumes it's ran at last.
@@ -1239,6 +1301,63 @@ fn project<'cx, 'tcx>(
}
}
+/// If the predicate's item is an `ImplTraitPlaceholder`, we do a select on the
+/// corresponding trait ref. If this yields an `impl`, then we're able to project
+/// to a concrete type, since we have an `impl`'s method to provide the RPITIT.
+fn assemble_candidate_for_impl_trait_in_trait<'cx, 'tcx>(
+ selcx: &mut SelectionContext<'cx, 'tcx>,
+ obligation: &ProjectionTyObligation<'tcx>,
+ candidate_set: &mut ProjectionCandidateSet<'tcx>,
+) {
+ let tcx = selcx.tcx();
+ if tcx.def_kind(obligation.predicate.item_def_id) == DefKind::ImplTraitPlaceholder {
+ let trait_fn_def_id = tcx.impl_trait_in_trait_parent(obligation.predicate.item_def_id);
+ // If we are trying to project an RPITIT with trait's default `Self` parameter,
+ // then we must be within a default trait body.
+ if obligation.predicate.self_ty()
+ == ty::InternalSubsts::identity_for_item(tcx, obligation.predicate.item_def_id)
+ .type_at(0)
+ && tcx.associated_item(trait_fn_def_id).defaultness(tcx).has_value()
+ {
+ candidate_set.push_candidate(ProjectionCandidate::ImplTraitInTrait(
+ ImplTraitInTraitCandidate::Trait,
+ ));
+ return;
+ }
+
+ let trait_def_id = tcx.parent(trait_fn_def_id);
+ let trait_substs =
+ obligation.predicate.substs.truncate_to(tcx, tcx.generics_of(trait_def_id));
+ // FIXME(named-returns): Binders
+ let trait_predicate =
+ ty::Binder::dummy(ty::TraitRef { def_id: trait_def_id, substs: trait_substs })
+ .to_poly_trait_predicate();
+
+ let _ =
+ selcx.infcx().commit_if_ok(|_| match selcx.select(&obligation.with(trait_predicate)) {
+ Ok(Some(super::ImplSource::UserDefined(data))) => {
+ candidate_set.push_candidate(ProjectionCandidate::ImplTraitInTrait(
+ ImplTraitInTraitCandidate::Impl(data),
+ ));
+ Ok(())
+ }
+ Ok(None) => {
+ candidate_set.mark_ambiguous();
+ return Err(());
+ }
+ Ok(Some(_)) => {
+ // Don't know enough about the impl to provide a useful signature
+ return Err(());
+ }
+ Err(e) => {
+ debug!(error = ?e, "selection error");
+ candidate_set.mark_error(e);
+ return Err(());
+ }
+ });
+ }
+}
+
/// The first thing we have to do is scan through the parameter
/// environment to see whether there are any projection predicates
/// there that can answer this question.
@@ -1257,7 +1376,7 @@ fn assemble_candidates_from_param_env<'cx, 'tcx>(
);
}
-/// In the case of a nested projection like <<A as Foo>::FooT as Bar>::BarT, we may find
+/// In the case of a nested projection like `<<A as Foo>::FooT as Bar>::BarT`, we may find
/// that the definition of `Foo` has some clues:
///
/// ```ignore (illustrative)
@@ -1344,7 +1463,7 @@ fn assemble_candidates_from_object_ty<'cx, 'tcx>(
);
}
-#[tracing::instrument(
+#[instrument(
level = "debug",
skip(selcx, candidate_set, ctor, env_predicates, potentially_unnormalized_candidates)
)]
@@ -1378,7 +1497,7 @@ fn assemble_candidates_from_predicates<'cx, 'tcx>(
candidate_set.push_candidate(ctor(data));
if potentially_unnormalized_candidates
- && !obligation.predicate.has_infer_types_or_consts()
+ && !obligation.predicate.has_non_region_infer()
{
// HACK: Pick the first trait def candidate for a fully
// inferred predicate. This is to allow duplicates that
@@ -1395,12 +1514,17 @@ fn assemble_candidates_from_predicates<'cx, 'tcx>(
}
}
-#[tracing::instrument(level = "debug", skip(selcx, obligation, candidate_set))]
+#[instrument(level = "debug", skip(selcx, obligation, candidate_set))]
fn assemble_candidates_from_impls<'cx, 'tcx>(
selcx: &mut SelectionContext<'cx, 'tcx>,
obligation: &ProjectionTyObligation<'tcx>,
candidate_set: &mut ProjectionCandidateSet<'tcx>,
) {
+ // Can't assemble candidate from impl for RPITIT
+ if selcx.tcx().def_kind(obligation.predicate.item_def_id) == DefKind::ImplTraitPlaceholder {
+ return;
+ }
+
// If we are resolving `<T as TraitRef<...>>::Item == Type`,
// start out by selecting the predicate `T as TraitRef<...>`:
let poly_trait_ref = ty::Binder::dummy(obligation.predicate.trait_ref(selcx.tcx()));
@@ -1676,6 +1800,18 @@ fn confirm_candidate<'cx, 'tcx>(
ProjectionCandidate::Select(impl_source) => {
confirm_select_candidate(selcx, obligation, impl_source)
}
+ ProjectionCandidate::ImplTraitInTrait(ImplTraitInTraitCandidate::Impl(data)) => {
+ confirm_impl_trait_in_trait_candidate(selcx, obligation, data)
+ }
+ // If we're projecting an RPITIT for a default trait body, that's just
+ // the same def-id, but as an opaque type (with regular RPIT semantics).
+ ProjectionCandidate::ImplTraitInTrait(ImplTraitInTraitCandidate::Trait) => Progress {
+ term: selcx
+ .tcx()
+ .mk_opaque(obligation.predicate.item_def_id, obligation.predicate.substs)
+ .into(),
+ obligations: vec![],
+ },
};
// When checking for cycle during evaluation, we compare predicates with
@@ -2021,15 +2157,15 @@ fn confirm_impl_candidate<'cx, 'tcx>(
let identity_substs =
crate::traits::InternalSubsts::identity_for_item(tcx, assoc_ty.item.def_id);
let did = ty::WithOptConstParam::unknown(assoc_ty.item.def_id);
- let kind = ty::ConstKind::Unevaluated(ty::Unevaluated::new(did, identity_substs));
+ let kind = ty::ConstKind::Unevaluated(ty::UnevaluatedConst::new(did, identity_substs));
ty.map_bound(|ty| tcx.mk_const(ty::ConstS { ty, kind }).into())
} else {
ty.map_bound(|ty| ty.into())
};
- if substs.len() != tcx.generics_of(assoc_ty.item.def_id).count() {
+ if !check_substs_compatible(tcx, &assoc_ty.item, substs) {
let err = tcx.ty_error_with_message(
obligation.cause.span,
- "impl item and trait item have different parameter counts",
+ "impl item and trait item have different parameters",
);
Progress { term: err.into(), obligations: nested }
} else {
@@ -2038,10 +2174,125 @@ fn confirm_impl_candidate<'cx, 'tcx>(
}
}
+// Verify that the trait item and its implementation have compatible substs lists
+fn check_substs_compatible<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ assoc_ty: &ty::AssocItem,
+ substs: ty::SubstsRef<'tcx>,
+) -> bool {
+ fn check_substs_compatible_inner<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ generics: &'tcx ty::Generics,
+ args: &'tcx [ty::GenericArg<'tcx>],
+ ) -> bool {
+ if generics.count() != args.len() {
+ return false;
+ }
+
+ let (parent_args, own_args) = args.split_at(generics.parent_count);
+
+ if let Some(parent) = generics.parent
+ && let parent_generics = tcx.generics_of(parent)
+ && !check_substs_compatible_inner(tcx, parent_generics, parent_args) {
+ return false;
+ }
+
+ for (param, arg) in std::iter::zip(&generics.params, own_args) {
+ match (&param.kind, arg.unpack()) {
+ (ty::GenericParamDefKind::Type { .. }, ty::GenericArgKind::Type(_))
+ | (ty::GenericParamDefKind::Lifetime, ty::GenericArgKind::Lifetime(_))
+ | (ty::GenericParamDefKind::Const { .. }, ty::GenericArgKind::Const(_)) => {}
+ _ => return false,
+ }
+ }
+
+ true
+ }
+
+ check_substs_compatible_inner(tcx, tcx.generics_of(assoc_ty.def_id), substs.as_slice())
+}
+
+fn confirm_impl_trait_in_trait_candidate<'tcx>(
+ selcx: &mut SelectionContext<'_, 'tcx>,
+ obligation: &ProjectionTyObligation<'tcx>,
+ data: ImplSourceUserDefinedData<'tcx, PredicateObligation<'tcx>>,
+) -> Progress<'tcx> {
+ let tcx = selcx.tcx();
+ let mut obligations = data.nested;
+
+ let trait_fn_def_id = tcx.impl_trait_in_trait_parent(obligation.predicate.item_def_id);
+ let Ok(leaf_def) = assoc_def(selcx, data.impl_def_id, trait_fn_def_id) else {
+ return Progress { term: tcx.ty_error().into(), obligations };
+ };
+ if !leaf_def.item.defaultness(tcx).has_value() {
+ return Progress { term: tcx.ty_error().into(), obligations };
+ }
+
+ // Use the default `impl Trait` for the trait, e.g., for a default trait body
+ if leaf_def.item.container == ty::AssocItemContainer::TraitContainer {
+ return Progress {
+ term: tcx
+ .mk_opaque(obligation.predicate.item_def_id, obligation.predicate.substs)
+ .into(),
+ obligations,
+ };
+ }
+
+ let impl_fn_def_id = leaf_def.item.def_id;
+ // Rebase from {trait}::{fn}::{opaque} to {impl}::{fn}::{opaque},
+ // since `data.substs` are the impl substs.
+ let impl_fn_substs =
+ obligation.predicate.substs.rebase_onto(tcx, tcx.parent(trait_fn_def_id), data.substs);
+
+ let cause = ObligationCause::new(
+ obligation.cause.span,
+ obligation.cause.body_id,
+ super::ItemObligation(impl_fn_def_id),
+ );
+ let predicates = normalize_with_depth_to(
+ selcx,
+ obligation.param_env,
+ cause.clone(),
+ obligation.recursion_depth + 1,
+ tcx.predicates_of(impl_fn_def_id).instantiate(tcx, impl_fn_substs),
+ &mut obligations,
+ );
+ obligations.extend(std::iter::zip(predicates.predicates, predicates.spans).map(
+ |(pred, span)| {
+ Obligation::with_depth(
+ ObligationCause::new(
+ obligation.cause.span,
+ obligation.cause.body_id,
+ if span.is_dummy() {
+ super::ItemObligation(impl_fn_def_id)
+ } else {
+ super::BindingObligation(impl_fn_def_id, span)
+ },
+ ),
+ obligation.recursion_depth + 1,
+ obligation.param_env,
+ pred,
+ )
+ },
+ ));
+
+ let ty = super::normalize_to(
+ selcx,
+ obligation.param_env,
+ cause.clone(),
+ tcx.bound_trait_impl_trait_tys(impl_fn_def_id)
+ .map_bound(|tys| {
+ tys.map_or_else(|_| tcx.ty_error(), |tys| tys[&obligation.predicate.item_def_id])
+ })
+ .subst(tcx, impl_fn_substs),
+ &mut obligations,
+ );
+
+ Progress { term: ty.into(), obligations }
+}
+
// Get obligations corresponding to the predicates from the where-clause of the
// associated type itself.
-// Note: `feature(generic_associated_types)` is required to write such
-// predicates, even for non-generic associated types.
fn assoc_ty_own_obligations<'cx, 'tcx>(
selcx: &mut SelectionContext<'cx, 'tcx>,
obligation: &ProjectionTyObligation<'tcx>,
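
Taken together, the project.rs hunks teach projection about RPITITs: `assemble_candidate_for_impl_trait_in_trait` selects the corresponding trait ref, `confirm_impl_trait_in_trait_candidate` rebases the substs onto the impl fn and reads the concrete type out of `bound_trait_impl_trait_tys`, and the `Trait` variant covers projecting the opaque inside a defaulted trait body. A hedged, user-level sketch of the outcome (nightly feature of that era; not code from this diff):

    #![feature(return_position_impl_trait_in_trait)]

    trait Container {
        fn items(&self) -> impl Iterator<Item = u32>;
    }

    struct Evens;

    impl Container for Evens {
        // Projecting `<Evens as Container>::items::{opaque}` selects this impl
        // and resolves to the concrete adapter type returned here
        // (the `ImplTraitInTraitCandidate::Impl` case).
        fn items(&self) -> impl Iterator<Item = u32> {
            (0..10u32).filter(|n| n % 2 == 0)
        }
    }

    fn sum<C: Container>(c: &C) -> u32 {
        // For a generic `C` the projection stays opaque; only the
        // `Iterator<Item = u32>` bound from the trait signature is usable.
        c.items().sum()
    }

    fn main() {
        assert_eq!(sum(&Evens), 20);
    }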
diff --git a/compiler/rustc_trait_selection/src/traits/query/evaluate_obligation.rs b/compiler/rustc_trait_selection/src/traits/query/evaluate_obligation.rs
index 32669e23d..c84f128dd 100644
--- a/compiler/rustc_trait_selection/src/traits/query/evaluate_obligation.rs
+++ b/compiler/rustc_trait_selection/src/traits/query/evaluate_obligation.rs
@@ -31,7 +31,7 @@ pub trait InferCtxtExt<'tcx> {
) -> EvaluationResult;
}
-impl<'cx, 'tcx> InferCtxtExt<'tcx> for InferCtxt<'cx, 'tcx> {
+impl<'tcx> InferCtxtExt<'tcx> for InferCtxt<'tcx> {
/// Evaluates whether the predicate can be satisfied (by any means)
/// in the given `ParamEnv`.
fn predicate_may_hold(&self, obligation: &PredicateObligation<'tcx>) -> bool {
diff --git a/compiler/rustc_trait_selection/src/traits/query/normalize.rs b/compiler/rustc_trait_selection/src/traits/query/normalize.rs
index 449d7a7b4..58e4597b7 100644
--- a/compiler/rustc_trait_selection/src/traits/query/normalize.rs
+++ b/compiler/rustc_trait_selection/src/traits/query/normalize.rs
@@ -5,17 +5,16 @@
use crate::infer::at::At;
use crate::infer::canonical::OriginalQueryValues;
use crate::infer::{InferCtxt, InferOk};
-use crate::traits::error_reporting::InferCtxtExt;
-use crate::traits::project::needs_normalization;
+use crate::traits::error_reporting::TypeErrCtxtExt;
+use crate::traits::project::{needs_normalization, BoundVarReplacer, PlaceholderReplacer};
use crate::traits::{Obligation, ObligationCause, PredicateObligation, Reveal};
use rustc_data_structures::sso::SsoHashMap;
use rustc_data_structures::stack::ensure_sufficient_stack;
use rustc_infer::traits::Normalized;
-use rustc_middle::mir;
use rustc_middle::ty::fold::{FallibleTypeFolder, TypeFoldable, TypeSuperFoldable};
-use rustc_middle::ty::subst::Subst;
use rustc_middle::ty::visit::{TypeSuperVisitable, TypeVisitable};
use rustc_middle::ty::{self, Ty, TyCtxt, TypeVisitor};
+use rustc_span::DUMMY_SP;
use std::ops::ControlFlow;
@@ -48,10 +47,11 @@ impl<'cx, 'tcx> AtExt<'tcx> for At<'cx, 'tcx> {
T: TypeFoldable<'tcx>,
{
debug!(
- "normalize::<{}>(value={:?}, param_env={:?})",
+ "normalize::<{}>(value={:?}, param_env={:?}, cause={:?})",
std::any::type_name::<T>(),
value,
self.param_env,
+ self.cause,
);
if !needs_normalization(&value, self.param_env.reveal()) {
return Ok(Normalized { value, obligations: vec![] });
@@ -154,7 +154,7 @@ impl<'tcx> TypeVisitor<'tcx> for MaxEscapingBoundVarVisitor {
}
struct QueryNormalizer<'cx, 'tcx> {
- infcx: &'cx InferCtxt<'cx, 'tcx>,
+ infcx: &'cx InferCtxt<'tcx>,
cause: &'cx ObligationCause<'tcx>,
param_env: ty::ParamEnv<'tcx>,
obligations: Vec<PredicateObligation<'tcx>>,
@@ -213,7 +213,7 @@ impl<'cx, 'tcx> FallibleTypeFolder<'tcx> for QueryNormalizer<'cx, 'tcx> {
self.param_env,
ty,
);
- self.infcx.report_overflow_error(&obligation, true);
+ self.infcx.err_ctxt().report_overflow_error(&obligation, true);
}
let generic_ty = self.tcx().bound_type_of(def_id);
@@ -254,7 +254,15 @@ impl<'cx, 'tcx> FallibleTypeFolder<'tcx> for QueryNormalizer<'cx, 'tcx> {
let result = tcx.normalize_projection_ty(c_data)?;
// We don't expect ambiguity.
if result.is_ambiguous() {
- bug!("unexpected ambiguity: {:?} {:?}", c_data, result);
+ // Rustdoc normalizes possibly not well-formed types, so only
+ // treat this as a bug if we're not in rustdoc.
+ if !tcx.sess.opts.actually_rustdoc {
+ tcx.sess.delay_span_bug(
+ DUMMY_SP,
+ format!("unexpected ambiguity: {:?} {:?}", c_data, result),
+ );
+ }
+ return Err(NoSolution);
}
let InferOk { value: result, obligations } =
self.infcx.instantiate_query_response_and_region_obligations(
@@ -266,7 +274,15 @@ impl<'cx, 'tcx> FallibleTypeFolder<'tcx> for QueryNormalizer<'cx, 'tcx> {
debug!("QueryNormalizer: result = {:#?}", result);
debug!("QueryNormalizer: obligations = {:#?}", obligations);
self.obligations.extend(obligations);
- Ok(result.normalized_ty)
+
+ let res = result.normalized_ty;
+ // `tcx.normalize_projection_ty` may normalize to a type that still has
+ // unevaluated consts, so keep normalizing here if that's the case.
+ if res != ty && res.has_type_flags(ty::TypeFlags::HAS_CT_PROJECTION) {
+ Ok(res.try_super_fold_with(self)?)
+ } else {
+ Ok(res)
+ }
}
ty::Projection(data) => {
@@ -275,11 +291,7 @@ impl<'cx, 'tcx> FallibleTypeFolder<'tcx> for QueryNormalizer<'cx, 'tcx> {
let tcx = self.infcx.tcx;
let infcx = self.infcx;
let (data, mapped_regions, mapped_types, mapped_consts) =
- crate::traits::project::BoundVarReplacer::replace_bound_vars(
- infcx,
- &mut self.universes,
- data,
- );
+ BoundVarReplacer::replace_bound_vars(infcx, &mut self.universes, data);
let data = data.try_fold_with(self)?;
let mut orig_values = OriginalQueryValues::default();
@@ -293,7 +305,15 @@ impl<'cx, 'tcx> FallibleTypeFolder<'tcx> for QueryNormalizer<'cx, 'tcx> {
let result = tcx.normalize_projection_ty(c_data)?;
// We don't expect ambiguity.
if result.is_ambiguous() {
- bug!("unexpected ambiguity: {:?} {:?}", c_data, result);
+ // Rustdoc normalizes possibly not well-formed types, so only
+ // treat this as a bug if we're not in rustdoc.
+ if !tcx.sess.opts.actually_rustdoc {
+ tcx.sess.delay_span_bug(
+ DUMMY_SP,
+ format!("unexpected ambiguity: {:?} {:?}", c_data, result),
+ );
+ }
+ return Err(NoSolution);
}
let InferOk { value: result, obligations } =
self.infcx.instantiate_query_response_and_region_obligations(
@@ -305,18 +325,26 @@ impl<'cx, 'tcx> FallibleTypeFolder<'tcx> for QueryNormalizer<'cx, 'tcx> {
debug!("QueryNormalizer: result = {:#?}", result);
debug!("QueryNormalizer: obligations = {:#?}", obligations);
self.obligations.extend(obligations);
- Ok(crate::traits::project::PlaceholderReplacer::replace_placeholders(
+ let res = PlaceholderReplacer::replace_placeholders(
infcx,
mapped_regions,
mapped_types,
mapped_consts,
&self.universes,
result.normalized_ty,
- ))
+ );
+ // `tcx.normalize_projection_ty` may normalize to a type that still has
+ // unevaluated consts, so keep normalizing here if that's the case.
+ if res != ty && res.has_type_flags(ty::TypeFlags::HAS_CT_PROJECTION) {
+ Ok(res.try_super_fold_with(self)?)
+ } else {
+ Ok(res)
+ }
}
_ => ty.try_super_fold_with(self),
})()?;
+
self.cache.insert(ty, res);
Ok(res)
}
@@ -326,29 +354,24 @@ impl<'cx, 'tcx> FallibleTypeFolder<'tcx> for QueryNormalizer<'cx, 'tcx> {
constant: ty::Const<'tcx>,
) -> Result<ty::Const<'tcx>, Self::Error> {
let constant = constant.try_super_fold_with(self)?;
- Ok(constant.eval(self.infcx.tcx, self.param_env))
+ debug!(?constant, ?self.param_env);
+ Ok(crate::traits::project::with_replaced_escaping_bound_vars(
+ self.infcx,
+ &mut self.universes,
+ constant,
+ |constant| constant.eval(self.infcx.tcx, self.param_env),
+ ))
}
- fn try_fold_mir_const(
+ #[inline]
+ fn try_fold_predicate(
&mut self,
- constant: mir::ConstantKind<'tcx>,
- ) -> Result<mir::ConstantKind<'tcx>, Self::Error> {
- Ok(match constant {
- mir::ConstantKind::Ty(c) => {
- let const_folded = c.try_super_fold_with(self)?;
- match const_folded.kind() {
- ty::ConstKind::Value(valtree) => {
- let tcx = self.infcx.tcx;
- let ty = const_folded.ty();
- let const_val = tcx.valtree_to_const_val((ty, valtree));
- debug!(?ty, ?valtree, ?const_val);
-
- mir::ConstantKind::Val(const_val, ty)
- }
- _ => mir::ConstantKind::Ty(const_folded),
- }
- }
- mir::ConstantKind::Val(_, _) => constant.try_super_fold_with(self)?,
- })
+ p: ty::Predicate<'tcx>,
+ ) -> Result<ty::Predicate<'tcx>, Self::Error> {
+ if p.allow_normalization() && needs_normalization(&p, self.param_env.reveal()) {
+ p.try_super_fold_with(self)
+ } else {
+ Ok(p)
+ }
}
}
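
Besides downgrading the rustdoc-only ambiguity to a delayed bug, the query normalizer now re-folds any result that still carries `HAS_CT_PROJECTION` and evaluates constants through `with_replaced_escaping_bound_vars`, replacing the old `try_fold_mir_const` special case with a `try_fold_predicate` filter. The re-folding matters for types that keep unevaluated constants after one round of normalization; a rough user-level sketch of such a type under the incomplete `generic_const_exprs` feature (not from this diff, and may need tweaking on a given nightly):

    #![feature(generic_const_exprs)]
    #![allow(incomplete_features)]

    // `[u8; N + 1]` carries the unevaluated constant `N + 1` until `N` is
    // known, which is the kind of leftover the extra normalization pass
    // is there to fold away.
    fn pad<const N: usize>(xs: [u8; N]) -> [u8; N + 1]
    where
        [u8; N + 1]:,
    {
        let mut out = [0u8; N + 1];
        out[..N].copy_from_slice(&xs);
        out
    }

    fn main() {
        assert_eq!(pad([1, 2, 3]), [1, 2, 3, 0]);
    }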
diff --git a/compiler/rustc_trait_selection/src/traits/query/type_op/custom.rs b/compiler/rustc_trait_selection/src/traits/query/type_op/custom.rs
index c99564936..6bf3ed0d0 100644
--- a/compiler/rustc_trait_selection/src/traits/query/type_op/custom.rs
+++ b/compiler/rustc_trait_selection/src/traits/query/type_op/custom.rs
@@ -1,11 +1,9 @@
use crate::infer::canonical::query_response;
use crate::infer::{InferCtxt, InferOk};
-use crate::traits::engine::TraitEngineExt as _;
+use crate::traits;
use crate::traits::query::type_op::TypeOpOutput;
use crate::traits::query::Fallible;
-use crate::traits::TraitEngine;
use rustc_infer::infer::region_constraints::RegionConstraintData;
-use rustc_infer::traits::TraitEngineExt as _;
use rustc_span::source_map::DUMMY_SP;
use std::fmt;
@@ -18,16 +16,16 @@ pub struct CustomTypeOp<F, G> {
impl<F, G> CustomTypeOp<F, G> {
pub fn new<'tcx, R>(closure: F, description: G) -> Self
where
- F: FnOnce(&InferCtxt<'_, 'tcx>) -> Fallible<InferOk<'tcx, R>>,
+ F: FnOnce(&InferCtxt<'tcx>) -> Fallible<InferOk<'tcx, R>>,
G: Fn() -> String,
{
CustomTypeOp { closure, description }
}
}
-impl<'tcx, F, R, G> super::TypeOp<'tcx> for CustomTypeOp<F, G>
+impl<'tcx, F, R: fmt::Debug, G> super::TypeOp<'tcx> for CustomTypeOp<F, G>
where
- F: for<'a, 'cx> FnOnce(&'a InferCtxt<'cx, 'tcx>) -> Fallible<InferOk<'tcx, R>>,
+ F: for<'a, 'cx> FnOnce(&'a InferCtxt<'tcx>) -> Fallible<InferOk<'tcx, R>>,
G: Fn() -> String,
{
type Output = R;
@@ -38,7 +36,7 @@ where
/// Processes the operation and all resulting obligations,
/// returning the final result along with any region constraints
/// (they will be given over to the NLL region solver).
- fn fully_perform(self, infcx: &InferCtxt<'_, 'tcx>) -> Fallible<TypeOpOutput<'tcx, Self>> {
+ fn fully_perform(self, infcx: &InferCtxt<'tcx>) -> Fallible<TypeOpOutput<'tcx, Self>> {
if cfg!(debug_assertions) {
info!("fully_perform({:?})", self);
}
@@ -59,11 +57,9 @@ where
/// Executes `op` and then scrapes out all the "old style" region
/// constraints that result, creating query-region-constraints.
pub fn scrape_region_constraints<'tcx, Op: super::TypeOp<'tcx, Output = R>, R>(
- infcx: &InferCtxt<'_, 'tcx>,
+ infcx: &InferCtxt<'tcx>,
op: impl FnOnce() -> Fallible<InferOk<'tcx, R>>,
) -> Fallible<(TypeOpOutput<'tcx, Op>, RegionConstraintData<'tcx>)> {
- let mut fulfill_cx = <dyn TraitEngine<'_>>::new(infcx.tcx);
-
// During NLL, we expect that nobody will register region
// obligations **except** as part of a custom type op (and, at the
// end of each custom type op, we scrape out the region
@@ -77,8 +73,7 @@ pub fn scrape_region_constraints<'tcx, Op: super::TypeOp<'tcx, Output = R>, R>(
);
let InferOk { value, obligations } = infcx.commit_if_ok(|_| op())?;
- fulfill_cx.register_predicate_obligations(infcx, obligations);
- let errors = fulfill_cx.select_all_or_error(infcx);
+ let errors = traits::fully_solve_obligations(infcx, obligations);
if !errors.is_empty() {
infcx.tcx.sess.diagnostic().delay_span_bug(
DUMMY_SP,
@@ -94,8 +89,8 @@ pub fn scrape_region_constraints<'tcx, Op: super::TypeOp<'tcx, Output = R>, R>(
infcx.tcx,
region_obligations
.iter()
- .map(|r_o| (r_o.sup_type, r_o.sub_region))
- .map(|(ty, r)| (infcx.resolve_vars_if_possible(ty), r)),
+ .map(|r_o| (r_o.sup_type, r_o.sub_region, r_o.origin.to_constraint_category()))
+ .map(|(ty, r, cc)| (infcx.resolve_vars_if_possible(ty), r, cc)),
&region_constraint_data,
);
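
`scrape_region_constraints` no longer owns a fulfillment context; it defers to the `traits::fully_solve_obligations` helper, which performs the same register-then-select sequence the deleted lines spelled out, and the region obligations now carry their `ConstraintCategory` along. A minimal sketch of the helper's shape, inferred from the code removed here (the real definition lives elsewhere in this crate and may differ in detail):

    // Inferred shape, not the actual source of `fully_solve_obligations`.
    fn fully_solve_obligations<'tcx>(
        infcx: &InferCtxt<'tcx>,
        obligations: impl IntoIterator<Item = PredicateObligation<'tcx>>,
    ) -> Vec<FulfillmentError<'tcx>> {
        let mut fulfill_cx = <dyn TraitEngine<'tcx>>::new(infcx.tcx);
        fulfill_cx.register_predicate_obligations(infcx, obligations);
        fulfill_cx.select_all_or_error(infcx)
    }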
diff --git a/compiler/rustc_trait_selection/src/traits/query/type_op/mod.rs b/compiler/rustc_trait_selection/src/traits/query/type_op/mod.rs
index 578e1d00c..29ae8ae6b 100644
--- a/compiler/rustc_trait_selection/src/traits/query/type_op/mod.rs
+++ b/compiler/rustc_trait_selection/src/traits/query/type_op/mod.rs
@@ -26,13 +26,13 @@ pub use rustc_middle::traits::query::type_op::*;
/// extract out the resulting region constraints (or an error if it
/// cannot be completed).
pub trait TypeOp<'tcx>: Sized + fmt::Debug {
- type Output;
+ type Output: fmt::Debug;
type ErrorInfo;
/// Processes the operation and all resulting obligations,
/// returning the final result along with any region constraints
/// (they will be given over to the NLL region solver).
- fn fully_perform(self, infcx: &InferCtxt<'_, 'tcx>) -> Fallible<TypeOpOutput<'tcx, Self>>;
+ fn fully_perform(self, infcx: &InferCtxt<'tcx>) -> Fallible<TypeOpOutput<'tcx, Self>>;
}
/// The output from performing a type op
@@ -78,7 +78,7 @@ pub trait QueryTypeOp<'tcx>: fmt::Debug + Copy + TypeFoldable<'tcx> + 'tcx {
fn fully_perform_into(
query_key: ParamEnvAnd<'tcx, Self>,
- infcx: &InferCtxt<'_, 'tcx>,
+ infcx: &InferCtxt<'tcx>,
output_query_region_constraints: &mut QueryRegionConstraints<'tcx>,
) -> Fallible<(
Self::QueryResponse,
@@ -120,7 +120,7 @@ where
type Output = Q::QueryResponse;
type ErrorInfo = Canonical<'tcx, ParamEnvAnd<'tcx, Q>>;
- fn fully_perform(self, infcx: &InferCtxt<'_, 'tcx>) -> Fallible<TypeOpOutput<'tcx, Self>> {
+ fn fully_perform(self, infcx: &InferCtxt<'tcx>) -> Fallible<TypeOpOutput<'tcx, Self>> {
let mut region_constraints = QueryRegionConstraints::default();
let (output, error_info, mut obligations, _) =
Q::fully_perform_into(self, infcx, &mut region_constraints)?;
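
The `TypeOp` signatures here reflect a change visible throughout this diff: `InferCtxt<'_, 'tcx>` collapses to `InferCtxt<'tcx>`, `TypeOp::Output` now requires `fmt::Debug`, and (as the `receiver_is_dispatchable` hunk shows) the inference context is built as an owned value rather than borrowed inside an `enter` closure. Restated as a small internal-API sketch, with `tcx` and `obligation` assumed to come from surrounding code:

    // Before: tcx.infer_ctxt().enter(|infcx| infcx.predicate_must_hold_modulo_regions(&obligation))
    fn holds<'tcx>(tcx: TyCtxt<'tcx>, obligation: &PredicateObligation<'tcx>) -> bool {
        let infcx = tcx.infer_ctxt().build();
        infcx.predicate_must_hold_modulo_regions(obligation)
    }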
diff --git a/compiler/rustc_trait_selection/src/traits/relationships.rs b/compiler/rustc_trait_selection/src/traits/relationships.rs
index 8148e2b78..8cf500a46 100644
--- a/compiler/rustc_trait_selection/src/traits/relationships.rs
+++ b/compiler/rustc_trait_selection/src/traits/relationships.rs
@@ -6,7 +6,7 @@ use rustc_middle::ty::{self, ToPredicate};
pub(crate) fn update<'tcx, T>(
engine: &mut T,
- infcx: &InferCtxt<'_, 'tcx>,
+ infcx: &InferCtxt<'tcx>,
obligation: &PredicateObligation<'tcx>,
) where
T: TraitEngine<'tcx>,
diff --git a/compiler/rustc_trait_selection/src/traits/select/candidate_assembly.rs b/compiler/rustc_trait_selection/src/traits/select/candidate_assembly.rs
index a60ce0f34..4c5bc3339 100644
--- a/compiler/rustc_trait_selection/src/traits/select/candidate_assembly.rs
+++ b/compiler/rustc_trait_selection/src/traits/select/candidate_assembly.rs
@@ -6,9 +6,10 @@
//!
//! [rustc dev guide]:https://rustc-dev-guide.rust-lang.org/traits/resolution.html#candidate-assembly
use hir::LangItem;
+use rustc_errors::DelayDm;
use rustc_hir as hir;
use rustc_hir::def_id::DefId;
-use rustc_infer::traits::TraitEngine;
+use rustc_infer::traits::ObligationCause;
use rustc_infer::traits::{Obligation, SelectionError, TraitObligation};
use rustc_lint_defs::builtin::DEREF_INTO_DYN_SUPERTRAIT;
use rustc_middle::ty::print::with_no_trimmed_paths;
@@ -28,7 +29,7 @@ use super::SelectionCandidate::{self, *};
use super::{EvaluatedCandidate, SelectionCandidateSet, SelectionContext, TraitObligationStack};
impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
- #[instrument(level = "debug", skip(self))]
+ #[instrument(level = "debug", skip(self), ret)]
pub(super) fn candidate_from_obligation<'o>(
&mut self,
stack: &TraitObligationStack<'o, 'tcx>,
@@ -48,7 +49,7 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
if let Some(c) =
self.check_candidate_cache(stack.obligation.param_env, cache_fresh_trait_pred)
{
- debug!(candidate = ?c, "CACHE HIT");
+ debug!("CACHE HIT");
return c;
}
@@ -61,7 +62,7 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
let (candidate, dep_node) =
self.in_task(|this| this.candidate_from_obligation_no_cache(stack));
- debug!(?candidate, "CACHE MISS");
+ debug!("CACHE MISS");
self.insert_candidate_cache(
stack.obligation.param_env,
cache_fresh_trait_pred,
@@ -75,7 +76,7 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
&mut self,
stack: &TraitObligationStack<'o, 'tcx>,
) -> SelectionResult<'tcx, SelectionCandidate<'tcx>> {
- if let Some(conflict) = self.is_knowable(stack) {
+ if let Err(conflict) = self.is_knowable(stack) {
debug!("coherence stage: not knowable");
if self.intercrate_ambiguity_causes.is_some() {
debug!("evaluate_stack: intercrate_ambiguity_causes is some");
@@ -173,7 +174,7 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
debug!(?stack, ?candidates, "winnowed to {} candidates", candidates.len());
- let needs_infer = stack.obligation.predicate.has_infer_types_or_consts();
+ let needs_infer = stack.obligation.predicate.has_non_region_infer();
// If there are STILL multiple candidates, we can further
// reduce the list by dropping duplicates -- including
@@ -309,6 +310,8 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
// User-defined transmutability impls are permitted.
self.assemble_candidates_from_impls(obligation, &mut candidates);
self.assemble_candidates_for_transmutability(obligation, &mut candidates);
+ } else if lang_items.tuple_trait() == Some(def_id) {
+ self.assemble_candidate_for_tuple(obligation, &mut candidates);
} else {
if lang_items.clone_trait() == Some(def_id) {
// Same builtin conditions as `Copy`, i.e., every type which has builtin support
@@ -337,7 +340,7 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
Ok(candidates)
}
- #[tracing::instrument(level = "debug", skip(self, candidates))]
+ #[instrument(level = "debug", skip(self, candidates))]
fn assemble_candidates_from_projected_tys(
&mut self,
obligation: &TraitObligation<'tcx>,
@@ -360,14 +363,16 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
.infcx
.probe(|_| self.match_projection_obligation_against_definition_bounds(obligation));
- candidates.vec.extend(result.into_iter().map(ProjectionCandidate));
+ candidates
+ .vec
+ .extend(result.into_iter().map(|(idx, constness)| ProjectionCandidate(idx, constness)));
}
/// Given an obligation like `<SomeTrait for T>`, searches the obligations that the caller
/// supplied to find out whether it is listed among them.
///
/// Never affects the inference environment.
- #[tracing::instrument(level = "debug", skip(self, stack, candidates))]
+ #[instrument(level = "debug", skip(self, stack, candidates))]
fn assemble_candidates_from_caller_bounds<'o>(
&mut self,
stack: &TraitObligationStack<'o, 'tcx>,
@@ -620,7 +625,7 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
}
}
- _ => candidates.vec.push(AutoImplCandidate(def_id)),
+ _ => candidates.vec.push(AutoImplCandidate),
}
}
}
@@ -706,8 +711,8 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
fn need_migrate_deref_output_trait_object(
&mut self,
ty: Ty<'tcx>,
- cause: &traits::ObligationCause<'tcx>,
param_env: ty::ParamEnv<'tcx>,
+ cause: &ObligationCause<'tcx>,
) -> Option<(Ty<'tcx>, DefId)> {
let tcx = self.tcx();
if tcx.features().trait_upcasting {
@@ -729,24 +734,27 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
return None;
}
- let mut fulfillcx = traits::FulfillmentContext::new_in_snapshot();
- let normalized_ty = fulfillcx.normalize_projection_type(
- &self.infcx,
+ let ty = traits::normalize_projection_type(
+ self,
param_env,
ty::ProjectionTy {
item_def_id: tcx.lang_items().deref_target()?,
substs: trait_ref.substs,
},
cause.clone(),
- );
-
- let ty::Dynamic(data, ..) = normalized_ty.kind() else {
- return None;
- };
-
- let def_id = data.principal_def_id()?;
-
- return Some((normalized_ty, def_id));
+ 0,
+ // We're *intentionally* throwing these away,
+ // since we don't actually use them.
+ &mut vec![],
+ )
+ .ty()
+ .unwrap();
+
+ if let ty::Dynamic(data, ..) = ty.kind() {
+ Some((ty, data.principal_def_id()?))
+ } else {
+ None
+ }
}
/// Searches for unsizing that might apply to `obligation`.
@@ -809,8 +817,8 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
if let Some((deref_output_ty, deref_output_trait_did)) = self
.need_migrate_deref_output_trait_object(
source,
- &obligation.cause,
obligation.param_env,
+ &obligation.cause,
)
{
if deref_output_trait_did == target_trait_did {
@@ -818,13 +826,11 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
DEREF_INTO_DYN_SUPERTRAIT,
obligation.cause.body_id,
obligation.cause.span,
- |lint| {
- lint.build(&format!(
- "`{}` implements `Deref` with supertrait `{}` as output",
- source,
- deref_output_ty
- )).emit();
- },
+ DelayDm(|| format!(
+ "`{}` implements `Deref` with supertrait `{}` as output",
+ source, deref_output_ty
+ )),
+ |lint| lint,
);
return;
}
@@ -877,17 +883,17 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
};
}
- #[tracing::instrument(level = "debug", skip(self, obligation, candidates))]
+ #[instrument(level = "debug", skip(self, obligation, candidates))]
fn assemble_candidates_for_transmutability(
&mut self,
obligation: &TraitObligation<'tcx>,
candidates: &mut SelectionCandidateSet<'tcx>,
) {
- if obligation.has_param_types_or_consts() {
+ if obligation.has_non_region_param() {
return;
}
- if obligation.has_infer_types_or_consts() {
+ if obligation.has_non_region_infer() {
candidates.ambiguous = true;
return;
}
@@ -895,7 +901,7 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
candidates.vec.push(TransmutabilityCandidate);
}
- #[tracing::instrument(level = "debug", skip(self, obligation, candidates))]
+ #[instrument(level = "debug", skip(self, obligation, candidates))]
fn assemble_candidates_for_trait_alias(
&mut self,
obligation: &TraitObligation<'tcx>,
@@ -908,13 +914,13 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
let def_id = obligation.predicate.def_id();
if self.tcx().is_trait_alias(def_id) {
- candidates.vec.push(TraitAliasCandidate(def_id));
+ candidates.vec.push(TraitAliasCandidate);
}
}
/// Assembles the trait which are built-in to the language itself:
/// `Copy`, `Clone` and `Sized`.
- #[tracing::instrument(level = "debug", skip(self, candidates))]
+ #[instrument(level = "debug", skip(self, candidates))]
fn assemble_builtin_bound_candidates(
&mut self,
conditions: BuiltinImplConditions<'tcx>,
@@ -1006,4 +1012,46 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
}
}
}
+
+ fn assemble_candidate_for_tuple(
+ &mut self,
+ obligation: &TraitObligation<'tcx>,
+ candidates: &mut SelectionCandidateSet<'tcx>,
+ ) {
+ let self_ty = self.infcx().shallow_resolve(obligation.self_ty().skip_binder());
+ match self_ty.kind() {
+ ty::Tuple(_) => {
+ candidates.vec.push(BuiltinCandidate { has_nested: false });
+ }
+ ty::Infer(ty::TyVar(_)) => {
+ candidates.ambiguous = true;
+ }
+ ty::Bool
+ | ty::Char
+ | ty::Int(_)
+ | ty::Uint(_)
+ | ty::Float(_)
+ | ty::Adt(_, _)
+ | ty::Foreign(_)
+ | ty::Str
+ | ty::Array(_, _)
+ | ty::Slice(_)
+ | ty::RawPtr(_)
+ | ty::Ref(_, _, _)
+ | ty::FnDef(_, _)
+ | ty::FnPtr(_)
+ | ty::Dynamic(_, _, _)
+ | ty::Closure(_, _)
+ | ty::Generator(_, _, _)
+ | ty::GeneratorWitness(_)
+ | ty::Never
+ | ty::Projection(_)
+ | ty::Opaque(_, _)
+ | ty::Param(_)
+ | ty::Bound(_, _)
+ | ty::Error(_)
+ | ty::Infer(_)
+ | ty::Placeholder(_) => {}
+ }
+ }
}
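
The assembly changes swap `tracing::instrument` for the plain `instrument` macro, route the deref-into-dyn-supertrait lint through `DelayDm`, and add `assemble_candidate_for_tuple` for the new `Tuple` marker trait: a tuple type gets a nested-free builtin candidate, an unresolved inference variable is ambiguous, and every other kind gets no candidate. A hedged, user-level sketch of the trait in use (the exact feature-gate name is not taken from this diff):

    #![feature(tuple_trait)]
    use std::marker::Tuple;

    fn takes_tuple<T: Tuple>(_args: T) {}

    fn main() {
        takes_tuple(());          // `ty::Tuple` => builtin candidate
        takes_tuple((1, "two"));  // any arity works
        // takes_tuple(5);        // no candidate: `i32` is not a tuple
    }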
diff --git a/compiler/rustc_trait_selection/src/traits/select/confirmation.rs b/compiler/rustc_trait_selection/src/traits/select/confirmation.rs
index 2a1099fc8..ed22058c6 100644
--- a/compiler/rustc_trait_selection/src/traits/select/confirmation.rs
+++ b/compiler/rustc_trait_selection/src/traits/select/confirmation.rs
@@ -11,9 +11,10 @@ use rustc_hir::lang_items::LangItem;
use rustc_index::bit_set::GrowableBitSet;
use rustc_infer::infer::InferOk;
use rustc_infer::infer::LateBoundRegionConversionTime::HigherRankedType;
-use rustc_middle::ty::subst::{GenericArg, GenericArgKind, InternalSubsts, Subst, SubstsRef};
-use rustc_middle::ty::{self, GenericParamDefKind, Ty, TyCtxt};
-use rustc_middle::ty::{ToPolyTraitRef, ToPredicate};
+use rustc_middle::ty::{
+ self, GenericArg, GenericArgKind, GenericParamDefKind, InternalSubsts, SubstsRef,
+ ToPolyTraitRef, ToPredicate, Ty, TyCtxt,
+};
use rustc_span::def_id::DefId;
use crate::traits::project::{normalize_with_depth, normalize_with_depth_to};
@@ -63,15 +64,14 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
ImplSource::UserDefined(self.confirm_impl_candidate(obligation, impl_def_id))
}
- AutoImplCandidate(trait_def_id) => {
- let data = self.confirm_auto_impl_candidate(obligation, trait_def_id);
+ AutoImplCandidate => {
+ let data = self.confirm_auto_impl_candidate(obligation);
ImplSource::AutoImpl(data)
}
- ProjectionCandidate(idx) => {
+ ProjectionCandidate(idx, constness) => {
let obligations = self.confirm_projection_candidate(obligation, idx)?;
- // FIXME(jschievink): constness
- ImplSource::Param(obligations, ty::BoundConstness::NotConst)
+ ImplSource::Param(obligations, constness)
}
ObjectCandidate(idx) => {
@@ -100,8 +100,8 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
PointeeCandidate => ImplSource::Pointee(ImplSourcePointeeData),
- TraitAliasCandidate(alias_def_id) => {
- let data = self.confirm_trait_alias_candidate(obligation, alias_def_id);
+ TraitAliasCandidate => {
+ let data = self.confirm_trait_alias_candidate(obligation);
ImplSource::TraitAlias(data)
}
@@ -279,29 +279,19 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
let predicate = obligation.predicate;
let type_at = |i| predicate.map_bound(|p| p.trait_ref.substs.type_at(i));
- let bool_at = |i| {
- predicate
- .skip_binder()
- .trait_ref
- .substs
- .const_at(i)
- .try_eval_bool(self.tcx(), obligation.param_env)
- .unwrap_or(true)
- };
+ let const_at = |i| predicate.skip_binder().trait_ref.substs.const_at(i);
let src_and_dst = predicate.map_bound(|p| rustc_transmute::Types {
- src: p.trait_ref.substs.type_at(1),
dst: p.trait_ref.substs.type_at(0),
+ src: p.trait_ref.substs.type_at(1),
});
let scope = type_at(2).skip_binder();
- let assume = rustc_transmute::Assume {
- alignment: bool_at(3),
- lifetimes: bool_at(4),
- validity: bool_at(5),
- visibility: bool_at(6),
- };
+ let Some(assume) =
+ rustc_transmute::Assume::from_const(self.infcx.tcx, obligation.param_env, const_at(3)) else {
+ return Err(Unimplemented);
+ };
let cause = obligation.cause.clone();
@@ -325,13 +315,12 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
fn confirm_auto_impl_candidate(
&mut self,
obligation: &TraitObligation<'tcx>,
- trait_def_id: DefId,
) -> ImplSourceAutoImplData<PredicateObligation<'tcx>> {
- debug!(?obligation, ?trait_def_id, "confirm_auto_impl_candidate");
+ debug!(?obligation, "confirm_auto_impl_candidate");
let self_ty = self.infcx.shallow_resolve(obligation.predicate.self_ty());
let types = self.constituent_types_for_ty(self_ty);
- self.vtable_auto_impl(obligation, trait_def_id, types)
+ self.vtable_auto_impl(obligation, obligation.predicate.def_id(), types)
}
/// See `confirm_auto_impl_candidate`.
@@ -629,17 +618,47 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
)
.map_bound(|(trait_ref, _)| trait_ref);
- let nested = self.confirm_poly_trait_refs(obligation, trait_ref)?;
+ let mut nested = self.confirm_poly_trait_refs(obligation, trait_ref)?;
+
+ // Confirm the `type Output: Sized;` bound that is present on `FnOnce`
+ let cause = obligation.derived_cause(BuiltinDerivedObligation);
+ // The binder on the Fn obligation is "less" important than the one on
+ // the signature, as evidenced by how we treat it during projection.
+ // The safe thing to do here is to liberate it, though, which should
+ // have no worse effect than skipping the binder here.
+ let liberated_fn_ty =
+ self.infcx.replace_bound_vars_with_placeholders(obligation.predicate.rebind(self_ty));
+ let output_ty = self
+ .infcx
+ .replace_bound_vars_with_placeholders(liberated_fn_ty.fn_sig(self.tcx()).output());
+ let output_ty = normalize_with_depth_to(
+ self,
+ obligation.param_env,
+ cause.clone(),
+ obligation.recursion_depth,
+ output_ty,
+ &mut nested,
+ );
+ let tr = ty::Binder::dummy(ty::TraitRef::new(
+ self.tcx().require_lang_item(LangItem::Sized, None),
+ self.tcx().mk_substs_trait(output_ty, &[]),
+ ));
+ nested.push(Obligation::new(
+ cause,
+ obligation.param_env,
+ tr.to_poly_trait_predicate().to_predicate(self.tcx()),
+ ));
+
Ok(ImplSourceFnPointerData { fn_ty: self_ty, nested })
}
fn confirm_trait_alias_candidate(
&mut self,
obligation: &TraitObligation<'tcx>,
- alias_def_id: DefId,
) -> ImplSourceTraitAliasData<'tcx, PredicateObligation<'tcx>> {
- debug!(?obligation, ?alias_def_id, "confirm_trait_alias_candidate");
+ debug!(?obligation, "confirm_trait_alias_candidate");
+ let alias_def_id = obligation.predicate.def_id();
let predicate = self.infcx().replace_bound_vars_with_placeholders(obligation.predicate);
let trait_ref = predicate.trait_ref;
let trait_def_id = trait_ref.def_id;
@@ -794,7 +813,9 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
let upcast_trait_ref;
match (source.kind(), target.kind()) {
// TraitA+Kx+'a -> TraitB+Ky+'b (trait upcasting coercion).
- (&ty::Dynamic(ref data_a, r_a), &ty::Dynamic(ref data_b, r_b)) => {
+ (&ty::Dynamic(ref data_a, r_a, repr_a), &ty::Dynamic(ref data_b, r_b, repr_b))
+ if repr_a == repr_b =>
+ {
// See `assemble_candidates_for_unsizing` for more info.
// We already checked the compatibility of auto traits within `assemble_candidates_for_unsizing`.
let principal_a = data_a.principal().unwrap();
@@ -820,7 +841,7 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
.map(ty::Binder::dummy),
);
let existential_predicates = tcx.mk_poly_existential_predicates(iter);
- let source_trait = tcx.mk_dynamic(existential_predicates, r_b);
+ let source_trait = tcx.mk_dynamic(existential_predicates, r_b, repr_b);
// Require that the traits involved in this upcast are **equal**;
// only the **lifetime bound** is changed.
@@ -898,7 +919,7 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
let mut nested = vec![];
match (source.kind(), target.kind()) {
// Trait+Kx+'a -> Trait+Ky+'b (auto traits and lifetime subtyping).
- (&ty::Dynamic(ref data_a, r_a), &ty::Dynamic(ref data_b, r_b)) => {
+ (&ty::Dynamic(ref data_a, r_a, ty::Dyn), &ty::Dynamic(ref data_b, r_b, ty::Dyn)) => {
// See `assemble_candidates_for_unsizing` for more info.
// We already checked the compatibility of auto traits within `assemble_candidates_for_unsizing`.
let iter = data_a
@@ -917,7 +938,7 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
.map(ty::Binder::dummy),
);
let existential_predicates = tcx.mk_poly_existential_predicates(iter);
- let source_trait = tcx.mk_dynamic(existential_predicates, r_b);
+ let source_trait = tcx.mk_dynamic(existential_predicates, r_b, ty::Dyn);
// Require that the traits involved in this upcast are **equal**;
// only the **lifetime bound** is changed.
@@ -944,7 +965,7 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
}
// `T` -> `Trait`
- (_, &ty::Dynamic(ref data, r)) => {
+ (_, &ty::Dynamic(ref data, r, ty::Dyn)) => {
let mut object_dids = data.auto_traits().chain(data.principal_def_id());
if let Some(did) = object_dids.find(|did| !tcx.is_object_safe(*did)) {
return Err(TraitNotObjectSafe(did));
@@ -1047,9 +1068,25 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
return Err(Unimplemented);
}
- // Extract `TailField<T>` and `TailField<U>` from `Struct<T>` and `Struct<U>`.
- let source_tail = tail_field_ty.subst(tcx, substs_a);
- let target_tail = tail_field_ty.subst(tcx, substs_b);
+ // Extract `TailField<T>` and `TailField<U>` from `Struct<T>` and `Struct<U>`,
+ // normalizing in the process, since `type_of` returns something directly from
+ // astconv (which means it's un-normalized).
+ let source_tail = normalize_with_depth_to(
+ self,
+ obligation.param_env,
+ obligation.cause.clone(),
+ obligation.recursion_depth + 1,
+ tail_field_ty.subst(tcx, substs_a),
+ &mut nested,
+ );
+ let target_tail = normalize_with_depth_to(
+ self,
+ obligation.param_env,
+ obligation.cause.clone(),
+ obligation.recursion_depth + 1,
+ tail_field_ty.subst(tcx, substs_b),
+ &mut nested,
+ );
// Check that the source struct with the target's
// unsizing parameters is equal to the target.
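// Illustrative sketch, not part of the diff above: the `Struct<T>` -> `Struct<U>`
// unsizing this code handles, where only the tail field changes.
struct Wrapper<T: ?Sized> {
    len: usize,
    tail: T, // the `TailField` the comments refer to
}

fn unsize_tail(w: Box<Wrapper<[u8; 4]>>) -> Box<Wrapper<[u8]>> {
    w // `Wrapper<[u8; 4]>: Unsize<Wrapper<[u8]>>`, so `Box` coerces
}

fn main() {
    let w = unsize_tail(Box::new(Wrapper { len: 4, tail: [1u8, 2, 3, 4] }));
    println!("{} {:?}", w.len, &w.tail);
}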
@@ -1188,6 +1225,9 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
| ty::Never
| ty::Foreign(_) => {}
+ // `ManuallyDrop` is trivially drop
+ ty::Adt(def, _) if Some(def.did()) == tcx.lang_items().manually_drop() => {}
+
// These types are built-in, so we can fast-track by registering
// nested predicates for their constituent type(s)
ty::Array(ty, _) | ty::Slice(ty) => {
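// Illustrative sketch, not part of the diff above: why `ManuallyDrop` can be
// fast-tracked — it carries no drop glue for its contents.
use std::mem::ManuallyDrop;

struct Noisy;

impl Drop for Noisy {
    fn drop(&mut self) {
        println!("dropped");
    }
}

fn main() {
    let _wrapped = ManuallyDrop::new(Noisy); // prints nothing: `Noisy::drop` never runs
}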
diff --git a/compiler/rustc_trait_selection/src/traits/select/mod.rs b/compiler/rustc_trait_selection/src/traits/select/mod.rs
index c01ac1979..9ebff4892 100644
--- a/compiler/rustc_trait_selection/src/traits/select/mod.rs
+++ b/compiler/rustc_trait_selection/src/traits/select/mod.rs
@@ -20,7 +20,7 @@ use super::{
};
use crate::infer::{InferCtxt, InferOk, TypeFreshener};
-use crate::traits::error_reporting::InferCtxtExt;
+use crate::traits::error_reporting::TypeErrCtxtExt;
use crate::traits::project::ProjectAndUnifyResult;
use crate::traits::project::ProjectionCacheKeyExt;
use crate::traits::ProjectionCacheKey;
@@ -35,9 +35,8 @@ use rustc_middle::mir::interpret::ErrorHandled;
use rustc_middle::ty::abstract_const::NotConstEvaluatable;
use rustc_middle::ty::fast_reject::{DeepRejectCtxt, TreatParams};
use rustc_middle::ty::fold::BottomUpFolder;
-use rustc_middle::ty::print::with_no_trimmed_paths;
use rustc_middle::ty::relate::TypeRelation;
-use rustc_middle::ty::subst::{Subst, SubstsRef};
+use rustc_middle::ty::SubstsRef;
use rustc_middle::ty::{self, EarlyBinder, PolyProjectionPredicate, ToPolyTraitRef, ToPredicate};
use rustc_middle::ty::{Ty, TyCtxt, TypeFoldable, TypeVisitable};
use rustc_span::symbol::sym;
@@ -94,7 +93,7 @@ impl IntercrateAmbiguityCause {
}
pub struct SelectionContext<'cx, 'tcx> {
- infcx: &'cx InferCtxt<'cx, 'tcx>,
+ infcx: &'cx InferCtxt<'tcx>,
/// Freshener used specifically for entries on the obligation
/// stack. This ensures that all entries on the stack at one time
@@ -215,7 +214,7 @@ enum BuiltinImplConditions<'tcx> {
}
impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
- pub fn new(infcx: &'cx InferCtxt<'cx, 'tcx>) -> SelectionContext<'cx, 'tcx> {
+ pub fn new(infcx: &'cx InferCtxt<'tcx>) -> SelectionContext<'cx, 'tcx> {
SelectionContext {
infcx,
freshener: infcx.freshener_keep_static(),
@@ -225,28 +224,16 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
}
}
- pub fn intercrate(infcx: &'cx InferCtxt<'cx, 'tcx>) -> SelectionContext<'cx, 'tcx> {
- SelectionContext {
- infcx,
- freshener: infcx.freshener_keep_static(),
- intercrate: true,
- intercrate_ambiguity_causes: None,
- query_mode: TraitQueryMode::Standard,
- }
+ pub fn intercrate(infcx: &'cx InferCtxt<'tcx>) -> SelectionContext<'cx, 'tcx> {
+ SelectionContext { intercrate: true, ..SelectionContext::new(infcx) }
}
pub fn with_query_mode(
- infcx: &'cx InferCtxt<'cx, 'tcx>,
+ infcx: &'cx InferCtxt<'tcx>,
query_mode: TraitQueryMode,
) -> SelectionContext<'cx, 'tcx> {
debug!(?query_mode, "with_query_mode");
- SelectionContext {
- infcx,
- freshener: infcx.freshener_keep_static(),
- intercrate: false,
- intercrate_ambiguity_causes: None,
- query_mode,
- }
+ SelectionContext { query_mode, ..SelectionContext::new(infcx) }
}
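// Illustrative sketch, not part of the diff above: the constructor refactor in
// miniature. The real `SelectionContext` fields and lifetimes are omitted here;
// the point is that the variants reuse `new()` via struct update syntax instead
// of repeating every field.
struct Ctx {
    intercrate: bool,
    query_mode: u8,
}

impl Ctx {
    fn new() -> Self {
        Ctx { intercrate: false, query_mode: 0 }
    }

    fn intercrate() -> Self {
        Ctx { intercrate: true, ..Ctx::new() }
    }

    fn with_query_mode(query_mode: u8) -> Self {
        Ctx { query_mode, ..Ctx::new() }
    }
}

fn main() {
    let a = Ctx::intercrate();
    let b = Ctx::with_query_mode(2);
    println!("{} {}", a.intercrate, b.query_mode);
}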
/// Enables tracking of intercrate ambiguity causes. See
@@ -266,7 +253,7 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
self.intercrate_ambiguity_causes.take().unwrap_or_default()
}
- pub fn infcx(&self) -> &'cx InferCtxt<'cx, 'tcx> {
+ pub fn infcx(&self) -> &'cx InferCtxt<'tcx> {
self.infcx
}
@@ -295,7 +282,7 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
/// Attempts to satisfy the obligation. If successful, this will affect the surrounding
/// type environment by performing unification.
- #[instrument(level = "debug", skip(self))]
+ #[instrument(level = "debug", skip(self), ret)]
pub fn select(
&mut self,
obligation: &TraitObligation<'tcx>,
@@ -325,10 +312,7 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
Err(SelectionError::Overflow(OverflowError::Canonical))
}
Err(e) => Err(e),
- Ok(candidate) => {
- debug!(?candidate, "confirmed");
- Ok(Some(candidate))
- }
+ Ok(candidate) => Ok(Some(candidate)),
}
}
@@ -435,6 +419,7 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
level = "debug",
skip(self, previous_stack),
fields(previous_stack = ?previous_stack.head())
+ ret,
)]
fn evaluate_predicate_recursively<'o>(
&mut self,
@@ -450,7 +435,7 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
None => self.check_recursion_limit(&obligation, &obligation)?,
}
- let result = ensure_sufficient_stack(|| {
+ ensure_sufficient_stack(|| {
let bound_predicate = obligation.predicate.kind();
match bound_predicate.skip_binder() {
ty::PredicateKind::Trait(t) => {
@@ -464,15 +449,15 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
let p = bound_predicate.rebind(p);
// Does this code ever run?
match self.infcx.subtype_predicate(&obligation.cause, obligation.param_env, p) {
- Some(Ok(InferOk { mut obligations, .. })) => {
+ Ok(Ok(InferOk { mut obligations, .. })) => {
self.add_depth(obligations.iter_mut(), obligation.recursion_depth);
self.evaluate_predicates_recursively(
previous_stack,
obligations.into_iter(),
)
}
- Some(Err(_)) => Ok(EvaluatedToErr),
- None => Ok(EvaluatedToAmbig),
+ Ok(Err(_)) => Ok(EvaluatedToErr),
+ Err(..) => Ok(EvaluatedToAmbig),
}
}
@@ -480,15 +465,15 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
let p = bound_predicate.rebind(p);
// Does this code ever run?
match self.infcx.coerce_predicate(&obligation.cause, obligation.param_env, p) {
- Some(Ok(InferOk { mut obligations, .. })) => {
+ Ok(Ok(InferOk { mut obligations, .. })) => {
self.add_depth(obligations.iter_mut(), obligation.recursion_depth);
self.evaluate_predicates_recursively(
previous_stack,
obligations.into_iter(),
)
}
- Some(Err(_)) => Ok(EvaluatedToErr),
- None => Ok(EvaluatedToAmbig),
+ Ok(Err(_)) => Ok(EvaluatedToErr),
+ Err(..) => Ok(EvaluatedToAmbig),
}
}
@@ -691,23 +676,21 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
}
ty::PredicateKind::ConstEquate(c1, c2) => {
+ assert!(
+ self.tcx().features().generic_const_exprs,
+ "`ConstEquate` without a feature gate: {c1:?} {c2:?}",
+ );
debug!(?c1, ?c2, "evaluate_predicate_recursively: equating consts");
- if self.tcx().features().generic_const_exprs {
- // FIXME: we probably should only try to unify abstract constants
- // if the constants depend on generic parameters.
- //
- // Let's just see where this breaks :shrug:
- if let (ty::ConstKind::Unevaluated(a), ty::ConstKind::Unevaluated(b)) =
- (c1.kind(), c2.kind())
- {
- if self.infcx.try_unify_abstract_consts(
- a.shrink(),
- b.shrink(),
- obligation.param_env,
- ) {
- return Ok(EvaluatedToOk);
- }
+ // FIXME: we probably should only try to unify abstract constants
+ // if the constants depend on generic parameters.
+ //
+ // Let's just see where this breaks :shrug:
+ if let (ty::ConstKind::Unevaluated(a), ty::ConstKind::Unevaluated(b)) =
+ (c1.kind(), c2.kind())
+ {
+ if self.infcx.try_unify_abstract_consts(a, b, obligation.param_env) {
+ return Ok(EvaluatedToOk);
}
}
@@ -747,7 +730,7 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
)
}
(Err(ErrorHandled::TooGeneric), _) | (_, Err(ErrorHandled::TooGeneric)) => {
- if c1.has_infer_types_or_consts() || c2.has_infer_types_or_consts() {
+ if c1.has_non_region_infer() || c2.has_non_region_infer() {
Ok(EvaluatedToAmbig)
} else {
// Two different constants using generic parameters ~> error.
@@ -760,14 +743,10 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
bug!("TypeWellFormedFromEnv is only used for chalk")
}
}
- });
-
- debug!("finished: {:?} from {:?}", result, obligation);
-
- result
+ })
}
- #[instrument(skip(self, previous_stack), level = "debug")]
+ #[instrument(skip(self, previous_stack), level = "debug", ret)]
fn evaluate_trait_predicate_recursively<'o>(
&mut self,
previous_stack: TraitObligationStackList<'o, 'tcx>,
@@ -798,12 +777,12 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
// If a trait predicate is in the (local or global) evaluation cache,
// then we know it holds without cycles.
if let Some(result) = self.check_evaluation_cache(param_env, fresh_trait_pred) {
- debug!(?result, "CACHE HIT");
+ debug!("CACHE HIT");
return Ok(result);
}
if let Some(result) = stack.cache().get_provisional(fresh_trait_pred) {
- debug!(?result, "PROVISIONAL CACHE HIT");
+ debug!("PROVISIONAL CACHE HIT");
stack.update_reached_depth(result.reached_depth);
return Ok(result.result);
}
@@ -826,11 +805,11 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
let reached_depth = stack.reached_depth.get();
if reached_depth >= stack.depth {
- debug!(?result, "CACHE MISS");
+ debug!("CACHE MISS");
self.insert_evaluation_cache(param_env, fresh_trait_pred, dep_node, result);
stack.cache().on_completion(stack.dfn);
} else {
- debug!(?result, "PROVISIONAL");
+ debug!("PROVISIONAL");
debug!(
"caching provisionally because {:?} \
is a cycle participant (at depth {}, reached depth {})",
@@ -849,7 +828,7 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
/// must be met of course). One obvious case this comes up is
/// marker traits like `Send`. Think of a linked list:
///
- /// struct List<T> { data: T, next: Option<Box<List<T>>> }
+ /// struct List<T> { data: T, next: Option<Box<List<T>>> }
///
/// `Box<List<T>>` will be `Send` if `T` is `Send` and
/// `Option<Box<List<T>>>` is `Send`, and in turn
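// Illustrative sketch, not part of the diff above: the co-inductive reasoning
// the doc comment describes, written as a compile-time check.
#[allow(dead_code)]
struct List<T> {
    data: T,
    next: Option<Box<List<T>>>,
}

fn assert_send<T: Send>() {}

fn check<T: Send>() {
    // Proving `List<T>: Send` needs `Option<Box<List<T>>>: Send`, which cycles
    // back to `List<T>: Send`; for auto traits the cycle counts as success.
    assert_send::<List<T>>();
}

fn main() {
    check::<u32>();
}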
@@ -936,38 +915,6 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
let unbound_input_types =
stack.fresh_trait_pred.skip_binder().trait_ref.substs.types().any(|ty| ty.is_fresh());
- if stack.obligation.polarity() != ty::ImplPolarity::Negative {
- // This check was an imperfect workaround for a bug in the old
- // intercrate mode; it should be removed when that goes away.
- if unbound_input_types && self.intercrate {
- debug!("evaluate_stack --> unbound argument, intercrate --> ambiguous",);
- // Heuristics: show the diagnostics when there are no candidates in crate.
- if self.intercrate_ambiguity_causes.is_some() {
- debug!("evaluate_stack: intercrate_ambiguity_causes is some");
- if let Ok(candidate_set) = self.assemble_candidates(stack) {
- if !candidate_set.ambiguous && candidate_set.vec.is_empty() {
- let trait_ref = stack.obligation.predicate.skip_binder().trait_ref;
- let self_ty = trait_ref.self_ty();
- let cause = with_no_trimmed_paths!({
- IntercrateAmbiguityCause::DownstreamCrate {
- trait_desc: trait_ref.print_only_trait_path().to_string(),
- self_desc: if self_ty.has_concrete_skeleton() {
- Some(self_ty.to_string())
- } else {
- None
- },
- }
- });
-
- debug!(?cause, "evaluate_stack: pushing cause");
- self.intercrate_ambiguity_causes.as_mut().unwrap().insert(cause);
- }
- }
- }
- return Ok(EvaluatedToAmbig);
- }
- }
-
if unbound_input_types
&& stack.iter().skip(1).any(|prev| {
stack.obligation.param_env == prev.obligation.param_env
@@ -1023,7 +970,8 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
#[instrument(
level = "debug",
skip(self, stack),
- fields(depth = stack.obligation.recursion_depth)
+ fields(depth = stack.obligation.recursion_depth),
+ ret
)]
fn evaluate_candidate<'o>(
&mut self,
@@ -1056,7 +1004,6 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
result = result.max(EvaluatedToOkModuloRegions);
}
- debug!(?result);
Ok(result)
}
@@ -1150,7 +1097,7 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
ErrorGuaranteed::unchecked_claim_error_was_emitted(),
));
}
- self.infcx.report_overflow_error(error_obligation, true);
+ self.infcx.err_ctxt().report_overflow_error(error_obligation, true);
}
TraitQueryMode::Canonical => {
return Err(OverflowError::Canonical);
@@ -1202,8 +1149,10 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
ImplCandidate(def_id) if tcx.constness(def_id) == hir::Constness::Const => {}
// const param
ParamCandidate(trait_pred) if trait_pred.is_const_if_const() => {}
+ // const projection
+ ProjectionCandidate(_, ty::BoundConstness::ConstIfConst) => {}
// auto trait impl
- AutoImplCandidate(..) => {}
+ AutoImplCandidate => {}
// generator, this will raise error in other places
// or ignore error with const_async_blocks feature
GeneratorCandidate => {}
@@ -1265,11 +1214,11 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
Ok(Some(candidate))
}
- fn is_knowable<'o>(&mut self, stack: &TraitObligationStack<'o, 'tcx>) -> Option<Conflict> {
+ fn is_knowable<'o>(&mut self, stack: &TraitObligationStack<'o, 'tcx>) -> Result<(), Conflict> {
debug!("is_knowable(intercrate={:?})", self.intercrate);
if !self.intercrate || stack.obligation.polarity() == ty::ImplPolarity::Negative {
- return None;
+ return Ok(());
}
let obligation = &stack.obligation;
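// Illustrative sketch, not part of the diff above: the shape of the
// `Option<Conflict>` -> `Result<(), Conflict>` change, with `Conflict` as a
// stand-in type rather than the real coherence type. `Ok(())` now means
// "knowable", so callers can propagate conflicts with `?`.
#[derive(Debug)]
struct Conflict;

fn is_knowable(intercrate: bool) -> Result<(), Conflict> {
    if !intercrate {
        return Ok(());
    }
    Err(Conflict)
}

fn assemble() -> Result<&'static str, Conflict> {
    is_knowable(false)?; // early-return on an intercrate conflict
    Ok("candidates assembled")
}

fn main() {
    println!("{:?}", assemble());
}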
@@ -1405,11 +1354,11 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
/// a projection, look at the bounds of `T::Bar`, see if we can find a
/// `Baz` bound. We return indexes into the list returned by
/// `tcx.item_bounds` for any applicable bounds.
- #[instrument(level = "debug", skip(self))]
+ #[instrument(level = "debug", skip(self), ret)]
fn match_projection_obligation_against_definition_bounds(
&mut self,
obligation: &TraitObligation<'tcx>,
- ) -> smallvec::SmallVec<[usize; 2]> {
+ ) -> smallvec::SmallVec<[(usize, ty::BoundConstness); 2]> {
let poly_trait_predicate = self.infcx().resolve_vars_if_possible(obligation.predicate);
let placeholder_trait_predicate =
self.infcx().replace_bound_vars_with_placeholders(poly_trait_predicate);
@@ -1435,7 +1384,7 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
// unnecessary ambiguity.
let mut distinct_normalized_bounds = FxHashSet::default();
- let matching_bounds = bounds
+ bounds
.iter()
.enumerate()
.filter_map(|(idx, bound)| {
@@ -1457,15 +1406,12 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
_ => false,
}
}) {
- return Some(idx);
+ return Some((idx, pred.constness));
}
}
None
})
- .collect();
-
- debug!(?matching_bounds);
- matching_bounds
+ .collect()
}
/// Equates the trait in `obligation` with trait bound. If the two traits
@@ -1576,7 +1522,7 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
if !generics.params.is_empty()
&& obligation.predicate.substs[generics.parent_count..]
.iter()
- .any(|&p| p.has_infer_types_or_consts() && self.infcx.shallow_resolve(p) != p)
+ .any(|&p| p.has_non_region_infer() && self.infcx.shallow_resolve(p) != p)
{
ProjectionMatchesProjection::Ambiguous
} else {
@@ -1618,12 +1564,13 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
};
// (*) Prefer `BuiltinCandidate { has_nested: false }`, `PointeeCandidate`,
- // `DiscriminantKindCandidate`, and `ConstDestructCandidate` to anything else.
+ // `DiscriminantKindCandidate`, `ConstDestructCandidate`
+ // to anything else.
//
// This is a fix for #53123 and prevents winnowing from accidentally extending the
// lifetime of a variable.
match (&other.candidate, &victim.candidate) {
- (_, AutoImplCandidate(..)) | (AutoImplCandidate(..), _) => {
+ (_, AutoImplCandidate) | (AutoImplCandidate, _) => {
bug!(
"default implementations shouldn't be recorded \
when there are other valid candidates"
@@ -1691,11 +1638,11 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
| BuiltinUnsizeCandidate
| TraitUpcastingUnsizeCandidate(_)
| BuiltinCandidate { .. }
- | TraitAliasCandidate(..)
+ | TraitAliasCandidate
| ObjectCandidate(_)
- | ProjectionCandidate(_),
+ | ProjectionCandidate(..),
) => !is_global(cand),
- (ObjectCandidate(_) | ProjectionCandidate(_), ParamCandidate(ref cand)) => {
+ (ObjectCandidate(_) | ProjectionCandidate(..), ParamCandidate(ref cand)) => {
// Prefer these to a global where-clause bound
// (see issue #50825).
is_global(cand)
@@ -1709,7 +1656,7 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
| BuiltinUnsizeCandidate
| TraitUpcastingUnsizeCandidate(_)
| BuiltinCandidate { has_nested: true }
- | TraitAliasCandidate(..),
+ | TraitAliasCandidate,
ParamCandidate(ref cand),
) => {
// Prefer these to a global where-clause bound
@@ -1717,20 +1664,20 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
is_global(cand) && other.evaluation.must_apply_modulo_regions()
}
- (ProjectionCandidate(i), ProjectionCandidate(j))
+ (ProjectionCandidate(i, _), ProjectionCandidate(j, _))
| (ObjectCandidate(i), ObjectCandidate(j)) => {
// Arbitrarily pick the lower numbered candidate for backwards
// compatibility reasons. Don't let this affect inference.
i < j && !needs_infer
}
- (ObjectCandidate(_), ProjectionCandidate(_))
- | (ProjectionCandidate(_), ObjectCandidate(_)) => {
+ (ObjectCandidate(_), ProjectionCandidate(..))
+ | (ProjectionCandidate(..), ObjectCandidate(_)) => {
bug!("Have both object and projection candidate")
}
// Arbitrarily give projection and object candidates priority.
(
- ObjectCandidate(_) | ProjectionCandidate(_),
+ ObjectCandidate(_) | ProjectionCandidate(..),
ImplCandidate(..)
| ClosureCandidate
| GeneratorCandidate
@@ -1739,7 +1686,7 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
| BuiltinUnsizeCandidate
| TraitUpcastingUnsizeCandidate(_)
| BuiltinCandidate { .. }
- | TraitAliasCandidate(..),
+ | TraitAliasCandidate,
) => true,
(
@@ -1751,18 +1698,18 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
| BuiltinUnsizeCandidate
| TraitUpcastingUnsizeCandidate(_)
| BuiltinCandidate { .. }
- | TraitAliasCandidate(..),
- ObjectCandidate(_) | ProjectionCandidate(_),
+ | TraitAliasCandidate,
+ ObjectCandidate(_) | ProjectionCandidate(..),
) => false,
(&ImplCandidate(other_def), &ImplCandidate(victim_def)) => {
// See if we can toss out `victim` based on specialization.
- // This requires us to know *for sure* that the `other` impl applies
- // i.e., `EvaluatedToOk`.
+ // While this requires us to know *for sure* that the `other` impl applies,
+ // we still use modulo regions here.
//
- // FIXME(@lcnr): Using `modulo_regions` here seems kind of scary
- // to me but is required for `std` to compile, so I didn't change it
- // for now.
+ // This is fine as specialization currently assumes that specializing
+ // impls have to be always applicable, meaning that the only allowed
+ // region constraints may be constraints also present on the default impl.
let tcx = self.tcx();
if other.evaluation.must_apply_modulo_regions() {
if tcx.specializes((other_def, victim_def)) {
@@ -1832,7 +1779,7 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
| BuiltinUnsizeCandidate
| TraitUpcastingUnsizeCandidate(_)
| BuiltinCandidate { has_nested: true }
- | TraitAliasCandidate(..),
+ | TraitAliasCandidate,
ImplCandidate(_)
| ClosureCandidate
| GeneratorCandidate
@@ -1841,7 +1788,7 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
| BuiltinUnsizeCandidate
| TraitUpcastingUnsizeCandidate(_)
| BuiltinCandidate { has_nested: true }
- | TraitAliasCandidate(..),
+ | TraitAliasCandidate,
) => false,
}
}
@@ -1871,6 +1818,7 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
| ty::Array(..)
| ty::Closure(..)
| ty::Never
+ | ty::Dynamic(_, _, ty::DynStar)
| ty::Error(_) => {
// safe for everything
Where(ty::Binder::dummy(Vec::new()))
@@ -1937,8 +1885,7 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
ty::Dynamic(..)
| ty::Str
| ty::Slice(..)
- | ty::Generator(..)
- | ty::GeneratorWitness(..)
+ | ty::Generator(_, _, hir::Movability::Static)
| ty::Foreign(..)
| ty::Ref(_, _, hir::Mutability::Mut) => None,
@@ -1947,6 +1894,43 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
Where(obligation.predicate.rebind(tys.iter().collect()))
}
+ ty::Generator(_, substs, hir::Movability::Movable) => {
+ if self.tcx().features().generator_clone {
+ let resolved_upvars =
+ self.infcx.shallow_resolve(substs.as_generator().tupled_upvars_ty());
+ let resolved_witness =
+ self.infcx.shallow_resolve(substs.as_generator().witness());
+ if resolved_upvars.is_ty_var() || resolved_witness.is_ty_var() {
+ // Not yet resolved.
+ Ambiguous
+ } else {
+ let all = substs
+ .as_generator()
+ .upvar_tys()
+ .chain(iter::once(substs.as_generator().witness()))
+ .collect::<Vec<_>>();
+ Where(obligation.predicate.rebind(all))
+ }
+ } else {
+ None
+ }
+ }
+
+ ty::GeneratorWitness(binder) => {
+ let witness_tys = binder.skip_binder();
+ for witness_ty in witness_tys.iter() {
+ let resolved = self.infcx.shallow_resolve(witness_ty);
+ if resolved.is_ty_var() {
+ return Ambiguous;
+ }
+ }
+ // (*) binder moved here
+ let all_vars = self.tcx().mk_bound_variable_kinds(
+ obligation.predicate.bound_vars().iter().chain(binder.bound_vars().iter()),
+ );
+ Where(ty::Binder::bind_with_vars(witness_tys.to_vec(), all_vars))
+ }
+
ty::Closure(_, substs) => {
// (*) binder moved here
let ty = self.infcx.shallow_resolve(substs.as_closure().tupled_upvars_ty());
@@ -2153,7 +2137,7 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
}
}
- #[tracing::instrument(level = "debug", skip(self))]
+ #[instrument(level = "debug", skip(self), ret)]
fn match_impl(
&mut self,
impl_def_id: DefId,
@@ -2194,17 +2178,16 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
.at(&cause, obligation.param_env)
.define_opaque_types(false)
.eq(placeholder_obligation_trait_ref, impl_trait_ref)
- .map_err(|e| debug!("match_impl: failed eq_trait_refs due to `{}`", e))?;
+ .map_err(|e| debug!("match_impl: failed eq_trait_refs due to `{e}`"))?;
nested_obligations.extend(obligations);
if !self.intercrate
&& self.tcx().impl_polarity(impl_def_id) == ty::ImplPolarity::Reservation
{
- debug!("match_impl: reservation impls only apply in intercrate mode");
+ debug!("reservation impls only apply in intercrate mode");
return Err(());
}
- debug!(?impl_substs, ?nested_obligations, "match_impl: success");
Ok(Normalized { value: impl_substs, obligations: nested_obligations })
}
@@ -2335,7 +2318,7 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
/// impl or trait. The obligations are substituted and fully
/// normalized. This is used when confirming an impl or default
/// impl.
- #[tracing::instrument(level = "debug", skip(self, cause, param_env))]
+ #[instrument(level = "debug", skip(self, cause, param_env))]
fn impl_or_trait_obligations(
&mut self,
cause: &ObligationCause<'tcx>,
diff --git a/compiler/rustc_trait_selection/src/traits/specialize/mod.rs b/compiler/rustc_trait_selection/src/traits/specialize/mod.rs
index 6223c5ea3..c89165858 100644
--- a/compiler/rustc_trait_selection/src/traits/specialize/mod.rs
+++ b/compiler/rustc_trait_selection/src/traits/specialize/mod.rs
@@ -12,22 +12,21 @@
pub mod specialization_graph;
use specialization_graph::GraphExt;
+use crate::errors::NegativePositiveConflict;
use crate::infer::{InferCtxt, InferOk, TyCtxtInferExt};
use crate::traits::select::IntercrateAmbiguityCause;
-use crate::traits::{
- self, coherence, FutureCompatOverlapErrorKind, ObligationCause, TraitEngine, TraitEngineExt,
-};
+use crate::traits::{self, coherence, FutureCompatOverlapErrorKind, ObligationCause};
use rustc_data_structures::fx::{FxHashSet, FxIndexSet};
-use rustc_errors::{struct_span_err, EmissionGuarantee, LintDiagnosticBuilder};
+use rustc_errors::{struct_span_err, DiagnosticBuilder, EmissionGuarantee};
use rustc_hir::def_id::{DefId, LocalDefId};
-use rustc_middle::ty::subst::{InternalSubsts, Subst, SubstsRef};
use rustc_middle::ty::{self, ImplSubject, TyCtxt};
+use rustc_middle::ty::{InternalSubsts, SubstsRef};
use rustc_session::lint::builtin::COHERENCE_LEAK_CHECK;
use rustc_session::lint::builtin::ORDER_DEPENDENT_TRAIT_OBJECTS;
use rustc_span::{Span, DUMMY_SP};
+use super::util;
use super::SelectionContext;
-use super::{util, FulfillmentContext};
/// Information pertinent to an overlapping impl error.
#[derive(Debug)]
@@ -74,8 +73,8 @@ pub struct OverlapError {
/// through associated type projection. We deal with such cases by using
/// *fulfillment* to relate the two impls, requiring that all projections are
/// resolved.
-pub fn translate_substs<'a, 'tcx>(
- infcx: &InferCtxt<'a, 'tcx>,
+pub fn translate_substs<'tcx>(
+ infcx: &InferCtxt<'tcx>,
param_env: ty::ParamEnv<'tcx>,
source_impl: DefId,
source_substs: SubstsRef<'tcx>,
@@ -150,14 +149,9 @@ pub(super) fn specializes(tcx: TyCtxt<'_>, (impl1_def_id, impl2_def_id): (DefId,
let impl1_trait_ref = tcx.impl_trait_ref(impl1_def_id).unwrap();
// Create an infcx, taking the predicates of impl1 as assumptions:
- tcx.infer_ctxt().enter(|infcx| {
- let impl1_trait_ref = match traits::fully_normalize(
- &infcx,
- FulfillmentContext::new(),
- ObligationCause::dummy(),
- penv,
- impl1_trait_ref,
- ) {
+ let infcx = tcx.infer_ctxt().build();
+ let impl1_trait_ref =
+ match traits::fully_normalize(&infcx, ObligationCause::dummy(), penv, impl1_trait_ref) {
Ok(impl1_trait_ref) => impl1_trait_ref,
Err(_errors) => {
tcx.sess.delay_span_bug(
@@ -168,9 +162,8 @@ pub(super) fn specializes(tcx: TyCtxt<'_>, (impl1_def_id, impl2_def_id): (DefId,
}
};
- // Attempt to prove that impl2 applies, given all of the above.
- fulfill_implication(&infcx, penv, impl1_trait_ref, impl2_def_id).is_ok()
- })
+ // Attempt to prove that impl2 applies, given all of the above.
+ fulfill_implication(&infcx, penv, impl1_trait_ref, impl2_def_id).is_ok()
}
/// Attempt to fulfill all obligations of `target_impl` after unification with
@@ -178,8 +171,8 @@ pub(super) fn specializes(tcx: TyCtxt<'_>, (impl1_def_id, impl2_def_id): (DefId,
/// generics of `target_impl`, including both those needed to unify with
/// `source_trait_ref` and those whose identity is determined via a where
/// clause in the impl.
-fn fulfill_implication<'a, 'tcx>(
- infcx: &InferCtxt<'a, 'tcx>,
+fn fulfill_implication<'tcx>(
+ infcx: &InferCtxt<'tcx>,
param_env: ty::ParamEnv<'tcx>,
source_trait_ref: ty::TraitRef<'tcx>,
target_impl: DefId,
@@ -211,11 +204,8 @@ fn fulfill_implication<'a, 'tcx>(
// (which are packed up in penv)
infcx.save_and_restore_in_snapshot_flag(|infcx| {
- let mut fulfill_cx = <dyn TraitEngine<'tcx>>::new(infcx.tcx);
- for oblig in obligations.chain(more_obligations) {
- fulfill_cx.register_predicate_obligation(&infcx, oblig);
- }
- match fulfill_cx.select_all_or_error(infcx).as_slice() {
+ let errors = traits::fully_solve_obligations(&infcx, obligations.chain(more_obligations));
+ match &errors[..] {
[] => {
debug!(
"fulfill_implication: an impl for {:?} specializes {:?}",
@@ -333,35 +323,13 @@ fn report_negative_positive_conflict(
positive_impl_def_id: DefId,
sg: &mut specialization_graph::Graph,
) {
- let impl_span = tcx.def_span(local_impl_def_id);
-
- let mut err = struct_span_err!(
- tcx.sess,
- impl_span,
- E0751,
- "found both positive and negative implementation of trait `{}`{}:",
- overlap.trait_desc,
- overlap.self_desc.clone().map_or_else(String::new, |ty| format!(" for type `{}`", ty))
- );
-
- match tcx.span_of_impl(negative_impl_def_id) {
- Ok(span) => {
- err.span_label(span, "negative implementation here");
- }
- Err(cname) => {
- err.note(&format!("negative implementation in crate `{}`", cname));
- }
- }
-
- match tcx.span_of_impl(positive_impl_def_id) {
- Ok(span) => {
- err.span_label(span, "positive implementation here");
- }
- Err(cname) => {
- err.note(&format!("positive implementation in crate `{}`", cname));
- }
- }
-
+ let mut err = tcx.sess.create_err(NegativePositiveConflict {
+ impl_span: tcx.def_span(local_impl_def_id),
+ trait_desc: &overlap.trait_desc,
+ self_desc: &overlap.self_desc,
+ negative_impl_span: tcx.span_of_impl(negative_impl_def_id),
+ positive_impl_span: tcx.span_of_impl(positive_impl_def_id),
+ });
sg.has_errored = Some(err.emit());
}
@@ -377,26 +345,12 @@ fn report_conflicting_impls(
// Work to be done after we've built the DiagnosticBuilder. We have to define it
// now because the struct_lint methods don't return back the DiagnosticBuilder
// that's passed in.
- fn decorate<G: EmissionGuarantee>(
+ fn decorate<'a, 'b, G: EmissionGuarantee>(
tcx: TyCtxt<'_>,
overlap: OverlapError,
- used_to_be_allowed: Option<FutureCompatOverlapErrorKind>,
impl_span: Span,
- err: LintDiagnosticBuilder<'_, G>,
- ) -> G {
- let msg = format!(
- "conflicting implementations of trait `{}`{}{}",
- overlap.trait_desc,
- overlap
- .self_desc
- .clone()
- .map_or_else(String::new, |ty| { format!(" for type `{}`", ty) }),
- match used_to_be_allowed {
- Some(FutureCompatOverlapErrorKind::Issue33140) => ": (E0119)",
- _ => "",
- }
- );
- let mut err = err.build(&msg);
+ err: &'b mut DiagnosticBuilder<'a, G>,
+ ) -> &'b mut DiagnosticBuilder<'a, G> {
match tcx.span_of_impl(overlap.with_impl) {
Ok(span) => {
err.span_label(span, "first implementation here");
@@ -411,7 +365,9 @@ fn report_conflicting_impls(
}
Err(cname) => {
let msg = match to_pretty_impl_header(tcx, overlap.with_impl) {
- Some(s) => format!("conflicting implementation in crate `{}`:\n- {}", cname, s),
+ Some(s) => {
+ format!("conflicting implementation in crate `{}`:\n- {}", cname, s)
+ }
None => format!("conflicting implementation in crate `{}`", cname),
};
err.note(&msg);
@@ -419,28 +375,33 @@ fn report_conflicting_impls(
}
for cause in &overlap.intercrate_ambiguity_causes {
- cause.add_intercrate_ambiguity_hint(&mut err);
+ cause.add_intercrate_ambiguity_hint(err);
}
if overlap.involves_placeholder {
- coherence::add_placeholder_note(&mut err);
+ coherence::add_placeholder_note(err);
}
- err.emit()
+ err
}
+ let msg = format!(
+ "conflicting implementations of trait `{}`{}{}",
+ overlap.trait_desc,
+ overlap.self_desc.as_deref().map_or_else(String::new, |ty| format!(" for type `{ty}`")),
+ match used_to_be_allowed {
+ Some(FutureCompatOverlapErrorKind::Issue33140) => ": (E0119)",
+ _ => "",
+ }
+ );
+
match used_to_be_allowed {
None => {
let reported = if overlap.with_impl.is_local()
|| tcx.orphan_check_impl(impl_def_id).is_ok()
{
- let err = struct_span_err!(tcx.sess, impl_span, E0119, "");
- Some(decorate(
- tcx,
- overlap,
- used_to_be_allowed,
- impl_span,
- LintDiagnosticBuilder::new(err),
- ))
+ let mut err = struct_span_err!(tcx.sess, impl_span, E0119, "{msg}",);
+ decorate(tcx, overlap, impl_span, &mut err);
+ Some(err.emit())
} else {
Some(tcx.sess.delay_span_bug(impl_span, "impl should have failed the orphan check"))
};
@@ -455,9 +416,8 @@ fn report_conflicting_impls(
lint,
tcx.hir().local_def_id_to_hir_id(impl_def_id),
impl_span,
- |ldb| {
- decorate(tcx, overlap, used_to_be_allowed, impl_span, ldb);
- },
+ msg,
+ |err| decorate(tcx, overlap, impl_span, err),
);
}
};
diff --git a/compiler/rustc_trait_selection/src/traits/specialize/specialization_graph.rs b/compiler/rustc_trait_selection/src/traits/specialize/specialization_graph.rs
index fcb73b43f..63f89a33e 100644
--- a/compiler/rustc_trait_selection/src/traits/specialize/specialization_graph.rs
+++ b/compiler/rustc_trait_selection/src/traits/specialize/specialization_graph.rs
@@ -137,9 +137,8 @@ impl ChildrenExt<'_> for Children {
impl_def_id,
traits::SkipLeakCheck::default(),
overlap_mode,
- |_| true,
- || false,
- );
+ )
+ .is_some();
let error = create_overlap_error(overlap);
@@ -162,34 +161,29 @@ impl ChildrenExt<'_> for Children {
impl_def_id,
traits::SkipLeakCheck::Yes,
overlap_mode,
- |overlap| {
- if let Some(overlap_kind) =
- tcx.impls_are_allowed_to_overlap(impl_def_id, possible_sibling)
- {
- match overlap_kind {
- ty::ImplOverlapKind::Permitted { marker: _ } => {}
- ty::ImplOverlapKind::Issue33140 => {
- *last_lint_mut = Some(FutureCompatOverlapError {
- error: create_overlap_error(overlap),
- kind: FutureCompatOverlapErrorKind::Issue33140,
- });
- }
+ )
+ .map_or(Ok((false, false)), |overlap| {
+ if let Some(overlap_kind) =
+ tcx.impls_are_allowed_to_overlap(impl_def_id, possible_sibling)
+ {
+ match overlap_kind {
+ ty::ImplOverlapKind::Permitted { marker: _ } => {}
+ ty::ImplOverlapKind::Issue33140 => {
+ *last_lint_mut = Some(FutureCompatOverlapError {
+ error: create_overlap_error(overlap),
+ kind: FutureCompatOverlapErrorKind::Issue33140,
+ });
}
-
- return Ok((false, false));
}
- let le = tcx.specializes((impl_def_id, possible_sibling));
- let ge = tcx.specializes((possible_sibling, impl_def_id));
+ return Ok((false, false));
+ }
- if le == ge {
- report_overlap_error(overlap, last_lint_mut)
- } else {
- Ok((le, ge))
- }
- },
- || Ok((false, false)),
- )?;
+ let le = tcx.specializes((impl_def_id, possible_sibling));
+ let ge = tcx.specializes((possible_sibling, impl_def_id));
+
+ if le == ge { report_overlap_error(overlap, last_lint_mut) } else { Ok((le, ge)) }
+ })?;
if le && !ge {
debug!(
diff --git a/compiler/rustc_trait_selection/src/traits/structural_match.rs b/compiler/rustc_trait_selection/src/traits/structural_match.rs
index 5829a0f92..932dbbb81 100644
--- a/compiler/rustc_trait_selection/src/traits/structural_match.rs
+++ b/compiler/rustc_trait_selection/src/traits/structural_match.rs
@@ -68,7 +68,7 @@ pub fn search_for_adt_const_param_violation<'tcx>(
/// Note that this does *not* recursively check if the substructure of `adt_ty`
/// implements the traits.
fn type_marked_structural<'tcx>(
- infcx: &InferCtxt<'_, 'tcx>,
+ infcx: &InferCtxt<'tcx>,
adt_ty: Ty<'tcx>,
cause: ObligationCause<'tcx>,
) -> bool {
@@ -265,9 +265,8 @@ impl<'tcx> TypeVisitor<'tcx> for Search<'tcx> {
pub fn provide(providers: &mut Providers) {
providers.has_structural_eq_impls = |tcx, ty| {
- tcx.infer_ctxt().enter(|infcx| {
- let cause = ObligationCause::dummy();
- type_marked_structural(&infcx, ty, cause)
- })
+ let infcx = tcx.infer_ctxt().build();
+ let cause = ObligationCause::dummy();
+ type_marked_structural(&infcx, ty, cause)
};
}
diff --git a/compiler/rustc_trait_selection/src/traits/util.rs b/compiler/rustc_trait_selection/src/traits/util.rs
index d25006016..ed47d2f83 100644
--- a/compiler/rustc_trait_selection/src/traits/util.rs
+++ b/compiler/rustc_trait_selection/src/traits/util.rs
@@ -5,14 +5,12 @@ use smallvec::SmallVec;
use rustc_data_structures::fx::FxHashSet;
use rustc_hir::def_id::DefId;
-use rustc_middle::ty::subst::{GenericArg, Subst, SubstsRef};
use rustc_middle::ty::{self, ImplSubject, ToPredicate, Ty, TyCtxt, TypeVisitable};
+use rustc_middle::ty::{GenericArg, SubstsRef};
use super::{Normalized, Obligation, ObligationCause, PredicateObligation, SelectionContext};
pub use rustc_infer::traits::{self, util::*};
-use std::iter;
-
///////////////////////////////////////////////////////////////////////////
// `TraitAliasExpander` iterator
///////////////////////////////////////////////////////////////////////////
@@ -210,7 +208,7 @@ pub fn impl_subject_and_oblig<'a, 'tcx>(
let Normalized { value: predicates, obligations: normalization_obligations2 } =
super::normalize(selcx, param_env, ObligationCause::dummy(), predicates);
let impl_obligations =
- predicates_for_generics(ObligationCause::dummy(), 0, param_env, predicates);
+ super::predicates_for_generics(|_, _| ObligationCause::dummy(), param_env, predicates);
let impl_obligations = impl_obligations
.chain(normalization_obligations1.into_iter())
@@ -219,27 +217,6 @@ pub fn impl_subject_and_oblig<'a, 'tcx>(
(subject, impl_obligations)
}
-pub fn predicates_for_generics<'tcx>(
- cause: ObligationCause<'tcx>,
- recursion_depth: usize,
- param_env: ty::ParamEnv<'tcx>,
- generic_bounds: ty::InstantiatedPredicates<'tcx>,
-) -> impl Iterator<Item = PredicateObligation<'tcx>> {
- debug!("predicates_for_generics(generic_bounds={:?})", generic_bounds);
-
- iter::zip(generic_bounds.predicates, generic_bounds.spans).map(move |(predicate, span)| {
- let cause = match *cause.code() {
- traits::ItemObligation(def_id) if !span.is_dummy() => traits::ObligationCause::new(
- cause.span,
- cause.body_id,
- traits::BindingObligation(def_id, span),
- ),
- _ => cause.clone(),
- };
- Obligation { cause, recursion_depth, param_env, predicate }
- })
-}
-
pub fn predicate_for_trait_ref<'tcx>(
tcx: TyCtxt<'tcx>,
cause: ObligationCause<'tcx>,
@@ -291,10 +268,7 @@ pub fn count_own_vtable_entries<'tcx>(
tcx: TyCtxt<'tcx>,
trait_ref: ty::PolyTraitRef<'tcx>,
) -> usize {
- let existential_trait_ref =
- trait_ref.map_bound(|trait_ref| ty::ExistentialTraitRef::erase_self_ty(tcx, trait_ref));
- let existential_trait_ref = tcx.erase_regions(existential_trait_ref);
- tcx.own_existential_vtable_entries(existential_trait_ref).len()
+ tcx.own_existential_vtable_entries(trait_ref.def_id()).len()
}
/// Given an upcast trait object described by `object`, returns the
@@ -305,15 +279,10 @@ pub fn get_vtable_index_of_object_method<'tcx, N>(
object: &super::ImplSourceObjectData<'tcx, N>,
method_def_id: DefId,
) -> Option<usize> {
- let existential_trait_ref = object
- .upcast_trait_ref
- .map_bound(|trait_ref| ty::ExistentialTraitRef::erase_self_ty(tcx, trait_ref));
- let existential_trait_ref = tcx.erase_regions(existential_trait_ref);
-
// Count number of methods preceding the one we are selecting and
// add them to the total offset.
if let Some(index) = tcx
- .own_existential_vtable_entries(existential_trait_ref)
+ .own_existential_vtable_entries(object.upcast_trait_ref.def_id())
.iter()
.copied()
.position(|def_id| def_id == method_def_id)
diff --git a/compiler/rustc_trait_selection/src/traits/wf.rs b/compiler/rustc_trait_selection/src/traits/wf.rs
index 414857f0a..fc0a9f690 100644
--- a/compiler/rustc_trait_selection/src/traits/wf.rs
+++ b/compiler/rustc_trait_selection/src/traits/wf.rs
@@ -14,8 +14,8 @@ use std::iter;
/// inference variable, returns `None`, because we are not able to
/// make any progress at all. This is to prevent "livelock" where we
/// say "$0 is WF if $0 is WF".
-pub fn obligations<'a, 'tcx>(
- infcx: &InferCtxt<'a, 'tcx>,
+pub fn obligations<'tcx>(
+ infcx: &InferCtxt<'tcx>,
param_env: ty::ParamEnv<'tcx>,
body_id: hir::HirId,
recursion_depth: usize,
@@ -31,9 +31,9 @@ pub fn obligations<'a, 'tcx>(
if resolved_ty == ty {
// No progress, bail out to prevent "livelock".
return None;
+ } else {
+ resolved_ty
}
-
- resolved_ty
}
_ => ty,
}
@@ -41,16 +41,14 @@ pub fn obligations<'a, 'tcx>(
}
GenericArgKind::Const(ct) => {
match ct.kind() {
- ty::ConstKind::Infer(infer) => {
- let resolved = infcx.shallow_resolve(infer);
- if resolved == infer {
+ ty::ConstKind::Infer(_) => {
+ let resolved = infcx.shallow_resolve(ct);
+ if resolved == ct {
// No progress.
return None;
+ } else {
+ resolved
}
-
- infcx
- .tcx
- .mk_const(ty::ConstS { kind: ty::ConstKind::Infer(resolved), ty: ct.ty() })
}
_ => ct,
}
@@ -81,8 +79,8 @@ pub fn obligations<'a, 'tcx>(
/// well-formed. For example, if there is a trait `Set` defined like
/// `trait Set<K:Eq>`, then the trait reference `Foo: Set<Bar>` is WF
/// if `Bar: Eq`.
-pub fn trait_obligations<'a, 'tcx>(
- infcx: &InferCtxt<'a, 'tcx>,
+pub fn trait_obligations<'tcx>(
+ infcx: &InferCtxt<'tcx>,
param_env: ty::ParamEnv<'tcx>,
body_id: hir::HirId,
trait_pred: &ty::TraitPredicate<'tcx>,
@@ -103,8 +101,9 @@ pub fn trait_obligations<'a, 'tcx>(
wf.normalize(infcx)
}
-pub fn predicate_obligations<'a, 'tcx>(
- infcx: &InferCtxt<'a, 'tcx>,
+#[instrument(skip(infcx), ret)]
+pub fn predicate_obligations<'tcx>(
+ infcx: &InferCtxt<'tcx>,
param_env: ty::ParamEnv<'tcx>,
body_id: hir::HirId,
predicate: ty::Predicate<'tcx>,
@@ -131,9 +130,9 @@ pub fn predicate_obligations<'a, 'tcx>(
}
ty::PredicateKind::Projection(t) => {
wf.compute_projection(t.projection_ty);
- wf.compute(match t.term {
- ty::Term::Ty(ty) => ty.into(),
- ty::Term::Const(c) => c.into(),
+ wf.compute(match t.term.unpack() {
+ ty::TermKind::Ty(ty) => ty.into(),
+ ty::TermKind::Const(c) => c.into(),
})
}
ty::PredicateKind::WellFormed(arg) => {
@@ -149,13 +148,8 @@ pub fn predicate_obligations<'a, 'tcx>(
wf.compute(a.into());
wf.compute(b.into());
}
- ty::PredicateKind::ConstEvaluatable(uv) => {
- let obligations = wf.nominal_obligations(uv.def.did, uv.substs);
- wf.out.extend(obligations);
-
- for arg in uv.substs.iter() {
- wf.compute(arg);
- }
+ ty::PredicateKind::ConstEvaluatable(ct) => {
+ wf.compute(ct.into());
}
ty::PredicateKind::ConstEquate(c1, c2) => {
wf.compute(c1.into());
@@ -220,12 +214,14 @@ fn extend_cause_with_original_assoc_item_obligation<'tcx>(
trait_ref, item, cause, pred
);
let (items, impl_def_id) = match item {
- Some(hir::Item { kind: hir::ItemKind::Impl(impl_), def_id, .. }) => (impl_.items, *def_id),
+ Some(hir::Item { kind: hir::ItemKind::Impl(impl_), owner_id, .. }) => {
+ (impl_.items, *owner_id)
+ }
_ => return,
};
let fix_span =
|impl_item_ref: &hir::ImplItemRef| match tcx.hir().impl_item(impl_item_ref.id).kind {
- hir::ImplItemKind::Const(ty, _) | hir::ImplItemKind::TyAlias(ty) => ty.span,
+ hir::ImplItemKind::Const(ty, _) | hir::ImplItemKind::Type(ty) => ty.span,
_ => impl_item_ref.span,
};
@@ -242,7 +238,7 @@ fn extend_cause_with_original_assoc_item_obligation<'tcx>(
tcx.impl_item_implementor_ids(impl_def_id).get(&projection_ty.item_def_id)
&& let Some(impl_item_span) = items
.iter()
- .find(|item| item.id.def_id.to_def_id() == impl_item_id)
+ .find(|item| item.id.owner_id.to_def_id() == impl_item_id)
.map(fix_span)
{
cause.span = impl_item_span;
@@ -257,7 +253,7 @@ fn extend_cause_with_original_assoc_item_obligation<'tcx>(
tcx.impl_item_implementor_ids(impl_def_id).get(&item_def_id)
&& let Some(impl_item_span) = items
.iter()
- .find(|item| item.id.def_id.to_def_id() == impl_item_id)
+ .find(|item| item.id.owner_id.to_def_id() == impl_item_id)
.map(fix_span)
{
cause.span = impl_item_span;
@@ -276,7 +272,7 @@ impl<'tcx> WfPredicates<'tcx> {
traits::ObligationCause::new(self.span, self.body_id, code)
}
- fn normalize(self, infcx: &InferCtxt<'_, 'tcx>) -> Vec<traits::PredicateObligation<'tcx>> {
+ fn normalize(self, infcx: &InferCtxt<'tcx>) -> Vec<traits::PredicateObligation<'tcx>> {
let cause = self.cause(traits::WellFormed(None));
let param_env = self.param_env;
let mut obligations = Vec::with_capacity(self.out.len());
@@ -393,7 +389,8 @@ impl<'tcx> WfPredicates<'tcx> {
// `i32: Clone`
// `i32: Copy`
// ]
- let obligations = self.nominal_obligations(data.item_def_id, data.substs);
+ // Projection types do not require const predicates.
+ let obligations = self.nominal_obligations_without_const(data.item_def_id, data.substs);
self.out.extend(obligations);
let tcx = self.tcx();
@@ -436,11 +433,13 @@ impl<'tcx> WfPredicates<'tcx> {
}
/// Pushes all the predicates needed to validate that `ty` is WF into `out`.
+ #[instrument(level = "debug", skip(self))]
fn compute(&mut self, arg: GenericArg<'tcx>) {
let mut walker = arg.walk();
let param_env = self.param_env;
let depth = self.recursion_depth;
while let Some(arg) = walker.next() {
+ debug!(?arg, ?self.out);
let ty = match arg.unpack() {
GenericArgKind::Type(ty) => ty,
@@ -448,14 +447,14 @@ impl<'tcx> WfPredicates<'tcx> {
// obligations are handled by the parent (e.g. `ty::Ref`).
GenericArgKind::Lifetime(_) => continue,
- GenericArgKind::Const(constant) => {
- match constant.kind() {
+ GenericArgKind::Const(ct) => {
+ match ct.kind() {
ty::ConstKind::Unevaluated(uv) => {
let obligations = self.nominal_obligations(uv.def.did, uv.substs);
self.out.extend(obligations);
let predicate =
- ty::Binder::dummy(ty::PredicateKind::ConstEvaluatable(uv.shrink()))
+ ty::Binder::dummy(ty::PredicateKind::ConstEvaluatable(ct))
.to_predicate(self.tcx());
let cause = self.cause(traits::WellFormed(None));
self.out.push(traits::Obligation::with_depth(
@@ -472,7 +471,7 @@ impl<'tcx> WfPredicates<'tcx> {
cause,
self.recursion_depth,
self.param_env,
- ty::Binder::dummy(ty::PredicateKind::WellFormed(constant.into()))
+ ty::Binder::dummy(ty::PredicateKind::WellFormed(ct.into()))
.to_predicate(self.tcx()),
));
}
@@ -490,6 +489,8 @@ impl<'tcx> WfPredicates<'tcx> {
}
};
+ debug!("wf bounds for ty={:?} ty.kind={:#?}", ty, ty.kind());
+
match *ty.kind() {
ty::Bool
| ty::Char
@@ -546,7 +547,7 @@ impl<'tcx> WfPredicates<'tcx> {
}
ty::FnDef(did, substs) => {
- let obligations = self.nominal_obligations(did, substs);
+ let obligations = self.nominal_obligations_without_const(did, substs);
self.out.extend(obligations);
}
@@ -636,7 +637,7 @@ impl<'tcx> WfPredicates<'tcx> {
}
}
- ty::Dynamic(data, r) => {
+ ty::Dynamic(data, r, _) => {
// WfObject
//
// Here, we defer WF checking due to higher-ranked
@@ -688,6 +689,8 @@ impl<'tcx> WfPredicates<'tcx> {
));
}
}
+
+ debug!(?self.out);
}
}
@@ -713,7 +716,7 @@ impl<'tcx> WfPredicates<'tcx> {
iter::zip(iter::zip(predicates.predicates, predicates.spans), origins.into_iter().rev())
.map(|((mut pred, span), origin_def_id)| {
let code = if span.is_dummy() {
- traits::MiscObligation
+ traits::ItemObligation(origin_def_id)
} else {
traits::BindingObligation(origin_def_id, span)
};
@@ -843,7 +846,7 @@ pub fn object_region_bounds<'tcx>(
///
/// Requires that trait definitions have been processed so that we can
/// elaborate predicates and walk supertraits.
-#[instrument(skip(tcx, predicates), level = "debug")]
+#[instrument(skip(tcx, predicates), level = "debug", ret)]
pub(crate) fn required_region_bounds<'tcx>(
tcx: TyCtxt<'tcx>,
erased_self_ty: Ty<'tcx>,
diff --git a/compiler/rustc_traits/src/chalk/db.rs b/compiler/rustc_traits/src/chalk/db.rs
index ff5ca0cbc..0de28b826 100644
--- a/compiler/rustc_traits/src/chalk/db.rs
+++ b/compiler/rustc_traits/src/chalk/db.rs
@@ -7,8 +7,8 @@
//! `crate::chalk::lowering` (to lower rustc types into Chalk types).
use rustc_middle::traits::ChalkRustInterner as RustInterner;
-use rustc_middle::ty::subst::{InternalSubsts, Subst, SubstsRef};
use rustc_middle::ty::{self, AssocKind, EarlyBinder, Ty, TyCtxt, TypeFoldable, TypeSuperFoldable};
+use rustc_middle::ty::{InternalSubsts, SubstsRef};
use rustc_ast::ast;
use rustc_attr as attr;
diff --git a/compiler/rustc_traits/src/chalk/lowering.rs b/compiler/rustc_traits/src/chalk/lowering.rs
index c7c604e14..45d5ea93d 100644
--- a/compiler/rustc_traits/src/chalk/lowering.rs
+++ b/compiler/rustc_traits/src/chalk/lowering.rs
@@ -191,7 +191,7 @@ impl<'tcx> LowerInto<'tcx, chalk_ir::GoalData<RustInterner<'tcx>>> for ty::Predi
GenericArgKind::Const(..) => {
chalk_ir::GoalData::All(chalk_ir::Goals::empty(interner))
}
- GenericArgKind::Lifetime(lt) => bug!("unexpect well formed predicate: {:?}", lt),
+ GenericArgKind::Lifetime(lt) => bug!("unexpected well formed predicate: {:?}", lt),
},
ty::PredicateKind::ObjectSafe(t) => chalk_ir::GoalData::DomainGoal(
@@ -326,7 +326,8 @@ impl<'tcx> LowerInto<'tcx, chalk_ir::Ty<RustInterner<'tcx>>> for Ty<'tcx> {
)),
})
}
- ty::Dynamic(predicates, region) => chalk_ir::TyKind::Dyn(chalk_ir::DynTy {
+ // FIXME(dyn-star): handle the dynamic kind (dyn or dyn*)
+ ty::Dynamic(predicates, region, _kind) => chalk_ir::TyKind::Dyn(chalk_ir::DynTy {
bounds: predicates.lower_into(interner),
lifetime: region.lower_into(interner),
}),
@@ -485,10 +486,6 @@ impl<'tcx> LowerInto<'tcx, chalk_ir::Lifetime<RustInterner<'tcx>>> for Region<'t
})
.intern(interner)
}
- ty::ReEmpty(ui) => {
- chalk_ir::LifetimeData::Empty(chalk_ir::UniverseIndex { counter: ui.index() })
- .intern(interner)
- }
ty::ReErased => chalk_ir::LifetimeData::Erased.intern(interner),
}
}
@@ -510,8 +507,8 @@ impl<'tcx> LowerInto<'tcx, Region<'tcx>> for &chalk_ir::Lifetime<RustInterner<'t
name: ty::BoundRegionKind::BrAnon(p.idx as u32),
}),
chalk_ir::LifetimeData::Static => return interner.tcx.lifetimes.re_static,
- chalk_ir::LifetimeData::Empty(ui) => {
- ty::ReEmpty(ty::UniverseIndex::from_usize(ui.counter))
+ chalk_ir::LifetimeData::Empty(_) => {
+ bug!("Chalk should not have been passed an empty lifetime.")
}
chalk_ir::LifetimeData::Erased => return interner.tcx.lifetimes.re_erased,
chalk_ir::LifetimeData::Phantom(void, _) => match *void {},
diff --git a/compiler/rustc_traits/src/dropck_outlives.rs b/compiler/rustc_traits/src/dropck_outlives.rs
index a20de08b4..d5a8ca5ea 100644
--- a/compiler/rustc_traits/src/dropck_outlives.rs
+++ b/compiler/rustc_traits/src/dropck_outlives.rs
@@ -4,7 +4,7 @@ use rustc_infer::infer::canonical::{Canonical, QueryResponse};
use rustc_infer::infer::TyCtxtInferExt;
use rustc_infer::traits::TraitEngineExt as _;
use rustc_middle::ty::query::Providers;
-use rustc_middle::ty::subst::{InternalSubsts, Subst};
+use rustc_middle::ty::InternalSubsts;
use rustc_middle::ty::{self, EarlyBinder, ParamEnvAnd, Ty, TyCtxt};
use rustc_span::source_map::{Span, DUMMY_SP};
use rustc_trait_selection::traits::query::dropck_outlives::trivial_dropck_outlives;
@@ -27,128 +27,120 @@ fn dropck_outlives<'tcx>(
) -> Result<&'tcx Canonical<'tcx, QueryResponse<'tcx, DropckOutlivesResult<'tcx>>>, NoSolution> {
debug!("dropck_outlives(goal={:#?})", canonical_goal);
- tcx.infer_ctxt().enter_with_canonical(
- DUMMY_SP,
- &canonical_goal,
- |ref infcx, goal, canonical_inference_vars| {
- let tcx = infcx.tcx;
- let ParamEnvAnd { param_env, value: for_ty } = goal;
-
- let mut result = DropckOutlivesResult { kinds: vec![], overflows: vec![] };
-
- // A stack of types left to process. Each round, we pop
- // something from the stack and invoke
- // `dtorck_constraint_for_ty`. This may produce new types that
- // have to be pushed on the stack. This continues until we have explored
- // all the reachable types from the type `for_ty`.
- //
- // Example: Imagine that we have the following code:
- //
- // ```rust
- // struct A {
- // value: B,
- // children: Vec<A>,
- // }
- //
- // struct B {
- // value: u32
- // }
- //
- // fn f() {
- // let a: A = ...;
- // ..
- // } // here, `a` is dropped
- // ```
- //
- // at the point where `a` is dropped, we need to figure out
- // which types inside of `a` contain region data that may be
- // accessed by any destructors in `a`. We begin by pushing `A`
- // onto the stack, as that is the type of `a`. We will then
- // invoke `dtorck_constraint_for_ty` which will expand `A`
- // into the types of its fields `(B, Vec<A>)`. These will get
- // pushed onto the stack. Eventually, expanding `Vec<A>` will
- // lead to us trying to push `A` a second time -- to prevent
- // infinite recursion, we notice that `A` was already pushed
- // once and stop.
- let mut ty_stack = vec![(for_ty, 0)];
-
- // Set used to detect infinite recursion.
- let mut ty_set = FxHashSet::default();
-
- let mut fulfill_cx = <dyn TraitEngine<'_>>::new(infcx.tcx);
-
- let cause = ObligationCause::dummy();
- let mut constraints = DropckConstraint::empty();
- while let Some((ty, depth)) = ty_stack.pop() {
- debug!(
- "{} kinds, {} overflows, {} ty_stack",
- result.kinds.len(),
- result.overflows.len(),
- ty_stack.len()
- );
- dtorck_constraint_for_ty(tcx, DUMMY_SP, for_ty, depth, ty, &mut constraints)?;
-
- // "outlives" represent types/regions that may be touched
- // by a destructor.
- result.kinds.append(&mut constraints.outlives);
- result.overflows.append(&mut constraints.overflows);
-
- // If we have even one overflow, we should stop trying to evaluate further --
- // chances are, the subsequent overflows for this evaluation won't provide useful
- // information and will just decrease the speed at which we can emit these errors
- // (since we'll be printing for just that much longer for the often enormous types
- // that result here).
- if !result.overflows.is_empty() {
- break;
- }
+ let (ref infcx, goal, canonical_inference_vars) =
+ tcx.infer_ctxt().build_with_canonical(DUMMY_SP, &canonical_goal);
+ let tcx = infcx.tcx;
+ let ParamEnvAnd { param_env, value: for_ty } = goal;
+
+ let mut result = DropckOutlivesResult { kinds: vec![], overflows: vec![] };
+
+ // A stack of types left to process. Each round, we pop
+ // something from the stack and invoke
+ // `dtorck_constraint_for_ty`. This may produce new types that
+ // have to be pushed on the stack. This continues until we have explored
+ // all the reachable types from the type `for_ty`.
+ //
+ // Example: Imagine that we have the following code:
+ //
+ // ```rust
+ // struct A {
+ // value: B,
+ // children: Vec<A>,
+ // }
+ //
+ // struct B {
+ // value: u32
+ // }
+ //
+ // fn f() {
+ // let a: A = ...;
+ // ..
+ // } // here, `a` is dropped
+ // ```
+ //
+ // at the point where `a` is dropped, we need to figure out
+ // which types inside of `a` contain region data that may be
+ // accessed by any destructors in `a`. We begin by pushing `A`
+ // onto the stack, as that is the type of `a`. We will then
+ // invoke `dtorck_constraint_for_ty` which will expand `A`
+ // into the types of its fields `(B, Vec<A>)`. These will get
+ // pushed onto the stack. Eventually, expanding `Vec<A>` will
+ // lead to us trying to push `A` a second time -- to prevent
+ // infinite recursion, we notice that `A` was already pushed
+ // once and stop.
+ let mut ty_stack = vec![(for_ty, 0)];
+
+ // Set used to detect infinite recursion.
+ let mut ty_set = FxHashSet::default();
+
+ let mut fulfill_cx = <dyn TraitEngine<'_>>::new(infcx.tcx);
+
+ let cause = ObligationCause::dummy();
+ let mut constraints = DropckConstraint::empty();
+ while let Some((ty, depth)) = ty_stack.pop() {
+ debug!(
+ "{} kinds, {} overflows, {} ty_stack",
+ result.kinds.len(),
+ result.overflows.len(),
+ ty_stack.len()
+ );
+ dtorck_constraint_for_ty(tcx, DUMMY_SP, for_ty, depth, ty, &mut constraints)?;
+
+ // "outlives" represent types/regions that may be touched
+ // by a destructor.
+ result.kinds.append(&mut constraints.outlives);
+ result.overflows.append(&mut constraints.overflows);
+
+ // If we have even one overflow, we should stop trying to evaluate further --
+ // chances are, the subsequent overflows for this evaluation won't provide useful
+ // information and will just decrease the speed at which we can emit these errors
+ // (since we'll be printing for just that much longer for the often enormous types
+ // that result here).
+ if !result.overflows.is_empty() {
+ break;
+ }
- // dtorck types are "types that will get dropped but which
- // do not themselves define a destructor", more or less. We have
- // to push them onto the stack to be expanded.
- for ty in constraints.dtorck_types.drain(..) {
- match infcx.at(&cause, param_env).normalize(ty) {
- Ok(Normalized { value: ty, obligations }) => {
- fulfill_cx.register_predicate_obligations(infcx, obligations);
-
- debug!("dropck_outlives: ty from dtorck_types = {:?}", ty);
-
- match ty.kind() {
- // All parameters live for the duration of the
- // function.
- ty::Param(..) => {}
-
- // A projection that we couldn't resolve - it
- // might have a destructor.
- ty::Projection(..) | ty::Opaque(..) => {
- result.kinds.push(ty.into());
- }
-
- _ => {
- if ty_set.insert(ty) {
- ty_stack.push((ty, depth + 1));
- }
- }
- }
+ // dtorck types are "types that will get dropped but which
+ // do not themselves define a destructor", more or less. We have
+ // to push them onto the stack to be expanded.
+ for ty in constraints.dtorck_types.drain(..) {
+ match infcx.at(&cause, param_env).normalize(ty) {
+ Ok(Normalized { value: ty, obligations }) => {
+ fulfill_cx.register_predicate_obligations(infcx, obligations);
+
+ debug!("dropck_outlives: ty from dtorck_types = {:?}", ty);
+
+ match ty.kind() {
+ // All parameters live for the duration of the
+ // function.
+ ty::Param(..) => {}
+
+ // A projection that we couldn't resolve - it
+ // might have a destructor.
+ ty::Projection(..) | ty::Opaque(..) => {
+ result.kinds.push(ty.into());
}
- // We don't actually expect to fail to normalize.
- // That implies a WF error somewhere else.
- Err(NoSolution) => {
- return Err(NoSolution);
+ _ => {
+ if ty_set.insert(ty) {
+ ty_stack.push((ty, depth + 1));
+ }
}
}
}
+
+ // We don't actually expect to fail to normalize.
+ // That implies a WF error somewhere else.
+ Err(NoSolution) => {
+ return Err(NoSolution);
+ }
}
+ }
+ }
- debug!("dropck_outlives: result = {:#?}", result);
+ debug!("dropck_outlives: result = {:#?}", result);
- infcx.make_canonicalized_query_response(
- canonical_inference_vars,
- result,
- &mut *fulfill_cx,
- )
- },
- )
+ infcx.make_canonicalized_query_response(canonical_inference_vars, result, &mut *fulfill_cx)
}
/// Returns a set of constraints that needs to be satisfied in
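
The comment at the top of this hunk describes a worklist walk over types, guarded by `ty_set` so that a self-referential type like `Vec<A>` is expanded only once. A minimal, self-contained sketch of that pattern, where plain strings stand in for types and `expand` is a hypothetical stand-in for `dtorck_constraint_for_ty`:

    use std::collections::HashSet;

    // Hypothetical stand-in for `dtorck_constraint_for_ty`: expand a "type"
    // into the types of its fields, using the A / B / Vec<A> shape from the
    // comment above.
    fn expand(ty: &str) -> Vec<&'static str> {
        match ty {
            "A" => vec!["B", "Vec<A>"],
            "Vec<A>" => vec!["A"],
            _ => vec![],
        }
    }

    fn main() {
        let mut stack = vec![("A", 0usize)];
        let mut seen: HashSet<&str> = HashSet::new();
        seen.insert("A");

        while let Some((ty, depth)) = stack.pop() {
            println!("expanding {ty} at depth {depth}");
            for field_ty in expand(ty) {
                // A type is pushed at most once, which is what cuts off the
                // infinite recursion through `Vec<A>`.
                if seen.insert(field_ty) {
                    stack.push((field_ty, depth + 1));
                }
            }
        }
    }
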
diff --git a/compiler/rustc_traits/src/evaluate_obligation.rs b/compiler/rustc_traits/src/evaluate_obligation.rs
index 49c9ba459..493d5de08 100644
--- a/compiler/rustc_traits/src/evaluate_obligation.rs
+++ b/compiler/rustc_traits/src/evaluate_obligation.rs
@@ -18,17 +18,15 @@ fn evaluate_obligation<'tcx>(
debug!("evaluate_obligation(canonical_goal={:#?})", canonical_goal);
// HACK This bubble is required for this test to pass:
// impl-trait/issue99642.rs
- tcx.infer_ctxt().with_opaque_type_inference(DefiningAnchor::Bubble).enter_with_canonical(
- DUMMY_SP,
- &canonical_goal,
- |ref infcx, goal, _canonical_inference_vars| {
- debug!("evaluate_obligation: goal={:#?}", goal);
- let ParamEnvAnd { param_env, value: predicate } = goal;
+ let (ref infcx, goal, _canonical_inference_vars) = tcx
+ .infer_ctxt()
+ .with_opaque_type_inference(DefiningAnchor::Bubble)
+ .build_with_canonical(DUMMY_SP, &canonical_goal);
+ debug!("evaluate_obligation: goal={:#?}", goal);
+ let ParamEnvAnd { param_env, value: predicate } = goal;
- let mut selcx = SelectionContext::with_query_mode(&infcx, TraitQueryMode::Canonical);
- let obligation = Obligation::new(ObligationCause::dummy(), param_env, predicate);
+ let mut selcx = SelectionContext::with_query_mode(&infcx, TraitQueryMode::Canonical);
+ let obligation = Obligation::new(ObligationCause::dummy(), param_env, predicate);
- selcx.evaluate_root_obligation(&obligation)
- },
- )
+ selcx.evaluate_root_obligation(&obligation)
}
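
This hunk is one instance of a refactor that recurs throughout the patch: an inference context previously handed to a closure via `enter`/`enter_with_canonical` is now returned directly by a builder (`build`/`build_with_canonical`), which removes one level of nesting from the body. A tiny standalone analogue of the two shapes; the `Ctx`/`CtxBuilder` types are invented for illustration and are not rustc API:

    struct Ctx { depth: u32 }

    struct CtxBuilder { depth: u32 }

    impl CtxBuilder {
        // Old shape: the context only lives inside the closure.
        fn enter<R>(self, f: impl FnOnce(&Ctx) -> R) -> R {
            let cx = Ctx { depth: self.depth };
            f(&cx)
        }

        // New shape: the caller owns the context and the body stays flat.
        fn build(self) -> Ctx {
            Ctx { depth: self.depth }
        }
    }

    fn main() {
        // Closure-based usage.
        let answer = CtxBuilder { depth: 1 }.enter(|cx| cx.depth + 41);

        // Builder-based usage, as in the rewritten query providers.
        let cx = CtxBuilder { depth: 1 }.build();
        let same = cx.depth + 41;

        assert_eq!(answer, same);
    }
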
diff --git a/compiler/rustc_traits/src/implied_outlives_bounds.rs b/compiler/rustc_traits/src/implied_outlives_bounds.rs
index e3e78f70b..7d36b9558 100644
--- a/compiler/rustc_traits/src/implied_outlives_bounds.rs
+++ b/compiler/rustc_traits/src/implied_outlives_bounds.rs
@@ -35,7 +35,7 @@ fn implied_outlives_bounds<'tcx>(
}
fn compute_implied_outlives_bounds<'tcx>(
- infcx: &InferCtxt<'_, 'tcx>,
+ infcx: &InferCtxt<'tcx>,
param_env: ty::ParamEnv<'tcx>,
ty: Ty<'tcx>,
) -> Fallible<Vec<OutlivesBound<'tcx>>> {
@@ -49,7 +49,8 @@ fn compute_implied_outlives_bounds<'tcx>(
let mut checked_wf_args = rustc_data_structures::fx::FxHashSet::default();
let mut wf_args = vec![ty.into()];
- let mut implied_bounds = vec![];
+ let mut outlives_bounds: Vec<ty::OutlivesPredicate<ty::GenericArg<'tcx>, ty::Region<'tcx>>> =
+ vec![];
let mut fulfill_cx = <dyn TraitEngine<'tcx>>::new(tcx);
@@ -65,41 +66,28 @@ fn compute_implied_outlives_bounds<'tcx>(
// than the ultimate set. (Note: normally there won't be
// unresolved inference variables here anyway, but there might be
// during typeck under some circumstances.)
+ //
+ // FIXME(@lcnr): It's not really "always fine", having fewer implied
+ // bounds can be backward incompatible, e.g. #101951 was caused by
+ // us not dealing with inference vars in `TypeOutlives` predicates.
let obligations = wf::obligations(infcx, param_env, hir::CRATE_HIR_ID, 0, arg, DUMMY_SP)
.unwrap_or_default();
- // N.B., all of these predicates *ought* to be easily proven
- // true. In fact, their correctness is (mostly) implied by
- // other parts of the program. However, in #42552, we had
- // an annoying scenario where:
- //
- // - Some `T::Foo` gets normalized, resulting in a
- // variable `_1` and a `T: Trait<Foo=_1>` constraint
- // (not sure why it couldn't immediately get
- // solved). This result of `_1` got cached.
- // - These obligations were dropped on the floor here,
- // rather than being registered.
- // - Then later we would get a request to normalize
- // `T::Foo` which would result in `_1` being used from
- // the cache, but hence without the `T: Trait<Foo=_1>`
- // constraint. As a result, `_1` never gets resolved,
- // and we get an ICE (in dropck).
- //
- // Therefore, we register any predicates involving
- // inference variables. We restrict ourselves to those
- // involving inference variables both for efficiency and
- // to avoids duplicate errors that otherwise show up.
+ // While these predicates should all be implied by other parts of
+ // the program, they are still relevant as they may constrain
+ // inference variables, which is necessary to add the correct
+ // implied bounds in some cases, mostly when dealing with projections.
fulfill_cx.register_predicate_obligations(
infcx,
- obligations.iter().filter(|o| o.predicate.has_infer_types_or_consts()).cloned(),
+ obligations.iter().filter(|o| o.predicate.has_non_region_infer()).cloned(),
);
// From the full set of obligations, just filter down to the
// region relationships.
- implied_bounds.extend(obligations.into_iter().flat_map(|obligation| {
+ outlives_bounds.extend(obligations.into_iter().filter_map(|obligation| {
assert!(!obligation.has_escaping_bound_vars());
match obligation.predicate.kind().no_bound_vars() {
- None => vec![],
+ None => None,
Some(pred) => match pred {
ty::PredicateKind::Trait(..)
| ty::PredicateKind::Subtype(..)
@@ -109,21 +97,18 @@ fn compute_implied_outlives_bounds<'tcx>(
| ty::PredicateKind::ObjectSafe(..)
| ty::PredicateKind::ConstEvaluatable(..)
| ty::PredicateKind::ConstEquate(..)
- | ty::PredicateKind::TypeWellFormedFromEnv(..) => vec![],
+ | ty::PredicateKind::TypeWellFormedFromEnv(..) => None,
ty::PredicateKind::WellFormed(arg) => {
wf_args.push(arg);
- vec![]
+ None
}
ty::PredicateKind::RegionOutlives(ty::OutlivesPredicate(r_a, r_b)) => {
- vec![OutlivesBound::RegionSubRegion(r_b, r_a)]
+ Some(ty::OutlivesPredicate(r_a.into(), r_b))
}
ty::PredicateKind::TypeOutlives(ty::OutlivesPredicate(ty_a, r_b)) => {
- let ty_a = infcx.resolve_vars_if_possible(ty_a);
- let mut components = smallvec![];
- push_outlives_components(tcx, ty_a, &mut components);
- implied_bounds_from_components(r_b, components)
+ Some(ty::OutlivesPredicate(ty_a.into(), r_b))
}
},
}
@@ -133,9 +118,27 @@ fn compute_implied_outlives_bounds<'tcx>(
// Ensure that those obligations that we had to solve
// get solved *here*.
match fulfill_cx.select_all_or_error(infcx).as_slice() {
- [] => Ok(implied_bounds),
- _ => Err(NoSolution),
+ [] => (),
+ _ => return Err(NoSolution),
}
+
+ // We lazily compute the outlives components as
+ // `select_all_or_error` constrains inference variables.
+ let implied_bounds = outlives_bounds
+ .into_iter()
+ .flat_map(|ty::OutlivesPredicate(a, r_b)| match a.unpack() {
+ ty::GenericArgKind::Lifetime(r_a) => vec![OutlivesBound::RegionSubRegion(r_b, r_a)],
+ ty::GenericArgKind::Type(ty_a) => {
+ let ty_a = infcx.resolve_vars_if_possible(ty_a);
+ let mut components = smallvec![];
+ push_outlives_components(tcx, ty_a, &mut components);
+ implied_bounds_from_components(r_b, components)
+ }
+ ty::GenericArgKind::Const(_) => unreachable!(),
+ })
+ .collect();
+
+ Ok(implied_bounds)
}
/// When we have an implied bound that `T: 'a`, we can further break
@@ -153,6 +156,9 @@ fn implied_bounds_from_components<'tcx>(
Component::Region(r) => Some(OutlivesBound::RegionSubRegion(sub_region, r)),
Component::Param(p) => Some(OutlivesBound::RegionSubParam(sub_region, p)),
Component::Projection(p) => Some(OutlivesBound::RegionSubProjection(sub_region, p)),
+ Component::Opaque(def_id, substs) => {
+ Some(OutlivesBound::RegionSubOpaque(sub_region, def_id, substs))
+ }
Component::EscapingProjection(_) =>
// If the projection has escaping regions, don't
// try to infer any implied bounds even for its
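
The rewrite above stores raw `OutlivesPredicate`s first and only expands them into outlives components after `select_all_or_error` has constrained inference variables, so the expansion sees resolved types. A rough standalone model of why that ordering matters; the `ToyTy` type and helper names are made up and only mirror the roles of `resolve_vars_if_possible` and `push_outlives_components`:

    use std::collections::HashMap;

    // A toy type that may still contain an unresolved inference variable.
    #[derive(Clone)]
    enum ToyTy {
        Tuple(Vec<ToyTy>),
        Param(&'static str),
        Infer(u32),
    }

    // Stand-in for `resolve_vars_if_possible`: substitute solved variables.
    fn resolve(ty: &ToyTy, solutions: &HashMap<u32, ToyTy>) -> ToyTy {
        match ty {
            ToyTy::Infer(v) => solutions.get(v).cloned().unwrap_or(ToyTy::Infer(*v)),
            ToyTy::Tuple(elems) => {
                ToyTy::Tuple(elems.iter().map(|t| resolve(t, solutions)).collect())
            }
            ToyTy::Param(p) => ToyTy::Param(p),
        }
    }

    // Stand-in for `push_outlives_components`: collect the parts of a type
    // that each need to outlive the region.
    fn components(ty: &ToyTy, out: &mut Vec<&'static str>) {
        match ty {
            ToyTy::Param(p) => out.push(p),
            ToyTy::Tuple(elems) => {
                for t in elems {
                    components(t, out);
                }
            }
            ToyTy::Infer(_) => {} // still unknown, nothing useful to record
        }
    }

    fn main() {
        // Collected eagerly, while `_0` is still an inference variable.
        let bound_ty = ToyTy::Tuple(vec![ToyTy::Param("T"), ToyTy::Infer(0)]);

        // Fulfillment later discovers that `_0 == U`.
        let mut solutions = HashMap::new();
        solutions.insert(0, ToyTy::Param("U"));

        // Expanding only after that sees the resolved variable, so both `T`
        // and `U` end up in the implied bounds instead of just `T`.
        let mut parts = Vec::new();
        components(&resolve(&bound_ty, &solutions), &mut parts);
        assert_eq!(parts, vec!["T", "U"]);
    }
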
diff --git a/compiler/rustc_traits/src/lib.rs b/compiler/rustc_traits/src/lib.rs
index 2bea164c0..0da28737f 100644
--- a/compiler/rustc_traits/src/lib.rs
+++ b/compiler/rustc_traits/src/lib.rs
@@ -1,7 +1,8 @@
//! New recursive solver modeled on Chalk's recursive solver. Most of
//! the guts are broken up into modules; see the comments in those modules.
-#![feature(let_else)]
+#![deny(rustc::untranslatable_diagnostic)]
+#![deny(rustc::diagnostic_outside_of_impl)]
#![recursion_limit = "256"]
#[macro_use]
diff --git a/compiler/rustc_traits/src/normalize_erasing_regions.rs b/compiler/rustc_traits/src/normalize_erasing_regions.rs
index 5d394ed22..2da64d73d 100644
--- a/compiler/rustc_traits/src/normalize_erasing_regions.rs
+++ b/compiler/rustc_traits/src/normalize_erasing_regions.rs
@@ -18,9 +18,6 @@ pub(crate) fn provide(p: &mut Providers) {
try_normalize_after_erasing_regions(tcx, goal)
},
- try_normalize_mir_const_after_erasing_regions: |tcx, goal| {
- try_normalize_after_erasing_regions(tcx, goal)
- },
..*p
};
}
@@ -30,30 +27,29 @@ fn try_normalize_after_erasing_regions<'tcx, T: TypeFoldable<'tcx> + PartialEq +
goal: ParamEnvAnd<'tcx, T>,
) -> Result<T, NoSolution> {
let ParamEnvAnd { param_env, value } = goal;
- tcx.infer_ctxt().enter(|infcx| {
- let cause = ObligationCause::dummy();
- match infcx.at(&cause, param_env).normalize(value) {
- Ok(Normalized { value: normalized_value, obligations: normalized_obligations }) => {
- // We don't care about the `obligations`; they are
- // always only region relations, and we are about to
- // erase those anyway:
- debug_assert_eq!(
- normalized_obligations.iter().find(|p| not_outlives_predicate(p.predicate)),
- None,
- );
+ let infcx = tcx.infer_ctxt().build();
+ let cause = ObligationCause::dummy();
+ match infcx.at(&cause, param_env).normalize(value) {
+ Ok(Normalized { value: normalized_value, obligations: normalized_obligations }) => {
+ // We don't care about the `obligations`; they are
+ // always only region relations, and we are about to
+ // erase those anyway:
+ debug_assert_eq!(
+ normalized_obligations.iter().find(|p| not_outlives_predicate(p.predicate)),
+ None,
+ );
- let resolved_value = infcx.resolve_vars_if_possible(normalized_value);
- // It's unclear when `resolve_vars` would have an effect in a
- // fresh `InferCtxt`. If this assert does trigger, it will give
- // us a test case.
- debug_assert_eq!(normalized_value, resolved_value);
- let erased = infcx.tcx.erase_regions(resolved_value);
- debug_assert!(!erased.needs_infer(), "{:?}", erased);
- Ok(erased)
- }
- Err(NoSolution) => Err(NoSolution),
+ let resolved_value = infcx.resolve_vars_if_possible(normalized_value);
+ // It's unclear when `resolve_vars` would have an effect in a
+ // fresh `InferCtxt`. If this assert does trigger, it will give
+ // us a test case.
+ debug_assert_eq!(normalized_value, resolved_value);
+ let erased = infcx.tcx.erase_regions(resolved_value);
+ debug_assert!(!erased.needs_infer(), "{:?}", erased);
+ Ok(erased)
}
- })
+ Err(NoSolution) => Err(NoSolution),
+ }
}
fn not_outlives_predicate<'tcx>(p: ty::Predicate<'tcx>) -> bool {
diff --git a/compiler/rustc_traits/src/type_op.rs b/compiler/rustc_traits/src/type_op.rs
index d895b647d..bca7458ed 100644
--- a/compiler/rustc_traits/src/type_op.rs
+++ b/compiler/rustc_traits/src/type_op.rs
@@ -3,12 +3,12 @@ use rustc_hir::def_id::DefId;
use rustc_infer::infer::at::ToTrace;
use rustc_infer::infer::canonical::{Canonical, QueryResponse};
use rustc_infer::infer::{DefiningAnchor, InferCtxt, TyCtxtInferExt};
-use rustc_infer::traits::TraitEngineExt as _;
+use rustc_infer::traits::{ObligationCauseCode, TraitEngineExt as _};
use rustc_middle::ty::query::Providers;
-use rustc_middle::ty::subst::{GenericArg, Subst, UserSelfTy, UserSubsts};
use rustc_middle::ty::{
self, EarlyBinder, FnSig, Lift, PolyFnSig, Ty, TyCtxt, TypeFoldable, Variance,
};
+use rustc_middle::ty::{GenericArg, UserSelfTy, UserSubsts};
use rustc_middle::ty::{ParamEnv, ParamEnvAnd, Predicate, ToPredicate};
use rustc_span::{Span, DUMMY_SP};
use rustc_trait_selection::infer::InferCtxtBuilderExt;
@@ -22,6 +22,7 @@ use rustc_trait_selection::traits::query::type_op::subtype::Subtype;
use rustc_trait_selection::traits::query::{Fallible, NoSolution};
use rustc_trait_selection::traits::{Normalized, Obligation, ObligationCause, TraitEngine};
use std::fmt;
+use std::iter::zip;
pub(crate) fn provide(p: &mut Providers) {
*p = Providers {
@@ -50,7 +51,7 @@ fn type_op_ascribe_user_type<'tcx>(
/// this query can be re-run to better track the span of the obligation cause, and improve the error
/// message. Do not call directly unless you're in that very specific context.
pub fn type_op_ascribe_user_type_with_span<'a, 'tcx: 'a>(
- infcx: &'a InferCtxt<'a, 'tcx>,
+ infcx: &'a InferCtxt<'tcx>,
fulfill_cx: &'a mut dyn TraitEngine<'tcx>,
key: ParamEnvAnd<'tcx, AscribeUserType<'tcx>>,
span: Option<Span>,
@@ -61,14 +62,15 @@ pub fn type_op_ascribe_user_type_with_span<'a, 'tcx: 'a>(
mir_ty, def_id, user_substs
);
- let mut cx = AscribeUserTypeCx { infcx, param_env, fulfill_cx };
- cx.relate_mir_and_user_ty(mir_ty, def_id, user_substs, span)?;
+ let mut cx = AscribeUserTypeCx { infcx, param_env, span: span.unwrap_or(DUMMY_SP), fulfill_cx };
+ cx.relate_mir_and_user_ty(mir_ty, def_id, user_substs)?;
Ok(())
}
struct AscribeUserTypeCx<'me, 'tcx> {
- infcx: &'me InferCtxt<'me, 'tcx>,
+ infcx: &'me InferCtxt<'tcx>,
param_env: ParamEnv<'tcx>,
+ span: Span,
fulfill_cx: &'me mut dyn TraitEngine<'tcx>,
}
@@ -77,12 +79,15 @@ impl<'me, 'tcx> AscribeUserTypeCx<'me, 'tcx> {
where
T: TypeFoldable<'tcx>,
{
+ self.normalize_with_cause(value, ObligationCause::misc(self.span, hir::CRATE_HIR_ID))
+ }
+
+ fn normalize_with_cause<T>(&mut self, value: T, cause: ObligationCause<'tcx>) -> T
+ where
+ T: TypeFoldable<'tcx>,
+ {
self.infcx
- .partially_normalize_associated_types_in(
- ObligationCause::misc(DUMMY_SP, hir::CRATE_HIR_ID),
- self.param_env,
- value,
- )
+ .partially_normalize_associated_types_in(cause, self.param_env, value)
.into_value_registering_obligations(self.infcx, self.fulfill_cx)
}
@@ -91,18 +96,13 @@ impl<'me, 'tcx> AscribeUserTypeCx<'me, 'tcx> {
T: ToTrace<'tcx>,
{
self.infcx
- .at(&ObligationCause::dummy(), self.param_env)
+ .at(&ObligationCause::dummy_with_span(self.span), self.param_env)
.relate(a, variance, b)?
.into_value_registering_obligations(self.infcx, self.fulfill_cx);
Ok(())
}
- fn prove_predicate(&mut self, predicate: Predicate<'tcx>, span: Option<Span>) {
- let cause = if let Some(span) = span {
- ObligationCause::dummy_with_span(span)
- } else {
- ObligationCause::dummy()
- };
+ fn prove_predicate(&mut self, predicate: Predicate<'tcx>, cause: ObligationCause<'tcx>) {
self.fulfill_cx.register_predicate_obligation(
self.infcx,
Obligation::new(cause, self.param_env, predicate),
@@ -120,20 +120,20 @@ impl<'me, 'tcx> AscribeUserTypeCx<'me, 'tcx> {
EarlyBinder(value).subst(self.tcx(), substs)
}
+ #[instrument(level = "debug", skip(self))]
fn relate_mir_and_user_ty(
&mut self,
mir_ty: Ty<'tcx>,
def_id: DefId,
user_substs: UserSubsts<'tcx>,
- span: Option<Span>,
) -> Result<(), NoSolution> {
let UserSubsts { user_self_ty, substs } = user_substs;
let tcx = self.tcx();
let ty = tcx.type_of(def_id);
let ty = self.subst(ty, substs);
- debug!("relate_type_and_user_type: ty of def-id is {:?}", ty);
let ty = self.normalize(ty);
+ debug!("relate_type_and_user_type: ty of def-id is {:?}", ty);
self.relate(mir_ty, Variance::Invariant, ty)?;
@@ -144,10 +144,22 @@ impl<'me, 'tcx> AscribeUserTypeCx<'me, 'tcx> {
// outlives" error messages.
let instantiated_predicates =
self.tcx().predicates_of(def_id).instantiate(self.tcx(), substs);
- debug!(?instantiated_predicates.predicates);
- for instantiated_predicate in instantiated_predicates.predicates {
- let instantiated_predicate = self.normalize(instantiated_predicate);
- self.prove_predicate(instantiated_predicate, span);
+
+ let cause = ObligationCause::dummy_with_span(self.span);
+
+ debug!(?instantiated_predicates);
+ for (instantiated_predicate, predicate_span) in
+ zip(instantiated_predicates.predicates, instantiated_predicates.spans)
+ {
+ let span = if self.span == DUMMY_SP { predicate_span } else { self.span };
+ let cause = ObligationCause::new(
+ span,
+ hir::CRATE_HIR_ID,
+ ObligationCauseCode::AscribeUserTypeProvePredicate(predicate_span),
+ );
+ let instantiated_predicate =
+ self.normalize_with_cause(instantiated_predicate, cause.clone());
+ self.prove_predicate(instantiated_predicate, cause);
}
if let Some(UserSelfTy { impl_def_id, self_ty }) = user_self_ty {
@@ -160,7 +172,7 @@ impl<'me, 'tcx> AscribeUserTypeCx<'me, 'tcx> {
self.prove_predicate(
ty::Binder::dummy(ty::PredicateKind::WellFormed(impl_self_ty.into()))
.to_predicate(self.tcx()),
- span,
+ cause.clone(),
);
}
@@ -177,7 +189,7 @@ impl<'me, 'tcx> AscribeUserTypeCx<'me, 'tcx> {
// which...could happen with normalization...
self.prove_predicate(
ty::Binder::dummy(ty::PredicateKind::WellFormed(ty.into())).to_predicate(self.tcx()),
- span,
+ cause,
);
Ok(())
}
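
The hunk above pairs each instantiated predicate with its own span via `std::iter::zip` and prefers that span whenever the ascription itself only has a dummy one. A tiny illustration of that selection, with made-up integer spans standing in for `Span`/`DUMMY_SP`:

    use std::iter::zip;

    const DUMMY: u32 = 0; // stand-in for DUMMY_SP

    fn main() {
        let predicates = ["T: Clone", "T: Send"];
        let predicate_spans = [10u32, 20];
        let ascription_span = DUMMY; // pretend the ascription has no useful span

        for (pred, pred_span) in zip(predicates, predicate_spans) {
            // Prefer the ascription's span, but fall back to the predicate's
            // own span when the former is just a dummy.
            let span = if ascription_span == DUMMY { pred_span } else { ascription_span };
            println!("prove `{pred}` at span {span}");
        }
    }
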
@@ -198,7 +210,7 @@ fn type_op_eq<'tcx>(
}
fn type_op_normalize<'tcx, T>(
- infcx: &InferCtxt<'_, 'tcx>,
+ infcx: &InferCtxt<'tcx>,
fulfill_cx: &mut dyn TraitEngine<'tcx>,
key: ParamEnvAnd<'tcx, Normalize<T>>,
) -> Fallible<T>
@@ -273,7 +285,7 @@ fn type_op_prove_predicate<'tcx>(
/// this query can be re-run to better track the span of the obligation cause, and improve the error
/// message. Do not call directly unless you're in that very specific context.
pub fn type_op_prove_predicate_with_cause<'a, 'tcx: 'a>(
- infcx: &'a InferCtxt<'a, 'tcx>,
+ infcx: &'a InferCtxt<'tcx>,
fulfill_cx: &'a mut dyn TraitEngine<'tcx>,
key: ParamEnvAnd<'tcx, ProvePredicate<'tcx>>,
cause: ObligationCause<'tcx>,
diff --git a/compiler/rustc_transmute/Cargo.toml b/compiler/rustc_transmute/Cargo.toml
index 9dc96e08a..aa6fe7d24 100644
--- a/compiler/rustc_transmute/Cargo.toml
+++ b/compiler/rustc_transmute/Cargo.toml
@@ -7,7 +7,8 @@ edition = "2021"
[dependencies]
tracing = "0.1"
-rustc_data_structures = { path = "../rustc_data_structures", optional = true}
+rustc_data_structures = { path = "../rustc_data_structures"}
+rustc_hir = { path = "../rustc_hir", optional = true}
rustc_infer = { path = "../rustc_infer", optional = true}
rustc_macros = { path = "../rustc_macros", optional = true}
rustc_middle = { path = "../rustc_middle", optional = true}
@@ -17,7 +18,7 @@ rustc_target = { path = "../rustc_target", optional = true}
[features]
rustc = [
"rustc_middle",
- "rustc_data_structures",
+ "rustc_hir",
"rustc_infer",
"rustc_macros",
"rustc_span",
diff --git a/compiler/rustc_transmute/src/layout/dfa.rs b/compiler/rustc_transmute/src/layout/dfa.rs
index b60ea6e7a..b8922696e 100644
--- a/compiler/rustc_transmute/src/layout/dfa.rs
+++ b/compiler/rustc_transmute/src/layout/dfa.rs
@@ -104,7 +104,6 @@ where
}
#[instrument(level = "debug")]
- #[cfg_attr(feature = "rustc", allow(rustc::potential_query_instability))]
pub(crate) fn from_nfa(nfa: Nfa<R>) -> Self {
let Nfa { transitions: nfa_transitions, start: nfa_start, accepting: nfa_accepting } = nfa;
diff --git a/compiler/rustc_transmute/src/layout/nfa.rs b/compiler/rustc_transmute/src/layout/nfa.rs
index f25e3c1fd..c2bc47bc0 100644
--- a/compiler/rustc_transmute/src/layout/nfa.rs
+++ b/compiler/rustc_transmute/src/layout/nfa.rs
@@ -119,8 +119,6 @@ where
let mut transitions: Map<State, Map<Transition<R>, Set<State>>> = self.transitions;
- // the iteration order doesn't matter
- #[cfg_attr(feature = "rustc", allow(rustc::potential_query_instability))]
for (source, transition) in other.transitions {
let fix_state = |state| if state == other.start { self.accepting } else { state };
let entry = transitions.entry(fix_state(source)).or_default();
@@ -142,8 +140,6 @@ where
let mut transitions: Map<State, Map<Transition<R>, Set<State>>> = self.transitions.clone();
- // the iteration order doesn't matter
- #[cfg_attr(feature = "rustc", allow(rustc::potential_query_instability))]
for (&(mut source), transition) in other.transitions.iter() {
// if source is starting state of `other`, replace with starting state of `self`
if source == other.start {
@@ -152,8 +148,6 @@ where
let entry = transitions.entry(source).or_default();
for (edge, destinations) in transition {
let entry = entry.entry(edge.clone()).or_default();
- // the iteration order doesn't matter
- #[cfg_attr(feature = "rustc", allow(rustc::potential_query_instability))]
for &(mut destination) in destinations {
// if dest is accepting state of `other`, replace with accepting state of `self`
if destination == other.accepting {
diff --git a/compiler/rustc_transmute/src/layout/tree.rs b/compiler/rustc_transmute/src/layout/tree.rs
index 70b3ba02b..acd4fa63d 100644
--- a/compiler/rustc_transmute/src/layout/tree.rs
+++ b/compiler/rustc_transmute/src/layout/tree.rs
@@ -1,4 +1,5 @@
use super::{Byte, Def, Ref};
+use std::ops::ControlFlow;
#[cfg(test)]
mod tests;
@@ -86,17 +87,18 @@ where
F: Fn(D) -> bool,
{
match self {
- Self::Seq(elts) => elts
- .into_iter()
- .map(|elt| elt.prune(f))
- .try_fold(Tree::unit(), |elts, elt| {
+ Self::Seq(elts) => match elts.into_iter().map(|elt| elt.prune(f)).try_fold(
+ Tree::unit(),
+ |elts, elt| {
if elt == Tree::uninhabited() {
- Err(Tree::uninhabited())
+ ControlFlow::Break(Tree::uninhabited())
} else {
- Ok(elts.then(elt))
+ ControlFlow::Continue(elts.then(elt))
}
- })
- .into_ok_or_err(),
+ },
+ ) {
+ ControlFlow::Break(node) | ControlFlow::Continue(node) => node,
+ },
Self::Alt(alts) => alts
.into_iter()
.map(|alt| alt.prune(f))
@@ -315,7 +317,7 @@ pub(crate) mod rustc {
tcx,
)?,
AdtKind::Enum => {
- tracing::trace!(?adt_def, "treeifying enum");
+ trace!(?adt_def, "treeifying enum");
let mut tree = Tree::uninhabited();
for (idx, discr) in adt_def.discriminants(tcx) {
@@ -379,7 +381,7 @@ pub(crate) mod rustc {
let clamp =
|align: Align| align.clamp(min_align, max_align).bytes().try_into().unwrap();
- let variant_span = tracing::trace_span!(
+ let variant_span = trace_span!(
"treeifying variant",
min_align = ?min_align,
max_align = ?max_align,
@@ -394,27 +396,27 @@ pub(crate) mod rustc {
// The layout of the variant is prefixed by the discriminant, if any.
if let Some(discr) = discr {
- tracing::trace!(?discr, "treeifying discriminant");
+ trace!(?discr, "treeifying discriminant");
let discr_layout = alloc::Layout::from_size_align(
layout_summary.discriminant_size,
clamp(layout_summary.discriminant_align),
)
.unwrap();
- tracing::trace!(?discr_layout, "computed discriminant layout");
+ trace!(?discr_layout, "computed discriminant layout");
variant_layout = variant_layout.extend(discr_layout).unwrap().0;
- tree = tree.then(Self::from_disr(discr, tcx, layout_summary.discriminant_size));
+ tree = tree.then(Self::from_discr(discr, tcx, layout_summary.discriminant_size));
}
// Next come fields.
- let fields_span = tracing::trace_span!("treeifying fields").entered();
+ let fields_span = trace_span!("treeifying fields").entered();
for field_def in variant_def.fields.iter() {
let field_ty = field_def.ty(tcx, substs_ref);
- let _span = tracing::trace_span!("treeifying field", field = ?field_ty).entered();
+ let _span = trace_span!("treeifying field", field = ?field_ty).entered();
// begin with the field's visibility
tree = tree.then(Self::def(Def::Field(field_def)));
- // compute the field's layout charactaristics
+ // compute the field's layout characteristics
let field_layout = layout_of(tcx, field_ty)?.clamp_align(min_align, max_align);
// next comes the field's padding
@@ -432,7 +434,7 @@ pub(crate) mod rustc {
drop(fields_span);
// finally: padding
- let padding_span = tracing::trace_span!("adding trailing padding").entered();
+ let padding_span = trace_span!("adding trailing padding").entered();
let padding_needed = layout_summary.total_size - variant_layout.size();
if padding_needed > 0 {
tree = tree.then(Self::padding(padding_needed));
@@ -442,11 +444,21 @@ pub(crate) mod rustc {
Ok(tree)
}
- pub fn from_disr(discr: Discr<'tcx>, tcx: TyCtxt<'tcx>, size: usize) -> Self {
- // FIXME(@jswrenn): I'm certain this is missing needed endian nuance.
- let bytes = discr.val.to_ne_bytes();
- let bytes = &bytes[..size];
- Self::Seq(bytes.into_iter().copied().map(|b| Self::from_bits(b)).collect())
+ pub fn from_discr(discr: Discr<'tcx>, tcx: TyCtxt<'tcx>, size: usize) -> Self {
+ use rustc_target::abi::Endian;
+
+ let bytes: [u8; 16];
+ let bytes = match tcx.data_layout.endian {
+ Endian::Little => {
+ bytes = discr.val.to_le_bytes();
+ &bytes[..size]
+ }
+ Endian::Big => {
+ bytes = discr.val.to_be_bytes();
+ &bytes[bytes.len() - size..]
+ }
+ };
+ Self::Seq(bytes.iter().map(|&b| Self::from_bits(b)).collect())
}
}
@@ -465,7 +477,7 @@ pub(crate) mod rustc {
layout.align().abi.bytes().try_into().unwrap(),
)
.unwrap();
- tracing::trace!(?ty, ?layout, "computed layout for type");
+ trace!(?ty, ?layout, "computed layout for type");
Ok(layout)
}
}
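
The `from_disr` → `from_discr` change resolves the endianness FIXME: on little-endian targets the significant discriminant bytes sit at the front of the little-endian encoding, on big-endian targets at the back of the big-endian encoding. A standalone sketch of that slicing; the local `Endian` enum is a stand-in, not the `rustc_target` one:

    #[derive(Clone, Copy)]
    enum Endian { Little, Big }

    // Return the `size` bytes that actually encode `val` on a target with the
    // given byte order, mirroring the slicing done for enum discriminants.
    fn discr_bytes(val: u128, size: usize, endian: Endian) -> Vec<u8> {
        match endian {
            Endian::Little => discr_le(val, size),
            Endian::Big => discr_be(val, size),
        }
    }

    fn discr_le(val: u128, size: usize) -> Vec<u8> {
        let bytes = val.to_le_bytes();
        bytes[..size].to_vec() // significant bytes come first
    }

    fn discr_be(val: u128, size: usize) -> Vec<u8> {
        let bytes = val.to_be_bytes();
        bytes[bytes.len() - size..].to_vec() // significant bytes come last
    }

    fn main() {
        // A 2-byte discriminant with value 0x0102.
        assert_eq!(discr_bytes(0x0102, 2, Endian::Little), vec![0x02, 0x01]);
        assert_eq!(discr_bytes(0x0102, 2, Endian::Big), vec![0x01, 0x02]);
    }
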
diff --git a/compiler/rustc_transmute/src/lib.rs b/compiler/rustc_transmute/src/lib.rs
index cfc7c752a..f7cc94e53 100644
--- a/compiler/rustc_transmute/src/lib.rs
+++ b/compiler/rustc_transmute/src/lib.rs
@@ -1,21 +1,12 @@
-#![feature(
- alloc_layout_extra,
- control_flow_enum,
- decl_macro,
- iterator_try_reduce,
- never_type,
- result_into_ok_or_err
-)]
+#![feature(alloc_layout_extra, control_flow_enum, decl_macro, iterator_try_reduce, never_type)]
#![allow(dead_code, unused_variables)]
+#![deny(rustc::untranslatable_diagnostic)]
+#![deny(rustc::diagnostic_outside_of_impl)]
#[macro_use]
extern crate tracing;
-#[cfg(feature = "rustc")]
-pub(crate) use rustc_data_structures::fx::{FxHashMap as Map, FxHashSet as Set};
-
-#[cfg(not(feature = "rustc"))]
-pub(crate) use std::collections::{HashMap as Map, HashSet as Set};
+pub(crate) use rustc_data_structures::fx::{FxIndexMap as Map, FxIndexSet as Set};
pub(crate) mod layout;
pub(crate) mod maybe_transmutable;
@@ -24,8 +15,8 @@ pub(crate) mod maybe_transmutable;
pub struct Assume {
pub alignment: bool,
pub lifetimes: bool,
+ pub safety: bool,
pub validity: bool,
- pub visibility: bool,
}
/// The type encodes answers to the question: "Are these types transmutable?"
@@ -67,11 +58,17 @@ pub enum Reason {
#[cfg(feature = "rustc")]
mod rustc {
+ use super::*;
+
+ use rustc_hir::lang_items::LangItem;
use rustc_infer::infer::InferCtxt;
use rustc_macros::{TypeFoldable, TypeVisitable};
use rustc_middle::traits::ObligationCause;
use rustc_middle::ty::Binder;
+ use rustc_middle::ty::Const;
+ use rustc_middle::ty::ParamEnv;
use rustc_middle::ty::Ty;
+ use rustc_middle::ty::TyCtxt;
/// The source and destination types of a transmutation.
#[derive(TypeFoldable, TypeVisitable, Debug, Clone, Copy)]
@@ -83,11 +80,11 @@ mod rustc {
}
pub struct TransmuteTypeEnv<'cx, 'tcx> {
- infcx: &'cx InferCtxt<'cx, 'tcx>,
+ infcx: &'cx InferCtxt<'tcx>,
}
impl<'cx, 'tcx> TransmuteTypeEnv<'cx, 'tcx> {
- pub fn new(infcx: &'cx InferCtxt<'cx, 'tcx>) -> Self {
+ pub fn new(infcx: &'cx InferCtxt<'tcx>) -> Self {
Self { infcx }
}
@@ -111,6 +108,59 @@ mod rustc {
.answer()
}
}
+
+ impl Assume {
+ /// Constructs an `Assume` from a given const-`Assume`.
+ pub fn from_const<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ param_env: ParamEnv<'tcx>,
+ c: Const<'tcx>,
+ ) -> Option<Self> {
+ use rustc_middle::ty::ScalarInt;
+ use rustc_middle::ty::TypeVisitable;
+ use rustc_span::symbol::sym;
+
+ let c = c.eval(tcx, param_env);
+
+ if let Some(err) = c.error_reported() {
+ return Some(Self {
+ alignment: true,
+ lifetimes: true,
+ safety: true,
+ validity: true,
+ });
+ }
+
+ let adt_def = c.ty().ty_adt_def()?;
+
+ assert_eq!(
+ tcx.require_lang_item(LangItem::TransmuteOpts, None),
+ adt_def.did(),
+ "The given `Const` was not marked with the `{}` lang item.",
+ LangItem::TransmuteOpts.name(),
+ );
+
+ let variant = adt_def.non_enum_variant();
+ let fields = c.to_valtree().unwrap_branch();
+
+ let get_field = |name| {
+ let (field_idx, _) = variant
+ .fields
+ .iter()
+ .enumerate()
+ .find(|(_, field_def)| name == field_def.name)
+ .expect(&format!("There were no fields named `{name}`."));
+ fields[field_idx].unwrap_leaf() == ScalarInt::TRUE
+ };
+
+ Some(Self {
+ alignment: get_field(sym::alignment),
+ lifetimes: get_field(sym::lifetimes),
+ safety: get_field(sym::safety),
+ validity: get_field(sym::validity),
+ })
+ }
+ }
}
#[cfg(feature = "rustc")]
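
`Assume::from_const` above reads each boolean field of the evaluated const by finding the field's index in the variant definition and indexing the valtree branch with it. A small standalone model of that name-to-index lookup; the field names match the ones used above, everything else is invented for illustration:

    fn main() {
        // Parallel views of one struct value: the declared field names and the
        // leaves of its evaluated constant, in declaration order.
        let field_names = ["alignment", "lifetimes", "safety", "validity"];
        let field_values = [false, false, true, true];

        let get_field = |name: &str| -> bool {
            let (idx, _) = field_names
                .iter()
                .enumerate()
                .find(|(_, n)| **n == name)
                .unwrap_or_else(|| panic!("There were no fields named `{name}`."));
            field_values[idx]
        };

        assert!(!get_field("alignment"));
        assert!(get_field("safety"));
    }
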
diff --git a/compiler/rustc_transmute/src/maybe_transmutable/mod.rs b/compiler/rustc_transmute/src/maybe_transmutable/mod.rs
index 076d922d1..1186eac37 100644
--- a/compiler/rustc_transmute/src/maybe_transmutable/mod.rs
+++ b/compiler/rustc_transmute/src/maybe_transmutable/mod.rs
@@ -105,12 +105,12 @@ where
#[inline(always)]
#[instrument(level = "debug", skip(self), fields(src = ?self.src, dst = ?self.dst))]
pub(crate) fn answer(self) -> Answer<<C as QueryContext>::Ref> {
- let assume_visibility = self.assume.visibility;
+ let assume_visibility = self.assume.safety;
let query_or_answer = self.map_layouts(|src, dst, scope, context| {
// Remove all `Def` nodes from `src`, without checking their visibility.
let src = src.prune(&|def| true);
- tracing::trace!(?src, "pruned src");
+ trace!(?src, "pruned src");
// Remove all `Def` nodes from `dst`, additionally...
let dst = if assume_visibility {
@@ -121,7 +121,7 @@ where
dst.prune(&|def| context.is_accessible_from(def, scope))
};
- tracing::trace!(?dst, "pruned dst");
+ trace!(?dst, "pruned dst");
// Convert `src` from a tree-based representation to an NFA-based representation.
// If the conversion fails because `src` is uninhabited, conclude that the transmutation
diff --git a/compiler/rustc_transmute/src/maybe_transmutable/query_context.rs b/compiler/rustc_transmute/src/maybe_transmutable/query_context.rs
index 9c2cf4c9a..adab343ac 100644
--- a/compiler/rustc_transmute/src/maybe_transmutable/query_context.rs
+++ b/compiler/rustc_transmute/src/maybe_transmutable/query_context.rs
@@ -82,7 +82,7 @@ mod rustc {
false
};
- tracing::trace!(?ret, "ret");
+ trace!(?ret, "ret");
ret
}
diff --git a/compiler/rustc_transmute/src/maybe_transmutable/tests.rs b/compiler/rustc_transmute/src/maybe_transmutable/tests.rs
index d9d125687..4d5772a4f 100644
--- a/compiler/rustc_transmute/src/maybe_transmutable/tests.rs
+++ b/compiler/rustc_transmute/src/maybe_transmutable/tests.rs
@@ -13,7 +13,7 @@ mod bool {
layout::Tree::<Def, !>::bool(),
layout::Tree::<Def, !>::bool(),
(),
- crate::Assume { alignment: false, lifetimes: false, validity: true, visibility: false },
+ crate::Assume { alignment: false, lifetimes: false, validity: true, safety: false },
UltraMinimal,
)
.answer();
@@ -26,7 +26,7 @@ mod bool {
layout::Dfa::<!>::bool(),
layout::Dfa::<!>::bool(),
(),
- crate::Assume { alignment: false, lifetimes: false, validity: true, visibility: false },
+ crate::Assume { alignment: false, lifetimes: false, validity: true, safety: false },
UltraMinimal,
)
.answer();
diff --git a/compiler/rustc_ty_utils/Cargo.toml b/compiler/rustc_ty_utils/Cargo.toml
index caad2ed42..5e4ba4730 100644
--- a/compiler/rustc_ty_utils/Cargo.toml
+++ b/compiler/rustc_ty_utils/Cargo.toml
@@ -4,12 +4,15 @@ version = "0.0.0"
edition = "2021"
[dependencies]
+rand = "0.8.4"
+rand_xoshiro = "0.6.0"
tracing = "0.1"
rustc_middle = { path = "../rustc_middle" }
rustc_data_structures = { path = "../rustc_data_structures" }
rustc_errors = { path = "../rustc_errors" }
rustc_hir = { path = "../rustc_hir" }
rustc_infer = { path = "../rustc_infer" }
+rustc_macros = { path = "../rustc_macros" }
rustc_span = { path = "../rustc_span" }
rustc_session = { path = "../rustc_session" }
rustc_target = { path = "../rustc_target" }
diff --git a/compiler/rustc_ty_utils/src/abi.rs b/compiler/rustc_ty_utils/src/abi.rs
new file mode 100644
index 000000000..73c7eb699
--- /dev/null
+++ b/compiler/rustc_ty_utils/src/abi.rs
@@ -0,0 +1,551 @@
+use rustc_hir as hir;
+use rustc_hir::lang_items::LangItem;
+use rustc_middle::ty::layout::{
+ fn_can_unwind, FnAbiError, HasParamEnv, HasTyCtxt, LayoutCx, LayoutOf, TyAndLayout,
+};
+use rustc_middle::ty::{self, Ty, TyCtxt};
+use rustc_session::config::OptLevel;
+use rustc_span::def_id::DefId;
+use rustc_target::abi::call::{
+ ArgAbi, ArgAttribute, ArgAttributes, ArgExtension, Conv, FnAbi, PassMode, Reg, RegKind,
+};
+use rustc_target::abi::*;
+use rustc_target::spec::abi::Abi as SpecAbi;
+
+use std::iter;
+
+pub fn provide(providers: &mut ty::query::Providers) {
+ *providers = ty::query::Providers { fn_abi_of_fn_ptr, fn_abi_of_instance, ..*providers };
+}
+
+// NOTE(eddyb) this is private to avoid using it from outside of
+// `fn_abi_of_instance` - any other uses are either too high-level
+// for `Instance` (e.g. typeck would use `Ty::fn_sig` instead),
+// or should go through `FnAbi` instead, to avoid losing any
+// adjustments `fn_abi_of_instance` might be performing.
+#[tracing::instrument(level = "debug", skip(tcx, param_env))]
+fn fn_sig_for_fn_abi<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ instance: ty::Instance<'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+) -> ty::PolyFnSig<'tcx> {
+ let ty = instance.ty(tcx, param_env);
+ match *ty.kind() {
+ ty::FnDef(..) => {
+ // HACK(davidtwco,eddyb): This is a workaround for polymorphization considering
+ // parameters unused if they show up in the signature, but not in the `mir::Body`
+ // (i.e. due to being inside a projection that got normalized, see
+ // `src/test/ui/polymorphization/normalized_sig_types.rs`), and codegen not keeping
+ // track of a polymorphization `ParamEnv` to allow normalizing later.
+ //
+ // We normalize the `fn_sig` again after substituting at a later point.
+ let mut sig = match *ty.kind() {
+ ty::FnDef(def_id, substs) => tcx
+ .bound_fn_sig(def_id)
+ .map_bound(|fn_sig| {
+ tcx.normalize_erasing_regions(tcx.param_env(def_id), fn_sig)
+ })
+ .subst(tcx, substs),
+ _ => unreachable!(),
+ };
+
+ if let ty::InstanceDef::VTableShim(..) = instance.def {
+ // Modify `fn(self, ...)` to `fn(self: *mut Self, ...)`.
+ sig = sig.map_bound(|mut sig| {
+ let mut inputs_and_output = sig.inputs_and_output.to_vec();
+ inputs_and_output[0] = tcx.mk_mut_ptr(inputs_and_output[0]);
+ sig.inputs_and_output = tcx.intern_type_list(&inputs_and_output);
+ sig
+ });
+ }
+ sig
+ }
+ ty::Closure(def_id, substs) => {
+ let sig = substs.as_closure().sig();
+
+ let bound_vars = tcx.mk_bound_variable_kinds(
+ sig.bound_vars().iter().chain(iter::once(ty::BoundVariableKind::Region(ty::BrEnv))),
+ );
+ let br = ty::BoundRegion {
+ var: ty::BoundVar::from_usize(bound_vars.len() - 1),
+ kind: ty::BoundRegionKind::BrEnv,
+ };
+ let env_region = ty::ReLateBound(ty::INNERMOST, br);
+ let env_ty = tcx.closure_env_ty(def_id, substs, env_region).unwrap();
+
+ let sig = sig.skip_binder();
+ ty::Binder::bind_with_vars(
+ tcx.mk_fn_sig(
+ iter::once(env_ty).chain(sig.inputs().iter().cloned()),
+ sig.output(),
+ sig.c_variadic,
+ sig.unsafety,
+ sig.abi,
+ ),
+ bound_vars,
+ )
+ }
+ ty::Generator(_, substs, _) => {
+ let sig = substs.as_generator().poly_sig();
+
+ let bound_vars = tcx.mk_bound_variable_kinds(
+ sig.bound_vars().iter().chain(iter::once(ty::BoundVariableKind::Region(ty::BrEnv))),
+ );
+ let br = ty::BoundRegion {
+ var: ty::BoundVar::from_usize(bound_vars.len() - 1),
+ kind: ty::BoundRegionKind::BrEnv,
+ };
+ let env_region = ty::ReLateBound(ty::INNERMOST, br);
+ let env_ty = tcx.mk_mut_ref(tcx.mk_region(env_region), ty);
+
+ let pin_did = tcx.require_lang_item(LangItem::Pin, None);
+ let pin_adt_ref = tcx.adt_def(pin_did);
+ let pin_substs = tcx.intern_substs(&[env_ty.into()]);
+ let env_ty = tcx.mk_adt(pin_adt_ref, pin_substs);
+
+ let sig = sig.skip_binder();
+ let state_did = tcx.require_lang_item(LangItem::GeneratorState, None);
+ let state_adt_ref = tcx.adt_def(state_did);
+ let state_substs = tcx.intern_substs(&[sig.yield_ty.into(), sig.return_ty.into()]);
+ let ret_ty = tcx.mk_adt(state_adt_ref, state_substs);
+ ty::Binder::bind_with_vars(
+ tcx.mk_fn_sig(
+ [env_ty, sig.resume_ty].iter(),
+ &ret_ty,
+ false,
+ hir::Unsafety::Normal,
+ rustc_target::spec::abi::Abi::Rust,
+ ),
+ bound_vars,
+ )
+ }
+ _ => bug!("unexpected type {:?} in Instance::fn_sig", ty),
+ }
+}
+
+#[inline]
+fn conv_from_spec_abi(tcx: TyCtxt<'_>, abi: SpecAbi) -> Conv {
+ use rustc_target::spec::abi::Abi::*;
+ match tcx.sess.target.adjust_abi(abi) {
+ RustIntrinsic | PlatformIntrinsic | Rust | RustCall => Conv::Rust,
+ RustCold => Conv::RustCold,
+
+ // It's the ABI's job to select this, not ours.
+ System { .. } => bug!("system abi should be selected elsewhere"),
+ EfiApi => bug!("eficall abi should be selected elsewhere"),
+
+ Stdcall { .. } => Conv::X86Stdcall,
+ Fastcall { .. } => Conv::X86Fastcall,
+ Vectorcall { .. } => Conv::X86VectorCall,
+ Thiscall { .. } => Conv::X86ThisCall,
+ C { .. } => Conv::C,
+ Unadjusted => Conv::C,
+ Win64 { .. } => Conv::X86_64Win64,
+ SysV64 { .. } => Conv::X86_64SysV,
+ Aapcs { .. } => Conv::ArmAapcs,
+ CCmseNonSecureCall => Conv::CCmseNonSecureCall,
+ PtxKernel => Conv::PtxKernel,
+ Msp430Interrupt => Conv::Msp430Intr,
+ X86Interrupt => Conv::X86Intr,
+ AmdGpuKernel => Conv::AmdGpuKernel,
+ AvrInterrupt => Conv::AvrInterrupt,
+ AvrNonBlockingInterrupt => Conv::AvrNonBlockingInterrupt,
+ Wasm => Conv::C,
+
+ // These API constants ought to be more specific...
+ Cdecl { .. } => Conv::C,
+ }
+}
+
+fn fn_abi_of_fn_ptr<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ query: ty::ParamEnvAnd<'tcx, (ty::PolyFnSig<'tcx>, &'tcx ty::List<Ty<'tcx>>)>,
+) -> Result<&'tcx FnAbi<'tcx, Ty<'tcx>>, FnAbiError<'tcx>> {
+ let (param_env, (sig, extra_args)) = query.into_parts();
+
+ let cx = LayoutCx { tcx, param_env };
+ fn_abi_new_uncached(&cx, sig, extra_args, None, None, false)
+}
+
+fn fn_abi_of_instance<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ query: ty::ParamEnvAnd<'tcx, (ty::Instance<'tcx>, &'tcx ty::List<Ty<'tcx>>)>,
+) -> Result<&'tcx FnAbi<'tcx, Ty<'tcx>>, FnAbiError<'tcx>> {
+ let (param_env, (instance, extra_args)) = query.into_parts();
+
+ let sig = fn_sig_for_fn_abi(tcx, instance, param_env);
+
+ let caller_location = if instance.def.requires_caller_location(tcx) {
+ Some(tcx.caller_location_ty())
+ } else {
+ None
+ };
+
+ fn_abi_new_uncached(
+ &LayoutCx { tcx, param_env },
+ sig,
+ extra_args,
+ caller_location,
+ Some(instance.def_id()),
+ matches!(instance.def, ty::InstanceDef::Virtual(..)),
+ )
+}
+
+// Handle safe Rust thin and fat pointers.
+fn adjust_for_rust_scalar<'tcx>(
+ cx: LayoutCx<'tcx, TyCtxt<'tcx>>,
+ attrs: &mut ArgAttributes,
+ scalar: Scalar,
+ layout: TyAndLayout<'tcx>,
+ offset: Size,
+ is_return: bool,
+) {
+ // Booleans are always a noundef i1 that needs to be zero-extended.
+ if scalar.is_bool() {
+ attrs.ext(ArgExtension::Zext);
+ attrs.set(ArgAttribute::NoUndef);
+ return;
+ }
+
+ // Scalars which have invalid values cannot be undef.
+ if !scalar.is_always_valid(&cx) {
+ attrs.set(ArgAttribute::NoUndef);
+ }
+
+ // Only pointer types handled below.
+ let Scalar::Initialized { value: Pointer, valid_range} = scalar else { return };
+
+ if !valid_range.contains(0) {
+ attrs.set(ArgAttribute::NonNull);
+ }
+
+ if let Some(pointee) = layout.pointee_info_at(&cx, offset) {
+ if let Some(kind) = pointee.safe {
+ attrs.pointee_align = Some(pointee.align);
+
+ // `Box` (`UniqueBorrowed`) are not necessarily dereferenceable
+ // for the entire duration of the function as they can be deallocated
+ // at any time. Same for shared mutable references. If LLVM had a
+ // way to say "dereferenceable on entry" we could use it here.
+ attrs.pointee_size = match kind {
+ PointerKind::UniqueBorrowed
+ | PointerKind::UniqueBorrowedPinned
+ | PointerKind::Frozen => pointee.size,
+ PointerKind::SharedMutable | PointerKind::UniqueOwned => Size::ZERO,
+ };
+
+ // `Box`, `&T`, and `&mut T` cannot be undef.
+ // Note that this only applies to the value of the pointer itself;
+ // this attribute doesn't make it UB for the pointed-to data to be undef.
+ attrs.set(ArgAttribute::NoUndef);
+
+ // The aliasing rules for `Box<T>` are still not decided, but currently we emit
+ // `noalias` for it. This can be turned off using an unstable flag.
+ // See https://github.com/rust-lang/unsafe-code-guidelines/issues/326
+ let noalias_for_box = cx.tcx.sess.opts.unstable_opts.box_noalias.unwrap_or(true);
+
+ // `&mut` pointer parameters never alias other parameters,
+ // or mutable global data
+ //
+ // `&T` where `T` contains no `UnsafeCell<U>` is immutable,
+ // and can be marked as both `readonly` and `noalias`, as
+ // LLVM's definition of `noalias` is based solely on memory
+ // dependencies rather than pointer equality
+ //
+ // Due to past miscompiles in LLVM, we apply a separate NoAliasMutRef attribute
+ // for UniqueBorrowed arguments, so that the codegen backend can decide whether
+ // or not to actually emit the attribute. It can also be controlled with the
+ // `-Zmutable-noalias` debugging option.
+ let no_alias = match kind {
+ PointerKind::SharedMutable
+ | PointerKind::UniqueBorrowed
+ | PointerKind::UniqueBorrowedPinned => false,
+ PointerKind::UniqueOwned => noalias_for_box,
+ PointerKind::Frozen => !is_return,
+ };
+ if no_alias {
+ attrs.set(ArgAttribute::NoAlias);
+ }
+
+ if kind == PointerKind::Frozen && !is_return {
+ attrs.set(ArgAttribute::ReadOnly);
+ }
+
+ if kind == PointerKind::UniqueBorrowed && !is_return {
+ attrs.set(ArgAttribute::NoAliasMutRef);
+ }
+ }
+ }
+}
+
+// FIXME(eddyb) perhaps group the signature/type-containing (or all of them?)
+// arguments of this method, into a separate `struct`.
+#[tracing::instrument(level = "debug", skip(cx, caller_location, fn_def_id, force_thin_self_ptr))]
+fn fn_abi_new_uncached<'tcx>(
+ cx: &LayoutCx<'tcx, TyCtxt<'tcx>>,
+ sig: ty::PolyFnSig<'tcx>,
+ extra_args: &[Ty<'tcx>],
+ caller_location: Option<Ty<'tcx>>,
+ fn_def_id: Option<DefId>,
+ // FIXME(eddyb) replace this with something typed, like an `enum`.
+ force_thin_self_ptr: bool,
+) -> Result<&'tcx FnAbi<'tcx, Ty<'tcx>>, FnAbiError<'tcx>> {
+ let sig = cx.tcx.normalize_erasing_late_bound_regions(cx.param_env, sig);
+
+ let conv = conv_from_spec_abi(cx.tcx(), sig.abi);
+
+ let mut inputs = sig.inputs();
+ let extra_args = if sig.abi == RustCall {
+ assert!(!sig.c_variadic && extra_args.is_empty());
+
+ if let Some(input) = sig.inputs().last() {
+ if let ty::Tuple(tupled_arguments) = input.kind() {
+ inputs = &sig.inputs()[0..sig.inputs().len() - 1];
+ tupled_arguments
+ } else {
+ bug!(
+ "argument to function with \"rust-call\" ABI \
+ is not a tuple"
+ );
+ }
+ } else {
+ bug!(
+ "argument to function with \"rust-call\" ABI \
+ is not a tuple"
+ );
+ }
+ } else {
+ assert!(sig.c_variadic || extra_args.is_empty());
+ extra_args
+ };
+
+ let target = &cx.tcx.sess.target;
+ let target_env_gnu_like = matches!(&target.env[..], "gnu" | "musl" | "uclibc");
+ let win_x64_gnu = target.os == "windows" && target.arch == "x86_64" && target.env == "gnu";
+ let linux_s390x_gnu_like =
+ target.os == "linux" && target.arch == "s390x" && target_env_gnu_like;
+ let linux_sparc64_gnu_like =
+ target.os == "linux" && target.arch == "sparc64" && target_env_gnu_like;
+ let linux_powerpc_gnu_like =
+ target.os == "linux" && target.arch == "powerpc" && target_env_gnu_like;
+ use SpecAbi::*;
+ let rust_abi = matches!(sig.abi, RustIntrinsic | PlatformIntrinsic | Rust | RustCall);
+
+ let arg_of = |ty: Ty<'tcx>, arg_idx: Option<usize>| -> Result<_, FnAbiError<'tcx>> {
+ let span = tracing::debug_span!("arg_of");
+ let _entered = span.enter();
+ let is_return = arg_idx.is_none();
+
+ let layout = cx.layout_of(ty)?;
+ let layout = if force_thin_self_ptr && arg_idx == Some(0) {
+ // Don't pass the vtable, it's not an argument of the virtual fn.
+ // Instead, pass just the data pointer, but give it the type `*const/mut dyn Trait`
+ // or `&/&mut dyn Trait` because this is special-cased elsewhere in codegen
+ make_thin_self_ptr(cx, layout)
+ } else {
+ layout
+ };
+
+ let mut arg = ArgAbi::new(cx, layout, |layout, scalar, offset| {
+ let mut attrs = ArgAttributes::new();
+ adjust_for_rust_scalar(*cx, &mut attrs, scalar, *layout, offset, is_return);
+ attrs
+ });
+
+ if arg.layout.is_zst() {
+ // For some forsaken reason, x86_64-pc-windows-gnu
+ // doesn't ignore zero-sized struct arguments.
+ // The same is true for {s390x,sparc64,powerpc}-unknown-linux-{gnu,musl,uclibc}.
+ if is_return
+ || rust_abi
+ || (!win_x64_gnu
+ && !linux_s390x_gnu_like
+ && !linux_sparc64_gnu_like
+ && !linux_powerpc_gnu_like)
+ {
+ arg.mode = PassMode::Ignore;
+ }
+ }
+
+ Ok(arg)
+ };
+
+ let mut fn_abi = FnAbi {
+ ret: arg_of(sig.output(), None)?,
+ args: inputs
+ .iter()
+ .copied()
+ .chain(extra_args.iter().copied())
+ .chain(caller_location)
+ .enumerate()
+ .map(|(i, ty)| arg_of(ty, Some(i)))
+ .collect::<Result<_, _>>()?,
+ c_variadic: sig.c_variadic,
+ fixed_count: inputs.len() as u32,
+ conv,
+ can_unwind: fn_can_unwind(cx.tcx(), fn_def_id, sig.abi),
+ };
+ fn_abi_adjust_for_abi(cx, &mut fn_abi, sig.abi, fn_def_id)?;
+ debug!("fn_abi_new_uncached = {:?}", fn_abi);
+ Ok(cx.tcx.arena.alloc(fn_abi))
+}
+
+#[tracing::instrument(level = "trace", skip(cx))]
+fn fn_abi_adjust_for_abi<'tcx>(
+ cx: &LayoutCx<'tcx, TyCtxt<'tcx>>,
+ fn_abi: &mut FnAbi<'tcx, Ty<'tcx>>,
+ abi: SpecAbi,
+ fn_def_id: Option<DefId>,
+) -> Result<(), FnAbiError<'tcx>> {
+ if abi == SpecAbi::Unadjusted {
+ return Ok(());
+ }
+
+ if abi == SpecAbi::Rust
+ || abi == SpecAbi::RustCall
+ || abi == SpecAbi::RustIntrinsic
+ || abi == SpecAbi::PlatformIntrinsic
+ {
+ // Look up the deduced parameter attributes for this function, if we have its def ID and
+ // we're optimizing in non-incremental mode. We'll tag its parameters with those attributes
+ // as appropriate.
+ let deduced_param_attrs = if cx.tcx.sess.opts.optimize != OptLevel::No
+ && cx.tcx.sess.opts.incremental.is_none()
+ {
+ fn_def_id.map(|fn_def_id| cx.tcx.deduced_param_attrs(fn_def_id)).unwrap_or_default()
+ } else {
+ &[]
+ };
+
+ let fixup = |arg: &mut ArgAbi<'tcx, Ty<'tcx>>, arg_idx: Option<usize>| {
+ if arg.is_ignore() {
+ return;
+ }
+
+ match arg.layout.abi {
+ Abi::Aggregate { .. } => {}
+
+ // This is a fun case! The gist of what this is doing is
+ // that we want callers and callees to always agree on the
+ // ABI of how they pass SIMD arguments. If we were to *not*
+ // make these arguments indirect then they'd be immediates
+            // in LLVM, which means that they'd use whatever the
+ // appropriate ABI is for the callee and the caller. That
+ // means, for example, if the caller doesn't have AVX
+ // enabled but the callee does, then passing an AVX argument
+ // across this boundary would cause corrupt data to show up.
+ //
+ // This problem is fixed by unconditionally passing SIMD
+ // arguments through memory between callers and callees
+ // which should get them all to agree on ABI regardless of
+ // target feature sets. Some more information about this
+ // issue can be found in #44367.
+ //
+ // Note that the platform intrinsic ABI is exempt here as
+ // that's how we connect up to LLVM and it's unstable
+ // anyway, we control all calls to it in libstd.
+ Abi::Vector { .. }
+ if abi != SpecAbi::PlatformIntrinsic
+ && cx.tcx.sess.target.simd_types_indirect =>
+ {
+ arg.make_indirect();
+ return;
+ }
+
+ _ => return,
+ }
+
+ let size = arg.layout.size;
+ if arg.layout.is_unsized() || size > Pointer.size(cx) {
+ arg.make_indirect();
+ } else {
+ // We want to pass small aggregates as immediates, but using
+ // a LLVM aggregate type for this leads to bad optimizations,
+ // so we pick an appropriately sized integer type instead.
+ arg.cast_to(Reg { kind: RegKind::Integer, size });
+ }
+
+ // If we deduced that this parameter was read-only, add that to the attribute list now.
+ //
+ // The `readonly` parameter only applies to pointers, so we can only do this if the
+ // argument was passed indirectly. (If the argument is passed directly, it's an SSA
+ // value, so it's implicitly immutable.)
+ if let (Some(arg_idx), &mut PassMode::Indirect { ref mut attrs, .. }) =
+ (arg_idx, &mut arg.mode)
+ {
+ // The `deduced_param_attrs` list could be empty if this is a type of function
+ // we can't deduce any parameters for, so make sure the argument index is in
+ // bounds.
+ if let Some(deduced_param_attrs) = deduced_param_attrs.get(arg_idx) {
+ if deduced_param_attrs.read_only {
+ attrs.regular.insert(ArgAttribute::ReadOnly);
+ debug!("added deduced read-only attribute");
+ }
+ }
+ }
+ };
+
+ fixup(&mut fn_abi.ret, None);
+ for (arg_idx, arg) in fn_abi.args.iter_mut().enumerate() {
+ fixup(arg, Some(arg_idx));
+ }
+ } else {
+ fn_abi.adjust_for_foreign_abi(cx, abi)?;
+ }
+
+ Ok(())
+}
+
+#[tracing::instrument(level = "debug", skip(cx))]
+fn make_thin_self_ptr<'tcx>(
+ cx: &(impl HasTyCtxt<'tcx> + HasParamEnv<'tcx>),
+ layout: TyAndLayout<'tcx>,
+) -> TyAndLayout<'tcx> {
+ let tcx = cx.tcx();
+ let fat_pointer_ty = if layout.is_unsized() {
+ // unsized `self` is passed as a pointer to `self`
+ // FIXME (mikeyhew) change this to use &own if it is ever added to the language
+ tcx.mk_mut_ptr(layout.ty)
+ } else {
+ match layout.abi {
+ Abi::ScalarPair(..) | Abi::Scalar(..) => (),
+ _ => bug!("receiver type has unsupported layout: {:?}", layout),
+ }
+
+ // In the case of Rc<Self>, we need to explicitly pass a *mut RcBox<Self>
+ // with a Scalar (not ScalarPair) ABI. This is a hack that is understood
+ // elsewhere in the compiler as a method on a `dyn Trait`.
+ // To get the type `*mut RcBox<Self>`, we just keep unwrapping newtypes until we
+ // get a built-in pointer type
+ let mut fat_pointer_layout = layout;
+ 'descend_newtypes: while !fat_pointer_layout.ty.is_unsafe_ptr()
+ && !fat_pointer_layout.ty.is_region_ptr()
+ {
+ for i in 0..fat_pointer_layout.fields.count() {
+ let field_layout = fat_pointer_layout.field(cx, i);
+
+ if !field_layout.is_zst() {
+ fat_pointer_layout = field_layout;
+ continue 'descend_newtypes;
+ }
+ }
+
+ bug!("receiver has no non-zero-sized fields {:?}", fat_pointer_layout);
+ }
+
+ fat_pointer_layout.ty
+ };
+
+ // we now have a type like `*mut RcBox<dyn Trait>`
+ // change its layout to that of `*mut ()`, a thin pointer, but keep the same type
+ // this is understood as a special case elsewhere in the compiler
+ let unit_ptr_ty = tcx.mk_mut_ptr(tcx.mk_unit());
+
+ TyAndLayout {
+ ty: fat_pointer_ty,
+
+ // NOTE(eddyb) using an empty `ParamEnv`, and `unwrap`-ing the `Result`
+ // should always work because the type is always `*mut ()`.
+ ..tcx.layout_of(ty::ParamEnv::reveal_all().and(unit_ptr_ty)).unwrap()
+ }
+}
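
One detail of the new `abi.rs` worth calling out is the argument fixup in `fn_abi_adjust_for_abi`: unsized or pointer-oversized aggregates are made indirect, while small aggregates are cast to an integer of the same size rather than left as an aggregate. A compact, illustrative model of that decision; the types and byte sizes below are not the rustc ones:

    #[derive(Debug, PartialEq)]
    enum PassMode {
        CastToInt { size_bytes: u64 },
        Indirect,
    }

    // Decide how a Rust-ABI aggregate argument is passed, given its size,
    // whether it is sized, and the target's pointer width in bytes.
    fn fixup(size_bytes: u64, is_sized: bool, pointer_bytes: u64) -> PassMode {
        if !is_sized || size_bytes > pointer_bytes {
            PassMode::Indirect
        } else {
            // Small aggregates are passed as an appropriately sized integer so
            // the backend never sees an aggregate type for them.
            PassMode::CastToInt { size_bytes }
        }
    }

    fn main() {
        assert_eq!(fixup(4, true, 8), PassMode::CastToInt { size_bytes: 4 });
        assert_eq!(fixup(24, true, 8), PassMode::Indirect);
        assert_eq!(fixup(0, false, 8), PassMode::Indirect);
    }
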
diff --git a/compiler/rustc_ty_utils/src/assoc.rs b/compiler/rustc_ty_utils/src/assoc.rs
index 515a73ead..424b52309 100644
--- a/compiler/rustc_ty_utils/src/assoc.rs
+++ b/compiler/rustc_ty_utils/src/assoc.rs
@@ -17,10 +17,10 @@ fn associated_item_def_ids(tcx: TyCtxt<'_>, def_id: DefId) -> &[DefId] {
let item = tcx.hir().expect_item(def_id.expect_local());
match item.kind {
hir::ItemKind::Trait(.., ref trait_item_refs) => tcx.arena.alloc_from_iter(
- trait_item_refs.iter().map(|trait_item_ref| trait_item_ref.id.def_id.to_def_id()),
+ trait_item_refs.iter().map(|trait_item_ref| trait_item_ref.id.owner_id.to_def_id()),
),
hir::ItemKind::Impl(ref impl_) => tcx.arena.alloc_from_iter(
- impl_.items.iter().map(|impl_item_ref| impl_item_ref.id.def_id.to_def_id()),
+ impl_.items.iter().map(|impl_item_ref| impl_item_ref.id.owner_id.to_def_id()),
),
hir::ItemKind::TraitAlias(..) => &[],
_ => span_bug!(item.span, "associated_item_def_ids: not impl or trait"),
@@ -42,11 +42,11 @@ fn impl_item_implementor_ids(tcx: TyCtxt<'_>, impl_id: DefId) -> FxHashMap<DefId
fn associated_item(tcx: TyCtxt<'_>, def_id: DefId) -> ty::AssocItem {
let id = tcx.hir().local_def_id_to_hir_id(def_id.expect_local());
let parent_def_id = tcx.hir().get_parent_item(id);
- let parent_item = tcx.hir().expect_item(parent_def_id);
+ let parent_item = tcx.hir().expect_item(parent_def_id.def_id);
match parent_item.kind {
hir::ItemKind::Impl(ref impl_) => {
if let Some(impl_item_ref) =
- impl_.items.iter().find(|i| i.id.def_id.to_def_id() == def_id)
+ impl_.items.iter().find(|i| i.id.owner_id.to_def_id() == def_id)
{
let assoc_item = associated_item_from_impl_item_ref(impl_item_ref);
debug_assert_eq!(assoc_item.def_id, def_id);
@@ -56,7 +56,7 @@ fn associated_item(tcx: TyCtxt<'_>, def_id: DefId) -> ty::AssocItem {
hir::ItemKind::Trait(.., ref trait_item_refs) => {
if let Some(trait_item_ref) =
- trait_item_refs.iter().find(|i| i.id.def_id.to_def_id() == def_id)
+ trait_item_refs.iter().find(|i| i.id.owner_id.to_def_id() == def_id)
{
let assoc_item = associated_item_from_trait_item_ref(trait_item_ref);
debug_assert_eq!(assoc_item.def_id, def_id);
@@ -75,7 +75,7 @@ fn associated_item(tcx: TyCtxt<'_>, def_id: DefId) -> ty::AssocItem {
}
fn associated_item_from_trait_item_ref(trait_item_ref: &hir::TraitItemRef) -> ty::AssocItem {
- let def_id = trait_item_ref.id.def_id;
+ let owner_id = trait_item_ref.id.owner_id;
let (kind, has_self) = match trait_item_ref.kind {
hir::AssocItemKind::Const => (ty::AssocKind::Const, false),
hir::AssocItemKind::Fn { has_self } => (ty::AssocKind::Fn, has_self),
@@ -85,15 +85,15 @@ fn associated_item_from_trait_item_ref(trait_item_ref: &hir::TraitItemRef) -> ty
ty::AssocItem {
name: trait_item_ref.ident.name,
kind,
- def_id: def_id.to_def_id(),
- trait_item_def_id: Some(def_id.to_def_id()),
+ def_id: owner_id.to_def_id(),
+ trait_item_def_id: Some(owner_id.to_def_id()),
container: ty::TraitContainer,
fn_has_self_parameter: has_self,
}
}
fn associated_item_from_impl_item_ref(impl_item_ref: &hir::ImplItemRef) -> ty::AssocItem {
- let def_id = impl_item_ref.id.def_id;
+ let def_id = impl_item_ref.id.owner_id;
let (kind, has_self) = match impl_item_ref.kind {
hir::AssocItemKind::Const => (ty::AssocKind::Const, false),
hir::AssocItemKind::Fn { has_self } => (ty::AssocKind::Fn, has_self),
diff --git a/compiler/rustc_ty_utils/src/common_traits.rs b/compiler/rustc_ty_utils/src/common_traits.rs
index cedc84d97..d3169b6d9 100644
--- a/compiler/rustc_ty_utils/src/common_traits.rs
+++ b/compiler/rustc_ty_utils/src/common_traits.rs
@@ -29,15 +29,8 @@ fn is_item_raw<'tcx>(
) -> bool {
let (param_env, ty) = query.into_parts();
let trait_def_id = tcx.require_lang_item(item, None);
- tcx.infer_ctxt().enter(|infcx| {
- traits::type_known_to_meet_bound_modulo_regions(
- &infcx,
- param_env,
- ty,
- trait_def_id,
- DUMMY_SP,
- )
- })
+ let infcx = tcx.infer_ctxt().build();
+ traits::type_known_to_meet_bound_modulo_regions(&infcx, param_env, ty, trait_def_id, DUMMY_SP)
}
pub(crate) fn provide(providers: &mut ty::query::Providers) {
diff --git a/compiler/rustc_ty_utils/src/consts.rs b/compiler/rustc_ty_utils/src/consts.rs
index 7c2f4db94..e057bb668 100644
--- a/compiler/rustc_ty_utils/src/consts.rs
+++ b/compiler/rustc_ty_utils/src/consts.rs
@@ -11,6 +11,8 @@ use rustc_target::abi::VariantIdx;
use std::iter;
+use crate::errors::{GenericConstantTooComplex, GenericConstantTooComplexSub};
+
/// Destructures array, ADT or tuple constants into the constants
/// of their fields.
pub(crate) fn destructure_const<'tcx>(
@@ -93,26 +95,25 @@ impl<'a, 'tcx> AbstractConstBuilder<'a, 'tcx> {
self.body.exprs[self.body_id].span
}
- fn error(&mut self, span: Span, msg: &str) -> Result<!, ErrorGuaranteed> {
- let reported = self
- .tcx
- .sess
- .struct_span_err(self.root_span(), "overly complex generic constant")
- .span_label(span, msg)
- .help("consider moving this anonymous constant into a `const` function")
- .emit();
+ fn error(&mut self, sub: GenericConstantTooComplexSub) -> Result<!, ErrorGuaranteed> {
+ let reported = self.tcx.sess.emit_err(GenericConstantTooComplex {
+ span: self.root_span(),
+ maybe_supported: None,
+ sub,
+ });
Err(reported)
}
- fn maybe_supported_error(&mut self, span: Span, msg: &str) -> Result<!, ErrorGuaranteed> {
- let reported = self
- .tcx
- .sess
- .struct_span_err(self.root_span(), "overly complex generic constant")
- .span_label(span, msg)
- .help("consider moving this anonymous constant into a `const` function")
- .note("this operation may be supported in the future")
- .emit();
+
+ fn maybe_supported_error(
+ &mut self,
+ sub: GenericConstantTooComplexSub,
+ ) -> Result<!, ErrorGuaranteed> {
+ let reported = self.tcx.sess.emit_err(GenericConstantTooComplex {
+ span: self.root_span(),
+ maybe_supported: Some(()),
+ sub,
+ });
Err(reported)
}
@@ -134,30 +135,30 @@ impl<'a, 'tcx> AbstractConstBuilder<'a, 'tcx> {
impl<'a, 'tcx> IsThirPolymorphic<'a, 'tcx> {
fn expr_is_poly(&mut self, expr: &thir::Expr<'tcx>) -> bool {
- if expr.ty.has_param_types_or_consts() {
+ if expr.ty.has_non_region_param() {
return true;
}
match expr.kind {
- thir::ExprKind::NamedConst { substs, .. } => substs.has_param_types_or_consts(),
+ thir::ExprKind::NamedConst { substs, .. } => substs.has_non_region_param(),
thir::ExprKind::ConstParam { .. } => true,
thir::ExprKind::Repeat { value, count } => {
self.visit_expr(&self.thir()[value]);
- count.has_param_types_or_consts()
+ count.has_non_region_param()
}
_ => false,
}
}
fn pat_is_poly(&mut self, pat: &thir::Pat<'tcx>) -> bool {
- if pat.ty.has_param_types_or_consts() {
+ if pat.ty.has_non_region_param() {
return true;
}
- match pat.kind.as_ref() {
- thir::PatKind::Constant { value } => value.has_param_types_or_consts(),
- thir::PatKind::Range(thir::PatRange { lo, hi, .. }) => {
- lo.has_param_types_or_consts() || hi.has_param_types_or_consts()
+ match pat.kind {
+ thir::PatKind::Constant { value } => value.has_non_region_param(),
+ thir::PatKind::Range(box thir::PatRange { lo, hi, .. }) => {
+ lo.has_non_region_param() || hi.has_non_region_param()
}
_ => false,
}
@@ -221,17 +222,6 @@ impl<'a, 'tcx> AbstractConstBuilder<'a, 'tcx> {
debug!("AbstractConstBuilder::build: body={:?}", &*self.body);
self.recurse_build(self.body_id)?;
- for n in self.nodes.iter() {
- if let Node::Leaf(ct) = n {
- if let ty::ConstKind::Unevaluated(ct) = ct.kind() {
- // `AbstractConst`s should not contain any promoteds as they require references which
- // are not allowed.
- assert_eq!(ct.promoted, None);
- assert_eq!(ct, self.tcx.erase_regions(ct));
- }
- }
- }
-
Ok(self.tcx.arena.alloc_from_iter(self.nodes.into_iter()))
}
@@ -243,22 +233,23 @@ impl<'a, 'tcx> AbstractConstBuilder<'a, 'tcx> {
&ExprKind::Scope { value, .. } => self.recurse_build(value)?,
&ExprKind::PlaceTypeAscription { source, .. }
| &ExprKind::ValueTypeAscription { source, .. } => self.recurse_build(source)?,
- &ExprKind::Literal { lit, neg} => {
+ &ExprKind::Literal { lit, neg } => {
let sp = node.span;
- let constant =
- match self.tcx.at(sp).lit_to_const(LitToConstInput { lit: &lit.node, ty: node.ty, neg }) {
- Ok(c) => c,
- Err(LitToConstError::Reported) => {
- self.tcx.const_error(node.ty)
- }
- Err(LitToConstError::TypeError) => {
- bug!("encountered type error in lit_to_const")
- }
- };
+ let constant = match self.tcx.at(sp).lit_to_const(LitToConstInput {
+ lit: &lit.node,
+ ty: node.ty,
+ neg,
+ }) {
+ Ok(c) => c,
+ Err(LitToConstError::Reported) => self.tcx.const_error(node.ty),
+ Err(LitToConstError::TypeError) => {
+ bug!("encountered type error in lit_to_const")
+ }
+ };
self.nodes.push(Node::Leaf(constant))
}
- &ExprKind::NonHirLiteral { lit , user_ty: _} => {
+ &ExprKind::NonHirLiteral { lit, user_ty: _ } => {
let val = ty::ValTree::from_scalar_int(lit);
self.nodes.push(Node::Leaf(ty::Const::from_value(self.tcx, val, node.ty)))
}
@@ -267,21 +258,20 @@ impl<'a, 'tcx> AbstractConstBuilder<'a, 'tcx> {
self.nodes.push(Node::Leaf(ty::Const::from_value(self.tcx, val, node.ty)))
}
&ExprKind::NamedConst { def_id, substs, user_ty: _ } => {
- let uneval = ty::Unevaluated::new(ty::WithOptConstParam::unknown(def_id), substs);
+ let uneval =
+ ty::UnevaluatedConst::new(ty::WithOptConstParam::unknown(def_id), substs);
- let constant = self.tcx.mk_const(ty::ConstS {
- kind: ty::ConstKind::Unevaluated(uneval),
- ty: node.ty,
- });
+ let constant = self
+ .tcx
+ .mk_const(ty::ConstS { kind: ty::ConstKind::Unevaluated(uneval), ty: node.ty });
self.nodes.push(Node::Leaf(constant))
}
- ExprKind::ConstParam {param, ..} => {
- let const_param = self.tcx.mk_const(ty::ConstS {
- kind: ty::ConstKind::Param(*param),
- ty: node.ty,
- });
+ ExprKind::ConstParam { param, .. } => {
+ let const_param = self
+ .tcx
+ .mk_const(ty::ConstS { kind: ty::ConstKind::Param(*param), ty: node.ty });
self.nodes.push(Node::Leaf(const_param))
}
@@ -311,8 +301,15 @@ impl<'a, 'tcx> AbstractConstBuilder<'a, 'tcx> {
// bar::<{ N + 1 }>();
// }
// ```
- ExprKind::Block { body: thir::Block { stmts: box [], expr: Some(e), .. } } => {
- self.recurse_build(*e)?
+ ExprKind::Block { block } => {
+ if let thir::Block { stmts: box [], expr: Some(e), .. } = &self.body.blocks[*block]
+ {
+ self.recurse_build(*e)?
+ } else {
+ self.maybe_supported_error(GenericConstantTooComplexSub::BlockNotSupported(
+ node.span,
+ ))?
+ }
}
// `ExprKind::Use` happens when a `hir::ExprKind::Cast` is a
// "coercion cast" i.e. using a coercion or is a no-op.
@@ -325,7 +322,7 @@ impl<'a, 'tcx> AbstractConstBuilder<'a, 'tcx> {
let arg = self.recurse_build(source)?;
self.nodes.push(Node::Cast(CastKind::As, arg, node.ty))
}
- ExprKind::Borrow{ arg, ..} => {
+ ExprKind::Borrow { arg, .. } => {
let arg_node = &self.body.exprs[*arg];
// Skip reborrows for now until we allow Deref/Borrow/AddressOf
@@ -334,80 +331,69 @@ impl<'a, 'tcx> AbstractConstBuilder<'a, 'tcx> {
if let ExprKind::Deref { arg } = arg_node.kind {
self.recurse_build(arg)?
} else {
- self.maybe_supported_error(
+ self.maybe_supported_error(GenericConstantTooComplexSub::BorrowNotSupported(
node.span,
- "borrowing is not supported in generic constants",
- )?
+ ))?
}
}
// FIXME(generic_const_exprs): We may want to support these.
- ExprKind::AddressOf { .. } | ExprKind::Deref {..}=> self.maybe_supported_error(
- node.span,
- "dereferencing or taking the address is not supported in generic constants",
- )?,
- ExprKind::Repeat { .. } | ExprKind::Array { .. } => self.maybe_supported_error(
- node.span,
- "array construction is not supported in generic constants",
+ ExprKind::AddressOf { .. } | ExprKind::Deref { .. } => self.maybe_supported_error(
+ GenericConstantTooComplexSub::AddressAndDerefNotSupported(node.span),
)?,
- ExprKind::Block { .. } => self.maybe_supported_error(
- node.span,
- "blocks are not supported in generic constant",
+ ExprKind::Repeat { .. } | ExprKind::Array { .. } => self.maybe_supported_error(
+ GenericConstantTooComplexSub::ArrayNotSupported(node.span),
)?,
ExprKind::NeverToAny { .. } => self.maybe_supported_error(
- node.span,
- "converting nevers to any is not supported in generic constant",
+ GenericConstantTooComplexSub::NeverToAnyNotSupported(node.span),
)?,
ExprKind::Tuple { .. } => self.maybe_supported_error(
- node.span,
- "tuple construction is not supported in generic constants",
+ GenericConstantTooComplexSub::TupleNotSupported(node.span),
)?,
ExprKind::Index { .. } => self.maybe_supported_error(
- node.span,
- "indexing is not supported in generic constant",
+ GenericConstantTooComplexSub::IndexNotSupported(node.span),
)?,
ExprKind::Field { .. } => self.maybe_supported_error(
- node.span,
- "field access is not supported in generic constant",
+ GenericConstantTooComplexSub::FieldNotSupported(node.span),
)?,
ExprKind::ConstBlock { .. } => self.maybe_supported_error(
- node.span,
- "const blocks are not supported in generic constant",
- )?,
- ExprKind::Adt(_) => self.maybe_supported_error(
- node.span,
- "struct/enum construction is not supported in generic constants",
+ GenericConstantTooComplexSub::ConstBlockNotSupported(node.span),
)?,
+ ExprKind::Adt(_) => self
+ .maybe_supported_error(GenericConstantTooComplexSub::AdtNotSupported(node.span))?,
// don't know if this is correct
- ExprKind::Pointer { .. } =>
- self.error(node.span, "pointer casts are not allowed in generic constants")?,
- ExprKind::Yield { .. } =>
- self.error(node.span, "generator control flow is not allowed in generic constants")?,
- ExprKind::Continue { .. } | ExprKind::Break { .. } | ExprKind::Loop { .. } => self
- .error(
- node.span,
- "loops and loop control flow are not supported in generic constants",
- )?,
- ExprKind::Box { .. } =>
- self.error(node.span, "allocations are not allowed in generic constants")?,
+ ExprKind::Pointer { .. } => {
+ self.error(GenericConstantTooComplexSub::PointerNotSupported(node.span))?
+ }
+ ExprKind::Yield { .. } => {
+ self.error(GenericConstantTooComplexSub::YieldNotSupported(node.span))?
+ }
+ ExprKind::Continue { .. } | ExprKind::Break { .. } | ExprKind::Loop { .. } => {
+ self.error(GenericConstantTooComplexSub::LoopNotSupported(node.span))?
+ }
+ ExprKind::Box { .. } => {
+ self.error(GenericConstantTooComplexSub::BoxNotSupported(node.span))?
+ }
ExprKind::Unary { .. } => unreachable!(),
// we handle valid unary/binary ops above
- ExprKind::Binary { .. } =>
- self.error(node.span, "unsupported binary operation in generic constants")?,
- ExprKind::LogicalOp { .. } =>
- self.error(node.span, "unsupported operation in generic constants, short-circuiting operations would imply control flow")?,
+ ExprKind::Binary { .. } => {
+ self.error(GenericConstantTooComplexSub::BinaryNotSupported(node.span))?
+ }
+ ExprKind::LogicalOp { .. } => {
+ self.error(GenericConstantTooComplexSub::LogicalOpNotSupported(node.span))?
+ }
ExprKind::Assign { .. } | ExprKind::AssignOp { .. } => {
- self.error(node.span, "assignment is not supported in generic constants")?
+ self.error(GenericConstantTooComplexSub::AssignNotSupported(node.span))?
+ }
+ ExprKind::Closure { .. } | ExprKind::Return { .. } => {
+ self.error(GenericConstantTooComplexSub::ClosureAndReturnNotSupported(node.span))?
}
- ExprKind::Closure { .. } | ExprKind::Return { .. } => self.error(
- node.span,
- "closures and function keywords are not supported in generic constants",
- )?,
// let expressions imply control flow
- ExprKind::Match { .. } | ExprKind::If { .. } | ExprKind::Let { .. } =>
- self.error(node.span, "control flow is not supported in generic constants")?,
+ ExprKind::Match { .. } | ExprKind::If { .. } | ExprKind::Let { .. } => {
+ self.error(GenericConstantTooComplexSub::ControlFlowNotSupported(node.span))?
+ }
ExprKind::InlineAsm { .. } => {
- self.error(node.span, "assembly is not supported in generic constants")?
+ self.error(GenericConstantTooComplexSub::InlineAsmNotSupported(node.span))?
}
// we don't permit let stmts so `VarRef` and `UpvarRef` can't happen
@@ -415,7 +401,7 @@ impl<'a, 'tcx> AbstractConstBuilder<'a, 'tcx> {
| ExprKind::UpvarRef { .. }
| ExprKind::StaticRef { .. }
| ExprKind::ThreadLocalRef(_) => {
- self.error(node.span, "unsupported operation in generic constant")?
+ self.error(GenericConstantTooComplexSub::OperationNotSupported(node.span))?
}
})
}
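
The builder above is what decides which `generic_const_exprs` expressions stay representable: arithmetic over const parameters and literals becomes abstract-const nodes, while the constructs matched further down (blocks with statements, borrows, loops, `if`/`match`, inline asm, ...) bail out through `error`/`maybe_supported_error`. A rough nightly-only illustration of the accepted shape, reusing the `bar::<{ N + 1 }>()` pattern from the comment above (ordinary user code, not part of this diff; the `where [(); N + 1]:` bound is the usual evaluatability bound this incomplete feature asks for):

    #![feature(generic_const_exprs)] // incomplete, nightly-only feature
    #![allow(incomplete_features)]

    fn bar<const M: usize>() {}

    fn foo<const N: usize>()
    where
        [(); N + 1]:, // marks `N + 1` as an evaluatable const expression
    {
        // `N + 1` lowers to a small abstract const (a binary op over two leaves),
        // so it is accepted; a block with statements, a borrow, a loop, or an
        // `if` in the same position would hit the `*NotSupported` errors above.
        bar::<{ N + 1 }>();
    }

    fn main() {
        foo::<3>();
    }
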
diff --git a/compiler/rustc_ty_utils/src/errors.rs b/compiler/rustc_ty_utils/src/errors.rs
new file mode 100644
index 000000000..c05eeb353
--- /dev/null
+++ b/compiler/rustc_ty_utils/src/errors.rs
@@ -0,0 +1,69 @@
+//! Errors emitted by ty_utils
+
+use rustc_macros::{Diagnostic, Subdiagnostic};
+use rustc_middle::ty::Ty;
+use rustc_span::Span;
+
+#[derive(Diagnostic)]
+#[diag(ty_utils_needs_drop_overflow)]
+pub struct NeedsDropOverflow<'tcx> {
+ pub query_ty: Ty<'tcx>,
+}
+
+#[derive(Diagnostic)]
+#[diag(ty_utils_generic_constant_too_complex)]
+#[help]
+pub struct GenericConstantTooComplex {
+ #[primary_span]
+ pub span: Span,
+ #[note(maybe_supported)]
+ pub maybe_supported: Option<()>,
+ #[subdiagnostic]
+ pub sub: GenericConstantTooComplexSub,
+}
+
+#[derive(Subdiagnostic)]
+pub enum GenericConstantTooComplexSub {
+ #[label(ty_utils_borrow_not_supported)]
+ BorrowNotSupported(#[primary_span] Span),
+ #[label(ty_utils_address_and_deref_not_supported)]
+ AddressAndDerefNotSupported(#[primary_span] Span),
+ #[label(ty_utils_array_not_supported)]
+ ArrayNotSupported(#[primary_span] Span),
+ #[label(ty_utils_block_not_supported)]
+ BlockNotSupported(#[primary_span] Span),
+ #[label(ty_utils_never_to_any_not_supported)]
+ NeverToAnyNotSupported(#[primary_span] Span),
+ #[label(ty_utils_tuple_not_supported)]
+ TupleNotSupported(#[primary_span] Span),
+ #[label(ty_utils_index_not_supported)]
+ IndexNotSupported(#[primary_span] Span),
+ #[label(ty_utils_field_not_supported)]
+ FieldNotSupported(#[primary_span] Span),
+ #[label(ty_utils_const_block_not_supported)]
+ ConstBlockNotSupported(#[primary_span] Span),
+ #[label(ty_utils_adt_not_supported)]
+ AdtNotSupported(#[primary_span] Span),
+ #[label(ty_utils_pointer_not_supported)]
+ PointerNotSupported(#[primary_span] Span),
+ #[label(ty_utils_yield_not_supported)]
+ YieldNotSupported(#[primary_span] Span),
+ #[label(ty_utils_loop_not_supported)]
+ LoopNotSupported(#[primary_span] Span),
+ #[label(ty_utils_box_not_supported)]
+ BoxNotSupported(#[primary_span] Span),
+ #[label(ty_utils_binary_not_supported)]
+ BinaryNotSupported(#[primary_span] Span),
+ #[label(ty_utils_logical_op_not_supported)]
+ LogicalOpNotSupported(#[primary_span] Span),
+ #[label(ty_utils_assign_not_supported)]
+ AssignNotSupported(#[primary_span] Span),
+ #[label(ty_utils_closure_and_return_not_supported)]
+ ClosureAndReturnNotSupported(#[primary_span] Span),
+ #[label(ty_utils_control_flow_not_supported)]
+ ControlFlowNotSupported(#[primary_span] Span),
+ #[label(ty_utils_inline_asm_not_supported)]
+ InlineAsmNotSupported(#[primary_span] Span),
+ #[label(ty_utils_operation_not_supported)]
+ OperationNotSupported(#[primary_span] Span),
+}
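
These structs are the other half of the `emit_err` calls in consts.rs above: the Rust side carries only spans and structure, and the user-facing wording is looked up via the Fluent slugs in `#[diag(...)]`, `#[note(...)]` and `#[label(...)]` (the strings deleted from consts.rs, e.g. "overly complex generic constant" and the per-construct labels, presumably move into the crate's Fluent resource, which is not shown in this diff). A minimal sketch of the emitting side, assuming rustc-internal context (`tcx`, `root_span`, `expr_span`) is in scope; this is not standalone code:

    // Pick the subdiagnostic for the offending construct...
    let sub = GenericConstantTooComplexSub::BorrowNotSupported(expr_span);
    // ...and emit the typed diagnostic. `emit_err` hands back the
    // ErrorGuaranteed that the builder then returns as `Err(reported)`.
    let reported = tcx.sess.emit_err(GenericConstantTooComplex {
        span: root_span,
        maybe_supported: Some(()), // `Some(())` switches on the optional #[note]
        sub,
    });
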
diff --git a/compiler/rustc_ty_utils/src/implied_bounds.rs b/compiler/rustc_ty_utils/src/implied_bounds.rs
new file mode 100644
index 000000000..f0d8c240e
--- /dev/null
+++ b/compiler/rustc_ty_utils/src/implied_bounds.rs
@@ -0,0 +1,61 @@
+use crate::rustc_middle::ty::DefIdTree;
+use rustc_hir::{def::DefKind, def_id::DefId};
+use rustc_middle::ty::{self, Ty, TyCtxt};
+
+pub fn provide(providers: &mut ty::query::Providers) {
+ *providers = ty::query::Providers { assumed_wf_types, ..*providers };
+}
+
+fn assumed_wf_types<'tcx>(tcx: TyCtxt<'tcx>, def_id: DefId) -> &'tcx ty::List<Ty<'tcx>> {
+ match tcx.def_kind(def_id) {
+ DefKind::Fn => {
+ let sig = tcx.fn_sig(def_id);
+ let liberated_sig = tcx.liberate_late_bound_regions(def_id, sig);
+ liberated_sig.inputs_and_output
+ }
+ DefKind::AssocFn => {
+ let sig = tcx.fn_sig(def_id);
+ let liberated_sig = tcx.liberate_late_bound_regions(def_id, sig);
+ let mut assumed_wf_types: Vec<_> =
+ tcx.assumed_wf_types(tcx.parent(def_id)).as_slice().into();
+ assumed_wf_types.extend(liberated_sig.inputs_and_output);
+ tcx.intern_type_list(&assumed_wf_types)
+ }
+ DefKind::Impl => match tcx.impl_trait_ref(def_id) {
+ Some(trait_ref) => {
+ let types: Vec<_> = trait_ref.substs.types().collect();
+ tcx.intern_type_list(&types)
+ }
+ // Only the impl self type
+ None => tcx.intern_type_list(&[tcx.type_of(def_id)]),
+ },
+ DefKind::AssocConst | DefKind::AssocTy => tcx.assumed_wf_types(tcx.parent(def_id)),
+ DefKind::Mod
+ | DefKind::Struct
+ | DefKind::Union
+ | DefKind::Enum
+ | DefKind::Variant
+ | DefKind::Trait
+ | DefKind::TyAlias
+ | DefKind::ForeignTy
+ | DefKind::TraitAlias
+ | DefKind::TyParam
+ | DefKind::Const
+ | DefKind::ConstParam
+ | DefKind::Static(_)
+ | DefKind::Ctor(_, _)
+ | DefKind::Macro(_)
+ | DefKind::ExternCrate
+ | DefKind::Use
+ | DefKind::ForeignMod
+ | DefKind::AnonConst
+ | DefKind::InlineConst
+ | DefKind::OpaqueTy
+ | DefKind::ImplTraitPlaceholder
+ | DefKind::Field
+ | DefKind::LifetimeParam
+ | DefKind::GlobalAsm
+ | DefKind::Closure
+ | DefKind::Generator => ty::List::empty(),
+ }
+}
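
A concrete reading of the `DefKind::Impl` and `DefKind::AssocFn` arms, using ordinary user code as a hedged example (not part of this diff): for a trait impl the assumed-well-formed types are the substituted types of the trait reference, and an associated fn appends its own liberated signature types to its parent's list.

    struct Wrapper<T>(T);

    // Impl: assumed-WF types are the trait-ref types, [Wrapper<T>, &'a T].
    impl<'a, T: Clone> From<&'a T> for Wrapper<T> {
        // AssocFn: the parent's list plus the fn's inputs and output,
        // [Wrapper<T>, &'a T, &'a T, Wrapper<T>].
        fn from(x: &'a T) -> Wrapper<T> {
            Wrapper(x.clone())
        }
    }
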
diff --git a/compiler/rustc_ty_utils/src/instance.rs b/compiler/rustc_ty_utils/src/instance.rs
index bd1d568cd..6436713b3 100644
--- a/compiler/rustc_ty_utils/src/instance.rs
+++ b/compiler/rustc_ty_utils/src/instance.rs
@@ -3,115 +3,11 @@ use rustc_hir::def_id::{DefId, LocalDefId};
use rustc_infer::infer::TyCtxtInferExt;
use rustc_middle::traits::CodegenObligationError;
use rustc_middle::ty::subst::SubstsRef;
-use rustc_middle::ty::{
- self, Binder, Instance, Ty, TyCtxt, TypeSuperVisitable, TypeVisitable, TypeVisitor,
-};
-use rustc_span::{sym, DUMMY_SP};
+use rustc_middle::ty::{self, Instance, TyCtxt, TypeVisitable};
+use rustc_span::sym;
use rustc_trait_selection::traits;
use traits::{translate_substs, Reveal};
-use rustc_data_structures::sso::SsoHashSet;
-use std::collections::btree_map::Entry;
-use std::collections::BTreeMap;
-use std::ops::ControlFlow;
-
-use tracing::debug;
-
-// FIXME(#86795): `BoundVarsCollector` here should **NOT** be used
-// outside of `resolve_associated_item`. It's just to address #64494,
-// #83765, and #85848 which are creating bound types/regions that lose
-// their `Binder` *unintentionally*.
-// It's ideal to remove `BoundVarsCollector` and just use
-// `ty::Binder::*` methods but we use this stopgap until we figure out
-// the "real" fix.
-struct BoundVarsCollector<'tcx> {
- binder_index: ty::DebruijnIndex,
- vars: BTreeMap<u32, ty::BoundVariableKind>,
- // We may encounter the same variable at different levels of binding, so
- // this can't just be `Ty`
- visited: SsoHashSet<(ty::DebruijnIndex, Ty<'tcx>)>,
-}
-
-impl<'tcx> BoundVarsCollector<'tcx> {
- fn new() -> Self {
- BoundVarsCollector {
- binder_index: ty::INNERMOST,
- vars: BTreeMap::new(),
- visited: SsoHashSet::default(),
- }
- }
-
- fn into_vars(self, tcx: TyCtxt<'tcx>) -> &'tcx ty::List<ty::BoundVariableKind> {
- let max = self.vars.iter().map(|(k, _)| *k).max().unwrap_or(0);
- for i in 0..max {
- if let None = self.vars.get(&i) {
- panic!("Unknown variable: {:?}", i);
- }
- }
-
- tcx.mk_bound_variable_kinds(self.vars.into_iter().map(|(_, v)| v))
- }
-}
-
-impl<'tcx> TypeVisitor<'tcx> for BoundVarsCollector<'tcx> {
- type BreakTy = ();
-
- fn visit_binder<T: TypeVisitable<'tcx>>(
- &mut self,
- t: &Binder<'tcx, T>,
- ) -> ControlFlow<Self::BreakTy> {
- self.binder_index.shift_in(1);
- let result = t.super_visit_with(self);
- self.binder_index.shift_out(1);
- result
- }
-
- fn visit_ty(&mut self, t: Ty<'tcx>) -> ControlFlow<Self::BreakTy> {
- if t.outer_exclusive_binder() < self.binder_index
- || !self.visited.insert((self.binder_index, t))
- {
- return ControlFlow::CONTINUE;
- }
- match *t.kind() {
- ty::Bound(debruijn, bound_ty) if debruijn == self.binder_index => {
- match self.vars.entry(bound_ty.var.as_u32()) {
- Entry::Vacant(entry) => {
- entry.insert(ty::BoundVariableKind::Ty(bound_ty.kind));
- }
- Entry::Occupied(entry) => match entry.get() {
- ty::BoundVariableKind::Ty(_) => {}
- _ => bug!("Conflicting bound vars"),
- },
- }
- }
-
- _ => (),
- };
-
- t.super_visit_with(self)
- }
-
- fn visit_region(&mut self, r: ty::Region<'tcx>) -> ControlFlow<Self::BreakTy> {
- match *r {
- ty::ReLateBound(index, br) if index == self.binder_index => {
- match self.vars.entry(br.var.as_u32()) {
- Entry::Vacant(entry) => {
- entry.insert(ty::BoundVariableKind::Region(br.kind));
- }
- Entry::Occupied(entry) => match entry.get() {
- ty::BoundVariableKind::Region(_) => {}
- _ => bug!("Conflicting bound vars"),
- },
- }
- }
-
- _ => (),
- };
-
- r.super_visit_with(self)
- }
-}
-
fn resolve_instance<'tcx>(
tcx: TyCtxt<'tcx>,
key: ty::ParamEnvAnd<'tcx, (DefId, SubstsRef<'tcx>)>,
@@ -203,19 +99,14 @@ fn resolve_associated_item<'tcx>(
let trait_ref = ty::TraitRef::from_method(tcx, trait_id, rcvr_substs);
- // See FIXME on `BoundVarsCollector`.
- let mut bound_vars_collector = BoundVarsCollector::new();
- trait_ref.visit_with(&mut bound_vars_collector);
- let trait_binder = ty::Binder::bind_with_vars(trait_ref, bound_vars_collector.into_vars(tcx));
- let vtbl = match tcx.codegen_fulfill_obligation((param_env, trait_binder)) {
+ let vtbl = match tcx.codegen_select_candidate((param_env, ty::Binder::dummy(trait_ref))) {
Ok(vtbl) => vtbl,
Err(CodegenObligationError::Ambiguity) => {
let reported = tcx.sess.delay_span_bug(
tcx.def_span(trait_item_id),
&format!(
- "encountered ambiguity selecting `{:?}` during codegen, presuming due to \
+ "encountered ambiguity selecting `{trait_ref:?}` during codegen, presuming due to \
overflow or prior type error",
- trait_binder
),
);
return Err(reported);
@@ -243,19 +134,17 @@ fn resolve_associated_item<'tcx>(
.unwrap_or_else(|| {
bug!("{:?} not found in {:?}", trait_item_id, impl_data.impl_def_id);
});
-
- let substs = tcx.infer_ctxt().enter(|infcx| {
- let param_env = param_env.with_reveal_all_normalized(tcx);
- let substs = rcvr_substs.rebase_onto(tcx, trait_def_id, impl_data.substs);
- let substs = translate_substs(
- &infcx,
- param_env,
- impl_data.impl_def_id,
- substs,
- leaf_def.defining_node,
- );
- infcx.tcx.erase_regions(substs)
- });
+ let infcx = tcx.infer_ctxt().build();
+ let param_env = param_env.with_reveal_all_normalized(tcx);
+ let substs = rcvr_substs.rebase_onto(tcx, trait_def_id, impl_data.substs);
+ let substs = translate_substs(
+ &infcx,
+ param_env,
+ impl_data.impl_def_id,
+ substs,
+ leaf_def.defining_node,
+ );
+ let substs = infcx.tcx.erase_regions(substs);
// Since this is a trait item, we need to see if the item is either a trait default item
// or a specialization because we can't resolve those unless we can `Reveal::All`.
@@ -280,9 +169,13 @@ fn resolve_associated_item<'tcx>(
return Ok(None);
}
- // If the item does not have a value, then we cannot return an instance.
+ // Any final impl is required to define all associated items.
if !leaf_def.item.defaultness(tcx).has_value() {
- return Ok(None);
+ let guard = tcx.sess.delay_span_bug(
+ tcx.def_span(leaf_def.item.def_id),
+ "missing value for assoc item in impl",
+ );
+ return Err(guard);
}
let substs = tcx.erase_regions(substs);
@@ -291,40 +184,14 @@ fn resolve_associated_item<'tcx>(
// a `trait` to an associated `const` definition in an `impl`, where
// the definition in the `impl` has the wrong type (for which an
// error has already been/will be emitted elsewhere).
- //
- // NB: this may be expensive, we try to skip it in all the cases where
- // we know the error would've been caught (e.g. in an upstream crate).
- //
- // A better approach might be to just introduce a query (returning
- // `Result<(), ErrorGuaranteed>`) for the check that `rustc_typeck`
- // performs (i.e. that the definition's type in the `impl` matches
- // the declaration in the `trait`), so that we can cheaply check
- // here if it failed, instead of approximating it.
if leaf_def.item.kind == ty::AssocKind::Const
&& trait_item_id != leaf_def.item.def_id
- && leaf_def.item.def_id.is_local()
+ && let Some(leaf_def_item) = leaf_def.item.def_id.as_local()
{
- let normalized_type_of = |def_id, substs| {
- tcx.subst_and_normalize_erasing_regions(substs, param_env, tcx.type_of(def_id))
- };
-
- let original_ty = normalized_type_of(trait_item_id, rcvr_substs);
- let resolved_ty = normalized_type_of(leaf_def.item.def_id, substs);
-
- if original_ty != resolved_ty {
- let msg = format!(
- "Instance::resolve: inconsistent associated `const` type: \
- was `{}: {}` but resolved to `{}: {}`",
- tcx.def_path_str_with_substs(trait_item_id, rcvr_substs),
- original_ty,
- tcx.def_path_str_with_substs(leaf_def.item.def_id, substs),
- resolved_ty,
- );
- let span = tcx.def_span(leaf_def.item.def_id);
- let reported = tcx.sess.delay_span_bug(span, &msg);
-
- return Err(reported);
- }
+ tcx.compare_assoc_const_impl_item_with_trait_item((
+ leaf_def_item,
+ trait_item_id,
+ ))?;
}
Some(ty::Instance::new(leaf_def.item.def_id, substs))
@@ -369,10 +236,13 @@ fn resolve_associated_item<'tcx>(
if name == sym::clone {
let self_ty = trait_ref.self_ty();
- let is_copy = self_ty.is_copy_modulo_regions(tcx.at(DUMMY_SP), param_env);
+ let is_copy = self_ty.is_copy_modulo_regions(tcx, param_env);
match self_ty.kind() {
_ if is_copy => (),
- ty::Closure(..) | ty::Tuple(..) => {}
+ ty::Generator(..)
+ | ty::GeneratorWitness(..)
+ | ty::Closure(..)
+ | ty::Tuple(..) => {}
_ => return Ok(None),
};
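
The final hunk widens the built-in `Clone` shim from copies, closures and tuples to also cover generators and generator witnesses. As a reminder of what that shim does on stable (generators aside), cloning a capturing, non-`Copy` closure works only because the compiler synthesizes the impl that `resolve_associated_item` resolves through the `sym::clone` path above; a small runnable example, not part of this diff:

    fn main() {
        let s = String::from("hi");
        // Non-Copy capturing closure: its `Clone` impl is a compiler shim.
        let f = move || s.len();
        let g = f.clone();
        assert_eq!(f(), g());
        // Tuples of Clone types go through the same built-in machinery.
        let t = (String::from("a"), 1u8);
        let _t2 = t.clone();
    }
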
diff --git a/compiler/rustc_ty_utils/src/layout.rs b/compiler/rustc_ty_utils/src/layout.rs
new file mode 100644
index 000000000..52ba0eee9
--- /dev/null
+++ b/compiler/rustc_ty_utils/src/layout.rs
@@ -0,0 +1,1803 @@
+use rustc_hir as hir;
+use rustc_index::bit_set::BitSet;
+use rustc_index::vec::{Idx, IndexVec};
+use rustc_middle::mir::{GeneratorLayout, GeneratorSavedLocal};
+use rustc_middle::ty::layout::{
+ IntegerExt, LayoutCx, LayoutError, LayoutOf, TyAndLayout, MAX_SIMD_LANES,
+};
+use rustc_middle::ty::{
+ self, subst::SubstsRef, EarlyBinder, ReprOptions, Ty, TyCtxt, TypeVisitable,
+};
+use rustc_session::{DataTypeKind, FieldInfo, SizeKind, VariantInfo};
+use rustc_span::symbol::Symbol;
+use rustc_span::DUMMY_SP;
+use rustc_target::abi::*;
+
+use std::cmp::{self, Ordering};
+use std::iter;
+use std::num::NonZeroUsize;
+use std::ops::Bound;
+
+use rand::{seq::SliceRandom, SeedableRng};
+use rand_xoshiro::Xoshiro128StarStar;
+
+use crate::layout_sanity_check::sanity_check_layout;
+
+pub fn provide(providers: &mut ty::query::Providers) {
+ *providers = ty::query::Providers { layout_of, ..*providers };
+}
+
+#[instrument(skip(tcx, query), level = "debug")]
+fn layout_of<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ query: ty::ParamEnvAnd<'tcx, Ty<'tcx>>,
+) -> Result<TyAndLayout<'tcx>, LayoutError<'tcx>> {
+ let (param_env, ty) = query.into_parts();
+ debug!(?ty);
+
+ let param_env = param_env.with_reveal_all_normalized(tcx);
+ let unnormalized_ty = ty;
+
+ // FIXME: We might want to have two different versions of `layout_of`:
+ // One that can be called after typecheck has completed and can use
+ // `normalize_erasing_regions` here and another one that can be called
+ // before typecheck has completed and uses `try_normalize_erasing_regions`.
+ let ty = match tcx.try_normalize_erasing_regions(param_env, ty) {
+ Ok(t) => t,
+ Err(normalization_error) => {
+ return Err(LayoutError::NormalizationFailure(ty, normalization_error));
+ }
+ };
+
+ if ty != unnormalized_ty {
+ // Ensure this layout is also cached for the normalized type.
+ return tcx.layout_of(param_env.and(ty));
+ }
+
+ let cx = LayoutCx { tcx, param_env };
+
+ let layout = layout_of_uncached(&cx, ty)?;
+ let layout = TyAndLayout { ty, layout };
+
+ record_layout_for_printing(&cx, layout);
+
+ sanity_check_layout(&cx, &layout);
+
+ Ok(layout)
+}
+
+#[derive(Copy, Clone, Debug)]
+enum StructKind {
+ /// A tuple, closure, or univariant which cannot be coerced to unsized.
+ AlwaysSized,
+ /// A univariant, the last field of which may be coerced to unsized.
+ MaybeUnsized,
+ /// A univariant, but with a prefix of an arbitrary size & alignment (e.g., enum tag).
+ Prefixed(Size, Align),
+}
+
+// Invert a bijective mapping, i.e. `invert(map)[y] = x` if `map[x] = y`.
+// This is used to go between `memory_index` (source field order to memory order)
+// and `inverse_memory_index` (memory order to source field order).
+// See also `FieldsShape::Arbitrary::memory_index` for more details.
+// FIXME(eddyb) build a better abstraction for permutations, if possible.
+fn invert_mapping(map: &[u32]) -> Vec<u32> {
+ let mut inverse = vec![0; map.len()];
+ for i in 0..map.len() {
+ inverse[map[i] as usize] = i as u32;
+ }
+ inverse
+}
+
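
Since the two index directions are easy to mix up, a worked example of `invert_mapping` (a runnable user-level copy of the helper above, not part of this diff): with `memory_index = [2, 0, 1]`, i.e. source field 0 sits third in memory, the inverse lists source indices in memory order.

    fn invert_mapping(map: &[u32]) -> Vec<u32> {
        let mut inverse = vec![0; map.len()];
        for i in 0..map.len() {
            inverse[map[i] as usize] = i as u32;
        }
        inverse
    }

    fn main() {
        // memory_index: source field order -> memory order
        let memory_index = [2u32, 0, 1];
        // inverse_memory_index: memory order -> source field order
        assert_eq!(invert_mapping(&memory_index), vec![1, 2, 0]);
    }
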
+fn scalar_pair<'tcx>(cx: &LayoutCx<'tcx, TyCtxt<'tcx>>, a: Scalar, b: Scalar) -> LayoutS<'tcx> {
+ let dl = cx.data_layout();
+ let b_align = b.align(dl);
+ let align = a.align(dl).max(b_align).max(dl.aggregate_align);
+ let b_offset = a.size(dl).align_to(b_align.abi);
+ let size = (b_offset + b.size(dl)).align_to(align.abi);
+
+ // HACK(nox): We iter on `b` and then `a` because `max_by_key`
+ // returns the last maximum.
+ let largest_niche = Niche::from_scalar(dl, b_offset, b)
+ .into_iter()
+ .chain(Niche::from_scalar(dl, Size::ZERO, a))
+ .max_by_key(|niche| niche.available(dl));
+
+ LayoutS {
+ variants: Variants::Single { index: VariantIdx::new(0) },
+ fields: FieldsShape::Arbitrary {
+ offsets: vec![Size::ZERO, b_offset],
+ memory_index: vec![0, 1],
+ },
+ abi: Abi::ScalarPair(a, b),
+ largest_niche,
+ align,
+ size,
+ }
+}
+
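
Worked numbers for `scalar_pair`, assuming a common 64-bit data layout: with `a = i32` (size 4, align 4) and `b = i64` (size 8, align 8), `b_offset = align_to(4, 8) = 8` and `size = align_to(8 + 8, 8) = 16`. The same arithmetic is visible from plain Rust, though the layout of `(i32, i64)` itself is not a language guarantee:

    use std::mem::{align_of, size_of};

    fn main() {
        // Typically prints "size = 16, align = 8" on 64-bit targets, matching
        // the b_offset/size computation above for an (i32, i64) scalar pair.
        println!("size = {}, align = {}", size_of::<(i32, i64)>(), align_of::<(i32, i64)>());
    }
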
+fn univariant_uninterned<'tcx>(
+ cx: &LayoutCx<'tcx, TyCtxt<'tcx>>,
+ ty: Ty<'tcx>,
+ fields: &[TyAndLayout<'_>],
+ repr: &ReprOptions,
+ kind: StructKind,
+) -> Result<LayoutS<'tcx>, LayoutError<'tcx>> {
+ let dl = cx.data_layout();
+ let pack = repr.pack;
+ if pack.is_some() && repr.align.is_some() {
+ cx.tcx.sess.delay_span_bug(DUMMY_SP, "struct cannot be packed and aligned");
+ return Err(LayoutError::Unknown(ty));
+ }
+
+ let mut align = if pack.is_some() { dl.i8_align } else { dl.aggregate_align };
+
+ let mut inverse_memory_index: Vec<u32> = (0..fields.len() as u32).collect();
+
+ let optimize = !repr.inhibit_struct_field_reordering_opt();
+ if optimize {
+ let end = if let StructKind::MaybeUnsized = kind { fields.len() - 1 } else { fields.len() };
+ let optimizing = &mut inverse_memory_index[..end];
+ let field_align = |f: &TyAndLayout<'_>| {
+ if let Some(pack) = pack { f.align.abi.min(pack) } else { f.align.abi }
+ };
+
+ // If `-Z randomize-layout` was enabled for the type definition we can shuffle
+ // the field ordering to try and catch some code making assumptions about layouts
+ // we don't guarantee
+ if repr.can_randomize_type_layout() {
+ // `ReprOptions.layout_seed` is a deterministic seed that we can use to
+ // randomize field ordering with
+ let mut rng = Xoshiro128StarStar::seed_from_u64(repr.field_shuffle_seed);
+
+ // Shuffle the ordering of the fields
+ optimizing.shuffle(&mut rng);
+
+ // Otherwise we just leave things alone and actually optimize the type's fields
+ } else {
+ match kind {
+ StructKind::AlwaysSized | StructKind::MaybeUnsized => {
+ optimizing.sort_by_key(|&x| {
+ // Place ZSTs first to avoid "interesting offsets",
+ // especially with only one or two non-ZST fields.
+ let f = &fields[x as usize];
+ (!f.is_zst(), cmp::Reverse(field_align(f)))
+ });
+ }
+
+ StructKind::Prefixed(..) => {
+ // Sort in ascending alignment so that the layout stays optimal
+ // regardless of the prefix
+ optimizing.sort_by_key(|&x| field_align(&fields[x as usize]));
+ }
+ }
+
+ // FIXME(Kixiron): We can always shuffle fields within a given alignment class
+ // regardless of the status of `-Z randomize-layout`
+ }
+ }
+
+ // inverse_memory_index holds field indices by increasing memory offset.
+ // That is, if field 5 has offset 0, the first element of inverse_memory_index is 5.
+ // We now write field offsets to the corresponding offset slot;
+ // field 5 with offset 0 puts 0 in offsets[5].
+ // At the bottom of this function, we invert `inverse_memory_index` to
+ // produce `memory_index` (see `invert_mapping`).
+
+ let mut sized = true;
+ let mut offsets = vec![Size::ZERO; fields.len()];
+ let mut offset = Size::ZERO;
+ let mut largest_niche = None;
+ let mut largest_niche_available = 0;
+
+ if let StructKind::Prefixed(prefix_size, prefix_align) = kind {
+ let prefix_align =
+ if let Some(pack) = pack { prefix_align.min(pack) } else { prefix_align };
+ align = align.max(AbiAndPrefAlign::new(prefix_align));
+ offset = prefix_size.align_to(prefix_align);
+ }
+
+ for &i in &inverse_memory_index {
+ let field = fields[i as usize];
+ if !sized {
+ cx.tcx.sess.delay_span_bug(
+ DUMMY_SP,
+ &format!(
+ "univariant: field #{} of `{}` comes after unsized field",
+ offsets.len(),
+ ty
+ ),
+ );
+ }
+
+ if field.is_unsized() {
+ sized = false;
+ }
+
+ // Invariant: offset < dl.obj_size_bound() <= 1<<61
+ let field_align = if let Some(pack) = pack {
+ field.align.min(AbiAndPrefAlign::new(pack))
+ } else {
+ field.align
+ };
+ offset = offset.align_to(field_align.abi);
+ align = align.max(field_align);
+
+ debug!("univariant offset: {:?} field: {:#?}", offset, field);
+ offsets[i as usize] = offset;
+
+ if let Some(mut niche) = field.largest_niche {
+ let available = niche.available(dl);
+ if available > largest_niche_available {
+ largest_niche_available = available;
+ niche.offset += offset;
+ largest_niche = Some(niche);
+ }
+ }
+
+ offset = offset.checked_add(field.size, dl).ok_or(LayoutError::SizeOverflow(ty))?;
+ }
+
+ if let Some(repr_align) = repr.align {
+ align = align.max(AbiAndPrefAlign::new(repr_align));
+ }
+
+ debug!("univariant min_size: {:?}", offset);
+ let min_size = offset;
+
+ // As stated above, inverse_memory_index holds field indices by increasing offset.
+ // This makes it an already-sorted view of the offsets vec.
+ // To invert it, consider:
+ // If field 5 has offset 0, offsets[0] is 5, and memory_index[5] should be 0.
+ // Field 5 would be the first element, so memory_index is i:
+ // Note: if we didn't optimize, it's already right.
+
+ let memory_index =
+ if optimize { invert_mapping(&inverse_memory_index) } else { inverse_memory_index };
+
+ let size = min_size.align_to(align.abi);
+ let mut abi = Abi::Aggregate { sized };
+
+ // Unpack newtype ABIs and find scalar pairs.
+ if sized && size.bytes() > 0 {
+ // All other fields must be ZSTs.
+ let mut non_zst_fields = fields.iter().enumerate().filter(|&(_, f)| !f.is_zst());
+
+ match (non_zst_fields.next(), non_zst_fields.next(), non_zst_fields.next()) {
+ // We have exactly one non-ZST field.
+ (Some((i, field)), None, None) => {
+ // Field fills the struct and it has a scalar or scalar pair ABI.
+ if offsets[i].bytes() == 0 && align.abi == field.align.abi && size == field.size {
+ match field.abi {
+ // For plain scalars, or vectors of them, we can't unpack
+ // newtypes for `#[repr(C)]`, as that affects C ABIs.
+ Abi::Scalar(_) | Abi::Vector { .. } if optimize => {
+ abi = field.abi;
+ }
+ // But scalar pairs are Rust-specific and get
+ // treated as aggregates by C ABIs anyway.
+ Abi::ScalarPair(..) => {
+ abi = field.abi;
+ }
+ _ => {}
+ }
+ }
+ }
+
+ // Two non-ZST fields, and they're both scalars.
+ (Some((i, a)), Some((j, b)), None) => {
+ match (a.abi, b.abi) {
+ (Abi::Scalar(a), Abi::Scalar(b)) => {
+ // Order by the memory placement, not source order.
+ let ((i, a), (j, b)) = if offsets[i] < offsets[j] {
+ ((i, a), (j, b))
+ } else {
+ ((j, b), (i, a))
+ };
+ let pair = scalar_pair(cx, a, b);
+ let pair_offsets = match pair.fields {
+ FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
+ assert_eq!(memory_index, &[0, 1]);
+ offsets
+ }
+ _ => bug!(),
+ };
+ if offsets[i] == pair_offsets[0]
+ && offsets[j] == pair_offsets[1]
+ && align == pair.align
+ && size == pair.size
+ {
+ // We can use `ScalarPair` only when it matches our
+ // already computed layout (including `#[repr(C)]`).
+ abi = pair.abi;
+ }
+ }
+ _ => {}
+ }
+ }
+
+ _ => {}
+ }
+ }
+
+ if fields.iter().any(|f| f.abi.is_uninhabited()) {
+ abi = Abi::Uninhabited;
+ }
+
+ Ok(LayoutS {
+ variants: Variants::Single { index: VariantIdx::new(0) },
+ fields: FieldsShape::Arbitrary { offsets, memory_index },
+ abi,
+ largest_niche,
+ align,
+ size,
+ })
+}
+
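
The field reordering implemented above (ZSTs first, then decreasing alignment, unless the `repr` inhibits it) is why default-`repr(Rust)` structs are often smaller than their `repr(C)` equivalents. A small check (user code, not part of this diff); only the `repr(C)` size is pinned down, and even that assumes the usual 4-byte alignment for `u32`:

    use std::mem::size_of;

    struct Reordered(u8, u32, u16); // default repr: fields may be reordered

    #[repr(C)]
    struct Declared(u8, u32, u16); // repr(C): declaration order plus padding

    fn main() {
        // repr(C): 1 + 3 (pad) + 4 + 2 + 2 (tail pad) = 12 bytes.
        assert_eq!(size_of::<Declared>(), 12);
        // repr(Rust): sorted by alignment (u32, u16, u8), typically 8 bytes.
        println!("Reordered = {} bytes", size_of::<Reordered>());
    }
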
+fn layout_of_uncached<'tcx>(
+ cx: &LayoutCx<'tcx, TyCtxt<'tcx>>,
+ ty: Ty<'tcx>,
+) -> Result<Layout<'tcx>, LayoutError<'tcx>> {
+ let tcx = cx.tcx;
+ let param_env = cx.param_env;
+ let dl = cx.data_layout();
+ let scalar_unit = |value: Primitive| {
+ let size = value.size(dl);
+ assert!(size.bits() <= 128);
+ Scalar::Initialized { value, valid_range: WrappingRange::full(size) }
+ };
+ let scalar = |value: Primitive| tcx.intern_layout(LayoutS::scalar(cx, scalar_unit(value)));
+
+ let univariant = |fields: &[TyAndLayout<'_>], repr: &ReprOptions, kind| {
+ Ok(tcx.intern_layout(univariant_uninterned(cx, ty, fields, repr, kind)?))
+ };
+ debug_assert!(!ty.has_non_region_infer());
+
+ Ok(match *ty.kind() {
+ // Basic scalars.
+ ty::Bool => tcx.intern_layout(LayoutS::scalar(
+ cx,
+ Scalar::Initialized {
+ value: Int(I8, false),
+ valid_range: WrappingRange { start: 0, end: 1 },
+ },
+ )),
+ ty::Char => tcx.intern_layout(LayoutS::scalar(
+ cx,
+ Scalar::Initialized {
+ value: Int(I32, false),
+ valid_range: WrappingRange { start: 0, end: 0x10FFFF },
+ },
+ )),
+ ty::Int(ity) => scalar(Int(Integer::from_int_ty(dl, ity), true)),
+ ty::Uint(ity) => scalar(Int(Integer::from_uint_ty(dl, ity), false)),
+ ty::Float(fty) => scalar(match fty {
+ ty::FloatTy::F32 => F32,
+ ty::FloatTy::F64 => F64,
+ }),
+ ty::FnPtr(_) => {
+ let mut ptr = scalar_unit(Pointer);
+ ptr.valid_range_mut().start = 1;
+ tcx.intern_layout(LayoutS::scalar(cx, ptr))
+ }
+
+ // The never type.
+ ty::Never => tcx.intern_layout(LayoutS {
+ variants: Variants::Single { index: VariantIdx::new(0) },
+ fields: FieldsShape::Primitive,
+ abi: Abi::Uninhabited,
+ largest_niche: None,
+ align: dl.i8_align,
+ size: Size::ZERO,
+ }),
+
+ // Potentially-wide pointers.
+ ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
+ let mut data_ptr = scalar_unit(Pointer);
+ if !ty.is_unsafe_ptr() {
+ data_ptr.valid_range_mut().start = 1;
+ }
+
+ let pointee = tcx.normalize_erasing_regions(param_env, pointee);
+ if pointee.is_sized(tcx, param_env) {
+ return Ok(tcx.intern_layout(LayoutS::scalar(cx, data_ptr)));
+ }
+
+ let unsized_part = tcx.struct_tail_erasing_lifetimes(pointee, param_env);
+ let metadata = match unsized_part.kind() {
+ ty::Foreign(..) => {
+ return Ok(tcx.intern_layout(LayoutS::scalar(cx, data_ptr)));
+ }
+ ty::Slice(_) | ty::Str => scalar_unit(Int(dl.ptr_sized_integer(), false)),
+ ty::Dynamic(..) => {
+ let mut vtable = scalar_unit(Pointer);
+ vtable.valid_range_mut().start = 1;
+ vtable
+ }
+ _ => return Err(LayoutError::Unknown(unsized_part)),
+ };
+
+ // Effectively a (ptr, meta) tuple.
+ tcx.intern_layout(scalar_pair(cx, data_ptr, metadata))
+ }
+
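
This arm is where thin vs. wide references come from: a sized pointee gets a single data pointer, slices and `str` add a `usize` length, and `dyn Trait` adds a vtable pointer (the scalar pair built just above). Observable from plain Rust; the thin-pointer size is documented, while the 2x factor for wide pointers is current behaviour rather than a hard guarantee:

    use std::mem::size_of;

    fn main() {
        assert_eq!(size_of::<&u8>(), size_of::<usize>()); // thin: sized pointee
        println!("&[u8]           = {} bytes", size_of::<&[u8]>());                // ptr + len
        println!("&str            = {} bytes", size_of::<&str>());                 // ptr + len
        println!("&dyn fmt::Debug = {} bytes", size_of::<&dyn std::fmt::Debug>()); // ptr + vtable
    }
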
+ ty::Dynamic(_, _, ty::DynStar) => {
+ let mut data = scalar_unit(Int(dl.ptr_sized_integer(), false));
+ data.valid_range_mut().start = 0;
+ let mut vtable = scalar_unit(Pointer);
+ vtable.valid_range_mut().start = 1;
+ tcx.intern_layout(scalar_pair(cx, data, vtable))
+ }
+
+ // Arrays and slices.
+ ty::Array(element, mut count) => {
+ if count.has_projections() {
+ count = tcx.normalize_erasing_regions(param_env, count);
+ if count.has_projections() {
+ return Err(LayoutError::Unknown(ty));
+ }
+ }
+
+ let count = count.try_eval_usize(tcx, param_env).ok_or(LayoutError::Unknown(ty))?;
+ let element = cx.layout_of(element)?;
+ let size = element.size.checked_mul(count, dl).ok_or(LayoutError::SizeOverflow(ty))?;
+
+ let abi = if count != 0 && tcx.conservative_is_privately_uninhabited(param_env.and(ty))
+ {
+ Abi::Uninhabited
+ } else {
+ Abi::Aggregate { sized: true }
+ };
+
+ let largest_niche = if count != 0 { element.largest_niche } else { None };
+
+ tcx.intern_layout(LayoutS {
+ variants: Variants::Single { index: VariantIdx::new(0) },
+ fields: FieldsShape::Array { stride: element.size, count },
+ abi,
+ largest_niche,
+ align: element.align,
+ size,
+ })
+ }
+ ty::Slice(element) => {
+ let element = cx.layout_of(element)?;
+ tcx.intern_layout(LayoutS {
+ variants: Variants::Single { index: VariantIdx::new(0) },
+ fields: FieldsShape::Array { stride: element.size, count: 0 },
+ abi: Abi::Aggregate { sized: false },
+ largest_niche: None,
+ align: element.align,
+ size: Size::ZERO,
+ })
+ }
+ ty::Str => tcx.intern_layout(LayoutS {
+ variants: Variants::Single { index: VariantIdx::new(0) },
+ fields: FieldsShape::Array { stride: Size::from_bytes(1), count: 0 },
+ abi: Abi::Aggregate { sized: false },
+ largest_niche: None,
+ align: dl.i8_align,
+ size: Size::ZERO,
+ }),
+
+ // Odd unit types.
+ ty::FnDef(..) => univariant(&[], &ReprOptions::default(), StructKind::AlwaysSized)?,
+ ty::Dynamic(_, _, ty::Dyn) | ty::Foreign(..) => {
+ let mut unit = univariant_uninterned(
+ cx,
+ ty,
+ &[],
+ &ReprOptions::default(),
+ StructKind::AlwaysSized,
+ )?;
+ match unit.abi {
+ Abi::Aggregate { ref mut sized } => *sized = false,
+ _ => bug!(),
+ }
+ tcx.intern_layout(unit)
+ }
+
+ ty::Generator(def_id, substs, _) => generator_layout(cx, ty, def_id, substs)?,
+
+ ty::Closure(_, ref substs) => {
+ let tys = substs.as_closure().upvar_tys();
+ univariant(
+ &tys.map(|ty| cx.layout_of(ty)).collect::<Result<Vec<_>, _>>()?,
+ &ReprOptions::default(),
+ StructKind::AlwaysSized,
+ )?
+ }
+
+ ty::Tuple(tys) => {
+ let kind =
+ if tys.len() == 0 { StructKind::AlwaysSized } else { StructKind::MaybeUnsized };
+
+ univariant(
+ &tys.iter().map(|k| cx.layout_of(k)).collect::<Result<Vec<_>, _>>()?,
+ &ReprOptions::default(),
+ kind,
+ )?
+ }
+
+ // SIMD vector types.
+ ty::Adt(def, substs) if def.repr().simd() => {
+ if !def.is_struct() {
+ // Should have yielded E0517 by now.
+ tcx.sess.delay_span_bug(
+ DUMMY_SP,
+ "#[repr(simd)] was applied to an ADT that is not a struct",
+ );
+ return Err(LayoutError::Unknown(ty));
+ }
+
+ // Supported SIMD vectors are homogeneous ADTs with at least one field:
+ //
+ // * #[repr(simd)] struct S(T, T, T, T);
+ // * #[repr(simd)] struct S { x: T, y: T, z: T, w: T }
+ // * #[repr(simd)] struct S([T; 4])
+ //
+ // where T is a primitive scalar (integer/float/pointer).
+
+ // SIMD vectors with zero fields are not supported.
+ // (should be caught by typeck)
+ if def.non_enum_variant().fields.is_empty() {
+ tcx.sess.fatal(&format!("monomorphising SIMD type `{}` of zero length", ty));
+ }
+
+ // Type of the first ADT field:
+ let f0_ty = def.non_enum_variant().fields[0].ty(tcx, substs);
+
+ // Heterogeneous SIMD vectors are not supported:
+ // (should be caught by typeck)
+ for fi in &def.non_enum_variant().fields {
+ if fi.ty(tcx, substs) != f0_ty {
+ tcx.sess.fatal(&format!("monomorphising heterogeneous SIMD type `{}`", ty));
+ }
+ }
+
+ // The element type and number of elements of the SIMD vector
+ // are obtained from:
+ //
+ // * the element type and length of the single array field, if
+ // the first field is of array type, or
+ //
+ // * the homogeneous field type and the number of fields.
+ let (e_ty, e_len, is_array) = if let ty::Array(e_ty, _) = f0_ty.kind() {
+ // First ADT field is an array:
+
+ // SIMD vectors with multiple array fields are not supported:
+ // (should be caught by typeck)
+ if def.non_enum_variant().fields.len() != 1 {
+ tcx.sess.fatal(&format!(
+ "monomorphising SIMD type `{}` with more than one array field",
+ ty
+ ));
+ }
+
+ // Extract the number of elements from the layout of the array field:
+ let FieldsShape::Array { count, .. } = cx.layout_of(f0_ty)?.layout.fields() else {
+ return Err(LayoutError::Unknown(ty));
+ };
+
+ (*e_ty, *count, true)
+ } else {
+ // First ADT field is not an array:
+ (f0_ty, def.non_enum_variant().fields.len() as _, false)
+ };
+
+ // SIMD vectors of zero length are not supported.
+ // Additionally, lengths are capped at 2^16 as a fixed maximum backends must
+ // support.
+ //
+ // Can't be caught in typeck if the array length is generic.
+ if e_len == 0 {
+ tcx.sess.fatal(&format!("monomorphising SIMD type `{}` of zero length", ty));
+ } else if e_len > MAX_SIMD_LANES {
+ tcx.sess.fatal(&format!(
+ "monomorphising SIMD type `{}` of length greater than {}",
+ ty, MAX_SIMD_LANES,
+ ));
+ }
+
+ // Compute the ABI of the element type:
+ let e_ly = cx.layout_of(e_ty)?;
+ let Abi::Scalar(e_abi) = e_ly.abi else {
+ // This error isn't caught in typeck, e.g., if
+ // the element type of the vector is generic.
+ tcx.sess.fatal(&format!(
+ "monomorphising SIMD type `{}` with a non-primitive-scalar \
+ (integer/float/pointer) element type `{}`",
+ ty, e_ty
+ ))
+ };
+
+ // Compute the size and alignment of the vector:
+ let size = e_ly.size.checked_mul(e_len, dl).ok_or(LayoutError::SizeOverflow(ty))?;
+ let align = dl.vector_align(size);
+ let size = size.align_to(align.abi);
+
+ // Compute the placement of the vector fields:
+ let fields = if is_array {
+ FieldsShape::Arbitrary { offsets: vec![Size::ZERO], memory_index: vec![0] }
+ } else {
+ FieldsShape::Array { stride: e_ly.size, count: e_len }
+ };
+
+ tcx.intern_layout(LayoutS {
+ variants: Variants::Single { index: VariantIdx::new(0) },
+ fields,
+ abi: Abi::Vector { element: e_abi, count: e_len },
+ largest_niche: e_ly.largest_niche,
+ size,
+ align,
+ })
+ }
+
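
The accepted shapes match the comment above; a hedged, nightly-only illustration of the single-array-field form (`repr_simd` is a perma-unstable, internal-facing feature, used under the hood by `std::simd`; this snippet is not part of this diff):

    #![feature(repr_simd)] // nightly only
    #![allow(non_camel_case_types)]

    #[repr(simd)]
    struct f32x4([f32; 4]); // homogeneous, single array field

    fn main() {
        // Laid out as Abi::Vector { element: f32, count: 4 }: 16 bytes of data
        // with a target-dependent vector alignment.
        println!("{} bytes", std::mem::size_of::<f32x4>());
    }
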
+ // ADTs.
+ ty::Adt(def, substs) => {
+ // Cache the field layouts.
+ let variants = def
+ .variants()
+ .iter()
+ .map(|v| {
+ v.fields
+ .iter()
+ .map(|field| cx.layout_of(field.ty(tcx, substs)))
+ .collect::<Result<Vec<_>, _>>()
+ })
+ .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
+
+ if def.is_union() {
+ if def.repr().pack.is_some() && def.repr().align.is_some() {
+ cx.tcx.sess.delay_span_bug(
+ tcx.def_span(def.did()),
+ "union cannot be packed and aligned",
+ );
+ return Err(LayoutError::Unknown(ty));
+ }
+
+ let mut align =
+ if def.repr().pack.is_some() { dl.i8_align } else { dl.aggregate_align };
+
+ if let Some(repr_align) = def.repr().align {
+ align = align.max(AbiAndPrefAlign::new(repr_align));
+ }
+
+ let optimize = !def.repr().inhibit_union_abi_opt();
+ let mut size = Size::ZERO;
+ let mut abi = Abi::Aggregate { sized: true };
+ let index = VariantIdx::new(0);
+ for field in &variants[index] {
+ assert!(!field.is_unsized());
+ align = align.max(field.align);
+
+ // If all non-ZST fields have the same ABI, forward this ABI
+ if optimize && !field.is_zst() {
+ // Discard valid range information and allow undef
+ let field_abi = match field.abi {
+ Abi::Scalar(x) => Abi::Scalar(x.to_union()),
+ Abi::ScalarPair(x, y) => Abi::ScalarPair(x.to_union(), y.to_union()),
+ Abi::Vector { element: x, count } => {
+ Abi::Vector { element: x.to_union(), count }
+ }
+ Abi::Uninhabited | Abi::Aggregate { .. } => {
+ Abi::Aggregate { sized: true }
+ }
+ };
+
+ if size == Size::ZERO {
+ // first non ZST: initialize 'abi'
+ abi = field_abi;
+ } else if abi != field_abi {
+ // different fields have different ABI: reset to Aggregate
+ abi = Abi::Aggregate { sized: true };
+ }
+ }
+
+ size = cmp::max(size, field.size);
+ }
+
+ if let Some(pack) = def.repr().pack {
+ align = align.min(AbiAndPrefAlign::new(pack));
+ }
+
+ return Ok(tcx.intern_layout(LayoutS {
+ variants: Variants::Single { index },
+ fields: FieldsShape::Union(
+ NonZeroUsize::new(variants[index].len()).ok_or(LayoutError::Unknown(ty))?,
+ ),
+ abi,
+ largest_niche: None,
+ align,
+ size: size.align_to(align.abi),
+ }));
+ }
+
+ // A variant is absent if it's uninhabited and only has ZST fields.
+ // Present uninhabited variants only require space for their fields,
+ // but *not* an encoding of the discriminant (e.g., a tag value).
+ // See issue #49298 for more details on the need to leave space
+ // for non-ZST uninhabited data (mostly partial initialization).
+ let absent = |fields: &[TyAndLayout<'_>]| {
+ let uninhabited = fields.iter().any(|f| f.abi.is_uninhabited());
+ let is_zst = fields.iter().all(|f| f.is_zst());
+ uninhabited && is_zst
+ };
+ let (present_first, present_second) = {
+ let mut present_variants = variants
+ .iter_enumerated()
+ .filter_map(|(i, v)| if absent(v) { None } else { Some(i) });
+ (present_variants.next(), present_variants.next())
+ };
+ let present_first = match present_first {
+ Some(present_first) => present_first,
+ // Uninhabited because it has no variants, or only absent ones.
+ None if def.is_enum() => {
+ return Ok(tcx.layout_of(param_env.and(tcx.types.never))?.layout);
+ }
+ // If it's a struct, still compute a layout so that we can still compute the
+ // field offsets.
+ None => VariantIdx::new(0),
+ };
+
+ let is_struct = !def.is_enum() ||
+ // Only one variant is present.
+ (present_second.is_none() &&
+ // Representation optimizations are allowed.
+ !def.repr().inhibit_enum_layout_opt());
+ if is_struct {
+ // Struct, or univariant enum equivalent to a struct.
+ // (Typechecking will reject discriminant-sizing attrs.)
+
+ let v = present_first;
+ let kind = if def.is_enum() || variants[v].is_empty() {
+ StructKind::AlwaysSized
+ } else {
+ let param_env = tcx.param_env(def.did());
+ let last_field = def.variant(v).fields.last().unwrap();
+ let always_sized = tcx.type_of(last_field.did).is_sized(tcx, param_env);
+ if !always_sized { StructKind::MaybeUnsized } else { StructKind::AlwaysSized }
+ };
+
+ let mut st = univariant_uninterned(cx, ty, &variants[v], &def.repr(), kind)?;
+ st.variants = Variants::Single { index: v };
+
+ if def.is_unsafe_cell() {
+ let hide_niches = |scalar: &mut _| match scalar {
+ Scalar::Initialized { value, valid_range } => {
+ *valid_range = WrappingRange::full(value.size(dl))
+ }
+ // Already doesn't have any niches
+ Scalar::Union { .. } => {}
+ };
+ match &mut st.abi {
+ Abi::Uninhabited => {}
+ Abi::Scalar(scalar) => hide_niches(scalar),
+ Abi::ScalarPair(a, b) => {
+ hide_niches(a);
+ hide_niches(b);
+ }
+ Abi::Vector { element, count: _ } => hide_niches(element),
+ Abi::Aggregate { sized: _ } => {}
+ }
+ st.largest_niche = None;
+ return Ok(tcx.intern_layout(st));
+ }
+
+ let (start, end) = cx.tcx.layout_scalar_valid_range(def.did());
+ match st.abi {
+ Abi::Scalar(ref mut scalar) | Abi::ScalarPair(ref mut scalar, _) => {
+ // the asserts ensure that we are not using the
+ // `#[rustc_layout_scalar_valid_range(n)]`
+ // attribute to widen the range of anything as that would probably
+ // result in UB somewhere
+ // FIXME(eddyb) the asserts are probably not needed,
+ // as larger validity ranges would result in missed
+ // optimizations, *not* wrongly assuming the inner
+ // value is valid. e.g. unions enlarge validity ranges,
+ // because the values may be uninitialized.
+ if let Bound::Included(start) = start {
+ // FIXME(eddyb) this might be incorrect - it doesn't
+ // account for wrap-around (end < start) ranges.
+ let valid_range = scalar.valid_range_mut();
+ assert!(valid_range.start <= start);
+ valid_range.start = start;
+ }
+ if let Bound::Included(end) = end {
+ // FIXME(eddyb) this might be incorrect - it doesn't
+ // account for wrap-around (end < start) ranges.
+ let valid_range = scalar.valid_range_mut();
+ assert!(valid_range.end >= end);
+ valid_range.end = end;
+ }
+
+ // Update `largest_niche` if we have introduced a larger niche.
+ let niche = Niche::from_scalar(dl, Size::ZERO, *scalar);
+ if let Some(niche) = niche {
+ match st.largest_niche {
+ Some(largest_niche) => {
+ // Replace the existing niche even if they're equal,
+ // because this one is at a lower offset.
+ if largest_niche.available(dl) <= niche.available(dl) {
+ st.largest_niche = Some(niche);
+ }
+ }
+ None => st.largest_niche = Some(niche),
+ }
+ }
+ }
+ _ => assert!(
+ start == Bound::Unbounded && end == Bound::Unbounded,
+ "nonscalar layout for layout_scalar_valid_range type {:?}: {:#?}",
+ def,
+ st,
+ ),
+ }
+
+ return Ok(tcx.intern_layout(st));
+ }
+
+ // At this point, we have handled all unions and
+ // structs. (We have also handled univariant enums
+ // that allow representation optimization.)
+ assert!(def.is_enum());
+
+ // Until we've decided whether to use the tagged or
+ // niche filling LayoutS, we don't want to intern the
+ // variant layouts, so we can't store them in the
+ // overall LayoutS. Store the overall LayoutS
+ // and the variant LayoutSs here until then.
+ struct TmpLayout<'tcx> {
+ layout: LayoutS<'tcx>,
+ variants: IndexVec<VariantIdx, LayoutS<'tcx>>,
+ }
+
+ let calculate_niche_filling_layout =
+ || -> Result<Option<TmpLayout<'tcx>>, LayoutError<'tcx>> {
+ // The current code for niche-filling relies on variant indices
+ // instead of actual discriminants, so enums with
+ // explicit discriminants (RFC #2363) would misbehave.
+ if def.repr().inhibit_enum_layout_opt()
+ || def
+ .variants()
+ .iter_enumerated()
+ .any(|(i, v)| v.discr != ty::VariantDiscr::Relative(i.as_u32()))
+ {
+ return Ok(None);
+ }
+
+ if variants.len() < 2 {
+ return Ok(None);
+ }
+
+ let mut align = dl.aggregate_align;
+ let mut variant_layouts = variants
+ .iter_enumerated()
+ .map(|(j, v)| {
+ let mut st = univariant_uninterned(
+ cx,
+ ty,
+ v,
+ &def.repr(),
+ StructKind::AlwaysSized,
+ )?;
+ st.variants = Variants::Single { index: j };
+
+ align = align.max(st.align);
+
+ Ok(st)
+ })
+ .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
+
+ let largest_variant_index = match variant_layouts
+ .iter_enumerated()
+ .max_by_key(|(_i, layout)| layout.size.bytes())
+ .map(|(i, _layout)| i)
+ {
+ None => return Ok(None),
+ Some(i) => i,
+ };
+
+ let all_indices = VariantIdx::new(0)..=VariantIdx::new(variants.len() - 1);
+ let needs_disc = |index: VariantIdx| {
+ index != largest_variant_index && !absent(&variants[index])
+ };
+ let niche_variants = all_indices.clone().find(|v| needs_disc(*v)).unwrap()
+ ..=all_indices.rev().find(|v| needs_disc(*v)).unwrap();
+
+ let count = niche_variants.size_hint().1.unwrap() as u128;
+
+ // Find the field with the largest niche
+ let (field_index, niche, (niche_start, niche_scalar)) = match variants
+ [largest_variant_index]
+ .iter()
+ .enumerate()
+ .filter_map(|(j, field)| Some((j, field.largest_niche?)))
+ .max_by_key(|(_, niche)| niche.available(dl))
+ .and_then(|(j, niche)| Some((j, niche, niche.reserve(cx, count)?)))
+ {
+ None => return Ok(None),
+ Some(x) => x,
+ };
+
+ let niche_offset = niche.offset
+ + variant_layouts[largest_variant_index].fields.offset(field_index);
+ let niche_size = niche.value.size(dl);
+ let size = variant_layouts[largest_variant_index].size.align_to(align.abi);
+
+ let all_variants_fit =
+ variant_layouts.iter_enumerated_mut().all(|(i, layout)| {
+ if i == largest_variant_index {
+ return true;
+ }
+
+ layout.largest_niche = None;
+
+ if layout.size <= niche_offset {
+ // This variant will fit before the niche.
+ return true;
+ }
+
+ // Determine if it'll fit after the niche.
+ let this_align = layout.align.abi;
+ let this_offset = (niche_offset + niche_size).align_to(this_align);
+
+ if this_offset + layout.size > size {
+ return false;
+ }
+
+ // It'll fit, but we need to make some adjustments.
+ match layout.fields {
+ FieldsShape::Arbitrary { ref mut offsets, .. } => {
+ for (j, offset) in offsets.iter_mut().enumerate() {
+ if !variants[i][j].is_zst() {
+ *offset += this_offset;
+ }
+ }
+ }
+ _ => {
+ panic!("Layout of fields should be Arbitrary for variants")
+ }
+ }
+
+ // It can't be a Scalar or ScalarPair because the offset isn't 0.
+ if !layout.abi.is_uninhabited() {
+ layout.abi = Abi::Aggregate { sized: true };
+ }
+ layout.size += this_offset;
+
+ true
+ });
+
+ if !all_variants_fit {
+ return Ok(None);
+ }
+
+ let largest_niche = Niche::from_scalar(dl, niche_offset, niche_scalar);
+
+ let others_zst = variant_layouts
+ .iter_enumerated()
+ .all(|(i, layout)| i == largest_variant_index || layout.size == Size::ZERO);
+ let same_size = size == variant_layouts[largest_variant_index].size;
+ let same_align = align == variant_layouts[largest_variant_index].align;
+
+ let abi = if variant_layouts.iter().all(|v| v.abi.is_uninhabited()) {
+ Abi::Uninhabited
+ } else if same_size && same_align && others_zst {
+ match variant_layouts[largest_variant_index].abi {
+ // When the total alignment and size match, we can use the
+ // same ABI as the scalar variant with the reserved niche.
+ Abi::Scalar(_) => Abi::Scalar(niche_scalar),
+ Abi::ScalarPair(first, second) => {
+ // Only the niche is guaranteed to be initialised,
+ // so use union layouts for the other primitive.
+ if niche_offset == Size::ZERO {
+ Abi::ScalarPair(niche_scalar, second.to_union())
+ } else {
+ Abi::ScalarPair(first.to_union(), niche_scalar)
+ }
+ }
+ _ => Abi::Aggregate { sized: true },
+ }
+ } else {
+ Abi::Aggregate { sized: true }
+ };
+
+ let layout = LayoutS {
+ variants: Variants::Multiple {
+ tag: niche_scalar,
+ tag_encoding: TagEncoding::Niche {
+ untagged_variant: largest_variant_index,
+ niche_variants,
+ niche_start,
+ },
+ tag_field: 0,
+ variants: IndexVec::new(),
+ },
+ fields: FieldsShape::Arbitrary {
+ offsets: vec![niche_offset],
+ memory_index: vec![0],
+ },
+ abi,
+ largest_niche,
+ size,
+ align,
+ };
+
+ Ok(Some(TmpLayout { layout, variants: variant_layouts }))
+ };
+
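
This closure is the niche-filling strategy that makes `Option<&T>` pointer-sized: instead of a separate tag, the discriminants of the non-largest variants are encoded in invalid bit patterns (the niche) of one field of the largest variant. Only the `Option<&T>`/`Option<NonZero*>` cases below are documented guarantees; the others just show the same mechanism at work (user code, not part of this diff):

    use std::mem::size_of;
    use std::num::NonZeroU32;

    fn main() {
        // Guaranteed: the null niche of &u8 encodes None.
        assert_eq!(size_of::<Option<&u8>>(), size_of::<&u8>());
        assert_eq!(size_of::<Option<NonZeroU32>>(), size_of::<u32>());
        // Same mechanism, not formally guaranteed:
        println!("Option<bool> = {}", size_of::<Option<bool>>()); // bool's 2..=255 niche
        println!("Option<char> = {}", size_of::<Option<char>>()); // char's niche above 0x10FFFF
    }
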
+ let niche_filling_layout = calculate_niche_filling_layout()?;
+
+ let (mut min, mut max) = (i128::MAX, i128::MIN);
+ let discr_type = def.repr().discr_type();
+ let bits = Integer::from_attr(cx, discr_type).size().bits();
+ for (i, discr) in def.discriminants(tcx) {
+ if variants[i].iter().any(|f| f.abi.is_uninhabited()) {
+ continue;
+ }
+ let mut x = discr.val as i128;
+ if discr_type.is_signed() {
+ // sign extend the raw representation to be an i128
+ x = (x << (128 - bits)) >> (128 - bits);
+ }
+ if x < min {
+ min = x;
+ }
+ if x > max {
+ max = x;
+ }
+ }
+ // We might have no inhabited variants, so pretend there's at least one.
+ if (min, max) == (i128::MAX, i128::MIN) {
+ min = 0;
+ max = 0;
+ }
+ assert!(min <= max, "discriminant range is {}...{}", min, max);
+ let (min_ity, signed) = Integer::repr_discr(tcx, ty, &def.repr(), min, max);
+
+ let mut align = dl.aggregate_align;
+ let mut size = Size::ZERO;
+
+ // We're interested in the smallest alignment, so start large.
+ let mut start_align = Align::from_bytes(256).unwrap();
+ assert_eq!(Integer::for_align(dl, start_align), None);
+
+ // repr(C) on an enum tells us to make a (tag, union) layout,
+ // so we need to grow the prefix alignment to be at least
+ // the alignment of the union. (This value is used both for
+ // determining the alignment of the overall enum, and the
+ // determining the alignment of the payload after the tag.)
+ let mut prefix_align = min_ity.align(dl).abi;
+ if def.repr().c() {
+ for fields in &variants {
+ for field in fields {
+ prefix_align = prefix_align.max(field.align.abi);
+ }
+ }
+ }
+
+ // Create the set of structs that represent each variant.
+ let mut layout_variants = variants
+ .iter_enumerated()
+ .map(|(i, field_layouts)| {
+ let mut st = univariant_uninterned(
+ cx,
+ ty,
+ &field_layouts,
+ &def.repr(),
+ StructKind::Prefixed(min_ity.size(), prefix_align),
+ )?;
+ st.variants = Variants::Single { index: i };
+ // Find the first field we can't move later
+ // to make room for a larger discriminant.
+ for field in st.fields.index_by_increasing_offset().map(|j| field_layouts[j]) {
+ if !field.is_zst() || field.align.abi.bytes() != 1 {
+ start_align = start_align.min(field.align.abi);
+ break;
+ }
+ }
+ size = cmp::max(size, st.size);
+ align = align.max(st.align);
+ Ok(st)
+ })
+ .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
+
+ // Align the maximum variant size to the largest alignment.
+ size = size.align_to(align.abi);
+
+ if size.bytes() >= dl.obj_size_bound() {
+ return Err(LayoutError::SizeOverflow(ty));
+ }
+
+ let typeck_ity = Integer::from_attr(dl, def.repr().discr_type());
+ if typeck_ity < min_ity {
+ // It is a bug if layout decided on a greater discriminant size than typeck did
+ // at this point (based on the values the discriminant can take on), because the
+ // discriminant will be loaded and then stored into a variable of the type
+ // computed by typeck. Consider such a case (a bug): typeck decided on a
+ // byte-sized discriminant, but layout thinks we need 16 bits to store all
+ // discriminant values. Then, in codegen, storing this 16-bit discriminant into
+ // an 8-bit temporary would have to discard some of the bits needed to represent
+ // it (or layout is wrong in thinking it needs 16 bits).
+ bug!(
+ "layout decided on a larger discriminant type ({:?}) than typeck ({:?})",
+ min_ity,
+ typeck_ity
+ );
+ // However, after this point it is fine to make the discriminant type as large
+ // as we like (as an optimisation); we'll just truncate the value we load in codegen.
+ }
+
+ // Check to see if we should use a different type for the
+ // discriminant. We can safely use a type with the same size
+ // as the alignment of the first field of each variant.
+ // We increase the size of the discriminant to avoid LLVM copying
+ // padding when it doesn't need to; such copies normally cause unaligned
+ // loads/stores and excessive memcpy/memset operations. By using a
+ // bigger integer size, LLVM can be sure about its contents and
+ // won't be so conservative.
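+ // (Hypothetical example, added for illustration: `enum E { A(u32), B(u32) }`
+ // only needs a 1-byte tag, but both variants place their `u32` at offset 4,
+ // so the tag is widened to 4 bytes and the would-be padding bytes become part
+ // of the tag's known contents.)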
+
+ // Use the initial field alignment
+ let mut ity = if def.repr().c() || def.repr().int.is_some() {
+ min_ity
+ } else {
+ Integer::for_align(dl, start_align).unwrap_or(min_ity)
+ };
+
+ // If the alignment is not larger than the chosen discriminant size,
+ // don't use the alignment as the final size.
+ if ity <= min_ity {
+ ity = min_ity;
+ } else {
+ // Patch up the variants' first few fields.
+ let old_ity_size = min_ity.size();
+ let new_ity_size = ity.size();
+ for variant in &mut layout_variants {
+ match variant.fields {
+ FieldsShape::Arbitrary { ref mut offsets, .. } => {
+ for i in offsets {
+ if *i <= old_ity_size {
+ assert_eq!(*i, old_ity_size);
+ *i = new_ity_size;
+ }
+ }
+ // We might be making the struct larger.
+ if variant.size <= old_ity_size {
+ variant.size = new_ity_size;
+ }
+ }
+ _ => bug!(),
+ }
+ }
+ }
+
+ let tag_mask = ity.size().unsigned_int_max();
+ let tag = Scalar::Initialized {
+ value: Int(ity, signed),
+ valid_range: WrappingRange {
+ start: (min as u128 & tag_mask),
+ end: (max as u128 & tag_mask),
+ },
+ };
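+ // (Illustrative example, not part of the original comment: an enum whose
+ // discriminants are -1 and 1, stored in an i8 tag, gets the wrapping valid
+ // range 255..=1 here.)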
+ let mut abi = Abi::Aggregate { sized: true };
+
+ if layout_variants.iter().all(|v| v.abi.is_uninhabited()) {
+ abi = Abi::Uninhabited;
+ } else if tag.size(dl) == size {
+ // Make sure we only use scalar layout when the enum is entirely its
+ // own tag (i.e. it has no padding nor any non-ZST variant fields).
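+ // (Illustrative example, not part of the original comment: a fieldless
+ // `enum E { A, B, C }` is nothing but its 1-byte tag, so it gets `Abi::Scalar`.)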
+ abi = Abi::Scalar(tag);
+ } else {
+ // Try to use a ScalarPair for all tagged enums.
+ let mut common_prim = None;
+ let mut common_prim_initialized_in_all_variants = true;
+ for (field_layouts, layout_variant) in iter::zip(&variants, &layout_variants) {
+ let FieldsShape::Arbitrary { ref offsets, .. } = layout_variant.fields else {
+ bug!();
+ };
+ let mut fields = iter::zip(field_layouts, offsets).filter(|p| !p.0.is_zst());
+ let (field, offset) = match (fields.next(), fields.next()) {
+ (None, None) => {
+ common_prim_initialized_in_all_variants = false;
+ continue;
+ }
+ (Some(pair), None) => pair,
+ _ => {
+ common_prim = None;
+ break;
+ }
+ };
+ let prim = match field.abi {
+ Abi::Scalar(scalar) => {
+ common_prim_initialized_in_all_variants &=
+ matches!(scalar, Scalar::Initialized { .. });
+ scalar.primitive()
+ }
+ _ => {
+ common_prim = None;
+ break;
+ }
+ };
+ if let Some(pair) = common_prim {
+ // This is pretty conservative. We could go fancier
+ // by conflating things like i32 and u32, or even
+ // realising that (u8, u8) could just cohabit with
+ // u16 or even u32.
+ if pair != (prim, offset) {
+ common_prim = None;
+ break;
+ }
+ } else {
+ common_prim = Some((prim, offset));
+ }
+ }
+ if let Some((prim, offset)) = common_prim {
+ let prim_scalar = if common_prim_initialized_in_all_variants {
+ scalar_unit(prim)
+ } else {
+ // Common prim might be uninit.
+ Scalar::Union { value: prim }
+ };
+ let pair = scalar_pair(cx, tag, prim_scalar);
+ let pair_offsets = match pair.fields {
+ FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
+ assert_eq!(memory_index, &[0, 1]);
+ offsets
+ }
+ _ => bug!(),
+ };
+ if pair_offsets[0] == Size::ZERO
+ && pair_offsets[1] == *offset
+ && align == pair.align
+ && size == pair.size
+ {
+ // We can use `ScalarPair` only when it matches our
+ // already computed layout (including `#[repr(C)]`).
+ abi = pair.abi;
+ }
+ }
+ }
+
+ // If we pick a "clever" (by-value) ABI, we might have to adjust the ABI of the
+ // variants to ensure they are consistent. This is because a downcast is
+ // semantically a NOP, and thus should not affect layout.
+ if matches!(abi, Abi::Scalar(..) | Abi::ScalarPair(..)) {
+ for variant in &mut layout_variants {
+ // We only do this for variants with fields; the others are not accessed anyway.
+ // Also do not overwrite any already existing "clever" ABIs.
+ if variant.fields.count() > 0 && matches!(variant.abi, Abi::Aggregate { .. }) {
+ variant.abi = abi;
+ // Also need to bump up the size and alignment, so that the entire value fits in here.
+ variant.size = cmp::max(variant.size, size);
+ variant.align.abi = cmp::max(variant.align.abi, align.abi);
+ }
+ }
+ }
+
+ let largest_niche = Niche::from_scalar(dl, Size::ZERO, tag);
+
+ let tagged_layout = LayoutS {
+ variants: Variants::Multiple {
+ tag,
+ tag_encoding: TagEncoding::Direct,
+ tag_field: 0,
+ variants: IndexVec::new(),
+ },
+ fields: FieldsShape::Arbitrary { offsets: vec![Size::ZERO], memory_index: vec![0] },
+ largest_niche,
+ abi,
+ align,
+ size,
+ };
+
+ let tagged_layout = TmpLayout { layout: tagged_layout, variants: layout_variants };
+
+ let mut best_layout = match (tagged_layout, niche_filling_layout) {
+ (tl, Some(nl)) => {
+ // Pick the smaller layout; otherwise,
+ // pick the layout with the larger niche; otherwise,
+ // pick tagged as it has simpler codegen.
+ use Ordering::*;
+ let niche_size = |tmp_l: &TmpLayout<'_>| {
+ tmp_l.layout.largest_niche.map_or(0, |n| n.available(dl))
+ };
+ match (
+ tl.layout.size.cmp(&nl.layout.size),
+ niche_size(&tl).cmp(&niche_size(&nl)),
+ ) {
+ (Greater, _) => nl,
+ (Equal, Less) => nl,
+ _ => tl,
+ }
+ }
+ (tl, None) => tl,
+ };
+
+ // Now we can intern the variant layouts and store them in the enum layout.
+ best_layout.layout.variants = match best_layout.layout.variants {
+ Variants::Multiple { tag, tag_encoding, tag_field, .. } => Variants::Multiple {
+ tag,
+ tag_encoding,
+ tag_field,
+ variants: best_layout
+ .variants
+ .into_iter()
+ .map(|layout| tcx.intern_layout(layout))
+ .collect(),
+ },
+ _ => bug!(),
+ };
+
+ tcx.intern_layout(best_layout.layout)
+ }
+
+ // Types with no meaningful known layout.
+ ty::Projection(_) | ty::Opaque(..) => {
+ // NOTE(eddyb) `layout_of` query should've normalized these away,
+ // if that was possible, so there's no reason to try again here.
+ return Err(LayoutError::Unknown(ty));
+ }
+
+ ty::Placeholder(..) | ty::GeneratorWitness(..) | ty::Infer(_) => {
+ bug!("Layout::compute: unexpected type `{}`", ty)
+ }
+
+ ty::Bound(..) | ty::Param(_) | ty::Error(_) => {
+ return Err(LayoutError::Unknown(ty));
+ }
+ })
+}
+
+/// Overlap eligibility and variant assignment for each GeneratorSavedLocal.
+#[derive(Clone, Debug, PartialEq)]
+enum SavedLocalEligibility {
+ Unassigned,
+ Assigned(VariantIdx),
+ // FIXME: Use newtype_index so we aren't wasting bytes
+ Ineligible(Option<u32>),
+}
+
+// When laying out generators, we divide our saved local fields into two
+// categories: overlap-eligible and overlap-ineligible.
+//
+// Those fields which are ineligible for overlap go in a "prefix" at the
+// beginning of the layout, and always have space reserved for them.
+//
+// Overlap-eligible fields are only assigned to one variant, so we lay
+// those fields out for each variant and put them right after the
+// prefix.
+//
+// Finally, in the layout details, we point to the fields from the
+// variants they are assigned to. It is possible for some fields to be
+// included in multiple variants. No field ever "moves around" in the
+// layout; its offset is always the same.
+//
+// Also included in the layout are the upvars and the discriminant.
+// These are included as fields on the "outer" layout; they are not part
+// of any variant.
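+//
+// As a rough illustration (hypothetical example, not part of the original
+// comment): in an async block lowered to a generator,
+//
+//     let a = 0u64; f(&a).await; g(&a).await;  // `a` is live across two awaits
+//     let b = 0u64; h(&b).await;               // `b` is live across one await
+//
+// `a` is saved at more than one suspension point, so it is ineligible and is
+// promoted to the prefix, while `b` is saved at a single suspension point, so
+// it is assigned to that variant and may overlap with other variants' locals.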
+
+/// Compute the eligibility and assignment of each local.
+fn generator_saved_local_eligibility<'tcx>(
+ info: &GeneratorLayout<'tcx>,
+) -> (BitSet<GeneratorSavedLocal>, IndexVec<GeneratorSavedLocal, SavedLocalEligibility>) {
+ use SavedLocalEligibility::*;
+
+ let mut assignments: IndexVec<GeneratorSavedLocal, SavedLocalEligibility> =
+ IndexVec::from_elem_n(Unassigned, info.field_tys.len());
+
+ // The saved locals not eligible for overlap. These will get
+ // "promoted" to the prefix of our generator.
+ let mut ineligible_locals = BitSet::new_empty(info.field_tys.len());
+
+ // Figure out which of our saved locals are fields in only
+ // one variant. The rest are deemed ineligible for overlap.
+ for (variant_index, fields) in info.variant_fields.iter_enumerated() {
+ for local in fields {
+ match assignments[*local] {
+ Unassigned => {
+ assignments[*local] = Assigned(variant_index);
+ }
+ Assigned(idx) => {
+ // We've already seen this local at another suspension
+ // point, so it is no longer a candidate.
+ trace!(
+ "removing local {:?} in >1 variant ({:?}, {:?})",
+ local,
+ variant_index,
+ idx
+ );
+ ineligible_locals.insert(*local);
+ assignments[*local] = Ineligible(None);
+ }
+ Ineligible(_) => {}
+ }
+ }
+ }
+
+ // Next, check every pair of eligible locals to see if they
+ // conflict.
+ for local_a in info.storage_conflicts.rows() {
+ let conflicts_a = info.storage_conflicts.count(local_a);
+ if ineligible_locals.contains(local_a) {
+ continue;
+ }
+
+ for local_b in info.storage_conflicts.iter(local_a) {
+ // local_a and local_b are storage live at the same time, therefore they
+ // cannot overlap in the generator layout. The only way to guarantee
+ // this is if they are in the same variant, or one is ineligible
+ // (which means it is stored in every variant).
+ if ineligible_locals.contains(local_b) || assignments[local_a] == assignments[local_b] {
+ continue;
+ }
+
+ // If they conflict, we will choose one to make ineligible.
+ // This is not always optimal; it's just a greedy heuristic that
+ // seems to produce good results most of the time.
+ let conflicts_b = info.storage_conflicts.count(local_b);
+ let (remove, other) =
+ if conflicts_a > conflicts_b { (local_a, local_b) } else { (local_b, local_a) };
+ ineligible_locals.insert(remove);
+ assignments[remove] = Ineligible(None);
+ trace!("removing local {:?} due to conflict with {:?}", remove, other);
+ }
+ }
+
+ // Count the number of variants in use. If only one variant is in use, it is
+ // impossible for any locals to overlap in our layout. In this case it's
+ // always better to make the remaining locals ineligible, so we can
+ // lay them out with the other locals in the prefix and eliminate
+ // unnecessary padding bytes.
+ {
+ let mut used_variants = BitSet::new_empty(info.variant_fields.len());
+ for assignment in &assignments {
+ if let Assigned(idx) = assignment {
+ used_variants.insert(*idx);
+ }
+ }
+ if used_variants.count() < 2 {
+ for assignment in assignments.iter_mut() {
+ *assignment = Ineligible(None);
+ }
+ ineligible_locals.insert_all();
+ }
+ }
+
+ // Write down the order of our locals that will be promoted to the prefix.
+ {
+ for (idx, local) in ineligible_locals.iter().enumerate() {
+ assignments[local] = Ineligible(Some(idx as u32));
+ }
+ }
+ debug!("generator saved local assignments: {:?}", assignments);
+
+ (ineligible_locals, assignments)
+}
+
+/// Compute the full generator layout.
+fn generator_layout<'tcx>(
+ cx: &LayoutCx<'tcx, TyCtxt<'tcx>>,
+ ty: Ty<'tcx>,
+ def_id: hir::def_id::DefId,
+ substs: SubstsRef<'tcx>,
+) -> Result<Layout<'tcx>, LayoutError<'tcx>> {
+ use SavedLocalEligibility::*;
+ let tcx = cx.tcx;
+ let subst_field = |ty: Ty<'tcx>| EarlyBinder(ty).subst(tcx, substs);
+
+ let Some(info) = tcx.generator_layout(def_id) else {
+ return Err(LayoutError::Unknown(ty));
+ };
+ let (ineligible_locals, assignments) = generator_saved_local_eligibility(&info);
+
+ // Build a prefix layout, including "promoting" all ineligible
+ // locals as part of the prefix. We compute the layout of all of
+ // these fields at once to get optimal packing.
+ let tag_index = substs.as_generator().prefix_tys().count();
+
+ // `info.variant_fields` already accounts for the reserved variants, so no need to add them.
+ let max_discr = (info.variant_fields.len() - 1) as u128;
+ let discr_int = Integer::fit_unsigned(max_discr);
+ let discr_int_ty = discr_int.to_ty(tcx, false);
+ let tag = Scalar::Initialized {
+ value: Primitive::Int(discr_int, false),
+ valid_range: WrappingRange { start: 0, end: max_discr },
+ };
+ let tag_layout = cx.tcx.intern_layout(LayoutS::scalar(cx, tag));
+ let tag_layout = TyAndLayout { ty: discr_int_ty, layout: tag_layout };
+
+ let promoted_layouts = ineligible_locals
+ .iter()
+ .map(|local| subst_field(info.field_tys[local]))
+ .map(|ty| tcx.mk_maybe_uninit(ty))
+ .map(|ty| cx.layout_of(ty));
+ let prefix_layouts = substs
+ .as_generator()
+ .prefix_tys()
+ .map(|ty| cx.layout_of(ty))
+ .chain(iter::once(Ok(tag_layout)))
+ .chain(promoted_layouts)
+ .collect::<Result<Vec<_>, _>>()?;
+ let prefix = univariant_uninterned(
+ cx,
+ ty,
+ &prefix_layouts,
+ &ReprOptions::default(),
+ StructKind::AlwaysSized,
+ )?;
+
+ let (prefix_size, prefix_align) = (prefix.size, prefix.align);
+
+ // Split the prefix layout into the "outer" fields (upvars and
+ // discriminant) and the "promoted" fields. Promoted fields will
+ // get included in each variant that requested them in
+ // GeneratorLayout.
+ debug!("prefix = {:#?}", prefix);
+ let (outer_fields, promoted_offsets, promoted_memory_index) = match prefix.fields {
+ FieldsShape::Arbitrary { mut offsets, memory_index } => {
+ let mut inverse_memory_index = invert_mapping(&memory_index);
+
+ // "a" (`0..b_start`) and "b" (`b_start..`) correspond to
+ // "outer" and "promoted" fields respectively.
+ let b_start = (tag_index + 1) as u32;
+ let offsets_b = offsets.split_off(b_start as usize);
+ let offsets_a = offsets;
+
+ // Disentangle the "a" and "b" components of `inverse_memory_index`
+ // by preserving the order but keeping only one disjoint "half" each.
+ // FIXME(eddyb) build a better abstraction for permutations, if possible.
+ let inverse_memory_index_b: Vec<_> =
+ inverse_memory_index.iter().filter_map(|&i| i.checked_sub(b_start)).collect();
+ inverse_memory_index.retain(|&i| i < b_start);
+ let inverse_memory_index_a = inverse_memory_index;
+
+ // Since `inverse_memory_index_{a,b}` each only refer to their
+ // respective fields, they can be safely inverted
+ let memory_index_a = invert_mapping(&inverse_memory_index_a);
+ let memory_index_b = invert_mapping(&inverse_memory_index_b);
+
+ let outer_fields =
+ FieldsShape::Arbitrary { offsets: offsets_a, memory_index: memory_index_a };
+ (outer_fields, offsets_b, memory_index_b)
+ }
+ _ => bug!(),
+ };
+
+ let mut size = prefix.size;
+ let mut align = prefix.align;
+ let variants = info
+ .variant_fields
+ .iter_enumerated()
+ .map(|(index, variant_fields)| {
+ // Only include overlap-eligible fields when we compute our variant layout.
+ let variant_only_tys = variant_fields
+ .iter()
+ .filter(|local| match assignments[**local] {
+ Unassigned => bug!(),
+ Assigned(v) if v == index => true,
+ Assigned(_) => bug!("assignment does not match variant"),
+ Ineligible(_) => false,
+ })
+ .map(|local| subst_field(info.field_tys[*local]));
+
+ let mut variant = univariant_uninterned(
+ cx,
+ ty,
+ &variant_only_tys.map(|ty| cx.layout_of(ty)).collect::<Result<Vec<_>, _>>()?,
+ &ReprOptions::default(),
+ StructKind::Prefixed(prefix_size, prefix_align.abi),
+ )?;
+ variant.variants = Variants::Single { index };
+
+ let FieldsShape::Arbitrary { offsets, memory_index } = variant.fields else {
+ bug!();
+ };
+
+ // Now, stitch the promoted and variant-only fields back together in
+ // the order they are mentioned by our GeneratorLayout.
+ // Because we only use some subset (that can differ between variants)
+ // of the promoted fields, we can't just pick those elements of the
+ // `promoted_memory_index` (as we'd end up with gaps).
+ // So instead, we build an "inverse memory_index", as if all of the
+ // promoted fields were being used, but leave the elements not in the
+ // subset as `INVALID_FIELD_IDX`, which we can filter out later to
+ // obtain a valid (bijective) mapping.
+ const INVALID_FIELD_IDX: u32 = !0;
+ let mut combined_inverse_memory_index =
+ vec![INVALID_FIELD_IDX; promoted_memory_index.len() + memory_index.len()];
+ let mut offsets_and_memory_index = iter::zip(offsets, memory_index);
+ let combined_offsets = variant_fields
+ .iter()
+ .enumerate()
+ .map(|(i, local)| {
+ let (offset, memory_index) = match assignments[*local] {
+ Unassigned => bug!(),
+ Assigned(_) => {
+ let (offset, memory_index) = offsets_and_memory_index.next().unwrap();
+ (offset, promoted_memory_index.len() as u32 + memory_index)
+ }
+ Ineligible(field_idx) => {
+ let field_idx = field_idx.unwrap() as usize;
+ (promoted_offsets[field_idx], promoted_memory_index[field_idx])
+ }
+ };
+ combined_inverse_memory_index[memory_index as usize] = i as u32;
+ offset
+ })
+ .collect();
+
+ // Remove the unused slots and invert the mapping to obtain the
+ // combined `memory_index` (also see previous comment).
+ combined_inverse_memory_index.retain(|&i| i != INVALID_FIELD_IDX);
+ let combined_memory_index = invert_mapping(&combined_inverse_memory_index);
+
+ variant.fields = FieldsShape::Arbitrary {
+ offsets: combined_offsets,
+ memory_index: combined_memory_index,
+ };
+
+ size = size.max(variant.size);
+ align = align.max(variant.align);
+ Ok(tcx.intern_layout(variant))
+ })
+ .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
+
+ size = size.align_to(align.abi);
+
+ let abi = if prefix.abi.is_uninhabited() || variants.iter().all(|v| v.abi().is_uninhabited()) {
+ Abi::Uninhabited
+ } else {
+ Abi::Aggregate { sized: true }
+ };
+
+ let layout = tcx.intern_layout(LayoutS {
+ variants: Variants::Multiple {
+ tag,
+ tag_encoding: TagEncoding::Direct,
+ tag_field: tag_index,
+ variants,
+ },
+ fields: outer_fields,
+ abi,
+ largest_niche: prefix.largest_niche,
+ size,
+ align,
+ });
+ debug!("generator layout ({:?}): {:#?}", ty, layout);
+ Ok(layout)
+}
+
+/// This is invoked by the `layout_of` query to record the final
+/// layout of each type.
+#[inline(always)]
+fn record_layout_for_printing<'tcx>(cx: &LayoutCx<'tcx, TyCtxt<'tcx>>, layout: TyAndLayout<'tcx>) {
+ // If we are running with `-Zprint-type-sizes`, maybe record layouts
+ // for dumping later.
+ if cx.tcx.sess.opts.unstable_opts.print_type_sizes {
+ record_layout_for_printing_outlined(cx, layout)
+ }
+}
+
+fn record_layout_for_printing_outlined<'tcx>(
+ cx: &LayoutCx<'tcx, TyCtxt<'tcx>>,
+ layout: TyAndLayout<'tcx>,
+) {
+ // Ignore layouts that are done with non-empty environments or
+ // non-monomorphic layouts, as the user only wants to see the stuff
+ // resulting from the final codegen session.
+ if layout.ty.has_non_region_param() || !cx.param_env.caller_bounds().is_empty() {
+ return;
+ }
+
+ // (delay format until we actually need it)
+ let record = |kind, packed, opt_discr_size, variants| {
+ let type_desc = format!("{:?}", layout.ty);
+ cx.tcx.sess.code_stats.record_type_size(
+ kind,
+ type_desc,
+ layout.align.abi,
+ layout.size,
+ packed,
+ opt_discr_size,
+ variants,
+ );
+ };
+
+ let adt_def = match *layout.ty.kind() {
+ ty::Adt(ref adt_def, _) => {
+ debug!("print-type-size t: `{:?}` process adt", layout.ty);
+ adt_def
+ }
+
+ ty::Closure(..) => {
+ debug!("print-type-size t: `{:?}` record closure", layout.ty);
+ record(DataTypeKind::Closure, false, None, vec![]);
+ return;
+ }
+
+ _ => {
+ debug!("print-type-size t: `{:?}` skip non-nominal", layout.ty);
+ return;
+ }
+ };
+
+ let adt_kind = adt_def.adt_kind();
+ let adt_packed = adt_def.repr().pack.is_some();
+
+ let build_variant_info = |n: Option<Symbol>, flds: &[Symbol], layout: TyAndLayout<'tcx>| {
+ let mut min_size = Size::ZERO;
+ let field_info: Vec<_> = flds
+ .iter()
+ .enumerate()
+ .map(|(i, &name)| {
+ let field_layout = layout.field(cx, i);
+ let offset = layout.fields.offset(i);
+ let field_end = offset + field_layout.size;
+ if min_size < field_end {
+ min_size = field_end;
+ }
+ FieldInfo {
+ name,
+ offset: offset.bytes(),
+ size: field_layout.size.bytes(),
+ align: field_layout.align.abi.bytes(),
+ }
+ })
+ .collect();
+
+ VariantInfo {
+ name: n,
+ kind: if layout.is_unsized() { SizeKind::Min } else { SizeKind::Exact },
+ align: layout.align.abi.bytes(),
+ size: if min_size.bytes() == 0 { layout.size.bytes() } else { min_size.bytes() },
+ fields: field_info,
+ }
+ };
+
+ match layout.variants {
+ Variants::Single { index } => {
+ if !adt_def.variants().is_empty() && layout.fields != FieldsShape::Primitive {
+ debug!("print-type-size `{:#?}` variant {}", layout, adt_def.variant(index).name);
+ let variant_def = &adt_def.variant(index);
+ let fields: Vec<_> = variant_def.fields.iter().map(|f| f.name).collect();
+ record(
+ adt_kind.into(),
+ adt_packed,
+ None,
+ vec![build_variant_info(Some(variant_def.name), &fields, layout)],
+ );
+ } else {
+ // (This case arises for *empty* enums, so give it
+ // zero variants.)
+ record(adt_kind.into(), adt_packed, None, vec![]);
+ }
+ }
+
+ Variants::Multiple { tag, ref tag_encoding, .. } => {
+ debug!(
+ "print-type-size `{:#?}` adt general variants def {}",
+ layout.ty,
+ adt_def.variants().len()
+ );
+ let variant_infos: Vec<_> = adt_def
+ .variants()
+ .iter_enumerated()
+ .map(|(i, variant_def)| {
+ let fields: Vec<_> = variant_def.fields.iter().map(|f| f.name).collect();
+ build_variant_info(Some(variant_def.name), &fields, layout.for_variant(cx, i))
+ })
+ .collect();
+ record(
+ adt_kind.into(),
+ adt_packed,
+ match tag_encoding {
+ TagEncoding::Direct => Some(tag.size(cx)),
+ _ => None,
+ },
+ variant_infos,
+ );
+ }
+ }
+}
diff --git a/compiler/rustc_ty_utils/src/layout_sanity_check.rs b/compiler/rustc_ty_utils/src/layout_sanity_check.rs
new file mode 100644
index 000000000..100926ad4
--- /dev/null
+++ b/compiler/rustc_ty_utils/src/layout_sanity_check.rs
@@ -0,0 +1,303 @@
+use rustc_middle::ty::{
+ layout::{LayoutCx, TyAndLayout},
+ TyCtxt,
+};
+use rustc_target::abi::*;
+
+use std::cmp;
+
+/// Enforce some basic invariants on layouts.
+pub(super) fn sanity_check_layout<'tcx>(
+ cx: &LayoutCx<'tcx, TyCtxt<'tcx>>,
+ layout: &TyAndLayout<'tcx>,
+) {
+ // Type-level uninhabitedness should always imply ABI uninhabitedness.
+ if cx.tcx.conservative_is_privately_uninhabited(cx.param_env.and(layout.ty)) {
+ assert!(layout.abi.is_uninhabited());
+ }
+
+ if layout.size.bytes() % layout.align.abi.bytes() != 0 {
+ bug!("size is not a multiple of align, in the following layout:\n{layout:#?}");
+ }
+
+ if cfg!(debug_assertions) {
+ /// Yields non-ZST fields of the type
+ fn non_zst_fields<'tcx, 'a>(
+ cx: &'a LayoutCx<'tcx, TyCtxt<'tcx>>,
+ layout: &'a TyAndLayout<'tcx>,
+ ) -> impl Iterator<Item = (Size, TyAndLayout<'tcx>)> + 'a {
+ (0..layout.layout.fields().count()).filter_map(|i| {
+ let field = layout.field(cx, i);
+ // Also checking `align == 1` here leads to test failures in
+ // `layout/zero-sized-array-union.rs`, where a type has a zero-size field with
+ // alignment 4 that still gets ignored during layout computation (which is okay
+ // since other fields already force alignment 4).
+ let zst = field.is_zst();
+ (!zst).then(|| (layout.fields.offset(i), field))
+ })
+ }
+
+ fn skip_newtypes<'tcx>(
+ cx: &LayoutCx<'tcx, TyCtxt<'tcx>>,
+ layout: &TyAndLayout<'tcx>,
+ ) -> TyAndLayout<'tcx> {
+ if matches!(layout.layout.variants(), Variants::Multiple { .. }) {
+ // Definitely not a newtype of anything.
+ return *layout;
+ }
+ let mut fields = non_zst_fields(cx, layout);
+ let Some(first) = fields.next() else {
+ // No fields here, so this could be a primitive or enum -- either way it's not a newtype around a thing
+ return *layout
+ };
+ if fields.next().is_none() {
+ let (offset, first) = first;
+ if offset == Size::ZERO && first.layout.size() == layout.size {
+ // This is a newtype, so keep recursing.
+ // FIXME(RalfJung): I don't think it would be correct to do any checks for
+ // alignment here, so we don't. Is that correct?
+ return skip_newtypes(cx, &first);
+ }
+ }
+ // No more newtypes here.
+ *layout
+ }
+
+ fn check_layout_abi<'tcx>(cx: &LayoutCx<'tcx, TyCtxt<'tcx>>, layout: &TyAndLayout<'tcx>) {
+ match layout.layout.abi() {
+ Abi::Scalar(scalar) => {
+ // No padding in scalars.
+ let size = scalar.size(cx);
+ let align = scalar.align(cx).abi;
+ assert_eq!(
+ layout.layout.size(),
+ size,
+ "size mismatch between ABI and layout in {layout:#?}"
+ );
+ assert_eq!(
+ layout.layout.align().abi,
+ align,
+ "alignment mismatch between ABI and layout in {layout:#?}"
+ );
+ // Check that this matches the underlying field.
+ let inner = skip_newtypes(cx, layout);
+ assert!(
+ matches!(inner.layout.abi(), Abi::Scalar(_)),
+ "`Scalar` type {} is newtype around non-`Scalar` type {}",
+ layout.ty,
+ inner.ty
+ );
+ match inner.layout.fields() {
+ FieldsShape::Primitive => {
+ // Fine.
+ }
+ FieldsShape::Union(..) => {
+ // FIXME: I guess we could also check something here? Like, look at all fields?
+ return;
+ }
+ FieldsShape::Arbitrary { .. } => {
+ // Should be an enum; the only field is the discriminant.
+ assert!(
+ inner.ty.is_enum(),
+ "`Scalar` layout for non-primitive non-enum type {}",
+ inner.ty
+ );
+ assert_eq!(
+ inner.layout.fields().count(),
+ 1,
+ "`Scalar` layout for multiple-field type in {inner:#?}",
+ );
+ let offset = inner.layout.fields().offset(0);
+ let field = inner.field(cx, 0);
+ // The field should be at the right offset, and match the `scalar` layout.
+ assert_eq!(
+ offset,
+ Size::ZERO,
+ "`Scalar` field at non-0 offset in {inner:#?}",
+ );
+ assert_eq!(
+ field.size, size,
+ "`Scalar` field with bad size in {inner:#?}",
+ );
+ assert_eq!(
+ field.align.abi, align,
+ "`Scalar` field with bad align in {inner:#?}",
+ );
+ assert!(
+ matches!(field.abi, Abi::Scalar(_)),
+ "`Scalar` field with bad ABI in {inner:#?}",
+ );
+ }
+ _ => {
+ panic!("`Scalar` layout for non-primitive non-enum type {}", inner.ty);
+ }
+ }
+ }
+ Abi::ScalarPair(scalar1, scalar2) => {
+ // Sanity-check scalar pairs. These are a bit more flexible and support
+ // padding, but we can at least ensure both fields actually fit into the layout
+ // and the alignment requirement has not been weakened.
+ let size1 = scalar1.size(cx);
+ let align1 = scalar1.align(cx).abi;
+ let size2 = scalar2.size(cx);
+ let align2 = scalar2.align(cx).abi;
+ assert!(
+ layout.layout.align().abi >= cmp::max(align1, align2),
+ "alignment mismatch between ABI and layout in {layout:#?}",
+ );
+ let field2_offset = size1.align_to(align2);
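+ // (Illustrative example: if the first scalar is 1 byte and the second needs
+ // 4-byte alignment, the second field must start at offset 4 and the whole
+ // layout must be at least 4 + size2 bytes.)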
+ assert!(
+ layout.layout.size() >= field2_offset + size2,
+ "size mismatch between ABI and layout in {layout:#?}"
+ );
+ // Check that the underlying pair of fields matches.
+ let inner = skip_newtypes(cx, layout);
+ assert!(
+ matches!(inner.layout.abi(), Abi::ScalarPair(..)),
+ "`ScalarPair` type {} is newtype around non-`ScalarPair` type {}",
+ layout.ty,
+ inner.ty
+ );
+ if matches!(inner.layout.variants(), Variants::Multiple { .. }) {
+ // FIXME: ScalarPair for enums is enormously complicated and it is very hard
+ // to check anything about them.
+ return;
+ }
+ match inner.layout.fields() {
+ FieldsShape::Arbitrary { .. } => {
+ // Checked below.
+ }
+ FieldsShape::Union(..) => {
+ // FIXME: I guess we could also check something here? Like, look at all fields?
+ return;
+ }
+ _ => {
+ panic!("`ScalarPair` layout with unexpected field shape in {inner:#?}");
+ }
+ }
+ let mut fields = non_zst_fields(cx, &inner);
+ let (offset1, field1) = fields.next().unwrap_or_else(|| {
+ panic!("`ScalarPair` layout for type with not even one non-ZST field: {inner:#?}")
+ });
+ let (offset2, field2) = fields.next().unwrap_or_else(|| {
+ panic!("`ScalarPair` layout for type with less than two non-ZST fields: {inner:#?}")
+ });
+ assert!(
+ fields.next().is_none(),
+ "`ScalarPair` layout for type with at least three non-ZST fields: {inner:#?}"
+ );
+ // The fields might be in opposite order.
+ let (offset1, field1, offset2, field2) = if offset1 <= offset2 {
+ (offset1, field1, offset2, field2)
+ } else {
+ (offset2, field2, offset1, field1)
+ };
+ // The fields should be at the right offset, and match the `scalar` layout.
+ assert_eq!(
+ offset1,
+ Size::ZERO,
+ "`ScalarPair` first field at non-0 offset in {inner:#?}",
+ );
+ assert_eq!(
+ field1.size, size1,
+ "`ScalarPair` first field with bad size in {inner:#?}",
+ );
+ assert_eq!(
+ field1.align.abi, align1,
+ "`ScalarPair` first field with bad align in {inner:#?}",
+ );
+ assert!(
+ matches!(field1.abi, Abi::Scalar(_)),
+ "`ScalarPair` first field with bad ABI in {inner:#?}",
+ );
+ assert_eq!(
+ offset2, field2_offset,
+ "`ScalarPair` second field at bad offset in {inner:#?}",
+ );
+ assert_eq!(
+ field2.size, size2,
+ "`ScalarPair` second field with bad size in {inner:#?}",
+ );
+ assert_eq!(
+ field2.align.abi, align2,
+ "`ScalarPair` second field with bad align in {inner:#?}",
+ );
+ assert!(
+ matches!(field2.abi, Abi::Scalar(_)),
+ "`ScalarPair` second field with bad ABI in {inner:#?}",
+ );
+ }
+ Abi::Vector { count, element } => {
+ // No padding in vectors. Alignment can be strengthened, though.
+ assert!(
+ layout.layout.align().abi >= element.align(cx).abi,
+ "alignment mismatch between ABI and layout in {layout:#?}"
+ );
+ let size = element.size(cx) * count;
+ assert_eq!(
+ layout.layout.size(),
+ size.align_to(cx.data_layout().vector_align(size).abi),
+ "size mismatch between ABI and layout in {layout:#?}"
+ );
+ }
+ Abi::Uninhabited | Abi::Aggregate { .. } => {} // Nothing to check.
+ }
+ }
+
+ check_layout_abi(cx, layout);
+
+ if let Variants::Multiple { variants, .. } = &layout.variants {
+ for variant in variants.iter() {
+ // No nested "multiple".
+ assert!(matches!(variant.variants(), Variants::Single { .. }));
+ // Variants should have a size no larger than the full type,
+ // and likewise for alignment.
+ if variant.size() > layout.size {
+ bug!(
+ "Type with size {} bytes has variant with size {} bytes: {layout:#?}",
+ layout.size.bytes(),
+ variant.size().bytes(),
+ )
+ }
+ if variant.align().abi > layout.align.abi {
+ bug!(
+ "Type with alignment {} bytes has variant with alignment {} bytes: {layout:#?}",
+ layout.align.abi.bytes(),
+ variant.align().abi.bytes(),
+ )
+ }
+ // Skip empty variants.
+ if variant.size() == Size::ZERO
+ || variant.fields().count() == 0
+ || variant.abi().is_uninhabited()
+ {
+ // These are never actually accessed anyway, so we can skip the coherence check
+ // for them. They also fail that check, since they have
+ // `Aggregate`/`Uninhabited` ABI even when the main type is
+ // `Scalar`/`ScalarPair`. (Note that sometimes, variants with fields have size
+ // 0, and sometimes, variants without fields have non-0 size.)
+ continue;
+ }
+ // The top-level ABI and the ABI of the variants should be coherent.
+ let scalar_coherent = |s1: Scalar, s2: Scalar| {
+ s1.size(cx) == s2.size(cx) && s1.align(cx) == s2.align(cx)
+ };
+ let abi_coherent = match (layout.abi, variant.abi()) {
+ (Abi::Scalar(s1), Abi::Scalar(s2)) => scalar_coherent(s1, s2),
+ (Abi::ScalarPair(a1, b1), Abi::ScalarPair(a2, b2)) => {
+ scalar_coherent(a1, a2) && scalar_coherent(b1, b2)
+ }
+ (Abi::Uninhabited, _) => true,
+ (Abi::Aggregate { .. }, _) => true,
+ _ => false,
+ };
+ if !abi_coherent {
+ bug!(
+ "Variant ABI is incompatible with top-level ABI:\nvariant={:#?}\nTop-level: {layout:#?}",
+ variant
+ );
+ }
+ }
+ }
+ }
+}
diff --git a/compiler/rustc_ty_utils/src/lib.rs b/compiler/rustc_ty_utils/src/lib.rs
index 09f5c2a11..cce5a79dd 100644
--- a/compiler/rustc_ty_utils/src/lib.rs
+++ b/compiler/rustc_ty_utils/src/lib.rs
@@ -5,8 +5,8 @@
//! This API is completely unstable and subject to change.
#![doc(html_root_url = "https://doc.rust-lang.org/nightly/nightly-rustc/")]
+#![feature(let_chains)]
#![feature(control_flow_enum)]
-#![feature(let_else)]
#![feature(never_type)]
#![feature(box_patterns)]
#![recursion_limit = "256"]
@@ -18,19 +18,28 @@ extern crate tracing;
use rustc_middle::ty::query::Providers;
+mod abi;
mod assoc;
mod common_traits;
mod consts;
+mod errors;
+mod implied_bounds;
pub mod instance;
+mod layout;
+mod layout_sanity_check;
mod needs_drop;
pub mod representability;
mod ty;
pub fn provide(providers: &mut Providers) {
+ abi::provide(providers);
assoc::provide(providers);
common_traits::provide(providers);
consts::provide(providers);
+ implied_bounds::provide(providers);
+ layout::provide(providers);
needs_drop::provide(providers);
+ representability::provide(providers);
ty::provide(providers);
instance::provide(providers);
}
diff --git a/compiler/rustc_ty_utils/src/needs_drop.rs b/compiler/rustc_ty_utils/src/needs_drop.rs
index 9ad44d14d..024dcd591 100644
--- a/compiler/rustc_ty_utils/src/needs_drop.rs
+++ b/compiler/rustc_ty_utils/src/needs_drop.rs
@@ -2,13 +2,14 @@
use rustc_data_structures::fx::FxHashSet;
use rustc_hir::def_id::DefId;
-use rustc_middle::ty::subst::Subst;
use rustc_middle::ty::subst::SubstsRef;
use rustc_middle::ty::util::{needs_drop_components, AlwaysRequiresDrop};
use rustc_middle::ty::{self, EarlyBinder, Ty, TyCtxt};
use rustc_session::Limit;
use rustc_span::{sym, DUMMY_SP};
+use crate::errors::NeedsDropOverflow;
+
type NeedsDropResult<T> = Result<T, AlwaysRequiresDrop>;
fn needs_drop_raw<'tcx>(tcx: TyCtxt<'tcx>, query: ty::ParamEnvAnd<'tcx, Ty<'tcx>>) -> bool {
@@ -90,10 +91,7 @@ where
if !self.recursion_limit.value_within_limit(level) {
// Not having a `Span` isn't great. But there's hopefully some other
// recursion limit error as well.
- tcx.sess.span_err(
- DUMMY_SP,
- &format!("overflow while checking whether `{}` requires drop", self.query_ty),
- );
+ tcx.sess.emit_err(NeedsDropOverflow { query_ty: self.query_ty });
return Some(Err(AlwaysRequiresDrop));
}
@@ -111,7 +109,7 @@ where
for component in components {
match *component.kind() {
- _ if component.is_copy_modulo_regions(tcx.at(DUMMY_SP), self.param_env) => (),
+ _ if component.is_copy_modulo_regions(tcx, self.param_env) => (),
ty::Closure(_, substs) => {
queue_type(self, substs.as_closure().tupled_upvars_ty());
@@ -266,7 +264,7 @@ fn adt_consider_insignificant_dtor<'tcx>(
if is_marked_insig {
// In some cases like `std::collections::HashMap` where the struct is a wrapper around
// a type that is a Drop type, and the wrapped type (eg: `hashbrown::HashMap`) lies
- // outside stdlib, we might choose to still annotate the the wrapper (std HashMap) with
+ // outside stdlib, we might choose to still annotate the wrapper (std HashMap) with
// `rustc_insignificant_dtor`, even if the type itself doesn't have a `Drop` impl.
Some(DtorType::Insignificant)
} else if adt_def.destructor(tcx).is_some() {
diff --git a/compiler/rustc_ty_utils/src/representability.rs b/compiler/rustc_ty_utils/src/representability.rs
index eded78916..7f48fb804 100644
--- a/compiler/rustc_ty_utils/src/representability.rs
+++ b/compiler/rustc_ty_utils/src/representability.rs
@@ -1,386 +1,119 @@
-//! Check whether a type is representable.
-use rustc_data_structures::fx::FxHashMap;
-use rustc_hir as hir;
-use rustc_middle::ty::{self, Ty, TyCtxt};
-use rustc_span::Span;
-use std::cmp;
+#![allow(rustc::untranslatable_diagnostic, rustc::diagnostic_outside_of_impl)]
-/// Describes whether a type is representable. For types that are not
-/// representable, 'SelfRecursive' and 'ContainsRecursive' are used to
-/// distinguish between types that are recursive with themselves and types that
-/// contain a different recursive type. These cases can therefore be treated
-/// differently when reporting errors.
-///
-/// The ordering of the cases is significant. They are sorted so that cmp::max
-/// will keep the "more erroneous" of two values.
-#[derive(Clone, PartialOrd, Ord, Eq, PartialEq, Debug)]
-pub enum Representability {
- Representable,
- ContainsRecursive,
- /// Return a list of types that are included in themselves:
- /// the spans where they are self-included, and (if found)
- /// the HirId of the FieldDef that defines the self-inclusion.
- SelfRecursive(Vec<(Span, Option<hir::HirId>)>),
-}
+use rustc_hir::def::DefKind;
+use rustc_index::bit_set::BitSet;
+use rustc_middle::ty::query::Providers;
+use rustc_middle::ty::{self, Representability, Ty, TyCtxt};
+use rustc_span::def_id::{DefId, LocalDefId};
-/// Check whether a type is representable. This means it cannot contain unboxed
-/// structural recursion. This check is needed for structs and enums.
-pub fn ty_is_representable<'tcx>(
- tcx: TyCtxt<'tcx>,
- ty: Ty<'tcx>,
- sp: Span,
- field_id: Option<hir::HirId>,
-) -> Representability {
- debug!("is_type_representable: {:?}", ty);
- // To avoid a stack overflow when checking an enum variant or struct that
- // contains a different, structurally recursive type, maintain a stack of
- // seen types and check recursion for each of them (issues #3008, #3779,
- // #74224, #84611). `shadow_seen` contains the full stack and `seen` only
- // the one for the current type (e.g. if we have structs A and B, B contains
- // a field of type A, and we're currently looking at B, then `seen` will be
- // cleared when recursing to check A, but `shadow_seen` won't, so that we
- // can catch cases of mutual recursion where A also contains B).
- let mut seen: Vec<Ty<'_>> = Vec::new();
- let mut shadow_seen: Vec<ty::AdtDef<'tcx>> = Vec::new();
- let mut representable_cache = FxHashMap::default();
- let mut force_result = false;
- let r = is_type_structurally_recursive(
- tcx,
- &mut seen,
- &mut shadow_seen,
- &mut representable_cache,
- ty,
- sp,
- field_id,
- &mut force_result,
- );
- debug!("is_type_representable: {:?} is {:?}", ty, r);
- r
+pub fn provide(providers: &mut Providers) {
+ *providers =
+ Providers { representability, representability_adt_ty, params_in_repr, ..*providers };
}
-// Iterate until something non-representable is found
-fn fold_repr<It: Iterator<Item = Representability>>(iter: It) -> Representability {
- iter.fold(Representability::Representable, |r1, r2| match (r1, r2) {
- (Representability::SelfRecursive(v1), Representability::SelfRecursive(v2)) => {
- Representability::SelfRecursive(v1.into_iter().chain(v2).collect())
+macro_rules! rtry {
+ ($e:expr) => {
+ match $e {
+ e @ Representability::Infinite => return e,
+ Representability::Representable => {}
}
- (r1, r2) => cmp::max(r1, r2),
- })
+ };
}
-fn are_inner_types_recursive<'tcx>(
- tcx: TyCtxt<'tcx>,
- seen: &mut Vec<Ty<'tcx>>,
- shadow_seen: &mut Vec<ty::AdtDef<'tcx>>,
- representable_cache: &mut FxHashMap<Ty<'tcx>, Representability>,
- ty: Ty<'tcx>,
- sp: Span,
- field_id: Option<hir::HirId>,
- force_result: &mut bool,
-) -> Representability {
- debug!("are_inner_types_recursive({:?}, {:?}, {:?})", ty, seen, shadow_seen);
- match ty.kind() {
- ty::Tuple(fields) => {
- // Find non representable
- fold_repr(fields.iter().map(|ty| {
- is_type_structurally_recursive(
- tcx,
- seen,
- shadow_seen,
- representable_cache,
- ty,
- sp,
- field_id,
- force_result,
- )
- }))
- }
- // Fixed-length vectors.
- // FIXME(#11924) Behavior undecided for zero-length vectors.
- ty::Array(ty, _) => is_type_structurally_recursive(
- tcx,
- seen,
- shadow_seen,
- representable_cache,
- *ty,
- sp,
- field_id,
- force_result,
- ),
- ty::Adt(def, substs) => {
- // Find non representable fields with their spans
- fold_repr(def.all_fields().map(|field| {
- let ty = field.ty(tcx, substs);
- let (sp, field_id) = match field
- .did
- .as_local()
- .map(|id| tcx.hir().local_def_id_to_hir_id(id))
- .and_then(|id| tcx.hir().find(id))
- {
- Some(hir::Node::Field(field)) => (field.ty.span, Some(field.hir_id)),
- _ => (sp, field_id),
- };
-
- let mut result = None;
-
- // First, we check whether the field type per se is representable.
- // This catches cases as in #74224 and #84611. There is a special
- // case related to mutual recursion, though; consider this example:
- //
- // struct A<T> {
- // z: T,
- // x: B<T>,
- // }
- //
- // struct B<T> {
- // y: A<T>
- // }
- //
- // Here, without the following special case, both A and B are
- // ContainsRecursive, which is a problem because we only report
- // errors for SelfRecursive. We fix this by detecting this special
- // case (shadow_seen.first() is the type we are originally
- // interested in, and if we ever encounter the same AdtDef again,
- // we know that it must be SelfRecursive) and "forcibly" returning
- // SelfRecursive (by setting force_result, which tells the calling
- // invocations of are_inner_types_representable to forward the
- // result without adjusting).
- if shadow_seen.len() > seen.len() && shadow_seen.first() == Some(def) {
- *force_result = true;
- result = Some(Representability::SelfRecursive(vec![(sp, field_id)]));
- }
-
- if result == None {
- result = Some(Representability::Representable);
-
- // Now, we check whether the field types per se are representable, e.g.
- // for struct Foo { x: Option<Foo> }, we first check whether Option<_>
- // by itself is representable (which it is), and the nesting of Foo
- // will be detected later. This is necessary for #74224 and #84611.
-
- // If we have encountered an ADT definition that we have not seen
- // before (no need to check them twice), recurse to see whether that
- // definition is SelfRecursive. If so, we must be ContainsRecursive.
- if shadow_seen.len() > 1
- && !shadow_seen
- .iter()
- .take(shadow_seen.len() - 1)
- .any(|seen_def| seen_def == def)
- {
- let adt_def_id = def.did();
- let raw_adt_ty = tcx.type_of(adt_def_id);
- debug!("are_inner_types_recursive: checking nested type: {:?}", raw_adt_ty);
-
- // Check independently whether the ADT is SelfRecursive. If so,
- // we must be ContainsRecursive (except for the special case
- // mentioned above).
- let mut nested_seen: Vec<Ty<'_>> = vec![];
- result = Some(
- match is_type_structurally_recursive(
- tcx,
- &mut nested_seen,
- shadow_seen,
- representable_cache,
- raw_adt_ty,
- sp,
- field_id,
- force_result,
- ) {
- Representability::SelfRecursive(_) => {
- if *force_result {
- Representability::SelfRecursive(vec![(sp, field_id)])
- } else {
- Representability::ContainsRecursive
- }
- }
- x => x,
- },
- );
- }
-
- // We only enter the following block if the type looks representable
- // so far. This is necessary for cases such as this one (#74224):
- //
- // struct A<T> {
- // x: T,
- // y: A<A<T>>,
- // }
- //
- // struct B {
- // z: A<usize>
- // }
- //
- // When checking B, we recurse into A and check field y of type
- // A<A<usize>>. We haven't seen this exact type before, so we recurse
- // into A<A<usize>>, which contains, A<A<A<usize>>>, and so forth,
- // ad infinitum. We can prevent this from happening by first checking
- // A separately (the code above) and only checking for nested Bs if
- // A actually looks representable (which it wouldn't in this example).
- if result == Some(Representability::Representable) {
- // Now, even if the type is representable (e.g. Option<_>),
- // it might still contribute to a recursive type, e.g.:
- // struct Foo { x: Option<Option<Foo>> }
- // These cases are handled by passing the full `seen`
- // stack to is_type_structurally_recursive (instead of the
- // empty `nested_seen` above):
- result = Some(
- match is_type_structurally_recursive(
- tcx,
- seen,
- shadow_seen,
- representable_cache,
- ty,
- sp,
- field_id,
- force_result,
- ) {
- Representability::SelfRecursive(_) => {
- Representability::SelfRecursive(vec![(sp, field_id)])
- }
- x => x,
- },
- );
- }
+fn representability(tcx: TyCtxt<'_>, def_id: LocalDefId) -> Representability {
+ match tcx.def_kind(def_id) {
+ DefKind::Struct | DefKind::Union | DefKind::Enum => {
+ let adt_def = tcx.adt_def(def_id);
+ for variant in adt_def.variants() {
+ for field in variant.fields.iter() {
+ rtry!(tcx.representability(field.did.expect_local()));
}
-
- result.unwrap()
- }))
- }
- ty::Closure(..) => {
- // this check is run on type definitions, so we don't expect
- // to see closure types
- bug!("requires check invoked on inapplicable type: {:?}", ty)
+ }
+ Representability::Representable
}
- _ => Representability::Representable,
+ DefKind::Field => representability_ty(tcx, tcx.type_of(def_id)),
+ def_kind => bug!("unexpected {def_kind:?}"),
}
}
-fn same_adt<'tcx>(ty: Ty<'tcx>, def: ty::AdtDef<'tcx>) -> bool {
+fn representability_ty<'tcx>(tcx: TyCtxt<'tcx>, ty: Ty<'tcx>) -> Representability {
match *ty.kind() {
- ty::Adt(ty_def, _) => ty_def == def,
- _ => false,
+ ty::Adt(..) => tcx.representability_adt_ty(ty),
+ // FIXME(#11924) allow zero-length arrays?
+ ty::Array(ty, _) => representability_ty(tcx, ty),
+ ty::Tuple(tys) => {
+ for ty in tys {
+ rtry!(representability_ty(tcx, ty));
+ }
+ Representability::Representable
+ }
+ _ => Representability::Representable,
}
}
-// Does the type `ty` directly (without indirection through a pointer)
-// contain any types on stack `seen`?
-fn is_type_structurally_recursive<'tcx>(
- tcx: TyCtxt<'tcx>,
- seen: &mut Vec<Ty<'tcx>>,
- shadow_seen: &mut Vec<ty::AdtDef<'tcx>>,
- representable_cache: &mut FxHashMap<Ty<'tcx>, Representability>,
- ty: Ty<'tcx>,
- sp: Span,
- field_id: Option<hir::HirId>,
- force_result: &mut bool,
-) -> Representability {
- debug!("is_type_structurally_recursive: {:?} {:?} {:?}", ty, sp, field_id);
- if let Some(representability) = representable_cache.get(&ty) {
- debug!(
- "is_type_structurally_recursive: {:?} {:?} {:?} - (cached) {:?}",
- ty, sp, field_id, representability
- );
- return representability.clone();
+/*
+The reason for this being a separate query is very subtle:
+Consider this infinitely sized struct: `struct Foo(Box<Foo>, Bar<Foo>)`:
+When calling representability(Foo), a query cycle will occur:
+ representability(Foo)
+ -> representability_adt_ty(Bar<Foo>)
+ -> representability(Foo)
+For the diagnostic output (in `Value::from_cycle_error`), we want to detect that
+the `Foo` in the *second* field of the struct is culpable. This requires
+traversing the HIR of the struct and calling `params_in_repr(Bar)`. But we can't
+call params_in_repr for a given type unless it is known to be representable.
+params_in_repr will cycle/panic on infinitely sized types. Looking at the query
+cycle above, we know that `Bar` is representable because
+representability_adt_ty(Bar<..>) is in the cycle and representability(Bar) is
+*not* in the cycle.
+*/
+fn representability_adt_ty<'tcx>(tcx: TyCtxt<'tcx>, ty: Ty<'tcx>) -> Representability {
+ let ty::Adt(adt, substs) = ty.kind() else { bug!("expected adt") };
+ if let Some(def_id) = adt.did().as_local() {
+ rtry!(tcx.representability(def_id));
}
-
- let representability = is_type_structurally_recursive_inner(
- tcx,
- seen,
- shadow_seen,
- representable_cache,
- ty,
- sp,
- field_id,
- force_result,
- );
-
- representable_cache.insert(ty, representability.clone());
- representability
+ // At this point, we know that the item of the ADT type is representable;
+ // but the type parameters may cause a cycle with an upstream type
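+ // (Hypothetical example, added for illustration: for `struct S<A, B>(Box<A>, B)`,
+ // only `B` ends up in `params_in_repr(S)`, since `A` sits behind a pointer and
+ // cannot make `S` infinitely sized.)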
+ let params_in_repr = tcx.params_in_repr(adt.did());
+ for (i, subst) in substs.iter().enumerate() {
+ if let ty::GenericArgKind::Type(ty) = subst.unpack() {
+ if params_in_repr.contains(i as u32) {
+ rtry!(representability_ty(tcx, ty));
+ }
+ }
+ }
+ Representability::Representable
}
-fn is_type_structurally_recursive_inner<'tcx>(
- tcx: TyCtxt<'tcx>,
- seen: &mut Vec<Ty<'tcx>>,
- shadow_seen: &mut Vec<ty::AdtDef<'tcx>>,
- representable_cache: &mut FxHashMap<Ty<'tcx>, Representability>,
- ty: Ty<'tcx>,
- sp: Span,
- field_id: Option<hir::HirId>,
- force_result: &mut bool,
-) -> Representability {
- match ty.kind() {
- ty::Adt(def, _) => {
- {
- debug!("is_type_structurally_recursive_inner: adt: {:?}, seen: {:?}", ty, seen);
-
- // Iterate through stack of previously seen types.
- let mut iter = seen.iter();
-
- // The first item in `seen` is the type we are actually curious about.
- // We want to return SelfRecursive if this type contains itself.
- // It is important that we DON'T take generic parameters into account
- // for this check, so that Bar<T> in this example counts as SelfRecursive:
- //
- // struct Foo;
- // struct Bar<T> { x: Bar<Foo> }
-
- if let Some(&seen_adt) = iter.next() {
- if same_adt(seen_adt, *def) {
- debug!("SelfRecursive: {:?} contains {:?}", seen_adt, ty);
- return Representability::SelfRecursive(vec![(sp, field_id)]);
- }
- }
-
- // We also need to know whether the first item contains other types
- // that are structurally recursive. If we don't catch this case, we
- // will recurse infinitely for some inputs.
- //
- // It is important that we DO take generic parameters into account
- // here, because nesting e.g. Options is allowed (as long as the
- // definition of Option doesn't itself include an Option field, which
- // would be a case of SelfRecursive above). The following, too, counts
- // as SelfRecursive:
- //
- // struct Foo { Option<Option<Foo>> }
+fn params_in_repr(tcx: TyCtxt<'_>, def_id: DefId) -> BitSet<u32> {
+ let adt_def = tcx.adt_def(def_id);
+ let generics = tcx.generics_of(def_id);
+ let mut params_in_repr = BitSet::new_empty(generics.params.len());
+ for variant in adt_def.variants() {
+ for field in variant.fields.iter() {
+ params_in_repr_ty(tcx, tcx.type_of(field.did), &mut params_in_repr);
+ }
+ }
+ params_in_repr
+}
- for &seen_adt in iter {
- if ty == seen_adt {
- debug!("ContainsRecursive: {:?} contains {:?}", seen_adt, ty);
- return Representability::ContainsRecursive;
+fn params_in_repr_ty<'tcx>(tcx: TyCtxt<'tcx>, ty: Ty<'tcx>, params_in_repr: &mut BitSet<u32>) {
+ match *ty.kind() {
+ ty::Adt(adt, substs) => {
+ let inner_params_in_repr = tcx.params_in_repr(adt.did());
+ for (i, subst) in substs.iter().enumerate() {
+ if let ty::GenericArgKind::Type(ty) = subst.unpack() {
+ if inner_params_in_repr.contains(i as u32) {
+ params_in_repr_ty(tcx, ty, params_in_repr);
}
}
}
-
- // For structs and enums, track all previously seen types by pushing them
- // onto the 'seen' stack.
- seen.push(ty);
- shadow_seen.push(*def);
- let out = are_inner_types_recursive(
- tcx,
- seen,
- shadow_seen,
- representable_cache,
- ty,
- sp,
- field_id,
- force_result,
- );
- shadow_seen.pop();
- seen.pop();
- out
}
- _ => {
- // No need to push in other cases.
- are_inner_types_recursive(
- tcx,
- seen,
- shadow_seen,
- representable_cache,
- ty,
- sp,
- field_id,
- force_result,
- )
+ ty::Array(ty, _) => params_in_repr_ty(tcx, ty, params_in_repr),
+ ty::Tuple(tys) => tys.iter().for_each(|ty| params_in_repr_ty(tcx, ty, params_in_repr)),
+ ty::Param(param) => {
+ params_in_repr.insert(param.index);
}
+ _ => {}
}
}
diff --git a/compiler/rustc_ty_utils/src/ty.rs b/compiler/rustc_ty_utils/src/ty.rs
index db0d45b86..3eebb4ace 100644
--- a/compiler/rustc_ty_utils/src/ty.rs
+++ b/compiler/rustc_ty_utils/src/ty.rs
@@ -1,7 +1,6 @@
use rustc_data_structures::fx::FxIndexSet;
use rustc_hir as hir;
use rustc_hir::def_id::DefId;
-use rustc_middle::ty::subst::Subst;
use rustc_middle::ty::{self, Binder, Predicate, PredicateKind, ToPredicate, Ty, TyCtxt};
use rustc_trait_selection::traits;
@@ -86,9 +85,13 @@ fn impl_defaultness(tcx: TyCtxt<'_>, def_id: DefId) -> hir::Defaultness {
/// - a type parameter or projection whose Sizedness can't be known
/// - a tuple of type parameters or projections, if there are multiple
/// such.
-/// - an Error, if a type contained itself. The representability
-/// check should catch this case.
-fn adt_sized_constraint(tcx: TyCtxt<'_>, def_id: DefId) -> ty::AdtSizedConstraint<'_> {
+/// - an Error, if a type is infinitely sized
+fn adt_sized_constraint(tcx: TyCtxt<'_>, def_id: DefId) -> &[Ty<'_>] {
+ if let Some(def_id) = def_id.as_local() {
+ if matches!(tcx.representability(def_id), ty::Representability::Infinite) {
+ return tcx.intern_type_list(&[tcx.ty_error()]);
+ }
+ }
let def = tcx.adt_def(def_id);
let result = tcx.mk_type_list(
@@ -100,11 +103,10 @@ fn adt_sized_constraint(tcx: TyCtxt<'_>, def_id: DefId) -> ty::AdtSizedConstrain
debug!("adt_sized_constraint: {:?} => {:?}", def, result);
- ty::AdtSizedConstraint(result)
+ result
}
/// See `ParamEnv` struct definition for details.
-#[instrument(level = "debug", skip(tcx))]
fn param_env(tcx: TyCtxt<'_>, def_id: DefId) -> ty::ParamEnv<'_> {
// The param_env of an impl Trait type is its defining function's param_env
if let Some(parent) = ty::is_impl_trait_defn(tcx, def_id) {
@@ -135,6 +137,7 @@ fn param_env(tcx: TyCtxt<'_>, def_id: DefId) -> ty::ParamEnv<'_> {
let local_did = def_id.as_local();
let hir_id = local_did.map(|def_id| tcx.hir().local_def_id_to_hir_id(def_id));
+ // FIXME(consts): This is not exactly in line with the constness query.
let constness = match hir_id {
Some(hir_id) => match tcx.hir().get(hir_id) {
hir::Node::TraitItem(hir::TraitItem { kind: hir::TraitItemKind::Fn(..), .. })
@@ -163,7 +166,7 @@ fn param_env(tcx: TyCtxt<'_>, def_id: DefId) -> ty::ParamEnv<'_> {
}) => hir::Constness::Const,
hir::Node::ImplItem(hir::ImplItem {
- kind: hir::ImplItemKind::TyAlias(..) | hir::ImplItemKind::Fn(..),
+ kind: hir::ImplItemKind::Type(..) | hir::ImplItemKind::Fn(..),
..
}) => {
let parent_hir_id = tcx.hir().get_parent_node(hir_id);
@@ -199,6 +202,10 @@ fn param_env(tcx: TyCtxt<'_>, def_id: DefId) -> ty::ParamEnv<'_> {
_ => hir::Constness::NotConst,
},
+ // FIXME(consts): It's suspicious that a param-env for a foreign item
+ // will always have NotConst param-env, though we don't typically use
+ // that param-env for anything meaningful right now, so it's likely
+ // not an issue.
None => hir::Constness::NotConst,
};
@@ -348,7 +355,7 @@ fn instance_def_size_estimate<'tcx>(
match instance_def {
InstanceDef::Item(..) | InstanceDef::DropGlue(..) => {
let mir = tcx.instance_mir(instance_def);
- mir.basic_blocks().iter().map(|bb| bb.statements.len() + 1).sum()
+ mir.basic_blocks.iter().map(|bb| bb.statements.len() + 1).sum()
}
// Estimate the size of other compiler-generated shims to be 1.
_ => 1,
@@ -390,7 +397,7 @@ fn issue33140_self_ty(tcx: TyCtxt<'_>, def_id: DefId) -> Option<Ty<'_>> {
let self_ty = trait_ref.self_ty();
let self_ty_matches = match self_ty.kind() {
- ty::Dynamic(ref data, re) if re.is_static() => data.principal().is_none(),
+ ty::Dynamic(ref data, re, _) if re.is_static() => data.principal().is_none(),
_ => false,
};
@@ -410,7 +417,6 @@ fn asyncness(tcx: TyCtxt<'_>, def_id: DefId) -> hir::IsAsync {
}
/// Don't call this directly: use ``tcx.conservative_is_privately_uninhabited`` instead.
-#[instrument(level = "debug", skip(tcx))]
pub fn conservative_is_privately_uninhabited_raw<'tcx>(
tcx: TyCtxt<'tcx>,
param_env_and: ty::ParamEnvAnd<'tcx, Ty<'tcx>>,
diff --git a/compiler/rustc_type_ir/Cargo.toml b/compiler/rustc_type_ir/Cargo.toml
index 5aa3cf017..c4008e9b6 100644
--- a/compiler/rustc_type_ir/Cargo.toml
+++ b/compiler/rustc_type_ir/Cargo.toml
@@ -4,7 +4,6 @@ version = "0.0.0"
edition = "2021"
[lib]
-doctest = false
[dependencies]
bitflags = "1.2.1"
diff --git a/compiler/rustc_type_ir/src/lib.rs b/compiler/rustc_type_ir/src/lib.rs
index 791e9e0f5..7fbe78aa5 100644
--- a/compiler/rustc_type_ir/src/lib.rs
+++ b/compiler/rustc_type_ir/src/lib.rs
@@ -1,6 +1,8 @@
#![feature(fmt_helpers_for_derive)]
#![feature(min_specialization)]
#![feature(rustc_attrs)]
+#![deny(rustc::untranslatable_diagnostic)]
+#![deny(rustc::diagnostic_outside_of_impl)]
#[macro_use]
extern crate bitflags;
@@ -21,6 +23,9 @@ pub mod sty;
pub use codec::*;
pub use sty::*;
+/// Needed so we can use #[derive(HashStable_Generic)]
+pub trait HashStableContext {}
+
pub trait Interner {
type AdtDef: Clone + Debug + Hash + PartialEq + Eq + PartialOrd + Ord;
type SubstsRef: Clone + Debug + Hash + PartialEq + Eq + PartialOrd + Ord;
@@ -293,6 +298,7 @@ rustc_index::newtype_index! {
/// is the outer fn.
///
/// [dbi]: https://en.wikipedia.org/wiki/De_Bruijn_index
+ #[derive(HashStable_Generic)]
pub struct DebruijnIndex {
DEBUG_FORMAT = "DebruijnIndex({})",
const INNERMOST = 0,
@@ -364,7 +370,7 @@ impl DebruijnIndex {
}
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
-#[derive(Encodable, Decodable)]
+#[derive(Encodable, Decodable, HashStable_Generic)]
pub enum IntTy {
Isize,
I8,
@@ -411,7 +417,7 @@ impl IntTy {
}
#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Copy, Debug)]
-#[derive(Encodable, Decodable)]
+#[derive(Encodable, Decodable, HashStable_Generic)]
pub enum UintTy {
Usize,
U8,
@@ -458,7 +464,7 @@ impl UintTy {
}
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
-#[derive(Encodable, Decodable)]
+#[derive(Encodable, Decodable, HashStable_Generic)]
pub enum FloatTy {
F32,
F64,
@@ -595,7 +601,7 @@ impl UnifyKey for FloatVid {
}
}
-#[derive(Copy, Clone, PartialEq, Decodable, Encodable, Hash)]
+#[derive(Copy, Clone, PartialEq, Decodable, Encodable, Hash, HashStable_Generic)]
#[rustc_pass_by_value]
pub enum Variance {
Covariant, // T<A> <: T<B> iff A <: B -- e.g., function return type
@@ -664,49 +670,19 @@ impl Variance {
}
}
-impl<CTX> HashStable<CTX> for DebruijnIndex {
- fn hash_stable(&self, ctx: &mut CTX, hasher: &mut StableHasher) {
- self.as_u32().hash_stable(ctx, hasher);
- }
-}
-
-impl<CTX> HashStable<CTX> for IntTy {
- fn hash_stable(&self, ctx: &mut CTX, hasher: &mut StableHasher) {
- discriminant(self).hash_stable(ctx, hasher);
- }
-}
-
-impl<CTX> HashStable<CTX> for UintTy {
- fn hash_stable(&self, ctx: &mut CTX, hasher: &mut StableHasher) {
- discriminant(self).hash_stable(ctx, hasher);
- }
-}
-
-impl<CTX> HashStable<CTX> for FloatTy {
- fn hash_stable(&self, ctx: &mut CTX, hasher: &mut StableHasher) {
- discriminant(self).hash_stable(ctx, hasher);
- }
-}
-
impl<CTX> HashStable<CTX> for InferTy {
fn hash_stable(&self, ctx: &mut CTX, hasher: &mut StableHasher) {
use InferTy::*;
discriminant(self).hash_stable(ctx, hasher);
match self {
- TyVar(v) => v.as_u32().hash_stable(ctx, hasher),
- IntVar(v) => v.index.hash_stable(ctx, hasher),
- FloatVar(v) => v.index.hash_stable(ctx, hasher),
+ TyVar(_) | IntVar(_) | FloatVar(_) => {
+ panic!("type variables should not be hashed: {self:?}")
+ }
FreshTy(v) | FreshIntTy(v) | FreshFloatTy(v) => v.hash_stable(ctx, hasher),
}
}
}
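Sketch of the intent behind the new panicking arms (assumed helper, not part of the patch): unresolved inference variables must never reach stable hashing, while the canonical `Fresh*` variants still may.

use rustc_type_ir::InferTy;

// Returns true only for the `Fresh*` variants, mirroring the arm that still
// hashes above; the `*Var` variants are expected to be resolved away first.
fn can_be_stably_hashed(t: InferTy) -> bool {
    !matches!(t, InferTy::TyVar(_) | InferTy::IntVar(_) | InferTy::FloatVar(_))
}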
-impl<CTX> HashStable<CTX> for Variance {
- fn hash_stable(&self, ctx: &mut CTX, hasher: &mut StableHasher) {
- discriminant(self).hash_stable(ctx, hasher);
- }
-}
-
impl fmt::Debug for IntVarValue {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match *self {
@@ -809,6 +785,7 @@ rustc_index::newtype_index! {
/// declared, but a type name in a non-zero universe is a placeholder
/// type -- an idealized representative of "types in general" that we
/// use for checking generic functions.
+ #[derive(HashStable_Generic)]
pub struct UniverseIndex {
DEBUG_FORMAT = "U{}",
}
@@ -848,9 +825,3 @@ impl UniverseIndex {
self.private < other.private
}
}
-
-impl<CTX> HashStable<CTX> for UniverseIndex {
- fn hash_stable(&self, ctx: &mut CTX, hasher: &mut StableHasher) {
- self.private.hash_stable(ctx, hasher);
- }
-}
diff --git a/compiler/rustc_type_ir/src/sty.rs b/compiler/rustc_type_ir/src/sty.rs
index 74737e30b..a4fb1480f 100644
--- a/compiler/rustc_type_ir/src/sty.rs
+++ b/compiler/rustc_type_ir/src/sty.rs
@@ -5,12 +5,12 @@ use std::{fmt, hash};
use crate::DebruijnIndex;
use crate::FloatTy;
+use crate::HashStableContext;
use crate::IntTy;
use crate::Interner;
use crate::TyDecoder;
use crate::TyEncoder;
use crate::UintTy;
-use crate::UniverseIndex;
use self::RegionKind::*;
use self::TyKind::*;
@@ -18,6 +18,34 @@ use self::TyKind::*;
use rustc_data_structures::stable_hasher::HashStable;
use rustc_serialize::{Decodable, Decoder, Encodable};
+/// Specifies how a trait object is represented.
+#[derive(
+ Clone,
+ Copy,
+ PartialEq,
+ Eq,
+ PartialOrd,
+ Ord,
+ Hash,
+ Debug,
+ Encodable,
+ Decodable,
+ HashStable_Generic
+)]
+pub enum DynKind {
+ /// An unsized `dyn Trait` object
+ Dyn,
+ /// A sized `dyn* Trait` object
+ ///
+ /// These objects are represented as a `(data, vtable)` pair where `data` is a ptr-sized value
+ /// (often a pointer to the real object, but not necessarily) and `vtable` is a pointer to
+ /// the vtable for `dyn* Trait`. The representation is essentially the same as `&dyn Trait`
+ /// or similar, but the drop function included in the vtable is responsible for freeing the
+ /// underlying storage if needed. This allows a `dyn*` object to be treated agnostically with
+ /// respect to whether it points to a `Box<T>`, `Rc<T>`, etc.
+ DynStar,
+}
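With the extra field, downstream matches on `TyKind::Dynamic` gain a third binding; a minimal sketch (function name and imports are illustrative only):

use rustc_type_ir::{DynKind, Interner, TyKind};

// Tell the two trait-object representations apart when inspecting a type.
fn object_kind<I: Interner>(ty: &TyKind<I>) -> &'static str {
    match ty {
        TyKind::Dynamic(_, _, DynKind::Dyn) => "unsized `dyn Trait` object",
        TyKind::Dynamic(_, _, DynKind::DynStar) => "sized `dyn* Trait` object",
        _ => "not a trait object",
    }
}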
+
/// Defines the kinds of types used by the type system.
///
/// Types written by the user start out as `hir::TyKind` and get
@@ -95,7 +123,7 @@ pub enum TyKind<I: Interner> {
FnPtr(I::PolyFnSig),
/// A trait object. Written as `dyn for<'b> Trait<'b, Assoc = u32> + Send + 'a`.
- Dynamic(I::ListBinderExistentialPredicate, I::Region),
+ Dynamic(I::ListBinderExistentialPredicate, I::Region, DynKind),
/// The anonymous type of a closure. Used to represent the type of `|a| a`.
///
@@ -218,7 +246,7 @@ const fn tykind_discriminant<I: Interner>(value: &TyKind<I>) -> usize {
Ref(_, _, _) => 11,
FnDef(_, _) => 12,
FnPtr(_) => 13,
- Dynamic(_, _) => 14,
+ Dynamic(..) => 14,
Closure(_, _) => 15,
Generator(_, _, _) => 16,
GeneratorWitness(_) => 17,
@@ -252,7 +280,7 @@ impl<I: Interner> Clone for TyKind<I> {
Ref(r, t, m) => Ref(r.clone(), t.clone(), m.clone()),
FnDef(d, s) => FnDef(d.clone(), s.clone()),
FnPtr(s) => FnPtr(s.clone()),
- Dynamic(p, r) => Dynamic(p.clone(), r.clone()),
+ Dynamic(p, r, repr) => Dynamic(p.clone(), r.clone(), repr.clone()),
Closure(d, s) => Closure(d.clone(), s.clone()),
Generator(d, s, m) => Generator(d.clone(), s.clone(), m.clone()),
GeneratorWitness(g) => GeneratorWitness(g.clone()),
@@ -297,9 +325,10 @@ impl<I: Interner> PartialEq for TyKind<I> {
__self_0 == __arg_1_0 && __self_1 == __arg_1_1
}
(&FnPtr(ref __self_0), &FnPtr(ref __arg_1_0)) => __self_0 == __arg_1_0,
- (&Dynamic(ref __self_0, ref __self_1), &Dynamic(ref __arg_1_0, ref __arg_1_1)) => {
- __self_0 == __arg_1_0 && __self_1 == __arg_1_1
- }
+ (
+ &Dynamic(ref __self_0, ref __self_1, ref self_repr),
+ &Dynamic(ref __arg_1_0, ref __arg_1_1, ref arg_repr),
+ ) => __self_0 == __arg_1_0 && __self_1 == __arg_1_1 && self_repr == arg_repr,
(&Closure(ref __self_0, ref __self_1), &Closure(ref __arg_1_0, ref __arg_1_1)) => {
__self_0 == __arg_1_0 && __self_1 == __arg_1_1
}
@@ -384,12 +413,16 @@ impl<I: Interner> Ord for TyKind<I> {
}
}
(&FnPtr(ref __self_0), &FnPtr(ref __arg_1_0)) => Ord::cmp(__self_0, __arg_1_0),
- (&Dynamic(ref __self_0, ref __self_1), &Dynamic(ref __arg_1_0, ref __arg_1_1)) => {
- match Ord::cmp(__self_0, __arg_1_0) {
- Ordering::Equal => Ord::cmp(__self_1, __arg_1_1),
+ (
+ &Dynamic(ref __self_0, ref __self_1, ref self_repr),
+ &Dynamic(ref __arg_1_0, ref __arg_1_1, ref arg_repr),
+ ) => match Ord::cmp(__self_0, __arg_1_0) {
+ Ordering::Equal => match Ord::cmp(__self_1, __arg_1_1) {
+ Ordering::Equal => Ord::cmp(self_repr, arg_repr),
cmp => cmp,
- }
- }
+ },
+ cmp => cmp,
+ },
(&Closure(ref __self_0, ref __self_1), &Closure(ref __arg_1_0, ref __arg_1_1)) => {
match Ord::cmp(__self_0, __arg_1_0) {
Ordering::Equal => Ord::cmp(__self_1, __arg_1_1),
@@ -492,10 +525,11 @@ impl<I: Interner> hash::Hash for TyKind<I> {
hash::Hash::hash(&tykind_discriminant(self), state);
hash::Hash::hash(__self_0, state)
}
- (&Dynamic(ref __self_0, ref __self_1),) => {
+ (&Dynamic(ref __self_0, ref __self_1, ref repr),) => {
hash::Hash::hash(&tykind_discriminant(self), state);
hash::Hash::hash(__self_0, state);
- hash::Hash::hash(__self_1, state)
+ hash::Hash::hash(__self_1, state);
+ hash::Hash::hash(repr, state)
}
(&Closure(ref __self_0, ref __self_1),) => {
hash::Hash::hash(&tykind_discriminant(self), state);
@@ -570,7 +604,7 @@ impl<I: Interner> fmt::Debug for TyKind<I> {
Ref(f0, f1, f2) => Formatter::debug_tuple_field3_finish(f, "Ref", f0, f1, f2),
FnDef(f0, f1) => Formatter::debug_tuple_field2_finish(f, "FnDef", f0, f1),
FnPtr(f0) => Formatter::debug_tuple_field1_finish(f, "FnPtr", f0),
- Dynamic(f0, f1) => Formatter::debug_tuple_field2_finish(f, "Dynamic", f0, f1),
+ Dynamic(f0, f1, f2) => Formatter::debug_tuple_field3_finish(f, "Dynamic", f0, f1, f2),
Closure(f0, f1) => Formatter::debug_tuple_field2_finish(f, "Closure", f0, f1),
Generator(f0, f1, f2) => {
Formatter::debug_tuple_field3_finish(f, "Generator", f0, f1, f2)
@@ -659,9 +693,10 @@ where
FnPtr(polyfnsig) => e.emit_enum_variant(disc, |e| {
polyfnsig.encode(e);
}),
- Dynamic(l, r) => e.emit_enum_variant(disc, |e| {
+ Dynamic(l, r, repr) => e.emit_enum_variant(disc, |e| {
l.encode(e);
r.encode(e);
+ repr.encode(e);
}),
Closure(def_id, substs) => e.emit_enum_variant(disc, |e| {
def_id.encode(e);
@@ -748,7 +783,7 @@ where
11 => Ref(Decodable::decode(d), Decodable::decode(d), Decodable::decode(d)),
12 => FnDef(Decodable::decode(d), Decodable::decode(d)),
13 => FnPtr(Decodable::decode(d)),
- 14 => Dynamic(Decodable::decode(d), Decodable::decode(d)),
+ 14 => Dynamic(Decodable::decode(d), Decodable::decode(d), Decodable::decode(d)),
15 => Closure(Decodable::decode(d), Decodable::decode(d)),
16 => Generator(Decodable::decode(d), Decodable::decode(d), Decodable::decode(d)),
17 => GeneratorWitness(Decodable::decode(d)),
@@ -774,7 +809,7 @@ where
// This is not a derived impl because a derive would require `I: HashStable`
#[allow(rustc::usage_of_ty_tykind)]
-impl<CTX, I: Interner> HashStable<CTX> for TyKind<I>
+impl<CTX: HashStableContext, I: Interner> HashStable<CTX> for TyKind<I>
where
I::AdtDef: HashStable<CTX>,
I::DefId: HashStable<CTX>,
@@ -845,9 +880,10 @@ where
FnPtr(polyfnsig) => {
polyfnsig.hash_stable(__hcx, __hasher);
}
- Dynamic(l, r) => {
+ Dynamic(l, r, repr) => {
l.hash_stable(__hcx, __hasher);
r.hash_stable(__hcx, __hasher);
+ repr.hash_stable(__hcx, __hasher);
}
Closure(def_id, substs) => {
def_id.hash_stable(__hcx, __hasher);
@@ -1023,14 +1059,6 @@ pub enum RegionKind<I: Interner> {
/// Should not exist outside of type inference.
RePlaceholder(I::PlaceholderRegion),
- /// Empty lifetime is for data that is never accessed. We tag the
- /// empty lifetime with a universe -- the idea is that we don't
- /// want `exists<'a> { forall<'b> { 'b: 'a } }` to be satisfiable.
- /// Therefore, the `'empty` in a universe `U` is less than all
- /// regions visible from `U`, but not less than regions not visible
- /// from `U`.
- ReEmpty(UniverseIndex),
-
/// Erased region, used by trait selection, in MIR and during codegen.
ReErased,
}
@@ -1046,8 +1074,7 @@ const fn regionkind_discriminant<I: Interner>(value: &RegionKind<I>) -> usize {
ReStatic => 3,
ReVar(_) => 4,
RePlaceholder(_) => 5,
- ReEmpty(_) => 6,
- ReErased => 7,
+ ReErased => 6,
}
}
@@ -1072,7 +1099,6 @@ impl<I: Interner> Clone for RegionKind<I> {
ReStatic => ReStatic,
ReVar(a) => ReVar(a.clone()),
RePlaceholder(a) => RePlaceholder(a.clone()),
- ReEmpty(a) => ReEmpty(a.clone()),
ReErased => ReErased,
}
}
@@ -1099,7 +1125,6 @@ impl<I: Interner> PartialEq for RegionKind<I> {
(&RePlaceholder(ref __self_0), &RePlaceholder(ref __arg_1_0)) => {
__self_0 == __arg_1_0
}
- (&ReEmpty(ref __self_0), &ReEmpty(ref __arg_1_0)) => __self_0 == __arg_1_0,
(&ReErased, &ReErased) => true,
_ => true,
}
@@ -1144,7 +1169,6 @@ impl<I: Interner> Ord for RegionKind<I> {
(&RePlaceholder(ref __self_0), &RePlaceholder(ref __arg_1_0)) => {
Ord::cmp(__self_0, __arg_1_0)
}
- (&ReEmpty(ref __self_0), &ReEmpty(ref __arg_1_0)) => Ord::cmp(__self_0, __arg_1_0),
(&ReErased, &ReErased) => Ordering::Equal,
_ => Ordering::Equal,
}
@@ -1182,10 +1206,6 @@ impl<I: Interner> hash::Hash for RegionKind<I> {
hash::Hash::hash(&regionkind_discriminant(self), state);
hash::Hash::hash(__self_0, state)
}
- (&ReEmpty(ref __self_0),) => {
- hash::Hash::hash(&regionkind_discriminant(self), state);
- hash::Hash::hash(__self_0, state)
- }
(&ReErased,) => {
hash::Hash::hash(&regionkind_discriminant(self), state);
}
@@ -1211,8 +1231,6 @@ impl<I: Interner> fmt::Debug for RegionKind<I> {
RePlaceholder(placeholder) => write!(f, "RePlaceholder({:?})", placeholder),
- ReEmpty(ui) => write!(f, "ReEmpty({:?})", ui),
-
ReErased => write!(f, "ReErased"),
}
}
@@ -1247,9 +1265,6 @@ where
RePlaceholder(a) => e.emit_enum_variant(disc, |e| {
a.encode(e);
}),
- ReEmpty(a) => e.emit_enum_variant(disc, |e| {
- a.encode(e);
- }),
ReErased => e.emit_enum_variant(disc, |_| {}),
}
}
@@ -1272,8 +1287,7 @@ where
3 => ReStatic,
4 => ReVar(Decodable::decode(d)),
5 => RePlaceholder(Decodable::decode(d)),
- 6 => ReEmpty(Decodable::decode(d)),
- 7 => ReErased,
+ 6 => ReErased,
_ => panic!(
"{}",
format!(
@@ -1286,7 +1300,7 @@ where
}
// This is not a derived impl because a derive would require `I: HashStable`
-impl<CTX, I: Interner> HashStable<CTX> for RegionKind<I>
+impl<CTX: HashStableContext, I: Interner> HashStable<CTX> for RegionKind<I>
where
I::EarlyBoundRegion: HashStable<CTX>,
I::BoundRegion: HashStable<CTX>,
@@ -1305,9 +1319,6 @@ where
ReErased | ReStatic => {
// No variant fields to hash for these ...
}
- ReEmpty(universe) => {
- universe.hash_stable(hcx, hasher);
- }
ReLateBound(db, br) => {
db.hash_stable(hcx, hasher);
br.hash_stable(hcx, hasher);
@@ -1321,8 +1332,8 @@ where
RePlaceholder(p) => {
p.hash_stable(hcx, hasher);
}
- ReVar(reg) => {
- reg.hash_stable(hcx, hasher);
+ ReVar(_) => {
+ panic!("region variables should not be hashed: {self:?}")
}
}
}
diff --git a/compiler/rustc_typeck/Cargo.toml b/compiler/rustc_typeck/Cargo.toml
deleted file mode 100644
index faf52e269..000000000
--- a/compiler/rustc_typeck/Cargo.toml
+++ /dev/null
@@ -1,32 +0,0 @@
-[package]
-name = "rustc_typeck"
-version = "0.0.0"
-edition = "2021"
-
-[lib]
-test = false
-doctest = false
-
-[dependencies]
-rustc_arena = { path = "../rustc_arena" }
-tracing = "0.1"
-rustc_macros = { path = "../rustc_macros" }
-rustc_middle = { path = "../rustc_middle" }
-rustc_attr = { path = "../rustc_attr" }
-rustc_data_structures = { path = "../rustc_data_structures" }
-rustc_errors = { path = "../rustc_errors" }
-rustc_graphviz = { path = "../rustc_graphviz" }
-rustc_hir = { path = "../rustc_hir" }
-rustc_hir_pretty = { path = "../rustc_hir_pretty" }
-rustc_target = { path = "../rustc_target" }
-rustc_session = { path = "../rustc_session" }
-smallvec = { version = "1.8.1", features = ["union", "may_dangle"] }
-rustc_ast = { path = "../rustc_ast" }
-rustc_span = { path = "../rustc_span" }
-rustc_index = { path = "../rustc_index" }
-rustc_infer = { path = "../rustc_infer" }
-rustc_trait_selection = { path = "../rustc_trait_selection" }
-rustc_ty_utils = { path = "../rustc_ty_utils" }
-rustc_lint = { path = "../rustc_lint" }
-rustc_serialize = { path = "../rustc_serialize" }
-rustc_type_ir = { path = "../rustc_type_ir" }
diff --git a/compiler/rustc_typeck/src/astconv/errors.rs b/compiler/rustc_typeck/src/astconv/errors.rs
deleted file mode 100644
index ff39bf361..000000000
--- a/compiler/rustc_typeck/src/astconv/errors.rs
+++ /dev/null
@@ -1,410 +0,0 @@
-use crate::astconv::AstConv;
-use crate::errors::{ManualImplementation, MissingTypeParams};
-use rustc_data_structures::fx::FxHashMap;
-use rustc_errors::{pluralize, struct_span_err, Applicability, ErrorGuaranteed};
-use rustc_hir as hir;
-use rustc_hir::def_id::DefId;
-use rustc_middle::ty;
-use rustc_session::parse::feature_err;
-use rustc_span::lev_distance::find_best_match_for_name;
-use rustc_span::symbol::{sym, Ident};
-use rustc_span::{Span, Symbol, DUMMY_SP};
-
-use std::collections::BTreeSet;
-
-impl<'o, 'tcx> dyn AstConv<'tcx> + 'o {
- /// On missing type parameters, emit an E0393 error and provide a structured suggestion using
- /// the type parameter's name as a placeholder.
- pub(crate) fn complain_about_missing_type_params(
- &self,
- missing_type_params: Vec<Symbol>,
- def_id: DefId,
- span: Span,
- empty_generic_args: bool,
- ) {
- if missing_type_params.is_empty() {
- return;
- }
-
- self.tcx().sess.emit_err(MissingTypeParams {
- span,
- def_span: self.tcx().def_span(def_id),
- missing_type_params,
- empty_generic_args,
- });
- }
-
- /// When the code is using the `Fn` traits directly, instead of the `Fn(A) -> B` syntax, emit
- /// an error and attempt to build a reasonable structured suggestion.
- pub(crate) fn complain_about_internal_fn_trait(
- &self,
- span: Span,
- trait_def_id: DefId,
- trait_segment: &'_ hir::PathSegment<'_>,
- is_impl: bool,
- ) {
- if self.tcx().features().unboxed_closures {
- return;
- }
-
- let trait_def = self.tcx().trait_def(trait_def_id);
- if !trait_def.paren_sugar {
- if trait_segment.args().parenthesized {
- // For now, require that parenthetical notation be used only with `Fn()` etc.
- let mut err = feature_err(
- &self.tcx().sess.parse_sess,
- sym::unboxed_closures,
- span,
- "parenthetical notation is only stable when used with `Fn`-family traits",
- );
- err.emit();
- }
-
- return;
- }
-
- let sess = self.tcx().sess;
-
- if !trait_segment.args().parenthesized {
- // For now, require that parenthetical notation be used only with `Fn()` etc.
- let mut err = feature_err(
- &sess.parse_sess,
- sym::unboxed_closures,
- span,
- "the precise format of `Fn`-family traits' type parameters is subject to change",
- );
- // Do not suggest the other syntax if we are in trait impl:
- // the desugaring would contain an associated type constraint.
- if !is_impl {
- let args = trait_segment
- .args
- .as_ref()
- .and_then(|args| args.args.get(0))
- .and_then(|arg| match arg {
- hir::GenericArg::Type(ty) => match ty.kind {
- hir::TyKind::Tup(t) => t
- .iter()
- .map(|e| sess.source_map().span_to_snippet(e.span))
- .collect::<Result<Vec<_>, _>>()
- .map(|a| a.join(", ")),
- _ => sess.source_map().span_to_snippet(ty.span),
- }
- .map(|s| format!("({})", s))
- .ok(),
- _ => None,
- })
- .unwrap_or_else(|| "()".to_string());
- let ret = trait_segment
- .args()
- .bindings
- .iter()
- .find_map(|b| match (b.ident.name == sym::Output, &b.kind) {
- (true, hir::TypeBindingKind::Equality { term }) => {
- let span = match term {
- hir::Term::Ty(ty) => ty.span,
- hir::Term::Const(c) => self.tcx().hir().span(c.hir_id),
- };
- sess.source_map().span_to_snippet(span).ok()
- }
- _ => None,
- })
- .unwrap_or_else(|| "()".to_string());
- err.span_suggestion(
- span,
- "use parenthetical notation instead",
- format!("{}{} -> {}", trait_segment.ident, args, ret),
- Applicability::MaybeIncorrect,
- );
- }
- err.emit();
- }
-
- if is_impl {
- let trait_name = self.tcx().def_path_str(trait_def_id);
- self.tcx().sess.emit_err(ManualImplementation { span, trait_name });
- }
- }
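For context, the two spellings this diagnostic mediates between look roughly like the following (illustrative user code, not part of the patch):

// Stable, parenthetical sugar: always accepted.
fn apply<F: Fn(u32) -> u32>(f: F) -> u32 {
    f(1)
}

// Naming the trait's type parameters directly is what the errors above reject
// unless `#![feature(unboxed_closures)]` is enabled:
// fn apply_raw<F: Fn<(u32,), Output = u32>>(f: F) -> u32 { f(1) }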
-
- pub(crate) fn complain_about_assoc_type_not_found<I>(
- &self,
- all_candidates: impl Fn() -> I,
- ty_param_name: &str,
- assoc_name: Ident,
- span: Span,
- ) -> ErrorGuaranteed
- where
- I: Iterator<Item = ty::PolyTraitRef<'tcx>>,
- {
- // The fallback span is needed because `assoc_name` might be an `Fn()`'s `Output` without a
- // valid span, so we point at the whole path segment instead.
- let span = if assoc_name.span != DUMMY_SP { assoc_name.span } else { span };
- let mut err = struct_span_err!(
- self.tcx().sess,
- span,
- E0220,
- "associated type `{}` not found for `{}`",
- assoc_name,
- ty_param_name
- );
-
- let all_candidate_names: Vec<_> = all_candidates()
- .flat_map(|r| self.tcx().associated_items(r.def_id()).in_definition_order())
- .filter_map(
- |item| if item.kind == ty::AssocKind::Type { Some(item.name) } else { None },
- )
- .collect();
-
- if let (Some(suggested_name), true) = (
- find_best_match_for_name(&all_candidate_names, assoc_name.name, None),
- assoc_name.span != DUMMY_SP,
- ) {
- err.span_suggestion(
- assoc_name.span,
- "there is an associated type with a similar name",
- suggested_name,
- Applicability::MaybeIncorrect,
- );
- return err.emit();
- }
-
- // If we didn't find a good item in the supertraits (or couldn't get
- // the supertraits), like in ItemCtxt, then look more generally from
- // all visible traits. If there's one clear winner, just suggest that.
-
- let visible_traits: Vec<_> = self
- .tcx()
- .all_traits()
- .filter(|trait_def_id| {
- let viz = self.tcx().visibility(*trait_def_id);
- if let Some(def_id) = self.item_def_id() {
- viz.is_accessible_from(def_id, self.tcx())
- } else {
- viz.is_visible_locally()
- }
- })
- .collect();
-
- let wider_candidate_names: Vec<_> = visible_traits
- .iter()
- .flat_map(|trait_def_id| {
- self.tcx().associated_items(*trait_def_id).in_definition_order()
- })
- .filter_map(
- |item| if item.kind == ty::AssocKind::Type { Some(item.name) } else { None },
- )
- .collect();
-
- if let (Some(suggested_name), true) = (
- find_best_match_for_name(&wider_candidate_names, assoc_name.name, None),
- assoc_name.span != DUMMY_SP,
- ) {
- if let [best_trait] = visible_traits
- .iter()
- .filter(|trait_def_id| {
- self.tcx()
- .associated_items(*trait_def_id)
- .filter_by_name_unhygienic(suggested_name)
- .any(|item| item.kind == ty::AssocKind::Type)
- })
- .collect::<Vec<_>>()[..]
- {
- err.span_label(
- assoc_name.span,
- format!(
- "there is a similarly named associated type `{suggested_name}` in the trait `{}`",
- self.tcx().def_path_str(*best_trait)
- ),
- );
- return err.emit();
- }
- }
-
- err.span_label(span, format!("associated type `{}` not found", assoc_name));
- err.emit()
- }
-
- /// When there are any missing associated types, emit an E0191 error and attempt to supply a
- /// reasonable suggestion on how to write it. When multiple associated types in the
- /// same trait bound have the same name (as they come from different supertraits), we
- /// instead emit a generic note suggesting the use of a `where` clause to constrain them.
- pub(crate) fn complain_about_missing_associated_types(
- &self,
- associated_types: FxHashMap<Span, BTreeSet<DefId>>,
- potential_assoc_types: Vec<Span>,
- trait_bounds: &[hir::PolyTraitRef<'_>],
- ) {
- if associated_types.values().all(|v| v.is_empty()) {
- return;
- }
- let tcx = self.tcx();
- // FIXME: Marked `mut` so that we can replace the spans further below with a more
- // appropriate one, but this should be handled earlier in the span assignment.
- let mut associated_types: FxHashMap<Span, Vec<_>> = associated_types
- .into_iter()
- .map(|(span, def_ids)| {
- (span, def_ids.into_iter().map(|did| tcx.associated_item(did)).collect())
- })
- .collect();
- let mut names = vec![];
-
- // Account for things like `dyn Foo + 'a`, like in tests `issue-22434.rs` and
- // `issue-22560.rs`.
- let mut trait_bound_spans: Vec<Span> = vec![];
- for (span, items) in &associated_types {
- if !items.is_empty() {
- trait_bound_spans.push(*span);
- }
- for assoc_item in items {
- let trait_def_id = assoc_item.container_id(tcx);
- names.push(format!(
- "`{}` (from trait `{}`)",
- assoc_item.name,
- tcx.def_path_str(trait_def_id),
- ));
- }
- }
- if let ([], [bound]) = (&potential_assoc_types[..], &trait_bounds) {
- match bound.trait_ref.path.segments {
- // FIXME: `trait_ref.path.span` can point to a full path with multiple
- // segments, even though `trait_ref.path.segments` is of length `1`. Work
- // around that bug here, even though it should be fixed elsewhere.
- // This would otherwise cause an invalid suggestion. For an example, look at
- // `src/test/ui/issues/issue-28344.rs` where instead of the following:
- //
- // error[E0191]: the value of the associated type `Output`
- // (from trait `std::ops::BitXor`) must be specified
- // --> $DIR/issue-28344.rs:4:17
- // |
- // LL | let x: u8 = BitXor::bitor(0 as u8, 0 as u8);
- // | ^^^^^^ help: specify the associated type:
- // | `BitXor<Output = Type>`
- //
- // we would output:
- //
- // error[E0191]: the value of the associated type `Output`
- // (from trait `std::ops::BitXor`) must be specified
- // --> $DIR/issue-28344.rs:4:17
- // |
- // LL | let x: u8 = BitXor::bitor(0 as u8, 0 as u8);
- // | ^^^^^^^^^^^^^ help: specify the associated type:
- // | `BitXor::bitor<Output = Type>`
- [segment] if segment.args.is_none() => {
- trait_bound_spans = vec![segment.ident.span];
- associated_types = associated_types
- .into_iter()
- .map(|(_, items)| (segment.ident.span, items))
- .collect();
- }
- _ => {}
- }
- }
- names.sort();
- trait_bound_spans.sort();
- let mut err = struct_span_err!(
- tcx.sess,
- trait_bound_spans,
- E0191,
- "the value of the associated type{} {} must be specified",
- pluralize!(names.len()),
- names.join(", "),
- );
- let mut suggestions = vec![];
- let mut types_count = 0;
- let mut where_constraints = vec![];
- let mut already_has_generics_args_suggestion = false;
- for (span, assoc_items) in &associated_types {
- let mut names: FxHashMap<_, usize> = FxHashMap::default();
- for item in assoc_items {
- types_count += 1;
- *names.entry(item.name).or_insert(0) += 1;
- }
- let mut dupes = false;
- for item in assoc_items {
- let prefix = if names[&item.name] > 1 {
- let trait_def_id = item.container_id(tcx);
- dupes = true;
- format!("{}::", tcx.def_path_str(trait_def_id))
- } else {
- String::new()
- };
- if let Some(sp) = tcx.hir().span_if_local(item.def_id) {
- err.span_label(sp, format!("`{}{}` defined here", prefix, item.name));
- }
- }
- if potential_assoc_types.len() == assoc_items.len() {
- // When the number of missing associated types equals the number of
- // extra type arguments present, a suggestion to replace the generic args
- // with associated types has already been emitted.
- already_has_generics_args_suggestion = true;
- } else if let (Ok(snippet), false) =
- (tcx.sess.source_map().span_to_snippet(*span), dupes)
- {
- let types: Vec<_> =
- assoc_items.iter().map(|item| format!("{} = Type", item.name)).collect();
- let code = if snippet.ends_with('>') {
- // The user wrote `Trait<'a>` or similar and we don't have a type we can
- // suggest, but at least we can clue them to the correct syntax
- // `Trait<'a, Item = Type>` while accounting for the `<'a>` in the
- // suggestion.
- format!("{}, {}>", &snippet[..snippet.len() - 1], types.join(", "))
- } else {
- // The user wrote `Iterator`, so we don't have a type we can suggest, but at
- // least we can clue them to the correct syntax `Iterator<Item = Type>`.
- format!("{}<{}>", snippet, types.join(", "))
- };
- suggestions.push((*span, code));
- } else if dupes {
- where_constraints.push(*span);
- }
- }
- let where_msg = "consider introducing a new type parameter, adding `where` constraints \
- using the fully-qualified path to the associated types";
- if !where_constraints.is_empty() && suggestions.is_empty() {
- // If there are duplicate associated type names and a single trait bound, do not
- // use a structured suggestion; it means that there are multiple supertraits with
- // the same associated type name.
- err.help(where_msg);
- }
- if suggestions.len() != 1 || already_has_generics_args_suggestion {
- // We don't need this label if there's an inline suggestion; show it otherwise.
- for (span, assoc_items) in &associated_types {
- let mut names: FxHashMap<_, usize> = FxHashMap::default();
- for item in assoc_items {
- types_count += 1;
- *names.entry(item.name).or_insert(0) += 1;
- }
- let mut label = vec![];
- for item in assoc_items {
- let postfix = if names[&item.name] > 1 {
- let trait_def_id = item.container_id(tcx);
- format!(" (from trait `{}`)", tcx.def_path_str(trait_def_id))
- } else {
- String::new()
- };
- label.push(format!("`{}`{}", item.name, postfix));
- }
- if !label.is_empty() {
- err.span_label(
- *span,
- format!(
- "associated type{} {} must be specified",
- pluralize!(label.len()),
- label.join(", "),
- ),
- );
- }
- }
- }
- if !suggestions.is_empty() {
- err.multipart_suggestion(
- &format!("specify the associated type{}", pluralize!(types_count)),
- suggestions,
- Applicability::HasPlaceholders,
- );
- if !where_constraints.is_empty() {
- err.span_help(where_constraints, where_msg);
- }
- }
- err.emit();
- }
-}
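To make the E0191 path in the deleted `complain_about_missing_associated_types` concrete, this is roughly the user code it fires on and the shape of the structured suggestion (illustrative only):

// error[E0191]: the value of the associated type `Item` (from trait `Iterator`)
//               must be specified
// fn consume(it: Box<dyn Iterator>) { /* ... */ }

// Suggested form: name the associated type on the trait object.
fn consume(it: Box<dyn Iterator<Item = u32>>) {
    for x in it {
        let _ = x;
    }
}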
diff --git a/compiler/rustc_typeck/src/astconv/generics.rs b/compiler/rustc_typeck/src/astconv/generics.rs
deleted file mode 100644
index 40aa27a29..000000000
--- a/compiler/rustc_typeck/src/astconv/generics.rs
+++ /dev/null
@@ -1,664 +0,0 @@
-use super::IsMethodCall;
-use crate::astconv::{
- AstConv, CreateSubstsForGenericArgsCtxt, ExplicitLateBound, GenericArgCountMismatch,
- GenericArgCountResult, GenericArgPosition,
-};
-use crate::errors::AssocTypeBindingNotAllowed;
-use crate::structured_errors::{GenericArgsInfo, StructuredDiagnostic, WrongNumberOfGenericArgs};
-use rustc_ast::ast::ParamKindOrd;
-use rustc_errors::{struct_span_err, Applicability, Diagnostic, MultiSpan};
-use rustc_hir as hir;
-use rustc_hir::def::{DefKind, Res};
-use rustc_hir::def_id::DefId;
-use rustc_hir::GenericArg;
-use rustc_infer::infer::TyCtxtInferExt;
-use rustc_middle::ty::{
- self, subst, subst::SubstsRef, GenericParamDef, GenericParamDefKind, IsSuggestable, Ty, TyCtxt,
-};
-use rustc_session::lint::builtin::LATE_BOUND_LIFETIME_ARGUMENTS;
-use rustc_span::{symbol::kw, Span};
-use smallvec::SmallVec;
-
-impl<'o, 'tcx> dyn AstConv<'tcx> + 'o {
- /// Report an error that a generic argument did not match the generic parameter that was
- /// expected.
- fn generic_arg_mismatch_err(
- tcx: TyCtxt<'_>,
- arg: &GenericArg<'_>,
- param: &GenericParamDef,
- possible_ordering_error: bool,
- help: Option<&str>,
- ) {
- let sess = tcx.sess;
- let mut err = struct_span_err!(
- sess,
- arg.span(),
- E0747,
- "{} provided when a {} was expected",
- arg.descr(),
- param.kind.descr(),
- );
-
- if let GenericParamDefKind::Const { .. } = param.kind {
- if matches!(arg, GenericArg::Type(hir::Ty { kind: hir::TyKind::Infer, .. })) {
- err.help("const arguments cannot yet be inferred with `_`");
- if sess.is_nightly_build() {
- err.help(
- "add `#![feature(generic_arg_infer)]` to the crate attributes to enable",
- );
- }
- }
- }
-
- let add_braces_suggestion = |arg: &GenericArg<'_>, err: &mut Diagnostic| {
- let suggestions = vec![
- (arg.span().shrink_to_lo(), String::from("{ ")),
- (arg.span().shrink_to_hi(), String::from(" }")),
- ];
- err.multipart_suggestion(
- "if this generic argument was intended as a const parameter, \
- surround it with braces",
- suggestions,
- Applicability::MaybeIncorrect,
- );
- };
-
- // Specific suggestion set for diagnostics
- match (arg, &param.kind) {
- (
- GenericArg::Type(hir::Ty {
- kind: hir::TyKind::Path(rustc_hir::QPath::Resolved(_, path)),
- ..
- }),
- GenericParamDefKind::Const { .. },
- ) => match path.res {
- Res::Err => {
- add_braces_suggestion(arg, &mut err);
- err.set_primary_message(
- "unresolved item provided when a constant was expected",
- )
- .emit();
- return;
- }
- Res::Def(DefKind::TyParam, src_def_id) => {
- if let Some(param_local_id) = param.def_id.as_local() {
- let param_name = tcx.hir().ty_param_name(param_local_id);
- let param_type = tcx.infer_ctxt().enter(|infcx| {
- infcx.resolve_numeric_literals_with_default(tcx.type_of(param.def_id))
- });
- if param_type.is_suggestable(tcx, false) {
- err.span_suggestion(
- tcx.def_span(src_def_id),
- "consider changing this type parameter to be a `const` generic",
- format!("const {}: {}", param_name, param_type),
- Applicability::MaybeIncorrect,
- );
- };
- }
- }
- _ => add_braces_suggestion(arg, &mut err),
- },
- (
- GenericArg::Type(hir::Ty { kind: hir::TyKind::Path(_), .. }),
- GenericParamDefKind::Const { .. },
- ) => add_braces_suggestion(arg, &mut err),
- (
- GenericArg::Type(hir::Ty { kind: hir::TyKind::Array(_, len), .. }),
- GenericParamDefKind::Const { .. },
- ) if tcx.type_of(param.def_id) == tcx.types.usize => {
- let snippet = sess.source_map().span_to_snippet(tcx.hir().span(len.hir_id()));
- if let Ok(snippet) = snippet {
- err.span_suggestion(
- arg.span(),
- "array type provided where a `usize` was expected, try",
- format!("{{ {} }}", snippet),
- Applicability::MaybeIncorrect,
- );
- }
- }
- (GenericArg::Const(cnst), GenericParamDefKind::Type { .. }) => {
- let body = tcx.hir().body(cnst.value.body);
- if let rustc_hir::ExprKind::Path(rustc_hir::QPath::Resolved(_, path)) =
- body.value.kind
- {
- if let Res::Def(DefKind::Fn { .. }, id) = path.res {
- err.help(&format!(
- "`{}` is a function item, not a type",
- tcx.item_name(id)
- ));
- err.help("function item types cannot be named directly");
- }
- }
- }
- _ => {}
- }
-
- let kind_ord = param.kind.to_ord();
- let arg_ord = arg.to_ord();
-
- // This note is only true when generic parameters are strictly ordered by their kind.
- if possible_ordering_error && kind_ord.cmp(&arg_ord) != core::cmp::Ordering::Equal {
- let (first, last) = if kind_ord < arg_ord {
- (param.kind.descr(), arg.descr())
- } else {
- (arg.descr(), param.kind.descr())
- };
- err.note(&format!("{} arguments must be provided before {} arguments", first, last));
- if let Some(help) = help {
- err.help(help);
- }
- }
-
- err.emit();
- }
-
- /// Creates the relevant generic argument substitutions
- /// corresponding to a set of generic parameters. This is a
- /// rather complex function. Let us try to explain the role
- /// of each of its parameters:
- ///
- /// To start, we are given the `def_id` of the thing we are
- /// creating the substitutions for, and a partial set of
- /// substitutions `parent_substs`. In general, the substitutions
- /// for an item begin with substitutions for all the "parents" of
- /// that item -- e.g., for a method it might include the
- /// parameters from the impl.
- ///
- /// Therefore, the method begins by walking down these parents,
- /// starting with the outermost parent and proceeding inwards until
- /// it reaches `def_id`. For each parent `P`, it will check `parent_substs`
- /// first to see if the parent's substitutions are listed in there. If so,
- /// we can append those and move on. Otherwise, it invokes the
- /// three callback functions:
- ///
- /// - `args_for_def_id`: given the `DefId` `P`, supplies back the
- /// generic arguments that were given to that parent from within
- /// the path; so e.g., if you have `<T as Foo>::Bar`, the `DefId`
- /// might refer to the trait `Foo`, and the arguments might be
- /// `[T]`. The boolean value indicates whether to infer values
- /// for arguments whose values were not explicitly provided.
- /// - `provided_kind`: given the generic parameter and the value from `args_for_def_id`,
- /// instantiate a `GenericArg`.
- /// - `inferred_kind`: if no parameter was provided, and inference is enabled, then
- /// creates a suitable inference variable.
- pub fn create_substs_for_generic_args<'a>(
- tcx: TyCtxt<'tcx>,
- def_id: DefId,
- parent_substs: &[subst::GenericArg<'tcx>],
- has_self: bool,
- self_ty: Option<Ty<'tcx>>,
- arg_count: &GenericArgCountResult,
- ctx: &mut impl CreateSubstsForGenericArgsCtxt<'a, 'tcx>,
- ) -> SubstsRef<'tcx> {
- // Collect the segments of the path; we need to substitute arguments
- // for parameters throughout the entire path (wherever there are
- // generic parameters).
- let mut parent_defs = tcx.generics_of(def_id);
- let count = parent_defs.count();
- let mut stack = vec![(def_id, parent_defs)];
- while let Some(def_id) = parent_defs.parent {
- parent_defs = tcx.generics_of(def_id);
- stack.push((def_id, parent_defs));
- }
-
- // We manually build up the substitution, rather than using convenience
- // methods in `subst.rs`, so that we can iterate over the arguments and
- // parameters in lock-step linearly, instead of trying to match each pair.
- let mut substs: SmallVec<[subst::GenericArg<'tcx>; 8]> = SmallVec::with_capacity(count);
- // Iterate over each segment of the path.
- while let Some((def_id, defs)) = stack.pop() {
- let mut params = defs.params.iter().peekable();
-
- // If we have already computed substitutions for parents, we can use those directly.
- while let Some(&param) = params.peek() {
- if let Some(&kind) = parent_substs.get(param.index as usize) {
- substs.push(kind);
- params.next();
- } else {
- break;
- }
- }
-
- // `Self` is handled first, unless it's been handled in `parent_substs`.
- if has_self {
- if let Some(&param) = params.peek() {
- if param.index == 0 {
- if let GenericParamDefKind::Type { .. } = param.kind {
- substs.push(
- self_ty
- .map(|ty| ty.into())
- .unwrap_or_else(|| ctx.inferred_kind(None, param, true)),
- );
- params.next();
- }
- }
- }
- }
-
- // Check whether this segment takes generic arguments and the user has provided any.
- let (generic_args, infer_args) = ctx.args_for_def_id(def_id);
-
- let args_iter = generic_args.iter().flat_map(|generic_args| generic_args.args.iter());
- let mut args = args_iter.clone().peekable();
-
- // If we encounter a type or const when we expect a lifetime, we infer the lifetimes.
- // If we later encounter a lifetime, we know that the arguments were provided in the
- // wrong order. `force_infer_lt` records the type or const that forced lifetimes to be
- // inferred, so we can use it for diagnostics later.
- let mut force_infer_lt = None;
-
- loop {
- // We're going to iterate through the generic arguments that the user
- // provided, matching them with the generic parameters we expect.
- // Mismatches can occur as a result of elided lifetimes, or for malformed
- // input. We try to handle both sensibly.
- match (args.peek(), params.peek()) {
- (Some(&arg), Some(&param)) => {
- match (arg, &param.kind, arg_count.explicit_late_bound) {
- (GenericArg::Lifetime(_), GenericParamDefKind::Lifetime, _)
- | (
- GenericArg::Type(_) | GenericArg::Infer(_),
- GenericParamDefKind::Type { .. },
- _,
- )
- | (
- GenericArg::Const(_) | GenericArg::Infer(_),
- GenericParamDefKind::Const { .. },
- _,
- ) => {
- substs.push(ctx.provided_kind(param, arg));
- args.next();
- params.next();
- }
- (
- GenericArg::Infer(_) | GenericArg::Type(_) | GenericArg::Const(_),
- GenericParamDefKind::Lifetime,
- _,
- ) => {
- // We expected a lifetime argument, but got a type or const
- // argument. That means we're inferring the lifetimes.
- substs.push(ctx.inferred_kind(None, param, infer_args));
- force_infer_lt = Some((arg, param));
- params.next();
- }
- (GenericArg::Lifetime(_), _, ExplicitLateBound::Yes) => {
- // We've come across a lifetime when we expected something else in
- // the presence of explicit late bounds. This is most likely
- // due to the presence of the explicit bound so we're just going to
- // ignore it.
- args.next();
- }
- (_, _, _) => {
- // We expected one kind of parameter, but the user provided
- // another. This is an error. However, if we already know that
- // the arguments don't match up with the parameters, we won't issue
- // an additional error, as the user already knows what's wrong.
- if arg_count.correct.is_ok() {
- // We're going to iterate over the parameters to sort them out, and
- // show that order to the user as a possible order for the parameters
- let mut param_types_present = defs
- .params
- .clone()
- .into_iter()
- .map(|param| (param.kind.to_ord(), param))
- .collect::<Vec<(ParamKindOrd, GenericParamDef)>>();
- param_types_present.sort_by_key(|(ord, _)| *ord);
- let (mut param_types_present, ordered_params): (
- Vec<ParamKindOrd>,
- Vec<GenericParamDef>,
- ) = param_types_present.into_iter().unzip();
- param_types_present.dedup();
-
- Self::generic_arg_mismatch_err(
- tcx,
- arg,
- param,
- !args_iter.clone().is_sorted_by_key(|arg| arg.to_ord()),
- Some(&format!(
- "reorder the arguments: {}: `<{}>`",
- param_types_present
- .into_iter()
- .map(|ord| format!("{}s", ord))
- .collect::<Vec<String>>()
- .join(", then "),
- ordered_params
- .into_iter()
- .filter_map(|param| {
- if param.name == kw::SelfUpper {
- None
- } else {
- Some(param.name.to_string())
- }
- })
- .collect::<Vec<String>>()
- .join(", ")
- )),
- );
- }
-
- // We've reported the error, but we want to make sure that this
- // problem doesn't bubble down and create additional, irrelevant
- // errors. In this case, we're simply going to ignore the argument
- // and any following arguments. The rest of the parameters will be
- // inferred.
- while args.next().is_some() {}
- }
- }
- }
-
- (Some(&arg), None) => {
- // We should never be able to reach this point with well-formed input.
- // There are three situations in which we can encounter this issue.
- //
- // 1. The number of arguments is incorrect. In this case, an error
- // will already have been emitted, and we can ignore it.
- // 2. There are late-bound lifetime parameters present, yet the
- // lifetime arguments have also been explicitly specified by the
- // user.
- // 3. We've inferred some lifetimes, which have been provided later (i.e.
- // after a type or const). We want to throw an error in this case.
-
- if arg_count.correct.is_ok()
- && arg_count.explicit_late_bound == ExplicitLateBound::No
- {
- let kind = arg.descr();
- assert_eq!(kind, "lifetime");
- let (provided_arg, param) =
- force_infer_lt.expect("lifetimes ought to have been inferred");
- Self::generic_arg_mismatch_err(tcx, provided_arg, param, false, None);
- }
-
- break;
- }
-
- (None, Some(&param)) => {
- // If there are fewer arguments than parameters, it means
- // we're inferring the remaining arguments.
- substs.push(ctx.inferred_kind(Some(&substs), param, infer_args));
- params.next();
- }
-
- (None, None) => break,
- }
- }
- }
-
- tcx.intern_substs(&substs)
- }
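A small illustration of the parents-first ordering that the stack walk above produces (types and names are hypothetical):

struct Wrapper<T>(T);

impl<T> Wrapper<T> {
    fn map<U>(self, f: impl Fn(T) -> U) -> Wrapper<U> {
        Wrapper(f(self.0))
    }
}

// For `Wrapper(1u8).map(|x| x as u32)`, substitutions for `map` are assembled
// outermost parent first: `T = u8` from the impl, then `U = u32` (plus the
// synthetic parameter for the `impl Fn` argument), matching the outer-to-inner
// walk over `generics_of` parents in the deleted code.
fn demo() -> Wrapper<u32> {
    Wrapper(1u8).map(|x| x as u32)
}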
-
- /// Checks that the correct number of generic arguments have been provided.
- /// Used specifically for function calls.
- pub fn check_generic_arg_count_for_call(
- tcx: TyCtxt<'_>,
- span: Span,
- def_id: DefId,
- generics: &ty::Generics,
- seg: &hir::PathSegment<'_>,
- is_method_call: IsMethodCall,
- ) -> GenericArgCountResult {
- let empty_args = hir::GenericArgs::none();
- let gen_args = seg.args.unwrap_or(&empty_args);
- let gen_pos = if is_method_call == IsMethodCall::Yes {
- GenericArgPosition::MethodCall
- } else {
- GenericArgPosition::Value
- };
- let has_self = generics.parent.is_none() && generics.has_self;
-
- Self::check_generic_arg_count(
- tcx,
- span,
- def_id,
- seg,
- generics,
- gen_args,
- gen_pos,
- has_self,
- seg.infer_args,
- )
- }
-
- /// Checks that the correct number of generic arguments have been provided.
- /// This is used both for datatypes and function calls.
- #[instrument(skip(tcx, gen_pos), level = "debug")]
- pub(crate) fn check_generic_arg_count(
- tcx: TyCtxt<'_>,
- span: Span,
- def_id: DefId,
- seg: &hir::PathSegment<'_>,
- gen_params: &ty::Generics,
- gen_args: &hir::GenericArgs<'_>,
- gen_pos: GenericArgPosition,
- has_self: bool,
- infer_args: bool,
- ) -> GenericArgCountResult {
- let default_counts = gen_params.own_defaults();
- let param_counts = gen_params.own_counts();
-
- // Subtracting from param count to ensure type params synthesized from `impl Trait`
- // cannot be explicitly specified.
- let synth_type_param_count = gen_params
- .params
- .iter()
- .filter(|param| {
- matches!(param.kind, ty::GenericParamDefKind::Type { synthetic: true, .. })
- })
- .count();
- let named_type_param_count =
- param_counts.types - has_self as usize - synth_type_param_count;
- let infer_lifetimes =
- (gen_pos != GenericArgPosition::Type || infer_args) && !gen_args.has_lifetime_params();
-
- if gen_pos != GenericArgPosition::Type && !gen_args.bindings.is_empty() {
- Self::prohibit_assoc_ty_binding(tcx, gen_args.bindings[0].span);
- }
-
- let explicit_late_bound =
- Self::prohibit_explicit_late_bound_lifetimes(tcx, gen_params, gen_args, gen_pos);
-
- let mut invalid_args = vec![];
-
- let mut check_lifetime_args =
- |min_expected_args: usize,
- max_expected_args: usize,
- provided_args: usize,
- late_bounds_ignore: bool| {
- if (min_expected_args..=max_expected_args).contains(&provided_args) {
- return Ok(());
- }
-
- if late_bounds_ignore {
- return Ok(());
- }
-
- if provided_args > max_expected_args {
- invalid_args.extend(
- gen_args.args[max_expected_args..provided_args]
- .iter()
- .map(|arg| arg.span()),
- );
- };
-
- let gen_args_info = if provided_args > min_expected_args {
- invalid_args.extend(
- gen_args.args[min_expected_args..provided_args]
- .iter()
- .map(|arg| arg.span()),
- );
- let num_redundant_args = provided_args - min_expected_args;
- GenericArgsInfo::ExcessLifetimes { num_redundant_args }
- } else {
- let num_missing_args = min_expected_args - provided_args;
- GenericArgsInfo::MissingLifetimes { num_missing_args }
- };
-
- let reported = WrongNumberOfGenericArgs::new(
- tcx,
- gen_args_info,
- seg,
- gen_params,
- has_self as usize,
- gen_args,
- def_id,
- )
- .diagnostic()
- .emit();
-
- Err(reported)
- };
-
- let min_expected_lifetime_args = if infer_lifetimes { 0 } else { param_counts.lifetimes };
- let max_expected_lifetime_args = param_counts.lifetimes;
- let num_provided_lifetime_args = gen_args.num_lifetime_params();
-
- let lifetimes_correct = check_lifetime_args(
- min_expected_lifetime_args,
- max_expected_lifetime_args,
- num_provided_lifetime_args,
- explicit_late_bound == ExplicitLateBound::Yes,
- );
-
- let mut check_types_and_consts = |expected_min,
- expected_max,
- expected_max_with_synth,
- provided,
- params_offset,
- args_offset| {
- debug!(
- ?expected_min,
- ?expected_max,
- ?provided,
- ?params_offset,
- ?args_offset,
- "check_types_and_consts"
- );
- if (expected_min..=expected_max).contains(&provided) {
- return Ok(());
- }
-
- let num_default_params = expected_max - expected_min;
-
- let gen_args_info = if provided > expected_max {
- invalid_args.extend(
- gen_args.args[args_offset + expected_max..args_offset + provided]
- .iter()
- .map(|arg| arg.span()),
- );
- let num_redundant_args = provided - expected_max;
-
- // Provide extra note if synthetic arguments like `impl Trait` are specified.
- let synth_provided = provided <= expected_max_with_synth;
-
- GenericArgsInfo::ExcessTypesOrConsts {
- num_redundant_args,
- num_default_params,
- args_offset,
- synth_provided,
- }
- } else {
- let num_missing_args = expected_max - provided;
-
- GenericArgsInfo::MissingTypesOrConsts {
- num_missing_args,
- num_default_params,
- args_offset,
- }
- };
-
- debug!(?gen_args_info);
-
- let reported = WrongNumberOfGenericArgs::new(
- tcx,
- gen_args_info,
- seg,
- gen_params,
- params_offset,
- gen_args,
- def_id,
- )
- .diagnostic()
- .emit_unless(gen_args.has_err());
-
- Err(reported)
- };
-
- let args_correct = {
- let expected_min = if infer_args {
- 0
- } else {
- param_counts.consts + named_type_param_count
- - default_counts.types
- - default_counts.consts
- };
- debug!(?expected_min);
- debug!(arg_counts.lifetimes=?gen_args.num_lifetime_params());
-
- check_types_and_consts(
- expected_min,
- param_counts.consts + named_type_param_count,
- param_counts.consts + named_type_param_count + synth_type_param_count,
- gen_args.num_generic_params(),
- param_counts.lifetimes + has_self as usize,
- gen_args.num_lifetime_params(),
- )
- };
-
- GenericArgCountResult {
- explicit_late_bound,
- correct: lifetimes_correct.and(args_correct).map_err(|reported| {
- GenericArgCountMismatch { reported: Some(reported), invalid_args }
- }),
- }
- }
-
- /// Emits an error when an associated type binding is not allowed in this position.
- pub fn prohibit_assoc_ty_binding(tcx: TyCtxt<'_>, span: Span) {
- tcx.sess.emit_err(AssocTypeBindingNotAllowed { span });
- }
-
- /// Prohibits explicit lifetime arguments if late-bound lifetime parameters
- /// are present. This is used both for datatypes and function calls.
- pub(crate) fn prohibit_explicit_late_bound_lifetimes(
- tcx: TyCtxt<'_>,
- def: &ty::Generics,
- args: &hir::GenericArgs<'_>,
- position: GenericArgPosition,
- ) -> ExplicitLateBound {
- let param_counts = def.own_counts();
- let infer_lifetimes = position != GenericArgPosition::Type && !args.has_lifetime_params();
-
- if infer_lifetimes {
- return ExplicitLateBound::No;
- }
-
- if let Some(span_late) = def.has_late_bound_regions {
- let msg = "cannot specify lifetime arguments explicitly \
- if late bound lifetime parameters are present";
- let note = "the late bound lifetime parameter is introduced here";
- let span = args.args[0].span();
-
- if position == GenericArgPosition::Value
- && args.num_lifetime_params() != param_counts.lifetimes
- {
- let mut err = tcx.sess.struct_span_err(span, msg);
- err.span_note(span_late, note);
- err.emit();
- } else {
- let mut multispan = MultiSpan::from_span(span);
- multispan.push_span_label(span_late, note);
- tcx.struct_span_lint_hir(
- LATE_BOUND_LIFETIME_ARGUMENTS,
- args.args[0].id(),
- multispan,
- |lint| {
- lint.build(msg).emit();
- },
- );
- }
-
- ExplicitLateBound::Yes
- } else {
- ExplicitLateBound::No
- }
- }
-}
diff --git a/compiler/rustc_typeck/src/astconv/mod.rs b/compiler/rustc_typeck/src/astconv/mod.rs
deleted file mode 100644
index 8a5c7fee6..000000000
--- a/compiler/rustc_typeck/src/astconv/mod.rs
+++ /dev/null
@@ -1,3091 +0,0 @@
-//! Conversion from AST representation of types to the `ty.rs` representation.
-//! The main routine here is `ast_ty_to_ty()`; each use is parameterized by an
-//! instance of `AstConv`.
-
-mod errors;
-mod generics;
-
-use crate::bounds::Bounds;
-use crate::collect::HirPlaceholderCollector;
-use crate::errors::{
- AmbiguousLifetimeBound, MultipleRelaxedDefaultBounds, TraitObjectDeclaredWithNoTraits,
- TypeofReservedKeywordUsed, ValueOfAssociatedStructAlreadySpecified,
-};
-use crate::middle::resolve_lifetime as rl;
-use crate::require_c_abi_if_c_variadic;
-use rustc_ast::TraitObjectSyntax;
-use rustc_data_structures::fx::{FxHashMap, FxHashSet};
-use rustc_errors::{
- struct_span_err, Applicability, DiagnosticBuilder, ErrorGuaranteed, FatalError, MultiSpan,
-};
-use rustc_hir as hir;
-use rustc_hir::def::{CtorOf, DefKind, Namespace, Res};
-use rustc_hir::def_id::{DefId, LocalDefId};
-use rustc_hir::intravisit::{walk_generics, Visitor as _};
-use rustc_hir::lang_items::LangItem;
-use rustc_hir::{GenericArg, GenericArgs, OpaqueTyOrigin};
-use rustc_middle::middle::stability::AllowUnstable;
-use rustc_middle::ty::subst::{self, GenericArgKind, InternalSubsts, Subst, SubstsRef};
-use rustc_middle::ty::GenericParamDefKind;
-use rustc_middle::ty::{
- self, Const, DefIdTree, EarlyBinder, IsSuggestable, Ty, TyCtxt, TypeVisitable,
-};
-use rustc_session::lint::builtin::{AMBIGUOUS_ASSOCIATED_ITEMS, BARE_TRAIT_OBJECTS};
-use rustc_span::edition::Edition;
-use rustc_span::lev_distance::find_best_match_for_name;
-use rustc_span::symbol::{kw, Ident, Symbol};
-use rustc_span::{Span, DUMMY_SP};
-use rustc_target::spec::abi;
-use rustc_trait_selection::traits;
-use rustc_trait_selection::traits::astconv_object_safety_violations;
-use rustc_trait_selection::traits::error_reporting::{
- report_object_safety_error, suggestions::NextTypeParamName,
-};
-use rustc_trait_selection::traits::wf::object_region_bounds;
-
-use smallvec::SmallVec;
-use std::collections::BTreeSet;
-use std::slice;
-
-#[derive(Debug)]
-pub struct PathSeg(pub DefId, pub usize);
-
-pub trait AstConv<'tcx> {
- fn tcx<'a>(&'a self) -> TyCtxt<'tcx>;
-
- fn item_def_id(&self) -> Option<DefId>;
-
- /// Returns predicates in scope of the form `X: Foo<T>`, where `X`
- /// is a type parameter with the given id `def_id` and `T`
- /// matches `assoc_name`. This is a subset of the full set of
- /// predicates.
- ///
- /// This is used for one specific purpose: resolving "short-hand"
- /// associated type references like `T::Item`. In principle, we
- /// would do that by first getting the full set of predicates in
- /// scope and then filtering down to find those that apply to `T`,
- /// but this can lead to cycle errors. The problem is that we have
- /// to do this resolution *in order to create the predicates in
- /// the first place*. Hence, we have this "special pass".
- fn get_type_parameter_bounds(
- &self,
- span: Span,
- def_id: DefId,
- assoc_name: Ident,
- ) -> ty::GenericPredicates<'tcx>;
-
- /// Returns the lifetime to use when a lifetime is omitted (and not elided).
- fn re_infer(&self, param: Option<&ty::GenericParamDef>, span: Span)
- -> Option<ty::Region<'tcx>>;
-
- /// Returns the type to use when a type is omitted.
- fn ty_infer(&self, param: Option<&ty::GenericParamDef>, span: Span) -> Ty<'tcx>;
-
- /// Returns `true` if `_` is allowed in type signatures in the current context.
- fn allow_ty_infer(&self) -> bool;
-
- /// Returns the const to use when a const is omitted.
- fn ct_infer(
- &self,
- ty: Ty<'tcx>,
- param: Option<&ty::GenericParamDef>,
- span: Span,
- ) -> Const<'tcx>;
-
- /// Projecting an associated type from a (potentially)
- /// higher-ranked trait reference is more complicated, because of
- /// the possibility of late-bound regions appearing in the
- /// associated type binding. This is not legal in function
- /// signatures for that reason. In a function body, we can always
- /// handle it because we can use inference variables to remove the
- /// late-bound regions.
- fn projected_ty_from_poly_trait_ref(
- &self,
- span: Span,
- item_def_id: DefId,
- item_segment: &hir::PathSegment<'_>,
- poly_trait_ref: ty::PolyTraitRef<'tcx>,
- ) -> Ty<'tcx>;
-
- /// Normalize an associated type coming from the user.
- fn normalize_ty(&self, span: Span, ty: Ty<'tcx>) -> Ty<'tcx>;
-
- /// Invoked when we encounter an error from some prior pass
- /// (e.g., resolve) that is translated into a ty-error. This is
- /// used to help suppress derived errors typeck might otherwise
- /// report.
- fn set_tainted_by_errors(&self);
-
- fn record_ty(&self, hir_id: hir::HirId, ty: Ty<'tcx>, span: Span);
-}
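The "short-hand" associated type references that `get_type_parameter_bounds` exists to resolve look like this in user code (illustrative):

// `T::Item` is resolved through the single in-scope predicate `T: Iterator`
// rather than by computing the full predicate set, which could cycle, as the
// doc comment on `get_type_parameter_bounds` above explains.
fn first<T: Iterator>(it: &mut T) -> Option<T::Item> {
    it.next()
}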
-
-#[derive(Debug)]
-struct ConvertedBinding<'a, 'tcx> {
- hir_id: hir::HirId,
- item_name: Ident,
- kind: ConvertedBindingKind<'a, 'tcx>,
- gen_args: &'a GenericArgs<'a>,
- span: Span,
-}
-
-#[derive(Debug)]
-enum ConvertedBindingKind<'a, 'tcx> {
- Equality(ty::Term<'tcx>),
- Constraint(&'a [hir::GenericBound<'a>]),
-}
-
-/// New-typed boolean indicating whether explicit late-bound lifetimes
-/// are present in a set of generic arguments.
-///
-/// For example if we have some method `fn f<'a>(&'a self)` implemented
-/// for some type `T`, although `f` is generic in the lifetime `'a`, `'a`
-/// is late-bound so should not be provided explicitly. Thus, if `f` is
-/// instantiated with some generic arguments providing `'a` explicitly,
-/// we taint those arguments with `ExplicitLateBound::Yes` so that we
-/// can provide an appropriate diagnostic later.
-#[derive(Copy, Clone, PartialEq)]
-pub enum ExplicitLateBound {
- Yes,
- No,
-}
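In user-facing terms, `ExplicitLateBound::Yes` marks calls like the commented-out one below (illustrative):

fn takes_ref<'a>(x: &'a u32) -> &'a u32 {
    x
}

fn main() {
    let n = 3;
    let _ = takes_ref(&n); // fine: the late-bound `'a` is inferred at the call
    // let _ = takes_ref::<'static>(&n);
    // ^ supplying the late-bound `'a` explicitly is what gets tainted as
    //   `ExplicitLateBound::Yes` and reported via the
    //   `late_bound_lifetime_arguments` lint.
}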
-
-#[derive(Copy, Clone, PartialEq)]
-pub enum IsMethodCall {
- Yes,
- No,
-}
-
-/// Denotes the "position" of a generic argument, indicating if it is a generic type,
-/// generic function or generic method call.
-#[derive(Copy, Clone, PartialEq)]
-pub(crate) enum GenericArgPosition {
- Type,
- Value, // e.g., functions
- MethodCall,
-}
-
-/// A marker denoting that the generic arguments that were
-/// provided did not match the respective generic parameters.
-#[derive(Clone, Default)]
-pub struct GenericArgCountMismatch {
- /// Indicates whether a fatal error was reported (`Some`), or just a lint (`None`).
- pub reported: Option<ErrorGuaranteed>,
- /// A list of spans of arguments provided that were not valid.
- pub invalid_args: Vec<Span>,
-}
-
-/// Decorates the result of a generic argument count mismatch
-/// check with whether explicit late bounds were provided.
-#[derive(Clone)]
-pub struct GenericArgCountResult {
- pub explicit_late_bound: ExplicitLateBound,
- pub correct: Result<(), GenericArgCountMismatch>,
-}
-
-pub trait CreateSubstsForGenericArgsCtxt<'a, 'tcx> {
- fn args_for_def_id(&mut self, def_id: DefId) -> (Option<&'a GenericArgs<'a>>, bool);
-
- fn provided_kind(
- &mut self,
- param: &ty::GenericParamDef,
- arg: &GenericArg<'_>,
- ) -> subst::GenericArg<'tcx>;
-
- fn inferred_kind(
- &mut self,
- substs: Option<&[subst::GenericArg<'tcx>]>,
- param: &ty::GenericParamDef,
- infer_args: bool,
- ) -> subst::GenericArg<'tcx>;
-}
-
-impl<'o, 'tcx> dyn AstConv<'tcx> + 'o {
- #[tracing::instrument(level = "debug", skip(self))]
- pub fn ast_region_to_region(
- &self,
- lifetime: &hir::Lifetime,
- def: Option<&ty::GenericParamDef>,
- ) -> ty::Region<'tcx> {
- let tcx = self.tcx();
- let lifetime_name = |def_id| tcx.hir().name(tcx.hir().local_def_id_to_hir_id(def_id));
-
- let r = match tcx.named_region(lifetime.hir_id) {
- Some(rl::Region::Static) => tcx.lifetimes.re_static,
-
- Some(rl::Region::LateBound(debruijn, index, def_id)) => {
- let name = lifetime_name(def_id.expect_local());
- let br = ty::BoundRegion {
- var: ty::BoundVar::from_u32(index),
- kind: ty::BrNamed(def_id, name),
- };
- tcx.mk_region(ty::ReLateBound(debruijn, br))
- }
-
- Some(rl::Region::EarlyBound(index, id)) => {
- let name = lifetime_name(id.expect_local());
- tcx.mk_region(ty::ReEarlyBound(ty::EarlyBoundRegion { def_id: id, index, name }))
- }
-
- Some(rl::Region::Free(scope, id)) => {
- let name = lifetime_name(id.expect_local());
- tcx.mk_region(ty::ReFree(ty::FreeRegion {
- scope,
- bound_region: ty::BrNamed(id, name),
- }))
-
- // (*) -- not late-bound, won't change
- }
-
- None => {
- self.re_infer(def, lifetime.span).unwrap_or_else(|| {
- debug!(?lifetime, "unelided lifetime in signature");
-
- // This indicates an illegal lifetime
- // elision. `resolve_lifetime` should have
- // reported an error in this case -- but if
- // not, let's error out.
- tcx.sess.delay_span_bug(lifetime.span, "unelided lifetime in signature");
-
- // Supply some dummy value. We don't have an
- // `re_error`, annoyingly, so use `'static`.
- tcx.lifetimes.re_static
- })
- }
- };
-
- debug!("ast_region_to_region(lifetime={:?}) yields {:?}", lifetime, r);
-
- r
- }
-
- /// Given a path `path` that refers to an item `I` with the declared generics `decl_generics`,
- /// returns an appropriate set of substitutions for this particular reference to `I`.
- pub fn ast_path_substs_for_ty(
- &self,
- span: Span,
- def_id: DefId,
- item_segment: &hir::PathSegment<'_>,
- ) -> SubstsRef<'tcx> {
- let (substs, _) = self.create_substs_for_ast_path(
- span,
- def_id,
- &[],
- item_segment,
- item_segment.args(),
- item_segment.infer_args,
- None,
- );
- let assoc_bindings = self.create_assoc_bindings_for_generic_args(item_segment.args());
-
- if let Some(b) = assoc_bindings.first() {
- Self::prohibit_assoc_ty_binding(self.tcx(), b.span);
- }
-
- substs
- }
-
- /// Given the type/lifetime/const arguments provided to some path (along with
- /// an implicit `Self`, if this is a trait reference), returns the complete
- /// set of substitutions. This may involve applying defaulted type parameters.
- /// Constraints on associated types are created from `create_assoc_bindings_for_generic_args`.
- ///
- /// Example:
- ///
- /// ```ignore (illustrative)
- /// T: std::ops::Index<usize, Output = u32>
- /// // ^1 ^^^^^^^^^^^^^^2 ^^^^3 ^^^^^^^^^^^4
- /// ```
- ///
- /// 1. The `self_ty` here would refer to the type `T`.
- /// 2. The path in question is the path to the trait `std::ops::Index`,
- /// which will have been resolved to a `def_id`
- /// 3. The `generic_args` contains info on the `<...>` contents. The `usize` type
- /// parameters are returned in the `SubstsRef`, the associated type bindings like
- /// `Output = u32` are returned from `create_assoc_bindings_for_generic_args`.
- ///
- /// Note that the type listing given here is *exactly* what the user provided.
- ///
- /// For (generic) associated types
- ///
- /// ```ignore (illustrative)
- /// <Vec<u8> as Iterable<u8>>::Iter::<'a>
- /// ```
- ///
- /// Here, the parent substs are the substs for the parent trait,
- /// `[Vec<u8>, u8]`, and `generic_args` holds the arguments for the associated
- /// type itself, `['a]`. The returned `SubstsRef` concatenates these two
- /// lists: `[Vec<u8>, u8, 'a]`.
- #[tracing::instrument(level = "debug", skip(self, span))]
- fn create_substs_for_ast_path<'a>(
- &self,
- span: Span,
- def_id: DefId,
- parent_substs: &[subst::GenericArg<'tcx>],
- seg: &hir::PathSegment<'_>,
- generic_args: &'a hir::GenericArgs<'_>,
- infer_args: bool,
- self_ty: Option<Ty<'tcx>>,
- ) -> (SubstsRef<'tcx>, GenericArgCountResult) {
- // If the type is parameterized by this region, then replace this
- // region with the current anon region binding (in other words,
- // whatever & would get replaced with).
-
- let tcx = self.tcx();
- let generics = tcx.generics_of(def_id);
- debug!("generics: {:?}", generics);
-
- if generics.has_self {
- if generics.parent.is_some() {
- // The parent is a trait so it should have at least one subst
- // for the `Self` type.
- assert!(!parent_substs.is_empty())
- } else {
- // This item (presumably a trait) needs a self-type.
- assert!(self_ty.is_some());
- }
- } else {
- assert!(self_ty.is_none() && parent_substs.is_empty());
- }
-
- let arg_count = Self::check_generic_arg_count(
- tcx,
- span,
- def_id,
- seg,
- generics,
- generic_args,
- GenericArgPosition::Type,
- self_ty.is_some(),
- infer_args,
- );
-
- // Skip processing if type has no generic parameters.
- // Traits always have `Self` as a generic parameter, which means they will not return early
- // here and so associated type bindings will be handled regardless of whether there are any
- // non-`Self` generic parameters.
- if generics.params.is_empty() {
- return (tcx.intern_substs(&[]), arg_count);
- }
-
- let is_object = self_ty.map_or(false, |ty| ty == self.tcx().types.trait_object_dummy_self);
-
- struct SubstsForAstPathCtxt<'a, 'tcx> {
- astconv: &'a (dyn AstConv<'tcx> + 'a),
- def_id: DefId,
- generic_args: &'a GenericArgs<'a>,
- span: Span,
- missing_type_params: Vec<Symbol>,
- inferred_params: Vec<Span>,
- infer_args: bool,
- is_object: bool,
- }
-
- impl<'tcx, 'a> SubstsForAstPathCtxt<'tcx, 'a> {
- fn default_needs_object_self(&mut self, param: &ty::GenericParamDef) -> bool {
- let tcx = self.astconv.tcx();
- if let GenericParamDefKind::Type { has_default, .. } = param.kind {
- if self.is_object && has_default {
- let default_ty = tcx.at(self.span).type_of(param.def_id);
- let self_param = tcx.types.self_param;
- if default_ty.walk().any(|arg| arg == self_param.into()) {
- // There is no suitable inference default for a type parameter
- // that references self, in an object type.
- return true;
- }
- }
- }
-
- false
- }
- }
-
- impl<'a, 'tcx> CreateSubstsForGenericArgsCtxt<'a, 'tcx> for SubstsForAstPathCtxt<'a, 'tcx> {
- fn args_for_def_id(&mut self, did: DefId) -> (Option<&'a GenericArgs<'a>>, bool) {
- if did == self.def_id {
- (Some(self.generic_args), self.infer_args)
- } else {
- // The last component of this tuple is unimportant.
- (None, false)
- }
- }
-
- fn provided_kind(
- &mut self,
- param: &ty::GenericParamDef,
- arg: &GenericArg<'_>,
- ) -> subst::GenericArg<'tcx> {
- let tcx = self.astconv.tcx();
-
- let mut handle_ty_args = |has_default, ty: &hir::Ty<'_>| {
- if has_default {
- tcx.check_optional_stability(
- param.def_id,
- Some(arg.id()),
- arg.span(),
- None,
- AllowUnstable::No,
- |_, _| {
- // Default generic parameters may not be marked
- // with stability attributes, i.e. when the
- // default parameter was defined at the same time
- // as the rest of the type. As such, we ignore missing
- // stability attributes.
- },
- );
- }
- if let (hir::TyKind::Infer, false) = (&ty.kind, self.astconv.allow_ty_infer()) {
- self.inferred_params.push(ty.span);
- tcx.ty_error().into()
- } else {
- self.astconv.ast_ty_to_ty(ty).into()
- }
- };
-
- match (&param.kind, arg) {
- (GenericParamDefKind::Lifetime, GenericArg::Lifetime(lt)) => {
- self.astconv.ast_region_to_region(lt, Some(param)).into()
- }
- (&GenericParamDefKind::Type { has_default, .. }, GenericArg::Type(ty)) => {
- handle_ty_args(has_default, ty)
- }
- (&GenericParamDefKind::Type { has_default, .. }, GenericArg::Infer(inf)) => {
- handle_ty_args(has_default, &inf.to_ty())
- }
- (GenericParamDefKind::Const { .. }, GenericArg::Const(ct)) => {
- ty::Const::from_opt_const_arg_anon_const(
- tcx,
- ty::WithOptConstParam {
- did: tcx.hir().local_def_id(ct.value.hir_id),
- const_param_did: Some(param.def_id),
- },
- )
- .into()
- }
- (&GenericParamDefKind::Const { .. }, hir::GenericArg::Infer(inf)) => {
- let ty = tcx.at(self.span).type_of(param.def_id);
- if self.astconv.allow_ty_infer() {
- self.astconv.ct_infer(ty, Some(param), inf.span).into()
- } else {
- self.inferred_params.push(inf.span);
- tcx.const_error(ty).into()
- }
- }
- _ => unreachable!(),
- }
- }
-
- fn inferred_kind(
- &mut self,
- substs: Option<&[subst::GenericArg<'tcx>]>,
- param: &ty::GenericParamDef,
- infer_args: bool,
- ) -> subst::GenericArg<'tcx> {
- let tcx = self.astconv.tcx();
- match param.kind {
- GenericParamDefKind::Lifetime => self
- .astconv
- .re_infer(Some(param), self.span)
- .unwrap_or_else(|| {
- debug!(?param, "unelided lifetime in signature");
-
- // This indicates an illegal lifetime in a non-assoc-trait position
- tcx.sess.delay_span_bug(self.span, "unelided lifetime in signature");
-
- // Supply some dummy value. We don't have an
- // `re_error`, annoyingly, so use `'static`.
- tcx.lifetimes.re_static
- })
- .into(),
- GenericParamDefKind::Type { has_default, .. } => {
- if !infer_args && has_default {
- // No type parameter provided, but a default exists.
-
- // If we are converting an object type, then the
- // `Self` parameter is unknown. However, some of the
- // other type parameters may reference `Self` in their
- // defaults. This will lead to an ICE if we are not
- // careful!
- if self.default_needs_object_self(param) {
- self.missing_type_params.push(param.name);
- tcx.ty_error().into()
- } else {
- // This is a default type parameter.
- let substs = substs.unwrap();
- if substs.iter().any(|arg| match arg.unpack() {
- GenericArgKind::Type(ty) => ty.references_error(),
- _ => false,
- }) {
- // Avoid ICE #86756 when type error recovery goes awry.
- return tcx.ty_error().into();
- }
- self.astconv
- .normalize_ty(
- self.span,
- EarlyBinder(tcx.at(self.span).type_of(param.def_id))
- .subst(tcx, substs),
- )
- .into()
- }
- } else if infer_args {
- // No type parameters were provided, we can infer all.
- let param = if !self.default_needs_object_self(param) {
- Some(param)
- } else {
- None
- };
- self.astconv.ty_infer(param, self.span).into()
- } else {
- // We've already errored above about the mismatch.
- tcx.ty_error().into()
- }
- }
- GenericParamDefKind::Const { has_default } => {
- let ty = tcx.at(self.span).type_of(param.def_id);
- if !infer_args && has_default {
- tcx.bound_const_param_default(param.def_id)
- .subst(tcx, substs.unwrap())
- .into()
- } else {
- if infer_args {
- self.astconv.ct_infer(ty, Some(param), self.span).into()
- } else {
- // We've already errored above about the mismatch.
- tcx.const_error(ty).into()
- }
- }
- }
- }
- }
- }
-
- let mut substs_ctx = SubstsForAstPathCtxt {
- astconv: self,
- def_id,
- span,
- generic_args,
- missing_type_params: vec![],
- inferred_params: vec![],
- infer_args,
- is_object,
- };
- let substs = Self::create_substs_for_generic_args(
- tcx,
- def_id,
- parent_substs,
- self_ty.is_some(),
- self_ty,
- &arg_count,
- &mut substs_ctx,
- );
-
- self.complain_about_missing_type_params(
- substs_ctx.missing_type_params,
- def_id,
- span,
- generic_args.args.is_empty(),
- );
-
- debug!(
- "create_substs_for_ast_path(generic_params={:?}, self_ty={:?}) -> {:?}",
- generics, self_ty, substs
- );
-
- (substs, arg_count)
- }
-
- fn create_assoc_bindings_for_generic_args<'a>(
- &self,
- generic_args: &'a hir::GenericArgs<'_>,
- ) -> Vec<ConvertedBinding<'a, 'tcx>> {
- // Convert associated-type bindings or constraints into a separate vector.
- // Example: Given this:
- //
- // T: Iterator<Item = u32>
- //
- // The `T` is passed in as a self-type; the `Item = u32` is
- // not a "type parameter" of the `Iterator` trait, but rather
- // a restriction on `<T as Iterator>::Item`, so it is passed
- // back separately.
- let assoc_bindings = generic_args
- .bindings
- .iter()
- .map(|binding| {
- let kind = match binding.kind {
- hir::TypeBindingKind::Equality { ref term } => match term {
- hir::Term::Ty(ref ty) => {
- ConvertedBindingKind::Equality(self.ast_ty_to_ty(ty).into())
- }
- hir::Term::Const(ref c) => {
- let local_did = self.tcx().hir().local_def_id(c.hir_id);
- let c = Const::from_anon_const(self.tcx(), local_did);
- ConvertedBindingKind::Equality(c.into())
- }
- },
- hir::TypeBindingKind::Constraint { ref bounds } => {
- ConvertedBindingKind::Constraint(bounds)
- }
- };
- ConvertedBinding {
- hir_id: binding.hir_id,
- item_name: binding.ident,
- kind,
- gen_args: binding.gen_args,
- span: binding.span,
- }
- })
- .collect();
-
- assoc_bindings
- }
-
- pub(crate) fn create_substs_for_associated_item(
- &self,
- tcx: TyCtxt<'tcx>,
- span: Span,
- item_def_id: DefId,
- item_segment: &hir::PathSegment<'_>,
- parent_substs: SubstsRef<'tcx>,
- ) -> SubstsRef<'tcx> {
- debug!(
- "create_substs_for_associated_item(span: {:?}, item_def_id: {:?}, item_segment: {:?}",
- span, item_def_id, item_segment
- );
- if tcx.generics_of(item_def_id).params.is_empty() {
- self.prohibit_generics(slice::from_ref(item_segment).iter(), |_| {});
-
- parent_substs
- } else {
- self.create_substs_for_ast_path(
- span,
- item_def_id,
- parent_substs,
- item_segment,
- item_segment.args(),
- item_segment.infer_args,
- None,
- )
- .0
- }
- }
-
- /// Instantiates the path for the given trait reference, assuming that it's
- /// bound to a valid trait type. The type _cannot_ be a type other than a trait
- /// type, and the corresponding `ty::TraitRef` is returned.
- ///
- /// Associated type bindings like `Foo<T = X>` are not allowed on the trait
- /// path here; any that are provided are reported as an error.
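- /// As an illustrative sketch (hypothetical trait and impl):
- ///
- /// ```ignore (illustrative)
- /// trait Convert<T> { fn convert(self) -> T; }
- /// struct Meters(f64);
- /// impl Convert<f64> for Meters { fn convert(self) -> f64 { self.0 } }
- /// ```
- ///
- /// Lowering the `Convert<f64>` path from the impl header with
- /// `self_ty = Meters` yields the trait reference `Meters: Convert<f64>`.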
- pub fn instantiate_mono_trait_ref(
- &self,
- trait_ref: &hir::TraitRef<'_>,
- self_ty: Ty<'tcx>,
- ) -> ty::TraitRef<'tcx> {
- self.prohibit_generics(trait_ref.path.segments.split_last().unwrap().1.iter(), |_| {});
-
- self.ast_path_to_mono_trait_ref(
- trait_ref.path.span,
- trait_ref.trait_def_id().unwrap_or_else(|| FatalError.raise()),
- self_ty,
- trait_ref.path.segments.last().unwrap(),
- true,
- )
- }
-
- fn instantiate_poly_trait_ref_inner(
- &self,
- hir_id: hir::HirId,
- span: Span,
- binding_span: Option<Span>,
- constness: ty::BoundConstness,
- bounds: &mut Bounds<'tcx>,
- speculative: bool,
- trait_ref_span: Span,
- trait_def_id: DefId,
- trait_segment: &hir::PathSegment<'_>,
- args: &GenericArgs<'_>,
- infer_args: bool,
- self_ty: Ty<'tcx>,
- ) -> GenericArgCountResult {
- let (substs, arg_count) = self.create_substs_for_ast_path(
- trait_ref_span,
- trait_def_id,
- &[],
- trait_segment,
- args,
- infer_args,
- Some(self_ty),
- );
-
- let tcx = self.tcx();
- let bound_vars = tcx.late_bound_vars(hir_id);
- debug!(?bound_vars);
-
- let assoc_bindings = self.create_assoc_bindings_for_generic_args(args);
-
- let poly_trait_ref =
- ty::Binder::bind_with_vars(ty::TraitRef::new(trait_def_id, substs), bound_vars);
-
- debug!(?poly_trait_ref, ?assoc_bindings);
- bounds.trait_bounds.push((poly_trait_ref, span, constness));
-
- let mut dup_bindings = FxHashMap::default();
- for binding in &assoc_bindings {
- // Specify type to assert that error was already reported in `Err` case.
- let _: Result<_, ErrorGuaranteed> = self.add_predicates_for_ast_type_binding(
- hir_id,
- poly_trait_ref,
- binding,
- bounds,
- speculative,
- &mut dup_bindings,
- binding_span.unwrap_or(binding.span),
- );
- // Okay to ignore `Err` because of `ErrorGuaranteed` (see above).
- }
-
- arg_count
- }
-
- /// Given a trait bound like `Debug`, applies that trait bound to the given self-type to construct
- /// a full trait reference. The resulting trait reference is returned. This may also generate
- /// auxiliary bounds, which are added to `bounds`.
- ///
- /// Example:
- ///
- /// ```ignore (illustrative)
- /// poly_trait_ref = Iterator<Item = u32>
- /// self_ty = Foo
- /// ```
- ///
- /// this would return `Foo: Iterator` and add `<Foo as Iterator>::Item = u32` into `bounds`.
- ///
- /// **A note on binders:** against our usual convention, there is an implied binder around
- /// the `self_ty` and `poly_trait_ref` parameters here. So they may reference bound regions.
- /// If for example you had `for<'a> Foo<'a>: Bar<'a>`, then the `self_ty` would be `Foo<'a>`
- /// where `'a` is a bound region at depth 0. Similarly, the `poly_trait_ref` would be
- /// `Bar<'a>`. The returned poly-trait-ref will have this binder instantiated explicitly,
- /// however.
- #[tracing::instrument(level = "debug", skip(self, span, constness, bounds, speculative))]
- pub(crate) fn instantiate_poly_trait_ref(
- &self,
- trait_ref: &hir::TraitRef<'_>,
- span: Span,
- constness: ty::BoundConstness,
- self_ty: Ty<'tcx>,
- bounds: &mut Bounds<'tcx>,
- speculative: bool,
- ) -> GenericArgCountResult {
- let hir_id = trait_ref.hir_ref_id;
- let binding_span = None;
- let trait_ref_span = trait_ref.path.span;
- let trait_def_id = trait_ref.trait_def_id().unwrap_or_else(|| FatalError.raise());
- let trait_segment = trait_ref.path.segments.last().unwrap();
- let args = trait_segment.args();
- let infer_args = trait_segment.infer_args;
-
- self.prohibit_generics(trait_ref.path.segments.split_last().unwrap().1.iter(), |_| {});
- self.complain_about_internal_fn_trait(span, trait_def_id, trait_segment, false);
-
- self.instantiate_poly_trait_ref_inner(
- hir_id,
- span,
- binding_span,
- constness,
- bounds,
- speculative,
- trait_ref_span,
- trait_def_id,
- trait_segment,
- args,
- infer_args,
- self_ty,
- )
- }
-
- pub(crate) fn instantiate_lang_item_trait_ref(
- &self,
- lang_item: hir::LangItem,
- span: Span,
- hir_id: hir::HirId,
- args: &GenericArgs<'_>,
- self_ty: Ty<'tcx>,
- bounds: &mut Bounds<'tcx>,
- ) {
- let binding_span = Some(span);
- let constness = ty::BoundConstness::NotConst;
- let speculative = false;
- let trait_ref_span = span;
- let trait_def_id = self.tcx().require_lang_item(lang_item, Some(span));
- let trait_segment = &hir::PathSegment::invalid();
- let infer_args = false;
-
- self.instantiate_poly_trait_ref_inner(
- hir_id,
- span,
- binding_span,
- constness,
- bounds,
- speculative,
- trait_ref_span,
- trait_def_id,
- trait_segment,
- args,
- infer_args,
- self_ty,
- );
- }
-
- fn ast_path_to_mono_trait_ref(
- &self,
- span: Span,
- trait_def_id: DefId,
- self_ty: Ty<'tcx>,
- trait_segment: &hir::PathSegment<'_>,
- is_impl: bool,
- ) -> ty::TraitRef<'tcx> {
- let (substs, _) = self.create_substs_for_ast_trait_ref(
- span,
- trait_def_id,
- self_ty,
- trait_segment,
- is_impl,
- );
- let assoc_bindings = self.create_assoc_bindings_for_generic_args(trait_segment.args());
- if let Some(b) = assoc_bindings.first() {
- Self::prohibit_assoc_ty_binding(self.tcx(), b.span);
- }
- ty::TraitRef::new(trait_def_id, substs)
- }
-
- #[tracing::instrument(level = "debug", skip(self, span))]
- fn create_substs_for_ast_trait_ref<'a>(
- &self,
- span: Span,
- trait_def_id: DefId,
- self_ty: Ty<'tcx>,
- trait_segment: &'a hir::PathSegment<'a>,
- is_impl: bool,
- ) -> (SubstsRef<'tcx>, GenericArgCountResult) {
- self.complain_about_internal_fn_trait(span, trait_def_id, trait_segment, is_impl);
-
- self.create_substs_for_ast_path(
- span,
- trait_def_id,
- &[],
- trait_segment,
- trait_segment.args(),
- trait_segment.infer_args,
- Some(self_ty),
- )
- }
-
- fn trait_defines_associated_type_named(&self, trait_def_id: DefId, assoc_name: Ident) -> bool {
- self.tcx()
- .associated_items(trait_def_id)
- .find_by_name_and_kind(self.tcx(), assoc_name, ty::AssocKind::Type, trait_def_id)
- .is_some()
- }
- fn trait_defines_associated_const_named(&self, trait_def_id: DefId, assoc_name: Ident) -> bool {
- self.tcx()
- .associated_items(trait_def_id)
- .find_by_name_and_kind(self.tcx(), assoc_name, ty::AssocKind::Const, trait_def_id)
- .is_some()
- }
-
- // Sets `implicitly_sized` to true on `Bounds` if necessary
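- // As an illustrative sketch (hypothetical functions):
- //
- // ```
- // fn by_value<T>(x: T) {}        // `T: Sized` is added implicitly
- // fn by_ref<T: ?Sized>(x: &T) {} // the `?Sized` unbound suppresses it
- // ```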
- pub(crate) fn add_implicitly_sized<'hir>(
- &self,
- bounds: &mut Bounds<'hir>,
- ast_bounds: &'hir [hir::GenericBound<'hir>],
- self_ty_where_predicates: Option<(hir::HirId, &'hir [hir::WherePredicate<'hir>])>,
- span: Span,
- ) {
- let tcx = self.tcx();
-
- // Try to find an unbound in bounds.
- let mut unbound = None;
- let mut search_bounds = |ast_bounds: &'hir [hir::GenericBound<'hir>]| {
- for ab in ast_bounds {
- if let hir::GenericBound::Trait(ptr, hir::TraitBoundModifier::Maybe) = ab {
- if unbound.is_none() {
- unbound = Some(&ptr.trait_ref);
- } else {
- tcx.sess.emit_err(MultipleRelaxedDefaultBounds { span });
- }
- }
- }
- };
- search_bounds(ast_bounds);
- if let Some((self_ty, where_clause)) = self_ty_where_predicates {
- let self_ty_def_id = tcx.hir().local_def_id(self_ty).to_def_id();
- for clause in where_clause {
- if let hir::WherePredicate::BoundPredicate(pred) = clause {
- if pred.is_param_bound(self_ty_def_id) {
- search_bounds(pred.bounds);
- }
- }
- }
- }
-
- let sized_def_id = tcx.lang_items().require(LangItem::Sized);
- match (&sized_def_id, unbound) {
- (Ok(sized_def_id), Some(tpb))
- if tpb.path.res == Res::Def(DefKind::Trait, *sized_def_id) =>
- {
- // There was in fact a `?Sized` bound, return without doing anything
- return;
- }
- (_, Some(_)) => {
- // There was a `?Trait` bound, but it was not `?Sized`; warn.
- tcx.sess.span_warn(
- span,
- "default bound relaxed for a type parameter, but \
- this does nothing because the given bound is not \
- a default; only `?Sized` is supported",
- );
- // Otherwise, add implicitly sized if `Sized` is available.
- }
- _ => {
- // There was no `?Sized` bound; add implicitly sized if `Sized` is available.
- }
- }
- if sized_def_id.is_err() {
- // No lang item for `Sized`, so we can't add it as a bound.
- return;
- }
- bounds.implicitly_sized = Some(span);
- }
-
- /// This helper takes a *converted* parameter type (`param_ty`)
- /// and an *unconverted* list of bounds:
- ///
- /// ```text
- /// fn foo<T: Debug>
- /// ^ ^^^^^ `ast_bounds` parameter, in HIR form
- /// |
- /// `param_ty`, in ty form
- /// ```
- ///
- /// It adds these `ast_bounds` into the `bounds` structure.
- ///
- /// **A note on binders:** there is an implied binder around
- /// `param_ty` and `ast_bounds`. See `instantiate_poly_trait_ref`
- /// for more details.
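- /// As an illustrative sketch (hypothetical bound list):
- ///
- /// ```ignore (illustrative)
- /// fn f<T: std::fmt::Debug + ?Sized + 'static>() {}
- /// ```
- ///
- /// Here `std::fmt::Debug` is pushed as a trait bound, `?Sized` is skipped
- /// (relaxed bounds are handled when computing the implicit `Sized` bound),
- /// and `'static` is pushed as a region bound.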
- #[tracing::instrument(level = "debug", skip(self, ast_bounds, bounds))]
- pub(crate) fn add_bounds<'hir, I: Iterator<Item = &'hir hir::GenericBound<'hir>>>(
- &self,
- param_ty: Ty<'tcx>,
- ast_bounds: I,
- bounds: &mut Bounds<'tcx>,
- bound_vars: &'tcx ty::List<ty::BoundVariableKind>,
- ) {
- for ast_bound in ast_bounds {
- match ast_bound {
- hir::GenericBound::Trait(poly_trait_ref, modifier) => {
- let constness = match modifier {
- hir::TraitBoundModifier::MaybeConst => ty::BoundConstness::ConstIfConst,
- hir::TraitBoundModifier::None => ty::BoundConstness::NotConst,
- hir::TraitBoundModifier::Maybe => continue,
- };
-
- let _ = self.instantiate_poly_trait_ref(
- &poly_trait_ref.trait_ref,
- poly_trait_ref.span,
- constness,
- param_ty,
- bounds,
- false,
- );
- }
- &hir::GenericBound::LangItemTrait(lang_item, span, hir_id, args) => {
- self.instantiate_lang_item_trait_ref(
- lang_item, span, hir_id, args, param_ty, bounds,
- );
- }
- hir::GenericBound::Outlives(lifetime) => {
- let region = self.ast_region_to_region(lifetime, None);
- bounds
- .region_bounds
- .push((ty::Binder::bind_with_vars(region, bound_vars), lifetime.span));
- }
- }
- }
- }
-
- /// Translates a list of bounds from the HIR into the `Bounds` data structure.
- /// The self-type for the bounds is given by `param_ty`.
- ///
- /// Example:
- ///
- /// ```ignore (illustrative)
- /// fn foo<T: Bar + Baz>() { }
- /// // ^ ^^^^^^^^^ ast_bounds
- /// // param_ty
- /// ```
- ///
- /// In contexts like the example above, `param_ty` is considered `Sized` unless
- /// there is an explicit `?Sized` bound; that is not the case in supertrait
- /// listings like `trait Foo: Bar + Baz`. The implicit `Sized` bound itself is
- /// added separately, via `add_implicitly_sized`.
- pub(crate) fn compute_bounds(
- &self,
- param_ty: Ty<'tcx>,
- ast_bounds: &[hir::GenericBound<'_>],
- ) -> Bounds<'tcx> {
- self.compute_bounds_inner(param_ty, ast_bounds)
- }
-
- /// Convert the bounds in `ast_bounds` that refer to traits which define an associated type
- /// named `assoc_name` into ty::Bounds. Ignore the rest.
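- /// As an illustrative sketch (hypothetical trait):
- ///
- /// ```ignore (illustrative)
- /// trait Parser { type Output; }
- ///
- /// fn run<P: Parser + Send>(p: P) {}
- /// ```
- ///
- /// Filtering the bounds of `P` with `assoc_name = Output` keeps only the
- /// `Parser` bound, since `Send` cannot define an associated type named
- /// `Output`.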
- pub(crate) fn compute_bounds_that_match_assoc_type(
- &self,
- param_ty: Ty<'tcx>,
- ast_bounds: &[hir::GenericBound<'_>],
- assoc_name: Ident,
- ) -> Bounds<'tcx> {
- let mut result = Vec::new();
-
- for ast_bound in ast_bounds {
- if let Some(trait_ref) = ast_bound.trait_ref()
- && let Some(trait_did) = trait_ref.trait_def_id()
- && self.tcx().trait_may_define_assoc_type(trait_did, assoc_name)
- {
- result.push(ast_bound.clone());
- }
- }
-
- self.compute_bounds_inner(param_ty, &result)
- }
-
- fn compute_bounds_inner(
- &self,
- param_ty: Ty<'tcx>,
- ast_bounds: &[hir::GenericBound<'_>],
- ) -> Bounds<'tcx> {
- let mut bounds = Bounds::default();
-
- self.add_bounds(param_ty, ast_bounds.iter(), &mut bounds, ty::List::empty());
- debug!(?bounds);
-
- bounds
- }
-
- /// Given an HIR binding like `Item = Foo` or `Item: Foo`, pushes the corresponding predicates
- /// onto `bounds`.
- ///
- /// **A note on binders:** given something like `T: for<'a> Iterator<Item = &'a u32>`, the
- /// `trait_ref` here will be `for<'a> T: Iterator`. The `binding` data however is from *inside*
- /// the binder (e.g., `&'a u32`) and hence may reference bound regions.
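- /// As an illustrative sketch (hypothetical bindings):
- ///
- /// ```ignore (illustrative)
- /// fn f<T: Iterator<Item = u32>>() {}            // equality binding
- /// fn g<T: Iterator<Item: std::fmt::Debug>>() {} // constraint binding
- /// ```
- ///
- /// The first form produces the projection predicate
- /// `<T as Iterator>::Item == u32`; the second instead adds the `Debug` bound
- /// on `<T as Iterator>::Item`.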
- #[tracing::instrument(
- level = "debug",
- skip(self, bounds, speculative, dup_bindings, path_span)
- )]
- fn add_predicates_for_ast_type_binding(
- &self,
- hir_ref_id: hir::HirId,
- trait_ref: ty::PolyTraitRef<'tcx>,
- binding: &ConvertedBinding<'_, 'tcx>,
- bounds: &mut Bounds<'tcx>,
- speculative: bool,
- dup_bindings: &mut FxHashMap<DefId, Span>,
- path_span: Span,
- ) -> Result<(), ErrorGuaranteed> {
- // Given something like `U: SomeTrait<T = X>`, we want to produce a
- // predicate like `<U as SomeTrait>::T = X`. This is somewhat
- // subtle in the event that `T` is defined in a supertrait of
- // `SomeTrait`, because in that case we need to upcast.
- //
- // That is, consider this case:
- //
- // ```
- // trait SubTrait: SuperTrait<i32> { }
- // trait SuperTrait<A> { type T; }
- //
- // ... B: SubTrait<T = foo> ...
- // ```
- //
- // We want to produce `<B as SuperTrait<i32>>::T == foo`.
-
- let tcx = self.tcx();
-
- let candidate =
- if self.trait_defines_associated_type_named(trait_ref.def_id(), binding.item_name) {
- // Simple case: X is defined in the current trait.
- trait_ref
- } else {
- // Otherwise, we have to walk through the supertraits to find
- // those that do.
- self.one_bound_for_assoc_type(
- || traits::supertraits(tcx, trait_ref),
- || trait_ref.print_only_trait_path().to_string(),
- binding.item_name,
- path_span,
- || match binding.kind {
- ConvertedBindingKind::Equality(ty) => Some(ty.to_string()),
- _ => None,
- },
- )?
- };
-
- let (assoc_ident, def_scope) =
- tcx.adjust_ident_and_get_scope(binding.item_name, candidate.def_id(), hir_ref_id);
-
- // We have already adjusted the item name above, so compare with `ident.normalize_to_macros_2_0()` instead
- // of calling `filter_by_name_and_kind`.
- let find_item_of_kind = |kind| {
- tcx.associated_items(candidate.def_id())
- .filter_by_name_unhygienic(assoc_ident.name)
- .find(|i| i.kind == kind && i.ident(tcx).normalize_to_macros_2_0() == assoc_ident)
- };
- let assoc_item = find_item_of_kind(ty::AssocKind::Type)
- .or_else(|| find_item_of_kind(ty::AssocKind::Const))
- .expect("missing associated type");
-
- if !assoc_item.visibility(tcx).is_accessible_from(def_scope, tcx) {
- tcx.sess
- .struct_span_err(
- binding.span,
- &format!("{} `{}` is private", assoc_item.kind, binding.item_name),
- )
- .span_label(binding.span, &format!("private {}", assoc_item.kind))
- .emit();
- }
- tcx.check_stability(assoc_item.def_id, Some(hir_ref_id), binding.span, None);
-
- if !speculative {
- dup_bindings
- .entry(assoc_item.def_id)
- .and_modify(|prev_span| {
- self.tcx().sess.emit_err(ValueOfAssociatedStructAlreadySpecified {
- span: binding.span,
- prev_span: *prev_span,
- item_name: binding.item_name,
- def_path: tcx.def_path_str(assoc_item.container_id(tcx)),
- });
- })
- .or_insert(binding.span);
- }
-
- // Include substitutions for generic parameters of associated types
- let projection_ty = candidate.map_bound(|trait_ref| {
- let ident = Ident::new(assoc_item.name, binding.item_name.span);
- let item_segment = hir::PathSegment {
- ident,
- hir_id: Some(binding.hir_id),
- res: None,
- args: Some(binding.gen_args),
- infer_args: false,
- };
-
- let substs_trait_ref_and_assoc_item = self.create_substs_for_associated_item(
- tcx,
- path_span,
- assoc_item.def_id,
- &item_segment,
- trait_ref.substs,
- );
-
- debug!(
- "add_predicates_for_ast_type_binding: substs for trait-ref and assoc_item: {:?}",
- substs_trait_ref_and_assoc_item
- );
-
- ty::ProjectionTy {
- item_def_id: assoc_item.def_id,
- substs: substs_trait_ref_and_assoc_item,
- }
- });
-
- if !speculative {
- // Find any late-bound regions declared in `ty` that are not
- // declared in the trait-ref or assoc_item. These are not well-formed.
- //
- // Example:
- //
- // for<'a> <T as Iterator>::Item = &'a str // <-- 'a is bad
- // for<'a> <T as FnMut<(&'a u32,)>>::Output = &'a str // <-- 'a is ok
- if let ConvertedBindingKind::Equality(ty) = binding.kind {
- let late_bound_in_trait_ref =
- tcx.collect_constrained_late_bound_regions(&projection_ty);
- let late_bound_in_ty =
- tcx.collect_referenced_late_bound_regions(&trait_ref.rebind(ty));
- debug!("late_bound_in_trait_ref = {:?}", late_bound_in_trait_ref);
- debug!("late_bound_in_ty = {:?}", late_bound_in_ty);
-
- // FIXME: point at the type params that don't have appropriate lifetimes:
- // struct S1<F: for<'a> Fn(&i32, &i32) -> &'a i32>(F);
- // ---- ---- ^^^^^^^
- self.validate_late_bound_regions(
- late_bound_in_trait_ref,
- late_bound_in_ty,
- |br_name| {
- struct_span_err!(
- tcx.sess,
- binding.span,
- E0582,
- "binding for associated type `{}` references {}, \
- which does not appear in the trait input types",
- binding.item_name,
- br_name
- )
- },
- );
- }
- }
-
- match binding.kind {
- ConvertedBindingKind::Equality(mut term) => {
- // "Desugar" a constraint like `T: Iterator<Item = u32>` this to
- // the "projection predicate" for:
- //
- // `<T as Iterator>::Item = u32`
- let assoc_item_def_id = projection_ty.skip_binder().item_def_id;
- let def_kind = tcx.def_kind(assoc_item_def_id);
- match (def_kind, term) {
- (hir::def::DefKind::AssocTy, ty::Term::Ty(_))
- | (hir::def::DefKind::AssocConst, ty::Term::Const(_)) => (),
- (_, _) => {
- let got = if let ty::Term::Ty(_) = term { "type" } else { "constant" };
- let expected = def_kind.descr(assoc_item_def_id);
- tcx.sess
- .struct_span_err(
- binding.span,
- &format!("expected {expected} bound, found {got}"),
- )
- .span_note(
- tcx.def_span(assoc_item_def_id),
- &format!("{expected} defined here"),
- )
- .emit();
- term = match def_kind {
- hir::def::DefKind::AssocTy => tcx.ty_error().into(),
- hir::def::DefKind::AssocConst => tcx
- .const_error(
- tcx.bound_type_of(assoc_item_def_id)
- .subst(tcx, projection_ty.skip_binder().substs),
- )
- .into(),
- _ => unreachable!(),
- };
- }
- }
- bounds.projection_bounds.push((
- projection_ty.map_bound(|projection_ty| ty::ProjectionPredicate {
- projection_ty,
- term: term,
- }),
- binding.span,
- ));
- }
- ConvertedBindingKind::Constraint(ast_bounds) => {
- // "Desugar" a constraint like `T: Iterator<Item: Debug>` to
- //
- // `<T as Iterator>::Item: Debug`
- //
- // Calling `skip_binder` is okay, because `add_bounds` expects the `param_ty`
- // parameter to have a skipped binder.
- let param_ty = tcx.mk_ty(ty::Projection(projection_ty.skip_binder()));
- self.add_bounds(param_ty, ast_bounds.iter(), bounds, candidate.bound_vars());
- }
- }
- Ok(())
- }
-
- fn ast_path_to_ty(
- &self,
- span: Span,
- did: DefId,
- item_segment: &hir::PathSegment<'_>,
- ) -> Ty<'tcx> {
- let substs = self.ast_path_substs_for_ty(span, did, item_segment);
- self.normalize_ty(
- span,
- EarlyBinder(self.tcx().at(span).type_of(did)).subst(self.tcx(), substs),
- )
- }
-
- fn conv_object_ty_poly_trait_ref(
- &self,
- span: Span,
- trait_bounds: &[hir::PolyTraitRef<'_>],
- lifetime: &hir::Lifetime,
- borrowed: bool,
- ) -> Ty<'tcx> {
- let tcx = self.tcx();
-
- let mut bounds = Bounds::default();
- let mut potential_assoc_types = Vec::new();
- let dummy_self = self.tcx().types.trait_object_dummy_self;
- for trait_bound in trait_bounds.iter().rev() {
- if let GenericArgCountResult {
- correct:
- Err(GenericArgCountMismatch { invalid_args: cur_potential_assoc_types, .. }),
- ..
- } = self.instantiate_poly_trait_ref(
- &trait_bound.trait_ref,
- trait_bound.span,
- ty::BoundConstness::NotConst,
- dummy_self,
- &mut bounds,
- false,
- ) {
- potential_assoc_types.extend(cur_potential_assoc_types);
- }
- }
-
- // Expand trait aliases recursively and check that only one regular (non-auto) trait
- // is used and no 'maybe' bounds are used.
- let expanded_traits =
- traits::expand_trait_aliases(tcx, bounds.trait_bounds.iter().map(|&(a, b, _)| (a, b)));
- let (mut auto_traits, regular_traits): (Vec<_>, Vec<_>) = expanded_traits
- .filter(|i| i.trait_ref().self_ty().skip_binder() == dummy_self)
- .partition(|i| tcx.trait_is_auto(i.trait_ref().def_id()));
- if regular_traits.len() > 1 {
- let first_trait = &regular_traits[0];
- let additional_trait = &regular_traits[1];
- let mut err = struct_span_err!(
- tcx.sess,
- additional_trait.bottom().1,
- E0225,
- "only auto traits can be used as additional traits in a trait object"
- );
- additional_trait.label_with_exp_info(
- &mut err,
- "additional non-auto trait",
- "additional use",
- );
- first_trait.label_with_exp_info(&mut err, "first non-auto trait", "first use");
- err.help(&format!(
- "consider creating a new trait with all of these as supertraits and using that \
- trait here instead: `trait NewTrait: {} {{}}`",
- regular_traits
- .iter()
- .map(|t| t.trait_ref().print_only_trait_path().to_string())
- .collect::<Vec<_>>()
- .join(" + "),
- ));
- err.note(
- "auto-traits like `Send` and `Sync` are traits that have special properties; \
- for more information on them, visit \
- <https://doc.rust-lang.org/reference/special-types-and-traits.html#auto-traits>",
- );
- err.emit();
- }
-
- if regular_traits.is_empty() && auto_traits.is_empty() {
- let trait_alias_span = bounds
- .trait_bounds
- .iter()
- .map(|&(trait_ref, _, _)| trait_ref.def_id())
- .find(|&trait_ref| tcx.is_trait_alias(trait_ref))
- .map(|trait_ref| tcx.def_span(trait_ref));
- tcx.sess.emit_err(TraitObjectDeclaredWithNoTraits { span, trait_alias_span });
- return tcx.ty_error();
- }
-
- // Check that there are no gross object safety violations;
- // most importantly, that the supertraits don't contain `Self`,
- // to avoid ICEs.
- for item in &regular_traits {
- let object_safety_violations =
- astconv_object_safety_violations(tcx, item.trait_ref().def_id());
- if !object_safety_violations.is_empty() {
- report_object_safety_error(
- tcx,
- span,
- item.trait_ref().def_id(),
- &object_safety_violations,
- )
- .emit();
- return tcx.ty_error();
- }
- }
-
- // Use a `BTreeSet` to keep output in a more consistent order.
- let mut associated_types: FxHashMap<Span, BTreeSet<DefId>> = FxHashMap::default();
-
- let regular_traits_refs_spans = bounds
- .trait_bounds
- .into_iter()
- .filter(|(trait_ref, _, _)| !tcx.trait_is_auto(trait_ref.def_id()));
-
- for (base_trait_ref, span, constness) in regular_traits_refs_spans {
- assert_eq!(constness, ty::BoundConstness::NotConst);
-
- for obligation in traits::elaborate_trait_ref(tcx, base_trait_ref) {
- debug!(
- "conv_object_ty_poly_trait_ref: observing object predicate `{:?}`",
- obligation.predicate
- );
-
- let bound_predicate = obligation.predicate.kind();
- match bound_predicate.skip_binder() {
- ty::PredicateKind::Trait(pred) => {
- let pred = bound_predicate.rebind(pred);
- associated_types.entry(span).or_default().extend(
- tcx.associated_items(pred.def_id())
- .in_definition_order()
- .filter(|item| item.kind == ty::AssocKind::Type)
- .map(|item| item.def_id),
- );
- }
- ty::PredicateKind::Projection(pred) => {
- let pred = bound_predicate.rebind(pred);
- // A `Self` within the original bound will be substituted with a
- // `trait_object_dummy_self`, so check for that.
- let references_self = match pred.skip_binder().term {
- ty::Term::Ty(ty) => ty.walk().any(|arg| arg == dummy_self.into()),
- ty::Term::Const(c) => c.ty().walk().any(|arg| arg == dummy_self.into()),
- };
-
- // If the projection output contains `Self`, force the user to
- // elaborate it explicitly to avoid a lot of complexity.
- //
- // The "classically useful" case is the following:
- // ```
- // trait MyTrait: FnMut() -> <Self as MyTrait>::MyOutput {
- // type MyOutput;
- // }
- // ```
- //
- // Here, the user could theoretically write `dyn MyTrait<Output = X>`,
- // but actually supporting that would "expand" to an infinitely-long type
- // `fix $ τ → dyn MyTrait<MyOutput = X, Output = <τ as MyTrait>::MyOutput>`.
- //
- // Instead, we force the user to write
- // `dyn MyTrait<MyOutput = X, Output = X>`, which is uglier but works. See
- // the discussion in #56288 for alternatives.
- if !references_self {
- // Include projections defined on supertraits.
- bounds.projection_bounds.push((pred, span));
- }
- }
- _ => (),
- }
- }
- }
-
- for (projection_bound, _) in &bounds.projection_bounds {
- for def_ids in associated_types.values_mut() {
- def_ids.remove(&projection_bound.projection_def_id());
- }
- }
-
- self.complain_about_missing_associated_types(
- associated_types,
- potential_assoc_types,
- trait_bounds,
- );
-
- // De-duplicate auto traits so that, e.g., `dyn Trait + Send + Send` is the same as
- // `dyn Trait + Send`.
- // We remove duplicates by inserting into a `FxHashSet` to avoid re-ordering
- // the bounds
- let mut duplicates = FxHashSet::default();
- auto_traits.retain(|i| duplicates.insert(i.trait_ref().def_id()));
- debug!("regular_traits: {:?}", regular_traits);
- debug!("auto_traits: {:?}", auto_traits);
-
- // Erase the `dummy_self` (`trait_object_dummy_self`) used above.
- let existential_trait_refs = regular_traits.iter().map(|i| {
- i.trait_ref().map_bound(|trait_ref: ty::TraitRef<'tcx>| {
- if trait_ref.self_ty() != dummy_self {
- // FIXME: There appears to be a missing filter on top of `expand_trait_aliases`,
- // which picks up non-supertrait `where` clauses - but also, the object safety
- // check completely ignores trait aliases, which could be object safety hazards. We
- // `delay_span_bug` here to avoid an ICE in stable even when the feature is
- // disabled. (#66420)
- tcx.sess.delay_span_bug(
- DUMMY_SP,
- &format!(
- "trait_ref_to_existential called on {:?} with non-dummy Self",
- trait_ref,
- ),
- );
- }
- ty::ExistentialTraitRef::erase_self_ty(tcx, trait_ref)
- })
- });
- let existential_projections = bounds.projection_bounds.iter().map(|(bound, _)| {
- bound.map_bound(|b| {
- if b.projection_ty.self_ty() != dummy_self {
- tcx.sess.delay_span_bug(
- DUMMY_SP,
- &format!("trait_ref_to_existential called on {:?} with non-dummy Self", b),
- );
- }
- ty::ExistentialProjection::erase_self_ty(tcx, b)
- })
- });
-
- let regular_trait_predicates = existential_trait_refs
- .map(|trait_ref| trait_ref.map_bound(ty::ExistentialPredicate::Trait));
- let auto_trait_predicates = auto_traits.into_iter().map(|trait_ref| {
- ty::Binder::dummy(ty::ExistentialPredicate::AutoTrait(trait_ref.trait_ref().def_id()))
- });
- // N.b. principal, projections, auto traits
- // FIXME: This is actually wrong with multiple principals in regards to symbol mangling
- let mut v = regular_trait_predicates
- .chain(
- existential_projections.map(|x| x.map_bound(ty::ExistentialPredicate::Projection)),
- )
- .chain(auto_trait_predicates)
- .collect::<SmallVec<[_; 8]>>();
- v.sort_by(|a, b| a.skip_binder().stable_cmp(tcx, &b.skip_binder()));
- v.dedup();
- let existential_predicates = tcx.mk_poly_existential_predicates(v.into_iter());
-
- // Use explicitly-specified region bound.
- let region_bound = if !lifetime.is_elided() {
- self.ast_region_to_region(lifetime, None)
- } else {
- self.compute_object_lifetime_bound(span, existential_predicates).unwrap_or_else(|| {
- if tcx.named_region(lifetime.hir_id).is_some() {
- self.ast_region_to_region(lifetime, None)
- } else {
- self.re_infer(None, span).unwrap_or_else(|| {
- let mut err = struct_span_err!(
- tcx.sess,
- span,
- E0228,
- "the lifetime bound for this object type cannot be deduced \
- from context; please supply an explicit bound"
- );
- if borrowed {
- // We will have already emitted an error E0106 complaining about a
- // missing named lifetime in `&dyn Trait`, so we elide this one.
- err.delay_as_bug();
- } else {
- err.emit();
- }
- tcx.lifetimes.re_static
- })
- }
- })
- };
- debug!("region_bound: {:?}", region_bound);
-
- let ty = tcx.mk_dynamic(existential_predicates, region_bound);
- debug!("trait_object_type: {:?}", ty);
- ty
- }
-
- fn report_ambiguous_associated_type(
- &self,
- span: Span,
- type_str: &str,
- trait_str: &str,
- name: Symbol,
- ) -> ErrorGuaranteed {
- let mut err = struct_span_err!(self.tcx().sess, span, E0223, "ambiguous associated type");
- if self
- .tcx()
- .resolutions(())
- .confused_type_with_std_module
- .keys()
- .any(|full_span| full_span.contains(span))
- {
- err.span_suggestion(
- span.shrink_to_lo(),
- "you are looking for the module in `std`, not the primitive type",
- "std::",
- Applicability::MachineApplicable,
- );
- } else {
- err.span_suggestion(
- span,
- "use fully-qualified syntax",
- format!("<{} as {}>::{}", type_str, trait_str, name),
- Applicability::HasPlaceholders,
- );
- }
- err.emit()
- }
-
- // Search for a bound on a type parameter which includes the associated item
- // given by `assoc_name`. `ty_param_def_id` is the `DefId` of the type parameter.
- // This function will fail if there are no suitable bounds or there is
- // any ambiguity.
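- // As an illustrative sketch (hypothetical function):
- //
- // ```
- // fn collect_all<T: IntoIterator>(x: T) -> Vec<T::Item> {
- //     x.into_iter().collect()
- // }
- // ```
- //
- // Resolving `T::Item` searches the bounds of `T` and finds `IntoIterator`,
- // the single bound in scope that defines an associated type named `Item`.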
- fn find_bound_for_assoc_item(
- &self,
- ty_param_def_id: LocalDefId,
- assoc_name: Ident,
- span: Span,
- ) -> Result<ty::PolyTraitRef<'tcx>, ErrorGuaranteed> {
- let tcx = self.tcx();
-
- debug!(
- "find_bound_for_assoc_item(ty_param_def_id={:?}, assoc_name={:?}, span={:?})",
- ty_param_def_id, assoc_name, span,
- );
-
- let predicates = &self
- .get_type_parameter_bounds(span, ty_param_def_id.to_def_id(), assoc_name)
- .predicates;
-
- debug!("find_bound_for_assoc_item: predicates={:#?}", predicates);
-
- let param_name = tcx.hir().ty_param_name(ty_param_def_id);
- self.one_bound_for_assoc_type(
- || {
- traits::transitive_bounds_that_define_assoc_type(
- tcx,
- predicates.iter().filter_map(|(p, _)| {
- Some(p.to_opt_poly_trait_pred()?.map_bound(|t| t.trait_ref))
- }),
- assoc_name,
- )
- },
- || param_name.to_string(),
- assoc_name,
- span,
- || None,
- )
- }
-
- // Checks that `bounds` contains exactly one element and reports appropriate
- // errors otherwise.
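- // As an illustrative sketch (hypothetical traits), an ambiguous case looks
- // like this:
- //
- // ```
- // trait Left { type Out; }
- // trait Right { type Out; }
- //
- // fn f<T: Left + Right>(x: T::Out) {} // error[E0221]: ambiguous `Out`
- // ```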
- fn one_bound_for_assoc_type<I>(
- &self,
- all_candidates: impl Fn() -> I,
- ty_param_name: impl Fn() -> String,
- assoc_name: Ident,
- span: Span,
- is_equality: impl Fn() -> Option<String>,
- ) -> Result<ty::PolyTraitRef<'tcx>, ErrorGuaranteed>
- where
- I: Iterator<Item = ty::PolyTraitRef<'tcx>>,
- {
- let mut matching_candidates = all_candidates()
- .filter(|r| self.trait_defines_associated_type_named(r.def_id(), assoc_name));
- let mut const_candidates = all_candidates()
- .filter(|r| self.trait_defines_associated_const_named(r.def_id(), assoc_name));
-
- let (bound, next_cand) = match (matching_candidates.next(), const_candidates.next()) {
- (Some(bound), _) => (bound, matching_candidates.next()),
- (None, Some(bound)) => (bound, const_candidates.next()),
- (None, None) => {
- let reported = self.complain_about_assoc_type_not_found(
- all_candidates,
- &ty_param_name(),
- assoc_name,
- span,
- );
- return Err(reported);
- }
- };
- debug!("one_bound_for_assoc_type: bound = {:?}", bound);
-
- if let Some(bound2) = next_cand {
- debug!("one_bound_for_assoc_type: bound2 = {:?}", bound2);
-
- let is_equality = is_equality();
- let bounds = IntoIterator::into_iter([bound, bound2]).chain(matching_candidates);
- let mut err = if is_equality.is_some() {
- // More specific Error Index entry.
- struct_span_err!(
- self.tcx().sess,
- span,
- E0222,
- "ambiguous associated type `{}` in bounds of `{}`",
- assoc_name,
- ty_param_name()
- )
- } else {
- struct_span_err!(
- self.tcx().sess,
- span,
- E0221,
- "ambiguous associated type `{}` in bounds of `{}`",
- assoc_name,
- ty_param_name()
- )
- };
- err.span_label(span, format!("ambiguous associated type `{}`", assoc_name));
-
- let mut where_bounds = vec![];
- for bound in bounds {
- let bound_id = bound.def_id();
- let bound_span = self
- .tcx()
- .associated_items(bound_id)
- .find_by_name_and_kind(self.tcx(), assoc_name, ty::AssocKind::Type, bound_id)
- .and_then(|item| self.tcx().hir().span_if_local(item.def_id));
-
- if let Some(bound_span) = bound_span {
- err.span_label(
- bound_span,
- format!(
- "ambiguous `{}` from `{}`",
- assoc_name,
- bound.print_only_trait_path(),
- ),
- );
- if let Some(constraint) = &is_equality {
- where_bounds.push(format!(
- " T: {trait}::{assoc} = {constraint}",
- trait=bound.print_only_trait_path(),
- assoc=assoc_name,
- constraint=constraint,
- ));
- } else {
- err.span_suggestion_verbose(
- span.with_hi(assoc_name.span.lo()),
- "use fully qualified syntax to disambiguate",
- format!(
- "<{} as {}>::",
- ty_param_name(),
- bound.print_only_trait_path(),
- ),
- Applicability::MaybeIncorrect,
- );
- }
- } else {
- err.note(&format!(
- "associated type `{}` could derive from `{}`",
- ty_param_name(),
- bound.print_only_trait_path(),
- ));
- }
- }
- if !where_bounds.is_empty() {
- err.help(&format!(
- "consider introducing a new type parameter `T` and adding `where` constraints:\
- \n where\n T: {},\n{}",
- ty_param_name(),
- where_bounds.join(",\n"),
- ));
- }
- let reported = err.emit();
- if !where_bounds.is_empty() {
- return Err(reported);
- }
- }
-
- Ok(bound)
- }
-
- // Create a type from a path to an associated type.
- // For a path `A::B::C::D`, `qself_ty` is the (already lowered) type for `A::B::C`,
- // `qself` is its HIR node, and `assoc_segment` is the path segment for `D`. We
- // return a type and a def for the whole path.
- // Will fail except for `T::A` and `Self::A`; i.e., if `qself_ty` is not a type
- // parameter or `Self`.
- // NOTE: When this function starts resolving `Trait::AssocTy` successfully
- // it should also start reporting the `BARE_TRAIT_OBJECTS` lint.
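- // As an illustrative sketch (hypothetical function):
- //
- // ```
- // fn first<I: Iterator>(mut it: I) -> Option<I::Item> {
- //     it.next()
- // }
- // ```
- //
- // Here `I::Item` resolves through the `Iterator` bound on the type parameter
- // `I` to the associated type `Iterator::Item`.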
- pub fn associated_path_to_ty(
- &self,
- hir_ref_id: hir::HirId,
- span: Span,
- qself_ty: Ty<'tcx>,
- qself: &hir::Ty<'_>,
- assoc_segment: &hir::PathSegment<'_>,
- permit_variants: bool,
- ) -> Result<(Ty<'tcx>, DefKind, DefId), ErrorGuaranteed> {
- let tcx = self.tcx();
- let assoc_ident = assoc_segment.ident;
- let qself_res = if let hir::TyKind::Path(hir::QPath::Resolved(_, ref path)) = qself.kind {
- path.res
- } else {
- Res::Err
- };
-
- debug!("associated_path_to_ty: {:?}::{}", qself_ty, assoc_ident);
-
- // Check if we have an enum variant.
- let mut variant_resolution = None;
- if let ty::Adt(adt_def, _) = qself_ty.kind() {
- if adt_def.is_enum() {
- let variant_def = adt_def
- .variants()
- .iter()
- .find(|vd| tcx.hygienic_eq(assoc_ident, vd.ident(tcx), adt_def.did()));
- if let Some(variant_def) = variant_def {
- if permit_variants {
- tcx.check_stability(variant_def.def_id, Some(hir_ref_id), span, None);
- self.prohibit_generics(slice::from_ref(assoc_segment).iter(), |err| {
- err.note("enum variants can't have type parameters");
- let type_name = tcx.item_name(adt_def.did());
- let msg = format!(
- "you might have meant to specity type parameters on enum \
- `{type_name}`"
- );
- let Some(args) = assoc_segment.args else { return; };
- // Get the span of the generics args *including* the leading `::`.
- let args_span = assoc_segment.ident.span.shrink_to_hi().to(args.span_ext);
- if tcx.generics_of(adt_def.did()).count() == 0 {
- // FIXME(estebank): we could also verify that the arguments being used
- // work for the `enum`, instead of just checking whether it takes *any*.
- err.span_suggestion_verbose(
- args_span,
- &format!("{type_name} doesn't have generic parameters"),
- "",
- Applicability::MachineApplicable,
- );
- return;
- }
- let Ok(snippet) = tcx.sess.source_map().span_to_snippet(args_span) else {
- err.note(&msg);
- return;
- };
- let (qself_sugg_span, is_self) = if let hir::TyKind::Path(
- hir::QPath::Resolved(_, ref path)
- ) = qself.kind {
- // If the path segment already has type params, we want to overwrite
- // them.
- match &path.segments[..] {
- // `segment` is the previous to last element on the path,
- // which would normally be the `enum` itself, while the last
- // `_` `PathSegment` corresponds to the variant.
- [.., hir::PathSegment {
- ident,
- args,
- res: Some(Res::Def(DefKind::Enum, _)),
- ..
- }, _] => (
- // We need to include the `::` in `Type::Variant::<Args>`
- // to point the span to `::<Args>`, not just `<Args>`.
- ident.span.shrink_to_hi().to(args.map_or(
- ident.span.shrink_to_hi(),
- |a| a.span_ext)),
- false,
- ),
- [segment] => (
- // We need to include the `::` in `Type::Variant::<Args>`
- // to point the span to `::<Args>`, not just `<Args>`.
- segment.ident.span.shrink_to_hi().to(segment.args.map_or(
- segment.ident.span.shrink_to_hi(),
- |a| a.span_ext)),
- kw::SelfUpper == segment.ident.name,
- ),
- _ => {
- err.note(&msg);
- return;
- }
- }
- } else {
- err.note(&msg);
- return;
- };
- let suggestion = vec![
- if is_self {
- // Account for people writing `Self::Variant::<Args>`, where
- // `Self` is the enum, and suggest replacing `Self` with the
- // appropriate type: `Type::<Args>::Variant`.
- (qself.span, format!("{type_name}{snippet}"))
- } else {
- (qself_sugg_span, snippet)
- },
- (args_span, String::new()),
- ];
- err.multipart_suggestion_verbose(
- &msg,
- suggestion,
- Applicability::MaybeIncorrect,
- );
- });
- return Ok((qself_ty, DefKind::Variant, variant_def.def_id));
- } else {
- variant_resolution = Some(variant_def.def_id);
- }
- }
- }
- }
-
- // Find the type of the associated item, and the trait where the associated
- // item is declared.
- let bound = match (&qself_ty.kind(), qself_res) {
- (_, Res::SelfTy { trait_: Some(_), alias_to: Some((impl_def_id, _)) }) => {
- // `Self` in an impl of a trait -- we have a concrete self type and a
- // trait reference.
- let Some(trait_ref) = tcx.impl_trait_ref(impl_def_id) else {
- // A cycle error occurred, most likely.
- let guar = tcx.sess.delay_span_bug(span, "expected cycle error");
- return Err(guar);
- };
-
- self.one_bound_for_assoc_type(
- || traits::supertraits(tcx, ty::Binder::dummy(trait_ref)),
- || "Self".to_string(),
- assoc_ident,
- span,
- || None,
- )?
- }
- (
- &ty::Param(_),
- Res::SelfTy { trait_: Some(param_did), alias_to: None }
- | Res::Def(DefKind::TyParam, param_did),
- ) => self.find_bound_for_assoc_item(param_did.expect_local(), assoc_ident, span)?,
- _ => {
- let reported = if variant_resolution.is_some() {
- // Variant in type position
- let msg = format!("expected type, found variant `{}`", assoc_ident);
- tcx.sess.span_err(span, &msg)
- } else if qself_ty.is_enum() {
- let mut err = struct_span_err!(
- tcx.sess,
- assoc_ident.span,
- E0599,
- "no variant named `{}` found for enum `{}`",
- assoc_ident,
- qself_ty,
- );
-
- let adt_def = qself_ty.ty_adt_def().expect("enum is not an ADT");
- if let Some(suggested_name) = find_best_match_for_name(
- &adt_def
- .variants()
- .iter()
- .map(|variant| variant.name)
- .collect::<Vec<Symbol>>(),
- assoc_ident.name,
- None,
- ) {
- err.span_suggestion(
- assoc_ident.span,
- "there is a variant with a similar name",
- suggested_name,
- Applicability::MaybeIncorrect,
- );
- } else {
- err.span_label(
- assoc_ident.span,
- format!("variant not found in `{}`", qself_ty),
- );
- }
-
- if let Some(sp) = tcx.hir().span_if_local(adt_def.did()) {
- err.span_label(sp, format!("variant `{}` not found here", assoc_ident));
- }
-
- err.emit()
- } else if let Some(reported) = qself_ty.error_reported() {
- reported
- } else {
- // Don't print `TyErr` to the user.
- self.report_ambiguous_associated_type(
- span,
- &qself_ty.to_string(),
- "Trait",
- assoc_ident.name,
- )
- };
- return Err(reported);
- }
- };
-
- let trait_did = bound.def_id();
- let (assoc_ident, def_scope) =
- tcx.adjust_ident_and_get_scope(assoc_ident, trait_did, hir_ref_id);
-
- // We have already adjusted the item name above, so compare with `ident.normalize_to_macros_2_0()` instead
- // of calling `filter_by_name_and_kind`.
- let item = tcx.associated_items(trait_did).in_definition_order().find(|i| {
- i.kind.namespace() == Namespace::TypeNS
- && i.ident(tcx).normalize_to_macros_2_0() == assoc_ident
- });
- // Assume that if it's not matched, there must be a const defined with the same name
- // but it was used in a type position.
- let Some(item) = item else {
- let msg = format!("found associated const `{assoc_ident}` when type was expected");
- let guar = tcx.sess.struct_span_err(span, &msg).emit();
- return Err(guar);
- };
-
- let ty = self.projected_ty_from_poly_trait_ref(span, item.def_id, assoc_segment, bound);
- let ty = self.normalize_ty(span, ty);
-
- let kind = DefKind::AssocTy;
- if !item.visibility(tcx).is_accessible_from(def_scope, tcx) {
- let kind = kind.descr(item.def_id);
- let msg = format!("{} `{}` is private", kind, assoc_ident);
- tcx.sess
- .struct_span_err(span, &msg)
- .span_label(span, &format!("private {}", kind))
- .emit();
- }
- tcx.check_stability(item.def_id, Some(hir_ref_id), span, None);
-
- if let Some(variant_def_id) = variant_resolution {
- tcx.struct_span_lint_hir(AMBIGUOUS_ASSOCIATED_ITEMS, hir_ref_id, span, |lint| {
- let mut err = lint.build("ambiguous associated item");
- let mut could_refer_to = |kind: DefKind, def_id, also| {
- let note_msg = format!(
- "`{}` could{} refer to the {} defined here",
- assoc_ident,
- also,
- kind.descr(def_id)
- );
- err.span_note(tcx.def_span(def_id), &note_msg);
- };
-
- could_refer_to(DefKind::Variant, variant_def_id, "");
- could_refer_to(kind, item.def_id, " also");
-
- err.span_suggestion(
- span,
- "use fully-qualified syntax",
- format!("<{} as {}>::{}", qself_ty, tcx.item_name(trait_did), assoc_ident),
- Applicability::MachineApplicable,
- );
-
- err.emit();
- });
- }
- Ok((ty, kind, item.def_id))
- }
-
- fn qpath_to_ty(
- &self,
- span: Span,
- opt_self_ty: Option<Ty<'tcx>>,
- item_def_id: DefId,
- trait_segment: &hir::PathSegment<'_>,
- item_segment: &hir::PathSegment<'_>,
- ) -> Ty<'tcx> {
- let tcx = self.tcx();
-
- let trait_def_id = tcx.parent(item_def_id);
-
- debug!("qpath_to_ty: trait_def_id={:?}", trait_def_id);
-
- let Some(self_ty) = opt_self_ty else {
- let path_str = tcx.def_path_str(trait_def_id);
-
- let def_id = self.item_def_id();
-
- debug!("qpath_to_ty: self.item_def_id()={:?}", def_id);
-
- let parent_def_id = def_id
- .and_then(|def_id| {
- def_id.as_local().map(|def_id| tcx.hir().local_def_id_to_hir_id(def_id))
- })
- .map(|hir_id| tcx.hir().get_parent_item(hir_id).to_def_id());
-
- debug!("qpath_to_ty: parent_def_id={:?}", parent_def_id);
-
- // If the trait in segment is the same as the trait defining the item,
- // use the `<Self as ..>` syntax in the error.
- let is_part_of_self_trait_constraints = def_id == Some(trait_def_id);
- let is_part_of_fn_in_self_trait = parent_def_id == Some(trait_def_id);
-
- let type_name = if is_part_of_self_trait_constraints || is_part_of_fn_in_self_trait {
- "Self"
- } else {
- "Type"
- };
-
- self.report_ambiguous_associated_type(
- span,
- type_name,
- &path_str,
- item_segment.ident.name,
- );
- return tcx.ty_error();
- };
-
- debug!("qpath_to_ty: self_type={:?}", self_ty);
-
- let trait_ref =
- self.ast_path_to_mono_trait_ref(span, trait_def_id, self_ty, trait_segment, false);
-
- let item_substs = self.create_substs_for_associated_item(
- tcx,
- span,
- item_def_id,
- item_segment,
- trait_ref.substs,
- );
-
- debug!("qpath_to_ty: trait_ref={:?}", trait_ref);
-
- self.normalize_ty(span, tcx.mk_projection(item_def_id, item_substs))
- }
-
- pub fn prohibit_generics<'a>(
- &self,
- segments: impl Iterator<Item = &'a hir::PathSegment<'a>> + Clone,
- extend: impl Fn(&mut DiagnosticBuilder<'tcx, ErrorGuaranteed>),
- ) -> bool {
- let args = segments.clone().flat_map(|segment| segment.args().args);
-
- let (lt, ty, ct, inf) =
- args.clone().fold((false, false, false, false), |(lt, ty, ct, inf), arg| match arg {
- hir::GenericArg::Lifetime(_) => (true, ty, ct, inf),
- hir::GenericArg::Type(_) => (lt, true, ct, inf),
- hir::GenericArg::Const(_) => (lt, ty, true, inf),
- hir::GenericArg::Infer(_) => (lt, ty, ct, true),
- });
- let mut emitted = false;
- if lt || ty || ct || inf {
- let types_and_spans: Vec<_> = segments
- .clone()
- .flat_map(|segment| {
- segment.res.and_then(|res| {
- if segment.args().args.is_empty() {
- None
- } else {
- Some((
- match res {
- Res::PrimTy(ty) => format!("{} `{}`", res.descr(), ty.name()),
- Res::Def(_, def_id)
- if let Some(name) = self.tcx().opt_item_name(def_id) => {
- format!("{} `{name}`", res.descr())
- }
- Res::Err => "this type".to_string(),
- _ => res.descr().to_string(),
- },
- segment.ident.span,
- ))
- }
- })
- })
- .collect();
- let this_type = match &types_and_spans[..] {
- [.., _, (last, _)] => format!(
- "{} and {last}",
- types_and_spans[..types_and_spans.len() - 1]
- .iter()
- .map(|(x, _)| x.as_str())
- .intersperse(&", ")
- .collect::<String>()
- ),
- [(only, _)] => only.to_string(),
- [] => "this type".to_string(),
- };
-
- let arg_spans: Vec<Span> = args.map(|arg| arg.span()).collect();
-
- let mut kinds = Vec::with_capacity(4);
- if lt {
- kinds.push("lifetime");
- }
- if ty {
- kinds.push("type");
- }
- if ct {
- kinds.push("const");
- }
- if inf {
- kinds.push("generic");
- }
- let (kind, s) = match kinds[..] {
- [.., _, last] => (
- format!(
- "{} and {last}",
- kinds[..kinds.len() - 1]
- .iter()
- .map(|&x| x)
- .intersperse(", ")
- .collect::<String>()
- ),
- "s",
- ),
- [only] => (format!("{only}"), ""),
- [] => unreachable!(),
- };
- let last_span = *arg_spans.last().unwrap();
- let span: MultiSpan = arg_spans.into();
- let mut err = struct_span_err!(
- self.tcx().sess,
- span,
- E0109,
- "{kind} arguments are not allowed on {this_type}",
- );
- err.span_label(last_span, format!("{kind} argument{s} not allowed"));
- for (what, span) in types_and_spans {
- err.span_label(span, format!("not allowed on {what}"));
- }
- extend(&mut err);
- err.emit();
- emitted = true;
- }
-
- for segment in segments {
- // Only emit the first error to avoid overloading the user with error messages.
- if let [binding, ..] = segment.args().bindings {
- Self::prohibit_assoc_ty_binding(self.tcx(), binding.span);
- return true;
- }
- }
- emitted
- }
-
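`prohibit_generics` is what backs E0109. A small illustrative input it rejects:

    fn main() {
        // error[E0109]: type arguments are not allowed on builtin type `bool`
        let _flag: bool<i32> = true;
    }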
- // FIXME(eddyb, varkor) handle type paths here too, not just value ones.
- pub fn def_ids_for_value_path_segments(
- &self,
- segments: &[hir::PathSegment<'_>],
- self_ty: Option<Ty<'tcx>>,
- kind: DefKind,
- def_id: DefId,
- ) -> Vec<PathSeg> {
- // We need to extract the type parameters supplied by the user in
- // the path `path`. Due to the current setup, this is a bit of a
-        // tricky process; the problem is that resolve only tells us the
- // end-point of the path resolution, and not the intermediate steps.
- // Luckily, we can (at least for now) deduce the intermediate steps
- // just from the end-point.
- //
-        // There are basically four cases to consider:
- //
- // 1. Reference to a constructor of a struct:
- //
- // struct Foo<T>(...)
- //
- // In this case, the parameters are declared in the type space.
- //
- // 2. Reference to a constructor of an enum variant:
- //
- // enum E<T> { Foo(...) }
- //
- // In this case, the parameters are defined in the type space,
- // but may be specified either on the type or the variant.
- //
- // 3. Reference to a fn item or a free constant:
- //
- // fn foo<T>() { }
- //
- // In this case, the path will again always have the form
- // `a::b::foo::<T>` where only the final segment should have
- // type parameters. However, in this case, those parameters are
- // declared on a value, and hence are in the `FnSpace`.
- //
- // 4. Reference to a method or an associated constant:
- //
- // impl<A> SomeStruct<A> {
- // fn foo<B>(...)
- // }
- //
- // Here we can have a path like
- // `a::b::SomeStruct::<A>::foo::<B>`, in which case parameters
- // may appear in two places. The penultimate segment,
- // `SomeStruct::<A>`, contains parameters in TypeSpace, and the
- // final segment, `foo::<B>` contains parameters in fn space.
- //
- // The first step then is to categorize the segments appropriately.
-
- let tcx = self.tcx();
-
- assert!(!segments.is_empty());
- let last = segments.len() - 1;
-
- let mut path_segs = vec![];
-
- match kind {
- // Case 1. Reference to a struct constructor.
- DefKind::Ctor(CtorOf::Struct, ..) => {
- // Everything but the final segment should have no
- // parameters at all.
- let generics = tcx.generics_of(def_id);
- // Variant and struct constructors use the
- // generics of their parent type definition.
- let generics_def_id = generics.parent.unwrap_or(def_id);
- path_segs.push(PathSeg(generics_def_id, last));
- }
-
- // Case 2. Reference to a variant constructor.
- DefKind::Ctor(CtorOf::Variant, ..) | DefKind::Variant => {
- let adt_def = self_ty.map(|t| t.ty_adt_def().unwrap());
- let (generics_def_id, index) = if let Some(adt_def) = adt_def {
- debug_assert!(adt_def.is_enum());
- (adt_def.did(), last)
- } else if last >= 1 && segments[last - 1].args.is_some() {
- // Everything but the penultimate segment should have no
- // parameters at all.
- let mut def_id = def_id;
-
- // `DefKind::Ctor` -> `DefKind::Variant`
- if let DefKind::Ctor(..) = kind {
- def_id = tcx.parent(def_id);
- }
-
- // `DefKind::Variant` -> `DefKind::Enum`
- let enum_def_id = tcx.parent(def_id);
- (enum_def_id, last - 1)
- } else {
- // FIXME: lint here recommending `Enum::<...>::Variant` form
- // instead of `Enum::Variant::<...>` form.
-
- // Everything but the final segment should have no
- // parameters at all.
- let generics = tcx.generics_of(def_id);
- // Variant and struct constructors use the
- // generics of their parent type definition.
- (generics.parent.unwrap_or(def_id), last)
- };
- path_segs.push(PathSeg(generics_def_id, index));
- }
-
- // Case 3. Reference to a top-level value.
- DefKind::Fn | DefKind::Const | DefKind::ConstParam | DefKind::Static(_) => {
- path_segs.push(PathSeg(def_id, last));
- }
-
- // Case 4. Reference to a method or associated const.
- DefKind::AssocFn | DefKind::AssocConst => {
- if segments.len() >= 2 {
- let generics = tcx.generics_of(def_id);
- path_segs.push(PathSeg(generics.parent.unwrap(), last - 1));
- }
- path_segs.push(PathSeg(def_id, last));
- }
-
- kind => bug!("unexpected definition kind {:?} for {:?}", kind, def_id),
- }
-
- debug!("path_segs = {:?}", path_segs);
-
- path_segs
- }
-
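The FIXME in case 2 is about where the turbofish may be attached for a variant constructor; both spellings currently compile (example uses `Option` from `std`):

    fn main() {
        // generic arguments on the penultimate (enum) segment...
        let _a = Option::<u8>::Some(1);
        // ...or on the final (variant) segment; the FIXME suggests linting toward the first form
        let _b = Option::Some::<u8>(1);
    }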
- // Check a type `Path` and convert it to a `Ty`.
- pub fn res_to_ty(
- &self,
- opt_self_ty: Option<Ty<'tcx>>,
- path: &hir::Path<'_>,
- permit_variants: bool,
- ) -> Ty<'tcx> {
- let tcx = self.tcx();
-
- debug!(
- "res_to_ty(res={:?}, opt_self_ty={:?}, path_segments={:?})",
- path.res, opt_self_ty, path.segments
- );
-
- let span = path.span;
- match path.res {
- Res::Def(DefKind::OpaqueTy, did) => {
- // Check for desugared `impl Trait`.
- assert!(ty::is_impl_trait_defn(tcx, did).is_none());
- let item_segment = path.segments.split_last().unwrap();
- self.prohibit_generics(item_segment.1.iter(), |err| {
- err.note("`impl Trait` types can't have type parameters");
- });
- let substs = self.ast_path_substs_for_ty(span, did, item_segment.0);
- self.normalize_ty(span, tcx.mk_opaque(did, substs))
- }
- Res::Def(
- DefKind::Enum
- | DefKind::TyAlias
- | DefKind::Struct
- | DefKind::Union
- | DefKind::ForeignTy,
- did,
- ) => {
- assert_eq!(opt_self_ty, None);
- self.prohibit_generics(path.segments.split_last().unwrap().1.iter(), |_| {});
- self.ast_path_to_ty(span, did, path.segments.last().unwrap())
- }
- Res::Def(kind @ DefKind::Variant, def_id) if permit_variants => {
- // Convert "variant type" as if it were a real type.
-                // The resulting `Ty` is the type of the variant's enum for now.
- assert_eq!(opt_self_ty, None);
-
- let path_segs =
- self.def_ids_for_value_path_segments(path.segments, None, kind, def_id);
- let generic_segs: FxHashSet<_> =
- path_segs.iter().map(|PathSeg(_, index)| index).collect();
- self.prohibit_generics(
- path.segments.iter().enumerate().filter_map(|(index, seg)| {
- if !generic_segs.contains(&index) { Some(seg) } else { None }
- }),
- |err| {
- err.note("enum variants can't have type parameters");
- },
- );
-
- let PathSeg(def_id, index) = path_segs.last().unwrap();
- self.ast_path_to_ty(span, *def_id, &path.segments[*index])
- }
- Res::Def(DefKind::TyParam, def_id) => {
- assert_eq!(opt_self_ty, None);
- self.prohibit_generics(path.segments.iter(), |err| {
- if let Some(span) = tcx.def_ident_span(def_id) {
- let name = tcx.item_name(def_id);
- err.span_note(span, &format!("type parameter `{name}` defined here"));
- }
- });
-
- let def_id = def_id.expect_local();
- let item_def_id = tcx.hir().ty_param_owner(def_id);
- let generics = tcx.generics_of(item_def_id);
- let index = generics.param_def_id_to_index[&def_id.to_def_id()];
- tcx.mk_ty_param(index, tcx.hir().ty_param_name(def_id))
- }
- Res::SelfTy { trait_: Some(_), alias_to: None } => {
- // `Self` in trait or type alias.
- assert_eq!(opt_self_ty, None);
- self.prohibit_generics(path.segments.iter(), |err| {
- if let [hir::PathSegment { args: Some(args), ident, .. }] = &path.segments[..] {
- err.span_suggestion_verbose(
- ident.span.shrink_to_hi().to(args.span_ext),
- "the `Self` type doesn't accept type parameters",
- "",
- Applicability::MaybeIncorrect,
- );
- }
- });
- tcx.types.self_param
- }
- Res::SelfTy { trait_: _, alias_to: Some((def_id, forbid_generic)) } => {
- // `Self` in impl (we know the concrete type).
- assert_eq!(opt_self_ty, None);
- // Try to evaluate any array length constants.
- let ty = tcx.at(span).type_of(def_id);
- let span_of_impl = tcx.span_of_impl(def_id);
- self.prohibit_generics(path.segments.iter(), |err| {
- let def_id = match *ty.kind() {
- ty::Adt(self_def, _) => self_def.did(),
- _ => return,
- };
-
- let type_name = tcx.item_name(def_id);
- let span_of_ty = tcx.def_ident_span(def_id);
- let generics = tcx.generics_of(def_id).count();
-
- let msg = format!("`Self` is of type `{ty}`");
- if let (Ok(i_sp), Some(t_sp)) = (span_of_impl, span_of_ty) {
- let mut span: MultiSpan = vec![t_sp].into();
- span.push_span_label(
- i_sp,
- &format!("`Self` is on type `{type_name}` in this `impl`"),
- );
- let mut postfix = "";
- if generics == 0 {
- postfix = ", which doesn't have generic parameters";
- }
- span.push_span_label(
- t_sp,
- &format!("`Self` corresponds to this type{postfix}"),
- );
- err.span_note(span, &msg);
- } else {
- err.note(&msg);
- }
- for segment in path.segments {
- if let Some(args) = segment.args && segment.ident.name == kw::SelfUpper {
- if generics == 0 {
-                                // FIXME(estebank): we could also verify that the arguments being
-                                // used work for the `enum`, instead of just looking if it takes *any*.
- err.span_suggestion_verbose(
- segment.ident.span.shrink_to_hi().to(args.span_ext),
- "the `Self` type doesn't accept type parameters",
- "",
- Applicability::MachineApplicable,
- );
- return;
- } else {
- err.span_suggestion_verbose(
- segment.ident.span,
- format!(
- "the `Self` type doesn't accept type parameters, use the \
- concrete type's name `{type_name}` instead if you want to \
- specify its type parameters"
- ),
- type_name,
- Applicability::MaybeIncorrect,
- );
- }
- }
- }
- });
- // HACK(min_const_generics): Forbid generic `Self` types
- // here as we can't easily do that during nameres.
- //
- // We do this before normalization as we otherwise allow
- // ```rust
- // trait AlwaysApplicable { type Assoc; }
- // impl<T: ?Sized> AlwaysApplicable for T { type Assoc = usize; }
- //
- // trait BindsParam<T> {
- // type ArrayTy;
- // }
- // impl<T> BindsParam<T> for <T as AlwaysApplicable>::Assoc {
- // type ArrayTy = [u8; Self::MAX];
- // }
- // ```
- // Note that the normalization happens in the param env of
- // the anon const, which is empty. This is why the
- // `AlwaysApplicable` impl needs a `T: ?Sized` bound for
- // this to compile if we were to normalize here.
- if forbid_generic && ty.needs_subst() {
- let mut err = tcx.sess.struct_span_err(
- path.span,
- "generic `Self` types are currently not permitted in anonymous constants",
- );
- if let Some(hir::Node::Item(&hir::Item {
- kind: hir::ItemKind::Impl(ref impl_),
- ..
- })) = tcx.hir().get_if_local(def_id)
- {
- err.span_note(impl_.self_ty.span, "not a concrete type");
- }
- err.emit();
- tcx.ty_error()
- } else {
- self.normalize_ty(span, ty)
- }
- }
- Res::Def(DefKind::AssocTy, def_id) => {
- debug_assert!(path.segments.len() >= 2);
- self.prohibit_generics(path.segments[..path.segments.len() - 2].iter(), |_| {});
- self.qpath_to_ty(
- span,
- opt_self_ty,
- def_id,
- &path.segments[path.segments.len() - 2],
- path.segments.last().unwrap(),
- )
- }
- Res::PrimTy(prim_ty) => {
- assert_eq!(opt_self_ty, None);
- self.prohibit_generics(path.segments.iter(), |err| {
- let name = prim_ty.name_str();
- for segment in path.segments {
- if let Some(args) = segment.args {
- err.span_suggestion_verbose(
- segment.ident.span.shrink_to_hi().to(args.span_ext),
- &format!("primitive type `{name}` doesn't have generic parameters"),
- "",
- Applicability::MaybeIncorrect,
- );
- }
- }
- });
- match prim_ty {
- hir::PrimTy::Bool => tcx.types.bool,
- hir::PrimTy::Char => tcx.types.char,
- hir::PrimTy::Int(it) => tcx.mk_mach_int(ty::int_ty(it)),
- hir::PrimTy::Uint(uit) => tcx.mk_mach_uint(ty::uint_ty(uit)),
- hir::PrimTy::Float(ft) => tcx.mk_mach_float(ty::float_ty(ft)),
- hir::PrimTy::Str => tcx.types.str_,
- }
- }
- Res::Err => {
- self.set_tainted_by_errors();
- self.tcx().ty_error()
- }
- _ => span_bug!(span, "unexpected resolution: {:?}", path.res),
- }
- }
-
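The `Res::SelfTy` arms above produce the "the `Self` type doesn't accept type parameters" suggestions. A sketch of code that should hit that path (struct name invented for the example):

    struct Wrapper<T>(T);

    impl<T> Wrapper<T> {
        // error[E0109]: type arguments are not allowed
        // help: the `Self` type doesn't accept type parameters
        fn make(value: T) -> Self<T> {
            Wrapper(value)
        }
    }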
- /// Parses the programmer's textual representation of a type into our
- /// internal notion of a type.
- pub fn ast_ty_to_ty(&self, ast_ty: &hir::Ty<'_>) -> Ty<'tcx> {
- self.ast_ty_to_ty_inner(ast_ty, false, false)
- }
-
- /// Parses the programmer's textual representation of a type into our
- /// internal notion of a type. This is meant to be used within a path.
- pub fn ast_ty_to_ty_in_path(&self, ast_ty: &hir::Ty<'_>) -> Ty<'tcx> {
- self.ast_ty_to_ty_inner(ast_ty, false, true)
- }
-
- /// Turns a `hir::Ty` into a `Ty`. For diagnostics' purposes we keep track of whether trait
- /// objects are borrowed like `&dyn Trait` to avoid emitting redundant errors.
- #[tracing::instrument(level = "debug", skip(self))]
- fn ast_ty_to_ty_inner(&self, ast_ty: &hir::Ty<'_>, borrowed: bool, in_path: bool) -> Ty<'tcx> {
- let tcx = self.tcx();
-
- let result_ty = match ast_ty.kind {
- hir::TyKind::Slice(ref ty) => tcx.mk_slice(self.ast_ty_to_ty(ty)),
- hir::TyKind::Ptr(ref mt) => {
- tcx.mk_ptr(ty::TypeAndMut { ty: self.ast_ty_to_ty(mt.ty), mutbl: mt.mutbl })
- }
- hir::TyKind::Rptr(ref region, ref mt) => {
- let r = self.ast_region_to_region(region, None);
- debug!(?r);
- let t = self.ast_ty_to_ty_inner(mt.ty, true, false);
- tcx.mk_ref(r, ty::TypeAndMut { ty: t, mutbl: mt.mutbl })
- }
- hir::TyKind::Never => tcx.types.never,
- hir::TyKind::Tup(fields) => tcx.mk_tup(fields.iter().map(|t| self.ast_ty_to_ty(t))),
- hir::TyKind::BareFn(bf) => {
- require_c_abi_if_c_variadic(tcx, bf.decl, bf.abi, ast_ty.span);
-
- tcx.mk_fn_ptr(self.ty_of_fn(
- ast_ty.hir_id,
- bf.unsafety,
- bf.abi,
- bf.decl,
- None,
- Some(ast_ty),
- ))
- }
- hir::TyKind::TraitObject(bounds, ref lifetime, _) => {
- self.maybe_lint_bare_trait(ast_ty, in_path);
- self.conv_object_ty_poly_trait_ref(ast_ty.span, bounds, lifetime, borrowed)
- }
- hir::TyKind::Path(hir::QPath::Resolved(ref maybe_qself, ref path)) => {
- debug!(?maybe_qself, ?path);
- let opt_self_ty = maybe_qself.as_ref().map(|qself| self.ast_ty_to_ty(qself));
- self.res_to_ty(opt_self_ty, path, false)
- }
- hir::TyKind::OpaqueDef(item_id, lifetimes) => {
- let opaque_ty = tcx.hir().item(item_id);
- let def_id = item_id.def_id.to_def_id();
-
- match opaque_ty.kind {
- hir::ItemKind::OpaqueTy(hir::OpaqueTy { origin, .. }) => {
- self.impl_trait_ty_to_ty(def_id, lifetimes, origin)
- }
- ref i => bug!("`impl Trait` pointed to non-opaque type?? {:#?}", i),
- }
- }
- hir::TyKind::Path(hir::QPath::TypeRelative(ref qself, ref segment)) => {
- debug!(?qself, ?segment);
- let ty = self.ast_ty_to_ty_inner(qself, false, true);
- self.associated_path_to_ty(ast_ty.hir_id, ast_ty.span, ty, qself, segment, false)
- .map(|(ty, _, _)| ty)
- .unwrap_or_else(|_| tcx.ty_error())
- }
- hir::TyKind::Path(hir::QPath::LangItem(lang_item, span, _)) => {
- let def_id = tcx.require_lang_item(lang_item, Some(span));
- let (substs, _) = self.create_substs_for_ast_path(
- span,
- def_id,
- &[],
- &hir::PathSegment::invalid(),
- &GenericArgs::none(),
- true,
- None,
- );
- EarlyBinder(self.normalize_ty(span, tcx.at(span).type_of(def_id)))
- .subst(tcx, substs)
- }
- hir::TyKind::Array(ref ty, ref length) => {
- let length = match length {
- &hir::ArrayLen::Infer(_, span) => self.ct_infer(tcx.types.usize, None, span),
- hir::ArrayLen::Body(constant) => {
- let length_def_id = tcx.hir().local_def_id(constant.hir_id);
- ty::Const::from_anon_const(tcx, length_def_id)
- }
- };
-
- let array_ty = tcx.mk_ty(ty::Array(self.ast_ty_to_ty(ty), length));
- self.normalize_ty(ast_ty.span, array_ty)
- }
- hir::TyKind::Typeof(ref e) => {
- let ty = tcx.type_of(tcx.hir().local_def_id(e.hir_id));
- let span = ast_ty.span;
- tcx.sess.emit_err(TypeofReservedKeywordUsed {
- span,
- ty,
- opt_sugg: Some((span, Applicability::MachineApplicable))
- .filter(|_| ty.is_suggestable(tcx, false)),
- });
-
- ty
- }
- hir::TyKind::Infer => {
- // Infer also appears as the type of arguments or return
- // values in an ExprKind::Closure, or as
- // the type of local variables. Both of these cases are
- // handled specially and will not descend into this routine.
- self.ty_infer(None, ast_ty.span)
- }
- hir::TyKind::Err => tcx.ty_error(),
- };
-
- debug!(?result_ty);
-
- self.record_ty(ast_ty.hir_id, result_ty, ast_ty.span);
- result_ty
- }
-
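The `TyKind::Typeof` arm corresponds to E0516: `typeof` parses but is reserved and unimplemented, and the diagnostic suggests the concrete type when it is nameable. Illustrative only:

    fn main() {
        // error[E0516]: `typeof` is a reserved keyword but unimplemented
        // (the diagnostic also suggests the concrete type, here `i32`, when it is suggestable)
        let _x: typeof(92) = 92;
    }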
- fn impl_trait_ty_to_ty(
- &self,
- def_id: DefId,
- lifetimes: &[hir::GenericArg<'_>],
- origin: OpaqueTyOrigin,
- ) -> Ty<'tcx> {
- debug!("impl_trait_ty_to_ty(def_id={:?}, lifetimes={:?})", def_id, lifetimes);
- let tcx = self.tcx();
-
- let generics = tcx.generics_of(def_id);
-
- debug!("impl_trait_ty_to_ty: generics={:?}", generics);
- let substs = InternalSubsts::for_item(tcx, def_id, |param, _| {
- if let Some(i) = (param.index as usize).checked_sub(generics.parent_count) {
- // Our own parameters are the resolved lifetimes.
- if let GenericParamDefKind::Lifetime = param.kind {
- if let hir::GenericArg::Lifetime(lifetime) = &lifetimes[i] {
- self.ast_region_to_region(lifetime, None).into()
- } else {
- bug!()
- }
- } else {
- bug!()
- }
- } else {
- match param.kind {
- // For RPIT (return position impl trait), only lifetimes
- // mentioned in the impl Trait predicate are captured by
- // the opaque type, so the lifetime parameters from the
- // parent item need to be replaced with `'static`.
- //
-                    // For `impl Trait` in the types of statics, constants,
-                    // locals and type aliases, all parent lifetimes are
-                    // captured, so these can use their identity substs.
- GenericParamDefKind::Lifetime
- if matches!(
- origin,
- hir::OpaqueTyOrigin::FnReturn(..) | hir::OpaqueTyOrigin::AsyncFn(..)
- ) =>
- {
- tcx.lifetimes.re_static.into()
- }
- _ => tcx.mk_param_from_def(param),
- }
- }
- });
- debug!("impl_trait_ty_to_ty: substs={:?}", substs);
-
- let ty = tcx.mk_opaque(def_id, substs);
- debug!("impl_trait_ty_to_ty: {}", ty);
- ty
- }
-
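The substitution above is what makes a return-position `impl Trait` capture only the lifetimes it names; under the pre-2024-edition capture rules, a parent lifetime that is used but not mentioned is rejected with E0700. Illustrative only:

    use std::fmt::Debug;

    // error[E0700]: hidden type for `impl Debug` captures lifetime that does not appear in bounds
    fn borrow<'a>(s: &'a str) -> impl Debug {
        s
    }

    // accepted: `'a` is mentioned in the bounds, so it is captured
    fn borrow_ok<'a>(s: &'a str) -> impl Debug + 'a {
        s
    }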
- pub fn ty_of_arg(&self, ty: &hir::Ty<'_>, expected_ty: Option<Ty<'tcx>>) -> Ty<'tcx> {
- match ty.kind {
- hir::TyKind::Infer if expected_ty.is_some() => {
- self.record_ty(ty.hir_id, expected_ty.unwrap(), ty.span);
- expected_ty.unwrap()
- }
- _ => self.ast_ty_to_ty(ty),
- }
- }
-
- pub fn ty_of_fn(
- &self,
- hir_id: hir::HirId,
- unsafety: hir::Unsafety,
- abi: abi::Abi,
- decl: &hir::FnDecl<'_>,
- generics: Option<&hir::Generics<'_>>,
- hir_ty: Option<&hir::Ty<'_>>,
- ) -> ty::PolyFnSig<'tcx> {
- debug!("ty_of_fn");
-
- let tcx = self.tcx();
- let bound_vars = tcx.late_bound_vars(hir_id);
- debug!(?bound_vars);
-
- // We proactively collect all the inferred type params to emit a single error per fn def.
- let mut visitor = HirPlaceholderCollector::default();
- let mut infer_replacements = vec![];
-
- if let Some(generics) = generics {
- walk_generics(&mut visitor, generics);
- }
-
- let input_tys: Vec<_> = decl
- .inputs
- .iter()
- .enumerate()
- .map(|(i, a)| {
- if let hir::TyKind::Infer = a.kind && !self.allow_ty_infer() {
- if let Some(suggested_ty) =
- self.suggest_trait_fn_ty_for_impl_fn_infer(hir_id, Some(i))
- {
- infer_replacements.push((a.span, suggested_ty.to_string()));
- return suggested_ty;
- }
- }
-
- // Only visit the type looking for `_` if we didn't fix the type above
- visitor.visit_ty(a);
- self.ty_of_arg(a, None)
- })
- .collect();
-
- let output_ty = match decl.output {
- hir::FnRetTy::Return(output) => {
- if let hir::TyKind::Infer = output.kind
- && !self.allow_ty_infer()
- && let Some(suggested_ty) =
- self.suggest_trait_fn_ty_for_impl_fn_infer(hir_id, None)
- {
- infer_replacements.push((output.span, suggested_ty.to_string()));
- suggested_ty
- } else {
- visitor.visit_ty(output);
- self.ast_ty_to_ty(output)
- }
- }
- hir::FnRetTy::DefaultReturn(..) => tcx.mk_unit(),
- };
-
- debug!("ty_of_fn: output_ty={:?}", output_ty);
-
- let fn_ty = tcx.mk_fn_sig(input_tys.into_iter(), output_ty, decl.c_variadic, unsafety, abi);
- let bare_fn_ty = ty::Binder::bind_with_vars(fn_ty, bound_vars);
-
- if !self.allow_ty_infer() && !(visitor.0.is_empty() && infer_replacements.is_empty()) {
- // We always collect the spans for placeholder types when evaluating `fn`s, but we
- // only want to emit an error complaining about them if infer types (`_`) are not
- // allowed. `allow_ty_infer` gates this behavior. We check for the presence of
- // `ident_span` to not emit an error twice when we have `fn foo(_: fn() -> _)`.
-
- let mut diag = crate::collect::placeholder_type_error_diag(
- tcx,
- generics,
- visitor.0,
- infer_replacements.iter().map(|(s, _)| *s).collect(),
- true,
- hir_ty,
- "function",
- );
-
- if !infer_replacements.is_empty() {
- diag.multipart_suggestion(&format!(
- "try replacing `_` with the type{} in the corresponding trait method signature",
- rustc_errors::pluralize!(infer_replacements.len()),
- ), infer_replacements, Applicability::MachineApplicable);
- }
-
- diag.emit();
- }
-
- // Find any late-bound regions declared in return type that do
- // not appear in the arguments. These are not well-formed.
- //
- // Example:
- // for<'a> fn() -> &'a str <-- 'a is bad
- // for<'a> fn(&'a String) -> &'a str <-- 'a is ok
- let inputs = bare_fn_ty.inputs();
- let late_bound_in_args =
- tcx.collect_constrained_late_bound_regions(&inputs.map_bound(|i| i.to_owned()));
- let output = bare_fn_ty.output();
- let late_bound_in_ret = tcx.collect_referenced_late_bound_regions(&output);
-
- self.validate_late_bound_regions(late_bound_in_args, late_bound_in_ret, |br_name| {
- struct_span_err!(
- tcx.sess,
- decl.output.span(),
- E0581,
- "return type references {}, which is not constrained by the fn input types",
- br_name
- )
- });
-
- bare_fn_ty
- }
-
-    /// Given the `fn_hir_id` of an impl function, suggest the type that is found on the
-    /// corresponding function in the trait that the impl implements, if it exists.
-    /// If `arg_idx` is `Some`, then it corresponds to an input type index; otherwise it
-    /// corresponds to the return type.
- fn suggest_trait_fn_ty_for_impl_fn_infer(
- &self,
- fn_hir_id: hir::HirId,
- arg_idx: Option<usize>,
- ) -> Option<Ty<'tcx>> {
- let tcx = self.tcx();
- let hir = tcx.hir();
-
- let hir::Node::ImplItem(hir::ImplItem { kind: hir::ImplItemKind::Fn(..), ident, .. }) =
- hir.get(fn_hir_id) else { return None };
- let hir::Node::Item(hir::Item { kind: hir::ItemKind::Impl(i), .. }) =
- hir.get(hir.get_parent_node(fn_hir_id)) else { bug!("ImplItem should have Impl parent") };
-
- let trait_ref =
- self.instantiate_mono_trait_ref(i.of_trait.as_ref()?, self.ast_ty_to_ty(i.self_ty));
-
- let assoc = tcx.associated_items(trait_ref.def_id).find_by_name_and_kind(
- tcx,
- *ident,
- ty::AssocKind::Fn,
- trait_ref.def_id,
- )?;
-
- let fn_sig = tcx.bound_fn_sig(assoc.def_id).subst(
- tcx,
- trait_ref.substs.extend_to(tcx, assoc.def_id, |param, _| tcx.mk_param_from_def(param)),
- );
-
- let ty = if let Some(arg_idx) = arg_idx { fn_sig.input(arg_idx) } else { fn_sig.output() };
-
- Some(tcx.liberate_late_bound_regions(fn_hir_id.expect_owner().to_def_id(), ty))
- }
-
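`suggest_trait_fn_ty_for_impl_fn_infer` powers the `_`-in-signature suggestion used by `ty_of_fn` above. A sketch of the situation it targets (trait and type names invented for the example):

    trait Scale {
        fn scale(&self, factor: f64) -> f64;
    }

    struct Meter(f64);

    impl Scale for Meter {
        // error[E0121]: the placeholder `_` is not allowed within types on item signatures
        // help: try replacing `_` with the types in the corresponding trait method signature
        fn scale(&self, factor: _) -> _ {
            self.0 * factor
        }
    }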
- fn validate_late_bound_regions(
- &self,
- constrained_regions: FxHashSet<ty::BoundRegionKind>,
- referenced_regions: FxHashSet<ty::BoundRegionKind>,
- generate_err: impl Fn(&str) -> DiagnosticBuilder<'tcx, ErrorGuaranteed>,
- ) {
- for br in referenced_regions.difference(&constrained_regions) {
- let br_name = match *br {
- ty::BrNamed(_, kw::UnderscoreLifetime) | ty::BrAnon(_) | ty::BrEnv => {
- "an anonymous lifetime".to_string()
- }
- ty::BrNamed(_, name) => format!("lifetime `{}`", name),
- };
-
- let mut err = generate_err(&br_name);
-
- if let ty::BrNamed(_, kw::UnderscoreLifetime) | ty::BrAnon(_) = *br {
- // The only way for an anonymous lifetime to wind up
- // in the return type but **also** be unconstrained is
- // if it only appears in "associated types" in the
- // input. See #47511 and #62200 for examples. In this case,
- // though we can easily give a hint that ought to be
- // relevant.
- err.note(
- "lifetimes appearing in an associated type are not considered constrained",
- );
- }
-
- err.emit();
- }
- }
-
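`validate_late_bound_regions` is what rejects a higher-ranked lifetime that appears only in the return type (E0581, quoted in the comment in `ty_of_fn` above). Illustrative only:

    fn main() {
        // error[E0581]: return type references lifetime `'a`, which is not constrained
        // by the fn input types
        let _bad: for<'a> fn() -> &'a str;

        // fine: `'a` also appears in an input
        let _ok: for<'a> fn(&'a String) -> &'a str;
    }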
- /// Given the bounds on an object, determines what single region bound (if any) we can
- /// use to summarize this type. The basic idea is that we will use the bound the user
- /// provided, if they provided one, and otherwise search the supertypes of trait bounds
- /// for region bounds. It may be that we can derive no bound at all, in which case
- /// we return `None`.
- fn compute_object_lifetime_bound(
- &self,
- span: Span,
- existential_predicates: &'tcx ty::List<ty::Binder<'tcx, ty::ExistentialPredicate<'tcx>>>,
- ) -> Option<ty::Region<'tcx>> // if None, use the default
- {
- let tcx = self.tcx();
-
- debug!("compute_opt_region_bound(existential_predicates={:?})", existential_predicates);
-
- // No explicit region bound specified. Therefore, examine trait
- // bounds and see if we can derive region bounds from those.
- let derived_region_bounds = object_region_bounds(tcx, existential_predicates);
-
- // If there are no derived region bounds, then report back that we
- // can find no region bound. The caller will use the default.
- if derived_region_bounds.is_empty() {
- return None;
- }
-
- // If any of the derived region bounds are 'static, that is always
- // the best choice.
- if derived_region_bounds.iter().any(|r| r.is_static()) {
- return Some(tcx.lifetimes.re_static);
- }
-
- // Determine whether there is exactly one unique region in the set
- // of derived region bounds. If so, use that. Otherwise, report an
- // error.
- let r = derived_region_bounds[0];
- if derived_region_bounds[1..].iter().any(|r1| r != *r1) {
- tcx.sess.emit_err(AmbiguousLifetimeBound { span });
- }
- Some(r)
- }
-
- /// Make sure that we are in the condition to suggest the blanket implementation.
- fn maybe_lint_blanket_trait_impl<T: rustc_errors::EmissionGuarantee>(
- &self,
- self_ty: &hir::Ty<'_>,
- diag: &mut DiagnosticBuilder<'_, T>,
- ) {
- let tcx = self.tcx();
- let parent_id = tcx.hir().get_parent_item(self_ty.hir_id);
- if let hir::Node::Item(hir::Item {
- kind:
- hir::ItemKind::Impl(hir::Impl {
- self_ty: impl_self_ty, of_trait: Some(of_trait_ref), generics, ..
- }),
- ..
- }) = tcx.hir().get_by_def_id(parent_id) && self_ty.hir_id == impl_self_ty.hir_id
- {
- if !of_trait_ref.trait_def_id().map_or(false, |def_id| def_id.is_local()) {
- return;
- }
- let of_trait_span = of_trait_ref.path.span;
-            // avoid `unwrap` here so that a missing snippet does not abort the compilation
- let Ok(impl_trait_name) = tcx.sess.source_map().span_to_snippet(self_ty.span) else { return; };
- let Ok(of_trait_name) = tcx.sess.source_map().span_to_snippet(of_trait_span) else { return; };
- // check if the trait has generics, to make a correct suggestion
- let param_name = generics.params.next_type_param_name(None);
-
- let add_generic_sugg = if let Some(span) = generics.span_for_param_suggestion() {
- (span, format!(", {}: {}", param_name, impl_trait_name))
- } else {
- (generics.span, format!("<{}: {}>", param_name, impl_trait_name))
- };
- diag.multipart_suggestion(
- format!("alternatively use a blanket \
- implementation to implement `{of_trait_name}` for \
- all types that also implement `{impl_trait_name}`"),
- vec![
- (self_ty.span, param_name),
- add_generic_sugg,
- ],
- Applicability::MaybeIncorrect,
- );
- }
- }
-
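A sketch of the situation `maybe_lint_blanket_trait_impl` targets: an impl written for a bare trait without `dyn`, where the suggested alternative is a blanket implementation (trait names invented for the example):

    trait Backend {}
    trait Renderer {}

    // 2021 edition: error[E0782]: trait objects must include the `dyn` keyword
    // one suggestion offered here: implement `Renderer` for all types that also
    // implement `Backend`, i.e. `impl<T: Backend> Renderer for T {}`
    impl Renderer for Backend {}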
- fn maybe_lint_bare_trait(&self, self_ty: &hir::Ty<'_>, in_path: bool) {
- let tcx = self.tcx();
- if let hir::TyKind::TraitObject([poly_trait_ref, ..], _, TraitObjectSyntax::None) =
- self_ty.kind
- {
- let needs_bracket = in_path
- && !tcx
- .sess
- .source_map()
- .span_to_prev_source(self_ty.span)
- .ok()
- .map_or(false, |s| s.trim_end().ends_with('<'));
-
- let is_global = poly_trait_ref.trait_ref.path.is_global();
- let sugg = Vec::from_iter([
- (
- self_ty.span.shrink_to_lo(),
- format!(
- "{}dyn {}",
- if needs_bracket { "<" } else { "" },
- if is_global { "(" } else { "" },
- ),
- ),
- (
- self_ty.span.shrink_to_hi(),
- format!(
- "{}{}",
- if is_global { ")" } else { "" },
- if needs_bracket { ">" } else { "" },
- ),
- ),
- ]);
- if self_ty.span.edition() >= Edition::Edition2021 {
- let msg = "trait objects must include the `dyn` keyword";
- let label = "add `dyn` keyword before this trait";
- let mut diag =
- rustc_errors::struct_span_err!(tcx.sess, self_ty.span, E0782, "{}", msg);
- diag.multipart_suggestion_verbose(label, sugg, Applicability::MachineApplicable);
-                // check if the impl trait that we are considering is an impl of a local trait
- self.maybe_lint_blanket_trait_impl(&self_ty, &mut diag);
- diag.emit();
- } else {
- let msg = "trait objects without an explicit `dyn` are deprecated";
- tcx.struct_span_lint_hir(
- BARE_TRAIT_OBJECTS,
- self_ty.hir_id,
- self_ty.span,
- |lint| {
- let mut diag = lint.build(msg);
- diag.multipart_suggestion_verbose(
- "use `dyn`",
- sugg,
- Applicability::MachineApplicable,
- );
- self.maybe_lint_blanket_trait_impl::<()>(&self_ty, &mut diag);
- diag.emit();
- },
- );
- }
- }
- }
-}
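And the plainer case handled by `maybe_lint_bare_trait`: a bare trait in type position, a lint on older editions and a hard error (E0782) on 2021. Illustrative only:

    trait Greet {
        fn hi(&self);
    }

    // 2015/2018: warning: trait objects without an explicit `dyn` are deprecated
    // 2021:      error[E0782]: trait objects must include the `dyn` keyword
    fn call(g: &Greet) {
        g.hi()
    }

    // suggested form
    fn call_dyn(g: &dyn Greet) {
        g.hi()
    }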
diff --git a/compiler/rustc_typeck/src/check/callee.rs b/compiler/rustc_typeck/src/check/callee.rs
deleted file mode 100644
index 75f5aced8..000000000
--- a/compiler/rustc_typeck/src/check/callee.rs
+++ /dev/null
@@ -1,675 +0,0 @@
-use super::method::MethodCallee;
-use super::{Expectation, FnCtxt, TupleArgumentsFlag};
-use crate::type_error_struct;
-
-use rustc_errors::{struct_span_err, Applicability, Diagnostic};
-use rustc_hir as hir;
-use rustc_hir::def::{self, Namespace, Res};
-use rustc_hir::def_id::DefId;
-use rustc_infer::{
- infer,
- traits::{self, Obligation},
-};
-use rustc_infer::{
- infer::type_variable::{TypeVariableOrigin, TypeVariableOriginKind},
- traits::ObligationCause,
-};
-use rustc_middle::ty::adjustment::{
- Adjust, Adjustment, AllowTwoPhase, AutoBorrow, AutoBorrowMutability,
-};
-use rustc_middle::ty::subst::{Subst, SubstsRef};
-use rustc_middle::ty::{self, Ty, TyCtxt, TypeVisitable};
-use rustc_span::def_id::LocalDefId;
-use rustc_span::symbol::{sym, Ident};
-use rustc_span::Span;
-use rustc_target::spec::abi;
-use rustc_trait_selection::autoderef::Autoderef;
-use rustc_trait_selection::traits::query::evaluate_obligation::InferCtxtExt;
-
-use std::iter;
-
-/// Checks that it is legal to call methods of the trait corresponding
-/// to `trait_id` (this only cares about the trait, not the specific
-/// method that is called).
-pub fn check_legal_trait_for_method_call(
- tcx: TyCtxt<'_>,
- span: Span,
- receiver: Option<Span>,
- expr_span: Span,
- trait_id: DefId,
-) {
- if tcx.lang_items().drop_trait() == Some(trait_id) {
- let mut err = struct_span_err!(tcx.sess, span, E0040, "explicit use of destructor method");
- err.span_label(span, "explicit destructor calls not allowed");
-
- let (sp, suggestion) = receiver
- .and_then(|s| tcx.sess.source_map().span_to_snippet(s).ok())
- .filter(|snippet| !snippet.is_empty())
- .map(|snippet| (expr_span, format!("drop({snippet})")))
- .unwrap_or_else(|| (span, "drop".to_string()));
-
- err.span_suggestion(
- sp,
- "consider using `drop` function",
- suggestion,
- Applicability::MaybeIncorrect,
- );
-
- err.emit();
- }
-}
-
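`check_legal_trait_for_method_call` is behind E0040. A minimal trigger (type name invented for the example):

    struct Connection;

    impl Drop for Connection {
        fn drop(&mut self) {}
    }

    fn main() {
        let conn = Connection;
        // error[E0040]: explicit use of destructor method
        // help: consider using `drop` function: `drop(conn)`
        conn.drop();
    }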
-enum CallStep<'tcx> {
- Builtin(Ty<'tcx>),
- DeferredClosure(LocalDefId, ty::FnSig<'tcx>),
- /// E.g., enum variant constructors.
- Overloaded(MethodCallee<'tcx>),
-}
-
-impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
- pub fn check_call(
- &self,
- call_expr: &'tcx hir::Expr<'tcx>,
- callee_expr: &'tcx hir::Expr<'tcx>,
- arg_exprs: &'tcx [hir::Expr<'tcx>],
- expected: Expectation<'tcx>,
- ) -> Ty<'tcx> {
- let original_callee_ty = match &callee_expr.kind {
- hir::ExprKind::Path(hir::QPath::Resolved(..) | hir::QPath::TypeRelative(..)) => self
- .check_expr_with_expectation_and_args(
- callee_expr,
- Expectation::NoExpectation,
- arg_exprs,
- ),
- _ => self.check_expr(callee_expr),
- };
-
- let expr_ty = self.structurally_resolved_type(call_expr.span, original_callee_ty);
-
- let mut autoderef = self.autoderef(callee_expr.span, expr_ty);
- let mut result = None;
- while result.is_none() && autoderef.next().is_some() {
- result = self.try_overloaded_call_step(call_expr, callee_expr, arg_exprs, &autoderef);
- }
- self.register_predicates(autoderef.into_obligations());
-
- let output = match result {
- None => {
- // this will report an error since original_callee_ty is not a fn
- self.confirm_builtin_call(
- call_expr,
- callee_expr,
- original_callee_ty,
- arg_exprs,
- expected,
- )
- }
-
- Some(CallStep::Builtin(callee_ty)) => {
- self.confirm_builtin_call(call_expr, callee_expr, callee_ty, arg_exprs, expected)
- }
-
- Some(CallStep::DeferredClosure(def_id, fn_sig)) => {
- self.confirm_deferred_closure_call(call_expr, arg_exprs, expected, def_id, fn_sig)
- }
-
- Some(CallStep::Overloaded(method_callee)) => {
- self.confirm_overloaded_call(call_expr, arg_exprs, expected, method_callee)
- }
- };
-
- // we must check that return type of called functions is WF:
- self.register_wf_obligation(output.into(), call_expr.span, traits::WellFormed(None));
-
- output
- }
-
- fn try_overloaded_call_step(
- &self,
- call_expr: &'tcx hir::Expr<'tcx>,
- callee_expr: &'tcx hir::Expr<'tcx>,
- arg_exprs: &'tcx [hir::Expr<'tcx>],
- autoderef: &Autoderef<'a, 'tcx>,
- ) -> Option<CallStep<'tcx>> {
- let adjusted_ty =
- self.structurally_resolved_type(autoderef.span(), autoderef.final_ty(false));
- debug!(
- "try_overloaded_call_step(call_expr={:?}, adjusted_ty={:?})",
- call_expr, adjusted_ty
- );
-
- // If the callee is a bare function or a closure, then we're all set.
- match *adjusted_ty.kind() {
- ty::FnDef(..) | ty::FnPtr(_) => {
- let adjustments = self.adjust_steps(autoderef);
- self.apply_adjustments(callee_expr, adjustments);
- return Some(CallStep::Builtin(adjusted_ty));
- }
-
- ty::Closure(def_id, substs) => {
- let def_id = def_id.expect_local();
-
- // Check whether this is a call to a closure where we
- // haven't yet decided on whether the closure is fn vs
- // fnmut vs fnonce. If so, we have to defer further processing.
- if self.closure_kind(substs).is_none() {
- let closure_sig = substs.as_closure().sig();
- let closure_sig = self.replace_bound_vars_with_fresh_vars(
- call_expr.span,
- infer::FnCall,
- closure_sig,
- );
- let adjustments = self.adjust_steps(autoderef);
- self.record_deferred_call_resolution(
- def_id,
- DeferredCallResolution {
- call_expr,
- callee_expr,
- adjusted_ty,
- adjustments,
- fn_sig: closure_sig,
- closure_substs: substs,
- },
- );
- return Some(CallStep::DeferredClosure(def_id, closure_sig));
- }
- }
-
- // Hack: we know that there are traits implementing Fn for &F
- // where F:Fn and so forth. In the particular case of types
- // like `x: &mut FnMut()`, if there is a call `x()`, we would
- // normally translate to `FnMut::call_mut(&mut x, ())`, but
- // that winds up requiring `mut x: &mut FnMut()`. A little
- // over the top. The simplest fix by far is to just ignore
- // this case and deref again, so we wind up with
- // `FnMut::call_mut(&mut *x, ())`.
- ty::Ref(..) if autoderef.step_count() == 0 => {
- return None;
- }
-
- _ => {}
- }
-
- // Now, we look for the implementation of a Fn trait on the object's type.
- // We first do it with the explicit instruction to look for an impl of
- // `Fn<Tuple>`, with the tuple `Tuple` having an arity corresponding
- // to the number of call parameters.
- // If that fails (or_else branch), we try again without specifying the
-        // shape of the tuple (hence the None). This allows us to detect that an Fn
-        // trait is implemented, and to use this information for diagnostics.
- self.try_overloaded_call_traits(call_expr, adjusted_ty, Some(arg_exprs))
- .or_else(|| self.try_overloaded_call_traits(call_expr, adjusted_ty, None))
- .map(|(autoref, method)| {
- let mut adjustments = self.adjust_steps(autoderef);
- adjustments.extend(autoref);
- self.apply_adjustments(callee_expr, adjustments);
- CallStep::Overloaded(method)
- })
- }
-
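The `ty::Ref` arm above (the "deref again" hack) is what lets a `&mut dyn FnMut()` receiver be called without a `mut` binding, since the call lowers to `FnMut::call_mut(&mut *f, ())`:

    fn call_twice(f: &mut dyn FnMut()) {
        // `f` itself is not a `mut` binding; the receiver becomes `&mut *f`
        f();
        f();
    }

    fn main() {
        let mut count = 0;
        call_twice(&mut || count += 1);
        assert_eq!(count, 2);
    }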
- fn try_overloaded_call_traits(
- &self,
- call_expr: &hir::Expr<'_>,
- adjusted_ty: Ty<'tcx>,
- opt_arg_exprs: Option<&'tcx [hir::Expr<'tcx>]>,
- ) -> Option<(Option<Adjustment<'tcx>>, MethodCallee<'tcx>)> {
- // Try the options that are least restrictive on the caller first.
- for (opt_trait_def_id, method_name, borrow) in [
- (self.tcx.lang_items().fn_trait(), Ident::with_dummy_span(sym::call), true),
- (self.tcx.lang_items().fn_mut_trait(), Ident::with_dummy_span(sym::call_mut), true),
- (self.tcx.lang_items().fn_once_trait(), Ident::with_dummy_span(sym::call_once), false),
- ] {
- let Some(trait_def_id) = opt_trait_def_id else { continue };
-
- let opt_input_types = opt_arg_exprs.map(|arg_exprs| {
- [self.tcx.mk_tup(arg_exprs.iter().map(|e| {
- self.next_ty_var(TypeVariableOrigin {
- kind: TypeVariableOriginKind::TypeInference,
- span: e.span,
- })
- }))]
- });
- let opt_input_types = opt_input_types.as_ref().map(AsRef::as_ref);
-
- if let Some(ok) = self.lookup_method_in_trait(
- call_expr.span,
- method_name,
- trait_def_id,
- adjusted_ty,
- opt_input_types,
- ) {
- let method = self.register_infer_ok_obligations(ok);
- let mut autoref = None;
- if borrow {
- // Check for &self vs &mut self in the method signature. Since this is either
- // the Fn or FnMut trait, it should be one of those.
- let ty::Ref(region, _, mutbl) = method.sig.inputs()[0].kind() else {
- // The `fn`/`fn_mut` lang item is ill-formed, which should have
- // caused an error elsewhere.
- self.tcx
- .sess
- .delay_span_bug(call_expr.span, "input to call/call_mut is not a ref?");
- return None;
- };
-
- let mutbl = match mutbl {
- hir::Mutability::Not => AutoBorrowMutability::Not,
- hir::Mutability::Mut => AutoBorrowMutability::Mut {
- // For initial two-phase borrow
- // deployment, conservatively omit
- // overloaded function call ops.
- allow_two_phase_borrow: AllowTwoPhase::No,
- },
- };
- autoref = Some(Adjustment {
- kind: Adjust::Borrow(AutoBorrow::Ref(*region, mutbl)),
- target: method.sig.inputs()[0],
- });
- }
- return Some((autoref, method));
- }
- }
-
- None
- }
-
-    /// Give an appropriate suggestion when encountering `||{/* not callable */}()`, where the
-    /// likely intention is to call the closure; suggest `(||{})()` instead. (#55851)
- fn identify_bad_closure_def_and_call(
- &self,
- err: &mut Diagnostic,
- hir_id: hir::HirId,
- callee_node: &hir::ExprKind<'_>,
- callee_span: Span,
- ) {
- let hir = self.tcx.hir();
- let parent_hir_id = hir.get_parent_node(hir_id);
- let parent_node = hir.get(parent_hir_id);
- if let (
- hir::Node::Expr(hir::Expr {
- kind: hir::ExprKind::Closure(&hir::Closure { fn_decl_span, body, .. }),
- ..
- }),
- hir::ExprKind::Block(..),
- ) = (parent_node, callee_node)
- {
- let fn_decl_span = if hir.body(body).generator_kind
- == Some(hir::GeneratorKind::Async(hir::AsyncGeneratorKind::Closure))
- {
- // Actually need to unwrap a few more layers of HIR to get to
- // the _real_ closure...
- let async_closure = hir.get_parent_node(hir.get_parent_node(parent_hir_id));
- if let hir::Node::Expr(hir::Expr {
- kind: hir::ExprKind::Closure(&hir::Closure { fn_decl_span, .. }),
- ..
- }) = hir.get(async_closure)
- {
- fn_decl_span
- } else {
- return;
- }
- } else {
- fn_decl_span
- };
-
- let start = fn_decl_span.shrink_to_lo();
- let end = callee_span.shrink_to_hi();
- err.multipart_suggestion(
- "if you meant to create this closure and immediately call it, surround the \
- closure with parentheses",
- vec![(start, "(".to_string()), (end, ")".to_string())],
- Applicability::MaybeIncorrect,
- );
- }
- }
-
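A sketch of the #55851 shape `identify_bad_closure_def_and_call` looks for:

    fn main() {
        // parsed as a closure whose body is `{ 1 + 1 }()`, i.e. a call on a block
        // error[E0618]: expected function, found `{integer}`
        // help: if you meant to create this closure and immediately call it,
        //       surround the closure with parentheses
        let _value = || { 1 + 1 }();

        // intended form
        let _value = (|| { 1 + 1 })();
    }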
-    /// Give an appropriate suggestion when encountering `[("a", 0) ("b", 1)]`, where the
-    /// likely intention is to create an array containing tuples.
- fn maybe_suggest_bad_array_definition(
- &self,
- err: &mut Diagnostic,
- call_expr: &'tcx hir::Expr<'tcx>,
- callee_expr: &'tcx hir::Expr<'tcx>,
- ) -> bool {
- let hir_id = self.tcx.hir().get_parent_node(call_expr.hir_id);
- let parent_node = self.tcx.hir().get(hir_id);
- if let (
- hir::Node::Expr(hir::Expr { kind: hir::ExprKind::Array(_), .. }),
- hir::ExprKind::Tup(exp),
- hir::ExprKind::Call(_, args),
- ) = (parent_node, &callee_expr.kind, &call_expr.kind)
- && args.len() == exp.len()
- {
- let start = callee_expr.span.shrink_to_hi();
- err.span_suggestion(
- start,
- "consider separating array elements with a comma",
- ",",
- Applicability::MaybeIncorrect,
- );
- return true;
- }
- false
- }
-
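And the array shape checked just above:

    fn main() {
        // parsed as a call expression `("a", 0)("b", 1)` inside the array
        // error[E0618]: expected function, found `(&str, {integer})`
        // help: consider separating array elements with a comma
        let _pairs = [("a", 0) ("b", 1)];

        // intended form
        let _pairs = [("a", 0), ("b", 1)];
    }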
- fn confirm_builtin_call(
- &self,
- call_expr: &'tcx hir::Expr<'tcx>,
- callee_expr: &'tcx hir::Expr<'tcx>,
- callee_ty: Ty<'tcx>,
- arg_exprs: &'tcx [hir::Expr<'tcx>],
- expected: Expectation<'tcx>,
- ) -> Ty<'tcx> {
- let (fn_sig, def_id) = match *callee_ty.kind() {
- ty::FnDef(def_id, subst) => {
- let fn_sig = self.tcx.bound_fn_sig(def_id).subst(self.tcx, subst);
-
- // Unit testing: function items annotated with
- // `#[rustc_evaluate_where_clauses]` trigger special output
- // to let us test the trait evaluation system.
- if self.tcx.has_attr(def_id, sym::rustc_evaluate_where_clauses) {
- let predicates = self.tcx.predicates_of(def_id);
- let predicates = predicates.instantiate(self.tcx, subst);
- for (predicate, predicate_span) in
- predicates.predicates.iter().zip(&predicates.spans)
- {
- let obligation = Obligation::new(
- ObligationCause::dummy_with_span(callee_expr.span),
- self.param_env,
- *predicate,
- );
- let result = self.evaluate_obligation(&obligation);
- self.tcx
- .sess
- .struct_span_err(
- callee_expr.span,
- &format!("evaluate({:?}) = {:?}", predicate, result),
- )
- .span_label(*predicate_span, "predicate")
- .emit();
- }
- }
- (fn_sig, Some(def_id))
- }
- ty::FnPtr(sig) => (sig, None),
- _ => {
- let mut unit_variant = None;
- if let hir::ExprKind::Path(qpath) = &callee_expr.kind
- && let Res::Def(def::DefKind::Ctor(kind, def::CtorKind::Const), _)
- = self.typeck_results.borrow().qpath_res(qpath, callee_expr.hir_id)
- // Only suggest removing parens if there are no arguments
- && arg_exprs.is_empty()
- {
- let descr = match kind {
- def::CtorOf::Struct => "struct",
- def::CtorOf::Variant => "enum variant",
- };
- let removal_span =
- callee_expr.span.shrink_to_hi().to(call_expr.span.shrink_to_hi());
- unit_variant =
- Some((removal_span, descr, rustc_hir_pretty::qpath_to_string(qpath)));
- }
-
- let callee_ty = self.resolve_vars_if_possible(callee_ty);
- let mut err = type_error_struct!(
- self.tcx.sess,
- callee_expr.span,
- callee_ty,
- E0618,
- "expected function, found {}",
- match &unit_variant {
- Some((_, kind, path)) => format!("{kind} `{path}`"),
- None => format!("`{callee_ty}`"),
- }
- );
-
- self.identify_bad_closure_def_and_call(
- &mut err,
- call_expr.hir_id,
- &callee_expr.kind,
- callee_expr.span,
- );
-
- if let Some((removal_span, kind, path)) = &unit_variant {
- err.span_suggestion_verbose(
- *removal_span,
- &format!(
- "`{path}` is a unit {kind}, and does not take parentheses to be constructed",
- ),
- "",
- Applicability::MachineApplicable,
- );
- }
-
- let mut inner_callee_path = None;
- let def = match callee_expr.kind {
- hir::ExprKind::Path(ref qpath) => {
- self.typeck_results.borrow().qpath_res(qpath, callee_expr.hir_id)
- }
- hir::ExprKind::Call(ref inner_callee, _) => {
- // If the call spans more than one line and the callee kind is
- // itself another `ExprCall`, that's a clue that we might just be
- // missing a semicolon (Issue #51055)
- let call_is_multiline =
- self.tcx.sess.source_map().is_multiline(call_expr.span);
- if call_is_multiline {
- err.span_suggestion(
- callee_expr.span.shrink_to_hi(),
- "consider using a semicolon here",
- ";",
- Applicability::MaybeIncorrect,
- );
- }
- if let hir::ExprKind::Path(ref inner_qpath) = inner_callee.kind {
- inner_callee_path = Some(inner_qpath);
- self.typeck_results.borrow().qpath_res(inner_qpath, inner_callee.hir_id)
- } else {
- Res::Err
- }
- }
- _ => Res::Err,
- };
-
- if !self.maybe_suggest_bad_array_definition(&mut err, call_expr, callee_expr) {
- err.span_label(call_expr.span, "call expression requires function");
- }
-
- if let Some(span) = self.tcx.hir().res_span(def) {
- let callee_ty = callee_ty.to_string();
- let label = match (unit_variant, inner_callee_path) {
- (Some((_, kind, path)), _) => Some(format!("{kind} `{path}` defined here")),
- (_, Some(hir::QPath::Resolved(_, path))) => self
- .tcx
- .sess
- .source_map()
- .span_to_snippet(path.span)
- .ok()
- .map(|p| format!("`{p}` defined here returns `{callee_ty}`")),
- _ => {
- match def {
- // Emit a different diagnostic for local variables, as they are not
- // type definitions themselves, but rather variables *of* that type.
- Res::Local(hir_id) => Some(format!(
- "`{}` has type `{}`",
- self.tcx.hir().name(hir_id),
- callee_ty
- )),
- Res::Def(kind, def_id) if kind.ns() == Some(Namespace::ValueNS) => {
- Some(format!(
- "`{}` defined here",
- self.tcx.def_path_str(def_id),
- ))
- }
- _ => Some(format!("`{callee_ty}` defined here")),
- }
- }
- };
- if let Some(label) = label {
- err.span_label(span, label);
- }
- }
- err.emit();
-
- // This is the "default" function signature, used in case of error.
- // In that case, we check each argument against "error" in order to
- // set up all the node type bindings.
- (
- ty::Binder::dummy(self.tcx.mk_fn_sig(
- self.err_args(arg_exprs.len()).into_iter(),
- self.tcx.ty_error(),
- false,
- hir::Unsafety::Normal,
- abi::Abi::Rust,
- )),
- None,
- )
- }
- };
-
- // Replace any late-bound regions that appear in the function
- // signature with region variables. We also have to
- // renormalize the associated types at this point, since they
- // previously appeared within a `Binder<>` and hence would not
- // have been normalized before.
- let fn_sig = self.replace_bound_vars_with_fresh_vars(call_expr.span, infer::FnCall, fn_sig);
- let fn_sig = self.normalize_associated_types_in(call_expr.span, fn_sig);
-
- // Call the generic checker.
- let expected_arg_tys = self.expected_inputs_for_expected_output(
- call_expr.span,
- expected,
- fn_sig.output(),
- fn_sig.inputs(),
- );
- self.check_argument_types(
- call_expr.span,
- call_expr,
- fn_sig.inputs(),
- expected_arg_tys,
- arg_exprs,
- fn_sig.c_variadic,
- TupleArgumentsFlag::DontTupleArguments,
- def_id,
- );
-
- fn_sig.output()
- }
-
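The `unit_variant` branch of `confirm_builtin_call` covers calls to unit constructors. Illustrative only (enum invented for the example):

    enum Status {
        Ready,
    }

    fn main() {
        // error[E0618]: expected function, found enum variant `Status::Ready`
        // help: `Status::Ready` is a unit variant, and does not take
        //       parentheses to be constructed
        let _status = Status::Ready();
    }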
- fn confirm_deferred_closure_call(
- &self,
- call_expr: &'tcx hir::Expr<'tcx>,
- arg_exprs: &'tcx [hir::Expr<'tcx>],
- expected: Expectation<'tcx>,
- closure_def_id: LocalDefId,
- fn_sig: ty::FnSig<'tcx>,
- ) -> Ty<'tcx> {
- // `fn_sig` is the *signature* of the closure being called. We
- // don't know the full details yet (`Fn` vs `FnMut` etc), but we
- // do know the types expected for each argument and the return
- // type.
-
- let expected_arg_tys = self.expected_inputs_for_expected_output(
- call_expr.span,
- expected,
- fn_sig.output(),
- fn_sig.inputs(),
- );
-
- self.check_argument_types(
- call_expr.span,
- call_expr,
- fn_sig.inputs(),
- expected_arg_tys,
- arg_exprs,
- fn_sig.c_variadic,
- TupleArgumentsFlag::TupleArguments,
- Some(closure_def_id.to_def_id()),
- );
-
- fn_sig.output()
- }
-
- fn confirm_overloaded_call(
- &self,
- call_expr: &'tcx hir::Expr<'tcx>,
- arg_exprs: &'tcx [hir::Expr<'tcx>],
- expected: Expectation<'tcx>,
- method_callee: MethodCallee<'tcx>,
- ) -> Ty<'tcx> {
- let output_type = self.check_method_argument_types(
- call_expr.span,
- call_expr,
- Ok(method_callee),
- arg_exprs,
- TupleArgumentsFlag::TupleArguments,
- expected,
- );
-
- self.write_method_call(call_expr.hir_id, method_callee);
- output_type
- }
-}
-
-#[derive(Debug)]
-pub struct DeferredCallResolution<'tcx> {
- call_expr: &'tcx hir::Expr<'tcx>,
- callee_expr: &'tcx hir::Expr<'tcx>,
- adjusted_ty: Ty<'tcx>,
- adjustments: Vec<Adjustment<'tcx>>,
- fn_sig: ty::FnSig<'tcx>,
- closure_substs: SubstsRef<'tcx>,
-}
-
-impl<'a, 'tcx> DeferredCallResolution<'tcx> {
- pub fn resolve(self, fcx: &FnCtxt<'a, 'tcx>) {
- debug!("DeferredCallResolution::resolve() {:?}", self);
-
- // we should not be invoked until the closure kind has been
- // determined by upvar inference
- assert!(fcx.closure_kind(self.closure_substs).is_some());
-
- // We may now know enough to figure out fn vs fnmut etc.
- match fcx.try_overloaded_call_traits(self.call_expr, self.adjusted_ty, None) {
- Some((autoref, method_callee)) => {
- // One problem is that when we get here, we are going
- // to have a newly instantiated function signature
- // from the call trait. This has to be reconciled with
-                // the older function signature we had before. In
-                // principle we *should* be able to unify the two fn sigs
-                // directly, but we can't because of the annoying need for a TypeTrace.
- // (This always bites me, should find a way to
- // refactor it.)
- let method_sig = method_callee.sig;
-
- debug!("attempt_resolution: method_callee={:?}", method_callee);
-
- for (method_arg_ty, self_arg_ty) in
- iter::zip(method_sig.inputs().iter().skip(1), self.fn_sig.inputs())
- {
- fcx.demand_eqtype(self.call_expr.span, *self_arg_ty, *method_arg_ty);
- }
-
- fcx.demand_eqtype(self.call_expr.span, method_sig.output(), self.fn_sig.output());
-
- let mut adjustments = self.adjustments;
- adjustments.extend(autoref);
- fcx.apply_adjustments(self.callee_expr, adjustments);
-
- fcx.write_method_call(self.call_expr.hir_id, method_callee);
- }
- None => {
- // This can happen if `#![no_core]` is used and the `fn/fn_mut/fn_once`
- // lang items are not defined (issue #86238).
- let mut err = fcx.inh.tcx.sess.struct_span_err(
- self.call_expr.span,
- "failed to find an overloaded call trait for closure call",
- );
- err.help(
- "make sure the `fn`/`fn_mut`/`fn_once` lang items are defined \
- and have associated `call`/`call_mut`/`call_once` functions",
- );
- err.emit();
- }
- }
- }
-}
diff --git a/compiler/rustc_typeck/src/check/check.rs b/compiler/rustc_typeck/src/check/check.rs
deleted file mode 100644
index 9c1fd9b30..000000000
--- a/compiler/rustc_typeck/src/check/check.rs
+++ /dev/null
@@ -1,1712 +0,0 @@
-use crate::check::intrinsicck::InlineAsmCtxt;
-
-use super::coercion::CoerceMany;
-use super::compare_method::check_type_bounds;
-use super::compare_method::{compare_const_impl, compare_impl_method, compare_ty_impl};
-use super::*;
-use rustc_attr as attr;
-use rustc_errors::{Applicability, ErrorGuaranteed, MultiSpan};
-use rustc_hir as hir;
-use rustc_hir::def::{DefKind, Res};
-use rustc_hir::def_id::{DefId, LocalDefId};
-use rustc_hir::intravisit::Visitor;
-use rustc_hir::lang_items::LangItem;
-use rustc_hir::{ItemKind, Node, PathSegment};
-use rustc_infer::infer::outlives::env::OutlivesEnvironment;
-use rustc_infer::infer::type_variable::{TypeVariableOrigin, TypeVariableOriginKind};
-use rustc_infer::infer::{DefiningAnchor, RegionVariableOrigin, TyCtxtInferExt};
-use rustc_infer::traits::Obligation;
-use rustc_lint::builtin::REPR_TRANSPARENT_EXTERNAL_PRIVATE_FIELDS;
-use rustc_middle::hir::nested_filter;
-use rustc_middle::ty::layout::{LayoutError, MAX_SIMD_LANES};
-use rustc_middle::ty::subst::GenericArgKind;
-use rustc_middle::ty::util::{Discr, IntTypeExt};
-use rustc_middle::ty::{
- self, ParamEnv, ToPredicate, Ty, TyCtxt, TypeSuperVisitable, TypeVisitable,
-};
-use rustc_session::lint::builtin::{UNINHABITED_STATIC, UNSUPPORTED_CALLING_CONVENTIONS};
-use rustc_span::symbol::sym;
-use rustc_span::{self, Span};
-use rustc_target::spec::abi::Abi;
-use rustc_trait_selection::traits::error_reporting::InferCtxtExt as _;
-use rustc_trait_selection::traits::{self, ObligationCtxt};
-use rustc_ty_utils::representability::{self, Representability};
-
-use std::iter;
-use std::ops::ControlFlow;
-
-pub(super) fn check_abi(tcx: TyCtxt<'_>, hir_id: hir::HirId, span: Span, abi: Abi) {
- match tcx.sess.target.is_abi_supported(abi) {
- Some(true) => (),
- Some(false) => {
- struct_span_err!(
- tcx.sess,
- span,
- E0570,
- "`{abi}` is not a supported ABI for the current target",
- )
- .emit();
- }
- None => {
- tcx.struct_span_lint_hir(UNSUPPORTED_CALLING_CONVENTIONS, hir_id, span, |lint| {
- lint.build("use of calling convention not supported on this target").emit();
- });
- }
- }
-
- // This ABI is only allowed on function pointers
- if abi == Abi::CCmseNonSecureCall {
- struct_span_err!(
- tcx.sess,
- span,
- E0781,
- "the `\"C-cmse-nonsecure-call\"` ABI is only allowed on function pointers"
- )
- .emit();
- }
-}
-
-/// Helper used for fns and closures. Does the grungy work of checking a function
-/// body and returns the function context used for that purpose, since in the case of a fn item
-/// there is still a bit more to do.
-///
-/// * ...
-/// * inherited: other fields inherited from the enclosing fn (if any)
-#[instrument(skip(inherited, body), level = "debug")]
-pub(super) fn check_fn<'a, 'tcx>(
- inherited: &'a Inherited<'a, 'tcx>,
- param_env: ty::ParamEnv<'tcx>,
- fn_sig: ty::FnSig<'tcx>,
- decl: &'tcx hir::FnDecl<'tcx>,
- fn_id: hir::HirId,
- body: &'tcx hir::Body<'tcx>,
- can_be_generator: Option<hir::Movability>,
- return_type_pre_known: bool,
-) -> (FnCtxt<'a, 'tcx>, Option<GeneratorTypes<'tcx>>) {
- // Create the function context. This is either derived from scratch or,
- // in the case of closures, based on the outer context.
- let mut fcx = FnCtxt::new(inherited, param_env, body.value.hir_id);
- fcx.ps.set(UnsafetyState::function(fn_sig.unsafety, fn_id));
- fcx.return_type_pre_known = return_type_pre_known;
-
- let tcx = fcx.tcx;
- let hir = tcx.hir();
-
- let declared_ret_ty = fn_sig.output();
-
- let ret_ty =
- fcx.register_infer_ok_obligations(fcx.infcx.replace_opaque_types_with_inference_vars(
- declared_ret_ty,
- body.value.hir_id,
- decl.output.span(),
- param_env,
- ));
-    // If we replaced declared_ret_ty with infer vars, then we must be inferring
- // an opaque type, so set a flag so we can improve diagnostics.
- fcx.return_type_has_opaque = ret_ty != declared_ret_ty;
-
- fcx.ret_coercion = Some(RefCell::new(CoerceMany::new(ret_ty)));
- fcx.ret_type_span = Some(decl.output.span());
-
- let span = body.value.span;
-
- fn_maybe_err(tcx, span, fn_sig.abi);
-
- if fn_sig.abi == Abi::RustCall {
- let expected_args = if let ImplicitSelfKind::None = decl.implicit_self { 1 } else { 2 };
-
- let err = || {
- let item = match tcx.hir().get(fn_id) {
- Node::Item(hir::Item { kind: ItemKind::Fn(header, ..), .. }) => Some(header),
- Node::ImplItem(hir::ImplItem {
- kind: hir::ImplItemKind::Fn(header, ..), ..
- }) => Some(header),
- Node::TraitItem(hir::TraitItem {
- kind: hir::TraitItemKind::Fn(header, ..),
- ..
- }) => Some(header),
- // Closures are RustCall, but they tuple their arguments, so shouldn't be checked
- Node::Expr(hir::Expr { kind: hir::ExprKind::Closure { .. }, .. }) => None,
- node => bug!("Item being checked wasn't a function/closure: {:?}", node),
- };
-
- if let Some(header) = item {
- tcx.sess.span_err(header.span, "functions with the \"rust-call\" ABI must take a single non-self argument that is a tuple");
- }
- };
-
- if fn_sig.inputs().len() != expected_args {
- err()
- } else {
- // FIXME(CraftSpider) Add a check on parameter expansion, so we don't just make the ICE happen later on
- // This will probably require wide-scale changes to support a TupleKind obligation
- // We can't resolve this without knowing the type of the param
- if !matches!(fn_sig.inputs()[expected_args - 1].kind(), ty::Tuple(_) | ty::Param(_)) {
- err()
- }
- }
- }
-
- if body.generator_kind.is_some() && can_be_generator.is_some() {
- let yield_ty = fcx
- .next_ty_var(TypeVariableOrigin { kind: TypeVariableOriginKind::TypeInference, span });
- fcx.require_type_is_sized(yield_ty, span, traits::SizedYieldType);
-
- // Resume type defaults to `()` if the generator has no argument.
- let resume_ty = fn_sig.inputs().get(0).copied().unwrap_or_else(|| tcx.mk_unit());
-
- fcx.resume_yield_tys = Some((resume_ty, yield_ty));
- }
-
- GatherLocalsVisitor::new(&fcx).visit_body(body);
-
- // C-variadic fns also have a `VaList` input that's not listed in `fn_sig`
- // (as it's created inside the body itself, not passed in from outside).
- let maybe_va_list = if fn_sig.c_variadic {
- let span = body.params.last().unwrap().span;
- let va_list_did = tcx.require_lang_item(LangItem::VaList, Some(span));
- let region = fcx.next_region_var(RegionVariableOrigin::MiscVariable(span));
-
- Some(tcx.bound_type_of(va_list_did).subst(tcx, &[region.into()]))
- } else {
- None
- };
-
- // Add formal parameters.
- let inputs_hir = hir.fn_decl_by_hir_id(fn_id).map(|decl| &decl.inputs);
- let inputs_fn = fn_sig.inputs().iter().copied();
- for (idx, (param_ty, param)) in inputs_fn.chain(maybe_va_list).zip(body.params).enumerate() {
- // Check the pattern.
- let ty_span = try { inputs_hir?.get(idx)?.span };
- fcx.check_pat_top(&param.pat, param_ty, ty_span, false);
-
- // Check that argument is Sized.
- // The check for a non-trivial pattern is a hack to avoid duplicate warnings
- // for simple cases like `fn foo(x: Trait)`,
- // where we would error once on the parameter as a whole, and once on the binding `x`.
- if param.pat.simple_ident().is_none() && !tcx.features().unsized_fn_params {
- fcx.require_type_is_sized(param_ty, param.pat.span, traits::SizedArgumentType(ty_span));
- }
-
- fcx.write_ty(param.hir_id, param_ty);
- }
-
- inherited.typeck_results.borrow_mut().liberated_fn_sigs_mut().insert(fn_id, fn_sig);
-
- fcx.in_tail_expr = true;
- if let ty::Dynamic(..) = declared_ret_ty.kind() {
- // FIXME: We need to verify that the return type is `Sized` after the return expression has
- // been evaluated so that we have types available for all the nodes being returned, but that
- // requires the coerced evaluated type to be stored. Moving `check_return_expr` before this
- // causes unsized errors stemming from the `declared_ret_ty` to point at the return expression,
- // while with the current ordering we ignore the tail expression's type because we
- // don't know it yet. We can't do `check_expr_kind` while keeping `check_return_expr`
- // because we would trigger "unreachable expression" lints unconditionally.
- // Because of all of this, we perform a crude check for the simplest `!Sized` case that a
- // newcomer might write (returning a bare trait), and in that case we populate the tail
- // expression's type so that the suggestion will be correct, but we ignore all other
- // possible cases.
- fcx.check_expr(&body.value);
- fcx.require_type_is_sized(declared_ret_ty, decl.output.span(), traits::SizedReturnType);
- } else {
- fcx.require_type_is_sized(declared_ret_ty, decl.output.span(), traits::SizedReturnType);
- fcx.check_return_expr(&body.value, false);
- }
- fcx.in_tail_expr = false;
-
- // We insert the deferred_generator_interiors entry after visiting the body.
- // This ensures that all nested generators appear before the entry of this generator.
- // resolve_generator_interiors relies on this property.
- let gen_ty = if let (Some(_), Some(gen_kind)) = (can_be_generator, body.generator_kind) {
- let interior = fcx
- .next_ty_var(TypeVariableOrigin { kind: TypeVariableOriginKind::MiscVariable, span });
- fcx.deferred_generator_interiors.borrow_mut().push((body.id(), interior, gen_kind));
-
- let (resume_ty, yield_ty) = fcx.resume_yield_tys.unwrap();
- Some(GeneratorTypes {
- resume_ty,
- yield_ty,
- interior,
- movability: can_be_generator.unwrap(),
- })
- } else {
- None
- };
-
- // Finalize the return check by taking the LUB of the return types
- // we saw and assigning it to the expected return type. This isn't
- // really expected to fail, since the coercions would have failed
- // earlier when trying to find a LUB.
- let coercion = fcx.ret_coercion.take().unwrap().into_inner();
- let mut actual_return_ty = coercion.complete(&fcx);
- debug!("actual_return_ty = {:?}", actual_return_ty);
- if let ty::Dynamic(..) = declared_ret_ty.kind() {
- // We have special-cased the case where the function is declared
- // `-> dyn Foo` and we don't actually relate it to the
- // `fcx.ret_coercion`, so just substitute a type variable.
- actual_return_ty =
- fcx.next_ty_var(TypeVariableOrigin { kind: TypeVariableOriginKind::DynReturnFn, span });
- debug!("actual_return_ty replaced with {:?}", actual_return_ty);
- }
-
- // HACK(oli-obk, compiler-errors): We should be comparing this against
- // `declared_ret_ty`, but then anything uninferred would be inferred to
- // the opaque type itself. That again would cause writeback to assume
- // we have a recursive call site and do the sadly stabilized fallback to `()`.
- fcx.demand_suptype(span, ret_ty, actual_return_ty);
-
- // Check that a function marked as `#[panic_handler]` has signature `fn(&PanicInfo) -> !`
- if let Some(panic_impl_did) = tcx.lang_items().panic_impl()
- && panic_impl_did == hir.local_def_id(fn_id).to_def_id()
- {
- check_panic_info_fn(tcx, panic_impl_did.expect_local(), fn_sig, decl, declared_ret_ty);
- }
-
- // Check that a function marked as `#[alloc_error_handler]` has signature `fn(Layout) -> !`
- if let Some(alloc_error_handler_did) = tcx.lang_items().oom()
- && alloc_error_handler_did == hir.local_def_id(fn_id).to_def_id()
- {
- check_alloc_error_fn(tcx, alloc_error_handler_did.expect_local(), fn_sig, decl, declared_ret_ty);
- }
-
- (fcx, gen_ty)
-}
-
-fn check_panic_info_fn(
- tcx: TyCtxt<'_>,
- fn_id: LocalDefId,
- fn_sig: ty::FnSig<'_>,
- decl: &hir::FnDecl<'_>,
- declared_ret_ty: Ty<'_>,
-) {
- let Some(panic_info_did) = tcx.lang_items().panic_info() else {
- tcx.sess.err("language item required, but not found: `panic_info`");
- return;
- };
-
- if *declared_ret_ty.kind() != ty::Never {
- tcx.sess.span_err(decl.output.span(), "return type should be `!`");
- }
-
- let inputs = fn_sig.inputs();
- if inputs.len() != 1 {
- tcx.sess.span_err(tcx.def_span(fn_id), "function should have one argument");
- return;
- }
-
- let arg_is_panic_info = match *inputs[0].kind() {
- ty::Ref(region, ty, mutbl) => match *ty.kind() {
- ty::Adt(ref adt, _) => {
- adt.did() == panic_info_did && mutbl == hir::Mutability::Not && !region.is_static()
- }
- _ => false,
- },
- _ => false,
- };
-
- if !arg_is_panic_info {
- tcx.sess.span_err(decl.inputs[0].span, "argument should be `&PanicInfo`");
- }
-
- let DefKind::Fn = tcx.def_kind(fn_id) else {
- let span = tcx.def_span(fn_id);
- tcx.sess.span_err(span, "should be a function");
- return;
- };
-
- let generic_counts = tcx.generics_of(fn_id).own_counts();
- if generic_counts.types != 0 {
- let span = tcx.def_span(fn_id);
- tcx.sess.span_err(span, "should have no type parameters");
- }
- if generic_counts.consts != 0 {
- let span = tcx.def_span(fn_id);
- tcx.sess.span_err(span, "should have no const parameters");
- }
-}
-
-fn check_alloc_error_fn(
- tcx: TyCtxt<'_>,
- fn_id: LocalDefId,
- fn_sig: ty::FnSig<'_>,
- decl: &hir::FnDecl<'_>,
- declared_ret_ty: Ty<'_>,
-) {
- let Some(alloc_layout_did) = tcx.lang_items().alloc_layout() else {
- tcx.sess.err("language item required, but not found: `alloc_layout`");
- return;
- };
-
- if *declared_ret_ty.kind() != ty::Never {
- tcx.sess.span_err(decl.output.span(), "return type should be `!`");
- }
-
- let inputs = fn_sig.inputs();
- if inputs.len() != 1 {
- tcx.sess.span_err(tcx.def_span(fn_id), "function should have one argument");
- return;
- }
-
- let arg_is_alloc_layout = match inputs[0].kind() {
- ty::Adt(ref adt, _) => adt.did() == alloc_layout_did,
- _ => false,
- };
-
- if !arg_is_alloc_layout {
- tcx.sess.span_err(decl.inputs[0].span, "argument should be `Layout`");
- }
-
- let DefKind::Fn = tcx.def_kind(fn_id) else {
- let span = tcx.def_span(fn_id);
- tcx.sess.span_err(span, "`#[alloc_error_handler]` should be a function");
- return;
- };
-
- let generic_counts = tcx.generics_of(fn_id).own_counts();
- if generic_counts.types != 0 {
- let span = tcx.def_span(fn_id);
- tcx.sess.span_err(span, "`#[alloc_error_handler]` function should have no type parameters");
- }
- if generic_counts.consts != 0 {
- let span = tcx.def_span(fn_id);
- tcx.sess
- .span_err(span, "`#[alloc_error_handler]` function should have no const parameters");
- }
-}
-
-fn check_struct(tcx: TyCtxt<'_>, def_id: LocalDefId) {
- let def = tcx.adt_def(def_id);
- let span = tcx.def_span(def_id);
- def.destructor(tcx); // force the destructor to be evaluated
- check_representable(tcx, span, def_id);
-
- if def.repr().simd() {
- check_simd(tcx, span, def_id);
- }
-
- check_transparent(tcx, span, def);
- check_packed(tcx, span, def);
-}
-
-fn check_union(tcx: TyCtxt<'_>, def_id: LocalDefId) {
- let def = tcx.adt_def(def_id);
- let span = tcx.def_span(def_id);
- def.destructor(tcx); // force the destructor to be evaluated
- check_representable(tcx, span, def_id);
- check_transparent(tcx, span, def);
- check_union_fields(tcx, span, def_id);
- check_packed(tcx, span, def);
-}
-
-/// Check that the fields of the `union` do not need dropping.
-fn check_union_fields(tcx: TyCtxt<'_>, span: Span, item_def_id: LocalDefId) -> bool {
- let item_type = tcx.type_of(item_def_id);
- if let ty::Adt(def, substs) = item_type.kind() {
- assert!(def.is_union());
-
- fn allowed_union_field<'tcx>(
- ty: Ty<'tcx>,
- tcx: TyCtxt<'tcx>,
- param_env: ty::ParamEnv<'tcx>,
- span: Span,
- ) -> bool {
- // We don't just accept all !needs_drop fields, due to semver concerns.
- match ty.kind() {
- ty::Ref(..) => true, // references never drop (even mutable refs, which are non-Copy and hence fail the later check)
- ty::Tuple(tys) => {
- // allow tuples of allowed types
- tys.iter().all(|ty| allowed_union_field(ty, tcx, param_env, span))
- }
- ty::Array(elem, _len) => {
- // Like `Copy`, we do *not* special-case length 0.
- allowed_union_field(*elem, tcx, param_env, span)
- }
- _ => {
- // Fallback case: allow `ManuallyDrop` and things that are `Copy`.
- ty.ty_adt_def().is_some_and(|adt_def| adt_def.is_manually_drop())
- || ty.is_copy_modulo_regions(tcx.at(span), param_env)
- }
- }
- }
-
- let param_env = tcx.param_env(item_def_id);
- for field in &def.non_enum_variant().fields {
- let field_ty = field.ty(tcx, substs);
-
- if !allowed_union_field(field_ty, tcx, param_env, span) {
- let (field_span, ty_span) = match tcx.hir().get_if_local(field.did) {
- // We are currently checking the type this field came from, so it must be local.
- Some(Node::Field(field)) => (field.span, field.ty.span),
- _ => unreachable!("mir field has to correspond to hir field"),
- };
- struct_span_err!(
- tcx.sess,
- field_span,
- E0740,
- "unions cannot contain fields that may need dropping"
- )
- .note(
- "a type is guaranteed not to need dropping \
- when it implements `Copy`, or when it is the special `ManuallyDrop<_>` type",
- )
- .multipart_suggestion_verbose(
- "when the type does not implement `Copy`, \
- wrap it inside a `ManuallyDrop<_>` and ensure it is manually dropped",
- vec![
- (ty_span.shrink_to_lo(), "std::mem::ManuallyDrop<".into()),
- (ty_span.shrink_to_hi(), ">".into()),
- ],
- Applicability::MaybeIncorrect,
- )
- .emit();
- return false;
- } else if field_ty.needs_drop(tcx, param_env) {
- // This should never happen. But we can get here e.g. in case of name resolution errors.
- tcx.sess.delay_span_bug(span, "we should never accept maybe-dropping union fields");
- }
- }
- } else {
- span_bug!(span, "unions must be ty::Adt, but got {:?}", item_type.kind());
- }
- true
-}
-
-/// Check that a `static` is inhabited.
-fn check_static_inhabited<'tcx>(tcx: TyCtxt<'tcx>, def_id: LocalDefId) {
- // Make sure statics are inhabited.
- // Other parts of the compiler assume that there are no uninhabited places. In principle it
- // would be enough to check this for `extern` statics, as statics with an initializer will
- // have UB during initialization if they are uninhabited, but there also seems to be no good
- // reason to allow any statics to be uninhabited.
- let ty = tcx.type_of(def_id);
- let span = tcx.def_span(def_id);
- let layout = match tcx.layout_of(ParamEnv::reveal_all().and(ty)) {
- Ok(l) => l,
- // Foreign statics that overflow their allowed size should emit an error
- Err(LayoutError::SizeOverflow(_))
- if {
- let node = tcx.hir().get_by_def_id(def_id);
- matches!(
- node,
- hir::Node::ForeignItem(hir::ForeignItem {
- kind: hir::ForeignItemKind::Static(..),
- ..
- })
- )
- } =>
- {
- tcx.sess
- .struct_span_err(span, "extern static is too large for the current architecture")
- .emit();
- return;
- }
- // Generic statics are rejected, but we still reach this case.
- Err(e) => {
- tcx.sess.delay_span_bug(span, &e.to_string());
- return;
- }
- };
- if layout.abi.is_uninhabited() {
- tcx.struct_span_lint_hir(
- UNINHABITED_STATIC,
- tcx.hir().local_def_id_to_hir_id(def_id),
- span,
- |lint| {
- lint.build("static of uninhabited type")
- .note("uninhabited statics cannot be initialized, and any access would be an immediate error")
- .emit();
- },
- );
- }
-}
-
-/// Checks that an opaque type does not contain cycles and does not use `Self` or `T::Foo`
-/// projections that would result in "inheriting lifetimes".
-pub(super) fn check_opaque<'tcx>(
- tcx: TyCtxt<'tcx>,
- def_id: LocalDefId,
- substs: SubstsRef<'tcx>,
- origin: &hir::OpaqueTyOrigin,
-) {
- let span = tcx.def_span(def_id);
- check_opaque_for_inheriting_lifetimes(tcx, def_id, span);
- if tcx.type_of(def_id).references_error() {
- return;
- }
- if check_opaque_for_cycles(tcx, def_id, substs, span, origin).is_err() {
- return;
- }
- check_opaque_meets_bounds(tcx, def_id, substs, span, origin);
-}
-
-/// Checks that an opaque type does not use `Self` or `T::Foo` projections that would result
-/// in "inheriting lifetimes".
-#[instrument(level = "debug", skip(tcx, span))]
-pub(super) fn check_opaque_for_inheriting_lifetimes<'tcx>(
- tcx: TyCtxt<'tcx>,
- def_id: LocalDefId,
- span: Span,
-) {
- let item = tcx.hir().expect_item(def_id);
- debug!(?item, ?span);
-
- struct FoundParentLifetime;
- struct FindParentLifetimeVisitor<'tcx>(&'tcx ty::Generics);
- impl<'tcx> ty::visit::TypeVisitor<'tcx> for FindParentLifetimeVisitor<'tcx> {
- type BreakTy = FoundParentLifetime;
-
- fn visit_region(&mut self, r: ty::Region<'tcx>) -> ControlFlow<Self::BreakTy> {
- debug!("FindParentLifetimeVisitor: r={:?}", r);
- if let ty::ReEarlyBound(ty::EarlyBoundRegion { index, .. }) = *r {
- if index < self.0.parent_count as u32 {
- return ControlFlow::Break(FoundParentLifetime);
- } else {
- return ControlFlow::CONTINUE;
- }
- }
-
- r.super_visit_with(self)
- }
-
- fn visit_const(&mut self, c: ty::Const<'tcx>) -> ControlFlow<Self::BreakTy> {
- if let ty::ConstKind::Unevaluated(..) = c.kind() {
- // FIXME(#72219) We currently don't detect lifetimes within substs
- // which would violate this check. Even though the particular substitution is not used
- // within the const, this should still be fixed.
- return ControlFlow::CONTINUE;
- }
- c.super_visit_with(self)
- }
- }
-
- struct ProhibitOpaqueVisitor<'tcx> {
- tcx: TyCtxt<'tcx>,
- opaque_identity_ty: Ty<'tcx>,
- generics: &'tcx ty::Generics,
- selftys: Vec<(Span, Option<String>)>,
- }
-
- impl<'tcx> ty::visit::TypeVisitor<'tcx> for ProhibitOpaqueVisitor<'tcx> {
- type BreakTy = Ty<'tcx>;
-
- fn visit_ty(&mut self, t: Ty<'tcx>) -> ControlFlow<Self::BreakTy> {
- debug!("check_opaque_for_inheriting_lifetimes: (visit_ty) t={:?}", t);
- if t == self.opaque_identity_ty {
- ControlFlow::CONTINUE
- } else {
- t.super_visit_with(&mut FindParentLifetimeVisitor(self.generics))
- .map_break(|FoundParentLifetime| t)
- }
- }
- }
-
- impl<'tcx> Visitor<'tcx> for ProhibitOpaqueVisitor<'tcx> {
- type NestedFilter = nested_filter::OnlyBodies;
-
- fn nested_visit_map(&mut self) -> Self::Map {
- self.tcx.hir()
- }
-
- fn visit_ty(&mut self, arg: &'tcx hir::Ty<'tcx>) {
- match arg.kind {
- hir::TyKind::Path(hir::QPath::Resolved(None, path)) => match &path.segments {
- [
- PathSegment {
- res: Some(Res::SelfTy { trait_: _, alias_to: impl_ref }),
- ..
- },
- ] => {
- let impl_ty_name =
- impl_ref.map(|(def_id, _)| self.tcx.def_path_str(def_id));
- self.selftys.push((path.span, impl_ty_name));
- }
- _ => {}
- },
- _ => {}
- }
- hir::intravisit::walk_ty(self, arg);
- }
- }
-
- if let ItemKind::OpaqueTy(hir::OpaqueTy {
- origin: hir::OpaqueTyOrigin::AsyncFn(..) | hir::OpaqueTyOrigin::FnReturn(..),
- ..
- }) = item.kind
- {
- let mut visitor = ProhibitOpaqueVisitor {
- opaque_identity_ty: tcx.mk_opaque(
- def_id.to_def_id(),
- InternalSubsts::identity_for_item(tcx, def_id.to_def_id()),
- ),
- generics: tcx.generics_of(def_id),
- tcx,
- selftys: vec![],
- };
- let prohibit_opaque = tcx
- .explicit_item_bounds(def_id)
- .iter()
- .try_for_each(|(predicate, _)| predicate.visit_with(&mut visitor));
- debug!(
- "check_opaque_for_inheriting_lifetimes: prohibit_opaque={:?}, visitor.opaque_identity_ty={:?}, visitor.generics={:?}",
- prohibit_opaque, visitor.opaque_identity_ty, visitor.generics
- );
-
- if let Some(ty) = prohibit_opaque.break_value() {
- visitor.visit_item(&item);
- let is_async = match item.kind {
- ItemKind::OpaqueTy(hir::OpaqueTy { origin, .. }) => {
- matches!(origin, hir::OpaqueTyOrigin::AsyncFn(..))
- }
- _ => unreachable!(),
- };
-
- let mut err = struct_span_err!(
- tcx.sess,
- span,
- E0760,
- "`{}` return type cannot contain a projection or `Self` that references lifetimes from \
- a parent scope",
- if is_async { "async fn" } else { "impl Trait" },
- );
-
- for (span, name) in visitor.selftys {
- err.span_suggestion(
- span,
- "consider spelling out the type instead",
- name.unwrap_or_else(|| format!("{:?}", ty)),
- Applicability::MaybeIncorrect,
- );
- }
- err.emit();
- }
- }
-}
-
-/// Checks that an opaque type does not contain cycles.
-pub(super) fn check_opaque_for_cycles<'tcx>(
- tcx: TyCtxt<'tcx>,
- def_id: LocalDefId,
- substs: SubstsRef<'tcx>,
- span: Span,
- origin: &hir::OpaqueTyOrigin,
-) -> Result<(), ErrorGuaranteed> {
- if tcx.try_expand_impl_trait_type(def_id.to_def_id(), substs).is_err() {
- let reported = match origin {
- hir::OpaqueTyOrigin::AsyncFn(..) => async_opaque_type_cycle_error(tcx, span),
- _ => opaque_type_cycle_error(tcx, def_id, span),
- };
- Err(reported)
- } else {
- Ok(())
- }
-}
-
-/// Check that the concrete type behind `impl Trait` actually implements `Trait`.
-///
-/// This is mostly checked at the places that specify the opaque type, but we
-/// check those cases in the `param_env` of that function, which may have
-/// bounds not on this opaque type:
-///
- /// type X<T> = impl Clone;
-/// fn f<T: Clone>(t: T) -> X<T> {
-/// t
-/// }
-///
- /// Without this check the above code is incorrectly accepted: we would ICE if
- /// someone tried, for example, to clone an `Option<X<&mut ()>>`.
-#[instrument(level = "debug", skip(tcx))]
-fn check_opaque_meets_bounds<'tcx>(
- tcx: TyCtxt<'tcx>,
- def_id: LocalDefId,
- substs: SubstsRef<'tcx>,
- span: Span,
- origin: &hir::OpaqueTyOrigin,
-) {
- let hidden_type = tcx.bound_type_of(def_id.to_def_id()).subst(tcx, substs);
-
- let hir_id = tcx.hir().local_def_id_to_hir_id(def_id);
- let defining_use_anchor = match *origin {
- hir::OpaqueTyOrigin::FnReturn(did) | hir::OpaqueTyOrigin::AsyncFn(did) => did,
- hir::OpaqueTyOrigin::TyAlias => def_id,
- };
- let param_env = tcx.param_env(defining_use_anchor);
-
- tcx.infer_ctxt().with_opaque_type_inference(DefiningAnchor::Bind(defining_use_anchor)).enter(
- move |infcx| {
- let ocx = ObligationCtxt::new(&infcx);
- let opaque_ty = tcx.mk_opaque(def_id.to_def_id(), substs);
-
- let misc_cause = traits::ObligationCause::misc(span, hir_id);
-
- match infcx.at(&misc_cause, param_env).eq(opaque_ty, hidden_type) {
- Ok(infer_ok) => ocx.register_infer_ok_obligations(infer_ok),
- Err(ty_err) => {
- tcx.sess.delay_span_bug(
- span,
- &format!("could not unify `{hidden_type}` with revealed type:\n{ty_err}"),
- );
- }
- }
-
- // Additionally require the hidden type to be well-formed with only the generics of the opaque type.
- // Defining use functions may have more bounds than the opaque type, which is ok, as long as the
- // hidden type is well formed even without those bounds.
- let predicate = ty::Binder::dummy(ty::PredicateKind::WellFormed(hidden_type.into()))
- .to_predicate(tcx);
- ocx.register_obligation(Obligation::new(misc_cause, param_env, predicate));
-
- // Check that all obligations are satisfied by the implementation's
- // version.
- let errors = ocx.select_all_or_error();
- if !errors.is_empty() {
- infcx.report_fulfillment_errors(&errors, None, false);
- }
- match origin {
- // Checked when type checking the function containing them.
- hir::OpaqueTyOrigin::FnReturn(..) | hir::OpaqueTyOrigin::AsyncFn(..) => {}
- // Can have different predicates to their defining use
- hir::OpaqueTyOrigin::TyAlias => {
- let outlives_environment = OutlivesEnvironment::new(param_env);
- infcx.check_region_obligations_and_report_errors(
- defining_use_anchor,
- &outlives_environment,
- );
- }
- }
- // Clean up after ourselves
- let _ = infcx.inner.borrow_mut().opaque_type_storage.take_opaque_types();
- },
- );
-}
-
-fn check_item_type<'tcx>(tcx: TyCtxt<'tcx>, id: hir::ItemId) {
- debug!(
- "check_item_type(it.def_id={:?}, it.name={})",
- id.def_id,
- tcx.def_path_str(id.def_id.to_def_id())
- );
- let _indenter = indenter();
- match tcx.def_kind(id.def_id) {
- DefKind::Static(..) => {
- tcx.ensure().typeck(id.def_id);
- maybe_check_static_with_link_section(tcx, id.def_id);
- check_static_inhabited(tcx, id.def_id);
- }
- DefKind::Const => {
- tcx.ensure().typeck(id.def_id);
- }
- DefKind::Enum => {
- let item = tcx.hir().item(id);
- let hir::ItemKind::Enum(ref enum_definition, _) = item.kind else {
- return;
- };
- check_enum(tcx, &enum_definition.variants, item.def_id);
- }
- DefKind::Fn => {} // entirely within check_item_body
- DefKind::Impl => {
- let it = tcx.hir().item(id);
- let hir::ItemKind::Impl(ref impl_) = it.kind else {
- return;
- };
- debug!("ItemKind::Impl {} with id {:?}", it.ident, it.def_id);
- if let Some(impl_trait_ref) = tcx.impl_trait_ref(it.def_id) {
- check_impl_items_against_trait(
- tcx,
- it.span,
- it.def_id,
- impl_trait_ref,
- &impl_.items,
- );
- check_on_unimplemented(tcx, it);
- }
- }
- DefKind::Trait => {
- let it = tcx.hir().item(id);
- let hir::ItemKind::Trait(_, _, _, _, ref items) = it.kind else {
- return;
- };
- check_on_unimplemented(tcx, it);
-
- for item in items.iter() {
- let item = tcx.hir().trait_item(item.id);
- match item.kind {
- hir::TraitItemKind::Fn(ref sig, _) => {
- let abi = sig.header.abi;
- fn_maybe_err(tcx, item.ident.span, abi);
- }
- hir::TraitItemKind::Type(.., Some(default)) => {
- let assoc_item = tcx.associated_item(item.def_id);
- let trait_substs =
- InternalSubsts::identity_for_item(tcx, it.def_id.to_def_id());
- let _: Result<_, rustc_errors::ErrorGuaranteed> = check_type_bounds(
- tcx,
- assoc_item,
- assoc_item,
- default.span,
- ty::TraitRef { def_id: it.def_id.to_def_id(), substs: trait_substs },
- );
- }
- _ => {}
- }
- }
- }
- DefKind::Struct => {
- check_struct(tcx, id.def_id);
- }
- DefKind::Union => {
- check_union(tcx, id.def_id);
- }
- DefKind::OpaqueTy => {
- let item = tcx.hir().item(id);
- let hir::ItemKind::OpaqueTy(hir::OpaqueTy { origin, .. }) = item.kind else {
- return;
- };
- // HACK(jynelson): trying to infer the type of `impl trait` breaks documenting
- // `async-std` (and `pub async fn` in general).
- // Since rustdoc doesn't care about the concrete type behind `impl Trait`, just don't look at it!
- // See https://github.com/rust-lang/rust/issues/75100
- if !tcx.sess.opts.actually_rustdoc {
- let substs = InternalSubsts::identity_for_item(tcx, item.def_id.to_def_id());
- check_opaque(tcx, item.def_id, substs, &origin);
- }
- }
- DefKind::TyAlias => {
- let pty_ty = tcx.type_of(id.def_id);
- let generics = tcx.generics_of(id.def_id);
- check_type_params_are_used(tcx, &generics, pty_ty);
- }
- DefKind::ForeignMod => {
- let it = tcx.hir().item(id);
- let hir::ItemKind::ForeignMod { abi, items } = it.kind else {
- return;
- };
- check_abi(tcx, it.hir_id(), it.span, abi);
-
- if abi == Abi::RustIntrinsic {
- for item in items {
- let item = tcx.hir().foreign_item(item.id);
- intrinsic::check_intrinsic_type(tcx, item);
- }
- } else if abi == Abi::PlatformIntrinsic {
- for item in items {
- let item = tcx.hir().foreign_item(item.id);
- intrinsic::check_platform_intrinsic_type(tcx, item);
- }
- } else {
- for item in items {
- let def_id = item.id.def_id;
- let generics = tcx.generics_of(def_id);
- let own_counts = generics.own_counts();
- if generics.params.len() - own_counts.lifetimes != 0 {
- let (kinds, kinds_pl, egs) = match (own_counts.types, own_counts.consts) {
- (_, 0) => ("type", "types", Some("u32")),
- // We don't specify an example value, because we can't generate
- // a valid value for any type.
- (0, _) => ("const", "consts", None),
- _ => ("type or const", "types or consts", None),
- };
- struct_span_err!(
- tcx.sess,
- item.span,
- E0044,
- "foreign items may not have {kinds} parameters",
- )
- .span_label(item.span, &format!("can't have {kinds} parameters"))
- .help(
- // FIXME: once we start storing spans for type arguments, turn this
- // into a suggestion.
- &format!(
- "replace the {} parameters with concrete {}{}",
- kinds,
- kinds_pl,
- egs.map(|egs| format!(" like `{}`", egs)).unwrap_or_default(),
- ),
- )
- .emit();
- }
-
- let item = tcx.hir().foreign_item(item.id);
- match item.kind {
- hir::ForeignItemKind::Fn(ref fn_decl, _, _) => {
- require_c_abi_if_c_variadic(tcx, fn_decl, abi, item.span);
- }
- hir::ForeignItemKind::Static(..) => {
- check_static_inhabited(tcx, def_id);
- }
- _ => {}
- }
- }
- }
- }
- DefKind::GlobalAsm => {
- let it = tcx.hir().item(id);
- let hir::ItemKind::GlobalAsm(asm) = it.kind else { span_bug!(it.span, "DefKind::GlobalAsm but got {:#?}", it) };
- InlineAsmCtxt::new_global_asm(tcx).check_asm(asm, id.hir_id());
- }
- _ => {}
- }
-}
-
-pub(super) fn check_on_unimplemented(tcx: TyCtxt<'_>, item: &hir::Item<'_>) {
- // an error would be reported if this fails.
- let _ = traits::OnUnimplementedDirective::of_item(tcx, item.def_id.to_def_id());
-}
-
-pub(super) fn check_specialization_validity<'tcx>(
- tcx: TyCtxt<'tcx>,
- trait_def: &ty::TraitDef,
- trait_item: &ty::AssocItem,
- impl_id: DefId,
- impl_item: &hir::ImplItemRef,
-) {
- let Ok(ancestors) = trait_def.ancestors(tcx, impl_id) else { return };
- let mut ancestor_impls = ancestors.skip(1).filter_map(|parent| {
- if parent.is_from_trait() {
- None
- } else {
- Some((parent, parent.item(tcx, trait_item.def_id)))
- }
- });
-
- let opt_result = ancestor_impls.find_map(|(parent_impl, parent_item)| {
- match parent_item {
- // Parent impl exists, and contains the parent item we're trying to specialize, but
- // doesn't mark it `default`.
- Some(parent_item) if traits::impl_item_is_final(tcx, &parent_item) => {
- Some(Err(parent_impl.def_id()))
- }
-
- // Parent impl contains item and makes it specializable.
- Some(_) => Some(Ok(())),
-
- // Parent impl doesn't mention the item. This means it's inherited from the
- // grandparent. In that case, if parent is a `default impl`, inherited items use the
- // "defaultness" from the grandparent, else they are final.
- None => {
- if tcx.impl_defaultness(parent_impl.def_id()).is_default() {
- None
- } else {
- Some(Err(parent_impl.def_id()))
- }
- }
- }
- });
-
- // If `opt_result` is `None`, we have only encountered `default impl`s that don't contain the
- // item. This is allowed; the item isn't actually getting specialized here.
- let result = opt_result.unwrap_or(Ok(()));
-
- if let Err(parent_impl) = result {
- report_forbidden_specialization(tcx, impl_item, parent_impl);
- }
-}
-
-fn check_impl_items_against_trait<'tcx>(
- tcx: TyCtxt<'tcx>,
- full_impl_span: Span,
- impl_id: LocalDefId,
- impl_trait_ref: ty::TraitRef<'tcx>,
- impl_item_refs: &[hir::ImplItemRef],
-) {
- // If the trait reference itself is erroneous (so the compilation is going
- // to fail), skip checking the items here -- the `impl_item` table in `tcx`
- // isn't populated for such impls.
- if impl_trait_ref.references_error() {
- return;
- }
-
- // Negative impls are not expected to have any items
- match tcx.impl_polarity(impl_id) {
- ty::ImplPolarity::Reservation | ty::ImplPolarity::Positive => {}
- ty::ImplPolarity::Negative => {
- if let [first_item_ref, ..] = impl_item_refs {
- let first_item_span = tcx.hir().impl_item(first_item_ref.id).span;
- struct_span_err!(
- tcx.sess,
- first_item_span,
- E0749,
- "negative impls cannot have any items"
- )
- .emit();
- }
- return;
- }
- }
-
- let trait_def = tcx.trait_def(impl_trait_ref.def_id);
-
- for impl_item in impl_item_refs {
- let ty_impl_item = tcx.associated_item(impl_item.id.def_id);
- let ty_trait_item = if let Some(trait_item_id) = ty_impl_item.trait_item_def_id {
- tcx.associated_item(trait_item_id)
- } else {
- // Checked in `associated_item`.
- tcx.sess.delay_span_bug(impl_item.span, "missing associated item in trait");
- continue;
- };
- let impl_item_full = tcx.hir().impl_item(impl_item.id);
- match impl_item_full.kind {
- hir::ImplItemKind::Const(..) => {
- // Find associated const definition.
- compare_const_impl(
- tcx,
- &ty_impl_item,
- impl_item.span,
- &ty_trait_item,
- impl_trait_ref,
- );
- }
- hir::ImplItemKind::Fn(..) => {
- let opt_trait_span = tcx.hir().span_if_local(ty_trait_item.def_id);
- compare_impl_method(
- tcx,
- &ty_impl_item,
- &ty_trait_item,
- impl_trait_ref,
- opt_trait_span,
- );
- }
- hir::ImplItemKind::TyAlias(impl_ty) => {
- let opt_trait_span = tcx.hir().span_if_local(ty_trait_item.def_id);
- compare_ty_impl(
- tcx,
- &ty_impl_item,
- impl_ty.span,
- &ty_trait_item,
- impl_trait_ref,
- opt_trait_span,
- );
- }
- }
-
- check_specialization_validity(
- tcx,
- trait_def,
- &ty_trait_item,
- impl_id.to_def_id(),
- impl_item,
- );
- }
-
- if let Ok(ancestors) = trait_def.ancestors(tcx, impl_id.to_def_id()) {
- // Check for missing items from trait
- let mut missing_items = Vec::new();
-
- let mut must_implement_one_of: Option<&[Ident]> =
- trait_def.must_implement_one_of.as_deref();
-
- for &trait_item_id in tcx.associated_item_def_ids(impl_trait_ref.def_id) {
- let is_implemented = ancestors
- .leaf_def(tcx, trait_item_id)
- .map_or(false, |node_item| node_item.item.defaultness(tcx).has_value());
-
- if !is_implemented && tcx.impl_defaultness(impl_id).is_final() {
- missing_items.push(tcx.associated_item(trait_item_id));
- }
-
- if let Some(required_items) = &must_implement_one_of {
- // true if this item is specifically implemented in this impl
- let is_implemented_here = ancestors
- .leaf_def(tcx, trait_item_id)
- .map_or(false, |node_item| !node_item.defining_node.is_from_trait());
-
- if is_implemented_here {
- let trait_item = tcx.associated_item(trait_item_id);
- if required_items.contains(&trait_item.ident(tcx)) {
- must_implement_one_of = None;
- }
- }
- }
- }
-
- if !missing_items.is_empty() {
- missing_items_err(tcx, tcx.def_span(impl_id), &missing_items, full_impl_span);
- }
-
- if let Some(missing_items) = must_implement_one_of {
- let attr_span = tcx
- .get_attr(impl_trait_ref.def_id, sym::rustc_must_implement_one_of)
- .map(|attr| attr.span);
-
- missing_items_must_implement_one_of_err(
- tcx,
- tcx.def_span(impl_id),
- missing_items,
- attr_span,
- );
- }
- }
-}
-
-/// Checks whether a type can be represented in memory. In particular, it
-/// identifies types that contain themselves without indirection through a
-/// pointer, which would mean their size is unbounded.
-pub(super) fn check_representable(tcx: TyCtxt<'_>, sp: Span, item_def_id: LocalDefId) -> bool {
- let rty = tcx.type_of(item_def_id);
-
- // Check that it is possible to represent this type. This call identifies
- // (1) types that contain themselves and (2) types that contain a different
- // recursive type. It is only necessary to throw an error on those that
- // contain themselves. For case 2, there must be an inner type that will be
- // caught by case 1.
- match representability::ty_is_representable(tcx, rty, sp, None) {
- Representability::SelfRecursive(spans) => {
- recursive_type_with_infinite_size_error(tcx, item_def_id.to_def_id(), spans);
- return false;
- }
- Representability::Representable | Representability::ContainsRecursive => (),
- }
- true
-}
-
-pub fn check_simd(tcx: TyCtxt<'_>, sp: Span, def_id: LocalDefId) {
- let t = tcx.type_of(def_id);
- if let ty::Adt(def, substs) = t.kind()
- && def.is_struct()
- {
- let fields = &def.non_enum_variant().fields;
- if fields.is_empty() {
- struct_span_err!(tcx.sess, sp, E0075, "SIMD vector cannot be empty").emit();
- return;
- }
- let e = fields[0].ty(tcx, substs);
- if !fields.iter().all(|f| f.ty(tcx, substs) == e) {
- struct_span_err!(tcx.sess, sp, E0076, "SIMD vector should be homogeneous")
- .span_label(sp, "SIMD elements must have the same type")
- .emit();
- return;
- }
-
- let len = if let ty::Array(_ty, c) = e.kind() {
- c.try_eval_usize(tcx, tcx.param_env(def.did()))
- } else {
- Some(fields.len() as u64)
- };
- if let Some(len) = len {
- if len == 0 {
- struct_span_err!(tcx.sess, sp, E0075, "SIMD vector cannot be empty").emit();
- return;
- } else if len > MAX_SIMD_LANES {
- struct_span_err!(
- tcx.sess,
- sp,
- E0075,
- "SIMD vector cannot have more than {MAX_SIMD_LANES} elements",
- )
- .emit();
- return;
- }
- }
-
- // Check that we use types valid for use in the lanes of a SIMD "vector register"
- // These are scalar types which directly match a "machine" type
- // Yes: Integers, floats, "thin" pointers
- // No: char, "fat" pointers, compound types
- match e.kind() {
- ty::Param(_) => (), // pass struct<T>(T, T, T, T) through, let monomorphization catch errors
- ty::Int(_) | ty::Uint(_) | ty::Float(_) | ty::RawPtr(_) => (), // struct(u8, u8, u8, u8) is ok
- ty::Array(t, _) if matches!(t.kind(), ty::Param(_)) => (), // pass struct<T>([T; N]) through, let monomorphization catch errors
- ty::Array(t, _clen)
- if matches!(
- t.kind(),
- ty::Int(_) | ty::Uint(_) | ty::Float(_) | ty::RawPtr(_)
- ) =>
- { /* struct([f32; 4]) is ok */ }
- _ => {
- struct_span_err!(
- tcx.sess,
- sp,
- E0077,
- "SIMD vector element type should be a \
- primitive scalar (integer/float/pointer) type"
- )
- .emit();
- return;
- }
- }
- }
-}
-
-pub(super) fn check_packed(tcx: TyCtxt<'_>, sp: Span, def: ty::AdtDef<'_>) {
- let repr = def.repr();
- if repr.packed() {
- for attr in tcx.get_attrs(def.did(), sym::repr) {
- for r in attr::parse_repr_attr(&tcx.sess, attr) {
- if let attr::ReprPacked(pack) = r
- && let Some(repr_pack) = repr.pack
- && pack as u64 != repr_pack.bytes()
- {
- struct_span_err!(
- tcx.sess,
- sp,
- E0634,
- "type has conflicting packed representation hints"
- )
- .emit();
- }
- }
- }
- if repr.align.is_some() {
- struct_span_err!(
- tcx.sess,
- sp,
- E0587,
- "type has conflicting packed and align representation hints"
- )
- .emit();
- } else {
- if let Some(def_spans) = check_packed_inner(tcx, def.did(), &mut vec![]) {
- let mut err = struct_span_err!(
- tcx.sess,
- sp,
- E0588,
- "packed type cannot transitively contain a `#[repr(align)]` type"
- );
-
- err.span_note(
- tcx.def_span(def_spans[0].0),
- &format!(
- "`{}` has a `#[repr(align)]` attribute",
- tcx.item_name(def_spans[0].0)
- ),
- );
-
- if def_spans.len() > 2 {
- let mut first = true;
- for (adt_def, span) in def_spans.iter().skip(1).rev() {
- let ident = tcx.item_name(*adt_def);
- err.span_note(
- *span,
- &if first {
- format!(
- "`{}` contains a field of type `{}`",
- tcx.type_of(def.did()),
- ident
- )
- } else {
- format!("...which contains a field of type `{ident}`")
- },
- );
- first = false;
- }
- }
-
- err.emit();
- }
- }
- }
-}
-
-pub(super) fn check_packed_inner(
- tcx: TyCtxt<'_>,
- def_id: DefId,
- stack: &mut Vec<DefId>,
-) -> Option<Vec<(DefId, Span)>> {
- if let ty::Adt(def, substs) = tcx.type_of(def_id).kind() {
- if def.is_struct() || def.is_union() {
- if def.repr().align.is_some() {
- return Some(vec![(def.did(), DUMMY_SP)]);
- }
-
- stack.push(def_id);
- for field in &def.non_enum_variant().fields {
- if let ty::Adt(def, _) = field.ty(tcx, substs).kind()
- && !stack.contains(&def.did())
- && let Some(mut defs) = check_packed_inner(tcx, def.did(), stack)
- {
- defs.push((def.did(), field.ident(tcx).span));
- return Some(defs);
- }
- }
- stack.pop();
- }
- }
-
- None
-}
-
-pub(super) fn check_transparent<'tcx>(tcx: TyCtxt<'tcx>, sp: Span, adt: ty::AdtDef<'tcx>) {
- if !adt.repr().transparent() {
- return;
- }
-
- if adt.is_union() && !tcx.features().transparent_unions {
- feature_err(
- &tcx.sess.parse_sess,
- sym::transparent_unions,
- sp,
- "transparent unions are unstable",
- )
- .emit();
- }
-
- if adt.variants().len() != 1 {
- bad_variant_count(tcx, adt, sp, adt.did());
- if adt.variants().is_empty() {
- // Don't bother checking the fields. No variants (and thus no fields) exist.
- return;
- }
- }
-
- // For each field, figure out if it's known to be a ZST and align(1), with "known"
- // respecting #[non_exhaustive] attributes.
- let field_infos = adt.all_fields().map(|field| {
- let ty = field.ty(tcx, InternalSubsts::identity_for_item(tcx, field.did));
- let param_env = tcx.param_env(field.did);
- let layout = tcx.layout_of(param_env.and(ty));
- // We are currently checking the type this field came from, so it must be local
- let span = tcx.hir().span_if_local(field.did).unwrap();
- let zst = layout.map_or(false, |layout| layout.is_zst());
- let align1 = layout.map_or(false, |layout| layout.align.abi.bytes() == 1);
- if !zst {
- return (span, zst, align1, None);
- }
-
- fn check_non_exhaustive<'tcx>(
- tcx: TyCtxt<'tcx>,
- t: Ty<'tcx>,
- ) -> ControlFlow<(&'static str, DefId, SubstsRef<'tcx>, bool)> {
- match t.kind() {
- ty::Tuple(list) => list.iter().try_for_each(|t| check_non_exhaustive(tcx, t)),
- ty::Array(ty, _) => check_non_exhaustive(tcx, *ty),
- ty::Adt(def, subst) => {
- if !def.did().is_local() {
- let non_exhaustive = def.is_variant_list_non_exhaustive()
- || def
- .variants()
- .iter()
- .any(ty::VariantDef::is_field_list_non_exhaustive);
- let has_priv = def.all_fields().any(|f| !f.vis.is_public());
- if non_exhaustive || has_priv {
- return ControlFlow::Break((
- def.descr(),
- def.did(),
- subst,
- non_exhaustive,
- ));
- }
- }
- def.all_fields()
- .map(|field| field.ty(tcx, subst))
- .try_for_each(|t| check_non_exhaustive(tcx, t))
- }
- _ => ControlFlow::Continue(()),
- }
- }
-
- (span, zst, align1, check_non_exhaustive(tcx, ty).break_value())
- });
-
- let non_zst_fields = field_infos
- .clone()
- .filter_map(|(span, zst, _align1, _non_exhaustive)| if !zst { Some(span) } else { None });
- let non_zst_count = non_zst_fields.clone().count();
- if non_zst_count >= 2 {
- bad_non_zero_sized_fields(tcx, adt, non_zst_count, non_zst_fields, sp);
- }
- let incompatible_zst_fields =
- field_infos.clone().filter(|(_, _, _, opt)| opt.is_some()).count();
- let incompat = incompatible_zst_fields + non_zst_count >= 2 && non_zst_count < 2;
- for (span, zst, align1, non_exhaustive) in field_infos {
- if zst && !align1 {
- struct_span_err!(
- tcx.sess,
- span,
- E0691,
- "zero-sized field in transparent {} has alignment larger than 1",
- adt.descr(),
- )
- .span_label(span, "has alignment larger than 1")
- .emit();
- }
- if incompat && let Some((descr, def_id, substs, non_exhaustive)) = non_exhaustive {
- tcx.struct_span_lint_hir(
- REPR_TRANSPARENT_EXTERNAL_PRIVATE_FIELDS,
- tcx.hir().local_def_id_to_hir_id(adt.did().expect_local()),
- span,
- |lint| {
- let note = if non_exhaustive {
- "is marked with `#[non_exhaustive]`"
- } else {
- "contains private fields"
- };
- let field_ty = tcx.def_path_str_with_substs(def_id, substs);
- lint.build("zero-sized fields in repr(transparent) cannot contain external non-exhaustive types")
- .note(format!("this {descr} contains `{field_ty}`, which {note}, \
- and makes it not a breaking change to become non-zero-sized in the future."))
- .emit();
- },
- )
- }
- }
-}
-
-#[allow(trivial_numeric_casts)]
-fn check_enum<'tcx>(tcx: TyCtxt<'tcx>, vs: &'tcx [hir::Variant<'tcx>], def_id: LocalDefId) {
- let def = tcx.adt_def(def_id);
- let sp = tcx.def_span(def_id);
- def.destructor(tcx); // force the destructor to be evaluated
-
- if vs.is_empty() {
- if let Some(attr) = tcx.get_attr(def_id.to_def_id(), sym::repr) {
- struct_span_err!(
- tcx.sess,
- attr.span,
- E0084,
- "unsupported representation for zero-variant enum"
- )
- .span_label(sp, "zero-variant enum")
- .emit();
- }
- }
-
- let repr_type_ty = def.repr().discr_type().to_ty(tcx);
- if repr_type_ty == tcx.types.i128 || repr_type_ty == tcx.types.u128 {
- if !tcx.features().repr128 {
- feature_err(
- &tcx.sess.parse_sess,
- sym::repr128,
- sp,
- "repr with 128-bit type is unstable",
- )
- .emit();
- }
- }
-
- for v in vs {
- if let Some(ref e) = v.disr_expr {
- tcx.ensure().typeck(tcx.hir().local_def_id(e.hir_id));
- }
- }
-
- if tcx.adt_def(def_id).repr().int.is_none() && tcx.features().arbitrary_enum_discriminant {
- let is_unit = |var: &hir::Variant<'_>| matches!(var.data, hir::VariantData::Unit(..));
-
- let has_disr = |var: &hir::Variant<'_>| var.disr_expr.is_some();
- let has_non_units = vs.iter().any(|var| !is_unit(var));
- let disr_units = vs.iter().any(|var| is_unit(&var) && has_disr(&var));
- let disr_non_unit = vs.iter().any(|var| !is_unit(&var) && has_disr(&var));
-
- if disr_non_unit || (disr_units && has_non_units) {
- let mut err =
- struct_span_err!(tcx.sess, sp, E0732, "`#[repr(inttype)]` must be specified");
- err.emit();
- }
- }
-
- let mut disr_vals: Vec<Discr<'tcx>> = Vec::with_capacity(vs.len());
- // This tracks the previous variant span (in the loop) in case we need it for diagnostics
- let mut prev_variant_span: Span = DUMMY_SP;
- for ((_, discr), v) in iter::zip(def.discriminants(tcx), vs) {
- // Check for duplicate discriminant values
- if let Some(i) = disr_vals.iter().position(|&x| x.val == discr.val) {
- let variant_did = def.variant(VariantIdx::new(i)).def_id;
- let variant_i_hir_id = tcx.hir().local_def_id_to_hir_id(variant_did.expect_local());
- let variant_i = tcx.hir().expect_variant(variant_i_hir_id);
- let i_span = match variant_i.disr_expr {
- Some(ref expr) => tcx.hir().span(expr.hir_id),
- None => tcx.def_span(variant_did),
- };
- let span = match v.disr_expr {
- Some(ref expr) => tcx.hir().span(expr.hir_id),
- None => v.span,
- };
- let display_discr = format_discriminant_overflow(tcx, v, discr);
- let display_discr_i = format_discriminant_overflow(tcx, variant_i, disr_vals[i]);
- let no_disr = v.disr_expr.is_none();
- let mut err = struct_span_err!(
- tcx.sess,
- sp,
- E0081,
- "discriminant value `{}` assigned more than once",
- discr,
- );
-
- err.span_label(i_span, format!("first assignment of {display_discr_i}"));
- err.span_label(span, format!("second assignment of {display_discr}"));
-
- if no_disr {
- err.span_label(
- prev_variant_span,
- format!(
- "assigned discriminant for `{}` was incremented from this discriminant",
- v.ident
- ),
- );
- }
- err.emit();
- }
-
- disr_vals.push(discr);
- prev_variant_span = v.span;
- }
-
- check_representable(tcx, sp, def_id);
- check_transparent(tcx, sp, def);
-}
-
-/// In the case that a discriminant is both a duplicate and an overflowing literal,
-/// we insert both the assigned discriminant and the literal it overflowed from into the formatted
-/// output. Otherwise we format the discriminant normally.
-fn format_discriminant_overflow<'tcx>(
- tcx: TyCtxt<'tcx>,
- variant: &hir::Variant<'_>,
- dis: Discr<'tcx>,
-) -> String {
- if let Some(expr) = &variant.disr_expr {
- let body = &tcx.hir().body(expr.body).value;
- if let hir::ExprKind::Lit(lit) = &body.kind
- && let rustc_ast::LitKind::Int(lit_value, _int_kind) = &lit.node
- && dis.val != *lit_value
- {
- return format!("`{dis}` (overflowed from `{lit_value}`)");
- }
- }
-
- format!("`{dis}`")
-}
-
-pub(super) fn check_type_params_are_used<'tcx>(
- tcx: TyCtxt<'tcx>,
- generics: &ty::Generics,
- ty: Ty<'tcx>,
-) {
- debug!("check_type_params_are_used(generics={:?}, ty={:?})", generics, ty);
-
- assert_eq!(generics.parent, None);
-
- if generics.own_counts().types == 0 {
- return;
- }
-
- let mut params_used = BitSet::new_empty(generics.params.len());
-
- if ty.references_error() {
- // If there is already another error, do not emit
- // an error for not using a type parameter.
- assert!(tcx.sess.has_errors().is_some());
- return;
- }
-
- for leaf in ty.walk() {
- if let GenericArgKind::Type(leaf_ty) = leaf.unpack()
- && let ty::Param(param) = leaf_ty.kind()
- {
- debug!("found use of ty param {:?}", param);
- params_used.insert(param.index);
- }
- }
-
- for param in &generics.params {
- if !params_used.contains(param.index)
- && let ty::GenericParamDefKind::Type { .. } = param.kind
- {
- let span = tcx.def_span(param.def_id);
- struct_span_err!(
- tcx.sess,
- span,
- E0091,
- "type parameter `{}` is unused",
- param.name,
- )
- .span_label(span, "unused type parameter")
- .emit();
- }
- }
-}
-
-pub(super) fn check_mod_item_types(tcx: TyCtxt<'_>, module_def_id: LocalDefId) {
- let module = tcx.hir_module_items(module_def_id);
- for id in module.items() {
- check_item_type(tcx, id);
- }
-}
-
-fn async_opaque_type_cycle_error(tcx: TyCtxt<'_>, span: Span) -> ErrorGuaranteed {
- struct_span_err!(tcx.sess, span, E0733, "recursion in an `async fn` requires boxing")
- .span_label(span, "recursive `async fn`")
- .note("a recursive `async fn` must be rewritten to return a boxed `dyn Future`")
- .note(
- "consider using the `async_recursion` crate: https://crates.io/crates/async_recursion",
- )
- .emit()
-}
-
-/// Emit an error for recursive opaque types.
-///
-/// If this is a return `impl Trait`, find the item's return expressions and point at them. For
-/// direct recursion this is enough, but for indirect recursion also point at the last intermediary
-/// `impl Trait`.
-///
- /// If all the return expressions evaluate to `!`, then we explain that the error will go away
- /// once the body returns a concrete type. This can happen when a user uses `panic!()` or
- /// similar as a placeholder.
-fn opaque_type_cycle_error(tcx: TyCtxt<'_>, def_id: LocalDefId, span: Span) -> ErrorGuaranteed {
- let mut err = struct_span_err!(tcx.sess, span, E0720, "cannot resolve opaque type");
-
- let mut label = false;
- if let Some((def_id, visitor)) = get_owner_return_paths(tcx, def_id) {
- let typeck_results = tcx.typeck(def_id);
- if visitor
- .returns
- .iter()
- .filter_map(|expr| typeck_results.node_type_opt(expr.hir_id))
- .all(|ty| matches!(ty.kind(), ty::Never))
- {
- let spans = visitor
- .returns
- .iter()
- .filter(|expr| typeck_results.node_type_opt(expr.hir_id).is_some())
- .map(|expr| expr.span)
- .collect::<Vec<Span>>();
- let span_len = spans.len();
- if span_len == 1 {
- err.span_label(spans[0], "this returned value is of `!` type");
- } else {
- let mut multispan: MultiSpan = spans.clone().into();
- for span in spans {
- multispan.push_span_label(span, "this returned value is of `!` type");
- }
- err.span_note(multispan, "these returned values have a concrete \"never\" type");
- }
- err.help("this error will resolve once the item's body returns a concrete type");
- } else {
- let mut seen = FxHashSet::default();
- seen.insert(span);
- err.span_label(span, "recursive opaque type");
- label = true;
- for (sp, ty) in visitor
- .returns
- .iter()
- .filter_map(|e| typeck_results.node_type_opt(e.hir_id).map(|t| (e.span, t)))
- .filter(|(_, ty)| !matches!(ty.kind(), ty::Never))
- {
- struct OpaqueTypeCollector(Vec<DefId>);
- impl<'tcx> ty::visit::TypeVisitor<'tcx> for OpaqueTypeCollector {
- fn visit_ty(&mut self, t: Ty<'tcx>) -> ControlFlow<Self::BreakTy> {
- match *t.kind() {
- ty::Opaque(def, _) => {
- self.0.push(def);
- ControlFlow::CONTINUE
- }
- _ => t.super_visit_with(self),
- }
- }
- }
- let mut visitor = OpaqueTypeCollector(vec![]);
- ty.visit_with(&mut visitor);
- for def_id in visitor.0 {
- let ty_span = tcx.def_span(def_id);
- if !seen.contains(&ty_span) {
- err.span_label(ty_span, &format!("returning this opaque type `{ty}`"));
- seen.insert(ty_span);
- }
- err.span_label(sp, &format!("returning here with type `{ty}`"));
- }
- }
- }
- }
- if !label {
- err.span_label(span, "cannot resolve opaque type");
- }
- err.emit()
-}
diff --git a/compiler/rustc_typeck/src/check/closure.rs b/compiler/rustc_typeck/src/check/closure.rs
deleted file mode 100644
index fee872155..000000000
--- a/compiler/rustc_typeck/src/check/closure.rs
+++ /dev/null
@@ -1,805 +0,0 @@
-//! Code for type-checking closure expressions.
-
-use super::{check_fn, Expectation, FnCtxt, GeneratorTypes};
-
-use crate::astconv::AstConv;
-use crate::rustc_middle::ty::subst::Subst;
-use rustc_hir as hir;
-use rustc_hir::def_id::DefId;
-use rustc_hir::lang_items::LangItem;
-use rustc_infer::infer::type_variable::{TypeVariableOrigin, TypeVariableOriginKind};
-use rustc_infer::infer::LateBoundRegionConversionTime;
-use rustc_infer::infer::{InferOk, InferResult};
-use rustc_middle::ty::subst::InternalSubsts;
-use rustc_middle::ty::visit::TypeVisitable;
-use rustc_middle::ty::{self, Ty};
-use rustc_span::source_map::Span;
-use rustc_target::spec::abi::Abi;
-use rustc_trait_selection::traits::error_reporting::ArgKind;
-use rustc_trait_selection::traits::error_reporting::InferCtxtExt as _;
-use std::cmp;
-use std::iter;
-
-/// What signature do we *expect* the closure to have from context?
-#[derive(Debug)]
-struct ExpectedSig<'tcx> {
- /// Span that gave us this expectation, if we know that.
- cause_span: Option<Span>,
- sig: ty::PolyFnSig<'tcx>,
-}
-
-struct ClosureSignatures<'tcx> {
- bound_sig: ty::PolyFnSig<'tcx>,
- liberated_sig: ty::FnSig<'tcx>,
-}
-
-impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
- #[instrument(skip(self, expr, _capture, decl, body_id), level = "debug")]
- pub fn check_expr_closure(
- &self,
- expr: &hir::Expr<'_>,
- _capture: hir::CaptureBy,
- decl: &'tcx hir::FnDecl<'tcx>,
- body_id: hir::BodyId,
- gen: Option<hir::Movability>,
- expected: Expectation<'tcx>,
- ) -> Ty<'tcx> {
- trace!("decl = {:#?}", decl);
- trace!("expr = {:#?}", expr);
-
- // It's always helpful for inference if we know the kind of
- // closure sooner rather than later, so first examine the expected
- // type, and see if we can glean a closure kind from there.
- let (expected_sig, expected_kind) = match expected.to_option(self) {
- Some(ty) => self.deduce_expectations_from_expected_type(ty),
- None => (None, None),
- };
- let body = self.tcx.hir().body(body_id);
- self.check_closure(expr, expected_kind, decl, body, gen, expected_sig)
- }
-
- #[instrument(skip(self, expr, body, decl), level = "debug")]
- fn check_closure(
- &self,
- expr: &hir::Expr<'_>,
- opt_kind: Option<ty::ClosureKind>,
- decl: &'tcx hir::FnDecl<'tcx>,
- body: &'tcx hir::Body<'tcx>,
- gen: Option<hir::Movability>,
- expected_sig: Option<ExpectedSig<'tcx>>,
- ) -> Ty<'tcx> {
- trace!("decl = {:#?}", decl);
- let expr_def_id = self.tcx.hir().local_def_id(expr.hir_id);
- debug!(?expr_def_id);
-
- let ClosureSignatures { bound_sig, liberated_sig } =
- self.sig_of_closure(expr.hir_id, expr_def_id.to_def_id(), decl, body, expected_sig);
-
- debug!(?bound_sig, ?liberated_sig);
-
- let return_type_pre_known = !liberated_sig.output().is_ty_infer();
-
- let generator_types = check_fn(
- self,
- self.param_env.without_const(),
- liberated_sig,
- decl,
- expr.hir_id,
- body,
- gen,
- return_type_pre_known,
- )
- .1;
-
- let parent_substs = InternalSubsts::identity_for_item(
- self.tcx,
- self.tcx.typeck_root_def_id(expr_def_id.to_def_id()),
- );
-
- let tupled_upvars_ty = self.next_ty_var(TypeVariableOrigin {
- kind: TypeVariableOriginKind::ClosureSynthetic,
- span: self.tcx.hir().span(expr.hir_id),
- });
-
- if let Some(GeneratorTypes { resume_ty, yield_ty, interior, movability }) = generator_types
- {
- let generator_substs = ty::GeneratorSubsts::new(
- self.tcx,
- ty::GeneratorSubstsParts {
- parent_substs,
- resume_ty,
- yield_ty,
- return_ty: liberated_sig.output(),
- witness: interior,
- tupled_upvars_ty,
- },
- );
-
- return self.tcx.mk_generator(
- expr_def_id.to_def_id(),
- generator_substs.substs,
- movability,
- );
- }
-
- // Tuple up the arguments and insert the resulting function type into
- // the `closures` table.
- let sig = bound_sig.map_bound(|sig| {
- self.tcx.mk_fn_sig(
- iter::once(self.tcx.intern_tup(sig.inputs())),
- sig.output(),
- sig.c_variadic,
- sig.unsafety,
- sig.abi,
- )
- });
-
- debug!(?sig, ?opt_kind);
-
- let closure_kind_ty = match opt_kind {
- Some(kind) => kind.to_ty(self.tcx),
-
- // Create a type variable (for now) to represent the closure kind.
- // It will be unified during the upvar inference phase (`upvar.rs`)
- None => self.next_ty_var(TypeVariableOrigin {
- // FIXME(eddyb) distinguish closure kind inference variables from the rest.
- kind: TypeVariableOriginKind::ClosureSynthetic,
- span: expr.span,
- }),
- };
-
- let closure_substs = ty::ClosureSubsts::new(
- self.tcx,
- ty::ClosureSubstsParts {
- parent_substs,
- closure_kind_ty,
- closure_sig_as_fn_ptr_ty: self.tcx.mk_fn_ptr(sig),
- tupled_upvars_ty,
- },
- );
-
- let closure_type = self.tcx.mk_closure(expr_def_id.to_def_id(), closure_substs.substs);
-
- debug!(?expr.hir_id, ?closure_type);
-
- closure_type
- }
-
- /// Given the expected type, figures out what it can deduce about this closure we
- /// are about to type check:
- #[instrument(skip(self), level = "debug")]
- fn deduce_expectations_from_expected_type(
- &self,
- expected_ty: Ty<'tcx>,
- ) -> (Option<ExpectedSig<'tcx>>, Option<ty::ClosureKind>) {
- match *expected_ty.kind() {
- ty::Opaque(def_id, substs) => {
- let bounds = self.tcx.bound_explicit_item_bounds(def_id);
- let sig = bounds
- .transpose_iter()
- .map(|e| e.map_bound(|e| *e).transpose_tuple2())
- .find_map(|(pred, span)| match pred.0.kind().skip_binder() {
- ty::PredicateKind::Projection(proj_predicate) => self
- .deduce_sig_from_projection(
- Some(span.0),
- pred.0
- .kind()
- .rebind(pred.rebind(proj_predicate).subst(self.tcx, substs)),
- ),
- _ => None,
- });
-
- let kind = bounds
- .transpose_iter()
- .map(|e| e.map_bound(|e| *e).transpose_tuple2())
- .filter_map(|(pred, _)| match pred.0.kind().skip_binder() {
- ty::PredicateKind::Trait(tp) => {
- self.tcx.fn_trait_kind_from_lang_item(tp.def_id())
- }
- _ => None,
- })
- .fold(None, |best, cur| Some(best.map_or(cur, |best| cmp::min(best, cur))));
- trace!(?sig, ?kind);
- (sig, kind)
- }
- ty::Dynamic(ref object_type, ..) => {
- let sig = object_type.projection_bounds().find_map(|pb| {
- let pb = pb.with_self_ty(self.tcx, self.tcx.types.trait_object_dummy_self);
- self.deduce_sig_from_projection(None, pb)
- });
- let kind = object_type
- .principal_def_id()
- .and_then(|did| self.tcx.fn_trait_kind_from_lang_item(did));
- (sig, kind)
- }
- ty::Infer(ty::TyVar(vid)) => self.deduce_expectations_from_obligations(vid),
- ty::FnPtr(sig) => {
- let expected_sig = ExpectedSig { cause_span: None, sig };
- (Some(expected_sig), Some(ty::ClosureKind::Fn))
- }
- _ => (None, None),
- }
- }
-
- fn deduce_expectations_from_obligations(
- &self,
- expected_vid: ty::TyVid,
- ) -> (Option<ExpectedSig<'tcx>>, Option<ty::ClosureKind>) {
- let expected_sig =
- self.obligations_for_self_ty(expected_vid).find_map(|(_, obligation)| {
- debug!(?obligation.predicate);
-
- let bound_predicate = obligation.predicate.kind();
- if let ty::PredicateKind::Projection(proj_predicate) =
- obligation.predicate.kind().skip_binder()
- {
- // Given a Projection predicate, we can potentially infer
- // the complete signature.
- self.deduce_sig_from_projection(
- Some(obligation.cause.span),
- bound_predicate.rebind(proj_predicate),
- )
- } else {
- None
- }
- });
-
- // Even if we can't infer the full signature, we may be able to
- // infer the kind. This can occur when we elaborate a predicate
- // like `F : Fn<A>`. Note that due to subtyping we could encounter
- // many viable options, so pick the most restrictive.
- let expected_kind = self
- .obligations_for_self_ty(expected_vid)
- .filter_map(|(tr, _)| self.tcx.fn_trait_kind_from_lang_item(tr.def_id()))
- .fold(None, |best, cur| Some(best.map_or(cur, |best| cmp::min(best, cur))));
-
- (expected_sig, expected_kind)
- }
-
- /// Given a projection like "<F as Fn(X)>::Result == Y", we can deduce
- /// everything we need to know about a closure or generator.
- ///
- /// The `cause_span` should be the span that caused us to
- /// have this expected signature, or `None` if we can't readily
- /// know that.
- #[instrument(level = "debug", skip(self, cause_span))]
- fn deduce_sig_from_projection(
- &self,
- cause_span: Option<Span>,
- projection: ty::PolyProjectionPredicate<'tcx>,
- ) -> Option<ExpectedSig<'tcx>> {
- let tcx = self.tcx;
-
- let trait_def_id = projection.trait_def_id(tcx);
-
- let is_fn = tcx.fn_trait_kind_from_lang_item(trait_def_id).is_some();
- let gen_trait = tcx.require_lang_item(LangItem::Generator, cause_span);
- let is_gen = gen_trait == trait_def_id;
- if !is_fn && !is_gen {
- debug!("not fn or generator");
- return None;
- }
-
- if is_gen {
- // Check that we deduce the signature from the `<_ as std::ops::Generator>::Return`
- // associated item and not yield.
- let return_assoc_item = self.tcx.associated_item_def_ids(gen_trait)[1];
- if return_assoc_item != projection.projection_def_id() {
- debug!("not return assoc item of generator");
- return None;
- }
- }
-
- let input_tys = if is_fn {
- let arg_param_ty = projection.skip_binder().projection_ty.substs.type_at(1);
- let arg_param_ty = self.resolve_vars_if_possible(arg_param_ty);
- debug!(?arg_param_ty);
-
- match arg_param_ty.kind() {
- &ty::Tuple(tys) => tys,
- _ => return None,
- }
- } else {
- // Generators with a `()` resume type may be defined with 0 or 1 explicit arguments,
- // else they must have exactly 1 argument. For now though, just give up in this case.
- return None;
- };
-
- // Since this is a return parameter type it is safe to unwrap.
- let ret_param_ty = projection.skip_binder().term.ty().unwrap();
- let ret_param_ty = self.resolve_vars_if_possible(ret_param_ty);
- debug!(?ret_param_ty);
-
- let sig = projection.rebind(self.tcx.mk_fn_sig(
- input_tys.iter(),
- ret_param_ty,
- false,
- hir::Unsafety::Normal,
- Abi::Rust,
- ));
- debug!(?sig);
-
- Some(ExpectedSig { cause_span, sig })
- }
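To illustrate the deduction above, a small sketch (made-up names) of how an `Fn` bound lets a closure be written without annotations:

    fn apply<F: Fn(u32) -> u32>(f: F) -> u32 {
        f(1)
    }

    fn main() {
        // The bound `F: Fn(u32) -> u32` provides the projection
        // `<F as FnOnce<(u32,)>>::Output == u32`, from which the closure's
        // argument and return types are deduced here without annotations.
        assert_eq!(apply(|x| x * 2), 2);
    }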
-
- fn sig_of_closure(
- &self,
- hir_id: hir::HirId,
- expr_def_id: DefId,
- decl: &hir::FnDecl<'_>,
- body: &hir::Body<'_>,
- expected_sig: Option<ExpectedSig<'tcx>>,
- ) -> ClosureSignatures<'tcx> {
- if let Some(e) = expected_sig {
- self.sig_of_closure_with_expectation(hir_id, expr_def_id, decl, body, e)
- } else {
- self.sig_of_closure_no_expectation(hir_id, expr_def_id, decl, body)
- }
- }
-
- /// If there is no expected signature, then we will convert the
- /// types that the user gave into a signature.
- #[instrument(skip(self, hir_id, expr_def_id, decl, body), level = "debug")]
- fn sig_of_closure_no_expectation(
- &self,
- hir_id: hir::HirId,
- expr_def_id: DefId,
- decl: &hir::FnDecl<'_>,
- body: &hir::Body<'_>,
- ) -> ClosureSignatures<'tcx> {
- let bound_sig = self.supplied_sig_of_closure(hir_id, expr_def_id, decl, body);
-
- self.closure_sigs(expr_def_id, body, bound_sig)
- }
-
- /// Invoked to compute the signature of a closure expression. This
- /// combines any user-provided type annotations (e.g., `|x: u32|
- /// -> u32 { .. }`) with the expected signature.
- ///
- /// The approach is as follows:
- ///
- /// - Let `S` be the (higher-ranked) signature that we derive from the user's annotations.
- /// - Let `E` be the (higher-ranked) signature that we derive from the expectations, if any.
- /// - If we have no expectation `E`, then the signature of the closure is `S`.
-    /// - Otherwise, the signature of the closure is `E`. Moreover:
- /// - Skolemize the late-bound regions in `E`, yielding `E'`.
- /// - Instantiate all the late-bound regions bound in the closure within `S`
- /// with fresh (existential) variables, yielding `S'`
- /// - Require that `E' = S'`
- /// - We could use some kind of subtyping relationship here,
- /// I imagine, but equality is easier and works fine for
- /// our purposes.
- ///
- /// The key intuition here is that the user's types must be valid
- /// from "the inside" of the closure, but the expectation
- /// ultimately drives the overall signature.
- ///
- /// # Examples
- ///
- /// ```ignore (illustrative)
- /// fn with_closure<F>(_: F)
- /// where F: Fn(&u32) -> &u32 { .. }
- ///
- /// with_closure(|x: &u32| { ... })
- /// ```
- ///
- /// Here:
- /// - E would be `fn(&u32) -> &u32`.
-    /// - S would be `fn(&u32) -> ?T` (the unannotated return type is an inference variable).
- /// - E' is `&'!0 u32 -> &'!0 u32`
- /// - S' is `&'?0 u32 -> ?T`
- ///
-    /// S' can be unified with E' with `['?0 = '!0, ?T = &'!0 u32]`.
- ///
- /// # Arguments
- ///
- /// - `expr_def_id`: the `DefId` of the closure expression
- /// - `decl`: the HIR declaration of the closure
- /// - `body`: the body of the closure
- /// - `expected_sig`: the expected signature (if any). Note that
- /// this is missing a binder: that is, there may be late-bound
- /// regions with depth 1, which are bound then by the closure.
- #[instrument(skip(self, hir_id, expr_def_id, decl, body), level = "debug")]
- fn sig_of_closure_with_expectation(
- &self,
- hir_id: hir::HirId,
- expr_def_id: DefId,
- decl: &hir::FnDecl<'_>,
- body: &hir::Body<'_>,
- expected_sig: ExpectedSig<'tcx>,
- ) -> ClosureSignatures<'tcx> {
-        // Watch out for some surprises and just ignore the
-        // expectation if things don't seem to match up with what we
-        // expect.
- if expected_sig.sig.c_variadic() != decl.c_variadic {
- return self.sig_of_closure_no_expectation(hir_id, expr_def_id, decl, body);
- } else if expected_sig.sig.skip_binder().inputs_and_output.len() != decl.inputs.len() + 1 {
- return self.sig_of_closure_with_mismatched_number_of_arguments(
- expr_def_id,
- decl,
- body,
- expected_sig,
- );
- }
-
- // Create a `PolyFnSig`. Note the oddity that late bound
- // regions appearing free in `expected_sig` are now bound up
- // in this binder we are creating.
- assert!(!expected_sig.sig.skip_binder().has_vars_bound_above(ty::INNERMOST));
- let bound_sig = expected_sig.sig.map_bound(|sig| {
- self.tcx.mk_fn_sig(
- sig.inputs().iter().cloned(),
- sig.output(),
- sig.c_variadic,
- hir::Unsafety::Normal,
- Abi::RustCall,
- )
- });
-
- // `deduce_expectations_from_expected_type` introduces
- // late-bound lifetimes defined elsewhere, which we now
- // anonymize away, so as not to confuse the user.
- let bound_sig = self.tcx.anonymize_late_bound_regions(bound_sig);
-
- let closure_sigs = self.closure_sigs(expr_def_id, body, bound_sig);
-
- // Up till this point, we have ignored the annotations that the user
- // gave. This function will check that they unify successfully.
- // Along the way, it also writes out entries for types that the user
- // wrote into our typeck results, which are then later used by the privacy
- // check.
- match self.check_supplied_sig_against_expectation(
- hir_id,
- expr_def_id,
- decl,
- body,
- &closure_sigs,
- ) {
- Ok(infer_ok) => self.register_infer_ok_obligations(infer_ok),
- Err(_) => return self.sig_of_closure_no_expectation(hir_id, expr_def_id, decl, body),
- }
-
- closure_sigs
- }
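// Illustrative, self-contained sketch (not rustc internals): a runnable version of
// the `with_closure` example from the doc comment above. The expectation E
// (`Fn(&u32) -> &u32`) supplies the higher-ranked signature; the user's annotation
// on `x` only has to unify with the corresponding input of E.
fn with_closure<F>(f: F) -> u32
where
    F: Fn(&u32) -> &u32,
{
    *f(&7)
}

fn main() {
    // The closure's signature is driven by the expectation; the annotated
    // `&u32` input unifies with the expected input, and the output follows E.
    assert_eq!(with_closure(|x: &u32| x), 7);
}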
-
- fn sig_of_closure_with_mismatched_number_of_arguments(
- &self,
- expr_def_id: DefId,
- decl: &hir::FnDecl<'_>,
- body: &hir::Body<'_>,
- expected_sig: ExpectedSig<'tcx>,
- ) -> ClosureSignatures<'tcx> {
- let hir = self.tcx.hir();
- let expr_map_node = hir.get_if_local(expr_def_id).unwrap();
- let expected_args: Vec<_> = expected_sig
- .sig
- .skip_binder()
- .inputs()
- .iter()
- .map(|ty| ArgKind::from_expected_ty(*ty, None))
- .collect();
- let (closure_span, found_args) = match self.get_fn_like_arguments(expr_map_node) {
- Some((sp, args)) => (Some(sp), args),
- None => (None, Vec::new()),
- };
- let expected_span =
- expected_sig.cause_span.unwrap_or_else(|| hir.span_if_local(expr_def_id).unwrap());
- self.report_arg_count_mismatch(
- expected_span,
- closure_span,
- expected_args,
- found_args,
- true,
- )
- .emit();
-
- let error_sig = self.error_sig_of_closure(decl);
-
- self.closure_sigs(expr_def_id, body, error_sig)
- }
-
- /// Enforce the user's types against the expectation. See
- /// `sig_of_closure_with_expectation` for details on the overall
- /// strategy.
- fn check_supplied_sig_against_expectation(
- &self,
- hir_id: hir::HirId,
- expr_def_id: DefId,
- decl: &hir::FnDecl<'_>,
- body: &hir::Body<'_>,
- expected_sigs: &ClosureSignatures<'tcx>,
- ) -> InferResult<'tcx, ()> {
- // Get the signature S that the user gave.
- //
- // (See comment on `sig_of_closure_with_expectation` for the
- // meaning of these letters.)
- let supplied_sig = self.supplied_sig_of_closure(hir_id, expr_def_id, decl, body);
-
- debug!("check_supplied_sig_against_expectation: supplied_sig={:?}", supplied_sig);
-
- // FIXME(#45727): As discussed in [this comment][c1], naively
- // forcing equality here actually results in suboptimal error
- // messages in some cases. For now, if there would have been
-        // an obvious error, we fall back to declaring the type of the
- // closure to be the one the user gave, which allows other
- // error message code to trigger.
- //
- // However, I think [there is potential to do even better
- // here][c2], since in *this* code we have the precise span of
- // the type parameter in question in hand when we report the
- // error.
- //
- // [c1]: https://github.com/rust-lang/rust/pull/45072#issuecomment-341089706
- // [c2]: https://github.com/rust-lang/rust/pull/45072#issuecomment-341096796
- self.commit_if_ok(|_| {
- let mut all_obligations = vec![];
-
- // The liberated version of this signature should be a subtype
- // of the liberated form of the expectation.
- for ((hir_ty, &supplied_ty), expected_ty) in iter::zip(
- iter::zip(
- decl.inputs,
- supplied_sig.inputs().skip_binder(), // binder moved to (*) below
- ),
- expected_sigs.liberated_sig.inputs(), // `liberated_sig` is E'.
- ) {
- // Instantiate (this part of..) S to S', i.e., with fresh variables.
- let supplied_ty = self.replace_bound_vars_with_fresh_vars(
- hir_ty.span,
- LateBoundRegionConversionTime::FnCall,
- supplied_sig.inputs().rebind(supplied_ty),
- ); // recreated from (*) above
-
- // Check that E' = S'.
- let cause = self.misc(hir_ty.span);
- let InferOk { value: (), obligations } =
- self.at(&cause, self.param_env).eq(*expected_ty, supplied_ty)?;
- all_obligations.extend(obligations);
- }
-
- let supplied_output_ty = self.replace_bound_vars_with_fresh_vars(
- decl.output.span(),
- LateBoundRegionConversionTime::FnCall,
- supplied_sig.output(),
- );
- let cause = &self.misc(decl.output.span());
- let InferOk { value: (), obligations } = self
- .at(cause, self.param_env)
- .eq(expected_sigs.liberated_sig.output(), supplied_output_ty)?;
- all_obligations.extend(obligations);
-
- Ok(InferOk { value: (), obligations: all_obligations })
- })
- }
-
-    /// Converts the types that the user gave for the closure's
-    /// parameters and return type into a signature.
- ///
- /// Also, record this closure signature for later.
- #[instrument(skip(self, decl, body), level = "debug")]
- fn supplied_sig_of_closure(
- &self,
- hir_id: hir::HirId,
- expr_def_id: DefId,
- decl: &hir::FnDecl<'_>,
- body: &hir::Body<'_>,
- ) -> ty::PolyFnSig<'tcx> {
- let astconv: &dyn AstConv<'_> = self;
-
- trace!("decl = {:#?}", decl);
- debug!(?body.generator_kind);
-
- let bound_vars = self.tcx.late_bound_vars(hir_id);
-
- // First, convert the types that the user supplied (if any).
- let supplied_arguments = decl.inputs.iter().map(|a| astconv.ast_ty_to_ty(a));
- let supplied_return = match decl.output {
- hir::FnRetTy::Return(ref output) => astconv.ast_ty_to_ty(&output),
- hir::FnRetTy::DefaultReturn(_) => match body.generator_kind {
- // In the case of the async block that we create for a function body,
- // we expect the return type of the block to match that of the enclosing
- // function.
- Some(hir::GeneratorKind::Async(hir::AsyncGeneratorKind::Fn)) => {
- debug!("closure is async fn body");
- self.deduce_future_output_from_obligations(expr_def_id, body.id().hir_id)
- .unwrap_or_else(|| {
- // AFAIK, deducing the future output
- // always succeeds *except* in error cases
- // like #65159. I'd like to return Error
- // here, but I can't because I can't
- // easily (and locally) prove that we
- // *have* reported an
- // error. --nikomatsakis
- astconv.ty_infer(None, decl.output.span())
- })
- }
-
- _ => astconv.ty_infer(None, decl.output.span()),
- },
- };
-
- let result = ty::Binder::bind_with_vars(
- self.tcx.mk_fn_sig(
- supplied_arguments,
- supplied_return,
- decl.c_variadic,
- hir::Unsafety::Normal,
- Abi::RustCall,
- ),
- bound_vars,
- );
-
- debug!(?result);
-
- let c_result = self.inh.infcx.canonicalize_response(result);
- self.typeck_results.borrow_mut().user_provided_sigs.insert(expr_def_id, c_result);
-
- result
- }
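// Illustrative, self-contained sketch (not rustc internals): with no useful
// expectation, the user-supplied annotations alone become the closure's signature,
// which is what `supplied_sig_of_closure` above builds from `decl.inputs` and
// `decl.output`.
fn main() {
    // Supplied signature: fn(u32) -> u32 (both types written by the user).
    let double = |x: u32| -> u32 { x * 2 };
    assert_eq!(double(21), 42);

    // With no return annotation and no expectation, the output starts as an
    // inference variable and is later resolved from the body (here: bool).
    let is_even = |x: u32| x % 2 == 0;
    assert!(is_even(42));
}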
-
- /// Invoked when we are translating the generator that results
- /// from desugaring an `async fn`. Returns the "sugared" return
- /// type of the `async fn` -- that is, the return type that the
- /// user specified. The "desugared" return type is an `impl
- /// Future<Output = T>`, so we do this by searching through the
- /// obligations to extract the `T`.
- #[instrument(skip(self), level = "debug")]
- fn deduce_future_output_from_obligations(
- &self,
- expr_def_id: DefId,
- body_id: hir::HirId,
- ) -> Option<Ty<'tcx>> {
- let ret_coercion = self.ret_coercion.as_ref().unwrap_or_else(|| {
- span_bug!(self.tcx.def_span(expr_def_id), "async fn generator outside of a fn")
- });
-
- let ret_ty = ret_coercion.borrow().expected_ty();
- let ret_ty = self.inh.infcx.shallow_resolve(ret_ty);
-
- let get_future_output = |predicate: ty::Predicate<'tcx>, span| {
- // Search for a pending obligation like
- //
- // `<R as Future>::Output = T`
- //
- // where R is the return type we are expecting. This type `T`
- // will be our output.
- let bound_predicate = predicate.kind();
- if let ty::PredicateKind::Projection(proj_predicate) = bound_predicate.skip_binder() {
- self.deduce_future_output_from_projection(
- span,
- bound_predicate.rebind(proj_predicate),
- )
- } else {
- None
- }
- };
-
- let output_ty = match *ret_ty.kind() {
- ty::Infer(ty::TyVar(ret_vid)) => {
- self.obligations_for_self_ty(ret_vid).find_map(|(_, obligation)| {
- get_future_output(obligation.predicate, obligation.cause.span)
- })?
- }
- ty::Opaque(def_id, substs) => self
- .tcx
- .bound_explicit_item_bounds(def_id)
- .transpose_iter()
- .map(|e| e.map_bound(|e| *e).transpose_tuple2())
- .find_map(|(p, s)| get_future_output(p.subst(self.tcx, substs), s.0))?,
- ty::Error(_) => return None,
- _ => span_bug!(
- self.tcx.def_span(expr_def_id),
- "async fn generator return type not an inference variable"
- ),
- };
-
-        // `async fn`s that have opaque types in their return type need to redo the conversion
-        // to inference variables, as they fetch the still-opaque version from the signature.
- let InferOk { value: output_ty, obligations } = self
- .replace_opaque_types_with_inference_vars(
- output_ty,
- body_id,
- self.tcx.def_span(expr_def_id),
- self.param_env,
- );
- self.register_predicates(obligations);
-
- debug!("deduce_future_output_from_obligations: output_ty={:?}", output_ty);
- Some(output_ty)
- }
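// Illustrative, self-contained sketch (not rustc internals): the deduction above
// recovers the "sugared" return type of an `async fn` from its
// `<_ as Future>::Output` obligation. At the surface level, `async fn answer() -> u32`
// behaves like a function returning `impl Future<Output = u32>`, which is where
// that projection comes from.
use std::future::Future;

async fn answer() -> u32 {
    42
}

// Hand-written equivalent of the desugared return type.
fn answer_desugared() -> impl Future<Output = u32> {
    async { 42 }
}

fn main() {
    // A tiny executor is out of scope here; we only check that both futures
    // advertise the same `Output` type.
    fn assert_output_u32(_: impl Future<Output = u32>) {}
    assert_output_u32(answer());
    assert_output_u32(answer_desugared());
}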
-
- /// Given a projection like
- ///
- /// `<X as Future>::Output = T`
- ///
- /// where `X` is some type that has no late-bound regions, returns
- /// `Some(T)`. If the projection is for some other trait, returns
- /// `None`.
- fn deduce_future_output_from_projection(
- &self,
- cause_span: Span,
- predicate: ty::PolyProjectionPredicate<'tcx>,
- ) -> Option<Ty<'tcx>> {
- debug!("deduce_future_output_from_projection(predicate={:?})", predicate);
-
- // We do not expect any bound regions in our predicate, so
- // skip past the bound vars.
- let Some(predicate) = predicate.no_bound_vars() else {
- debug!("deduce_future_output_from_projection: has late-bound regions");
- return None;
- };
-
- // Check that this is a projection from the `Future` trait.
- let trait_def_id = predicate.projection_ty.trait_def_id(self.tcx);
- let future_trait = self.tcx.require_lang_item(LangItem::Future, Some(cause_span));
- if trait_def_id != future_trait {
- debug!("deduce_future_output_from_projection: not a future");
- return None;
- }
-
- // The `Future` trait has only one associated item, `Output`,
- // so check that this is what we see.
- let output_assoc_item = self.tcx.associated_item_def_ids(future_trait)[0];
- if output_assoc_item != predicate.projection_ty.item_def_id {
- span_bug!(
- cause_span,
- "projecting associated item `{:?}` from future, which is not Output `{:?}`",
- predicate.projection_ty.item_def_id,
- output_assoc_item,
- );
- }
-
- // Extract the type from the projection. Note that there can
- // be no bound variables in this type because the "self type"
- // does not have any regions in it.
- let output_ty = self.resolve_vars_if_possible(predicate.term);
- debug!("deduce_future_output_from_projection: output_ty={:?}", output_ty);
-        // This is a projection of `Future::Output`, so the term will always be a type.
- Some(output_ty.ty().unwrap())
- }
-
-    /// Converts the types that the user supplied, so that any errors
-    /// in them are reported, but returns a signature where all
-    /// parameters are of type `TyErr`.
- fn error_sig_of_closure(&self, decl: &hir::FnDecl<'_>) -> ty::PolyFnSig<'tcx> {
- let astconv: &dyn AstConv<'_> = self;
-
- let supplied_arguments = decl.inputs.iter().map(|a| {
- // Convert the types that the user supplied (if any), but ignore them.
- astconv.ast_ty_to_ty(a);
- self.tcx.ty_error()
- });
-
- if let hir::FnRetTy::Return(ref output) = decl.output {
- astconv.ast_ty_to_ty(&output);
- }
-
- let result = ty::Binder::dummy(self.tcx.mk_fn_sig(
- supplied_arguments,
- self.tcx.ty_error(),
- decl.c_variadic,
- hir::Unsafety::Normal,
- Abi::RustCall,
- ));
-
- debug!("supplied_sig_of_closure: result={:?}", result);
-
- result
- }
-
- fn closure_sigs(
- &self,
- expr_def_id: DefId,
- body: &hir::Body<'_>,
- bound_sig: ty::PolyFnSig<'tcx>,
- ) -> ClosureSignatures<'tcx> {
- let liberated_sig = self.tcx().liberate_late_bound_regions(expr_def_id, bound_sig);
- let liberated_sig = self.inh.normalize_associated_types_in(
- body.value.span,
- body.value.hir_id,
- self.param_env,
- liberated_sig,
- );
- ClosureSignatures { bound_sig, liberated_sig }
- }
-}
diff --git a/compiler/rustc_typeck/src/check/compare_method.rs b/compiler/rustc_typeck/src/check/compare_method.rs
deleted file mode 100644
index 666498403..000000000
--- a/compiler/rustc_typeck/src/check/compare_method.rs
+++ /dev/null
@@ -1,1547 +0,0 @@
-use super::potentially_plural_count;
-use crate::check::regionck::OutlivesEnvironmentExt;
-use crate::check::wfcheck;
-use crate::errors::LifetimesOrBoundsMismatchOnTrait;
-use rustc_data_structures::fx::FxHashSet;
-use rustc_errors::{pluralize, struct_span_err, Applicability, DiagnosticId, ErrorGuaranteed};
-use rustc_hir as hir;
-use rustc_hir::def::{DefKind, Res};
-use rustc_hir::intravisit;
-use rustc_hir::{GenericParamKind, ImplItemKind, TraitItemKind};
-use rustc_infer::infer::outlives::env::OutlivesEnvironment;
-use rustc_infer::infer::{self, TyCtxtInferExt};
-use rustc_infer::traits::util;
-use rustc_middle::ty::error::{ExpectedFound, TypeError};
-use rustc_middle::ty::subst::{InternalSubsts, Subst};
-use rustc_middle::ty::util::ExplicitSelf;
-use rustc_middle::ty::{self, DefIdTree};
-use rustc_middle::ty::{GenericParamDefKind, ToPredicate, TyCtxt};
-use rustc_span::Span;
-use rustc_trait_selection::traits::error_reporting::InferCtxtExt;
-use rustc_trait_selection::traits::{
- self, ObligationCause, ObligationCauseCode, ObligationCtxt, Reveal,
-};
-use std::iter;
-
-/// Checks that a method from an impl conforms to the signature of
-/// the same method as declared in the trait.
-///
-/// # Parameters
-///
-/// - `impl_m`: type of the method we are checking
-/// - `impl_m_span`: span to use for reporting errors
-/// - `trait_m`: the method in the trait
-/// - `impl_trait_ref`: the TraitRef corresponding to the trait implementation
-pub(crate) fn compare_impl_method<'tcx>(
- tcx: TyCtxt<'tcx>,
- impl_m: &ty::AssocItem,
- trait_m: &ty::AssocItem,
- impl_trait_ref: ty::TraitRef<'tcx>,
- trait_item_span: Option<Span>,
-) {
- debug!("compare_impl_method(impl_trait_ref={:?})", impl_trait_ref);
-
- let impl_m_span = tcx.def_span(impl_m.def_id);
-
- if let Err(_) = compare_self_type(tcx, impl_m, impl_m_span, trait_m, impl_trait_ref) {
- return;
- }
-
- if let Err(_) = compare_number_of_generics(tcx, impl_m, impl_m_span, trait_m, trait_item_span) {
- return;
- }
-
- if let Err(_) = compare_generic_param_kinds(tcx, impl_m, trait_m) {
- return;
- }
-
- if let Err(_) =
- compare_number_of_method_arguments(tcx, impl_m, impl_m_span, trait_m, trait_item_span)
- {
- return;
- }
-
- if let Err(_) = compare_synthetic_generics(tcx, impl_m, trait_m) {
- return;
- }
-
- if let Err(_) = compare_predicate_entailment(tcx, impl_m, impl_m_span, trait_m, impl_trait_ref)
- {
- return;
- }
-}
-
-fn compare_predicate_entailment<'tcx>(
- tcx: TyCtxt<'tcx>,
- impl_m: &ty::AssocItem,
- impl_m_span: Span,
- trait_m: &ty::AssocItem,
- impl_trait_ref: ty::TraitRef<'tcx>,
-) -> Result<(), ErrorGuaranteed> {
- let trait_to_impl_substs = impl_trait_ref.substs;
-
- // This node-id should be used for the `body_id` field on each
- // `ObligationCause` (and the `FnCtxt`).
- //
- // FIXME(@lcnr): remove that after removing `cause.body_id` from
- // obligations.
- let impl_m_hir_id = tcx.hir().local_def_id_to_hir_id(impl_m.def_id.expect_local());
- // We sometimes modify the span further down.
- let mut cause = ObligationCause::new(
- impl_m_span,
- impl_m_hir_id,
- ObligationCauseCode::CompareImplItemObligation {
- impl_item_def_id: impl_m.def_id.expect_local(),
- trait_item_def_id: trait_m.def_id,
- kind: impl_m.kind,
- },
- );
-
- // This code is best explained by example. Consider a trait:
- //
- // trait Trait<'t, T> {
- // fn method<'a, M>(t: &'t T, m: &'a M) -> Self;
- // }
- //
- // And an impl:
- //
- // impl<'i, 'j, U> Trait<'j, &'i U> for Foo {
- // fn method<'b, N>(t: &'j &'i U, m: &'b N) -> Foo;
- // }
- //
- // We wish to decide if those two method types are compatible.
- //
-    // We start out with trait_to_impl_substs, which maps the trait
- // type parameters to impl type parameters. This is taken from the
- // impl trait reference:
- //
- // trait_to_impl_substs = {'t => 'j, T => &'i U, Self => Foo}
- //
-    // We create a mapping `impl_to_placeholder_substs` that maps from the impl type
- // parameters to fresh types and regions. For type parameters,
- // this is the identity transform, but we could as well use any
- // placeholder types. For regions, we convert from bound to free
- // regions (Note: but only early-bound regions, i.e., those
- // declared on the impl or used in type parameter bounds).
- //
- // impl_to_placeholder_substs = {'i => 'i0, U => U0, N => N0 }
- //
-    // Now we can apply impl_to_placeholder_substs to the type of the impl method
- // to yield a new function type in terms of our fresh, placeholder
- // types:
- //
-    //     <'b> fn(t: &'i0 U0, m: &'b N0) -> Foo
- //
- // We now want to extract and substitute the type of the *trait*
- // method and compare it. To do so, we must create a compound
- // substitution by combining trait_to_impl_substs and
- // impl_to_placeholder_substs, and also adding a mapping for the method
- // type parameters. We extend the mapping to also include
- // the method parameters.
- //
- // trait_to_placeholder_substs = { T => &'i0 U0, Self => Foo, M => N0 }
- //
- // Applying this to the trait method type yields:
- //
-    //     <'a> fn(t: &'i0 U0, m: &'a N0) -> Foo
- //
-    // This type is the same except for the name of the bound region
-    // ('a vs 'b). However, the normal subtyping rules on fn types
-    // handle this kind of equivalence just fine.
- //
- // We now use these substitutions to ensure that all declared bounds are
- // satisfied by the implementation's method.
- //
- // We do this by creating a parameter environment which contains a
- // substitution corresponding to impl_to_placeholder_substs. We then build
- // trait_to_placeholder_substs and use it to convert the predicates contained
- // in the trait_m.generics to the placeholder form.
- //
- // Finally we register each of these predicates as an obligation in
- // a fresh FulfillmentCtxt, and invoke select_all_or_error.
-
- // Create mapping from impl to placeholder.
- let impl_to_placeholder_substs = InternalSubsts::identity_for_item(tcx, impl_m.def_id);
-
- // Create mapping from trait to placeholder.
- let trait_to_placeholder_substs =
- impl_to_placeholder_substs.rebase_onto(tcx, impl_m.container_id(tcx), trait_to_impl_substs);
- debug!("compare_impl_method: trait_to_placeholder_substs={:?}", trait_to_placeholder_substs);
-
- let impl_m_generics = tcx.generics_of(impl_m.def_id);
- let trait_m_generics = tcx.generics_of(trait_m.def_id);
- let impl_m_predicates = tcx.predicates_of(impl_m.def_id);
- let trait_m_predicates = tcx.predicates_of(trait_m.def_id);
-
- // Check region bounds.
- check_region_bounds_on_impl_item(tcx, impl_m, trait_m, &trait_m_generics, &impl_m_generics)?;
-
- // Create obligations for each predicate declared by the impl
- // definition in the context of the trait's parameter
- // environment. We can't just use `impl_env.caller_bounds`,
- // however, because we want to replace all late-bound regions with
- // region variables.
- let impl_predicates = tcx.predicates_of(impl_m_predicates.parent.unwrap());
- let mut hybrid_preds = impl_predicates.instantiate_identity(tcx);
-
- debug!("compare_impl_method: impl_bounds={:?}", hybrid_preds);
-
-    // This is the only tricky bit of the new way we check implementation methods.
-    // We need to build a set of predicates where only the method-level bounds
-    // come from the trait; all other bounds are assumed to be already satisfied
-    // by the implementation.
- //
- // We then register the obligations from the impl_m and check to see
- // if all constraints hold.
- hybrid_preds
- .predicates
- .extend(trait_m_predicates.instantiate_own(tcx, trait_to_placeholder_substs).predicates);
-
- // Construct trait parameter environment and then shift it into the placeholder viewpoint.
- // The key step here is to update the caller_bounds's predicates to be
- // the new hybrid bounds we computed.
- let normalize_cause = traits::ObligationCause::misc(impl_m_span, impl_m_hir_id);
- let param_env = ty::ParamEnv::new(
- tcx.intern_predicates(&hybrid_preds.predicates),
- Reveal::UserFacing,
- hir::Constness::NotConst,
- );
- let param_env = traits::normalize_param_env_or_error(tcx, param_env, normalize_cause);
-
- tcx.infer_ctxt().enter(|ref infcx| {
- let ocx = ObligationCtxt::new(infcx);
-
- debug!("compare_impl_method: caller_bounds={:?}", param_env.caller_bounds());
-
- let mut selcx = traits::SelectionContext::new(&infcx);
- let impl_m_own_bounds = impl_m_predicates.instantiate_own(tcx, impl_to_placeholder_substs);
- for (predicate, span) in iter::zip(impl_m_own_bounds.predicates, impl_m_own_bounds.spans) {
- let normalize_cause = traits::ObligationCause::misc(span, impl_m_hir_id);
- let traits::Normalized { value: predicate, obligations } =
- traits::normalize(&mut selcx, param_env, normalize_cause, predicate);
-
- ocx.register_obligations(obligations);
- let cause = ObligationCause::new(
- span,
- impl_m_hir_id,
- ObligationCauseCode::CompareImplItemObligation {
- impl_item_def_id: impl_m.def_id.expect_local(),
- trait_item_def_id: trait_m.def_id,
- kind: impl_m.kind,
- },
- );
- ocx.register_obligation(traits::Obligation::new(cause, param_env, predicate));
- }
-
- // We now need to check that the signature of the impl method is
- // compatible with that of the trait method. We do this by
- // checking that `impl_fty <: trait_fty`.
- //
- // FIXME. Unfortunately, this doesn't quite work right now because
- // associated type normalization is not integrated into subtype
- // checks. For the comparison to be valid, we need to
- // normalize the associated types in the impl/trait methods
- // first. However, because function types bind regions, just
- // calling `normalize_associated_types_in` would have no effect on
- // any associated types appearing in the fn arguments or return
- // type.
-
- // Compute placeholder form of impl and trait method tys.
- let tcx = infcx.tcx;
-
- let mut wf_tys = FxHashSet::default();
-
- let impl_sig = infcx.replace_bound_vars_with_fresh_vars(
- impl_m_span,
- infer::HigherRankedType,
- tcx.fn_sig(impl_m.def_id),
- );
-
- let norm_cause = ObligationCause::misc(impl_m_span, impl_m_hir_id);
- let impl_sig = ocx.normalize(norm_cause.clone(), param_env, impl_sig);
- let impl_fty = tcx.mk_fn_ptr(ty::Binder::dummy(impl_sig));
- debug!("compare_impl_method: impl_fty={:?}", impl_fty);
-
- let trait_sig = tcx.bound_fn_sig(trait_m.def_id).subst(tcx, trait_to_placeholder_substs);
- let trait_sig = tcx.liberate_late_bound_regions(impl_m.def_id, trait_sig);
- let trait_sig = ocx.normalize(norm_cause, param_env, trait_sig);
- // Add the resulting inputs and output as well-formed.
- wf_tys.extend(trait_sig.inputs_and_output.iter());
- let trait_fty = tcx.mk_fn_ptr(ty::Binder::dummy(trait_sig));
-
- debug!("compare_impl_method: trait_fty={:?}", trait_fty);
-
- // FIXME: We'd want to keep more accurate spans than "the method signature" when
- // processing the comparison between the trait and impl fn, but we sadly lose them
- // and point at the whole signature when a trait bound or specific input or output
- // type would be more appropriate. In other places we have a `Vec<Span>`
- // corresponding to their `Vec<Predicate>`, but we don't have that here.
- // Fixing this would improve the output of test `issue-83765.rs`.
- let sub_result = infcx
- .at(&cause, param_env)
- .sup(trait_fty, impl_fty)
- .map(|infer_ok| ocx.register_infer_ok_obligations(infer_ok));
-
- if let Err(terr) = sub_result {
- debug!("sub_types failed: impl ty {:?}, trait ty {:?}", impl_fty, trait_fty);
-
- let (impl_err_span, trait_err_span) =
- extract_spans_for_error_reporting(&infcx, &terr, &cause, impl_m, trait_m);
-
- cause.span = impl_err_span;
-
- let mut diag = struct_span_err!(
- tcx.sess,
- cause.span(),
- E0053,
- "method `{}` has an incompatible type for trait",
- trait_m.name
- );
- match &terr {
- TypeError::ArgumentMutability(0) | TypeError::ArgumentSorts(_, 0)
- if trait_m.fn_has_self_parameter =>
- {
- let ty = trait_sig.inputs()[0];
- let sugg = match ExplicitSelf::determine(ty, |_| ty == impl_trait_ref.self_ty())
- {
- ExplicitSelf::ByValue => "self".to_owned(),
- ExplicitSelf::ByReference(_, hir::Mutability::Not) => "&self".to_owned(),
- ExplicitSelf::ByReference(_, hir::Mutability::Mut) => {
- "&mut self".to_owned()
- }
- _ => format!("self: {ty}"),
- };
-
- // When the `impl` receiver is an arbitrary self type, like `self: Box<Self>`, the
-                // span points only at the type `Box<Self>`, but we want to cover the whole
- // argument pattern and type.
- let span = match tcx.hir().expect_impl_item(impl_m.def_id.expect_local()).kind {
- ImplItemKind::Fn(ref sig, body) => tcx
- .hir()
- .body_param_names(body)
- .zip(sig.decl.inputs.iter())
- .map(|(param, ty)| param.span.to(ty.span))
- .next()
- .unwrap_or(impl_err_span),
- _ => bug!("{:?} is not a method", impl_m),
- };
-
- diag.span_suggestion(
- span,
- "change the self-receiver type to match the trait",
- sugg,
- Applicability::MachineApplicable,
- );
- }
- TypeError::ArgumentMutability(i) | TypeError::ArgumentSorts(_, i) => {
- if trait_sig.inputs().len() == *i {
- // Suggestion to change output type. We do not suggest in `async` functions
- // to avoid complex logic or incorrect output.
- match tcx.hir().expect_impl_item(impl_m.def_id.expect_local()).kind {
- ImplItemKind::Fn(ref sig, _)
- if sig.header.asyncness == hir::IsAsync::NotAsync =>
- {
- let msg = "change the output type to match the trait";
- let ap = Applicability::MachineApplicable;
- match sig.decl.output {
- hir::FnRetTy::DefaultReturn(sp) => {
- let sugg = format!("-> {} ", trait_sig.output());
- diag.span_suggestion_verbose(sp, msg, sugg, ap);
- }
- hir::FnRetTy::Return(hir_ty) => {
- let sugg = trait_sig.output();
- diag.span_suggestion(hir_ty.span, msg, sugg, ap);
- }
- };
- }
- _ => {}
- };
- } else if let Some(trait_ty) = trait_sig.inputs().get(*i) {
- diag.span_suggestion(
- impl_err_span,
- "change the parameter type to match the trait",
- trait_ty,
- Applicability::MachineApplicable,
- );
- }
- }
- _ => {}
- }
-
- infcx.note_type_err(
- &mut diag,
- &cause,
- trait_err_span.map(|sp| (sp, "type in trait".to_owned())),
- Some(infer::ValuePairs::Terms(ExpectedFound {
- expected: trait_fty.into(),
- found: impl_fty.into(),
- })),
- &terr,
- false,
- false,
- );
-
- return Err(diag.emit());
- }
-
- // Check that all obligations are satisfied by the implementation's
- // version.
- let errors = ocx.select_all_or_error();
- if !errors.is_empty() {
- let reported = infcx.report_fulfillment_errors(&errors, None, false);
- return Err(reported);
- }
-
- // Finally, resolve all regions. This catches wily misuses of
- // lifetime parameters.
- let mut outlives_environment = OutlivesEnvironment::new(param_env);
- outlives_environment.add_implied_bounds(infcx, wf_tys, impl_m_hir_id);
- infcx.check_region_obligations_and_report_errors(
- impl_m.def_id.expect_local(),
- &outlives_environment,
- );
-
- Ok(())
- })
-}
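// Illustrative, self-contained sketch (not rustc internals): the substitution
// bookkeeping from the long comment above, replayed with plain string maps instead
// of `InternalSubsts`. The point is only the composition step: rebasing the impl's
// identity substs onto `trait_to_impl_substs` yields the trait-to-placeholder
// mapping used to instantiate the trait method's predicates and signature.
use std::collections::HashMap;

fn main() {
    // From the impl trait reference `impl<'i, 'j, U> Trait<'j, &'i U> for Foo`:
    let trait_to_impl: HashMap<&str, &str> =
        HashMap::from([("'t", "'j"), ("T", "&'i U"), ("Self", "Foo")]);

    // Impl-side parameters mapped to placeholders (identity for types,
    // bound-to-free for early-bound regions):
    let impl_to_placeholder: HashMap<&str, &str> =
        HashMap::from([("'i", "'i0"), ("U", "U0"), ("N", "N0")]);

    // Compose: replace impl parameters appearing in the trait mapping with their
    // placeholder forms, and add the method generics (`M => N0`).
    let mut trait_to_placeholder: HashMap<&str, String> = trait_to_impl
        .iter()
        .map(|(k, v)| {
            let mut s = v.to_string();
            for (p, q) in &impl_to_placeholder {
                s = s.replace(*p, *q);
            }
            (*k, s)
        })
        .collect();
    trait_to_placeholder.insert("M", "N0".to_string());

    assert_eq!(trait_to_placeholder["T"], "&'i0 U0");
    assert_eq!(trait_to_placeholder["Self"], "Foo");
    assert_eq!(trait_to_placeholder["M"], "N0");
}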
-
-fn check_region_bounds_on_impl_item<'tcx>(
- tcx: TyCtxt<'tcx>,
- impl_m: &ty::AssocItem,
- trait_m: &ty::AssocItem,
- trait_generics: &ty::Generics,
- impl_generics: &ty::Generics,
-) -> Result<(), ErrorGuaranteed> {
- let trait_params = trait_generics.own_counts().lifetimes;
- let impl_params = impl_generics.own_counts().lifetimes;
-
- debug!(
- "check_region_bounds_on_impl_item: \
- trait_generics={:?} \
- impl_generics={:?}",
- trait_generics, impl_generics
- );
-
- // Must have same number of early-bound lifetime parameters.
- // Unfortunately, if the user screws up the bounds, then this
- // will change classification between early and late. E.g.,
- // if in trait we have `<'a,'b:'a>`, and in impl we just have
- // `<'a,'b>`, then we have 2 early-bound lifetime parameters
-    // in trait but 0 in the impl, even though the user wrote two
-    // lifetime parameters in both places. Reporting "expected 2 but
-    // found 0" would therefore be confusing, so for now we give a
-    // deliberately vague error message.
- if trait_params != impl_params {
- let span = tcx
- .hir()
- .get_generics(impl_m.def_id.expect_local())
- .expect("expected impl item to have generics or else we can't compare them")
- .span;
- let generics_span = if let Some(local_def_id) = trait_m.def_id.as_local() {
- Some(
- tcx.hir()
- .get_generics(local_def_id)
- .expect("expected trait item to have generics or else we can't compare them")
- .span,
- )
- } else {
- None
- };
-
- let reported = tcx.sess.emit_err(LifetimesOrBoundsMismatchOnTrait {
- span,
- item_kind: assoc_item_kind_str(impl_m),
- ident: impl_m.ident(tcx),
- generics_span,
- });
- return Err(reported);
- }
-
- Ok(())
-}
-
-#[instrument(level = "debug", skip(infcx))]
-fn extract_spans_for_error_reporting<'a, 'tcx>(
- infcx: &infer::InferCtxt<'a, 'tcx>,
- terr: &TypeError<'_>,
- cause: &ObligationCause<'tcx>,
- impl_m: &ty::AssocItem,
- trait_m: &ty::AssocItem,
-) -> (Span, Option<Span>) {
- let tcx = infcx.tcx;
- let mut impl_args = match tcx.hir().expect_impl_item(impl_m.def_id.expect_local()).kind {
- ImplItemKind::Fn(ref sig, _) => {
- sig.decl.inputs.iter().map(|t| t.span).chain(iter::once(sig.decl.output.span()))
- }
- _ => bug!("{:?} is not a method", impl_m),
- };
- let trait_args =
- trait_m.def_id.as_local().map(|def_id| match tcx.hir().expect_trait_item(def_id).kind {
- TraitItemKind::Fn(ref sig, _) => {
- sig.decl.inputs.iter().map(|t| t.span).chain(iter::once(sig.decl.output.span()))
- }
- _ => bug!("{:?} is not a TraitItemKind::Fn", trait_m),
- });
-
- match *terr {
- TypeError::ArgumentMutability(i) => {
- (impl_args.nth(i).unwrap(), trait_args.and_then(|mut args| args.nth(i)))
- }
- TypeError::ArgumentSorts(ExpectedFound { .. }, i) => {
- (impl_args.nth(i).unwrap(), trait_args.and_then(|mut args| args.nth(i)))
- }
- _ => (cause.span(), tcx.hir().span_if_local(trait_m.def_id)),
- }
-}
-
-fn compare_self_type<'tcx>(
- tcx: TyCtxt<'tcx>,
- impl_m: &ty::AssocItem,
- impl_m_span: Span,
- trait_m: &ty::AssocItem,
- impl_trait_ref: ty::TraitRef<'tcx>,
-) -> Result<(), ErrorGuaranteed> {
- // Try to give more informative error messages about self typing
- // mismatches. Note that any mismatch will also be detected
- // below, where we construct a canonical function type that
-    // includes the self parameter as a normal parameter. It's just
-    // that the error messages produced by that later check are a bit
-    // more inscrutable, particularly when one method has no self.
-
- let self_string = |method: &ty::AssocItem| {
- let untransformed_self_ty = match method.container {
- ty::ImplContainer => impl_trait_ref.self_ty(),
- ty::TraitContainer => tcx.types.self_param,
- };
- let self_arg_ty = tcx.fn_sig(method.def_id).input(0);
- let param_env = ty::ParamEnv::reveal_all();
-
- tcx.infer_ctxt().enter(|infcx| {
- let self_arg_ty = tcx.liberate_late_bound_regions(method.def_id, self_arg_ty);
- let can_eq_self = |ty| infcx.can_eq(param_env, untransformed_self_ty, ty).is_ok();
- match ExplicitSelf::determine(self_arg_ty, can_eq_self) {
- ExplicitSelf::ByValue => "self".to_owned(),
- ExplicitSelf::ByReference(_, hir::Mutability::Not) => "&self".to_owned(),
- ExplicitSelf::ByReference(_, hir::Mutability::Mut) => "&mut self".to_owned(),
- _ => format!("self: {self_arg_ty}"),
- }
- })
- };
-
- match (trait_m.fn_has_self_parameter, impl_m.fn_has_self_parameter) {
- (false, false) | (true, true) => {}
-
- (false, true) => {
- let self_descr = self_string(impl_m);
- let mut err = struct_span_err!(
- tcx.sess,
- impl_m_span,
- E0185,
- "method `{}` has a `{}` declaration in the impl, but not in the trait",
- trait_m.name,
- self_descr
- );
- err.span_label(impl_m_span, format!("`{self_descr}` used in impl"));
- if let Some(span) = tcx.hir().span_if_local(trait_m.def_id) {
- err.span_label(span, format!("trait method declared without `{self_descr}`"));
- } else {
- err.note_trait_signature(trait_m.name, trait_m.signature(tcx));
- }
- let reported = err.emit();
- return Err(reported);
- }
-
- (true, false) => {
- let self_descr = self_string(trait_m);
- let mut err = struct_span_err!(
- tcx.sess,
- impl_m_span,
- E0186,
- "method `{}` has a `{}` declaration in the trait, but not in the impl",
- trait_m.name,
- self_descr
- );
- err.span_label(impl_m_span, format!("expected `{self_descr}` in impl"));
- if let Some(span) = tcx.hir().span_if_local(trait_m.def_id) {
- err.span_label(span, format!("`{self_descr}` used in trait"));
- } else {
- err.note_trait_signature(trait_m.name, trait_m.signature(tcx));
- }
- let reported = err.emit();
- return Err(reported);
- }
- }
-
- Ok(())
-}
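// Illustrative, self-contained sketch (not rustc internals): the receiver shapes
// that `self_string` above classifies via `ExplicitSelf::determine` -- by value,
// by shared or mutable reference, and an arbitrary self type written out as
// `self: Box<Self>`.
struct Widget;

impl Widget {
    fn by_value(self) {}
    fn by_ref(&self) {}
    fn by_mut_ref(&mut self) {}
    fn by_box(self: Box<Self>) {}
}

fn main() {
    Widget.by_value();
    Widget.by_ref();
    let mut w = Widget;
    w.by_mut_ref();
    Box::new(Widget).by_box();
}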
-
-/// Checks that the number of generics on a given assoc item in a trait impl is the same
-/// as the number of generics on the respective assoc item in the trait definition.
-///
-/// For example, this function emits the errors in the following code:
-/// ```
-/// trait Trait {
-/// fn foo();
-/// type Assoc<T>;
-/// }
-///
-/// impl Trait for () {
-/// fn foo<T>() {}
-/// //~^ error
-/// type Assoc = u32;
-/// //~^ error
-/// }
-/// ```
-///
-/// Notably this does not error on `foo<T>` implemented as `foo<const N: u8>` or
-/// `foo<const N: u8>` implemented as `foo<const N: u32>`. This is handled in
-/// [`compare_generic_param_kinds`]. This function also does not handle lifetime parameters
-fn compare_number_of_generics<'tcx>(
- tcx: TyCtxt<'tcx>,
- impl_: &ty::AssocItem,
- _impl_span: Span,
- trait_: &ty::AssocItem,
- trait_span: Option<Span>,
-) -> Result<(), ErrorGuaranteed> {
- let trait_own_counts = tcx.generics_of(trait_.def_id).own_counts();
- let impl_own_counts = tcx.generics_of(impl_.def_id).own_counts();
-
- // This avoids us erroring on `foo<T>` implemented as `foo<const N: u8>` as this is implemented
- // in `compare_generic_param_kinds` which will give a nicer error message than something like:
- // "expected 1 type parameter, found 0 type parameters"
- if (trait_own_counts.types + trait_own_counts.consts)
- == (impl_own_counts.types + impl_own_counts.consts)
- {
- return Ok(());
- }
-
- let matchings = [
- ("type", trait_own_counts.types, impl_own_counts.types),
- ("const", trait_own_counts.consts, impl_own_counts.consts),
- ];
-
- let item_kind = assoc_item_kind_str(impl_);
-
- let mut err_occurred = None;
- for (kind, trait_count, impl_count) in matchings {
- if impl_count != trait_count {
- let arg_spans = |kind: ty::AssocKind, generics: &hir::Generics<'_>| {
- let mut spans = generics
- .params
- .iter()
- .filter(|p| match p.kind {
- hir::GenericParamKind::Lifetime {
- kind: hir::LifetimeParamKind::Elided,
- } => {
- // A fn can have an arbitrary number of extra elided lifetimes for the
- // same signature.
- !matches!(kind, ty::AssocKind::Fn)
- }
- _ => true,
- })
- .map(|p| p.span)
- .collect::<Vec<Span>>();
- if spans.is_empty() {
- spans = vec![generics.span]
- }
- spans
- };
- let (trait_spans, impl_trait_spans) = if let Some(def_id) = trait_.def_id.as_local() {
- let trait_item = tcx.hir().expect_trait_item(def_id);
- let arg_spans: Vec<Span> = arg_spans(trait_.kind, trait_item.generics);
- let impl_trait_spans: Vec<Span> = trait_item
- .generics
- .params
- .iter()
- .filter_map(|p| match p.kind {
- GenericParamKind::Type { synthetic: true, .. } => Some(p.span),
- _ => None,
- })
- .collect();
- (Some(arg_spans), impl_trait_spans)
- } else {
- (trait_span.map(|s| vec![s]), vec![])
- };
-
- let impl_item = tcx.hir().expect_impl_item(impl_.def_id.expect_local());
- let impl_item_impl_trait_spans: Vec<Span> = impl_item
- .generics
- .params
- .iter()
- .filter_map(|p| match p.kind {
- GenericParamKind::Type { synthetic: true, .. } => Some(p.span),
- _ => None,
- })
- .collect();
- let spans = arg_spans(impl_.kind, impl_item.generics);
- let span = spans.first().copied();
-
- let mut err = tcx.sess.struct_span_err_with_code(
- spans,
- &format!(
- "{} `{}` has {} {kind} parameter{} but its trait \
- declaration has {} {kind} parameter{}",
- item_kind,
- trait_.name,
- impl_count,
- pluralize!(impl_count),
- trait_count,
- pluralize!(trait_count),
- kind = kind,
- ),
- DiagnosticId::Error("E0049".into()),
- );
-
- let mut suffix = None;
-
- if let Some(spans) = trait_spans {
- let mut spans = spans.iter();
- if let Some(span) = spans.next() {
- err.span_label(
- *span,
- format!(
- "expected {} {} parameter{}",
- trait_count,
- kind,
- pluralize!(trait_count),
- ),
- );
- }
- for span in spans {
- err.span_label(*span, "");
- }
- } else {
- suffix = Some(format!(", expected {trait_count}"));
- }
-
- if let Some(span) = span {
- err.span_label(
- span,
- format!(
- "found {} {} parameter{}{}",
- impl_count,
- kind,
- pluralize!(impl_count),
- suffix.unwrap_or_else(String::new),
- ),
- );
- }
-
- for span in impl_trait_spans.iter().chain(impl_item_impl_trait_spans.iter()) {
- err.span_label(*span, "`impl Trait` introduces an implicit type parameter");
- }
-
- let reported = err.emit();
- err_occurred = Some(reported);
- }
- }
-
- if let Some(reported) = err_occurred { Err(reported) } else { Ok(()) }
-}
-
-fn compare_number_of_method_arguments<'tcx>(
- tcx: TyCtxt<'tcx>,
- impl_m: &ty::AssocItem,
- impl_m_span: Span,
- trait_m: &ty::AssocItem,
- trait_item_span: Option<Span>,
-) -> Result<(), ErrorGuaranteed> {
- let impl_m_fty = tcx.fn_sig(impl_m.def_id);
- let trait_m_fty = tcx.fn_sig(trait_m.def_id);
- let trait_number_args = trait_m_fty.inputs().skip_binder().len();
- let impl_number_args = impl_m_fty.inputs().skip_binder().len();
- if trait_number_args != impl_number_args {
- let trait_span = if let Some(def_id) = trait_m.def_id.as_local() {
- match tcx.hir().expect_trait_item(def_id).kind {
- TraitItemKind::Fn(ref trait_m_sig, _) => {
- let pos = if trait_number_args > 0 { trait_number_args - 1 } else { 0 };
- if let Some(arg) = trait_m_sig.decl.inputs.get(pos) {
- Some(if pos == 0 {
- arg.span
- } else {
- arg.span.with_lo(trait_m_sig.decl.inputs[0].span.lo())
- })
- } else {
- trait_item_span
- }
- }
- _ => bug!("{:?} is not a method", impl_m),
- }
- } else {
- trait_item_span
- };
- let impl_span = match tcx.hir().expect_impl_item(impl_m.def_id.expect_local()).kind {
- ImplItemKind::Fn(ref impl_m_sig, _) => {
- let pos = if impl_number_args > 0 { impl_number_args - 1 } else { 0 };
- if let Some(arg) = impl_m_sig.decl.inputs.get(pos) {
- if pos == 0 {
- arg.span
- } else {
- arg.span.with_lo(impl_m_sig.decl.inputs[0].span.lo())
- }
- } else {
- impl_m_span
- }
- }
- _ => bug!("{:?} is not a method", impl_m),
- };
- let mut err = struct_span_err!(
- tcx.sess,
- impl_span,
- E0050,
- "method `{}` has {} but the declaration in trait `{}` has {}",
- trait_m.name,
- potentially_plural_count(impl_number_args, "parameter"),
- tcx.def_path_str(trait_m.def_id),
- trait_number_args
- );
- if let Some(trait_span) = trait_span {
- err.span_label(
- trait_span,
- format!(
- "trait requires {}",
- potentially_plural_count(trait_number_args, "parameter")
- ),
- );
- } else {
- err.note_trait_signature(trait_m.name, trait_m.signature(tcx));
- }
- err.span_label(
- impl_span,
- format!(
- "expected {}, found {}",
- potentially_plural_count(trait_number_args, "parameter"),
- impl_number_args
- ),
- );
- let reported = err.emit();
- return Err(reported);
- }
-
- Ok(())
-}
-
-fn compare_synthetic_generics<'tcx>(
- tcx: TyCtxt<'tcx>,
- impl_m: &ty::AssocItem,
- trait_m: &ty::AssocItem,
-) -> Result<(), ErrorGuaranteed> {
- // FIXME(chrisvittal) Clean up this function, list of FIXME items:
- // 1. Better messages for the span labels
- // 2. Explanation as to what is going on
- // If we get here, we already have the same number of generics, so the zip will
- // be okay.
- let mut error_found = None;
- let impl_m_generics = tcx.generics_of(impl_m.def_id);
- let trait_m_generics = tcx.generics_of(trait_m.def_id);
- let impl_m_type_params = impl_m_generics.params.iter().filter_map(|param| match param.kind {
- GenericParamDefKind::Type { synthetic, .. } => Some((param.def_id, synthetic)),
- GenericParamDefKind::Lifetime | GenericParamDefKind::Const { .. } => None,
- });
- let trait_m_type_params = trait_m_generics.params.iter().filter_map(|param| match param.kind {
- GenericParamDefKind::Type { synthetic, .. } => Some((param.def_id, synthetic)),
- GenericParamDefKind::Lifetime | GenericParamDefKind::Const { .. } => None,
- });
- for ((impl_def_id, impl_synthetic), (trait_def_id, trait_synthetic)) in
- iter::zip(impl_m_type_params, trait_m_type_params)
- {
- if impl_synthetic != trait_synthetic {
- let impl_def_id = impl_def_id.expect_local();
- let impl_hir_id = tcx.hir().local_def_id_to_hir_id(impl_def_id);
- let impl_span = tcx.hir().span(impl_hir_id);
- let trait_span = tcx.def_span(trait_def_id);
- let mut err = struct_span_err!(
- tcx.sess,
- impl_span,
- E0643,
- "method `{}` has incompatible signature for trait",
- trait_m.name
- );
- err.span_label(trait_span, "declaration in trait here");
- match (impl_synthetic, trait_synthetic) {
- // The case where the impl method uses `impl Trait` but the trait method uses
- // explicit generics
- (true, false) => {
- err.span_label(impl_span, "expected generic parameter, found `impl Trait`");
- (|| {
- // try taking the name from the trait impl
- // FIXME: this is obviously suboptimal since the name can already be used
- // as another generic argument
- let new_name = tcx.sess.source_map().span_to_snippet(trait_span).ok()?;
- let trait_m = trait_m.def_id.as_local()?;
- let trait_m = tcx.hir().trait_item(hir::TraitItemId { def_id: trait_m });
-
- let impl_m = impl_m.def_id.as_local()?;
- let impl_m = tcx.hir().impl_item(hir::ImplItemId { def_id: impl_m });
-
- // in case there are no generics, take the spot between the function name
- // and the opening paren of the argument list
- let new_generics_span =
- tcx.sess.source_map().generate_fn_name_span(impl_span)?.shrink_to_hi();
- // in case there are generics, just replace them
- let generics_span =
- impl_m.generics.span.substitute_dummy(new_generics_span);
- // replace with the generics from the trait
- let new_generics =
- tcx.sess.source_map().span_to_snippet(trait_m.generics.span).ok()?;
-
- err.multipart_suggestion(
- "try changing the `impl Trait` argument to a generic parameter",
- vec![
- // replace `impl Trait` with `T`
- (impl_span, new_name),
- // replace impl method generics with trait method generics
- // This isn't quite right, as users might have changed the names
- // of the generics, but it works for the common case
- (generics_span, new_generics),
- ],
- Applicability::MaybeIncorrect,
- );
- Some(())
- })();
- }
- // The case where the trait method uses `impl Trait`, but the impl method uses
- // explicit generics.
- (false, true) => {
- err.span_label(impl_span, "expected `impl Trait`, found generic parameter");
- (|| {
- let impl_m = impl_m.def_id.as_local()?;
- let impl_m = tcx.hir().impl_item(hir::ImplItemId { def_id: impl_m });
- let input_tys = match impl_m.kind {
- hir::ImplItemKind::Fn(ref sig, _) => sig.decl.inputs,
- _ => unreachable!(),
- };
- struct Visitor(Option<Span>, hir::def_id::LocalDefId);
- impl<'v> intravisit::Visitor<'v> for Visitor {
- fn visit_ty(&mut self, ty: &'v hir::Ty<'v>) {
- intravisit::walk_ty(self, ty);
- if let hir::TyKind::Path(hir::QPath::Resolved(None, ref path)) =
- ty.kind
- && let Res::Def(DefKind::TyParam, def_id) = path.res
- && def_id == self.1.to_def_id()
- {
- self.0 = Some(ty.span);
- }
- }
- }
- let mut visitor = Visitor(None, impl_def_id);
- for ty in input_tys {
- intravisit::Visitor::visit_ty(&mut visitor, ty);
- }
- let span = visitor.0?;
-
- let bounds = impl_m.generics.bounds_for_param(impl_def_id).next()?.bounds;
- let bounds = bounds.first()?.span().to(bounds.last()?.span());
- let bounds = tcx.sess.source_map().span_to_snippet(bounds).ok()?;
-
- err.multipart_suggestion(
- "try removing the generic parameter and using `impl Trait` instead",
- vec![
- // delete generic parameters
- (impl_m.generics.span, String::new()),
- // replace param usage with `impl Trait`
- (span, format!("impl {bounds}")),
- ],
- Applicability::MaybeIncorrect,
- );
- Some(())
- })();
- }
- _ => unreachable!(),
- }
- let reported = err.emit();
- error_found = Some(reported);
- }
- }
- if let Some(reported) = error_found { Err(reported) } else { Ok(()) }
-}
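// Illustrative, self-contained sketch (not rustc internals): the two spellings
// that `compare_synthetic_generics` above reconciles. An `impl Trait` argument
// introduces a synthetic type parameter, so the two functions below accept the
// same callers; the suggestions above rewrite one form into the other.
use std::fmt::Display;

fn show_impl_trait(value: impl Display) -> String {
    format!("{value}")
}

fn show_generic<T: Display>(value: T) -> String {
    format!("{value}")
}

fn main() {
    assert_eq!(show_impl_trait(42), show_generic(42));
}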
-
-/// Checks that all parameters in the generics of a given assoc item in a trait impl have
-/// the same kind as the respective generic parameter in the trait def.
-///
-/// For example all 4 errors in the following code are emitted here:
-/// ```
-/// trait Foo {
-/// fn foo<const N: u8>();
-/// type bar<const N: u8>;
-/// fn baz<const N: u32>();
-/// type blah<T>;
-/// }
-///
-/// impl Foo for () {
-/// fn foo<const N: u64>() {}
-/// //~^ error
-/// type bar<const N: u64> {}
-/// //~^ error
-/// fn baz<T>() {}
-/// //~^ error
-/// type blah<const N: i64> = u32;
-/// //~^ error
-/// }
-/// ```
-///
-/// This function does not handle lifetime parameters
-fn compare_generic_param_kinds<'tcx>(
- tcx: TyCtxt<'tcx>,
- impl_item: &ty::AssocItem,
- trait_item: &ty::AssocItem,
-) -> Result<(), ErrorGuaranteed> {
- assert_eq!(impl_item.kind, trait_item.kind);
-
- let ty_const_params_of = |def_id| {
- tcx.generics_of(def_id).params.iter().filter(|param| {
- matches!(
- param.kind,
- GenericParamDefKind::Const { .. } | GenericParamDefKind::Type { .. }
- )
- })
- };
-
- for (param_impl, param_trait) in
- iter::zip(ty_const_params_of(impl_item.def_id), ty_const_params_of(trait_item.def_id))
- {
- use GenericParamDefKind::*;
- if match (&param_impl.kind, &param_trait.kind) {
- (Const { .. }, Const { .. })
- if tcx.type_of(param_impl.def_id) != tcx.type_of(param_trait.def_id) =>
- {
- true
- }
- (Const { .. }, Type { .. }) | (Type { .. }, Const { .. }) => true,
- // this is exhaustive so that anyone adding new generic param kinds knows
- // to make sure this error is reported for them.
- (Const { .. }, Const { .. }) | (Type { .. }, Type { .. }) => false,
- (Lifetime { .. }, _) | (_, Lifetime { .. }) => unreachable!(),
- } {
- let param_impl_span = tcx.def_span(param_impl.def_id);
- let param_trait_span = tcx.def_span(param_trait.def_id);
-
- let mut err = struct_span_err!(
- tcx.sess,
- param_impl_span,
- E0053,
- "{} `{}` has an incompatible generic parameter for trait `{}`",
- assoc_item_kind_str(&impl_item),
- trait_item.name,
- &tcx.def_path_str(tcx.parent(trait_item.def_id))
- );
-
- let make_param_message = |prefix: &str, param: &ty::GenericParamDef| match param.kind {
- Const { .. } => {
- format!("{} const parameter of type `{}`", prefix, tcx.type_of(param.def_id))
- }
- Type { .. } => format!("{} type parameter", prefix),
- Lifetime { .. } => unreachable!(),
- };
-
- let trait_header_span = tcx.def_ident_span(tcx.parent(trait_item.def_id)).unwrap();
- err.span_label(trait_header_span, "");
- err.span_label(param_trait_span, make_param_message("expected", param_trait));
-
- let impl_header_span = tcx.def_span(tcx.parent(impl_item.def_id));
- err.span_label(impl_header_span, "");
- err.span_label(param_impl_span, make_param_message("found", param_impl));
-
- let reported = err.emit();
- return Err(reported);
- }
- }
-
- Ok(())
-}
-
-pub(crate) fn compare_const_impl<'tcx>(
- tcx: TyCtxt<'tcx>,
- impl_c: &ty::AssocItem,
- impl_c_span: Span,
- trait_c: &ty::AssocItem,
- impl_trait_ref: ty::TraitRef<'tcx>,
-) {
- debug!("compare_const_impl(impl_trait_ref={:?})", impl_trait_ref);
-
- tcx.infer_ctxt().enter(|infcx| {
- let param_env = tcx.param_env(impl_c.def_id);
- let ocx = ObligationCtxt::new(&infcx);
-
- // The below is for the most part highly similar to the procedure
- // for methods above. It is simpler in many respects, especially
- // because we shouldn't really have to deal with lifetimes or
- // predicates. In fact some of this should probably be put into
- // shared functions because of DRY violations...
- let trait_to_impl_substs = impl_trait_ref.substs;
-
- // Create a parameter environment that represents the implementation's
- // method.
- let impl_c_hir_id = tcx.hir().local_def_id_to_hir_id(impl_c.def_id.expect_local());
-
- // Compute placeholder form of impl and trait const tys.
- let impl_ty = tcx.type_of(impl_c.def_id);
- let trait_ty = tcx.bound_type_of(trait_c.def_id).subst(tcx, trait_to_impl_substs);
- let mut cause = ObligationCause::new(
- impl_c_span,
- impl_c_hir_id,
- ObligationCauseCode::CompareImplItemObligation {
- impl_item_def_id: impl_c.def_id.expect_local(),
- trait_item_def_id: trait_c.def_id,
- kind: impl_c.kind,
- },
- );
-
- // There is no "body" here, so just pass dummy id.
- let impl_ty = ocx.normalize(cause.clone(), param_env, impl_ty);
-
- debug!("compare_const_impl: impl_ty={:?}", impl_ty);
-
- let trait_ty = ocx.normalize(cause.clone(), param_env, trait_ty);
-
- debug!("compare_const_impl: trait_ty={:?}", trait_ty);
-
- let err = infcx
- .at(&cause, param_env)
- .sup(trait_ty, impl_ty)
- .map(|ok| ocx.register_infer_ok_obligations(ok));
-
- if let Err(terr) = err {
- debug!(
- "checking associated const for compatibility: impl ty {:?}, trait ty {:?}",
- impl_ty, trait_ty
- );
-
- // Locate the Span containing just the type of the offending impl
- match tcx.hir().expect_impl_item(impl_c.def_id.expect_local()).kind {
- ImplItemKind::Const(ref ty, _) => cause.span = ty.span,
- _ => bug!("{:?} is not a impl const", impl_c),
- }
-
- let mut diag = struct_span_err!(
- tcx.sess,
- cause.span,
- E0326,
- "implemented const `{}` has an incompatible type for trait",
- trait_c.name
- );
-
- let trait_c_span = trait_c.def_id.as_local().map(|trait_c_def_id| {
- // Add a label to the Span containing just the type of the const
- match tcx.hir().expect_trait_item(trait_c_def_id).kind {
- TraitItemKind::Const(ref ty, _) => ty.span,
- _ => bug!("{:?} is not a trait const", trait_c),
- }
- });
-
- infcx.note_type_err(
- &mut diag,
- &cause,
- trait_c_span.map(|span| (span, "type in trait".to_owned())),
- Some(infer::ValuePairs::Terms(ExpectedFound {
- expected: trait_ty.into(),
- found: impl_ty.into(),
- })),
- &terr,
- false,
- false,
- );
- diag.emit();
- }
-
- // Check that all obligations are satisfied by the implementation's
- // version.
- let errors = ocx.select_all_or_error();
- if !errors.is_empty() {
- infcx.report_fulfillment_errors(&errors, None, false);
- return;
- }
-
- let outlives_environment = OutlivesEnvironment::new(param_env);
- infcx.check_region_obligations_and_report_errors(
- impl_c.def_id.expect_local(),
- &outlives_environment,
- );
- });
-}
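// Illustrative, self-contained sketch (not rustc internals): the shape of item
// that `compare_const_impl` above checks. The impl's const must have a type
// compatible with the trait's declared type after applying the impl substitutions
// (here, both are `u32`).
trait HasAnswer {
    const ANSWER: u32;
}

struct Everything;

impl HasAnswer for Everything {
    const ANSWER: u32 = 42;
}

fn main() {
    assert_eq!(Everything::ANSWER, 42);
}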
-
-pub(crate) fn compare_ty_impl<'tcx>(
- tcx: TyCtxt<'tcx>,
- impl_ty: &ty::AssocItem,
- impl_ty_span: Span,
- trait_ty: &ty::AssocItem,
- impl_trait_ref: ty::TraitRef<'tcx>,
- trait_item_span: Option<Span>,
-) {
- debug!("compare_impl_type(impl_trait_ref={:?})", impl_trait_ref);
-
- let _: Result<(), ErrorGuaranteed> = (|| {
- compare_number_of_generics(tcx, impl_ty, impl_ty_span, trait_ty, trait_item_span)?;
-
- compare_generic_param_kinds(tcx, impl_ty, trait_ty)?;
-
- let sp = tcx.def_span(impl_ty.def_id);
- compare_type_predicate_entailment(tcx, impl_ty, sp, trait_ty, impl_trait_ref)?;
-
- check_type_bounds(tcx, trait_ty, impl_ty, impl_ty_span, impl_trait_ref)
- })();
-}
-
-/// The equivalent of [compare_predicate_entailment], but for associated types
-/// instead of associated functions.
-fn compare_type_predicate_entailment<'tcx>(
- tcx: TyCtxt<'tcx>,
- impl_ty: &ty::AssocItem,
- impl_ty_span: Span,
- trait_ty: &ty::AssocItem,
- impl_trait_ref: ty::TraitRef<'tcx>,
-) -> Result<(), ErrorGuaranteed> {
- let impl_substs = InternalSubsts::identity_for_item(tcx, impl_ty.def_id);
- let trait_to_impl_substs =
- impl_substs.rebase_onto(tcx, impl_ty.container_id(tcx), impl_trait_ref.substs);
-
- let impl_ty_generics = tcx.generics_of(impl_ty.def_id);
- let trait_ty_generics = tcx.generics_of(trait_ty.def_id);
- let impl_ty_predicates = tcx.predicates_of(impl_ty.def_id);
- let trait_ty_predicates = tcx.predicates_of(trait_ty.def_id);
-
- check_region_bounds_on_impl_item(
- tcx,
- impl_ty,
- trait_ty,
- &trait_ty_generics,
- &impl_ty_generics,
- )?;
-
- let impl_ty_own_bounds = impl_ty_predicates.instantiate_own(tcx, impl_substs);
-
- if impl_ty_own_bounds.is_empty() {
- // Nothing to check.
- return Ok(());
- }
-
- // This `HirId` should be used for the `body_id` field on each
- // `ObligationCause` (and the `FnCtxt`). This is what
- // `regionck_item` expects.
- let impl_ty_hir_id = tcx.hir().local_def_id_to_hir_id(impl_ty.def_id.expect_local());
- debug!("compare_type_predicate_entailment: trait_to_impl_substs={:?}", trait_to_impl_substs);
-
- // The predicates declared by the impl definition, the trait and the
- // associated type in the trait are assumed.
- let impl_predicates = tcx.predicates_of(impl_ty_predicates.parent.unwrap());
- let mut hybrid_preds = impl_predicates.instantiate_identity(tcx);
- hybrid_preds
- .predicates
- .extend(trait_ty_predicates.instantiate_own(tcx, trait_to_impl_substs).predicates);
-
- debug!("compare_type_predicate_entailment: bounds={:?}", hybrid_preds);
-
- let normalize_cause = traits::ObligationCause::misc(impl_ty_span, impl_ty_hir_id);
- let param_env = ty::ParamEnv::new(
- tcx.intern_predicates(&hybrid_preds.predicates),
- Reveal::UserFacing,
- hir::Constness::NotConst,
- );
- let param_env = traits::normalize_param_env_or_error(tcx, param_env, normalize_cause);
- tcx.infer_ctxt().enter(|infcx| {
- let ocx = ObligationCtxt::new(&infcx);
-
- debug!("compare_type_predicate_entailment: caller_bounds={:?}", param_env.caller_bounds());
-
- let mut selcx = traits::SelectionContext::new(&infcx);
-
- assert_eq!(impl_ty_own_bounds.predicates.len(), impl_ty_own_bounds.spans.len());
- for (span, predicate) in
- std::iter::zip(impl_ty_own_bounds.spans, impl_ty_own_bounds.predicates)
- {
- let cause = ObligationCause::misc(span, impl_ty_hir_id);
- let traits::Normalized { value: predicate, obligations } =
- traits::normalize(&mut selcx, param_env, cause, predicate);
-
- let cause = ObligationCause::new(
- span,
- impl_ty_hir_id,
- ObligationCauseCode::CompareImplItemObligation {
- impl_item_def_id: impl_ty.def_id.expect_local(),
- trait_item_def_id: trait_ty.def_id,
- kind: impl_ty.kind,
- },
- );
- ocx.register_obligations(obligations);
- ocx.register_obligation(traits::Obligation::new(cause, param_env, predicate));
- }
-
- // Check that all obligations are satisfied by the implementation's
- // version.
- let errors = ocx.select_all_or_error();
- if !errors.is_empty() {
- let reported = infcx.report_fulfillment_errors(&errors, None, false);
- return Err(reported);
- }
-
- // Finally, resolve all regions. This catches wily misuses of
- // lifetime parameters.
- let outlives_environment = OutlivesEnvironment::new(param_env);
- infcx.check_region_obligations_and_report_errors(
- impl_ty.def_id.expect_local(),
- &outlives_environment,
- );
-
- Ok(())
- })
-}
-
-/// Validate that `ProjectionCandidate`s created for this associated type will
-/// be valid.
-///
-/// Usually given
-///
-/// trait X { type Y: Copy } impl X for T { type Y = S; }
-///
-/// We are able to normalize `<T as X>::Y` to `S`, and so when we check the
-/// impl is well-formed we have to prove `S: Copy`.
-///
-/// For default associated types the normalization is not possible (the value
-/// from the impl could be overridden). We also can't normalize generic
-/// associated types (yet) because they contain bound parameters.
-#[tracing::instrument(level = "debug", skip(tcx))]
-pub fn check_type_bounds<'tcx>(
- tcx: TyCtxt<'tcx>,
- trait_ty: &ty::AssocItem,
- impl_ty: &ty::AssocItem,
- impl_ty_span: Span,
- impl_trait_ref: ty::TraitRef<'tcx>,
-) -> Result<(), ErrorGuaranteed> {
- // Given
- //
- // impl<A, B> Foo<u32> for (A, B) {
- // type Bar<C> =...
- // }
- //
-    //     - `impl_trait_ref` would be `<(A, B) as Foo<u32>>`
- // - `impl_ty_substs` would be `[A, B, ^0.0]` (`^0.0` here is the bound var with db 0 and index 0)
- // - `rebased_substs` would be `[(A, B), u32, ^0.0]`, combining the substs from
- // the *trait* with the generic associated type parameters (as bound vars).
- //
- // A note regarding the use of bound vars here:
- // Imagine as an example
- // ```
- // trait Family {
- // type Member<C: Eq>;
- // }
- //
- // impl Family for VecFamily {
- // type Member<C: Eq> = i32;
- // }
- // ```
- // Here, we would generate
- // ```notrust
- // forall<C> { Normalize(<VecFamily as Family>::Member<C> => i32) }
- // ```
- // when we really would like to generate
- // ```notrust
- // forall<C> { Normalize(<VecFamily as Family>::Member<C> => i32) :- Implemented(C: Eq) }
- // ```
- // But, this is probably fine, because although the first clause can be used with types C that
- // do not implement Eq, for it to cause some kind of problem, there would have to be a
- // VecFamily::Member<X> for some type X where !(X: Eq), that appears in the value of type
- // Member<C: Eq> = .... That type would fail a well-formedness check that we ought to be doing
- // elsewhere, which would check that any <T as Family>::Member<X> meets the bounds declared in
- // the trait (notably, that X: Eq and T: Family).
- let defs: &ty::Generics = tcx.generics_of(impl_ty.def_id);
- let mut substs = smallvec::SmallVec::with_capacity(defs.count());
- if let Some(def_id) = defs.parent {
- let parent_defs = tcx.generics_of(def_id);
- InternalSubsts::fill_item(&mut substs, tcx, parent_defs, &mut |param, _| {
- tcx.mk_param_from_def(param)
- });
- }
- let mut bound_vars: smallvec::SmallVec<[ty::BoundVariableKind; 8]> =
- smallvec::SmallVec::with_capacity(defs.count());
- InternalSubsts::fill_single(&mut substs, defs, &mut |param, _| match param.kind {
- GenericParamDefKind::Type { .. } => {
- let kind = ty::BoundTyKind::Param(param.name);
- let bound_var = ty::BoundVariableKind::Ty(kind);
- bound_vars.push(bound_var);
- tcx.mk_ty(ty::Bound(
- ty::INNERMOST,
- ty::BoundTy { var: ty::BoundVar::from_usize(bound_vars.len() - 1), kind },
- ))
- .into()
- }
- GenericParamDefKind::Lifetime => {
- let kind = ty::BoundRegionKind::BrNamed(param.def_id, param.name);
- let bound_var = ty::BoundVariableKind::Region(kind);
- bound_vars.push(bound_var);
- tcx.mk_region(ty::ReLateBound(
- ty::INNERMOST,
- ty::BoundRegion { var: ty::BoundVar::from_usize(bound_vars.len() - 1), kind },
- ))
- .into()
- }
- GenericParamDefKind::Const { .. } => {
- let bound_var = ty::BoundVariableKind::Const;
- bound_vars.push(bound_var);
- tcx.mk_const(ty::ConstS {
- ty: tcx.type_of(param.def_id),
- kind: ty::ConstKind::Bound(
- ty::INNERMOST,
- ty::BoundVar::from_usize(bound_vars.len() - 1),
- ),
- })
- .into()
- }
- });
- let bound_vars = tcx.mk_bound_variable_kinds(bound_vars.into_iter());
- let impl_ty_substs = tcx.intern_substs(&substs);
- let container_id = impl_ty.container_id(tcx);
-
- let rebased_substs = impl_ty_substs.rebase_onto(tcx, container_id, impl_trait_ref.substs);
- let impl_ty_value = tcx.type_of(impl_ty.def_id);
-
- let param_env = tcx.param_env(impl_ty.def_id);
-
- // When checking something like
- //
- // trait X { type Y: PartialEq<<Self as X>::Y> }
- // impl X for T { default type Y = S; }
- //
- // We will have to prove the bound S: PartialEq<<T as X>::Y>. In this case
- // we want <T as X>::Y to normalize to S. This is valid because we are
- // checking the default value specifically here. Add this equality to the
- // ParamEnv for normalization specifically.
- let normalize_param_env = {
- let mut predicates = param_env.caller_bounds().iter().collect::<Vec<_>>();
- match impl_ty_value.kind() {
- ty::Projection(proj)
- if proj.item_def_id == trait_ty.def_id && proj.substs == rebased_substs =>
- {
- // Don't include this predicate if the projected type is
- // exactly the same as the projection. This can occur in
- // (somewhat dubious) code like this:
- //
- // impl<T> X for T where T: X { type Y = <T as X>::Y; }
- }
- _ => predicates.push(
- ty::Binder::bind_with_vars(
- ty::ProjectionPredicate {
- projection_ty: ty::ProjectionTy {
- item_def_id: trait_ty.def_id,
- substs: rebased_substs,
- },
- term: impl_ty_value.into(),
- },
- bound_vars,
- )
- .to_predicate(tcx),
- ),
- };
- ty::ParamEnv::new(
- tcx.intern_predicates(&predicates),
- Reveal::UserFacing,
- param_env.constness(),
- )
- };
- debug!(?normalize_param_env);
-
- let impl_ty_substs = InternalSubsts::identity_for_item(tcx, impl_ty.def_id);
- let rebased_substs = impl_ty_substs.rebase_onto(tcx, container_id, impl_trait_ref.substs);
-
- tcx.infer_ctxt().enter(move |infcx| {
- let ocx = ObligationCtxt::new(&infcx);
-
- let mut selcx = traits::SelectionContext::new(&infcx);
- let impl_ty_hir_id = tcx.hir().local_def_id_to_hir_id(impl_ty.def_id.expect_local());
- let normalize_cause = ObligationCause::new(
- impl_ty_span,
- impl_ty_hir_id,
- ObligationCauseCode::CheckAssociatedTypeBounds {
- impl_item_def_id: impl_ty.def_id.expect_local(),
- trait_item_def_id: trait_ty.def_id,
- },
- );
- let mk_cause = |span: Span| {
- let code = if span.is_dummy() {
- traits::MiscObligation
- } else {
- traits::BindingObligation(trait_ty.def_id, span)
- };
- ObligationCause::new(impl_ty_span, impl_ty_hir_id, code)
- };
-
- let obligations = tcx
- .bound_explicit_item_bounds(trait_ty.def_id)
- .transpose_iter()
- .map(|e| e.map_bound(|e| *e).transpose_tuple2())
- .map(|(bound, span)| {
- debug!(?bound);
-                // This is where the opaque type is found.
- let concrete_ty_bound = bound.subst(tcx, rebased_substs);
- debug!("check_type_bounds: concrete_ty_bound = {:?}", concrete_ty_bound);
-
- traits::Obligation::new(mk_cause(span.0), param_env, concrete_ty_bound)
- })
- .collect();
- debug!("check_type_bounds: item_bounds={:?}", obligations);
-
- for mut obligation in util::elaborate_obligations(tcx, obligations) {
- let traits::Normalized { value: normalized_predicate, obligations } = traits::normalize(
- &mut selcx,
- normalize_param_env,
- normalize_cause.clone(),
- obligation.predicate,
- );
- debug!("compare_projection_bounds: normalized predicate = {:?}", normalized_predicate);
- obligation.predicate = normalized_predicate;
-
- ocx.register_obligations(obligations);
- ocx.register_obligation(obligation);
- }
- // Check that all obligations are satisfied by the implementation's
- // version.
- let errors = ocx.select_all_or_error();
- if !errors.is_empty() {
- let reported = infcx.report_fulfillment_errors(&errors, None, false);
- return Err(reported);
- }
-
- // Finally, resolve all regions. This catches wily misuses of
- // lifetime parameters.
- let implied_bounds = match impl_ty.container {
- ty::TraitContainer => FxHashSet::default(),
- ty::ImplContainer => wfcheck::impl_implied_bounds(
- tcx,
- param_env,
- container_id.expect_local(),
- impl_ty_span,
- ),
- };
- let mut outlives_environment = OutlivesEnvironment::new(param_env);
- outlives_environment.add_implied_bounds(&infcx, implied_bounds, impl_ty_hir_id);
- infcx.check_region_obligations_and_report_errors(
- impl_ty.def_id.expect_local(),
- &outlives_environment,
- );
-
- let constraints = infcx.inner.borrow_mut().opaque_type_storage.take_opaque_types();
- for (key, value) in constraints {
- infcx
- .report_mismatched_types(
- &ObligationCause::misc(
- value.hidden_type.span,
- tcx.hir().local_def_id_to_hir_id(impl_ty.def_id.expect_local()),
- ),
- tcx.mk_opaque(key.def_id.to_def_id(), key.substs),
- value.hidden_type.ty,
- TypeError::Mismatch,
- )
- .emit();
- }
-
- Ok(())
- })
-}
-
-fn assoc_item_kind_str(impl_item: &ty::AssocItem) -> &'static str {
- match impl_item.kind {
- ty::AssocKind::Const => "const",
- ty::AssocKind::Fn => "method",
- ty::AssocKind::Type => "type",
- }
-}
diff --git a/compiler/rustc_typeck/src/check/expr.rs b/compiler/rustc_typeck/src/check/expr.rs
deleted file mode 100644
index 6e97b0bf2..000000000
--- a/compiler/rustc_typeck/src/check/expr.rs
+++ /dev/null
@@ -1,2824 +0,0 @@
-//! Type checking expressions.
-//!
-//! See `mod.rs` for more context on type checking in general.
-
-use crate::astconv::AstConv as _;
-use crate::check::cast;
-use crate::check::coercion::CoerceMany;
-use crate::check::fatally_break_rust;
-use crate::check::method::SelfSource;
-use crate::check::report_unexpected_variant_res;
-use crate::check::BreakableCtxt;
-use crate::check::Diverges;
-use crate::check::DynamicCoerceMany;
-use crate::check::Expectation::{self, ExpectCastableToType, ExpectHasType, NoExpectation};
-use crate::check::FnCtxt;
-use crate::check::Needs;
-use crate::check::TupleArgumentsFlag::DontTupleArguments;
-use crate::errors::{
- FieldMultiplySpecifiedInInitializer, FunctionalRecordUpdateOnNonStruct,
- YieldExprOutsideOfGenerator,
-};
-use crate::type_error_struct;
-
-use super::suggest_call_constructor;
-use crate::errors::{AddressOfTemporaryTaken, ReturnStmtOutsideOfFnBody, StructExprNonExhaustive};
-use rustc_ast as ast;
-use rustc_data_structures::fx::FxHashMap;
-use rustc_data_structures::stack::ensure_sufficient_stack;
-use rustc_errors::{
- pluralize, struct_span_err, Applicability, Diagnostic, DiagnosticBuilder, DiagnosticId,
- EmissionGuarantee, ErrorGuaranteed,
-};
-use rustc_hir as hir;
-use rustc_hir::def::{CtorKind, DefKind, Res};
-use rustc_hir::def_id::DefId;
-use rustc_hir::intravisit::Visitor;
-use rustc_hir::lang_items::LangItem;
-use rustc_hir::{Closure, ExprKind, HirId, QPath};
-use rustc_infer::infer;
-use rustc_infer::infer::type_variable::{TypeVariableOrigin, TypeVariableOriginKind};
-use rustc_infer::infer::InferOk;
-use rustc_infer::traits::ObligationCause;
-use rustc_middle::middle::stability;
-use rustc_middle::ty::adjustment::{Adjust, Adjustment, AllowTwoPhase};
-use rustc_middle::ty::error::TypeError::FieldMisMatch;
-use rustc_middle::ty::subst::SubstsRef;
-use rustc_middle::ty::{self, AdtKind, DefIdTree, Ty, TypeVisitable};
-use rustc_session::parse::feature_err;
-use rustc_span::hygiene::DesugaringKind;
-use rustc_span::lev_distance::find_best_match_for_name;
-use rustc_span::source_map::{Span, Spanned};
-use rustc_span::symbol::{kw, sym, Ident, Symbol};
-use rustc_span::{BytePos, Pos};
-use rustc_target::spec::abi::Abi::RustIntrinsic;
-use rustc_trait_selection::infer::InferCtxtExt;
-use rustc_trait_selection::traits::{self, ObligationCauseCode};
-
-impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
- fn check_expr_eq_type(&self, expr: &'tcx hir::Expr<'tcx>, expected: Ty<'tcx>) {
- let ty = self.check_expr_with_hint(expr, expected);
- self.demand_eqtype(expr.span, expected, ty);
- }
-
- pub fn check_expr_has_type_or_error(
- &self,
- expr: &'tcx hir::Expr<'tcx>,
- expected: Ty<'tcx>,
- extend_err: impl FnMut(&mut Diagnostic),
- ) -> Ty<'tcx> {
- self.check_expr_meets_expectation_or_error(expr, ExpectHasType(expected), extend_err)
- }
-
- fn check_expr_meets_expectation_or_error(
- &self,
- expr: &'tcx hir::Expr<'tcx>,
- expected: Expectation<'tcx>,
- mut extend_err: impl FnMut(&mut Diagnostic),
- ) -> Ty<'tcx> {
- let expected_ty = expected.to_option(&self).unwrap_or(self.tcx.types.bool);
- let mut ty = self.check_expr_with_expectation(expr, expected);
-
- // While we don't allow *arbitrary* coercions here, we *do* allow
- // coercions from ! to `expected`.
- if ty.is_never() {
- if let Some(adjustments) = self.typeck_results.borrow().adjustments().get(expr.hir_id) {
- self.tcx().sess.delay_span_bug(
- expr.span,
- "expression with never type wound up being adjusted",
- );
- return if let [Adjustment { kind: Adjust::NeverToAny, target }] = &adjustments[..] {
- target.to_owned()
- } else {
- self.tcx().ty_error()
- };
- }
-
- let adj_ty = self.next_ty_var(TypeVariableOrigin {
- kind: TypeVariableOriginKind::AdjustmentType,
- span: expr.span,
- });
- self.apply_adjustments(
- expr,
- vec![Adjustment { kind: Adjust::NeverToAny, target: adj_ty }],
- );
- ty = adj_ty;
- }
-
- if let Some(mut err) = self.demand_suptype_diag(expr.span, expected_ty, ty) {
- let expr = expr.peel_drop_temps();
- self.suggest_deref_ref_or_into(&mut err, expr, expected_ty, ty, None);
- extend_err(&mut err);
- err.emit();
- }
- ty
- }
-
- pub(super) fn check_expr_coercable_to_type(
- &self,
- expr: &'tcx hir::Expr<'tcx>,
- expected: Ty<'tcx>,
- expected_ty_expr: Option<&'tcx hir::Expr<'tcx>>,
- ) -> Ty<'tcx> {
- let ty = self.check_expr_with_hint(expr, expected);
- // checks don't need two phase
- self.demand_coerce(expr, ty, expected, expected_ty_expr, AllowTwoPhase::No)
- }
-
- pub(super) fn check_expr_with_hint(
- &self,
- expr: &'tcx hir::Expr<'tcx>,
- expected: Ty<'tcx>,
- ) -> Ty<'tcx> {
- self.check_expr_with_expectation(expr, ExpectHasType(expected))
- }
-
- fn check_expr_with_expectation_and_needs(
- &self,
- expr: &'tcx hir::Expr<'tcx>,
- expected: Expectation<'tcx>,
- needs: Needs,
- ) -> Ty<'tcx> {
- let ty = self.check_expr_with_expectation(expr, expected);
-
-        // If the expression is used in a place where a mutable place is required,
-        // e.g. the LHS of an assignment, perform the conversion.
- if let Needs::MutPlace = needs {
- self.convert_place_derefs_to_mutable(expr);
- }
-
- ty
- }
-
- pub(super) fn check_expr(&self, expr: &'tcx hir::Expr<'tcx>) -> Ty<'tcx> {
- self.check_expr_with_expectation(expr, NoExpectation)
- }
-
- pub(super) fn check_expr_with_needs(
- &self,
- expr: &'tcx hir::Expr<'tcx>,
- needs: Needs,
- ) -> Ty<'tcx> {
- self.check_expr_with_expectation_and_needs(expr, NoExpectation, needs)
- }
-
- /// Invariant:
- /// If an expression has any sub-expressions that result in a type error,
- /// inspecting that expression's type with `ty.references_error()` will return
- /// true. Likewise, if an expression is known to diverge, inspecting its
- /// type with `ty::type_is_bot` will return true (n.b.: since Rust is
- /// strict, _|_ can appear in the type of an expression that does not,
- /// itself, diverge: for example, fn() -> _|_.)
- /// Note that inspecting a type's structure *directly* may expose the fact
- /// that there are actually multiple representations for `Error`, so avoid
- /// that when err needs to be handled differently.
- #[instrument(skip(self, expr), level = "debug")]
- pub(super) fn check_expr_with_expectation(
- &self,
- expr: &'tcx hir::Expr<'tcx>,
- expected: Expectation<'tcx>,
- ) -> Ty<'tcx> {
- self.check_expr_with_expectation_and_args(expr, expected, &[])
- }
-
- /// Same as `check_expr_with_expectation`, but allows us to pass in the arguments of a
- /// `ExprKind::Call` when evaluating its callee when it is an `ExprKind::Path`.
- pub(super) fn check_expr_with_expectation_and_args(
- &self,
- expr: &'tcx hir::Expr<'tcx>,
- expected: Expectation<'tcx>,
- args: &'tcx [hir::Expr<'tcx>],
- ) -> Ty<'tcx> {
- if self.tcx().sess.verbose() {
- // make this code only run with -Zverbose because it is probably slow
- if let Ok(lint_str) = self.tcx.sess.source_map().span_to_snippet(expr.span) {
- if !lint_str.contains('\n') {
- debug!("expr text: {lint_str}");
- } else {
- let mut lines = lint_str.lines();
- if let Some(line0) = lines.next() {
- let remaining_lines = lines.count();
- debug!("expr text: {line0}");
- debug!("expr text: ...(and {remaining_lines} more lines)");
- }
- }
- }
- }
-
- // True if `expr` is a `Try::from_ok(())` that is a result of desugaring a try block
- // without the final expr (e.g. `try { return; }`). We don't want to generate an
- // unreachable_code lint for it since warnings for autogenerated code are confusing.
- let is_try_block_generated_unit_expr = match expr.kind {
- ExprKind::Call(_, args) if expr.span.is_desugaring(DesugaringKind::TryBlock) => {
- args.len() == 1 && args[0].span.is_desugaring(DesugaringKind::TryBlock)
- }
-
- _ => false,
- };
-
- // Warn for expressions after diverging siblings.
- if !is_try_block_generated_unit_expr {
- self.warn_if_unreachable(expr.hir_id, expr.span, "expression");
- }
-
- // Hide the outer diverging and has_errors flags.
- let old_diverges = self.diverges.replace(Diverges::Maybe);
- let old_has_errors = self.has_errors.replace(false);
-
- let ty = ensure_sufficient_stack(|| match &expr.kind {
- hir::ExprKind::Path(
- qpath @ hir::QPath::Resolved(..) | qpath @ hir::QPath::TypeRelative(..),
- ) => self.check_expr_path(qpath, expr, args),
- _ => self.check_expr_kind(expr, expected),
- });
-
- // Warn for non-block expressions with diverging children.
- match expr.kind {
- ExprKind::Block(..)
- | ExprKind::If(..)
- | ExprKind::Let(..)
- | ExprKind::Loop(..)
- | ExprKind::Match(..) => {}
- // If `expr` is a result of desugaring the try block and is an ok-wrapped
- // diverging expression (e.g. it arose from desugaring of `try { return }`),
- // we skip issuing a warning because it is autogenerated code.
- ExprKind::Call(..) if expr.span.is_desugaring(DesugaringKind::TryBlock) => {}
- ExprKind::Call(callee, _) => self.warn_if_unreachable(expr.hir_id, callee.span, "call"),
- ExprKind::MethodCall(segment, ..) => {
- self.warn_if_unreachable(expr.hir_id, segment.ident.span, "call")
- }
- _ => self.warn_if_unreachable(expr.hir_id, expr.span, "expression"),
- }
-
- // Any expression that produces a value of type `!` must have diverged
- if ty.is_never() {
- self.diverges.set(self.diverges.get() | Diverges::always(expr.span));
- }
-
-        // Record the type, which applies its effects.
- // We need to do this after the warning above, so that
- // we don't warn for the diverging expression itself.
- self.write_ty(expr.hir_id, ty);
-
- // Combine the diverging and has_error flags.
- self.diverges.set(self.diverges.get() | old_diverges);
- self.has_errors.set(self.has_errors.get() | old_has_errors);
-
- debug!("type of {} is...", self.tcx.hir().node_to_string(expr.hir_id));
- debug!("... {:?}, expected is {:?}", ty, expected);
-
- ty
- }
-
- #[instrument(skip(self, expr), level = "debug")]
- fn check_expr_kind(
- &self,
- expr: &'tcx hir::Expr<'tcx>,
- expected: Expectation<'tcx>,
- ) -> Ty<'tcx> {
- trace!("expr={:#?}", expr);
-
- let tcx = self.tcx;
- match expr.kind {
- ExprKind::Box(subexpr) => self.check_expr_box(subexpr, expected),
- ExprKind::Lit(ref lit) => self.check_lit(&lit, expected),
- ExprKind::Binary(op, lhs, rhs) => self.check_binop(expr, op, lhs, rhs, expected),
- ExprKind::Assign(lhs, rhs, span) => {
- self.check_expr_assign(expr, expected, lhs, rhs, span)
- }
- ExprKind::AssignOp(op, lhs, rhs) => {
- self.check_binop_assign(expr, op, lhs, rhs, expected)
- }
- ExprKind::Unary(unop, oprnd) => self.check_expr_unary(unop, oprnd, expected, expr),
- ExprKind::AddrOf(kind, mutbl, oprnd) => {
- self.check_expr_addr_of(kind, mutbl, oprnd, expected, expr)
- }
- ExprKind::Path(QPath::LangItem(lang_item, _, hir_id)) => {
- self.check_lang_item_path(lang_item, expr, hir_id)
- }
- ExprKind::Path(ref qpath) => self.check_expr_path(qpath, expr, &[]),
- ExprKind::InlineAsm(asm) => {
- // We defer some asm checks as we may not have resolved the input and output types yet (they may still be infer vars).
- self.deferred_asm_checks.borrow_mut().push((asm, expr.hir_id));
- self.check_expr_asm(asm)
- }
- ExprKind::Break(destination, ref expr_opt) => {
- self.check_expr_break(destination, expr_opt.as_deref(), expr)
- }
- ExprKind::Continue(destination) => {
- if destination.target_id.is_ok() {
- tcx.types.never
- } else {
- // There was an error; make type-check fail.
- tcx.ty_error()
- }
- }
- ExprKind::Ret(ref expr_opt) => self.check_expr_return(expr_opt.as_deref(), expr),
- ExprKind::Let(let_expr) => self.check_expr_let(let_expr),
- ExprKind::Loop(body, _, source, _) => {
- self.check_expr_loop(body, source, expected, expr)
- }
- ExprKind::Match(discrim, arms, match_src) => {
- self.check_match(expr, &discrim, arms, expected, match_src)
- }
- ExprKind::Closure(&Closure { capture_clause, fn_decl, body, movability, .. }) => {
- self.check_expr_closure(expr, capture_clause, &fn_decl, body, movability, expected)
- }
- ExprKind::Block(body, _) => self.check_block_with_expected(&body, expected),
- ExprKind::Call(callee, args) => self.check_call(expr, &callee, args, expected),
- ExprKind::MethodCall(segment, args, _) => {
- self.check_method_call(expr, segment, args, expected)
- }
- ExprKind::Cast(e, t) => self.check_expr_cast(e, t, expr),
- ExprKind::Type(e, t) => {
- let ty = self.to_ty_saving_user_provided_ty(&t);
- self.check_expr_eq_type(&e, ty);
- ty
- }
- ExprKind::If(cond, then_expr, opt_else_expr) => {
- self.check_then_else(cond, then_expr, opt_else_expr, expr.span, expected)
- }
- ExprKind::DropTemps(e) => self.check_expr_with_expectation(e, expected),
- ExprKind::Array(args) => self.check_expr_array(args, expected, expr),
- ExprKind::ConstBlock(ref anon_const) => {
- self.check_expr_const_block(anon_const, expected, expr)
- }
- ExprKind::Repeat(element, ref count) => {
- self.check_expr_repeat(element, count, expected, expr)
- }
- ExprKind::Tup(elts) => self.check_expr_tuple(elts, expected, expr),
- ExprKind::Struct(qpath, fields, ref base_expr) => {
- self.check_expr_struct(expr, expected, qpath, fields, base_expr)
- }
- ExprKind::Field(base, field) => self.check_field(expr, &base, field),
- ExprKind::Index(base, idx) => self.check_expr_index(base, idx, expr),
- ExprKind::Yield(value, ref src) => self.check_expr_yield(value, expr, src),
- hir::ExprKind::Err => tcx.ty_error(),
- }
- }
-
- fn check_expr_box(&self, expr: &'tcx hir::Expr<'tcx>, expected: Expectation<'tcx>) -> Ty<'tcx> {
- let expected_inner = expected.to_option(self).map_or(NoExpectation, |ty| match ty.kind() {
- ty::Adt(def, _) if def.is_box() => Expectation::rvalue_hint(self, ty.boxed_ty()),
- _ => NoExpectation,
- });
- let referent_ty = self.check_expr_with_expectation(expr, expected_inner);
- self.require_type_is_sized(referent_ty, expr.span, traits::SizedBoxType);
- self.tcx.mk_box(referent_ty)
- }
-
- fn check_expr_unary(
- &self,
- unop: hir::UnOp,
- oprnd: &'tcx hir::Expr<'tcx>,
- expected: Expectation<'tcx>,
- expr: &'tcx hir::Expr<'tcx>,
- ) -> Ty<'tcx> {
- let tcx = self.tcx;
- let expected_inner = match unop {
- hir::UnOp::Not | hir::UnOp::Neg => expected,
- hir::UnOp::Deref => NoExpectation,
- };
- let mut oprnd_t = self.check_expr_with_expectation(&oprnd, expected_inner);
-
- if !oprnd_t.references_error() {
- oprnd_t = self.structurally_resolved_type(expr.span, oprnd_t);
- match unop {
- hir::UnOp::Deref => {
- if let Some(ty) = self.lookup_derefing(expr, oprnd, oprnd_t) {
- oprnd_t = ty;
- } else {
- let mut err = type_error_struct!(
- tcx.sess,
- expr.span,
- oprnd_t,
- E0614,
- "type `{oprnd_t}` cannot be dereferenced",
- );
- let sp = tcx.sess.source_map().start_point(expr.span);
- if let Some(sp) =
- tcx.sess.parse_sess.ambiguous_block_expr_parse.borrow().get(&sp)
- {
- tcx.sess.parse_sess.expr_parentheses_needed(&mut err, *sp);
- }
- err.emit();
- oprnd_t = tcx.ty_error();
- }
- }
- hir::UnOp::Not => {
- let result = self.check_user_unop(expr, oprnd_t, unop, expected_inner);
- // If it's builtin, we can reuse the type, this helps inference.
- if !(oprnd_t.is_integral() || *oprnd_t.kind() == ty::Bool) {
- oprnd_t = result;
- }
- }
- hir::UnOp::Neg => {
- let result = self.check_user_unop(expr, oprnd_t, unop, expected_inner);
- // If it's builtin, we can reuse the type, this helps inference.
- if !oprnd_t.is_numeric() {
- oprnd_t = result;
- }
- }
- }
- }
- oprnd_t
- }
-
- fn check_expr_addr_of(
- &self,
- kind: hir::BorrowKind,
- mutbl: hir::Mutability,
- oprnd: &'tcx hir::Expr<'tcx>,
- expected: Expectation<'tcx>,
- expr: &'tcx hir::Expr<'tcx>,
- ) -> Ty<'tcx> {
- let hint = expected.only_has_type(self).map_or(NoExpectation, |ty| {
- match ty.kind() {
- ty::Ref(_, ty, _) | ty::RawPtr(ty::TypeAndMut { ty, .. }) => {
- if oprnd.is_syntactic_place_expr() {
- // Places may legitimately have unsized types.
- // For example, dereferences of a fat pointer and
- // the last field of a struct can be unsized.
- ExpectHasType(*ty)
- } else {
- Expectation::rvalue_hint(self, *ty)
- }
- }
- _ => NoExpectation,
- }
- });
- let ty =
- self.check_expr_with_expectation_and_needs(&oprnd, hint, Needs::maybe_mut_place(mutbl));
-
- let tm = ty::TypeAndMut { ty, mutbl };
- match kind {
- _ if tm.ty.references_error() => self.tcx.ty_error(),
- hir::BorrowKind::Raw => {
- self.check_named_place_expr(oprnd);
- self.tcx.mk_ptr(tm)
- }
- hir::BorrowKind::Ref => {
- // Note: at this point, we cannot say what the best lifetime
- // is to use for resulting pointer. We want to use the
- // shortest lifetime possible so as to avoid spurious borrowck
- // errors. Moreover, the longest lifetime will depend on the
- // precise details of the value whose address is being taken
- // (and how long it is valid), which we don't know yet until
- // type inference is complete.
- //
- // Therefore, here we simply generate a region variable. The
- // region inferencer will then select a suitable value.
- // Finally, borrowck will infer the value of the region again,
- // this time with enough precision to check that the value
- // whose address was taken can actually be made to live as long
- // as it needs to live.
- let region = self.next_region_var(infer::AddrOfRegion(expr.span));
- self.tcx.mk_ref(region, tm)
- }
- }
- }
-
- /// Does this expression refer to a place that either:
- /// * Is based on a local or static.
-    /// * Contains a dereference.
- /// Note that the adjustments for the children of `expr` should already
- /// have been resolved.
- fn check_named_place_expr(&self, oprnd: &'tcx hir::Expr<'tcx>) {
- let is_named = oprnd.is_place_expr(|base| {
- // Allow raw borrows if there are any deref adjustments.
- //
- // const VAL: (i32,) = (0,);
- // const REF: &(i32,) = &(0,);
- //
- // &raw const VAL.0; // ERROR
- // &raw const REF.0; // OK, same as &raw const (*REF).0;
- //
- // This is maybe too permissive, since it allows
- // `let u = &raw const Box::new((1,)).0`, which creates an
- // immediately dangling raw pointer.
- self.typeck_results
- .borrow()
- .adjustments()
- .get(base.hir_id)
- .map_or(false, |x| x.iter().any(|adj| matches!(adj.kind, Adjust::Deref(_))))
- });
- if !is_named {
- self.tcx.sess.emit_err(AddressOfTemporaryTaken { span: oprnd.span });
- }
- }
-
- fn check_lang_item_path(
- &self,
- lang_item: hir::LangItem,
- expr: &'tcx hir::Expr<'tcx>,
- hir_id: Option<hir::HirId>,
- ) -> Ty<'tcx> {
- self.resolve_lang_item_path(lang_item, expr.span, expr.hir_id, hir_id).1
- }
-
- pub(crate) fn check_expr_path(
- &self,
- qpath: &'tcx hir::QPath<'tcx>,
- expr: &'tcx hir::Expr<'tcx>,
- args: &'tcx [hir::Expr<'tcx>],
- ) -> Ty<'tcx> {
- let tcx = self.tcx;
- let (res, opt_ty, segs) =
- self.resolve_ty_and_res_fully_qualified_call(qpath, expr.hir_id, expr.span);
- let ty = match res {
- Res::Err => {
- self.set_tainted_by_errors();
- tcx.ty_error()
- }
- Res::Def(DefKind::Ctor(_, CtorKind::Fictive), _) => {
- report_unexpected_variant_res(tcx, res, qpath, expr.span);
- tcx.ty_error()
- }
- _ => self.instantiate_value_path(segs, opt_ty, res, expr.span, expr.hir_id).0,
- };
-
- if let ty::FnDef(did, ..) = *ty.kind() {
- let fn_sig = ty.fn_sig(tcx);
- if tcx.fn_sig(did).abi() == RustIntrinsic && tcx.item_name(did) == sym::transmute {
- let from = fn_sig.inputs().skip_binder()[0];
- let to = fn_sig.output().skip_binder();
- // We defer the transmute to the end of typeck, once all inference vars have
- // been resolved or we errored. This is important as we can only check transmute
- // on concrete types, but the output type may not be known yet (it would only
- // be known if explicitly specified via turbofish).
- self.deferred_transmute_checks.borrow_mut().push((from, to, expr.span));
- }
- if !tcx.features().unsized_fn_params {
- // We want to remove some Sized bounds from std functions,
- // but don't want to expose the removal to stable Rust.
- // i.e., we don't want to allow
- //
- // ```rust
- // drop as fn(str);
- // ```
- //
- // to work in stable even if the Sized bound on `drop` is relaxed.
- for i in 0..fn_sig.inputs().skip_binder().len() {
- // We just want to check sizedness, so instead of introducing
-                // placeholder lifetimes with probing, we just replace higher-ranked lifetimes
- // with fresh vars.
- let span = args.get(i).map(|a| a.span).unwrap_or(expr.span);
- let input = self.replace_bound_vars_with_fresh_vars(
- span,
- infer::LateBoundRegionConversionTime::FnCall,
- fn_sig.input(i),
- );
- self.require_type_is_sized_deferred(
- input,
- span,
- traits::SizedArgumentType(None),
- );
- }
- }
- // Here we want to prevent struct constructors from returning unsized types.
-            // There were two cases where this happened: fn pointer coercion in stable
-            // and a usual function call in the presence of unsized_locals.
-            // Also, as we just want to check sizedness, instead of introducing
-            // placeholder lifetimes with probing, we just replace higher-ranked lifetimes
-            // with fresh vars.
- let output = self.replace_bound_vars_with_fresh_vars(
- expr.span,
- infer::LateBoundRegionConversionTime::FnCall,
- fn_sig.output(),
- );
- self.require_type_is_sized_deferred(output, expr.span, traits::SizedReturnType);
- }
-
- // We always require that the type provided as the value for
- // a type parameter outlives the moment of instantiation.
- let substs = self.typeck_results.borrow().node_substs(expr.hir_id);
- self.add_wf_bounds(substs, expr);
-
- ty
- }
-
- fn check_expr_break(
- &self,
- destination: hir::Destination,
- expr_opt: Option<&'tcx hir::Expr<'tcx>>,
- expr: &'tcx hir::Expr<'tcx>,
- ) -> Ty<'tcx> {
- let tcx = self.tcx;
- if let Ok(target_id) = destination.target_id {
- let (e_ty, cause);
- if let Some(e) = expr_opt {
- // If this is a break with a value, we need to type-check
- // the expression. Get an expected type from the loop context.
- let opt_coerce_to = {
- // We should release `enclosing_breakables` before the `check_expr_with_hint`
- // below, so can't move this block of code to the enclosing scope and share
- // `ctxt` with the second `enclosing_breakables` borrow below.
- let mut enclosing_breakables = self.enclosing_breakables.borrow_mut();
- match enclosing_breakables.opt_find_breakable(target_id) {
- Some(ctxt) => ctxt.coerce.as_ref().map(|coerce| coerce.expected_ty()),
- None => {
- // Avoid ICE when `break` is inside a closure (#65383).
- return tcx.ty_error_with_message(
- expr.span,
- "break was outside loop, but no error was emitted",
- );
- }
- }
- };
-
- // If the loop context is not a `loop { }`, then break with
- // a value is illegal, and `opt_coerce_to` will be `None`.
- // Just set expectation to error in that case.
- let coerce_to = opt_coerce_to.unwrap_or_else(|| tcx.ty_error());
-
- // Recurse without `enclosing_breakables` borrowed.
- e_ty = self.check_expr_with_hint(e, coerce_to);
- cause = self.misc(e.span);
- } else {
- // Otherwise, this is a break *without* a value. That's
- // always legal, and is equivalent to `break ()`.
- e_ty = tcx.mk_unit();
- cause = self.misc(expr.span);
- }
-
- // Now that we have type-checked `expr_opt`, borrow
- // the `enclosing_loops` field and let's coerce the
- // type of `expr_opt` into what is expected.
- let mut enclosing_breakables = self.enclosing_breakables.borrow_mut();
- let Some(ctxt) = enclosing_breakables.opt_find_breakable(target_id) else {
- // Avoid ICE when `break` is inside a closure (#65383).
- return tcx.ty_error_with_message(
- expr.span,
- "break was outside loop, but no error was emitted",
- );
- };
-
- if let Some(ref mut coerce) = ctxt.coerce {
- if let Some(ref e) = expr_opt {
- coerce.coerce(self, &cause, e, e_ty);
- } else {
- assert!(e_ty.is_unit());
- let ty = coerce.expected_ty();
- coerce.coerce_forced_unit(
- self,
- &cause,
- &mut |mut err| {
- self.suggest_mismatched_types_on_tail(
- &mut err, expr, ty, e_ty, target_id,
- );
- if let Some(val) = ty_kind_suggestion(ty) {
- let label = destination
- .label
- .map(|l| format!(" {}", l.ident))
- .unwrap_or_else(String::new);
- err.span_suggestion(
- expr.span,
- "give it a value of the expected type",
- format!("break{label} {val}"),
- Applicability::HasPlaceholders,
- );
- }
- },
- false,
- );
- }
- } else {
- // If `ctxt.coerce` is `None`, we can just ignore
- // the type of the expression. This is because
- // either this was a break *without* a value, in
- // which case it is always a legal type (`()`), or
- // else an error would have been flagged by the
- // `loops` pass for using break with an expression
- // where you are not supposed to.
- assert!(expr_opt.is_none() || self.tcx.sess.has_errors().is_some());
- }
-
- // If we encountered a `break`, then (no surprise) it may be possible to break from the
- // loop... unless the value being returned from the loop diverges itself, e.g.
- // `break return 5` or `break loop {}`.
- ctxt.may_break |= !self.diverges.get().is_always();
-
- // the type of a `break` is always `!`, since it diverges
- tcx.types.never
- } else {
- // Otherwise, we failed to find the enclosing loop;
- // this can only happen if the `break` was not
- // inside a loop at all, which is caught by the
- // loop-checking pass.
- let err = self.tcx.ty_error_with_message(
- expr.span,
- "break was outside loop, but no error was emitted",
- );
-
- // We still need to assign a type to the inner expression to
- // prevent the ICE in #43162.
- if let Some(e) = expr_opt {
- self.check_expr_with_hint(e, err);
-
- // ... except when we try to 'break rust;'.
- // ICE this expression in particular (see #43162).
- if let ExprKind::Path(QPath::Resolved(_, path)) = e.kind {
- if path.segments.len() == 1 && path.segments[0].ident.name == sym::rust {
- fatally_break_rust(self.tcx.sess);
- }
- }
- }
-
- // There was an error; make type-check fail.
- err
- }
- }
-
- fn check_expr_return(
- &self,
- expr_opt: Option<&'tcx hir::Expr<'tcx>>,
- expr: &'tcx hir::Expr<'tcx>,
- ) -> Ty<'tcx> {
- if self.ret_coercion.is_none() {
- let mut err = ReturnStmtOutsideOfFnBody {
- span: expr.span,
- encl_body_span: None,
- encl_fn_span: None,
- };
-
- let encl_item_id = self.tcx.hir().get_parent_item(expr.hir_id);
-
- if let Some(hir::Node::Item(hir::Item {
- kind: hir::ItemKind::Fn(..),
- span: encl_fn_span,
- ..
- }))
- | Some(hir::Node::TraitItem(hir::TraitItem {
- kind: hir::TraitItemKind::Fn(_, hir::TraitFn::Provided(_)),
- span: encl_fn_span,
- ..
- }))
- | Some(hir::Node::ImplItem(hir::ImplItem {
- kind: hir::ImplItemKind::Fn(..),
- span: encl_fn_span,
- ..
- })) = self.tcx.hir().find_by_def_id(encl_item_id)
- {
- // We are inside a function body, so reporting "return statement
- // outside of function body" needs an explanation.
-
- let encl_body_owner_id = self.tcx.hir().enclosing_body_owner(expr.hir_id);
-
- // If this didn't hold, we would not have to report an error in
- // the first place.
- assert_ne!(encl_item_id, encl_body_owner_id);
-
- let encl_body_id = self.tcx.hir().body_owned_by(encl_body_owner_id);
- let encl_body = self.tcx.hir().body(encl_body_id);
-
- err.encl_body_span = Some(encl_body.value.span);
- err.encl_fn_span = Some(*encl_fn_span);
- }
-
- self.tcx.sess.emit_err(err);
-
- if let Some(e) = expr_opt {
- // We still have to type-check `e` (issue #86188), but calling
- // `check_return_expr` only works inside fn bodies.
- self.check_expr(e);
- }
- } else if let Some(e) = expr_opt {
- if self.ret_coercion_span.get().is_none() {
- self.ret_coercion_span.set(Some(e.span));
- }
- self.check_return_expr(e, true);
- } else {
- let mut coercion = self.ret_coercion.as_ref().unwrap().borrow_mut();
- if self.ret_coercion_span.get().is_none() {
- self.ret_coercion_span.set(Some(expr.span));
- }
- let cause = self.cause(expr.span, ObligationCauseCode::ReturnNoExpression);
- if let Some((fn_decl, _)) = self.get_fn_decl(expr.hir_id) {
- coercion.coerce_forced_unit(
- self,
- &cause,
- &mut |db| {
- let span = fn_decl.output.span();
- if let Ok(snippet) = self.tcx.sess.source_map().span_to_snippet(span) {
- db.span_label(
- span,
- format!("expected `{snippet}` because of this return type"),
- );
- }
- },
- true,
- );
- } else {
- coercion.coerce_forced_unit(self, &cause, &mut |_| (), true);
- }
- }
- self.tcx.types.never
- }
-
- /// `explicit_return` is `true` if we're checking an explicit `return expr`,
- /// and `false` if we're checking a trailing expression.
- pub(super) fn check_return_expr(
- &self,
- return_expr: &'tcx hir::Expr<'tcx>,
- explicit_return: bool,
- ) {
- let ret_coercion = self.ret_coercion.as_ref().unwrap_or_else(|| {
- span_bug!(return_expr.span, "check_return_expr called outside fn body")
- });
-
- let ret_ty = ret_coercion.borrow().expected_ty();
- let return_expr_ty = self.check_expr_with_hint(return_expr, ret_ty);
- let mut span = return_expr.span;
- // Use the span of the trailing expression for our cause,
- // not the span of the entire function
- if !explicit_return {
- if let ExprKind::Block(body, _) = return_expr.kind && let Some(last_expr) = body.expr {
- span = last_expr.span;
- }
- }
- ret_coercion.borrow_mut().coerce(
- self,
- &self.cause(span, ObligationCauseCode::ReturnValue(return_expr.hir_id)),
- return_expr,
- return_expr_ty,
- );
-
- if self.return_type_has_opaque {
- // Point any obligations that were registered due to opaque type
- // inference at the return expression.
- self.select_obligations_where_possible(false, |errors| {
- self.point_at_return_for_opaque_ty_error(errors, span, return_expr_ty);
- });
- }
- }
-
- fn point_at_return_for_opaque_ty_error(
- &self,
- errors: &mut Vec<traits::FulfillmentError<'tcx>>,
- span: Span,
- return_expr_ty: Ty<'tcx>,
- ) {
- // Don't point at the whole block if it's empty
- if span == self.tcx.hir().span(self.body_id) {
- return;
- }
- for err in errors {
- let cause = &mut err.obligation.cause;
- if let ObligationCauseCode::OpaqueReturnType(None) = cause.code() {
- let new_cause = ObligationCause::new(
- cause.span,
- cause.body_id,
- ObligationCauseCode::OpaqueReturnType(Some((return_expr_ty, span))),
- );
- *cause = new_cause;
- }
- }
- }
-
- pub(crate) fn check_lhs_assignable(
- &self,
- lhs: &'tcx hir::Expr<'tcx>,
- err_code: &'static str,
- op_span: Span,
- adjust_err: impl FnOnce(&mut DiagnosticBuilder<'tcx, ErrorGuaranteed>),
- ) {
- if lhs.is_syntactic_place_expr() {
- return;
- }
-
- // FIXME: Make this use SessionDiagnostic once error codes can be dynamically set.
- let mut err = self.tcx.sess.struct_span_err_with_code(
- op_span,
- "invalid left-hand side of assignment",
- DiagnosticId::Error(err_code.into()),
- );
- err.span_label(lhs.span, "cannot assign to this expression");
-
- self.comes_from_while_condition(lhs.hir_id, |expr| {
- err.span_suggestion_verbose(
- expr.span.shrink_to_lo(),
- "you might have meant to use pattern destructuring",
- "let ",
- Applicability::MachineApplicable,
- );
- });
-
- adjust_err(&mut err);
-
- err.emit();
- }
-
- // Check if an expression `original_expr_id` comes from the condition of a while loop,
-    // as opposed to the body of a while loop, which we can naively check by iterating
- // parents until we find a loop...
- pub(super) fn comes_from_while_condition(
- &self,
- original_expr_id: HirId,
- then: impl FnOnce(&hir::Expr<'_>),
- ) {
- let mut parent = self.tcx.hir().get_parent_node(original_expr_id);
- while let Some(node) = self.tcx.hir().find(parent) {
- match node {
- hir::Node::Expr(hir::Expr {
- kind:
- hir::ExprKind::Loop(
- hir::Block {
- expr:
- Some(hir::Expr {
- kind:
- hir::ExprKind::Match(expr, ..) | hir::ExprKind::If(expr, ..),
- ..
- }),
- ..
- },
- _,
- hir::LoopSource::While,
- _,
- ),
- ..
- }) => {
- // Check if our original expression is a child of the condition of a while loop
- let expr_is_ancestor = std::iter::successors(Some(original_expr_id), |id| {
- self.tcx.hir().find_parent_node(*id)
- })
- .take_while(|id| *id != parent)
- .any(|id| id == expr.hir_id);
- // if it is, then we have a situation like `while Some(0) = value.get(0) {`,
- // where `while let` was more likely intended.
- if expr_is_ancestor {
- then(expr);
- }
- break;
- }
- hir::Node::Item(_)
- | hir::Node::ImplItem(_)
- | hir::Node::TraitItem(_)
- | hir::Node::Crate(_) => break,
- _ => {
- parent = self.tcx.hir().get_parent_node(parent);
- }
- }
- }
- }
-
- // A generic function for checking the 'then' and 'else' clauses in an 'if'
- // or 'if-else' expression.
- fn check_then_else(
- &self,
- cond_expr: &'tcx hir::Expr<'tcx>,
- then_expr: &'tcx hir::Expr<'tcx>,
- opt_else_expr: Option<&'tcx hir::Expr<'tcx>>,
- sp: Span,
- orig_expected: Expectation<'tcx>,
- ) -> Ty<'tcx> {
- let cond_ty = self.check_expr_has_type_or_error(cond_expr, self.tcx.types.bool, |_| {});
-
- self.warn_if_unreachable(
- cond_expr.hir_id,
- then_expr.span,
- "block in `if` or `while` expression",
- );
-
- let cond_diverges = self.diverges.get();
- self.diverges.set(Diverges::Maybe);
-
- let expected = orig_expected.adjust_for_branches(self);
- let then_ty = self.check_expr_with_expectation(then_expr, expected);
- let then_diverges = self.diverges.get();
- self.diverges.set(Diverges::Maybe);
-
- // We've already taken the expected type's preferences
- // into account when typing the `then` branch. To figure
- // out the initial shot at a LUB, we thus only consider
- // `expected` if it represents a *hard* constraint
- // (`only_has_type`); otherwise, we just go with a
- // fresh type variable.
- let coerce_to_ty = expected.coercion_target_type(self, sp);
- let mut coerce: DynamicCoerceMany<'_> = CoerceMany::new(coerce_to_ty);
-
- coerce.coerce(self, &self.misc(sp), then_expr, then_ty);
-
- if let Some(else_expr) = opt_else_expr {
- let else_ty = self.check_expr_with_expectation(else_expr, expected);
- let else_diverges = self.diverges.get();
-
- let opt_suggest_box_span = self.opt_suggest_box_span(else_ty, orig_expected);
- let if_cause = self.if_cause(
- sp,
- cond_expr.span,
- then_expr,
- else_expr,
- then_ty,
- else_ty,
- opt_suggest_box_span,
- );
-
- coerce.coerce(self, &if_cause, else_expr, else_ty);
-
- // We won't diverge unless both branches do (or the condition does).
- self.diverges.set(cond_diverges | then_diverges & else_diverges);
- } else {
- self.if_fallback_coercion(sp, then_expr, &mut coerce);
-
- // If the condition is false we can't diverge.
- self.diverges.set(cond_diverges);
- }
-
- let result_ty = coerce.complete(self);
- if cond_ty.references_error() { self.tcx.ty_error() } else { result_ty }
- }
-
- /// Type check assignment expression `expr` of form `lhs = rhs`.
- /// The expected type is `()` and is passed to the function for the purposes of diagnostics.
- fn check_expr_assign(
- &self,
- expr: &'tcx hir::Expr<'tcx>,
- expected: Expectation<'tcx>,
- lhs: &'tcx hir::Expr<'tcx>,
- rhs: &'tcx hir::Expr<'tcx>,
- span: Span,
- ) -> Ty<'tcx> {
- let expected_ty = expected.coercion_target_type(self, expr.span);
- if expected_ty == self.tcx.types.bool {
- // The expected type is `bool` but this will result in `()` so we can reasonably
- // say that the user intended to write `lhs == rhs` instead of `lhs = rhs`.
- // The likely cause of this is `if foo = bar { .. }`.
- let actual_ty = self.tcx.mk_unit();
- let mut err = self.demand_suptype_diag(expr.span, expected_ty, actual_ty).unwrap();
- let lhs_ty = self.check_expr(&lhs);
- let rhs_ty = self.check_expr(&rhs);
- let (applicability, eq) = if self.can_coerce(rhs_ty, lhs_ty) {
- (Applicability::MachineApplicable, true)
- } else {
- (Applicability::MaybeIncorrect, false)
- };
- if !lhs.is_syntactic_place_expr()
- && lhs.is_approximately_pattern()
- && !matches!(lhs.kind, hir::ExprKind::Lit(_))
- {
- // Do not suggest `if let x = y` as `==` is way more likely to be the intention.
- let hir = self.tcx.hir();
- if let hir::Node::Expr(hir::Expr { kind: ExprKind::If { .. }, .. }) =
- hir.get(hir.get_parent_node(hir.get_parent_node(expr.hir_id)))
- {
- err.span_suggestion_verbose(
- expr.span.shrink_to_lo(),
- "you might have meant to use pattern matching",
- "let ",
- applicability,
- );
- };
- }
- if eq {
- err.span_suggestion_verbose(
- span,
- "you might have meant to compare for equality",
- "==",
- applicability,
- );
- }
-
- // If the assignment expression itself is ill-formed, don't
- // bother emitting another error
- if lhs_ty.references_error() || rhs_ty.references_error() {
- err.delay_as_bug()
- } else {
- err.emit();
- }
- return self.tcx.ty_error();
- }
-
- let lhs_ty = self.check_expr_with_needs(&lhs, Needs::MutPlace);
-
- let suggest_deref_binop = |err: &mut DiagnosticBuilder<'tcx, ErrorGuaranteed>,
- rhs_ty: Ty<'tcx>| {
- if let Some(lhs_deref_ty) = self.deref_once_mutably_for_diagnostic(lhs_ty) {
- // Can only assign if the type is sized, so if `DerefMut` yields a type that is
- // unsized, do not suggest dereferencing it.
- let lhs_deref_ty_is_sized = self
- .infcx
- .type_implements_trait(
- self.tcx.lang_items().sized_trait().unwrap(),
- lhs_deref_ty,
- ty::List::empty(),
- self.param_env,
- )
- .may_apply();
- if lhs_deref_ty_is_sized && self.can_coerce(rhs_ty, lhs_deref_ty) {
- err.span_suggestion_verbose(
- lhs.span.shrink_to_lo(),
- "consider dereferencing here to assign to the mutably borrowed value",
- "*",
- Applicability::MachineApplicable,
- );
- }
- }
- };
-
- self.check_lhs_assignable(lhs, "E0070", span, |err| {
- let rhs_ty = self.check_expr(&rhs);
- suggest_deref_binop(err, rhs_ty);
- });
-
- // This is (basically) inlined `check_expr_coercable_to_type`, but we want
- // to suggest an additional fixup here in `suggest_deref_binop`.
- let rhs_ty = self.check_expr_with_hint(&rhs, lhs_ty);
- if let (_, Some(mut diag)) =
- self.demand_coerce_diag(rhs, rhs_ty, lhs_ty, Some(lhs), AllowTwoPhase::No)
- {
- suggest_deref_binop(&mut diag, rhs_ty);
- diag.emit();
- }
-
- self.require_type_is_sized(lhs_ty, lhs.span, traits::AssignmentLhsSized);
-
- if lhs_ty.references_error() || rhs_ty.references_error() {
- self.tcx.ty_error()
- } else {
- self.tcx.mk_unit()
- }
- }
-
- pub(super) fn check_expr_let(&self, let_expr: &'tcx hir::Let<'tcx>) -> Ty<'tcx> {
- // for let statements, this is done in check_stmt
- let init = let_expr.init;
- self.warn_if_unreachable(init.hir_id, init.span, "block in `let` expression");
- // otherwise check exactly as a let statement
- self.check_decl(let_expr.into());
- // but return a bool, for this is a boolean expression
- self.tcx.types.bool
- }
-
- fn check_expr_loop(
- &self,
- body: &'tcx hir::Block<'tcx>,
- source: hir::LoopSource,
- expected: Expectation<'tcx>,
- expr: &'tcx hir::Expr<'tcx>,
- ) -> Ty<'tcx> {
- let coerce = match source {
- // you can only use break with a value from a normal `loop { }`
- hir::LoopSource::Loop => {
- let coerce_to = expected.coercion_target_type(self, body.span);
- Some(CoerceMany::new(coerce_to))
- }
-
- hir::LoopSource::While | hir::LoopSource::ForLoop => None,
- };
-
- let ctxt = BreakableCtxt {
- coerce,
- may_break: false, // Will get updated if/when we find a `break`.
- };
-
- let (ctxt, ()) = self.with_breakable_ctxt(expr.hir_id, ctxt, || {
- self.check_block_no_value(&body);
- });
-
- if ctxt.may_break {
- // No way to know whether it's diverging because
- // of a `break` or an outer `break` or `return`.
- self.diverges.set(Diverges::Maybe);
- }
-
- // If we permit break with a value, then result type is
- // the LUB of the breaks (possibly ! if none); else, it
- // is nil. This makes sense because infinite loops
- // (which would have type !) are only possible iff we
- // permit break with a value [1].
- if ctxt.coerce.is_none() && !ctxt.may_break {
- // [1]
- self.tcx.sess.delay_span_bug(body.span, "no coercion, but loop may not break");
- }
- ctxt.coerce.map(|c| c.complete(self)).unwrap_or_else(|| self.tcx.mk_unit())
- }
-
- /// Checks a method call.
- fn check_method_call(
- &self,
- expr: &'tcx hir::Expr<'tcx>,
- segment: &hir::PathSegment<'_>,
- args: &'tcx [hir::Expr<'tcx>],
- expected: Expectation<'tcx>,
- ) -> Ty<'tcx> {
- let rcvr = &args[0];
- let rcvr_t = self.check_expr(&rcvr);
- // no need to check for bot/err -- callee does that
- let rcvr_t = self.structurally_resolved_type(args[0].span, rcvr_t);
- let span = segment.ident.span;
-
- let method = match self.lookup_method(rcvr_t, segment, span, expr, rcvr, args) {
- Ok(method) => {
- // We could add a "consider `foo::<params>`" suggestion here, but I wasn't able to
- // trigger this codepath causing `structurally_resolved_type` to emit an error.
-
- self.write_method_call(expr.hir_id, method);
- Ok(method)
- }
- Err(error) => {
- if segment.ident.name != kw::Empty {
- if let Some(mut err) = self.report_method_error(
- span,
- rcvr_t,
- segment.ident,
- SelfSource::MethodCall(&args[0]),
- error,
- Some(args),
- ) {
- err.emit();
- }
- }
- Err(())
- }
- };
-
- // Call the generic checker.
- self.check_method_argument_types(
- span,
- expr,
- method,
- &args[1..],
- DontTupleArguments,
- expected,
- )
- }
-
- fn check_expr_cast(
- &self,
- e: &'tcx hir::Expr<'tcx>,
- t: &'tcx hir::Ty<'tcx>,
- expr: &'tcx hir::Expr<'tcx>,
- ) -> Ty<'tcx> {
- // Find the type of `e`. Supply hints based on the type we are casting to,
- // if appropriate.
- let t_cast = self.to_ty_saving_user_provided_ty(t);
- let t_cast = self.resolve_vars_if_possible(t_cast);
- let t_expr = self.check_expr_with_expectation(e, ExpectCastableToType(t_cast));
- let t_expr = self.resolve_vars_if_possible(t_expr);
-
- // Eagerly check for some obvious errors.
- if t_expr.references_error() || t_cast.references_error() {
- self.tcx.ty_error()
- } else {
- // Defer other checks until we're done type checking.
- let mut deferred_cast_checks = self.deferred_cast_checks.borrow_mut();
- match cast::CastCheck::new(self, e, t_expr, t_cast, t.span, expr.span) {
- Ok(cast_check) => {
- debug!(
- "check_expr_cast: deferring cast from {:?} to {:?}: {:?}",
- t_cast, t_expr, cast_check,
- );
- deferred_cast_checks.push(cast_check);
- t_cast
- }
- Err(_) => self.tcx.ty_error(),
- }
- }
- }
-
- fn check_expr_array(
- &self,
- args: &'tcx [hir::Expr<'tcx>],
- expected: Expectation<'tcx>,
- expr: &'tcx hir::Expr<'tcx>,
- ) -> Ty<'tcx> {
- let element_ty = if !args.is_empty() {
- let coerce_to = expected
- .to_option(self)
- .and_then(|uty| match *uty.kind() {
- ty::Array(ty, _) | ty::Slice(ty) => Some(ty),
- _ => None,
- })
- .unwrap_or_else(|| {
- self.next_ty_var(TypeVariableOrigin {
- kind: TypeVariableOriginKind::TypeInference,
- span: expr.span,
- })
- });
- let mut coerce = CoerceMany::with_coercion_sites(coerce_to, args);
- assert_eq!(self.diverges.get(), Diverges::Maybe);
- for e in args {
- let e_ty = self.check_expr_with_hint(e, coerce_to);
- let cause = self.misc(e.span);
- coerce.coerce(self, &cause, e, e_ty);
- }
- coerce.complete(self)
- } else {
- self.next_ty_var(TypeVariableOrigin {
- kind: TypeVariableOriginKind::TypeInference,
- span: expr.span,
- })
- };
- self.tcx.mk_array(element_ty, args.len() as u64)
- }
-
- fn check_expr_const_block(
- &self,
- anon_const: &'tcx hir::AnonConst,
- expected: Expectation<'tcx>,
- _expr: &'tcx hir::Expr<'tcx>,
- ) -> Ty<'tcx> {
- let body = self.tcx.hir().body(anon_const.body);
-
- // Create a new function context.
- let fcx = FnCtxt::new(self, self.param_env.with_const(), body.value.hir_id);
- crate::check::GatherLocalsVisitor::new(&fcx).visit_body(body);
-
- let ty = fcx.check_expr_with_expectation(&body.value, expected);
- fcx.require_type_is_sized(ty, body.value.span, traits::ConstSized);
- fcx.write_ty(anon_const.hir_id, ty);
- ty
- }
-
- fn check_expr_repeat(
- &self,
- element: &'tcx hir::Expr<'tcx>,
- count: &'tcx hir::ArrayLen,
- expected: Expectation<'tcx>,
- _expr: &'tcx hir::Expr<'tcx>,
- ) -> Ty<'tcx> {
- let tcx = self.tcx;
- let count = self.array_length_to_const(count);
-
- let uty = match expected {
- ExpectHasType(uty) => match *uty.kind() {
- ty::Array(ty, _) | ty::Slice(ty) => Some(ty),
- _ => None,
- },
- _ => None,
- };
-
- let (element_ty, t) = match uty {
- Some(uty) => {
- self.check_expr_coercable_to_type(&element, uty, None);
- (uty, uty)
- }
- None => {
- let ty = self.next_ty_var(TypeVariableOrigin {
- kind: TypeVariableOriginKind::MiscVariable,
- span: element.span,
- });
- let element_ty = self.check_expr_has_type_or_error(&element, ty, |_| {});
- (element_ty, ty)
- }
- };
-
- if element_ty.references_error() {
- return tcx.ty_error();
- }
-
- self.check_repeat_element_needs_copy_bound(element, count, element_ty);
-
- tcx.mk_ty(ty::Array(t, count))
- }
-
- fn check_repeat_element_needs_copy_bound(
- &self,
- element: &hir::Expr<'_>,
- count: ty::Const<'tcx>,
- element_ty: Ty<'tcx>,
- ) {
- let tcx = self.tcx;
- // Actual constants as the repeat element get inserted repeatedly instead of getting copied via Copy.
- match &element.kind {
- hir::ExprKind::ConstBlock(..) => return,
- hir::ExprKind::Path(qpath) => {
- let res = self.typeck_results.borrow().qpath_res(qpath, element.hir_id);
- if let Res::Def(DefKind::Const | DefKind::AssocConst | DefKind::AnonConst, _) = res
- {
- return;
- }
- }
- _ => {}
- }
- // If someone calls a const fn, they can extract that call out into a separate constant (or a const
- // block in the future), so we check that to tell them that in the diagnostic. Does not affect typeck.
- let is_const_fn = match element.kind {
- hir::ExprKind::Call(func, _args) => match *self.node_ty(func.hir_id).kind() {
- ty::FnDef(def_id, _) => tcx.is_const_fn(def_id),
- _ => false,
- },
- _ => false,
- };
-
- // If the length is 0, we don't create any elements, so we don't copy any. If the length is 1, we
- // don't copy that one element, we move it. Only check for Copy if the length is larger.
- if count.try_eval_usize(tcx, self.param_env).map_or(true, |len| len > 1) {
- let lang_item = self.tcx.require_lang_item(LangItem::Copy, None);
- let code = traits::ObligationCauseCode::RepeatElementCopy { is_const_fn };
- self.require_type_meets(element_ty, element.span, code, lang_item);
- }
- }
-
- fn check_expr_tuple(
- &self,
- elts: &'tcx [hir::Expr<'tcx>],
- expected: Expectation<'tcx>,
- expr: &'tcx hir::Expr<'tcx>,
- ) -> Ty<'tcx> {
- let flds = expected.only_has_type(self).and_then(|ty| {
- let ty = self.resolve_vars_with_obligations(ty);
- match ty.kind() {
- ty::Tuple(flds) => Some(&flds[..]),
- _ => None,
- }
- });
-
- let elt_ts_iter = elts.iter().enumerate().map(|(i, e)| match flds {
- Some(fs) if i < fs.len() => {
- let ety = fs[i];
- self.check_expr_coercable_to_type(&e, ety, None);
- ety
- }
- _ => self.check_expr_with_expectation(&e, NoExpectation),
- });
- let tuple = self.tcx.mk_tup(elt_ts_iter);
- if tuple.references_error() {
- self.tcx.ty_error()
- } else {
- self.require_type_is_sized(tuple, expr.span, traits::TupleInitializerSized);
- tuple
- }
- }
-
- fn check_expr_struct(
- &self,
- expr: &hir::Expr<'_>,
- expected: Expectation<'tcx>,
- qpath: &QPath<'_>,
- fields: &'tcx [hir::ExprField<'tcx>],
- base_expr: &'tcx Option<&'tcx hir::Expr<'tcx>>,
- ) -> Ty<'tcx> {
- // Find the relevant variant
- let Some((variant, adt_ty)) = self.check_struct_path(qpath, expr.hir_id) else {
- self.check_struct_fields_on_error(fields, base_expr);
- return self.tcx.ty_error();
- };
-
- // Prohibit struct expressions when non-exhaustive flag is set.
- let adt = adt_ty.ty_adt_def().expect("`check_struct_path` returned non-ADT type");
- if !adt.did().is_local() && variant.is_field_list_non_exhaustive() {
- self.tcx
- .sess
- .emit_err(StructExprNonExhaustive { span: expr.span, what: adt.variant_descr() });
- }
-
- self.check_expr_struct_fields(
- adt_ty,
- expected,
- expr.hir_id,
- qpath.span(),
- variant,
- fields,
- base_expr,
- expr.span,
- );
-
- self.require_type_is_sized(adt_ty, expr.span, traits::StructInitializerSized);
- adt_ty
- }
-
- fn check_expr_struct_fields(
- &self,
- adt_ty: Ty<'tcx>,
- expected: Expectation<'tcx>,
- expr_id: hir::HirId,
- span: Span,
- variant: &'tcx ty::VariantDef,
- ast_fields: &'tcx [hir::ExprField<'tcx>],
- base_expr: &'tcx Option<&'tcx hir::Expr<'tcx>>,
- expr_span: Span,
- ) {
- let tcx = self.tcx;
-
- let expected_inputs =
- self.expected_inputs_for_expected_output(span, expected, adt_ty, &[adt_ty]);
- let adt_ty_hint = if let Some(expected_inputs) = expected_inputs {
- expected_inputs.get(0).cloned().unwrap_or(adt_ty)
- } else {
- adt_ty
- };
- // Re-link the regions that `expected_inputs_for_expected_output` (EIfEO) can erase.
- self.demand_eqtype(span, adt_ty_hint, adt_ty);
-
- let ty::Adt(adt, substs) = adt_ty.kind() else {
- span_bug!(span, "non-ADT passed to check_expr_struct_fields");
- };
- let adt_kind = adt.adt_kind();
-
- let mut remaining_fields = variant
- .fields
- .iter()
- .enumerate()
- .map(|(i, field)| (field.ident(tcx).normalize_to_macros_2_0(), (i, field)))
- .collect::<FxHashMap<_, _>>();
-
- let mut seen_fields = FxHashMap::default();
-
- let mut error_happened = false;
-
- // Type-check each field.
- for field in ast_fields {
- let ident = tcx.adjust_ident(field.ident, variant.def_id);
- let field_type = if let Some((i, v_field)) = remaining_fields.remove(&ident) {
- seen_fields.insert(ident, field.span);
- self.write_field_index(field.hir_id, i);
-
- // We don't look at stability attributes on
- // struct-like enums (yet...), but it's definitely not
- // a bug to have constructed one.
- if adt_kind != AdtKind::Enum {
- tcx.check_stability(v_field.did, Some(expr_id), field.span, None);
- }
-
- self.field_ty(field.span, v_field, substs)
- } else {
- error_happened = true;
- if let Some(prev_span) = seen_fields.get(&ident) {
- tcx.sess.emit_err(FieldMultiplySpecifiedInInitializer {
- span: field.ident.span,
- prev_span: *prev_span,
- ident,
- });
- } else {
- self.report_unknown_field(
- adt_ty,
- variant,
- field,
- ast_fields,
- adt.variant_descr(),
- expr_span,
- );
- }
-
- tcx.ty_error()
- };
-
- // Make sure to give a type to the field even if there's
- // an error, so we can continue type-checking.
- self.check_expr_coercable_to_type(&field.expr, field_type, None);
- }
-
- // Make sure the programmer specified correct number of fields.
- if adt_kind == AdtKind::Union {
- if ast_fields.len() != 1 {
- struct_span_err!(
- tcx.sess,
- span,
- E0784,
- "union expressions should have exactly one field",
- )
- .emit();
- }
- }
-
- // If check_expr_struct_fields hit an error, do not attempt to populate
- // the fields with the base_expr. This could cause us to hit errors later
- // when certain fields are assumed to exist that in fact do not.
- if error_happened {
- return;
- }
-
- if let Some(base_expr) = base_expr {
- // FIXME: We are currently creating two branches here in order to maintain
- // consistency. But they should be merged as much as possible.
- let fru_tys = if self.tcx.features().type_changing_struct_update {
- if adt.is_struct() {
- // Make some fresh substitutions for our ADT type.
- let fresh_substs = self.fresh_substs_for_item(base_expr.span, adt.did());
- // We do subtyping on the FRU fields first, so we can
- // learn exactly what types the base expr needs to be
- // constrained to in order to be compatible with the
- // struct type we expect from the expectation value.
- let fru_tys = variant
- .fields
- .iter()
- .map(|f| {
- let fru_ty = self.normalize_associated_types_in(
- expr_span,
- self.field_ty(base_expr.span, f, fresh_substs),
- );
- let ident = self.tcx.adjust_ident(f.ident(self.tcx), variant.def_id);
- if let Some(_) = remaining_fields.remove(&ident) {
- let target_ty = self.field_ty(base_expr.span, f, substs);
- let cause = self.misc(base_expr.span);
- match self.at(&cause, self.param_env).sup(target_ty, fru_ty) {
- Ok(InferOk { obligations, value: () }) => {
- self.register_predicates(obligations)
- }
- Err(_) => {
- // This should never happen, since we're just subtyping the
- // remaining_fields, but it's fine to emit this, I guess.
- self.report_mismatched_types(
- &cause,
- target_ty,
- fru_ty,
- FieldMisMatch(variant.name, ident.name),
- )
- .emit();
- }
- }
- }
- self.resolve_vars_if_possible(fru_ty)
- })
- .collect();
- // The use of fresh substs that we have subtyped against
- // our base ADT type's fields allows us to guide inference
- // along so that, e.g.
- // ```
- // struct MyStruct<'a, F1, F2, const C: usize> {
- // f: F1,
- // // Other fields that reference `'a`, `F2`, and `C`
- // }
- //
- // let x = MyStruct {
- // f: 1usize,
- // ..other_struct
- // };
- // ```
- // will have the `other_struct` expression constrained to
- // `MyStruct<'a, _, F2, C>`, as opposed to just `_`...
- // This is important to allow coercions to happen in
- // `other_struct` itself. See `coerce-in-base-expr.rs`.
- let fresh_base_ty = self.tcx.mk_adt(*adt, fresh_substs);
- self.check_expr_has_type_or_error(
- base_expr,
- self.resolve_vars_if_possible(fresh_base_ty),
- |_| {},
- );
- fru_tys
- } else {
- // Check the base_expr, regardless of a bad expected adt_ty, so we can get
- // type errors on that expression, too.
- self.check_expr(base_expr);
- self.tcx
- .sess
- .emit_err(FunctionalRecordUpdateOnNonStruct { span: base_expr.span });
- return;
- }
- } else {
- self.check_expr_has_type_or_error(base_expr, adt_ty, |_| {
- let base_ty = self.typeck_results.borrow().expr_ty(*base_expr);
- let same_adt = match (adt_ty.kind(), base_ty.kind()) {
- (ty::Adt(adt, _), ty::Adt(base_adt, _)) if adt == base_adt => true,
- _ => false,
- };
- if self.tcx.sess.is_nightly_build() && same_adt {
- feature_err(
- &self.tcx.sess.parse_sess,
- sym::type_changing_struct_update,
- base_expr.span,
- "type changing struct updating is experimental",
- )
- .emit();
- }
- });
- match adt_ty.kind() {
- ty::Adt(adt, substs) if adt.is_struct() => variant
- .fields
- .iter()
- .map(|f| {
- self.normalize_associated_types_in(expr_span, f.ty(self.tcx, substs))
- })
- .collect(),
- _ => {
- self.tcx
- .sess
- .emit_err(FunctionalRecordUpdateOnNonStruct { span: base_expr.span });
- return;
- }
- }
- };
- self.typeck_results.borrow_mut().fru_field_types_mut().insert(expr_id, fru_tys);
- } else if adt_kind != AdtKind::Union && !remaining_fields.is_empty() {
- debug!(?remaining_fields);
- let private_fields: Vec<&ty::FieldDef> = variant
- .fields
- .iter()
- .filter(|field| {
- !field.vis.is_accessible_from(tcx.parent_module(expr_id).to_def_id(), tcx)
- })
- .collect();
-
- if !private_fields.is_empty() {
- self.report_private_fields(adt_ty, span, private_fields, ast_fields);
- } else {
- self.report_missing_fields(
- adt_ty,
- span,
- remaining_fields,
- variant,
- ast_fields,
- substs,
- );
- }
- }
- }
-
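A minimal sketch of the functional record update (`..base`) path handled above, using assumed user-level names (`Config`, `base`):

    #[derive(Default)]
    struct Config {
        threads: usize,
        verbose: bool,
    }

    fn main() {
        let base = Config::default();
        // The remaining field (`verbose`) is taken from `base`; its type is what
        // the checker records via `fru_field_types_mut`.
        let tuned = Config { threads: 8, ..base };
        assert_eq!(tuned.threads, 8);
        assert!(!tuned.verbose);
    }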
- fn check_struct_fields_on_error(
- &self,
- fields: &'tcx [hir::ExprField<'tcx>],
- base_expr: &'tcx Option<&'tcx hir::Expr<'tcx>>,
- ) {
- for field in fields {
- self.check_expr(&field.expr);
- }
- if let Some(base) = *base_expr {
- self.check_expr(&base);
- }
- }
-
- /// Report an error for a struct field expression when there are fields which aren't provided.
- ///
- /// ```text
- /// error: missing field `you_can_use_this_field` in initializer of `foo::Foo`
- /// --> src/main.rs:8:5
- /// |
- /// 8 | foo::Foo {};
- /// | ^^^^^^^^ missing `you_can_use_this_field`
- ///
- /// error: aborting due to previous error
- /// ```
- fn report_missing_fields(
- &self,
- adt_ty: Ty<'tcx>,
- span: Span,
- remaining_fields: FxHashMap<Ident, (usize, &ty::FieldDef)>,
- variant: &'tcx ty::VariantDef,
- ast_fields: &'tcx [hir::ExprField<'tcx>],
- substs: SubstsRef<'tcx>,
- ) {
- let len = remaining_fields.len();
-
- let mut displayable_field_names: Vec<&str> =
- remaining_fields.keys().map(|ident| ident.as_str()).collect();
- // Sorting `&str` primitives here, so `sort_unstable` is fine.
- displayable_field_names.sort_unstable();
-
- let mut truncated_fields_error = String::new();
- let remaining_fields_names = match &displayable_field_names[..] {
- [field1] => format!("`{}`", field1),
- [field1, field2] => format!("`{field1}` and `{field2}`"),
- [field1, field2, field3] => format!("`{field1}`, `{field2}` and `{field3}`"),
- _ => {
- truncated_fields_error =
- format!(" and {} other field{}", len - 3, pluralize!(len - 3));
- displayable_field_names
- .iter()
- .take(3)
- .map(|n| format!("`{n}`"))
- .collect::<Vec<_>>()
- .join(", ")
- }
- };
-
- let mut err = struct_span_err!(
- self.tcx.sess,
- span,
- E0063,
- "missing field{} {}{} in initializer of `{}`",
- pluralize!(len),
- remaining_fields_names,
- truncated_fields_error,
- adt_ty
- );
- err.span_label(span, format!("missing {remaining_fields_names}{truncated_fields_error}"));
-
- // If the last field is a range literal, but it isn't supposed to be, then they probably
- // meant to use functional update syntax.
- //
- // We don't use `is_range_literal` because only double-sided, half-open ranges count.
- if let Some((
- last,
- ExprKind::Struct(
- QPath::LangItem(LangItem::Range, ..),
- &[ref range_start, ref range_end],
- _,
- ),
- )) = ast_fields.last().map(|last| (last, &last.expr.kind)) &&
- let variant_field =
- variant.fields.iter().find(|field| field.ident(self.tcx) == last.ident) &&
- let range_def_id = self.tcx.lang_items().range_struct() &&
- variant_field
- .and_then(|field| field.ty(self.tcx, substs).ty_adt_def())
- .map(|adt| adt.did())
- != range_def_id
- {
- let instead = self
- .tcx
- .sess
- .source_map()
- .span_to_snippet(range_end.expr.span)
- .map(|s| format!(" from `{s}`"))
- .unwrap_or_default();
- err.span_suggestion(
- range_start.span.shrink_to_hi(),
- &format!("to set the remaining fields{instead}, separate the last named field with a comma"),
- ",",
- Applicability::MaybeIncorrect,
- );
- }
-
- err.emit();
- }
-
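A plausible scenario for the comma suggestion above (hypothetical user code): dropping the comma turns the intended update syntax into a range literal for the last named field, so the remaining fields are reported as missing.

    struct Point {
        x: i32,
        y: i32,
    }

    fn main() {
        let origin = Point { x: 0, y: 0 };
        // Written:   Point { x: 1..origin }   // `1..origin` parses as a range, so `y` is "missing"
        // Intended:  Point { x: 1, ..origin }
        let p = Point { x: 1, ..origin };
        assert_eq!((p.x, p.y), (1, 0));
    }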
- /// Report an error for a struct field expression when there are invisible fields.
- ///
- /// ```text
- /// error: cannot construct `Foo` with struct literal syntax due to private fields
- /// --> src/main.rs:8:5
- /// |
- /// 8 | foo::Foo {};
- /// | ^^^^^^^^
- ///
- /// error: aborting due to previous error
- /// ```
- fn report_private_fields(
- &self,
- adt_ty: Ty<'tcx>,
- span: Span,
- private_fields: Vec<&ty::FieldDef>,
- used_fields: &'tcx [hir::ExprField<'tcx>],
- ) {
- let mut err = self.tcx.sess.struct_span_err(
- span,
- &format!(
- "cannot construct `{adt_ty}` with struct literal syntax due to private fields",
- ),
- );
- let (used_private_fields, remaining_private_fields): (
- Vec<(Symbol, Span, bool)>,
- Vec<(Symbol, Span, bool)>,
- ) = private_fields
- .iter()
- .map(|field| {
- match used_fields.iter().find(|used_field| field.name == used_field.ident.name) {
- Some(used_field) => (field.name, used_field.span, true),
- None => (field.name, self.tcx.def_span(field.did), false),
- }
- })
- .partition(|field| field.2);
- err.span_labels(used_private_fields.iter().map(|(_, span, _)| *span), "private field");
- if !remaining_private_fields.is_empty() {
- let remaining_private_fields_len = remaining_private_fields.len();
- let names = match &remaining_private_fields
- .iter()
- .map(|(name, _, _)| name)
- .collect::<Vec<_>>()[..]
- {
- _ if remaining_private_fields_len > 6 => String::new(),
- [name] => format!("`{name}` "),
- [names @ .., last] => {
- let names = names.iter().map(|name| format!("`{name}`")).collect::<Vec<_>>();
- format!("{} and `{last}` ", names.join(", "))
- }
- [] => unreachable!(),
- };
- err.note(format!(
- "... and other private field{s} {names}that {were} not provided",
- s = pluralize!(remaining_private_fields_len),
- were = pluralize!("was", remaining_private_fields_len),
- ));
- }
- err.emit();
- }
-
- fn report_unknown_field(
- &self,
- ty: Ty<'tcx>,
- variant: &'tcx ty::VariantDef,
- field: &hir::ExprField<'_>,
- skip_fields: &[hir::ExprField<'_>],
- kind_name: &str,
- expr_span: Span,
- ) {
- if variant.is_recovered() {
- self.set_tainted_by_errors();
- return;
- }
- let mut err = self.type_error_struct_with_diag(
- field.ident.span,
- |actual| match ty.kind() {
- ty::Adt(adt, ..) if adt.is_enum() => struct_span_err!(
- self.tcx.sess,
- field.ident.span,
- E0559,
- "{} `{}::{}` has no field named `{}`",
- kind_name,
- actual,
- variant.name,
- field.ident
- ),
- _ => struct_span_err!(
- self.tcx.sess,
- field.ident.span,
- E0560,
- "{} `{}` has no field named `{}`",
- kind_name,
- actual,
- field.ident
- ),
- },
- ty,
- );
-
- let variant_ident_span = self.tcx.def_ident_span(variant.def_id).unwrap();
- match variant.ctor_kind {
- CtorKind::Fn => match ty.kind() {
- ty::Adt(adt, ..) if adt.is_enum() => {
- err.span_label(
- variant_ident_span,
- format!(
- "`{adt}::{variant}` defined here",
- adt = ty,
- variant = variant.name,
- ),
- );
- err.span_label(field.ident.span, "field does not exist");
- err.span_suggestion_verbose(
- expr_span,
- &format!(
- "`{adt}::{variant}` is a tuple {kind_name}, use the appropriate syntax",
- adt = ty,
- variant = variant.name,
- ),
- format!(
- "{adt}::{variant}(/* fields */)",
- adt = ty,
- variant = variant.name,
- ),
- Applicability::HasPlaceholders,
- );
- }
- _ => {
- err.span_label(variant_ident_span, format!("`{adt}` defined here", adt = ty));
- err.span_label(field.ident.span, "field does not exist");
- err.span_suggestion_verbose(
- expr_span,
- &format!(
- "`{adt}` is a tuple {kind_name}, use the appropriate syntax",
- adt = ty,
- kind_name = kind_name,
- ),
- format!("{adt}(/* fields */)", adt = ty),
- Applicability::HasPlaceholders,
- );
- }
- },
- _ => {
- // prevent all specified fields from being suggested
- let skip_fields = skip_fields.iter().map(|x| x.ident.name);
- if let Some(field_name) = self.suggest_field_name(
- variant,
- field.ident.name,
- skip_fields.collect(),
- expr_span,
- ) {
- err.span_suggestion(
- field.ident.span,
- "a field with a similar name exists",
- field_name,
- Applicability::MaybeIncorrect,
- );
- } else {
- match ty.kind() {
- ty::Adt(adt, ..) => {
- if adt.is_enum() {
- err.span_label(
- field.ident.span,
- format!("`{}::{}` does not have this field", ty, variant.name),
- );
- } else {
- err.span_label(
- field.ident.span,
- format!("`{ty}` does not have this field"),
- );
- }
- let available_field_names =
- self.available_field_names(variant, expr_span);
- if !available_field_names.is_empty() {
- err.note(&format!(
- "available fields are: {}",
- self.name_series_display(available_field_names)
- ));
- }
- }
- _ => bug!("non-ADT passed to report_unknown_field"),
- }
- };
- }
- }
- err.emit();
- }
-
- // Return a hint about the closest match in field names
- fn suggest_field_name(
- &self,
- variant: &'tcx ty::VariantDef,
- field: Symbol,
- skip: Vec<Symbol>,
- // The span where stability will be checked
- span: Span,
- ) -> Option<Symbol> {
- let names = variant
- .fields
- .iter()
- .filter_map(|field| {
- // Ignore already-set fields, private fields from non-local crates,
- // and unstable fields.
- if skip.iter().any(|&x| x == field.name)
- || (!variant.def_id.is_local() && !field.vis.is_public())
- || matches!(
- self.tcx.eval_stability(field.did, None, span, None),
- stability::EvalResult::Deny { .. }
- )
- {
- None
- } else {
- Some(field.name)
- }
- })
- .collect::<Vec<Symbol>>();
-
- find_best_match_for_name(&names, field, None)
- }
-
- fn available_field_names(
- &self,
- variant: &'tcx ty::VariantDef,
- access_span: Span,
- ) -> Vec<Symbol> {
- variant
- .fields
- .iter()
- .filter(|field| {
- let def_scope = self
- .tcx
- .adjust_ident_and_get_scope(field.ident(self.tcx), variant.def_id, self.body_id)
- .1;
- field.vis.is_accessible_from(def_scope, self.tcx)
- && !matches!(
- self.tcx.eval_stability(field.did, None, access_span, None),
- stability::EvalResult::Deny { .. }
- )
- })
- .filter(|field| !self.tcx.is_doc_hidden(field.did))
- .map(|field| field.name)
- .collect()
- }
-
- fn name_series_display(&self, names: Vec<Symbol>) -> String {
- // dynamic limit, to never omit just one field
- let limit = if names.len() == 6 { 6 } else { 5 };
- let mut display =
- names.iter().take(limit).map(|n| format!("`{}`", n)).collect::<Vec<_>>().join(", ");
- if names.len() > limit {
- display = format!("{} ... and {} others", display, names.len() - limit);
- }
- display
- }
-
- // Check field access expressions
- fn check_field(
- &self,
- expr: &'tcx hir::Expr<'tcx>,
- base: &'tcx hir::Expr<'tcx>,
- field: Ident,
- ) -> Ty<'tcx> {
- debug!("check_field(expr: {:?}, base: {:?}, field: {:?})", expr, base, field);
- let expr_t = self.check_expr(base);
- let expr_t = self.structurally_resolved_type(base.span, expr_t);
- let mut private_candidate = None;
- let mut autoderef = self.autoderef(expr.span, expr_t);
- while let Some((base_t, _)) = autoderef.next() {
- debug!("base_t: {:?}", base_t);
- match base_t.kind() {
- ty::Adt(base_def, substs) if !base_def.is_enum() => {
- debug!("struct named {:?}", base_t);
- let (ident, def_scope) =
- self.tcx.adjust_ident_and_get_scope(field, base_def.did(), self.body_id);
- let fields = &base_def.non_enum_variant().fields;
- if let Some(index) = fields
- .iter()
- .position(|f| f.ident(self.tcx).normalize_to_macros_2_0() == ident)
- {
- let field = &fields[index];
- let field_ty = self.field_ty(expr.span, field, substs);
- // Save the index of all fields regardless of their visibility in case
- // of error recovery.
- self.write_field_index(expr.hir_id, index);
- let adjustments = self.adjust_steps(&autoderef);
- if field.vis.is_accessible_from(def_scope, self.tcx) {
- self.apply_adjustments(base, adjustments);
- self.register_predicates(autoderef.into_obligations());
-
- self.tcx.check_stability(field.did, Some(expr.hir_id), expr.span, None);
- return field_ty;
- }
- private_candidate = Some((adjustments, base_def.did(), field_ty));
- }
- }
- ty::Tuple(tys) => {
- let fstr = field.as_str();
- if let Ok(index) = fstr.parse::<usize>() {
- if fstr == index.to_string() {
- if let Some(&field_ty) = tys.get(index) {
- let adjustments = self.adjust_steps(&autoderef);
- self.apply_adjustments(base, adjustments);
- self.register_predicates(autoderef.into_obligations());
-
- self.write_field_index(expr.hir_id, index);
- return field_ty;
- }
- }
- }
- }
- _ => {}
- }
- }
- self.structurally_resolved_type(autoderef.span(), autoderef.final_ty(false));
-
- if let Some((adjustments, did, field_ty)) = private_candidate {
- // (#90483) Apply adjustments to prevent `ExprUseVisitor` from
- // creating an erroneous projection.
- self.apply_adjustments(base, adjustments);
- self.ban_private_field_access(expr, expr_t, field, did);
- return field_ty;
- }
-
- if field.name == kw::Empty {
- } else if self.method_exists(field, expr_t, expr.hir_id, true) {
- self.ban_take_value_of_method(expr, expr_t, field);
- } else if !expr_t.is_primitive_ty() {
- self.ban_nonexisting_field(field, base, expr, expr_t);
- } else {
- let field_name = field.to_string();
- let mut err = type_error_struct!(
- self.tcx().sess,
- field.span,
- expr_t,
- E0610,
- "`{expr_t}` is a primitive type and therefore doesn't have fields",
- );
- let is_valid_suffix = |field: String| {
- if field == "f32" || field == "f64" {
- return true;
- }
- let mut chars = field.chars().peekable();
- match chars.peek() {
- Some('e') | Some('E') => {
- chars.next();
- if let Some(c) = chars.peek()
- && !c.is_numeric() && *c != '-' && *c != '+'
- {
- return false;
- }
- while let Some(c) = chars.peek() {
- if !c.is_numeric() {
- break;
- }
- chars.next();
- }
- }
- _ => (),
- }
- let suffix = chars.collect::<String>();
- suffix.is_empty() || suffix == "f32" || suffix == "f64"
- };
- if let ty::Infer(ty::IntVar(_)) = expr_t.kind()
- && let ExprKind::Lit(Spanned {
- node: ast::LitKind::Int(_, ast::LitIntType::Unsuffixed),
- ..
- }) = base.kind
- && !base.span.from_expansion()
- && is_valid_suffix(field_name)
- {
- err.span_suggestion_verbose(
- field.span.shrink_to_lo(),
- "If the number is meant to be a floating point number, consider adding a `0` after the period",
- '0',
- Applicability::MaybeIncorrect,
- );
- }
- err.emit();
- }
-
- self.tcx().ty_error()
- }
-
- fn check_call_constructor<G: EmissionGuarantee>(
- &self,
- err: &mut DiagnosticBuilder<'_, G>,
- base: &'tcx hir::Expr<'tcx>,
- def_id: DefId,
- ) {
- if let Some(local_id) = def_id.as_local() {
- let hir_id = self.tcx.hir().local_def_id_to_hir_id(local_id);
- let node = self.tcx.hir().get(hir_id);
-
- if let Some(fields) = node.tuple_fields() {
- let kind = match self.tcx.opt_def_kind(local_id) {
- Some(DefKind::Ctor(of, _)) => of,
- _ => return,
- };
-
- suggest_call_constructor(base.span, kind, fields.len(), err);
- }
- } else {
- // The logic here isn't smart, but `associated_item_def_ids`
- // doesn't work nicely on local definitions.
- if let DefKind::Ctor(of, _) = self.tcx.def_kind(def_id) {
- let parent_def_id = self.tcx.parent(def_id);
- let fields = self.tcx.associated_item_def_ids(parent_def_id);
- suggest_call_constructor(base.span, of, fields.len(), err);
- }
- }
- }
-
- fn suggest_await_on_field_access(
- &self,
- err: &mut Diagnostic,
- field_ident: Ident,
- base: &'tcx hir::Expr<'tcx>,
- ty: Ty<'tcx>,
- ) {
- let output_ty = match self.get_impl_future_output_ty(ty) {
- Some(output_ty) => self.resolve_vars_if_possible(output_ty),
- _ => return,
- };
- let mut add_label = true;
- if let ty::Adt(def, _) = output_ty.skip_binder().kind() {
- // no field access on enum type
- if !def.is_enum() {
- if def
- .non_enum_variant()
- .fields
- .iter()
- .any(|field| field.ident(self.tcx) == field_ident)
- {
- add_label = false;
- err.span_label(
- field_ident.span,
- "field not available in `impl Future`, but it is available in its `Output`",
- );
- err.span_suggestion_verbose(
- base.span.shrink_to_hi(),
- "consider `await`ing on the `Future` and access the field of its `Output`",
- ".await",
- Applicability::MaybeIncorrect,
- );
- }
- }
- }
- if add_label {
- err.span_label(field_ident.span, &format!("field not found in `{ty}`"));
- }
- }
-
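Assumed user code for the `.await` suggestion above: the field lives on the future's `Output`, not on the `impl Future` returned by the call.

    struct Reply {
        status: u16,
    }

    async fn fetch() -> Reply {
        Reply { status: 200 }
    }

    async fn caller() -> u16 {
        // Written:   fetch().status        // field not available in `impl Future`
        // Suggested: fetch().await.status
        fetch().await.status
    }

    fn main() {
        // The future is never driven here; the example only needs to type-check.
        let _ = caller();
    }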
- fn ban_nonexisting_field(
- &self,
- field: Ident,
- base: &'tcx hir::Expr<'tcx>,
- expr: &'tcx hir::Expr<'tcx>,
- expr_t: Ty<'tcx>,
- ) {
- debug!(
- "ban_nonexisting_field: field={:?}, base={:?}, expr={:?}, expr_ty={:?}",
- field, base, expr, expr_t
- );
- let mut err = self.no_such_field_err(field, expr_t, base.hir_id);
-
- match *expr_t.peel_refs().kind() {
- ty::Array(_, len) => {
- self.maybe_suggest_array_indexing(&mut err, expr, base, field, len);
- }
- ty::RawPtr(..) => {
- self.suggest_first_deref_field(&mut err, expr, base, field);
- }
- ty::Adt(def, _) if !def.is_enum() => {
- self.suggest_fields_on_recordish(&mut err, def, field, expr.span);
- }
- ty::Param(param_ty) => {
- self.point_at_param_definition(&mut err, param_ty);
- }
- ty::Opaque(_, _) => {
- self.suggest_await_on_field_access(&mut err, field, base, expr_t.peel_refs());
- }
- ty::FnDef(def_id, _) => {
- self.check_call_constructor(&mut err, base, def_id);
- }
- _ => {}
- }
-
- if field.name == kw::Await {
- // We know by construction that `<expr>.await` is either on Rust 2015
- // or results in `ExprKind::Await`. Suggest switching the edition to 2018.
- err.note("to `.await` a `Future`, switch to Rust 2018 or later");
- err.help_use_latest_edition();
- }
-
- err.emit();
- }
-
- fn ban_private_field_access(
- &self,
- expr: &hir::Expr<'_>,
- expr_t: Ty<'tcx>,
- field: Ident,
- base_did: DefId,
- ) {
- let struct_path = self.tcx().def_path_str(base_did);
- let kind_name = self.tcx().def_kind(base_did).descr(base_did);
- let mut err = struct_span_err!(
- self.tcx().sess,
- field.span,
- E0616,
- "field `{field}` of {kind_name} `{struct_path}` is private",
- );
- err.span_label(field.span, "private field");
- // Also check if an accessible method exists, which is often what is meant.
- if self.method_exists(field, expr_t, expr.hir_id, false) && !self.expr_in_place(expr.hir_id)
- {
- self.suggest_method_call(
- &mut err,
- &format!("a method `{field}` also exists, call it with parentheses"),
- field,
- expr_t,
- expr,
- None,
- );
- }
- err.emit();
- }
-
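Assumed user code for the E0616 path above, including the case where an accessible method of the same name exists (the `storage`/`Wrapper` names are illustrative):

    mod storage {
        pub struct Wrapper {
            secret: i32,
        }

        impl Wrapper {
            pub fn new(secret: i32) -> Self {
                Wrapper { secret }
            }
            pub fn secret(&self) -> i32 {
                self.secret
            }
        }
    }

    fn main() {
        let w = storage::Wrapper::new(42);
        // Written:   w.secret     // error[E0616]: field `secret` is private
        // Suggested: w.secret()   // a method `secret` also exists, call it with parentheses
        assert_eq!(w.secret(), 42);
    }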
- fn ban_take_value_of_method(&self, expr: &hir::Expr<'_>, expr_t: Ty<'tcx>, field: Ident) {
- let mut err = type_error_struct!(
- self.tcx().sess,
- field.span,
- expr_t,
- E0615,
- "attempted to take value of method `{field}` on type `{expr_t}`",
- );
- err.span_label(field.span, "method, not a field");
- let expr_is_call =
- if let hir::Node::Expr(hir::Expr { kind: ExprKind::Call(callee, _args), .. }) =
- self.tcx.hir().get(self.tcx.hir().get_parent_node(expr.hir_id))
- {
- expr.hir_id == callee.hir_id
- } else {
- false
- };
- let expr_snippet =
- self.tcx.sess.source_map().span_to_snippet(expr.span).unwrap_or_default();
- let is_wrapped = expr_snippet.starts_with('(') && expr_snippet.ends_with(')');
- let after_open = expr.span.lo() + rustc_span::BytePos(1);
- let before_close = expr.span.hi() - rustc_span::BytePos(1);
-
- if expr_is_call && is_wrapped {
- err.multipart_suggestion(
- "remove wrapping parentheses to call the method",
- vec![
- (expr.span.with_hi(after_open), String::new()),
- (expr.span.with_lo(before_close), String::new()),
- ],
- Applicability::MachineApplicable,
- );
- } else if !self.expr_in_place(expr.hir_id) {
- // Suggest call parentheses inside the wrapping parentheses
- let span = if is_wrapped {
- expr.span.with_lo(after_open).with_hi(before_close)
- } else {
- expr.span
- };
- self.suggest_method_call(
- &mut err,
- "use parentheses to call the method",
- field,
- expr_t,
- expr,
- Some(span),
- );
- } else {
- let mut found = false;
-
- if let ty::RawPtr(ty_and_mut) = expr_t.kind()
- && let ty::Adt(adt_def, _) = ty_and_mut.ty.kind()
- {
- if adt_def.variants().len() == 1
- && adt_def
- .variants()
- .iter()
- .next()
- .unwrap()
- .fields
- .iter()
- .any(|f| f.ident(self.tcx) == field)
- {
- if let Some(dot_loc) = expr_snippet.rfind('.') {
- found = true;
- err.span_suggestion(
- expr.span.with_hi(expr.span.lo() + BytePos::from_usize(dot_loc)),
- "to access the field, dereference first",
- format!("(*{})", &expr_snippet[0..dot_loc]),
- Applicability::MaybeIncorrect,
- );
- }
- }
- }
-
- if !found {
- err.help("methods are immutable and cannot be assigned to");
- }
- }
-
- err.emit();
- }
-
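Assumed user code for the E0615 case above, where a method is named without call parentheses:

    fn main() {
        let s = String::from("hi");
        // Written:   s.len        // error[E0615]: attempted to take value of method `len`
        // Suggested: s.len()      // use parentheses to call the method
        assert_eq!(s.len(), 2);
    }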
- fn point_at_param_definition(&self, err: &mut Diagnostic, param: ty::ParamTy) {
- let generics = self.tcx.generics_of(self.body_id.owner.to_def_id());
- let generic_param = generics.type_param(&param, self.tcx);
- if let ty::GenericParamDefKind::Type { synthetic: true, .. } = generic_param.kind {
- return;
- }
- let param_def_id = generic_param.def_id;
- let param_hir_id = match param_def_id.as_local() {
- Some(x) => self.tcx.hir().local_def_id_to_hir_id(x),
- None => return,
- };
- let param_span = self.tcx.hir().span(param_hir_id);
- let param_name = self.tcx.hir().ty_param_name(param_def_id.expect_local());
-
- err.span_label(param_span, &format!("type parameter '{param_name}' declared here"));
- }
-
- fn suggest_fields_on_recordish(
- &self,
- err: &mut Diagnostic,
- def: ty::AdtDef<'tcx>,
- field: Ident,
- access_span: Span,
- ) {
- if let Some(suggested_field_name) =
- self.suggest_field_name(def.non_enum_variant(), field.name, vec![], access_span)
- {
- err.span_suggestion(
- field.span,
- "a field with a similar name exists",
- suggested_field_name,
- Applicability::MaybeIncorrect,
- );
- } else {
- err.span_label(field.span, "unknown field");
- let struct_variant_def = def.non_enum_variant();
- let field_names = self.available_field_names(struct_variant_def, access_span);
- if !field_names.is_empty() {
- err.note(&format!(
- "available fields are: {}",
- self.name_series_display(field_names),
- ));
- }
- }
- }
-
- fn maybe_suggest_array_indexing(
- &self,
- err: &mut Diagnostic,
- expr: &hir::Expr<'_>,
- base: &hir::Expr<'_>,
- field: Ident,
- len: ty::Const<'tcx>,
- ) {
- if let (Some(len), Ok(user_index)) =
- (len.try_eval_usize(self.tcx, self.param_env), field.as_str().parse::<u64>())
- && let Ok(base) = self.tcx.sess.source_map().span_to_snippet(base.span)
- {
- let help = "instead of using tuple indexing, use array indexing";
- let suggestion = format!("{base}[{field}]");
- let applicability = if len < user_index {
- Applicability::MachineApplicable
- } else {
- Applicability::MaybeIncorrect
- };
- err.span_suggestion(expr.span, help, suggestion, applicability);
- }
- }
-
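Assumed user code for the array-indexing suggestion above (tuple-style field access on an array):

    fn main() {
        let arr = [10, 20, 30];
        // Written:   arr.0        // error[E0609]: no field `0` on type `[i32; 3]`
        // Suggested: arr[0]
        assert_eq!(arr[0], 10);
    }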
- fn suggest_first_deref_field(
- &self,
- err: &mut Diagnostic,
- expr: &hir::Expr<'_>,
- base: &hir::Expr<'_>,
- field: Ident,
- ) {
- if let Ok(base) = self.tcx.sess.source_map().span_to_snippet(base.span) {
- let msg = format!("`{base}` is a raw pointer; try dereferencing it");
- let suggestion = format!("(*{base}).{field}");
- err.span_suggestion(expr.span, &msg, suggestion, Applicability::MaybeIncorrect);
- }
- }
-
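Assumed user code for the raw-pointer suggestion above; the pointer has to be dereferenced (in `unsafe` code) before the field access:

    struct S {
        x: i32,
    }

    fn main() {
        let s = S { x: 1 };
        let p: *const S = &s;
        // Written:   p.x          // `p` is a raw pointer, so there is no field `x` on it
        // Suggested: (*p).x
        let x = unsafe { (*p).x };
        assert_eq!(x, 1);
    }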
- fn no_such_field_err(
- &self,
- field: Ident,
- expr_t: Ty<'tcx>,
- id: HirId,
- ) -> DiagnosticBuilder<'_, ErrorGuaranteed> {
- let span = field.span;
- debug!("no_such_field_err(span: {:?}, field: {:?}, expr_t: {:?})", span, field, expr_t);
-
- let mut err = type_error_struct!(
- self.tcx().sess,
- field.span,
- expr_t,
- E0609,
- "no field `{field}` on type `{expr_t}`",
- );
-
- // try to add a suggestion in case the field is a nested field of a field of the Adt
- if let Some((fields, substs)) = self.get_field_candidates(span, expr_t) {
- for candidate_field in fields.iter() {
- if let Some(mut field_path) = self.check_for_nested_field_satisfying(
- span,
- &|candidate_field, _| candidate_field.ident(self.tcx()) == field,
- candidate_field,
- substs,
- vec![],
- self.tcx.parent_module(id).to_def_id(),
- ) {
- // field_path includes `field` that we're looking for, so pop it.
- field_path.pop();
-
- let field_path_str = field_path
- .iter()
- .map(|id| id.name.to_ident_string())
- .collect::<Vec<String>>()
- .join(".");
- debug!("field_path_str: {:?}", field_path_str);
-
- err.span_suggestion_verbose(
- field.span.shrink_to_lo(),
- "one of the expressions' fields has a field of the same name",
- format!("{field_path_str}."),
- Applicability::MaybeIncorrect,
- );
- }
- }
- }
- err
- }
-
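Assumed user code for the nested-field suggestion above, where the requested field exists one level down (`Outer`/`Inner` are illustrative names):

    struct Inner {
        value: i32,
    }

    struct Outer {
        inner: Inner,
    }

    fn main() {
        let o = Outer { inner: Inner { value: 7 } };
        // Written:   o.value          // error[E0609]: no field `value` on type `Outer`
        // Suggested: o.inner.value    // one of the expression's fields has a field of the same name
        assert_eq!(o.inner.value, 7);
    }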
- pub(crate) fn get_field_candidates(
- &self,
- span: Span,
- base_t: Ty<'tcx>,
- ) -> Option<(&[ty::FieldDef], SubstsRef<'tcx>)> {
- debug!("get_field_candidates(span: {:?}, base_t: {:?})", span, base_t);
-
- for (base_t, _) in self.autoderef(span, base_t) {
- match base_t.kind() {
- ty::Adt(base_def, substs) if !base_def.is_enum() => {
- let fields = &base_def.non_enum_variant().fields;
- // For compile-time performance reasons, put a limit on the number of fields we search.
- if fields.len() > 100 {
- return None;
- }
- return Some((fields, substs));
- }
- _ => {}
- }
- }
- None
- }
-
- /// This method is called after we have encountered a missing field error to recursively
- /// search for the field
- pub(crate) fn check_for_nested_field_satisfying(
- &self,
- span: Span,
- matches: &impl Fn(&ty::FieldDef, Ty<'tcx>) -> bool,
- candidate_field: &ty::FieldDef,
- subst: SubstsRef<'tcx>,
- mut field_path: Vec<Ident>,
- id: DefId,
- ) -> Option<Vec<Ident>> {
- debug!(
- "check_for_nested_field_satisfying(span: {:?}, candidate_field: {:?}, field_path: {:?})",
- span, candidate_field, field_path
- );
-
- if field_path.len() > 3 {
- // For compile-time reasons, and to avoid infinite recursion, we only check for fields
- // up to a depth of three.
- None
- } else {
- // recursively search fields of `candidate_field` if it's a ty::Adt
- field_path.push(candidate_field.ident(self.tcx).normalize_to_macros_2_0());
- let field_ty = candidate_field.ty(self.tcx, subst);
- if let Some((nested_fields, subst)) = self.get_field_candidates(span, field_ty) {
- for field in nested_fields.iter() {
- if field.vis.is_accessible_from(id, self.tcx) {
- if matches(candidate_field, field_ty) {
- return Some(field_path);
- } else if let Some(field_path) = self.check_for_nested_field_satisfying(
- span,
- matches,
- field,
- subst,
- field_path.clone(),
- id,
- ) {
- return Some(field_path);
- }
- }
- }
- }
- None
- }
- }
-
- fn check_expr_index(
- &self,
- base: &'tcx hir::Expr<'tcx>,
- idx: &'tcx hir::Expr<'tcx>,
- expr: &'tcx hir::Expr<'tcx>,
- ) -> Ty<'tcx> {
- let base_t = self.check_expr(&base);
- let idx_t = self.check_expr(&idx);
-
- if base_t.references_error() {
- base_t
- } else if idx_t.references_error() {
- idx_t
- } else {
- let base_t = self.structurally_resolved_type(base.span, base_t);
- match self.lookup_indexing(expr, base, base_t, idx, idx_t) {
- Some((index_ty, element_ty)) => {
- // two-phase not needed because index_ty is never mutable
- self.demand_coerce(idx, idx_t, index_ty, None, AllowTwoPhase::No);
- self.select_obligations_where_possible(false, |errors| {
- self.point_at_index_if_possible(errors, idx.span)
- });
- element_ty
- }
- None => {
- let mut err = type_error_struct!(
- self.tcx.sess,
- expr.span,
- base_t,
- E0608,
- "cannot index into a value of type `{base_t}`",
- );
- // Try to give some advice about indexing tuples.
- if let ty::Tuple(..) = base_t.kind() {
- let mut needs_note = true;
- // If the index is an integer, we can show the actual
- // fixed expression:
- if let ExprKind::Lit(ref lit) = idx.kind {
- if let ast::LitKind::Int(i, ast::LitIntType::Unsuffixed) = lit.node {
- let snip = self.tcx.sess.source_map().span_to_snippet(base.span);
- if let Ok(snip) = snip {
- err.span_suggestion(
- expr.span,
- "to access tuple elements, use",
- format!("{snip}.{i}"),
- Applicability::MachineApplicable,
- );
- needs_note = false;
- }
- }
- }
- if needs_note {
- err.help(
- "to access tuple elements, use tuple indexing \
- syntax (e.g., `tuple.0`)",
- );
- }
- }
- err.emit();
- self.tcx.ty_error()
- }
- }
- }
- }
-
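Assumed user code for the E0608 suggestion above (bracket indexing on a tuple):

    fn main() {
        let tup = (1, "two", 3.0);
        // Written:   tup[0]       // error[E0608]: cannot index into a value of type `(i32, &str, f64)`
        // Suggested: tup.0
        assert_eq!(tup.0, 1);
    }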
- fn point_at_index_if_possible(
- &self,
- errors: &mut Vec<traits::FulfillmentError<'tcx>>,
- span: Span,
- ) {
- for error in errors {
- match error.obligation.predicate.kind().skip_binder() {
- ty::PredicateKind::Trait(predicate)
- if self.tcx.is_diagnostic_item(sym::SliceIndex, predicate.trait_ref.def_id) => {
- }
- _ => continue,
- }
- error.obligation.cause.span = span;
- }
- }
-
- fn check_expr_yield(
- &self,
- value: &'tcx hir::Expr<'tcx>,
- expr: &'tcx hir::Expr<'tcx>,
- src: &'tcx hir::YieldSource,
- ) -> Ty<'tcx> {
- match self.resume_yield_tys {
- Some((resume_ty, yield_ty)) => {
- self.check_expr_coercable_to_type(&value, yield_ty, None);
-
- resume_ty
- }
- // Given that this `yield` expression was generated as a result of lowering a `.await`,
- // we know that the yield type must be `()`; however, the context won't contain this
- // information. Hence, we check the source of the yield expression here and check its
- // value's type against `()` (this check should always hold).
- None if src.is_await() => {
- self.check_expr_coercable_to_type(&value, self.tcx.mk_unit(), None);
- self.tcx.mk_unit()
- }
- _ => {
- self.tcx.sess.emit_err(YieldExprOutsideOfGenerator { span: expr.span });
- // Avoid expressions without types during writeback (#78653).
- self.check_expr(value);
- self.tcx.mk_unit()
- }
- }
- }
-
- fn check_expr_asm_operand(&self, expr: &'tcx hir::Expr<'tcx>, is_input: bool) {
- let needs = if is_input { Needs::None } else { Needs::MutPlace };
- let ty = self.check_expr_with_needs(expr, needs);
- self.require_type_is_sized(ty, expr.span, traits::InlineAsmSized);
-
- if !is_input && !expr.is_syntactic_place_expr() {
- let mut err = self.tcx.sess.struct_span_err(expr.span, "invalid asm output");
- err.span_label(expr.span, "cannot assign to this expression");
- err.emit();
- }
-
- // If this is an input value, we require its type to be fully resolved
- // at this point. This allows us to provide helpful coercions which help
- // pass the type candidate list in a later pass.
- //
- // We don't require output types to be resolved at this point, which
- // allows them to be inferred based on how they are used later in the
- // function.
- if is_input {
- let ty = self.structurally_resolved_type(expr.span, ty);
- match *ty.kind() {
- ty::FnDef(..) => {
- let fnptr_ty = self.tcx.mk_fn_ptr(ty.fn_sig(self.tcx));
- self.demand_coerce(expr, ty, fnptr_ty, None, AllowTwoPhase::No);
- }
- ty::Ref(_, base_ty, mutbl) => {
- let ptr_ty = self.tcx.mk_ptr(ty::TypeAndMut { ty: base_ty, mutbl });
- self.demand_coerce(expr, ty, ptr_ty, None, AllowTwoPhase::No);
- }
- _ => {}
- }
- }
- }
-
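A minimal, architecture-specific sketch of the operand rules checked above (assumes x86_64 and the stable `asm!` macro): inputs are ordinary values, while outputs must be assignable place expressions.

    use std::arch::asm;

    fn main() {
        let x: u64 = 3;
        let y: u64;
        unsafe {
            // `in(reg) x` is an input operand; `out(reg) y` must be a place expression.
            asm!("mov {0}, {1}", out(reg) y, in(reg) x);
        }
        assert_eq!(y, 3);
    }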
- fn check_expr_asm(&self, asm: &'tcx hir::InlineAsm<'tcx>) -> Ty<'tcx> {
- for (op, _op_sp) in asm.operands {
- match op {
- hir::InlineAsmOperand::In { expr, .. } => {
- self.check_expr_asm_operand(expr, true);
- }
- hir::InlineAsmOperand::Out { expr: Some(expr), .. }
- | hir::InlineAsmOperand::InOut { expr, .. } => {
- self.check_expr_asm_operand(expr, false);
- }
- hir::InlineAsmOperand::Out { expr: None, .. } => {}
- hir::InlineAsmOperand::SplitInOut { in_expr, out_expr, .. } => {
- self.check_expr_asm_operand(in_expr, true);
- if let Some(out_expr) = out_expr {
- self.check_expr_asm_operand(out_expr, false);
- }
- }
- // `AnonConst`s have their own body and are type-checked separately.
- // As they don't flow into the type system we don't need them to
- // be well-formed.
- hir::InlineAsmOperand::Const { .. } | hir::InlineAsmOperand::SymFn { .. } => {}
- hir::InlineAsmOperand::SymStatic { .. } => {}
- }
- }
- if asm.options.contains(ast::InlineAsmOptions::NORETURN) {
- self.tcx.types.never
- } else {
- self.tcx.mk_unit()
- }
- }
-}
-
-pub(super) fn ty_kind_suggestion(ty: Ty<'_>) -> Option<&'static str> {
- Some(match ty.kind() {
- ty::Bool => "true",
- ty::Char => "'a'",
- ty::Int(_) | ty::Uint(_) => "42",
- ty::Float(_) => "3.14159",
- ty::Error(_) | ty::Never => return None,
- _ => "value",
- })
-}
diff --git a/compiler/rustc_typeck/src/check/fn_ctxt/checks.rs b/compiler/rustc_typeck/src/check/fn_ctxt/checks.rs
deleted file mode 100644
index 660e7e4e3..000000000
--- a/compiler/rustc_typeck/src/check/fn_ctxt/checks.rs
+++ /dev/null
@@ -1,1900 +0,0 @@
-use crate::astconv::AstConv;
-use crate::check::coercion::CoerceMany;
-use crate::check::fn_ctxt::arg_matrix::{
- ArgMatrix, Compatibility, Error, ExpectedIdx, ProvidedIdx,
-};
-use crate::check::gather_locals::Declaration;
-use crate::check::intrinsicck::InlineAsmCtxt;
-use crate::check::method::MethodCallee;
-use crate::check::Expectation::*;
-use crate::check::TupleArgumentsFlag::*;
-use crate::check::{
- potentially_plural_count, struct_span_err, BreakableCtxt, Diverges, Expectation, FnCtxt,
- LocalTy, Needs, TupleArgumentsFlag,
-};
-use crate::structured_errors::StructuredDiagnostic;
-
-use rustc_ast as ast;
-use rustc_errors::{pluralize, Applicability, Diagnostic, DiagnosticId, MultiSpan};
-use rustc_hir as hir;
-use rustc_hir::def::{CtorOf, DefKind, Res};
-use rustc_hir::def_id::DefId;
-use rustc_hir::{ExprKind, Node, QPath};
-use rustc_index::vec::IndexVec;
-use rustc_infer::infer::error_reporting::{FailureCode, ObligationCauseExt};
-use rustc_infer::infer::type_variable::{TypeVariableOrigin, TypeVariableOriginKind};
-use rustc_infer::infer::InferOk;
-use rustc_infer::infer::TypeTrace;
-use rustc_middle::ty::adjustment::AllowTwoPhase;
-use rustc_middle::ty::visit::TypeVisitable;
-use rustc_middle::ty::{self, DefIdTree, IsSuggestable, Ty};
-use rustc_session::Session;
-use rustc_span::symbol::Ident;
-use rustc_span::{self, Span};
-use rustc_trait_selection::traits::{self, ObligationCauseCode, SelectionContext};
-
-use std::iter;
-use std::slice;
-
-impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
- pub(in super::super) fn check_casts(&self) {
- let mut deferred_cast_checks = self.deferred_cast_checks.borrow_mut();
- debug!("FnCtxt::check_casts: {} deferred checks", deferred_cast_checks.len());
- for cast in deferred_cast_checks.drain(..) {
- cast.check(self);
- }
- }
-
- pub(in super::super) fn check_transmutes(&self) {
- let mut deferred_transmute_checks = self.deferred_transmute_checks.borrow_mut();
- debug!("FnCtxt::check_transmutes: {} deferred checks", deferred_transmute_checks.len());
- for (from, to, span) in deferred_transmute_checks.drain(..) {
- self.check_transmute(span, from, to);
- }
- }
-
- pub(in super::super) fn check_asms(&self) {
- let mut deferred_asm_checks = self.deferred_asm_checks.borrow_mut();
- debug!("FnCtxt::check_asm: {} deferred checks", deferred_asm_checks.len());
- for (asm, hir_id) in deferred_asm_checks.drain(..) {
- let enclosing_id = self.tcx.hir().enclosing_body_owner(hir_id);
- InlineAsmCtxt::new_in_fn(self)
- .check_asm(asm, self.tcx.hir().local_def_id_to_hir_id(enclosing_id));
- }
- }
-
- pub(in super::super) fn check_method_argument_types(
- &self,
- sp: Span,
- expr: &'tcx hir::Expr<'tcx>,
- method: Result<MethodCallee<'tcx>, ()>,
- args_no_rcvr: &'tcx [hir::Expr<'tcx>],
- tuple_arguments: TupleArgumentsFlag,
- expected: Expectation<'tcx>,
- ) -> Ty<'tcx> {
- let has_error = match method {
- Ok(method) => method.substs.references_error() || method.sig.references_error(),
- Err(_) => true,
- };
- if has_error {
- let err_inputs = self.err_args(args_no_rcvr.len());
-
- let err_inputs = match tuple_arguments {
- DontTupleArguments => err_inputs,
- TupleArguments => vec![self.tcx.intern_tup(&err_inputs)],
- };
-
- self.check_argument_types(
- sp,
- expr,
- &err_inputs,
- None,
- args_no_rcvr,
- false,
- tuple_arguments,
- method.ok().map(|method| method.def_id),
- );
- return self.tcx.ty_error();
- }
-
- let method = method.unwrap();
- // HACK(eddyb) ignore self in the definition (see above).
- let expected_input_tys = self.expected_inputs_for_expected_output(
- sp,
- expected,
- method.sig.output(),
- &method.sig.inputs()[1..],
- );
- self.check_argument_types(
- sp,
- expr,
- &method.sig.inputs()[1..],
- expected_input_tys,
- args_no_rcvr,
- method.sig.c_variadic,
- tuple_arguments,
- Some(method.def_id),
- );
- method.sig.output()
- }
-
- /// Generic function that factors out common logic from function calls,
- /// method calls and overloaded operators.
- pub(in super::super) fn check_argument_types(
- &self,
- // Span enclosing the call site
- call_span: Span,
- // Expression of the call site
- call_expr: &'tcx hir::Expr<'tcx>,
- // Types (as defined in the *signature* of the target function)
- formal_input_tys: &[Ty<'tcx>],
- // More specific expected types, after unifying with caller output types
- expected_input_tys: Option<Vec<Ty<'tcx>>>,
- // The expressions for each provided argument
- provided_args: &'tcx [hir::Expr<'tcx>],
- // Whether the function is variadic, for example when imported from C
- c_variadic: bool,
- // Whether the arguments have been bundled in a tuple (e.g., closures)
- tuple_arguments: TupleArgumentsFlag,
- // The DefId for the function being called, for better error messages
- fn_def_id: Option<DefId>,
- ) {
- let tcx = self.tcx;
-
- // Conceptually, we've got some number of expected inputs, and some number of provided arguments
- // and we can form a grid of whether each argument could satisfy a given input:
- // in1 | in2 | in3 | ...
- // arg1 ? | | |
- // arg2 | ? | |
- // arg3 | | ? |
- // ...
- // Initially, we just check the diagonal, because in the case of correct code
- // these are the only checks that matter
- // However, in the unhappy path, we'll fill in this whole grid to attempt to provide
- // better error messages about invalid method calls.
-
- // All the input types from the fn signature must outlive the call
- // so as to validate implied bounds.
- for (&fn_input_ty, arg_expr) in iter::zip(formal_input_tys, provided_args) {
- self.register_wf_obligation(fn_input_ty.into(), arg_expr.span, traits::MiscObligation);
- }
-
- let mut err_code = "E0061";
-
- // If the arguments should be wrapped in a tuple (e.g., closures), unwrap them here
- let (formal_input_tys, expected_input_tys) = if tuple_arguments == TupleArguments {
- let tuple_type = self.structurally_resolved_type(call_span, formal_input_tys[0]);
- match tuple_type.kind() {
- // We expected a tuple and got a tuple
- ty::Tuple(arg_types) => {
- // Argument length differs
- if arg_types.len() != provided_args.len() {
- err_code = "E0057";
- }
- let expected_input_tys = match expected_input_tys {
- Some(expected_input_tys) => match expected_input_tys.get(0) {
- Some(ty) => match ty.kind() {
- ty::Tuple(tys) => Some(tys.iter().collect()),
- _ => None,
- },
- None => None,
- },
- None => None,
- };
- (arg_types.iter().collect(), expected_input_tys)
- }
- _ => {
- // Otherwise, there's a mismatch, so clear out what we're expecting, and set
- // our input types to err_args so we don't blow up the error messages
- struct_span_err!(
- tcx.sess,
- call_span,
- E0059,
- "cannot use call notation; the first type parameter \
- for the function trait is neither a tuple nor unit"
- )
- .emit();
- (self.err_args(provided_args.len()), None)
- }
- }
- } else {
- (formal_input_tys.to_vec(), expected_input_tys)
- };
-
- // If there are no external expectations at the call site, just use the types from the function defn
- let expected_input_tys = if let Some(expected_input_tys) = expected_input_tys {
- assert_eq!(expected_input_tys.len(), formal_input_tys.len());
- expected_input_tys
- } else {
- formal_input_tys.clone()
- };
-
- let minimum_input_count = expected_input_tys.len();
- let provided_arg_count = provided_args.len();
-
- // We introduce a helper function to demand that a given argument satisfy a given input
- // This is more complicated than just checking type equality, as arguments could be coerced
- // This version writes those types back so further type checking uses the narrowed types
- let demand_compatible = |idx| {
- let formal_input_ty: Ty<'tcx> = formal_input_tys[idx];
- let expected_input_ty: Ty<'tcx> = expected_input_tys[idx];
- let provided_arg = &provided_args[idx];
-
- debug!("checking argument {}: {:?} = {:?}", idx, provided_arg, formal_input_ty);
-
- // We're on the happy path here, so we'll do a more involved check and write back types
- // To check compatibility, we'll do 3 things:
- // 1. Unify the provided argument with the expected type
- let expectation = Expectation::rvalue_hint(self, expected_input_ty);
-
- let checked_ty = self.check_expr_with_expectation(provided_arg, expectation);
-
- // 2. Coerce to the most detailed type that could be coerced
- // to, which is `expected_ty` if `rvalue_hint` returns an
- // `ExpectHasType(expected_ty)`, or the `formal_ty` otherwise.
- let coerced_ty = expectation.only_has_type(self).unwrap_or(formal_input_ty);
-
- // Cause selection errors caused by resolving a single argument to point at the
- // argument and not the call. This lets us customize the span pointed to in the
- // fulfillment error to be more accurate.
- let coerced_ty =
- self.resolve_vars_with_obligations_and_mutate_fulfillment(coerced_ty, |errors| {
- self.point_at_type_arg_instead_of_call_if_possible(errors, call_expr);
- self.point_at_arg_instead_of_call_if_possible(
- errors,
- call_expr,
- call_span,
- provided_args,
- &expected_input_tys,
- );
- });
-
- let coerce_error = self
- .try_coerce(provided_arg, checked_ty, coerced_ty, AllowTwoPhase::Yes, None)
- .err();
-
- if coerce_error.is_some() {
- return Compatibility::Incompatible(coerce_error);
- }
-
- // 3. Check if the formal type is a supertype of the checked one
- // and register any such obligations for future type checks
- let supertype_error = self
- .at(&self.misc(provided_arg.span), self.param_env)
- .sup(formal_input_ty, coerced_ty);
- let subtyping_error = match supertype_error {
- Ok(InferOk { obligations, value: () }) => {
- self.register_predicates(obligations);
- None
- }
- Err(err) => Some(err),
- };
-
- // If neither check failed, the types are compatible
- match subtyping_error {
- None => Compatibility::Compatible,
- Some(_) => Compatibility::Incompatible(subtyping_error),
- }
- };
-
- // To start, we only care "along the diagonal", where we expect every
- // provided arg to be in the right spot
- let mut compatibility_diagonal =
- vec![Compatibility::Incompatible(None); provided_args.len()];
-
- // Keep track of whether we *could possibly* be satisfied, i.e. whether we're on the happy path.
- // If the wrong number of arguments were supplied, we CAN'T be satisfied,
- // and if we're c_variadic, the supplied arguments must be >= the minimum count from the function;
- // otherwise, they need to be identical, because Rust doesn't currently support variadic functions.
- let mut call_appears_satisfied = if c_variadic {
- provided_arg_count >= minimum_input_count
- } else {
- provided_arg_count == minimum_input_count
- };
-
- // Check the arguments.
- // We do this in a pretty awful way: first we type-check any arguments
- // that are not closures, then we type-check the closures. This is so
- // that we have more information about the types of arguments when we
- // type-check the functions. This isn't really the right way to do this.
- for check_closures in [false, true] {
- // More awful hacks: before we check argument types, try to do
- // an "opportunistic" trait resolution of any trait bounds on
- // the call. This helps coercions.
- if check_closures {
- self.select_obligations_where_possible(false, |errors| {
- self.point_at_type_arg_instead_of_call_if_possible(errors, call_expr);
- self.point_at_arg_instead_of_call_if_possible(
- errors,
- call_expr,
- call_span,
- &provided_args,
- &expected_input_tys,
- );
- })
- }
-
- // Check each argument, to satisfy the input it was provided for
- // Visually, we're traveling down the diagonal of the compatibility matrix
- for (idx, arg) in provided_args.iter().enumerate() {
- // Warn only for the first loop (the "no closures" one).
- // Closure arguments themselves can't be diverging, but
- // a previous argument can, e.g., `foo(panic!(), || {})`.
- if !check_closures {
- self.warn_if_unreachable(arg.hir_id, arg.span, "expression");
- }
-
- // For C-variadic functions, we don't have a declared type for all of
- // the arguments, hence we only do our usual type checking with
- // the arguments whose types we do know. However, we *can* check
- // for unreachable expressions (see above).
- // FIXME: the unreachable warning currently isn't emitted.
- if idx >= minimum_input_count {
- continue;
- }
-
- let is_closure = matches!(arg.kind, ExprKind::Closure { .. });
- if is_closure != check_closures {
- continue;
- }
-
- let compatible = demand_compatible(idx);
- let is_compatible = matches!(compatible, Compatibility::Compatible);
- compatibility_diagonal[idx] = compatible;
-
- if !is_compatible {
- call_appears_satisfied = false;
- }
- }
- }
-
- if c_variadic && provided_arg_count < minimum_input_count {
- err_code = "E0060";
- }
-
- for arg in provided_args.iter().skip(minimum_input_count) {
- // Make sure we've checked this expr at least once.
- let arg_ty = self.check_expr(&arg);
-
- // If the function is c-style variadic, we skipped a bunch of arguments
- // so we need to check those, and write out the types
- // Ideally this would be folded into the above, for uniform style
- // but c-variadic is already a corner case
- if c_variadic {
- fn variadic_error<'tcx>(
- sess: &'tcx Session,
- span: Span,
- ty: Ty<'tcx>,
- cast_ty: &str,
- ) {
- use crate::structured_errors::MissingCastForVariadicArg;
-
- MissingCastForVariadicArg { sess, span, ty, cast_ty }.diagnostic().emit();
- }
-
- // There are a few types which get autopromoted when passed via varargs
- // in C, but we just error out instead and require explicit casts.
- let arg_ty = self.structurally_resolved_type(arg.span, arg_ty);
- match arg_ty.kind() {
- ty::Float(ty::FloatTy::F32) => {
- variadic_error(tcx.sess, arg.span, arg_ty, "c_double");
- }
- ty::Int(ty::IntTy::I8 | ty::IntTy::I16) | ty::Bool => {
- variadic_error(tcx.sess, arg.span, arg_ty, "c_int");
- }
- ty::Uint(ty::UintTy::U8 | ty::UintTy::U16) => {
- variadic_error(tcx.sess, arg.span, arg_ty, "c_uint");
- }
- ty::FnDef(..) => {
- let ptr_ty = self.tcx.mk_fn_ptr(arg_ty.fn_sig(self.tcx));
- let ptr_ty = self.resolve_vars_if_possible(ptr_ty);
- variadic_error(tcx.sess, arg.span, arg_ty, &ptr_ty.to_string());
- }
- _ => {}
- }
- }
- }
-
- if !call_appears_satisfied {
- let compatibility_diagonal = IndexVec::from_raw(compatibility_diagonal);
- let provided_args = IndexVec::from_iter(provided_args.iter().take(if c_variadic {
- minimum_input_count
- } else {
- provided_arg_count
- }));
- debug_assert_eq!(
- formal_input_tys.len(),
- expected_input_tys.len(),
- "expected formal_input_tys to be the same size as expected_input_tys"
- );
- let formal_and_expected_inputs = IndexVec::from_iter(
- formal_input_tys
- .iter()
- .copied()
- .zip(expected_input_tys.iter().copied())
- .map(|vars| self.resolve_vars_if_possible(vars)),
- );
-
- self.report_arg_errors(
- compatibility_diagonal,
- formal_and_expected_inputs,
- provided_args,
- c_variadic,
- err_code,
- fn_def_id,
- call_span,
- call_expr,
- );
- }
- }
-
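Assumed user code for the C-variadic checks above: arguments that C would auto-promote (here an `f32`) have to be cast explicitly before being passed through `...` (the `printf` declaration is supplied by the example itself).

    use std::os::raw::{c_char, c_int};

    extern "C" {
        fn printf(fmt: *const c_char, ...) -> c_int;
    }

    fn main() {
        let x: f32 = 1.5;
        unsafe {
            // Written:   printf(fmt, x);   // error: can't pass `f32` to a variadic function
            // Suggested: cast to `c_double` (i.e. `f64`) first
            printf(b"%f\n\0".as_ptr() as *const c_char, x as f64);
        }
    }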
- fn report_arg_errors(
- &self,
- compatibility_diagonal: IndexVec<ProvidedIdx, Compatibility<'tcx>>,
- formal_and_expected_inputs: IndexVec<ExpectedIdx, (Ty<'tcx>, Ty<'tcx>)>,
- provided_args: IndexVec<ProvidedIdx, &'tcx hir::Expr<'tcx>>,
- c_variadic: bool,
- err_code: &str,
- fn_def_id: Option<DefId>,
- call_span: Span,
- call_expr: &hir::Expr<'tcx>,
- ) {
- // Next, let's construct the error
- let (error_span, full_call_span, ctor_of) = match &call_expr.kind {
- hir::ExprKind::Call(
- hir::Expr { hir_id, span, kind: hir::ExprKind::Path(qpath), .. },
- _,
- ) => {
- if let Res::Def(DefKind::Ctor(of, _), _) =
- self.typeck_results.borrow().qpath_res(qpath, *hir_id)
- {
- (call_span, *span, Some(of))
- } else {
- (call_span, *span, None)
- }
- }
- hir::ExprKind::Call(hir::Expr { span, .. }, _) => (call_span, *span, None),
- hir::ExprKind::MethodCall(path_segment, _, span) => {
- let ident_span = path_segment.ident.span;
- let ident_span = if let Some(args) = path_segment.args {
- ident_span.with_hi(args.span_ext.hi())
- } else {
- ident_span
- };
- (
- *span, ident_span, None, // methods are never ctors
- )
- }
- k => span_bug!(call_span, "checking argument types on a non-call: `{:?}`", k),
- };
- let args_span = error_span.trim_start(full_call_span).unwrap_or(error_span);
- let call_name = match ctor_of {
- Some(CtorOf::Struct) => "struct",
- Some(CtorOf::Variant) => "enum variant",
- None => "function",
- };
-
- // Don't print if it has error types or is just plain `_`
- fn has_error_or_infer<'tcx>(tys: impl IntoIterator<Item = Ty<'tcx>>) -> bool {
- tys.into_iter().any(|ty| ty.references_error() || ty.is_ty_var())
- }
-
- self.set_tainted_by_errors();
- let tcx = self.tcx;
-
- // Get the argument span in the context of the call span so that
- // suggestions and labels are (more) correct when an arg is a
- // macro invocation.
- let normalize_span = |span: Span| -> Span {
- let normalized_span = span.find_ancestor_inside(error_span).unwrap_or(span);
- // Sometimes macros mess up the spans, so do not normalize the
- // arg span to equal the error span, because that's less useful
- // than pointing out the arg expr in the wrong context.
- if normalized_span.source_equal(error_span) { span } else { normalized_span }
- };
-
- // Precompute the provided types and spans, since that's all we typically need for below
- let provided_arg_tys: IndexVec<ProvidedIdx, (Ty<'tcx>, Span)> = provided_args
- .iter()
- .map(|expr| {
- let ty = self
- .typeck_results
- .borrow()
- .expr_ty_adjusted_opt(*expr)
- .unwrap_or_else(|| tcx.ty_error());
- (self.resolve_vars_if_possible(ty), normalize_span(expr.span))
- })
- .collect();
- let callee_expr = match &call_expr.peel_blocks().kind {
- hir::ExprKind::Call(callee, _) => Some(*callee),
- hir::ExprKind::MethodCall(_, callee, _) => {
- if let Some((DefKind::AssocFn, def_id)) =
- self.typeck_results.borrow().type_dependent_def(call_expr.hir_id)
- && let Some(assoc) = tcx.opt_associated_item(def_id)
- && assoc.fn_has_self_parameter
- {
- Some(&callee[0])
- } else {
- None
- }
- }
- _ => None,
- };
- let callee_ty = callee_expr
- .and_then(|callee_expr| self.typeck_results.borrow().expr_ty_adjusted_opt(callee_expr));
-
-        // A "softer" version of `demand_compatible`, which checks types without persisting them,
-        // and treats error types differently.
-        // This will allow us to "probe" for other argument orders that would likely have been correct.
- let check_compatible = |provided_idx: ProvidedIdx, expected_idx: ExpectedIdx| {
- if provided_idx.as_usize() == expected_idx.as_usize() {
- return compatibility_diagonal[provided_idx].clone();
- }
-
- let (formal_input_ty, expected_input_ty) = formal_and_expected_inputs[expected_idx];
- // If either is an error type, we defy the usual convention and consider them to *not* be
- // coercible. This prevents our error message heuristic from trying to pass errors into
- // every argument.
- if (formal_input_ty, expected_input_ty).references_error() {
- return Compatibility::Incompatible(None);
- }
-
- let (arg_ty, arg_span) = provided_arg_tys[provided_idx];
-
- let expectation = Expectation::rvalue_hint(self, expected_input_ty);
- let coerced_ty = expectation.only_has_type(self).unwrap_or(formal_input_ty);
- let can_coerce = self.can_coerce(arg_ty, coerced_ty);
- if !can_coerce {
- return Compatibility::Incompatible(None);
- }
-
- // Using probe here, since we don't want this subtyping to affect inference.
- let subtyping_error = self.probe(|_| {
- self.at(&self.misc(arg_span), self.param_env).sup(formal_input_ty, coerced_ty).err()
- });
-
- // Same as above: if either the coerce type or the checked type is an error type,
- // consider them *not* compatible.
- let references_error = (coerced_ty, arg_ty).references_error();
- match (references_error, subtyping_error) {
- (false, None) => Compatibility::Compatible,
- (_, subtyping_error) => Compatibility::Incompatible(subtyping_error),
- }
- };
-
-        // The algorithm here is inspired by Levenshtein distance and longest common subsequence.
-        // We'll try to detect 4 different types of mistakes (sketched in the example below):
-        // - An extra argument has been provided that doesn't satisfy *any* of the expected inputs
-        // - An expected input is missing, which isn't satisfied by *any* of the provided arguments
-        // - Some number of arguments have been provided in the wrong order
-        // - A type is straight up invalid
-
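For orientation, here is a hedged sketch of the four call-site mistakes this matcher tries to recognize. The function `draw` and its signature are invented purely for the illustration, and the broken calls are left commented out so the snippet itself compiles:

```rust
// Hypothetical function used only to illustrate the four mistake categories.
fn draw(_x: i32, _label: &str, _bold: bool) {}

fn main() {
    draw(1, "ok", true);        // well-formed call, for reference
    // draw(1, "ok", true, 2);  // extra argument that satisfies no expected input
    // draw(1, true);           // an expected `&str` input is missing
    // draw("ok", 1, true);     // two arguments provided in the wrong order
    // draw(1, "ok", 3.5);      // a plain type mismatch in the last position
}
```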
- // First, let's find the errors
- let (mut errors, matched_inputs) =
- ArgMatrix::new(provided_args.len(), formal_and_expected_inputs.len(), check_compatible)
- .find_errors();
-
- // First, check if we just need to wrap some arguments in a tuple.
- if let Some((mismatch_idx, terr)) =
- compatibility_diagonal.iter().enumerate().find_map(|(i, c)| {
- if let Compatibility::Incompatible(Some(terr)) = c { Some((i, terr)) } else { None }
- })
- {
- // Is the first bad expected argument a tuple?
- // Do we have as many extra provided arguments as the tuple's length?
- // If so, we might have just forgotten to wrap some args in a tuple.
- if let Some(ty::Tuple(tys)) =
- formal_and_expected_inputs.get(mismatch_idx.into()).map(|tys| tys.1.kind())
- // If the tuple is unit, we're not actually wrapping any arguments.
- && !tys.is_empty()
- && provided_arg_tys.len() == formal_and_expected_inputs.len() - 1 + tys.len()
- {
- // Wrap up the N provided arguments starting at this position in a tuple.
- let provided_as_tuple = tcx.mk_tup(
- provided_arg_tys.iter().map(|(ty, _)| *ty).skip(mismatch_idx).take(tys.len()),
- );
-
- let mut satisfied = true;
- // Check if the newly wrapped tuple + rest of the arguments are compatible.
- for ((_, expected_ty), provided_ty) in std::iter::zip(
- formal_and_expected_inputs.iter().skip(mismatch_idx),
- [provided_as_tuple].into_iter().chain(
- provided_arg_tys.iter().map(|(ty, _)| *ty).skip(mismatch_idx + tys.len()),
- ),
- ) {
- if !self.can_coerce(provided_ty, *expected_ty) {
- satisfied = false;
- break;
- }
- }
-
-                // If they're compatible, suggest wrapping the arguments in a tuple, and we're done!
-                // Take some care with spans, so we don't suggest wrapping a macro's
-                // innards in parentheses, for example.
- if satisfied
- && let Some((_, lo)) =
- provided_arg_tys.get(ProvidedIdx::from_usize(mismatch_idx))
- && let Some((_, hi)) =
- provided_arg_tys.get(ProvidedIdx::from_usize(mismatch_idx + tys.len() - 1))
- {
- let mut err;
- if tys.len() == 1 {
- // A tuple wrap suggestion actually occurs within,
- // so don't do anything special here.
- err = self.report_and_explain_type_error(
- TypeTrace::types(
- &self.misc(*lo),
- true,
- formal_and_expected_inputs[mismatch_idx.into()].1,
- provided_arg_tys[mismatch_idx.into()].0,
- ),
- terr,
- );
- err.span_label(
- full_call_span,
- format!("arguments to this {} are incorrect", call_name),
- );
- } else {
- err = tcx.sess.struct_span_err_with_code(
- full_call_span,
- &format!(
- "this {} takes {}{} but {} {} supplied",
- call_name,
- if c_variadic { "at least " } else { "" },
- potentially_plural_count(
- formal_and_expected_inputs.len(),
- "argument"
- ),
- potentially_plural_count(provided_args.len(), "argument"),
- pluralize!("was", provided_args.len())
- ),
- DiagnosticId::Error(err_code.to_owned()),
- );
- err.multipart_suggestion_verbose(
- "wrap these arguments in parentheses to construct a tuple",
- vec![
- (lo.shrink_to_lo(), "(".to_string()),
- (hi.shrink_to_hi(), ")".to_string()),
- ],
- Applicability::MachineApplicable,
- );
- };
- self.label_fn_like(&mut err, fn_def_id, callee_ty);
- err.emit();
- return;
- }
- }
- }
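To make the tuple-wrapping special case above concrete, a rough example; `dist` is an invented function, and the diagnostic text is an approximation assembled from the format strings used in this block:

```rust
// Hypothetical function that takes a single tuple parameter.
fn dist(p: (f64, f64)) -> f64 {
    (p.0 * p.0 + p.1 * p.1).sqrt()
}

fn main() {
    let _ = dist((3.0, 4.0)); // intended usage: one tuple argument

    // let _ = dist(3.0, 4.0);
    // error: this function takes 1 argument but 2 arguments were supplied
    // help: wrap these arguments in parentheses to construct a tuple: `dist((3.0, 4.0))`
}
```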
-
-        // Okay, so here's where it gets complicated with regard to what errors
- // we emit and how.
- // There are 3 different "types" of errors we might encounter.
- // 1) Missing/extra/swapped arguments
- // 2) Valid but incorrect arguments
- // 3) Invalid arguments
- // - Currently I think this only comes up with `CyclicTy`
- //
-        // We first need to go through the errors, pull the (3) cases out, and emit
-        // those as their own errors, particularly since their error code and
-        // message are special. From what I can tell, we *must* emit these
- // here (vs somewhere prior to this function) since the arguments
- // become invalid *because* of how they get used in the function.
- // It is what it is.
-
- if errors.is_empty() {
- if cfg!(debug_assertions) {
- span_bug!(error_span, "expected errors from argument matrix");
- } else {
- tcx.sess
- .struct_span_err(
- error_span,
- "argument type mismatch was detected, \
- but rustc had trouble determining where",
- )
- .note(
- "we would appreciate a bug report: \
- https://github.com/rust-lang/rust/issues/new",
- )
- .emit();
- }
- return;
- }
-
- errors.drain_filter(|error| {
- let Error::Invalid(provided_idx, expected_idx, Compatibility::Incompatible(error)) = error else { return false };
- let (provided_ty, provided_span) = provided_arg_tys[*provided_idx];
- let (expected_ty, _) = formal_and_expected_inputs[*expected_idx];
- let cause = &self.misc(provided_span);
- let trace = TypeTrace::types(cause, true, expected_ty, provided_ty);
- if let Some(e) = error {
- if !matches!(trace.cause.as_failure_code(e), FailureCode::Error0308(_)) {
- self.report_and_explain_type_error(trace, e).emit();
- return true;
- }
- }
- false
- });
-
- // We're done if we found errors, but we already emitted them.
- if errors.is_empty() {
- return;
- }
-
- // Okay, now that we've emitted the special errors separately, we
- // are only left missing/extra/swapped and mismatched arguments, both
- // can be collated pretty easily if needed.
-
- // Next special case: if there is only one "Incompatible" error, just emit that
- if let [
- Error::Invalid(provided_idx, expected_idx, Compatibility::Incompatible(Some(err))),
- ] = &errors[..]
- {
- let (formal_ty, expected_ty) = formal_and_expected_inputs[*expected_idx];
- let (provided_ty, provided_arg_span) = provided_arg_tys[*provided_idx];
- let cause = &self.misc(provided_arg_span);
- let trace = TypeTrace::types(cause, true, expected_ty, provided_ty);
- let mut err = self.report_and_explain_type_error(trace, err);
- self.emit_coerce_suggestions(
- &mut err,
- &provided_args[*provided_idx],
- provided_ty,
- Expectation::rvalue_hint(self, expected_ty)
- .only_has_type(self)
- .unwrap_or(formal_ty),
- None,
- None,
- );
- err.span_label(
- full_call_span,
- format!("arguments to this {} are incorrect", call_name),
- );
- // Call out where the function is defined
- self.label_fn_like(&mut err, fn_def_id, callee_ty);
- err.emit();
- return;
- }
-
- let mut err = if formal_and_expected_inputs.len() == provided_args.len() {
- struct_span_err!(
- tcx.sess,
- full_call_span,
- E0308,
- "arguments to this {} are incorrect",
- call_name,
- )
- } else {
- tcx.sess.struct_span_err_with_code(
- full_call_span,
- &format!(
- "this {} takes {}{} but {} {} supplied",
- call_name,
- if c_variadic { "at least " } else { "" },
- potentially_plural_count(formal_and_expected_inputs.len(), "argument"),
- potentially_plural_count(provided_args.len(), "argument"),
- pluralize!("was", provided_args.len())
- ),
- DiagnosticId::Error(err_code.to_owned()),
- )
- };
-
- // As we encounter issues, keep track of what we want to provide for the suggestion
- let mut labels = vec![];
- // If there is a single error, we give a specific suggestion; otherwise, we change to
- // "did you mean" with the suggested function call
- enum SuggestionText {
- None,
- Provide(bool),
- Remove(bool),
- Swap,
- Reorder,
- DidYouMean,
- }
- let mut suggestion_text = SuggestionText::None;
-
- let mut errors = errors.into_iter().peekable();
- while let Some(error) = errors.next() {
- match error {
- Error::Invalid(provided_idx, expected_idx, compatibility) => {
- let (formal_ty, expected_ty) = formal_and_expected_inputs[expected_idx];
- let (provided_ty, provided_span) = provided_arg_tys[provided_idx];
- if let Compatibility::Incompatible(error) = &compatibility {
- let cause = &self.misc(provided_span);
- let trace = TypeTrace::types(cause, true, expected_ty, provided_ty);
- if let Some(e) = error {
- self.note_type_err(
- &mut err,
- &trace.cause,
- None,
- Some(trace.values),
- e,
- false,
- true,
- );
- }
- }
-
- self.emit_coerce_suggestions(
- &mut err,
- &provided_args[provided_idx],
- provided_ty,
- Expectation::rvalue_hint(self, expected_ty)
- .only_has_type(self)
- .unwrap_or(formal_ty),
- None,
- None,
- );
- }
- Error::Extra(arg_idx) => {
- let (provided_ty, provided_span) = provided_arg_tys[arg_idx];
- let provided_ty_name = if !has_error_or_infer([provided_ty]) {
- // FIXME: not suggestable, use something else
- format!(" of type `{}`", provided_ty)
- } else {
- "".to_string()
- };
- labels
- .push((provided_span, format!("argument{} unexpected", provided_ty_name)));
- suggestion_text = match suggestion_text {
- SuggestionText::None => SuggestionText::Remove(false),
- SuggestionText::Remove(_) => SuggestionText::Remove(true),
- _ => SuggestionText::DidYouMean,
- };
- }
- Error::Missing(expected_idx) => {
- // If there are multiple missing arguments adjacent to each other,
- // then we can provide a single error.
-
- let mut missing_idxs = vec![expected_idx];
- while let Some(e) = errors.next_if(|e| {
- matches!(e, Error::Missing(next_expected_idx)
- if *next_expected_idx == *missing_idxs.last().unwrap() + 1)
- }) {
- match e {
- Error::Missing(expected_idx) => missing_idxs.push(expected_idx),
- _ => unreachable!(),
- }
- }
-
-                    // NOTE: Because we might be re-arranging arguments, might have extra
-                    // arguments, etc., it's hard to *really* know where we should provide
-                    // this error label, so as a heuristic, we point to the provided arg, or
-                    // to the whole call if the missing inputs are past the provided args.
- match &missing_idxs[..] {
- &[expected_idx] => {
- let (_, input_ty) = formal_and_expected_inputs[expected_idx];
- let span = if let Some((_, arg_span)) =
- provided_arg_tys.get(expected_idx.to_provided_idx())
- {
- *arg_span
- } else {
- args_span
- };
- let rendered = if !has_error_or_infer([input_ty]) {
- format!(" of type `{}`", input_ty)
- } else {
- "".to_string()
- };
- labels.push((span, format!("an argument{} is missing", rendered)));
- suggestion_text = match suggestion_text {
- SuggestionText::None => SuggestionText::Provide(false),
- SuggestionText::Provide(_) => SuggestionText::Provide(true),
- _ => SuggestionText::DidYouMean,
- };
- }
- &[first_idx, second_idx] => {
- let (_, first_expected_ty) = formal_and_expected_inputs[first_idx];
- let (_, second_expected_ty) = formal_and_expected_inputs[second_idx];
- let span = if let (Some((_, first_span)), Some((_, second_span))) = (
- provided_arg_tys.get(first_idx.to_provided_idx()),
- provided_arg_tys.get(second_idx.to_provided_idx()),
- ) {
- first_span.to(*second_span)
- } else {
- args_span
- };
- let rendered =
- if !has_error_or_infer([first_expected_ty, second_expected_ty]) {
- format!(
- " of type `{}` and `{}`",
- first_expected_ty, second_expected_ty
- )
- } else {
- "".to_string()
- };
- labels.push((span, format!("two arguments{} are missing", rendered)));
- suggestion_text = match suggestion_text {
- SuggestionText::None | SuggestionText::Provide(_) => {
- SuggestionText::Provide(true)
- }
- _ => SuggestionText::DidYouMean,
- };
- }
- &[first_idx, second_idx, third_idx] => {
- let (_, first_expected_ty) = formal_and_expected_inputs[first_idx];
- let (_, second_expected_ty) = formal_and_expected_inputs[second_idx];
- let (_, third_expected_ty) = formal_and_expected_inputs[third_idx];
- let span = if let (Some((_, first_span)), Some((_, third_span))) = (
- provided_arg_tys.get(first_idx.to_provided_idx()),
- provided_arg_tys.get(third_idx.to_provided_idx()),
- ) {
- first_span.to(*third_span)
- } else {
- args_span
- };
- let rendered = if !has_error_or_infer([
- first_expected_ty,
- second_expected_ty,
- third_expected_ty,
- ]) {
- format!(
- " of type `{}`, `{}`, and `{}`",
- first_expected_ty, second_expected_ty, third_expected_ty
- )
- } else {
- "".to_string()
- };
- labels.push((span, format!("three arguments{} are missing", rendered)));
- suggestion_text = match suggestion_text {
- SuggestionText::None | SuggestionText::Provide(_) => {
- SuggestionText::Provide(true)
- }
- _ => SuggestionText::DidYouMean,
- };
- }
- missing_idxs => {
- let first_idx = *missing_idxs.first().unwrap();
- let last_idx = *missing_idxs.last().unwrap();
-                            // NOTE: Because we might be re-arranging arguments, might have extra
-                            // arguments, etc., it's hard to *really* know where we should provide
-                            // this error label, so this is a decent heuristic.
- let span = if let (Some((_, first_span)), Some((_, last_span))) = (
- provided_arg_tys.get(first_idx.to_provided_idx()),
- provided_arg_tys.get(last_idx.to_provided_idx()),
- ) {
- first_span.to(*last_span)
- } else {
- args_span
- };
- labels.push((span, format!("multiple arguments are missing")));
- suggestion_text = match suggestion_text {
- SuggestionText::None | SuggestionText::Provide(_) => {
- SuggestionText::Provide(true)
- }
- _ => SuggestionText::DidYouMean,
- };
- }
- }
- }
- Error::Swap(
- first_provided_idx,
- second_provided_idx,
- first_expected_idx,
- second_expected_idx,
- ) => {
- let (first_provided_ty, first_span) = provided_arg_tys[first_provided_idx];
- let (_, first_expected_ty) = formal_and_expected_inputs[first_expected_idx];
- let first_provided_ty_name = if !has_error_or_infer([first_provided_ty]) {
- format!(", found `{}`", first_provided_ty)
- } else {
- String::new()
- };
- labels.push((
- first_span,
- format!("expected `{}`{}", first_expected_ty, first_provided_ty_name),
- ));
-
- let (second_provided_ty, second_span) = provided_arg_tys[second_provided_idx];
- let (_, second_expected_ty) = formal_and_expected_inputs[second_expected_idx];
- let second_provided_ty_name = if !has_error_or_infer([second_provided_ty]) {
- format!(", found `{}`", second_provided_ty)
- } else {
- String::new()
- };
- labels.push((
- second_span,
- format!("expected `{}`{}", second_expected_ty, second_provided_ty_name),
- ));
-
- suggestion_text = match suggestion_text {
- SuggestionText::None => SuggestionText::Swap,
- _ => SuggestionText::DidYouMean,
- };
- }
- Error::Permutation(args) => {
- for (dst_arg, dest_input) in args {
- let (_, expected_ty) = formal_and_expected_inputs[dst_arg];
- let (provided_ty, provided_span) = provided_arg_tys[dest_input];
- let provided_ty_name = if !has_error_or_infer([provided_ty]) {
- format!(", found `{}`", provided_ty)
- } else {
- String::new()
- };
- labels.push((
- provided_span,
- format!("expected `{}`{}", expected_ty, provided_ty_name),
- ));
- }
-
- suggestion_text = match suggestion_text {
- SuggestionText::None => SuggestionText::Reorder,
- _ => SuggestionText::DidYouMean,
- };
- }
- }
- }
-
- // If we have less than 5 things to say, it would be useful to call out exactly what's wrong
- if labels.len() <= 5 {
- for (span, label) in labels {
- err.span_label(span, label);
- }
- }
-
- // Call out where the function is defined
- self.label_fn_like(&mut err, fn_def_id, callee_ty);
-
- // And add a suggestion block for all of the parameters
- let suggestion_text = match suggestion_text {
- SuggestionText::None => None,
- SuggestionText::Provide(plural) => {
- Some(format!("provide the argument{}", if plural { "s" } else { "" }))
- }
- SuggestionText::Remove(plural) => {
- Some(format!("remove the extra argument{}", if plural { "s" } else { "" }))
- }
- SuggestionText::Swap => Some("swap these arguments".to_string()),
- SuggestionText::Reorder => Some("reorder these arguments".to_string()),
- SuggestionText::DidYouMean => Some("did you mean".to_string()),
- };
- if let Some(suggestion_text) = suggestion_text {
- let source_map = self.sess().source_map();
- let mut suggestion = format!(
- "{}(",
- source_map.span_to_snippet(full_call_span).unwrap_or_else(|_| fn_def_id
- .map_or("".to_string(), |fn_def_id| tcx.item_name(fn_def_id).to_string()))
- );
- let mut needs_comma = false;
- for (expected_idx, provided_idx) in matched_inputs.iter_enumerated() {
- if needs_comma {
- suggestion += ", ";
- } else {
- needs_comma = true;
- }
- let suggestion_text = if let Some(provided_idx) = provided_idx
- && let (_, provided_span) = provided_arg_tys[*provided_idx]
- && let Ok(arg_text) =
- source_map.span_to_snippet(provided_span)
- {
- arg_text
- } else {
- // Propose a placeholder of the correct type
- let (_, expected_ty) = formal_and_expected_inputs[expected_idx];
- if expected_ty.is_unit() {
- "()".to_string()
- } else if expected_ty.is_suggestable(tcx, false) {
- format!("/* {} */", expected_ty)
- } else {
- "/* value */".to_string()
- }
- };
- suggestion += &suggestion_text;
- }
- suggestion += ")";
- err.span_suggestion_verbose(
- error_span,
- &suggestion_text,
- suggestion,
- Applicability::HasPlaceholders,
- );
- }
-
- err.emit();
- }
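As a hedged end-to-end example of the labels and the collated suggestion produced above (the function `log_msg` is invented, and the rendered help text is an approximation of what the `SuggestionText` and placeholder logic emit):

```rust
// Hypothetical two-argument function.
fn log_msg(_level: u8, _msg: &str) {}

fn main() {
    // log_msg("boot");
    // error[E0308]: this function takes 2 arguments but 1 argument was supplied
    //   label: an argument of type `u8` is missing
    //   help: provide the argument: `log_msg(/* u8 */, "boot")`
    log_msg(0, "boot"); // corrected call
}
```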
-
- // AST fragment checking
- pub(in super::super) fn check_lit(
- &self,
- lit: &hir::Lit,
- expected: Expectation<'tcx>,
- ) -> Ty<'tcx> {
- let tcx = self.tcx;
-
- match lit.node {
- ast::LitKind::Str(..) => tcx.mk_static_str(),
- ast::LitKind::ByteStr(ref v) => {
- tcx.mk_imm_ref(tcx.lifetimes.re_static, tcx.mk_array(tcx.types.u8, v.len() as u64))
- }
- ast::LitKind::Byte(_) => tcx.types.u8,
- ast::LitKind::Char(_) => tcx.types.char,
- ast::LitKind::Int(_, ast::LitIntType::Signed(t)) => tcx.mk_mach_int(ty::int_ty(t)),
- ast::LitKind::Int(_, ast::LitIntType::Unsigned(t)) => tcx.mk_mach_uint(ty::uint_ty(t)),
- ast::LitKind::Int(_, ast::LitIntType::Unsuffixed) => {
- let opt_ty = expected.to_option(self).and_then(|ty| match ty.kind() {
- ty::Int(_) | ty::Uint(_) => Some(ty),
- ty::Char => Some(tcx.types.u8),
- ty::RawPtr(..) => Some(tcx.types.usize),
- ty::FnDef(..) | ty::FnPtr(_) => Some(tcx.types.usize),
- _ => None,
- });
- opt_ty.unwrap_or_else(|| self.next_int_var())
- }
- ast::LitKind::Float(_, ast::LitFloatType::Suffixed(t)) => {
- tcx.mk_mach_float(ty::float_ty(t))
- }
- ast::LitKind::Float(_, ast::LitFloatType::Unsuffixed) => {
- let opt_ty = expected.to_option(self).and_then(|ty| match ty.kind() {
- ty::Float(_) => Some(ty),
- _ => None,
- });
- opt_ty.unwrap_or_else(|| self.next_float_var())
- }
- ast::LitKind::Bool(_) => tcx.types.bool,
- ast::LitKind::Err(_) => tcx.ty_error(),
- }
- }
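A small sketch of how literals pick up their types along the lines of `check_lit` above; this is ordinary Rust behaviour, with nothing module-specific assumed:

```rust
fn main() {
    let a: u8 = 1;      // unsuffixed integer adopts the expected integer type `u8`
    let b = 1;          // no expectation: fresh integer variable, later defaulted to `i32`
    let c: f32 = 1.0;   // unsuffixed float adopts the expected `f32`
    let d = b"bytes";   // byte string literal: `&'static [u8; 5]`
    let e = 'x';        // char literal is always `char`
    let _ = (a, b, c, d, e);
}
```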
-
- pub fn check_struct_path(
- &self,
- qpath: &QPath<'_>,
- hir_id: hir::HirId,
- ) -> Option<(&'tcx ty::VariantDef, Ty<'tcx>)> {
- let path_span = qpath.span();
- let (def, ty) = self.finish_resolving_struct_path(qpath, path_span, hir_id);
- let variant = match def {
- Res::Err => {
- self.set_tainted_by_errors();
- return None;
- }
- Res::Def(DefKind::Variant, _) => match ty.kind() {
- ty::Adt(adt, substs) => Some((adt.variant_of_res(def), adt.did(), substs)),
- _ => bug!("unexpected type: {:?}", ty),
- },
- Res::Def(DefKind::Struct | DefKind::Union | DefKind::TyAlias | DefKind::AssocTy, _)
- | Res::SelfTy { .. } => match ty.kind() {
- ty::Adt(adt, substs) if !adt.is_enum() => {
- Some((adt.non_enum_variant(), adt.did(), substs))
- }
- _ => None,
- },
- _ => bug!("unexpected definition: {:?}", def),
- };
-
- if let Some((variant, did, substs)) = variant {
- debug!("check_struct_path: did={:?} substs={:?}", did, substs);
- self.write_user_type_annotation_from_substs(hir_id, did, substs, None);
-
- // Check bounds on type arguments used in the path.
- self.add_required_obligations(path_span, did, substs);
-
- Some((variant, ty))
- } else {
- match ty.kind() {
- ty::Error(_) => {
- // E0071 might be caused by a spelling error, which will have
- // already caused an error message and probably a suggestion
- // elsewhere. Refrain from emitting more unhelpful errors here
- // (issue #88844).
- }
- _ => {
- struct_span_err!(
- self.tcx.sess,
- path_span,
- E0071,
- "expected struct, variant or union type, found {}",
- ty.sort_string(self.tcx)
- )
- .span_label(path_span, "not a struct")
- .emit();
- }
- }
- None
- }
- }
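A hedged illustration of the E0071 branch above; the `Pair` alias is invented, and the message is paraphrased from the `struct_span_err!` call:

```rust
// A type alias to a tuple type: not a struct, variant, or union.
type Pair = (i32, i32);

fn main() {
    // let p = Pair { 0: 1, 1: 2 };
    // error[E0071]: expected struct, variant or union type
    //   (with a "not a struct" label on the path)
    let p: Pair = (1, 2); // tuples are built with parentheses instead
    let _ = p;
}
```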
-
- pub fn check_decl_initializer(
- &self,
- hir_id: hir::HirId,
- pat: &'tcx hir::Pat<'tcx>,
- init: &'tcx hir::Expr<'tcx>,
- ) -> Ty<'tcx> {
- // FIXME(tschottdorf): `contains_explicit_ref_binding()` must be removed
- // for #42640 (default match binding modes).
- //
- // See #44848.
- let ref_bindings = pat.contains_explicit_ref_binding();
-
- let local_ty = self.local_ty(init.span, hir_id).revealed_ty;
- if let Some(m) = ref_bindings {
- // Somewhat subtle: if we have a `ref` binding in the pattern,
- // we want to avoid introducing coercions for the RHS. This is
- // both because it helps preserve sanity and, in the case of
- // ref mut, for soundness (issue #23116). In particular, in
- // the latter case, we need to be clear that the type of the
- // referent for the reference that results is *equal to* the
- // type of the place it is referencing, and not some
- // supertype thereof.
- let init_ty = self.check_expr_with_needs(init, Needs::maybe_mut_place(m));
- self.demand_eqtype(init.span, local_ty, init_ty);
- init_ty
- } else {
- self.check_expr_coercable_to_type(init, local_ty, None)
- }
- }
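A minimal sketch of the `ref`-binding rule described in the comment above, assuming the eqtype check fires as written: with a plain binding the initializer may coerce to the annotated type, while with a `ref` binding the two types must be equal, so the equivalent line is rejected:

```rust
fn main() {
    // Plain binding: `&[i32; 3]` coerces to the annotated `&[i32]`.
    let xs: &[i32] = &[1, 2, 3];

    // `ref` binding: coercion is skipped and the types must match exactly,
    // so the same initializer produces a type mismatch.
    // let ref ys: &[i32] = &[1, 2, 3];

    let _ = xs;
}
```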
-
- pub(in super::super) fn check_decl(&self, decl: Declaration<'tcx>) {
- // Determine and write the type which we'll check the pattern against.
- let decl_ty = self.local_ty(decl.span, decl.hir_id).decl_ty;
- self.write_ty(decl.hir_id, decl_ty);
-
- // Type check the initializer.
- if let Some(ref init) = decl.init {
- let init_ty = self.check_decl_initializer(decl.hir_id, decl.pat, &init);
- self.overwrite_local_ty_if_err(decl.hir_id, decl.pat, decl_ty, init_ty);
- }
-
- // Does the expected pattern type originate from an expression and what is the span?
- let (origin_expr, ty_span) = match (decl.ty, decl.init) {
- (Some(ty), _) => (false, Some(ty.span)), // Bias towards the explicit user type.
- (_, Some(init)) => {
- (true, Some(init.span.find_ancestor_inside(decl.span).unwrap_or(init.span)))
- } // No explicit type; so use the scrutinee.
- _ => (false, None), // We have `let $pat;`, so the expected type is unconstrained.
- };
-
- // Type check the pattern. Override if necessary to avoid knock-on errors.
- self.check_pat_top(&decl.pat, decl_ty, ty_span, origin_expr);
- let pat_ty = self.node_ty(decl.pat.hir_id);
- self.overwrite_local_ty_if_err(decl.hir_id, decl.pat, decl_ty, pat_ty);
-
- if let Some(blk) = decl.els {
- let previous_diverges = self.diverges.get();
- let else_ty = self.check_block_with_expected(blk, NoExpectation);
- let cause = self.cause(blk.span, ObligationCauseCode::LetElse);
- if let Some(mut err) =
- self.demand_eqtype_with_origin(&cause, self.tcx.types.never, else_ty)
- {
- err.emit();
- }
- self.diverges.set(previous_diverges);
- }
- }
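The `decl.els` branch above is the `let`–`else` case, where the `else` block is checked against the never type. A small example (on a toolchain where `let`–`else` is available), showing that the block must diverge:

```rust
fn first_digit(s: &str) -> u32 {
    let Some(c) = s.chars().next() else {
        // This block is checked against `!`, so it must diverge
        // (return, panic, break, continue, ...).
        return 0;
    };
    c.to_digit(10).unwrap_or(0)
}

fn main() {
    assert_eq!(first_digit("7a"), 7);
}
```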
-
- /// Type check a `let` statement.
- pub fn check_decl_local(&self, local: &'tcx hir::Local<'tcx>) {
- self.check_decl(local.into());
- }
-
- pub fn check_stmt(&self, stmt: &'tcx hir::Stmt<'tcx>, is_last: bool) {
- // Don't do all the complex logic below for `DeclItem`.
- match stmt.kind {
- hir::StmtKind::Item(..) => return,
- hir::StmtKind::Local(..) | hir::StmtKind::Expr(..) | hir::StmtKind::Semi(..) => {}
- }
-
- self.warn_if_unreachable(stmt.hir_id, stmt.span, "statement");
-
- // Hide the outer diverging and `has_errors` flags.
- let old_diverges = self.diverges.replace(Diverges::Maybe);
- let old_has_errors = self.has_errors.replace(false);
-
- match stmt.kind {
- hir::StmtKind::Local(l) => {
- self.check_decl_local(l);
- }
- // Ignore for now.
- hir::StmtKind::Item(_) => {}
- hir::StmtKind::Expr(ref expr) => {
- // Check with expected type of `()`.
- self.check_expr_has_type_or_error(&expr, self.tcx.mk_unit(), |err| {
- if expr.can_have_side_effects() {
- self.suggest_semicolon_at_end(expr.span, err);
- }
- });
- }
- hir::StmtKind::Semi(ref expr) => {
-                // All of this is equivalent to calling `check_expr`, but it is inlined here
- // in order to capture the fact that this `match` is the last statement in its
- // function. This is done for better suggestions to remove the `;`.
- let expectation = match expr.kind {
- hir::ExprKind::Match(..) if is_last => IsLast(stmt.span),
- _ => NoExpectation,
- };
- self.check_expr_with_expectation(expr, expectation);
- }
- }
-
- // Combine the diverging and `has_error` flags.
- self.diverges.set(self.diverges.get() | old_diverges);
- self.has_errors.set(self.has_errors.get() | old_has_errors);
- }
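The `StmtKind::Expr` arm above checks block-like statement expressions against `()`. A rough example, with the offending statement kept in a comment so the snippet compiles:

```rust
fn pick() -> i32 {
    // Used as a statement (no trailing `;`), a non-unit block-like expression
    // is a type error, so uncommenting the next line is rejected:
    // if true { 1 } else { 2 }

    if true { 1 } else { 2 } // as the tail expression it is fine
}

fn main() {
    let _ = pick();
}
```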
-
- pub fn check_block_no_value(&self, blk: &'tcx hir::Block<'tcx>) {
- let unit = self.tcx.mk_unit();
- let ty = self.check_block_with_expected(blk, ExpectHasType(unit));
-
- // if the block produces a `!` value, that can always be
- // (effectively) coerced to unit.
- if !ty.is_never() {
- self.demand_suptype(blk.span, unit, ty);
- }
- }
-
- pub(in super::super) fn check_block_with_expected(
- &self,
- blk: &'tcx hir::Block<'tcx>,
- expected: Expectation<'tcx>,
- ) -> Ty<'tcx> {
- let prev = self.ps.replace(self.ps.get().recurse(blk));
-
- // In some cases, blocks have just one exit, but other blocks
- // can be targeted by multiple breaks. This can happen both
- // with labeled blocks as well as when we desugar
- // a `try { ... }` expression.
- //
- // Example 1:
- //
- // 'a: { if true { break 'a Err(()); } Ok(()) }
- //
- // Here we would wind up with two coercions, one from
- // `Err(())` and the other from the tail expression
- // `Ok(())`. If the tail expression is omitted, that's a
- // "forced unit" -- unless the block diverges, in which
- // case we can ignore the tail expression (e.g., `'a: {
- // break 'a 22; }` would not force the type of the block
- // to be `()`).
- let tail_expr = blk.expr.as_ref();
- let coerce_to_ty = expected.coercion_target_type(self, blk.span);
- let coerce = if blk.targeted_by_break {
- CoerceMany::new(coerce_to_ty)
- } else {
- let tail_expr: &[&hir::Expr<'_>] = match tail_expr {
- Some(e) => slice::from_ref(e),
- None => &[],
- };
- CoerceMany::with_coercion_sites(coerce_to_ty, tail_expr)
- };
-
- let prev_diverges = self.diverges.get();
- let ctxt = BreakableCtxt { coerce: Some(coerce), may_break: false };
-
- let (ctxt, ()) = self.with_breakable_ctxt(blk.hir_id, ctxt, || {
- for (pos, s) in blk.stmts.iter().enumerate() {
- self.check_stmt(s, blk.stmts.len() - 1 == pos);
- }
-
- // check the tail expression **without** holding the
- // `enclosing_breakables` lock below.
- let tail_expr_ty = tail_expr.map(|t| self.check_expr_with_expectation(t, expected));
-
- let mut enclosing_breakables = self.enclosing_breakables.borrow_mut();
- let ctxt = enclosing_breakables.find_breakable(blk.hir_id);
- let coerce = ctxt.coerce.as_mut().unwrap();
- if let Some(tail_expr_ty) = tail_expr_ty {
- let tail_expr = tail_expr.unwrap();
- let span = self.get_expr_coercion_span(tail_expr);
- let cause = self.cause(span, ObligationCauseCode::BlockTailExpression(blk.hir_id));
- let ty_for_diagnostic = coerce.merged_ty();
-                // We use coerce_inner here because we want to augment the error
-                // by suggesting to wrap the block in square brackets if it might
-                // have been mistaken for array syntax
- coerce.coerce_inner(
- self,
- &cause,
- Some(tail_expr),
- tail_expr_ty,
- Some(&mut |diag: &mut Diagnostic| {
- self.suggest_block_to_brackets(diag, blk, tail_expr_ty, ty_for_diagnostic);
- }),
- false,
- );
- } else {
- // Subtle: if there is no explicit tail expression,
- // that is typically equivalent to a tail expression
- // of `()` -- except if the block diverges. In that
- // case, there is no value supplied from the tail
- // expression (assuming there are no other breaks,
- // this implies that the type of the block will be
- // `!`).
- //
- // #41425 -- label the implicit `()` as being the
- // "found type" here, rather than the "expected type".
- if !self.diverges.get().is_always() {
- // #50009 -- Do not point at the entire fn block span, point at the return type
- // span, as it is the cause of the requirement, and
- // `consider_hint_about_removing_semicolon` will point at the last expression
- // if it were a relevant part of the error. This improves usability in editors
- // that highlight errors inline.
- let mut sp = blk.span;
- let mut fn_span = None;
- if let Some((decl, ident)) = self.get_parent_fn_decl(blk.hir_id) {
- let ret_sp = decl.output.span();
- if let Some(block_sp) = self.parent_item_span(blk.hir_id) {
-                                // HACK: in some cases (`ui/liveness/liveness-issue-2163.rs`) the
-                                // output would otherwise be incorrect and even misleading. Make sure
-                                // the span we're aiming at corresponds to a `fn` body.
- if block_sp == blk.span {
- sp = ret_sp;
- fn_span = Some(ident.span);
- }
- }
- }
- coerce.coerce_forced_unit(
- self,
- &self.misc(sp),
- &mut |err| {
- if let Some(expected_ty) = expected.only_has_type(self) {
- if !self.consider_removing_semicolon(blk, expected_ty, err) {
- self.consider_returning_binding(blk, expected_ty, err);
- }
- if expected_ty == self.tcx.types.bool {
- // If this is caused by a missing `let` in a `while let`,
- // silence this redundant error, as we already emit E0070.
-
-                                    // Our block must be an `assign desugar local; assignment`
- if let Some(hir::Node::Block(hir::Block {
- stmts:
- [
- hir::Stmt {
- kind:
- hir::StmtKind::Local(hir::Local {
- source:
- hir::LocalSource::AssignDesugar(_),
- ..
- }),
- ..
- },
- hir::Stmt {
- kind:
- hir::StmtKind::Expr(hir::Expr {
- kind: hir::ExprKind::Assign(..),
- ..
- }),
- ..
- },
- ],
- ..
- })) = self.tcx.hir().find(blk.hir_id)
- {
- self.comes_from_while_condition(blk.hir_id, |_| {
- err.downgrade_to_delayed_bug();
- })
- }
- }
- }
- if let Some(fn_span) = fn_span {
- err.span_label(
- fn_span,
- "implicitly returns `()` as its body has no tail or `return` \
- expression",
- );
- }
- },
- false,
- );
- }
- }
- });
-
- if ctxt.may_break {
- // If we can break from the block, then the block's exit is always reachable
- // (... as long as the entry is reachable) - regardless of the tail of the block.
- self.diverges.set(prev_diverges);
- }
-
- let mut ty = ctxt.coerce.unwrap().complete(self);
-
- if self.has_errors.get() || ty.references_error() {
- ty = self.tcx.ty_error()
- }
-
- self.write_ty(blk.hir_id, ty);
-
- self.ps.set(prev);
- ty
- }
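A hedged sketch of the "forced unit" case handled above: when a stray semicolon turns the tail expression into a statement, the block implicitly produces `()`, and the diagnostics point at the declared return type and the semicolon:

```rust
// fn answer() -> i32 {
//     42;
// }
// error[E0308]: mismatched types: expected `i32`, found `()`
// (rustc points at the `-> i32` return type and suggests removing the `;`)

fn answer() -> i32 {
    42 // without the semicolon, the tail expression supplies the value
}

fn main() {
    assert_eq!(answer(), 42);
}
```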
-
- fn parent_item_span(&self, id: hir::HirId) -> Option<Span> {
- let node = self.tcx.hir().get_by_def_id(self.tcx.hir().get_parent_item(id));
- match node {
- Node::Item(&hir::Item { kind: hir::ItemKind::Fn(_, _, body_id), .. })
- | Node::ImplItem(&hir::ImplItem { kind: hir::ImplItemKind::Fn(_, body_id), .. }) => {
- let body = self.tcx.hir().body(body_id);
- if let ExprKind::Block(block, _) = &body.value.kind {
- return Some(block.span);
- }
- }
- _ => {}
- }
- None
- }
-
- /// Given a function block's `HirId`, returns its `FnDecl` if it exists, or `None` otherwise.
- fn get_parent_fn_decl(&self, blk_id: hir::HirId) -> Option<(&'tcx hir::FnDecl<'tcx>, Ident)> {
- let parent = self.tcx.hir().get_by_def_id(self.tcx.hir().get_parent_item(blk_id));
- self.get_node_fn_decl(parent).map(|(fn_decl, ident, _)| (fn_decl, ident))
- }
-
- /// If `expr` is a `match` expression that has only one non-`!` arm, use that arm's tail
- /// expression's `Span`, otherwise return `expr.span`. This is done to give better errors
- /// when given code like the following:
- /// ```text
- /// if false { return 0i32; } else { 1u32 }
- /// // ^^^^ point at this instead of the whole `if` expression
- /// ```
- fn get_expr_coercion_span(&self, expr: &hir::Expr<'_>) -> rustc_span::Span {
- let check_in_progress = |elem: &hir::Expr<'_>| {
- self.typeck_results.borrow().node_type_opt(elem.hir_id).filter(|ty| !ty.is_never()).map(
- |_| match elem.kind {
- // Point at the tail expression when possible.
- hir::ExprKind::Block(block, _) => block.expr.map_or(block.span, |e| e.span),
- _ => elem.span,
- },
- )
- };
-
- if let hir::ExprKind::If(_, _, Some(el)) = expr.kind {
- if let Some(rslt) = check_in_progress(el) {
- return rslt;
- }
- }
-
- if let hir::ExprKind::Match(_, arms, _) = expr.kind {
- let mut iter = arms.iter().filter_map(|arm| check_in_progress(arm.body));
- if let Some(span) = iter.next() {
- if iter.next().is_none() {
- return span;
- }
- }
- }
-
- expr.span
- }
-
- fn overwrite_local_ty_if_err(
- &self,
- hir_id: hir::HirId,
- pat: &'tcx hir::Pat<'tcx>,
- decl_ty: Ty<'tcx>,
- ty: Ty<'tcx>,
- ) {
- if ty.references_error() {
- // Override the types everywhere with `err()` to avoid knock on errors.
- self.write_ty(hir_id, ty);
- self.write_ty(pat.hir_id, ty);
- let local_ty = LocalTy { decl_ty, revealed_ty: ty };
- self.locals.borrow_mut().insert(hir_id, local_ty);
- self.locals.borrow_mut().insert(pat.hir_id, local_ty);
- }
- }
-
- // Finish resolving a path in a struct expression or pattern `S::A { .. }` if necessary.
- // The newly resolved definition is written into `type_dependent_defs`.
- fn finish_resolving_struct_path(
- &self,
- qpath: &QPath<'_>,
- path_span: Span,
- hir_id: hir::HirId,
- ) -> (Res, Ty<'tcx>) {
- match *qpath {
- QPath::Resolved(ref maybe_qself, ref path) => {
- let self_ty = maybe_qself.as_ref().map(|qself| self.to_ty(qself));
- let ty = <dyn AstConv<'_>>::res_to_ty(self, self_ty, path, true);
- (path.res, ty)
- }
- QPath::TypeRelative(ref qself, ref segment) => {
- let ty = self.to_ty(qself);
-
- let result = <dyn AstConv<'_>>::associated_path_to_ty(
- self, hir_id, path_span, ty, qself, segment, true,
- );
- let ty = result.map(|(ty, _, _)| ty).unwrap_or_else(|_| self.tcx().ty_error());
- let result = result.map(|(_, kind, def_id)| (kind, def_id));
-
- // Write back the new resolution.
- self.write_resolution(hir_id, result);
-
- (result.map_or(Res::Err, |(kind, def_id)| Res::Def(kind, def_id)), ty)
- }
- QPath::LangItem(lang_item, span, id) => {
- self.resolve_lang_item_path(lang_item, span, hir_id, id)
- }
- }
- }
-
-    /// Given a vec of evaluated `FulfillmentError`s and an `fn` call's argument expressions, we
-    /// walk the checked and coerced types for each argument to see if any of the
-    /// `FulfillmentError`s reference a type argument. The reason we also walk the checked type is
-    /// that the coerced type may not be easily comparable with the predicate type (because of
-    /// coercion). If the types match for either the checked or the coerced type, and there's only
-    /// *one* argument that does, we point at the corresponding argument's expression span instead
-    /// of the `fn` call path span.
- fn point_at_arg_instead_of_call_if_possible(
- &self,
- errors: &mut Vec<traits::FulfillmentError<'tcx>>,
- expr: &'tcx hir::Expr<'tcx>,
- call_sp: Span,
- args: &'tcx [hir::Expr<'tcx>],
- expected_tys: &[Ty<'tcx>],
- ) {
- // We *do not* do this for desugared call spans to keep good diagnostics when involving
- // the `?` operator.
- if call_sp.desugaring_kind().is_some() {
- return;
- }
-
- 'outer: for error in errors {
-            // Only if the cause is somewhere inside the expression do we want to try to point at an arg.
-            // Otherwise, it means that the cause is somewhere else and we should not change
-            // anything, because we could break the correct span.
- if !call_sp.contains(error.obligation.cause.span) {
- continue;
- }
-
- // Peel derived obligation, because it's the type that originally
- // started this inference chain that matters, not the one we wound
- // up with at the end.
- fn unpeel_to_top<'a, 'tcx>(
- mut code: &'a ObligationCauseCode<'tcx>,
- ) -> &'a ObligationCauseCode<'tcx> {
- let mut result_code = code;
- loop {
- let parent = match code {
- ObligationCauseCode::ImplDerivedObligation(c) => &c.derived.parent_code,
- ObligationCauseCode::BuiltinDerivedObligation(c)
- | ObligationCauseCode::DerivedObligation(c) => &c.parent_code,
- _ => break result_code,
- };
- (result_code, code) = (code, parent);
- }
- }
- let self_: ty::subst::GenericArg<'_> =
- match unpeel_to_top(error.obligation.cause.code()) {
- ObligationCauseCode::BuiltinDerivedObligation(code)
- | ObligationCauseCode::DerivedObligation(code) => {
- code.parent_trait_pred.self_ty().skip_binder().into()
- }
- ObligationCauseCode::ImplDerivedObligation(code) => {
- code.derived.parent_trait_pred.self_ty().skip_binder().into()
- }
- _ if let ty::PredicateKind::Trait(predicate) =
- error.obligation.predicate.kind().skip_binder() =>
- {
- predicate.self_ty().into()
- }
- _ => continue,
- };
- let self_ = self.resolve_vars_if_possible(self_);
- let ty_matches_self = |ty: Ty<'tcx>| ty.walk().any(|arg| arg == self_);
-
- let typeck_results = self.typeck_results.borrow();
-
- for (idx, arg) in args.iter().enumerate() {
- // Don't adjust the span if we already have a more precise span
- // within one of the args.
- if arg.span.contains(error.obligation.cause.span) {
- let references_arg =
- typeck_results.expr_ty_opt(arg).map_or(false, &ty_matches_self)
- || expected_tys.get(idx).copied().map_or(false, &ty_matches_self);
- if references_arg && !arg.span.from_expansion() {
- error.obligation.cause.map_code(|parent_code| {
- ObligationCauseCode::FunctionArgumentObligation {
- arg_hir_id: args[idx].hir_id,
- call_hir_id: expr.hir_id,
- parent_code,
- }
- })
- }
- continue 'outer;
- }
- }
-
- // Collect the argument position for all arguments that could have caused this
- // `FulfillmentError`.
- let mut referenced_in: Vec<_> = std::iter::zip(expected_tys, args)
- .enumerate()
- .flat_map(|(idx, (expected_ty, arg))| {
- if let Some(arg_ty) = typeck_results.expr_ty_opt(arg) {
- vec![(idx, arg_ty), (idx, *expected_ty)]
- } else {
- vec![]
- }
- })
- .filter_map(|(i, ty)| {
- let ty = self.resolve_vars_if_possible(ty);
- // We walk the argument type because the argument's type could have
- // been `Option<T>`, but the `FulfillmentError` references `T`.
- if ty_matches_self(ty) { Some(i) } else { None }
- })
- .collect();
-
- // Both checked and coerced types could have matched, thus we need to remove
- // duplicates.
-
-            // The indices are primitive `usize`s, so an unstable sort is fine here.
- referenced_in.sort_unstable();
- referenced_in.dedup();
-
- if let &[idx] = &referenced_in[..] {
- // Do not point at the inside of a macro.
- // That would often result in poor error messages.
- if args[idx].span.from_expansion() {
- continue;
- }
-                // We make sure that only *one* argument matches the obligation failure,
-                // and we assign the obligation's span to that argument's expression span.
- error.obligation.cause.span = args[idx].span;
- error.obligation.cause.map_code(|parent_code| {
- ObligationCauseCode::FunctionArgumentObligation {
- arg_hir_id: args[idx].hir_id,
- call_hir_id: expr.hir_id,
- parent_code,
- }
- });
- } else if error.obligation.cause.span == call_sp {
- // Make function calls point at the callee, not the whole thing.
- if let hir::ExprKind::Call(callee, _) = expr.kind {
- error.obligation.cause.span = callee.span;
- }
- }
- }
- }
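Roughly, the effect of the repositioning above, with an invented function and bound; the key point is that the obligation's span is moved from the call path onto the single offending argument:

```rust
use std::fmt::Display;

// Hypothetical generic function with a `Display` bound.
fn show<T: Display>(value: T, prefix: &str) {
    println!("{prefix}{value}");
}

fn main() {
    show(42, "n = "); // fine: `i32: Display`

    // let table = std::collections::HashMap::<u32, u32>::new();
    // show(table, "t = ");
    // error[E0277]: `HashMap<u32, u32>` doesn't implement `std::fmt::Display`
    //   reported on the `table` argument rather than on the `show(...)` path
}
```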
-
- /// Given a vec of evaluated `FulfillmentError`s and an `fn` call expression, we walk the
- /// `PathSegment`s and resolve their type parameters to see if any of the `FulfillmentError`s
- /// were caused by them. If they were, we point at the corresponding type argument's span
- /// instead of the `fn` call path span.
- fn point_at_type_arg_instead_of_call_if_possible(
- &self,
- errors: &mut Vec<traits::FulfillmentError<'tcx>>,
- call_expr: &'tcx hir::Expr<'tcx>,
- ) {
- if let hir::ExprKind::Call(path, _) = &call_expr.kind {
- if let hir::ExprKind::Path(hir::QPath::Resolved(_, path)) = &path.kind {
- for error in errors {
- if let ty::PredicateKind::Trait(predicate) =
- error.obligation.predicate.kind().skip_binder()
- {
- // If any of the type arguments in this path segment caused the
- // `FulfillmentError`, point at its span (#61860).
- for arg in path
- .segments
- .iter()
- .filter_map(|seg| seg.args.as_ref())
- .flat_map(|a| a.args.iter())
- {
- if let hir::GenericArg::Type(hir_ty) = &arg
- && let Some(ty) =
- self.typeck_results.borrow().node_type_opt(hir_ty.hir_id)
- && self.resolve_vars_if_possible(ty) == predicate.self_ty()
- {
- error.obligation.cause.span = hir_ty.span;
- break;
- }
- }
- }
- }
- }
- }
- }
-
- fn label_fn_like(
- &self,
- err: &mut rustc_errors::DiagnosticBuilder<'tcx, rustc_errors::ErrorGuaranteed>,
- callable_def_id: Option<DefId>,
- callee_ty: Option<Ty<'tcx>>,
- ) {
- let Some(mut def_id) = callable_def_id else {
- return;
- };
-
- if let Some(assoc_item) = self.tcx.opt_associated_item(def_id)
- // Possibly points at either impl or trait item, so try to get it
- // to point to trait item, then get the parent.
- // This parent might be an impl in the case of an inherent function,
- // but the next check will fail.
- && let maybe_trait_item_def_id = assoc_item.trait_item_def_id.unwrap_or(def_id)
- && let maybe_trait_def_id = self.tcx.parent(maybe_trait_item_def_id)
- // Just an easy way to check "trait_def_id == Fn/FnMut/FnOnce"
- && let Some(call_kind) = ty::ClosureKind::from_def_id(self.tcx, maybe_trait_def_id)
- && let Some(callee_ty) = callee_ty
- {
- let callee_ty = callee_ty.peel_refs();
- match *callee_ty.kind() {
- ty::Param(param) => {
- let param =
- self.tcx.generics_of(self.body_id.owner).type_param(&param, self.tcx);
- if param.kind.is_synthetic() {
- // if it's `impl Fn() -> ..` then just fall down to the def-id based logic
- def_id = param.def_id;
- } else {
- // Otherwise, find the predicate that makes this generic callable,
- // and point at that.
- let instantiated = self
- .tcx
- .explicit_predicates_of(self.body_id.owner)
- .instantiate_identity(self.tcx);
- // FIXME(compiler-errors): This could be problematic if something has two
- // fn-like predicates with different args, but callable types really never
- // do that, so it's OK.
- for (predicate, span) in
- std::iter::zip(instantiated.predicates, instantiated.spans)
- {
- if let ty::PredicateKind::Trait(pred) = predicate.kind().skip_binder()
- && pred.self_ty().peel_refs() == callee_ty
- && ty::ClosureKind::from_def_id(self.tcx, pred.def_id()).is_some()
- {
- err.span_note(span, "callable defined here");
- return;
- }
- }
- }
- }
- ty::Opaque(new_def_id, _)
- | ty::Closure(new_def_id, _)
- | ty::FnDef(new_def_id, _) => {
- def_id = new_def_id;
- }
- _ => {
- // Look for a user-provided impl of a `Fn` trait, and point to it.
- let new_def_id = self.probe(|_| {
- let trait_ref = ty::TraitRef::new(
- call_kind.to_def_id(self.tcx),
- self.tcx.mk_substs([
- ty::GenericArg::from(callee_ty),
- self.next_ty_var(TypeVariableOrigin {
- kind: TypeVariableOriginKind::MiscVariable,
- span: rustc_span::DUMMY_SP,
- })
- .into(),
- ].into_iter()),
- );
- let obligation = traits::Obligation::new(
- traits::ObligationCause::dummy(),
- self.param_env,
- ty::Binder::dummy(ty::TraitPredicate {
- trait_ref,
- constness: ty::BoundConstness::NotConst,
- polarity: ty::ImplPolarity::Positive,
- }),
- );
- match SelectionContext::new(&self).select(&obligation) {
- Ok(Some(traits::ImplSource::UserDefined(impl_source))) => {
- Some(impl_source.impl_def_id)
- }
- _ => None
- }
- });
- if let Some(new_def_id) = new_def_id {
- def_id = new_def_id;
- } else {
- return;
- }
- }
- }
- }
-
- if let Some(def_span) = self.tcx.def_ident_span(def_id) && !def_span.is_dummy() {
- let mut spans: MultiSpan = def_span.into();
-
- let params = self
- .tcx
- .hir()
- .get_if_local(def_id)
- .and_then(|node| node.body_id())
- .into_iter()
- .flat_map(|id| self.tcx.hir().body(id).params);
-
- for param in params {
- spans.push_span_label(param.span, "");
- }
-
- let def_kind = self.tcx.def_kind(def_id);
- err.span_note(spans, &format!("{} defined here", def_kind.descr(def_id)));
- } else {
- let def_kind = self.tcx.def_kind(def_id);
- err.span_note(
- self.tcx.def_span(def_id),
- &format!("{} defined here", def_kind.descr(def_id)),
- );
- }
- }
-}
diff --git a/compiler/rustc_typeck/src/check/fn_ctxt/mod.rs b/compiler/rustc_typeck/src/check/fn_ctxt/mod.rs
deleted file mode 100644
index 05bcc710e..000000000
--- a/compiler/rustc_typeck/src/check/fn_ctxt/mod.rs
+++ /dev/null
@@ -1,296 +0,0 @@
-mod _impl;
-mod arg_matrix;
-mod checks;
-mod suggestions;
-
-pub use _impl::*;
-pub use suggestions::*;
-
-use crate::astconv::AstConv;
-use crate::check::coercion::DynamicCoerceMany;
-use crate::check::{Diverges, EnclosingBreakables, Inherited, UnsafetyState};
-
-use rustc_hir as hir;
-use rustc_hir::def_id::DefId;
-use rustc_infer::infer;
-use rustc_infer::infer::type_variable::{TypeVariableOrigin, TypeVariableOriginKind};
-use rustc_middle::infer::unify_key::{ConstVariableOrigin, ConstVariableOriginKind};
-use rustc_middle::ty::subst::GenericArgKind;
-use rustc_middle::ty::visit::TypeVisitable;
-use rustc_middle::ty::{self, Const, Ty, TyCtxt};
-use rustc_session::Session;
-use rustc_span::symbol::Ident;
-use rustc_span::{self, Span};
-use rustc_trait_selection::traits::{ObligationCause, ObligationCauseCode};
-
-use std::cell::{Cell, RefCell};
-use std::ops::Deref;
-
-pub struct FnCtxt<'a, 'tcx> {
- pub(super) body_id: hir::HirId,
-
- /// The parameter environment used for proving trait obligations
- /// in this function. This can change when we descend into
- /// closures (as they bring new things into scope), hence it is
- /// not part of `Inherited` (as of the time of this writing,
- /// closures do not yet change the environment, but they will
- /// eventually).
- pub(super) param_env: ty::ParamEnv<'tcx>,
-
- /// Number of errors that had been reported when we started
- /// checking this function. On exit, if we find that *more* errors
- /// have been reported, we will skip regionck and other work that
- /// expects the types within the function to be consistent.
- // FIXME(matthewjasper) This should not exist, and it's not correct
- // if type checking is run in parallel.
- err_count_on_creation: usize,
-
- /// If `Some`, this stores coercion information for returned
- /// expressions. If `None`, this is in a context where return is
- /// inappropriate, such as a const expression.
- ///
- /// This is a `RefCell<DynamicCoerceMany>`, which means that we
- /// can track all the return expressions and then use them to
- /// compute a useful coercion from the set, similar to a match
- /// expression or other branching context. You can use methods
- /// like `expected_ty` to access the declared return type (if
- /// any).
- pub(super) ret_coercion: Option<RefCell<DynamicCoerceMany<'tcx>>>,
-
- pub(super) ret_type_span: Option<Span>,
-
- /// Used exclusively to reduce cost of advanced evaluation used for
- /// more helpful diagnostics.
- pub(super) in_tail_expr: bool,
-
- /// First span of a return site that we find. Used in error messages.
- pub(super) ret_coercion_span: Cell<Option<Span>>,
-
- pub(super) resume_yield_tys: Option<(Ty<'tcx>, Ty<'tcx>)>,
-
- pub(super) ps: Cell<UnsafetyState>,
-
- /// Whether the last checked node generates a divergence (e.g.,
- /// `return` will set this to `Always`). In general, when entering
- /// an expression or other node in the tree, the initial value
- /// indicates whether prior parts of the containing expression may
- /// have diverged. It is then typically set to `Maybe` (and the
- /// old value remembered) for processing the subparts of the
- /// current expression. As each subpart is processed, they may set
- /// the flag to `Always`, etc. Finally, at the end, we take the
- /// result and "union" it with the original value, so that when we
- /// return the flag indicates if any subpart of the parent
- /// expression (up to and including this part) has diverged. So,
- /// if you read it after evaluating a subexpression `X`, the value
- /// you get indicates whether any subexpression that was
- /// evaluating up to and including `X` diverged.
- ///
- /// We currently use this flag only for diagnostic purposes:
- ///
- /// - To warn about unreachable code: if, after processing a
- /// sub-expression but before we have applied the effects of the
- /// current node, we see that the flag is set to `Always`, we
- /// can issue a warning. This corresponds to something like
- /// `foo(return)`; we warn on the `foo()` expression. (We then
- /// update the flag to `WarnedAlways` to suppress duplicate
- /// reports.) Similarly, if we traverse to a fresh statement (or
- /// tail expression) from an `Always` setting, we will issue a
- /// warning. This corresponds to something like `{return;
- /// foo();}` or `{return; 22}`, where we would warn on the
- /// `foo()` or `22`.
- ///
- /// An expression represents dead code if, after checking it,
- /// the diverges flag is set to something other than `Maybe`.
- pub(super) diverges: Cell<Diverges>,
-
- /// Whether any child nodes have any type errors.
- pub(super) has_errors: Cell<bool>,
-
- pub(super) enclosing_breakables: RefCell<EnclosingBreakables<'tcx>>,
-
- pub(super) inh: &'a Inherited<'a, 'tcx>,
-
- /// True if the function or closure's return type is known before
- /// entering the function/closure, i.e. if the return type is
- /// either given explicitly or inferred from, say, an `Fn*` trait
- /// bound. Used for diagnostic purposes only.
- pub(super) return_type_pre_known: bool,
-
- /// True if the return type has an Opaque type
- pub(super) return_type_has_opaque: bool,
-}
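A small example, from the user's side, of the `diverges` bookkeeping described in the field comment above (the `allow` attribute is only there so the sketch compiles warning-free):

```rust
#[allow(unreachable_code)]
fn f() -> i32 {
    return 1;
    // After the `return`, the diverges flag is `Always`, so without the
    // `allow` attribute rustc would warn that this expression is unreachable.
    2
}

fn main() {
    assert_eq!(f(), 1);
}
```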
-
-impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
- pub fn new(
- inh: &'a Inherited<'a, 'tcx>,
- param_env: ty::ParamEnv<'tcx>,
- body_id: hir::HirId,
- ) -> FnCtxt<'a, 'tcx> {
- FnCtxt {
- body_id,
- param_env,
- err_count_on_creation: inh.tcx.sess.err_count(),
- ret_coercion: None,
- ret_type_span: None,
- in_tail_expr: false,
- ret_coercion_span: Cell::new(None),
- resume_yield_tys: None,
- ps: Cell::new(UnsafetyState::function(hir::Unsafety::Normal, hir::CRATE_HIR_ID)),
- diverges: Cell::new(Diverges::Maybe),
- has_errors: Cell::new(false),
- enclosing_breakables: RefCell::new(EnclosingBreakables {
- stack: Vec::new(),
- by_id: Default::default(),
- }),
- inh,
- return_type_pre_known: true,
- return_type_has_opaque: false,
- }
- }
-
- pub fn cause(&self, span: Span, code: ObligationCauseCode<'tcx>) -> ObligationCause<'tcx> {
- ObligationCause::new(span, self.body_id, code)
- }
-
- pub fn misc(&self, span: Span) -> ObligationCause<'tcx> {
- self.cause(span, ObligationCauseCode::MiscObligation)
- }
-
- pub fn sess(&self) -> &Session {
- &self.tcx.sess
- }
-
- pub fn errors_reported_since_creation(&self) -> bool {
- self.tcx.sess.err_count() > self.err_count_on_creation
- }
-}
-
-impl<'a, 'tcx> Deref for FnCtxt<'a, 'tcx> {
- type Target = Inherited<'a, 'tcx>;
- fn deref(&self) -> &Self::Target {
- &self.inh
- }
-}
-
-impl<'a, 'tcx> AstConv<'tcx> for FnCtxt<'a, 'tcx> {
- fn tcx<'b>(&'b self) -> TyCtxt<'tcx> {
- self.tcx
- }
-
- fn item_def_id(&self) -> Option<DefId> {
- None
- }
-
- fn get_type_parameter_bounds(
- &self,
- _: Span,
- def_id: DefId,
- _: Ident,
- ) -> ty::GenericPredicates<'tcx> {
- let tcx = self.tcx;
- let item_def_id = tcx.hir().ty_param_owner(def_id.expect_local());
- let generics = tcx.generics_of(item_def_id);
- let index = generics.param_def_id_to_index[&def_id];
- ty::GenericPredicates {
- parent: None,
- predicates: tcx.arena.alloc_from_iter(
- self.param_env.caller_bounds().iter().filter_map(|predicate| {
- match predicate.kind().skip_binder() {
- ty::PredicateKind::Trait(data) if data.self_ty().is_param(index) => {
- // HACK(eddyb) should get the original `Span`.
- let span = tcx.def_span(def_id);
- Some((predicate, span))
- }
- _ => None,
- }
- }),
- ),
- }
- }
-
- fn re_infer(&self, def: Option<&ty::GenericParamDef>, span: Span) -> Option<ty::Region<'tcx>> {
- let v = match def {
- Some(def) => infer::EarlyBoundRegion(span, def.name),
- None => infer::MiscVariable(span),
- };
- Some(self.next_region_var(v))
- }
-
- fn allow_ty_infer(&self) -> bool {
- true
- }
-
- fn ty_infer(&self, param: Option<&ty::GenericParamDef>, span: Span) -> Ty<'tcx> {
- if let Some(param) = param {
- if let GenericArgKind::Type(ty) = self.var_for_def(span, param).unpack() {
- return ty;
- }
- unreachable!()
- } else {
- self.next_ty_var(TypeVariableOrigin {
- kind: TypeVariableOriginKind::TypeInference,
- span,
- })
- }
- }
-
- fn ct_infer(
- &self,
- ty: Ty<'tcx>,
- param: Option<&ty::GenericParamDef>,
- span: Span,
- ) -> Const<'tcx> {
- if let Some(param) = param {
- if let GenericArgKind::Const(ct) = self.var_for_def(span, param).unpack() {
- return ct;
- }
- unreachable!()
- } else {
- self.next_const_var(
- ty,
- ConstVariableOrigin { kind: ConstVariableOriginKind::ConstInference, span },
- )
- }
- }
-
- fn projected_ty_from_poly_trait_ref(
- &self,
- span: Span,
- item_def_id: DefId,
- item_segment: &hir::PathSegment<'_>,
- poly_trait_ref: ty::PolyTraitRef<'tcx>,
- ) -> Ty<'tcx> {
- let trait_ref = self.replace_bound_vars_with_fresh_vars(
- span,
- infer::LateBoundRegionConversionTime::AssocTypeProjection(item_def_id),
- poly_trait_ref,
- );
-
- let item_substs = <dyn AstConv<'tcx>>::create_substs_for_associated_item(
- self,
- self.tcx,
- span,
- item_def_id,
- item_segment,
- trait_ref.substs,
- );
-
- self.tcx().mk_projection(item_def_id, item_substs)
- }
-
- fn normalize_ty(&self, span: Span, ty: Ty<'tcx>) -> Ty<'tcx> {
- if ty.has_escaping_bound_vars() {
- ty // FIXME: normalization and escaping regions
- } else {
- self.normalize_associated_types_in(span, ty)
- }
- }
-
- fn set_tainted_by_errors(&self) {
- self.infcx.set_tainted_by_errors()
- }
-
- fn record_ty(&self, hir_id: hir::HirId, ty: Ty<'tcx>, _span: Span) {
- self.write_ty(hir_id, ty)
- }
-}
diff --git a/compiler/rustc_typeck/src/check/fn_ctxt/suggestions.rs b/compiler/rustc_typeck/src/check/fn_ctxt/suggestions.rs
deleted file mode 100644
index 57771e096..000000000
--- a/compiler/rustc_typeck/src/check/fn_ctxt/suggestions.rs
+++ /dev/null
@@ -1,912 +0,0 @@
-use super::FnCtxt;
-use crate::astconv::AstConv;
-use crate::errors::{AddReturnTypeSuggestion, ExpectedReturnTypeLabel};
-
-use rustc_ast::util::parser::ExprPrecedence;
-use rustc_errors::{Applicability, Diagnostic, MultiSpan};
-use rustc_hir as hir;
-use rustc_hir::def::{CtorOf, DefKind};
-use rustc_hir::lang_items::LangItem;
-use rustc_hir::{
- Expr, ExprKind, GenericBound, Node, Path, QPath, Stmt, StmtKind, TyKind, WherePredicate,
-};
-use rustc_infer::infer::{self, TyCtxtInferExt};
-use rustc_infer::traits::{self, StatementAsExpression};
-use rustc_middle::lint::in_external_macro;
-use rustc_middle::ty::{self, Binder, IsSuggestable, Subst, ToPredicate, Ty};
-use rustc_span::symbol::sym;
-use rustc_span::Span;
-use rustc_trait_selection::traits::query::evaluate_obligation::InferCtxtExt as _;
-
-impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
- pub(in super::super) fn suggest_semicolon_at_end(&self, span: Span, err: &mut Diagnostic) {
- err.span_suggestion_short(
- span.shrink_to_hi(),
- "consider using a semicolon here",
- ";",
- Applicability::MachineApplicable,
- );
- }
-
- /// On implicit return expressions with mismatched types, provides the following suggestions:
- ///
- /// - Points out the method's return type as the reason for the expected type.
- /// - Possible missing semicolon.
- /// - Possible missing return type if the return type is the default, and not `fn main()`.
- pub fn suggest_mismatched_types_on_tail(
- &self,
- err: &mut Diagnostic,
- expr: &'tcx hir::Expr<'tcx>,
- expected: Ty<'tcx>,
- found: Ty<'tcx>,
- blk_id: hir::HirId,
- ) -> bool {
- let expr = expr.peel_drop_temps();
- self.suggest_missing_semicolon(err, expr, expected, false);
- let mut pointing_at_return_type = false;
- if let Some((fn_decl, can_suggest)) = self.get_fn_decl(blk_id) {
- let fn_id = self.tcx.hir().get_return_block(blk_id).unwrap();
- pointing_at_return_type = self.suggest_missing_return_type(
- err,
- &fn_decl,
- expected,
- found,
- can_suggest,
- fn_id,
- );
- self.suggest_missing_break_or_return_expr(
- err, expr, &fn_decl, expected, found, blk_id, fn_id,
- );
- }
- pointing_at_return_type
- }
-
- /// When encountering an fn-like ctor that needs to unify with a value, check whether calling
- /// the ctor would successfully solve the type mismatch and if so, suggest it:
- /// ```compile_fail,E0308
- /// fn foo(x: usize) -> usize { x }
- /// let x: usize = foo; // suggest calling the `foo` function: `foo(42)`
- /// ```
- fn suggest_fn_call(
- &self,
- err: &mut Diagnostic,
- expr: &hir::Expr<'_>,
- expected: Ty<'tcx>,
- found: Ty<'tcx>,
- ) -> bool {
- let (def_id, output, inputs) = match *found.kind() {
- ty::FnDef(def_id, _) => {
- let fn_sig = found.fn_sig(self.tcx);
- (def_id, fn_sig.output(), fn_sig.inputs().skip_binder().len())
- }
- ty::Closure(def_id, substs) => {
- let fn_sig = substs.as_closure().sig();
- (def_id, fn_sig.output(), fn_sig.inputs().skip_binder().len() - 1)
- }
- ty::Opaque(def_id, substs) => {
- let sig = self.tcx.bound_item_bounds(def_id).subst(self.tcx, substs).iter().find_map(|pred| {
- if let ty::PredicateKind::Projection(proj) = pred.kind().skip_binder()
- && Some(proj.projection_ty.item_def_id) == self.tcx.lang_items().fn_once_output()
- // args tuple will always be substs[1]
- && let ty::Tuple(args) = proj.projection_ty.substs.type_at(1).kind()
- {
- Some((
- pred.kind().rebind(proj.term.ty().unwrap()),
- args.len(),
- ))
- } else {
- None
- }
- });
- if let Some((output, inputs)) = sig {
- (def_id, output, inputs)
- } else {
- return false;
- }
- }
- _ => return false,
- };
-
- let output = self.replace_bound_vars_with_fresh_vars(expr.span, infer::FnCall, output);
- let output = self.normalize_associated_types_in(expr.span, output);
- if !output.is_ty_var() && self.can_coerce(output, expected) {
- let (sugg_call, mut applicability) = match inputs {
- 0 => ("".to_string(), Applicability::MachineApplicable),
- 1..=4 => (
- (0..inputs).map(|_| "_").collect::<Vec<_>>().join(", "),
- Applicability::MachineApplicable,
- ),
- _ => ("...".to_string(), Applicability::HasPlaceholders),
- };
-
- let msg = match self.tcx.def_kind(def_id) {
- DefKind::Fn => "call this function",
- DefKind::Closure | DefKind::OpaqueTy => "call this closure",
- DefKind::Ctor(CtorOf::Struct, _) => "instantiate this tuple struct",
- DefKind::Ctor(CtorOf::Variant, _) => "instantiate this tuple variant",
- _ => "call this function",
- };
-
- let sugg = match expr.kind {
- hir::ExprKind::Call(..)
- | hir::ExprKind::Path(..)
- | hir::ExprKind::Index(..)
- | hir::ExprKind::Lit(..) => {
- vec![(expr.span.shrink_to_hi(), format!("({sugg_call})"))]
- }
- hir::ExprKind::Closure { .. } => {
- // Might be `{ expr } || { bool }`
- applicability = Applicability::MaybeIncorrect;
- vec![
- (expr.span.shrink_to_lo(), "(".to_string()),
- (expr.span.shrink_to_hi(), format!(")({sugg_call})")),
- ]
- }
- _ => {
- vec![
- (expr.span.shrink_to_lo(), "(".to_string()),
- (expr.span.shrink_to_hi(), format!(")({sugg_call})")),
- ]
- }
- };
-
- err.multipart_suggestion_verbose(
- format!("use parentheses to {msg}"),
- sugg,
- applicability,
- );
-
- return true;
- }
- false
- }
-
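-    /// Suggests ways to make `found` coerce to `expected`: adding or removing a reference,
-    /// a cast, or calling a conversion method. A minimal illustrative case (hypothetical
-    /// code, not from this crate) where the conversion-method suggestion applies:
-    ///
-    /// ```compile_fail,E0308
-    /// fn takes_string(_s: String) {}
-    ///
-    /// fn main() {
-    ///     takes_string("hi"); // suggested fix: `takes_string("hi".to_string())`
-    /// }
-    /// ```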
- pub fn suggest_deref_ref_or_into(
- &self,
- err: &mut Diagnostic,
- expr: &hir::Expr<'tcx>,
- expected: Ty<'tcx>,
- found: Ty<'tcx>,
- expected_ty_expr: Option<&'tcx hir::Expr<'tcx>>,
- ) {
- let expr = expr.peel_blocks();
- if let Some((sp, msg, suggestion, applicability, verbose)) =
- self.check_ref(expr, found, expected)
- {
- if verbose {
- err.span_suggestion_verbose(sp, &msg, suggestion, applicability);
- } else {
- err.span_suggestion(sp, &msg, suggestion, applicability);
- }
- } else if let (ty::FnDef(def_id, ..), true) =
- (&found.kind(), self.suggest_fn_call(err, expr, expected, found))
- {
- if let Some(sp) = self.tcx.hir().span_if_local(*def_id) {
- err.span_label(sp, format!("{found} defined here"));
- }
- } else if !self.check_for_cast(err, expr, found, expected, expected_ty_expr) {
- let methods = self.get_conversion_methods(expr.span, expected, found, expr.hir_id);
- if !methods.is_empty() {
- let mut suggestions = methods.iter()
- .filter_map(|conversion_method| {
- let receiver_method_ident = expr.method_ident();
- if let Some(method_ident) = receiver_method_ident
- && method_ident.name == conversion_method.name
- {
- return None // do not suggest code that is already there (#53348)
- }
-
- let method_call_list = [sym::to_vec, sym::to_string];
- let mut sugg = if let ExprKind::MethodCall(receiver_method, ..) = expr.kind
- && receiver_method.ident.name == sym::clone
- && method_call_list.contains(&conversion_method.name)
- // If receiver is `.clone()` and found type has one of those methods,
- // we guess that the user wants to convert from a slice type (`&[]` or `&str`)
- // to an owned type (`Vec` or `String`). These conversions clone internally,
- // so we remove the user's `clone` call.
- {
- vec![(
- receiver_method.ident.span,
- conversion_method.name.to_string()
- )]
- } else if expr.precedence().order()
- < ExprPrecedence::MethodCall.order()
- {
- vec![
- (expr.span.shrink_to_lo(), "(".to_string()),
- (expr.span.shrink_to_hi(), format!(").{}()", conversion_method.name)),
- ]
- } else {
- vec![(expr.span.shrink_to_hi(), format!(".{}()", conversion_method.name))]
- };
- let struct_pat_shorthand_field = self.maybe_get_struct_pattern_shorthand_field(expr);
- if let Some(name) = struct_pat_shorthand_field {
- sugg.insert(
- 0,
- (expr.span.shrink_to_lo(), format!("{}: ", name)),
- );
- }
- Some(sugg)
- })
- .peekable();
- if suggestions.peek().is_some() {
- err.multipart_suggestions(
- "try using a conversion method",
- suggestions,
- Applicability::MaybeIncorrect,
- );
- }
- } else if let ty::Adt(found_adt, found_substs) = found.kind()
- && self.tcx.is_diagnostic_item(sym::Option, found_adt.did())
- && let ty::Adt(expected_adt, expected_substs) = expected.kind()
- && self.tcx.is_diagnostic_item(sym::Option, expected_adt.did())
- && let ty::Ref(_, inner_ty, _) = expected_substs.type_at(0).kind()
- && inner_ty.is_str()
- {
- let ty = found_substs.type_at(0);
- let mut peeled = ty;
- let mut ref_cnt = 0;
- while let ty::Ref(_, inner, _) = peeled.kind() {
- peeled = *inner;
- ref_cnt += 1;
- }
- if let ty::Adt(adt, _) = peeled.kind()
- && self.tcx.is_diagnostic_item(sym::String, adt.did())
- {
- err.span_suggestion_verbose(
- expr.span.shrink_to_hi(),
- "try converting the passed type into a `&str`",
- format!(".map(|x| &*{}x)", "*".repeat(ref_cnt)),
- Applicability::MaybeIncorrect,
- );
- }
- }
- }
- }
-
-    /// When a boxed value is expected but the found value is allocated on the stack, suggest
-    /// allocating it on the heap by calling `Box::new()`.
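-    ///
-    /// A minimal illustrative case (hypothetical code, not from this crate):
-    ///
-    /// ```compile_fail,E0308
-    /// fn takes_boxed(_x: Box<u32>) {}
-    ///
-    /// fn main() {
-    ///     takes_boxed(5); // suggested fix: `takes_boxed(Box::new(5))`
-    /// }
-    /// ```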
- pub(in super::super) fn suggest_boxing_when_appropriate(
- &self,
- err: &mut Diagnostic,
- expr: &hir::Expr<'_>,
- expected: Ty<'tcx>,
- found: Ty<'tcx>,
- ) {
- if self.tcx.hir().is_inside_const_context(expr.hir_id) {
- // Do not suggest `Box::new` in const context.
- return;
- }
- if !expected.is_box() || found.is_box() {
- return;
- }
- let boxed_found = self.tcx.mk_box(found);
- if self.can_coerce(boxed_found, expected) {
- err.multipart_suggestion(
- "store this in the heap by calling `Box::new`",
- vec![
- (expr.span.shrink_to_lo(), "Box::new(".to_string()),
- (expr.span.shrink_to_hi(), ")".to_string()),
- ],
- Applicability::MachineApplicable,
- );
- err.note(
- "for more on the distinction between the stack and the heap, read \
- https://doc.rust-lang.org/book/ch15-01-box.html, \
- https://doc.rust-lang.org/rust-by-example/std/box.html, and \
- https://doc.rust-lang.org/std/boxed/index.html",
- );
- }
- }
-
-    /// When encountering a closure that captures variables in a position where a fn pointer
-    /// is expected, suggest a non-capturing closure.
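-    ///
-    /// A minimal illustrative case (hypothetical code, not from this crate):
-    ///
-    /// ```compile_fail,E0308
-    /// fn takes_fn_ptr(_f: fn() -> i32) {}
-    ///
-    /// fn main() {
-    ///     let x = 1;
-    ///     takes_fn_ptr(move || x); // a capturing closure cannot coerce to `fn() -> i32`
-    /// }
-    /// ```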
- pub(in super::super) fn suggest_no_capture_closure(
- &self,
- err: &mut Diagnostic,
- expected: Ty<'tcx>,
- found: Ty<'tcx>,
- ) {
- if let (ty::FnPtr(_), ty::Closure(def_id, _)) = (expected.kind(), found.kind()) {
- if let Some(upvars) = self.tcx.upvars_mentioned(*def_id) {
-                // Report up to four upvars being captured to reduce the number of error
-                // messages reported back to the user.
- let spans_and_labels = upvars
- .iter()
- .take(4)
- .map(|(var_hir_id, upvar)| {
- let var_name = self.tcx.hir().name(*var_hir_id).to_string();
- let msg = format!("`{}` captured here", var_name);
- (upvar.span, msg)
- })
- .collect::<Vec<_>>();
-
- let mut multi_span: MultiSpan =
- spans_and_labels.iter().map(|(sp, _)| *sp).collect::<Vec<_>>().into();
- for (sp, label) in spans_and_labels {
- multi_span.push_span_label(sp, label);
- }
- err.span_note(
- multi_span,
- "closures can only be coerced to `fn` types if they do not capture any variables"
- );
- }
- }
- }
-
- /// When encountering an `impl Future` where `BoxFuture` is expected, suggest `Box::pin`.
- #[instrument(skip(self, err))]
- pub(in super::super) fn suggest_calling_boxed_future_when_appropriate(
- &self,
- err: &mut Diagnostic,
- expr: &hir::Expr<'_>,
- expected: Ty<'tcx>,
- found: Ty<'tcx>,
- ) -> bool {
- // Handle #68197.
-
- if self.tcx.hir().is_inside_const_context(expr.hir_id) {
- // Do not suggest `Box::new` in const context.
- return false;
- }
- let pin_did = self.tcx.lang_items().pin_type();
- // This guards the `unwrap` and `mk_box` below.
- if pin_did.is_none() || self.tcx.lang_items().owned_box().is_none() {
- return false;
- }
- let box_found = self.tcx.mk_box(found);
- let pin_box_found = self.tcx.mk_lang_item(box_found, LangItem::Pin).unwrap();
- let pin_found = self.tcx.mk_lang_item(found, LangItem::Pin).unwrap();
- match expected.kind() {
- ty::Adt(def, _) if Some(def.did()) == pin_did => {
- if self.can_coerce(pin_box_found, expected) {
- debug!("can coerce {:?} to {:?}, suggesting Box::pin", pin_box_found, expected);
- match found.kind() {
- ty::Adt(def, _) if def.is_box() => {
- err.help("use `Box::pin`");
- }
- _ => {
- err.multipart_suggestion(
- "you need to pin and box this expression",
- vec![
- (expr.span.shrink_to_lo(), "Box::pin(".to_string()),
- (expr.span.shrink_to_hi(), ")".to_string()),
- ],
- Applicability::MaybeIncorrect,
- );
- }
- }
- true
- } else if self.can_coerce(pin_found, expected) {
- match found.kind() {
- ty::Adt(def, _) if def.is_box() => {
- err.help("use `Box::pin`");
- true
- }
- _ => false,
- }
- } else {
- false
- }
- }
- ty::Adt(def, _) if def.is_box() && self.can_coerce(box_found, expected) => {
- // Check if the parent expression is a call to Pin::new. If it
- // is and we were expecting a Box, ergo Pin<Box<expected>>, we
- // can suggest Box::pin.
- let parent = self.tcx.hir().get_parent_node(expr.hir_id);
- let Some(Node::Expr(Expr { kind: ExprKind::Call(fn_name, _), .. })) = self.tcx.hir().find(parent) else {
- return false;
- };
- match fn_name.kind {
- ExprKind::Path(QPath::TypeRelative(
- hir::Ty {
- kind: TyKind::Path(QPath::Resolved(_, Path { res: recv_ty, .. })),
- ..
- },
- method,
- )) if recv_ty.opt_def_id() == pin_did && method.ident.name == sym::new => {
- err.span_suggestion(
- fn_name.span,
- "use `Box::pin` to pin and box this expression",
- "Box::pin",
- Applicability::MachineApplicable,
- );
- true
- }
- _ => false,
- }
- }
- _ => false,
- }
- }
-
- /// A common error is to forget to add a semicolon at the end of a block, e.g.,
- ///
- /// ```compile_fail,E0308
- /// # fn bar_that_returns_u32() -> u32 { 4 }
- /// fn foo() {
- /// bar_that_returns_u32()
- /// }
- /// ```
- ///
- /// This routine checks if the return expression in a block would make sense on its own as a
- /// statement and the return type has been left as default or has been specified as `()`. If so,
- /// it suggests adding a semicolon.
- ///
-    /// If the expression is the body of a closure without a block (`|| expr`), a
-    /// block also needs to be added (`|| { expr; }`). This is denoted by `needs_block`.
- pub fn suggest_missing_semicolon(
- &self,
- err: &mut Diagnostic,
- expression: &'tcx hir::Expr<'tcx>,
- expected: Ty<'tcx>,
- needs_block: bool,
- ) {
- if expected.is_unit() {
-            // `BlockTailExpression` is only relevant if the tail expr would be
-            // useful on its own.
- match expression.kind {
- ExprKind::Call(..)
- | ExprKind::MethodCall(..)
- | ExprKind::Loop(..)
- | ExprKind::If(..)
- | ExprKind::Match(..)
- | ExprKind::Block(..)
- if expression.can_have_side_effects()
- // If the expression is from an external macro, then do not suggest
- // adding a semicolon, because there's nowhere to put it.
- // See issue #81943.
- && !in_external_macro(self.tcx.sess, expression.span) =>
- {
- if needs_block {
- err.multipart_suggestion(
- "consider using a semicolon here",
- vec![
- (expression.span.shrink_to_lo(), "{ ".to_owned()),
- (expression.span.shrink_to_hi(), "; }".to_owned()),
- ],
- Applicability::MachineApplicable,
- );
- } else {
- err.span_suggestion(
- expression.span.shrink_to_hi(),
- "consider using a semicolon here",
- ";",
- Applicability::MachineApplicable,
- );
- }
- }
- _ => (),
- }
- }
- }
-
- /// A possible error is to forget to add a return type that is needed:
- ///
- /// ```compile_fail,E0308
- /// # fn bar_that_returns_u32() -> u32 { 4 }
- /// fn foo() {
- /// bar_that_returns_u32()
- /// }
- /// ```
- ///
- /// This routine checks if the return type is left as default, the method is not part of an
- /// `impl` block and that it isn't the `main` method. If so, it suggests setting the return
- /// type.
- pub(in super::super) fn suggest_missing_return_type(
- &self,
- err: &mut Diagnostic,
- fn_decl: &hir::FnDecl<'_>,
- expected: Ty<'tcx>,
- found: Ty<'tcx>,
- can_suggest: bool,
- fn_id: hir::HirId,
- ) -> bool {
- let found =
- self.resolve_numeric_literals_with_default(self.resolve_vars_if_possible(found));
- // Only suggest changing the return type for methods that
- // haven't set a return type at all (and aren't `fn main()` or an impl).
- match (
- &fn_decl.output,
- found.is_suggestable(self.tcx, false),
- can_suggest,
- expected.is_unit(),
- ) {
- (&hir::FnRetTy::DefaultReturn(span), true, true, true) => {
- err.subdiagnostic(AddReturnTypeSuggestion::Add { span, found });
- true
- }
- (&hir::FnRetTy::DefaultReturn(span), false, true, true) => {
- // FIXME: if `found` could be `impl Iterator` or `impl Fn*`, we should suggest
- // that.
- err.subdiagnostic(AddReturnTypeSuggestion::MissingHere { span });
- true
- }
- (&hir::FnRetTy::DefaultReturn(span), _, false, true) => {
- // `fn main()` must return `()`, do not suggest changing return type
- err.subdiagnostic(ExpectedReturnTypeLabel::Unit { span });
- true
- }
- // expectation was caused by something else, not the default return
- (&hir::FnRetTy::DefaultReturn(_), _, _, false) => false,
- (&hir::FnRetTy::Return(ref ty), _, _, _) => {
- // Only point to return type if the expected type is the return type, as if they
- // are not, the expectation must have been caused by something else.
- debug!("suggest_missing_return_type: return type {:?} node {:?}", ty, ty.kind);
- let span = ty.span;
- let ty = <dyn AstConv<'_>>::ast_ty_to_ty(self, ty);
- debug!("suggest_missing_return_type: return type {:?}", ty);
-                debug!("suggest_missing_return_type: expected type {:?}", expected);
- let bound_vars = self.tcx.late_bound_vars(fn_id);
- let ty = Binder::bind_with_vars(ty, bound_vars);
- let ty = self.normalize_associated_types_in(span, ty);
- let ty = self.tcx.erase_late_bound_regions(ty);
- if self.can_coerce(expected, ty) {
- err.subdiagnostic(ExpectedReturnTypeLabel::Other { span, expected });
- self.try_suggest_return_impl_trait(err, expected, ty, fn_id);
- return true;
- }
- false
- }
- }
- }
-
-    /// Check whether the return type is a generic type parameter with a trait bound.
-    /// Only suggest this if the generic param is not present in the arguments;
-    /// if this is true, hint the user towards changing the return type to `impl Trait`.
- /// ```compile_fail,E0308
- /// fn cant_name_it<T: Fn() -> u32>() -> T {
- /// || 3
- /// }
- /// ```
- fn try_suggest_return_impl_trait(
- &self,
- err: &mut Diagnostic,
- expected: Ty<'tcx>,
- found: Ty<'tcx>,
- fn_id: hir::HirId,
- ) {
- // Only apply the suggestion if:
- // - the return type is a generic parameter
- // - the generic param is not used as a fn param
- // - the generic param has at least one bound
- // - the generic param doesn't appear in any other bounds where it's not the Self type
- // Suggest:
- // - Changing the return type to be `impl <all bounds>`
-
- debug!("try_suggest_return_impl_trait, expected = {:?}, found = {:?}", expected, found);
-
- let ty::Param(expected_ty_as_param) = expected.kind() else { return };
-
- let fn_node = self.tcx.hir().find(fn_id);
-
- let Some(hir::Node::Item(hir::Item {
- kind:
- hir::ItemKind::Fn(
- hir::FnSig { decl: hir::FnDecl { inputs: fn_parameters, output: fn_return, .. }, .. },
- hir::Generics { params, predicates, .. },
- _body_id,
- ),
- ..
- })) = fn_node else { return };
-
- if params.get(expected_ty_as_param.index as usize).is_none() {
- return;
- };
-
-        // Get all `where` bound predicates here, because they are used in two cases below.
- let where_predicates = predicates
- .iter()
- .filter_map(|p| match p {
- WherePredicate::BoundPredicate(hir::WhereBoundPredicate {
- bounds,
- bounded_ty,
- ..
- }) => {
- // FIXME: Maybe these calls to `ast_ty_to_ty` can be removed (and the ones below)
- let ty = <dyn AstConv<'_>>::ast_ty_to_ty(self, bounded_ty);
- Some((ty, bounds))
- }
- _ => None,
- })
- .map(|(ty, bounds)| match ty.kind() {
- ty::Param(param_ty) if param_ty == expected_ty_as_param => Ok(Some(bounds)),
- // check whether there is any predicate that contains our `T`, like `Option<T>: Send`
- _ => match ty.contains(expected) {
- true => Err(()),
- false => Ok(None),
- },
- })
- .collect::<Result<Vec<_>, _>>();
-
- let Ok(where_predicates) = where_predicates else { return };
-
- // now get all predicates in the same types as the where bounds, so we can chain them
- let predicates_from_where =
- where_predicates.iter().flatten().flat_map(|bounds| bounds.iter());
-
- // extract all bounds from the source code using their spans
- let all_matching_bounds_strs = predicates_from_where
- .filter_map(|bound| match bound {
- GenericBound::Trait(_, _) => {
- self.tcx.sess.source_map().span_to_snippet(bound.span()).ok()
- }
- _ => None,
- })
- .collect::<Vec<String>>();
-
- if all_matching_bounds_strs.len() == 0 {
- return;
- }
-
- let all_bounds_str = all_matching_bounds_strs.join(" + ");
-
- let ty_param_used_in_fn_params = fn_parameters.iter().any(|param| {
- let ty = <dyn AstConv<'_>>::ast_ty_to_ty(self, param);
- matches!(ty.kind(), ty::Param(fn_param_ty_param) if expected_ty_as_param == fn_param_ty_param)
- });
-
- if ty_param_used_in_fn_params {
- return;
- }
-
- err.span_suggestion(
- fn_return.span(),
- "consider using an impl return type",
- format!("impl {}", all_bounds_str),
- Applicability::MaybeIncorrect,
- );
- }
-
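-    /// When a value appears where `()` is expected, suggest `break <expr>;` if the expression
-    /// is inside both a loop and a `let` statement, or `return <expr>;` if the value coerces
-    /// to the function's declared return type. A minimal illustrative case (hypothetical code,
-    /// not from this crate) for the `break` suggestion:
-    ///
-    /// ```compile_fail,E0308
-    /// fn foo() -> i32 {
-    ///     let x = loop {
-    ///         1 // suggested fix: `break 1;`
-    ///     };
-    ///     x
-    /// }
-    /// ```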
- pub(in super::super) fn suggest_missing_break_or_return_expr(
- &self,
- err: &mut Diagnostic,
- expr: &'tcx hir::Expr<'tcx>,
- fn_decl: &hir::FnDecl<'_>,
- expected: Ty<'tcx>,
- found: Ty<'tcx>,
- id: hir::HirId,
- fn_id: hir::HirId,
- ) {
- if !expected.is_unit() {
- return;
- }
- let found = self.resolve_vars_with_obligations(found);
-
- let in_loop = self.is_loop(id)
- || self.tcx.hir().parent_iter(id).any(|(parent_id, _)| self.is_loop(parent_id));
-
- let in_local_statement = self.is_local_statement(id)
- || self
- .tcx
- .hir()
- .parent_iter(id)
- .any(|(parent_id, _)| self.is_local_statement(parent_id));
-
- if in_loop && in_local_statement {
- err.multipart_suggestion(
- "you might have meant to break the loop with this value",
- vec![
- (expr.span.shrink_to_lo(), "break ".to_string()),
- (expr.span.shrink_to_hi(), ";".to_string()),
- ],
- Applicability::MaybeIncorrect,
- );
- return;
- }
-
- if let hir::FnRetTy::Return(ty) = fn_decl.output {
- let ty = <dyn AstConv<'_>>::ast_ty_to_ty(self, ty);
- let bound_vars = self.tcx.late_bound_vars(fn_id);
- let ty = self.tcx.erase_late_bound_regions(Binder::bind_with_vars(ty, bound_vars));
- let ty = self.normalize_associated_types_in(expr.span, ty);
- let ty = match self.tcx.asyncness(fn_id.owner) {
- hir::IsAsync::Async => self
- .tcx
- .infer_ctxt()
- .enter(|infcx| {
- infcx.get_impl_future_output_ty(ty).unwrap_or_else(|| {
- span_bug!(
- fn_decl.output.span(),
- "failed to get output type of async function"
- )
- })
- })
- .skip_binder(),
- hir::IsAsync::NotAsync => ty,
- };
- if self.can_coerce(found, ty) {
- err.multipart_suggestion(
- "you might have meant to return this value",
- vec![
- (expr.span.shrink_to_lo(), "return ".to_string()),
- (expr.span.shrink_to_hi(), ";".to_string()),
- ],
- Applicability::MaybeIncorrect,
- );
- }
- }
- }
-
- pub(in super::super) fn suggest_missing_parentheses(
- &self,
- err: &mut Diagnostic,
- expr: &hir::Expr<'_>,
- ) {
- let sp = self.tcx.sess.source_map().start_point(expr.span);
- if let Some(sp) = self.tcx.sess.parse_sess.ambiguous_block_expr_parse.borrow().get(&sp) {
- // `{ 42 } &&x` (#61475) or `{ 42 } && if x { 1 } else { 0 }`
- self.tcx.sess.parse_sess.expr_parentheses_needed(err, *sp);
- }
- }
-
-    /// Given an expression type mismatch, peel any `&` expressions until we get to
-    /// a block expression, and then suggest replacing the braces with square brackets
-    /// if it was possibly mistaken array syntax.
- pub(crate) fn suggest_block_to_brackets_peeling_refs(
- &self,
- diag: &mut Diagnostic,
- mut expr: &hir::Expr<'_>,
- mut expr_ty: Ty<'tcx>,
- mut expected_ty: Ty<'tcx>,
- ) {
- loop {
- match (&expr.kind, expr_ty.kind(), expected_ty.kind()) {
- (
- hir::ExprKind::AddrOf(_, _, inner_expr),
- ty::Ref(_, inner_expr_ty, _),
- ty::Ref(_, inner_expected_ty, _),
- ) => {
- expr = *inner_expr;
- expr_ty = *inner_expr_ty;
- expected_ty = *inner_expected_ty;
- }
- (hir::ExprKind::Block(blk, _), _, _) => {
- self.suggest_block_to_brackets(diag, *blk, expr_ty, expected_ty);
- break;
- }
- _ => break,
- }
- }
- }
-
-    /// Suggest wrapping the block in square brackets instead of curly braces
-    /// in case curly braces were mistakenly used for array syntax, e.g. `{ 1 }` -> `[ 1 ]`.
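-    ///
-    /// A minimal illustrative case (hypothetical code, not from this crate):
-    ///
-    /// ```compile_fail,E0308
-    /// let _x: [i32; 1] = { 1 }; // suggested fix: `[ 1 ]`
-    /// ```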
- pub(crate) fn suggest_block_to_brackets(
- &self,
- diag: &mut Diagnostic,
- blk: &hir::Block<'_>,
- blk_ty: Ty<'tcx>,
- expected_ty: Ty<'tcx>,
- ) {
- if let ty::Slice(elem_ty) | ty::Array(elem_ty, _) = expected_ty.kind() {
- if self.can_coerce(blk_ty, *elem_ty)
- && blk.stmts.is_empty()
- && blk.rules == hir::BlockCheckMode::DefaultBlock
- {
- let source_map = self.tcx.sess.source_map();
- if let Ok(snippet) = source_map.span_to_snippet(blk.span) {
- if snippet.starts_with('{') && snippet.ends_with('}') {
- diag.multipart_suggestion_verbose(
- "to create an array, use square brackets instead of curly braces",
- vec![
- (
- blk.span
- .shrink_to_lo()
- .with_hi(rustc_span::BytePos(blk.span.lo().0 + 1)),
- "[".to_string(),
- ),
- (
- blk.span
- .shrink_to_hi()
- .with_lo(rustc_span::BytePos(blk.span.hi().0 - 1)),
- "]".to_string(),
- ),
- ],
- Applicability::MachineApplicable,
- );
- }
- }
- }
- }
- }
-
- fn is_loop(&self, id: hir::HirId) -> bool {
- let node = self.tcx.hir().get(id);
- matches!(node, Node::Expr(Expr { kind: ExprKind::Loop(..), .. }))
- }
-
- fn is_local_statement(&self, id: hir::HirId) -> bool {
- let node = self.tcx.hir().get(id);
- matches!(node, Node::Stmt(Stmt { kind: StmtKind::Local(..), .. }))
- }
-
-    /// Note that `&T` was cloned instead of `T` because `T` does not implement `Clone`,
-    /// which is a side-effect of autoref.
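-    ///
-    /// A minimal illustrative case (hypothetical code, not from this crate):
-    ///
-    /// ```compile_fail,E0308
-    /// struct NotClone;
-    ///
-    /// fn takes_owned(_x: NotClone) {}
-    ///
-    /// fn main() {
-    ///     let r = &NotClone;
-    ///     takes_owned(r.clone()); // clones the `&NotClone`, not the `NotClone`
-    /// }
-    /// ```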
- pub(crate) fn note_type_is_not_clone(
- &self,
- diag: &mut Diagnostic,
- expected_ty: Ty<'tcx>,
- found_ty: Ty<'tcx>,
- expr: &hir::Expr<'_>,
- ) {
- let hir::ExprKind::MethodCall(segment, &[ref callee_expr], _) = expr.kind else { return; };
- let Some(clone_trait_did) = self.tcx.lang_items().clone_trait() else { return; };
- let ty::Ref(_, pointee_ty, _) = found_ty.kind() else { return };
- let results = self.typeck_results.borrow();
- // First, look for a `Clone::clone` call
- if segment.ident.name == sym::clone
- && results.type_dependent_def_id(expr.hir_id).map_or(
- false,
- |did| {
- let assoc_item = self.tcx.associated_item(did);
- assoc_item.container == ty::AssocItemContainer::TraitContainer
- && assoc_item.container_id(self.tcx) == clone_trait_did
- },
- )
- // If that clone call hasn't already dereferenced the self type (i.e. don't give this
- // diagnostic in cases where we have `(&&T).clone()` and we expect `T`).
- && !results.expr_adjustments(callee_expr).iter().any(|adj| matches!(adj.kind, ty::adjustment::Adjust::Deref(..)))
- // Check that we're in fact trying to clone into the expected type
- && self.can_coerce(*pointee_ty, expected_ty)
- // And the expected type doesn't implement `Clone`
- && !self.predicate_must_hold_considering_regions(&traits::Obligation {
- cause: traits::ObligationCause::dummy(),
- param_env: self.param_env,
- recursion_depth: 0,
- predicate: ty::Binder::dummy(ty::TraitRef {
- def_id: clone_trait_did,
- substs: self.tcx.mk_substs([expected_ty.into()].iter()),
- })
- .without_const()
- .to_predicate(self.tcx),
- })
- {
- diag.span_note(
- callee_expr.span,
- &format!(
- "`{expected_ty}` does not implement `Clone`, so `{found_ty}` was cloned instead"
- ),
- );
- }
- }
-
- /// A common error is to add an extra semicolon:
- ///
- /// ```compile_fail,E0308
- /// fn foo() -> usize {
- /// 22;
- /// }
- /// ```
- ///
- /// This routine checks if the final statement in a block is an
- /// expression with an explicit semicolon whose type is compatible
- /// with `expected_ty`. If so, it suggests removing the semicolon.
- pub(crate) fn consider_removing_semicolon(
- &self,
- blk: &'tcx hir::Block<'tcx>,
- expected_ty: Ty<'tcx>,
- err: &mut Diagnostic,
- ) -> bool {
- if let Some((span_semi, boxed)) = self.could_remove_semicolon(blk, expected_ty) {
- if let StatementAsExpression::NeedsBoxing = boxed {
- err.span_suggestion_verbose(
- span_semi,
- "consider removing this semicolon and boxing the expression",
- "",
- Applicability::HasPlaceholders,
- );
- } else {
- err.span_suggestion_short(
- span_semi,
- "remove this semicolon",
- "",
- Applicability::MachineApplicable,
- );
- }
- true
- } else {
- false
- }
- }
-}
diff --git a/compiler/rustc_typeck/src/check/generator_interior.rs b/compiler/rustc_typeck/src/check/generator_interior.rs
deleted file mode 100644
index d4f800149..000000000
--- a/compiler/rustc_typeck/src/check/generator_interior.rs
+++ /dev/null
@@ -1,632 +0,0 @@
-//! This calculates the types which have storage that lives across a suspension point in a
-//! generator, from the perspective of typeck. The actual types used at runtime
-//! are calculated in `rustc_mir_transform::generator` and may be a subset of the
-//! types computed here.
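-//!
-//! For example (illustrative, hypothetical code):
-//!
-//! ```ignore (illustrative)
-//! async fn foo() {
-//!     let s = String::new();
-//!     bar().await; // `s` is still live here, so `String` is part of the
-//!                  // generator interior computed for `foo`
-//!     drop(s);
-//! }
-//! ```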
-
-use self::drop_ranges::DropRanges;
-use super::FnCtxt;
-use rustc_data_structures::fx::{FxHashSet, FxIndexSet};
-use rustc_errors::pluralize;
-use rustc_hir as hir;
-use rustc_hir::def::{CtorKind, DefKind, Res};
-use rustc_hir::def_id::DefId;
-use rustc_hir::hir_id::HirIdSet;
-use rustc_hir::intravisit::{self, Visitor};
-use rustc_hir::{Arm, Expr, ExprKind, Guard, HirId, Pat, PatKind};
-use rustc_middle::middle::region::{self, Scope, ScopeData, YieldData};
-use rustc_middle::ty::{self, RvalueScopes, Ty, TyCtxt, TypeVisitable};
-use rustc_span::symbol::sym;
-use rustc_span::Span;
-use tracing::debug;
-
-mod drop_ranges;
-
-struct InteriorVisitor<'a, 'tcx> {
- fcx: &'a FnCtxt<'a, 'tcx>,
- region_scope_tree: &'a region::ScopeTree,
- types: FxIndexSet<ty::GeneratorInteriorTypeCause<'tcx>>,
- rvalue_scopes: &'a RvalueScopes,
- expr_count: usize,
- kind: hir::GeneratorKind,
- prev_unresolved_span: Option<Span>,
- linted_values: HirIdSet,
- drop_ranges: DropRanges,
-}
-
-impl<'a, 'tcx> InteriorVisitor<'a, 'tcx> {
- fn record(
- &mut self,
- ty: Ty<'tcx>,
- hir_id: HirId,
- scope: Option<region::Scope>,
- expr: Option<&'tcx Expr<'tcx>>,
- source_span: Span,
- ) {
- use rustc_span::DUMMY_SP;
-
- let ty = self.fcx.resolve_vars_if_possible(ty);
-
- debug!(
- "attempting to record type ty={:?}; hir_id={:?}; scope={:?}; expr={:?}; source_span={:?}; expr_count={:?}",
- ty, hir_id, scope, expr, source_span, self.expr_count,
- );
-
- let live_across_yield = scope
- .map(|s| {
- self.region_scope_tree.yield_in_scope(s).and_then(|yield_data| {
- // If we are recording an expression that is the last yield
- // in the scope, or that has a postorder CFG index larger
- // than the one of all of the yields, then its value can't
- // be storage-live (and therefore live) at any of the yields.
- //
- // See the mega-comment at `yield_in_scope` for a proof.
-
- yield_data
- .iter()
- .find(|yield_data| {
- debug!(
- "comparing counts yield: {} self: {}, source_span = {:?}",
- yield_data.expr_and_pat_count, self.expr_count, source_span
- );
-
- if self.fcx.sess().opts.unstable_opts.drop_tracking
- && self
- .drop_ranges
- .is_dropped_at(hir_id, yield_data.expr_and_pat_count)
- {
- debug!("value is dropped at yield point; not recording");
- return false;
- }
-
-                            // If it is a borrow happening in the guard,
-                            // it needs to be recorded regardless, because it
-                            // does live across this yield point.
- yield_data.expr_and_pat_count >= self.expr_count
- })
- .cloned()
- })
- })
- .unwrap_or_else(|| {
- Some(YieldData { span: DUMMY_SP, expr_and_pat_count: 0, source: self.kind.into() })
- });
-
- if let Some(yield_data) = live_across_yield {
- debug!(
- "type in expr = {:?}, scope = {:?}, type = {:?}, count = {}, yield_span = {:?}",
- expr, scope, ty, self.expr_count, yield_data.span
- );
-
- if let Some((unresolved_type, unresolved_type_span)) =
- self.fcx.unresolved_type_vars(&ty)
- {
- // If unresolved type isn't a ty_var then unresolved_type_span is None
- let span = self
- .prev_unresolved_span
- .unwrap_or_else(|| unresolved_type_span.unwrap_or(source_span));
-
- // If we encounter an int/float variable, then inference fallback didn't
- // finish due to some other error. Don't emit spurious additional errors.
- if let ty::Infer(ty::InferTy::IntVar(_) | ty::InferTy::FloatVar(_)) =
- unresolved_type.kind()
- {
- self.fcx
- .tcx
- .sess
- .delay_span_bug(span, &format!("Encountered var {:?}", unresolved_type));
- } else {
- let note = format!(
- "the type is part of the {} because of this {}",
- self.kind, yield_data.source
- );
-
- self.fcx
- .need_type_info_err_in_generator(self.kind, span, unresolved_type)
- .span_note(yield_data.span, &*note)
- .emit();
- }
- } else {
- // Insert the type into the ordered set.
- let scope_span = scope.map(|s| s.span(self.fcx.tcx, self.region_scope_tree));
-
- if !self.linted_values.contains(&hir_id) {
- check_must_not_suspend_ty(
- self.fcx,
- ty,
- hir_id,
- SuspendCheckData {
- expr,
- source_span,
- yield_span: yield_data.span,
- plural_len: 1,
- ..Default::default()
- },
- );
- self.linted_values.insert(hir_id);
- }
-
- self.types.insert(ty::GeneratorInteriorTypeCause {
- span: source_span,
- ty,
- scope_span,
- yield_span: yield_data.span,
- expr: expr.map(|e| e.hir_id),
- });
- }
- } else {
- debug!(
- "no type in expr = {:?}, count = {:?}, span = {:?}",
- expr,
- self.expr_count,
- expr.map(|e| e.span)
- );
- if let Some((unresolved_type, unresolved_type_span)) =
- self.fcx.unresolved_type_vars(&ty)
- {
- debug!(
- "remained unresolved_type = {:?}, unresolved_type_span: {:?}",
- unresolved_type, unresolved_type_span
- );
- self.prev_unresolved_span = unresolved_type_span;
- }
- }
- }
-}
-
-pub fn resolve_interior<'a, 'tcx>(
- fcx: &'a FnCtxt<'a, 'tcx>,
- def_id: DefId,
- body_id: hir::BodyId,
- interior: Ty<'tcx>,
- kind: hir::GeneratorKind,
-) {
- let body = fcx.tcx.hir().body(body_id);
- let typeck_results = fcx.inh.typeck_results.borrow();
- let mut visitor = InteriorVisitor {
- fcx,
- types: FxIndexSet::default(),
- region_scope_tree: fcx.tcx.region_scope_tree(def_id),
- rvalue_scopes: &typeck_results.rvalue_scopes,
- expr_count: 0,
- kind,
- prev_unresolved_span: None,
- linted_values: <_>::default(),
- drop_ranges: drop_ranges::compute_drop_ranges(fcx, def_id, body),
- };
- intravisit::walk_body(&mut visitor, body);
-
-    // Check that we visited the same number of expressions as the RegionResolutionVisitor.
- let region_expr_count = fcx.tcx.region_scope_tree(def_id).body_expr_count(body_id).unwrap();
- assert_eq!(region_expr_count, visitor.expr_count);
-
- // The types are already kept in insertion order.
- let types = visitor.types;
-
- // The types in the generator interior contain lifetimes local to the generator itself,
- // which should not be exposed outside of the generator. Therefore, we replace these
- // lifetimes with existentially-bound lifetimes, which reflect the exact value of the
- // lifetimes not being known by users.
- //
- // These lifetimes are used in auto trait impl checking (for example,
- // if a Sync generator contains an &'α T, we need to check whether &'α T: Sync),
- // so knowledge of the exact relationships between them isn't particularly important.
-
- debug!("types in generator {:?}, span = {:?}", types, body.value.span);
-
- let mut counter = 0;
- let mut captured_tys = FxHashSet::default();
- let type_causes: Vec<_> = types
- .into_iter()
- .filter_map(|mut cause| {
- // Erase regions and canonicalize late-bound regions to deduplicate as many types as we
- // can.
- let erased = fcx.tcx.erase_regions(cause.ty);
- if captured_tys.insert(erased) {
- // Replace all regions inside the generator interior with late bound regions.
- // Note that each region slot in the types gets a new fresh late bound region,
- // which means that none of the regions inside relate to any other, even if
- // typeck had previously found constraints that would cause them to be related.
- let folded = fcx.tcx.fold_regions(erased, |_, current_depth| {
- let br = ty::BoundRegion {
- var: ty::BoundVar::from_u32(counter),
- kind: ty::BrAnon(counter),
- };
- let r = fcx.tcx.mk_region(ty::ReLateBound(current_depth, br));
- counter += 1;
- r
- });
-
- cause.ty = folded;
- Some(cause)
- } else {
- None
- }
- })
- .collect();
-
- // Extract type components to build the witness type.
- let type_list = fcx.tcx.mk_type_list(type_causes.iter().map(|cause| cause.ty));
- let bound_vars = fcx.tcx.mk_bound_variable_kinds(
- (0..counter).map(|i| ty::BoundVariableKind::Region(ty::BrAnon(i))),
- );
- let witness =
- fcx.tcx.mk_generator_witness(ty::Binder::bind_with_vars(type_list, bound_vars.clone()));
-
- drop(typeck_results);
- // Store the generator types and spans into the typeck results for this generator.
- fcx.inh.typeck_results.borrow_mut().generator_interior_types =
- ty::Binder::bind_with_vars(type_causes, bound_vars);
-
- debug!(
- "types in generator after region replacement {:?}, span = {:?}",
- witness, body.value.span
- );
-
- // Unify the type variable inside the generator with the new witness
- match fcx.at(&fcx.misc(body.value.span), fcx.param_env).eq(interior, witness) {
- Ok(ok) => fcx.register_infer_ok_obligations(ok),
- _ => bug!(),
- }
-}
-
-// This visitor has to have the same visit_expr calls as RegionResolutionVisitor in
-// librustc_middle/middle/region.rs since `expr_count` is compared against the results
-// there.
-impl<'a, 'tcx> Visitor<'tcx> for InteriorVisitor<'a, 'tcx> {
- fn visit_arm(&mut self, arm: &'tcx Arm<'tcx>) {
- let Arm { guard, pat, body, .. } = arm;
- self.visit_pat(pat);
- if let Some(ref g) = guard {
- {
- // If there is a guard, we need to count all variables bound in the pattern as
- // borrowed for the entire guard body, regardless of whether they are accessed.
- // We do this by walking the pattern bindings and recording `&T` for any `x: T`
- // that is bound.
-
- struct ArmPatCollector<'a, 'b, 'tcx> {
- interior_visitor: &'a mut InteriorVisitor<'b, 'tcx>,
- scope: Scope,
- }
-
- impl<'a, 'b, 'tcx> Visitor<'tcx> for ArmPatCollector<'a, 'b, 'tcx> {
- fn visit_pat(&mut self, pat: &'tcx Pat<'tcx>) {
- intravisit::walk_pat(self, pat);
- if let PatKind::Binding(_, id, ident, ..) = pat.kind {
- let ty =
- self.interior_visitor.fcx.typeck_results.borrow().node_type(id);
- let tcx = self.interior_visitor.fcx.tcx;
- let ty = tcx.mk_ref(
- // Use `ReErased` as `resolve_interior` is going to replace all the
- // regions anyway.
- tcx.mk_region(ty::ReErased),
- ty::TypeAndMut { ty, mutbl: hir::Mutability::Not },
- );
- self.interior_visitor.record(
- ty,
- id,
- Some(self.scope),
- None,
- ident.span,
- );
- }
- }
- }
-
- ArmPatCollector {
- interior_visitor: self,
- scope: Scope { id: g.body().hir_id.local_id, data: ScopeData::Node },
- }
- .visit_pat(pat);
- }
-
- match g {
- Guard::If(ref e) => {
- self.visit_expr(e);
- }
- Guard::IfLet(ref l) => {
- self.visit_let_expr(l);
- }
- }
- }
- self.visit_expr(body);
- }
-
- fn visit_pat(&mut self, pat: &'tcx Pat<'tcx>) {
- intravisit::walk_pat(self, pat);
-
- self.expr_count += 1;
-
- if let PatKind::Binding(..) = pat.kind {
- let scope = self.region_scope_tree.var_scope(pat.hir_id.local_id).unwrap();
- let ty = self.fcx.typeck_results.borrow().pat_ty(pat);
- self.record(ty, pat.hir_id, Some(scope), None, pat.span);
- }
- }
-
- fn visit_expr(&mut self, expr: &'tcx Expr<'tcx>) {
- match &expr.kind {
- ExprKind::Call(callee, args) => match &callee.kind {
- ExprKind::Path(qpath) => {
- let res = self.fcx.typeck_results.borrow().qpath_res(qpath, callee.hir_id);
- match res {
- // Direct calls never need to keep the callee `ty::FnDef`
- // ZST in a temporary, so skip its type, just in case it
- // can significantly complicate the generator type.
- Res::Def(
- DefKind::Fn | DefKind::AssocFn | DefKind::Ctor(_, CtorKind::Fn),
- _,
- ) => {
- // NOTE(eddyb) this assumes a path expression has
- // no nested expressions to keep track of.
- self.expr_count += 1;
-
- // Record the rest of the call expression normally.
- for arg in *args {
- self.visit_expr(arg);
- }
- }
- _ => intravisit::walk_expr(self, expr),
- }
- }
- _ => intravisit::walk_expr(self, expr),
- },
- _ => intravisit::walk_expr(self, expr),
- }
-
- self.expr_count += 1;
-
- debug!("is_borrowed_temporary: {:?}", self.drop_ranges.is_borrowed_temporary(expr));
-
- let ty = self.fcx.typeck_results.borrow().expr_ty_adjusted_opt(expr);
- let may_need_drop = |ty: Ty<'tcx>| {
- // Avoid ICEs in needs_drop.
- let ty = self.fcx.resolve_vars_if_possible(ty);
- let ty = self.fcx.tcx.erase_regions(ty);
- if ty.needs_infer() {
- return true;
- }
- ty.needs_drop(self.fcx.tcx, self.fcx.param_env)
- };
-
- // Typically, the value produced by an expression is consumed by its parent in some way,
- // so we only have to check if the parent contains a yield (note that the parent may, for
- // example, store the value into a local variable, but then we already consider local
- // variables to be live across their scope).
- //
- // However, in the case of temporary values, we are going to store the value into a
- // temporary on the stack that is live for the current temporary scope and then return a
- // reference to it. That value may be live across the entire temporary scope.
- //
- // There's another subtlety: if the type has an observable drop, it must be dropped after
- // the yield, even if it's not borrowed or referenced after the yield. Ideally this would
- // *only* happen for types with observable drop, not all types which wrap them, but that
- // doesn't match the behavior of MIR borrowck and causes ICEs. See the FIXME comment in
- // src/test/ui/generator/drop-tracking-parent-expression.rs.
- let scope = if self.drop_ranges.is_borrowed_temporary(expr)
- || ty.map_or(true, |ty| {
- let needs_drop = may_need_drop(ty);
- debug!(?needs_drop, ?ty);
- needs_drop
- }) {
- self.rvalue_scopes.temporary_scope(self.region_scope_tree, expr.hir_id.local_id)
- } else {
- debug!("parent_node: {:?}", self.fcx.tcx.hir().find_parent_node(expr.hir_id));
- match self.fcx.tcx.hir().find_parent_node(expr.hir_id) {
- Some(parent) => Some(Scope { id: parent.local_id, data: ScopeData::Node }),
- None => {
- self.rvalue_scopes.temporary_scope(self.region_scope_tree, expr.hir_id.local_id)
- }
- }
- };
-
- // If there are adjustments, then record the final type --
- // this is the actual value that is being produced.
- if let Some(adjusted_ty) = ty {
- self.record(adjusted_ty, expr.hir_id, scope, Some(expr), expr.span);
- }
-
- // Also record the unadjusted type (which is the only type if
- // there are no adjustments). The reason for this is that the
- // unadjusted value is sometimes a "temporary" that would wind
- // up in a MIR temporary.
- //
- // As an example, consider an expression like `vec![].push(x)`.
- // Here, the `vec![]` would wind up MIR stored into a
- // temporary variable `t` which we can borrow to invoke
- // `<Vec<_>>::push(&mut t, x)`.
- //
- // Note that an expression can have many adjustments, and we
- // are just ignoring those intermediate types. This is because
- // those intermediate values are always linearly "consumed" by
- // the other adjustments, and hence would never be directly
- // captured in the MIR.
- //
- // (Note that this partly relies on the fact that the `Deref`
- // traits always return references, which means their content
- // can be reborrowed without needing to spill to a temporary.
- // If this were not the case, then we could conceivably have
- // to create intermediate temporaries.)
- //
- // The type table might not have information for this expression
- // if it is in a malformed scope. (#66387)
- if let Some(ty) = self.fcx.typeck_results.borrow().expr_ty_opt(expr) {
- self.record(ty, expr.hir_id, scope, Some(expr), expr.span);
- } else {
- self.fcx.tcx.sess.delay_span_bug(expr.span, "no type for node");
- }
- }
-}
-
-#[derive(Default)]
-pub struct SuspendCheckData<'a, 'tcx> {
- expr: Option<&'tcx Expr<'tcx>>,
- source_span: Span,
- yield_span: Span,
- descr_pre: &'a str,
- descr_post: &'a str,
- plural_len: usize,
-}
-
-// Returns whether it emitted a diagnostic or not.
-// Note that this fn and the following one are based on the code
-// for creating `must_use` diagnostics.
-//
-// Note that this technique was chosen over things like a `Suspend` marker trait
-// as it is simpler and has precedent in the compiler.
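-//
-// An illustrative (hypothetical) trigger, assuming the unstable `must_not_suspend`
-// feature and lint are enabled:
-//
-// ```ignore (illustrative)
-// #[must_not_suspend = "holding `Guard` across an await point may deadlock"]
-// struct Guard;
-//
-// async fn f() {
-//     let g = Guard;
-//     other().await; // lint: `Guard` held across a suspend point, but should not be
-//     drop(g);
-// }
-// ```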
-pub fn check_must_not_suspend_ty<'tcx>(
- fcx: &FnCtxt<'_, 'tcx>,
- ty: Ty<'tcx>,
- hir_id: HirId,
- data: SuspendCheckData<'_, 'tcx>,
-) -> bool {
- if ty.is_unit()
-    // FIXME: should this check `is_ty_uninhabited_from`? This query is not available in this stage
- // of typeck (before ReVar and RePlaceholder are removed), but may remove noise, like in
- // `must_use`
- // || fcx.tcx.is_ty_uninhabited_from(fcx.tcx.parent_module(hir_id).to_def_id(), ty, fcx.param_env)
- {
- return false;
- }
-
- let plural_suffix = pluralize!(data.plural_len);
-
- match *ty.kind() {
- ty::Adt(..) if ty.is_box() => {
- let boxed_ty = ty.boxed_ty();
- let descr_pre = &format!("{}boxed ", data.descr_pre);
- check_must_not_suspend_ty(fcx, boxed_ty, hir_id, SuspendCheckData { descr_pre, ..data })
- }
- ty::Adt(def, _) => check_must_not_suspend_def(fcx.tcx, def.did(), hir_id, data),
- // FIXME: support adding the attribute to TAITs
- ty::Opaque(def, _) => {
- let mut has_emitted = false;
- for &(predicate, _) in fcx.tcx.explicit_item_bounds(def) {
- // We only look at the `DefId`, so it is safe to skip the binder here.
- if let ty::PredicateKind::Trait(ref poly_trait_predicate) =
- predicate.kind().skip_binder()
- {
- let def_id = poly_trait_predicate.trait_ref.def_id;
- let descr_pre = &format!("{}implementer{} of ", data.descr_pre, plural_suffix);
- if check_must_not_suspend_def(
- fcx.tcx,
- def_id,
- hir_id,
- SuspendCheckData { descr_pre, ..data },
- ) {
- has_emitted = true;
- break;
- }
- }
- }
- has_emitted
- }
- ty::Dynamic(binder, _) => {
- let mut has_emitted = false;
- for predicate in binder.iter() {
- if let ty::ExistentialPredicate::Trait(ref trait_ref) = predicate.skip_binder() {
- let def_id = trait_ref.def_id;
- let descr_post = &format!(" trait object{}{}", plural_suffix, data.descr_post);
- if check_must_not_suspend_def(
- fcx.tcx,
- def_id,
- hir_id,
- SuspendCheckData { descr_post, ..data },
- ) {
- has_emitted = true;
- break;
- }
- }
- }
- has_emitted
- }
- ty::Tuple(fields) => {
- let mut has_emitted = false;
- let comps = match data.expr.map(|e| &e.kind) {
- Some(hir::ExprKind::Tup(comps)) => {
- debug_assert_eq!(comps.len(), fields.len());
- Some(comps)
- }
- _ => None,
- };
- for (i, ty) in fields.iter().enumerate() {
- let descr_post = &format!(" in tuple element {i}");
- let span = comps.and_then(|c| c.get(i)).map(|e| e.span).unwrap_or(data.source_span);
- if check_must_not_suspend_ty(
- fcx,
- ty,
- hir_id,
- SuspendCheckData {
- descr_post,
- expr: comps.and_then(|comps| comps.get(i)),
- source_span: span,
- ..data
- },
- ) {
- has_emitted = true;
- }
- }
- has_emitted
- }
- ty::Array(ty, len) => {
- let descr_pre = &format!("{}array{} of ", data.descr_pre, plural_suffix);
- check_must_not_suspend_ty(
- fcx,
- ty,
- hir_id,
- SuspendCheckData {
- descr_pre,
- plural_len: len.try_eval_usize(fcx.tcx, fcx.param_env).unwrap_or(0) as usize
- + 1,
- ..data
- },
- )
- }
- _ => false,
- }
-}
-
-fn check_must_not_suspend_def(
- tcx: TyCtxt<'_>,
- def_id: DefId,
- hir_id: HirId,
- data: SuspendCheckData<'_, '_>,
-) -> bool {
- if let Some(attr) = tcx.get_attr(def_id, sym::must_not_suspend) {
- tcx.struct_span_lint_hir(
- rustc_session::lint::builtin::MUST_NOT_SUSPEND,
- hir_id,
- data.source_span,
- |lint| {
- let msg = format!(
- "{}`{}`{} held across a suspend point, but should not be",
- data.descr_pre,
- tcx.def_path_str(def_id),
- data.descr_post,
- );
- let mut err = lint.build(&msg);
-
- // add span pointing to the offending yield/await
- err.span_label(data.yield_span, "the value is held across this suspend point");
-
- // Add optional reason note
- if let Some(note) = attr.value_str() {
- // FIXME(guswynn): consider formatting this better
- err.span_note(data.source_span, note.as_str());
- }
-
- // Add some quick suggestions on what to do
- // FIXME: can `drop` work as a suggestion here as well?
- err.span_help(
- data.source_span,
- "consider using a block (`{ ... }`) \
- to shrink the value's scope, ending before the suspend point",
- );
-
- err.emit();
- },
- );
-
- true
- } else {
- false
- }
-}
diff --git a/compiler/rustc_typeck/src/check/generator_interior/drop_ranges.rs b/compiler/rustc_typeck/src/check/generator_interior/drop_ranges.rs
deleted file mode 100644
index 518cd7342..000000000
--- a/compiler/rustc_typeck/src/check/generator_interior/drop_ranges.rs
+++ /dev/null
@@ -1,309 +0,0 @@
-//! Drop range analysis finds the portions of the tree where a value is guaranteed to be dropped
-//! (i.e. moved, uninitialized, etc.). This is used to exclude the types of those values from the
-//! generator type. See `InteriorVisitor::record` for where the results of this analysis are used.
-//!
-//! There are three phases to this analysis:
-//! 1. Use `ExprUseVisitor` to identify the interesting values that are consumed and borrowed.
-//! 2. Use `DropRangeVisitor` to find where the interesting values are dropped or reinitialized,
-//! and also build a control flow graph.
-//! 3. Use `DropRanges::propagate_to_fixpoint` to flow the dropped/reinitialized information through
-//! the CFG and find the exact points where we know a value is definitely dropped.
-//!
-//! The end result is a data structure that maps the post-order index of each node in the HIR tree
-//! to a set of values that are known to be dropped at that location.
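-//!
-//! For example (illustrative, hypothetical code):
-//!
-//! ```ignore (illustrative)
-//! async fn foo() {
-//!     let s = String::new();
-//!     drop(s);      // from this point on, `s` is definitely dropped ...
-//!     bar().await;  // ... so `String` can be excluded from the generator type
-//! }
-//! ```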
-
-use self::cfg_build::build_control_flow_graph;
-use self::record_consumed_borrow::find_consumed_and_borrowed;
-use crate::check::FnCtxt;
-use hir::def_id::DefId;
-use hir::{Body, HirId, HirIdMap, Node};
-use rustc_data_structures::fx::{FxHashMap, FxHashSet};
-use rustc_hir as hir;
-use rustc_index::bit_set::BitSet;
-use rustc_index::vec::IndexVec;
-use rustc_middle::hir::map::Map;
-use rustc_middle::hir::place::{PlaceBase, PlaceWithHirId};
-use rustc_middle::ty;
-use std::collections::BTreeMap;
-use std::fmt::Debug;
-
-mod cfg_build;
-mod cfg_propagate;
-mod cfg_visualize;
-mod record_consumed_borrow;
-
-pub fn compute_drop_ranges<'a, 'tcx>(
- fcx: &'a FnCtxt<'a, 'tcx>,
- def_id: DefId,
- body: &'tcx Body<'tcx>,
-) -> DropRanges {
- if fcx.sess().opts.unstable_opts.drop_tracking {
- let consumed_borrowed_places = find_consumed_and_borrowed(fcx, def_id, body);
-
- let typeck_results = &fcx.typeck_results.borrow();
- let num_exprs = fcx.tcx.region_scope_tree(def_id).body_expr_count(body.id()).unwrap_or(0);
- let (mut drop_ranges, borrowed_temporaries) = build_control_flow_graph(
- fcx.tcx.hir(),
- fcx.tcx,
- typeck_results,
- consumed_borrowed_places,
- body,
- num_exprs,
- );
-
- drop_ranges.propagate_to_fixpoint();
-
- debug!("borrowed_temporaries = {borrowed_temporaries:?}");
- DropRanges {
- tracked_value_map: drop_ranges.tracked_value_map,
- nodes: drop_ranges.nodes,
- borrowed_temporaries: Some(borrowed_temporaries),
- }
- } else {
- // If drop range tracking is not enabled, skip all the analysis and produce an
- // empty set of DropRanges.
- DropRanges {
- tracked_value_map: FxHashMap::default(),
- nodes: IndexVec::new(),
- borrowed_temporaries: None,
- }
- }
-}
-
-/// Applies `f` to each consumable node in the HIR subtree pointed to by `place`.
-///
-/// This includes the place itself, and if the place is a reference to a local
-/// variable then `f` is also called on the HIR node for that variable as well.
-///
-/// For example, if `place` points to `foo()`, then `f` is called once for the
-/// result of `foo`. On the other hand, if `place` points to `x` then `f` will
-/// be called both on the `ExprKind::Path` node that represents the expression
-/// as well as the HirId of the local `x` itself.
-fn for_each_consumable<'tcx>(hir: Map<'tcx>, place: TrackedValue, mut f: impl FnMut(TrackedValue)) {
- f(place);
- let node = hir.find(place.hir_id());
- if let Some(Node::Expr(expr)) = node {
- match expr.kind {
- hir::ExprKind::Path(hir::QPath::Resolved(
- _,
- hir::Path { res: hir::def::Res::Local(hir_id), .. },
- )) => {
- f(TrackedValue::Variable(*hir_id));
- }
- _ => (),
- }
- }
-}
-
-rustc_index::newtype_index! {
- pub struct PostOrderId {
- DEBUG_FORMAT = "id({})",
- }
-}
-
-rustc_index::newtype_index! {
- pub struct TrackedValueIndex {
- DEBUG_FORMAT = "hidx({})",
- }
-}
-
-/// Identifies a value whose drop state we need to track.
-#[derive(PartialEq, Eq, Hash, Clone, Copy)]
-enum TrackedValue {
- /// Represents a named variable, such as a let binding, parameter, or upvar.
- ///
- /// The HirId points to the variable's definition site.
- Variable(HirId),
- /// A value produced as a result of an expression.
- ///
- /// The HirId points to the expression that returns this value.
- Temporary(HirId),
-}
-
-impl Debug for TrackedValue {
- fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
- ty::tls::with_opt(|opt_tcx| {
- if let Some(tcx) = opt_tcx {
- write!(f, "{}", tcx.hir().node_to_string(self.hir_id()))
- } else {
- match self {
- Self::Variable(hir_id) => write!(f, "Variable({:?})", hir_id),
- Self::Temporary(hir_id) => write!(f, "Temporary({:?})", hir_id),
- }
- }
- })
- }
-}
-
-impl TrackedValue {
- fn hir_id(&self) -> HirId {
- match self {
- TrackedValue::Variable(hir_id) | TrackedValue::Temporary(hir_id) => *hir_id,
- }
- }
-
- fn from_place_with_projections_allowed(place_with_id: &PlaceWithHirId<'_>) -> Self {
- match place_with_id.place.base {
- PlaceBase::Rvalue | PlaceBase::StaticItem => {
- TrackedValue::Temporary(place_with_id.hir_id)
- }
- PlaceBase::Local(hir_id)
- | PlaceBase::Upvar(ty::UpvarId { var_path: ty::UpvarPath { hir_id }, .. }) => {
- TrackedValue::Variable(hir_id)
- }
- }
- }
-}
-
-/// Represents a reason why we might not be able to convert a HirId or Place
-/// into a tracked value.
-#[derive(Debug)]
-enum TrackedValueConversionError {
-    /// Place projections are not currently supported.
-    ///
-    /// The reasoning around these is somewhat subtle, so we choose to be more
-    /// conservative for now. There is no reason in theory we cannot support
-    /// them; we just have not implemented it yet.
- PlaceProjectionsNotSupported,
-}
-
-impl TryFrom<&PlaceWithHirId<'_>> for TrackedValue {
- type Error = TrackedValueConversionError;
-
- fn try_from(place_with_id: &PlaceWithHirId<'_>) -> Result<Self, Self::Error> {
- if !place_with_id.place.projections.is_empty() {
- debug!(
- "TrackedValue from PlaceWithHirId: {:?} has projections, which are not supported.",
- place_with_id
- );
- return Err(TrackedValueConversionError::PlaceProjectionsNotSupported);
- }
-
- Ok(TrackedValue::from_place_with_projections_allowed(place_with_id))
- }
-}
-
-pub struct DropRanges {
- tracked_value_map: FxHashMap<TrackedValue, TrackedValueIndex>,
- nodes: IndexVec<PostOrderId, NodeInfo>,
- borrowed_temporaries: Option<FxHashSet<HirId>>,
-}
-
-impl DropRanges {
- pub fn is_dropped_at(&self, hir_id: HirId, location: usize) -> bool {
- self.tracked_value_map
- .get(&TrackedValue::Temporary(hir_id))
- .or(self.tracked_value_map.get(&TrackedValue::Variable(hir_id)))
- .cloned()
- .map_or(false, |tracked_value_id| {
- self.expect_node(location.into()).drop_state.contains(tracked_value_id)
- })
- }
-
- pub fn is_borrowed_temporary(&self, expr: &hir::Expr<'_>) -> bool {
- if let Some(b) = &self.borrowed_temporaries { b.contains(&expr.hir_id) } else { true }
- }
-
- /// Returns a reference to the NodeInfo for a node, panicking if it does not exist
- fn expect_node(&self, id: PostOrderId) -> &NodeInfo {
- &self.nodes[id]
- }
-}
-
-/// Tracks information needed to compute drop ranges.
-struct DropRangesBuilder {
- /// The core of DropRangesBuilder is a set of nodes, which each represent
- /// one expression. We primarily refer to them by their index in a
- /// post-order traversal of the HIR tree, since this is what
- /// generator_interior uses to talk about yield positions.
- ///
- /// This IndexVec keeps the relevant details for each node. See the
- /// NodeInfo struct for more details, but this information includes things
- /// such as the set of control-flow successors, which variables are dropped
- /// or reinitialized, and whether each variable has been inferred to be
- /// known-dropped or potentially reinitialized at each point.
- nodes: IndexVec<PostOrderId, NodeInfo>,
-    /// We refer to values whose drop state we are tracking by the HirId of
-    /// where they are defined. Within a NodeInfo, however, we store the
-    /// drop state in a bit vector indexed by a TrackedValueIndex
-    /// (see NodeInfo::drop_state). The tracked_value_map field stores the
-    /// mapping from TrackedValues to the TrackedValueIndex that is used to
-    /// represent that value in the bit vector.
- tracked_value_map: FxHashMap<TrackedValue, TrackedValueIndex>,
-
- /// When building the control flow graph, we don't always know the
- /// post-order index of the target node at the point we encounter it.
- /// For example, this happens with break and continue. In those cases,
- /// we store a pair of the PostOrderId of the source and the HirId
- /// of the target. Once we have gathered all of these edges, we make a
- /// pass over the set of deferred edges (see process_deferred_edges in
- /// cfg_build.rs), look up the PostOrderId for the target (since now the
- /// post-order index for all nodes is known), and add missing control flow
- /// edges.
- deferred_edges: Vec<(PostOrderId, HirId)>,
- /// This maps HirIds of expressions to their post-order index. It is
- /// used in process_deferred_edges to correctly add back-edges.
- post_order_map: HirIdMap<PostOrderId>,
-}
-
-impl Debug for DropRangesBuilder {
- fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
- f.debug_struct("DropRanges")
- .field("hir_id_map", &self.tracked_value_map)
- .field("post_order_maps", &self.post_order_map)
- .field("nodes", &self.nodes.iter_enumerated().collect::<BTreeMap<_, _>>())
- .finish()
- }
-}
-
-/// DropRanges keeps track of what values are definitely dropped at each point in the code.
-///
-/// Values of interest are defined by the hir_id of their place. Locations in code are identified
-/// by their index in the post-order traversal. At its core, DropRanges maps
-/// (hir_id, post_order_id) -> bool, where a true value indicates that the value is definitely
-/// dropped at the point of the node identified by post_order_id.
-impl DropRangesBuilder {
- /// Returns the number of values (hir_ids) that are tracked
- fn num_values(&self) -> usize {
- self.tracked_value_map.len()
- }
-
- fn node_mut(&mut self, id: PostOrderId) -> &mut NodeInfo {
- let size = self.num_values();
- self.nodes.ensure_contains_elem(id, || NodeInfo::new(size));
- &mut self.nodes[id]
- }
-
- fn add_control_edge(&mut self, from: PostOrderId, to: PostOrderId) {
- trace!("adding control edge from {:?} to {:?}", from, to);
- self.node_mut(from).successors.push(to);
- }
-}
-
-#[derive(Debug)]
-struct NodeInfo {
- /// IDs of nodes that can follow this one in the control flow
- ///
- /// If the vec is empty, then control proceeds to the next node.
- successors: Vec<PostOrderId>,
-
- /// List of hir_ids that are dropped by this node.
- drops: Vec<TrackedValueIndex>,
-
- /// List of hir_ids that are reinitialized by this node.
- reinits: Vec<TrackedValueIndex>,
-
- /// Set of values that are definitely dropped at this point.
- drop_state: BitSet<TrackedValueIndex>,
-}
-
-impl NodeInfo {
- fn new(num_values: usize) -> Self {
- Self {
- successors: vec![],
- drops: vec![],
- reinits: vec![],
- drop_state: BitSet::new_filled(num_values),
- }
- }
-}
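
As a reading aid for the layout described above (tracked values mapped to dense indices, one bit vector of definitely-dropped flags per post-order node), here is a small self-contained sketch in plain Rust. It substitutes std collections for the compiler's `IndexVec`/`BitSet`, assumes straight-line control flow, and ignores reinits and CFG propagation; every name in it is illustrative.

use std::collections::HashMap;

// Tracked values (stand-ins for HirIds) are mapped to dense indices so that
// each node's drop state can be a simple bit vector indexed by that index.
struct DropRangesSketch {
    value_index: HashMap<&'static str, usize>,
    // drop_state[post_order_id][value_index] == true means "definitely dropped".
    drop_state: Vec<Vec<bool>>,
}

impl DropRangesSketch {
    fn new(values: &[&'static str], num_nodes: usize) -> Self {
        let value_index = values.iter().enumerate().map(|(i, v)| (*v, i)).collect();
        DropRangesSketch { value_index, drop_state: vec![vec![false; values.len()]; num_nodes] }
    }

    // Straight-line simplification: a drop at one node stays in effect for
    // every later node (the real builder propagates along CFG edges instead).
    fn record_drop(&mut self, post_order_id: usize, value: &str) {
        let idx = self.value_index[value];
        for state in &mut self.drop_state[post_order_id..] {
            state[idx] = true;
        }
    }

    fn is_dropped_at(&self, value: &str, post_order_id: usize) -> bool {
        self.drop_state[post_order_id][self.value_index[value]]
    }
}

fn main() {
    let mut ranges = DropRangesSketch::new(&["x", "y"], 4);
    ranges.record_drop(2, "x");
    assert!(!ranges.is_dropped_at("x", 1));
    assert!(ranges.is_dropped_at("x", 3));
    assert!(!ranges.is_dropped_at("y", 3));
}
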
diff --git a/compiler/rustc_typeck/src/check/inherited.rs b/compiler/rustc_typeck/src/check/inherited.rs
deleted file mode 100644
index cd152eb97..000000000
--- a/compiler/rustc_typeck/src/check/inherited.rs
+++ /dev/null
@@ -1,183 +0,0 @@
-use super::callee::DeferredCallResolution;
-
-use rustc_data_structures::fx::FxHashSet;
-use rustc_hir as hir;
-use rustc_hir::def_id::LocalDefId;
-use rustc_hir::HirIdMap;
-use rustc_infer::infer;
-use rustc_infer::infer::{InferCtxt, InferOk, TyCtxtInferExt};
-use rustc_middle::ty::fold::TypeFoldable;
-use rustc_middle::ty::visit::TypeVisitable;
-use rustc_middle::ty::{self, Ty, TyCtxt};
-use rustc_span::def_id::LocalDefIdMap;
-use rustc_span::{self, Span};
-use rustc_trait_selection::infer::InferCtxtExt as _;
-use rustc_trait_selection::traits::{self, ObligationCause, TraitEngine, TraitEngineExt};
-
-use std::cell::RefCell;
-use std::ops::Deref;
-
-/// The typing state shared by a function and the closures defined within it. For example:
-/// ```ignore (illustrative)
-/// fn foo() {
-/// bar(move|| { ... })
-/// }
-/// ```
-/// Here, the function `foo()` and the closure passed to
-/// `bar()` will each have their own `FnCtxt`, but they will
-/// share the inherited fields.
-pub struct Inherited<'a, 'tcx> {
- pub(super) infcx: InferCtxt<'a, 'tcx>,
-
- pub(super) typeck_results: &'a RefCell<ty::TypeckResults<'tcx>>,
-
- pub(super) locals: RefCell<HirIdMap<super::LocalTy<'tcx>>>,
-
- pub(super) fulfillment_cx: RefCell<Box<dyn TraitEngine<'tcx>>>,
-
- // Some additional `Sized` obligations badly affect type inference.
- // These obligations are added in a later stage of typeck.
- pub(super) deferred_sized_obligations:
- RefCell<Vec<(Ty<'tcx>, Span, traits::ObligationCauseCode<'tcx>)>>,
-
- // When we process a call like `c()` where `c` is a closure type,
- // we may not have decided yet whether `c` is a `Fn`, `FnMut`, or
- // `FnOnce` closure. In that case, we defer full resolution of the
- // call until upvar inference can kick in and make the
- // decision. We keep these deferred resolutions grouped by the
- // def-id of the closure, so that once we decide, we can easily go
- // back and process them.
- pub(super) deferred_call_resolutions: RefCell<LocalDefIdMap<Vec<DeferredCallResolution<'tcx>>>>,
-
- pub(super) deferred_cast_checks: RefCell<Vec<super::cast::CastCheck<'tcx>>>,
-
- pub(super) deferred_transmute_checks: RefCell<Vec<(Ty<'tcx>, Ty<'tcx>, Span)>>,
-
- pub(super) deferred_asm_checks: RefCell<Vec<(&'tcx hir::InlineAsm<'tcx>, hir::HirId)>>,
-
- pub(super) deferred_generator_interiors:
- RefCell<Vec<(hir::BodyId, Ty<'tcx>, hir::GeneratorKind)>>,
-
- pub(super) body_id: Option<hir::BodyId>,
-
- /// Whenever we introduce an adjustment from `!` into a type variable,
- /// we record that type variable here. This is later used to inform
- /// fallback. See the `fallback` module for details.
- pub(super) diverging_type_vars: RefCell<FxHashSet<Ty<'tcx>>>,
-}
-
-impl<'a, 'tcx> Deref for Inherited<'a, 'tcx> {
- type Target = InferCtxt<'a, 'tcx>;
- fn deref(&self) -> &Self::Target {
- &self.infcx
- }
-}
-
-/// A temporary returned by `Inherited::build(...)`. This is necessary
-/// for multiple `InferCtxt` to share the same `in_progress_typeck_results`
-/// without using `Rc` or something similar.
-pub struct InheritedBuilder<'tcx> {
- infcx: infer::InferCtxtBuilder<'tcx>,
- def_id: LocalDefId,
-}
-
-impl<'tcx> Inherited<'_, 'tcx> {
- pub fn build(tcx: TyCtxt<'tcx>, def_id: LocalDefId) -> InheritedBuilder<'tcx> {
- let hir_owner = tcx.hir().local_def_id_to_hir_id(def_id).owner;
-
- InheritedBuilder {
- infcx: tcx
- .infer_ctxt()
- .ignoring_regions()
- .with_fresh_in_progress_typeck_results(hir_owner),
- def_id,
- }
- }
-}
-
-impl<'tcx> InheritedBuilder<'tcx> {
- pub fn enter<F, R>(&mut self, f: F) -> R
- where
- F: for<'a> FnOnce(Inherited<'a, 'tcx>) -> R,
- {
- let def_id = self.def_id;
- self.infcx.enter(|infcx| f(Inherited::new(infcx, def_id)))
- }
-}
-
-impl<'a, 'tcx> Inherited<'a, 'tcx> {
- fn new(infcx: InferCtxt<'a, 'tcx>, def_id: LocalDefId) -> Self {
- let tcx = infcx.tcx;
- let body_id = tcx.hir().maybe_body_owned_by(def_id);
- let typeck_results =
- infcx.in_progress_typeck_results.expect("building `FnCtxt` without typeck results");
-
- Inherited {
- typeck_results,
- infcx,
- fulfillment_cx: RefCell::new(<dyn TraitEngine<'_>>::new(tcx)),
- locals: RefCell::new(Default::default()),
- deferred_sized_obligations: RefCell::new(Vec::new()),
- deferred_call_resolutions: RefCell::new(Default::default()),
- deferred_cast_checks: RefCell::new(Vec::new()),
- deferred_transmute_checks: RefCell::new(Vec::new()),
- deferred_asm_checks: RefCell::new(Vec::new()),
- deferred_generator_interiors: RefCell::new(Vec::new()),
- diverging_type_vars: RefCell::new(Default::default()),
- body_id,
- }
- }
-
- #[instrument(level = "debug", skip(self))]
- pub(super) fn register_predicate(&self, obligation: traits::PredicateObligation<'tcx>) {
- if obligation.has_escaping_bound_vars() {
- span_bug!(obligation.cause.span, "escaping bound vars in predicate {:?}", obligation);
- }
- self.fulfillment_cx.borrow_mut().register_predicate_obligation(self, obligation);
- }
-
- pub(super) fn register_predicates<I>(&self, obligations: I)
- where
- I: IntoIterator<Item = traits::PredicateObligation<'tcx>>,
- {
- for obligation in obligations {
- self.register_predicate(obligation);
- }
- }
-
- pub(super) fn register_infer_ok_obligations<T>(&self, infer_ok: InferOk<'tcx, T>) -> T {
- self.register_predicates(infer_ok.obligations);
- infer_ok.value
- }
-
- pub(super) fn normalize_associated_types_in<T>(
- &self,
- span: Span,
- body_id: hir::HirId,
- param_env: ty::ParamEnv<'tcx>,
- value: T,
- ) -> T
- where
- T: TypeFoldable<'tcx>,
- {
- self.normalize_associated_types_in_with_cause(
- ObligationCause::misc(span, body_id),
- param_env,
- value,
- )
- }
-
- pub(super) fn normalize_associated_types_in_with_cause<T>(
- &self,
- cause: ObligationCause<'tcx>,
- param_env: ty::ParamEnv<'tcx>,
- value: T,
- ) -> T
- where
- T: TypeFoldable<'tcx>,
- {
- let ok = self.partially_normalize_associated_types_in(cause, param_env, value);
- debug!(?ok);
- self.register_infer_ok_obligations(ok)
- }
-}
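
A minimal sketch of the sharing pattern this file implements, using illustrative names rather than the compiler's API: each body gets its own lightweight context, all contexts borrow one shared per-body state, and `Deref` lets the lightweight context be used wherever the shared state is expected.

use std::cell::RefCell;
use std::ops::Deref;

// Stand-in for the shared, per-body state (typeck results, deferred checks, ...).
struct InheritedSketch {
    recorded_types: RefCell<Vec<(&'static str, &'static str)>>,
}

// Stand-in for the per-body context; one is created for the function and one
// for each closure, but all of them borrow the same InheritedSketch.
struct FnCtxtSketch<'a> {
    body_name: &'static str,
    inh: &'a InheritedSketch,
}

impl<'a> Deref for FnCtxtSketch<'a> {
    type Target = InheritedSketch;
    fn deref(&self) -> &InheritedSketch {
        self.inh
    }
}

impl<'a> FnCtxtSketch<'a> {
    fn write_ty(&self, expr: &'static str, ty: &'static str) {
        // Field access goes through Deref into the shared state.
        self.recorded_types.borrow_mut().push((expr, ty));
        let _ = self.body_name;
    }
}

fn main() {
    let inh = InheritedSketch { recorded_types: RefCell::new(Vec::new()) };
    // `foo` and the closure passed to `bar` each get their own context...
    let foo_ctxt = FnCtxtSketch { body_name: "foo", inh: &inh };
    let closure_ctxt = FnCtxtSketch { body_name: "closure", inh: &inh };
    foo_ctxt.write_ty("x", "i32");
    closure_ctxt.write_ty("y", "bool");
    // ...but both record into the same shared, inherited table.
    assert_eq!(inh.recorded_types.borrow().len(), 2);
}
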
diff --git a/compiler/rustc_typeck/src/check/intrinsicck.rs b/compiler/rustc_typeck/src/check/intrinsicck.rs
deleted file mode 100644
index df94abbaf..000000000
--- a/compiler/rustc_typeck/src/check/intrinsicck.rs
+++ /dev/null
@@ -1,530 +0,0 @@
-use rustc_ast::InlineAsmTemplatePiece;
-use rustc_data_structures::fx::FxHashSet;
-use rustc_errors::struct_span_err;
-use rustc_hir as hir;
-use rustc_index::vec::Idx;
-use rustc_middle::ty::layout::{LayoutError, SizeSkeleton};
-use rustc_middle::ty::{self, Article, FloatTy, IntTy, Ty, TyCtxt, TypeVisitable, UintTy};
-use rustc_session::lint;
-use rustc_span::{Span, Symbol, DUMMY_SP};
-use rustc_target::abi::{Pointer, VariantIdx};
-use rustc_target::asm::{InlineAsmReg, InlineAsmRegClass, InlineAsmRegOrRegClass, InlineAsmType};
-use rustc_trait_selection::infer::InferCtxtExt;
-
-use super::FnCtxt;
-
-/// If the type is `Option<T>`, it will return `T`, otherwise
-/// the type itself. Works on most `Option`-like types.
-fn unpack_option_like<'tcx>(tcx: TyCtxt<'tcx>, ty: Ty<'tcx>) -> Ty<'tcx> {
- let ty::Adt(def, substs) = *ty.kind() else { return ty };
-
- if def.variants().len() == 2 && !def.repr().c() && def.repr().int.is_none() {
- let data_idx;
-
- let one = VariantIdx::new(1);
- let zero = VariantIdx::new(0);
-
- if def.variant(zero).fields.is_empty() {
- data_idx = one;
- } else if def.variant(one).fields.is_empty() {
- data_idx = zero;
- } else {
- return ty;
- }
-
- if def.variant(data_idx).fields.len() == 1 {
- return def.variant(data_idx).fields[0].ty(tcx, substs);
- }
- }
-
- ty
-}
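
For readers unfamiliar with the shape `unpack_option_like` recognizes, a small user-level illustration with a hypothetical type:

// A two-variant enum with no explicit repr where one variant is empty and
// the other carries exactly one field. `Option<fn()>` and `MaybeHandle`
// both qualify, and the "unpacked" type in each case is `fn()`.
enum MaybeHandle {
    Missing,
    Present(fn()),
}

fn noop() {}

fn main() {
    let handles = [MaybeHandle::Missing, MaybeHandle::Present(noop)];
    for h in handles {
        if let MaybeHandle::Present(f) = h {
            f();
        }
    }
}
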
-
-impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
- pub fn check_transmute(&self, span: Span, from: Ty<'tcx>, to: Ty<'tcx>) {
- let convert = |ty: Ty<'tcx>| {
- let ty = self.resolve_vars_if_possible(ty);
- let ty = self.tcx.normalize_erasing_regions(self.param_env, ty);
- (SizeSkeleton::compute(ty, self.tcx, self.param_env), ty)
- };
- let (sk_from, from) = convert(from);
- let (sk_to, to) = convert(to);
-
- // Check for same size using the skeletons.
- if let (Ok(sk_from), Ok(sk_to)) = (sk_from, sk_to) {
- if sk_from.same_size(sk_to) {
- return;
- }
-
- // Special-case transmuting from `typeof(function)` and
- // `Option<typeof(function)>` to present a clearer error.
- let from = unpack_option_like(self.tcx, from);
- if let (&ty::FnDef(..), SizeSkeleton::Known(size_to)) = (from.kind(), sk_to) && size_to == Pointer.size(&self.tcx) {
- struct_span_err!(self.tcx.sess, span, E0591, "can't transmute zero-sized type")
- .note(&format!("source type: {from}"))
- .note(&format!("target type: {to}"))
- .help("cast with `as` to a pointer instead")
- .emit();
- return;
- }
- }
-
- // Try to display a sensible error with as much information as possible.
- let skeleton_string = |ty: Ty<'tcx>, sk| match sk {
- Ok(SizeSkeleton::Known(size)) => format!("{} bits", size.bits()),
- Ok(SizeSkeleton::Pointer { tail, .. }) => format!("pointer to `{tail}`"),
- Err(LayoutError::Unknown(bad)) => {
- if bad == ty {
- "this type does not have a fixed size".to_owned()
- } else {
- format!("size can vary because of {bad}")
- }
- }
- Err(err) => err.to_string(),
- };
-
- let mut err = struct_span_err!(
- self.tcx.sess,
- span,
- E0512,
- "cannot transmute between types of different sizes, \
- or dependently-sized types"
- );
- if from == to {
- err.note(&format!("`{from}` does not have a fixed size"));
- } else {
- err.note(&format!("source type: `{}` ({})", from, skeleton_string(from, sk_from)))
- .note(&format!("target type: `{}` ({})", to, skeleton_string(to, sk_to)));
- }
- err.emit();
- }
-
- // FIXME(compiler-errors): This could use `<$ty as Pointee>::Metadata == ()`
- fn is_thin_ptr_ty(&self, ty: Ty<'tcx>) -> bool {
- // Type still may have region variables, but `Sized` does not depend
- // on those, so just erase them before querying.
- if self.tcx.erase_regions(ty).is_sized(self.tcx.at(DUMMY_SP), self.param_env) {
- return true;
- }
- if let ty::Foreign(..) = ty.kind() {
- return true;
- }
- false
- }
-}
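
At the user level, the check above is what turns a mis-sized transmute into E0512; a minimal example that intentionally fails to compile:

// Deliberately does not compile: the sizes differ, so the transmute check
// reports E0512 ("cannot transmute between types of different sizes, or
// dependently-sized types"), noting the source and target sizes.
fn main() {
    let x: u32 = 1;
    let _y: u64 = unsafe { std::mem::transmute(x) }; // error[E0512]
}
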
-
-pub struct InlineAsmCtxt<'a, 'tcx> {
- tcx: TyCtxt<'tcx>,
- fcx: Option<&'a FnCtxt<'a, 'tcx>>,
-}
-
-impl<'a, 'tcx> InlineAsmCtxt<'a, 'tcx> {
- pub fn new_global_asm(tcx: TyCtxt<'tcx>) -> Self {
- InlineAsmCtxt { tcx, fcx: None }
- }
-
- pub fn new_in_fn(fcx: &'a FnCtxt<'a, 'tcx>) -> Self {
- InlineAsmCtxt { tcx: fcx.tcx, fcx: Some(fcx) }
- }
-
- fn check_asm_operand_type(
- &self,
- idx: usize,
- reg: InlineAsmRegOrRegClass,
- expr: &hir::Expr<'tcx>,
- template: &[InlineAsmTemplatePiece],
- is_input: bool,
- tied_input: Option<(&hir::Expr<'tcx>, Option<InlineAsmType>)>,
- target_features: &FxHashSet<Symbol>,
- ) -> Option<InlineAsmType> {
- let fcx = self.fcx.unwrap_or_else(|| span_bug!(expr.span, "asm operand for global asm"));
- // Check the type against the allowed types for inline asm.
- let ty = fcx.typeck_results.borrow().expr_ty_adjusted(expr);
- let ty = fcx.resolve_vars_if_possible(ty);
- let asm_ty_isize = match self.tcx.sess.target.pointer_width {
- 16 => InlineAsmType::I16,
- 32 => InlineAsmType::I32,
- 64 => InlineAsmType::I64,
- _ => unreachable!(),
- };
-
- // Expect types to be fully resolved, no const or type variables.
- if ty.has_infer_types_or_consts() {
- assert!(fcx.is_tainted_by_errors());
- return None;
- }
-
- let asm_ty = match *ty.kind() {
- // `!` is allowed for input but not for output (issue #87802)
- ty::Never if is_input => return None,
- ty::Error(_) => return None,
- ty::Int(IntTy::I8) | ty::Uint(UintTy::U8) => Some(InlineAsmType::I8),
- ty::Int(IntTy::I16) | ty::Uint(UintTy::U16) => Some(InlineAsmType::I16),
- ty::Int(IntTy::I32) | ty::Uint(UintTy::U32) => Some(InlineAsmType::I32),
- ty::Int(IntTy::I64) | ty::Uint(UintTy::U64) => Some(InlineAsmType::I64),
- ty::Int(IntTy::I128) | ty::Uint(UintTy::U128) => Some(InlineAsmType::I128),
- ty::Int(IntTy::Isize) | ty::Uint(UintTy::Usize) => Some(asm_ty_isize),
- ty::Float(FloatTy::F32) => Some(InlineAsmType::F32),
- ty::Float(FloatTy::F64) => Some(InlineAsmType::F64),
- ty::FnPtr(_) => Some(asm_ty_isize),
- ty::RawPtr(ty::TypeAndMut { ty, mutbl: _ }) if fcx.is_thin_ptr_ty(ty) => {
- Some(asm_ty_isize)
- }
- ty::Adt(adt, substs) if adt.repr().simd() => {
- let fields = &adt.non_enum_variant().fields;
- let elem_ty = fields[0].ty(self.tcx, substs);
- match elem_ty.kind() {
- ty::Never | ty::Error(_) => return None,
- ty::Int(IntTy::I8) | ty::Uint(UintTy::U8) => {
- Some(InlineAsmType::VecI8(fields.len() as u64))
- }
- ty::Int(IntTy::I16) | ty::Uint(UintTy::U16) => {
- Some(InlineAsmType::VecI16(fields.len() as u64))
- }
- ty::Int(IntTy::I32) | ty::Uint(UintTy::U32) => {
- Some(InlineAsmType::VecI32(fields.len() as u64))
- }
- ty::Int(IntTy::I64) | ty::Uint(UintTy::U64) => {
- Some(InlineAsmType::VecI64(fields.len() as u64))
- }
- ty::Int(IntTy::I128) | ty::Uint(UintTy::U128) => {
- Some(InlineAsmType::VecI128(fields.len() as u64))
- }
- ty::Int(IntTy::Isize) | ty::Uint(UintTy::Usize) => {
- Some(match self.tcx.sess.target.pointer_width {
- 16 => InlineAsmType::VecI16(fields.len() as u64),
- 32 => InlineAsmType::VecI32(fields.len() as u64),
- 64 => InlineAsmType::VecI64(fields.len() as u64),
- _ => unreachable!(),
- })
- }
- ty::Float(FloatTy::F32) => Some(InlineAsmType::VecF32(fields.len() as u64)),
- ty::Float(FloatTy::F64) => Some(InlineAsmType::VecF64(fields.len() as u64)),
- _ => None,
- }
- }
- ty::Infer(_) => unreachable!(),
- _ => None,
- };
- let Some(asm_ty) = asm_ty else {
- let msg = &format!("cannot use value of type `{ty}` for inline assembly");
- let mut err = self.tcx.sess.struct_span_err(expr.span, msg);
- err.note(
- "only integers, floats, SIMD vectors, pointers and function pointers \
- can be used as arguments for inline assembly",
- );
- err.emit();
- return None;
- };
-
- // Check that the type implements Copy. The only case where this can
- // possibly fail is for SIMD types which don't #[derive(Copy)].
- if !fcx.infcx.type_is_copy_modulo_regions(fcx.param_env, ty, DUMMY_SP) {
- let msg = "arguments for inline assembly must be copyable";
- let mut err = self.tcx.sess.struct_span_err(expr.span, msg);
- err.note(&format!("`{ty}` does not implement the Copy trait"));
- err.emit();
- }
-
- // Ideally we wouldn't need to do this, but LLVM's register allocator
- // really doesn't like it when tied operands have different types.
- //
- // This is purely an LLVM limitation, but we have to live with it since
- // there is no way to hide this with implicit conversions.
- //
- // For the purposes of this check we only look at the `InlineAsmType`,
- // which means that pointers and integers are treated as identical (modulo
- // size).
- if let Some((in_expr, Some(in_asm_ty))) = tied_input {
- if in_asm_ty != asm_ty {
- let msg = "incompatible types for asm inout argument";
- let mut err = self.tcx.sess.struct_span_err(vec![in_expr.span, expr.span], msg);
-
- let in_expr_ty = fcx.typeck_results.borrow().expr_ty_adjusted(in_expr);
- let in_expr_ty = fcx.resolve_vars_if_possible(in_expr_ty);
- err.span_label(in_expr.span, &format!("type `{in_expr_ty}`"));
- err.span_label(expr.span, &format!("type `{ty}`"));
- err.note(
- "asm inout arguments must have the same type, \
- unless they are both pointers or integers of the same size",
- );
- err.emit();
- }
-
- // All of the later checks have already been done on the input, so
- // let's not emit errors and warnings twice.
- return Some(asm_ty);
- }
-
- // Check the type against the list of types supported by the selected
- // register class.
- let asm_arch = self.tcx.sess.asm_arch.unwrap();
- let reg_class = reg.reg_class();
- let supported_tys = reg_class.supported_types(asm_arch);
- let Some((_, feature)) = supported_tys.iter().find(|&&(t, _)| t == asm_ty) else {
- let msg = &format!("type `{ty}` cannot be used with this register class");
- let mut err = self.tcx.sess.struct_span_err(expr.span, msg);
- let supported_tys: Vec<_> =
- supported_tys.iter().map(|(t, _)| t.to_string()).collect();
- err.note(&format!(
- "register class `{}` supports these types: {}",
- reg_class.name(),
- supported_tys.join(", "),
- ));
- if let Some(suggest) = reg_class.suggest_class(asm_arch, asm_ty) {
- err.help(&format!(
- "consider using the `{}` register class instead",
- suggest.name()
- ));
- }
- err.emit();
- return Some(asm_ty);
- };
-
- // Check whether the selected type requires a target feature. Note that
- // this is different from the feature check we did earlier. While the
- // previous check checked that this register class is usable at all
- // with the currently enabled features, some types may only be usable
- // with a register class when a certain feature is enabled. We check
- // this here since it depends on the results of typeck.
- //
- // Also note that this check isn't run when the operand type is never
- // (!). In that case we still need the earlier check to verify that the
- // register class is usable at all.
- if let Some(feature) = feature {
- if !target_features.contains(&feature) {
- let msg = &format!("`{}` target feature is not enabled", feature);
- let mut err = self.tcx.sess.struct_span_err(expr.span, msg);
- err.note(&format!(
- "this is required to use type `{}` with register class `{}`",
- ty,
- reg_class.name(),
- ));
- err.emit();
- return Some(asm_ty);
- }
- }
-
- // Check whether a modifier is suggested for using this type.
- if let Some((suggested_modifier, suggested_result)) =
- reg_class.suggest_modifier(asm_arch, asm_ty)
- {
- // Search for any use of this operand without a modifier and emit
- // the suggestion for them.
- let mut spans = vec![];
- for piece in template {
- if let &InlineAsmTemplatePiece::Placeholder { operand_idx, modifier, span } = piece
- {
- if operand_idx == idx && modifier.is_none() {
- spans.push(span);
- }
- }
- }
- if !spans.is_empty() {
- let (default_modifier, default_result) =
- reg_class.default_modifier(asm_arch).unwrap();
- self.tcx.struct_span_lint_hir(
- lint::builtin::ASM_SUB_REGISTER,
- expr.hir_id,
- spans,
- |lint| {
- let msg = "formatting may not be suitable for sub-register argument";
- let mut err = lint.build(msg);
- err.span_label(expr.span, "for this argument");
- err.help(&format!(
- "use the `{suggested_modifier}` modifier to have the register formatted as `{suggested_result}`",
- ));
- err.help(&format!(
- "or use the `{default_modifier}` modifier to keep the default formatting of `{default_result}`",
- ));
- err.emit();
- },
- );
- }
- }
-
- Some(asm_ty)
- }
-
- pub fn check_asm(&self, asm: &hir::InlineAsm<'tcx>, enclosing_id: hir::HirId) {
- let hir = self.tcx.hir();
- let enclosing_def_id = hir.local_def_id(enclosing_id).to_def_id();
- let target_features = self.tcx.asm_target_features(enclosing_def_id);
- let Some(asm_arch) = self.tcx.sess.asm_arch else {
- self.tcx.sess.delay_span_bug(DUMMY_SP, "target architecture does not support asm");
- return;
- };
- for (idx, (op, op_sp)) in asm.operands.iter().enumerate() {
- // Validate register classes against currently enabled target
- // features. We check that at least one type is available for
- // the enabled features.
- //
- // We ignore target feature requirements for clobbers: if the
- // feature is disabled then the compiler doesn't care what we
- // do with the registers.
- //
- // Note that this is only possible for explicit register
- // operands, which cannot be used in the asm string.
- if let Some(reg) = op.reg() {
- // Some explicit registers cannot be used depending on the
- // target. Reject those here.
- if let InlineAsmRegOrRegClass::Reg(reg) = reg {
- if let InlineAsmReg::Err = reg {
- // `validate` will panic on `Err`, as an error must
- // already have been reported.
- continue;
- }
- if let Err(msg) = reg.validate(
- asm_arch,
- self.tcx.sess.relocation_model(),
- &target_features,
- &self.tcx.sess.target,
- op.is_clobber(),
- ) {
- let msg = format!("cannot use register `{}`: {}", reg.name(), msg);
- self.tcx.sess.struct_span_err(*op_sp, &msg).emit();
- continue;
- }
- }
-
- if !op.is_clobber() {
- let mut missing_required_features = vec![];
- let reg_class = reg.reg_class();
- if let InlineAsmRegClass::Err = reg_class {
- continue;
- }
- for &(_, feature) in reg_class.supported_types(asm_arch) {
- match feature {
- Some(feature) => {
- if target_features.contains(&feature) {
- missing_required_features.clear();
- break;
- } else {
- missing_required_features.push(feature);
- }
- }
- None => {
- missing_required_features.clear();
- break;
- }
- }
- }
-
-                    // We are sorting primitive strs, so we can use an unstable sort here.
- missing_required_features.sort_unstable();
- missing_required_features.dedup();
- match &missing_required_features[..] {
- [] => {}
- [feature] => {
- let msg = format!(
- "register class `{}` requires the `{}` target feature",
- reg_class.name(),
- feature
- );
- self.tcx.sess.struct_span_err(*op_sp, &msg).emit();
- // register isn't enabled, don't do more checks
- continue;
- }
- features => {
- let msg = format!(
- "register class `{}` requires at least one of the following target features: {}",
- reg_class.name(),
- features
- .iter()
- .map(|f| f.as_str())
- .intersperse(", ")
- .collect::<String>(),
- );
- self.tcx.sess.struct_span_err(*op_sp, &msg).emit();
- // register isn't enabled, don't do more checks
- continue;
- }
- }
- }
- }
-
- match *op {
- hir::InlineAsmOperand::In { reg, ref expr } => {
- self.check_asm_operand_type(
- idx,
- reg,
- expr,
- asm.template,
- true,
- None,
- &target_features,
- );
- }
- hir::InlineAsmOperand::Out { reg, late: _, ref expr } => {
- if let Some(expr) = expr {
- self.check_asm_operand_type(
- idx,
- reg,
- expr,
- asm.template,
- false,
- None,
- &target_features,
- );
- }
- }
- hir::InlineAsmOperand::InOut { reg, late: _, ref expr } => {
- self.check_asm_operand_type(
- idx,
- reg,
- expr,
- asm.template,
- false,
- None,
- &target_features,
- );
- }
- hir::InlineAsmOperand::SplitInOut { reg, late: _, ref in_expr, ref out_expr } => {
- let in_ty = self.check_asm_operand_type(
- idx,
- reg,
- in_expr,
- asm.template,
- true,
- None,
- &target_features,
- );
- if let Some(out_expr) = out_expr {
- self.check_asm_operand_type(
- idx,
- reg,
- out_expr,
- asm.template,
- false,
- Some((in_expr, in_ty)),
- &target_features,
- );
- }
- }
- // No special checking is needed for these:
- // - Typeck has checked that Const operands are integers.
- // - AST lowering guarantees that SymStatic points to a static.
- hir::InlineAsmOperand::Const { .. } | hir::InlineAsmOperand::SymStatic { .. } => {}
- // Check that sym actually points to a function. Later passes
- // depend on this.
- hir::InlineAsmOperand::SymFn { anon_const } => {
- let ty = self.tcx.typeck_body(anon_const.body).node_type(anon_const.hir_id);
- match ty.kind() {
- ty::Never | ty::Error(_) => {}
- ty::FnDef(..) => {}
- _ => {
- let mut err =
- self.tcx.sess.struct_span_err(*op_sp, "invalid `sym` operand");
- err.span_label(
- self.tcx.hir().span(anon_const.body.hir_id),
- &format!("is {} `{}`", ty.kind().article(), ty),
- );
- err.help("`sym` operands must refer to either a function or a static");
- err.emit();
- }
- };
- }
- }
- }
- }
-}
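
A sketch of the kind of user code these operand checks run on, assuming the stable `std::arch::asm!` macro and an x86_64 target; other targets would need different registers and templates.

// Builds only on x86_64. Both operands are `u64`, so the operand-type and
// register-class checks above are satisfied; tying a `u32` input to a `u64`
// output in one `inout` operand would instead be rejected with
// "incompatible types for asm inout argument".
#[cfg(target_arch = "x86_64")]
fn add_one(x: u64) -> u64 {
    let y: u64;
    unsafe {
        std::arch::asm!("lea {res}, [{val} + 1]", val = in(reg) x, res = out(reg) y);
    }
    y
}

#[cfg(target_arch = "x86_64")]
fn main() {
    assert_eq!(add_one(41), 42);
}

#[cfg(not(target_arch = "x86_64"))]
fn main() {}
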
diff --git a/compiler/rustc_typeck/src/check/method/mod.rs b/compiler/rustc_typeck/src/check/method/mod.rs
deleted file mode 100644
index 0e678c41f..000000000
--- a/compiler/rustc_typeck/src/check/method/mod.rs
+++ /dev/null
@@ -1,658 +0,0 @@
-//! Method lookup: the secret sauce of Rust. See the [rustc dev guide] for more information.
-//!
-//! [rustc dev guide]: https://rustc-dev-guide.rust-lang.org/method-lookup.html
-
-mod confirm;
-mod prelude2021;
-pub mod probe;
-mod suggest;
-
-pub use self::suggest::SelfSource;
-pub use self::MethodError::*;
-
-use crate::check::{Expectation, FnCtxt};
-use crate::ObligationCause;
-use rustc_data_structures::sync::Lrc;
-use rustc_errors::{Applicability, Diagnostic};
-use rustc_hir as hir;
-use rustc_hir::def::{CtorOf, DefKind, Namespace};
-use rustc_hir::def_id::DefId;
-use rustc_infer::infer::{self, InferOk};
-use rustc_middle::ty::subst::Subst;
-use rustc_middle::ty::subst::{InternalSubsts, SubstsRef};
-use rustc_middle::ty::{
- self, AssocKind, DefIdTree, GenericParamDefKind, ProjectionPredicate, ProjectionTy, Term,
- ToPredicate, Ty, TypeVisitable,
-};
-use rustc_span::symbol::Ident;
-use rustc_span::Span;
-use rustc_trait_selection::traits;
-use rustc_trait_selection::traits::query::evaluate_obligation::InferCtxtExt;
-
-use self::probe::{IsSuggestion, ProbeScope};
-
-pub fn provide(providers: &mut ty::query::Providers) {
- probe::provide(providers);
-}
-
-#[derive(Clone, Copy, Debug)]
-pub struct MethodCallee<'tcx> {
-    /// Impl method ID for inherent methods, or trait method ID otherwise.
- pub def_id: DefId,
- pub substs: SubstsRef<'tcx>,
-
- /// Instantiated method signature, i.e., it has been
- /// substituted, normalized, and has had late-bound
- /// lifetimes replaced with inference variables.
- pub sig: ty::FnSig<'tcx>,
-}
-
-#[derive(Debug)]
-pub enum MethodError<'tcx> {
- // Did not find an applicable method, but we did find various near-misses that may work.
- NoMatch(NoMatchData<'tcx>),
-
- // Multiple methods might apply.
- Ambiguity(Vec<CandidateSource>),
-
- // Found an applicable method, but it is not visible. The third argument contains a list of
- // not-in-scope traits which may work.
- PrivateMatch(DefKind, DefId, Vec<DefId>),
-
-    // Found a `Self: Sized` bound where `Self` is a trait object; the caller may also have
-    // forgotten to import a trait.
- IllegalSizedBound(Vec<DefId>, bool, Span),
-
- // Found a match, but the return type is wrong
- BadReturnType,
-}
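
A small program that intentionally fails to compile and lands in the `Ambiguity` case above (reported as E0034, "multiple applicable items in scope"); the trait and type names are made up:

trait Quack {
    fn noise(&self) -> &'static str {
        "quack"
    }
}
trait Honk {
    fn noise(&self) -> &'static str {
        "honk"
    }
}
struct Duck;
impl Quack for Duck {}
impl Honk for Duck {}

fn main() {
    let d = Duck;
    let _ = d.noise(); // error[E0034]: multiple applicable items in scope
}
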
-
-// Contains a list of static methods that may apply, a list of unsatisfied trait predicates which
-// could lead to matches if satisfied, and a list of not-in-scope traits which may work.
-#[derive(Debug)]
-pub struct NoMatchData<'tcx> {
- pub static_candidates: Vec<CandidateSource>,
- pub unsatisfied_predicates:
- Vec<(ty::Predicate<'tcx>, Option<ty::Predicate<'tcx>>, Option<ObligationCause<'tcx>>)>,
- pub out_of_scope_traits: Vec<DefId>,
- pub lev_candidate: Option<ty::AssocItem>,
- pub mode: probe::Mode,
-}
-
-// A pared down enum describing just the places from which a method
-// candidate can arise. Used for error reporting only.
-#[derive(Copy, Clone, Debug, Eq, Ord, PartialEq, PartialOrd)]
-pub enum CandidateSource {
- Impl(DefId),
- Trait(DefId /* trait id */),
-}
-
-impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
- /// Determines whether the type `self_ty` supports a method name `method_name` or not.
- #[instrument(level = "debug", skip(self))]
- pub fn method_exists(
- &self,
- method_name: Ident,
- self_ty: Ty<'tcx>,
- call_expr_id: hir::HirId,
- allow_private: bool,
- ) -> bool {
- let mode = probe::Mode::MethodCall;
- match self.probe_for_name(
- method_name.span,
- mode,
- method_name,
- IsSuggestion(false),
- self_ty,
- call_expr_id,
- ProbeScope::TraitsInScope,
- ) {
- Ok(..) => true,
- Err(NoMatch(..)) => false,
- Err(Ambiguity(..)) => true,
- Err(PrivateMatch(..)) => allow_private,
- Err(IllegalSizedBound(..)) => true,
- Err(BadReturnType) => bug!("no return type expectations but got BadReturnType"),
- }
- }
-
- /// Adds a suggestion to call the given method to the provided diagnostic.
- #[instrument(level = "debug", skip(self, err, call_expr))]
- pub(crate) fn suggest_method_call(
- &self,
- err: &mut Diagnostic,
- msg: &str,
- method_name: Ident,
- self_ty: Ty<'tcx>,
- call_expr: &hir::Expr<'_>,
- span: Option<Span>,
- ) {
- let params = self
- .probe_for_name(
- method_name.span,
- probe::Mode::MethodCall,
- method_name,
- IsSuggestion(false),
- self_ty,
- call_expr.hir_id,
- ProbeScope::TraitsInScope,
- )
- .map(|pick| {
- let sig = self.tcx.fn_sig(pick.item.def_id);
- sig.inputs().skip_binder().len().saturating_sub(1)
- })
- .unwrap_or(0);
-
- // Account for `foo.bar<T>`;
- let sugg_span = span.unwrap_or(call_expr.span).shrink_to_hi();
- let (suggestion, applicability) = (
- format!("({})", (0..params).map(|_| "_").collect::<Vec<_>>().join(", ")),
- if params > 0 { Applicability::HasPlaceholders } else { Applicability::MaybeIncorrect },
- );
-
- err.span_suggestion_verbose(sugg_span, msg, suggestion, applicability);
- }
-
- /// Performs method lookup. If lookup is successful, it will return the callee
- /// and store an appropriate adjustment for the self-expr. In some cases it may
- /// report an error (e.g., invoking the `drop` method).
- ///
- /// # Arguments
- ///
- /// Given a method call like `foo.bar::<T1,...Tn>(a, b + 1, ...)`:
- ///
- /// * `self`: the surrounding `FnCtxt` (!)
- /// * `self_ty`: the (unadjusted) type of the self expression (`foo`)
- /// * `segment`: the name and generic arguments of the method (`bar::<T1, ...Tn>`)
- /// * `span`: the span for the method call
- /// * `call_expr`: the complete method call: (`foo.bar::<T1,...Tn>(...)`)
- /// * `self_expr`: the self expression (`foo`)
- /// * `args`: the expressions of the arguments (`a, b + 1, ...`)
- #[instrument(level = "debug", skip(self, call_expr, self_expr))]
- pub fn lookup_method(
- &self,
- self_ty: Ty<'tcx>,
- segment: &hir::PathSegment<'_>,
- span: Span,
- call_expr: &'tcx hir::Expr<'tcx>,
- self_expr: &'tcx hir::Expr<'tcx>,
- args: &'tcx [hir::Expr<'tcx>],
- ) -> Result<MethodCallee<'tcx>, MethodError<'tcx>> {
- debug!(
- "lookup(method_name={}, self_ty={:?}, call_expr={:?}, self_expr={:?})",
- segment.ident, self_ty, call_expr, self_expr
- );
-
- let pick =
- self.lookup_probe(span, segment.ident, self_ty, call_expr, ProbeScope::TraitsInScope)?;
-
- self.lint_dot_call_from_2018(self_ty, segment, span, call_expr, self_expr, &pick, args);
-
- for import_id in &pick.import_ids {
- debug!("used_trait_import: {:?}", import_id);
- Lrc::get_mut(&mut self.typeck_results.borrow_mut().used_trait_imports)
- .unwrap()
- .insert(*import_id);
- }
-
- self.tcx.check_stability(pick.item.def_id, Some(call_expr.hir_id), span, None);
-
- let result =
- self.confirm_method(span, self_expr, call_expr, self_ty, pick.clone(), segment);
- debug!("result = {:?}", result);
-
- if let Some(span) = result.illegal_sized_bound {
- let mut needs_mut = false;
- if let ty::Ref(region, t_type, mutability) = self_ty.kind() {
- let trait_type = self
- .tcx
- .mk_ref(*region, ty::TypeAndMut { ty: *t_type, mutbl: mutability.invert() });
- // We probe again to see if there might be a borrow mutability discrepancy.
- match self.lookup_probe(
- span,
- segment.ident,
- trait_type,
- call_expr,
- ProbeScope::TraitsInScope,
- ) {
- Ok(ref new_pick) if *new_pick != pick => {
- needs_mut = true;
- }
- _ => {}
- }
- }
-
- // We probe again, taking all traits into account (not only those in scope).
- let mut candidates = match self.lookup_probe(
- span,
- segment.ident,
- self_ty,
- call_expr,
- ProbeScope::AllTraits,
- ) {
- // If we find a different result the caller probably forgot to import a trait.
- Ok(ref new_pick) if *new_pick != pick => vec![new_pick.item.container_id(self.tcx)],
- Err(Ambiguity(ref sources)) => sources
- .iter()
- .filter_map(|source| {
- match *source {
- // Note: this cannot come from an inherent impl,
- // because the first probing succeeded.
- CandidateSource::Impl(def) => self.tcx.trait_id_of_impl(def),
- CandidateSource::Trait(_) => None,
- }
- })
- .collect(),
- _ => Vec::new(),
- };
- candidates.retain(|candidate| *candidate != self.tcx.parent(result.callee.def_id));
-
- return Err(IllegalSizedBound(candidates, needs_mut, span));
- }
-
- Ok(result.callee)
- }
-
- #[instrument(level = "debug", skip(self, call_expr))]
- pub fn lookup_probe(
- &self,
- span: Span,
- method_name: Ident,
- self_ty: Ty<'tcx>,
- call_expr: &'tcx hir::Expr<'tcx>,
- scope: ProbeScope,
- ) -> probe::PickResult<'tcx> {
- let mode = probe::Mode::MethodCall;
- let self_ty = self.resolve_vars_if_possible(self_ty);
- self.probe_for_name(
- span,
- mode,
- method_name,
- IsSuggestion(false),
- self_ty,
- call_expr.hir_id,
- scope,
- )
- }
-
- pub(super) fn obligation_for_method(
- &self,
- span: Span,
- trait_def_id: DefId,
- self_ty: Ty<'tcx>,
- opt_input_types: Option<&[Ty<'tcx>]>,
- ) -> (traits::Obligation<'tcx, ty::Predicate<'tcx>>, &'tcx ty::List<ty::subst::GenericArg<'tcx>>)
- {
- // Construct a trait-reference `self_ty : Trait<input_tys>`
- let substs = InternalSubsts::for_item(self.tcx, trait_def_id, |param, _| {
- match param.kind {
- GenericParamDefKind::Lifetime | GenericParamDefKind::Const { .. } => {}
- GenericParamDefKind::Type { .. } => {
- if param.index == 0 {
- return self_ty.into();
- } else if let Some(input_types) = opt_input_types {
- return input_types[param.index as usize - 1].into();
- }
- }
- }
- self.var_for_def(span, param)
- });
-
- let trait_ref = ty::TraitRef::new(trait_def_id, substs);
-
- // Construct an obligation
- let poly_trait_ref = ty::Binder::dummy(trait_ref);
- (
- traits::Obligation::misc(
- span,
- self.body_id,
- self.param_env,
- poly_trait_ref.without_const().to_predicate(self.tcx),
- ),
- substs,
- )
- }
-
- pub(super) fn obligation_for_op_method(
- &self,
- span: Span,
- trait_def_id: DefId,
- self_ty: Ty<'tcx>,
- opt_input_type: Option<Ty<'tcx>>,
- opt_input_expr: Option<&'tcx hir::Expr<'tcx>>,
- expected: Expectation<'tcx>,
- ) -> (traits::Obligation<'tcx, ty::Predicate<'tcx>>, &'tcx ty::List<ty::subst::GenericArg<'tcx>>)
- {
- // Construct a trait-reference `self_ty : Trait<input_tys>`
- let substs = InternalSubsts::for_item(self.tcx, trait_def_id, |param, _| {
- match param.kind {
- GenericParamDefKind::Lifetime | GenericParamDefKind::Const { .. } => {}
- GenericParamDefKind::Type { .. } => {
- if param.index == 0 {
- return self_ty.into();
- } else if let Some(input_type) = opt_input_type {
- return input_type.into();
- }
- }
- }
- self.var_for_def(span, param)
- });
-
- let trait_ref = ty::TraitRef::new(trait_def_id, substs);
-
- // Construct an obligation
- let poly_trait_ref = ty::Binder::dummy(trait_ref);
- let opt_output_ty =
- expected.only_has_type(self).and_then(|ty| (!ty.needs_infer()).then(|| ty));
- let opt_output_assoc_item = self.tcx.associated_items(trait_def_id).find_by_name_and_kind(
- self.tcx,
- Ident::from_str("Output"),
- AssocKind::Type,
- trait_def_id,
- );
- let output_pred =
- opt_output_ty.zip(opt_output_assoc_item).map(|(output_ty, output_assoc_item)| {
- ty::Binder::dummy(ty::PredicateKind::Projection(ProjectionPredicate {
- projection_ty: ProjectionTy { substs, item_def_id: output_assoc_item.def_id },
- term: Term::Ty(output_ty),
- }))
- .to_predicate(self.tcx)
- });
-
- (
- traits::Obligation::new(
- traits::ObligationCause::new(
- span,
- self.body_id,
- traits::BinOp {
- rhs_span: opt_input_expr.map(|expr| expr.span),
- is_lit: opt_input_expr
- .map_or(false, |expr| matches!(expr.kind, hir::ExprKind::Lit(_))),
- output_pred,
- },
- ),
- self.param_env,
- poly_trait_ref.without_const().to_predicate(self.tcx),
- ),
- substs,
- )
- }
-
- /// `lookup_method_in_trait` is used for overloaded operators.
- /// It does a very narrow slice of what the normal probe/confirm path does.
- /// In particular, it doesn't really do any probing: it simply constructs
- /// an obligation for a particular trait with the given self type and checks
- /// whether that trait is implemented.
- #[instrument(level = "debug", skip(self, span, opt_input_types))]
- pub(super) fn lookup_method_in_trait(
- &self,
- span: Span,
- m_name: Ident,
- trait_def_id: DefId,
- self_ty: Ty<'tcx>,
- opt_input_types: Option<&[Ty<'tcx>]>,
- ) -> Option<InferOk<'tcx, MethodCallee<'tcx>>> {
- debug!(
- "lookup_in_trait_adjusted(self_ty={:?}, m_name={}, trait_def_id={:?}, opt_input_types={:?})",
- self_ty, m_name, trait_def_id, opt_input_types
- );
-
- let (obligation, substs) =
- self.obligation_for_method(span, trait_def_id, self_ty, opt_input_types);
- self.construct_obligation_for_trait(
- span,
- m_name,
- trait_def_id,
- obligation,
- substs,
- None,
- false,
- )
- }
-
- pub(super) fn lookup_op_method_in_trait(
- &self,
- span: Span,
- m_name: Ident,
- trait_def_id: DefId,
- self_ty: Ty<'tcx>,
- opt_input_type: Option<Ty<'tcx>>,
- opt_input_expr: Option<&'tcx hir::Expr<'tcx>>,
- expected: Expectation<'tcx>,
- ) -> Option<InferOk<'tcx, MethodCallee<'tcx>>> {
- let (obligation, substs) = self.obligation_for_op_method(
- span,
- trait_def_id,
- self_ty,
- opt_input_type,
- opt_input_expr,
- expected,
- );
- self.construct_obligation_for_trait(
- span,
- m_name,
- trait_def_id,
- obligation,
- substs,
- opt_input_expr,
- true,
- )
- }
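
At the user level this operator path is what makes the explicit trait call and the operator form interchangeable; a small runnable sketch:

use std::ops::Add;

fn main() {
    let a = 2i32;
    let b = 3;
    // `a + b` is checked by looking up the operator method on `Add` for the
    // operand types, so the explicit trait call computes the same thing.
    assert_eq!(a + b, Add::add(a, b));
}
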
-
- // FIXME(#18741): it seems likely that we can consolidate some of this
- // code with the other method-lookup code. In particular, the second half
- // of this method is basically the same as confirmation.
- fn construct_obligation_for_trait(
- &self,
- span: Span,
- m_name: Ident,
- trait_def_id: DefId,
- obligation: traits::PredicateObligation<'tcx>,
- substs: &'tcx ty::List<ty::subst::GenericArg<'tcx>>,
- opt_input_expr: Option<&'tcx hir::Expr<'tcx>>,
- is_op: bool,
- ) -> Option<InferOk<'tcx, MethodCallee<'tcx>>> {
- debug!(?obligation);
-
- // Now we want to know if this can be matched
- if !self.predicate_may_hold(&obligation) {
- debug!("--> Cannot match obligation");
- // Cannot be matched, no such method resolution is possible.
- return None;
- }
-
- // Trait must have a method named `m_name` and it should not have
- // type parameters or early-bound regions.
- let tcx = self.tcx;
- let Some(method_item) = self.associated_value(trait_def_id, m_name) else {
- tcx.sess.delay_span_bug(
- span,
- "operator trait does not have corresponding operator method",
- );
- return None;
- };
- let def_id = method_item.def_id;
- let generics = tcx.generics_of(def_id);
- assert_eq!(generics.params.len(), 0);
-
- debug!("lookup_in_trait_adjusted: method_item={:?}", method_item);
- let mut obligations = vec![];
-
- // Instantiate late-bound regions and substitute the trait
- // parameters into the method type to get the actual method type.
- //
- // N.B., instantiate late-bound regions first so that
- // `instantiate_type_scheme` can normalize associated types that
- // may reference those regions.
- let fn_sig = tcx.bound_fn_sig(def_id);
- let fn_sig = fn_sig.subst(self.tcx, substs);
- let fn_sig = self.replace_bound_vars_with_fresh_vars(span, infer::FnCall, fn_sig);
-
- let InferOk { value, obligations: o } = if is_op {
- self.normalize_op_associated_types_in_as_infer_ok(span, fn_sig, opt_input_expr)
- } else {
- self.normalize_associated_types_in_as_infer_ok(span, fn_sig)
- };
- let fn_sig = {
- obligations.extend(o);
- value
- };
-
- // Register obligations for the parameters. This will include the
- // `Self` parameter, which in turn has a bound of the main trait,
- // so this also effectively registers `obligation` as well. (We
- // used to register `obligation` explicitly, but that resulted in
- // double error messages being reported.)
- //
- // Note that as the method comes from a trait, it should not have
- // any late-bound regions appearing in its bounds.
- let bounds = self.tcx.predicates_of(def_id).instantiate(self.tcx, substs);
-
- let InferOk { value, obligations: o } = if is_op {
- self.normalize_op_associated_types_in_as_infer_ok(span, bounds, opt_input_expr)
- } else {
- self.normalize_associated_types_in_as_infer_ok(span, bounds)
- };
- let bounds = {
- obligations.extend(o);
- value
- };
-
- assert!(!bounds.has_escaping_bound_vars());
-
- let cause = if is_op {
- ObligationCause::new(
- span,
- self.body_id,
- traits::BinOp {
- rhs_span: opt_input_expr.map(|expr| expr.span),
- is_lit: opt_input_expr
- .map_or(false, |expr| matches!(expr.kind, hir::ExprKind::Lit(_))),
- output_pred: None,
- },
- )
- } else {
- traits::ObligationCause::misc(span, self.body_id)
- };
- obligations.extend(traits::predicates_for_generics(cause.clone(), self.param_env, bounds));
-
- // Also add an obligation for the method type being well-formed.
- let method_ty = tcx.mk_fn_ptr(ty::Binder::dummy(fn_sig));
- debug!(
- "lookup_in_trait_adjusted: matched method method_ty={:?} obligation={:?}",
- method_ty, obligation
- );
- obligations.push(traits::Obligation::new(
- cause,
- self.param_env,
- ty::Binder::dummy(ty::PredicateKind::WellFormed(method_ty.into())).to_predicate(tcx),
- ));
-
- let callee = MethodCallee { def_id, substs, sig: fn_sig };
-
- debug!("callee = {:?}", callee);
-
- Some(InferOk { obligations, value: callee })
- }
-
-    /// Performs a [fully-qualified function call] (formerly "universal function call") lookup. If
-    /// lookup is successful, it will return the type of definition and the [`DefId`] of the found
-    /// function definition.
-    ///
-    /// [fully-qualified function call]: https://doc.rust-lang.org/reference/expressions/call-expr.html#disambiguating-function-calls
- ///
- /// # Arguments
- ///
- /// Given a function call like `Foo::bar::<T1,...Tn>(...)`:
- ///
- /// * `self`: the surrounding `FnCtxt` (!)
- /// * `span`: the span of the call, excluding arguments (`Foo::bar::<T1, ...Tn>`)
- /// * `method_name`: the identifier of the function within the container type (`bar`)
- /// * `self_ty`: the type to search within (`Foo`)
- /// * `self_ty_span` the span for the type being searched within (span of `Foo`)
- /// * `expr_id`: the [`hir::HirId`] of the expression composing the entire call
- #[instrument(level = "debug", skip(self))]
- pub fn resolve_fully_qualified_call(
- &self,
- span: Span,
- method_name: Ident,
- self_ty: Ty<'tcx>,
- self_ty_span: Span,
- expr_id: hir::HirId,
- ) -> Result<(DefKind, DefId), MethodError<'tcx>> {
- debug!(
- "resolve_fully_qualified_call: method_name={:?} self_ty={:?} expr_id={:?}",
- method_name, self_ty, expr_id,
- );
-
- let tcx = self.tcx;
-
- // Check if we have an enum variant.
- if let ty::Adt(adt_def, _) = self_ty.kind() {
- if adt_def.is_enum() {
- let variant_def = adt_def
- .variants()
- .iter()
- .find(|vd| tcx.hygienic_eq(method_name, vd.ident(tcx), adt_def.did()));
- if let Some(variant_def) = variant_def {
- // Braced variants generate unusable names in value namespace (reserved for
- // possible future use), so variants resolved as associated items may refer to
- // them as well. It's ok to use the variant's id as a ctor id since an
- // error will be reported on any use of such resolution anyway.
- let ctor_def_id = variant_def.ctor_def_id.unwrap_or(variant_def.def_id);
- tcx.check_stability(ctor_def_id, Some(expr_id), span, Some(method_name.span));
- return Ok((
- DefKind::Ctor(CtorOf::Variant, variant_def.ctor_kind),
- ctor_def_id,
- ));
- }
- }
- }
-
- let pick = self.probe_for_name(
- span,
- probe::Mode::Path,
- method_name,
- IsSuggestion(false),
- self_ty,
- expr_id,
- ProbeScope::TraitsInScope,
- )?;
-
- self.lint_fully_qualified_call_from_2018(
- span,
- method_name,
- self_ty,
- self_ty_span,
- expr_id,
- &pick,
- );
-
- debug!("resolve_fully_qualified_call: pick={:?}", pick);
- {
- let mut typeck_results = self.typeck_results.borrow_mut();
- let used_trait_imports = Lrc::get_mut(&mut typeck_results.used_trait_imports).unwrap();
- for import_id in pick.import_ids {
- debug!("resolve_fully_qualified_call: used_trait_import: {:?}", import_id);
- used_trait_imports.insert(import_id);
- }
- }
-
- let def_kind = pick.item.kind.as_def_kind();
- debug!(
- "resolve_fully_qualified_call: def_kind={:?}, def_id={:?}",
- def_kind, pick.item.def_id
- );
- tcx.check_stability(pick.item.def_id, Some(expr_id), span, Some(method_name.span));
- Ok((def_kind, pick.item.def_id))
- }
-
- /// Finds item with name `item_name` defined in impl/trait `def_id`
- /// and return it, or `None`, if no such item was defined there.
- pub fn associated_value(&self, def_id: DefId, item_name: Ident) -> Option<ty::AssocItem> {
- self.tcx
- .associated_items(def_id)
- .find_by_name_and_namespace(self.tcx, item_name, Namespace::ValueNS, def_id)
- .copied()
- }
-}
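
A user-level illustration of the path-style calls `resolve_fully_qualified_call` handles, including the fully-qualified form used when a plain method path would be ambiguous; the trait and type names are made up:

trait Loud {
    fn speak() -> &'static str;
}
trait Quiet {
    fn speak() -> &'static str;
}
struct Animal;
impl Loud for Animal {
    fn speak() -> &'static str {
        "LOUD"
    }
}
impl Quiet for Animal {
    fn speak() -> &'static str {
        "quiet"
    }
}

fn main() {
    // `Animal::speak()` would be ambiguous here, so the calls are spelled out.
    assert_eq!(<Animal as Loud>::speak(), "LOUD");
    assert_eq!(<Animal as Quiet>::speak(), "quiet");
}
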
diff --git a/compiler/rustc_typeck/src/check/mod.rs b/compiler/rustc_typeck/src/check/mod.rs
deleted file mode 100644
index 17c2e4868..000000000
--- a/compiler/rustc_typeck/src/check/mod.rs
+++ /dev/null
@@ -1,970 +0,0 @@
-/*!
-
-# typeck: check phase
-
-Within the check phase of type check, we check each item one at a time
-(bodies of function expressions are checked as part of the containing
-function). Inference is used to supply types wherever they are unknown.
-
-By far the most complex case is checking the body of a function. This
-can be broken down into several distinct phases:
-
-- gather: creates type variables to represent the type of each local
- variable and pattern binding.
-
-- main: the main pass does the lion's share of the work: it
- determines the types of all expressions, resolves
- methods, checks for most invalid conditions, and so forth. In
- some cases, where a type is unknown, it may create a type or region
- variable and use that as the type of an expression.
-
- In the process of checking, various constraints will be placed on
- these type variables through the subtyping relationships requested
- through the `demand` module. The `infer` module is in charge
- of resolving those constraints.
-
-- regionck: after main is complete, the regionck pass goes over all
- types looking for regions and making sure that they did not escape
- into places where they are not in scope. This may also influence the
- final assignments of the various region variables if there is some
- flexibility.
-
-- writeback: writes the final types within a function body, replacing
- type variables with their final inferred types. These final types
- are written into the `tcx.node_types` table, which should *never* contain
- any reference to a type variable.
-
-## Intermediate types
-
-While type checking a function, the intermediate types for the
-expressions, blocks, and so forth contained within the function are
-stored in `fcx.node_types` and `fcx.node_substs`. These types
-may contain unresolved type variables. After type checking is
-complete, the functions in the writeback module are used to take the
-types from this table, resolve them, and then write them into their
-permanent home in the type context `tcx`.
-
-This means that during inferencing you should use `fcx.write_ty()`
-and `fcx.expr_ty()` / `fcx.node_ty()` to write/obtain the types of
-nodes within the function.
-
-The types of top-level items, which never contain unbound type
-variables, are stored directly into the `tcx` typeck_results.
-
-N.B., a type variable is not the same thing as a type parameter. A
-type variable is an instance of a type parameter. That is,
-given a generic function `fn foo<T>(t: T)`, while checking the
-function `foo`, the type `ty_param(0)` refers to the type `T`, which
-is treated abstractly. However, when `foo()` is called, `T` will be
-replaced with a fresh type variable `N`. This variable will
-eventually be resolved to some concrete type (which might itself be
-a type parameter).
-
-*/
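
A concrete, runnable version of the example in the comment above: inside `foo`, `T` is an abstract type parameter; each call site instantiates it with a fresh inference variable that is then resolved from the surrounding context.

fn foo<T>(t: T) -> Vec<T> {
    vec![t]
}

fn main() {
    // The fresh variable for `T` is resolved to `u8` from the argument.
    let a = foo(5u8);
    // Here it is resolved to `i64` from the annotation on `b`.
    let b: Vec<i64> = foo(7);
    assert_eq!(a, vec![5u8]);
    assert_eq!(b, vec![7i64]);
}
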
-
-pub mod _match;
-mod autoderef;
-mod callee;
-pub mod cast;
-mod check;
-mod closure;
-pub mod coercion;
-mod compare_method;
-pub mod demand;
-mod diverges;
-pub mod dropck;
-mod expectation;
-mod expr;
-mod fallback;
-mod fn_ctxt;
-mod gather_locals;
-mod generator_interior;
-mod inherited;
-pub mod intrinsic;
-mod intrinsicck;
-pub mod method;
-mod op;
-mod pat;
-mod place_op;
-mod region;
-pub mod regionck;
-pub mod rvalue_scopes;
-mod upvar;
-pub mod wfcheck;
-pub mod writeback;
-
-use check::{check_abi, check_fn, check_mod_item_types};
-pub use diverges::Diverges;
-pub use expectation::Expectation;
-pub use fn_ctxt::*;
-use hir::def::CtorOf;
-pub use inherited::{Inherited, InheritedBuilder};
-
-use crate::astconv::AstConv;
-use crate::check::gather_locals::GatherLocalsVisitor;
-use rustc_data_structures::fx::{FxHashMap, FxHashSet};
-use rustc_errors::{
- pluralize, struct_span_err, Applicability, DiagnosticBuilder, EmissionGuarantee, MultiSpan,
-};
-use rustc_hir as hir;
-use rustc_hir::def::Res;
-use rustc_hir::def_id::{DefId, LocalDefId};
-use rustc_hir::intravisit::Visitor;
-use rustc_hir::{HirIdMap, ImplicitSelfKind, Node};
-use rustc_index::bit_set::BitSet;
-use rustc_index::vec::Idx;
-use rustc_infer::infer::type_variable::{TypeVariableOrigin, TypeVariableOriginKind};
-use rustc_middle::ty::query::Providers;
-use rustc_middle::ty::subst::{InternalSubsts, Subst, SubstsRef};
-use rustc_middle::ty::{self, Ty, TyCtxt, UserType};
-use rustc_session::config;
-use rustc_session::parse::feature_err;
-use rustc_session::Session;
-use rustc_span::source_map::DUMMY_SP;
-use rustc_span::symbol::{kw, Ident};
-use rustc_span::{self, BytePos, Span};
-use rustc_target::abi::VariantIdx;
-use rustc_target::spec::abi::Abi;
-use rustc_trait_selection::traits;
-use rustc_trait_selection::traits::error_reporting::recursive_type_with_infinite_size_error;
-use rustc_trait_selection::traits::error_reporting::suggestions::ReturnsVisitor;
-use std::cell::RefCell;
-
-use crate::require_c_abi_if_c_variadic;
-use crate::util::common::indenter;
-
-use self::coercion::DynamicCoerceMany;
-use self::region::region_scope_tree;
-pub use self::Expectation::*;
-
-#[macro_export]
-macro_rules! type_error_struct {
- ($session:expr, $span:expr, $typ:expr, $code:ident, $($message:tt)*) => ({
- let mut err = rustc_errors::struct_span_err!($session, $span, $code, $($message)*);
-
- if $typ.references_error() {
- err.downgrade_to_delayed_bug();
- }
-
- err
- })
-}
-
-/// The type of a local binding, including the revealed type for anon types.
-#[derive(Copy, Clone, Debug)]
-pub struct LocalTy<'tcx> {
- decl_ty: Ty<'tcx>,
- revealed_ty: Ty<'tcx>,
-}
-
-#[derive(Copy, Clone, Debug, PartialEq, Eq)]
-pub enum Needs {
- MutPlace,
- None,
-}
-
-impl Needs {
- fn maybe_mut_place(m: hir::Mutability) -> Self {
- match m {
- hir::Mutability::Mut => Needs::MutPlace,
- hir::Mutability::Not => Needs::None,
- }
- }
-}
-
-#[derive(Copy, Clone)]
-pub struct UnsafetyState {
- pub def: hir::HirId,
- pub unsafety: hir::Unsafety,
- from_fn: bool,
-}
-
-impl UnsafetyState {
- pub fn function(unsafety: hir::Unsafety, def: hir::HirId) -> UnsafetyState {
- UnsafetyState { def, unsafety, from_fn: true }
- }
-
- pub fn recurse(self, blk: &hir::Block<'_>) -> UnsafetyState {
- use hir::BlockCheckMode;
- match self.unsafety {
- // If this unsafe, then if the outer function was already marked as
- // unsafe we shouldn't attribute the unsafe'ness to the block. This
- // way the block can be warned about instead of ignoring this
- // extraneous block (functions are never warned about).
- hir::Unsafety::Unsafe if self.from_fn => self,
-
- unsafety => {
- let (unsafety, def) = match blk.rules {
- BlockCheckMode::UnsafeBlock(..) => (hir::Unsafety::Unsafe, blk.hir_id),
- BlockCheckMode::DefaultBlock => (unsafety, self.def),
- };
- UnsafetyState { def, unsafety, from_fn: false }
- }
- }
- }
-}
-
-#[derive(Debug, Copy, Clone)]
-pub enum PlaceOp {
- Deref,
- Index,
-}
-
-pub struct BreakableCtxt<'tcx> {
- may_break: bool,
-
-    // this is `None` for loops where `break` with a value is illegal,
-    // such as `while`, `for`, and `while let`
- coerce: Option<DynamicCoerceMany<'tcx>>,
-}
-
-pub struct EnclosingBreakables<'tcx> {
- stack: Vec<BreakableCtxt<'tcx>>,
- by_id: HirIdMap<usize>,
-}
-
-impl<'tcx> EnclosingBreakables<'tcx> {
- fn find_breakable(&mut self, target_id: hir::HirId) -> &mut BreakableCtxt<'tcx> {
- self.opt_find_breakable(target_id).unwrap_or_else(|| {
- bug!("could not find enclosing breakable with id {}", target_id);
- })
- }
-
- fn opt_find_breakable(&mut self, target_id: hir::HirId) -> Option<&mut BreakableCtxt<'tcx>> {
- match self.by_id.get(&target_id) {
- Some(ix) => Some(&mut self.stack[*ix]),
- None => None,
- }
- }
-}
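
What the `coerce` comment above means at the user level: only `loop` may `break` with a value, so only `loop` needs a coercion target; a small runnable example:

fn main() {
    let mut n = 0;
    let found = loop {
        n += 1;
        if n * n > 20 {
            break n; // `break` with a value is only legal in `loop`
        }
    };
    assert_eq!(found, 5);
}
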
-
-pub fn provide(providers: &mut Providers) {
- method::provide(providers);
- wfcheck::provide(providers);
- *providers = Providers {
- typeck_item_bodies,
- typeck_const_arg,
- typeck,
- diagnostic_only_typeck,
- has_typeck_results,
- adt_destructor,
- used_trait_imports,
- check_mod_item_types,
- region_scope_tree,
- ..*providers
- };
-}
-
-fn adt_destructor(tcx: TyCtxt<'_>, def_id: DefId) -> Option<ty::Destructor> {
- tcx.calculate_dtor(def_id, dropck::check_drop_impl)
-}
-
-/// If this `DefId` is a "primary tables entry", returns
-/// `Some((body_id, body_ty, fn_sig))`. Otherwise, returns `None`.
-///
-/// If this function returns `Some`, then `typeck_results(def_id)` will
-/// succeed; if it returns `None`, then `typeck_results(def_id)` may or
-/// may not succeed. In some cases where this function returns `None`
-/// (notably closures), `typeck_results(def_id)` would wind up
-/// redirecting to the owning function.
-fn primary_body_of(
- tcx: TyCtxt<'_>,
- id: hir::HirId,
-) -> Option<(hir::BodyId, Option<&hir::Ty<'_>>, Option<&hir::FnSig<'_>>)> {
- match tcx.hir().get(id) {
- Node::Item(item) => match item.kind {
- hir::ItemKind::Const(ty, body) | hir::ItemKind::Static(ty, _, body) => {
- Some((body, Some(ty), None))
- }
- hir::ItemKind::Fn(ref sig, .., body) => Some((body, None, Some(sig))),
- _ => None,
- },
- Node::TraitItem(item) => match item.kind {
- hir::TraitItemKind::Const(ty, Some(body)) => Some((body, Some(ty), None)),
- hir::TraitItemKind::Fn(ref sig, hir::TraitFn::Provided(body)) => {
- Some((body, None, Some(sig)))
- }
- _ => None,
- },
- Node::ImplItem(item) => match item.kind {
- hir::ImplItemKind::Const(ty, body) => Some((body, Some(ty), None)),
- hir::ImplItemKind::Fn(ref sig, body) => Some((body, None, Some(sig))),
- _ => None,
- },
- Node::AnonConst(constant) => Some((constant.body, None, None)),
- _ => None,
- }
-}
-
-fn has_typeck_results(tcx: TyCtxt<'_>, def_id: DefId) -> bool {
- // Closures' typeck results come from their outermost function,
- // as they are part of the same "inference environment".
- let typeck_root_def_id = tcx.typeck_root_def_id(def_id);
- if typeck_root_def_id != def_id {
- return tcx.has_typeck_results(typeck_root_def_id);
- }
-
- if let Some(def_id) = def_id.as_local() {
- let id = tcx.hir().local_def_id_to_hir_id(def_id);
- primary_body_of(tcx, id).is_some()
- } else {
- false
- }
-}
-
-fn used_trait_imports(tcx: TyCtxt<'_>, def_id: LocalDefId) -> &FxHashSet<LocalDefId> {
- &*tcx.typeck(def_id).used_trait_imports
-}
-
-fn typeck_const_arg<'tcx>(
- tcx: TyCtxt<'tcx>,
- (did, param_did): (LocalDefId, DefId),
-) -> &ty::TypeckResults<'tcx> {
- let fallback = move || tcx.type_of(param_did);
- typeck_with_fallback(tcx, did, fallback)
-}
-
-fn typeck<'tcx>(tcx: TyCtxt<'tcx>, def_id: LocalDefId) -> &ty::TypeckResults<'tcx> {
- if let Some(param_did) = tcx.opt_const_param_of(def_id) {
- tcx.typeck_const_arg((def_id, param_did))
- } else {
- let fallback = move || tcx.type_of(def_id.to_def_id());
- typeck_with_fallback(tcx, def_id, fallback)
- }
-}
-
-/// Used only to get `TypeckResults` for type inference during error recovery.
-/// Currently only used for type inference of `static`s and `const`s to avoid type cycle errors.
-fn diagnostic_only_typeck<'tcx>(tcx: TyCtxt<'tcx>, def_id: LocalDefId) -> &ty::TypeckResults<'tcx> {
- let fallback = move || {
- let span = tcx.hir().span(tcx.hir().local_def_id_to_hir_id(def_id));
- tcx.ty_error_with_message(span, "diagnostic only typeck table used")
- };
- typeck_with_fallback(tcx, def_id, fallback)
-}
-
-#[instrument(skip(tcx, fallback))]
-fn typeck_with_fallback<'tcx>(
- tcx: TyCtxt<'tcx>,
- def_id: LocalDefId,
- fallback: impl Fn() -> Ty<'tcx> + 'tcx,
-) -> &'tcx ty::TypeckResults<'tcx> {
- // Closures' typeck results come from their outermost function,
- // as they are part of the same "inference environment".
- let typeck_root_def_id = tcx.typeck_root_def_id(def_id.to_def_id()).expect_local();
- if typeck_root_def_id != def_id {
- return tcx.typeck(typeck_root_def_id);
- }
-
- let id = tcx.hir().local_def_id_to_hir_id(def_id);
- let span = tcx.hir().span(id);
-
- // Figure out what primary body this item has.
- let (body_id, body_ty, fn_sig) = primary_body_of(tcx, id).unwrap_or_else(|| {
- span_bug!(span, "can't type-check body of {:?}", def_id);
- });
- let body = tcx.hir().body(body_id);
-
- let typeck_results = Inherited::build(tcx, def_id).enter(|inh| {
- let param_env = tcx.param_env(def_id);
- let fcx = if let Some(hir::FnSig { header, decl, .. }) = fn_sig {
- let fn_sig = if crate::collect::get_infer_ret_ty(&decl.output).is_some() {
- let fcx = FnCtxt::new(&inh, param_env, body.value.hir_id);
- <dyn AstConv<'_>>::ty_of_fn(&fcx, id, header.unsafety, header.abi, decl, None, None)
- } else {
- tcx.fn_sig(def_id)
- };
-
- check_abi(tcx, id, span, fn_sig.abi());
-
- // Compute the function signature from point of view of inside the fn.
- let fn_sig = tcx.liberate_late_bound_regions(def_id.to_def_id(), fn_sig);
- let fn_sig = inh.normalize_associated_types_in(
- body.value.span,
- body_id.hir_id,
- param_env,
- fn_sig,
- );
- check_fn(&inh, param_env, fn_sig, decl, id, body, None, true).0
- } else {
- let fcx = FnCtxt::new(&inh, param_env, body.value.hir_id);
- let expected_type = body_ty
- .and_then(|ty| match ty.kind {
- hir::TyKind::Infer => Some(<dyn AstConv<'_>>::ast_ty_to_ty(&fcx, ty)),
- _ => None,
- })
- .unwrap_or_else(|| match tcx.hir().get(id) {
- Node::AnonConst(_) => match tcx.hir().get(tcx.hir().get_parent_node(id)) {
- Node::Expr(&hir::Expr {
- kind: hir::ExprKind::ConstBlock(ref anon_const),
- ..
- }) if anon_const.hir_id == id => fcx.next_ty_var(TypeVariableOrigin {
- kind: TypeVariableOriginKind::TypeInference,
- span,
- }),
- Node::Ty(&hir::Ty {
- kind: hir::TyKind::Typeof(ref anon_const), ..
- }) if anon_const.hir_id == id => fcx.next_ty_var(TypeVariableOrigin {
- kind: TypeVariableOriginKind::TypeInference,
- span,
- }),
- Node::Expr(&hir::Expr { kind: hir::ExprKind::InlineAsm(asm), .. })
- | Node::Item(&hir::Item { kind: hir::ItemKind::GlobalAsm(asm), .. }) => {
- let operand_ty = asm
- .operands
- .iter()
- .filter_map(|(op, _op_sp)| match op {
- hir::InlineAsmOperand::Const { anon_const }
- if anon_const.hir_id == id =>
- {
- // Inline assembly constants must be integers.
- Some(fcx.next_int_var())
- }
- hir::InlineAsmOperand::SymFn { anon_const }
- if anon_const.hir_id == id =>
- {
- Some(fcx.next_ty_var(TypeVariableOrigin {
- kind: TypeVariableOriginKind::MiscVariable,
- span,
- }))
- }
- _ => None,
- })
- .next();
- operand_ty.unwrap_or_else(fallback)
- }
- _ => fallback(),
- },
- _ => fallback(),
- });
-
- let expected_type = fcx.normalize_associated_types_in(body.value.span, expected_type);
- fcx.require_type_is_sized(expected_type, body.value.span, traits::ConstSized);
-
- // Gather locals in statics (because of block expressions).
- GatherLocalsVisitor::new(&fcx).visit_body(body);
-
- fcx.check_expr_coercable_to_type(&body.value, expected_type, None);
-
- fcx.write_ty(id, expected_type);
-
- fcx
- };
-
- let fallback_has_occurred = fcx.type_inference_fallback();
-
- // Even though coercion casts provide type hints, we check casts after fallback for
- // backwards compatibility. This makes fallback a stronger type hint than a cast coercion.
- fcx.check_casts();
- fcx.select_obligations_where_possible(fallback_has_occurred, |_| {});
-
- // Closure and generator analysis may run after fallback
- // because they don't constrain other type variables.
- fcx.closure_analyze(body);
- assert!(fcx.deferred_call_resolutions.borrow().is_empty());
- // Before the generator analysis, temporary scopes shall be marked to provide more
- // precise information on types to be captured.
- fcx.resolve_rvalue_scopes(def_id.to_def_id());
- fcx.resolve_generator_interiors(def_id.to_def_id());
-
- for (ty, span, code) in fcx.deferred_sized_obligations.borrow_mut().drain(..) {
- let ty = fcx.normalize_ty(span, ty);
- fcx.require_type_is_sized(ty, span, code);
- }
-
- fcx.select_all_obligations_or_error();
-
- if !fcx.infcx.is_tainted_by_errors() {
- fcx.check_transmutes();
- }
-
- fcx.check_asms();
-
- fcx.infcx.skip_region_resolution();
-
- fcx.resolve_type_vars_in_body(body)
- });
-
- // Consistency check: our TypeckResults instance can hold all the ItemLocalIds
- // it will need to hold.
- assert_eq!(typeck_results.hir_owner, id.owner);
-
- typeck_results
-}
-
- /// When `check_fn` is invoked on a generator (i.e., a body that
- /// includes yield), it returns some information about the yield
- /// points.
-struct GeneratorTypes<'tcx> {
- /// Type of generator argument / values returned by `yield`.
- resume_ty: Ty<'tcx>,
-
- /// Type of value that is yielded.
- yield_ty: Ty<'tcx>,
-
- /// Types that are captured (see `GeneratorInterior` for more).
- interior: Ty<'tcx>,
-
- /// Indicates if the generator is movable or static (immovable).
- movability: hir::Movability,
-}
-
-/// Given a `DefId` for an opaque type in return position, find its parent item's return
-/// expressions.
-fn get_owner_return_paths<'tcx>(
- tcx: TyCtxt<'tcx>,
- def_id: LocalDefId,
-) -> Option<(LocalDefId, ReturnsVisitor<'tcx>)> {
- let hir_id = tcx.hir().local_def_id_to_hir_id(def_id);
- let parent_id = tcx.hir().get_parent_item(hir_id);
- tcx.hir().find_by_def_id(parent_id).and_then(|node| node.body_id()).map(|body_id| {
- let body = tcx.hir().body(body_id);
- let mut visitor = ReturnsVisitor::default();
- visitor.visit_body(body);
- (parent_id, visitor)
- })
-}
-
-// Forbid defining intrinsics in Rust code,
-// as they must always be defined by the compiler.
-fn fn_maybe_err(tcx: TyCtxt<'_>, sp: Span, abi: Abi) {
- if let Abi::RustIntrinsic | Abi::PlatformIntrinsic = abi {
- tcx.sess.span_err(sp, "intrinsic must be in `extern \"rust-intrinsic\" { ... }` block");
- }
-}
-
-fn maybe_check_static_with_link_section(tcx: TyCtxt<'_>, id: LocalDefId) {
- // Only restricted on wasm target for now
- if !tcx.sess.target.is_like_wasm {
- return;
- }
-
- // If `#[link_section]` is missing, then nothing to verify
- let attrs = tcx.codegen_fn_attrs(id);
- if attrs.link_section.is_none() {
- return;
- }
-
- // For the wasm32 target, statics with `#[link_section]` are placed into custom
- // sections of the final output file, but this isn't like custom sections of
- // other executable formats. Namely, we can only embed a list of bytes,
- // nothing with pointers to anything else or relocations. If any relocations
- // show up, reject them here.
- // `#[link_section]` may contain arbitrary, or even undefined, bytes, but it is
- // the consumer's responsibility to ensure all bytes that have been read
- // have defined values.
- if let Ok(alloc) = tcx.eval_static_initializer(id.to_def_id())
- && alloc.inner().relocations().len() != 0
- {
- let msg = "statics with a custom `#[link_section]` must be a \
- simple list of bytes on the wasm target with no \
- extra levels of indirection such as references";
- tcx.sess.span_err(tcx.def_span(id), msg);
- }
-}
-
-fn report_forbidden_specialization(
- tcx: TyCtxt<'_>,
- impl_item: &hir::ImplItemRef,
- parent_impl: DefId,
-) {
- let mut err = struct_span_err!(
- tcx.sess,
- impl_item.span,
- E0520,
- "`{}` specializes an item from a parent `impl`, but \
- that item is not marked `default`",
- impl_item.ident
- );
- err.span_label(impl_item.span, format!("cannot specialize default item `{}`", impl_item.ident));
-
- match tcx.span_of_impl(parent_impl) {
- Ok(span) => {
- err.span_label(span, "parent `impl` is here");
- err.note(&format!(
- "to specialize, `{}` in the parent `impl` must be marked `default`",
- impl_item.ident
- ));
- }
- Err(cname) => {
- err.note(&format!("parent implementation is in crate `{cname}`"));
- }
- }
-
- err.emit();
-}
-
-fn missing_items_err(
- tcx: TyCtxt<'_>,
- impl_span: Span,
- missing_items: &[&ty::AssocItem],
- full_impl_span: Span,
-) {
- let missing_items_msg = missing_items
- .iter()
- .map(|trait_item| trait_item.name.to_string())
- .collect::<Vec<_>>()
- .join("`, `");
-
- let mut err = struct_span_err!(
- tcx.sess,
- impl_span,
- E0046,
- "not all trait items implemented, missing: `{missing_items_msg}`",
- );
- err.span_label(impl_span, format!("missing `{missing_items_msg}` in implementation"));
-
- // `Span` before impl block closing brace.
- let hi = full_impl_span.hi() - BytePos(1);
- // Point at the place right before the closing brace of the relevant `impl` to suggest
- // adding the associated item at the end of its body.
- let sugg_sp = full_impl_span.with_lo(hi).with_hi(hi);
- // Obtain the level of indentation ending in `sugg_sp`.
- let padding =
- tcx.sess.source_map().indentation_before(sugg_sp).unwrap_or_else(|| String::new());
-
- for trait_item in missing_items {
- let snippet = suggestion_signature(trait_item, tcx);
- let code = format!("{}{}\n{}", padding, snippet, padding);
- let msg = format!("implement the missing item: `{snippet}`");
- let appl = Applicability::HasPlaceholders;
- if let Some(span) = tcx.hir().span_if_local(trait_item.def_id) {
- err.span_label(span, format!("`{}` from trait", trait_item.name));
- err.tool_only_span_suggestion(sugg_sp, &msg, code, appl);
- } else {
- err.span_suggestion_hidden(sugg_sp, &msg, code, appl);
- }
- }
- err.emit();
-}
-
-fn missing_items_must_implement_one_of_err(
- tcx: TyCtxt<'_>,
- impl_span: Span,
- missing_items: &[Ident],
- annotation_span: Option<Span>,
-) {
- let missing_items_msg =
- missing_items.iter().map(Ident::to_string).collect::<Vec<_>>().join("`, `");
-
- let mut err = struct_span_err!(
- tcx.sess,
- impl_span,
- E0046,
- "not all trait items implemented, missing one of: `{missing_items_msg}`",
- );
- err.span_label(impl_span, format!("missing one of `{missing_items_msg}` in implementation"));
-
- if let Some(annotation_span) = annotation_span {
- err.span_note(annotation_span, "required because of this annotation");
- }
-
- err.emit();
-}
-
-/// Re-sugar `ty::GenericPredicates` in a way suitable to be used in structured suggestions.
-fn bounds_from_generic_predicates<'tcx>(
- tcx: TyCtxt<'tcx>,
- predicates: ty::GenericPredicates<'tcx>,
-) -> (String, String) {
- let mut types: FxHashMap<Ty<'tcx>, Vec<DefId>> = FxHashMap::default();
- let mut projections = vec![];
- for (predicate, _) in predicates.predicates {
- debug!("predicate {:?}", predicate);
- let bound_predicate = predicate.kind();
- match bound_predicate.skip_binder() {
- ty::PredicateKind::Trait(trait_predicate) => {
- let entry = types.entry(trait_predicate.self_ty()).or_default();
- let def_id = trait_predicate.def_id();
- if Some(def_id) != tcx.lang_items().sized_trait() {
- // Type params are `Sized` by default, so do not add that restriction to the
- // list if it is a positive requirement.
- entry.push(trait_predicate.def_id());
- }
- }
- ty::PredicateKind::Projection(projection_pred) => {
- projections.push(bound_predicate.rebind(projection_pred));
- }
- _ => {}
- }
- }
- let generics = if types.is_empty() {
- "".to_string()
- } else {
- format!(
- "<{}>",
- types
- .keys()
- .filter_map(|t| match t.kind() {
- ty::Param(_) => Some(t.to_string()),
- // Avoid suggesting the following:
- // fn foo<T, <T as Trait>::Bar>(_: T) where T: Trait, <T as Trait>::Bar: Other {}
- _ => None,
- })
- .collect::<Vec<_>>()
- .join(", ")
- )
- };
- let mut where_clauses = vec![];
- for (ty, bounds) in types {
- where_clauses
- .extend(bounds.into_iter().map(|bound| format!("{}: {}", ty, tcx.def_path_str(bound))));
- }
- for projection in &projections {
- let p = projection.skip_binder();
- // FIXME: this is not currently supported syntax; we should be looking at the `types` and
- // inserting the associated types where they correspond, but for now let's be "lazy" and
- // propose this instead of the following valid resugaring:
- // `T: Trait, Trait::Assoc = K` → `T: Trait<Assoc = K>`
- where_clauses.push(format!(
- "{} = {}",
- tcx.def_path_str(p.projection_ty.item_def_id),
- p.term,
- ));
- }
- let where_clauses = if where_clauses.is_empty() {
- String::new()
- } else {
- format!(" where {}", where_clauses.join(", "))
- };
- (generics, where_clauses)
-}
-
-/// Return placeholder code for the given function.
-fn fn_sig_suggestion<'tcx>(
- tcx: TyCtxt<'tcx>,
- sig: ty::FnSig<'tcx>,
- ident: Ident,
- predicates: ty::GenericPredicates<'tcx>,
- assoc: &ty::AssocItem,
-) -> String {
- let args = sig
- .inputs()
- .iter()
- .enumerate()
- .map(|(i, ty)| {
- Some(match ty.kind() {
- ty::Param(_) if assoc.fn_has_self_parameter && i == 0 => "self".to_string(),
- ty::Ref(reg, ref_ty, mutability) if i == 0 => {
- let reg = format!("{reg} ");
- let reg = match &reg[..] {
- "'_ " | " " => "",
- reg => reg,
- };
- if assoc.fn_has_self_parameter {
- match ref_ty.kind() {
- ty::Param(param) if param.name == kw::SelfUpper => {
- format!("&{}{}self", reg, mutability.prefix_str())
- }
-
- _ => format!("self: {ty}"),
- }
- } else {
- format!("_: {ty}")
- }
- }
- _ => {
- if assoc.fn_has_self_parameter && i == 0 {
- format!("self: {ty}")
- } else {
- format!("_: {ty}")
- }
- }
- })
- })
- .chain(std::iter::once(if sig.c_variadic { Some("...".to_string()) } else { None }))
- .flatten()
- .collect::<Vec<String>>()
- .join(", ");
- let output = sig.output();
- let output = if !output.is_unit() { format!(" -> {output}") } else { String::new() };
-
- let unsafety = sig.unsafety.prefix_str();
- let (generics, where_clauses) = bounds_from_generic_predicates(tcx, predicates);
-
- // FIXME: this is not entirely correct, as the lifetimes from borrowed params will
- // not be present in the `fn` definition, nor will we account for renamed
- // lifetimes between the `impl` and the `trait`, but this should be good enough to
- // fill in a significant portion of the missing code, and other subsequent
- // suggestions can help the user fix the code.
- format!("{unsafety}fn {ident}{generics}({args}){output}{where_clauses} {{ todo!() }}")
-}
-
-/// Return placeholder code for the given associated item.
-/// Similar to `ty::AssocItem::suggestion`, but appropriate for use as the code snippet of a
-/// structured suggestion.
-fn suggestion_signature(assoc: &ty::AssocItem, tcx: TyCtxt<'_>) -> String {
- match assoc.kind {
- ty::AssocKind::Fn => {
- // We skip the binder here because the binder would deanonymize all
- // late-bound regions, and we don't want method signatures to show up
- // as `for<'r> fn(&'r MyType)`. Pretty-printing handles late-bound
- // regions just fine, showing `fn(&MyType)`.
- fn_sig_suggestion(
- tcx,
- tcx.fn_sig(assoc.def_id).skip_binder(),
- assoc.ident(tcx),
- tcx.predicates_of(assoc.def_id),
- assoc,
- )
- }
- ty::AssocKind::Type => format!("type {} = Type;", assoc.name),
- ty::AssocKind::Const => {
- let ty = tcx.type_of(assoc.def_id);
- let val = expr::ty_kind_suggestion(ty).unwrap_or("value");
- format!("const {}: {} = {};", assoc.name, ty, val)
- }
- }
-}
-
-/// Emit an error when encountering two or more variants in a transparent enum.
-fn bad_variant_count<'tcx>(tcx: TyCtxt<'tcx>, adt: ty::AdtDef<'tcx>, sp: Span, did: DefId) {
- let variant_spans: Vec<_> = adt
- .variants()
- .iter()
- .map(|variant| tcx.hir().span_if_local(variant.def_id).unwrap())
- .collect();
- let msg = format!("needs exactly one variant, but has {}", adt.variants().len(),);
- let mut err = struct_span_err!(tcx.sess, sp, E0731, "transparent enum {msg}");
- err.span_label(sp, &msg);
- if let [start @ .., end] = &*variant_spans {
- for variant_span in start {
- err.span_label(*variant_span, "");
- }
- err.span_label(*end, &format!("too many variants in `{}`", tcx.def_path_str(did)));
- }
- err.emit();
-}
-
-/// Emit an error when encountering two or more non-zero-sized fields in a transparent
-/// enum.
-fn bad_non_zero_sized_fields<'tcx>(
- tcx: TyCtxt<'tcx>,
- adt: ty::AdtDef<'tcx>,
- field_count: usize,
- field_spans: impl Iterator<Item = Span>,
- sp: Span,
-) {
- let msg = format!("needs at most one non-zero-sized field, but has {field_count}");
- let mut err = struct_span_err!(
- tcx.sess,
- sp,
- E0690,
- "{}transparent {} {}",
- if adt.is_enum() { "the variant of a " } else { "" },
- adt.descr(),
- msg,
- );
- err.span_label(sp, &msg);
- for sp in field_spans {
- err.span_label(sp, "this field is non-zero-sized");
- }
- err.emit();
-}
-
-fn report_unexpected_variant_res(tcx: TyCtxt<'_>, res: Res, qpath: &hir::QPath<'_>, span: Span) {
- struct_span_err!(
- tcx.sess,
- span,
- E0533,
- "expected unit struct, unit variant or constant, found {} `{}`",
- res.descr(),
- rustc_hir_pretty::qpath_to_string(qpath),
- )
- .emit();
-}
-
-/// Controls whether the arguments are tupled. This is used for the call
-/// operator.
-///
-/// Tupling means that all call-side arguments are packed into a tuple and
-/// passed as a single parameter. For example, if tupling is enabled, this
-/// function:
-/// ```
-/// fn f(x: (isize, isize)) {}
-/// ```
-/// Can be called as:
-/// ```ignore UNSOLVED (can this be done in user code?)
-/// # fn f(x: (isize, isize)) {}
-/// f(1, 2);
-/// ```
-/// Instead of:
-/// ```
-/// # fn f(x: (isize, isize)) {}
-/// f((1, 2));
-/// ```
-#[derive(Clone, Eq, PartialEq)]
-enum TupleArgumentsFlag {
- DontTupleArguments,
- TupleArguments,
-}
-
-fn typeck_item_bodies(tcx: TyCtxt<'_>, (): ()) {
- tcx.hir().par_body_owners(|body_owner_def_id| tcx.ensure().typeck(body_owner_def_id));
-}
-
-fn fatally_break_rust(sess: &Session) {
- let handler = sess.diagnostic();
- handler.span_bug_no_panic(
- MultiSpan::new(),
- "It looks like you're trying to break rust; would you like some ICE?",
- );
- handler.note_without_error("the compiler expectedly panicked. this is a feature.");
- handler.note_without_error(
- "we would appreciate a joke overview: \
- https://github.com/rust-lang/rust/issues/43162#issuecomment-320764675",
- );
- handler.note_without_error(&format!(
- "rustc {} running on {}",
- option_env!("CFG_VERSION").unwrap_or("unknown_version"),
- config::host_triple(),
- ));
-}
-
-fn potentially_plural_count(count: usize, word: &str) -> String {
- format!("{} {}{}", count, word, pluralize!(count))
-}
-
-fn has_expected_num_generic_args<'tcx>(
- tcx: TyCtxt<'tcx>,
- trait_did: Option<DefId>,
- expected: usize,
-) -> bool {
- trait_did.map_or(true, |trait_did| {
- let generics = tcx.generics_of(trait_did);
- generics.count() == expected + if generics.has_self { 1 } else { 0 }
- })
-}
-
-/// Suggests calling the constructor of a tuple struct or enum variant
-///
-/// * `snippet` - The snippet of code that references the constructor
-/// * `span` - The span of the snippet
-/// * `params` - The number of parameters the constructor accepts
-/// * `err` - A mutable diagnostic builder to add the suggestion to
-fn suggest_call_constructor<G: EmissionGuarantee>(
- span: Span,
- kind: CtorOf,
- params: usize,
- err: &mut DiagnosticBuilder<'_, G>,
-) {
- // Note: tuple-structs don't have named fields, so just use placeholders
- let args = vec!["_"; params].join(", ");
- let applicable = if params > 0 {
- Applicability::HasPlaceholders
- } else {
- // When `params` is 0, it's an empty-tuple struct/enum variant,
- // so we trivially know how to construct it.
- Applicability::MachineApplicable
- };
- let kind = match kind {
- CtorOf::Struct => "a struct",
- CtorOf::Variant => "an enum variant",
- };
- err.span_label(span, &format!("this is the constructor of {kind}"));
- err.multipart_suggestion(
- "call the constructor",
- vec![(span.shrink_to_lo(), "(".to_string()), (span.shrink_to_hi(), format!(")({args})"))],
- applicable,
- );
-}
diff --git a/compiler/rustc_typeck/src/check/op.rs b/compiler/rustc_typeck/src/check/op.rs
deleted file mode 100644
index 920b3e688..000000000
--- a/compiler/rustc_typeck/src/check/op.rs
+++ /dev/null
@@ -1,1076 +0,0 @@
-//! Code related to processing overloaded binary and unary operators.
-
-use super::method::MethodCallee;
-use super::{has_expected_num_generic_args, FnCtxt};
-use crate::check::Expectation;
-use rustc_ast as ast;
-use rustc_errors::{self, struct_span_err, Applicability, Diagnostic};
-use rustc_hir as hir;
-use rustc_infer::infer::type_variable::{TypeVariableOrigin, TypeVariableOriginKind};
-use rustc_infer::traits::ObligationCauseCode;
-use rustc_middle::ty::adjustment::{
- Adjust, Adjustment, AllowTwoPhase, AutoBorrow, AutoBorrowMutability,
-};
-use rustc_middle::ty::{
- self, Ty, TyCtxt, TypeFolder, TypeSuperFoldable, TypeSuperVisitable, TypeVisitable, TypeVisitor,
-};
-use rustc_span::source_map::Spanned;
-use rustc_span::symbol::{sym, Ident};
-use rustc_span::Span;
-use rustc_trait_selection::infer::InferCtxtExt;
-use rustc_trait_selection::traits::error_reporting::suggestions::InferCtxtExt as _;
-use rustc_trait_selection::traits::{FulfillmentError, TraitEngine, TraitEngineExt};
-use rustc_type_ir::sty::TyKind::*;
-
-use std::ops::ControlFlow;
-
-impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
- /// Checks an `a <op>= b` expression.
- pub fn check_binop_assign(
- &self,
- expr: &'tcx hir::Expr<'tcx>,
- op: hir::BinOp,
- lhs: &'tcx hir::Expr<'tcx>,
- rhs: &'tcx hir::Expr<'tcx>,
- expected: Expectation<'tcx>,
- ) -> Ty<'tcx> {
- let (lhs_ty, rhs_ty, return_ty) =
- self.check_overloaded_binop(expr, lhs, rhs, op, IsAssign::Yes, expected);
-
- let ty =
- if !lhs_ty.is_ty_var() && !rhs_ty.is_ty_var() && is_builtin_binop(lhs_ty, rhs_ty, op) {
- self.enforce_builtin_binop_types(lhs.span, lhs_ty, rhs.span, rhs_ty, op);
- self.tcx.mk_unit()
- } else {
- return_ty
- };
-
- self.check_lhs_assignable(lhs, "E0067", op.span, |err| {
- if let Some(lhs_deref_ty) = self.deref_once_mutably_for_diagnostic(lhs_ty) {
- if self
- .lookup_op_method(
- lhs_deref_ty,
- Some(rhs_ty),
- Some(rhs),
- Op::Binary(op, IsAssign::Yes),
- expected,
- )
- .is_ok()
- {
- // Suppress this error, since we already emitted
- // a deref suggestion in check_overloaded_binop
- err.delay_as_bug();
- }
- }
- });
-
- ty
- }
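// Illustrative sketch, not part of the deleted file: the deref suggestion wired
// up above. Assigning through a `&mut` reference needs an explicit `*`; without
// it, E0368 is reported and the suggestion points at adding the deref.
fn bump(counter: &mut i32) {
    // counter += 1; // error[E0368]: binary assignment operation `+=` cannot
    //               // be applied to type `&mut i32`
    *counter += 1; // accepted
}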
-
- /// Checks a potentially overloaded binary operator.
- pub fn check_binop(
- &self,
- expr: &'tcx hir::Expr<'tcx>,
- op: hir::BinOp,
- lhs_expr: &'tcx hir::Expr<'tcx>,
- rhs_expr: &'tcx hir::Expr<'tcx>,
- expected: Expectation<'tcx>,
- ) -> Ty<'tcx> {
- let tcx = self.tcx;
-
- debug!(
- "check_binop(expr.hir_id={}, expr={:?}, op={:?}, lhs_expr={:?}, rhs_expr={:?})",
- expr.hir_id, expr, op, lhs_expr, rhs_expr
- );
-
- match BinOpCategory::from(op) {
- BinOpCategory::Shortcircuit => {
- // && and || are a simple case.
- self.check_expr_coercable_to_type(lhs_expr, tcx.types.bool, None);
- let lhs_diverges = self.diverges.get();
- self.check_expr_coercable_to_type(rhs_expr, tcx.types.bool, None);
-
- // Depending on the LHS' value, the RHS might never execute.
- self.diverges.set(lhs_diverges);
-
- tcx.types.bool
- }
- _ => {
- // Otherwise, we always treat operators as if they are
- // overloaded. This is the way to be most flexible w/r/t
- // types that get inferred.
- let (lhs_ty, rhs_ty, return_ty) = self.check_overloaded_binop(
- expr,
- lhs_expr,
- rhs_expr,
- op,
- IsAssign::No,
- expected,
- );
-
- // Supply type inference hints if relevant. Probably these
- // hints should be enforced during select as part of the
- // `consider_unification_despite_ambiguity` routine, but this is
- // more convenient for now.
- //
- // The basic idea is to help type inference by taking
- // advantage of things we know about how the impls for
- // scalar types are arranged. This is important in a
- // scenario like `1_u32 << 2`, because it lets us quickly
- // deduce that the result type should be `u32`, even
- // though we don't know yet what type 2 has and hence
- // can't pin this down to a specific impl.
- if !lhs_ty.is_ty_var()
- && !rhs_ty.is_ty_var()
- && is_builtin_binop(lhs_ty, rhs_ty, op)
- {
- let builtin_return_ty = self.enforce_builtin_binop_types(
- lhs_expr.span,
- lhs_ty,
- rhs_expr.span,
- rhs_ty,
- op,
- );
- self.demand_suptype(expr.span, builtin_return_ty, return_ty);
- }
-
- return_ty
- }
- }
- }
-
- fn enforce_builtin_binop_types(
- &self,
- lhs_span: Span,
- lhs_ty: Ty<'tcx>,
- rhs_span: Span,
- rhs_ty: Ty<'tcx>,
- op: hir::BinOp,
- ) -> Ty<'tcx> {
- debug_assert!(is_builtin_binop(lhs_ty, rhs_ty, op));
-
- // Special-case a single layer of referencing, so that things like `5.0 + &6.0f32` work.
- // (See https://github.com/rust-lang/rust/issues/57447.)
- let (lhs_ty, rhs_ty) = (deref_ty_if_possible(lhs_ty), deref_ty_if_possible(rhs_ty));
-
- let tcx = self.tcx;
- match BinOpCategory::from(op) {
- BinOpCategory::Shortcircuit => {
- self.demand_suptype(lhs_span, tcx.types.bool, lhs_ty);
- self.demand_suptype(rhs_span, tcx.types.bool, rhs_ty);
- tcx.types.bool
- }
-
- BinOpCategory::Shift => {
- // result type is same as LHS always
- lhs_ty
- }
-
- BinOpCategory::Math | BinOpCategory::Bitwise => {
- // the LHS, RHS, and result all have the same type
- self.demand_suptype(rhs_span, lhs_ty, rhs_ty);
- lhs_ty
- }
-
- BinOpCategory::Comparison => {
- // the LHS and RHS must have the same type; the result is `bool`
- self.demand_suptype(rhs_span, lhs_ty, rhs_ty);
- tcx.types.bool
- }
- }
- }
-
- fn check_overloaded_binop(
- &self,
- expr: &'tcx hir::Expr<'tcx>,
- lhs_expr: &'tcx hir::Expr<'tcx>,
- rhs_expr: &'tcx hir::Expr<'tcx>,
- op: hir::BinOp,
- is_assign: IsAssign,
- expected: Expectation<'tcx>,
- ) -> (Ty<'tcx>, Ty<'tcx>, Ty<'tcx>) {
- debug!(
- "check_overloaded_binop(expr.hir_id={}, op={:?}, is_assign={:?})",
- expr.hir_id, op, is_assign
- );
-
- let lhs_ty = match is_assign {
- IsAssign::No => {
- // Find a suitable supertype of the LHS expression's type, by coercing to
- // a type variable, to pass as the `Self` to the trait, avoiding invariant
- // trait matching creating lifetime constraints that are too strict.
- // e.g., adding `&'a T` and `&'b T`, given `&'x T: Add<&'x T>`, will result
- // in `&'a T <: &'x T` and `&'b T <: &'x T`, instead of `'a = 'b = 'x`.
- let lhs_ty = self.check_expr(lhs_expr);
- let fresh_var = self.next_ty_var(TypeVariableOrigin {
- kind: TypeVariableOriginKind::MiscVariable,
- span: lhs_expr.span,
- });
- self.demand_coerce(lhs_expr, lhs_ty, fresh_var, Some(rhs_expr), AllowTwoPhase::No)
- }
- IsAssign::Yes => {
- // rust-lang/rust#52126: We have to use strict
- // equivalence on the LHS of an assign-op like `+=`;
- // overwritten or mutably-borrowed places cannot be
- // coerced to a supertype.
- self.check_expr(lhs_expr)
- }
- };
- let lhs_ty = self.resolve_vars_with_obligations(lhs_ty);
-
- // N.B., as we have not yet type-checked the RHS, we don't have the
- // type at hand. Make a variable to represent it. The whole reason
- // for this indirection is so that, below, we can check the expr
- // using this variable as the expected type, which sometimes lets
- // us do better coercions than we would be able to do otherwise,
- // particularly for things like `String + &String`.
- let rhs_ty_var = self.next_ty_var(TypeVariableOrigin {
- kind: TypeVariableOriginKind::MiscVariable,
- span: rhs_expr.span,
- });
-
- let result = self.lookup_op_method(
- lhs_ty,
- Some(rhs_ty_var),
- Some(rhs_expr),
- Op::Binary(op, is_assign),
- expected,
- );
-
- // see `NB` above
- let rhs_ty = self.check_expr_coercable_to_type(rhs_expr, rhs_ty_var, Some(lhs_expr));
- let rhs_ty = self.resolve_vars_with_obligations(rhs_ty);
-
- let return_ty = match result {
- Ok(method) => {
- let by_ref_binop = !op.node.is_by_value();
- if is_assign == IsAssign::Yes || by_ref_binop {
- if let ty::Ref(region, _, mutbl) = method.sig.inputs()[0].kind() {
- let mutbl = match mutbl {
- hir::Mutability::Not => AutoBorrowMutability::Not,
- hir::Mutability::Mut => AutoBorrowMutability::Mut {
- // Allow two-phase borrows for binops in initial deployment
- // since they desugar to methods
- allow_two_phase_borrow: AllowTwoPhase::Yes,
- },
- };
- let autoref = Adjustment {
- kind: Adjust::Borrow(AutoBorrow::Ref(*region, mutbl)),
- target: method.sig.inputs()[0],
- };
- self.apply_adjustments(lhs_expr, vec![autoref]);
- }
- }
- if by_ref_binop {
- if let ty::Ref(region, _, mutbl) = method.sig.inputs()[1].kind() {
- let mutbl = match mutbl {
- hir::Mutability::Not => AutoBorrowMutability::Not,
- hir::Mutability::Mut => AutoBorrowMutability::Mut {
- // Allow two-phase borrows for binops in initial deployment
- // since they desugar to methods
- allow_two_phase_borrow: AllowTwoPhase::Yes,
- },
- };
- let autoref = Adjustment {
- kind: Adjust::Borrow(AutoBorrow::Ref(*region, mutbl)),
- target: method.sig.inputs()[1],
- };
- // HACK(eddyb) Bypass checks due to reborrows being in
- // some cases applied on the RHS, on top of which we need
- // to autoref, which is not allowed by apply_adjustments.
- // self.apply_adjustments(rhs_expr, vec![autoref]);
- self.typeck_results
- .borrow_mut()
- .adjustments_mut()
- .entry(rhs_expr.hir_id)
- .or_default()
- .push(autoref);
- }
- }
- self.write_method_call(expr.hir_id, method);
-
- method.sig.output()
- }
- // error types are considered "builtin"
- Err(_) if lhs_ty.references_error() || rhs_ty.references_error() => self.tcx.ty_error(),
- Err(errors) => {
- let source_map = self.tcx.sess.source_map();
- let (mut err, missing_trait, use_output) = match is_assign {
- IsAssign::Yes => {
- let mut err = struct_span_err!(
- self.tcx.sess,
- expr.span,
- E0368,
- "binary assignment operation `{}=` cannot be applied to type `{}`",
- op.node.as_str(),
- lhs_ty,
- );
- err.span_label(
- lhs_expr.span,
- format!("cannot use `{}=` on type `{}`", op.node.as_str(), lhs_ty),
- );
- let missing_trait = match op.node {
- hir::BinOpKind::Add => Some("std::ops::AddAssign"),
- hir::BinOpKind::Sub => Some("std::ops::SubAssign"),
- hir::BinOpKind::Mul => Some("std::ops::MulAssign"),
- hir::BinOpKind::Div => Some("std::ops::DivAssign"),
- hir::BinOpKind::Rem => Some("std::ops::RemAssign"),
- hir::BinOpKind::BitAnd => Some("std::ops::BitAndAssign"),
- hir::BinOpKind::BitXor => Some("std::ops::BitXorAssign"),
- hir::BinOpKind::BitOr => Some("std::ops::BitOrAssign"),
- hir::BinOpKind::Shl => Some("std::ops::ShlAssign"),
- hir::BinOpKind::Shr => Some("std::ops::ShrAssign"),
- _ => None,
- };
- self.note_unmet_impls_on_type(&mut err, errors);
- (err, missing_trait, false)
- }
- IsAssign::No => {
- let (message, missing_trait, use_output) = match op.node {
- hir::BinOpKind::Add => (
- format!("cannot add `{rhs_ty}` to `{lhs_ty}`"),
- Some("std::ops::Add"),
- true,
- ),
- hir::BinOpKind::Sub => (
- format!("cannot subtract `{rhs_ty}` from `{lhs_ty}`"),
- Some("std::ops::Sub"),
- true,
- ),
- hir::BinOpKind::Mul => (
- format!("cannot multiply `{lhs_ty}` by `{rhs_ty}`"),
- Some("std::ops::Mul"),
- true,
- ),
- hir::BinOpKind::Div => (
- format!("cannot divide `{lhs_ty}` by `{rhs_ty}`"),
- Some("std::ops::Div"),
- true,
- ),
- hir::BinOpKind::Rem => (
- format!("cannot mod `{lhs_ty}` by `{rhs_ty}`"),
- Some("std::ops::Rem"),
- true,
- ),
- hir::BinOpKind::BitAnd => (
- format!("no implementation for `{lhs_ty} & {rhs_ty}`"),
- Some("std::ops::BitAnd"),
- true,
- ),
- hir::BinOpKind::BitXor => (
- format!("no implementation for `{lhs_ty} ^ {rhs_ty}`"),
- Some("std::ops::BitXor"),
- true,
- ),
- hir::BinOpKind::BitOr => (
- format!("no implementation for `{lhs_ty} | {rhs_ty}`"),
- Some("std::ops::BitOr"),
- true,
- ),
- hir::BinOpKind::Shl => (
- format!("no implementation for `{lhs_ty} << {rhs_ty}`"),
- Some("std::ops::Shl"),
- true,
- ),
- hir::BinOpKind::Shr => (
- format!("no implementation for `{lhs_ty} >> {rhs_ty}`"),
- Some("std::ops::Shr"),
- true,
- ),
- hir::BinOpKind::Eq | hir::BinOpKind::Ne => (
- format!(
- "binary operation `{}` cannot be applied to type `{}`",
- op.node.as_str(),
- lhs_ty
- ),
- Some("std::cmp::PartialEq"),
- false,
- ),
- hir::BinOpKind::Lt
- | hir::BinOpKind::Le
- | hir::BinOpKind::Gt
- | hir::BinOpKind::Ge => (
- format!(
- "binary operation `{}` cannot be applied to type `{}`",
- op.node.as_str(),
- lhs_ty
- ),
- Some("std::cmp::PartialOrd"),
- false,
- ),
- _ => (
- format!(
- "binary operation `{}` cannot be applied to type `{}`",
- op.node.as_str(),
- lhs_ty
- ),
- None,
- false,
- ),
- };
- let mut err = struct_span_err!(self.tcx.sess, op.span, E0369, "{message}");
- if !lhs_expr.span.eq(&rhs_expr.span) {
- self.add_type_neq_err_label(
- &mut err,
- lhs_expr.span,
- lhs_ty,
- rhs_ty,
- rhs_expr,
- op,
- is_assign,
- expected,
- );
- self.add_type_neq_err_label(
- &mut err,
- rhs_expr.span,
- rhs_ty,
- lhs_ty,
- lhs_expr,
- op,
- is_assign,
- expected,
- );
- }
- self.note_unmet_impls_on_type(&mut err, errors);
- (err, missing_trait, use_output)
- }
- };
-
- let mut suggest_deref_binop = |lhs_deref_ty: Ty<'tcx>| {
- if self
- .lookup_op_method(
- lhs_deref_ty,
- Some(rhs_ty),
- Some(rhs_expr),
- Op::Binary(op, is_assign),
- expected,
- )
- .is_ok()
- {
- if let Ok(lstring) = source_map.span_to_snippet(lhs_expr.span) {
- let msg = &format!(
- "`{}{}` can be used on `{}`, you can dereference `{}`",
- op.node.as_str(),
- match is_assign {
- IsAssign::Yes => "=",
- IsAssign::No => "",
- },
- lhs_deref_ty.peel_refs(),
- lstring,
- );
- err.span_suggestion_verbose(
- lhs_expr.span.shrink_to_lo(),
- msg,
- "*",
- rustc_errors::Applicability::MachineApplicable,
- );
- }
- }
- };
-
- // We should suggest `a + b` => `*a + b` if `a` is copy, and suggest
- // `a += b` => `*a += b` if `a` is a mut ref.
- if is_assign == IsAssign::Yes
- && let Some(lhs_deref_ty) = self.deref_once_mutably_for_diagnostic(lhs_ty) {
- suggest_deref_binop(lhs_deref_ty);
- } else if is_assign == IsAssign::No
- && let Ref(_, lhs_deref_ty, _) = lhs_ty.kind() {
- if self.type_is_copy_modulo_regions(self.param_env, *lhs_deref_ty, lhs_expr.span) {
- suggest_deref_binop(*lhs_deref_ty);
- }
- }
- if let Some(missing_trait) = missing_trait {
- let mut visitor = TypeParamVisitor(vec![]);
- visitor.visit_ty(lhs_ty);
-
- if op.node == hir::BinOpKind::Add
- && self.check_str_addition(
- lhs_expr, rhs_expr, lhs_ty, rhs_ty, &mut err, is_assign, op,
- )
- {
- // Nothing to do here: this means we did string
- // concatenation (e.g., "Hello " + "World!"), so
- // we don't want the note in the else clause to be emitted.
- } else if let [ty] = &visitor.0[..] {
- // Look for a TraitPredicate in the Fulfillment errors,
- // and use it to generate a suggestion.
- //
- // Note that lookup_op_method must be called again but
- // with a specific rhs_ty instead of a placeholder so
- // the resulting predicate generates a more specific
- // suggestion for the user.
- let errors = self
- .lookup_op_method(
- lhs_ty,
- Some(rhs_ty),
- Some(rhs_expr),
- Op::Binary(op, is_assign),
- expected,
- )
- .unwrap_err();
- if !errors.is_empty() {
- for error in errors {
- if let Some(trait_pred) =
- error.obligation.predicate.to_opt_poly_trait_pred()
- {
- let proj_pred = match error.obligation.cause.code() {
- ObligationCauseCode::BinOp {
- output_pred: Some(output_pred),
- ..
- } if use_output => {
- output_pred.to_opt_poly_projection_pred()
- }
- _ => None,
- };
-
- self.suggest_restricting_param_bound(
- &mut err,
- trait_pred,
- proj_pred,
- self.body_id,
- );
- }
- }
- } else if *ty != lhs_ty {
- // When we know that a missing bound is responsible, we don't show
- // this note as it is redundant.
- err.note(&format!(
- "the trait `{missing_trait}` is not implemented for `{lhs_ty}`"
- ));
- }
- }
- }
- err.emit();
- self.tcx.ty_error()
- }
- };
-
- (lhs_ty, rhs_ty, return_ty)
- }
-
- /// If one of the types is an uncalled function and calling it would yield the other type,
- /// suggest calling the function. Returns `true` if the suggestion would apply (even if it is not given).
- fn add_type_neq_err_label(
- &self,
- err: &mut Diagnostic,
- span: Span,
- ty: Ty<'tcx>,
- other_ty: Ty<'tcx>,
- other_expr: &'tcx hir::Expr<'tcx>,
- op: hir::BinOp,
- is_assign: IsAssign,
- expected: Expectation<'tcx>,
- ) -> bool /* did we suggest to call a function because of missing parentheses? */ {
- err.span_label(span, ty.to_string());
- if let FnDef(def_id, _) = *ty.kind() {
- if !self.tcx.has_typeck_results(def_id) {
- return false;
- }
- // FIXME: Instead of exiting early when encountering bound vars in
- // the function signature, consider keeping the binder here and
- // propagating it downwards.
- let Some(fn_sig) = self.tcx.fn_sig(def_id).no_bound_vars() else {
- return false;
- };
-
- let other_ty = if let FnDef(def_id, _) = *other_ty.kind() {
- if !self.tcx.has_typeck_results(def_id) {
- return false;
- }
- // We're emitting a suggestion, so we can just ignore regions
- self.tcx.fn_sig(def_id).skip_binder().output()
- } else {
- other_ty
- };
-
- if self
- .lookup_op_method(
- fn_sig.output(),
- Some(other_ty),
- Some(other_expr),
- Op::Binary(op, is_assign),
- expected,
- )
- .is_ok()
- {
- let (variable_snippet, applicability) = if !fn_sig.inputs().is_empty() {
- ("( /* arguments */ )", Applicability::HasPlaceholders)
- } else {
- ("()", Applicability::MaybeIncorrect)
- };
-
- err.span_suggestion_verbose(
- span.shrink_to_hi(),
- "you might have forgotten to call this function",
- variable_snippet,
- applicability,
- );
- return true;
- }
- }
- false
- }
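// Illustrative sketch, not part of the deleted file: the situation the label
// above targets -- an uncalled function used as an operand, where calling it
// would make the operation type-check.
fn answer() -> i32 {
    42
}

fn demo() -> i32 {
    // answer + 1 // E0369, with "you might have forgotten to call this function"
    answer() + 1 // the suggested fix
}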
-
- /// Provide actionable suggestions when trying to add two strings with incorrect types,
- /// like `&str + &str`, `String + String`, and `&str + &String`.
- ///
- /// If this function returns `true`, it means a note was printed, so we don't need
- /// to print the normal "implementation of `std::ops::Add` might be missing" note.
- fn check_str_addition(
- &self,
- lhs_expr: &'tcx hir::Expr<'tcx>,
- rhs_expr: &'tcx hir::Expr<'tcx>,
- lhs_ty: Ty<'tcx>,
- rhs_ty: Ty<'tcx>,
- err: &mut Diagnostic,
- is_assign: IsAssign,
- op: hir::BinOp,
- ) -> bool {
- let str_concat_note = "string concatenation requires an owned `String` on the left";
- let rm_borrow_msg = "remove the borrow to obtain an owned `String`";
- let to_owned_msg = "create an owned `String` from a string reference";
-
- let is_std_string = |ty: Ty<'tcx>| {
- ty.ty_adt_def()
- .map_or(false, |ty_def| self.tcx.is_diagnostic_item(sym::String, ty_def.did()))
- };
-
- match (lhs_ty.kind(), rhs_ty.kind()) {
- (&Ref(_, l_ty, _), &Ref(_, r_ty, _)) // &str or &String + &str, &String or &&str
- if (*l_ty.kind() == Str || is_std_string(l_ty))
- && (*r_ty.kind() == Str
- || is_std_string(r_ty)
- || matches!(
- r_ty.kind(), Ref(_, inner_ty, _) if *inner_ty.kind() == Str
- )) =>
- {
- if let IsAssign::No = is_assign { // Do not supply this message if `&str += &str`
- err.span_label(op.span, "`+` cannot be used to concatenate two `&str` strings");
- err.note(str_concat_note);
- if let hir::ExprKind::AddrOf(_, _, lhs_inner_expr) = lhs_expr.kind {
- err.span_suggestion_verbose(
- lhs_expr.span.until(lhs_inner_expr.span),
- rm_borrow_msg,
- "",
- Applicability::MachineApplicable
- );
- } else {
- err.span_suggestion_verbose(
- lhs_expr.span.shrink_to_hi(),
- to_owned_msg,
- ".to_owned()",
- Applicability::MachineApplicable
- );
- }
- }
- true
- }
- (&Ref(_, l_ty, _), &Adt(..)) // Handle `&str` & `&String` + `String`
- if (*l_ty.kind() == Str || is_std_string(l_ty)) && is_std_string(rhs_ty) =>
- {
- err.span_label(
- op.span,
- "`+` cannot be used to concatenate a `&str` with a `String`",
- );
- match is_assign {
- IsAssign::No => {
- let sugg_msg;
- let lhs_sugg = if let hir::ExprKind::AddrOf(_, _, lhs_inner_expr) = lhs_expr.kind {
- sugg_msg = "remove the borrow on the left and add one on the right";
- (lhs_expr.span.until(lhs_inner_expr.span), "".to_owned())
- } else {
- sugg_msg = "create an owned `String` on the left and add a borrow on the right";
- (lhs_expr.span.shrink_to_hi(), ".to_owned()".to_owned())
- };
- let suggestions = vec![
- lhs_sugg,
- (rhs_expr.span.shrink_to_lo(), "&".to_owned()),
- ];
- err.multipart_suggestion_verbose(
- sugg_msg,
- suggestions,
- Applicability::MachineApplicable,
- );
- }
- IsAssign::Yes => {
- err.note(str_concat_note);
- }
- }
- true
- }
- _ => false,
- }
- }
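// Illustrative sketch, not part of the deleted file: the rewrites the string
// concatenation diagnostics above steer users toward -- `+` needs an owned
// `String` on the left-hand side.
fn concat(a: &str, b: &str) -> String {
    // let broken = a + b; // error[E0369]: cannot add `&str` to `&str`
    a.to_owned() + b // suggested: owned `String` on the left, `&str` on the right
}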
-
- pub fn check_user_unop(
- &self,
- ex: &'tcx hir::Expr<'tcx>,
- operand_ty: Ty<'tcx>,
- op: hir::UnOp,
- expected: Expectation<'tcx>,
- ) -> Ty<'tcx> {
- assert!(op.is_by_value());
- match self.lookup_op_method(operand_ty, None, None, Op::Unary(op, ex.span), expected) {
- Ok(method) => {
- self.write_method_call(ex.hir_id, method);
- method.sig.output()
- }
- Err(errors) => {
- let actual = self.resolve_vars_if_possible(operand_ty);
- if !actual.references_error() {
- let mut err = struct_span_err!(
- self.tcx.sess,
- ex.span,
- E0600,
- "cannot apply unary operator `{}` to type `{}`",
- op.as_str(),
- actual
- );
- err.span_label(
- ex.span,
- format!("cannot apply unary operator `{}`", op.as_str()),
- );
-
- let mut visitor = TypeParamVisitor(vec![]);
- visitor.visit_ty(operand_ty);
- if let [_] = &visitor.0[..] && let ty::Param(_) = *operand_ty.kind() {
- let predicates = errors
- .iter()
- .filter_map(|error| {
- error.obligation.predicate.to_opt_poly_trait_pred()
- });
- for pred in predicates {
- self.suggest_restricting_param_bound(
- &mut err,
- pred,
- None,
- self.body_id,
- );
- }
- }
-
- let sp = self.tcx.sess.source_map().start_point(ex.span);
- if let Some(sp) =
- self.tcx.sess.parse_sess.ambiguous_block_expr_parse.borrow().get(&sp)
- {
- // If the previous expression was a block expression, suggest parentheses
- // (turning this into a binary subtraction operation instead).
- // For example, `{2} - 2` -> `({2}) - 2` (see src/test/ui/parser/expr-as-stmt.rs).
- self.tcx.sess.parse_sess.expr_parentheses_needed(&mut err, *sp);
- } else {
- match actual.kind() {
- Uint(_) if op == hir::UnOp::Neg => {
- err.note("unsigned values cannot be negated");
-
- if let hir::ExprKind::Unary(
- _,
- hir::Expr {
- kind:
- hir::ExprKind::Lit(Spanned {
- node: ast::LitKind::Int(1, _),
- ..
- }),
- ..
- },
- ) = ex.kind
- {
- err.span_suggestion(
- ex.span,
- &format!(
- "you may have meant the maximum value of `{actual}`",
- ),
- format!("{actual}::MAX"),
- Applicability::MaybeIncorrect,
- );
- }
- }
- Str | Never | Char | Tuple(_) | Array(_, _) => {}
- Ref(_, lty, _) if *lty.kind() == Str => {}
- _ => {
- self.note_unmet_impls_on_type(&mut err, errors);
- }
- }
- }
- err.emit();
- }
- self.tcx.ty_error()
- }
- }
- }
-
- fn lookup_op_method(
- &self,
- lhs_ty: Ty<'tcx>,
- other_ty: Option<Ty<'tcx>>,
- other_ty_expr: Option<&'tcx hir::Expr<'tcx>>,
- op: Op,
- expected: Expectation<'tcx>,
- ) -> Result<MethodCallee<'tcx>, Vec<FulfillmentError<'tcx>>> {
- let lang = self.tcx.lang_items();
-
- let span = match op {
- Op::Binary(op, _) => op.span,
- Op::Unary(_, span) => span,
- };
- let (opname, trait_did) = if let Op::Binary(op, IsAssign::Yes) = op {
- match op.node {
- hir::BinOpKind::Add => (sym::add_assign, lang.add_assign_trait()),
- hir::BinOpKind::Sub => (sym::sub_assign, lang.sub_assign_trait()),
- hir::BinOpKind::Mul => (sym::mul_assign, lang.mul_assign_trait()),
- hir::BinOpKind::Div => (sym::div_assign, lang.div_assign_trait()),
- hir::BinOpKind::Rem => (sym::rem_assign, lang.rem_assign_trait()),
- hir::BinOpKind::BitXor => (sym::bitxor_assign, lang.bitxor_assign_trait()),
- hir::BinOpKind::BitAnd => (sym::bitand_assign, lang.bitand_assign_trait()),
- hir::BinOpKind::BitOr => (sym::bitor_assign, lang.bitor_assign_trait()),
- hir::BinOpKind::Shl => (sym::shl_assign, lang.shl_assign_trait()),
- hir::BinOpKind::Shr => (sym::shr_assign, lang.shr_assign_trait()),
- hir::BinOpKind::Lt
- | hir::BinOpKind::Le
- | hir::BinOpKind::Ge
- | hir::BinOpKind::Gt
- | hir::BinOpKind::Eq
- | hir::BinOpKind::Ne
- | hir::BinOpKind::And
- | hir::BinOpKind::Or => {
- span_bug!(span, "impossible assignment operation: {}=", op.node.as_str())
- }
- }
- } else if let Op::Binary(op, IsAssign::No) = op {
- match op.node {
- hir::BinOpKind::Add => (sym::add, lang.add_trait()),
- hir::BinOpKind::Sub => (sym::sub, lang.sub_trait()),
- hir::BinOpKind::Mul => (sym::mul, lang.mul_trait()),
- hir::BinOpKind::Div => (sym::div, lang.div_trait()),
- hir::BinOpKind::Rem => (sym::rem, lang.rem_trait()),
- hir::BinOpKind::BitXor => (sym::bitxor, lang.bitxor_trait()),
- hir::BinOpKind::BitAnd => (sym::bitand, lang.bitand_trait()),
- hir::BinOpKind::BitOr => (sym::bitor, lang.bitor_trait()),
- hir::BinOpKind::Shl => (sym::shl, lang.shl_trait()),
- hir::BinOpKind::Shr => (sym::shr, lang.shr_trait()),
- hir::BinOpKind::Lt => (sym::lt, lang.partial_ord_trait()),
- hir::BinOpKind::Le => (sym::le, lang.partial_ord_trait()),
- hir::BinOpKind::Ge => (sym::ge, lang.partial_ord_trait()),
- hir::BinOpKind::Gt => (sym::gt, lang.partial_ord_trait()),
- hir::BinOpKind::Eq => (sym::eq, lang.eq_trait()),
- hir::BinOpKind::Ne => (sym::ne, lang.eq_trait()),
- hir::BinOpKind::And | hir::BinOpKind::Or => {
- span_bug!(span, "&& and || are not overloadable")
- }
- }
- } else if let Op::Unary(hir::UnOp::Not, _) = op {
- (sym::not, lang.not_trait())
- } else if let Op::Unary(hir::UnOp::Neg, _) = op {
- (sym::neg, lang.neg_trait())
- } else {
- bug!("lookup_op_method: op not supported: {:?}", op)
- };
-
- debug!(
- "lookup_op_method(lhs_ty={:?}, op={:?}, opname={:?}, trait_did={:?})",
- lhs_ty, op, opname, trait_did
- );
-
- // Catches cases like #83893, where a lang item is declared with the
- // wrong number of generic arguments. Should have yielded an error
- // elsewhere by now, but we have to catch it here so that we do not
- // index `other_tys` out of bounds (if the lang item has too many
- // generic arguments, `other_tys` is too short).
- if !has_expected_num_generic_args(
- self.tcx,
- trait_did,
- match op {
- // Binary ops have a generic right-hand side, unary ops don't
- Op::Binary(..) => 1,
- Op::Unary(..) => 0,
- },
- ) {
- return Err(vec![]);
- }
-
- let opname = Ident::with_dummy_span(opname);
- let method = trait_did.and_then(|trait_did| {
- self.lookup_op_method_in_trait(
- span,
- opname,
- trait_did,
- lhs_ty,
- other_ty,
- other_ty_expr,
- expected,
- )
- });
-
- match (method, trait_did) {
- (Some(ok), _) => {
- let method = self.register_infer_ok_obligations(ok);
- self.select_obligations_where_possible(false, |_| {});
- Ok(method)
- }
- (None, None) => Err(vec![]),
- (None, Some(trait_did)) => {
- let (obligation, _) = self.obligation_for_op_method(
- span,
- trait_did,
- lhs_ty,
- other_ty,
- other_ty_expr,
- expected,
- );
- let mut fulfill = <dyn TraitEngine<'_>>::new(self.tcx);
- fulfill.register_predicate_obligation(self, obligation);
- Err(fulfill.select_where_possible(&self.infcx))
- }
- }
- }
-}
-
-// Binary operator categories. These categories summarize the behavior
-// with respect to the builtin operations supported.
-enum BinOpCategory {
- /// &&, || -- cannot be overridden
- Shortcircuit,
-
- /// <<, >> -- when shifting a single integer, rhs can be any
- /// integer type. For simd, types must match.
- Shift,
-
- /// +, -, etc -- takes equal types, produces same type as input,
- /// applicable to ints/floats/simd
- Math,
-
- /// &, |, ^ -- takes equal types, produces same type as input,
- /// applicable to ints/floats/simd/bool
- Bitwise,
-
- /// ==, !=, etc -- takes equal types, produces bools, except for simd,
- /// which produces the input type
- Comparison,
-}
-
-impl BinOpCategory {
- fn from(op: hir::BinOp) -> BinOpCategory {
- match op.node {
- hir::BinOpKind::Shl | hir::BinOpKind::Shr => BinOpCategory::Shift,
-
- hir::BinOpKind::Add
- | hir::BinOpKind::Sub
- | hir::BinOpKind::Mul
- | hir::BinOpKind::Div
- | hir::BinOpKind::Rem => BinOpCategory::Math,
-
- hir::BinOpKind::BitXor | hir::BinOpKind::BitAnd | hir::BinOpKind::BitOr => {
- BinOpCategory::Bitwise
- }
-
- hir::BinOpKind::Eq
- | hir::BinOpKind::Ne
- | hir::BinOpKind::Lt
- | hir::BinOpKind::Le
- | hir::BinOpKind::Ge
- | hir::BinOpKind::Gt => BinOpCategory::Comparison,
-
- hir::BinOpKind::And | hir::BinOpKind::Or => BinOpCategory::Shortcircuit,
- }
- }
-}
-
-/// Whether the binary operation is an assignment (`a += b`), or not (`a + b`)
-#[derive(Clone, Copy, Debug, PartialEq)]
-enum IsAssign {
- No,
- Yes,
-}
-
-#[derive(Clone, Copy, Debug)]
-enum Op {
- Binary(hir::BinOp, IsAssign),
- Unary(hir::UnOp, Span),
-}
-
-/// Dereferences a single level of immutable referencing.
-fn deref_ty_if_possible<'tcx>(ty: Ty<'tcx>) -> Ty<'tcx> {
- match ty.kind() {
- ty::Ref(_, ty, hir::Mutability::Not) => *ty,
- _ => ty,
- }
-}
-
-/// Returns `true` if this is a built-in arithmetic operation (e.g., u32
-/// + u32, i16x4 == i16x4) and false if these types would have to be
-/// overloaded to be legal. There are two reasons that we distinguish
-/// builtin operations from overloaded ones (vs trying to drive
-/// everything uniformly through the trait system and intrinsics or
-/// something like that):
-///
-/// 1. Builtin operations can trivially be evaluated in constants.
-/// 2. For comparison operators applied to SIMD types the result is
-/// not of type `bool`. For example, `i16x4 == i16x4` yields a
-/// type like `i16x4`. This means that the overloaded trait
-/// `PartialEq` is not applicable.
-///
-/// Reason #2 is the killer. I tried for a while to always use
-/// overloaded logic and just check the types in constants/codegen after
-/// the fact, and it worked fine, except for SIMD types. -nmatsakis
-fn is_builtin_binop<'tcx>(lhs: Ty<'tcx>, rhs: Ty<'tcx>, op: hir::BinOp) -> bool {
- // Special-case a single layer of referencing, so that things like `5.0 + &6.0f32` work.
- // (See https://github.com/rust-lang/rust/issues/57447.)
- let (lhs, rhs) = (deref_ty_if_possible(lhs), deref_ty_if_possible(rhs));
-
- match BinOpCategory::from(op) {
- BinOpCategory::Shortcircuit => true,
-
- BinOpCategory::Shift => {
- lhs.references_error()
- || rhs.references_error()
- || lhs.is_integral() && rhs.is_integral()
- }
-
- BinOpCategory::Math => {
- lhs.references_error()
- || rhs.references_error()
- || lhs.is_integral() && rhs.is_integral()
- || lhs.is_floating_point() && rhs.is_floating_point()
- }
-
- BinOpCategory::Bitwise => {
- lhs.references_error()
- || rhs.references_error()
- || lhs.is_integral() && rhs.is_integral()
- || lhs.is_floating_point() && rhs.is_floating_point()
- || lhs.is_bool() && rhs.is_bool()
- }
-
- BinOpCategory::Comparison => {
- lhs.references_error() || rhs.references_error() || lhs.is_scalar() && rhs.is_scalar()
- }
- }
-}
-
-struct TypeParamVisitor<'tcx>(Vec<Ty<'tcx>>);
-
-impl<'tcx> TypeVisitor<'tcx> for TypeParamVisitor<'tcx> {
- fn visit_ty(&mut self, ty: Ty<'tcx>) -> ControlFlow<Self::BreakTy> {
- if let ty::Param(_) = ty.kind() {
- self.0.push(ty);
- }
- ty.super_visit_with(self)
- }
-}
-
-struct TypeParamEraser<'a, 'tcx>(&'a FnCtxt<'a, 'tcx>, Span);
-
-impl<'tcx> TypeFolder<'tcx> for TypeParamEraser<'_, 'tcx> {
- fn tcx(&self) -> TyCtxt<'tcx> {
- self.0.tcx
- }
-
- fn fold_ty(&mut self, ty: Ty<'tcx>) -> Ty<'tcx> {
- match ty.kind() {
- ty::Param(_) => self.0.next_ty_var(TypeVariableOrigin {
- kind: TypeVariableOriginKind::MiscVariable,
- span: self.1,
- }),
- _ => ty.super_fold_with(self),
- }
- }
-}
diff --git a/compiler/rustc_typeck/src/check/regionck.rs b/compiler/rustc_typeck/src/check/regionck.rs
deleted file mode 100644
index d49a6138f..000000000
--- a/compiler/rustc_typeck/src/check/regionck.rs
+++ /dev/null
@@ -1,47 +0,0 @@
-use crate::outlives::outlives_bounds::InferCtxtExt as _;
-use rustc_data_structures::fx::FxHashSet;
-use rustc_hir as hir;
-use rustc_infer::infer::outlives::env::OutlivesEnvironment;
-use rustc_infer::infer::InferCtxt;
-use rustc_middle::ty::Ty;
-
-pub(crate) trait OutlivesEnvironmentExt<'tcx> {
- fn add_implied_bounds(
- &mut self,
- infcx: &InferCtxt<'_, 'tcx>,
- fn_sig_tys: FxHashSet<Ty<'tcx>>,
- body_id: hir::HirId,
- );
-}
-
-impl<'tcx> OutlivesEnvironmentExt<'tcx> for OutlivesEnvironment<'tcx> {
- /// This method adds "implied bounds" into the outlives environment.
- /// Implied bounds are outlives relationships that we can deduce
- /// on the basis that certain types must be well-formed -- these are
- /// either the types that appear in the function signature or else
- /// the input types to an impl. For example, if you have a function
- /// like
- ///
- /// ```
- /// fn foo<'a, 'b, T>(x: &'a &'b [T]) { }
- /// ```
- ///
- /// we can assume in the caller's body that `'b: 'a` and that `T:
- /// 'b` (and hence, transitively, that `T: 'a`). This method would
- /// add those assumptions into the outlives-environment.
- ///
- /// Tests: `src/test/ui/regions/regions-free-region-ordering-*.rs`
- #[instrument(level = "debug", skip(self, infcx))]
- fn add_implied_bounds<'a>(
- &mut self,
- infcx: &InferCtxt<'a, 'tcx>,
- fn_sig_tys: FxHashSet<Ty<'tcx>>,
- body_id: hir::HirId,
- ) {
- for ty in fn_sig_tys {
- let ty = infcx.resolve_vars_if_possible(ty);
- let implied_bounds = infcx.implied_outlives_bounds(self.param_env, body_id, ty);
- self.add_outlives_bounds(Some(infcx), implied_bounds)
- }
- }
-}
diff --git a/compiler/rustc_typeck/src/check_unused.rs b/compiler/rustc_typeck/src/check_unused.rs
deleted file mode 100644
index 4a3cfa1ca..000000000
--- a/compiler/rustc_typeck/src/check_unused.rs
+++ /dev/null
@@ -1,196 +0,0 @@
-use rustc_data_structures::fx::{FxHashMap, FxHashSet};
-use rustc_errors::Applicability;
-use rustc_hir as hir;
-use rustc_hir::def::DefKind;
-use rustc_hir::def_id::{DefId, LocalDefId};
-use rustc_middle::ty::TyCtxt;
-use rustc_session::lint;
-use rustc_span::{Span, Symbol};
-
-pub fn check_crate(tcx: TyCtxt<'_>) {
- let mut used_trait_imports: FxHashSet<LocalDefId> = FxHashSet::default();
-
- for item_def_id in tcx.hir().body_owners() {
- let imports = tcx.used_trait_imports(item_def_id);
- debug!("GatherVisitor: item_def_id={:?} with imports {:#?}", item_def_id, imports);
- used_trait_imports.extend(imports.iter());
- }
-
- for &id in tcx.maybe_unused_trait_imports(()) {
- debug_assert_eq!(tcx.def_kind(id), DefKind::Use);
- if tcx.visibility(id).is_public() {
- continue;
- }
- if used_trait_imports.contains(&id) {
- continue;
- }
- let item = tcx.hir().expect_item(id);
- if item.span.is_dummy() {
- continue;
- }
- let hir::ItemKind::Use(path, _) = item.kind else { unreachable!() };
- tcx.struct_span_lint_hir(lint::builtin::UNUSED_IMPORTS, item.hir_id(), path.span, |lint| {
- let msg = if let Ok(snippet) = tcx.sess.source_map().span_to_snippet(path.span) {
- format!("unused import: `{}`", snippet)
- } else {
- "unused import".to_owned()
- };
- lint.build(&msg).emit();
- });
- }
-
- unused_crates_lint(tcx);
-}
-
-fn unused_crates_lint(tcx: TyCtxt<'_>) {
- let lint = lint::builtin::UNUSED_EXTERN_CRATES;
-
- // First, collect the crates that are completely unused. These we
- // can always suggest removing (no matter which edition we are
- // in).
- let unused_extern_crates: FxHashMap<LocalDefId, Span> = tcx
- .maybe_unused_extern_crates(())
- .iter()
- .filter(|&&(def_id, _)| {
- // The `def_id` here actually was calculated during resolution (at least
- // at the time of this writing) and is being shipped to us via a side
- // channel of the tcx. There may have been extra expansion phases,
- // however, which ended up removing the `def_id` *after* expansion.
- //
- // As a result we need to verify that `def_id` is indeed still valid for
- // our AST and actually present in the HIR map. If it's not there then
- // there's safely nothing to warn about, and otherwise we carry on with
- // our execution.
- //
- // Note that if we carry through to the `extern_mod_stmt_cnum` query
- // below it'll cause a panic because `def_id` is actually bogus at this
- // point in time otherwise.
- if tcx.hir().find(tcx.hir().local_def_id_to_hir_id(def_id)).is_none() {
- return false;
- }
- true
- })
- .filter(|&&(def_id, _)| {
- tcx.extern_mod_stmt_cnum(def_id).map_or(true, |cnum| {
- !tcx.is_compiler_builtins(cnum)
- && !tcx.is_panic_runtime(cnum)
- && !tcx.has_global_allocator(cnum)
- && !tcx.has_panic_handler(cnum)
- })
- })
- .cloned()
- .collect();
-
- // Collect all the extern crates (in a reliable order).
- let mut crates_to_lint = vec![];
-
- for id in tcx.hir().items() {
- if matches!(tcx.def_kind(id.def_id), DefKind::ExternCrate) {
- let item = tcx.hir().item(id);
- if let hir::ItemKind::ExternCrate(orig_name) = item.kind {
- crates_to_lint.push(ExternCrateToLint {
- def_id: item.def_id.to_def_id(),
- span: item.span,
- orig_name,
- warn_if_unused: !item.ident.as_str().starts_with('_'),
- });
- }
- }
- }
-
- let extern_prelude = &tcx.resolutions(()).extern_prelude;
-
- for extern_crate in &crates_to_lint {
- let def_id = extern_crate.def_id.expect_local();
- let item = tcx.hir().expect_item(def_id);
-
- // If the crate is fully unused, we suggest removing it altogether.
- // We do this in any edition.
- if extern_crate.warn_if_unused {
- if let Some(&span) = unused_extern_crates.get(&def_id) {
- let id = tcx.hir().local_def_id_to_hir_id(def_id);
- tcx.struct_span_lint_hir(lint, id, span, |lint| {
- // Removal suggestion span needs to include attributes (Issue #54400)
- let span_with_attrs = tcx
- .hir()
- .attrs(id)
- .iter()
- .map(|attr| attr.span)
- .fold(span, |acc, attr_span| acc.to(attr_span));
-
- lint.build("unused extern crate")
- .span_suggestion_short(
- span_with_attrs,
- "remove it",
- "",
- Applicability::MachineApplicable,
- )
- .emit();
- });
- continue;
- }
- }
-
- // If we are not in Rust 2018 edition, then we don't make any further
- // suggestions.
- if !tcx.sess.rust_2018() {
- continue;
- }
-
- // If the extern crate isn't in the extern prelude,
- // there is no way it can be written as a `use`.
- let orig_name = extern_crate.orig_name.unwrap_or(item.ident.name);
- if !extern_prelude.get(&orig_name).map_or(false, |from_item| !from_item) {
- continue;
- }
-
- // If the extern crate is renamed, then we cannot suggest replacing it with a use as this
- // would not insert the new name into the prelude, where other imports in the crate may be
- // expecting it.
- if extern_crate.orig_name.is_some() {
- continue;
- }
-
- let id = tcx.hir().local_def_id_to_hir_id(def_id);
- // If the extern crate has any attributes, they may have funky
- // semantics we can't faithfully represent using `use` (most
- // notably `#[macro_use]`). Ignore it.
- if !tcx.hir().attrs(id).is_empty() {
- continue;
- }
- tcx.struct_span_lint_hir(lint, id, extern_crate.span, |lint| {
- // Otherwise, we can convert it into a `use` of some kind.
- let base_replacement = match extern_crate.orig_name {
- Some(orig_name) => format!("use {} as {};", orig_name, item.ident.name),
- None => format!("use {};", item.ident.name),
- };
- let vis = tcx.sess.source_map().span_to_snippet(item.vis_span).unwrap_or_default();
- let add_vis = |to| if vis.is_empty() { to } else { format!("{} {}", vis, to) };
- lint.build("`extern crate` is not idiomatic in the new edition")
- .span_suggestion_short(
- extern_crate.span,
- &format!("convert it to a `{}`", add_vis("use".to_string())),
- add_vis(base_replacement),
- Applicability::MachineApplicable,
- )
- .emit();
- })
- }
-}
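// --- Illustrative sketch, not part of the deleted file ---
// A minimal program the checks above would flag, assuming the allow-by-default
// lint is enabled: `core` is declared but never referenced, so the fully-unused
// case applies and the suggestion removes the whole `extern crate` line
// (attributes included). Renaming the crate to start with `_` opts out, as
// `warn_if_unused` records, and on the 2018 edition an otherwise-used,
// un-renamed, attribute-free `extern crate` gets the "convert it to a `use`"
// suggestion instead.
#![warn(unused_extern_crates)]

extern crate core;

fn main() {}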
-
-struct ExternCrateToLint {
- /// `DefId` of the extern crate
- def_id: DefId,
-
- /// span from the item
- span: Span,
-
- /// if `Some`, then this is renamed (`extern crate orig_name as
- /// crate_name`), and -- perhaps surprisingly -- this stores the
- /// *original* name (`item.name` will contain the new name)
- orig_name: Option<Symbol>,
-
- /// if `false`, the original name started with `_`, so we shouldn't lint
- /// about it going unused (but we should still emit idiom lints).
- warn_if_unused: bool,
-}
diff --git a/compiler/rustc_typeck/src/coherence/builtin.rs b/compiler/rustc_typeck/src/coherence/builtin.rs
deleted file mode 100644
index 50946cc1d..000000000
--- a/compiler/rustc_typeck/src/coherence/builtin.rs
+++ /dev/null
@@ -1,603 +0,0 @@
-//! Check properties that are required by built-in traits and set
-//! up data structures required by type-checking/codegen.
-
-use crate::errors::{CopyImplOnNonAdt, CopyImplOnTypeWithDtor, DropImplOnWrongItem};
-use rustc_errors::{struct_span_err, MultiSpan};
-use rustc_hir as hir;
-use rustc_hir::def_id::{DefId, LocalDefId};
-use rustc_hir::lang_items::LangItem;
-use rustc_hir::ItemKind;
-use rustc_infer::infer;
-use rustc_infer::infer::outlives::env::OutlivesEnvironment;
-use rustc_infer::infer::TyCtxtInferExt;
-use rustc_middle::ty::adjustment::CoerceUnsizedInfo;
-use rustc_middle::ty::{self, suggest_constraining_type_params, Ty, TyCtxt, TypeVisitable};
-use rustc_trait_selection::traits::error_reporting::InferCtxtExt;
-use rustc_trait_selection::traits::misc::{can_type_implement_copy, CopyImplementationError};
-use rustc_trait_selection::traits::predicate_for_trait_def;
-use rustc_trait_selection::traits::{self, ObligationCause, TraitEngine, TraitEngineExt};
-use std::collections::BTreeMap;
-
-pub fn check_trait(tcx: TyCtxt<'_>, trait_def_id: DefId) {
- let lang_items = tcx.lang_items();
- Checker { tcx, trait_def_id }
- .check(lang_items.drop_trait(), visit_implementation_of_drop)
- .check(lang_items.copy_trait(), visit_implementation_of_copy)
- .check(lang_items.coerce_unsized_trait(), visit_implementation_of_coerce_unsized)
- .check(lang_items.dispatch_from_dyn_trait(), visit_implementation_of_dispatch_from_dyn);
-}
-
-struct Checker<'tcx> {
- tcx: TyCtxt<'tcx>,
- trait_def_id: DefId,
-}
-
-impl<'tcx> Checker<'tcx> {
- fn check<F>(&self, trait_def_id: Option<DefId>, mut f: F) -> &Self
- where
- F: FnMut(TyCtxt<'tcx>, LocalDefId),
- {
- if Some(self.trait_def_id) == trait_def_id {
- for &impl_def_id in self.tcx.hir().trait_impls(self.trait_def_id) {
- f(self.tcx, impl_def_id);
- }
- }
- self
- }
-}
-
-fn visit_implementation_of_drop(tcx: TyCtxt<'_>, impl_did: LocalDefId) {
- // Destructors only work on nominal types.
- if let ty::Adt(..) | ty::Error(_) = tcx.type_of(impl_did).kind() {
- return;
- }
-
- let sp = match tcx.hir().expect_item(impl_did).kind {
- ItemKind::Impl(ref impl_) => impl_.self_ty.span,
- _ => bug!("expected Drop impl item"),
- };
-
- tcx.sess.emit_err(DropImplOnWrongItem { span: sp });
-}
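// --- Illustrative sketch, not part of the deleted file; type names are made up ---
// What the nominal-type rule above allows: `Drop` impls are accepted only on
// ADTs (structs, enums, unions). Something like `impl Drop for &Wrapper` would
// instead be rejected through `DropImplOnWrongItem`.
struct Wrapper(String);

impl Drop for Wrapper {
    fn drop(&mut self) {
        println!("dropping {}", self.0);
    }
}

fn main() {
    let _w = Wrapper("example".to_owned());
}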
-
-fn visit_implementation_of_copy(tcx: TyCtxt<'_>, impl_did: LocalDefId) {
- debug!("visit_implementation_of_copy: impl_did={:?}", impl_did);
-
- let impl_hir_id = tcx.hir().local_def_id_to_hir_id(impl_did);
-
- let self_type = tcx.type_of(impl_did);
- debug!("visit_implementation_of_copy: self_type={:?} (bound)", self_type);
-
- let span = tcx.hir().span(impl_hir_id);
- let param_env = tcx.param_env(impl_did);
- assert!(!self_type.has_escaping_bound_vars());
-
- debug!("visit_implementation_of_copy: self_type={:?} (free)", self_type);
-
- let cause = traits::ObligationCause::misc(span, impl_hir_id);
- match can_type_implement_copy(tcx, param_env, self_type, cause) {
- Ok(()) => {}
- Err(CopyImplementationError::InfrigingFields(fields)) => {
- let item = tcx.hir().expect_item(impl_did);
- let span = if let ItemKind::Impl(hir::Impl { of_trait: Some(ref tr), .. }) = item.kind {
- tr.path.span
- } else {
- span
- };
-
- let mut err = struct_span_err!(
- tcx.sess,
- span,
- E0204,
- "the trait `Copy` may not be implemented for this type"
- );
-
- // We'll try to suggest constraining type parameters to fulfill the requirements of
- // their `Copy` implementation.
- let mut errors: BTreeMap<_, Vec<_>> = Default::default();
- let mut bounds = vec![];
-
- for (field, ty) in fields {
- let field_span = tcx.def_span(field.did);
- let field_ty_span = match tcx.hir().get_if_local(field.did) {
- Some(hir::Node::Field(field_def)) => field_def.ty.span,
- _ => field_span,
- };
- err.span_label(field_span, "this field does not implement `Copy`");
- // Spin up a new FulfillmentContext, so we can get the _precise_ reason
- // why this field does not implement Copy. This is useful because sometimes
- // it is not immediately clear why Copy is not implemented for a field, since
- // all we point at is the field itself.
- tcx.infer_ctxt().ignoring_regions().enter(|infcx| {
- let mut fulfill_cx = <dyn TraitEngine<'_>>::new(tcx);
- fulfill_cx.register_bound(
- &infcx,
- param_env,
- ty,
- tcx.lang_items().copy_trait().unwrap(),
- traits::ObligationCause::dummy_with_span(field_ty_span),
- );
- for error in fulfill_cx.select_all_or_error(&infcx) {
- let error_predicate = error.obligation.predicate;
- // Only note if it's not the root obligation; otherwise it's trivial and
- // should be self-explanatory (i.e., a field literally doesn't implement Copy).
-
- // FIXME: This error could be more descriptive, especially if the error_predicate
- // contains a foreign type or if it's a deeply nested type...
- if error_predicate != error.root_obligation.predicate {
- errors
- .entry((ty.to_string(), error_predicate.to_string()))
- .or_default()
- .push(error.obligation.cause.span);
- }
- if let ty::PredicateKind::Trait(ty::TraitPredicate {
- trait_ref,
- polarity: ty::ImplPolarity::Positive,
- ..
- }) = error_predicate.kind().skip_binder()
- {
- let ty = trait_ref.self_ty();
- if let ty::Param(_) = ty.kind() {
- bounds.push((
- format!("{ty}"),
- trait_ref.print_only_trait_path().to_string(),
- Some(trait_ref.def_id),
- ));
- }
- }
- }
- });
- }
- for ((ty, error_predicate), spans) in errors {
- let span: MultiSpan = spans.into();
- err.span_note(
- span,
- &format!("the `Copy` impl for `{}` requires that `{}`", ty, error_predicate),
- );
- }
- suggest_constraining_type_params(
- tcx,
- tcx.hir().get_generics(impl_did).expect("impls always have generics"),
- &mut err,
- bounds.iter().map(|(param, constraint, def_id)| {
- (param.as_str(), constraint.as_str(), *def_id)
- }),
- );
- err.emit();
- }
- Err(CopyImplementationError::NotAnAdt) => {
- let item = tcx.hir().expect_item(impl_did);
- let span =
- if let ItemKind::Impl(ref impl_) = item.kind { impl_.self_ty.span } else { span };
-
- tcx.sess.emit_err(CopyImplOnNonAdt { span });
- }
- Err(CopyImplementationError::HasDestructor) => {
- tcx.sess.emit_err(CopyImplOnTypeWithDtor { span });
- }
- }
-}
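// --- Illustrative sketch, not part of the deleted file; type names are made up ---
// The rule enforced above, in user terms: a type may be `Copy` only if every
// field is `Copy` and the type has no destructor. This derive compiles;
// changing `x: i32` to `x: String` would produce the E0204 error built above
// with the offending field labeled, and adding an `impl Drop for Point` would
// hit the `HasDestructor` arm instead.
#[derive(Clone, Copy)]
struct Point {
    x: i32,
    y: i32,
}

fn main() {
    let p = Point { x: 1, y: 2 };
    let q = p; // `p` is copied, not moved
    println!("{} {}", p.x, q.y);
}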
-
-fn visit_implementation_of_coerce_unsized<'tcx>(tcx: TyCtxt<'tcx>, impl_did: LocalDefId) {
- debug!("visit_implementation_of_coerce_unsized: impl_did={:?}", impl_did);
-
- // Just compute this for the side-effects, in particular for reporting
- // errors; other parts of the code may of course demand it for the
- // information as well.
- let span = tcx.def_span(impl_did);
- tcx.at(span).coerce_unsized_info(impl_did);
-}
-
-fn visit_implementation_of_dispatch_from_dyn<'tcx>(tcx: TyCtxt<'tcx>, impl_did: LocalDefId) {
- debug!("visit_implementation_of_dispatch_from_dyn: impl_did={:?}", impl_did);
-
- let impl_hir_id = tcx.hir().local_def_id_to_hir_id(impl_did);
- let span = tcx.hir().span(impl_hir_id);
-
- let dispatch_from_dyn_trait = tcx.require_lang_item(LangItem::DispatchFromDyn, Some(span));
-
- let source = tcx.type_of(impl_did);
- assert!(!source.has_escaping_bound_vars());
- let target = {
- let trait_ref = tcx.impl_trait_ref(impl_did).unwrap();
- assert_eq!(trait_ref.def_id, dispatch_from_dyn_trait);
-
- trait_ref.substs.type_at(1)
- };
-
- debug!("visit_implementation_of_dispatch_from_dyn: {:?} -> {:?}", source, target);
-
- let param_env = tcx.param_env(impl_did);
-
- let create_err = |msg: &str| struct_span_err!(tcx.sess, span, E0378, "{}", msg);
-
- tcx.infer_ctxt().enter(|infcx| {
- let cause = ObligationCause::misc(span, impl_hir_id);
-
- use rustc_type_ir::sty::TyKind::*;
- match (source.kind(), target.kind()) {
- (&Ref(r_a, _, mutbl_a), Ref(r_b, _, mutbl_b))
- if infcx.at(&cause, param_env).eq(r_a, *r_b).is_ok() && mutbl_a == *mutbl_b => {}
- (&RawPtr(tm_a), &RawPtr(tm_b)) if tm_a.mutbl == tm_b.mutbl => (),
- (&Adt(def_a, substs_a), &Adt(def_b, substs_b))
- if def_a.is_struct() && def_b.is_struct() =>
- {
- if def_a != def_b {
- let source_path = tcx.def_path_str(def_a.did());
- let target_path = tcx.def_path_str(def_b.did());
-
- create_err(&format!(
- "the trait `DispatchFromDyn` may only be implemented \
- for a coercion between structures with the same \
- definition; expected `{}`, found `{}`",
- source_path, target_path,
- ))
- .emit();
-
- return;
- }
-
- if def_a.repr().c() || def_a.repr().packed() {
- create_err(
- "structs implementing `DispatchFromDyn` may not have \
- `#[repr(packed)]` or `#[repr(C)]`",
- )
- .emit();
- }
-
- let fields = &def_a.non_enum_variant().fields;
-
- let coerced_fields = fields
- .iter()
- .filter(|field| {
- let ty_a = field.ty(tcx, substs_a);
- let ty_b = field.ty(tcx, substs_b);
-
- if let Ok(layout) = tcx.layout_of(param_env.and(ty_a)) {
- if layout.is_zst() && layout.align.abi.bytes() == 1 {
- // ignore ZST fields with alignment of 1 byte
- return false;
- }
- }
-
- if let Ok(ok) = infcx.at(&cause, param_env).eq(ty_a, ty_b) {
- if ok.obligations.is_empty() {
- create_err(
- "the trait `DispatchFromDyn` may only be implemented \
- for structs containing the field being coerced, \
- ZST fields with 1 byte alignment, and nothing else",
- )
- .note(&format!(
- "extra field `{}` of type `{}` is not allowed",
- field.name, ty_a,
- ))
- .emit();
-
- return false;
- }
- }
-
- return true;
- })
- .collect::<Vec<_>>();
-
- if coerced_fields.is_empty() {
- create_err(
- "the trait `DispatchFromDyn` may only be implemented \
- for a coercion between structures with a single field \
- being coerced, none found",
- )
- .emit();
- } else if coerced_fields.len() > 1 {
- create_err(
- "implementing the `DispatchFromDyn` trait requires multiple coercions",
- )
- .note(
- "the trait `DispatchFromDyn` may only be implemented \
- for a coercion between structures with a single field \
- being coerced",
- )
- .note(&format!(
- "currently, {} fields need coercions: {}",
- coerced_fields.len(),
- coerced_fields
- .iter()
- .map(|field| {
- format!(
- "`{}` (`{}` to `{}`)",
- field.name,
- field.ty(tcx, substs_a),
- field.ty(tcx, substs_b),
- )
- })
- .collect::<Vec<_>>()
- .join(", ")
- ))
- .emit();
- } else {
- let mut fulfill_cx = <dyn TraitEngine<'_>>::new(infcx.tcx);
-
- for field in coerced_fields {
- let predicate = predicate_for_trait_def(
- tcx,
- param_env,
- cause.clone(),
- dispatch_from_dyn_trait,
- 0,
- field.ty(tcx, substs_a),
- &[field.ty(tcx, substs_b).into()],
- );
-
- fulfill_cx.register_predicate_obligation(&infcx, predicate);
- }
-
- // Check that all transitive obligations are satisfied.
- let errors = fulfill_cx.select_all_or_error(&infcx);
- if !errors.is_empty() {
- infcx.report_fulfillment_errors(&errors, None, false);
- }
-
- // Finally, resolve all regions.
- let outlives_env = OutlivesEnvironment::new(param_env);
- infcx.check_region_obligations_and_report_errors(impl_did, &outlives_env);
- }
- }
- _ => {
- create_err(
- "the trait `DispatchFromDyn` may only be implemented \
- for a coercion between structures",
- )
- .emit();
- }
- }
- })
-}
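// --- Illustrative sketch, not part of the deleted file; nightly-only and the
// pointer type name is made up ---
// A custom pointer that satisfies the checks above: source and target are the
// same struct definition, it is not `#[repr(C)]` or `#[repr(packed)]`, and
// exactly one field (`ptr`) changes type between `MyPtr<T>` and `MyPtr<U>`.
// The obligation registered for that field is `*const T: DispatchFromDyn<*const U>`,
// which the standard library provides when `T: Unsize<U>`.
#![feature(dispatch_from_dyn, unsize)]

use std::marker::Unsize;
use std::ops::DispatchFromDyn;

#[allow(dead_code)]
struct MyPtr<T: ?Sized> {
    ptr: *const T,
}

impl<T: ?Sized + Unsize<U>, U: ?Sized> DispatchFromDyn<MyPtr<U>> for MyPtr<T> {}

fn main() {}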
-
-pub fn coerce_unsized_info<'tcx>(tcx: TyCtxt<'tcx>, impl_did: DefId) -> CoerceUnsizedInfo {
- debug!("compute_coerce_unsized_info(impl_did={:?})", impl_did);
-
- // this provider should only get invoked for local def-ids
- let impl_did = impl_did.expect_local();
- let span = tcx.def_span(impl_did);
-
- let coerce_unsized_trait = tcx.require_lang_item(LangItem::CoerceUnsized, Some(span));
-
- let unsize_trait = tcx.lang_items().require(LangItem::Unsize).unwrap_or_else(|err| {
- tcx.sess.fatal(&format!("`CoerceUnsized` implementation {}", err));
- });
-
- let source = tcx.type_of(impl_did);
- let trait_ref = tcx.impl_trait_ref(impl_did).unwrap();
- assert_eq!(trait_ref.def_id, coerce_unsized_trait);
- let target = trait_ref.substs.type_at(1);
- debug!("visit_implementation_of_coerce_unsized: {:?} -> {:?} (bound)", source, target);
-
- let param_env = tcx.param_env(impl_did);
- assert!(!source.has_escaping_bound_vars());
-
- let err_info = CoerceUnsizedInfo { custom_kind: None };
-
- debug!("visit_implementation_of_coerce_unsized: {:?} -> {:?} (free)", source, target);
-
- tcx.infer_ctxt().enter(|infcx| {
- let impl_hir_id = tcx.hir().local_def_id_to_hir_id(impl_did);
- let cause = ObligationCause::misc(span, impl_hir_id);
- let check_mutbl = |mt_a: ty::TypeAndMut<'tcx>,
- mt_b: ty::TypeAndMut<'tcx>,
- mk_ptr: &dyn Fn(Ty<'tcx>) -> Ty<'tcx>| {
- if (mt_a.mutbl, mt_b.mutbl) == (hir::Mutability::Not, hir::Mutability::Mut) {
- infcx
- .report_mismatched_types(
- &cause,
- mk_ptr(mt_b.ty),
- target,
- ty::error::TypeError::Mutability,
- )
- .emit();
- }
- (mt_a.ty, mt_b.ty, unsize_trait, None)
- };
- let (source, target, trait_def_id, kind) = match (source.kind(), target.kind()) {
- (&ty::Ref(r_a, ty_a, mutbl_a), &ty::Ref(r_b, ty_b, mutbl_b)) => {
- infcx.sub_regions(infer::RelateObjectBound(span), r_b, r_a);
- let mt_a = ty::TypeAndMut { ty: ty_a, mutbl: mutbl_a };
- let mt_b = ty::TypeAndMut { ty: ty_b, mutbl: mutbl_b };
- check_mutbl(mt_a, mt_b, &|ty| tcx.mk_imm_ref(r_b, ty))
- }
-
- (&ty::Ref(_, ty_a, mutbl_a), &ty::RawPtr(mt_b)) => {
- let mt_a = ty::TypeAndMut { ty: ty_a, mutbl: mutbl_a };
- check_mutbl(mt_a, mt_b, &|ty| tcx.mk_imm_ptr(ty))
- }
-
- (&ty::RawPtr(mt_a), &ty::RawPtr(mt_b)) => {
- check_mutbl(mt_a, mt_b, &|ty| tcx.mk_imm_ptr(ty))
- }
-
- (&ty::Adt(def_a, substs_a), &ty::Adt(def_b, substs_b))
- if def_a.is_struct() && def_b.is_struct() =>
- {
- if def_a != def_b {
- let source_path = tcx.def_path_str(def_a.did());
- let target_path = tcx.def_path_str(def_b.did());
- struct_span_err!(
- tcx.sess,
- span,
- E0377,
- "the trait `CoerceUnsized` may only be implemented \
- for a coercion between structures with the same \
- definition; expected `{}`, found `{}`",
- source_path,
- target_path
- )
- .emit();
- return err_info;
- }
-
- // Here we are considering a case of converting
- // `S<P0...Pn>` to `S<Q0...Qn>`. As an example, let's imagine a struct `Foo<T, U>`,
- // which acts like a pointer to `U`, but carries along some extra data of type `T`:
- //
- // struct Foo<T, U> {
- // extra: T,
- // ptr: *mut U,
- // }
- //
- // We might have an impl that allows (e.g.) `Foo<T, [i32; 3]>` to be unsized
- // to `Foo<T, [i32]>`. That impl would look like:
- //
- // impl<T, U: Unsize<V>, V> CoerceUnsized<Foo<T, V>> for Foo<T, U> {}
- //
- // Here `U = [i32; 3]` and `V = [i32]`. At runtime,
- // when this coercion occurs, we would be changing the
- // field `ptr` from a thin pointer of type `*mut [i32;
- // 3]` to a fat pointer of type `*mut [i32]` (with
- // extra data `3`). **The purpose of this check is to
- // make sure that we know how to do this conversion.**
- //
- // To check if this impl is legal, we would walk down
- // the fields of `Foo` and consider their types with
- // both substitutes. We are looking to find that
- // exactly one (non-phantom) field has changed its
- // type, which we will expect to be the pointer that
- // is becoming fat (we could probably generalize this
- // to multiple thin pointers of the same type becoming
- // fat, but we don't). In this case:
- //
- // - `extra` has type `T` before and type `T` after
- // - `ptr` has type `*mut U` before and type `*mut V` after
- //
- // Since just one field changed, we would then check
- // that `*mut U: CoerceUnsized<*mut V>` is implemented
- // (in other words, that we know how to do this
- // conversion). This will work out because `U:
- // Unsize<V>`, and we have a builtin rule that `*mut
- // U` can be coerced to `*mut V` if `U: Unsize<V>`.
- let fields = &def_a.non_enum_variant().fields;
- let diff_fields = fields
- .iter()
- .enumerate()
- .filter_map(|(i, f)| {
- let (a, b) = (f.ty(tcx, substs_a), f.ty(tcx, substs_b));
-
- if tcx.type_of(f.did).is_phantom_data() {
- // Ignore PhantomData fields
- return None;
- }
-
- // Ignore fields that aren't changed; it may
- // be that we could get away with subtyping or
- // something more accepting, but we use
- // equality because we want to be able to
- // perform this check without computing
- // variance where possible. (This is because
- // we may have to evaluate constraint
- // expressions in the course of execution.)
- // See e.g., #41936.
- if let Ok(ok) = infcx.at(&cause, param_env).eq(a, b) {
- if ok.obligations.is_empty() {
- return None;
- }
- }
-
- // Collect up all fields that were significantly changed
- // i.e., those that contain T in coerce_unsized T -> U
- Some((i, a, b))
- })
- .collect::<Vec<_>>();
-
- if diff_fields.is_empty() {
- struct_span_err!(
- tcx.sess,
- span,
- E0374,
- "the trait `CoerceUnsized` may only be implemented \
- for a coercion between structures with one field \
- being coerced, none found"
- )
- .emit();
- return err_info;
- } else if diff_fields.len() > 1 {
- let item = tcx.hir().expect_item(impl_did);
- let span = if let ItemKind::Impl(hir::Impl { of_trait: Some(ref t), .. }) =
- item.kind
- {
- t.path.span
- } else {
- tcx.def_span(impl_did)
- };
-
- struct_span_err!(
- tcx.sess,
- span,
- E0375,
- "implementing the trait \
- `CoerceUnsized` requires multiple \
- coercions"
- )
- .note(
- "`CoerceUnsized` may only be implemented for \
- a coercion between structures with one field being coerced",
- )
- .note(&format!(
- "currently, {} fields need coercions: {}",
- diff_fields.len(),
- diff_fields
- .iter()
- .map(|&(i, a, b)| {
- format!("`{}` (`{}` to `{}`)", fields[i].name, a, b)
- })
- .collect::<Vec<_>>()
- .join(", ")
- ))
- .span_label(span, "requires multiple coercions")
- .emit();
- return err_info;
- }
-
- let (i, a, b) = diff_fields[0];
- let kind = ty::adjustment::CustomCoerceUnsized::Struct(i);
- (a, b, coerce_unsized_trait, Some(kind))
- }
-
- _ => {
- struct_span_err!(
- tcx.sess,
- span,
- E0376,
- "the trait `CoerceUnsized` may only be implemented \
- for a coercion between structures"
- )
- .emit();
- return err_info;
- }
- };
-
- let mut fulfill_cx = <dyn TraitEngine<'_>>::new(infcx.tcx);
-
- // Register an obligation for `A: Trait<B>`.
- let cause = traits::ObligationCause::misc(span, impl_hir_id);
- let predicate = predicate_for_trait_def(
- tcx,
- param_env,
- cause,
- trait_def_id,
- 0,
- source,
- &[target.into()],
- );
- fulfill_cx.register_predicate_obligation(&infcx, predicate);
-
- // Check that all transitive obligations are satisfied.
- let errors = fulfill_cx.select_all_or_error(&infcx);
- if !errors.is_empty() {
- infcx.report_fulfillment_errors(&errors, None, false);
- }
-
- // Finally, resolve all regions.
- let outlives_env = OutlivesEnvironment::new(param_env);
- infcx.check_region_obligations_and_report_errors(impl_did, &outlives_env);
-
- CoerceUnsizedInfo { custom_kind: kind }
- })
-}
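// --- Illustrative sketch, not part of the deleted file; nightly-only ---
// The `Foo<T, U>` example from the long comment above, written out. Exactly one
// non-PhantomData field (`ptr`) changes type between the source and target
// substitutions, so the impl is accepted, and the coercion in `main` turns a
// thin `*mut [i32; 3]` into a fat `*mut [i32]` carrying the length 3.
#![feature(coerce_unsized, unsize)]

use std::marker::Unsize;
use std::ops::CoerceUnsized;

#[allow(dead_code)]
struct Foo<T, U: ?Sized> {
    extra: T,
    ptr: *mut U,
}

impl<T, U: ?Sized + Unsize<V>, V: ?Sized> CoerceUnsized<Foo<T, V>> for Foo<T, U> {}

fn main() {
    let mut data = [1i32, 2, 3];
    let thin: Foo<u8, [i32; 3]> = Foo { extra: 0, ptr: &mut data };
    let fat: Foo<u8, [i32]> = thin; // the unsizing coercion this impl enables
    assert_eq!(unsafe { (*fat.ptr).len() }, 3);
}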
diff --git a/compiler/rustc_typeck/src/coherence/unsafety.rs b/compiler/rustc_typeck/src/coherence/unsafety.rs
deleted file mode 100644
index e45fb5fe4..000000000
--- a/compiler/rustc_typeck/src/coherence/unsafety.rs
+++ /dev/null
@@ -1,66 +0,0 @@
-//! Unsafety checker: every impl either implements a trait defined in this
-//! crate or pertains to a type defined in this crate.
-
-use rustc_errors::struct_span_err;
-use rustc_hir as hir;
-use rustc_hir::def::DefKind;
-use rustc_hir::Unsafety;
-use rustc_middle::ty::TyCtxt;
-use rustc_span::def_id::LocalDefId;
-
-pub(super) fn check_item(tcx: TyCtxt<'_>, def_id: LocalDefId) {
- debug_assert!(matches!(tcx.def_kind(def_id), DefKind::Impl));
- let item = tcx.hir().expect_item(def_id);
- let hir::ItemKind::Impl(ref impl_) = item.kind else { bug!() };
-
- if let Some(trait_ref) = tcx.impl_trait_ref(item.def_id) {
- let trait_def = tcx.trait_def(trait_ref.def_id);
- let unsafe_attr =
- impl_.generics.params.iter().find(|p| p.pure_wrt_drop).map(|_| "may_dangle");
- match (trait_def.unsafety, unsafe_attr, impl_.unsafety, impl_.polarity) {
- (Unsafety::Normal, None, Unsafety::Unsafe, hir::ImplPolarity::Positive) => {
- struct_span_err!(
- tcx.sess,
- item.span,
- E0199,
- "implementing the trait `{}` is not unsafe",
- trait_ref.print_only_trait_path()
- )
- .emit();
- }
-
- (Unsafety::Unsafe, _, Unsafety::Normal, hir::ImplPolarity::Positive) => {
- struct_span_err!(
- tcx.sess,
- item.span,
- E0200,
- "the trait `{}` requires an `unsafe impl` declaration",
- trait_ref.print_only_trait_path()
- )
- .emit();
- }
-
- (Unsafety::Normal, Some(attr_name), Unsafety::Normal, hir::ImplPolarity::Positive) => {
- struct_span_err!(
- tcx.sess,
- item.span,
- E0569,
- "requires an `unsafe impl` declaration due to `#[{}]` attribute",
- attr_name
- )
- .emit();
- }
-
- (_, _, Unsafety::Unsafe, hir::ImplPolarity::Negative(_)) => {
- // Reported in AST validation
- tcx.sess.delay_span_bug(item.span, "unsafe negative impl");
- }
- (_, _, Unsafety::Normal, hir::ImplPolarity::Negative(_))
- | (Unsafety::Unsafe, _, Unsafety::Unsafe, hir::ImplPolarity::Positive)
- | (Unsafety::Normal, Some(_), Unsafety::Unsafe, hir::ImplPolarity::Positive)
- | (Unsafety::Normal, None, Unsafety::Normal, _) => {
- // OK
- }
- }
- }
-}
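// --- Illustrative sketch, not part of the deleted file; trait name is made up ---
// The E0200 arm above, in user terms: implementing an `unsafe` trait requires
// an `unsafe impl`. Conversely, writing `unsafe impl` for a safe trait with no
// `#[may_dangle]` parameters is the E0199 arm.
/// Marker for types whose all-zero bit pattern is a valid value.
unsafe trait Zeroable {}

unsafe impl Zeroable for u32 {}

fn zeroed<T: Zeroable>() -> T {
    // Sound only because `Zeroable` implementors promise the all-zero pattern is valid.
    unsafe { std::mem::zeroed() }
}

fn main() {
    assert_eq!(zeroed::<u32>(), 0);
}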
diff --git a/compiler/rustc_typeck/src/collect.rs b/compiler/rustc_typeck/src/collect.rs
deleted file mode 100644
index 99996e80c..000000000
--- a/compiler/rustc_typeck/src/collect.rs
+++ /dev/null
@@ -1,3361 +0,0 @@
-//! "Collection" is the process of determining the type and other external
-//! details of each item in Rust. Collection is specifically concerned
-//! with *inter-procedural* things -- for example, for a function
-//! definition, collection will figure out the type and signature of the
-//! function, but it will not visit the *body* of the function in any way,
-//! nor examine type annotations on local variables (that's the job of
-//! type *checking*).
-//!
-//! Collecting is ultimately defined by a bundle of queries that
-//! inquire after various facts about the items in the crate (e.g.,
-//! `type_of`, `generics_of`, `predicates_of`, etc). See the `provide` function
-//! for the full set.
-//!
-//! At present, however, we do run collection across all items in the
-//! crate as a kind of pass. This should eventually be factored away.
-
-use crate::astconv::AstConv;
-use crate::bounds::Bounds;
-use crate::check::intrinsic::intrinsic_operation_unsafety;
-use crate::constrained_generic_params as cgp;
-use crate::errors;
-use crate::middle::resolve_lifetime as rl;
-use rustc_ast as ast;
-use rustc_ast::{MetaItemKind, NestedMetaItem};
-use rustc_attr::{list_contains_name, InlineAttr, InstructionSetAttr, OptimizeAttr};
-use rustc_data_structures::captures::Captures;
-use rustc_data_structures::fx::{FxHashMap, FxHashSet, FxIndexSet};
-use rustc_errors::{struct_span_err, Applicability, DiagnosticBuilder, ErrorGuaranteed};
-use rustc_hir as hir;
-use rustc_hir::def::{CtorKind, DefKind};
-use rustc_hir::def_id::{DefId, LocalDefId, CRATE_DEF_ID, LOCAL_CRATE};
-use rustc_hir::intravisit::{self, Visitor};
-use rustc_hir::weak_lang_items;
-use rustc_hir::{GenericParamKind, HirId, Node};
-use rustc_middle::hir::nested_filter;
-use rustc_middle::middle::codegen_fn_attrs::{CodegenFnAttrFlags, CodegenFnAttrs};
-use rustc_middle::mir::mono::Linkage;
-use rustc_middle::ty::query::Providers;
-use rustc_middle::ty::subst::InternalSubsts;
-use rustc_middle::ty::util::Discr;
-use rustc_middle::ty::util::IntTypeExt;
-use rustc_middle::ty::{self, AdtKind, Const, DefIdTree, IsSuggestable, Ty, TyCtxt};
-use rustc_middle::ty::{ReprOptions, ToPredicate};
-use rustc_session::lint;
-use rustc_session::parse::feature_err;
-use rustc_span::symbol::{kw, sym, Ident, Symbol};
-use rustc_span::{Span, DUMMY_SP};
-use rustc_target::spec::{abi, SanitizerSet};
-use rustc_trait_selection::traits::error_reporting::suggestions::NextTypeParamName;
-use std::iter;
-
-mod item_bounds;
-mod type_of;
-
-#[derive(Debug)]
-struct OnlySelfBounds(bool);
-
-///////////////////////////////////////////////////////////////////////////
-// Main entry point
-
-fn collect_mod_item_types(tcx: TyCtxt<'_>, module_def_id: LocalDefId) {
- tcx.hir().visit_item_likes_in_module(module_def_id, &mut CollectItemTypesVisitor { tcx });
-}
-
-pub fn provide(providers: &mut Providers) {
- *providers = Providers {
- opt_const_param_of: type_of::opt_const_param_of,
- type_of: type_of::type_of,
- item_bounds: item_bounds::item_bounds,
- explicit_item_bounds: item_bounds::explicit_item_bounds,
- generics_of,
- predicates_of,
- predicates_defined_on,
- explicit_predicates_of,
- super_predicates_of,
- super_predicates_that_define_assoc_type,
- trait_explicit_predicates_and_bounds,
- type_param_predicates,
- trait_def,
- adt_def,
- fn_sig,
- impl_trait_ref,
- impl_polarity,
- is_foreign_item,
- generator_kind,
- codegen_fn_attrs,
- asm_target_features,
- collect_mod_item_types,
- should_inherit_track_caller,
- ..*providers
- };
-}
-
-///////////////////////////////////////////////////////////////////////////
-
-/// Context specific to some particular item. This is what implements
-/// `AstConv`. It has information about the predicates that are defined
-/// on the trait. Unfortunately, this predicate information is
-/// available in various different forms at various points in the
-/// process. So we can't just store a pointer to e.g., the AST or the
-/// parsed ty form; we have to be more flexible. To this end, the
-/// `ItemCtxt` is parameterized by a `DefId` that it uses to satisfy
-/// `get_type_parameter_bounds` requests, drawing the information from
-/// the AST (`hir::Generics`), recursively.
-pub struct ItemCtxt<'tcx> {
- tcx: TyCtxt<'tcx>,
- item_def_id: DefId,
-}
-
-///////////////////////////////////////////////////////////////////////////
-
-#[derive(Default)]
-pub(crate) struct HirPlaceholderCollector(pub(crate) Vec<Span>);
-
-impl<'v> Visitor<'v> for HirPlaceholderCollector {
- fn visit_ty(&mut self, t: &'v hir::Ty<'v>) {
- if let hir::TyKind::Infer = t.kind {
- self.0.push(t.span);
- }
- intravisit::walk_ty(self, t)
- }
- fn visit_generic_arg(&mut self, generic_arg: &'v hir::GenericArg<'v>) {
- match generic_arg {
- hir::GenericArg::Infer(inf) => {
- self.0.push(inf.span);
- intravisit::walk_inf(self, inf);
- }
- hir::GenericArg::Type(t) => self.visit_ty(t),
- _ => {}
- }
- }
- fn visit_array_length(&mut self, length: &'v hir::ArrayLen) {
- if let &hir::ArrayLen::Infer(_, span) = length {
- self.0.push(span);
- }
- intravisit::walk_array_len(self, length)
- }
-}
-
-struct CollectItemTypesVisitor<'tcx> {
- tcx: TyCtxt<'tcx>,
-}
-
-/// If there are any placeholder types (`_`), emit an error explaining that this is not allowed
-/// and suggest adding type parameters in the appropriate place, taking into consideration any and
-/// all already existing generic type parameters to avoid suggesting a name that is already in use.
-pub(crate) fn placeholder_type_error<'tcx>(
- tcx: TyCtxt<'tcx>,
- generics: Option<&hir::Generics<'_>>,
- placeholder_types: Vec<Span>,
- suggest: bool,
- hir_ty: Option<&hir::Ty<'_>>,
- kind: &'static str,
-) {
- if placeholder_types.is_empty() {
- return;
- }
-
- placeholder_type_error_diag(tcx, generics, placeholder_types, vec![], suggest, hir_ty, kind)
- .emit();
-}
-
-pub(crate) fn placeholder_type_error_diag<'tcx>(
- tcx: TyCtxt<'tcx>,
- generics: Option<&hir::Generics<'_>>,
- placeholder_types: Vec<Span>,
- additional_spans: Vec<Span>,
- suggest: bool,
- hir_ty: Option<&hir::Ty<'_>>,
- kind: &'static str,
-) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> {
- if placeholder_types.is_empty() {
- return bad_placeholder(tcx, additional_spans, kind);
- }
-
- let params = generics.map(|g| g.params).unwrap_or_default();
- let type_name = params.next_type_param_name(None);
- let mut sugg: Vec<_> =
- placeholder_types.iter().map(|sp| (*sp, (*type_name).to_string())).collect();
-
- if let Some(generics) = generics {
- if let Some(arg) = params.iter().find(|arg| {
- matches!(arg.name, hir::ParamName::Plain(Ident { name: kw::Underscore, .. }))
- }) {
- // Account for `_` already present in cases like `struct S<_>(_);` and suggest
- // `struct S<T>(T);` instead of `struct S<_, T>(T);`.
- sugg.push((arg.span, (*type_name).to_string()));
- } else if let Some(span) = generics.span_for_param_suggestion() {
- // Account for bounds, we want `fn foo<T: E, K>(_: K)` not `fn foo<T, K: E>(_: K)`.
- sugg.push((span, format!(", {}", type_name)));
- } else {
- sugg.push((generics.span, format!("<{}>", type_name)));
- }
- }
-
- let mut err =
- bad_placeholder(tcx, placeholder_types.into_iter().chain(additional_spans).collect(), kind);
-
- // Suggest, but only if it is not a bare function type inside a const or static item
- if suggest {
- let mut is_fn = false;
- let mut is_const_or_static = false;
-
- if let Some(hir_ty) = hir_ty && let hir::TyKind::BareFn(_) = hir_ty.kind {
- is_fn = true;
-
- // Check if parent is const or static
- let parent_id = tcx.hir().get_parent_node(hir_ty.hir_id);
- let parent_node = tcx.hir().get(parent_id);
-
- is_const_or_static = matches!(
- parent_node,
- Node::Item(&hir::Item {
- kind: hir::ItemKind::Const(..) | hir::ItemKind::Static(..),
- ..
- }) | Node::TraitItem(&hir::TraitItem {
- kind: hir::TraitItemKind::Const(..),
- ..
- }) | Node::ImplItem(&hir::ImplItem { kind: hir::ImplItemKind::Const(..), .. })
- );
- }
-
- // If the bare function type appears inside a const or static item,
- // don't show the suggestion.
- if !(is_fn && is_const_or_static) {
- err.multipart_suggestion(
- "use type parameters instead",
- sugg,
- Applicability::HasPlaceholders,
- );
- }
- }
-
- err
-}
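// --- Illustrative sketch, not part of the deleted file; names are made up ---
// The kind of rewrite suggested above: `_` is not allowed in item signatures
// (the E0121 diagnostic built by `bad_placeholder`), and the multipart
// suggestion replaces each `_` with a fresh type parameter. A signature like
// `fn first(v: Vec<_>) -> _` becomes something like the following (a `Copy`
// bound is added here only so the sketch compiles):
fn first<T: Copy>(v: Vec<T>) -> T {
    v[0]
}

fn main() {
    assert_eq!(first(vec![10, 20, 30]), 10);
}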
-
-fn reject_placeholder_type_signatures_in_item<'tcx>(
- tcx: TyCtxt<'tcx>,
- item: &'tcx hir::Item<'tcx>,
-) {
- let (generics, suggest) = match &item.kind {
- hir::ItemKind::Union(_, generics)
- | hir::ItemKind::Enum(_, generics)
- | hir::ItemKind::TraitAlias(generics, _)
- | hir::ItemKind::Trait(_, _, generics, ..)
- | hir::ItemKind::Impl(hir::Impl { generics, .. })
- | hir::ItemKind::Struct(_, generics) => (generics, true),
- hir::ItemKind::OpaqueTy(hir::OpaqueTy { generics, .. })
- | hir::ItemKind::TyAlias(_, generics) => (generics, false),
- // `static`, `fn` and `const` are handled elsewhere to suggest appropriate type.
- _ => return,
- };
-
- let mut visitor = HirPlaceholderCollector::default();
- visitor.visit_item(item);
-
- placeholder_type_error(tcx, Some(generics), visitor.0, suggest, None, item.kind.descr());
-}
-
-impl<'tcx> Visitor<'tcx> for CollectItemTypesVisitor<'tcx> {
- type NestedFilter = nested_filter::OnlyBodies;
-
- fn nested_visit_map(&mut self) -> Self::Map {
- self.tcx.hir()
- }
-
- fn visit_item(&mut self, item: &'tcx hir::Item<'tcx>) {
- convert_item(self.tcx, item.item_id());
- reject_placeholder_type_signatures_in_item(self.tcx, item);
- intravisit::walk_item(self, item);
- }
-
- fn visit_generics(&mut self, generics: &'tcx hir::Generics<'tcx>) {
- for param in generics.params {
- match param.kind {
- hir::GenericParamKind::Lifetime { .. } => {}
- hir::GenericParamKind::Type { default: Some(_), .. } => {
- let def_id = self.tcx.hir().local_def_id(param.hir_id);
- self.tcx.ensure().type_of(def_id);
- }
- hir::GenericParamKind::Type { .. } => {}
- hir::GenericParamKind::Const { default, .. } => {
- let def_id = self.tcx.hir().local_def_id(param.hir_id);
- self.tcx.ensure().type_of(def_id);
- if let Some(default) = default {
- let default_def_id = self.tcx.hir().local_def_id(default.hir_id);
- // need to store default and type of default
- self.tcx.ensure().type_of(default_def_id);
- self.tcx.ensure().const_param_default(def_id);
- }
- }
- }
- }
- intravisit::walk_generics(self, generics);
- }
-
- fn visit_expr(&mut self, expr: &'tcx hir::Expr<'tcx>) {
- if let hir::ExprKind::Closure { .. } = expr.kind {
- let def_id = self.tcx.hir().local_def_id(expr.hir_id);
- self.tcx.ensure().generics_of(def_id);
- // We do not call `type_of` for closures here as that
- // depends on typecheck and would therefore hide
- // any further errors in case one typeck fails.
- }
- intravisit::walk_expr(self, expr);
- }
-
- fn visit_trait_item(&mut self, trait_item: &'tcx hir::TraitItem<'tcx>) {
- convert_trait_item(self.tcx, trait_item.trait_item_id());
- intravisit::walk_trait_item(self, trait_item);
- }
-
- fn visit_impl_item(&mut self, impl_item: &'tcx hir::ImplItem<'tcx>) {
- convert_impl_item(self.tcx, impl_item.impl_item_id());
- intravisit::walk_impl_item(self, impl_item);
- }
-}
-
-///////////////////////////////////////////////////////////////////////////
-// Utility types and common code for the above passes.
-
-fn bad_placeholder<'tcx>(
- tcx: TyCtxt<'tcx>,
- mut spans: Vec<Span>,
- kind: &'static str,
-) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> {
- let kind = if kind.ends_with('s') { format!("{}es", kind) } else { format!("{}s", kind) };
-
- spans.sort();
- let mut err = struct_span_err!(
- tcx.sess,
- spans.clone(),
- E0121,
- "the placeholder `_` is not allowed within types on item signatures for {}",
- kind
- );
- for span in spans {
- err.span_label(span, "not allowed in type signatures");
- }
- err
-}
-
-impl<'tcx> ItemCtxt<'tcx> {
- pub fn new(tcx: TyCtxt<'tcx>, item_def_id: DefId) -> ItemCtxt<'tcx> {
- ItemCtxt { tcx, item_def_id }
- }
-
- pub fn to_ty(&self, ast_ty: &hir::Ty<'_>) -> Ty<'tcx> {
- <dyn AstConv<'_>>::ast_ty_to_ty(self, ast_ty)
- }
-
- pub fn hir_id(&self) -> hir::HirId {
- self.tcx.hir().local_def_id_to_hir_id(self.item_def_id.expect_local())
- }
-
- pub fn node(&self) -> hir::Node<'tcx> {
- self.tcx.hir().get(self.hir_id())
- }
-}
-
-impl<'tcx> AstConv<'tcx> for ItemCtxt<'tcx> {
- fn tcx(&self) -> TyCtxt<'tcx> {
- self.tcx
- }
-
- fn item_def_id(&self) -> Option<DefId> {
- Some(self.item_def_id)
- }
-
- fn get_type_parameter_bounds(
- &self,
- span: Span,
- def_id: DefId,
- assoc_name: Ident,
- ) -> ty::GenericPredicates<'tcx> {
- self.tcx.at(span).type_param_predicates((
- self.item_def_id,
- def_id.expect_local(),
- assoc_name,
- ))
- }
-
- fn re_infer(&self, _: Option<&ty::GenericParamDef>, _: Span) -> Option<ty::Region<'tcx>> {
- None
- }
-
- fn allow_ty_infer(&self) -> bool {
- false
- }
-
- fn ty_infer(&self, _: Option<&ty::GenericParamDef>, span: Span) -> Ty<'tcx> {
- self.tcx().ty_error_with_message(span, "bad placeholder type")
- }
-
- fn ct_infer(&self, ty: Ty<'tcx>, _: Option<&ty::GenericParamDef>, span: Span) -> Const<'tcx> {
- let ty = self.tcx.fold_regions(ty, |r, _| match *r {
- ty::ReErased => self.tcx.lifetimes.re_static,
- _ => r,
- });
- self.tcx().const_error_with_message(ty, span, "bad placeholder constant")
- }
-
- fn projected_ty_from_poly_trait_ref(
- &self,
- span: Span,
- item_def_id: DefId,
- item_segment: &hir::PathSegment<'_>,
- poly_trait_ref: ty::PolyTraitRef<'tcx>,
- ) -> Ty<'tcx> {
- if let Some(trait_ref) = poly_trait_ref.no_bound_vars() {
- let item_substs = <dyn AstConv<'tcx>>::create_substs_for_associated_item(
- self,
- self.tcx,
- span,
- item_def_id,
- item_segment,
- trait_ref.substs,
- );
- self.tcx().mk_projection(item_def_id, item_substs)
- } else {
- // There are no late-bound regions; we can just ignore the binder.
- let mut err = struct_span_err!(
- self.tcx().sess,
- span,
- E0212,
- "cannot use the associated type of a trait \
- with uninferred generic parameters"
- );
-
- match self.node() {
- hir::Node::Field(_) | hir::Node::Ctor(_) | hir::Node::Variant(_) => {
- let item =
- self.tcx.hir().expect_item(self.tcx.hir().get_parent_item(self.hir_id()));
- match &item.kind {
- hir::ItemKind::Enum(_, generics)
- | hir::ItemKind::Struct(_, generics)
- | hir::ItemKind::Union(_, generics) => {
- let lt_name = get_new_lifetime_name(self.tcx, poly_trait_ref, generics);
- let (lt_sp, sugg) = match generics.params {
- [] => (generics.span, format!("<{}>", lt_name)),
- [bound, ..] => {
- (bound.span.shrink_to_lo(), format!("{}, ", lt_name))
- }
- };
- let suggestions = vec![
- (lt_sp, sugg),
- (
- span.with_hi(item_segment.ident.span.lo()),
- format!(
- "{}::",
- // Replace the existing lifetimes with a new named lifetime.
- self.tcx.replace_late_bound_regions_uncached(
- poly_trait_ref,
- |_| {
- self.tcx.mk_region(ty::ReEarlyBound(
- ty::EarlyBoundRegion {
- def_id: item_def_id,
- index: 0,
- name: Symbol::intern(&lt_name),
- },
- ))
- }
- ),
- ),
- ),
- ];
- err.multipart_suggestion(
- "use a fully qualified path with explicit lifetimes",
- suggestions,
- Applicability::MaybeIncorrect,
- );
- }
- _ => {}
- }
- }
- hir::Node::Item(hir::Item {
- kind:
- hir::ItemKind::Struct(..) | hir::ItemKind::Enum(..) | hir::ItemKind::Union(..),
- ..
- }) => {}
- hir::Node::Item(_)
- | hir::Node::ForeignItem(_)
- | hir::Node::TraitItem(_)
- | hir::Node::ImplItem(_) => {
- err.span_suggestion_verbose(
- span.with_hi(item_segment.ident.span.lo()),
- "use a fully qualified path with inferred lifetimes",
- format!(
- "{}::",
- // Erase named lt, we want `<A as B<'_>::C`, not `<A as B<'a>::C`.
- self.tcx.anonymize_late_bound_regions(poly_trait_ref).skip_binder(),
- ),
- Applicability::MaybeIncorrect,
- );
- }
- _ => {}
- }
- err.emit();
- self.tcx().ty_error()
- }
- }
-
- fn normalize_ty(&self, _span: Span, ty: Ty<'tcx>) -> Ty<'tcx> {
- // Types in item signatures are not normalized to avoid undue dependencies.
- ty
- }
-
- fn set_tainted_by_errors(&self) {
- // There's no obvious place to track this, so just let it go.
- }
-
- fn record_ty(&self, _hir_id: hir::HirId, _ty: Ty<'tcx>, _span: Span) {
- // There's no place to record types from signatures?
- }
-}
-
-/// Synthesize a new lifetime name that doesn't clash with any of the lifetimes already present.
-fn get_new_lifetime_name<'tcx>(
- tcx: TyCtxt<'tcx>,
- poly_trait_ref: ty::PolyTraitRef<'tcx>,
- generics: &hir::Generics<'tcx>,
-) -> String {
- let existing_lifetimes = tcx
- .collect_referenced_late_bound_regions(&poly_trait_ref)
- .into_iter()
- .filter_map(|lt| {
- if let ty::BoundRegionKind::BrNamed(_, name) = lt {
- Some(name.as_str().to_string())
- } else {
- None
- }
- })
- .chain(generics.params.iter().filter_map(|param| {
- if let hir::GenericParamKind::Lifetime { .. } = &param.kind {
- Some(param.name.ident().as_str().to_string())
- } else {
- None
- }
- }))
- .collect::<FxHashSet<String>>();
-
- let a_to_z_repeat_n = |n| {
- (b'a'..=b'z').map(move |c| {
- let mut s = '\''.to_string();
- s.extend(std::iter::repeat(char::from(c)).take(n));
- s
- })
- };
-
- // If all single char lifetime names are present, we wrap around and double the chars.
- (1..).flat_map(a_to_z_repeat_n).find(|lt| !existing_lifetimes.contains(lt.as_str())).unwrap()
-}
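// --- Illustrative sketch, not part of the deleted file ---
// The same name-generation scheme as above, in standalone form: try 'a..'z,
// then 'aa..'zz, and so on, skipping names that are already in use.
use std::collections::HashSet;

fn fresh_lifetime(existing: &HashSet<String>) -> String {
    let a_to_z_repeat_n = |n| {
        (b'a'..=b'z').map(move |c| {
            let mut s = '\''.to_string();
            s.extend(std::iter::repeat(char::from(c)).take(n));
            s
        })
    };
    // The outer counter doubles, triples, ... the letters once 'a..'z are taken.
    (1..).flat_map(a_to_z_repeat_n).find(|lt| !existing.contains(lt)).unwrap()
}

fn main() {
    let existing: HashSet<String> = ["'a", "'b"].iter().map(|s| s.to_string()).collect();
    assert_eq!(fresh_lifetime(&existing), "'c");
}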
-
-/// Returns the predicates defined on `item_def_id` of the form
-/// `X: Foo` where `X` is the type parameter `def_id`.
-fn type_param_predicates(
- tcx: TyCtxt<'_>,
- (item_def_id, def_id, assoc_name): (DefId, LocalDefId, Ident),
-) -> ty::GenericPredicates<'_> {
- use rustc_hir::*;
-
- // In the AST, bounds can derive from two places. Either
- // written inline like `<T: Foo>` or in a where-clause like
- // `where T: Foo`.
-
- let param_id = tcx.hir().local_def_id_to_hir_id(def_id);
- let param_owner = tcx.hir().ty_param_owner(def_id);
- let generics = tcx.generics_of(param_owner);
- let index = generics.param_def_id_to_index[&def_id.to_def_id()];
- let ty = tcx.mk_ty_param(index, tcx.hir().ty_param_name(def_id));
-
- // Don't look for bounds where the type parameter isn't in scope.
- let parent = if item_def_id == param_owner.to_def_id() {
- None
- } else {
- tcx.generics_of(item_def_id).parent
- };
-
- let mut result = parent
- .map(|parent| {
- let icx = ItemCtxt::new(tcx, parent);
- icx.get_type_parameter_bounds(DUMMY_SP, def_id.to_def_id(), assoc_name)
- })
- .unwrap_or_default();
- let mut extend = None;
-
- let item_hir_id = tcx.hir().local_def_id_to_hir_id(item_def_id.expect_local());
- let ast_generics = match tcx.hir().get(item_hir_id) {
- Node::TraitItem(item) => &item.generics,
-
- Node::ImplItem(item) => &item.generics,
-
- Node::Item(item) => {
- match item.kind {
- ItemKind::Fn(.., ref generics, _)
- | ItemKind::Impl(hir::Impl { ref generics, .. })
- | ItemKind::TyAlias(_, ref generics)
- | ItemKind::OpaqueTy(OpaqueTy {
- ref generics,
- origin: hir::OpaqueTyOrigin::TyAlias,
- ..
- })
- | ItemKind::Enum(_, ref generics)
- | ItemKind::Struct(_, ref generics)
- | ItemKind::Union(_, ref generics) => generics,
- ItemKind::Trait(_, _, ref generics, ..) => {
- // Implied `Self: Trait` and supertrait bounds.
- if param_id == item_hir_id {
- let identity_trait_ref = ty::TraitRef::identity(tcx, item_def_id);
- extend =
- Some((identity_trait_ref.without_const().to_predicate(tcx), item.span));
- }
- generics
- }
- _ => return result,
- }
- }
-
- Node::ForeignItem(item) => match item.kind {
- ForeignItemKind::Fn(_, _, ref generics) => generics,
- _ => return result,
- },
-
- _ => return result,
- };
-
- let icx = ItemCtxt::new(tcx, item_def_id);
- let extra_predicates = extend.into_iter().chain(
- icx.type_parameter_bounds_in_generics(
- ast_generics,
- param_id,
- ty,
- OnlySelfBounds(true),
- Some(assoc_name),
- )
- .into_iter()
- .filter(|(predicate, _)| match predicate.kind().skip_binder() {
- ty::PredicateKind::Trait(data) => data.self_ty().is_param(index),
- _ => false,
- }),
- );
- result.predicates =
- tcx.arena.alloc_from_iter(result.predicates.iter().copied().chain(extra_predicates));
- result
-}
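// --- Illustrative sketch, not part of the deleted file; function names are made up ---
// The two surface forms the comment above mentions; both contribute the same
// `T: Clone` predicate to what this query returns for the parameter `T`.
fn dup_inline<T: Clone>(x: &T) -> (T, T) {
    (x.clone(), x.clone())
}

fn dup_where<T>(x: &T) -> (T, T)
where
    T: Clone,
{
    (x.clone(), x.clone())
}

fn main() {
    assert_eq!(dup_inline(&1), (1, 1));
    assert_eq!(dup_where(&"hi"), ("hi", "hi"));
}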
-
-impl<'tcx> ItemCtxt<'tcx> {
- /// Finds bounds from `hir::Generics`. This requires scanning through the
- /// AST. We do this to avoid having to convert *all* the bounds, which
- /// would create artificial cycles. Instead, we can only convert the
- /// bounds for a type parameter `X` if `X::Foo` is used.
- #[instrument(level = "trace", skip(self, ast_generics))]
- fn type_parameter_bounds_in_generics(
- &self,
- ast_generics: &'tcx hir::Generics<'tcx>,
- param_id: hir::HirId,
- ty: Ty<'tcx>,
- only_self_bounds: OnlySelfBounds,
- assoc_name: Option<Ident>,
- ) -> Vec<(ty::Predicate<'tcx>, Span)> {
- let param_def_id = self.tcx.hir().local_def_id(param_id).to_def_id();
- debug!(?param_def_id);
- ast_generics
- .predicates
- .iter()
- .filter_map(|wp| match *wp {
- hir::WherePredicate::BoundPredicate(ref bp) => Some(bp),
- _ => None,
- })
- .flat_map(|bp| {
- let bt = if bp.is_param_bound(param_def_id) {
- Some(ty)
- } else if !only_self_bounds.0 {
- Some(self.to_ty(bp.bounded_ty))
- } else {
- None
- };
- let bvars = self.tcx.late_bound_vars(bp.bounded_ty.hir_id);
-
- bp.bounds.iter().filter_map(move |b| bt.map(|bt| (bt, b, bvars))).filter(
- |(_, b, _)| match assoc_name {
- Some(assoc_name) => self.bound_defines_assoc_item(b, assoc_name),
- None => true,
- },
- )
- })
- .flat_map(|(bt, b, bvars)| predicates_from_bound(self, bt, b, bvars))
- .collect()
- }
-
- fn bound_defines_assoc_item(&self, b: &hir::GenericBound<'_>, assoc_name: Ident) -> bool {
- debug!("bound_defines_assoc_item(b={:?}, assoc_name={:?})", b, assoc_name);
-
- match b {
- hir::GenericBound::Trait(poly_trait_ref, _) => {
- let trait_ref = &poly_trait_ref.trait_ref;
- if let Some(trait_did) = trait_ref.trait_def_id() {
- self.tcx.trait_may_define_assoc_type(trait_did, assoc_name)
- } else {
- false
- }
- }
- _ => false,
- }
- }
-}
-
-fn convert_item(tcx: TyCtxt<'_>, item_id: hir::ItemId) {
- let it = tcx.hir().item(item_id);
- debug!("convert: item {} with id {}", it.ident, it.hir_id());
- let def_id = item_id.def_id;
-
- match it.kind {
- // These don't define types.
- hir::ItemKind::ExternCrate(_)
- | hir::ItemKind::Use(..)
- | hir::ItemKind::Macro(..)
- | hir::ItemKind::Mod(_)
- | hir::ItemKind::GlobalAsm(_) => {}
- hir::ItemKind::ForeignMod { items, .. } => {
- for item in items {
- let item = tcx.hir().foreign_item(item.id);
- tcx.ensure().generics_of(item.def_id);
- tcx.ensure().type_of(item.def_id);
- tcx.ensure().predicates_of(item.def_id);
- match item.kind {
- hir::ForeignItemKind::Fn(..) => tcx.ensure().fn_sig(item.def_id),
- hir::ForeignItemKind::Static(..) => {
- let mut visitor = HirPlaceholderCollector::default();
- visitor.visit_foreign_item(item);
- placeholder_type_error(
- tcx,
- None,
- visitor.0,
- false,
- None,
- "static variable",
- );
- }
- _ => (),
- }
- }
- }
- hir::ItemKind::Enum(ref enum_definition, _) => {
- tcx.ensure().generics_of(def_id);
- tcx.ensure().type_of(def_id);
- tcx.ensure().predicates_of(def_id);
- convert_enum_variant_types(tcx, def_id.to_def_id(), enum_definition.variants);
- }
- hir::ItemKind::Impl { .. } => {
- tcx.ensure().generics_of(def_id);
- tcx.ensure().type_of(def_id);
- tcx.ensure().impl_trait_ref(def_id);
- tcx.ensure().predicates_of(def_id);
- }
- hir::ItemKind::Trait(..) => {
- tcx.ensure().generics_of(def_id);
- tcx.ensure().trait_def(def_id);
- tcx.at(it.span).super_predicates_of(def_id);
- tcx.ensure().predicates_of(def_id);
- }
- hir::ItemKind::TraitAlias(..) => {
- tcx.ensure().generics_of(def_id);
- tcx.at(it.span).super_predicates_of(def_id);
- tcx.ensure().predicates_of(def_id);
- }
- hir::ItemKind::Struct(ref struct_def, _) | hir::ItemKind::Union(ref struct_def, _) => {
- tcx.ensure().generics_of(def_id);
- tcx.ensure().type_of(def_id);
- tcx.ensure().predicates_of(def_id);
-
- for f in struct_def.fields() {
- let def_id = tcx.hir().local_def_id(f.hir_id);
- tcx.ensure().generics_of(def_id);
- tcx.ensure().type_of(def_id);
- tcx.ensure().predicates_of(def_id);
- }
-
- if let Some(ctor_hir_id) = struct_def.ctor_hir_id() {
- convert_variant_ctor(tcx, ctor_hir_id);
- }
- }
-
- // Desugared from `impl Trait`, so visited by the function's return type.
- hir::ItemKind::OpaqueTy(hir::OpaqueTy {
- origin: hir::OpaqueTyOrigin::FnReturn(..) | hir::OpaqueTyOrigin::AsyncFn(..),
- ..
- }) => {}
-
- // Don't call `type_of` on opaque types, since that depends on type
- // checking function bodies. `check_item_type` ensures that it's called
- // instead.
- hir::ItemKind::OpaqueTy(..) => {
- tcx.ensure().generics_of(def_id);
- tcx.ensure().predicates_of(def_id);
- tcx.ensure().explicit_item_bounds(def_id);
- }
- hir::ItemKind::TyAlias(..)
- | hir::ItemKind::Static(..)
- | hir::ItemKind::Const(..)
- | hir::ItemKind::Fn(..) => {
- tcx.ensure().generics_of(def_id);
- tcx.ensure().type_of(def_id);
- tcx.ensure().predicates_of(def_id);
- match it.kind {
- hir::ItemKind::Fn(..) => tcx.ensure().fn_sig(def_id),
- hir::ItemKind::OpaqueTy(..) => tcx.ensure().item_bounds(def_id),
- hir::ItemKind::Const(ty, ..) | hir::ItemKind::Static(ty, ..) => {
- if !is_suggestable_infer_ty(ty) {
- let mut visitor = HirPlaceholderCollector::default();
- visitor.visit_item(it);
- placeholder_type_error(tcx, None, visitor.0, false, None, it.kind.descr());
- }
- }
- _ => (),
- }
- }
- }
-}
-
-fn convert_trait_item(tcx: TyCtxt<'_>, trait_item_id: hir::TraitItemId) {
- let trait_item = tcx.hir().trait_item(trait_item_id);
- tcx.ensure().generics_of(trait_item_id.def_id);
-
- match trait_item.kind {
- hir::TraitItemKind::Fn(..) => {
- tcx.ensure().type_of(trait_item_id.def_id);
- tcx.ensure().fn_sig(trait_item_id.def_id);
- }
-
- hir::TraitItemKind::Const(.., Some(_)) => {
- tcx.ensure().type_of(trait_item_id.def_id);
- }
-
- hir::TraitItemKind::Const(..) => {
- tcx.ensure().type_of(trait_item_id.def_id);
- // Account for `const C: _;`.
- let mut visitor = HirPlaceholderCollector::default();
- visitor.visit_trait_item(trait_item);
- placeholder_type_error(tcx, None, visitor.0, false, None, "constant");
- }
-
- hir::TraitItemKind::Type(_, Some(_)) => {
- tcx.ensure().item_bounds(trait_item_id.def_id);
- tcx.ensure().type_of(trait_item_id.def_id);
- // Account for `type T = _;`.
- let mut visitor = HirPlaceholderCollector::default();
- visitor.visit_trait_item(trait_item);
- placeholder_type_error(tcx, None, visitor.0, false, None, "associated type");
- }
-
- hir::TraitItemKind::Type(_, None) => {
- tcx.ensure().item_bounds(trait_item_id.def_id);
- // #74612: Visit and try to find bad placeholders
- // even if there is no concrete type.
- let mut visitor = HirPlaceholderCollector::default();
- visitor.visit_trait_item(trait_item);
-
- placeholder_type_error(tcx, None, visitor.0, false, None, "associated type");
- }
- };
-
- tcx.ensure().predicates_of(trait_item_id.def_id);
-}
-
-fn convert_impl_item(tcx: TyCtxt<'_>, impl_item_id: hir::ImplItemId) {
- let def_id = impl_item_id.def_id;
- tcx.ensure().generics_of(def_id);
- tcx.ensure().type_of(def_id);
- tcx.ensure().predicates_of(def_id);
- let impl_item = tcx.hir().impl_item(impl_item_id);
- match impl_item.kind {
- hir::ImplItemKind::Fn(..) => {
- tcx.ensure().fn_sig(def_id);
- }
- hir::ImplItemKind::TyAlias(_) => {
- // Account for `type T = _;`
- let mut visitor = HirPlaceholderCollector::default();
- visitor.visit_impl_item(impl_item);
-
- placeholder_type_error(tcx, None, visitor.0, false, None, "associated type");
- }
- hir::ImplItemKind::Const(..) => {}
- }
-}
-
-fn convert_variant_ctor(tcx: TyCtxt<'_>, ctor_id: hir::HirId) {
- let def_id = tcx.hir().local_def_id(ctor_id);
- tcx.ensure().generics_of(def_id);
- tcx.ensure().type_of(def_id);
- tcx.ensure().predicates_of(def_id);
-}
-
-fn convert_enum_variant_types(tcx: TyCtxt<'_>, def_id: DefId, variants: &[hir::Variant<'_>]) {
- let def = tcx.adt_def(def_id);
- let repr_type = def.repr().discr_type();
- let initial = repr_type.initial_discriminant(tcx);
- let mut prev_discr = None::<Discr<'_>>;
-
- // fill the discriminant values and field types
- for variant in variants {
- let wrapped_discr = prev_discr.map_or(initial, |d| d.wrap_incr(tcx));
- prev_discr = Some(
- if let Some(ref e) = variant.disr_expr {
- let expr_did = tcx.hir().local_def_id(e.hir_id);
- def.eval_explicit_discr(tcx, expr_did.to_def_id())
- } else if let Some(discr) = repr_type.disr_incr(tcx, prev_discr) {
- Some(discr)
- } else {
- struct_span_err!(tcx.sess, variant.span, E0370, "enum discriminant overflowed")
- .span_label(
- variant.span,
- format!("overflowed on value after {}", prev_discr.unwrap()),
- )
- .note(&format!(
- "explicitly set `{} = {}` if that is desired outcome",
- variant.ident, wrapped_discr
- ))
- .emit();
- None
- }
- .unwrap_or(wrapped_discr),
- );
-
- for f in variant.data.fields() {
- let def_id = tcx.hir().local_def_id(f.hir_id);
- tcx.ensure().generics_of(def_id);
- tcx.ensure().type_of(def_id);
- tcx.ensure().predicates_of(def_id);
- }
-
- // Convert the ctor, if any. This also registers the variant as
- // an item.
- if let Some(ctor_hir_id) = variant.data.ctor_hir_id() {
- convert_variant_ctor(tcx, ctor_hir_id);
- }
- }
-}
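// --- Illustrative sketch, not part of the deleted file; enum name is made up ---
// How the discriminant walk above plays out: an explicit value resets the
// running discriminant, and each following variant is the previous value plus
// one. Overflowing the repr type (e.g. a variant after `= 255` on `#[repr(u8)]`)
// is the E0370 case reported above.
#[repr(u8)]
enum Status {
    Ok,         // 0, the initial discriminant of the repr type
    Warn,       // 1
    Error = 10, // explicit
    Fatal,      // 11, relative to the previous explicit value
}

fn main() {
    assert_eq!(Status::Ok as u8, 0);
    assert_eq!(Status::Warn as u8, 1);
    assert_eq!(Status::Error as u8, 10);
    assert_eq!(Status::Fatal as u8, 11);
}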
-
-fn convert_variant(
- tcx: TyCtxt<'_>,
- variant_did: Option<LocalDefId>,
- ctor_did: Option<LocalDefId>,
- ident: Ident,
- discr: ty::VariantDiscr,
- def: &hir::VariantData<'_>,
- adt_kind: ty::AdtKind,
- parent_did: LocalDefId,
-) -> ty::VariantDef {
- let mut seen_fields: FxHashMap<Ident, Span> = Default::default();
- let fields = def
- .fields()
- .iter()
- .map(|f| {
- let fid = tcx.hir().local_def_id(f.hir_id);
- let dup_span = seen_fields.get(&f.ident.normalize_to_macros_2_0()).cloned();
- if let Some(prev_span) = dup_span {
- tcx.sess.emit_err(errors::FieldAlreadyDeclared {
- field_name: f.ident,
- span: f.span,
- prev_span,
- });
- } else {
- seen_fields.insert(f.ident.normalize_to_macros_2_0(), f.span);
- }
-
- ty::FieldDef { did: fid.to_def_id(), name: f.ident.name, vis: tcx.visibility(fid) }
- })
- .collect();
- let recovered = match def {
- hir::VariantData::Struct(_, r) => *r,
- _ => false,
- };
- ty::VariantDef::new(
- ident.name,
- variant_did.map(LocalDefId::to_def_id),
- ctor_did.map(LocalDefId::to_def_id),
- discr,
- fields,
- CtorKind::from_hir(def),
- adt_kind,
- parent_did.to_def_id(),
- recovered,
- adt_kind == AdtKind::Struct && tcx.has_attr(parent_did.to_def_id(), sym::non_exhaustive)
- || variant_did.map_or(false, |variant_did| {
- tcx.has_attr(variant_did.to_def_id(), sym::non_exhaustive)
- }),
- )
-}
-
-fn adt_def<'tcx>(tcx: TyCtxt<'tcx>, def_id: DefId) -> ty::AdtDef<'tcx> {
- use rustc_hir::*;
-
- let def_id = def_id.expect_local();
- let hir_id = tcx.hir().local_def_id_to_hir_id(def_id);
- let Node::Item(item) = tcx.hir().get(hir_id) else {
- bug!();
- };
-
- let repr = ReprOptions::new(tcx, def_id.to_def_id());
- let (kind, variants) = match item.kind {
- ItemKind::Enum(ref def, _) => {
- let mut distance_from_explicit = 0;
- let variants = def
- .variants
- .iter()
- .map(|v| {
- let variant_did = Some(tcx.hir().local_def_id(v.id));
- let ctor_did =
- v.data.ctor_hir_id().map(|hir_id| tcx.hir().local_def_id(hir_id));
-
- let discr = if let Some(ref e) = v.disr_expr {
- distance_from_explicit = 0;
- ty::VariantDiscr::Explicit(tcx.hir().local_def_id(e.hir_id).to_def_id())
- } else {
- ty::VariantDiscr::Relative(distance_from_explicit)
- };
- distance_from_explicit += 1;
-
- convert_variant(
- tcx,
- variant_did,
- ctor_did,
- v.ident,
- discr,
- &v.data,
- AdtKind::Enum,
- def_id,
- )
- })
- .collect();
-
- (AdtKind::Enum, variants)
- }
- ItemKind::Struct(ref def, _) => {
- let variant_did = None::<LocalDefId>;
- let ctor_did = def.ctor_hir_id().map(|hir_id| tcx.hir().local_def_id(hir_id));
-
- let variants = std::iter::once(convert_variant(
- tcx,
- variant_did,
- ctor_did,
- item.ident,
- ty::VariantDiscr::Relative(0),
- def,
- AdtKind::Struct,
- def_id,
- ))
- .collect();
-
- (AdtKind::Struct, variants)
- }
- ItemKind::Union(ref def, _) => {
- let variant_did = None;
- let ctor_did = def.ctor_hir_id().map(|hir_id| tcx.hir().local_def_id(hir_id));
-
- let variants = std::iter::once(convert_variant(
- tcx,
- variant_did,
- ctor_did,
- item.ident,
- ty::VariantDiscr::Relative(0),
- def,
- AdtKind::Union,
- def_id,
- ))
- .collect();
-
- (AdtKind::Union, variants)
- }
- _ => bug!(),
- };
- tcx.alloc_adt_def(def_id.to_def_id(), kind, variants, repr)
-}
-
-/// Ensures that the super-predicates of the trait with a `DefId`
-/// of `trait_def_id` are converted and stored. This also ensures that
-/// the transitive super-predicates are converted.
-fn super_predicates_of(tcx: TyCtxt<'_>, trait_def_id: DefId) -> ty::GenericPredicates<'_> {
- debug!("super_predicates(trait_def_id={:?})", trait_def_id);
- tcx.super_predicates_that_define_assoc_type((trait_def_id, None))
-}
-
-/// Ensures that the super-predicates of the trait with a `DefId`
-/// of `trait_def_id` are converted and stored. This also ensures that
-/// the transitive super-predicates are converted.
-fn super_predicates_that_define_assoc_type(
- tcx: TyCtxt<'_>,
- (trait_def_id, assoc_name): (DefId, Option<Ident>),
-) -> ty::GenericPredicates<'_> {
- debug!(
- "super_predicates_that_define_assoc_type(trait_def_id={:?}, assoc_name={:?})",
- trait_def_id, assoc_name
- );
- if trait_def_id.is_local() {
- debug!("super_predicates_that_define_assoc_type: local trait_def_id={:?}", trait_def_id);
- let trait_hir_id = tcx.hir().local_def_id_to_hir_id(trait_def_id.expect_local());
-
- let Node::Item(item) = tcx.hir().get(trait_hir_id) else {
- bug!("trait_node_id {} is not an item", trait_hir_id);
- };
-
- let (generics, bounds) = match item.kind {
- hir::ItemKind::Trait(.., ref generics, ref supertraits, _) => (generics, supertraits),
- hir::ItemKind::TraitAlias(ref generics, ref supertraits) => (generics, supertraits),
- _ => span_bug!(item.span, "super_predicates invoked on non-trait"),
- };
-
- let icx = ItemCtxt::new(tcx, trait_def_id);
-
- // Convert the bounds that follow the colon, e.g., `Bar + Zed` in `trait Foo: Bar + Zed`.
- let self_param_ty = tcx.types.self_param;
- let superbounds1 = if let Some(assoc_name) = assoc_name {
- <dyn AstConv<'_>>::compute_bounds_that_match_assoc_type(
- &icx,
- self_param_ty,
- bounds,
- assoc_name,
- )
- } else {
- <dyn AstConv<'_>>::compute_bounds(&icx, self_param_ty, bounds)
- };
-
- let superbounds1 = superbounds1.predicates(tcx, self_param_ty);
-
- // Convert any explicit superbounds in the where-clause,
- // e.g., `trait Foo where Self: Bar`.
- // In the case of trait aliases, however, we include all bounds in the where-clause,
- // so e.g., `trait Foo = where u32: PartialEq<Self>` would include `u32: PartialEq<Self>`
- // as one of its "superpredicates".
- let is_trait_alias = tcx.is_trait_alias(trait_def_id);
- let superbounds2 = icx.type_parameter_bounds_in_generics(
- generics,
- item.hir_id(),
- self_param_ty,
- OnlySelfBounds(!is_trait_alias),
- assoc_name,
- );
-
- // Combine the two lists to form the complete set of superbounds:
- let superbounds = &*tcx.arena.alloc_from_iter(superbounds1.into_iter().chain(superbounds2));
- debug!(?superbounds);
-
- // Now require that immediate supertraits are converted,
- // which will, in turn, reach indirect supertraits.
- if assoc_name.is_none() {
- for &(pred, span) in superbounds {
- debug!("superbound: {:?}", pred);
- if let ty::PredicateKind::Trait(bound) = pred.kind().skip_binder() {
- tcx.at(span).super_predicates_of(bound.def_id());
- }
- }
- }
-
- ty::GenericPredicates { parent: None, predicates: superbounds }
- } else {
- // if `assoc_name` is None, then the query should've been redirected to an
- // external provider
- assert!(assoc_name.is_some());
- tcx.super_predicates_of(trait_def_id)
- }
-}
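
A rough, stand-alone sketch of the two superbound sources handled above (the bounds after the colon and the `Self:` bounds in the where-clause), using invented trait and function names:

use std::fmt::Debug;

// `Debug` comes from the bounds after the colon; `Clone` comes from the
// `Self: Clone` where-clause. Both become super-predicates of `Tagged`,
// so `T: Tagged` also proves `T: Debug` and `T: Clone`.
trait Tagged: Debug
where
    Self: Clone,
{
    fn tag(&self) -> u32;
}

fn print_tag<T: Tagged>(value: &T) {
    // `Debug` is usable here purely through the supertrait bound.
    println!("{:?} has tag {}", value, value.tag());
}
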
-
-fn trait_def(tcx: TyCtxt<'_>, def_id: DefId) -> ty::TraitDef {
- let item = tcx.hir().expect_item(def_id.expect_local());
-
- let (is_auto, unsafety, items) = match item.kind {
- hir::ItemKind::Trait(is_auto, unsafety, .., items) => {
- (is_auto == hir::IsAuto::Yes, unsafety, items)
- }
- hir::ItemKind::TraitAlias(..) => (false, hir::Unsafety::Normal, &[][..]),
- _ => span_bug!(item.span, "trait_def_of_item invoked on non-trait"),
- };
-
- let paren_sugar = tcx.has_attr(def_id, sym::rustc_paren_sugar);
- if paren_sugar && !tcx.features().unboxed_closures {
- tcx.sess
- .struct_span_err(
- item.span,
- "the `#[rustc_paren_sugar]` attribute is a temporary means of controlling \
- which traits can use parenthetical notation",
- )
- .help("add `#![feature(unboxed_closures)]` to the crate attributes to use it")
- .emit();
- }
-
- let is_marker = tcx.has_attr(def_id, sym::marker);
- let skip_array_during_method_dispatch =
- tcx.has_attr(def_id, sym::rustc_skip_array_during_method_dispatch);
- let spec_kind = if tcx.has_attr(def_id, sym::rustc_unsafe_specialization_marker) {
- ty::trait_def::TraitSpecializationKind::Marker
- } else if tcx.has_attr(def_id, sym::rustc_specialization_trait) {
- ty::trait_def::TraitSpecializationKind::AlwaysApplicable
- } else {
- ty::trait_def::TraitSpecializationKind::None
- };
- let must_implement_one_of = tcx
- .get_attr(def_id, sym::rustc_must_implement_one_of)
- // Check that there are at least 2 arguments of `#[rustc_must_implement_one_of]`
- // and that they are all identifiers
- .and_then(|attr| match attr.meta_item_list() {
- Some(items) if items.len() < 2 => {
- tcx.sess
- .struct_span_err(
- attr.span,
- "the `#[rustc_must_implement_one_of]` attribute must be \
- used with at least 2 args",
- )
- .emit();
-
- None
- }
- Some(items) => items
- .into_iter()
- .map(|item| item.ident().ok_or(item.span()))
- .collect::<Result<Box<[_]>, _>>()
- .map_err(|span| {
- tcx.sess
- .struct_span_err(span, "must be a name of an associated function")
- .emit();
- })
- .ok()
- .zip(Some(attr.span)),
- // Error is reported by `rustc_attr!`
- None => None,
- })
- // Check that all arguments of `#[rustc_must_implement_one_of]` reference
- // functions in the trait with default implementations
- .and_then(|(list, attr_span)| {
- let errors = list.iter().filter_map(|ident| {
- let item = items.iter().find(|item| item.ident == *ident);
-
- match item {
- Some(item) if matches!(item.kind, hir::AssocItemKind::Fn { .. }) => {
- if !tcx.impl_defaultness(item.id.def_id).has_value() {
- tcx.sess
- .struct_span_err(
- item.span,
- "This function doesn't have a default implementation",
- )
- .span_note(attr_span, "required by this annotation")
- .emit();
-
- return Some(());
- }
-
- return None;
- }
- Some(item) => {
- tcx.sess
- .struct_span_err(item.span, "Not a function")
- .span_note(attr_span, "required by this annotation")
- .note(
- "All `#[rustc_must_implement_one_of]` arguments \
- must be associated function names",
- )
- .emit();
- }
- None => {
- tcx.sess
- .struct_span_err(ident.span, "Function not found in this trait")
- .emit();
- }
- }
-
- Some(())
- });
-
- (errors.count() == 0).then_some(list)
- })
- // Check for duplicates
- .and_then(|list| {
- let mut set: FxHashMap<Symbol, Span> = FxHashMap::default();
- let mut no_dups = true;
-
- for ident in &*list {
- if let Some(dup) = set.insert(ident.name, ident.span) {
- tcx.sess
-                        .struct_span_err(vec![dup, ident.span], "Function names are duplicated")
- .note(
- "All `#[rustc_must_implement_one_of]` arguments \
- must be unique",
- )
- .emit();
-
- no_dups = false;
- }
- }
-
- no_dups.then_some(list)
- });
-
- ty::TraitDef::new(
- def_id,
- unsafety,
- paren_sugar,
- is_auto,
- is_marker,
- skip_array_during_method_dispatch,
- spec_kind,
- must_implement_one_of,
- )
-}
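
A hedged sketch of the attribute the checks above enforce; `#[rustc_must_implement_one_of]` is a nightly-only internal attribute (gated behind `rustc_attrs`), and the trait and method names here are invented:

#![feature(rustc_attrs)]

// At least two arguments, each naming an associated function of the trait
// that has a default body; implementors must override at least one of them.
#[rustc_must_implement_one_of(as_str, as_bytes)]
trait Stringish {
    fn as_str(&self) -> &str {
        std::str::from_utf8(self.as_bytes()).unwrap()
    }
    fn as_bytes(&self) -> &[u8] {
        self.as_str().as_bytes()
    }
}
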
-
-fn has_late_bound_regions<'tcx>(tcx: TyCtxt<'tcx>, node: Node<'tcx>) -> Option<Span> {
- struct LateBoundRegionsDetector<'tcx> {
- tcx: TyCtxt<'tcx>,
- outer_index: ty::DebruijnIndex,
- has_late_bound_regions: Option<Span>,
- }
-
- impl<'tcx> Visitor<'tcx> for LateBoundRegionsDetector<'tcx> {
- fn visit_ty(&mut self, ty: &'tcx hir::Ty<'tcx>) {
- if self.has_late_bound_regions.is_some() {
- return;
- }
- match ty.kind {
- hir::TyKind::BareFn(..) => {
- self.outer_index.shift_in(1);
- intravisit::walk_ty(self, ty);
- self.outer_index.shift_out(1);
- }
- _ => intravisit::walk_ty(self, ty),
- }
- }
-
- fn visit_poly_trait_ref(
- &mut self,
- tr: &'tcx hir::PolyTraitRef<'tcx>,
- m: hir::TraitBoundModifier,
- ) {
- if self.has_late_bound_regions.is_some() {
- return;
- }
- self.outer_index.shift_in(1);
- intravisit::walk_poly_trait_ref(self, tr, m);
- self.outer_index.shift_out(1);
- }
-
- fn visit_lifetime(&mut self, lt: &'tcx hir::Lifetime) {
- if self.has_late_bound_regions.is_some() {
- return;
- }
-
- match self.tcx.named_region(lt.hir_id) {
- Some(rl::Region::Static | rl::Region::EarlyBound(..)) => {}
- Some(rl::Region::LateBound(debruijn, _, _)) if debruijn < self.outer_index => {}
- Some(rl::Region::LateBound(..) | rl::Region::Free(..)) | None => {
- self.has_late_bound_regions = Some(lt.span);
- }
- }
- }
- }
-
- fn has_late_bound_regions<'tcx>(
- tcx: TyCtxt<'tcx>,
- generics: &'tcx hir::Generics<'tcx>,
- decl: &'tcx hir::FnDecl<'tcx>,
- ) -> Option<Span> {
- let mut visitor = LateBoundRegionsDetector {
- tcx,
- outer_index: ty::INNERMOST,
- has_late_bound_regions: None,
- };
- for param in generics.params {
- if let GenericParamKind::Lifetime { .. } = param.kind {
- if tcx.is_late_bound(param.hir_id) {
- return Some(param.span);
- }
- }
- }
- visitor.visit_fn_decl(decl);
- visitor.has_late_bound_regions
- }
-
- match node {
- Node::TraitItem(item) => match item.kind {
- hir::TraitItemKind::Fn(ref sig, _) => {
- has_late_bound_regions(tcx, &item.generics, sig.decl)
- }
- _ => None,
- },
- Node::ImplItem(item) => match item.kind {
- hir::ImplItemKind::Fn(ref sig, _) => {
- has_late_bound_regions(tcx, &item.generics, sig.decl)
- }
- _ => None,
- },
- Node::ForeignItem(item) => match item.kind {
- hir::ForeignItemKind::Fn(fn_decl, _, ref generics) => {
- has_late_bound_regions(tcx, generics, fn_decl)
- }
- _ => None,
- },
- Node::Item(item) => match item.kind {
- hir::ItemKind::Fn(ref sig, .., ref generics, _) => {
- has_late_bound_regions(tcx, generics, sig.decl)
- }
- _ => None,
- },
- _ => None,
- }
-}
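
For orientation, a small sketch (ordinary user code, invented names) of the distinction this detector draws: a lifetime used only in the signature is late-bound, while one that appears in a bound is early-bound.

// `'late` appears only in argument/return types, so it is late-bound:
// callers never supply it explicitly and it is instantiated per call.
fn first_byte<'late>(bytes: &'late [u8]) -> Option<&'late u8> {
    bytes.first()
}

// `'early` participates in the bound `T: 'early`, so it is early-bound and
// becomes part of the function's generics alongside `T`.
fn hold<'early, T: 'early>(value: &'early T) -> &'early T {
    value
}
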
-
-struct AnonConstInParamTyDetector {
- in_param_ty: bool,
- found_anon_const_in_param_ty: bool,
- ct: HirId,
-}
-
-impl<'v> Visitor<'v> for AnonConstInParamTyDetector {
- fn visit_generic_param(&mut self, p: &'v hir::GenericParam<'v>) {
- if let GenericParamKind::Const { ty, default: _ } = p.kind {
- let prev = self.in_param_ty;
- self.in_param_ty = true;
- self.visit_ty(ty);
- self.in_param_ty = prev;
- }
- }
-
- fn visit_anon_const(&mut self, c: &'v hir::AnonConst) {
- if self.in_param_ty && self.ct == c.hir_id {
- self.found_anon_const_in_param_ty = true;
- } else {
- intravisit::walk_anon_const(self, c)
- }
- }
-}
-
-fn generics_of(tcx: TyCtxt<'_>, def_id: DefId) -> ty::Generics {
- use rustc_hir::*;
-
- let hir_id = tcx.hir().local_def_id_to_hir_id(def_id.expect_local());
-
- let node = tcx.hir().get(hir_id);
- let parent_def_id = match node {
- Node::ImplItem(_)
- | Node::TraitItem(_)
- | Node::Variant(_)
- | Node::Ctor(..)
- | Node::Field(_) => {
- let parent_id = tcx.hir().get_parent_item(hir_id);
- Some(parent_id.to_def_id())
- }
- // FIXME(#43408) always enable this once `lazy_normalization` is
- // stable enough and does not need a feature gate anymore.
- Node::AnonConst(_) => {
- let parent_def_id = tcx.hir().get_parent_item(hir_id);
-
- let mut in_param_ty = false;
- for (_parent, node) in tcx.hir().parent_iter(hir_id) {
- if let Some(generics) = node.generics() {
- let mut visitor = AnonConstInParamTyDetector {
- in_param_ty: false,
- found_anon_const_in_param_ty: false,
- ct: hir_id,
- };
-
- visitor.visit_generics(generics);
- in_param_ty = visitor.found_anon_const_in_param_ty;
- break;
- }
- }
-
- if in_param_ty {
- // We do not allow generic parameters in anon consts if we are inside
- // of a const parameter type, e.g. `struct Foo<const N: usize, const M: [u8; N]>` is not allowed.
- None
- } else if tcx.lazy_normalization() {
- if let Some(param_id) = tcx.hir().opt_const_param_default_param_hir_id(hir_id) {
- // If the def_id we are calling generics_of on is an anon ct default i.e:
- //
- // struct Foo<const N: usize = { .. }>;
- // ^^^ ^ ^^^^^^ def id of this anon const
- // ^ ^ param_id
- // ^ parent_def_id
- //
- // then we only want to return generics for params to the left of `N`. If we don't do that we
- // end up with that const looking like: `ty::ConstKind::Unevaluated(def_id, substs: [N#0])`.
- //
- // This causes ICEs (#86580) when building the substs for Foo in `fn foo() -> Foo { .. }` as
- // we substitute the defaults with the partially built substs when we build the substs. Subst'ing
- // the `N#0` on the unevaluated const indexes into the empty substs we're in the process of building.
- //
- // We fix this by having this function return the parent's generics ourselves and truncating the
- // generics to only include non-forward declared params (with the exception of the `Self` ty)
- //
- // For the above code example that means we want `substs: []`
- // For the following struct def we want `substs: [N#0]` when generics_of is called on
- // the def id of the `{ N + 1 }` anon const
- // struct Foo<const N: usize, const M: usize = { N + 1 }>;
- //
- // This has some implications for how we get the predicates available to the anon const
- // see `explicit_predicates_of` for more information on this
- let generics = tcx.generics_of(parent_def_id.to_def_id());
- let param_def = tcx.hir().local_def_id(param_id).to_def_id();
- let param_def_idx = generics.param_def_id_to_index[&param_def];
- // In the above example this would be .params[..N#0]
- let params = generics.params[..param_def_idx as usize].to_owned();
- let param_def_id_to_index =
- params.iter().map(|param| (param.def_id, param.index)).collect();
-
- return ty::Generics {
- // we set the parent of these generics to be our parent's parent so that we
-                        // don't end up with substs: [N, M, N] for the const default on a struct like this:
- // struct Foo<const N: usize, const M: usize = { ... }>;
- parent: generics.parent,
- parent_count: generics.parent_count,
- params,
- param_def_id_to_index,
- has_self: generics.has_self,
- has_late_bound_regions: generics.has_late_bound_regions,
- };
- }
-
- // HACK(eddyb) this provides the correct generics when
-                // `feature(generic_const_exprs)` is enabled, so that const expressions
- // used with const generics, e.g. `Foo<{N+1}>`, can work at all.
- //
- // Note that we do not supply the parent generics when using
- // `min_const_generics`.
- Some(parent_def_id.to_def_id())
- } else {
- let parent_node = tcx.hir().get(tcx.hir().get_parent_node(hir_id));
- match parent_node {
- // HACK(eddyb) this provides the correct generics for repeat
- // expressions' count (i.e. `N` in `[x; N]`), and explicit
- // `enum` discriminants (i.e. `D` in `enum Foo { Bar = D }`),
- // as they shouldn't be able to cause query cycle errors.
- Node::Expr(&Expr { kind: ExprKind::Repeat(_, ref constant), .. })
- if constant.hir_id() == hir_id =>
- {
- Some(parent_def_id.to_def_id())
- }
- Node::Variant(Variant { disr_expr: Some(ref constant), .. })
- if constant.hir_id == hir_id =>
- {
- Some(parent_def_id.to_def_id())
- }
- Node::Expr(&Expr { kind: ExprKind::ConstBlock(_), .. }) => {
- Some(tcx.typeck_root_def_id(def_id))
- }
- // Exclude `GlobalAsm` here which cannot have generics.
- Node::Expr(&Expr { kind: ExprKind::InlineAsm(asm), .. })
- if asm.operands.iter().any(|(op, _op_sp)| match op {
- hir::InlineAsmOperand::Const { anon_const }
- | hir::InlineAsmOperand::SymFn { anon_const } => {
- anon_const.hir_id == hir_id
- }
- _ => false,
- }) =>
- {
- Some(parent_def_id.to_def_id())
- }
- _ => None,
- }
- }
- }
- Node::Expr(&hir::Expr { kind: hir::ExprKind::Closure { .. }, .. }) => {
- Some(tcx.typeck_root_def_id(def_id))
- }
- Node::Item(item) => match item.kind {
- ItemKind::OpaqueTy(hir::OpaqueTy {
- origin:
- hir::OpaqueTyOrigin::FnReturn(fn_def_id) | hir::OpaqueTyOrigin::AsyncFn(fn_def_id),
- ..
- }) => Some(fn_def_id.to_def_id()),
- ItemKind::OpaqueTy(hir::OpaqueTy { origin: hir::OpaqueTyOrigin::TyAlias, .. }) => {
- let parent_id = tcx.hir().get_parent_item(hir_id);
- assert_ne!(parent_id, CRATE_DEF_ID);
- debug!("generics_of: parent of opaque ty {:?} is {:?}", def_id, parent_id);
- // Opaque types are always nested within another item, and
- // inherit the generics of the item.
- Some(parent_id.to_def_id())
- }
- _ => None,
- },
- _ => None,
- };
-
- let no_generics = hir::Generics::empty();
- let ast_generics = node.generics().unwrap_or(&no_generics);
- let (opt_self, allow_defaults) = match node {
- Node::Item(item) => {
- match item.kind {
- ItemKind::Trait(..) | ItemKind::TraitAlias(..) => {
- // Add in the self type parameter.
- //
- // Something of a hack: use the node id for the trait, also as
- // the node id for the Self type parameter.
- let opt_self = Some(ty::GenericParamDef {
- index: 0,
- name: kw::SelfUpper,
- def_id,
- pure_wrt_drop: false,
- kind: ty::GenericParamDefKind::Type {
- has_default: false,
- object_lifetime_default: rl::Set1::Empty,
- synthetic: false,
- },
- });
-
- (opt_self, true)
- }
- ItemKind::TyAlias(..)
- | ItemKind::Enum(..)
- | ItemKind::Struct(..)
- | ItemKind::OpaqueTy(..)
- | ItemKind::Union(..) => (None, true),
- _ => (None, false),
- }
- }
- _ => (None, false),
- };
-
- let has_self = opt_self.is_some();
- let mut parent_has_self = false;
- let mut own_start = has_self as u32;
- let parent_count = parent_def_id.map_or(0, |def_id| {
- let generics = tcx.generics_of(def_id);
- assert!(!has_self);
- parent_has_self = generics.has_self;
- own_start = generics.count() as u32;
- generics.parent_count + generics.params.len()
- });
-
- let mut params: Vec<_> = Vec::with_capacity(ast_generics.params.len() + has_self as usize);
-
- if let Some(opt_self) = opt_self {
- params.push(opt_self);
- }
-
- let early_lifetimes = early_bound_lifetimes_from_generics(tcx, ast_generics);
- params.extend(early_lifetimes.enumerate().map(|(i, param)| ty::GenericParamDef {
- name: param.name.ident().name,
- index: own_start + i as u32,
- def_id: tcx.hir().local_def_id(param.hir_id).to_def_id(),
- pure_wrt_drop: param.pure_wrt_drop,
- kind: ty::GenericParamDefKind::Lifetime,
- }));
-
- let object_lifetime_defaults = tcx.object_lifetime_defaults(hir_id.owner);
-
- // Now create the real type and const parameters.
- let type_start = own_start - has_self as u32 + params.len() as u32;
- let mut i = 0;
-
- params.extend(ast_generics.params.iter().filter_map(|param| match param.kind {
- GenericParamKind::Lifetime { .. } => None,
- GenericParamKind::Type { ref default, synthetic, .. } => {
- if !allow_defaults && default.is_some() {
- if !tcx.features().default_type_parameter_fallback {
- tcx.struct_span_lint_hir(
- lint::builtin::INVALID_TYPE_PARAM_DEFAULT,
- param.hir_id,
- param.span,
- |lint| {
- lint.build(
- "defaults for type parameters are only allowed in \
- `struct`, `enum`, `type`, or `trait` definitions",
- )
- .emit();
- },
- );
- }
- }
-
- let kind = ty::GenericParamDefKind::Type {
- has_default: default.is_some(),
- object_lifetime_default: object_lifetime_defaults
- .as_ref()
- .map_or(rl::Set1::Empty, |o| o[i]),
- synthetic,
- };
-
- let param_def = ty::GenericParamDef {
- index: type_start + i as u32,
- name: param.name.ident().name,
- def_id: tcx.hir().local_def_id(param.hir_id).to_def_id(),
- pure_wrt_drop: param.pure_wrt_drop,
- kind,
- };
- i += 1;
- Some(param_def)
- }
- GenericParamKind::Const { default, .. } => {
- if !allow_defaults && default.is_some() {
- tcx.sess.span_err(
- param.span,
- "defaults for const parameters are only allowed in \
- `struct`, `enum`, `type`, or `trait` definitions",
- );
- }
-
- let param_def = ty::GenericParamDef {
- index: type_start + i as u32,
- name: param.name.ident().name,
- def_id: tcx.hir().local_def_id(param.hir_id).to_def_id(),
- pure_wrt_drop: param.pure_wrt_drop,
- kind: ty::GenericParamDefKind::Const { has_default: default.is_some() },
- };
- i += 1;
- Some(param_def)
- }
- }));
-
- // provide junk type parameter defs - the only place that
- // cares about anything but the length is instantiation,
- // and we don't do that for closures.
- if let Node::Expr(&hir::Expr {
- kind: hir::ExprKind::Closure(hir::Closure { movability: gen, .. }),
- ..
- }) = node
- {
- let dummy_args = if gen.is_some() {
- &["<resume_ty>", "<yield_ty>", "<return_ty>", "<witness>", "<upvars>"][..]
- } else {
- &["<closure_kind>", "<closure_signature>", "<upvars>"][..]
- };
-
- params.extend(dummy_args.iter().enumerate().map(|(i, &arg)| ty::GenericParamDef {
- index: type_start + i as u32,
- name: Symbol::intern(arg),
- def_id,
- pure_wrt_drop: false,
- kind: ty::GenericParamDefKind::Type {
- has_default: false,
- object_lifetime_default: rl::Set1::Empty,
- synthetic: false,
- },
- }));
- }
-
- // provide junk type parameter defs for const blocks.
- if let Node::AnonConst(_) = node {
- let parent_node = tcx.hir().get(tcx.hir().get_parent_node(hir_id));
- if let Node::Expr(&Expr { kind: ExprKind::ConstBlock(_), .. }) = parent_node {
- params.push(ty::GenericParamDef {
- index: type_start,
- name: Symbol::intern("<const_ty>"),
- def_id,
- pure_wrt_drop: false,
- kind: ty::GenericParamDefKind::Type {
- has_default: false,
- object_lifetime_default: rl::Set1::Empty,
- synthetic: false,
- },
- });
- }
- }
-
- let param_def_id_to_index = params.iter().map(|param| (param.def_id, param.index)).collect();
-
- ty::Generics {
- parent: parent_def_id,
- parent_count,
- params,
- param_def_id_to_index,
- has_self: has_self || parent_has_self,
- has_late_bound_regions: has_late_bound_regions(tcx, node),
- }
-}
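
A minimal sketch of the anon-const-default case the long comment above describes, assuming nightly with `generic_const_exprs` (needed for a default that mentions an earlier parameter); `Buffer` is an invented name:

#![feature(generic_const_exprs)]
#![allow(incomplete_features)]

// The default `{ N + 1 }` is its own anonymous constant. `generics_of` for
// that anon const must only expose the parameters to its left (just `N`),
// otherwise substituting the default while building `Buffer`'s substs would
// index into substs that are still under construction.
struct Buffer<const N: usize, const M: usize = { N + 1 }> {
    data: [u8; N],
    spare: [u8; M],
}
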
-
-fn are_suggestable_generic_args(generic_args: &[hir::GenericArg<'_>]) -> bool {
- generic_args.iter().any(|arg| match arg {
- hir::GenericArg::Type(ty) => is_suggestable_infer_ty(ty),
- hir::GenericArg::Infer(_) => true,
- _ => false,
- })
-}
-
-/// Whether `ty` is a type with `_` placeholders that can be inferred. Used in diagnostics only to
-/// use inference to provide suggestions for the appropriate type if possible.
-fn is_suggestable_infer_ty(ty: &hir::Ty<'_>) -> bool {
- debug!(?ty);
- use hir::TyKind::*;
- match &ty.kind {
- Infer => true,
- Slice(ty) => is_suggestable_infer_ty(ty),
- Array(ty, length) => {
- is_suggestable_infer_ty(ty) || matches!(length, hir::ArrayLen::Infer(_, _))
- }
- Tup(tys) => tys.iter().any(is_suggestable_infer_ty),
- Ptr(mut_ty) | Rptr(_, mut_ty) => is_suggestable_infer_ty(mut_ty.ty),
- OpaqueDef(_, generic_args) => are_suggestable_generic_args(generic_args),
- Path(hir::QPath::TypeRelative(ty, segment)) => {
- is_suggestable_infer_ty(ty) || are_suggestable_generic_args(segment.args().args)
- }
- Path(hir::QPath::Resolved(ty_opt, hir::Path { segments, .. })) => {
- ty_opt.map_or(false, is_suggestable_infer_ty)
- || segments.iter().any(|segment| are_suggestable_generic_args(segment.args().args))
- }
- _ => false,
- }
-}
-
-pub fn get_infer_ret_ty<'hir>(output: &'hir hir::FnRetTy<'hir>) -> Option<&'hir hir::Ty<'hir>> {
- if let hir::FnRetTy::Return(ty) = output {
- if is_suggestable_infer_ty(ty) {
- return Some(&*ty);
- }
- }
- None
-}
-
-fn fn_sig(tcx: TyCtxt<'_>, def_id: DefId) -> ty::PolyFnSig<'_> {
- use rustc_hir::Node::*;
- use rustc_hir::*;
-
- let def_id = def_id.expect_local();
- let hir_id = tcx.hir().local_def_id_to_hir_id(def_id);
-
- let icx = ItemCtxt::new(tcx, def_id.to_def_id());
-
- match tcx.hir().get(hir_id) {
- TraitItem(hir::TraitItem {
- kind: TraitItemKind::Fn(sig, TraitFn::Provided(_)),
- generics,
- ..
- })
- | Item(hir::Item { kind: ItemKind::Fn(sig, generics, _), .. }) => {
- infer_return_ty_for_fn_sig(tcx, sig, generics, def_id, &icx)
- }
-
- ImplItem(hir::ImplItem { kind: ImplItemKind::Fn(sig, _), generics, .. }) => {
-            // Do not try to infer the return type for an impl method coming from a trait
- if let Item(hir::Item { kind: ItemKind::Impl(i), .. }) =
- tcx.hir().get(tcx.hir().get_parent_node(hir_id))
- && i.of_trait.is_some()
- {
- <dyn AstConv<'_>>::ty_of_fn(
- &icx,
- hir_id,
- sig.header.unsafety,
- sig.header.abi,
- sig.decl,
- Some(generics),
- None,
- )
- } else {
- infer_return_ty_for_fn_sig(tcx, sig, generics, def_id, &icx)
- }
- }
-
- TraitItem(hir::TraitItem {
- kind: TraitItemKind::Fn(FnSig { header, decl, span: _ }, _),
- generics,
- ..
- }) => <dyn AstConv<'_>>::ty_of_fn(
- &icx,
- hir_id,
- header.unsafety,
- header.abi,
- decl,
- Some(generics),
- None,
- ),
-
- ForeignItem(&hir::ForeignItem { kind: ForeignItemKind::Fn(fn_decl, _, _), .. }) => {
- let abi = tcx.hir().get_foreign_abi(hir_id);
- compute_sig_of_foreign_fn_decl(tcx, def_id.to_def_id(), fn_decl, abi)
- }
-
- Ctor(data) | Variant(hir::Variant { data, .. }) if data.ctor_hir_id().is_some() => {
- let ty = tcx.type_of(tcx.hir().get_parent_item(hir_id));
- let inputs =
- data.fields().iter().map(|f| tcx.type_of(tcx.hir().local_def_id(f.hir_id)));
- ty::Binder::dummy(tcx.mk_fn_sig(
- inputs,
- ty,
- false,
- hir::Unsafety::Normal,
- abi::Abi::Rust,
- ))
- }
-
- Expr(&hir::Expr { kind: hir::ExprKind::Closure { .. }, .. }) => {
- // Closure signatures are not like other function
- // signatures and cannot be accessed through `fn_sig`. For
- // example, a closure signature excludes the `self`
- // argument. In any case they are embedded within the
- // closure type as part of the `ClosureSubsts`.
- //
- // To get the signature of a closure, you should use the
- // `sig` method on the `ClosureSubsts`:
- //
- // substs.as_closure().sig(def_id, tcx)
- bug!(
- "to get the signature of a closure, use `substs.as_closure().sig()` not `fn_sig()`",
- );
- }
-
- x => {
- bug!("unexpected sort of node in fn_sig(): {:?}", x);
- }
- }
-}
-
-fn infer_return_ty_for_fn_sig<'tcx>(
- tcx: TyCtxt<'tcx>,
- sig: &hir::FnSig<'_>,
- generics: &hir::Generics<'_>,
- def_id: LocalDefId,
- icx: &ItemCtxt<'tcx>,
-) -> ty::PolyFnSig<'tcx> {
- let hir_id = tcx.hir().local_def_id_to_hir_id(def_id);
-
- match get_infer_ret_ty(&sig.decl.output) {
- Some(ty) => {
- let fn_sig = tcx.typeck(def_id).liberated_fn_sigs()[hir_id];
- // Typeck doesn't expect erased regions to be returned from `type_of`.
- let fn_sig = tcx.fold_regions(fn_sig, |r, _| match *r {
- ty::ReErased => tcx.lifetimes.re_static,
- _ => r,
- });
- let fn_sig = ty::Binder::dummy(fn_sig);
-
- let mut visitor = HirPlaceholderCollector::default();
- visitor.visit_ty(ty);
- let mut diag = bad_placeholder(tcx, visitor.0, "return type");
- let ret_ty = fn_sig.skip_binder().output();
- if ret_ty.is_suggestable(tcx, false) {
- diag.span_suggestion(
- ty.span,
- "replace with the correct return type",
- ret_ty,
- Applicability::MachineApplicable,
- );
- } else if matches!(ret_ty.kind(), ty::FnDef(..)) {
- let fn_sig = ret_ty.fn_sig(tcx);
- if fn_sig
- .skip_binder()
- .inputs_and_output
- .iter()
- .all(|t| t.is_suggestable(tcx, false))
- {
- diag.span_suggestion(
- ty.span,
- "replace with the correct return type",
- fn_sig,
- Applicability::MachineApplicable,
- );
- }
- } else if ret_ty.is_closure() {
- // We're dealing with a closure, so we should suggest using `impl Fn` or trait bounds
- // to prevent the user from getting a papercut while trying to use the unique closure
- // syntax (e.g. `[closure@src/lib.rs:2:5: 2:9]`).
- diag.help("consider using an `Fn`, `FnMut`, or `FnOnce` trait bound");
- diag.note("for more information on `Fn` traits and closure types, see https://doc.rust-lang.org/book/ch13-01-closures.html");
- }
- diag.emit();
-
- fn_sig
- }
- None => <dyn AstConv<'_>>::ty_of_fn(
- icx,
- hir_id,
- sig.header.unsafety,
- sig.header.abi,
- sig.decl,
- Some(generics),
- None,
- ),
- }
-}
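
A tiny example of the input this path handles (user code that is rejected, not compiler code): `_` in return position is an error, but the diagnostic built above reuses the typeck result to suggest the concrete type.

// Rejected with E0121; the suggestion offers `i32` as the return type.
fn double(x: i32) -> _ {
    x * 2
}
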
-
-fn impl_trait_ref(tcx: TyCtxt<'_>, def_id: DefId) -> Option<ty::TraitRef<'_>> {
- let icx = ItemCtxt::new(tcx, def_id);
- match tcx.hir().expect_item(def_id.expect_local()).kind {
- hir::ItemKind::Impl(ref impl_) => impl_.of_trait.as_ref().map(|ast_trait_ref| {
- let selfty = tcx.type_of(def_id);
- <dyn AstConv<'_>>::instantiate_mono_trait_ref(&icx, ast_trait_ref, selfty)
- }),
- _ => bug!(),
- }
-}
-
-fn impl_polarity(tcx: TyCtxt<'_>, def_id: DefId) -> ty::ImplPolarity {
- let is_rustc_reservation = tcx.has_attr(def_id, sym::rustc_reservation_impl);
- let item = tcx.hir().expect_item(def_id.expect_local());
- match &item.kind {
- hir::ItemKind::Impl(hir::Impl {
- polarity: hir::ImplPolarity::Negative(span),
- of_trait,
- ..
- }) => {
- if is_rustc_reservation {
- let span = span.to(of_trait.as_ref().map_or(*span, |t| t.path.span));
- tcx.sess.span_err(span, "reservation impls can't be negative");
- }
- ty::ImplPolarity::Negative
- }
- hir::ItemKind::Impl(hir::Impl {
- polarity: hir::ImplPolarity::Positive,
- of_trait: None,
- ..
- }) => {
- if is_rustc_reservation {
- tcx.sess.span_err(item.span, "reservation impls can't be inherent");
- }
- ty::ImplPolarity::Positive
- }
- hir::ItemKind::Impl(hir::Impl {
- polarity: hir::ImplPolarity::Positive,
- of_trait: Some(_),
- ..
- }) => {
- if is_rustc_reservation {
- ty::ImplPolarity::Reservation
- } else {
- ty::ImplPolarity::Positive
- }
- }
- item => bug!("impl_polarity: {:?} not an impl", item),
- }
-}
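
A hedged sketch of the polarities classified above, in user terms: ordinary impls are positive, and an explicit opt-out of an auto trait (nightly `negative_impls`) is negative; reservation impls are internal-only and not shown. Names are invented.

#![feature(negative_impls)]

struct SingleThreaded {
    counter: std::cell::Cell<u32>,
}

// Positive polarity: a normal trait impl.
impl Default for SingleThreaded {
    fn default() -> Self {
        SingleThreaded { counter: std::cell::Cell::new(0) }
    }
}

// Negative polarity: an explicit promise that this type will never be `Send`.
impl !Send for SingleThreaded {}
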
-
-/// Returns the early-bound lifetimes declared in this generics
-/// listing. For anything other than fns/methods, this is just all
-/// the lifetimes that are declared. For fns or methods, we have to
-/// screen out those that do not appear in any where-clauses etc using
-/// `resolve_lifetime::early_bound_lifetimes`.
-fn early_bound_lifetimes_from_generics<'a, 'tcx: 'a>(
- tcx: TyCtxt<'tcx>,
- generics: &'a hir::Generics<'a>,
-) -> impl Iterator<Item = &'a hir::GenericParam<'a>> + Captures<'tcx> {
- generics.params.iter().filter(move |param| match param.kind {
- GenericParamKind::Lifetime { .. } => !tcx.is_late_bound(param.hir_id),
- _ => false,
- })
-}
-
-/// Returns a list of type predicates for the definition with ID `def_id`, including inferred
-/// lifetime constraints. This includes all predicates returned by `explicit_predicates_of`, plus
-/// inferred constraints concerning which regions outlive other regions.
-fn predicates_defined_on(tcx: TyCtxt<'_>, def_id: DefId) -> ty::GenericPredicates<'_> {
- debug!("predicates_defined_on({:?})", def_id);
- let mut result = tcx.explicit_predicates_of(def_id);
- debug!("predicates_defined_on: explicit_predicates_of({:?}) = {:?}", def_id, result,);
- let inferred_outlives = tcx.inferred_outlives_of(def_id);
- if !inferred_outlives.is_empty() {
- debug!(
- "predicates_defined_on: inferred_outlives_of({:?}) = {:?}",
- def_id, inferred_outlives,
- );
- if result.predicates.is_empty() {
- result.predicates = inferred_outlives;
- } else {
- result.predicates = tcx
- .arena
- .alloc_from_iter(result.predicates.iter().chain(inferred_outlives).copied());
- }
- }
-
- debug!("predicates_defined_on({:?}) = {:?}", def_id, result);
- result
-}
-
-/// Returns a list of all type predicates (explicit and implicit) for the definition with
-/// ID `def_id`. This includes all predicates returned by `predicates_defined_on`, plus
-/// `Self: Trait` predicates for traits.
-fn predicates_of(tcx: TyCtxt<'_>, def_id: DefId) -> ty::GenericPredicates<'_> {
- let mut result = tcx.predicates_defined_on(def_id);
-
- if tcx.is_trait(def_id) {
- // For traits, add `Self: Trait` predicate. This is
- // not part of the predicates that a user writes, but it
- // is something that one must prove in order to invoke a
- // method or project an associated type.
- //
- // In the chalk setup, this predicate is not part of the
- // "predicates" for a trait item. But it is useful in
- // rustc because if you directly (e.g.) invoke a trait
- // method like `Trait::method(...)`, you must naturally
- // prove that the trait applies to the types that were
- // used, and adding the predicate into this list ensures
- // that this is done.
- //
- // We use a DUMMY_SP here as a way to signal trait bounds that come
- // from the trait itself that *shouldn't* be shown as the source of
- // an obligation and instead be skipped. Otherwise we'd use
- // `tcx.def_span(def_id);`
-
- let constness = if tcx.has_attr(def_id, sym::const_trait) {
- ty::BoundConstness::ConstIfConst
- } else {
- ty::BoundConstness::NotConst
- };
-
- let span = rustc_span::DUMMY_SP;
- result.predicates =
- tcx.arena.alloc_from_iter(result.predicates.iter().copied().chain(std::iter::once((
- ty::TraitRef::identity(tcx, def_id).with_constness(constness).to_predicate(tcx),
- span,
- ))));
- }
- debug!("predicates_of(def_id={:?}) = {:?}", def_id, result);
- result
-}
-
-/// Returns a list of user-specified type predicates for the definition with ID `def_id`.
-/// N.B., this does not include any implied/inferred constraints.
-fn gather_explicit_predicates_of(tcx: TyCtxt<'_>, def_id: DefId) -> ty::GenericPredicates<'_> {
- use rustc_hir::*;
-
- debug!("explicit_predicates_of(def_id={:?})", def_id);
-
- let hir_id = tcx.hir().local_def_id_to_hir_id(def_id.expect_local());
- let node = tcx.hir().get(hir_id);
-
- let mut is_trait = None;
- let mut is_default_impl_trait = None;
-
- let icx = ItemCtxt::new(tcx, def_id);
-
- const NO_GENERICS: &hir::Generics<'_> = hir::Generics::empty();
-
- // We use an `IndexSet` to preserves order of insertion.
- // Preserving the order of insertion is important here so as not to break UI tests.
- let mut predicates: FxIndexSet<(ty::Predicate<'_>, Span)> = FxIndexSet::default();
-
- let ast_generics = match node {
- Node::TraitItem(item) => item.generics,
-
- Node::ImplItem(item) => item.generics,
-
- Node::Item(item) => {
- match item.kind {
- ItemKind::Impl(ref impl_) => {
- if impl_.defaultness.is_default() {
- is_default_impl_trait = tcx.impl_trait_ref(def_id).map(ty::Binder::dummy);
- }
- &impl_.generics
- }
- ItemKind::Fn(.., ref generics, _)
- | ItemKind::TyAlias(_, ref generics)
- | ItemKind::Enum(_, ref generics)
- | ItemKind::Struct(_, ref generics)
- | ItemKind::Union(_, ref generics) => *generics,
-
- ItemKind::Trait(_, _, ref generics, ..) => {
- is_trait = Some(ty::TraitRef::identity(tcx, def_id));
- *generics
- }
- ItemKind::TraitAlias(ref generics, _) => {
- is_trait = Some(ty::TraitRef::identity(tcx, def_id));
- *generics
- }
- ItemKind::OpaqueTy(OpaqueTy {
- origin: hir::OpaqueTyOrigin::AsyncFn(..) | hir::OpaqueTyOrigin::FnReturn(..),
- ..
- }) => {
- // return-position impl trait
- //
- // We don't inherit predicates from the parent here:
- // If we have, say `fn f<'a, T: 'a>() -> impl Sized {}`
- // then the return type is `f::<'static, T>::{{opaque}}`.
- //
- // If we inherited the predicates of `f` then we would
- // require that `T: 'static` to show that the return
- // type is well-formed.
- //
- // The only way to have something with this opaque type
- // is from the return type of the containing function,
- // which will ensure that the function's predicates
- // hold.
- return ty::GenericPredicates { parent: None, predicates: &[] };
- }
- ItemKind::OpaqueTy(OpaqueTy {
- ref generics,
- origin: hir::OpaqueTyOrigin::TyAlias,
- ..
- }) => {
- // type-alias impl trait
- generics
- }
-
- _ => NO_GENERICS,
- }
- }
-
- Node::ForeignItem(item) => match item.kind {
- ForeignItemKind::Static(..) => NO_GENERICS,
- ForeignItemKind::Fn(_, _, ref generics) => *generics,
- ForeignItemKind::Type => NO_GENERICS,
- },
-
- _ => NO_GENERICS,
- };
-
- let generics = tcx.generics_of(def_id);
- let parent_count = generics.parent_count as u32;
- let has_own_self = generics.has_self && parent_count == 0;
-
- // Below we'll consider the bounds on the type parameters (including `Self`)
- // and the explicit where-clauses, but to get the full set of predicates
- // on a trait we need to add in the supertrait bounds and bounds found on
- // associated types.
- if let Some(_trait_ref) = is_trait {
- predicates.extend(tcx.super_predicates_of(def_id).predicates.iter().cloned());
- }
-
- // In default impls, we can assume that the self type implements
- // the trait. So in:
- //
- // default impl Foo for Bar { .. }
- //
- // we add a default where clause `Foo: Bar`. We do a similar thing for traits
- // (see below). Recall that a default impl is not itself an impl, but rather a
- // set of defaults that can be incorporated into another impl.
- if let Some(trait_ref) = is_default_impl_trait {
- predicates.insert((trait_ref.without_const().to_predicate(tcx), tcx.def_span(def_id)));
- }
-
- // Collect the region predicates that were declared inline as
- // well. In the case of parameters declared on a fn or method, we
- // have to be careful to only iterate over early-bound regions.
- let mut index = parent_count
- + has_own_self as u32
- + early_bound_lifetimes_from_generics(tcx, ast_generics).count() as u32;
-
- // Collect the predicates that were written inline by the user on each
- // type parameter (e.g., `<T: Foo>`).
- for param in ast_generics.params {
- match param.kind {
- // We already dealt with early bound lifetimes above.
- GenericParamKind::Lifetime { .. } => (),
- GenericParamKind::Type { .. } => {
- let name = param.name.ident().name;
- let param_ty = ty::ParamTy::new(index, name).to_ty(tcx);
- index += 1;
-
- let mut bounds = Bounds::default();
- // Params are implicitly sized unless a `?Sized` bound is found
- <dyn AstConv<'_>>::add_implicitly_sized(
- &icx,
- &mut bounds,
- &[],
- Some((param.hir_id, ast_generics.predicates)),
- param.span,
- );
- predicates.extend(bounds.predicates(tcx, param_ty));
- }
- GenericParamKind::Const { .. } => {
- // Bounds on const parameters are currently not possible.
- index += 1;
- }
- }
- }
-
- // Add in the bounds that appear in the where-clause.
- for predicate in ast_generics.predicates {
- match predicate {
- hir::WherePredicate::BoundPredicate(bound_pred) => {
- let ty = icx.to_ty(bound_pred.bounded_ty);
- let bound_vars = icx.tcx.late_bound_vars(bound_pred.bounded_ty.hir_id);
-
- // Keep the type around in a dummy predicate, in case of no bounds.
- // That way, `where Ty:` is not a complete noop (see #53696) and `Ty`
- // is still checked for WF.
- if bound_pred.bounds.is_empty() {
- if let ty::Param(_) = ty.kind() {
- // This is a `where T:`, which can be in the HIR from the
- // transformation that moves `?Sized` to `T`'s declaration.
- // We can skip the predicate because type parameters are
- // trivially WF, but also we *should*, to avoid exposing
- // users who never wrote `where Type:,` themselves, to
- // compiler/tooling bugs from not handling WF predicates.
- } else {
- let span = bound_pred.bounded_ty.span;
- let predicate = ty::Binder::bind_with_vars(
- ty::PredicateKind::WellFormed(ty.into()),
- bound_vars,
- );
- predicates.insert((predicate.to_predicate(tcx), span));
- }
- }
-
- let mut bounds = Bounds::default();
- <dyn AstConv<'_>>::add_bounds(
- &icx,
- ty,
- bound_pred.bounds.iter(),
- &mut bounds,
- bound_vars,
- );
- predicates.extend(bounds.predicates(tcx, ty));
- }
-
- hir::WherePredicate::RegionPredicate(region_pred) => {
- let r1 = <dyn AstConv<'_>>::ast_region_to_region(&icx, &region_pred.lifetime, None);
- predicates.extend(region_pred.bounds.iter().map(|bound| {
- let (r2, span) = match bound {
- hir::GenericBound::Outlives(lt) => {
- (<dyn AstConv<'_>>::ast_region_to_region(&icx, lt, None), lt.span)
- }
- _ => bug!(),
- };
- let pred = ty::Binder::dummy(ty::PredicateKind::RegionOutlives(
- ty::OutlivesPredicate(r1, r2),
- ))
- .to_predicate(icx.tcx);
-
- (pred, span)
- }))
- }
-
- hir::WherePredicate::EqPredicate(..) => {
- // FIXME(#20041)
- }
- }
- }
-
- if tcx.features().generic_const_exprs {
- predicates.extend(const_evaluatable_predicates_of(tcx, def_id.expect_local()));
- }
-
- let mut predicates: Vec<_> = predicates.into_iter().collect();
-
- // Subtle: before we store the predicates into the tcx, we
- // sort them so that predicates like `T: Foo<Item=U>` come
- // before uses of `U`. This avoids false ambiguity errors
- // in trait checking. See `setup_constraining_predicates`
- // for details.
- if let Node::Item(&Item { kind: ItemKind::Impl { .. }, .. }) = node {
- let self_ty = tcx.type_of(def_id);
- let trait_ref = tcx.impl_trait_ref(def_id);
- cgp::setup_constraining_predicates(
- tcx,
- &mut predicates,
- trait_ref,
- &mut cgp::parameters_for_impl(self_ty, trait_ref),
- );
- }
-
- let result = ty::GenericPredicates {
- parent: generics.parent,
- predicates: tcx.arena.alloc_from_iter(predicates),
- };
- debug!("explicit_predicates_of(def_id={:?}) = {:?}", def_id, result);
- result
-}
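
A short sketch of the where-clause forms collected above (plain user code, invented names): a trait bound, a region-outlives bound, and the degenerate empty bound, which only keeps its type WF-checked (the #53696 case).

use std::fmt::Display;

// `T: Display` becomes a trait predicate, `'a: 'b` a region-outlives
// predicate, and `Vec<T>:` contributes only a well-formedness check.
fn describe<'a, 'b, T>(long: &'a str, short: &'b str, value: T) -> String
where
    T: Display,
    'a: 'b,
    Vec<T>:,
{
    format!("{long} / {short}: {value}")
}
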
-
-fn const_evaluatable_predicates_of<'tcx>(
- tcx: TyCtxt<'tcx>,
- def_id: LocalDefId,
-) -> FxIndexSet<(ty::Predicate<'tcx>, Span)> {
- struct ConstCollector<'tcx> {
- tcx: TyCtxt<'tcx>,
- preds: FxIndexSet<(ty::Predicate<'tcx>, Span)>,
- }
-
- impl<'tcx> intravisit::Visitor<'tcx> for ConstCollector<'tcx> {
- fn visit_anon_const(&mut self, c: &'tcx hir::AnonConst) {
- let def_id = self.tcx.hir().local_def_id(c.hir_id);
- let ct = ty::Const::from_anon_const(self.tcx, def_id);
- if let ty::ConstKind::Unevaluated(uv) = ct.kind() {
- assert_eq!(uv.promoted, None);
- let span = self.tcx.hir().span(c.hir_id);
- self.preds.insert((
- ty::Binder::dummy(ty::PredicateKind::ConstEvaluatable(uv.shrink()))
- .to_predicate(self.tcx),
- span,
- ));
- }
- }
-
- fn visit_const_param_default(&mut self, _param: HirId, _ct: &'tcx hir::AnonConst) {
- // Do not look into const param defaults,
- // these get checked when they are actually instantiated.
- //
- // We do not want the following to error:
- //
- // struct Foo<const N: usize, const M: usize = { N + 1 }>;
- // struct Bar<const N: usize>(Foo<N, 3>);
- }
- }
-
- let hir_id = tcx.hir().local_def_id_to_hir_id(def_id);
- let node = tcx.hir().get(hir_id);
-
- let mut collector = ConstCollector { tcx, preds: FxIndexSet::default() };
- if let hir::Node::Item(item) = node && let hir::ItemKind::Impl(ref impl_) = item.kind {
- if let Some(of_trait) = &impl_.of_trait {
- debug!("const_evaluatable_predicates_of({:?}): visit impl trait_ref", def_id);
- collector.visit_trait_ref(of_trait);
- }
-
- debug!("const_evaluatable_predicates_of({:?}): visit_self_ty", def_id);
- collector.visit_ty(impl_.self_ty);
- }
-
- if let Some(generics) = node.generics() {
- debug!("const_evaluatable_predicates_of({:?}): visit_generics", def_id);
- collector.visit_generics(generics);
- }
-
- if let Some(fn_sig) = tcx.hir().fn_sig_by_hir_id(hir_id) {
- debug!("const_evaluatable_predicates_of({:?}): visit_fn_decl", def_id);
- collector.visit_fn_decl(fn_sig.decl);
- }
- debug!("const_evaluatable_predicates_of({:?}) = {:?}", def_id, collector.preds);
-
- collector.preds
-}
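
A hedged sketch (nightly, `generic_const_exprs`, invented names) of where these `ConstEvaluatable` predicates come from: an anonymous constant such as `N + 1` in a signature, with the customary empty where-bound repeating it for callers.

#![feature(generic_const_exprs)]
#![allow(incomplete_features)]

// Both the return type and the where-clause mention the anonymous constant
// `N + 1`; the collector above records it as a `ConstEvaluatable` predicate.
fn pad<const N: usize>(xs: [u8; N]) -> [u8; N + 1]
where
    [u8; N + 1]:,
{
    let mut out = [0u8; N + 1];
    out[..N].copy_from_slice(&xs);
    out
}
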
-
-fn trait_explicit_predicates_and_bounds(
- tcx: TyCtxt<'_>,
- def_id: LocalDefId,
-) -> ty::GenericPredicates<'_> {
- assert_eq!(tcx.def_kind(def_id), DefKind::Trait);
- gather_explicit_predicates_of(tcx, def_id.to_def_id())
-}
-
-fn explicit_predicates_of<'tcx>(tcx: TyCtxt<'tcx>, def_id: DefId) -> ty::GenericPredicates<'tcx> {
- let def_kind = tcx.def_kind(def_id);
- if let DefKind::Trait = def_kind {
- // Remove bounds on associated types from the predicates, they will be
- // returned by `explicit_item_bounds`.
- let predicates_and_bounds = tcx.trait_explicit_predicates_and_bounds(def_id.expect_local());
- let trait_identity_substs = InternalSubsts::identity_for_item(tcx, def_id);
-
- let is_assoc_item_ty = |ty: Ty<'tcx>| {
- // For a predicate from a where clause to become a bound on an
- // associated type:
- // * It must use the identity substs of the item.
- // * Since any generic parameters on the item are not in scope,
- // this means that the item is not a GAT, and its identity
- // substs are the same as the trait's.
- // * It must be an associated type for this trait (*not* a
- // supertrait).
- if let ty::Projection(projection) = ty.kind() {
- projection.substs == trait_identity_substs
- && tcx.associated_item(projection.item_def_id).container_id(tcx) == def_id
- } else {
- false
- }
- };
-
- let predicates: Vec<_> = predicates_and_bounds
- .predicates
- .iter()
- .copied()
- .filter(|(pred, _)| match pred.kind().skip_binder() {
- ty::PredicateKind::Trait(tr) => !is_assoc_item_ty(tr.self_ty()),
- ty::PredicateKind::Projection(proj) => {
- !is_assoc_item_ty(proj.projection_ty.self_ty())
- }
- ty::PredicateKind::TypeOutlives(outlives) => !is_assoc_item_ty(outlives.0),
- _ => true,
- })
- .collect();
- if predicates.len() == predicates_and_bounds.predicates.len() {
- predicates_and_bounds
- } else {
- ty::GenericPredicates {
- parent: predicates_and_bounds.parent,
- predicates: tcx.arena.alloc_slice(&predicates),
- }
- }
- } else {
- if matches!(def_kind, DefKind::AnonConst) && tcx.lazy_normalization() {
- let hir_id = tcx.hir().local_def_id_to_hir_id(def_id.expect_local());
- if tcx.hir().opt_const_param_default_param_hir_id(hir_id).is_some() {
- // In `generics_of` we set the generics' parent to be our parent's parent which means that
-                // we lose out on the predicates of our actual parent if we don't return those predicates here.
- // (See comment in `generics_of` for more information on why the parent shenanigans is necessary)
- //
- // struct Foo<T, const N: usize = { <T as Trait>::ASSOC }>(T) where T: Trait;
- // ^^^ ^^^^^^^^^^^^^^^^^^^^^^^ the def id we are calling
- // ^^^ explicit_predicates_of on
- // parent item we dont have set as the
- // parent of generics returned by `generics_of`
- //
- // In the above code we want the anon const to have predicates in its param env for `T: Trait`
- let item_def_id = tcx.hir().get_parent_item(hir_id);
- // In the above code example we would be calling `explicit_predicates_of(Foo)` here
- return tcx.explicit_predicates_of(item_def_id);
- }
- }
- gather_explicit_predicates_of(tcx, def_id)
- }
-}
-
-/// Converts a specific `GenericBound` from the AST into a set of
-/// predicates that apply to the self type. A vector is returned
-/// because this can be anywhere from zero predicates (`T: ?Sized` adds no
-/// predicates) to one (`T: Foo`) to many (`T: Bar<X = i32>` adds `T: Bar`
-/// and `<T as Bar>::X == i32`).
-fn predicates_from_bound<'tcx>(
- astconv: &dyn AstConv<'tcx>,
- param_ty: Ty<'tcx>,
- bound: &'tcx hir::GenericBound<'tcx>,
- bound_vars: &'tcx ty::List<ty::BoundVariableKind>,
-) -> Vec<(ty::Predicate<'tcx>, Span)> {
- let mut bounds = Bounds::default();
- astconv.add_bounds(param_ty, [bound].into_iter(), &mut bounds, bound_vars);
- bounds.predicates(astconv.tcx(), param_ty).collect()
-}
-
-fn compute_sig_of_foreign_fn_decl<'tcx>(
- tcx: TyCtxt<'tcx>,
- def_id: DefId,
- decl: &'tcx hir::FnDecl<'tcx>,
- abi: abi::Abi,
-) -> ty::PolyFnSig<'tcx> {
- let unsafety = if abi == abi::Abi::RustIntrinsic {
- intrinsic_operation_unsafety(tcx.item_name(def_id))
- } else {
- hir::Unsafety::Unsafe
- };
- let hir_id = tcx.hir().local_def_id_to_hir_id(def_id.expect_local());
- let fty = <dyn AstConv<'_>>::ty_of_fn(
- &ItemCtxt::new(tcx, def_id),
- hir_id,
- unsafety,
- abi,
- decl,
- None,
- None,
- );
-
- // Feature gate SIMD types in FFI, since I am not sure that the
- // ABIs are handled at all correctly. -huonw
- if abi != abi::Abi::RustIntrinsic
- && abi != abi::Abi::PlatformIntrinsic
- && !tcx.features().simd_ffi
- {
- let check = |ast_ty: &hir::Ty<'_>, ty: Ty<'_>| {
- if ty.is_simd() {
- let snip = tcx
- .sess
- .source_map()
- .span_to_snippet(ast_ty.span)
- .map_or_else(|_| String::new(), |s| format!(" `{}`", s));
- tcx.sess
- .struct_span_err(
- ast_ty.span,
- &format!(
- "use of SIMD type{} in FFI is highly experimental and \
- may result in invalid code",
- snip
- ),
- )
- .help("add `#![feature(simd_ffi)]` to the crate attributes to enable")
- .emit();
- }
- };
- for (input, ty) in iter::zip(decl.inputs, fty.inputs().skip_binder()) {
- check(input, *ty)
- }
- if let hir::FnRetTy::Return(ref ty) = decl.output {
- check(ty, fty.output().skip_binder())
- }
- }
-
- fty
-}
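
A hedged sketch of the code this gate targets, assuming nightly and an x86_64 target; without `#![feature(simd_ffi)]` the check above rejects the SIMD parameter, and `horizontal_sum` is an invented symbol.

#![feature(simd_ffi)]

#[cfg(target_arch = "x86_64")]
use std::arch::x86_64::__m128;

#[cfg(target_arch = "x86_64")]
extern "C" {
    // `__m128` is a SIMD type, so using it in an FFI signature is only
    // accepted once `simd_ffi` is enabled.
    fn horizontal_sum(v: __m128) -> f32;
}
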
-
-fn is_foreign_item(tcx: TyCtxt<'_>, def_id: DefId) -> bool {
- match tcx.hir().get_if_local(def_id) {
- Some(Node::ForeignItem(..)) => true,
- Some(_) => false,
- _ => bug!("is_foreign_item applied to non-local def-id {:?}", def_id),
- }
-}
-
-fn generator_kind(tcx: TyCtxt<'_>, def_id: DefId) -> Option<hir::GeneratorKind> {
- match tcx.hir().get_if_local(def_id) {
- Some(Node::Expr(&rustc_hir::Expr {
- kind: rustc_hir::ExprKind::Closure(&rustc_hir::Closure { body, .. }),
- ..
- })) => tcx.hir().body(body).generator_kind(),
- Some(_) => None,
- _ => bug!("generator_kind applied to non-local def-id {:?}", def_id),
- }
-}
-
-fn from_target_feature(
- tcx: TyCtxt<'_>,
- attr: &ast::Attribute,
- supported_target_features: &FxHashMap<String, Option<Symbol>>,
- target_features: &mut Vec<Symbol>,
-) {
- let Some(list) = attr.meta_item_list() else { return };
- let bad_item = |span| {
- let msg = "malformed `target_feature` attribute input";
- let code = "enable = \"..\"";
- tcx.sess
- .struct_span_err(span, msg)
- .span_suggestion(span, "must be of the form", code, Applicability::HasPlaceholders)
- .emit();
- };
- let rust_features = tcx.features();
- for item in list {
- // Only `enable = ...` is accepted in the meta-item list.
- if !item.has_name(sym::enable) {
- bad_item(item.span());
- continue;
- }
-
- // Must be of the form `enable = "..."` (a string).
- let Some(value) = item.value_str() else {
- bad_item(item.span());
- continue;
- };
-
- // We allow comma separation to enable multiple features.
- target_features.extend(value.as_str().split(',').filter_map(|feature| {
- let Some(feature_gate) = supported_target_features.get(feature) else {
- let msg =
- format!("the feature named `{}` is not valid for this target", feature);
- let mut err = tcx.sess.struct_span_err(item.span(), &msg);
- err.span_label(
- item.span(),
- format!("`{}` is not valid for this target", feature),
- );
- if let Some(stripped) = feature.strip_prefix('+') {
- let valid = supported_target_features.contains_key(stripped);
- if valid {
- err.help("consider removing the leading `+` in the feature name");
- }
- }
- err.emit();
- return None;
- };
-
- // Only allow features whose feature gates have been enabled.
- let allowed = match feature_gate.as_ref().copied() {
- Some(sym::arm_target_feature) => rust_features.arm_target_feature,
- Some(sym::hexagon_target_feature) => rust_features.hexagon_target_feature,
- Some(sym::powerpc_target_feature) => rust_features.powerpc_target_feature,
- Some(sym::mips_target_feature) => rust_features.mips_target_feature,
- Some(sym::riscv_target_feature) => rust_features.riscv_target_feature,
- Some(sym::avx512_target_feature) => rust_features.avx512_target_feature,
- Some(sym::sse4a_target_feature) => rust_features.sse4a_target_feature,
- Some(sym::tbm_target_feature) => rust_features.tbm_target_feature,
- Some(sym::wasm_target_feature) => rust_features.wasm_target_feature,
- Some(sym::cmpxchg16b_target_feature) => rust_features.cmpxchg16b_target_feature,
- Some(sym::movbe_target_feature) => rust_features.movbe_target_feature,
- Some(sym::rtm_target_feature) => rust_features.rtm_target_feature,
- Some(sym::f16c_target_feature) => rust_features.f16c_target_feature,
- Some(sym::ermsb_target_feature) => rust_features.ermsb_target_feature,
- Some(sym::bpf_target_feature) => rust_features.bpf_target_feature,
- Some(sym::aarch64_ver_target_feature) => rust_features.aarch64_ver_target_feature,
- Some(name) => bug!("unknown target feature gate {}", name),
- None => true,
- };
- if !allowed {
- feature_err(
- &tcx.sess.parse_sess,
- feature_gate.unwrap(),
- item.span(),
- &format!("the target feature `{}` is currently unstable", feature),
- )
- .emit();
- }
- Some(Symbol::intern(feature))
- }));
- }
-}
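
The accepted surface form, for reference: `enable = "..."` with a comma-separated feature list, on an `unsafe fn` (required on most targets unless `target_feature_11` is enabled). The function body is just an invented illustration.

#[cfg(target_arch = "x86_64")]
#[target_feature(enable = "avx2,fma")]
unsafe fn fused_multiply_add(a: &[f32], b: &[f32], out: &mut [f32]) {
    // The attribute only promises the features are available; the compiler is
    // then free to auto-vectorize this loop with AVX2/FMA instructions.
    for ((x, y), o) in a.iter().zip(b).zip(out) {
        *o = x.mul_add(*y, *o);
    }
}
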
-
-fn linkage_by_name(tcx: TyCtxt<'_>, def_id: LocalDefId, name: &str) -> Linkage {
- use rustc_middle::mir::mono::Linkage::*;
-
- // Use the names from src/llvm/docs/LangRef.rst here. Most types are only
- // applicable to variable declarations and may not really make sense for
- // Rust code in the first place but allow them anyway and trust that the
-    // Rust code in the first place, but we allow them anyway and trust that the
- // up in the future.
- //
- // ghost, dllimport, dllexport and linkonce_odr_autohide are not supported
- // and don't have to be, LLVM treats them as no-ops.
- match name {
- "appending" => Appending,
- "available_externally" => AvailableExternally,
- "common" => Common,
- "extern_weak" => ExternalWeak,
- "external" => External,
- "internal" => Internal,
- "linkonce" => LinkOnceAny,
- "linkonce_odr" => LinkOnceODR,
- "private" => Private,
- "weak" => WeakAny,
- "weak_odr" => WeakODR,
- _ => tcx.sess.span_fatal(tcx.def_span(def_id), "invalid linkage specified"),
- }
-}
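
A hedged sketch of how one of these names is used in source, assuming nightly (`#![feature(linkage)]`); the symbol name is invented and the pattern mirrors the usual weak-symbol idiom.

#![feature(linkage)]

extern "C" {
    // "extern_weak" maps to `Linkage::ExternalWeak` in the table above; the
    // symbol is allowed to be absent, in which case its address is null.
    #[linkage = "extern_weak"]
    static OPTIONAL_HOOK: *const u8;
}

fn hook_present() -> bool {
    unsafe { !OPTIONAL_HOOK.is_null() }
}
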
-
-fn codegen_fn_attrs(tcx: TyCtxt<'_>, did: DefId) -> CodegenFnAttrs {
- if cfg!(debug_assertions) {
- let def_kind = tcx.def_kind(did);
- assert!(
- def_kind.has_codegen_attrs(),
- "unexpected `def_kind` in `codegen_fn_attrs`: {def_kind:?}",
- );
- }
-
- let did = did.expect_local();
- let attrs = tcx.hir().attrs(tcx.hir().local_def_id_to_hir_id(did));
- let mut codegen_fn_attrs = CodegenFnAttrs::new();
- if tcx.should_inherit_track_caller(did) {
- codegen_fn_attrs.flags |= CodegenFnAttrFlags::TRACK_CALLER;
- }
-
- // The panic_no_unwind function called by TerminatorKind::Abort will never
-    // unwind. If the panic handler that it invokes unwinds, then it will simply
- // call the panic handler again.
- if Some(did.to_def_id()) == tcx.lang_items().panic_no_unwind() {
- codegen_fn_attrs.flags |= CodegenFnAttrFlags::NEVER_UNWIND;
- }
-
- let supported_target_features = tcx.supported_target_features(LOCAL_CRATE);
-
- let mut inline_span = None;
- let mut link_ordinal_span = None;
- let mut no_sanitize_span = None;
- for attr in attrs.iter() {
- if attr.has_name(sym::cold) {
- codegen_fn_attrs.flags |= CodegenFnAttrFlags::COLD;
- } else if attr.has_name(sym::rustc_allocator) {
- codegen_fn_attrs.flags |= CodegenFnAttrFlags::ALLOCATOR;
- } else if attr.has_name(sym::ffi_returns_twice) {
- if tcx.is_foreign_item(did) {
- codegen_fn_attrs.flags |= CodegenFnAttrFlags::FFI_RETURNS_TWICE;
- } else {
-                // `#[ffi_returns_twice]` is only allowed on `extern fn`s.
- struct_span_err!(
- tcx.sess,
- attr.span,
- E0724,
- "`#[ffi_returns_twice]` may only be used on foreign functions"
- )
- .emit();
- }
- } else if attr.has_name(sym::ffi_pure) {
- if tcx.is_foreign_item(did) {
- if attrs.iter().any(|a| a.has_name(sym::ffi_const)) {
- // `#[ffi_const]` functions cannot be `#[ffi_pure]`
- struct_span_err!(
- tcx.sess,
- attr.span,
- E0757,
- "`#[ffi_const]` function cannot be `#[ffi_pure]`"
- )
- .emit();
- } else {
- codegen_fn_attrs.flags |= CodegenFnAttrFlags::FFI_PURE;
- }
- } else {
- // `#[ffi_pure]` is only allowed on foreign functions
- struct_span_err!(
- tcx.sess,
- attr.span,
- E0755,
- "`#[ffi_pure]` may only be used on foreign functions"
- )
- .emit();
- }
- } else if attr.has_name(sym::ffi_const) {
- if tcx.is_foreign_item(did) {
- codegen_fn_attrs.flags |= CodegenFnAttrFlags::FFI_CONST;
- } else {
- // `#[ffi_const]` is only allowed on foreign functions
- struct_span_err!(
- tcx.sess,
- attr.span,
- E0756,
- "`#[ffi_const]` may only be used on foreign functions"
- )
- .emit();
- }
- } else if attr.has_name(sym::rustc_allocator_nounwind) {
- codegen_fn_attrs.flags |= CodegenFnAttrFlags::NEVER_UNWIND;
- } else if attr.has_name(sym::rustc_reallocator) {
- codegen_fn_attrs.flags |= CodegenFnAttrFlags::REALLOCATOR;
- } else if attr.has_name(sym::rustc_deallocator) {
- codegen_fn_attrs.flags |= CodegenFnAttrFlags::DEALLOCATOR;
- } else if attr.has_name(sym::rustc_allocator_zeroed) {
- codegen_fn_attrs.flags |= CodegenFnAttrFlags::ALLOCATOR_ZEROED;
- } else if attr.has_name(sym::naked) {
- codegen_fn_attrs.flags |= CodegenFnAttrFlags::NAKED;
- } else if attr.has_name(sym::no_mangle) {
- codegen_fn_attrs.flags |= CodegenFnAttrFlags::NO_MANGLE;
- } else if attr.has_name(sym::no_coverage) {
- codegen_fn_attrs.flags |= CodegenFnAttrFlags::NO_COVERAGE;
- } else if attr.has_name(sym::rustc_std_internal_symbol) {
- codegen_fn_attrs.flags |= CodegenFnAttrFlags::RUSTC_STD_INTERNAL_SYMBOL;
- } else if attr.has_name(sym::used) {
- let inner = attr.meta_item_list();
- match inner.as_deref() {
- Some([item]) if item.has_name(sym::linker) => {
- if !tcx.features().used_with_arg {
- feature_err(
- &tcx.sess.parse_sess,
- sym::used_with_arg,
- attr.span,
- "`#[used(linker)]` is currently unstable",
- )
- .emit();
- }
- codegen_fn_attrs.flags |= CodegenFnAttrFlags::USED_LINKER;
- }
- Some([item]) if item.has_name(sym::compiler) => {
- if !tcx.features().used_with_arg {
- feature_err(
- &tcx.sess.parse_sess,
- sym::used_with_arg,
- attr.span,
- "`#[used(compiler)]` is currently unstable",
- )
- .emit();
- }
- codegen_fn_attrs.flags |= CodegenFnAttrFlags::USED;
- }
- Some(_) => {
- tcx.sess
- .struct_span_err(
- attr.span,
- "expected `used`, `used(compiler)` or `used(linker)`",
- )
- .emit();
- }
- None => {
- // Unfortunately, unconditionally using `llvm.used` causes
- // issues in handling `.init_array` with the gold linker,
-                    // but using `llvm.compiler.used` caused a nontrivial amount
- // of unintentional ecosystem breakage -- particularly on
- // Mach-O targets.
- //
- // As a result, we emit `llvm.compiler.used` only on ELF
- // targets. This is somewhat ad-hoc, but actually follows
- // our pre-LLVM 13 behavior (prior to the ecosystem
- // breakage), and seems to match `clang`'s behavior as well
- // (both before and after LLVM 13), possibly because they
- // have similar compatibility concerns to us. See
- // https://github.com/rust-lang/rust/issues/47384#issuecomment-1019080146
- // and following comments for some discussion of this, as
- // well as the comments in `rustc_codegen_llvm` where these
- // flags are handled.
- //
- // Anyway, to be clear: this is still up in the air
- // somewhat, and is subject to change in the future (which
- // is a good thing, because this would ideally be a bit
- // more firmed up).
- let is_like_elf = !(tcx.sess.target.is_like_osx
- || tcx.sess.target.is_like_windows
- || tcx.sess.target.is_like_wasm);
- codegen_fn_attrs.flags |= if is_like_elf {
- CodegenFnAttrFlags::USED
- } else {
- CodegenFnAttrFlags::USED_LINKER
- };
- }
- }
- } else if attr.has_name(sym::cmse_nonsecure_entry) {
- if !matches!(tcx.fn_sig(did).abi(), abi::Abi::C { .. }) {
- struct_span_err!(
- tcx.sess,
- attr.span,
- E0776,
- "`#[cmse_nonsecure_entry]` requires C ABI"
- )
- .emit();
- }
- if !tcx.sess.target.llvm_target.contains("thumbv8m") {
- struct_span_err!(tcx.sess, attr.span, E0775, "`#[cmse_nonsecure_entry]` is only valid for targets with the TrustZone-M extension")
- .emit();
- }
- codegen_fn_attrs.flags |= CodegenFnAttrFlags::CMSE_NONSECURE_ENTRY;
- } else if attr.has_name(sym::thread_local) {
- codegen_fn_attrs.flags |= CodegenFnAttrFlags::THREAD_LOCAL;
- } else if attr.has_name(sym::track_caller) {
- if !tcx.is_closure(did.to_def_id()) && tcx.fn_sig(did).abi() != abi::Abi::Rust {
- struct_span_err!(tcx.sess, attr.span, E0737, "`#[track_caller]` requires Rust ABI")
- .emit();
- }
- if tcx.is_closure(did.to_def_id()) && !tcx.features().closure_track_caller {
- feature_err(
- &tcx.sess.parse_sess,
- sym::closure_track_caller,
- attr.span,
- "`#[track_caller]` on closures is currently unstable",
- )
- .emit();
- }
- codegen_fn_attrs.flags |= CodegenFnAttrFlags::TRACK_CALLER;
- } else if attr.has_name(sym::export_name) {
- if let Some(s) = attr.value_str() {
- if s.as_str().contains('\0') {
- // `#[export_name = ...]` will be converted to a null-terminated string,
- // so it may not contain any null characters.
- struct_span_err!(
- tcx.sess,
- attr.span,
- E0648,
- "`export_name` may not contain null characters"
- )
- .emit();
- }
- codegen_fn_attrs.export_name = Some(s);
- }
- } else if attr.has_name(sym::target_feature) {
- if !tcx.is_closure(did.to_def_id())
- && tcx.fn_sig(did).unsafety() == hir::Unsafety::Normal
- {
- if tcx.sess.target.is_like_wasm || tcx.sess.opts.actually_rustdoc {
- // The `#[target_feature]` attribute is allowed on
- // WebAssembly targets on all functions, including safe
- // ones. Other targets require that `#[target_feature]` is
- // only applied to unsafe functions (pending the
- // `target_feature_11` feature) because on most targets
- // execution of instructions that are not supported is
-                    // considered undefined behavior. For WebAssembly, which is a
-                    // 100% safe target at execution time, it's not possible to
-                    // execute undefined instructions, and even if a future
-                    // feature were added in some form for this it would be a
-                    // deterministic trap. There is no undefined behavior when
-                    // executing WebAssembly, so `#[target_feature]` is allowed
-                    // on safe functions (but again, only for WebAssembly).
- //
- // Note that this is also allowed if `actually_rustdoc` so
- // if a target is documenting some wasm-specific code then
- // it's not spuriously denied.
- } else if !tcx.features().target_feature_11 {
- let mut err = feature_err(
- &tcx.sess.parse_sess,
- sym::target_feature_11,
- attr.span,
- "`#[target_feature(..)]` can only be applied to `unsafe` functions",
- );
- err.span_label(tcx.def_span(did), "not an `unsafe` function");
- err.emit();
- } else {
- check_target_feature_trait_unsafe(tcx, did, attr.span);
- }
- }
- from_target_feature(
- tcx,
- attr,
- supported_target_features,
- &mut codegen_fn_attrs.target_features,
- );
- } else if attr.has_name(sym::linkage) {
- if let Some(val) = attr.value_str() {
- codegen_fn_attrs.linkage = Some(linkage_by_name(tcx, did, val.as_str()));
- }
- } else if attr.has_name(sym::link_section) {
- if let Some(val) = attr.value_str() {
- if val.as_str().bytes().any(|b| b == 0) {
- let msg = format!(
- "illegal null byte in link_section \
- value: `{}`",
- &val
- );
- tcx.sess.span_err(attr.span, &msg);
- } else {
- codegen_fn_attrs.link_section = Some(val);
- }
- }
- } else if attr.has_name(sym::link_name) {
- codegen_fn_attrs.link_name = attr.value_str();
- } else if attr.has_name(sym::link_ordinal) {
- link_ordinal_span = Some(attr.span);
- if let ordinal @ Some(_) = check_link_ordinal(tcx, attr) {
- codegen_fn_attrs.link_ordinal = ordinal;
- }
- } else if attr.has_name(sym::no_sanitize) {
- no_sanitize_span = Some(attr.span);
- if let Some(list) = attr.meta_item_list() {
- for item in list.iter() {
- if item.has_name(sym::address) {
- codegen_fn_attrs.no_sanitize |= SanitizerSet::ADDRESS;
- } else if item.has_name(sym::cfi) {
- codegen_fn_attrs.no_sanitize |= SanitizerSet::CFI;
- } else if item.has_name(sym::memory) {
- codegen_fn_attrs.no_sanitize |= SanitizerSet::MEMORY;
- } else if item.has_name(sym::memtag) {
- codegen_fn_attrs.no_sanitize |= SanitizerSet::MEMTAG;
- } else if item.has_name(sym::shadow_call_stack) {
- codegen_fn_attrs.no_sanitize |= SanitizerSet::SHADOWCALLSTACK;
- } else if item.has_name(sym::thread) {
- codegen_fn_attrs.no_sanitize |= SanitizerSet::THREAD;
- } else if item.has_name(sym::hwaddress) {
- codegen_fn_attrs.no_sanitize |= SanitizerSet::HWADDRESS;
- } else {
- tcx.sess
- .struct_span_err(item.span(), "invalid argument for `no_sanitize`")
- .note("expected one of: `address`, `cfi`, `hwaddress`, `memory`, `memtag`, `shadow-call-stack`, or `thread`")
- .emit();
- }
- }
- }
- } else if attr.has_name(sym::instruction_set) {
- codegen_fn_attrs.instruction_set = match attr.meta_kind() {
- Some(MetaItemKind::List(ref items)) => match items.as_slice() {
- [NestedMetaItem::MetaItem(set)] => {
- let segments =
- set.path.segments.iter().map(|x| x.ident.name).collect::<Vec<_>>();
- match segments.as_slice() {
- [sym::arm, sym::a32] | [sym::arm, sym::t32] => {
- if !tcx.sess.target.has_thumb_interworking {
- struct_span_err!(
- tcx.sess.diagnostic(),
- attr.span,
- E0779,
- "target does not support `#[instruction_set]`"
- )
- .emit();
- None
- } else if segments[1] == sym::a32 {
- Some(InstructionSetAttr::ArmA32)
- } else if segments[1] == sym::t32 {
- Some(InstructionSetAttr::ArmT32)
- } else {
- unreachable!()
- }
- }
- _ => {
- struct_span_err!(
- tcx.sess.diagnostic(),
- attr.span,
- E0779,
- "invalid instruction set specified",
- )
- .emit();
- None
- }
- }
- }
- [] => {
- struct_span_err!(
- tcx.sess.diagnostic(),
- attr.span,
- E0778,
- "`#[instruction_set]` requires an argument"
- )
- .emit();
- None
- }
- _ => {
- struct_span_err!(
- tcx.sess.diagnostic(),
- attr.span,
- E0779,
- "cannot specify more than one instruction set"
- )
- .emit();
- None
- }
- },
- _ => {
- struct_span_err!(
- tcx.sess.diagnostic(),
- attr.span,
- E0778,
- "must specify an instruction set"
- )
- .emit();
- None
- }
- };
- } else if attr.has_name(sym::repr) {
- codegen_fn_attrs.alignment = match attr.meta_item_list() {
- Some(items) => match items.as_slice() {
- [item] => match item.name_value_literal() {
- Some((sym::align, literal)) => {
- let alignment = rustc_attr::parse_alignment(&literal.kind);
-
- match alignment {
- Ok(align) => Some(align),
- Err(msg) => {
- struct_span_err!(
- tcx.sess.diagnostic(),
- attr.span,
- E0589,
- "invalid `repr(align)` attribute: {}",
- msg
- )
- .emit();
-
- None
- }
- }
- }
- _ => None,
- },
- [] => None,
- _ => None,
- },
- None => None,
- };
- }
- }
-
- codegen_fn_attrs.inline = attrs.iter().fold(InlineAttr::None, |ia, attr| {
- if !attr.has_name(sym::inline) {
- return ia;
- }
- match attr.meta_kind() {
- Some(MetaItemKind::Word) => InlineAttr::Hint,
- Some(MetaItemKind::List(ref items)) => {
- inline_span = Some(attr.span);
- if items.len() != 1 {
- struct_span_err!(
- tcx.sess.diagnostic(),
- attr.span,
- E0534,
- "expected one argument"
- )
- .emit();
- InlineAttr::None
- } else if list_contains_name(&items, sym::always) {
- InlineAttr::Always
- } else if list_contains_name(&items, sym::never) {
- InlineAttr::Never
- } else {
- struct_span_err!(
- tcx.sess.diagnostic(),
- items[0].span(),
- E0535,
- "invalid argument"
- )
- .emit();
-
- InlineAttr::None
- }
- }
- Some(MetaItemKind::NameValue(_)) => ia,
- None => ia,
- }
- });
-
- codegen_fn_attrs.optimize = attrs.iter().fold(OptimizeAttr::None, |ia, attr| {
- if !attr.has_name(sym::optimize) {
- return ia;
- }
- let err = |sp, s| struct_span_err!(tcx.sess.diagnostic(), sp, E0722, "{}", s).emit();
- match attr.meta_kind() {
- Some(MetaItemKind::Word) => {
- err(attr.span, "expected one argument");
- ia
- }
- Some(MetaItemKind::List(ref items)) => {
- inline_span = Some(attr.span);
- if items.len() != 1 {
- err(attr.span, "expected one argument");
- OptimizeAttr::None
- } else if list_contains_name(&items, sym::size) {
- OptimizeAttr::Size
- } else if list_contains_name(&items, sym::speed) {
- OptimizeAttr::Speed
- } else {
- err(items[0].span(), "invalid argument");
- OptimizeAttr::None
- }
- }
- Some(MetaItemKind::NameValue(_)) => ia,
- None => ia,
- }
- });
-
- // #73631: closures inherit `#[target_feature]` annotations
- if tcx.features().target_feature_11 && tcx.is_closure(did.to_def_id()) {
- let owner_id = tcx.parent(did.to_def_id());
- if tcx.def_kind(owner_id).has_codegen_attrs() {
- codegen_fn_attrs
- .target_features
- .extend(tcx.codegen_fn_attrs(owner_id).target_features.iter().copied());
- }
- }
-
- // If a function uses #[target_feature] it can't be inlined into general
- // purpose functions as they wouldn't have the right target features
- // enabled. For that reason we also forbid #[inline(always)] as it can't be
- // respected.
- if !codegen_fn_attrs.target_features.is_empty() {
- if codegen_fn_attrs.inline == InlineAttr::Always {
- if let Some(span) = inline_span {
- tcx.sess.span_err(
- span,
- "cannot use `#[inline(always)]` with \
- `#[target_feature]`",
- );
- }
- }
- }
-
- if !codegen_fn_attrs.no_sanitize.is_empty() {
- if codegen_fn_attrs.inline == InlineAttr::Always {
- if let (Some(no_sanitize_span), Some(inline_span)) = (no_sanitize_span, inline_span) {
- let hir_id = tcx.hir().local_def_id_to_hir_id(did);
- tcx.struct_span_lint_hir(
- lint::builtin::INLINE_NO_SANITIZE,
- hir_id,
- no_sanitize_span,
- |lint| {
- lint.build("`no_sanitize` will have no effect after inlining")
- .span_note(inline_span, "inlining requested here")
- .emit();
- },
- )
- }
- }
- }
-
- // Weak lang items have the same semantics as "std internal" symbols in the
- // sense that they're preserved through all our LTO passes and only
- // strippable by the linker.
- //
- // Additionally weak lang items have predetermined symbol names.
- if tcx.is_weak_lang_item(did.to_def_id()) {
- codegen_fn_attrs.flags |= CodegenFnAttrFlags::RUSTC_STD_INTERNAL_SYMBOL;
- }
- if let Some(name) = weak_lang_items::link_name(attrs) {
- codegen_fn_attrs.export_name = Some(name);
- codegen_fn_attrs.link_name = Some(name);
- }
- check_link_name_xor_ordinal(tcx, &codegen_fn_attrs, link_ordinal_span);
-
- // Internal symbols to the standard library all have no_mangle semantics in
- // that they have defined symbol names present in the function name. This
- // also applies to weak symbols where they all have known symbol names.
- if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::RUSTC_STD_INTERNAL_SYMBOL) {
- codegen_fn_attrs.flags |= CodegenFnAttrFlags::NO_MANGLE;
- }
-
- // Any linkage to LLVM intrinsics for now forcibly marks them all as never
- // unwinds since LLVM sometimes can't handle codegen which `invoke`s
- // intrinsic functions.
- if let Some(name) = &codegen_fn_attrs.link_name {
- if name.as_str().starts_with("llvm.") {
- codegen_fn_attrs.flags |= CodegenFnAttrFlags::NEVER_UNWIND;
- }
- }
-
- codegen_fn_attrs
-}
-
-/// Computes the set of target features used in a function for the purposes of
-/// inline assembly.
-fn asm_target_features<'tcx>(tcx: TyCtxt<'tcx>, did: DefId) -> &'tcx FxHashSet<Symbol> {
- let mut target_features = tcx.sess.unstable_target_features.clone();
- if tcx.def_kind(did).has_codegen_attrs() {
- let attrs = tcx.codegen_fn_attrs(did);
- target_features.extend(&attrs.target_features);
- match attrs.instruction_set {
- None => {}
- Some(InstructionSetAttr::ArmA32) => {
- target_features.remove(&sym::thumb_mode);
- }
- Some(InstructionSetAttr::ArmT32) => {
- target_features.insert(sym::thumb_mode);
- }
- }
- }
-
- tcx.arena.alloc(target_features)
-}
-
-/// Checks if the provided DefId is a method in a trait impl for a trait which has track_caller
-/// applied to the method prototype.
-fn should_inherit_track_caller(tcx: TyCtxt<'_>, def_id: DefId) -> bool {
- if let Some(impl_item) = tcx.opt_associated_item(def_id)
- && let ty::AssocItemContainer::ImplContainer = impl_item.container
- && let Some(trait_item) = impl_item.trait_item_def_id
- {
- return tcx
- .codegen_fn_attrs(trait_item)
- .flags
- .intersects(CodegenFnAttrFlags::TRACK_CALLER);
- }
-
- false
-}
-
-fn check_link_ordinal(tcx: TyCtxt<'_>, attr: &ast::Attribute) -> Option<u16> {
- use rustc_ast::{Lit, LitIntType, LitKind};
- let meta_item_list = attr.meta_item_list();
- let meta_item_list: Option<&[ast::NestedMetaItem]> = meta_item_list.as_ref().map(Vec::as_ref);
- let sole_meta_list = match meta_item_list {
- Some([item]) => item.literal(),
- Some(_) => {
- tcx.sess
- .struct_span_err(attr.span, "incorrect number of arguments to `#[link_ordinal]`")
- .note("the attribute requires exactly one argument")
- .emit();
- return None;
- }
- _ => None,
- };
- if let Some(Lit { kind: LitKind::Int(ordinal, LitIntType::Unsuffixed), .. }) = sole_meta_list {
- // According to the table at https://docs.microsoft.com/en-us/windows/win32/debug/pe-format#import-header,
- // the ordinal must fit into 16 bits. Similarly, the Ordinal field in COFFShortExport (defined
- // in llvm/include/llvm/Object/COFFImportFile.h), which we use to communicate import information
-        // to LLVM for `#[link(kind = "raw-dylib")]`, is also defined to be uint16_t.
- //
- // FIXME: should we allow an ordinal of 0? The MSVC toolchain has inconsistent support for this:
- // both LINK.EXE and LIB.EXE signal errors and abort when given a .DEF file that specifies
- // a zero ordinal. However, llvm-dlltool is perfectly happy to generate an import library
- // for such a .DEF file, and MSVC's LINK.EXE is also perfectly happy to consume an import
- // library produced by LLVM with an ordinal of 0, and it generates an .EXE. (I don't know yet
- // if the resulting EXE runs, as I haven't yet built the necessary DLL -- see earlier comment
- // about LINK.EXE failing.)
- if *ordinal <= u16::MAX as u128 {
- Some(*ordinal as u16)
- } else {
- let msg = format!("ordinal value in `link_ordinal` is too large: `{}`", &ordinal);
- tcx.sess
- .struct_span_err(attr.span, &msg)
- .note("the value may not exceed `u16::MAX`")
- .emit();
- None
- }
- } else {
- tcx.sess
- .struct_span_err(attr.span, "illegal ordinal format in `link_ordinal`")
- .note("an unsuffixed integer value, e.g., `1`, is expected")
- .emit();
- None
- }
-}
-
-fn check_link_name_xor_ordinal(
- tcx: TyCtxt<'_>,
- codegen_fn_attrs: &CodegenFnAttrs,
- inline_span: Option<Span>,
-) {
- if codegen_fn_attrs.link_name.is_none() || codegen_fn_attrs.link_ordinal.is_none() {
- return;
- }
- let msg = "cannot use `#[link_name]` with `#[link_ordinal]`";
- if let Some(span) = inline_span {
- tcx.sess.span_err(span, msg);
- } else {
- tcx.sess.err(msg);
- }
-}
-
-/// Checks that the function annotated with `#[target_feature]` is not a safe
-/// trait method implementation, reporting an error if it is.
-fn check_target_feature_trait_unsafe(tcx: TyCtxt<'_>, id: LocalDefId, attr_span: Span) {
- let hir_id = tcx.hir().local_def_id_to_hir_id(id);
- let node = tcx.hir().get(hir_id);
- if let Node::ImplItem(hir::ImplItem { kind: hir::ImplItemKind::Fn(..), .. }) = node {
- let parent_id = tcx.hir().get_parent_item(hir_id);
- let parent_item = tcx.hir().expect_item(parent_id);
- if let hir::ItemKind::Impl(hir::Impl { of_trait: Some(_), .. }) = parent_item.kind {
- tcx.sess
- .struct_span_err(
- attr_span,
- "`#[target_feature(..)]` cannot be applied to safe trait method",
- )
- .span_label(attr_span, "cannot be applied to safe trait method")
- .span_label(tcx.def_span(id), "not an `unsafe` function")
- .emit();
- }
- }
-}
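
A minimal, hedged sketch of user-facing code exercising the attribute handling removed above. The item names are invented for illustration, the `used_with_arg` and `target_feature_11` gates are assumed to be nightly-only, and the `avx2` feature assumes an x86 target; the comments restate the flag or diagnostic each branch of the deleted loop applies.

    #![feature(used_with_arg)] // assumed gate for `#[used(linker)]`
    #![allow(dead_code)]

    #[no_mangle]               // sets CodegenFnAttrFlags::NO_MANGLE
    pub extern "C" fn exported() {}

    #[used(linker)]            // sets CodegenFnAttrFlags::USED_LINKER (feature-gated)
    static KEEP: u8 = 0;

    #[track_caller]            // requires the Rust ABI, otherwise E0737
    fn caller_location() -> &'static std::panic::Location<'static> {
        std::panic::Location::caller()
    }

    #[target_feature(enable = "avx2")] // must be `unsafe` unless target_feature_11 (or a wasm-like target)
    unsafe fn simd_kernel() {}
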
diff --git a/compiler/rustc_typeck/src/errors.rs b/compiler/rustc_typeck/src/errors.rs
deleted file mode 100644
index 0438ac02e..000000000
--- a/compiler/rustc_typeck/src/errors.rs
+++ /dev/null
@@ -1,326 +0,0 @@
-//! Errors emitted by typeck.
-use rustc_errors::{error_code, Applicability, DiagnosticBuilder, ErrorGuaranteed};
-use rustc_macros::{SessionDiagnostic, SessionSubdiagnostic};
-use rustc_middle::ty::Ty;
-use rustc_session::{parse::ParseSess, SessionDiagnostic};
-use rustc_span::{symbol::Ident, Span, Symbol};
-
-#[derive(SessionDiagnostic)]
-#[error(typeck::field_multiply_specified_in_initializer, code = "E0062")]
-pub struct FieldMultiplySpecifiedInInitializer {
- #[primary_span]
- #[label]
- pub span: Span,
- #[label(typeck::previous_use_label)]
- pub prev_span: Span,
- pub ident: Ident,
-}
-
-#[derive(SessionDiagnostic)]
-#[error(typeck::unrecognized_atomic_operation, code = "E0092")]
-pub struct UnrecognizedAtomicOperation<'a> {
- #[primary_span]
- #[label]
- pub span: Span,
- pub op: &'a str,
-}
-
-#[derive(SessionDiagnostic)]
-#[error(typeck::wrong_number_of_generic_arguments_to_intrinsic, code = "E0094")]
-pub struct WrongNumberOfGenericArgumentsToIntrinsic<'a> {
- #[primary_span]
- #[label]
- pub span: Span,
- pub found: usize,
- pub expected: usize,
- pub descr: &'a str,
-}
-
-#[derive(SessionDiagnostic)]
-#[error(typeck::unrecognized_intrinsic_function, code = "E0093")]
-pub struct UnrecognizedIntrinsicFunction {
- #[primary_span]
- #[label]
- pub span: Span,
- pub name: Symbol,
-}
-
-#[derive(SessionDiagnostic)]
-#[error(typeck::lifetimes_or_bounds_mismatch_on_trait, code = "E0195")]
-pub struct LifetimesOrBoundsMismatchOnTrait {
- #[primary_span]
- #[label]
- pub span: Span,
- #[label(typeck::generics_label)]
- pub generics_span: Option<Span>,
- pub item_kind: &'static str,
- pub ident: Ident,
-}
-
-#[derive(SessionDiagnostic)]
-#[error(typeck::drop_impl_on_wrong_item, code = "E0120")]
-pub struct DropImplOnWrongItem {
- #[primary_span]
- #[label]
- pub span: Span,
-}
-
-#[derive(SessionDiagnostic)]
-#[error(typeck::field_already_declared, code = "E0124")]
-pub struct FieldAlreadyDeclared {
- pub field_name: Ident,
- #[primary_span]
- #[label]
- pub span: Span,
- #[label(typeck::previous_decl_label)]
- pub prev_span: Span,
-}
-
-#[derive(SessionDiagnostic)]
-#[error(typeck::copy_impl_on_type_with_dtor, code = "E0184")]
-pub struct CopyImplOnTypeWithDtor {
- #[primary_span]
- #[label]
- pub span: Span,
-}
-
-#[derive(SessionDiagnostic)]
-#[error(typeck::multiple_relaxed_default_bounds, code = "E0203")]
-pub struct MultipleRelaxedDefaultBounds {
- #[primary_span]
- pub span: Span,
-}
-
-#[derive(SessionDiagnostic)]
-#[error(typeck::copy_impl_on_non_adt, code = "E0206")]
-pub struct CopyImplOnNonAdt {
- #[primary_span]
- #[label]
- pub span: Span,
-}
-
-#[derive(SessionDiagnostic)]
-#[error(typeck::trait_object_declared_with_no_traits, code = "E0224")]
-pub struct TraitObjectDeclaredWithNoTraits {
- #[primary_span]
- pub span: Span,
- #[label(typeck::alias_span)]
- pub trait_alias_span: Option<Span>,
-}
-
-#[derive(SessionDiagnostic)]
-#[error(typeck::ambiguous_lifetime_bound, code = "E0227")]
-pub struct AmbiguousLifetimeBound {
- #[primary_span]
- pub span: Span,
-}
-
-#[derive(SessionDiagnostic)]
-#[error(typeck::assoc_type_binding_not_allowed, code = "E0229")]
-pub struct AssocTypeBindingNotAllowed {
- #[primary_span]
- #[label]
- pub span: Span,
-}
-
-#[derive(SessionDiagnostic)]
-#[error(typeck::functional_record_update_on_non_struct, code = "E0436")]
-pub struct FunctionalRecordUpdateOnNonStruct {
- #[primary_span]
- pub span: Span,
-}
-
-#[derive(SessionDiagnostic)]
-#[error(typeck::typeof_reserved_keyword_used, code = "E0516")]
-pub struct TypeofReservedKeywordUsed<'tcx> {
- pub ty: Ty<'tcx>,
- #[primary_span]
- #[label]
- pub span: Span,
- #[suggestion_verbose(code = "{ty}")]
- pub opt_sugg: Option<(Span, Applicability)>,
-}
-
-#[derive(SessionDiagnostic)]
-#[error(typeck::return_stmt_outside_of_fn_body, code = "E0572")]
-pub struct ReturnStmtOutsideOfFnBody {
- #[primary_span]
- pub span: Span,
- #[label(typeck::encl_body_label)]
- pub encl_body_span: Option<Span>,
- #[label(typeck::encl_fn_label)]
- pub encl_fn_span: Option<Span>,
-}
-
-#[derive(SessionDiagnostic)]
-#[error(typeck::yield_expr_outside_of_generator, code = "E0627")]
-pub struct YieldExprOutsideOfGenerator {
- #[primary_span]
- pub span: Span,
-}
-
-#[derive(SessionDiagnostic)]
-#[error(typeck::struct_expr_non_exhaustive, code = "E0639")]
-pub struct StructExprNonExhaustive {
- #[primary_span]
- pub span: Span,
- pub what: &'static str,
-}
-
-#[derive(SessionDiagnostic)]
-#[error(typeck::method_call_on_unknown_type, code = "E0699")]
-pub struct MethodCallOnUnknownType {
- #[primary_span]
- pub span: Span,
-}
-
-#[derive(SessionDiagnostic)]
-#[error(typeck::value_of_associated_struct_already_specified, code = "E0719")]
-pub struct ValueOfAssociatedStructAlreadySpecified {
- #[primary_span]
- #[label]
- pub span: Span,
- #[label(typeck::previous_bound_label)]
- pub prev_span: Span,
- pub item_name: Ident,
- pub def_path: String,
-}
-
-#[derive(SessionDiagnostic)]
-#[error(typeck::address_of_temporary_taken, code = "E0745")]
-pub struct AddressOfTemporaryTaken {
- #[primary_span]
- #[label]
- pub span: Span,
-}
-
-#[derive(SessionSubdiagnostic)]
-pub enum AddReturnTypeSuggestion<'tcx> {
- #[suggestion(
- typeck::add_return_type_add,
- code = "-> {found} ",
- applicability = "machine-applicable"
- )]
- Add {
- #[primary_span]
- span: Span,
- found: Ty<'tcx>,
- },
- #[suggestion(
- typeck::add_return_type_missing_here,
- code = "-> _ ",
- applicability = "has-placeholders"
- )]
- MissingHere {
- #[primary_span]
- span: Span,
- },
-}
-
-#[derive(SessionSubdiagnostic)]
-pub enum ExpectedReturnTypeLabel<'tcx> {
- #[label(typeck::expected_default_return_type)]
- Unit {
- #[primary_span]
- span: Span,
- },
- #[label(typeck::expected_return_type)]
- Other {
- #[primary_span]
- span: Span,
- expected: Ty<'tcx>,
- },
-}
-
-#[derive(SessionDiagnostic)]
-#[error(typeck::unconstrained_opaque_type)]
-#[note]
-pub struct UnconstrainedOpaqueType {
- #[primary_span]
- pub span: Span,
- pub name: Symbol,
-}
-
-pub struct MissingTypeParams {
- pub span: Span,
- pub def_span: Span,
- pub missing_type_params: Vec<Symbol>,
- pub empty_generic_args: bool,
-}
-
-// Manual implementation of `SessionDiagnostic` to be able to call `span_to_snippet`.
-impl<'a> SessionDiagnostic<'a> for MissingTypeParams {
- fn into_diagnostic(self, sess: &'a ParseSess) -> DiagnosticBuilder<'a, ErrorGuaranteed> {
- let mut err = sess.span_diagnostic.struct_span_err_with_code(
- self.span,
- rustc_errors::fluent::typeck::missing_type_params,
- error_code!(E0393),
- );
- err.set_arg("parameterCount", self.missing_type_params.len());
- err.set_arg(
- "parameters",
- self.missing_type_params
- .iter()
- .map(|n| format!("`{}`", n))
- .collect::<Vec<_>>()
- .join(", "),
- );
-
- err.span_label(self.def_span, rustc_errors::fluent::typeck::label);
-
- let mut suggested = false;
- if let (Ok(snippet), true) = (
- sess.source_map().span_to_snippet(self.span),
- // Don't suggest setting the type params if there are some already: the order is
- // tricky to get right and the user will already know what the syntax is.
- self.empty_generic_args,
- ) {
- if snippet.ends_with('>') {
- // The user wrote `Trait<'a, T>` or similar. To provide an accurate suggestion
- // we would have to preserve the right order. For now, as clearly the user is
- // aware of the syntax, we do nothing.
- } else {
- // The user wrote `Iterator`, so we don't have a type we can suggest, but at
- // least we can clue them to the correct syntax `Iterator<Type>`.
- err.span_suggestion(
- self.span,
- rustc_errors::fluent::typeck::suggestion,
- format!(
- "{}<{}>",
- snippet,
- self.missing_type_params
- .iter()
- .map(|n| n.to_string())
- .collect::<Vec<_>>()
- .join(", ")
- ),
- Applicability::HasPlaceholders,
- );
- suggested = true;
- }
- }
- if !suggested {
- err.span_label(self.span, rustc_errors::fluent::typeck::no_suggestion_label);
- }
-
- err.note(rustc_errors::fluent::typeck::note);
- err
- }
-}
-
-#[derive(SessionDiagnostic)]
-#[error(typeck::manual_implementation, code = "E0183")]
-#[help]
-pub struct ManualImplementation {
- #[primary_span]
- #[label]
- pub span: Span,
- pub trait_name: String,
-}
-
-#[derive(SessionDiagnostic)]
-#[error(typeck::substs_on_overridden_impl)]
-pub struct SubstsOnOverriddenImpl {
- #[primary_span]
- pub span: Span,
-}
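
The structs above are emitted through the derived diagnostic machinery rather than built by hand. A hedged sketch of a typical call site inside the crate: the helper and its arguments are invented, and `Session::emit_err` is assumed to be the emission entry point for a derived `SessionDiagnostic`.

    use rustc_middle::ty::TyCtxt;
    use rustc_span::{symbol::Ident, Span};

    // Hypothetical crate-internal helper: constructs the derived diagnostic and
    // lets the generated SessionDiagnostic impl render E0124 with both labels.
    fn report_duplicate_field(tcx: TyCtxt<'_>, field_name: Ident, span: Span, prev_span: Span) {
        tcx.sess.emit_err(FieldAlreadyDeclared { field_name, span, prev_span });
    }
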
diff --git a/compiler/rustc_typeck/src/lib.rs b/compiler/rustc_typeck/src/lib.rs
deleted file mode 100644
index f98ae46c5..000000000
--- a/compiler/rustc_typeck/src/lib.rs
+++ /dev/null
@@ -1,579 +0,0 @@
-/*!
-
-# typeck
-
-The type checker is responsible for:
-
-1. Determining the type of each expression.
-2. Resolving methods and traits.
-3. Guaranteeing that most type rules are met. ("Most?", you say, "why most?"
- Well, dear reader, read on.)
-
-The main entry point is [`check_crate()`]. Type checking operates in
-several major phases:
-
-1. The collect phase first passes over all items and determines their
- type, without examining their "innards".
-
-2. Variance inference then runs to compute the variance of each parameter.
-
-3. Coherence checks for overlapping or orphaned impls.
-
-4. Finally, the check phase then checks function bodies and so forth.
- Within the check phase, we check each function body one at a time
- (bodies of function expressions are checked as part of the
- containing function). Inference is used to supply types wherever
- they are unknown. The actual checking of a function itself has
- several phases (check, regionck, writeback), as discussed in the
- documentation for the [`check`] module.
-
-The type checker is organized into various submodules, which are documented
-independently:
-
-- astconv: converts the AST representation of types
- into the `ty` representation.
-
-- collect: computes the types of each top-level item and enters them into
- the `tcx.types` table for later use.
-
-- coherence: enforces coherence rules, builds some tables.
-
-- variance: variance inference
-
-- outlives: outlives inference
-
-- check: walks over function bodies and type checks them, inferring types for
- local variables, type parameters, etc as necessary.
-
-- infer: finds the types to use for each type variable such that
- all subtyping and assignment constraints are met. In essence, the check
- module specifies the constraints, and the infer module solves them.
-
-## Note
-
-This API is completely unstable and subject to change.
-
-*/
-
-#![allow(rustc::potential_query_instability)]
-#![doc(html_root_url = "https://doc.rust-lang.org/nightly/nightly-rustc/")]
-#![feature(box_patterns)]
-#![feature(control_flow_enum)]
-#![feature(drain_filter)]
-#![feature(hash_drain_filter)]
-#![feature(if_let_guard)]
-#![feature(is_sorted)]
-#![feature(iter_intersperse)]
-#![feature(label_break_value)]
-#![feature(let_chains)]
-#![feature(let_else)]
-#![feature(min_specialization)]
-#![feature(never_type)]
-#![feature(once_cell)]
-#![feature(slice_partition_dedup)]
-#![feature(try_blocks)]
-#![feature(is_some_with)]
-#![recursion_limit = "256"]
-
-#[macro_use]
-extern crate tracing;
-
-#[macro_use]
-extern crate rustc_middle;
-
-// These are used by Clippy.
-pub mod check;
-pub mod expr_use_visitor;
-
-mod astconv;
-mod bounds;
-mod check_unused;
-mod coherence;
-mod collect;
-mod constrained_generic_params;
-mod errors;
-pub mod hir_wf_check;
-mod impl_wf_check;
-mod mem_categorization;
-mod outlives;
-mod structured_errors;
-mod variance;
-
-use rustc_errors::{struct_span_err, ErrorGuaranteed};
-use rustc_hir as hir;
-use rustc_hir::def_id::DefId;
-use rustc_hir::{Node, CRATE_HIR_ID};
-use rustc_infer::infer::{InferOk, TyCtxtInferExt};
-use rustc_infer::traits::TraitEngineExt as _;
-use rustc_middle::middle;
-use rustc_middle::ty::query::Providers;
-use rustc_middle::ty::{self, Ty, TyCtxt};
-use rustc_middle::util;
-use rustc_session::config::EntryFnType;
-use rustc_span::{symbol::sym, Span, DUMMY_SP};
-use rustc_target::spec::abi::Abi;
-use rustc_trait_selection::infer::InferCtxtExt;
-use rustc_trait_selection::traits::error_reporting::InferCtxtExt as _;
-use rustc_trait_selection::traits::{
- self, ObligationCause, ObligationCauseCode, TraitEngine, TraitEngineExt as _,
-};
-
-use std::iter;
-
-use astconv::AstConv;
-use bounds::Bounds;
-
-fn require_c_abi_if_c_variadic(tcx: TyCtxt<'_>, decl: &hir::FnDecl<'_>, abi: Abi, span: Span) {
- match (decl.c_variadic, abi) {
- // The function has the correct calling convention, or isn't a "C-variadic" function.
- (false, _) | (true, Abi::C { .. }) | (true, Abi::Cdecl { .. }) => {}
- // The function is a "C-variadic" function with an incorrect calling convention.
- (true, _) => {
- let mut err = struct_span_err!(
- tcx.sess,
- span,
- E0045,
- "C-variadic function must have C or cdecl calling convention"
- );
- err.span_label(span, "C-variadics require C or cdecl calling convention").emit();
- }
- }
-}
-
-fn require_same_types<'tcx>(
- tcx: TyCtxt<'tcx>,
- cause: &ObligationCause<'tcx>,
- expected: Ty<'tcx>,
- actual: Ty<'tcx>,
-) -> bool {
- tcx.infer_ctxt().enter(|ref infcx| {
- let param_env = ty::ParamEnv::empty();
- let mut fulfill_cx = <dyn TraitEngine<'_>>::new(infcx.tcx);
- match infcx.at(cause, param_env).eq(expected, actual) {
- Ok(InferOk { obligations, .. }) => {
- fulfill_cx.register_predicate_obligations(infcx, obligations);
- }
- Err(err) => {
- infcx.report_mismatched_types(cause, expected, actual, err).emit();
- return false;
- }
- }
-
- match fulfill_cx.select_all_or_error(infcx).as_slice() {
- [] => true,
- errors => {
- infcx.report_fulfillment_errors(errors, None, false);
- false
- }
- }
- })
-}
-
-fn check_main_fn_ty(tcx: TyCtxt<'_>, main_def_id: DefId) {
- let main_fnsig = tcx.fn_sig(main_def_id);
- let main_span = tcx.def_span(main_def_id);
-
- fn main_fn_diagnostics_hir_id(tcx: TyCtxt<'_>, def_id: DefId, sp: Span) -> hir::HirId {
- if let Some(local_def_id) = def_id.as_local() {
- let hir_id = tcx.hir().local_def_id_to_hir_id(local_def_id);
- let hir_type = tcx.type_of(local_def_id);
- if !matches!(hir_type.kind(), ty::FnDef(..)) {
- span_bug!(sp, "main has a non-function type: found `{}`", hir_type);
- }
- hir_id
- } else {
- CRATE_HIR_ID
- }
- }
-
- fn main_fn_generics_params_span(tcx: TyCtxt<'_>, def_id: DefId) -> Option<Span> {
- if !def_id.is_local() {
- return None;
- }
- let hir_id = tcx.hir().local_def_id_to_hir_id(def_id.expect_local());
- match tcx.hir().find(hir_id) {
- Some(Node::Item(hir::Item { kind: hir::ItemKind::Fn(_, ref generics, _), .. })) => {
- if !generics.params.is_empty() {
- Some(generics.span)
- } else {
- None
- }
- }
- _ => {
- span_bug!(tcx.def_span(def_id), "main has a non-function type");
- }
- }
- }
-
- fn main_fn_where_clauses_span(tcx: TyCtxt<'_>, def_id: DefId) -> Option<Span> {
- if !def_id.is_local() {
- return None;
- }
- let hir_id = tcx.hir().local_def_id_to_hir_id(def_id.expect_local());
- match tcx.hir().find(hir_id) {
- Some(Node::Item(hir::Item { kind: hir::ItemKind::Fn(_, ref generics, _), .. })) => {
- Some(generics.where_clause_span)
- }
- _ => {
- span_bug!(tcx.def_span(def_id), "main has a non-function type");
- }
- }
- }
-
- fn main_fn_asyncness_span(tcx: TyCtxt<'_>, def_id: DefId) -> Option<Span> {
- if !def_id.is_local() {
- return None;
- }
- Some(tcx.def_span(def_id))
- }
-
- fn main_fn_return_type_span(tcx: TyCtxt<'_>, def_id: DefId) -> Option<Span> {
- if !def_id.is_local() {
- return None;
- }
- let hir_id = tcx.hir().local_def_id_to_hir_id(def_id.expect_local());
- match tcx.hir().find(hir_id) {
- Some(Node::Item(hir::Item { kind: hir::ItemKind::Fn(ref fn_sig, _, _), .. })) => {
- Some(fn_sig.decl.output.span())
- }
- _ => {
- span_bug!(tcx.def_span(def_id), "main has a non-function type");
- }
- }
- }
-
- let mut error = false;
- let main_diagnostics_hir_id = main_fn_diagnostics_hir_id(tcx, main_def_id, main_span);
- let main_fn_generics = tcx.generics_of(main_def_id);
- let main_fn_predicates = tcx.predicates_of(main_def_id);
- if main_fn_generics.count() != 0 || !main_fnsig.bound_vars().is_empty() {
- let generics_param_span = main_fn_generics_params_span(tcx, main_def_id);
- let msg = "`main` function is not allowed to have generic \
- parameters";
- let mut diag =
- struct_span_err!(tcx.sess, generics_param_span.unwrap_or(main_span), E0131, "{}", msg);
- if let Some(generics_param_span) = generics_param_span {
- let label = "`main` cannot have generic parameters";
- diag.span_label(generics_param_span, label);
- }
- diag.emit();
- error = true;
- } else if !main_fn_predicates.predicates.is_empty() {
- // generics may bring in implicit predicates, so we skip this check if generics is present.
- let generics_where_clauses_span = main_fn_where_clauses_span(tcx, main_def_id);
- let mut diag = struct_span_err!(
- tcx.sess,
- generics_where_clauses_span.unwrap_or(main_span),
- E0646,
- "`main` function is not allowed to have a `where` clause"
- );
- if let Some(generics_where_clauses_span) = generics_where_clauses_span {
- diag.span_label(generics_where_clauses_span, "`main` cannot have a `where` clause");
- }
- diag.emit();
- error = true;
- }
-
- let main_asyncness = tcx.asyncness(main_def_id);
- if let hir::IsAsync::Async = main_asyncness {
- let mut diag = struct_span_err!(
- tcx.sess,
- main_span,
- E0752,
- "`main` function is not allowed to be `async`"
- );
- let asyncness_span = main_fn_asyncness_span(tcx, main_def_id);
- if let Some(asyncness_span) = asyncness_span {
- diag.span_label(asyncness_span, "`main` function is not allowed to be `async`");
- }
- diag.emit();
- error = true;
- }
-
- for attr in tcx.get_attrs(main_def_id, sym::track_caller) {
- tcx.sess
- .struct_span_err(attr.span, "`main` function is not allowed to be `#[track_caller]`")
- .span_label(main_span, "`main` function is not allowed to be `#[track_caller]`")
- .emit();
- error = true;
- }
-
- if error {
- return;
- }
-
- let expected_return_type;
- if let Some(term_id) = tcx.lang_items().termination() {
- let return_ty = main_fnsig.output();
- let return_ty_span = main_fn_return_type_span(tcx, main_def_id).unwrap_or(main_span);
- if !return_ty.bound_vars().is_empty() {
- let msg = "`main` function return type is not allowed to have generic \
- parameters";
- struct_span_err!(tcx.sess, return_ty_span, E0131, "{}", msg).emit();
- error = true;
- }
- let return_ty = return_ty.skip_binder();
- tcx.infer_ctxt().enter(|infcx| {
- let cause = traits::ObligationCause::new(
- return_ty_span,
- main_diagnostics_hir_id,
- ObligationCauseCode::MainFunctionType,
- );
- let mut fulfillment_cx = traits::FulfillmentContext::new();
- // normalize any potential projections in the return type, then add
- // any possible obligations to the fulfillment context.
- // HACK(ThePuzzlemaker) this feels symptomatic of a problem within
- // checking trait fulfillment, not this here. I'm not sure why it
- // works in the example in `fn test()` given in #88609? This also
- // probably isn't the best way to do this.
- let InferOk { value: norm_return_ty, obligations } = infcx
- .partially_normalize_associated_types_in(
- cause.clone(),
- ty::ParamEnv::empty(),
- return_ty,
- );
- fulfillment_cx.register_predicate_obligations(&infcx, obligations);
- fulfillment_cx.register_bound(
- &infcx,
- ty::ParamEnv::empty(),
- norm_return_ty,
- term_id,
- cause,
- );
- let errors = fulfillment_cx.select_all_or_error(&infcx);
- if !errors.is_empty() {
- infcx.report_fulfillment_errors(&errors, None, false);
- error = true;
- }
- });
- // now we can take the return type of the given main function
- expected_return_type = main_fnsig.output();
- } else {
- // standard () main return type
- expected_return_type = ty::Binder::dummy(tcx.mk_unit());
- }
-
- if error {
- return;
- }
-
- let se_ty = tcx.mk_fn_ptr(expected_return_type.map_bound(|expected_return_type| {
- tcx.mk_fn_sig(iter::empty(), expected_return_type, false, hir::Unsafety::Normal, Abi::Rust)
- }));
-
- require_same_types(
- tcx,
- &ObligationCause::new(
- main_span,
- main_diagnostics_hir_id,
- ObligationCauseCode::MainFunctionType,
- ),
- se_ty,
- tcx.mk_fn_ptr(main_fnsig),
- );
-}
-fn check_start_fn_ty(tcx: TyCtxt<'_>, start_def_id: DefId) {
- let start_def_id = start_def_id.expect_local();
- let start_id = tcx.hir().local_def_id_to_hir_id(start_def_id);
- let start_span = tcx.def_span(start_def_id);
- let start_t = tcx.type_of(start_def_id);
- match start_t.kind() {
- ty::FnDef(..) => {
- if let Some(Node::Item(it)) = tcx.hir().find(start_id) {
- if let hir::ItemKind::Fn(ref sig, ref generics, _) = it.kind {
- let mut error = false;
- if !generics.params.is_empty() {
- struct_span_err!(
- tcx.sess,
- generics.span,
- E0132,
- "start function is not allowed to have type parameters"
- )
- .span_label(generics.span, "start function cannot have type parameters")
- .emit();
- error = true;
- }
- if generics.has_where_clause_predicates {
- struct_span_err!(
- tcx.sess,
- generics.where_clause_span,
- E0647,
- "start function is not allowed to have a `where` clause"
- )
- .span_label(
- generics.where_clause_span,
- "start function cannot have a `where` clause",
- )
- .emit();
- error = true;
- }
- if let hir::IsAsync::Async = sig.header.asyncness {
- let span = tcx.def_span(it.def_id);
- struct_span_err!(
- tcx.sess,
- span,
- E0752,
- "`start` is not allowed to be `async`"
- )
- .span_label(span, "`start` is not allowed to be `async`")
- .emit();
- error = true;
- }
-
- let attrs = tcx.hir().attrs(start_id);
- for attr in attrs {
- if attr.has_name(sym::track_caller) {
- tcx.sess
- .struct_span_err(
- attr.span,
- "`start` is not allowed to be `#[track_caller]`",
- )
- .span_label(
- start_span,
- "`start` is not allowed to be `#[track_caller]`",
- )
- .emit();
- error = true;
- }
- }
-
- if error {
- return;
- }
- }
- }
-
- let se_ty = tcx.mk_fn_ptr(ty::Binder::dummy(tcx.mk_fn_sig(
- [tcx.types.isize, tcx.mk_imm_ptr(tcx.mk_imm_ptr(tcx.types.u8))].iter().cloned(),
- tcx.types.isize,
- false,
- hir::Unsafety::Normal,
- Abi::Rust,
- )));
-
- require_same_types(
- tcx,
- &ObligationCause::new(start_span, start_id, ObligationCauseCode::StartFunctionType),
- se_ty,
- tcx.mk_fn_ptr(tcx.fn_sig(start_def_id)),
- );
- }
- _ => {
- span_bug!(start_span, "start has a non-function type: found `{}`", start_t);
- }
- }
-}
-
-fn check_for_entry_fn(tcx: TyCtxt<'_>) {
- match tcx.entry_fn(()) {
- Some((def_id, EntryFnType::Main)) => check_main_fn_ty(tcx, def_id),
- Some((def_id, EntryFnType::Start)) => check_start_fn_ty(tcx, def_id),
- _ => {}
- }
-}
-
-pub fn provide(providers: &mut Providers) {
- collect::provide(providers);
- coherence::provide(providers);
- check::provide(providers);
- variance::provide(providers);
- outlives::provide(providers);
- impl_wf_check::provide(providers);
- hir_wf_check::provide(providers);
-}
-
-pub fn check_crate(tcx: TyCtxt<'_>) -> Result<(), ErrorGuaranteed> {
- let _prof_timer = tcx.sess.timer("type_check_crate");
-
- // this ensures that later parts of type checking can assume that items
- // have valid types and not error
- // FIXME(matthewjasper) We shouldn't need to use `track_errors`.
- tcx.sess.track_errors(|| {
- tcx.sess.time("type_collecting", || {
- tcx.hir().for_each_module(|module| tcx.ensure().collect_mod_item_types(module))
- });
- })?;
-
- if tcx.features().rustc_attrs {
- tcx.sess.track_errors(|| {
- tcx.sess.time("outlives_testing", || outlives::test::test_inferred_outlives(tcx));
- })?;
- }
-
- tcx.sess.track_errors(|| {
- tcx.sess.time("impl_wf_inference", || {
- tcx.hir().for_each_module(|module| tcx.ensure().check_mod_impl_wf(module))
- });
- })?;
-
- tcx.sess.track_errors(|| {
- tcx.sess.time("coherence_checking", || {
- for &trait_def_id in tcx.all_local_trait_impls(()).keys() {
- tcx.ensure().coherent_trait(trait_def_id);
- }
-
- // these queries are executed for side-effects (error reporting):
- tcx.ensure().crate_inherent_impls(());
- tcx.ensure().crate_inherent_impls_overlap_check(());
- });
- })?;
-
- if tcx.features().rustc_attrs {
- tcx.sess.track_errors(|| {
- tcx.sess.time("variance_testing", || variance::test::test_variance(tcx));
- })?;
- }
-
- tcx.sess.track_errors(|| {
- tcx.sess.time("wf_checking", || {
- tcx.hir().par_for_each_module(|module| tcx.ensure().check_mod_type_wf(module))
- });
- })?;
-
- // NOTE: This is copy/pasted in librustdoc/core.rs and should be kept in sync.
- tcx.sess.time("item_types_checking", || {
- tcx.hir().for_each_module(|module| tcx.ensure().check_mod_item_types(module))
- });
-
- tcx.sess.time("item_bodies_checking", || tcx.typeck_item_bodies(()));
-
- check_unused::check_crate(tcx);
- check_for_entry_fn(tcx);
-
- if let Some(reported) = tcx.sess.has_errors() { Err(reported) } else { Ok(()) }
-}
-
-/// A quasi-deprecated helper used in rustdoc and clippy to get
-/// the type from a HIR node.
-pub fn hir_ty_to_ty<'tcx>(tcx: TyCtxt<'tcx>, hir_ty: &hir::Ty<'_>) -> Ty<'tcx> {
- // In case there are any projections, etc., find the "environment"
- // def-ID that will be used to determine the traits/predicates in
- // scope. This is derived from the enclosing item-like thing.
- let env_def_id = tcx.hir().get_parent_item(hir_ty.hir_id);
- let item_cx = self::collect::ItemCtxt::new(tcx, env_def_id.to_def_id());
- <dyn AstConv<'_>>::ast_ty_to_ty(&item_cx, hir_ty)
-}
-
-pub fn hir_trait_to_predicates<'tcx>(
- tcx: TyCtxt<'tcx>,
- hir_trait: &hir::TraitRef<'_>,
- self_ty: Ty<'tcx>,
-) -> Bounds<'tcx> {
- // In case there are any projections, etc., find the "environment"
- // def-ID that will be used to determine the traits/predicates in
- // scope. This is derived from the enclosing item-like thing.
- let env_def_id = tcx.hir().get_parent_item(hir_trait.hir_ref_id);
- let item_cx = self::collect::ItemCtxt::new(tcx, env_def_id.to_def_id());
- let mut bounds = Bounds::default();
- let _ = <dyn AstConv<'_>>::instantiate_poly_trait_ref(
- &item_cx,
- hir_trait,
- DUMMY_SP,
- ty::BoundConstness::NotConst,
- self_ty,
- &mut bounds,
- true,
- );
-
- bounds
-}
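
A hedged sketch of an entry point with the shape the deleted `check_start_fn_ty` enforces. This assumes the nightly `start` feature of the era of this code; the function name is arbitrary.

    #![feature(start)]

    // Matches the required fn(isize, *const *const u8) -> isize signature:
    // no type parameters, no `where` clause, not `async`, not #[track_caller].
    #[start]
    fn entry(_argc: isize, _argv: *const *const u8) -> isize {
        0
    }
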
diff --git a/compiler/rustc_typeck/src/outlives/mod.rs b/compiler/rustc_typeck/src/outlives/mod.rs
deleted file mode 100644
index 8fa65d51e..000000000
--- a/compiler/rustc_typeck/src/outlives/mod.rs
+++ /dev/null
@@ -1,130 +0,0 @@
-use hir::Node;
-use rustc_hir as hir;
-use rustc_hir::def_id::DefId;
-use rustc_middle::ty::query::Providers;
-use rustc_middle::ty::subst::GenericArgKind;
-use rustc_middle::ty::{self, CratePredicatesMap, ToPredicate, TyCtxt};
-use rustc_span::symbol::sym;
-use rustc_span::Span;
-
-mod explicit;
-mod implicit_infer;
-pub(crate) mod outlives_bounds;
-/// Code to write unit tests for outlives.
-pub mod test;
-mod utils;
-
-pub fn provide(providers: &mut Providers) {
- *providers = Providers { inferred_outlives_of, inferred_outlives_crate, ..*providers };
-}
-
-fn inferred_outlives_of(tcx: TyCtxt<'_>, item_def_id: DefId) -> &[(ty::Predicate<'_>, Span)] {
- let id = tcx.hir().local_def_id_to_hir_id(item_def_id.expect_local());
-
- if matches!(tcx.def_kind(item_def_id), hir::def::DefKind::AnonConst) && tcx.lazy_normalization()
- {
- if tcx.hir().opt_const_param_default_param_hir_id(id).is_some() {
- // In `generics_of` we set the generics' parent to be our parent's parent which means that
-            // we lose out on the predicates of our actual parent if we don't return those predicates here.
- // (See comment in `generics_of` for more information on why the parent shenanigans is necessary)
- //
- // struct Foo<'a, 'b, const N: usize = { ... }>(&'a &'b ());
- // ^^^ ^^^^^^^ the def id we are calling
- // ^^^ inferred_outlives_of on
-            //        parent item we don't have set as the
- // parent of generics returned by `generics_of`
- //
- // In the above code we want the anon const to have predicates in its param env for `'b: 'a`
- let item_def_id = tcx.hir().get_parent_item(id);
- // In the above code example we would be calling `inferred_outlives_of(Foo)` here
- return tcx.inferred_outlives_of(item_def_id);
- }
- }
-
- match tcx.hir().get(id) {
- Node::Item(item) => match item.kind {
- hir::ItemKind::Struct(..) | hir::ItemKind::Enum(..) | hir::ItemKind::Union(..) => {
- let crate_map = tcx.inferred_outlives_crate(());
-
- let predicates = crate_map.predicates.get(&item_def_id).copied().unwrap_or(&[]);
-
- if tcx.has_attr(item_def_id, sym::rustc_outlives) {
- let mut pred: Vec<String> = predicates
- .iter()
- .map(|(out_pred, _)| match out_pred.kind().skip_binder() {
- ty::PredicateKind::RegionOutlives(p) => p.to_string(),
- ty::PredicateKind::TypeOutlives(p) => p.to_string(),
- err => bug!("unexpected predicate {:?}", err),
- })
- .collect();
- pred.sort();
-
- let span = tcx.def_span(item_def_id);
- let mut err = tcx.sess.struct_span_err(span, "rustc_outlives");
- for p in &pred {
- err.note(p);
- }
- err.emit();
- }
-
- debug!("inferred_outlives_of({:?}) = {:?}", item_def_id, predicates);
-
- predicates
- }
-
- _ => &[],
- },
-
- _ => &[],
- }
-}
-
-fn inferred_outlives_crate(tcx: TyCtxt<'_>, (): ()) -> CratePredicatesMap<'_> {
- // Compute a map from each struct/enum/union S to the **explicit**
- // outlives predicates (`T: 'a`, `'a: 'b`) that the user wrote.
- // Typically there won't be many of these, except in older code where
- // they were mandatory. Nonetheless, we have to ensure that every such
- // predicate is satisfied, so they form a kind of base set of requirements
- // for the type.
-
- // Compute the inferred predicates
- let global_inferred_outlives = implicit_infer::infer_predicates(tcx);
-
- // Convert the inferred predicates into the "collected" form the
- // global data structure expects.
- //
- // FIXME -- consider correcting impedance mismatch in some way,
- // probably by updating the global data structure.
- let predicates = global_inferred_outlives
- .iter()
- .map(|(&def_id, set)| {
- let predicates = &*tcx.arena.alloc_from_iter(set.0.iter().filter_map(
- |(ty::OutlivesPredicate(kind1, region2), &span)| {
- match kind1.unpack() {
- GenericArgKind::Type(ty1) => Some((
- ty::Binder::dummy(ty::PredicateKind::TypeOutlives(
- ty::OutlivesPredicate(ty1, *region2),
- ))
- .to_predicate(tcx),
- span,
- )),
- GenericArgKind::Lifetime(region1) => Some((
- ty::Binder::dummy(ty::PredicateKind::RegionOutlives(
- ty::OutlivesPredicate(region1, *region2),
- ))
- .to_predicate(tcx),
- span,
- )),
- GenericArgKind::Const(_) => {
- // Generic consts don't impose any constraints.
- None
- }
- }
- },
- ));
- (def_id, predicates)
- })
- .collect();
-
- ty::CratePredicatesMap { predicates }
-}
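
The `rustc_outlives` branch above is a unit-testing hook rather than a user feature. A hedged sketch of how such a test looks in practice: the nightly `rustc_attrs` gate is assumed, and the predicate in the comment is the expected inference, not verified compiler output.

    #![feature(rustc_attrs)]

    // With the deleted inferred_outlives_of, this deliberately errors and
    // lists the inferred predicates -- here the single bound `T: 'a`.
    #[rustc_outlives]
    struct Ref<'a, T> {
        field: &'a T,
    }
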
diff --git a/compiler/rustc_typeck/src/outlives/outlives_bounds.rs b/compiler/rustc_typeck/src/outlives/outlives_bounds.rs
deleted file mode 100644
index 229a64650..000000000
--- a/compiler/rustc_typeck/src/outlives/outlives_bounds.rs
+++ /dev/null
@@ -1,90 +0,0 @@
-use rustc_hir as hir;
-use rustc_middle::ty::{self, Ty};
-use rustc_trait_selection::infer::InferCtxt;
-use rustc_trait_selection::traits::query::type_op::{self, TypeOp, TypeOpOutput};
-use rustc_trait_selection::traits::query::NoSolution;
-use rustc_trait_selection::traits::{ObligationCause, TraitEngine, TraitEngineExt};
-
-pub use rustc_middle::traits::query::OutlivesBound;
-
-pub trait InferCtxtExt<'tcx> {
- fn implied_outlives_bounds(
- &self,
- param_env: ty::ParamEnv<'tcx>,
- body_id: hir::HirId,
- ty: Ty<'tcx>,
- ) -> Vec<OutlivesBound<'tcx>>;
-}
-
-impl<'cx, 'tcx> InferCtxtExt<'tcx> for InferCtxt<'cx, 'tcx> {
- /// Implied bounds are region relationships that we deduce
- /// automatically. The idea is that (e.g.) a caller must check that a
- /// function's argument types are well-formed immediately before
- /// calling that fn, and hence the *callee* can assume that its
- /// argument types are well-formed. This may imply certain relationships
- /// between generic parameters. For example:
- /// ```
- /// fn foo<'a,T>(x: &'a T) {}
- /// ```
- /// can only be called with a `'a` and `T` such that `&'a T` is WF.
- /// For `&'a T` to be WF, `T: 'a` must hold. So we can assume `T: 'a`.
- ///
- /// # Parameters
- ///
- /// - `param_env`, the where-clauses in scope
- /// - `body_id`, the body-id to use when normalizing assoc types.
- /// Note that this may cause outlives obligations to be injected
- /// into the inference context with this body-id.
- /// - `ty`, the type that we are supposed to assume is WF.
- #[instrument(level = "debug", skip(self, param_env, body_id))]
- fn implied_outlives_bounds(
- &self,
- param_env: ty::ParamEnv<'tcx>,
- body_id: hir::HirId,
- ty: Ty<'tcx>,
- ) -> Vec<OutlivesBound<'tcx>> {
- let span = self.tcx.hir().span(body_id);
- let result = param_env
- .and(type_op::implied_outlives_bounds::ImpliedOutlivesBounds { ty })
- .fully_perform(self);
- let result = match result {
- Ok(r) => r,
- Err(NoSolution) => {
- self.tcx.sess.delay_span_bug(
- span,
- "implied_outlives_bounds failed to solve all obligations",
- );
- return vec![];
- }
- };
-
- let TypeOpOutput { output, constraints, .. } = result;
-
- if let Some(constraints) = constraints {
- // Instantiation may have produced new inference variables and constraints on those
- // variables. Process these constraints.
- let mut fulfill_cx = <dyn TraitEngine<'tcx>>::new(self.tcx);
- let cause = ObligationCause::misc(span, body_id);
- for &constraint in &constraints.outlives {
- let obligation = self.query_outlives_constraint_to_obligation(
- constraint,
- cause.clone(),
- param_env,
- );
- fulfill_cx.register_predicate_obligation(self, obligation);
- }
- if !constraints.member_constraints.is_empty() {
- span_bug!(span, "{:#?}", constraints.member_constraints);
- }
- let errors = fulfill_cx.select_all_or_error(self);
- if !errors.is_empty() {
- self.tcx.sess.delay_span_bug(
- span,
- "implied_outlives_bounds failed to solve obligations from instantiation",
- );
- }
- };
-
- output
- }
-}
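
A small surface-level example of the implied-bound rule documented above, illustrative only and valid on stable Rust: the explicit `T: 'a` on the struct is satisfied inside `hold` without being written on the function, because it is implied by the well-formedness of the `&'a T` parameter.

    // The struct spells out `T: 'a`; the function does not need to.
    struct Holder<'a, T: 'a>(&'a T);

    fn hold<'a, T>(x: &'a T) -> Holder<'a, T> {
        Holder(x) // OK: `T: 'a` is implied by the argument type `&'a T`
    }
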
diff --git a/compiler/rustc_typeck/src/outlives/test.rs b/compiler/rustc_typeck/src/outlives/test.rs
deleted file mode 100644
index eb0e12034..000000000
--- a/compiler/rustc_typeck/src/outlives/test.rs
+++ /dev/null
@@ -1,21 +0,0 @@
-use rustc_errors::struct_span_err;
-use rustc_middle::ty::TyCtxt;
-use rustc_span::symbol::sym;
-
-pub fn test_inferred_outlives(tcx: TyCtxt<'_>) {
- for id in tcx.hir().items() {
-        // For unit testing: check for a special "rustc_outlives"
-        // attribute and, if found, report the inferred predicates in an error.
- if tcx.has_attr(id.def_id.to_def_id(), sym::rustc_outlives) {
- let inferred_outlives_of = tcx.inferred_outlives_of(id.def_id);
- struct_span_err!(
- tcx.sess,
- tcx.def_span(id.def_id),
- E0640,
- "{:?}",
- inferred_outlives_of
- )
- .emit();
- }
- }
-}
diff --git a/compiler/rustc_typeck/src/outlives/utils.rs b/compiler/rustc_typeck/src/outlives/utils.rs
deleted file mode 100644
index b718ca942..000000000
--- a/compiler/rustc_typeck/src/outlives/utils.rs
+++ /dev/null
@@ -1,175 +0,0 @@
-use rustc_infer::infer::outlives::components::{push_outlives_components, Component};
-use rustc_middle::ty::subst::{GenericArg, GenericArgKind};
-use rustc_middle::ty::{self, Region, Ty, TyCtxt};
-use rustc_span::Span;
-use smallvec::smallvec;
-use std::collections::BTreeMap;
-
-/// Tracks the `T: 'a` or `'a: 'b` predicates that we have inferred
-/// must be added to the struct header.
-pub(crate) type RequiredPredicates<'tcx> =
- BTreeMap<ty::OutlivesPredicate<GenericArg<'tcx>, ty::Region<'tcx>>, Span>;
-
-/// Given a requirement `T: 'a` or `'b: 'a`, deduce its
-/// outlives components and add them to `required_predicates`.
-pub(crate) fn insert_outlives_predicate<'tcx>(
- tcx: TyCtxt<'tcx>,
- kind: GenericArg<'tcx>,
- outlived_region: Region<'tcx>,
- span: Span,
- required_predicates: &mut RequiredPredicates<'tcx>,
-) {
- // If the `'a` region is bound within the field type itself, we
- // don't want to propagate this constraint to the header.
- if !is_free_region(outlived_region) {
- return;
- }
-
- match kind.unpack() {
- GenericArgKind::Type(ty) => {
- // `T: 'outlived_region` for some type `T`
- // But T could be a lot of things:
- // e.g., if `T = &'b u32`, then `'b: 'outlived_region` is
- // what we want to add.
- //
- // Or if within `struct Foo<U>` you had `T = Vec<U>`, then
- // we would want to add `U: 'outlived_region`
- let mut components = smallvec![];
- push_outlives_components(tcx, ty, &mut components);
- for component in components {
- match component {
- Component::Region(r) => {
- // This would arise from something like:
- //
- // ```
- // struct Foo<'a, 'b> {
- // x: &'a &'b u32
- // }
- // ```
- //
- // Here `outlived_region = 'a` and `kind = &'b
- // u32`. Decomposing `&'b u32` into
- // components would yield `'b`, and we add the
- // where clause that `'b: 'a`.
- insert_outlives_predicate(
- tcx,
- r.into(),
- outlived_region,
- span,
- required_predicates,
- );
- }
-
- Component::Param(param_ty) => {
- // param_ty: ty::ParamTy
- // This would arise from something like:
- //
- // ```
- // struct Foo<'a, U> {
- // x: &'a Vec<U>
- // }
- // ```
- //
- // Here `outlived_region = 'a` and `kind =
- // Vec<U>`. Decomposing `Vec<U>` into
- // components would yield `U`, and we add the
- // where clause that `U: 'a`.
- let ty: Ty<'tcx> = param_ty.to_ty(tcx);
- required_predicates
- .entry(ty::OutlivesPredicate(ty.into(), outlived_region))
- .or_insert(span);
- }
-
- Component::Projection(proj_ty) => {
- // This would arise from something like:
- //
- // ```
- // struct Foo<'a, T: Iterator> {
- // x: &'a <T as Iterator>::Item
- // }
- // ```
- //
- // Here we want to add an explicit `where <T as Iterator>::Item: 'a`.
- let ty: Ty<'tcx> = tcx.mk_projection(proj_ty.item_def_id, proj_ty.substs);
- required_predicates
- .entry(ty::OutlivesPredicate(ty.into(), outlived_region))
- .or_insert(span);
- }
-
- Component::EscapingProjection(_) => {
- // As above, but the projection involves
- // late-bound regions. Therefore, the WF
- // requirement is not checked in type definition
- // but at fn call site, so ignore it.
- //
- // ```
- // struct Foo<'a, T: Iterator> {
- // x: for<'b> fn(<&'b T as Iterator>::Item)
- // // ^^^^^^^^^^^^^^^^^^^^^^^^^
- // }
- // ```
- //
- // Since `'b` is not in scope on `Foo`, can't
- // do anything here, ignore it.
- }
-
- Component::UnresolvedInferenceVariable(_) => bug!("not using infcx"),
- }
- }
- }
-
- GenericArgKind::Lifetime(r) => {
- if !is_free_region(r) {
- return;
- }
- required_predicates.entry(ty::OutlivesPredicate(kind, outlived_region)).or_insert(span);
- }
-
- GenericArgKind::Const(_) => {
- // Generic consts don't impose any constraints.
- }
- }
-}
-
-fn is_free_region(region: Region<'_>) -> bool {
- // First, screen for regions that might appear in a type header.
- match *region {
- // These correspond to `T: 'a` relationships:
- //
- // struct Foo<'a, T> {
- // field: &'a T, // this would generate a ReEarlyBound referencing `'a`
- // }
- //
- // We care about these, so fall through.
- ty::ReEarlyBound(_) => true,
-
- // These correspond to `T: 'static` relationships which can be
- // rather surprising.
- //
- // struct Foo<'a, T> {
- // field: &'static T, // this would generate a ReStatic
- // }
- ty::ReStatic => false,
-
- // Late-bound regions can appear in `fn` types:
- //
- // struct Foo<T> {
- // field: for<'b> fn(&'b T) // e.g., 'b here
- // }
- //
- // The type above might generate a `T: 'b` bound, but we can
- // ignore it. We can't put it on the struct header anyway.
- ty::ReLateBound(..) => false,
-
- // This can appear in `where Self: ` bounds (#64855):
- //
- // struct Bar<T>(<Self as Foo>::Type) where Self: ;
- // struct Baz<'a>(&'a Self) where Self: ;
- ty::ReEmpty(_) => false,
-
- // These regions don't appear in types from type declarations:
- ty::ReErased | ty::ReVar(..) | ty::RePlaceholder(..) | ty::ReFree(..) => {
- bug!("unexpected region in outlives inference: {:?}", region);
- }
- }
-}
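
The three `Component` cases above correspond to field types like the following. A hedged sketch of the predicates the decomposition is expected to record for each; the comments state the intended inference, not compiler output.

    struct Nested<'a, 'b> { x: &'a &'b u32 }        // records `'b: 'a`
    struct Generic<'a, U> { x: &'a Vec<U> }         // records `U: 'a`
    struct Proj<'a, T: Iterator> { x: &'a T::Item } // records `<T as Iterator>::Item: 'a`
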
diff --git a/compiler/rustc_typeck/src/variance/test.rs b/compiler/rustc_typeck/src/variance/test.rs
deleted file mode 100644
index 2ba87db88..000000000
--- a/compiler/rustc_typeck/src/variance/test.rs
+++ /dev/null
@@ -1,14 +0,0 @@
-use rustc_errors::struct_span_err;
-use rustc_middle::ty::TyCtxt;
-use rustc_span::symbol::sym;
-
-pub fn test_variance(tcx: TyCtxt<'_>) {
-    // For unit testing: check for a special "rustc_variance"
-    // attribute and, if found, report the computed variances in an error.
- for id in tcx.hir().items() {
- if tcx.has_attr(id.def_id.to_def_id(), sym::rustc_variance) {
- let variances_of = tcx.variances_of(id.def_id);
- struct_span_err!(tcx.sess, tcx.def_span(id.def_id), E0208, "{:?}", variances_of).emit();
- }
- }
-}
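
Analogously to the outlives hook, `#[rustc_variance]` reports the computed variances as an E0208 error. A hedged sketch, assuming the nightly `rustc_attrs` gate; the exact rendering of the variance list may differ.

    #![feature(rustc_attrs)]

    // Expected to report something like `[+]` (covariant in T).
    #[rustc_variance]
    struct Wrapper<T>(T);
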